| Column | Type |
|---|---|
| repository_name | string (5–67 chars) |
| func_path_in_repository | string (4–234 chars) |
| func_name | string (0–314 chars) |
| whole_func_string | string (52–3.87M chars) |
| language | 6 classes |
| func_code_string | string (52–3.87M chars) |
| func_documentation_string | string (1–47.2k chars) |
| func_code_url | string (85–339 chars) |
benvanwerkhoven/kernel_tuner | kernel_tuner/core.py | DeviceInterface.create_kernel_instance

```python
def create_kernel_instance(self, kernel_options, params, verbose):
"""create kernel instance from kernel source, parameters, problem size, grid divisors, and so on"""
instance_string = util.get_instance_string(params)
grid_div = (kernel_options.grid_div_x, kernel_options.grid_div_y, kernel_options.grid_div_z)
#insert default block_size_names if needed
if not kernel_options.block_size_names:
kernel_options.block_size_names = util.default_block_size_names
#setup thread block and grid dimensions
threads, grid = util.setup_block_and_grid(kernel_options.problem_size, grid_div, params, kernel_options.block_size_names)
if numpy.prod(threads) > self.dev.max_threads:
if verbose:
print("skipping config", instance_string, "reason: too many threads per block")
return None
#obtain the kernel_string and prepare additional files, if any
temp_files = dict()
kernel_source = kernel_options.kernel_string
if not isinstance(kernel_source, list):
kernel_source = [kernel_source]
name, kernel_string, temp_files = util.prepare_list_of_files(kernel_options.kernel_name, kernel_source, params, grid, threads, kernel_options.block_size_names)
#collect everything we know about this instance and return it
return KernelInstance(name, kernel_string, temp_files, threads, grid, params, kernel_options.arguments)
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L216-L240
benvanwerkhoven/kernel_tuner | kernel_tuner/core.py | DeviceInterface.run_kernel

```python
def run_kernel(self, func, gpu_args, instance):
""" Run a compiled kernel instance on a device """
logging.debug('run_kernel %s', instance.name)
logging.debug('thread block dims (%d, %d, %d)', *instance.threads)
logging.debug('grid dims (%d, %d, %d)', *instance.grid)
try:
self.dev.run_kernel(func, gpu_args, instance.threads, instance.grid)
except Exception as e:
if "too many resources requested for launch" in str(e) or "OUT_OF_RESOURCES" in str(e):
logging.debug('ignoring runtime failure due to too many resources required')
return False
else:
logging.debug('encountered unexpected runtime failure: ' + str(e))
raise e
return True
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L254-L269
benvanwerkhoven/kernel_tuner | kernel_tuner/runners/noodles.py | _chunk_list

```python
def _chunk_list(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/runners/noodles.py#L21-L24
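A minimal usage sketch for `_chunk_list`; the function body is copied from the listing above and the sample data is hypothetical:

```python
def _chunk_list(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

# split ten hypothetical configurations into chunks of size 3
configs = list(range(10))
print(list(_chunk_list(configs, 3)))  # -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
```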
benvanwerkhoven/kernel_tuner | kernel_tuner/runners/noodles.py | NoodlesRunner.run

```python
def run(self, parameter_space, kernel_options, tuning_options):
""" Tune all instances in parameter_space using a multiple threads
:param parameter_space: The parameter space as an iterable.
:type parameter_space: iterable
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
workflow = self._parameter_sweep(parameter_space, kernel_options, self.device_options,
tuning_options)
if tuning_options.verbose:
with NCDisplay(_error_filter) as display:
answer = run_parallel_with_display(workflow, self.max_threads, display)
else:
answer = run_parallel(workflow, self.max_threads)
if answer is None:
print("Tuning did not return any results, did an error occur?")
return None
# Filter out None times
result = []
for chunk in answer:
result += [d for d in chunk if d['time']]
return result, {}
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/runners/noodles.py#L34-L70
benvanwerkhoven/kernel_tuner | kernel_tuner/runners/noodles.py | NoodlesRunner._parameter_sweep

```python
def _parameter_sweep(self, parameter_space, kernel_options, device_options, tuning_options):
"""Build a Noodles workflow by sweeping the parameter space"""
results = []
#randomize parameter space to do pseudo load balancing
parameter_space = list(parameter_space)
random.shuffle(parameter_space)
#split parameter space into chunks
work_per_thread = int(numpy.ceil(len(parameter_space) / float(self.max_threads)))
chunks = _chunk_list(parameter_space, work_per_thread)
for chunk in chunks:
chunked_result = self._run_chunk(chunk, kernel_options, device_options, tuning_options)
results.append(lift(chunked_result))
return gather(*results)
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/runners/noodles.py#L76-L94
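The chunk size used by `_parameter_sweep` is a ceiling division of the search-space size by the number of threads; a sketch with hypothetical numbers:

```python
import numpy

max_threads = 4                    # hypothetical thread count
parameter_space = list(range(10))  # hypothetical 10 configurations

# same computation as in _parameter_sweep above
work_per_thread = int(numpy.ceil(len(parameter_space) / float(max_threads)))
print(work_per_thread)  # -> 3, so the 10 configs become chunks of sizes 3, 3, 3 and 1
```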
benvanwerkhoven/kernel_tuner | kernel_tuner/runners/noodles.py | NoodlesRunner._run_chunk

```python
def _run_chunk(self, chunk, kernel_options, device_options, tuning_options):
"""Benchmark a single kernel instance in the parameter space"""
#detect language and create high-level device interface
self.dev = DeviceInterface(kernel_options.kernel_string, iterations=tuning_options.iterations, **device_options)
#move data to the GPU
gpu_args = self.dev.ready_argument_list(kernel_options.arguments)
results = []
for element in chunk:
params = dict(OrderedDict(zip(tuning_options.tune_params.keys(), element)))
try:
time = self.dev.compile_and_benchmark(gpu_args, params, kernel_options, tuning_options)
params['time'] = time
results.append(params)
except Exception:
params['time'] = None
results.append(params)
return results
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/runners/noodles.py#L99-L122
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/random_sample.py | tune

```python
def tune(runner, kernel_options, device_options, tuning_options):
""" Tune a random sample of sample_fraction fraction in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
tune_params = tuning_options.tune_params
#compute cartesian product of all tunable parameters
parameter_space = itertools.product(*tune_params.values())
#check for search space restrictions
if tuning_options.restrictions is not None:
parameter_space = filter(lambda p: util.check_restrictions(tuning_options.restrictions, p,
tune_params.keys(),
tuning_options.verbose),
parameter_space)
#reduce parameter space to a random sample using sample_fraction
parameter_space = numpy.array(list(parameter_space))
size = len(parameter_space)
fraction = int(numpy.ceil(size * float(tuning_options.sample_fraction)))
sample_indices = numpy.random.choice(range(size), size=fraction, replace=False)
parameter_space = parameter_space[sample_indices]
#call the runner
results, env = runner.run(parameter_space, kernel_options, tuning_options)
return results, env
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/random_sample.py#L9-L55
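The sampling step can be shown in isolation; this sketch uses a hypothetical two-parameter space and a hypothetical sample_fraction of 0.5:

```python
import itertools
import numpy

tune_params = {"block_size_x": [32, 64, 128, 256], "use_padding": [0, 1]}
parameter_space = numpy.array(list(itertools.product(*tune_params.values())))

sample_fraction = 0.5
size = len(parameter_space)  # 8 configurations
fraction = int(numpy.ceil(size * float(sample_fraction)))
sample_indices = numpy.random.choice(range(size), size=fraction, replace=False)
print(parameter_space[sample_indices])  # 4 of the 8 configurations, chosen at random
```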
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | check_argument_type

```python
def check_argument_type(dtype, kernel_argument, i):
"""check if the numpy.dtype matches the type used in the code"""
types_map = {"uint8": ["uchar", "unsigned char", "uint8_t"],
"int8": ["char", "int8_t"],
"uint16": ["ushort", "unsigned short", "uint16_t"],
"int16": ["short", "int16_t"],
"uint32": ["uint", "unsigned int", "uint32_t"],
"int32": ["int", "int32_t"], #discrepancy between OpenCL and C here, long may be 32bits in C
"uint64": ["ulong", "unsigned long", "uint64_t"],
"int64": ["long", "int64_t"],
"float16": ["half"],
"float32": ["float"],
"float64": ["double"]}
if dtype in types_map:
return any([substr in kernel_argument for substr in types_map[dtype]])
else:
return False
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L16-L32
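A simplified, self-contained sketch of the substring matching that `check_argument_type` performs; the trimmed types_map and the example declarations are hypothetical:

```python
import numpy

# trimmed-down version of the types_map in check_argument_type above
types_map = {"float32": ["float"], "int32": ["int", "int32_t"]}

def matches(dtype, kernel_argument):
    # substring match between numpy dtype name and the C parameter declaration
    return any(substr in kernel_argument for substr in types_map.get(dtype, []))

arg = numpy.float32(1.0)
print(matches(str(arg.dtype), "const float *input"))   # -> True
print(matches(str(arg.dtype), "const double *input"))  # -> False
```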
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | check_argument_list

```python
def check_argument_list(kernel_name, kernel_string, args):
""" raise an exception if a kernel arguments do not match host arguments """
kernel_arguments = list()
collected_errors = list()
for iterator in re.finditer(kernel_name + r"[ \n\t]*\(", kernel_string):
kernel_start = iterator.end()
kernel_end = kernel_string.find(")", kernel_start)
if kernel_start != 0:
kernel_arguments.append(kernel_string[kernel_start:kernel_end].split(","))
for arguments_set, arguments in enumerate(kernel_arguments):
collected_errors.append(list())
if len(arguments) != len(args):
collected_errors[arguments_set].append("Kernel and host argument lists do not match in size.")
continue
for (i, arg) in enumerate(args):
kernel_argument = arguments[i]
if not isinstance(arg, (numpy.ndarray, numpy.generic)):
raise TypeError("Argument at position " + str(i) + " of type: " + str(type(arg)) + " should be of type numpy.ndarray or numpy scalar")
correct = True
if isinstance(arg, numpy.ndarray) and not "*" in kernel_argument:
correct = False #array is passed to non-pointer kernel argument
if correct and check_argument_type(str(arg.dtype), kernel_argument, i):
continue
collected_errors[arguments_set].append("Argument at position " + str(i) + " of dtype: " + str(arg.dtype) +
" does not match " + kernel_argument + ".")
if not collected_errors[arguments_set]:
# We assume that if there is a possible list of arguments that matches with the provided one
# it is the right one
return
for errors in collected_errors:
warnings.warn(errors[0], UserWarning)
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L34-L68
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | check_tune_params_list

```python
def check_tune_params_list(tune_params):
""" raise an exception if a tune parameter has a forbidden name """
forbidden_names = ("grid_size_x", "grid_size_y", "grid_size_z")
forbidden_name_substr = ("time", "times")
for name, param in tune_params.items():
if name in forbidden_names:
raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name!")
for forbidden_substr in forbidden_name_substr:
if forbidden_substr in name:
raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name: not allowed to use " + forbidden_substr + " in tune parameter names!") | python | def check_tune_params_list(tune_params):
""" raise an exception if a tune parameter has a forbidden name """
forbidden_names = ("grid_size_x", "grid_size_y", "grid_size_z")
forbidden_name_substr = ("time", "times")
for name, param in tune_params.items():
if name in forbidden_names:
raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name!")
for forbidden_substr in forbidden_name_substr:
if forbidden_substr in name:
raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name: not allowed to use " + forbidden_substr + " in tune parameter names!") | raise an exception if a tune parameter has a forbidden name | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L71-L80 |
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | check_restrictions

```python
def check_restrictions(restrictions, element, keys, verbose):
""" check whether a specific instance meets the search space restrictions """
params = OrderedDict(zip(keys, element))
for restrict in restrictions:
if not eval(replace_param_occurrences(restrict, params)):
if verbose:
print("skipping config", get_instance_string(params), "reason: config fails restriction")
return False
return True
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L106-L114
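Restrictions are plain Python expressions over the parameter names, checked by textual substitution followed by `eval`; a self-contained sketch with a hypothetical restriction:

```python
from collections import OrderedDict

def replace_param_occurrences(string, params):
    """replace occurrences of the tuning params with their current value"""
    for k, v in params.items():
        string = string.replace(k, str(v))
    return string

restriction = "block_size_x * block_size_y <= 1024"  # hypothetical restriction
params = OrderedDict([("block_size_x", 32), ("block_size_y", 16)])
print(eval(replace_param_occurrences(restriction, params)))  # -> True, since 32 * 16 == 512
```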
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | detect_language

```python
def detect_language(lang, kernel_source):
"""attempt to detect language from the kernel_string if not specified"""
if lang is None:
if callable(kernel_source):
raise TypeError("Please specify language when using a code generator function")
kernel_string = get_kernel_string(kernel_source)
if "__global__" in kernel_string:
lang = "CUDA"
elif "__kernel" in kernel_string:
lang = "OpenCL"
else:
lang = "C"
return lang
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L124-L136
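The detection itself is a substring heuristic; a sketch of the same checks without the file handling (the helper name is hypothetical):

```python
def guess_language(kernel_string):
    # same heuristic as detect_language above
    if "__global__" in kernel_string:
        return "CUDA"
    elif "__kernel" in kernel_string:
        return "OpenCL"
    return "C"

print(guess_language("__global__ void k(float *x) {}"))         # -> CUDA
print(guess_language("__kernel void k(__global float *x) {}"))  # -> OpenCL
```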
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | get_config_string

```python
def get_config_string(params, units=None):
""" return a compact string representation of a dictionary """
compact_str_items = []
# first make a list of compact strings for each parameter
for k, v in params.items():
unit = ""
if isinstance(units, dict): # checking against None is not enough: units could be a mock object, which causes errors
unit = units.get(k, "")
compact_str_items.append(k + "=" + str(v) + unit)
# and finally join them
compact_str = ", ".join(compact_str_items)
return compact_str
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L139-L150
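A usage sketch for `get_config_string`, with a compact restatement of the function and hypothetical parameters and units:

```python
def get_config_string(params, units=None):
    # compact restatement of the listed function
    items = []
    for k, v in params.items():
        unit = units.get(k, "") if isinstance(units, dict) else ""
        items.append(k + "=" + str(v) + unit)
    return ", ".join(items)

print(get_config_string({"block_size_x": 128, "time": 1.2}, units={"time": "ms"}))
# -> block_size_x=128, time=1.2ms
```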
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | get_grid_dimensions

```python
def get_grid_dimensions(current_problem_size, params, grid_div, block_size_names):
"""compute grid dims based on problem sizes and listed grid divisors"""
def get_dimension_divisor(divisor_list, default, params):
if divisor_list is None:
if default in params:
divisor_list = [default]
else:
return 1
return numpy.prod([int(eval(replace_param_occurrences(s, params))) for s in divisor_list])
divisors = [get_dimension_divisor(d, block_size_names[i], params) for i, d in enumerate(grid_div)]
return tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d))) for i, d in enumerate(divisors))
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L153-L163
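The grid size in each dimension is the problem size divided by that dimension's divisor, rounded up; a sketch with hypothetical sizes:

```python
import numpy

current_problem_size = (1000, 512, 1)
divisors = (128, 16, 1)  # e.g. the thread block sizes in x and y

# same rounding as the return statement of get_grid_dimensions
grid = tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d)))
             for i, d in enumerate(divisors))
print(grid)  # -> (8, 32, 1)
```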
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | get_kernel_string

```python
def get_kernel_string(kernel_source, params=None):
""" retrieve the kernel source and return as a string
This function processes the passed kernel_source argument, which could be
a function, a string with a filename, or just a string with code already.
If kernel_source is a function, the function is called with instance
parameters in 'params' as the only argument.
If kernel_source looks like a filename, the file is read in, but if
the file does not exist, it is assumed that the string is not a filename
after all.
:param kernel_source: One of the sources for the kernel, could be a
function that generates the kernel code, a string containing a filename
that points to the kernel source, or just a string that contains the code.
:type kernel_source: string or callable
:param params: Dictionary containing the tunable parameters for this specific
kernel instance, only needed when kernel_source is a generator.
:type params: dict
:returns: A string containing the kernel code.
:rtype: string
"""
#logging.debug('get_kernel_string called with %s', str(kernel_source))
logging.debug('get_kernel_string called')
kernel_string = None
if callable(kernel_source):
kernel_string = kernel_source(params)
elif isinstance(kernel_source, str):
if looks_like_a_filename(kernel_source):
kernel_string = read_file(kernel_source) or kernel_source
else:
kernel_string = kernel_source
else:
raise TypeError("Error kernel_source is not a string nor a callable function")
return kernel_string
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L171-L209
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | get_problem_size

```python
def get_problem_size(problem_size, params):
"""compute current problem size"""
if isinstance(problem_size, (str, int, numpy.integer)):
problem_size = (problem_size, )
current_problem_size = [1, 1, 1]
for i, s in enumerate(problem_size):
if isinstance(s, str):
current_problem_size[i] = int(eval(replace_param_occurrences(s, params)))
elif isinstance(s, (int, numpy.integer)):
current_problem_size[i] = s
else:
raise TypeError("Error: problem_size should only contain strings or integers")
return current_problem_size
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L211-L223
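Problem-size entries may be strings that reference tunable parameters; they are substituted and then evaluated. A self-contained sketch (the parameter names are hypothetical):

```python
def replace_param_occurrences(string, params):
    for k, v in params.items():
        string = string.replace(k, str(v))
    return string

problem_size = ("n/vector_width", 1024)  # hypothetical mixed specification
params = {"n": 4096, "vector_width": 4}

current_problem_size = [1, 1, 1]
for i, s in enumerate(problem_size):
    # string entries are substituted and evaluated, integers pass through
    current_problem_size[i] = int(eval(replace_param_occurrences(s, params))) if isinstance(s, str) else s
print(current_problem_size)  # -> [1024, 1024, 1]
```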
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | get_temp_filename

```python
def get_temp_filename(suffix=None):
""" return a string in the form of temp_X, where X is a large integer """
file = tempfile.mkstemp(suffix=suffix or "", prefix="temp_", dir=os.getcwd()) # or "" for Python 2 compatibility
os.close(file[0])
return file[1]
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L225-L229
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | get_thread_block_dimensions

```python
def get_thread_block_dimensions(params, block_size_names=None):
"""thread block size from tuning params, currently using convention"""
if not block_size_names:
block_size_names = default_block_size_names
block_size_x = params.get(block_size_names[0], 256)
block_size_y = params.get(block_size_names[1], 1)
block_size_z = params.get(block_size_names[2], 1)
return (int(block_size_x), int(block_size_y), int(block_size_z))
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L231-L239
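A sketch of the lookup with hypothetical tuning parameters; unspecified dimensions fall back to the defaults (256, 1, 1):

```python
default_block_size_names = ["block_size_x", "block_size_y", "block_size_z"]

params = {"block_size_x": 64, "block_size_y": 4}  # hypothetical tuning parameters
defaults = (256, 1, 1)
threads = tuple(int(params.get(name, default))
                for name, default in zip(default_block_size_names, defaults))
print(threads)  # -> (64, 4, 1)
```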
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | looks_like_a_filename

```python
def looks_like_a_filename(kernel_source):
""" attempt to detect whether source code or a filename was passed """
logging.debug('looks_like_a_filename called')
result = False
if isinstance(kernel_source, str):
result = True
#test if not too long
if len(kernel_source) > 250:
result = False
#test that it contains no special characters
for c in "();{}\\":
if c in kernel_source:
result = False
#just a safeguard for stuff that looks like code
for s in ["__global__ ", "__kernel ", "void ", "float "]:
if s in kernel_source:
result = False
#string must contain substring ".c", ".opencl", or ".F"
result = result and any([s in kernel_source for s in (".c", ".opencl", ".F")])
logging.debug('kernel_source is a filename: %s' % str(result))
return result
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L241-L261
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | prepare_kernel_string

```python
def prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names):
""" prepare kernel string for compilation
Prepends the kernel with a series of C preprocessor defines specific
to this kernel instance:
* the thread block dimensions
* the grid dimensions
* tunable parameters
Additionally, the name of the kernel is replaced with an instance-specific name. This
is done to prevent the kernel compilation from being skipped by PyCUDA and/or PyOpenCL,
which may use caching to save compilation time. This feature could lead to strange bugs
in the source code if the name of the kernel is also used for other purposes.
:param kernel_name: Name of the kernel.
:type kernel_name: string
:param kernel_string: One of the source files of the kernel as a string containing code.
:type kernel_string: string
:param params: A dictionary containing the tunable parameters specific to this instance.
:type params: dict
:param grid: A tuple with the grid dimensions for this specific instance.
:type grid: tuple(x,y,z)
:param threads: A tuple with the thread block dimensions for this specific instance.
:type threads: tuple(x,y,z)
:param block_size_names: A tuple with the names of the thread block dimensions used
in the code. By default this is ["block_size_x", ...], but the user
may supply different names if they prefer.
:type block_size_names: tuple(string)
:returns: A string containing the source code made specific to this kernel instance.
:rtype: string
"""
logging.debug('prepare_kernel_string called for %s', kernel_name)
grid_dim_names = ["grid_size_x", "grid_size_y", "grid_size_z"]
for i, g in enumerate(grid):
kernel_string = "#define " + grid_dim_names[i] + " " + str(g) + "\n" + kernel_string
for i, g in enumerate(threads):
kernel_string = "#define " + block_size_names[i] + " " + str(g) + "\n" + kernel_string
for k, v in params.items():
if k not in block_size_names:
kernel_string = "#define " + k + " " + str(v) + "\n" + kernel_string
name = kernel_name
#name = kernel_name + "_" + get_instance_string(params)
#kernel_string = kernel_string.replace(kernel_name, name)
return name, kernel_string
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L263-L315
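What the preprocessor-define prepending produces, as a sketch; the kernel source and parameter values are hypothetical:

```python
kernel_string = "__global__ void vector_add(float *c, float *a, float *b) { /* ... */ }"
grid = (256, 1, 1)
threads = (128, 1, 1)
params = {"block_size_x": 128, "tiling_factor": 2}
block_size_names = ["block_size_x", "block_size_y", "block_size_z"]
grid_dim_names = ["grid_size_x", "grid_size_y", "grid_size_z"]

# same prepend loops as in prepare_kernel_string above
for i, g in enumerate(grid):
    kernel_string = "#define " + grid_dim_names[i] + " " + str(g) + "\n" + kernel_string
for i, g in enumerate(threads):
    kernel_string = "#define " + block_size_names[i] + " " + str(g) + "\n" + kernel_string
for k, v in params.items():
    if k not in block_size_names:
        kernel_string = "#define " + k + " " + str(v) + "\n" + kernel_string
print(kernel_string.splitlines()[0])  # -> #define tiling_factor 2
```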
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | prepare_list_of_files

```python
def prepare_list_of_files(kernel_name, kernel_file_list, params, grid, threads, block_size_names):
""" prepare the kernel string along with any additional files
The first file in the list is allowed to include or read in the others
The files beyond the first are considered additional files that may also contain tunable parameters
For each file beyond the first this function creates a temporary file with
preprocessor statements inserted. Occurrences of the original filenames in the
first file are replaced with their temporary counterparts.
:param kernel_file_list: A list of filenames. The first file in the list is
allowed to read or include the other files in the list. All files
will have access to the tunable parameters.
:type kernel_file_list: list(string)
:param params: A dictionary with the tunable parameters for this particular
instance.
:type params: dict()
:param grid: The grid dimensions for this instance. The grid dimensions are
also inserted into the code as if they are tunable parameters for
convenience.
:type grid: tuple()
"""
temp_files = dict()
kernel_string = get_kernel_string(kernel_file_list[0], params)
name, kernel_string = prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)
if len(kernel_file_list) > 1:
for f in kernel_file_list[1:]:
#generate temp filename with the same extension
temp_file = get_temp_filename(suffix="." + f.split(".")[-1])
temp_files[f] = temp_file
#add preprocessor statements to the additional file
_, temp_file_string = prepare_kernel_string(kernel_name, get_kernel_string(f, params), params, grid, threads, block_size_names)
write_file(temp_file, temp_file_string)
#replace occurrences of the additional file's name in the first kernel_string with the name of the temp file
kernel_string = kernel_string.replace(f, temp_file)
return name, kernel_string, temp_files
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L317-L358
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | read_file

```python
def read_file(filename):
""" return the contents of the file named filename or None if file not found """
if os.path.isfile(filename):
with open(filename, 'r') as f:
return f.read()
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L360-L364
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | replace_param_occurrences

```python
def replace_param_occurrences(string, params):
"""replace occurrences of the tuning params with their current value"""
for k, v in params.items():
string = string.replace(k, str(v))
return string
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L366-L370
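A usage sketch; note that the substitution is purely textual, so a parameter whose name is a substring of another parameter's name could be replaced inside it:

```python
def replace_param_occurrences(string, params):
    """replace occurrences of the tuning params with their current value"""
    for k, v in params.items():
        string = string.replace(k, str(v))
    return string

print(replace_param_occurrences("block_size_x * tile_size",
                                {"block_size_x": 64, "tile_size": 4}))
# -> 64 * 4
```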
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | setup_block_and_grid

```python
def setup_block_and_grid(problem_size, grid_div, params, block_size_names=None):
"""compute problem size, thread block and grid dimensions for this kernel"""
threads = get_thread_block_dimensions(params, block_size_names)
current_problem_size = get_problem_size(problem_size, params)
grid = get_grid_dimensions(current_problem_size, params, grid_div, block_size_names)
return threads, grid
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L372-L377
benvanwerkhoven/kernel_tuner | kernel_tuner/util.py | write_file

```python
def write_file(filename, string):
"""dump the contents of string to a file called filename"""
import sys
#ugly fix, hopefully we can find a better one
if sys.version_info[0] >= 3:
with open(filename, 'w', encoding="utf-8") as f:
f.write(string)
else:
with open(filename, 'w') as f:
f.write(string.encode("utf-8")) | python | def write_file(filename, string):
"""dump the contents of string to a file called filename"""
import sys
#ugly fix, hopefully we can find a better one
if sys.version_info[0] >= 3:
with open(filename, 'w', encoding="utf-8") as f:
f.write(string)
else:
with open(filename, 'w') as f:
f.write(string.encode("utf-8")) | dump the contents of string to a file called filename | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L379-L388 |
benvanwerkhoven/kernel_tuner | kernel_tuner/opencl.py | OpenCLFunctions.ready_argument_list

```python
def ready_argument_list(self, arguments):
"""ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the OpenCL kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to an OpenCL kernel.
:rtype: list( pyopencl.Buffer, numpy.int32, ... )
"""
gpu_args = []
for arg in arguments:
# if the argument is a numpy array, copy it to device memory
if isinstance(arg, numpy.ndarray):
gpu_args.append(cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf=arg))
else: # if not an array, just pass argument along
gpu_args.append(arg)
return gpu_args
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/opencl.py#L52-L70
benvanwerkhoven/kernel_tuner | kernel_tuner/opencl.py | OpenCLFunctions.compile

```python
def compile(self, kernel_name, kernel_string):
"""call the OpenCL compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The OpenCL kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: An OpenCL kernel that can be called directly.
:rtype: pyopencl.Kernel
"""
prg = cl.Program(self.ctx, kernel_string).build(options=self.compiler_options)
func = getattr(prg, kernel_name)
return func
```

language: python | source: https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/opencl.py#L72-L87
benvanwerkhoven/kernel_tuner | kernel_tuner/opencl.py | OpenCLFunctions.benchmark

```python
def benchmark(self, func, gpu_args, threads, grid, times):
"""runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly; the number of
iterations is set during the creation of OpenCLFunctions. Benchmark returns
a robust average: from all measurements, the fastest and slowest runs are
discarded and the rest are included in the returned average. The reason for
this is to be robust against initialization artifacts and other exceptional
cases.
:param func: A PyOpenCL kernel compiled for this specific kernel configuration
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int)
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
time = []
for _ in range(self.iterations):
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait()
time.append((event.profile.end - event.profile.start)*1e-6)
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time) | python | def benchmark(self, func, gpu_args, threads, grid, times):
"""runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly; the number of
iterations is set during the creation of OpenCLFunctions. Benchmark returns
a robust average: of all measurements, the fastest and slowest runs are
discarded and the rest are included in the returned average. This makes the
result robust against initialization artifacts and other exceptional cases.
:param func: A PyOpenCL kernel compiled for this specific kernel configuration
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int, int)
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
time = []
for _ in range(self.iterations):
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait()
time.append((event.profile.end - event.profile.start)*1e-6)
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time) | runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly; the number of
iterations is set during the creation of OpenCLFunctions. Benchmark returns
a robust average: of all measurements, the fastest and slowest runs are
discarded and the rest are included in the returned average. This makes the
result robust against initialization artifacts and other exceptional cases.
:param func: A PyOpenCL kernel compiled for this specific kernel configuration
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int)
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/opencl.py#L89-L136 |
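
The trimming step at the end of benchmark can be shown in isolation. A minimal sketch with made-up timings in milliseconds:

import numpy

# hypothetical timings; the first run is often slower due to warm-up effects
time = sorted([3.10, 1.02, 1.01, 1.00, 1.03, 0.99, 1.65])
if len(time) > 4:
    robust_average = numpy.mean(time[1:-1])  # drop the fastest and slowest run
else:
    robust_average = numpy.mean(time)
print(robust_average)  # the 3.10 ms outlier no longer skews the mean
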
benvanwerkhoven/kernel_tuner | kernel_tuner/opencl.py | OpenCLFunctions.run_kernel | def run_kernel(self, func, gpu_args, threads, grid):
"""runs the OpenCL kernel passed as 'func'
:param func: An OpenCL Kernel
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int, int)
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait() | python | def run_kernel(self, func, gpu_args, threads, grid):
"""runs the OpenCL kernel passed as 'func'
:param func: An OpenCL Kernel
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int, int)
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait() | runs the OpenCL kernel passed as 'func'
:param func: An OpenCL Kernel
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int, int) | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/opencl.py#L138-L160
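
Note that OpenCL expects the total NDRange size per dimension, not the number of work groups, which is why grid and threads are multiplied above. A quick sketch with illustrative sizes:

threads = (16, 16, 1)  # work group size (local size)
grid = (8, 4, 1)       # number of work groups per dimension

global_size = tuple(g * t for g, t in zip(grid, threads))
print(global_size)     # (128, 64, 1): total work items per dimension
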
benvanwerkhoven/kernel_tuner | kernel_tuner/opencl.py | OpenCLFunctions.memset | def memset(self, buffer, value, size):
"""set the memory in allocation to the value in value
:param allocation: An OpenCL Buffer to fill
:type allocation: pyopencl.Buffer
:param value: The value to set the memory to
:type value: a single 32-bit int
:param size: The size of to the allocation unit in bytes
:type size: int
"""
if isinstance(buffer, cl.Buffer):
try:
cl.enqueue_fill_buffer(self.queue, buffer, numpy.uint32(value), 0, size)
except AttributeError:
src=numpy.zeros(size, dtype='uint8')+numpy.uint8(value)
cl.enqueue_copy(self.queue, buffer, src) | python | def memset(self, buffer, value, size):
"""set the memory in allocation to the value in value
:param allocation: An OpenCL Buffer to fill
:type allocation: pyopencl.Buffer
:param value: The value to set the memory to
:type value: a single 32-bit int
:param size: The size of to the allocation unit in bytes
:type size: int
"""
if isinstance(buffer, cl.Buffer):
try:
cl.enqueue_fill_buffer(self.queue, buffer, numpy.uint32(value), 0, size)
except AttributeError:
src=numpy.zeros(size, dtype='uint8')+numpy.uint8(value)
cl.enqueue_copy(self.queue, buffer, src) | set the memory in the given buffer to the given value
:param buffer: An OpenCL Buffer to fill
:type buffer: pyopencl.Buffer
:param value: The value to set the memory to
:type value: a single 32-bit int
:param size: The size of the allocation unit in bytes
:type size: int | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/opencl.py#L162-L180 |
benvanwerkhoven/kernel_tuner | kernel_tuner/opencl.py | OpenCLFunctions.memcpy_dtoh | def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: An OpenCL Buffer to copy data from
:type src: pyopencl.Buffer
"""
if isinstance(src, cl.Buffer):
cl.enqueue_copy(self.queue, dest, src) | python | def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: An OpenCL Buffer to copy data from
:type src: pyopencl.Buffer
"""
if isinstance(src, cl.Buffer):
cl.enqueue_copy(self.queue, dest, src) | perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: An OpenCL Buffer to copy data from
:type src: pyopencl.Buffer | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/opencl.py#L182-L192 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/diff_evo.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
tuning_options["scaling"] = False
#build a bounds array as needed for the optimizer
bounds = get_bounds(tuning_options.tune_params)
args = (kernel_options, tuning_options, runner, results, cache)
#call the differential evolution optimizer
opt_result = differential_evolution(_cost_func, bounds, args, maxiter=1,
polish=False, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
tuning_options["scaling"] = False
#build a bounds array as needed for the optimizer
bounds = get_bounds(tuning_options.tune_params)
args = (kernel_options, tuning_options, runner, results, cache)
#call the differential evolution optimizer
opt_result = differential_evolution(_cost_func, bounds, args, maxiter=1,
polish=False, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/diff_evo.py#L8-L48 |
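
The shape of the scipy call can be demonstrated on a toy cost function; the quadratic below is a hypothetical stand-in for _cost_func and the bounds are illustrative:

from scipy.optimize import differential_evolution

def cost(x):
    # stand-in for _cost_func over two tunable parameters
    return (x[0] - 64.0) ** 2 + (x[1] - 4.0) ** 2

bounds = [(16, 256), (1, 8)]  # one (min, max) pair per parameter, as get_bounds builds
result = differential_evolution(cost, bounds, maxiter=1, polish=False, disp=False)
print(result.x, result.fun)
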
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/pso.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
#scale variables in x because PSO works with velocities to visit different configurations
tuning_options["scaling"] = True
#using this instead of get_bounds because scaling is used
bounds, _, _ = get_bounds_x0_eps(tuning_options)
args = (kernel_options, tuning_options, runner, results, cache)
num_particles = 20
maxiter = 100
best_time_global = 1e20
best_position_global = []
# init particle swarm
swarm = []
for i in range(0, num_particles):
swarm.append(Particle(bounds, args))
for i in range(maxiter):
if tuning_options.verbose:
print("start iteration ", i, "best time global", best_time_global)
# evaluate particle positions
for j in range(num_particles):
swarm[j].evaluate(_cost_func)
# update global best if needed
if swarm[j].time <= best_time_global:
best_position_global = swarm[j].position
best_time_global = swarm[j].time
# update particle velocities and positions
for j in range(0, num_particles):
swarm[j].update_velocity(best_position_global)
swarm[j].update_position(bounds)
if tuning_options.verbose:
print('Final result:')
print(best_position_global)
print(best_time_global)
return results, runner.dev.get_environment() | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
#scale variables in x because PSO works with velocities to visit different configurations
tuning_options["scaling"] = True
#using this instead of get_bounds because scaling is used
bounds, _, _ = get_bounds_x0_eps(tuning_options)
args = (kernel_options, tuning_options, runner, results, cache)
num_particles = 20
maxiter = 100
best_time_global = 1e20
best_position_global = []
# init particle swarm
swarm = []
for i in range(0, num_particles):
swarm.append(Particle(bounds, args))
for i in range(maxiter):
if tuning_options.verbose:
print("start iteration ", i, "best time global", best_time_global)
# evaluate particle positions
for j in range(num_particles):
swarm[j].evaluate(_cost_func)
# update global best if needed
if swarm[j].time <= best_time_global:
best_position_global = swarm[j].position
best_time_global = swarm[j].time
# update particle velocities and positions
for j in range(0, num_particles):
swarm[j].update_velocity(best_position_global)
swarm[j].update_position(bounds)
if tuning_options.verbose:
print('Final result:')
print(best_position_global)
print(best_time_global)
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/pso.py#L10-L79 |
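
The Particle class itself is not shown in this file; a minimal sketch of the canonical velocity update it presumably performs, where the inertia and acceleration weights w, c1, c2 are illustrative assumptions:

import random

w, c1, c2 = 0.5, 1.0, 2.0  # assumed coefficients, not taken from the source

def update_velocity(velocity, position, best_own, best_global):
    # pull each coordinate towards the particle's own best and the global best
    return [w * v + c1 * random.random() * (p - x) + c2 * random.random() * (g - x)
            for v, x, p, g in zip(velocity, position, best_own, best_global)]
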
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/genetic_algorithm.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
dna_size = len(tuning_options.tune_params.keys())
pop_size = 20
generations = 100
tuning_options["scaling"] = False
tune_params = tuning_options.tune_params
population = random_population(dna_size, pop_size, tune_params)
best_time = 1e20
all_results = []
cache = {}
for generation in range(generations):
if tuning_options.verbose:
print("Generation %d, best_time %f" % (generation, best_time))
#determine fitness of population members
weighted_population = []
for dna in population:
time = _cost_func(dna, kernel_options, tuning_options, runner, all_results, cache)
weighted_population.append((dna, time))
population = []
#'best_time' is used only for printing
if tuning_options.verbose and all_results:
best_time = min(all_results, key=lambda x: x["time"])["time"]
#population is sorted such that better configs have higher chance of reproducing
weighted_population.sort(key=lambda x: x[1])
#crossover and mutate
for _ in range(pop_size//2):
ind1 = weighted_choice(weighted_population)
ind2 = weighted_choice(weighted_population)
ind1, ind2 = crossover(ind1, ind2)
population.append(mutate(ind1, dna_size, tune_params))
population.append(mutate(ind2, dna_size, tune_params))
return all_results, runner.dev.get_environment() | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
dna_size = len(tuning_options.tune_params.keys())
pop_size = 20
generations = 100
tuning_options["scaling"] = False
tune_params = tuning_options.tune_params
population = random_population(dna_size, pop_size, tune_params)
best_time = 1e20
all_results = []
cache = {}
for generation in range(generations):
if tuning_options.verbose:
print("Generation %d, best_time %f" % (generation, best_time))
#determine fitness of population members
weighted_population = []
for dna in population:
time = _cost_func(dna, kernel_options, tuning_options, runner, all_results, cache)
weighted_population.append((dna, time))
population = []
#'best_time' is used only for printing
if tuning_options.verbose and all_results:
best_time = min(all_results, key=lambda x: x["time"])["time"]
#population is sorted such that better configs have higher chance of reproducing
weighted_population.sort(key=lambda x: x[1])
#crossover and mutate
for _ in range(pop_size//2):
ind1 = weighted_choice(weighted_population)
ind2 = weighted_choice(weighted_population)
ind1, ind2 = crossover(ind1, ind2)
population.append(mutate(ind1, dna_size, tune_params))
population.append(mutate(ind2, dna_size, tune_params))
return all_results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/genetic_algorithm.py#L8-L73 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/genetic_algorithm.py | weighted_choice | def weighted_choice(population):
"""Randomly select, fitness determines probability of being selected"""
random_number = random.betavariate(1, 2.5) #increased probability of selecting members early in the list
#random_number = random.random()
ind = int(random_number*len(population))
ind = min(max(ind, 0), len(population)-1)
return population[ind][0] | python | def weighted_choice(population):
"""Randomly select, fitness determines probability of being selected"""
random_number = random.betavariate(1, 2.5) #increased probability of selecting members early in the list
#random_number = random.random()
ind = int(random_number*len(population))
ind = min(max(ind, 0), len(population)-1)
return population[ind][0] | Randomly select a member; fitness determines the probability of being selected | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/genetic_algorithm.py#L77-L83
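
The bias of betavariate(1, 2.5) over a uniform draw can be checked empirically; with the population sorted best-first, low indices are selected far more often:

import random

counts = [0] * 5
for _ in range(100000):
    ind = int(random.betavariate(1, 2.5) * 5)
    counts[min(max(ind, 0), 4)] += 1
print(counts)  # roughly decreasing: index 0 is drawn most often
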
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/genetic_algorithm.py | random_population | def random_population(dna_size, pop_size, tune_params):
"""create a random population"""
population = []
for _ in range(pop_size):
dna = []
for i in range(dna_size):
dna.append(random_val(i, tune_params))
population.append(dna)
return population | python | def random_population(dna_size, pop_size, tune_params):
"""create a random population"""
population = []
for _ in range(pop_size):
dna = []
for i in range(dna_size):
dna.append(random_val(i, tune_params))
population.append(dna)
return population | create a random population | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/genetic_algorithm.py#L85-L93 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/genetic_algorithm.py | random_val | def random_val(index, tune_params):
"""return a random value for a parameter"""
key = list(tune_params.keys())[index]
return random.choice(tune_params[key]) | python | def random_val(index, tune_params):
"""return a random value for a parameter"""
key = list(tune_params.keys())[index]
return random.choice(tune_params[key]) | return a random value for a parameter | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/genetic_algorithm.py#L95-L98 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/genetic_algorithm.py | mutate | def mutate(dna, dna_size, tune_params):
"""Mutate DNA with 1/mutation_chance chance"""
dna_out = []
mutation_chance = 10
for i in range(dna_size):
if int(random.random()*mutation_chance) == 1:
dna_out.append(random_val(i, tune_params))
else:
dna_out.append(dna[i])
return dna_out | python | def mutate(dna, dna_size, tune_params):
"""Mutate DNA with 1/mutation_chance chance"""
dna_out = []
mutation_chance = 10
for i in range(dna_size):
if int(random.random()*mutation_chance) == 1:
dna_out.append(random_val(i, tune_params))
else:
dna_out.append(dna[i])
return dna_out | Mutate DNA with 1/mutation_chance chance | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/genetic_algorithm.py#L100-L109 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/genetic_algorithm.py | crossover | def crossover(dna1, dna2):
"""crossover dna1 and dna2 at a random index"""
pos = int(random.random()*len(dna1))
if random.random() < 0.5:
return (dna1[:pos]+dna2[pos:], dna2[:pos]+dna1[pos:])
else:
return (dna2[:pos]+dna1[pos:], dna1[:pos]+dna2[pos:]) | python | def crossover(dna1, dna2):
"""crossover dna1 and dna2 at a random index"""
pos = int(random.random()*len(dna1))
if random.random() < 0.5:
return (dna1[:pos]+dna2[pos:], dna2[:pos]+dna1[pos:])
else:
return (dna2[:pos]+dna1[pos:], dna1[:pos]+dna2[pos:]) | crossover dna1 and dna2 at a random index | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/genetic_algorithm.py#L111-L117 |
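
A quick demonstration on two illustrative parameter lists; each child takes a prefix from one parent and the suffix from the other:

import random

random.seed(0)  # fixed seed so the example is reproducible
dna1 = [16, 1, 0, 4]
dna2 = [128, 8, 1, 2]

pos = int(random.random() * len(dna1))
child1, child2 = dna1[:pos] + dna2[pos:], dna2[:pos] + dna1[pos:]
print(pos, child1, child2)
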
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
method = tuning_options.method
#scale variables in x to make 'eps' relevant for multiple variables
tuning_options["scaling"] = True
bounds, x0, _ = get_bounds_x0_eps(tuning_options)
kwargs = setup_method_arguments(method, bounds)
options = setup_method_options(method, tuning_options)
#not all methods support 'disp' option
if method not in ['TNC']:
options['disp'] = tuning_options.verbose
args = (kernel_options, tuning_options, runner, results, cache)
opt_result = scipy.optimize.minimize(_cost_func, x0, args=args, method=method, options=options, **kwargs)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
method = tuning_options.method
#scale variables in x to make 'eps' relevant for multiple variables
tuning_options["scaling"] = True
bounds, x0, _ = get_bounds_x0_eps(tuning_options)
kwargs = setup_method_arguments(method, bounds)
options = setup_method_options(method, tuning_options)
#not all methods support 'disp' option
if method not in ['TNC']:
options['disp'] = tuning_options.verbose
args = (kernel_options, tuning_options, runner, results, cache)
opt_result = scipy.optimize.minimize(_cost_func, x0, args=args, method=method, options=options, **kwargs)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L10-L57 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | _cost_func | def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ",".join([str(i) for i in x])
if x_key in cache:
return cache[x_key]
#snap values in x to the nearest actual value for each parameter, unscaling x first if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ",".join([str(i) for i in params])
if x_int in cache:
return cache[x_int]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
#compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time']
cache[x_int] = error_time
cache[x_key] = error_time
return error_time | python | def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ",".join([str(i) for i in x])
if x_key in cache:
return cache[x_key]
#snap values in x to the nearest actual value for each parameter, unscaling x first if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ",".join([str(i) for i in params])
if x_int in cache:
return cache[x_int]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
#compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time']
cache[x_int] = error_time
cache[x_key] = error_time
return error_time | Cost function used by minimize | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L60-L103 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | get_bounds_x0_eps | def get_bounds_x0_eps(tuning_options):
"""compute bounds, x0 (the initial guess), and eps"""
values = tuning_options.tune_params.values()
if tuning_options.scaling:
#bounds = [(0, 1) for _ in values]
#x0 = [0.5 for _ in bounds]
eps = numpy.amin([1.0/len(v) for v in values])
#reducing interval from [0, 1] to [0, eps*len(v)]
bounds = [(0, eps*len(v)) for v in values]
x0 = [0.5*eps*len(v) for v in values]
else:
bounds = get_bounds(tuning_options.tune_params)
x0 = [(min_v+max_v)/2.0 for (min_v, max_v) in bounds]
eps = 1e9
for v_list in values:
vals = numpy.sort(v_list)
eps = min(eps, numpy.amin(numpy.gradient(vals)))
tuning_options["eps"] = eps
logging.debug('get_bounds_x0_eps called')
logging.debug('bounds ' + str(bounds))
logging.debug('x0 ' + str(x0))
logging.debug('eps ' + str(eps))
return bounds, x0, eps | python | def get_bounds_x0_eps(tuning_options):
"""compute bounds, x0 (the initial guess), and eps"""
values = tuning_options.tune_params.values()
if tuning_options.scaling:
#bounds = [(0, 1) for _ in values]
#x0 = [0.5 for _ in bounds]
eps = numpy.amin([1.0/len(v) for v in values])
#reducing interval from [0, 1] to [0, eps*len(v)]
bounds = [(0, eps*len(v)) for v in values]
x0 = [0.5*eps*len(v) for v in values]
else:
bounds = get_bounds(tuning_options.tune_params)
x0 = [(min_v+max_v)/2.0 for (min_v, max_v) in bounds]
eps = 1e9
for v_list in values:
vals = numpy.sort(v_list)
eps = min(eps, numpy.amin(numpy.gradient(vals)))
tuning_options["eps"] = eps
logging.debug('get_bounds_x0_eps called')
logging.debug('bounds ' + str(bounds))
logging.debug('x0 ' + str(x0))
logging.debug('eps ' + str(eps))
return bounds, x0, eps | compute bounds, x0 (the initial guess), and eps | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L107-L133 |
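
A worked example of the scaled branch, using two illustrative parameters:

tune_params = {"block_size_x": [16, 32, 64, 128], "vector": [1, 2]}

eps = min(1.0 / len(v) for v in tune_params.values())       # min(1/4, 1/2) = 0.25
bounds = [(0, eps * len(v)) for v in tune_params.values()]  # [(0, 1.0), (0, 0.5)]
x0 = [0.5 * eps * len(v) for v in tune_params.values()]     # [0.5, 0.25]
print(bounds, x0, eps)
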
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | get_bounds | def get_bounds(tune_params):
""" create a bounds array from the tunable parameters """
bounds = []
for values in tune_params.values():
sorted_values = numpy.sort(values)
bounds.append((sorted_values[0], sorted_values[-1]))
return bounds | python | def get_bounds(tune_params):
""" create a bounds array from the tunable parameters """
bounds = []
for values in tune_params.values():
sorted_values = numpy.sort(values)
bounds.append((sorted_values[0], sorted_values[-1]))
return bounds | create a bounds array from the tunable parameters | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L136-L142 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | setup_method_options | def setup_method_options(method, tuning_options):
""" prepare method specific options """
kwargs = {}
#pass size of parameter space as max iterations to methods that support it
#it seems not all methods interpret this value in the same manner
maxiter = numpy.prod([len(v) for v in tuning_options.tune_params.values()])
kwargs['maxiter'] = maxiter
if method in ["Nelder-Mead", "Powell"]:
kwargs['maxfev'] = maxiter
elif method == "L-BFGS-B":
kwargs['maxfun'] = maxiter
#pass eps to methods that support it
if method in ["CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"]:
kwargs['eps'] = tuning_options.eps
elif method == "COBYLA":
kwargs['rhobeg'] = tuning_options.eps
return kwargs | python | def setup_method_options(method, tuning_options):
""" prepare method specific options """
kwargs = {}
#pass size of parameter space as max iterations to methods that support it
#it seems not all methods interpret this value in the same manner
maxiter = numpy.prod([len(v) for v in tuning_options.tune_params.values()])
kwargs['maxiter'] = maxiter
if method in ["Nelder-Mead", "Powell"]:
kwargs['maxfev'] = maxiter
elif method == "L-BFGS-B":
kwargs['maxfun'] = maxiter
#pass eps to methods that support it
if method in ["CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"]:
kwargs['eps'] = tuning_options.eps
elif method == "COBYLA":
kwargs['rhobeg'] = tuning_options.eps
return kwargs | prepare method specific options | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L154-L173 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | snap_to_nearest_config | def snap_to_nearest_config(x, tune_params):
"""helper func that for each param selects the closest actual value"""
params = []
for i, k in enumerate(tune_params.keys()):
values = numpy.array(tune_params[k])
idx = numpy.abs(values-x[i]).argmin()
params.append(int(values[idx]))
return params | python | def snap_to_nearest_config(x, tune_params):
"""helper func that for each param selects the closest actual value"""
params = []
for i, k in enumerate(tune_params.keys()):
values = numpy.array(tune_params[k])
idx = numpy.abs(values-x[i]).argmin()
params.append(int(values[idx]))
return params | helper func that for each param selects the closest actual value | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L176-L183 |
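
For example, with two illustrative parameters, each coordinate of x snaps to the closest allowed value:

import numpy

tune_params = {"block_size_x": [16, 32, 64, 128], "vector": [1, 2, 4]}
x = [57.3, 3.2]

params = []
for i, k in enumerate(tune_params.keys()):
    values = numpy.array(tune_params[k])
    params.append(int(values[numpy.abs(values - x[i]).argmin()]))
print(params)  # [64, 4]
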
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | unscale_and_snap_to_nearest | def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5*eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace-x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v)-1)
#use index into array of actual values
x_u[i] = v[idx]
return x_u | python | def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5*eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace-x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v)-1)
#use index into array of actual values
x_u[i] = v[idx]
return x_u | helper func that snaps a scaled variable to the nearest config | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L186-L204 |
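
A worked example of the bin centers for one illustrative parameter with four values, so eps = 0.25:

import numpy

v = [16, 32, 64, 128]
eps = 0.25               # 1/len(v), as get_bounds_x0_eps would compute
pad = 0.5 * eps

linspace = numpy.linspace(pad, eps * len(v) - pad, len(v))
print(linspace)          # [0.125 0.375 0.625 0.875]: one bin center per value

x_i = 0.7                # a scaled coordinate proposed by the optimizer
idx = numpy.abs(linspace - x_i).argmin()
print(v[idx])            # 64: x_i falls in the third bin
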
benvanwerkhoven/kernel_tuner | kernel_tuner/runners/sequential.py | SequentialRunner.run | def run(self, parameter_space, kernel_options, tuning_options):
""" Iterate through the entire parameter space using a single Python process
:param parameter_space: The parameter space as an iterable.
:type parameter_space: iterable
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
logging.debug('sequential runner started for ' + kernel_options.kernel_name)
results = []
#iterate over parameter space
for element in parameter_space:
params = OrderedDict(zip(tuning_options.tune_params.keys(), element))
time = self.dev.compile_and_benchmark(self.gpu_args, params, kernel_options, tuning_options)
if time is None:
logging.debug('received time is None, kernel configuration was skipped silently due to compile or runtime failure')
continue
#print and append to results
params['time'] = time
output_string = get_config_string(params, self.units)
logging.debug(output_string)
if not self.quiet:
print(output_string)
results.append(params)
return results, self.dev.get_environment() | python | def run(self, parameter_space, kernel_options, tuning_options):
""" Iterate through the entire parameter space using a single Python process
:param parameter_space: The parameter space as an iterable.
:type parameter_space: iterable
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
logging.debug('sequential runner started for ' + kernel_options.kernel_name)
results = []
#iterate over parameter space
for element in parameter_space:
params = OrderedDict(zip(tuning_options.tune_params.keys(), element))
time = self.dev.compile_and_benchmark(self.gpu_args, params, kernel_options, tuning_options)
if time is None:
logging.debug('received time is None, kernel configuration was skipped silently due to compile or runtime failure')
continue
#print and append to results
params['time'] = time
output_string = get_config_string(params, self.units)
logging.debug(output_string)
if not self.quiet:
print(output_string)
results.append(params)
return results, self.dev.get_environment() | Iterate through the entire parameter space using a single Python process
:param parameter_space: The parameter space as an iterable.
:type parameter_space: iterable
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.iterface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/runners/sequential.py#L38-L79 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/basinhopping.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
method = tuning_options.method
#scale variables in x to make 'eps' relevant for multiple variables
tuning_options["scaling"] = True
bounds, x0, eps = get_bounds_x0_eps(tuning_options)
kwargs = setup_method_arguments(method, bounds)
options = setup_method_options(method, tuning_options)
kwargs['options'] = options
args = (kernel_options, tuning_options, runner, results, cache)
minimizer_kwargs = dict(**kwargs)
minimizer_kwargs["method"] = method
minimizer_kwargs["args"] = args
opt_result = scipy.optimize.basinhopping(_cost_func, x0, stepsize=eps, minimizer_kwargs=minimizer_kwargs, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
method = tuning_options.method
#scale variables in x to make 'eps' relevant for multiple variables
tuning_options["scaling"] = True
bounds, x0, eps = get_bounds_x0_eps(tuning_options)
kwargs = setup_method_arguments(method, bounds)
options = setup_method_options(method, tuning_options)
kwargs['options'] = options
args = (kernel_options, tuning_options, runner, results, cache)
minimizer_kwargs = dict(**kwargs)
minimizer_kwargs["method"] = method
minimizer_kwargs["args"] = args
opt_result = scipy.optimize.basinhopping(_cost_func, x0, stepsize=eps, minimizer_kwargs=minimizer_kwargs, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/basinhopping.py#L8-L57 |
benvanwerkhoven/kernel_tuner | examples/cuda/convolution_streams.py | allocate | def allocate(n, dtype=numpy.float32):
""" allocate context-portable pinned host memory """
return drv.pagelocked_empty(int(n), dtype, order='C', mem_flags=drv.host_alloc_flags.PORTABLE) | python | def allocate(n, dtype=numpy.float32):
""" allocate context-portable pinned host memory """
return drv.pagelocked_empty(int(n), dtype, order='C', mem_flags=drv.host_alloc_flags.PORTABLE) | allocate context-portable pinned host memory | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/examples/cuda/convolution_streams.py#L10-L12 |
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.ready_argument_list | def ready_argument_list(self, arguments):
"""ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the CUDA kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to a CUDA kernel.
:rtype: list( pycuda.driver.DeviceAllocation, numpy.int32, ... )
"""
gpu_args = []
for arg in arguments:
# if arg is a numpy array, copy it to the device
if isinstance(arg, numpy.ndarray):
alloc = drv.mem_alloc(arg.nbytes)
self.allocations.append(alloc)
gpu_args.append(alloc)
drv.memcpy_htod(gpu_args[-1], arg)
else: # if not an array, just pass argument along
gpu_args.append(arg)
return gpu_args | python | def ready_argument_list(self, arguments):
"""ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the CUDA kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to a CUDA kernel.
:rtype: list( pycuda.driver.DeviceAllocation, numpy.int32, ... )
"""
gpu_args = []
for arg in arguments:
# if arg is a numpy array, copy it to the device
if isinstance(arg, numpy.ndarray):
alloc = drv.mem_alloc(arg.nbytes)
self.allocations.append(alloc)
gpu_args.append(alloc)
drv.memcpy_htod(gpu_args[-1], arg)
else: # if not an array, just pass argument along
gpu_args.append(arg)
return gpu_args | ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the CUDA kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to a CUDA kernel.
:rtype: list( pycuda.driver.DeviceAllocation, numpy.int32, ... ) | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L85-L106 |
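
A self-contained sketch of the same pattern against the pycuda API; pycuda.autoinit and the example arguments are assumptions for illustration:

import numpy
import pycuda.autoinit     # creates a context; assumes a CUDA device is present
import pycuda.driver as drv

a = numpy.random.randn(1024).astype(numpy.float32)
arguments = [a, numpy.int32(a.size)]

gpu_args = []
for arg in arguments:
    if isinstance(arg, numpy.ndarray):
        alloc = drv.mem_alloc(arg.nbytes)  # device buffer for array arguments
        drv.memcpy_htod(alloc, arg)
        gpu_args.append(alloc)
    else:
        gpu_args.append(arg)               # scalars are passed by value
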
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.compile | def compile(self, kernel_name, kernel_string):
"""call the CUDA compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The CUDA kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: A CUDA kernel that can be called directly.
:rtype: pycuda.driver.Function
"""
try:
no_extern_c = 'extern "C"' in kernel_string
compiler_options = ['-Xcompiler=-Wall']
if self.compiler_options:
compiler_options += self.compiler_options
self.current_module = self.source_mod(kernel_string, options=compiler_options + ["-e", kernel_name],
arch=('compute_' + self.cc) if self.cc != "00" else None,
code=('sm_' + self.cc) if self.cc != "00" else None,
cache_dir=False, no_extern_c=no_extern_c)
func = self.current_module.get_function(kernel_name)
return func
except drv.CompileError as e:
if "uses too much shared data" in e.stderr:
raise Exception("uses too much shared data")
else:
raise e | python | def compile(self, kernel_name, kernel_string):
"""call the CUDA compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The CUDA kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: A CUDA kernel that can be called directly.
:rtype: pycuda.driver.Function
"""
try:
no_extern_c = 'extern "C"' in kernel_string
compiler_options = ['-Xcompiler=-Wall']
if self.compiler_options:
compiler_options += self.compiler_options
self.current_module = self.source_mod(kernel_string, options=compiler_options + ["-e", kernel_name],
arch=('compute_' + self.cc) if self.cc != "00" else None,
code=('sm_' + self.cc) if self.cc != "00" else None,
cache_dir=False, no_extern_c=no_extern_c)
func = self.current_module.get_function(kernel_name)
return func
except drv.CompileError as e:
if "uses too much shared data" in e.stderr:
raise Exception("uses too much shared data")
else:
raise e | call the CUDA compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The CUDA kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: A CUDA kernel that can be called directly.
:rtype: pycuda.driver.Function | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L109-L140 |
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.benchmark | def benchmark(self, func, gpu_args, threads, grid, times):
"""runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly; the number of
iterations is set during the creation of CudaFunctions. Benchmark returns
a robust average: of all measurements, the fastest and slowest runs are
discarded and the rest are included in the returned average. This makes the
result robust against initialization artifacts and other exceptional cases.
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
start = drv.Event()
end = drv.Event()
time = []
for _ in range(self.iterations):
self.context.synchronize()
start.record()
self.run_kernel(func, gpu_args, threads, grid)
end.record()
self.context.synchronize()
time.append(end.time_since(start))
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time) | python | def benchmark(self, func, gpu_args, threads, grid, times):
"""runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly; the number of
iterations is set during the creation of CudaFunctions. Benchmark returns
a robust average: of all measurements, the fastest and slowest runs are
discarded and the rest are included in the returned average. This makes the
result robust against initialization artifacts and other exceptional cases.
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
start = drv.Event()
end = drv.Event()
time = []
for _ in range(self.iterations):
self.context.synchronize()
start.record()
self.run_kernel(func, gpu_args, threads, grid)
end.record()
self.context.synchronize()
time.append(end.time_since(start))
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time) | runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly, number of
iterations is set during the creation of CudaFunctions. Benchmark returns
a robust average, from all measurements the fastest and slowest runs are
discarded and the rest is included in the returned average. The reason for
this is to be robust against initialization artifacts and other exceptional
cases.
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L143-L193 |
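A self-contained sketch of the robust averaging described above: with more than 4 iterations, the fastest and slowest measurements are dropped before taking the mean (the measurement values here are hypothetical):

import numpy

measurements = [5.2, 1.9, 2.0, 2.1, 2.0, 2.2, 1.4]   # times in ms, 7 iterations
time = sorted(measurements)
robust_avg = numpy.mean(time[1:-1]) if len(time) > 4 else numpy.mean(time)
print(robust_avg)   # 2.04 -- the 1.4 and 5.2 outliers no longer skew the average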
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.copy_constant_memory_args | def copy_constant_memory_args(self, cmem_args):
"""adds constant memory arguments to the most recently compiled module
:param cmem_args: A dictionary containing the data to be passed to the
device constant memory. The format to be used is as follows: A
string key is used to name the constant memory symbol to which the
value needs to be copied. Similar to regular arguments, these need
to be numpy objects, such as numpy.ndarray or numpy.int32, and so on.
:type cmem_args: dict( string: numpy.ndarray, ... )
"""
logging.debug('copy_constant_memory_args called')
logging.debug('current module: ' + str(self.current_module))
for k, v in cmem_args.items():
symbol = self.current_module.get_global(k)[0]
logging.debug('copying to symbol: ' + str(symbol))
logging.debug('array to be copied: ')
logging.debug(v.nbytes)
logging.debug(v.dtype)
logging.debug(v.flags)
drv.memcpy_htod(symbol, v) | python | def copy_constant_memory_args(self, cmem_args):
"""adds constant memory arguments to the most recently compiled module
:param cmem_args: A dictionary containing the data to be passed to the
device constant memory. The format to be used is as follows: A
string key is used to name the constant memory symbol to which the
value needs to be copied. Similar to regular arguments, these need
to be numpy objects, such as numpy.ndarray or numpy.int32, and so on.
:type cmem_args: dict( string: numpy.ndarray, ... )
"""
logging.debug('copy_constant_memory_args called')
logging.debug('current module: ' + str(self.current_module))
for k, v in cmem_args.items():
symbol = self.current_module.get_global(k)[0]
logging.debug('copying to symbol: ' + str(symbol))
logging.debug('array to be copied: ')
logging.debug(v.nbytes)
logging.debug(v.dtype)
logging.debug(v.flags)
drv.memcpy_htod(symbol, v) | adds constant memory arguments to the most recently compiled module
:param cmem_args: A dictionary containing the data to be passed to the
device constant memory. The format to be used is as follows: A
string key is used to name the constant memory symbol to which the
value needs to be copied. Similar to regular arguments, these need
to be numpy objects, such as numpy.ndarray or numpy.int32, and so on.
:type cmem_args: dict( string: numpy.ndarray, ... ) | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L195-L214 |
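A hedged usage sketch of the cmem_args format handled above: each key names a __constant__ symbol in the kernel source and each value is the numpy data to copy to it (the symbol name 'filter' is hypothetical):

import numpy

#the kernel source would declare: __constant__ float filter[9];
cmem_args = {'filter': numpy.ones(9).astype(numpy.float32)}
#after compiling a kernel instance, the backend calls copy_constant_memory_args(cmem_args)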
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.copy_texture_memory_args | def copy_texture_memory_args(self, texmem_args):
"""adds texture memory arguments to the most recently compiled module
:param texmem_args: A dictionary containing the data to be passed to the
device texture memory. TODO
"""
filter_mode_map = { 'point': drv.filter_mode.POINT,
'linear': drv.filter_mode.LINEAR }
address_mode_map = { 'border': drv.address_mode.BORDER,
'clamp': drv.address_mode.CLAMP,
'mirror': drv.address_mode.MIRROR,
'wrap': drv.address_mode.WRAP }
logging.debug('copy_texture_memory_args called')
logging.debug('current module: ' + str(self.current_module))
self.texrefs = []
for k, v in texmem_args.items():
tex = self.current_module.get_texref(k)
self.texrefs.append(tex)
logging.debug('copying to texture: ' + str(k))
if not isinstance(v, dict):
data = v
else:
data = v['array']
logging.debug('texture to be copied: ')
logging.debug(data.nbytes)
logging.debug(data.dtype)
logging.debug(data.flags)
drv.matrix_to_texref(data, tex, order="C")
if isinstance(v, dict):
if 'address_mode' in v and v['address_mode'] is not None:
# address_mode is set per axis
amode = v['address_mode']
if not isinstance(amode, list):
amode = [ amode ] * data.ndim
for i, m in enumerate(amode):
try:
if m is not None:
tex.set_address_mode(i, address_mode_map[m])
except KeyError:
raise ValueError('Unknown address mode: ' + m)
if 'filter_mode' in v and v['filter_mode'] is not None:
fmode = v['filter_mode']
try:
tex.set_filter_mode(filter_mode_map[fmode])
except KeyError:
raise ValueError('Unknown filter mode: ' + fmode)
if 'normalized_coordinates' in v and v['normalized_coordinates']:
tex.set_flags(tex.get_flags() | drv.TRSF_NORMALIZED_COORDINATES) | python | def copy_texture_memory_args(self, texmem_args):
"""adds texture memory arguments to the most recently compiled module
:param texmem_args: A dictionary containing the data to be passed to the
device texture memory. TODO
"""
filter_mode_map = { 'point': drv.filter_mode.POINT,
'linear': drv.filter_mode.LINEAR }
address_mode_map = { 'border': drv.address_mode.BORDER,
'clamp': drv.address_mode.CLAMP,
'mirror': drv.address_mode.MIRROR,
'wrap': drv.address_mode.WRAP }
logging.debug('copy_texture_memory_args called')
logging.debug('current module: ' + str(self.current_module))
self.texrefs = []
for k, v in texmem_args.items():
tex = self.current_module.get_texref(k)
self.texrefs.append(tex)
logging.debug('copying to texture: ' + str(k))
if not isinstance(v, dict):
data = v
else:
data = v['array']
logging.debug('texture to be copied: ')
logging.debug(data.nbytes)
logging.debug(data.dtype)
logging.debug(data.flags)
drv.matrix_to_texref(data, tex, order="C")
if isinstance(v, dict):
if 'address_mode' in v and v['address_mode'] is not None:
# address_mode is set per axis
amode = v['address_mode']
if not isinstance(amode, list):
amode = [ amode ] * data.ndim
for i, m in enumerate(amode):
try:
if m is not None:
tex.set_address_mode(i, address_mode_map[m])
except KeyError:
raise ValueError('Unknown address mode: ' + m)
if 'filter_mode' in v and v['filter_mode'] is not None:
fmode = v['filter_mode']
try:
tex.set_filter_mode(filter_mode_map[fmode])
except KeyError:
raise ValueError('Unknown filter mode: ' + fmode)
if 'normalized_coordinates' in v and v['normalized_coordinates']:
tex.set_flags(tex.get_flags() | drv.TRSF_NORMALIZED_COORDINATES) | adds texture memory arguments to the most recently compiled module
:param texmem_args: A dictionary containing the data to be passed to the
device texture memory. TODO | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L216-L269 |
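A hedged sketch of the texmem_args format accepted above: either a plain numpy array, or a dict with an 'array' entry plus the optional 'address_mode', 'filter_mode' and 'normalized_coordinates' settings (the texture name 'tex_input' is hypothetical):

import numpy

image = numpy.random.randn(16, 16).astype(numpy.float32)
texmem_args = {
    'tex_input': {
        'array': image,
        'address_mode': 'clamp',    # or a per-axis list such as ['clamp', 'wrap']
        'filter_mode': 'linear',
        'normalized_coordinates': True,
    }
}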
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.run_kernel | def run_kernel(self, func, gpu_args, threads, grid):
"""runs the CUDA kernel passed as 'func'
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
"""
func(*gpu_args, block=threads, grid=grid, texrefs=self.texrefs) | python | def run_kernel(self, func, gpu_args, threads, grid):
"""runs the CUDA kernel passed as 'func'
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
"""
func(*gpu_args, block=threads, grid=grid, texrefs=self.texrefs) | runs the CUDA kernel passed as 'func'
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int) | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L271-L290 |
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.memset | def memset(self, allocation, value, size):
"""set the memory in allocation to the value in value
:param allocation: A GPU memory allocation unit
:type allocation: pycuda.driver.DeviceAllocation
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of the allocation unit in bytes
:type size: int
"""
drv.memset_d8(allocation, value, size) | python | def memset(self, allocation, value, size):
"""set the memory in allocation to the value in value
:param allocation: A GPU memory allocation unit
:type allocation: pycuda.driver.DeviceAllocation
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of the allocation unit in bytes
:type size: int
"""
drv.memset_d8(allocation, value, size) | set the memory in allocation to the value in value
:param allocation: A GPU memory allocation unit
:type allocation: pycuda.driver.DeviceAllocation
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of the allocation unit in bytes
:type size: int | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L292-L305 |
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.memcpy_dtoh | def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: A GPU memory allocation unit
:type src: pycuda.driver.DeviceAllocation
"""
if isinstance(src, drv.DeviceAllocation):
drv.memcpy_dtoh(dest, src)
else:
            #src holds host data here; copy it into dest, a bare 'dest = src' only rebinds the local name
            numpy.copyto(dest, src)
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: A GPU memory allocation unit
:type src: pycuda.driver.DeviceAllocation
"""
if isinstance(src, drv.DeviceAllocation):
drv.memcpy_dtoh(dest, src)
else:
            #src holds host data here; copy it into dest, a bare 'dest = src' only rebinds the local name
            numpy.copyto(dest, src)
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: A GPU memory allocation unit
:type src: pycuda.driver.DeviceAllocation | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L308-L320 |
benvanwerkhoven/kernel_tuner | kernel_tuner/cuda.py | CudaFunctions.memcpy_htod | def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray
"""
if isinstance(dest, drv.DeviceAllocation):
drv.memcpy_htod(dest, src)
else:
            #dest is host-side here; copy src into it, a bare 'dest = src' only rebinds the local name
            numpy.copyto(dest, src)
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray
"""
if isinstance(dest, drv.DeviceAllocation):
drv.memcpy_htod(dest, src)
else:
            #dest is host-side here; copy src into it, a bare 'dest = src' only rebinds the local name
            numpy.copyto(dest, src)
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L322-L334 |
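A minimal round-trip sketch of the two copy directions above, using raw PyCUDA calls (mem_alloc, memcpy_htod and memcpy_dtoh are standard pycuda.driver APIs; a CUDA device is required):

import numpy
import pycuda.autoinit
import pycuda.driver as drv

host_src = numpy.arange(8, dtype=numpy.float32)
host_dst = numpy.zeros_like(host_src)
gpu_buf = drv.mem_alloc(host_src.nbytes)   # a pycuda.driver.DeviceAllocation
drv.memcpy_htod(gpu_buf, host_src)         # host -> device
drv.memcpy_dtoh(host_dst, gpu_buf)         # device -> host
assert (host_dst == host_src).all()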
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/simulated_annealing.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
# SA works with real parameter values and does not need scaling
tuning_options["scaling"] = False
args = (kernel_options, tuning_options, runner, results, cache)
tune_params = tuning_options.tune_params
# optimization parameters
T = 1.0
T_min = 0.001
alpha = 0.9
niter = 20
# generate random starting point and evaluate cost
pos = []
for i, _ in enumerate(tune_params.keys()):
pos.append(random_val(i, tune_params))
old_cost = _cost_func(pos, *args)
if tuning_options.verbose:
c = 0
# main optimization loop
while T > T_min:
if tuning_options.verbose:
print("iteration: ", c, "T", T, "cost: ", old_cost)
c += 1
for i in range(niter):
new_pos = neighbor(pos, tune_params)
new_cost = _cost_func(new_pos, *args)
ap = acceptance_prob(old_cost, new_cost, T)
r = random.random()
if ap > r:
if tuning_options.verbose:
print("new position accepted", new_pos, new_cost, 'old:', pos, old_cost, 'ap', ap, 'r', r, 'T', T)
pos = new_pos
old_cost = new_cost
T = T * alpha
return results, runner.dev.get_environment() | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
# SA works with real parameter values and does not need scaling
tuning_options["scaling"] = False
args = (kernel_options, tuning_options, runner, results, cache)
tune_params = tuning_options.tune_params
# optimization parameters
T = 1.0
T_min = 0.001
alpha = 0.9
niter = 20
# generate random starting point and evaluate cost
pos = []
for i, _ in enumerate(tune_params.keys()):
pos.append(random_val(i, tune_params))
old_cost = _cost_func(pos, *args)
if tuning_options.verbose:
c = 0
# main optimization loop
while T > T_min:
if tuning_options.verbose:
print("iteration: ", c, "T", T, "cost: ", old_cost)
c += 1
for i in range(niter):
new_pos = neighbor(pos, tune_params)
new_cost = _cost_func(new_pos, *args)
ap = acceptance_prob(old_cost, new_cost, T)
r = random.random()
if ap > r:
if tuning_options.verbose:
print("new position accepted", new_pos, new_cost, 'old:', pos, old_cost, 'ap', ap, 'r', r, 'T', T)
pos = new_pos
old_cost = new_cost
T = T * alpha
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/simulated_annealing.py#L10-L78 |
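With the fixed schedule above (T=1.0, T_min=0.001, alpha=0.9, niter=20) the total amount of work is known in advance; a quick sanity check:

import math

steps = math.ceil(math.log(0.001 / 1.0) / math.log(0.9))   # 66 cooling steps
evaluations = steps * 20                                   # niter=20 -> ~1320 cost evaluations in total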
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/simulated_annealing.py | acceptance_prob | def acceptance_prob(old_cost, new_cost, T):
"""annealing equation, with modifications to work towards a lower value"""
#if start pos is not valid, always move
if old_cost == 1e20:
return 1.0
#if we have found a valid pos before, never move to an invalid pos
if new_cost == 1e20:
return 0.0
#always move if new cost is better
if new_cost < old_cost:
return 1.0
#maybe move if old cost is better than new cost depending on T and random value
return np.exp(((old_cost-new_cost)/old_cost)/T) | python | def acceptance_prob(old_cost, new_cost, T):
"""annealing equation, with modifications to work towards a lower value"""
#if start pos is not valid, always move
if old_cost == 1e20:
return 1.0
#if we have found a valid pos before, never move to an invalid pos
if new_cost == 1e20:
return 0.0
#always move if new cost is better
if new_cost < old_cost:
return 1.0
#maybe move if old cost is better than new cost depending on T and random value
return np.exp(((old_cost-new_cost)/old_cost)/T) | annealing equation, with modifications to work towards a lower value | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/simulated_annealing.py#L80-L92 |
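A worked example of the acceptance rule above: moving from a 10 ms configuration to a worse 12 ms configuration at temperature T=0.5 (the values are hypothetical):

import numpy as np

old_cost, new_cost, T = 10.0, 12.0, 0.5
ap = np.exp(((old_cost - new_cost) / old_cost) / T)   # exp(-0.4) ~= 0.67
#the move is accepted when a uniform random draw falls below ap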
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/simulated_annealing.py | neighbor | def neighbor(pos, tune_params):
"""return a random neighbor of pos"""
size = len(pos)
pos_out = []
# random mutation
# every dimension attempts a mutation: a random replacement (p=0.2) or a step to an adjacent value
for i in range(size):
key = list(tune_params.keys())[i]
values = tune_params[key]
if random.random() < 0.2: #replace with random value
new_value = random_val(i, tune_params)
else: #adjacent value
ind = values.index(pos[i])
if random.random() > 0.5:
ind += 1
else:
ind -= 1
ind = min(max(ind, 0), len(values)-1)
new_value = values[ind]
pos_out.append(new_value)
return pos_out | python | def neighbor(pos, tune_params):
"""return a random neighbor of pos"""
size = len(pos)
pos_out = []
# random mutation
# every dimension attempts a mutation: a random replacement (p=0.2) or a step to an adjacent value
for i in range(size):
key = list(tune_params.keys())[i]
values = tune_params[key]
if random.random() < 0.2: #replace with random value
new_value = random_val(i, tune_params)
else: #adjacent value
ind = values.index(pos[i])
if random.random() > 0.5:
ind += 1
else:
ind -= 1
ind = min(max(ind, 0), len(values)-1)
new_value = values[ind]
pos_out.append(new_value)
return pos_out | return a random neighbor of pos | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/simulated_annealing.py#L95-L117 |
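An illustrative sketch (with hypothetical tunable parameters) of what neighbor does to a position: each dimension is mutated, either to a random value from its list (p=0.2) or to an adjacent value (p=0.8), with indices clamped at the list boundaries:

tune_params = {'block_size_x': [32, 64, 128, 256], 'tile_size': [1, 2, 4]}
#a neighbor of pos = [64, 2] could be, for example:
#  [128, 2]  (adjacent step in block_size_x)
#  [64, 1]   (adjacent step in tile_size)
#  [256, 2]  (random replacement in block_size_x)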
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/brute_force.py | tune | def tune(runner, kernel_options, device_options, tuning_options):
""" Tune all instances in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
tune_params = tuning_options.tune_params
restrictions = tuning_options.restrictions
verbose = tuning_options.verbose
#compute cartesian product of all tunable parameters
parameter_space = itertools.product(*tune_params.values())
#check for search space restrictions
if restrictions is not None:
parameter_space = filter(lambda p: util.check_restrictions(restrictions, p, tune_params.keys(), verbose), parameter_space)
results, env = runner.run(parameter_space, kernel_options, tuning_options)
return results, env | python | def tune(runner, kernel_options, device_options, tuning_options):
""" Tune all instances in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
tune_params = tuning_options.tune_params
restrictions = tuning_options.restrictions
verbose = tuning_options.verbose
#compute cartesian product of all tunable parameters
parameter_space = itertools.product(*tune_params.values())
#check for search space restrictions
if restrictions is not None:
parameter_space = filter(lambda p: util.check_restrictions(restrictions, p, tune_params.keys(), verbose), parameter_space)
results, env = runner.run(parameter_space, kernel_options, tuning_options)
return results, env | Tune all instances in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/brute_force.py#L8-L45 |
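A minimal standalone sketch of the search-space construction above: the cartesian product of all tunable-parameter value lists, optionally filtered by restrictions (the parameter values and the restriction are hypothetical, and a plain lambda stands in for kernel_tuner's own restriction checking):

import itertools

tune_params = {'block_size_x': [128, 256, 512], 'tile_size': [1, 2, 4]}
restriction = lambda p: p[0] * p[1] <= 1024
parameter_space = itertools.product(*tune_params.values())
parameter_space = [p for p in parameter_space if restriction(p)]
#[(128, 1), (128, 2), (128, 4), (256, 1), (256, 2), (256, 4), (512, 1), (512, 2)]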
benvanwerkhoven/kernel_tuner | kernel_tuner/wrappers.py | cpp | def cpp(function_name, kernel_source, args, convert_to_array=None):
""" Generate a wrapper to call C++ functions from Python
This function allows Kernel Tuner to call templated C++ functions
that use primitive data types (double, float, int, ...).
There is support to convert function arguments from plain pointers
to array references. If this is needed, there should be a True value
in convert_to_array in the location corresponding to the location in
the args array.
For example, a Numpy array argument of type float64 and length 10
will be cast using:
``*reinterpret_cast<double(*)[10]>(arg)``
which allows it to be used to call a C++ function that is defined as:
``template<typename T, int s>void my_function(T (&arg)[s], ...)``
Arrays of size 1 will be converted to simple non-array references.
False indicates that no conversion is performed. Conversion
is only supported for numpy array arguments. If convert_to_array is
passed it should have the same length as the args array.
:param function_name: A string containing the name of the C++ function
to be wrapped
:type function_name: string
:param kernel_source: One of the sources for the kernel, could be a
function that generates the kernel code, a string containing a filename
that points to the kernel source, or just a string that contains the code.
:type kernel_source: string or callable
:param args: A list of kernel arguments, use numpy arrays for
arrays, use numpy.int32 or numpy.float32 for scalars.
:type args: list
:param convert_to_array: A list of the same length as args, containing
True or False values indicating whether the corresponding argument
in args should be cast to a reference to an array or not.
:type convert_to_array: list (True or False)
:returns: A string containing the original code extended with the wrapper
function. The wrapper has "extern C" binding and can be passed to
other Kernel Tuner functions, for example run_kernel with lang="C".
The name of the wrapper function will be the name of the function with
a "_wrapper" postfix.
:rtype: string
"""
if convert_to_array and len(args) != len(convert_to_array):
raise ValueError("convert_to_array length should be same as args")
type_map = {"int8": "char",
"int16": "short",
"int32": "int",
"float32": "float",
"float64": "double"}
def type_str(arg):
        if str(arg.dtype) not in type_map:
            raise ValueError("only primitive data types are supported by the C++ wrapper")
typestring = type_map[str(arg.dtype)]
if isinstance(arg, np.ndarray):
typestring += " *"
return typestring + " "
signature = ",".join([type_str(arg) + "arg" + str(i) for i, arg in enumerate(args)])
if not convert_to_array:
call_args = ",".join(["arg" + str(i) for i in range(len(args))])
else:
call_args = []
for i, arg in enumerate(args):
if convert_to_array[i]:
if not isinstance(arg, np.ndarray):
ValueError("conversion to array reference only supported for arguments that are numpy arrays, use length-1 numpy array to pass a scalar by reference")
if np.prod(arg.shape) > 1:
#convert pointer to a reference to an array
arg_shape = "".join("[%d]" % i for i in arg.shape)
arg_str = "*reinterpret_cast<" + type_map[str(arg.dtype)] + "(*)" + arg_shape + ">(arg" + str(i) + ")"
else:
#a reference is accepted rather than a pointer, just dereference
arg_str = "*arg" + str(i)
call_args.append(arg_str)
#call_args = ",".join(["*reinterpret_cast<double(*)[9]>(arg" + str(i) + ")" for i in range(len(args))])
else:
call_args.append("arg" + str(i))
call_args_str = ",".join(call_args)
kernel_string = util.get_kernel_string(kernel_source)
return """
%s
extern "C"
float %s_wrapper(%s) {
%s(%s);
return 0.0f;
}""" % (kernel_string, function_name, signature, function_name, call_args_str) | python | def cpp(function_name, kernel_source, args, convert_to_array=None):
""" Generate a wrapper to call C++ functions from Python
This function allows Kernel Tuner to call templated C++ functions
that use primitive data types (double, float, int, ...).
There is support to convert function arguments from plain pointers
to array references. If this is needed, there should be a True value
in convert_to_array in the location corresponding to the location in
the args array.
For example, a Numpy array argument of type float64 and length 10
will be cast using:
``*reinterpret_cast<double(*)[10]>(arg)``
which allows it to be used to call a C++ function that is defined as:
``template<typename T, int s>void my_function(T (&arg)[s], ...)``
Arrays of size 1 will be converted to simple non-array references.
False indicates that no conversion is performed. Conversion
is only supported for numpy array arguments. If convert_to_array is
passed it should have the same length as the args array.
:param function_name: A string containing the name of the C++ function
to be wrapped
:type function_name: string
:param kernel_source: One of the sources for the kernel, could be a
function that generates the kernel code, a string containing a filename
that points to the kernel source, or just a string that contains the code.
:type kernel_source: string or callable
:param args: A list of kernel arguments, use numpy arrays for
arrays, use numpy.int32 or numpy.float32 for scalars.
:type args: list
:param convert_to_array: A list of the same length as args, containing
True or False values indicating whether the corresponding argument
in args should be cast to a reference to an array or not.
:type convert_to_array: list (True or False)
:returns: A string containing the original code extended with the wrapper
function. The wrapper has "extern C" binding and can be passed to
other Kernel Tuner functions, for example run_kernel with lang="C".
The name of the wrapper function will be the name of the function with
a "_wrapper" postfix.
:rtype: string
"""
if convert_to_array and len(args) != len(convert_to_array):
raise ValueError("convert_to_array length should be same as args")
type_map = {"int8": "char",
"int16": "short",
"int32": "int",
"float32": "float",
"float64": "double"}
def type_str(arg):
        if str(arg.dtype) not in type_map:
            raise ValueError("only primitive data types are supported by the C++ wrapper")
typestring = type_map[str(arg.dtype)]
if isinstance(arg, np.ndarray):
typestring += " *"
return typestring + " "
signature = ",".join([type_str(arg) + "arg" + str(i) for i, arg in enumerate(args)])
if not convert_to_array:
call_args = ",".join(["arg" + str(i) for i in range(len(args))])
else:
call_args = []
for i, arg in enumerate(args):
if convert_to_array[i]:
if not isinstance(arg, np.ndarray):
ValueError("conversion to array reference only supported for arguments that are numpy arrays, use length-1 numpy array to pass a scalar by reference")
if np.prod(arg.shape) > 1:
#convert pointer to a reference to an array
arg_shape = "".join("[%d]" % i for i in arg.shape)
arg_str = "*reinterpret_cast<" + type_map[str(arg.dtype)] + "(*)" + arg_shape + ">(arg" + str(i) + ")"
else:
#a reference is accepted rather than a pointer, just dereference
arg_str = "*arg" + str(i)
call_args.append(arg_str)
#call_args = ",".join(["*reinterpret_cast<double(*)[9]>(arg" + str(i) + ")" for i in range(len(args))])
else:
call_args.append("arg" + str(i))
call_args_str = ",".join(call_args)
kernel_string = util.get_kernel_string(kernel_source)
return """
%s
extern "C"
float %s_wrapper(%s) {
%s(%s);
return 0.0f;
}""" % (kernel_string, function_name, signature, function_name, call_args_str) | Generate a wrapper to call C++ functions from Python
This function allows Kernel Tuner to call templated C++ functions
that use primitive data types (double, float, int, ...).
There is support to convert function arguments from plain pointers
to array references. If this is needed, there should be a True value
in convert_to_array in the location corresponding to the location in
the args array.
For example, a Numpy array argument of type float64 and length 10
will be cast using:
``*reinterpret_cast<double(*)[10]>(arg)``
which allows it to be used to call a C++ function that is defined as:
``template<typename T, int s>void my_function(T (&arg)[s], ...)``
Arrays of size 1 will be converted to simple non-array references.
False indicates that no conversion is performed. Conversion
is only supported for numpy array arguments. If convert_to_array is
passed it should have the same length as the args array.
:param function_name: A string containing the name of the C++ function
to be wrapped
:type function_name: string
:param kernel_source: One of the sources for the kernel, could be a
function that generates the kernel code, a string containing a filename
that points to the kernel source, or just a string that contains the code.
:type kernel_source: string or callable
:param args: A list of kernel arguments, use numpy arrays for
arrays, use numpy.int32 or numpy.float32 for scalars.
:type args: list
:param convert_to_array: A list of the same length as args, containing
True or False values indicating whether the corresponding argument
in args should be cast to a reference to an array or not.
:type convert_to_array: list (True or False)
:returns: A string containing the original code extended with the wrapper
function. The wrapper has "extern C" binding and can be passed to
other Kernel Tuner functions, for example run_kernel with lang="C".
The name of the wrapper function will be the name of the function with
a "_wrapper" postfix.
:rtype: string | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/wrappers.py#L18-L119 |
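A hedged sketch of what the generated wrapper looks like for a small templated function ('my_function' and its source are hypothetical):

import numpy as np
from kernel_tuner import wrappers

args = [np.zeros(10, dtype=np.float64), np.int32(5)]
kernel_string = """
template<typename T, int s>
void my_function(T (&arg)[s], int n) { /* ... */ }
"""
wrapped = wrappers.cpp("my_function", kernel_string, args,
                       convert_to_array=[True, False])
#the returned string ends with roughly:
#  extern "C"
#  float my_function_wrapper(double * arg0, int arg1) {
#      my_function(*reinterpret_cast<double(*)[10]>(arg0), arg1);
#      return 0.0f;
#  }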
benvanwerkhoven/kernel_tuner | kernel_tuner/c.py | CFunctions.ready_argument_list | def ready_argument_list(self, arguments):
"""ready argument list to be passed to the C function
:param arguments: List of arguments to be passed to the C function.
The order should match the argument list on the C function.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to the C function.
:rtype: list(Argument)
"""
ctype_args = [ None for _ in arguments]
for i, arg in enumerate(arguments):
if not isinstance(arg, (numpy.ndarray, numpy.number)):
raise TypeError("Argument is not numpy ndarray or numpy scalar %s" % type(arg))
dtype_str = str(arg.dtype)
data = arg.copy()
if isinstance(arg, numpy.ndarray):
if dtype_str in dtype_map.keys():
# In numpy <= 1.15, ndarray.ctypes.data_as does not itself keep a reference
# to its underlying array, so we need to store a reference to arg.copy()
# in the Argument object manually to avoid it being deleted.
# (This changed in numpy > 1.15.)
data_ctypes = data.ctypes.data_as(C.POINTER(dtype_map[dtype_str]))
else:
raise TypeError("unknown dtype for ndarray")
elif isinstance(arg, numpy.generic):
data_ctypes = dtype_map[dtype_str](arg)
ctype_args[i] = Argument(numpy=data, ctypes=data_ctypes)
return ctype_args | python | def ready_argument_list(self, arguments):
"""ready argument list to be passed to the C function
:param arguments: List of arguments to be passed to the C function.
The order should match the argument list on the C function.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to the C function.
:rtype: list(Argument)
"""
ctype_args = [ None for _ in arguments]
for i, arg in enumerate(arguments):
if not isinstance(arg, (numpy.ndarray, numpy.number)):
raise TypeError("Argument is not numpy ndarray or numpy scalar %s" % type(arg))
dtype_str = str(arg.dtype)
data = arg.copy()
if isinstance(arg, numpy.ndarray):
if dtype_str in dtype_map.keys():
# In numpy <= 1.15, ndarray.ctypes.data_as does not itself keep a reference
# to its underlying array, so we need to store a reference to arg.copy()
# in the Argument object manually to avoid it being deleted.
# (This changed in numpy > 1.15.)
data_ctypes = data.ctypes.data_as(C.POINTER(dtype_map[dtype_str]))
else:
raise TypeError("unknown dtype for ndarray")
elif isinstance(arg, numpy.generic):
data_ctypes = dtype_map[dtype_str](arg)
ctype_args[i] = Argument(numpy=data, ctypes=data_ctypes)
return ctype_args | ready argument list to be passed to the C function
:param arguments: List of arguments to be passed to the C function.
The order should match the argument list on the C function.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to the C function.
:rtype: list(Argument) | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/c.py#L72-L102 |
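A minimal standalone sketch of the ctypes conversion performed above, for one array and one scalar (dtype_map in the record maps dtype names such as 'float32' to ctypes types; the variable names here are hypothetical):

import ctypes as C
import numpy

arr = numpy.arange(4, dtype=numpy.float32)
scalar = numpy.int32(7)
arr_ctypes = arr.ctypes.data_as(C.POINTER(C.c_float))   # pointer for ndarrays
scalar_ctypes = C.c_int32(scalar)                       # plain ctypes scalar for numpy scalars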
benvanwerkhoven/kernel_tuner | kernel_tuner/c.py | CFunctions.compile | def compile(self, kernel_name, kernel_string):
"""call the C compiler to compile the kernel, return the function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The C code that contains the function `kernel_name`
:type kernel_string: string
:returns: A ctypes function that can be called directly.
:rtype: ctypes._FuncPtr
"""
logging.debug('compiling ' + kernel_name)
        if self.lib is not None:
self.cleanup_lib()
compiler_options = ["-fPIC"]
#detect openmp
if "#include <omp.h>" in kernel_string or "use omp_lib" in kernel_string:
logging.debug('set using_openmp to true')
self.using_openmp = True
if self.compiler == "pgfortran":
compiler_options.append("-mp")
else:
compiler_options.append("-fopenmp")
#select right suffix based on compiler
suffix = ".cc"
#detect whether to use nvcc as default instead of g++, may overrule an explicitly passed g++
if ("#include <cuda" in kernel_string) or ("cudaMemcpy" in kernel_string):
if self.compiler == "g++" and self.nvcc_available:
self.compiler = "nvcc"
#if contains device code suffix .cu is required by nvcc
if self.compiler == "nvcc" and "__global__" in kernel_string:
suffix = ".cu"
if self.compiler in ["gfortran", "pgfortran", "ftn", "ifort"]:
suffix = ".F90"
if self.compiler == "nvcc":
compiler_options = ["-Xcompiler=" + c for c in compiler_options]
if ".c" in suffix:
if not "extern \"C\"" in kernel_string:
kernel_string = "extern \"C\" {\n" + kernel_string + "\n}"
#copy user specified compiler options to current list
if self.compiler_options:
compiler_options += self.compiler_options
lib_args = []
if "CL/cl.h" in kernel_string:
lib_args = ["-lOpenCL"]
logging.debug('using compiler ' + self.compiler)
logging.debug('compiler_options ' + " ".join(compiler_options))
logging.debug('lib_args ' + " ".join(lib_args))
source_file = get_temp_filename(suffix=suffix)
filename = ".".join(source_file.split(".")[:-1])
#detect Fortran modules
match = re.search(r"\s*module\s+([a-zA-Z_]*)", kernel_string)
if match:
if self.compiler == "gfortran":
kernel_name = "__" + match.group(1) + "_MOD_" + kernel_name
elif self.compiler in ["ftn", "ifort"]:
kernel_name = match.group(1) + "_mp_" + kernel_name + "_"
elif self.compiler == "pgfortran":
kernel_name = match.group(1) + "_" + kernel_name + "_"
try:
write_file(source_file, kernel_string)
lib_extension = ".so"
if platform.system() == "Darwin":
lib_extension = ".dylib"
subprocess.check_call([self.compiler, "-c", source_file] + compiler_options + ["-o", filename + ".o"])
subprocess.check_call([self.compiler, filename + ".o"] + compiler_options + ["-shared", "-o", filename + lib_extension] + lib_args)
self.lib = numpy.ctypeslib.load_library(filename, '.')
func = getattr(self.lib, kernel_name)
func.restype = C.c_float
finally:
delete_temp_file(source_file)
delete_temp_file(filename+".o")
delete_temp_file(filename+".so")
delete_temp_file(filename+".dylib")
return func | python | def compile(self, kernel_name, kernel_string):
"""call the C compiler to compile the kernel, return the function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The C code that contains the function `kernel_name`
:type kernel_string: string
:returns: A ctypes function that can be called directly.
:rtype: ctypes._FuncPtr
"""
logging.debug('compiling ' + kernel_name)
        if self.lib is not None:
self.cleanup_lib()
compiler_options = ["-fPIC"]
#detect openmp
if "#include <omp.h>" in kernel_string or "use omp_lib" in kernel_string:
logging.debug('set using_openmp to true')
self.using_openmp = True
if self.compiler == "pgfortran":
compiler_options.append("-mp")
else:
compiler_options.append("-fopenmp")
#select right suffix based on compiler
suffix = ".cc"
#detect whether to use nvcc as default instead of g++, may overrule an explicitly passed g++
if ("#include <cuda" in kernel_string) or ("cudaMemcpy" in kernel_string):
if self.compiler == "g++" and self.nvcc_available:
self.compiler = "nvcc"
#if contains device code suffix .cu is required by nvcc
if self.compiler == "nvcc" and "__global__" in kernel_string:
suffix = ".cu"
if self.compiler in ["gfortran", "pgfortran", "ftn", "ifort"]:
suffix = ".F90"
if self.compiler == "nvcc":
compiler_options = ["-Xcompiler=" + c for c in compiler_options]
if ".c" in suffix:
if not "extern \"C\"" in kernel_string:
kernel_string = "extern \"C\" {\n" + kernel_string + "\n}"
#copy user specified compiler options to current list
if self.compiler_options:
compiler_options += self.compiler_options
lib_args = []
if "CL/cl.h" in kernel_string:
lib_args = ["-lOpenCL"]
logging.debug('using compiler ' + self.compiler)
logging.debug('compiler_options ' + " ".join(compiler_options))
logging.debug('lib_args ' + " ".join(lib_args))
source_file = get_temp_filename(suffix=suffix)
filename = ".".join(source_file.split(".")[:-1])
#detect Fortran modules
match = re.search(r"\s*module\s+([a-zA-Z_]*)", kernel_string)
if match:
if self.compiler == "gfortran":
kernel_name = "__" + match.group(1) + "_MOD_" + kernel_name
elif self.compiler in ["ftn", "ifort"]:
kernel_name = match.group(1) + "_mp_" + kernel_name + "_"
elif self.compiler == "pgfortran":
kernel_name = match.group(1) + "_" + kernel_name + "_"
try:
write_file(source_file, kernel_string)
lib_extension = ".so"
if platform.system() == "Darwin":
lib_extension = ".dylib"
subprocess.check_call([self.compiler, "-c", source_file] + compiler_options + ["-o", filename + ".o"])
subprocess.check_call([self.compiler, filename + ".o"] + compiler_options + ["-shared", "-o", filename + lib_extension] + lib_args)
self.lib = numpy.ctypeslib.load_library(filename, '.')
func = getattr(self.lib, kernel_name)
func.restype = C.c_float
finally:
delete_temp_file(source_file)
delete_temp_file(filename+".o")
delete_temp_file(filename+".so")
delete_temp_file(filename+".dylib")
return func | call the C compiler to compile the kernel, return the function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The C code that contains the function `kernel_name`
:type kernel_string: string
:returns: A ctypes function that can be called directly.
:rtype: ctypes._FuncPtr | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/c.py#L104-L201 |
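A worked example of the Fortran module name mangling above: a kernel 'vector_add' defined inside 'module my_kernels' (both names hypothetical) is looked up in the shared library as:

gfortran:    __my_kernels_MOD_vector_add
ftn / ifort: my_kernels_mp_vector_add_
pgfortran:   my_kernels_vector_add_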
benvanwerkhoven/kernel_tuner | kernel_tuner/c.py | CFunctions.benchmark | def benchmark(self, func, c_args, threads, grid, times):
"""runs the kernel repeatedly, returns averaged returned value
The C function tuning is a little bit more flexible than direct CUDA
or OpenCL kernel tuning. The C function needs to measure time, or some
other quality metric you wish to tune on, on its own and should
therefore return a single floating-point value.
Benchmark runs the C function repeatedly and returns the average of the
values returned by the C function. The number of iterations is set
during the creation of the CFunctions object. For all measurements the
lowest and highest values are discarded and the rest is included in the
average. The reason for this is to be robust against initialization
artifacts and other exceptional cases.
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
time = []
for _ in range(self.iterations):
value = self.run_kernel(func, c_args, threads, grid)
#I would like to replace the following with actually capturing
#stderr and detecting the error directly in Python, it proved
#however that capturing stderr for non-Python functions from Python
#is a rather difficult thing to do
#
#The current, less than ideal, scheme uses the convention that a
#negative time indicates a 'too many resources requested for launch'
if value < 0.0:
raise Exception("too many resources requested for launch")
time.append(value)
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time) | python | def benchmark(self, func, c_args, threads, grid, times):
"""runs the kernel repeatedly, returns averaged returned value
The C function tuning is a little bit more flexible than direct CUDA
or OpenCL kernel tuning. The C function needs to measure time, or some
other quality metric you wish to tune on, on its own and should
therefore return a single floating-point value.
Benchmark runs the C function repeatedly and returns the average of the
values returned by the C function. The number of iterations is set
during the creation of the CFunctions object. For all measurements the
lowest and highest values are discarded and the rest is included in the
average. The reason for this is to be robust against initialization
artifacts and other exceptional cases.
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
time = []
for _ in range(self.iterations):
value = self.run_kernel(func, c_args, threads, grid)
#I would like to replace the following with actually capturing
#stderr and detecting the error directly in Python, it proved
#however that capturing stderr for non-Python functions from Python
#is a rather difficult thing to do
#
#The current, less than ideal, scheme uses the convention that a
#negative time indicates a 'too many resources requested for launch'
if value < 0.0:
raise Exception("too many resources requested for launch")
time.append(value)
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time) | runs the kernel repeatedly, returns averaged returned value
The C function tuning is a little bit more flexible than direct CUDA
or OpenCL kernel tuning. The C function needs to measure time, or some
other quality metric you wish to tune on, on its own and should
therefore return a single floating-point value.
Benchmark runs the C function repeatedly and returns the average of the
values returned by the C function. The number of iterations is set
during the creation of the CFunctions object. For all measurements the
lowest and highest values are discarded and the rest is included in the
average. The reason for this is to be robust against initialization
artifacts and other exceptional cases.
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/c.py#L204-L264 |
benvanwerkhoven/kernel_tuner | kernel_tuner/c.py | CFunctions.run_kernel | def run_kernel(self, func, c_args, threads, grid):
"""runs the kernel once, returns whatever the kernel returns
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:returns: The value returned by the C function.
:rtype: float
"""
logging.debug("run_kernel")
logging.debug("arguments=" + str([str(arg.ctypes) for arg in c_args]))
time = func(*[arg.ctypes for arg in c_args])
return time | python | def run_kernel(self, func, c_args, threads, grid):
"""runs the kernel once, returns whatever the kernel returns
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:returns: The value returned by the C function.
:rtype: float
"""
logging.debug("run_kernel")
logging.debug("arguments=" + str([str(arg.ctypes) for arg in c_args]))
time = func(*[arg.ctypes for arg in c_args])
return time | runs the kernel once, returns whatever the kernel returns
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:returns: The value returned by the C function.
:rtype: float | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/c.py#L267-L294 |
benvanwerkhoven/kernel_tuner | kernel_tuner/c.py | CFunctions.memset | def memset(self, allocation, value, size):
"""set the memory in allocation to the value in value
:param allocation: An Argument for some memory allocation unit
:type allocation: Argument
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of the allocation unit in bytes
:type size: int
"""
C.memset(allocation.ctypes, value, size) | python | def memset(self, allocation, value, size):
"""set the memory in allocation to the value in value
:param allocation: An Argument for some memory allocation unit
:type allocation: Argument
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of the allocation unit in bytes
:type size: int
"""
C.memset(allocation.ctypes, value, size) | set the memory in allocation to the value in value
:param allocation: An Argument for some memory allocation unit
:type allocation: Argument
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of the allocation unit in bytes
:type size: int | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/c.py#L296-L308 |
benvanwerkhoven/kernel_tuner | kernel_tuner/c.py | CFunctions.cleanup_lib | def cleanup_lib(self):
""" unload the previously loaded shared library """
if not self.using_openmp:
#this if statement is necessary because shared libraries that use
#OpenMP will core dump when unloaded, this is a well-known issue with OpenMP
logging.debug('unloading shared library')
_ctypes.dlclose(self.lib._handle) | python | def cleanup_lib(self):
""" unload the previously loaded shared library """
if not self.using_openmp:
#this if statement is necessary because shared libraries that use
#OpenMP will core dump when unloaded, this is a well-known issue with OpenMP
logging.debug('unloading shared library')
_ctypes.dlclose(self.lib._handle) | unload the previously loaded shared library | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/c.py#L332-L338 |
pengutronix/aiohttp-json-rpc | examples/publish_subscribe_example.py | clock | def clock(rpc):
"""
This task runs forever and notifies all clients subscribed to
'clock' once a second.
"""
while True:
yield from rpc.notify('clock', str(datetime.datetime.now()))
yield from asyncio.sleep(1) | python | def clock(rpc):
"""
This task runs forever and notifies all clients subscribed to
'clock' once a second.
"""
while True:
yield from rpc.notify('clock', str(datetime.datetime.now()))
yield from asyncio.sleep(1) | This task runs forever and notifies all clients subscribed to
'clock' once a second. | https://github.com/pengutronix/aiohttp-json-rpc/blob/24ec9ae9ae0633b2deaae7e29f6056f150cb4025/examples/publish_subscribe_example.py#L12-L20 |
pengutronix/aiohttp-json-rpc | aiohttp_json_rpc/django/__init__.py | patch_db_connections | def patch_db_connections():
"""
This wraps django.db.connections._connections with a TaskLocal object.
The Django transactions are only thread-safe, using threading.local,
and don't know about coroutines.
"""
global __already_patched
if not __already_patched:
from django.db import connections
connections._connections = local(connections._connections)
__already_patched = True | python | def patch_db_connections():
"""
This wraps django.db.connections._connections with a TaskLocal object.
The Django transactions are only thread-safe, using threading.local,
and don't know about coroutines.
"""
global __already_patched
if not __already_patched:
from django.db import connections
connections._connections = local(connections._connections)
__already_patched = True | This wraps django.db.connections._connections with a TaskLocal object.
The Django transactions are only thread-safe, using threading.local,
and don't know about coroutines. | https://github.com/pengutronix/aiohttp-json-rpc/blob/24ec9ae9ae0633b2deaae7e29f6056f150cb4025/aiohttp_json_rpc/django/__init__.py#L42-L56 |
pengutronix/aiohttp-json-rpc | aiohttp_json_rpc/protocol.py | decode_msg | def decode_msg(raw_msg):
"""
Decodes jsonrpc 2.0 raw message objects into JsonRpcMsg objects.
Examples:
Request:
{
"jsonrpc": "2.0",
"id": 1,
"method": "subtract",
"params": [42, 23]
}
Notification:
{
"jsonrpc": "2.0",
"method": "clock",
"params": "12:00",
}
Response:
{
"jsonrpc": "2.0",
"id": 1,
"result": 0,
}
Error:
{
"jsonrpc": "2.0",
"id": 1,
"error": {
"code": -32600,
"message": "Invalid request",
"data": null
}
}
"""
try:
msg_data = json.loads(raw_msg)
except ValueError:
raise RpcParseError
# check jsonrpc version
if 'jsonrpc' not in msg_data or not msg_data['jsonrpc'] == JSONRPC:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# check required fields
if not len(set(['error', 'result', 'method']) & set(msg_data)) == 1:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# find message type
if 'method' in msg_data:
if 'id' in msg_data and msg_data['id'] is not None:
msg_type = JsonRpcMsgTyp.REQUEST
else:
msg_type = JsonRpcMsgTyp.NOTIFICATION
elif 'result' in msg_data:
msg_type = JsonRpcMsgTyp.RESULT
elif 'error' in msg_data:
msg_type = JsonRpcMsgTyp.ERROR
# Request Objects
if msg_type in (JsonRpcMsgTyp.REQUEST, JsonRpcMsgTyp.NOTIFICATION):
# 'method' fields have to be strings
if type(msg_data['method']) is not str:
raise RpcInvalidRequestError
# set empty 'params' if not set
if 'params' not in msg_data:
msg_data['params'] = None
# set empty 'id' if not set
if 'id' not in msg_data:
msg_data['id'] = None
# Response Objects
if msg_type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
# every Response object has to define an id
if 'id' not in msg_data:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# Error objects
if msg_type == JsonRpcMsgTyp.ERROR:
# the error field has to be a dict
if type(msg_data['error']) is not dict:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# the error field has to define 'code' and 'message'
if not len(set(['code', 'message']) & set(msg_data['error'])) == 2:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# the error code has to be in the specified ranges
if not msg_data['error']['code'] in RpcError.lookup_table.keys():
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# set empty 'data' field if not set
if 'data' not in msg_data['error']:
msg_data['error']['data'] = None
return JsonRpcMsg(msg_type, msg_data) | python | def decode_msg(raw_msg):
"""
Decodes jsonrpc 2.0 raw message objects into JsonRpcMsg objects.
Examples:
Request:
{
"jsonrpc": "2.0",
"id": 1,
"method": "subtract",
"params": [42, 23]
}
Notification:
{
"jsonrpc": "2.0",
"method": "clock",
"params": "12:00",
}
Response:
{
"jsonrpc": "2.0",
"id": 1,
"result": 0,
}
Error:
{
"jsonrpc": "2.0",
"id": 1,
"error": {
"code": -32600,
"message": "Invalid request",
"data": null
}
}
"""
try:
msg_data = json.loads(raw_msg)
except ValueError:
raise RpcParseError
# check jsonrpc version
if 'jsonrpc' not in msg_data or not msg_data['jsonrpc'] == JSONRPC:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# check required fields
if not len(set(['error', 'result', 'method']) & set(msg_data)) == 1:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# find message type
if 'method' in msg_data:
if 'id' in msg_data and msg_data['id'] is not None:
msg_type = JsonRpcMsgTyp.REQUEST
else:
msg_type = JsonRpcMsgTyp.NOTIFICATION
elif 'result' in msg_data:
msg_type = JsonRpcMsgTyp.RESULT
elif 'error' in msg_data:
msg_type = JsonRpcMsgTyp.ERROR
# Request Objects
if msg_type in (JsonRpcMsgTyp.REQUEST, JsonRpcMsgTyp.NOTIFICATION):
# 'method' fields have to be strings
if type(msg_data['method']) is not str:
raise RpcInvalidRequestError
# set empty 'params' if not set
if 'params' not in msg_data:
msg_data['params'] = None
# set empty 'id' if not set
if 'id' not in msg_data:
msg_data['id'] = None
# Response Objects
if msg_type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
# every Response object has to define an id
if 'id' not in msg_data:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# Error objects
if msg_type == JsonRpcMsgTyp.ERROR:
# the error field has to be a dict
if type(msg_data['error']) is not dict:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# the error field has to define 'code' and 'message'
if not len(set(['code', 'message']) & set(msg_data['error'])) == 2:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# the error code has to be in the specified ranges
if not msg_data['error']['code'] in RpcError.lookup_table.keys():
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# set empty 'data' field if not set
if 'data' not in msg_data['error']:
msg_data['error']['data'] = None
return JsonRpcMsg(msg_type, msg_data) | Decodes jsonrpc 2.0 raw message objects into JsonRpcMsg objects.
Examples:
Request:
{
"jsonrpc": "2.0",
"id": 1,
"method": "subtract",
"params": [42, 23]
}
Notification:
{
"jsonrpc": "2.0",
"method": "clock",
"params": "12:00",
}
Response:
{
"jsonrpc": "2.0",
"id": 1,
"result": 0,
}
Error:
{
"jsonrpc": "2.0",
"id": 1,
"error": {
"code": -32600,
"message": "Invalid request",
"data": null
}
} | https://github.com/pengutronix/aiohttp-json-rpc/blob/24ec9ae9ae0633b2deaae7e29f6056f150cb4025/aiohttp_json_rpc/protocol.py#L23-L131 |
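A small usage sketch of decode_msg, built only from the message shapes documented above:
import json

raw = json.dumps({
    "jsonrpc": "2.0",
    "id": 1,
    "method": "subtract",
    "params": [42, 23],
})
msg = decode_msg(raw)
# 'method' plus a non-null 'id' classifies this as JsonRpcMsgTyp.REQUEST;
# dropping 'id' would classify it as a NOTIFICATION, and a malformed
# payload raises RpcParseError / RpcInvalidRequestError instead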
maciej-gol/tenant-schemas-celery | tenant_schemas_celery/app.py | switch_schema | def switch_schema(task, kwargs, **kw):
""" Switches schema of the task, before it has been run. """
# Lazily load needed functions, as they import django model functions which
# in turn load modules that need settings to be loaded and we can't
# guarantee this module was loaded when the settings were ready.
from .compat import get_public_schema_name, get_tenant_model
old_schema = (connection.schema_name, connection.include_public_schema)
setattr(task, '_old_schema', old_schema)
schema = (
get_schema_name_from_task(task, kwargs) or
get_public_schema_name()
)
# If the schema has not changed, don't do anything.
if connection.schema_name == schema:
return
if connection.schema_name != get_public_schema_name():
connection.set_schema_to_public()
if schema == get_public_schema_name():
return
tenant = get_tenant_model().objects.get(schema_name=schema)
connection.set_tenant(tenant, include_public=True) | python | def switch_schema(task, kwargs, **kw):
""" Switches schema of the task, before it has been run. """
# Lazily load needed functions, as they import django model functions which
# in turn load modules that need settings to be loaded and we can't
# guarantee this module was loaded when the settings were ready.
from .compat import get_public_schema_name, get_tenant_model
old_schema = (connection.schema_name, connection.include_public_schema)
setattr(task, '_old_schema', old_schema)
schema = (
get_schema_name_from_task(task, kwargs) or
get_public_schema_name()
)
# If the schema has not changed, don't do anything.
if connection.schema_name == schema:
return
if connection.schema_name != get_public_schema_name():
connection.set_schema_to_public()
if schema == get_public_schema_name():
return
tenant = get_tenant_model().objects.get(schema_name=schema)
connection.set_tenant(tenant, include_public=True) | Switches the schema of the task before it runs. | https://github.com/maciej-gol/tenant-schemas-celery/blob/277ad98a756826d0fe311b9be6870988de3cd661/tenant_schemas_celery/app.py#L25-L51 |
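get_schema_name_from_task is not shown in this entry; one plausible shape of that helper, purely illustrative (the _schema_name key is an assumption, not the package's documented contract):
def get_schema_name_from_task(task, kwargs):
    # hypothetical: prefer an explicit kwarg, fall back to a task attribute
    if kwargs and '_schema_name' in kwargs:
        return kwargs['_schema_name']
    return getattr(task, '_schema_name', None)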
maciej-gol/tenant-schemas-celery | tenant_schemas_celery/app.py | restore_schema | def restore_schema(task, **kwargs):
""" Switches the schema back to the one from before running the task. """
from .compat import get_public_schema_name
schema_name = get_public_schema_name()
include_public = True
if hasattr(task, '_old_schema'):
schema_name, include_public = task._old_schema
# If the schema names match, don't do anything.
if connection.schema_name == schema_name:
return
connection.set_schema(schema_name, include_public=include_public) | python | def restore_schema(task, **kwargs):
""" Switches the schema back to the one from before running the task. """
from .compat import get_public_schema_name
schema_name = get_public_schema_name()
include_public = True
if hasattr(task, '_old_schema'):
schema_name, include_public = task._old_schema
# If the schema names match, don't do anything.
if connection.schema_name == schema_name:
return
connection.set_schema(schema_name, include_public=include_public) | Switches the schema back to the one from before running the task. | https://github.com/maciej-gol/tenant-schemas-celery/blob/277ad98a756826d0fe311b9be6870988de3cd661/tenant_schemas_celery/app.py#L54-L68 |
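switch_schema and restore_schema pair naturally with Celery's task lifecycle signals; a sketch of that wiring (the package performs this setup itself, so this only illustrates the mechanism):
from celery.signals import task_prerun, task_postrun

task_prerun.connect(switch_schema)    # enter the tenant schema before the task body
task_postrun.connect(restore_schema)  # and restore the previous schema afterwards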
GiulioRossetti/dynetx | dynetx/readwrite/json_graph/node_link.py | node_link_data | def node_link_data(G, attrs=_attrs):
"""Return data in node-link format that is suitable for JSON serialization
and use in JavaScript documents.
Parameters
----------
G : DyNetx graph
attrs : dict
A dictionary that contains three keys 'id', 'source' and 'target'.
The corresponding values provide the attribute names for storing
DyNetx-internal graph data. The values should be unique. Default
value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
data : dict
A dictionary with node-link formatted data.
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes are stored in this format. Note that
attribute keys will be converted to strings in order to comply with
JSON.
See Also
--------
node_link_graph
"""
id_ = attrs['id']
data = {}
data['directed'] = G.is_directed()
data['graph'] = G.graph
data['nodes'] = [dict(chain(G.node[n].items(), [(id_, n)])) for n in G]
data['links'] = []
for u, v, timeline in G.interactions_iter():
for t in timeline['t']:
for tid in past.builtins.xrange(t[0], t[-1]+1):
data['links'].append({"source": u, "target": v, "time": tid})
return data | python | def node_link_data(G, attrs=_attrs):
"""Return data in node-link format that is suitable for JSON serialization
and use in JavaScript documents.
Parameters
----------
G : DyNetx graph
attrs : dict
A dictionary that contains three keys 'id', 'source' and 'target'.
The corresponding values provide the attribute names for storing
DyNetx-internal graph data. The values should be unique. Default
value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
data : dict
A dictionary with node-link formatted data.
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes are stored in this format. Note that
attribute keys will be converted to strings in order to comply with
JSON.
See Also
--------
node_link_graph
"""
id_ = attrs['id']
data = {}
data['directed'] = G.is_directed()
data['graph'] = G.graph
data['nodes'] = [dict(chain(G.node[n].items(), [(id_, n)])) for n in G]
data['links'] = []
for u, v, timeline in G.interactions_iter():
for t in timeline['t']:
for tid in past.builtins.xrange(t[0], t[-1]+1):
data['links'].append({"source": u, "target": v, "time": tid})
return data | Return data in node-link format that is suitable for JSON serialization
and use in JavaScript documents.
Parameters
----------
G : DyNetx graph
attrs : dict
A dictionary that contains three keys 'id', 'source' and 'target'.
The corresponding values provide the attribute names for storing
DyNetx-internal graph data. The values should be unique. Default
value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
data : dict
A dictionary with node-link formatted data.
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes are stored in this format. Note that
attribute keys will be converted to strings in order to comply with
JSON.
See Also
--------
node_link_graph | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/readwrite/json_graph/node_link.py#L10-L63 |
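Note the per-snapshot expansion in the loop above: an interaction interval becomes one link record per timestamp. A small illustration:
import dynetx as dn
from dynetx.readwrite import json_graph

G = dn.DynGraph()
G.add_interaction(1, 2, t=0, e=3)  # present in snapshots 0..2

data = json_graph.node_link_data(G)
# data['links'] expands the interval into one record per snapshot:
# [{'source': 1, 'target': 2, 'time': 0},
#  {'source': 1, 'target': 2, 'time': 1},
#  {'source': 1, 'target': 2, 'time': 2}]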
GiulioRossetti/dynetx | dynetx/readwrite/json_graph/node_link.py | node_link_graph | def node_link_graph(data, directed=False, attrs=_attrs):
"""Return graph from node-link data format.
Parameters
----------
data : dict
node-link formatted graph data
directed : bool
If True, and direction not specified in data, return a directed graph.
attrs : dict
A dictionary that contains three keys 'id', 'source', 'target'.
The corresponding values provide the attribute names for storing
Dynetx-internal graph data. Default value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
G : DyNetx graph
A DyNetx graph object
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
>>> H = json_graph.node_link_graph(data)
See Also
--------
node_link_data
"""
directed = data.get('directed', directed)
graph = dn.DynGraph()
if directed:
graph = graph.to_directed()
id_ = attrs['id']
mapping = []
graph.graph = data.get('graph', {})
c = count()
for d in data['nodes']:
node = d.get(id_, next(c))
mapping.append(node)
nodedata = dict((make_str(k), v) for k, v in d.items() if k != id_)
graph.add_node(node, **nodedata)
for d in data['links']:
graph.add_interaction(d['source'], d["target"], d['time'])
return graph | python | def node_link_graph(data, directed=False, attrs=_attrs):
"""Return graph from node-link data format.
Parameters
----------
data : dict
node-link formatted graph data
directed : bool
If True, and direction not specified in data, return a directed graph.
attrs : dict
A dictionary that contains three keys 'id', 'source', 'target'.
The corresponding values provide the attribute names for storing
Dynetx-internal graph data. Default value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
G : DyNetx graph
A DyNetx graph object
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
>>> H = json_graph.node_link_graph(data)
See Also
--------
node_link_data
"""
directed = data.get('directed', directed)
graph = dn.DynGraph()
if directed:
graph = graph.to_directed()
id_ = attrs['id']
mapping = []
graph.graph = data.get('graph', {})
c = count()
for d in data['nodes']:
node = d.get(id_, next(c))
mapping.append(node)
nodedata = dict((make_str(k), v) for k, v in d.items() if k != id_)
graph.add_node(node, **nodedata)
for d in data['links']:
graph.add_interaction(d['source'], d["target"], d['time'])
return graph | Return graph from node-link data format.
Parameters
----------
data : dict
node-link formatted graph data
directed : bool
If True, and direction not specified in data, return a directed graph.
attrs : dict
A dictionary that contains three keys 'id', 'source', 'target'.
The corresponding values provide the attribute names for storing
Dynetx-internal graph data. Default value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
G : DyNetx graph
A DyNetx graph object
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
>>> H = json_graph.node_link_graph(data)
See Also
--------
node_link_data | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/readwrite/json_graph/node_link.py#L66-L117 |
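A round-trip sketch combining the two functions above:
import json
import dynetx as dn
from dynetx.readwrite import json_graph

G = dn.DynGraph()
G.add_interaction(1, 2, t=0)

s = json.dumps(json_graph.node_link_data(G))   # graph -> JSON string
H = json_graph.node_link_graph(json.loads(s))  # JSON string -> graph
# H now contains the same interaction (1, 2) at snapshot 0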
GiulioRossetti/dynetx | dynetx/utils/transform.py | compact_timeslot | def compact_timeslot(sind_list):
"""
Test method. Compact the snapshot ids onto a contiguous, 0-based index range.
:param sind_list:
:return:
"""
tls = sorted(sind_list)
conversion = {val: idx for idx, val in enumerate(tls)}
return conversion | python | def compact_timeslot(sind_list):
"""
Test method. Compact the snapshot ids onto a contiguous, 0-based index range.
:param sind_list:
:return:
"""
tls = sorted(sind_list)
conversion = {val: idx for idx, val in enumerate(tls)}
return conversion | Test method. Compact the snapshot ids onto a contiguous, 0-based index range.
:param sind_list:
:return: | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/utils/transform.py#L11-L20 |
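A tiny example of the mapping this builds:
conversion = compact_timeslot([7, 3, 10])
# sorted ids [3, 7, 10] map onto a contiguous 0-based range:
# conversion == {3: 0, 7: 1, 10: 2}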
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.nodes_iter | def nodes_iter(self, t=None, data=False):
"""Return an iterator over the nodes with respect to a given temporal snapshot.
Parameters
----------
t : snapshot id (default=None).
If None the iterator returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False the iterator returns nodes. If True
return a two-tuple of node and node data dictionary
Returns
-------
niter : iterator
An iterator over nodes. If data=True the iterator gives
two-tuples containing (node, node data dictionary)
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> [n for n in G.nodes_iter(t=0)]
[0, 1, 2]
"""
if t is not None:
return iter([n for n, d in self.degree(t=t).items() if d > 0])
return iter(self._node) | python | def nodes_iter(self, t=None, data=False):
"""Return an iterator over the nodes with respect to a given temporal snapshot.
Parameters
----------
t : snapshot id (default=None).
If None the iterator returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False the iterator returns nodes. If True
return a two-tuple of node and node data dictionary
Returns
-------
niter : iterator
An iterator over nodes. If data=True the iterator gives
two-tuples containing (node, node data dictionary)
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> [n for n in G.nodes_iter(t=0)]
[0, 1, 2]
"""
if t is not None:
return iter([n for n, d in self.degree(t=t).items() if d > 0])
return iter(self._node) | Return an iterator over the nodes with respect to a given temporal snapshot.
Parameters
----------
t : snapshot id (default=None).
If None the iterator returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False the iterator returns nodes. If True
return a two-tuple of node and node data dictionary
Returns
-------
niter : iterator
An iterator over nodes. If data=True the iterator gives
two-tuples containing (node, node data dictionary)
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> [n for n in G.nodes_iter(t=0)]
[0, 1, 2] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L124-L151 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.nodes | def nodes(self, t=None, data=False):
"""Return a list of the nodes in the graph at a given snapshot.
Parameters
----------
t : snapshot id (default=None)
If None, the method returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False return a list of nodes. If True return a
two-tuple of node and node data dictionary
Returns
-------
nlist : list
A list of nodes. If data=True a list of two-tuples containing
(node, node data dictionary).
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> G.nodes(t=0)
[0, 1, 2]
>>> G.add_edge(1, 4, t=1)
>>> G.nodes(t=0)
[0, 1, 2]
"""
return list(self.nodes_iter(t=t, data=data)) | python | def nodes(self, t=None, data=False):
"""Return a list of the nodes in the graph at a given snapshot.
Parameters
----------
t : snapshot id (default=None)
If None, the method returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False return a list of nodes. If True return a
two-tuple of node and node data dictionary
Returns
-------
nlist : list
A list of nodes. If data=True a list of two-tuples containing
(node, node data dictionary).
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> G.nodes(t=0)
[0, 1, 2]
>>> G.add_edge(1, 4, t=1)
>>> G.nodes(t=0)
[0, 1, 2]
"""
return list(self.nodes_iter(t=t, data=data)) | Return a list of the nodes in the graph at a given snapshot.
Parameters
----------
t : snapshot id (default=None)
If None, the method returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False return a list of nodes. If True return a
two-tuple of node and node data dictionary
Returns
-------
nlist : list
A list of nodes. If data=True a list of two-tuples containing
(node, node data dictionary).
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> G.nodes(t=0)
[0, 1, 2]
>>> G.add_edge(1, 4, t=1)
>>> G.nodes(t=0)
[0, 1, 2] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L153-L180 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.interactions_iter | def interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the interaction present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
See Also
--------
interactions : return a list of interactions
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2], 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
seen = {} # helper dict to keep track of multiply stored interactions
if nbunch is None:
nodes_nbrs = self._adj.items()
else:
nodes_nbrs = ((n, self._adj[n]) for n in self.nbunch_iter(nbunch))
for n, nbrs in nodes_nbrs:
for nbr in nbrs:
if t is not None:
if nbr not in seen and self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr not in seen:
yield (n, nbr, self._adj[n][nbr])
seen[n] = 1
del seen | python | def interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the interaction present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
See Also
--------
interactions : return a list of interactions
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2], 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
seen = {} # helper dict to keep track of multiply stored interactions
if nbunch is None:
nodes_nbrs = self._adj.items()
else:
nodes_nbrs = ((n, self._adj[n]) for n in self.nbunch_iter(nbunch))
for n, nbrs in nodes_nbrs:
for nbr in nbrs:
if t is not None:
if nbr not in seen and self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr not in seen:
yield (n, nbr, self._adj[n][nbr])
seen[n] = 1
del seen | Return an iterator over the interactions present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
See Also
--------
interactions : return a list of interactions
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2], 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L238-L291 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.add_interaction | def add_interaction(self, u, v, t=None, e=None):
"""Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_interactions_from : add a collection of interactions at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following both add the interaction (1, 2) at time 0 to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interactions_from([(1, 2)], t=0) # add interactions from an iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [1, 9]
"""
if t is None:
raise nx.NetworkXError(
"The t argument must be specified.")
if u not in self._node:
self._adj[u] = self.adjlist_inner_dict_factory()
self._node[u] = {}
if v not in self._node:
self._adj[v] = self.adjlist_inner_dict_factory()
self._node[v] = {}
if type(t) != list:
t = [t, t]
for idt in [t[0]]:
if self.has_edge(u, v) and not self.edge_removal:
continue
else:
if idt not in self.time_to_edge:
self.time_to_edge[idt] = {(u, v, "+"): None}
else:
if (u, v, "+") not in self.time_to_edge[idt]:
self.time_to_edge[idt][(u, v, "+")] = None
if e is not None and self.edge_removal:
t[1] = e - 1
if e not in self.time_to_edge:
self.time_to_edge[e] = {(u, v, "-"): None}
else:
self.time_to_edge[e][(u, v, "-")] = None
# add the interaction
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
if 't' in datadict:
app = datadict['t']
max_end = app[-1][1]
if max_end == app[-1][0] and t[0] == app[-1][0] + 1:
app[-1] = [app[-1][0], t[1]]
if app[-1][0] + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[app[-1][0] + 1]:
del self.time_to_edge[app[-1][0] + 1][(u, v, "+")]
else:
if t[0] < app[-1][0]:
raise ValueError("The specified interaction extension is broader than "
"the ones already present for the given nodes.")
if t[0] <= max_end < t[1]:
app[-1][1] = t[1]
if max_end + 1 in self.time_to_edge:
if self.edge_removal:
del self.time_to_edge[max_end + 1][(u, v, "-")]
del self.time_to_edge[t[0]][(u, v, "+")]
elif max_end == t[0] - 1:
if max_end + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, "+")]
if self.edge_removal:
if max_end + 1 in self.time_to_edge and (u, v, '-') in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, '-')]
if t[1] + 1 in self.time_to_edge:
self.time_to_edge[t[1] + 1][(u, v, "-")] = None
else:
self.time_to_edge[t[1] + 1] = {(u, v, "-"): None}
app[-1][1] = t[1]
else:
app.append(t)
else:
datadict['t'] = [t]
if e is not None:
span = range(t[0], t[1] + 1)
for idt in span:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
else:
for idt in t:
if idt is not None:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
self._adj[u][v] = datadict
self._adj[v][u] = datadict | python | def add_interaction(self, u, v, t=None, e=None):
"""Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_interactions_from : add a collection of interactions at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following both add the interaction (1, 2) at time 0 to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interactions_from([(1, 2)], t=0) # add interactions from an iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [1, 9]
"""
if t is None:
raise nx.NetworkXError(
"The t argument must be specified.")
if u not in self._node:
self._adj[u] = self.adjlist_inner_dict_factory()
self._node[u] = {}
if v not in self._node:
self._adj[v] = self.adjlist_inner_dict_factory()
self._node[v] = {}
if type(t) != list:
t = [t, t]
for idt in [t[0]]:
if self.has_edge(u, v) and not self.edge_removal:
continue
else:
if idt not in self.time_to_edge:
self.time_to_edge[idt] = {(u, v, "+"): None}
else:
if (u, v, "+") not in self.time_to_edge[idt]:
self.time_to_edge[idt][(u, v, "+")] = None
if e is not None and self.edge_removal:
t[1] = e - 1
if e not in self.time_to_edge:
self.time_to_edge[e] = {(u, v, "-"): None}
else:
self.time_to_edge[e][(u, v, "-")] = None
# add the interaction
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
if 't' in datadict:
app = datadict['t']
max_end = app[-1][1]
if max_end == app[-1][0] and t[0] == app[-1][0] + 1:
app[-1] = [app[-1][0], t[1]]
if app[-1][0] + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[app[-1][0] + 1]:
del self.time_to_edge[app[-1][0] + 1][(u, v, "+")]
else:
if t[0] < app[-1][0]:
raise ValueError("The specified interaction extension is broader than "
"the ones already present for the given nodes.")
if t[0] <= max_end < t[1]:
app[-1][1] = t[1]
if max_end + 1 in self.time_to_edge:
if self.edge_removal:
del self.time_to_edge[max_end + 1][(u, v, "-")]
del self.time_to_edge[t[0]][(u, v, "+")]
elif max_end == t[0] - 1:
if max_end + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, "+")]
if self.edge_removal:
if max_end + 1 in self.time_to_edge and (u, v, '-') in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, '-')]
if t[1] + 1 in self.time_to_edge:
self.time_to_edge[t[1] + 1][(u, v, "-")] = None
else:
self.time_to_edge[t[1] + 1] = {(u, v, "-"): None}
app[-1][1] = t[1]
else:
app.append(t)
else:
datadict['t'] = [t]
if e is not None:
span = range(t[0], t[1] + 1)
for idt in span:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
else:
for idt in t:
if idt is not None:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
self._adj[u][v] = datadict
self._adj[v][u] = datadict | Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_interactions_from : add a collection of interactions at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following both add the interaction (1, 2) at time 0 to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interactions_from([(1, 2)], t=0) # add interactions from an iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [1, 9] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L293-L419 |
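Most of the bookkeeping above keeps the per-edge timeline 't' as a list of merged [start, end] intervals; a sketch of how successive calls extend it (the timelines noted in comments are the internal structure reachable via G._adj):
import dynetx as dn

G = dn.DynGraph()
G.add_interaction(1, 2, t=0)   # timeline of (1, 2): [[0, 0]]
G.add_interaction(1, 2, t=1)   # contiguous snapshot, merged: [[0, 1]]
G.add_interaction(1, 2, t=5)   # gap, so a new interval: [[0, 1], [5, 5]]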
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.add_interactions_from | def add_interactions_from(self, ebunch, t=None, e=None):
"""Add all the interaction in ebunch at time t.
Parameters
----------
ebunch : container of interactions
Each interaction given in the container will be added to the
graph. Interactions must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_edge : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_interactions_from([(0,1),(1,2)], t=0)
"""
# set up attribute dict
if t is None:
raise nx.NetworkXError(
"The t argument must be a specified.")
# process ebunch
for ed in ebunch:
self.add_interaction(ed[0], ed[1], t, e) | python | def add_interactions_from(self, ebunch, t=None, e=None):
"""Add all the interaction in ebunch at time t.
Parameters
----------
ebunch : container of interactions
Each interaction given in the container will be added to the
graph. Interactions must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_edge : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_interactions_from([(0,1),(1,2)], t=0)
"""
# set up attribute dict
if t is None:
raise nx.NetworkXError(
"The t argument must be a specified.")
# process ebunch
for ed in ebunch:
self.add_interaction(ed[0], ed[1], t, e) | Add all the interactions in ebunch at time t.
Parameters
----------
ebunch : container of interactions
Each interaction given in the container will be added to the
graph. Interactions must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_edge : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_interactions_from([(0,1),(1,2)], t=0) | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L421-L449 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.neighbors | def neighbors(self, n, t=None):
"""Return a list of the nodes connected to the node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, the neighbors of the node on the flattened graph will be returned.
Returns
-------
nlist : list
A list of nodes that are adjacent to n.
Raises
------
NetworkXError
If the node n is not in the graph.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.neighbors(0, t=0)
[1]
>>> G.neighbors(0, t=1)
[]
"""
try:
if t is None:
return list(self._adj[n])
else:
return [i for i in self._adj[n] if self.__presence_test(n, i, t)]
except KeyError:
raise nx.NetworkXError("The node %s is not in the graph." % (n,)) | python | def neighbors(self, n, t=None):
"""Return a list of the nodes connected to the node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, the neighbors of the node on the flattened graph will be returned.
Returns
-------
nlist : list
A list of nodes that are adjacent to n.
Raises
------
NetworkXError
If the node n is not in the graph.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.neighbors(0, t=0)
[1]
>>> G.neighbors(0, t=1)
[]
"""
try:
if t is None:
return list(self._adj[n])
else:
return [i for i in self._adj[n] if self.__presence_test(n, i, t)]
except KeyError:
raise nx.NetworkXError("The node %s is not in the graph." % (n,)) | Return a list of the nodes connected to the node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, the neighbors of the node on the flattened graph will be returned.
Returns
-------
nlist : list
A list of nodes that are adjacent to n.
Raises
------
NetworkXError
If the node n is not in the graph.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.neighbors(0, t=0)
[1]
>>> G.neighbors(0, t=1)
[] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L539-L575 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.neighbors_iter | def neighbors_iter(self, n, t=None):
"""Return an iterator over all neighbors of node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, an iterator over the neighbors of the node on the flattened graph will be returned.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> [n for n in G.neighbors_iter(0, t=0)]
[1]
"""
try:
if t is None:
return iter(self._adj[n])
else:
return iter([i for i in self._adj[n] if self.__presence_test(n, i, t)])
except KeyError:
raise nx.NetworkXError("The node %s is not in the graph." % (n,)) | python | def neighbors_iter(self, n, t=None):
"""Return an iterator over all neighbors of node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, an iterator over the neighbors of the node on the flattened graph will be returned.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> [n for n in G.neighbors_iter(0, t=0)]
[1]
"""
try:
if t is None:
return iter(self._adj[n])
else:
return iter([i for i in self._adj[n] if self.__presence_test(n, i, t)])
except KeyError:
raise nx.NetworkXError("The node %s is not in the graph." % (n,)) | Return an iterator over all neighbors of node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, an iterator over the neighbors of the node on the flattened graph will be returned.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> [n for n in G.neighbors_iter(0, t=0)]
[1] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L577-L600 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.degree | def degree(self, nbunch=None, t=None):
"""Return the degree of a node or nodes at time t.
The node degree is the number of interactions adjacent to that node in a given time frame.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the degree of nodes on the flattened graph will be returned.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.degree(0, t=0)
1
>>> G.degree([0,1], t=1)
{0: 0, 1: 0}
>>> list(G.degree([0,1], t=0).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.degree_iter(nbunch, t))[1]
else: # return a dict
return dict(self.degree_iter(nbunch, t)) | python | def degree(self, nbunch=None, t=None):
"""Return the degree of a node or nodes at time t.
The node degree is the number of interactions adjacent to that node in a given time frame.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the degree of nodes on the flattened graph will be returned.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.degree(0, t=0)
1
>>> G.degree([0,1], t=1)
{0: 0, 1: 0}
>>> list(G.degree([0,1], t=0).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.degree_iter(nbunch, t))[1]
else: # return a dict
return dict(self.degree_iter(nbunch, t)) | Return the degree of a node or nodes at time t.
The node degree is the number of interactions adjacent to that node in a given time frame.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the degree of nodes on the flattened graph will be returned.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.degree(0, t=0)
1
>>> G.degree([0,1], t=1)
{0: 0, 1: 0}
>>> list(G.degree([0,1], t=0).values())
[1, 2] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L602-L637 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.degree_iter | def degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, degree) at time t.
The node degree is the number of edges adjacent to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over the degree of nodes on the flattened graph will be returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> list(G.degree_iter(0, t=0))
[(0, 1)]
>>> list(G.degree_iter([0,1], t=0))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs = self._adj.items()
else:
nodes_nbrs = ((n, self._adj[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for n, nbrs in nodes_nbrs:
deg = len(self._adj[n])
yield (n, deg)
else:
for n, nbrs in nodes_nbrs:
edges_t = len([v for v in nbrs.keys() if self.__presence_test(n, v, t)])
if edges_t > 0:
yield (n, edges_t)
else:
yield (n, 0) | python | def degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, degree) at time t.
The node degree is the number of edges adjacent to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over the degree of nodes on the flattened graph will be returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> list(G.degree_iter(0, t=0))
[(0, 1)]
>>> list(G.degree_iter([0,1], t=0))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs = self._adj.items()
else:
nodes_nbrs = ((n, self._adj[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for n, nbrs in nodes_nbrs:
deg = len(self._adj[n])
yield (n, deg)
else:
for n, nbrs in nodes_nbrs:
edges_t = len([v for v in nbrs.keys() if self.__presence_test(n, v, t)])
if edges_t > 0:
yield (n, edges_t)
else:
yield (n, 0) | Return an iterator for (node, degree) at time t.
The node degree is the number of edges adjacent to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over the degree of nodes on the flattened graph will be returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> list(G.degree_iter(0, t=0))
[(0, 1)]
>>> list(G.degree_iter([0,1], t=0))
[(0, 1), (1, 2)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L639-L687 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.size | def size(self, t=None):
"""Return the number of edges at time t.
Parameters
----------
t : snapshot id (default=None)
If None, the size of the flattened graph will be returned.
Returns
-------
nedges : int
The number of edges
See Also
--------
number_of_edges
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.size(t=0)
3
"""
s = sum(self.degree(t=t).values()) / 2
return int(s) | python | def size(self, t=None):
"""Return the number of edges at time t.
Parameters
----------
t : snapshot id (default=None)
If None, the size of the flattened graph will be returned.
Returns
-------
nedges : int
The number of edges
See Also
--------
number_of_edges
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.size(t=0)
3
"""
s = sum(self.degree(t=t).values()) / 2
return int(s) | Return the number of edges at time t.
Parameters
----------
t : snapshot id (default=None)
If None, the size of the flattened graph will be returned.
Returns
-------
nedges : int
The number of edges
See Also
--------
number_of_edges
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.size(t=0)
3 | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L689-L715 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.number_of_nodes | def number_of_nodes(self, t=None):
"""Return the number of nodes in the t snpashot of a dynamic graph.
Parameters
----------
t : snapshot id (default=None)
If None return the number of nodes in the flattened graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order which is identical
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.number_of_nodes(0)
3
"""
if t is None:
return len(self._node)
else:
nds = sum([1 for n in self.degree(t=t).values() if n > 0])
return nds | python | def number_of_nodes(self, t=None):
"""Return the number of nodes in the t snpashot of a dynamic graph.
Parameters
----------
t : snapshot id (default=None)
If None return the number of nodes in the flattened graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order which is identical
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.number_of_nodes(0)
3
"""
if t is None:
return len(self._node)
else:
nds = sum([1 for n in self.degree(t=t).values() if n > 0])
return nds | Return the number of nodes in the t snapshot of a dynamic graph.
Parameters
----------
t : snapshot id (default=None)
If None return the number of nodes in the flattened graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order which is identical
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.number_of_nodes(0)
3 | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L717-L746 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.has_node | def has_node(self, n, t=None):
"""Return True if the graph, at time t, contains the node n.
Parameters
----------
n : node
t : snapshot id (default None)
If None return the presence of the node in the flattened graph.
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.has_node(0, t=0)
True
It is more readable and simpler to use
>>> 0 in G
True
"""
if t is None:
try:
return n in self._node
except TypeError:
return False
else:
deg = list(self.degree([n], t).values())
if len(deg) > 0:
return deg[0] > 0
else:
return False | python | def has_node(self, n, t=None):
"""Return True if the graph, at time t, contains the node n.
Parameters
----------
n : node
t : snapshot id (default None)
If None return the presence of the node in the flattened graph.
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.has_node(0, t=0)
True
It is more readable and simpler to use
>>> 0 in G
True
"""
if t is None:
try:
return n in self._node
except TypeError:
return False
else:
deg = list(self.degree([n], t).values())
if len(deg) > 0:
return deg[0] > 0
else:
return False | Return True if the graph, at time t, contains the node n.
Parameters
----------
n : node
t : snapshot id (default None)
If None return the presence of the node in the flattened graph.
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.has_node(0, t=0)
True
It is more readable and simpler to use
>>> 0 in G
True | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L775-L807 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.add_star | def add_star(self, nodes, t=None):
"""Add a star at time t.
The first node in nodes is the middle of the star. It is connected
to all other nodes.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_star([0,1,2,3], t=0)
"""
nlist = list(nodes)
v = nlist[0]
interaction = ((v, n) for n in nlist[1:])
self.add_interactions_from(interaction, t) | python | def add_star(self, nodes, t=None):
"""Add a star at time t.
The first node in nodes is the middle of the star. It is connected
to all other nodes.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_star([0,1,2,3], t=0)
"""
nlist = list(nodes)
v = nlist[0]
interaction = ((v, n) for n in nlist[1:])
self.add_interactions_from(interaction, t) | Add a star at time t.
The first node in nodes is the middle of the star. It is connected
to all other nodes.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_star([0,1,2,3], t=0) | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L809-L833 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.add_path | def add_path(self, nodes, t=None):
"""Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_star, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
"""
nlist = list(nodes)
interaction = zip(nlist[:-1], nlist[1:])
self.add_interactions_from(interaction, t) | python | def add_path(self, nodes, t=None):
"""Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_star, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
"""
nlist = list(nodes)
interaction = zip(nlist[:-1], nlist[1:])
self.add_interactions_from(interaction, t) | Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_star, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0) | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L835-L855 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.to_directed | def to_directed(self):
"""Return a directed representation of the graph.
Returns
-------
G : DynDiGraph
A dynamic directed graph with the same name, same nodes, and with
each edge (u,v,data) replaced by two directed edges
(u,v,data) and (v,u,data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DynDiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning: If you have subclassed Graph to use dict-like objects in the
data structure, those changes do not transfer to the DynDiGraph
created by this method.
Examples
--------
>>> G = dn.DynGraph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = dn.DynDiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
from .dyndigraph import DynDiGraph
G = DynDiGraph()
G.name = self.name
G.add_nodes_from(self)
for it in self.interactions_iter():
for t in it[2]['t']:
G.add_interaction(it[0], it[1], t=t[0], e=t[1])
G.graph = deepcopy(self.graph)
G._node = deepcopy(self._node)
return G | python | def to_directed(self):
"""Return a directed representation of the graph.
Returns
-------
G : DynDiGraph
A dynamic directed graph with the same name, same nodes, and with
each edge (u,v,data) replaced by two directed edges
(u,v,data) and (v,u,data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DynDiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning: If you have subclassed Graph to use dict-like objects in the
data structure, those changes do not transfer to the DynDiGraph
created by this method.
Examples
--------
>>> G = dn.DynGraph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = dn.DynDiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
from .dyndigraph import DynDiGraph
G = DynDiGraph()
G.name = self.name
G.add_nodes_from(self)
for it in self.interactions_iter():
for t in it[2]['t']:
G.add_interaction(it[0], it[1], t=t[0], e=t[1])
G.graph = deepcopy(self.graph)
G._node = deepcopy(self._node)
return G | Return a directed representation of the graph.
Returns
-------
G : DynDiGraph
A dynamic directed graph with the same name, same nodes, and with
each edge (u,v,data) replaced by two directed edges
(u,v,data) and (v,u,data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DynDiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning: If you have subclassed Graph to use dict-like objects in the
data structure, those changes do not transfer to the DynDiGraph
created by this method.
Examples
--------
>>> G = dn.DynGraph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = dn.DynDiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L879-L931 |
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.stream_interactions | def stream_interactions(self):
"""Generate a temporal ordered stream of interactions.
Returns
-------
nd_iter : an iterator
The iterator returns 4-tuples of the form (node, node, op, timestamp).
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([3,4,5,6], t=1)
>>> list(G.stream_interactions())
[(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)]
"""
timestamps = sorted(self.time_to_edge.keys())
for t in timestamps:
for e in self.time_to_edge[t]:
yield (e[0], e[1], e[2], t) | python | def stream_interactions(self):
"""Generate a temporal ordered stream of interactions.
Returns
-------
nd_iter : an iterator
The iterator returns 4-tuples of the form (node, node, op, timestamp).
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([3,4,5,6], t=1)
>>> list(G.stream_interactions())
[(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)]
"""
timestamps = sorted(self.time_to_edge.keys())
for t in timestamps:
for e in self.time_to_edge[t]:
yield (e[0], e[1], e[2], t) | Generate a temporally ordered stream of interactions.
Returns
-------
nd_iter : an iterator
The iterator returns 4-tuples of (node, node, op, timestamp).
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([3,4,5,6], t=1)
>>> list(G.stream_interactions())
[(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L933-L953 |
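A short replay sketch (same assumptions as the doctests above):

import dynetx as dn

g = dn.DynGraph()
g.add_path([0, 1, 2], t=0)
g.add_path([2, 3], t=1)
for u, v, op, ts in g.stream_interactions():
    print(u, v, op, ts)  # op is '+' when an interaction appears ('-' on vanishing events, when present)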
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.time_slice | def time_slice(self, t_from, t_to=None):
"""Return an new graph containing nodes and interactions present in [t_from, t_to].
Parameters
----------
t_from : snapshot id, mandatory
t_to : snapshot id, optional (default=None)
If None t_to will be set equal to t_from
Returns
-------
H : a DynGraph object
the graph described by interactions in [t_from, t_to]
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> H = G.time_slice(0)
>>> H.interactions()
[(0, 1), (1, 2), (2, 3)]
>>> H = G.time_slice(0, 1)
>>> H.interactions()
[(0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6)]
"""
# create new graph and copy subgraph into it
H = self.__class__()
if t_to is not None:
if t_to < t_from:
raise ValueError("Invalid range: t_to must be greater than t_from")
else:
t_to = t_from
for u, v, ts in self.interactions_iter():
I = t_from  # initial bound of the slice
F = t_to    # final bound of the slice
for a, b in ts['t']:
if I <= a and b <= F:
H.add_interaction(u, v, a, b)
elif a <= I and F <= b:
H.add_interaction(u, v, I, F)
elif a <= I <= b and b <= F:
H.add_interaction(u, v, I, b)
elif I <= a <= F and F <= b:
H.add_interaction(u, v, a, F)
return H | python | def time_slice(self, t_from, t_to=None):
"""Return an new graph containing nodes and interactions present in [t_from, t_to].
Parameters
----------
t_from : snapshot id, mandatory
t_to : snapshot id, optional (default=None)
If None t_to will be set equal to t_from
Returns
-------
H : a DynGraph object
the graph described by interactions in [t_from, t_to]
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> H = G.time_slice(0)
>>> H.interactions()
[(0, 1), (1, 2), (2, 3)]
>>> H = G.time_slice(0, 1)
>>> H.interactions()
[(0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6)]
"""
# create new graph and copy subgraph into it
H = self.__class__()
if t_to is not None:
if t_to < t_from:
raise ValueError("Invalid range: t_to must be greater than t_from")
else:
t_to = t_from
for u, v, ts in self.interactions_iter():
I = t_from  # initial bound of the slice
F = t_to    # final bound of the slice
for a, b in ts['t']:
if I <= a and b <= F:
H.add_interaction(u, v, a, b)
elif a <= I and F <= b:
H.add_interaction(u, v, I, F)
elif a <= I <= b and b <= F:
H.add_interaction(u, v, I, b)
elif I <= a <= F and F <= b:
H.add_interaction(u, v, a, F)
return H | Return a new graph containing nodes and interactions present in [t_from, t_to].
Parameters
----------
t_from : snapshot id, mandatory
t_to : snapshot id, optional (default=None)
If None t_to will be set equal to t_from
Returns
-------
H : a DynGraph object
the graph described by interactions in [t_from, t_to]
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> H = G.time_slice(0)
>>> H.interactions()
[(0, 1), (1, 2), (2, 3)]
>>> H = G.time_slice(0, 1)
>>> H.interactions()
[(0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L955-L1005
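A quick slicing sketch (same assumptions as the doctests above):

import dynetx as dn

g = dn.DynGraph()
g.add_path([0, 1, 2, 3], t=0)
g.add_path([0, 4, 5, 6], t=1)
h = g.time_slice(0, 1)   # keep only interactions active in [0, 1]
print(h.interactions())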
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.interactions_per_snapshots | def interactions_per_snapshots(self, t=None):
"""Return the number of interactions within snapshot t.
Parameters
----------
t : snapshot id (default=None)
If None, a dictionary reporting the interaction count of each snapshot is returned
Returns
-------
nd : dictionary, or number
A dictionary with snapshot ids as keys and interaction count as values or
a number if a single snapshot id is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> G.interactions_per_snapshots(t=0)
3
>>> G.interactions_per_snapshots()
{0: 3, 1: 3, 2: 3}
"""
if t is None:
return {k: v // 2 for k, v in self.snapshots.items()}  # integer division: each interaction is stored twice
else:
try:
return self.snapshots[t] // 2
except KeyError:
return 0 | python | def interactions_per_snapshots(self, t=None):
"""Return the number of interactions within snapshot t.
Parameters
----------
t : snapshot id (default=None)
If None, a dictionary reporting the interaction count of each snapshot is returned
Returns
-------
nd : dictionary, or number
A dictionary with snapshot ids as keys and interaction count as values or
a number if a single snapshot id is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> G.interactions_per_snapshots(t=0)
3
>>> G.interactions_per_snapshots()
{0: 3, 1: 3, 2: 3}
"""
if t is None:
return {k: v // 2 for k, v in self.snapshots.items()}  # integer division: each interaction is stored twice
else:
try:
return self.snapshots[t] // 2
except KeyError:
return 0 | Return the number of interactions within snapshot t.
Parameters
----------
t : snapshot id (default=None)
If None, a dictionary reporting the interaction count of each snapshot is returned
Returns
-------
nd : dictionary, or number
A dictionary with snapshot ids as keys and interaction count as values or
a number if a single snapshot id is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> G.interactions_per_snapshots(t=0)
3
>>> G.interactions_per_snapshots()
{0: 3, 1: 3, 2: 3} | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L1027-L1060 |
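A counting sketch (same assumptions as the doctests above):

import dynetx as dn

g = dn.DynGraph()
g.add_path([0, 1, 2, 3], t=0)
g.add_path([0, 4, 5, 6], t=1)
print(g.interactions_per_snapshots())     # e.g. {0: 3, 1: 3}
print(g.interactions_per_snapshots(t=9))  # unknown snapshot ids fall back to 0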
GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.inter_event_time_distribution | def inter_event_time_distribution(self, u=None, v=None):
"""Return the distribution of inter event time.
If u and v are None the inter event time distribution of the whole dynamic graph is returned.
If u is specified the inter event time distribution of interactions involving u is returned.
If u and v are specified the inter event time distribution of (u, v) interactions is returned.
Parameters
----------
u : node id
v : node id
Returns
-------
nd : dictionary
A dictionary from inter event time to number of occurrences
"""
dist = {}
if u is None:
# global inter event
first = True
delta = None
for ext in self.stream_interactions():
if first:
delta = ext
first = False
continue
disp = ext[-1] - delta[-1]
delta = ext
if disp in dist:
dist[disp] += 1
else:
dist[disp] = 1
elif u is not None and v is None:
# node inter event
delta = (0, 0, 0, 0)
flag = False
for ext in self.stream_interactions():
if ext[0] == u or ext[1] == u:
if flag:
disp = ext[-1] - delta[-1]
delta = ext
if disp in dist:
dist[disp] += 1
else:
dist[disp] = 1
else:
delta = ext
flag = True
else:
# interaction inter event
evt = self._adj[u][v]['t']
delta = []
for i in evt:
if i[0] != i[1]:
for j in [0, 1]:
delta.append(i[j])
else:
delta.append(i[0])
if len(delta) == 2 and delta[0] == delta[1]:
return {}
for i in range(0, len(delta) - 1):
e = delta[i + 1] - delta[i]
if e not in dist:
dist[e] = 1
else:
dist[e] += 1
return dist | python | def inter_event_time_distribution(self, u=None, v=None):
"""Return the distribution of inter event time.
If u and v are None the inter event time distribution of the whole dynamic graph is returned.
If u is specified the inter event time distribution of interactions involving u is returned.
If u and v are specified the inter event time distribution of (u, v) interactions is returned.
Parameters
----------
u : node id
v : node id
Returns
-------
nd : dictionary
A dictionary from inter event time to number of occurrences
"""
dist = {}
if u is None:
# global inter event
first = True
delta = None
for ext in self.stream_interactions():
if first:
delta = ext
first = False
continue
disp = ext[-1] - delta[-1]
delta = ext
if disp in dist:
dist[disp] += 1
else:
dist[disp] = 1
elif u is not None and v is None:
# node inter event
delta = (0, 0, 0, 0)
flag = False
for ext in self.stream_interactions():
if ext[0] == u or ext[1] == u:
if flag:
disp = ext[-1] - delta[-1]
delta = ext
if disp in dist:
dist[disp] += 1
else:
dist[disp] = 1
else:
delta = ext
flag = True
else:
# interaction inter event
evt = self._adj[u][v]['t']
delta = []
for i in evt:
if i[0] != i[1]:
for j in [0, 1]:
delta.append(i[j])
else:
delta.append(i[0])
if len(delta) == 2 and delta[0] == delta[1]:
return {}
for i in range(0, len(delta) - 1):
e = delta[i + 1] - delta[i]
if e not in dist:
dist[e] = 1
else:
dist[e] += 1
return dist | Return the distribution of inter event time.
If u and v are None the inter event time distribution of the whole dynamic graph is returned.
If u is specified the inter event time distribution of interactions involving u is returned.
If u and v are specified the inter event time distribution of (u, v) interactions is returned.
Parameters
----------
u : node id
v : node id
Returns
-------
nd : dictionary
A dictionary from inter event time to number of occurrences | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L1062-L1136 |
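A distribution sketch (same assumptions as above; the exact keys depend on how interaction intervals are stored):

import dynetx as dn

g = dn.DynGraph()
g.add_interaction(0, 1, t=0)
g.add_interaction(0, 1, t=2)
g.add_interaction(0, 1, t=5)
print(g.inter_event_time_distribution())      # global gaps between consecutive events
print(g.inter_event_time_distribution(0, 1))  # gaps restricted to the (0, 1) pair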
GiulioRossetti/dynetx | dynetx/classes/dyndigraph.py | DynDiGraph.degree_iter | def degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, degree) at time t.
The node degree is the number of edges adjacent to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over node degrees on the flattened graph is returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.degree_iter(0, t=0))
[(0, 1)]
>>> list(G.degree_iter([0,1], t=0))
[(0, 1), (1, 1)]
"""
if nbunch is None:
nodes_nbrs = ((n, succs, self._pred[n]) for n, succs in self._succ.items())
else:
nodes_nbrs = ((n, self._succ[n], self._pred[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for n, succ, pred in nodes_nbrs:
yield (n, len(succ) + len(pred))
else:
for n, succ, pred in nodes_nbrs:
edges_succ = len([v for v in succ.keys() if self.__presence_test(n, v, t)])
edges_pred = len([v for v in pred.keys() if self.__presence_test(v, n, t)])
yield (n, edges_succ + edges_pred) | python | def degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, degree) at time t.
The node degree is the number of edges adjacent to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over node degrees on the flattened graph is returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.degree_iter(0, t=0))
[(0, 1)]
>>> list(G.degree_iter([0,1], t=0))
[(0, 1), (1, 1)]
"""
if nbunch is None:
nodes_nbrs = ((n, succs, self._pred[n]) for n, succs in self._succ.items())
else:
nodes_nbrs = ((n, self._succ[n], self._pred[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for n, succ, pred in nodes_nbrs:
yield (n, len(succ) + len(pred))
else:
for n, succ, pred in nodes_nbrs:
edges_succ = len([v for v in succ.keys() if self.__presence_test(n, v, t)])
edges_pred = len([v for v in pred.keys() if self.__presence_test(v, n, t)])
yield (n, edges_succ + edges_pred) | Return an iterator for (node, degree) at time t.
The node degree is the number of edges adjacent to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over node degrees on the flattened graph is returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.degree_iter(0, t=0))
[(0, 1)]
>>> list(G.degree_iter([0,1], t=0))
[(0, 1), (1, 1)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyndigraph.py#L294-L341 |
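A degree sketch for the directed variant (same assumptions as the doctests above):

import dynetx as dn

g = dn.DynDiGraph()
g.add_interaction(0, 1, t=0)
g.add_interaction(1, 2, t=1)
print(list(g.degree_iter(t=0)))  # in- plus out-degree, restricted to snapshot 0
print(list(g.degree_iter()))     # degrees on the flattened graph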
GiulioRossetti/dynetx | dynetx/classes/dyndigraph.py | DynDiGraph.interactions_iter | def interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the interaction present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
seen = {} # helper dict to keep track of multiply stored interactions
if nbunch is None:
nodes_nbrs_succ = self._succ.items()
else:
nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_succ:
for nbr in nbrs:
if t is not None:
if nbr not in seen and self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr not in seen:
yield (n, nbr, self._succ[n][nbr])  # (node, neighbor) order, as documented
seen[n] = 1
del seen | python | def interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the interaction present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
seen = {} # helper dict to keep track of multiply stored interactions
if nbunch is None:
nodes_nbrs_succ = self._succ.items()
else:
nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_succ:
for nbr in nbrs:
if t is not None:
if nbr not in seen and self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr not in seen:
yield (n, nbr, self._succ[n][nbr])  # (node, neighbor) order, as documented
seen[n] = 1
del seen | Return an iterator over the interactions present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyndigraph.py#L382-L433 |
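An iteration sketch (same assumptions as above; the yielded tuples also carry a timestamp dictionary as third element):

import dynetx as dn

g = dn.DynDiGraph()
g.add_interaction(0, 1, t=0)
g.add_interaction(1, 2, t=0)
g.add_interaction(2, 3, t=1)
for e in g.interactions_iter(t=0):
    print(e)  # e.g. (0, 1, {'t': [0]})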
GiulioRossetti/dynetx | dynetx/classes/dyndigraph.py | DynDiGraph.in_interactions_iter | def in_interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the in interactions present in a given snapshot.
Edges are returned as tuples in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the in-interactions.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.in_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.in_interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
if nbunch is None:
nodes_nbrs_pred = self._pred.items()
else:
nodes_nbrs_pred = [(n, self._pred[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_pred:
for nbr in nbrs:
if t is not None:
if self.__presence_test(nbr, n, t):
yield (nbr, n, {"t": [t]})
else:
if nbr in self._pred[n]:
yield (nbr, n, self._pred[n][nbr]) | python | def in_interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the in interactions present in a given snapshot.
Edges are returned as tuples in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.in_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.in_interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
if nbunch is None:
nodes_nbrs_pred = self._pred.items()
else:
nodes_nbrs_pred = [(n, self._pred[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_pred:
for nbr in nbrs:
if t is not None:
if self.__presence_test(nbr, n, t):
yield (nbr, n, {"t": [t]})
else:
if nbr in self._pred[n]:
yield (nbr, n, self._pred[n][nbr]) | Return an iterator over the in-interactions present in a given snapshot.
Edges are returned as tuples in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the in-interactions.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.in_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.in_interactions_iter())
[(0, 1), (1, 2), (2, 3)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyndigraph.py#L591-L638 |
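An incoming-edge sketch (same assumptions as above):

import dynetx as dn

g = dn.DynDiGraph()
g.add_interaction(0, 1, t=0)
g.add_interaction(2, 1, t=0)
for e in g.in_interactions_iter([1], t=0):
    print(e)  # interactions pointing at node 1 in snapshot 0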
GiulioRossetti/dynetx | dynetx/classes/dyndigraph.py | DynDiGraph.out_interactions_iter | def out_interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the out interactions present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.out_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.out_interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
if nbunch is None:
nodes_nbrs_succ = self._succ.items()
else:
nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_succ:
for nbr in nbrs:
if t is not None:
if self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr in self._succ[n]:
yield (n, nbr, self._succ[n][nbr]) | python | def out_interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the out interactions present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.out_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.out_interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
if nbunch is None:
nodes_nbrs_succ = self._succ.items()
else:
nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_succ:
for nbr in nbrs:
if t is not None:
if self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr in self._succ[n]:
yield (n, nbr, self._succ[n][nbr]) | Return an iterator over the out-interactions present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.out_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.out_interactions_iter())
[(0, 1), (1, 2), (2, 3)] | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyndigraph.py#L640-L687 |
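An outgoing-edge sketch (same assumptions as above):

import dynetx as dn

g = dn.DynDiGraph()
g.add_interaction(0, 1, t=0)
g.add_interaction(0, 2, t=1)
for e in g.out_interactions_iter([0]):
    print(e)  # all out-interactions of node 0 on the flattened graph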