Python
def CreateMetadataDict(benchmark_spec):
  """Create metadata dict to be used in run results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  metadata = {
      'use_tpu': bool(benchmark_spec.tpus),
      'data_dir': benchmark_spec.data_dir,
      'model_dir': benchmark_spec.model_dir,
      'train_steps': benchmark_spec.train_steps,
      'eval_steps': benchmark_spec.eval_steps,
      'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
      'iterations': benchmark_spec.iterations,
      'num_train_images': benchmark_spec.num_train_images,
      'num_eval_images': benchmark_spec.num_eval_images,
      'train_epochs': benchmark_spec.train_epochs,
      'eval_epochs': benchmark_spec.eval_epochs,
      'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
      'train_batch_size': benchmark_spec.batch_size,
      'eval_batch_size': benchmark_spec.batch_size
  }
  if benchmark_spec.tpus:
    metadata.update({
        'train_tpu_num_shards':
            benchmark_spec.tpu_groups['train'].GetNumShards(),
        'train_tpu_accelerator_type':
            benchmark_spec.tpu_groups['train'].GetAcceleratorType()
    })
  return metadata
Python
def ExtractThroughput(regex, output, metadata, metric, unit):
  """Extract throughput from MNIST output.

  Args:
    regex: string. Regular expression.
    output: MNIST output.
    metadata: dict. Additional metadata to include with the sample.
    metric: string. Name of the metric within the benchmark.
    unit: string. Units for 'value'.

  Returns:
    samples containing the throughput
  """
  matches = regex_util.ExtractAllMatches(regex, output)
  samples = []
  for index, value in enumerate(matches):
    metadata_with_index = copy.deepcopy(metadata)
    metadata_with_index['index'] = index
    samples.append(sample.Sample(metric, float(value), unit,
                                 metadata_with_index))
  return samples
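For context, a minimal self-contained sketch of the extraction step, assuming regex_util.ExtractAllMatches behaves like re.findall and using hypothetical MNIST-style log lines:

import re

# Hypothetical log excerpt; the real format comes from the TensorFlow logs.
output = ('INFO:tensorflow:examples/sec: 1420.5\n'
          'INFO:tensorflow:examples/sec: 1398.2\n')
matches = re.findall(r'examples/sec: (\S+)', output)
for index, value in enumerate(matches):
  print(index, float(value))  # 0 1420.5, then 1 1398.2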
Python
def MakeSamplesFromTrainOutput(metadata, output, elapsed_seconds, step):
  """Create samples containing training metrics.

  Args:
    metadata: dict, metadata to attach to the samples.
    output: string, command output.
    elapsed_seconds: float, elapsed seconds from saved checkpoint.
    step: int, the global step in the training process.

  Example output:
    perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py

  Returns:
    a list of Samples containing the training metrics, current step, and
    elapsed seconds
  """
  samples = []
  metadata_copy = metadata.copy()
  metadata_copy['step'] = int(step)
  metadata_copy['epoch'] = step / metadata['num_examples_per_epoch']
  metadata_copy['elapsed_seconds'] = elapsed_seconds
  get_mean = lambda matches: sum(float(x) for x in matches) / len(matches)
  loss = get_mean(regex_util.ExtractAllMatches(
      r'Loss for final step: (\d+\.\d+)', output))
  samples.append(sample.Sample('Loss', float(loss), '', metadata_copy))
  if 'global_step/sec: ' in output:
    global_step_sec = get_mean(regex_util.ExtractAllMatches(
        r'global_step/sec: (\S+)', output))
    samples.append(sample.Sample(
        'Global Steps Per Second', global_step_sec, 'global_steps/sec',
        metadata_copy))
    examples_sec = global_step_sec * metadata['train_batch_size']
    if 'examples/sec: ' in output:
      examples_sec_log = get_mean(regex_util.ExtractAllMatches(
          r'examples/sec: (\S+)', output))
      precision = abs(examples_sec_log - examples_sec) / examples_sec_log
      assert precision < EXAMPLES_PER_SECOND_PRECISION, 'examples/sec is wrong.'
      examples_sec = examples_sec_log
    samples.append(sample.Sample('Examples Per Second', examples_sec,
                                 'examples/sec', metadata_copy))
  return samples
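The examples/sec cross-check above compares the throughput derived from global_step/sec against the value logged directly. A worked sketch with hypothetical numbers; EXAMPLES_PER_SECOND_PRECISION is a module-level constant not shown here, so the tolerance below is assumed for illustration:

import re

EXAMPLES_PER_SECOND_PRECISION = 1e-3  # assumed tolerance for illustration

output = 'global_step/sec: 95.0\nexamples/sec: 9502.0\n'
train_batch_size = 100

get_mean = lambda matches: sum(float(x) for x in matches) / len(matches)
global_step_sec = get_mean(re.findall(r'global_step/sec: (\S+)', output))
derived = global_step_sec * train_batch_size                   # 9500.0
logged = get_mean(re.findall(r'examples/sec: (\S+)', output))  # 9502.0
precision = abs(logged - derived) / logged                     # ~2.1e-4
assert precision < EXAMPLES_PER_SECOND_PRECISION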
Python
def MakeSamplesFromEvalOutput(metadata, output, elapsed_seconds):
  """Create samples containing evaluation metrics.

  Args:
    metadata: dict, metadata to attach to the samples.
    output: string, command output.
    elapsed_seconds: float, elapsed seconds from saved checkpoint.

  Example output:
    perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py

  Returns:
    a list of Samples containing evaluation metrics
  """
  pattern = (r'Saving dict for global step \d+: accuracy = (\d+\.\d+), '
             r'global_step = (\d+), loss = (\d+\.\d+)')
  accuracy, step, loss = regex_util.ExtractAllMatches(pattern, output).pop()
  metadata_copy = metadata.copy()
  step = int(step)
  metadata_copy['step'] = step
  num_examples_per_epoch = metadata['num_examples_per_epoch']
  metadata_copy['epoch'] = step / num_examples_per_epoch
  metadata_copy['elapsed_seconds'] = elapsed_seconds
  return [sample.Sample('Eval Loss', float(loss), '', metadata_copy),
          sample.Sample('Accuracy', float(accuracy) * 100, '%',
                        metadata_copy)]
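A self-contained sketch of the evaluation regex against a hypothetical TensorFlow log line, with re.findall standing in for regex_util.ExtractAllMatches:

import re

line = ('Saving dict for global step 2000: accuracy = 0.9832, '
        'global_step = 2000, loss = 0.0545')
pattern = (r'Saving dict for global step \d+: accuracy = (\d+\.\d+), '
           r'global_step = (\d+), loss = (\d+\.\d+)')
accuracy, step, loss = re.findall(pattern, line).pop()
print(float(accuracy) * 100, int(step), float(loss))  # 98.32 2000 0.0545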
Python
def Run(benchmark_spec):
  """Run MNIST on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  mnist_benchmark_script = 'mnist_tpu.py'
  mnist_benchmark_cmd = (
      'cd models/official/mnist && '
      'python {script} '
      '--data_dir={data_dir} '
      '--iterations={iterations} '
      '--model_dir={model_dir} '
      '--batch_size={batch_size}'.format(
          script=mnist_benchmark_script,
          data_dir=benchmark_spec.data_dir,
          iterations=benchmark_spec.iterations,
          model_dir=benchmark_spec.model_dir,
          batch_size=benchmark_spec.batch_size))
  if cuda_toolkit.CheckNvidiaGpuExists(vm):
    mnist_benchmark_cmd = '{env} {cmd}'.format(
        env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)
  samples = []
  metadata = CreateMetadataDict(benchmark_spec)
  # Initialize so an eval-only run does not reference an unset value.
  elapsed_seconds = 0
  if benchmark_spec.train_steps:
    if benchmark_spec.tpus:
      tpu = benchmark_spec.tpu_groups['train'].GetName()
      num_shards = '--num_shards={}'.format(
          benchmark_spec.tpu_groups['train'].GetNumShards())
    else:
      tpu = num_shards = ''
    mnist_benchmark_train_cmd = (
        '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '
        '{num_shards} --noenable_predict'.format(
            cmd=mnist_benchmark_cmd,
            tpu=tpu,
            use_tpu=bool(benchmark_spec.tpus),
            train_steps=benchmark_spec.train_steps,
            num_shards=num_shards))
    start = time.time()
    stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd,
                                            should_log=True)
    elapsed_seconds = (time.time() - start)
    samples.extend(MakeSamplesFromTrainOutput(
        metadata, stdout + stderr, elapsed_seconds,
        benchmark_spec.train_steps))
  if benchmark_spec.eval_steps:
    mnist_benchmark_eval_cmd = (
        '{cmd} --tpu="" --use_tpu=False --eval_steps={eval_steps}'.format(
            cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))
    stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd,
                                            should_log=True)
    samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,
                                             elapsed_seconds))
  return samples
Python
def Cleanup(benchmark_spec):
  """Cleanup MNIST on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  if benchmark_spec.tpus:
    vm = benchmark_spec.vms[0]
    vm.RemoteCommand(
        '{gsutil} rm -r {model_dir}'.format(
            gsutil=vm.gsutil_path, model_dir=benchmark_spec.model_dir),
        should_log=True)
    benchmark_spec.storage_service.CleanupVM(vm)
Python
def CheckPrerequisites(benchmark_config):
  """Verifies that the required resources are present.

  Args:
    benchmark_config: Unused.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  del benchmark_config
  ycsb.CheckPrerequisites()
Python
def Prepare(benchmark_spec):
  """Install YCSB on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  benchmark_spec.always_call_cleanup = True
  benchmark_spec.dynamodb_instance = aws_dynamodb.AwsDynamoDBInstance(
      table_name='pkb-{0}'.format(FLAGS.run_uri))
  benchmark_spec.dynamodb_instance.Create()
  vms = benchmark_spec.vms
  # Install required packages.
  vm_util.RunThreaded(_Install, vms)
  benchmark_spec.executor = ycsb.YCSBExecutor('dynamodb')
Python
def Run(benchmark_spec):
  """Run YCSB on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vms = benchmark_spec.vms
  run_kwargs = {
      'dynamodb.awsCredentialsFile': GetRemoteVMCredentialsFullPath(vms[0]),
      'dynamodb.primaryKey': FLAGS.aws_dynamodb_primarykey,
      'dynamodb.endpoint': benchmark_spec.dynamodb_instance.GetEndPoint(),
      'table': 'pkb-{0}'.format(FLAGS.run_uri),
  }
  if FLAGS.aws_dynamodb_use_sort:
    run_kwargs.update({'dynamodb.primaryKeyType': 'HASH_AND_RANGE',
                       'dynamodb.hashKeyName': FLAGS.aws_dynamodb_primarykey,
                       'dynamodb.primaryKey': FLAGS.aws_dynamodb_sortkey})
  if FLAGS.aws_dynamodb_ycsb_consistentReads:
    run_kwargs.update({'dynamodb.consistentReads': 'true'})
  load_kwargs = run_kwargs.copy()
  if FLAGS['ycsb_preload_threads'].present:
    load_kwargs['threads'] = FLAGS['ycsb_preload_threads']
  samples = list(benchmark_spec.executor.LoadAndRun(
      vms, load_kwargs=load_kwargs, run_kwargs=run_kwargs))
  return samples
Python
def Cleanup(benchmark_spec):
  """Cleanup YCSB on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  benchmark_spec.dynamodb_instance.Delete()
Python
def GetRemoteVMCredentialsFullPath(vm):
  """Returns the full path for AWS credentials file."""
  home_dir, _ = vm.RemoteCommand('echo ~')
  # AWS credentials always live in the 'credentials' file inside the .aws
  # configuration folder.
  # https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
  return os.path.join(home_dir.rstrip('\n'),
                      FLAGS.aws_credentials_remote_path,
                      'credentials')
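A sketch of the resulting path construction; the flag name comes from the code above, and the '.aws' default is assumed for illustration:

import os

home_dir = '/home/perfkit\n'          # simulated `echo ~` output, newline included
aws_credentials_remote_path = '.aws'  # assumed flag default
print(os.path.join(home_dir.rstrip('\n'),
                   aws_credentials_remote_path, 'credentials'))
# -> /home/perfkit/.aws/credentials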
Python
def _Exists(self):
  """Check to see whether the cluster exists."""
  cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'describe',
                           self.cluster_id)
  self.append_region(cmd)
  _, _, retcode = cmd.Issue()
  return retcode == 0
Python
def generate_data(self, source_dir, update_default_fs, num_files, size_file):
  """Method to generate data using a distributed job on the cluster."""
  cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', 'hadoop')
  cmd.flags['cluster'] = self.cluster_id
  cmd.flags['jar'] = TESTDFSIO_JAR_LOCATION
  self.append_region(cmd)
  job_arguments = [TESTDFSIO_PROGRAM]
  if update_default_fs:
    job_arguments.append('-Dfs.default.name={}'.format(source_dir))
  job_arguments.append('-Dtest.build.data={}'.format(source_dir))
  job_arguments.extend(['-write', '-nrFiles', str(num_files),
                        '-fileSize', str(size_file)])
  cmd.additional_flags = ['--'] + job_arguments
  _, _, retcode = cmd.Issue(timeout=None)
  return {dpb_service.SUCCESS: retcode == 0}
Python
def read_data(self, source_dir, update_default_fs, num_files, size_file):
  """Method to read data using a distributed job on the cluster."""
  cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', 'hadoop')
  cmd.flags['cluster'] = self.cluster_id
  cmd.flags['jar'] = TESTDFSIO_JAR_LOCATION
  self.append_region(cmd)
  job_arguments = [TESTDFSIO_PROGRAM]
  if update_default_fs:
    job_arguments.append('-Dfs.default.name={}'.format(source_dir))
  job_arguments.append('-Dtest.build.data={}'.format(source_dir))
  job_arguments.extend(['-read', '-nrFiles', str(num_files),
                        '-fileSize', str(size_file)])
  cmd.additional_flags = ['--'] + job_arguments
  _, _, retcode = cmd.Issue(timeout=None)
  return {dpb_service.SUCCESS: retcode == 0}
Python
def distributed_copy(self, source_location, destination_location):
  """Method to copy data using a distributed job on the cluster."""
  cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', 'hadoop')
  cmd.flags['cluster'] = self.cluster_id
  cmd.flags['class'] = 'org.apache.hadoop.tools.DistCp'
  self.append_region(cmd)
  job_arguments = (['-m={}'.format(FLAGS.dpb_dataproc_distcp_num_maps)]
                   if FLAGS.dpb_dataproc_distcp_num_maps is not None else [])
  job_arguments.extend([source_location, destination_location])
  cmd.additional_flags = ['--'] + job_arguments
  _, _, retcode = cmd.Issue(timeout=None)
  return {dpb_service.SUCCESS: retcode == 0}
Python
def cleanup_data(self, base_dir, update_default_fs):
  """Method to cleanup data using a distributed job on the cluster."""
  cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', 'hadoop')
  cmd.flags['cluster'] = self.cluster_id
  cmd.flags['jar'] = TESTDFSIO_JAR_LOCATION
  self.append_region(cmd)
  job_arguments = [TESTDFSIO_PROGRAM]
  if update_default_fs:
    job_arguments.append('-Dfs.default.name={}'.format(base_dir))
  job_arguments.append('-Dtest.build.data={}'.format(base_dir))
  job_arguments.append('-clean')
  cmd.additional_flags = ['--'] + job_arguments
  _, _, retcode = cmd.Issue(timeout=None)
  if retcode != 0:
    return {dpb_service.SUCCESS: False}
  if update_default_fs:
    vm_util.IssueCommand(['gsutil', '-m', 'rm', '-r', base_dir])
  return {dpb_service.SUCCESS: True}
Python
def GetMetadata(self):
  """Return a dictionary of the metadata for this cluster."""
  basic_data = super(GcpDpbDataproc, self).GetMetadata()
  if self.dpb_dataproc_image_version:
    basic_data['dpb_service'] = 'dataproc_{}'.format(
        self.dpb_dataproc_image_version)
  return basic_data
Python
def EbsDriveIsNvme(machine_type):
  """Check whether EBS volumes on this machine type use the NVMe driver."""
  instance_family = machine_type.split('.')[0].lower()
  return (instance_family not in NON_EBS_NVME_TYPES
          or 'metal' in machine_type)
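A sketch of the family check with a hypothetical stand-in for NON_EBS_NVME_TYPES; the real set of legacy families lives elsewhere in the module:

NON_EBS_NVME_TYPES = {'c4', 'm4', 't2'}  # hypothetical stand-in

for machine_type in ('m5.large', 'm4.large', 'i3.metal'):
  family = machine_type.split('.')[0].lower()
  is_nvme = family not in NON_EBS_NVME_TYPES or 'metal' in machine_type
  print(machine_type, is_nvme)
# m5.large True, m4.large False, i3.metal True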
Python
def Attach(self, vm):
  """Attaches the disk to a VM.

  Args:
    vm: The AwsVirtualMachine instance to which the disk will be attached.
  """
  with self._lock:
    self.attached_vm_id = vm.id
    if self.attached_vm_id not in AwsDisk.vm_devices:
      AwsDisk.vm_devices[self.attached_vm_id] = set(string.ascii_lowercase)
    self.device_letter = min(AwsDisk.vm_devices[self.attached_vm_id])
    AwsDisk.vm_devices[self.attached_vm_id].remove(self.device_letter)
  device_name = '/dev/xvdb%s' % self.device_letter
  attach_cmd = util.AWS_PREFIX + [
      'ec2',
      'attach-volume',
      '--region=%s' % self.region,
      '--instance-id=%s' % self.attached_vm_id,
      '--volume-id=%s' % self.id,
      '--device=%s' % device_name]
  logging.info('Attaching AWS volume %s. This may fail if the disk is not '
               'ready, but will be retried.', self.id)
  util.IssueRetryableCommand(attach_cmd)
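The device-letter bookkeeping above hands out the smallest unused letter per VM; a minimal sketch of that allocation:

import string

vm_devices = set(string.ascii_lowercase)  # fresh per-VM pool
device_letter = min(vm_devices)           # 'a'
vm_devices.remove(device_letter)          # mark it as taken
print('/dev/xvdb%s' % device_letter)      # /dev/xvdba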
Python
def GetDevicePath(self):
  """Returns the path to the device inside the VM."""
  if self.disk_type == disk.LOCAL:
    if LocalDriveIsNvme(self.machine_type):
      first_device_letter = 'b'
      if EbsDriveIsNvme(self.machine_type):
        # If the root drive is also NVME, assume the second drive is the
        # local drive.
        first_device_letter = 'a'
      return '/dev/nvme%sn1' % str(
          ord(self.device_letter) - ord(first_device_letter))
    return '/dev/xvd%s' % self.device_letter
  else:
    if EbsDriveIsNvme(self.machine_type):
      first_device_letter = 'a'
      return '/dev/nvme%sn1' % (
          1 + ord(self.device_letter) - ord(first_device_letter))
    else:
      return '/dev/xvdb%s' % self.device_letter
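A worked example of the NVMe index arithmetic for a local drive, with hypothetical values: when the EBS root drive is also NVMe, counting starts at 'a', so device letter 'c' maps to index 2.

device_letter = 'c'        # hypothetical letter assigned at attach time
first_device_letter = 'a'  # root drive is NVMe, so local drives start at 'a'
print('/dev/nvme%sn1' % str(ord(device_letter) - ord(first_device_letter)))
# -> /dev/nvme2n1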
Python
def Install(vm, package_name='python-pip'):
  """Install pip on the VM."""
  vm.InstallPackages(package_name)
  if vm.PYTHON_PIP_PACKAGE_VERSION:
    vm.RemoteCommand(
        'sudo pip install --upgrade '
        '--force-reinstall pip=={0}'.format(vm.PYTHON_PIP_PACKAGE_VERSION))
  else:
    vm.RemoteCommand('sudo pip install -U pip')  # Make pip upgrade pip
  # Add a symbolic link to /usr/local/bin/pip if pip ends up there. This lets
  # us run pip under sudo since /usr/local/bin is not typically available to
  # sudo.
  if not vm.TryRemoteCommand('sudo which pip'):
    pip_location, _ = vm.RemoteCommand('which pip')
    if pip_location.startswith('/usr/local/bin/pip'):
      vm.RemoteCommand('sudo ln -s /usr/local/bin/pip /usr/bin/pip')
  vm.RemoteCommand('mkdir -p {0} && pip freeze > {0}/requirements.txt'.format(
      INSTALL_DIR))
Python
def YumInstall(vm):
  """Installs the pip package on the VM."""
  vm.InstallEpelRepo()
  package_name = getattr(vm, 'python_pip_package_config', 'python27-pip')
  Install(vm, package_name)
Python
def Uninstall(vm):
  """Uninstalls the pip package on the VM."""
  vm.RemoteCommand('pip freeze | grep --fixed-strings --line-regexp '
                   '--invert-match --file {0}/requirements.txt | '
                   'xargs --no-run-if-empty sudo pip uninstall -y'.format(
                       INSTALL_DIR))
Python
def _RunMultiClient(clients, target, rate, connections, duration, threads):
  """Run multiple instances of wrk2 against a single target."""
  results = []
  num_clients = len(clients)

  def _RunSingleClient(client, client_number):
    """Run wrk2 from a single client."""
    client_results = list(wrk2.Run(
        client, target, rate, connections=connections,
        duration=duration, threads=threads))
    for result in client_results:
      result.metadata.update({'client_number': client_number})
    results.extend(client_results)

  args = [((client, i), {}) for i, client in enumerate(clients)]
  vm_util.RunThreaded(_RunSingleClient, args)
  requests = 0
  errors = 0
  max_latency = 0.0
  # TODO(ehankland): Since wrk2 keeps an HDR histogram of latencies, we
  # should be able to merge them and compute aggregate percentiles.
  for result in results:
    if result.metric == 'requests':
      requests += result.value
    elif result.metric == 'errors':
      errors += result.value
    elif result.metric == 'p100 latency':
      max_latency = max(max_latency, result.value)
  error_rate = errors / requests
  metadata = {
      'connections': connections * num_clients,
      'threads': threads * num_clients,
      'duration': duration,
      'rate': rate * num_clients
  }
  results += [
      sample.Sample('aggregate requests', requests, '', metadata),
      sample.Sample('aggregate errors', errors, '', metadata),
      sample.Sample('aggregate error_rate', error_rate, '', metadata),
      sample.Sample('aggregate p100 latency', max_latency, '', metadata)
  ]
  return results
Python
def Run(benchmark_spec):
  """Run a benchmark against the Nginx server.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  clients = benchmark_spec.vm_groups['clients']
  server = benchmark_spec.vm_groups['server'][0]
  results = []
  for config in FLAGS.nginx_load_configs:
    rate, duration, threads, connections = list(map(int, config.split(':')))
    target = 'http://%s/random_content' % server.internal_ip
    results += _RunMultiClient(clients, target, rate, connections, duration,
                               threads)
  return results
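Each --nginx_load_configs entry packs four integers as 'rate:duration:threads:connections' (the order follows the unpacking above); a sketch with hypothetical values:

config = '1000:60:8:100'  # hypothetical flag value
rate, duration, threads, connections = map(int, config.split(':'))
print(rate, duration, threads, connections)  # 1000 60 8 100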
Python
def _Exists(self):
  """Returns true if the disk exists."""
  cmd = util.GcloudCommand(self, 'compute', 'disks', 'describe', self.name)
  stdout, _, _ = cmd.Issue(suppress_warning=True)
  try:
    json.loads(stdout)
  except ValueError:
    return False
  return True
Python
def Attach(self, vm):
  """Attaches the disk to a VM.

  Args:
    vm: The GceVirtualMachine instance to which the disk will be attached.
  """
  self.attached_vm_name = vm.name
  cmd = util.GcloudCommand(self, 'compute', 'instances', 'attach-disk',
                           self.attached_vm_name)
  cmd.flags['device-name'] = self.name
  cmd.flags['disk'] = self.name
  cmd.IssueRetryable()
Python
def Detach(self):
  """Detaches the disk from a VM."""
  cmd = util.GcloudCommand(self, 'compute', 'instances', 'detach-disk',
                           self.attached_vm_name)
  cmd.flags['device-name'] = self.name
  cmd.IssueRetryable()
  self.attached_vm_name = None
Python
def GetDevicePath(self):
  """Returns the path to the device inside the VM."""
  if FLAGS.gce_ssd_interface == SCSI:
    return '/dev/disk/by-id/google-%s' % self.name
  elif FLAGS.gce_ssd_interface == NVME:
    return '/dev/%s' % self.name
Python
def Run(benchmark_spec):
  """Runs act and reports the results."""
  vm = benchmark_spec.vms[0]
  act.RunActPrep(vm)
  samples = []
  run_samples = []
  for load in FLAGS.act_load:

    def _Run(act_load, index):
      run_samples.extend(act.RunAct(vm, act_load, index))

    if FLAGS.act_parallel:
      args = [((float(load), idx), {})
              for idx in range(
                  len(vm.scratch_disks))[FLAGS.act_reserved_partitions:]]
      vm_util.RunThreaded(_Run, args)
    else:
      run_samples.extend(act.RunAct(vm, float(load)))
    samples.extend(run_samples)
    if FLAGS.act_stop_on_complete and act.IsRunComplete(run_samples):
      break
    run_samples = []
  return samples
Python
def GetOrBuild(self, image):
  """Finds the image in the registry or builds it.

  Args:
    image: The PKB name for the image (string).

  Returns:
    The full image name (including the registry).
  """
  full_image = self.GetFullRegistryTag(image)
  if not FLAGS.force_container_build:
    inspect_cmd = ['docker', 'image', 'inspect', full_image]
    _, _, retcode = vm_util.IssueCommand(inspect_cmd, suppress_warning=True)
    if retcode == 0:
      return full_image
  self._Build(image)
  return full_image
Python
def GetResourceMetadata(self):
  """Returns a dictionary of cluster metadata."""
  metadata = {
      'cloud': self.CLOUD,
      'cluster_type': self.CLUSTER_TYPE,
      'machine_type': self.machine_type,
      'zone': self.zone,
      'size': self.num_nodes,
  }
  if self.min_nodes != self.num_nodes or self.max_nodes != self.num_nodes:
    metadata.update({
        'max_size': self.max_nodes,
        'min_size': self.min_nodes,
    })
  if self.gpu_count:
    metadata.update({
        'gpu_type': self.gpu_type,
        'num_gpus': self.gpu_count,
    })
  return metadata
Python
def GetSamples(self):
  """Return samples with information about deployment times."""
  samples = []
  samples.append(sample.Sample(
      'Cluster Creation Time',
      self.resource_ready_time - self.create_start_time,
      'seconds'))
  for container in itertools.chain(*self.containers.values()):
    metadata = {'image': container.image.split('/')[-1]}
    if container.resource_ready_time and container.create_start_time:
      samples.append(sample.Sample(
          'Container Deployment Time',
          container.resource_ready_time - container.create_start_time,
          'seconds', metadata))
    if container.delete_end_time and container.delete_start_time:
      samples.append(sample.Sample(
          'Container Delete Time',
          container.delete_end_time - container.delete_start_time,
          'seconds', metadata))
  for service in self.services.values():
    metadata = {'image': service.image.split('/')[-1]}
    if service.resource_ready_time and service.create_start_time:
      samples.append(sample.Sample(
          'Service Deployment Time',
          service.resource_ready_time - service.create_start_time,
          'seconds', metadata))
    if service.delete_end_time and service.delete_start_time:
      samples.append(sample.Sample(
          'Service Delete Time',
          service.delete_end_time - service.delete_start_time,
          'seconds', metadata))
  return samples
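itertools.chain(*self.containers.values()) flattens the per-name container lists into one iterable; a minimal sketch with hypothetical data:

import itertools

containers = {'web': ['c1', 'c2'], 'db': ['c3']}  # hypothetical mapping
for container in itertools.chain(*containers.values()):
  print(container)  # c1, c2, c3 (insertion order)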
Python
def _GetPod(self):
  """Gets a representation of the POD and returns it."""
  stdout, _, _ = vm_util.IssueCommand([
      FLAGS.kubectl, '--kubeconfig', FLAGS.kubeconfig,
      'get', 'pod', self.name, '-o', 'yaml'])
  pod = yaml.load(stdout)
  if pod:
    self.ip_address = pod.get('status', {}).get('podIP', None)
  return pod
Python
def _GetIpAddress(self):
  """Attempts to set the Service's ip address."""
  ingress_name = '%s-ingress' % self.name
  get_cmd = [
      FLAGS.kubectl, '--kubeconfig', FLAGS.kubeconfig,
      'get', 'ing', ingress_name, '-o',
      'jsonpath="{.status.loadBalancer.ingress[*][\'ip\']}"'
  ]
  stdout, _, _ = vm_util.IssueCommand(get_cmd)
  ip_address = yaml.load(stdout)
  if ip_address:
    self.ip_address = ip_address
Python
def _Install(vm):
  """Installs the redis package on the VM."""
  vm.Install('build_tools')
  vm.Install('wget')
  redis_url = 'http://download.redis.io/releases/' + _GetRedisTarName()
  vm.RemoteCommand('wget %s -P %s' % (redis_url, INSTALL_DIR))
  vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, _GetRedisTarName()))
  vm.RemoteCommand('cd %s && make' % _GetRedisDir())
Python
def CheckPrerequisites(benchmark_config):
  """Verifies that the required resources are present.

  Args:
    benchmark_config: Unused.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  for resource in HBASE_CONF_FILES:
    data.ResourcePath(resource)

  hbase.CheckPrerequisites()
  ycsb.CheckPrerequisites()

  for scope in REQUIRED_SCOPES:
    if scope not in FLAGS.gcloud_scopes:
      raise ValueError('Scope {0} required.'.format(scope))

  # TODO: extract from gcloud config if available.
  if FLAGS.google_bigtable_instance_name:
    instance = _GetInstanceDescription(FLAGS.project or _GetDefaultProject(),
                                       FLAGS.google_bigtable_instance_name)
    logging.info('Found instance: %s', instance)
  else:
    logging.info('No instance; will create in Prepare.')
Python
def _GetInstanceDescription(project, instance_name):
  """Gets the description for a Cloud Bigtable instance.

  Args:
    project: str. Name of the project in which the instance was created.
    instance_name: str. ID of the desired Bigtable instance.

  Returns:
    A dictionary containing an instance description.

  Raises:
    KeyError: when the instance was not found.
  """
  env = {'CLOUDSDK_CORE_DISABLE_PROMPTS': '1'}
  env.update(os.environ)
  # List clusters and get the cluster associated with this instance so we can
  # read the number of nodes in the cluster.
  cmd = [FLAGS.gcloud_path, 'beta', 'bigtable', 'clusters', 'list', '--quiet',
         '--format', 'json', '--project', project]
  stdout, stderr, returncode = vm_util.IssueCommand(cmd, env=env)
  if returncode:
    raise IOError('Command "{0}" failed:\nSTDOUT:\n{1}\nSTDERR:\n{2}'.format(
        ' '.join(cmd), stdout, stderr))
  result = json.loads(stdout)
  instances = {
      instance['name'].split('/')[3]: instance
      for instance in result
  }
  try:
    return instances[instance_name]
  except KeyError:
    raise KeyError('Instance {0} not found in {1}'.format(
        instance_name, list(instances)))
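The instance id is recovered from the fully qualified resource name by position; a sketch with a hypothetical name:

# Cluster resource names look like
# 'projects/<project>/instances/<instance>/clusters/<cluster>';
# index 3 of the '/'-split is the instance id.
name = 'projects/my-project/instances/my-instance/clusters/my-cluster'
print(name.split('/')[3])  # my-instance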
Python
def Prepare(benchmark_spec):
  """Prepare the virtual machines to run cloud bigtable.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  benchmark_spec.always_call_cleanup = True
  vms = benchmark_spec.vms

  # TODO: in the future, it might be nice to change this so that
  # a gcp_bigtable.GcpBigtableInstance can be created with a
  # flag that says don't create/delete the instance. That would
  # reduce the code paths here.
  if FLAGS.google_bigtable_instance_name is None:
    instance_name = 'pkb-bigtable-{0}'.format(FLAGS.run_uri)
    project = FLAGS.project or _GetDefaultProject()
    logging.info('Creating bigtable instance %s', instance_name)
    zone = FLAGS.google_bigtable_zone_name
    benchmark_spec.bigtable_instance = gcp_bigtable.GcpBigtableInstance(
        instance_name, CLUSTER_SIZE, project, zone)
    benchmark_spec.bigtable_instance.Create()
    instance = _GetInstanceDescription(project, instance_name)
    logging.info('Instance %s created successfully', instance)

  vm_util.RunThreaded(_Install, vms)

  # Create table
  hbase_ycsb.CreateYCSBTable(vms[0], table_name=_GetTableName(),
                             use_snappy=False, limit_filesize=False)

  table_name = _GetTableName()
  # Add hbase conf dir to the classpath.
  ycsb_memory = min(vms[0].total_memory_kb // 1024, 4096)
  jvm_args = pipes.quote(' -Xmx{0}m'.format(ycsb_memory))
  executor_flags = {
      'cp': hbase.HBASE_CONF_DIR,
      'jvm-args': jvm_args,
      'table': table_name}
  benchmark_spec.executor = ycsb.YCSBExecutor('hbase10', **executor_flags)
Python
def Run(benchmark_spec):
  """Spawn YCSB and gather the results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample instances.
  """
  vms = benchmark_spec.vms
  instance_name = (FLAGS.google_bigtable_instance_name or
                   'pkb-bigtable-{0}'.format(FLAGS.run_uri))
  instance_info = _GetInstanceDescription(
      FLAGS.project or _GetDefaultProject(), instance_name)
  metadata = {'ycsb_client_vms': len(vms),
              'bigtable_nodes': instance_info.get('serveNodes')}

  # By default YCSB uses a BufferedMutator for Puts / Deletes.
  # This leads to incorrect update latencies, since the call returns
  # before the request is acked by the server.
  # Disable this behavior during the benchmark run.
  run_kwargs = {
      'columnfamily': COLUMN_FAMILY,
      'clientbuffering': 'false'}
  load_kwargs = run_kwargs.copy()

  # During the load stage, use a buffered mutator with a single thread.
  # The BufferedMutator will handle multiplexing RPCs.
  load_kwargs['clientbuffering'] = 'true'
  if not FLAGS['ycsb_preload_threads'].present:
    load_kwargs['threads'] = 1

  samples = list(benchmark_spec.executor.LoadAndRun(
      vms, load_kwargs=load_kwargs, run_kwargs=run_kwargs))
  # Use 's' as the loop variable to avoid shadowing the 'sample' module.
  for s in samples:
    s.metadata.update(metadata)
  return samples
Python
def Install(vm):
  """Installs TensorFlow models on the VM."""
  vm.InstallPackages('git')
  vm.RemoteCommand('git clone {}'.format(TF_MODELS_GIT), should_log=True)
  vm.RemoteCommand('cd models && git checkout {}'.format(
      FLAGS.tensorflow_models_commit_hash))
Python
def StressngCustomStressorsValidator(stressors):
  """Returns whether or not the list of custom stressors is valid."""
  valid_stressors = {
      'affinity', 'af-alg', 'aio', 'aio-linux', 'apparmor', 'bigheap', 'brk',
      'bsearch', 'cache', 'chdir', 'chmod', 'clock', 'clone', 'context',
      'cpu', 'cpu-online', 'crypt', 'daemon', 'dentry', 'dir', 'dup',
      'epoll', 'eventfd', 'exec', 'fallocate', 'fault', 'fcntl', 'fiemap',
      'fifo', 'filename', 'flock', 'fork', 'fp-error', 'fstat', 'futex',
      'get', 'getrandom', 'getdent', 'handle', 'hdd', 'heapsort', 'hsearch',
      'icache', 'iosync', 'inotify', 'itimer', 'kcmp', 'key', 'kill', 'klog',
      'lease', 'link', 'lockbus', 'lockf', 'longjmp', 'lsearch', 'malloc',
      'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort', 'mincore',
      'mknod', 'mlock', 'mmap', 'mmapfork', 'mmapmany', 'mremap', 'msg',
      'mq', 'nice', 'null', 'numa', 'oom-pipe', 'open', 'personality',
      'pipe', 'poll', 'procfs', 'pthread', 'ptrace', 'qsort', 'quota',
      'rdrand', 'readahead', 'remap-file-pages', 'rename', 'rlimit',
      'seccomp', 'seek', 'sem-posix', 'sem-sysv', 'shm-posix', 'shm-sysv',
      'sendfile', 'sigfd', 'sigfpe', 'sigpending', 'sigq', 'sigsegv',
      'sigsuspend', 'sleep', 'socket', 'socket-fd', 'socket-pair', 'spawn',
      'splice', 'stack', 'str', 'stream', 'switch', 'symlink', 'sync-file',
      'sysinfo', 'sysfs', 'tee', 'timer', 'timerfd', 'tsc', 'tsearch',
      'udp', 'udp-flood', 'unshare', 'urandom', 'userfaultfd', 'utime',
      'vecmath', 'vfork', 'vm', 'vm-rw', 'vm-splice', 'wait', 'wcs',
      'xattr', 'yield', 'zero', 'zlib', 'zombie'
  }
  return valid_stressors.issuperset(set(stressors))
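Usage sketch, assuming the validator above is in scope:

assert StressngCustomStressorsValidator(['cpu', 'matrix', 'zlib'])
assert not StressngCustomStressorsValidator(['cpu', 'not-a-stressor'])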
Python
def Run(benchmark_spec):
  """Runs stress-ng on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vm = benchmark_spec.vms[0]
  metadata = {
      'duration_sec': FLAGS.stress_ng_duration,
      'threads': vm.NumCpusForBenchmark()
  }

  # Rather than running stress-ng with --class cpu,cpu-cache,memory all in one
  # RobustRemoteCommand, we run each stressor individually. The reason is that
  # RobustRemoteCommand periodically SSHs into the VM, but one of the memory
  # stressors stresses the VM so much that SSH instantly returns 255, causing
  # the benchmark to fail completely.

  cpu_suites = [
      'af-alg', 'bsearch', 'context', 'cpu', 'cpu-online', 'crypt',
      'fp-error', 'getrandom', 'heapsort', 'hsearch', 'longjmp', 'lsearch',
      'matrix', 'mergesort', 'numa', 'qsort', 'rdrand', 'str', 'stream',
      'tsc', 'tsearch', 'vecmath', 'wcs', 'zlib'
  ]
  cpu_cache_suites = [
      'bsearch', 'cache', 'heapsort', 'hsearch', 'icache', 'lockbus',
      'lsearch', 'malloc', 'matrix', 'membarrier', 'memcpy', 'mergesort',
      'qsort', 'str', 'stream', 'tsearch', 'vecmath', 'wcs', 'zlib'
  ]
  memory_suites = [
      'bsearch', 'context', 'heapsort', 'hsearch', 'lockbus', 'lsearch',
      'malloc', 'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort',
      'mincore', 'null', 'numa', 'oom-pipe', 'pipe', 'qsort', 'stack',
      'str', 'stream', 'tsearch', 'vm', 'vm-rw', 'wcs', 'zero', 'zlib'
  ]

  stressors = sorted(set(cpu_suites + cpu_cache_suites + memory_suites))
  if FLAGS.stress_ng_custom_stressors:
    stressors = FLAGS.stress_ng_custom_stressors

  samples = []
  values_to_geomean_list = []

  for stressor in stressors:
    cmd = ('stress-ng --{stressor} {numthreads} --metrics-brief '
           '-t {duration}'.format(stressor=stressor,
                                  numthreads=vm.NumCpusForBenchmark(),
                                  duration=FLAGS.stress_ng_duration))
    stdout, _ = vm.RemoteCommand(cmd)
    stressng_sample = _ParseStressngResult(metadata, stdout)
    if stressng_sample:
      samples.append(stressng_sample)
      values_to_geomean_list.append(stressng_sample.value)

  # Only calculate the geomean if every stressor provided a value.
  if FLAGS.stress_ng_calc_geomean and len(values_to_geomean_list) == len(
      stressors):
    geomean_metadata = metadata.copy()
    geomean_metadata['stressors'] = stressors
    geomean_sample = sample.Sample(
        metric='STRESS_NG_GEOMEAN',
        value=_GeoMeanOverflow(values_to_geomean_list),
        unit='bogus_ops_sec',
        metadata=geomean_metadata)
    samples.append(geomean_sample)

  return samples
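Run above references `_GeoMeanOverflow`, which is not reproduced here. A minimal sketch of an overflow-safe geometric mean computed in log space, as an assumption about what that helper does:

import math

def _GeoMeanOverflow(iterable):
  """Sketch: geometric mean computed in log space to avoid overflow.

  Multiplying many large bogo-ops/sec values can overflow a float, so sum
  the logarithms and exponentiate the average instead.
  """
  values = list(iterable)
  if not values:
    return 0.0
  return math.exp(math.fsum(math.log(v) for v in values) / len(values))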
Python
def _Exists(self):
  """Returns True if the availability set exists."""
  show_cmd = [azure.AZURE_PATH, 'vm', 'availability-set', 'show',
              '--output', 'json',
              '--resource-group', self.resource_group.name,
              '--name', self.name]
  stdout, _, _ = vm_util.IssueCommand(show_cmd)
  return bool(json.loads(stdout))
Python
def _PostCreate(self):
  """Get our connection string and our keys."""
  self.connection_string = util.GetAzureStorageConnectionString(
      self.name, self.resource_group.args)
  self.connection_args = ['--connection-string', self.connection_string]
  self.key = util.GetAzureStorageAccountKey(
      self.name, self.resource_group.args)
Python
def _Exists(self):
  """Returns true if the storage account exists."""
  stdout, _, _ = vm_util.IssueCommand(
      [azure.AZURE_PATH, 'storage', 'account', 'show',
       '--output', 'json',
       '--name', self.name] + self.resource_group.args,
      suppress_warning=True)
  try:
    json.loads(stdout)
    return True
  except ValueError:
    return False
Python
def _Exists(self):
  """Returns true if the virtual network exists."""
  stdout, _, _ = vm_util.IssueCommand(
      [azure.AZURE_PATH, 'network', 'vnet', 'show',
       '--output', 'json',
       '--name', self.name] + self.resource_group.args,
      suppress_warning=True)
  return bool(json.loads(stdout))
Python
def AllowPort(self, vm, start_port, end_port=None, source_range=None):
  """Open a port or port range.

  Args:
    vm: the virtual machine to open the port for.
    start_port: either a single port or the start of a range.
    end_port: if given, the end of the port range.
    source_range: unsupported at present.

  Raises:
    ValueError: when there are too many firewall rules.
  """
  with self.rules_lock:
    end_port = end_port or start_port
    if (start_port, end_port) in self.rules:
      return
    port_range = '%s-%s' % (start_port, end_port)
    rule_name = 'allow-%s' % port_range
    # Azure priorities are between 100 and 4096, but we reserve 4095
    # for the special DenyAll rule created by DisallowAllPorts.
    rule_priority = 100 + len(self.rules)
    if rule_priority >= 4095:
      raise ValueError('Too many firewall rules!')
    self.rules[(start_port, end_port)] = rule_name

    vm_util.IssueRetryableCommand(
        [azure.AZURE_PATH, 'network', 'nsg', 'rule', 'create',
         '--name', rule_name,
         '--destination-port-range', port_range,
         '--access', 'Allow',
         '--priority', str(rule_priority)] +
        self.resource_group.args + self.args)
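A brief usage sketch of the NSG method above; `nsg` is assumed to be an instance of the network security group class, and the port values are illustrative:

# Usage sketch (names and ports illustrative).
nsg.AllowPort(vm, 22)          # single port -> range '22-22', priority 100
nsg.AllowPort(vm, 8000, 8100)  # range -> rule 'allow-8000-8100', priority 101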
Python
def AllowPort(self, vm, start_port, end_port=None, source_range=None):
  """Opens a port on the firewall.

  Args:
    vm: The BaseVirtualMachine object to open the port for.
    start_port: The local port to open.
    end_port: if given, open the range [start_port, end_port].
    source_range: unsupported at present.
  """
  vm.network.nsg.AllowPort(vm, start_port, end_port=end_port,
                           source_range=source_range)
Python
def _FormatCreateCommand(self):
  """Formats the create command for Docker based on vm_spec and flags."""
  create_command = ['docker', 'run', '-d', '--name', self.name]

  # Format scratch disks as volume mounts.
  for vol in self.scratch_disks:
    vol_string = vol.volume_name + ':' + vol.mount_point
    create_command.append('-v')
    create_command.append(vol_string)

  # Format the cpus option. The value must be a string because every
  # element of the command list is passed to the CLI as-is.
  if self.cpus > 0:
    create_command.append('--cpus')
    create_command.append(str(self.cpus))

  # Format the memory option.
  if self.memory_mb > 0:
    create_command.append('-m')
    create_command.append(str(self.memory_mb) + 'm')

  create_command.append(self.container_image)
  create_command.append('/usr/sbin/sshd')
  create_command.append('-D')

  logging.info('Docker create command: %s', create_command)
  return create_command
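For illustration, with one scratch volume, two CPUs, and 2048 MB of memory, the method above assembles a list equivalent to the following (all names and values are made up for the example):

# Illustrative result of _FormatCreateCommand (values are made up):
['docker', 'run', '-d', '--name', 'pkb-vm-0',
 '-v', 'pkb-vm-0-volume0:/scratch',
 '--cpus', '2',
 '-m', '2048m',
 'ubuntu_pkb', '/usr/sbin/sshd', '-D']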
Python
def _PostCreate(self):
  """Prepares the running container.

  Gets the IP address, copies public keys, and configures the proxy if one
  is specified.
  """
  self._GetIpAddresses()

  # Copy the SSH key into the container to enable SSH login.
  copy_ssh_command = ['docker', 'cp', self.ssh_public_key,
                      '%s:/root/.ssh/authorized_keys' % self.name]
  vm_util.IssueCommand(copy_ssh_command)

  # Change ownership of the authorized_keys file to root in the container.
  chown_command = ['docker', 'exec', self.name, 'chown', 'root:root',
                   '/root/.ssh/authorized_keys']
  vm_util.IssueCommand(chown_command)
  self._ConfigureProxy()
Python
def _Exists(self):
  """Returns True if the container has successfully been created and is running."""
  info, return_code = self._GetContainerInfo()
  logging.info('Checking if Docker container exists')
  if info and return_code == 0:
    # 'Running' is a boolean in docker inspect output, but guard against
    # string-typed values as well.
    if info[0]['State']['Running'] in (True, 'True'):
      logging.info('Docker container %s is up and running.', self.name)
      return True
  return False
Python
def _CreateVolumes(self):
  """Creates volumes for scratch disks.

  These volumes have to be created BEFORE container creation because Docker
  doesn't allow attaching volumes to an already running container.
  """
  self.scratch_disks = docker_disk.CreateDisks(self.disk_specs, self.name)
Python
def _GetIpAddresses(self):
  """Sets the internal and external IP address for the container."""
  info, return_code = self._GetContainerInfo()
  if info and return_code == 0:
    ip = info[0]['NetworkSettings']['IPAddress']
    logging.info('Container IP: %s', ip)
    self.ip_address = ip
    self.internal_ip = ip
  else:
    logging.warning('IP address information not found')
Python
def _GetContainerInfo(self):
  """Gets container information from `docker inspect`.

  Returns the parsed information (an empty list if the container does not
  exist) and the command's return code (0 on success).
  """
  logging.info('Finding container information')
  inspect_cmd = ['docker', 'inspect', self.name]
  info, _, return_code = vm_util.IssueCommand(inspect_cmd,
                                              suppress_warning=True)
  info = json.loads(info)
  return info, return_code
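Both `_Exists` and `_GetIpAddresses` index into this parsed `docker inspect` output; the relevant shape is roughly the following (heavily abridged, one list entry per matched container):

# Abridged shape of the parsed `docker inspect` output used above.
info = [{
    'State': {'Running': True},
    'NetworkSettings': {'IPAddress': '172.17.0.2'},
}]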
Python
def _BuildVolumesBody(self):
  """Constructs the volume-related part of the Docker create command."""
  volumes = []
  for scratch_disk in self.scratch_disks:
    vol_string = scratch_disk.volume_name + ':' + scratch_disk.mount_point
    volumes.append('-v')
    volumes.append(vol_string)
  return volumes
Python
def _LocalImageExists(self, docker_image_name):
  """Returns True if the image exists locally."""
  logging.info('Finding image information')
  inspect_cmd = ['docker', 'image', 'inspect', docker_image_name]
  info, _, return_code = vm_util.IssueCommand(inspect_cmd,
                                              suppress_warning=True)
  info = json.loads(info)
  logging.info('Checking if Docker image exists')
  if info and return_code == 0:
    logging.info('Image exists')
    return True
  logging.info('Image does not exist')
  return False
Python
def CreateDisks(disk_specs, vm_name):
  """Creates a DockerDisk volume for each scratch disk spec."""
  scratch_disks = []
  for disk_num, disk_spec in enumerate(disk_specs):
    logging.info('Creating disk number: %d', disk_num)
    volume_disk = DockerDisk(disk_spec, disk_num, vm_name)
    volume_disk.Create()
    scratch_disks.append(volume_disk)
  return scratch_disks
Python
def GetConfig(user_config):
  """Load and return benchmark config.

  Args:
    user_config: user supplied configuration (flags and config file)

  Returns:
    loaded benchmark configuration
  """
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  if 'tpu_groups' in config:
    for vm_spec in config['vm_groups']['default']['vm_spec'].values():
      vm_spec.pop('gpu_type', None)
      vm_spec.pop('gpu_count', None)
  return config
Python
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
  """Update the benchmark_spec with supplied command line flags.

  Args:
    benchmark_spec: benchmark specification to update
  """
  benchmark_spec.data_dir = FLAGS.imagenet_data_dir
  benchmark_spec.benchmark = FLAGS.mlperf_benchmark
Python
def Prepare(benchmark_spec):
  """Install and set up MLPerf on the target vm.

  Args:
    benchmark_spec: The benchmark specification
  """
  mnist_benchmark.Prepare(benchmark_spec)
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  vm.RemoteCommand(
      'git clone https://github.com/mlperf/results.git', should_log=True)
  vm.InstallPackages('python3-pip')
  vm.RemoteCommand('pip3 install mlperf_compliance==0.0.10')

  if benchmark_spec.tpus:
    vm.RemoteCommand('pip3 install --upgrade '
                     'pyyaml==3.13 '
                     'oauth2client==4.1.3 '
                     'google-api-python-client==1.7.4 '
                     'google-cloud==0.34.0')
    vm.RemoteCommand('pip3 install cloud-tpu-profiler==1.12')
  else:
    vm.Install('nvidia_docker')
    vm.RemoteCommand('sudo ln -s /scratch /data')
    imagenet_data_dir = posixpath.join('/data', 'imagenet', 'combined')
    vm.RemoteCommand('sudo mkdir -p {}'.format(imagenet_data_dir))
    vm.RemoteCommand('sudo chmod a+w /data/imagenet/combined')
    vm.InstallPreprovisionedBenchmarkData(BENCHMARK_NAME, [_ILSVRC2012_TAR],
                                          imagenet_data_dir)
    vm.RemoteCommand('sudo tar -xvf {tar} -C {data_dir}'.format(
        tar=posixpath.join(imagenet_data_dir, _ILSVRC2012_TAR),
        data_dir=imagenet_data_dir))
    # Some of the data files are extracted into subdirectories. Move them
    # all up into the top-level data directory.
    vm.RemoteCommand('find {data_dir} -name "*-*-of-*" -exec mv {{}} {data_dir}'
                     ' \\;'.format(data_dir=imagenet_data_dir))

  # Clear caches before the run.
  # https://github.com/mlperf/results/blob/master/v0.5.0/google/cloud_v2.8/resnet-tpuv2-8/code/resnet/model/main.sh#L133
  vm.RemoteCommand('sync && echo 3 | sudo tee /proc/sys/vm/drop_caches')
  vm.RemoteCommand('python3 -c "import mlperf_compliance;mlperf_compliance.'
                   'mlperf_log.{}_print(key=\'run_clear_caches\')"'.format(
                       benchmark_spec.benchmark))
Python
def _CreateMetadataDict(benchmark_spec):
  """Create metadata dict to be used in run results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  return mnist_benchmark.CreateMetadataDict(benchmark_spec)
Python
def Run(benchmark_spec):
  """Run MLPerf on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  if benchmark_spec.tpus:
    # For MLPerf v0.5, the benchmark code differs across hardware
    # configurations.
    if benchmark_spec.tpu_groups['train'].GetNumShards() > 8:
      code_path = 'cloud_v2.512/resnet-tpuv2-512/code/resnet/model'
    elif benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v2-8':
      code_path = 'cloud_v2.8/resnet-tpuv2-8/code/resnet/model'
    elif benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-8':
      code_path = 'cloud_v3.8/resnet-tpuv3-8/code/resnet/model'
    else:
      raise ValueError(
          'MLPerf configurations do not support the hardware in PKB. PKB may '
          'need to be updated if this is a new TPU type.')
    cmd = 'bash run_helper.sh 2>&1 | tee output.txt'
  else:
    code_path = 'cloud_v100x8/code/resnet'
    cmd = ('sudo nvidia-docker build . -t foo && '
           'sudo nvidia-docker run -v $MLP_HOST_DATA_DIR:/data -v '
           '$MLP_HOST_OUTPUT_DIR:/output -v /proc:/host_proc -t '
           'foo:latest run_helper_8xV100.sh 2>&1 | tee output.txt')

  mlperf_benchmark_cmd = (
      'export MLP_GCS_MODEL_DIR={model_dir} && '
      'export MLP_PATH_GCS_IMAGENET={data_dir} && '
      'export MLP_TPU_NAME={tpu_train} && '
      'export MLP_PATH_GCS_EUW_IMAGENET={data_dir} && '
      'export MLP_GCS_EUW_MODEL_DIR={model_dir} && '
      'export MLP_TPU_SIDECAR_NAME={tpu_eval} && '
      'export MLP_HOST_DATA_DIR=/data && '
      'export MLP_HOST_OUTPUT_DIR=`pwd`/output && '
      'export PYTHONPATH=$PYTHONPATH:$PWD/tpu/models && '
      'cd results/v0.5.0/google/{code_path} && '
      'sed -i "s/python /python3 /g" run_helper*.sh && '
      'mkdir -p $MLP_HOST_OUTPUT_DIR && '
      '{cmd}'.format(
          model_dir=benchmark_spec.model_dir,
          data_dir=benchmark_spec.data_dir,
          tpu_train=(benchmark_spec.tpu_groups['train'].GetName()
                     if benchmark_spec.tpus else ''),
          tpu_eval=(benchmark_spec.tpu_groups['eval'].GetName()
                    if benchmark_spec.tpus else ''),
          code_path=code_path,
          cmd=cmd))

  if cuda_toolkit.CheckNvidiaGpuExists(vm):
    mlperf_benchmark_cmd = '{env} {cmd}'.format(
        env=tensorflow.GetEnvironmentVars(vm), cmd=mlperf_benchmark_cmd)

  samples = []
  metadata = _CreateMetadataDict(benchmark_spec)
  stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd, should_log=True)
  samples.extend(MakeSamplesFromOutput(metadata, stdout))
  return samples
Python
def GetEnvironmentVars(vm):
  """Return a string containing TensorFlow-related environment variables.

  Args:
    vm: vm to get environment variables for

  Returns:
    string of environment variables
  """
  env_vars = []
  if cuda_toolkit.CheckNvidiaGpuExists(vm):
    output, _ = vm.RemoteCommand('getconf LONG_BIT', should_log=True)
    long_bit = output.strip()
    lib_name = 'lib' if long_bit == '32' else 'lib64'
    env_vars.extend([
        'PATH=%s${PATH:+:${PATH}}' %
        posixpath.join(FLAGS.cuda_toolkit_installation_dir, 'bin'),
        'CUDA_HOME=%s' % FLAGS.cuda_toolkit_installation_dir,
        'LD_LIBRARY_PATH=%s${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}' %
        posixpath.join(FLAGS.cuda_toolkit_installation_dir, lib_name)])
  if FLAGS.aws_s3_region:
    env_vars.append('AWS_REGION={}'.format(FLAGS.aws_s3_region))
  return ' '.join(env_vars)
Python
def Install(vm):
  """Installs TensorFlow on the VM."""
  has_gpu = cuda_toolkit.CheckNvidiaGpuExists(vm)
  tf_pip_package = (FLAGS.tf_gpu_pip_package if has_gpu
                    else FLAGS.tf_cpu_pip_package)

  if has_gpu:
    vm.Install('cuda_toolkit')
    vm.Install('cudnn')
    # TODO(ferneyhough): Move NCCL installation to its own package.
    # Currently this is dependent on CUDA 9 being installed.
    vm.RemoteCommand('wget %s' % NCCL_URL)
    vm.RemoteCommand('sudo dpkg -i %s' % NCCL_PACKAGE)
    vm.RemoteCommand('sudo apt install libnccl2=2.3.5-2+cuda9.0 '
                     'libnccl-dev=2.3.5-2+cuda9.0')

  vm.Install('pip')
  vm.RemoteCommand('sudo pip install requests')
  vm.RemoteCommand('sudo pip install --upgrade absl-py')
  vm.RemoteCommand('sudo pip install --upgrade %s' % tf_pip_package,
                   should_log=True)
  vm.RemoteCommand(
      'sudo pip install --upgrade %s' % FLAGS.t2t_pip_package,
      should_log=True)
  vm.InstallPackages('git')
  vm.RemoteCommand(
      'git clone https://github.com/tensorflow/benchmarks.git',
      should_log=True)
  vm.RemoteCommand(
      'cd benchmarks && git checkout {}'.format(FLAGS.tf_cnn_benchmarks_branch)
  )
  if FLAGS.cloud == 'AWS' and FLAGS.tf_data_dir and (
      not FLAGS.tf_use_local_data):
    vm.Install('aws_credentials')
Python
def GetManagedRelationalDbClass(cloud):
  """Get the ManagedRelationalDb class corresponding to 'cloud'.

  Args:
    cloud: name of cloud to get the class for
  """
  return resource.GetResourceClass(BaseManagedRelationalDb, CLOUD=cloud)
Python
def client_vm(self):
  """Client VM which will drive the database test.

  This is required by subclasses to perform client-vm network-specific
  tasks, such as getting information about the VPC, IP address, etc.
  """
  if not hasattr(self, '_client_vm'):
    raise ManagedRelationalDbPropertyNotSet('client_vm is not set')
  return self._client_vm
Python
def endpoint(self):
  """Endpoint of the database server (excluding port)."""
  if not hasattr(self, '_endpoint'):
    raise ManagedRelationalDbPropertyNotSet('endpoint not set')
  return self._endpoint
Python
def port(self):
  """Port (int) on which the database server is listening."""
  if not hasattr(self, '_port'):
    raise ManagedRelationalDbPropertyNotSet('port not set')
  return self._port
Python
def GetResourceMetadata(self):
  """Returns a dictionary of metadata.

  Child classes can extend this if needed.

  Raises:
    ManagedRelationalDbPropertyNotSet: if any expected metadata is missing.
  """
  metadata = {
      'zone': self.spec.vm_spec.zone,
      'disk_type': self.spec.disk_spec.disk_type,
      'disk_size': self.spec.disk_spec.disk_size,
      'engine': self.spec.engine,
      'high_availability': self.spec.high_availability,
      'backup_enabled': self.spec.backup_enabled,
      'backup_start_time': self.spec.backup_start_time,
      'engine_version': self.spec.engine_version,
  }
  if (hasattr(self.spec.vm_spec, 'machine_type') and
      self.spec.vm_spec.machine_type):
    metadata.update({
        'machine_type': self.spec.vm_spec.machine_type,
    })
  elif hasattr(self.spec.vm_spec, 'cpus') and hasattr(
      self.spec.vm_spec, 'memory'):
    metadata.update({
        'cpus': self.spec.vm_spec.cpus,
        'memory': self.spec.vm_spec.memory,
    })
  elif hasattr(self.spec.vm_spec, 'tier') and hasattr(
      self.spec.vm_spec, 'compute_units'):
    metadata.update({
        'tier': self.spec.vm_spec.tier,
        'compute_units': self.spec.vm_spec.compute_units,
    })
  else:
    raise ManagedRelationalDbPropertyNotSet(
        'Machine type of the database must be set.')

  return metadata
Python
def _Install(vm):
  """Installs the Glibc Benchmark package on the VM."""
  vm.Install('node_js')
  vm.Install('build_tools')
  vm.InstallPackages('bison')
  vm.RemoteCommand('cd {0} && mkdir binutils'.format(INSTALL_DIR))
  vm.RemoteCommand(
      'cd {0} && '
      'wget http://ftp.gnu.org/gnu/binutils/binutils-2.30.tar.gz'.format(
          BINUTILS_DIR))
  vm.RemoteCommand('cd {0} && tar xvf {1}'.format(BINUTILS_DIR, BINUTILS_TAR))
  vm.RemoteCommand('cd {0} && mkdir binutils-build && '
                   'cd binutils-build/ && '
                   '../binutils-2.30/configure --prefix=/opt/binutils && '
                   'make -j 4 && sudo make install'.format(BINUTILS_DIR))
  vm.Install('gcc5')
  vm.RemoteCommand('cd {0} && mkdir glibc'.format(INSTALL_DIR))
  vm.RemoteCommand(
      'cd {0} && '
      'wget https://fossies.org/linux/misc/{1}'.format(GLIBC_DIR, GLIBC_TAR))
  vm.RemoteCommand('cd {0} && tar xvf {1}'.format(GLIBC_DIR, GLIBC_TAR))
  vm.RemoteCommand(
      'cd {0} && mkdir glibc-build && cd glibc-build && '
      '../glibc-{1}/configure --prefix=/usr/local/glibc --disable-profile '
      '--enable-add-ons --with-headers=/usr/include '
      '--with-binutils=/opt/binutils/bin && make && sudo make install'.format(
          GLIBC_DIR, GLIBC_VERSION))
Python
def _AddTags(self):
  """Tags all VMs in the cluster."""
  vms_in_cluster = []
  for instance_group in self._GetInstanceGroups():
    vms_in_cluster.extend(self._GetInstancesFromInstanceGroup(instance_group))
  for vm_name in vms_in_cluster:
    cmd = util.GcloudCommand(self, 'compute', 'instances', 'add-metadata',
                             vm_name)
    cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
    cmd.Issue()
Python
def Run(vm, server_ip, server_port):
  """Runs the memtier benchmark on the vm."""
  memtier_ratio = '1:{0}'.format(FLAGS.memtier_ratio)
  samples = []
  for client_count in FLAGS.memtier_clients:
    vm.RemoteCommand('rm -f {0}'.format(MEMTIER_RESULTS))
    vm.RemoteCommand(
        'memtier_benchmark '
        '-s {server_ip} '
        '-p {server_port} '
        '-P {protocol} '
        '--run-count {run_count} '
        '--requests {requests} '
        '--clients {clients} '
        '--threads {threads} '
        '--ratio {ratio} '
        '--data-size {data_size} '
        '--key-pattern {key_pattern} '
        '--random-data > {output_file}'.format(
            server_ip=server_ip,
            server_port=server_port,
            protocol=FLAGS.memtier_protocol,
            run_count=FLAGS.memtier_run_count,
            requests=FLAGS.memtier_requests,
            clients=client_count,
            threads=FLAGS.memtier_threads,
            ratio=memtier_ratio,
            data_size=FLAGS.memtier_data_size,
            key_pattern=FLAGS.memtier_key_pattern,
            output_file=MEMTIER_RESULTS))
    results, _ = vm.RemoteCommand('cat {0}'.format(MEMTIER_RESULTS))
    metadata = GetMetadata()
    metadata['memtier_clients'] = client_count
    samples.extend(ParseResults(results, metadata))
  return samples
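`ParseResults` and `GetMetadata` are not shown here. A minimal sketch of pulling the aggregate throughput out of memtier's 'Totals' summary row; the regex, metric name, and assumed output format are illustrative, not the source implementation:

import re

from perfkitbenchmarker import sample

def ParseResults(results_text, metadata):
  """Sketch: pull aggregate ops/sec from memtier's 'Totals' summary row."""
  # Assumption: memtier_benchmark prints a summary table whose last data
  # row starts with 'Totals', with the aggregate ops/sec as the first
  # numeric column.
  match = re.search(r'^Totals\s+(\d+(?:\.\d+)?)', results_text, re.MULTILINE)
  if not match:
    return []
  return [sample.Sample('Ops Throughput', float(match.group(1)), 'ops/s',
                        metadata)]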
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from a JSON text object.
    |load_source_desc_text|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|

        ===================  ==============================================
        Format specifier     Value after the replacement
        ===================  ==============================================
        ``%(filename)s``     ``""``
        ``%(key)s``          | This is replaced by a different value
                             | for each single/multiple JSON table:
                             | [single JSON table]
                             | ``%(format_name)s%(format_id)s``
                             | [multiple JSON table] Table data key.
        ``%(format_name)s``  ``"json"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ==============================================

    :rtype: |TableData| iterator

    .. seealso::
        :py:meth:`.JsonTableFileLoader.load()`
    """
    formatter = JsonTableFormatter(self.load_dict())
    formatter.accept(self)

    return formatter.to_table_data()
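A minimal usage sketch for the JSON text loader above; the JSON payload is illustrative:

import pytablereader as ptr

# Usage sketch: load a small single-table JSON string (payload illustrative).
loader = ptr.JsonTableTextLoader('[{"a": 1, "b": 2}, {"a": 3, "b": 4}]')
for table_data in loader.load():
    print(table_data.table_name, table_data.headers)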
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from a dict object.
    |load_source_desc_text|

    :rtype: |TableData| iterator

    .. seealso::
        :py:meth:`.JsonTableFileLoader.load()`
    """
    self._validate()
    self._logger.logging_load()

    formatter = JsonTableFormatter(self.source)
    formatter.accept(self)

    return formatter.to_table_data()
Python
def create_from_path(self):
    """
    Create a file loader from the file extension to loading file.
    Supported file extensions are as follows:

        =========================================  =====================================
        Extension                                  Loader
        =========================================  =====================================
        ``"csv"``                                  :py:class:`~.CsvTableTextLoader`
        ``"xls"``/``"xlsx"``                       :py:class:`~.ExcelTableFileLoader`
        ``"htm"``/``"html"``/``"asp"``/``"aspx"``  :py:class:`~.HtmlTableTextLoader`
        ``"json"``                                 :py:class:`~.JsonTableTextLoader`
        ``"jsonl"``/``"ldjson"``/``"ndjson"``      :py:class:`~.JsonLinesTableTextLoader`
        ``"ltsv"``                                 :py:class:`~.LtsvTableTextLoader`
        ``"md"``                                   :py:class:`~.MarkdownTableTextLoader`
        ``"sqlite"``/``"sqlite3"``                 :py:class:`~.SqliteFileLoader`
        ``"tsv"``                                  :py:class:`~.TsvTableTextLoader`
        =========================================  =====================================

    :return:
        Loader that coincides with the file extension of the URL.
    :raises pytablereader.UrlError:
        If unacceptable URL format.
    :raises pytablereader.LoaderNotFoundError:
        |LoaderNotFoundError_desc| loading the URL.
    """
    import requests

    url_path = urlparse(self.__url).path
    try:
        url_extension = get_extension(url_path.rstrip("/"))
    except InvalidFilePathError:
        raise UrlError("url must include path")

    logger.debug(f"TableUrlLoaderFactory: extension={url_extension}")

    loader_class = self._get_loader_class(
        self._get_extension_loader_mapping(), url_extension)

    try:
        self._fetch_source(loader_class)
    except requests.exceptions.ProxyError as e:
        raise ProxyError(e)

    loader = self._create_from_extension(url_extension)

    logger.debug(f"TableUrlLoaderFactory: loader={loader.format_name}")

    return loader
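A hedged usage sketch of the URL loader this factory backs; the URL is illustrative, and with no explicit format name the loader is chosen from the path extension as implemented above:

import pytablereader as ptr

# Usage sketch: the URL is illustrative. The loader is selected from the
# path's ".csv" extension via create_from_path().
loader = ptr.TableUrlLoader("https://example.com/data/sample.csv")
for table_data in loader.load():
    print(table_data.table_name)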
Python
def create_from_format_name(self, format_name):
    """
    Create a file loader from a format name.
    Supported file formats are as follows:

        ==================  ======================================
        Format name         Loader
        ==================  ======================================
        ``"csv"``           :py:class:`~.CsvTableTextLoader`
        ``"excel"``         :py:class:`~.ExcelTableFileLoader`
        ``"html"``          :py:class:`~.HtmlTableTextLoader`
        ``"json"``          :py:class:`~.JsonTableTextLoader`
        ``"json_lines"``    :py:class:`~.JsonLinesTableTextLoader`
        ``"jsonl"``         :py:class:`~.JsonLinesTableTextLoader`
        ``"ldjson"``        :py:class:`~.JsonLinesTableTextLoader`
        ``"ltsv"``          :py:class:`~.LtsvTableTextLoader`
        ``"markdown"``      :py:class:`~.MarkdownTableTextLoader`
        ``"mediawiki"``     :py:class:`~.MediaWikiTableTextLoader`
        ``"ndjson"``        :py:class:`~.JsonLinesTableTextLoader`
        ``"sqlite"``        :py:class:`~.SqliteFileLoader`
        ``"ssv"``           :py:class:`~.CsvTableTextLoader`
        ``"tsv"``           :py:class:`~.TsvTableTextLoader`
        ==================  ======================================

    :param str format_name: Format name string (case insensitive).
    :return:
        Loader that coincides with the ``format_name``.
    :raises pytablereader.LoaderNotFoundError:
        |LoaderNotFoundError_desc| the format.
    :raises TypeError:
        If ``format_name`` is not a string.
    """
    import requests

    logger.debug(f"TableUrlLoaderFactory: name={format_name}")

    loader_class = self._get_loader_class(
        self._get_format_name_loader_mapping(), format_name)

    try:
        self._fetch_source(loader_class)
    except requests.exceptions.ProxyError as e:
        raise ProxyError(e)

    loader = self._create_from_format_name(format_name)

    logger.debug(f"TableUrlLoaderFactory: loader={loader.format_name}")

    return loader
Python
def load(self):
    """
    Load table data from a Google Spreadsheet.

    This method considers :py:attr:`.source` as a path to the credential
    JSON file to access the Google Sheets API.

    The method automatically searches for the header row, starting from
    :py:attr:`.start_row`. The condition for a header row is that all of
    its columns have values (except empty columns).

    :return:
        Loaded table data. Return one |TableData| for each sheet in
        the workbook. The table name for data will be determined by
        :py:meth:`~.GoogleSheetsTableLoader.make_table_name`.
    :rtype: iterator of |TableData|
    :raises pytablereader.DataError:
        If the header row is not found.
    :raises pytablereader.OpenError:
        If the spreadsheet is not found.
    """
    import gspread
    from oauth2client.service_account import ServiceAccountCredentials

    self._validate_table_name()
    self._validate_title()

    scope = ["https://spreadsheets.google.com/feeds",
             "https://www.googleapis.com/auth/drive"]
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        self.source, scope)

    gc = gspread.authorize(credentials)
    try:
        for worksheet in gc.open(self.title).worksheets():
            self._worksheet = worksheet
            self.__all_values = [row for row in worksheet.get_all_values()]

            if self._is_empty_sheet():
                continue

            try:
                self.__strip_empty_col()
            except ValueError:
                continue

            value_matrix = self.__all_values[self._get_start_row_idx():]
            try:
                headers = value_matrix[0]
                rows = value_matrix[1:]
            except IndexError:
                continue

            self.inc_table_count()

            yield TableData(
                self.make_table_name(),
                headers,
                rows,
                dp_extractor=self.dp_extractor,
                type_hints=self._extract_type_hints(headers),
            )
    except gspread.exceptions.SpreadsheetNotFound:
        raise OpenError(f"spreadsheet '{self.title}' not found")
    except gspread.exceptions.APIError as e:
        raise APIError(e)
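A hedged usage sketch derived from the docstring above: `source` points at the service-account credential file and `title` names the spreadsheet; both values are illustrative placeholders:

import pytablereader as ptr

# Usage sketch based on the docstring: the credential path and spreadsheet
# title are illustrative placeholders.
loader = ptr.GoogleSheetsTableLoader()
loader.source = "credentials.json"
loader.title = "sample_spreadsheet"
for table_data in loader.load():
    print(table_data.table_name)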
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from a Markdown file.
    |load_source_desc_file|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|

        ===================  ==============================================
        Format specifier     Value after the replacement
        ===================  ==============================================
        ``%(filename)s``     |filename_desc|
        ``%(key)s``          ``%(format_name)s%(format_id)s``
        ``%(format_name)s``  ``"markdown"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ==============================================

    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the Markdown data is invalid or empty.
    """
    self._validate()
    self._logger.logging_load()

    self.encoding = get_file_encoding(self.source, self.encoding)
    with open(self.source, encoding=self.encoding) as fp:
        formatter = MarkdownTableFormatter(fp.read(), self._logger)
    formatter.accept(self)

    return formatter.to_table_data()
Python
def load(self): """ Extract tabular data as |TableData| instances from a Markdown text object. |load_source_desc_text| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` ``""`` ``%(key)s`` ``%(format_name)s%(format_id)s`` ``%(format_name)s`` ``"markdown"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the Markdown data is invalid or empty. """ self._validate() self._logger.logging_load() formatter = MarkdownTableFormatter(self.source, self._logger) formatter.accept(self) return formatter.to_table_data()
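The text-object variant can be exercised without touching the filesystem; a sketch under the same API assumption:

import pytablereader as ptr

markdown_text = """\
| a | b |
|--:|--:|
| 1 | 2 |
| 3 | 4 |
"""

loader = ptr.MarkdownTableTextLoader(markdown_text)
for table_data in loader.load():
    print(table_data.headers)  # ['a', 'b']
    print(table_data.rows)     # the two data rows of the table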
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from an LTSV file.
    |load_source_desc_file|

    :return:
        Loaded table data.
        |load_table_name_desc|

        ===================  ========================================
        Format specifier     Value after the replacement
        ===================  ========================================
        ``%(filename)s``     |filename_desc|
        ``%(format_name)s``  ``"ltsv"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ========================================
    :rtype: |TableData| iterator
    :raises pytablereader.InvalidHeaderNameError:
        If an invalid label name is included in the LTSV file.
    :raises pytablereader.DataError:
        If the LTSV data is invalid.
    """

    self._validate()
    self._logger.logging_load()

    self.encoding = get_file_encoding(self.source, self.encoding)
    self._ltsv_input_stream = open(self.source, encoding=self.encoding)

    for data_matrix in self._to_data_matrix():
        formatter = SingleJsonTableConverterA(data_matrix)
        formatter.accept(self)

        return formatter.to_table_data()
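LTSV holds one record per line as tab-separated ``label:value`` pairs. A sketch that writes a tiny sample file and loads it, assuming the pytablereader public API (the path and log records are placeholders):

import pytablereader as ptr

# Each LTSV line is a set of tab-separated "label:value" pairs.
with open("sample.ltsv", "w") as f:  # placeholder path
    f.write("host:127.0.0.1\tstatus:200\tsize:2326\n")
    f.write("host:10.0.0.2\tstatus:404\tsize:512\n")

loader = ptr.LtsvTableFileLoader("sample.ltsv")
for table_data in loader.load():
    print(table_data.headers)  # the labels become the header row
    print(table_data.rows)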
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from an LTSV text object.
    |load_source_desc_text|

    :return:
        Loaded table data.
        |load_table_name_desc|

        ===================  ========================================
        Format specifier     Value after the replacement
        ===================  ========================================
        ``%(filename)s``     ``""``
        ``%(format_name)s``  ``"ltsv"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ========================================
    :rtype: |TableData| iterator
    :raises pytablereader.InvalidHeaderNameError:
        If an invalid label name is included in the LTSV data.
    :raises pytablereader.DataError:
        If the LTSV data is invalid.
    """

    self._validate()
    self._logger.logging_load()

    self._ltsv_input_stream = self.source.splitlines()

    for data_matrix in self._to_data_matrix():
        formatter = SingleJsonTableConverterA(data_matrix)
        formatter.accept(self)

        return formatter.to_table_data()
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from an Excel file.
    |spreadsheet_load_desc|

    :return:
        Loaded |TableData| iterator.
        |TableData| created for each sheet in the workbook.
        |load_table_name_desc|

        ===================  ====================================
        Format specifier     Value after the replacement
        ===================  ====================================
        ``%(filename)s``     Filename of the workbook
        ``%(sheet)s``        Name of the sheet
        ``%(format_name)s``  ``"spreadsheet"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ====================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the header row is not found.
    :raises pytablereader.error.OpenError:
        If the source file could not be opened.
    """

    try:
        import excelrd as xlrd
    except ImportError:
        import xlrd

    self._validate()
    self._logger.logging_load()

    try:
        workbook = xlrd.open_workbook(self.source)
    except xlrd.biffh.XLRDError as e:
        raise OpenError(e)

    for worksheet in workbook.sheets():
        self._worksheet = worksheet

        if self._is_empty_sheet():
            continue

        self.__extract_not_empty_col_idx()

        try:
            start_row_idx = self._get_start_row_idx()
        except DataError:
            continue

        rows = [
            self.__get_row_values(row_idx)
            for row_idx in range(start_row_idx + 1, self._row_count)
        ]

        self.inc_table_count()
        headers = self.__get_row_values(start_row_idx)

        yield TableData(
            self._make_table_name(),
            headers,
            rows,
            dp_extractor=self.dp_extractor,
            type_hints=self._extract_type_hints(headers),
        )
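A usage sketch for the Excel loader; "workbook.xlsx" is a placeholder path, and one |TableData| is produced per sheet whose header row can be located.

import pytablereader as ptr

loader = ptr.ExcelTableFileLoader("workbook.xlsx")  # placeholder path
for table_data in loader.load():
    # table names follow the %(sheet)s / %(filename)s specifiers above
    print(table_data.table_name, len(table_data.rows))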
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from HTML table tags
    in an HTML file.
    |load_source_desc_file|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|

        ===================  ==============================================
        Format specifier     Value after the replacement
        ===================  ==============================================
        ``%(filename)s``     |filename_desc|
        ``%(title)s``        ``<title>`` tag value of the HTML.
        ``%(key)s``          | This is replaced with:
                             | **(1)** ``id`` attribute of the table tag
                             | **(2)** ``%(format_name)s%(format_id)s``
                             | if the ``id`` attribute is not present
                             | in the table tag.
        ``%(format_name)s``  ``"html"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ==============================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the HTML data is invalid or empty.

    .. note::
        Table tag attributes are ignored in the loaded |TableData|.
    """

    self._validate()
    self._logger.logging_load()

    self.encoding = get_file_encoding(self.source, self.encoding)
    with open(self.source, encoding=self.encoding) as fp:
        formatter = HtmlTableFormatter(fp.read(), self._logger)
    formatter.accept(self)

    return formatter.to_table_data()
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from HTML table tags
    in an HTML text object.
    |load_source_desc_text|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|

        ===================  ==============================================
        Format specifier     Value after the replacement
        ===================  ==============================================
        ``%(filename)s``     ``""``
        ``%(title)s``        ``<title>`` tag value of the HTML.
        ``%(key)s``          | This is replaced with:
                             | **(1)** ``id`` attribute of the table tag
                             | **(2)** ``%(format_name)s%(format_id)s``
                             | if the ``id`` attribute is not included
                             | in the table tag.
        ``%(format_name)s``  ``"html"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ==============================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the HTML data is invalid or empty.
    """

    self._validate()
    self._logger.logging_load()

    formatter = HtmlTableFormatter(self.source, self._logger)
    formatter.accept(self)

    return formatter.to_table_data()
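Because the text-object variant needs no file, it is the easiest to try; a self-contained sketch under the pytablereader API assumption (the HTML content is made up):

import pytablereader as ptr

html_text = """
<html>
  <head><title>example</title></head>
  <body>
    <table id="sales">
      <tr><th>region</th><th>total</th></tr>
      <tr><td>east</td><td>10</td></tr>
      <tr><td>west</td><td>12</td></tr>
    </table>
  </body>
</html>
"""

loader = ptr.HtmlTableTextLoader(html_text)
for table_data in loader.load():
    # the table's id attribute ("sales") can feed the %(key)s specifier
    print(table_data.table_name, table_data.rows)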
Python
def create_from_path(self):
    """
    Create a file loader from the file extension of the file to load.
    Supported file extensions are as follows:

        ==========================  =======================================
        Extension                   Loader
        ==========================  =======================================
        ``"csv"``                   :py:class:`~.CsvTableFileLoader`
        ``"xls"``/``"xlsx"``        :py:class:`~.ExcelTableFileLoader`
        ``"htm"``/``"html"``        :py:class:`~.HtmlTableFileLoader`
        ``"json"``                  :py:class:`~.JsonTableFileLoader`
        ``"jsonl"``                 :py:class:`~.JsonLinesTableFileLoader`
        ``"ldjson"``                :py:class:`~.JsonLinesTableFileLoader`
        ``"ltsv"``                  :py:class:`~.LtsvTableFileLoader`
        ``"md"``                    :py:class:`~.MarkdownTableFileLoader`
        ``"ndjson"``                :py:class:`~.JsonLinesTableFileLoader`
        ``"sqlite"``/``"sqlite3"``  :py:class:`~.SqliteFileLoader`
        ``"tsv"``                   :py:class:`~.TsvTableFileLoader`
        ==========================  =======================================

    :return:
        Loader that matches the file extension of
        :py:attr:`.file_extension`.
    :raises pytablereader.LoaderNotFoundError:
        |LoaderNotFoundError_desc| loading the file.
    """

    loader = self._create_from_extension(self.file_extension)

    logger.debug(
        "TableFileLoaderFactory.create_from_path: extension={}, loader={}".format(
            self.file_extension, loader.format_name
        )
    )

    return loader
Python
def create_from_format_name(self, format_name):
    """
    Create a file loader from a format name.
    Supported file formats are as follows:

        ================  ======================================
        Format name       Loader
        ================  ======================================
        ``"csv"``         :py:class:`~.CsvTableFileLoader`
        ``"excel"``       :py:class:`~.ExcelTableFileLoader`
        ``"html"``        :py:class:`~.HtmlTableFileLoader`
        ``"json"``        :py:class:`~.JsonTableFileLoader`
        ``"json_lines"``  :py:class:`~.JsonLinesTableFileLoader`
        ``"jsonl"``       :py:class:`~.JsonLinesTableFileLoader`
        ``"ltsv"``        :py:class:`~.LtsvTableFileLoader`
        ``"markdown"``    :py:class:`~.MarkdownTableFileLoader`
        ``"mediawiki"``   :py:class:`~.MediaWikiTableFileLoader`
        ``"ndjson"``      :py:class:`~.JsonLinesTableFileLoader`
        ``"sqlite"``      :py:class:`~.SqliteFileLoader`
        ``"ssv"``         :py:class:`~.CsvTableFileLoader`
        ``"tsv"``         :py:class:`~.TsvTableFileLoader`
        ================  ======================================

    :param str format_name: Format name string (case insensitive).
    :return: Loader that matches the ``format_name``.
    :raises pytablereader.LoaderNotFoundError:
        |LoaderNotFoundError_desc| the format.
    """

    loader = self._create_from_format_name(format_name)

    logger.debug(
        "TableFileLoaderFactory.create_from_format_name: name={}, loader={}".format(
            format_name, loader.format_name
        )
    )

    return loader
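A sketch of driving the factory directly; the import path and constructor signature here are assumptions, and in practice the higher-level loader classes are the usual entry point.

# Import path and constructor signature are assumptions.
from pytablereader.factory import TableFileLoaderFactory

factory = TableFileLoaderFactory("data.csv")          # placeholder path
loader = factory.create_from_path()                   # CsvTableFileLoader, by extension
loader = factory.create_from_format_name("markdown")  # MarkdownTableFileLoader, by name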
Python
def load(self):
    """
    Extract tabular data as |TableData| instances from a
    Line-delimited JSON file.
    |load_source_desc_file|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the data is invalid Line-delimited JSON.
    :raises pytablereader.error.ValidationError:
        If the data is not in an acceptable Line-delimited JSON format.
    """

    formatter = JsonLinesTableFormatter(self.load_dict())
    formatter.accept(self)

    return formatter.to_table_data()
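Line-delimited JSON holds one JSON object per line; a sketch that writes a two-record file and loads it, assuming the pytablereader public API (the path is a placeholder):

import pytablereader as ptr

with open("records.jsonl", "w") as f:  # placeholder path
    f.write('{"a": 1, "b": "x"}\n')
    f.write('{"a": 2, "b": "y"}\n')

loader = ptr.JsonLinesTableFileLoader("records.jsonl")
for table_data in loader.load():
    print(table_data.headers)  # the object keys become the header row
    print(table_data.rows)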
Python
def writeBenchScript(dir, benchmark_name, size, output_path):
    """
    This method creates a script in ``dir`` that will eventually be
    passed to the simulated system (to run a specific benchmark
    at bootup).
    """
    input_file_name = '{}/run_{}_{}'.format(dir, benchmark_name, size)
    with open(input_file_name, "w") as f:
        f.write('{} {} {}'.format(benchmark_name, size, output_path))

    return input_file_name
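A hypothetical invocation of the function above; the directory, benchmark name, size, and output path are all illustrative.

# Creates /tmp/run_bzip2_ref containing "bzip2 ref /home/gem5/results";
# the returned path is what gets handed to the simulated system.
script_path = writeBenchScript("/tmp", "bzip2", "ref", "/home/gem5/results")
print(script_path)  # -> /tmp/run_bzip2_ref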
Python
def boot_linux():
    '''
    Output 1: True if Linux booted successfully (on failure the process
              exits with status 1, so False is never actually returned)
    Output 2: exit cause
    '''
    print("Booting Linux")
    exit_event = m5.simulate()
    exit_cause = exit_event.getCause()
    success = exit_cause == "m5_exit instruction encountered"
    if not success:
        print("Error while booting Linux: {}".format(exit_cause))
        exit(1)
    print("Booting done")
    return success, exit_cause
Python
def run_spec_benchmark():
    '''
    Output 1: True if the benchmark ran successfully (on failure the
              process exits with status 1, so False is never actually
              returned)
    Output 2: exit cause
    '''
    print("Start running benchmark")
    exit_event = m5.simulate()
    exit_cause = exit_event.getCause()
    success = exit_cause == "m5_exit instruction encountered"
    if not success:
        print("Error while running benchmark: {}".format(exit_cause))
        exit(1)
    print("Benchmark done")
    return success, exit_cause
Python
def copy_spec_logs():
    '''
    Output 1: True if the logs were copied successfully (on failure the
              process exits with status 1, so False is never actually
              returned)
    Output 2: exit cause
    '''
    print("Copying SPEC logs")
    exit_event = m5.simulate()
    exit_cause = exit_event.getCause()
    success = exit_cause == "m5_exit instruction encountered"
    if not success:
        print("Error while copying SPEC log files: {}".format(exit_cause))
        exit(1)
    print("Copying done")
    return success, exit_cause
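The three phase functions above are shaped identically because each one simply resumes simulation until the guest executes an m5 exit; the guest-side script determines where one phase ends and the next begins. A sketch of how they might be chained in a gem5 run script ("__m5_main__" is gem5's convention for config scripts):

# Each call blocks in m5.simulate() until the guest triggers an m5 exit,
# so the phases run back to back in guest-script order.
if __name__ == "__m5_main__":
    boot_linux()          # phase 1: kernel boot
    run_spec_benchmark()  # phase 2: benchmark execution
    copy_spec_logs()      # phase 3: copy logs out of the guest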
Python
def publish(config_file): '''parse config file and publish message''' try: with open(config_file) as cfile: config = json.load(cfile) client = ibmiotf.device.Client(config) client.connect() qos = 2 # Quality of service: exactly once my_data = { "insert_args": { "observed_timestamp": "2018-04-22 13:24:23", "device_id": "1", "value": 52.2 } } success = client.publishEvent('status', 'json', my_data, qos, on_publish=published) if not success: print('failed to publish message') time.sleep(5) client.disconnect() except BaseException as err: print('publish failed: %r' % err)
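The ``on_publish=published`` argument above refers to a callback defined elsewhere; a minimal sketch of that callback plus an invocation might look like the following (the config path is a placeholder, and the no-argument callback signature is an assumption):

def published():
    # Invoked by the client once the QoS-2 publish completes
    # (the no-argument signature is an assumption).
    print('message published')

if __name__ == '__main__':
    publish('device_config.json')  # placeholder path to the JSON config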
Python
def switch(hass, name, value):
    """React to a flick of the switch."""
    hass.services.call(
        DOMAIN,
        SERVICE_SWITCH,
        {
            ATTR_NAME: name,
            ATTR_VALUE: value
        },
    )
Python
def switch_template(hass, name_template, value_template):
    """React to a flick of the switch."""
    hass.services.call(
        DOMAIN,
        SERVICE_SWITCH,
        {
            ATTR_NAME_TEMPLATE: name_template,
            ATTR_VALUE_TEMPLATE: value_template
        },
    )
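Illustrative calls to the two helpers above; ``hass`` is an existing Home Assistant instance, and the entity name and value are made up.

# Plain call: name and value are passed through to the service as-is.
switch(hass, "living_room_switch", "on")

# Template call: both fields are rendered by the service handler.
switch_template(hass, "{{ 'living_room_switch' }}", "{{ 'on' }}")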
Python
def switch_service(call): """Handle calls to the switch service.""" global LIGHTS_CONTROL if LIGHTS_CONTROL is not None: name = call.data.get(ATTR_NAME) value = call.data.get(ATTR_VALUE) if name is None: name, value = _render_name_value("switch_template", call.data, {}, {}) if name is not None: LIGHTS_CONTROL.switch(name, value) else: _LOGGER.warning("{}: failed to do switch call since LightsControl is not running".format(DOMAIN))
Python
def sensor_service(call): """Handle calls to the sensor service.""" global LIGHTS_CONTROL if LIGHTS_CONTROL is not None: name = call.data.get(ATTR_NAME) value = call.data.get(ATTR_VALUE) if name is None: name, value = _render_name_value("sensor_template", call.data, {}, {}) if name is not None: LIGHTS_CONTROL.sensor(name, value) else: _LOGGER.warning("{}: failed to do sensor call since LightsControl is not running".format(DOMAIN))
Python
def watchdog_service(call): """Handle calls to the watchdog service.""" global LIGHTS_CONTROL if LIGHTS_CONTROL is not None: LIGHTS_CONTROL.watchdog() else: _LOGGER.warning("{}: failed to do watchdog call since LightsControl is not running".format(DOMAIN))
Python
def reload_groups_service(call): """Handle calls to the reload_groups service.""" global LIGHTS_CONTROL if LIGHTS_CONTROL is not None: LIGHTS_CONTROL.reload_groups() else: _LOGGER.warning("{}: failed to do reload_groups call since LightsControl is not running".format(DOMAIN))
Python
def restart_service(call): """Handle calls to the restart service.""" global LIGHTS_CONTROL if LIGHTS_CONTROL is not None: LIGHTS_CONTROL.restart() else: _LOGGER.warning("{}: failed to do restart call since LightsControl is not running".format(DOMAIN))
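A sketch of how these handlers might be wired up during component setup; only ``SERVICE_SWITCH`` appears in the snippets above, so the remaining service-name strings are assumptions.

def setup(hass, config):
    """Register the LightsControl services (service names partly assumed)."""
    hass.services.register(DOMAIN, SERVICE_SWITCH, switch_service)
    hass.services.register(DOMAIN, "sensor", sensor_service)
    hass.services.register(DOMAIN, "watchdog", watchdog_service)
    hass.services.register(DOMAIN, "reload_groups", reload_groups_service)
    hass.services.register(DOMAIN, "restart", restart_service)
    return True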