Dataset schema (column: type, observed values):

    repo             string   length 2 to 99
    file             string   length 13 to 225
    code             string   length 0 to 18.3M
    file_length      int64    0 to 18.3M
    avg_line_length  float64  0 to 1.36M
    max_line_length  int64    0 to 4.26M
    extension_type   string   1 distinct value
repo: triton
file: triton-main/python/triton/compiler/compiler.py
code:
from __future__ import annotations

import functools
import hashlib
import json
import os
import re
import subprocess
import tempfile
from collections import namedtuple
from pathlib import Path
from typing import Any, Tuple

from .._C.libtriton.triton import (add_external_libs, compile_ptx_to_cubin,
                                   get_shared_memory_size, ir,
                                   translate_llvmir_to_hsaco,
                                   translate_llvmir_to_ptx,
                                   translate_triton_gpu_to_llvmir)
from ..common.backend import get_backend
# from ..runtime import driver, jit, JITFunction
# TODO: runtime.errors
from ..runtime.autotuner import OutOfResources
from ..runtime.cache import get_cache_manager
from ..runtime.driver import driver
from ..runtime.jit import (JITFunction, get_cuda_stream, get_current_device,
                           get_device_capability, version_key)
from ..tools.disasm import extract
from .code_generator import ast_to_ttir
from .make_launcher import make_stub


def inline_triton_ir(mod):
    pm = ir.pass_manager(mod.context)
    pm.enable_debug()
    pm.add_inliner_pass()
    pm.run(mod)
    return mod


def ttir_compute_capability_rewrite(mod, arch):
    # For hardware without support, we must rewrite all load/store
    # with block (tensor) pointers into tensors of pointers
    pm = ir.pass_manager(mod.context)
    pm.enable_debug()
    if _is_cuda(arch):
        pm.add_rewrite_tensor_pointer_pass(arch)
    pm.run(mod)
    return mod


def optimize_ttir(mod, arch):
    mod = inline_triton_ir(mod)
    mod = ttir_compute_capability_rewrite(mod, arch)
    pm = ir.pass_manager(mod.context)
    pm.enable_debug()
    pm.add_inliner_pass()
    pm.add_triton_combine_pass()
    pm.add_canonicalizer_pass()
    pm.add_reorder_broadcast_pass()
    pm.add_cse_pass()
    pm.add_licm_pass()
    pm.add_symbol_dce_pass()
    pm.run(mod)
    return mod


def ttir_to_ttgir(mod, num_warps):
    pm = ir.pass_manager(mod.context)
    pm.enable_debug()
    pm.add_convert_triton_to_tritongpu_pass(num_warps)
    pm.run(mod)
    return mod


def optimize_ttgir(mod, num_stages, arch):
    pm = ir.pass_manager(mod.context)
    pm.enable_debug()
    pm.add_tritongpu_coalesce_pass()
    pm.add_tritongpu_remove_layout_conversions_pass()
    if isinstance(arch, int):
        pm.add_tritongpu_accelerate_matmul_pass(arch)
    pm.add_tritongpu_remove_layout_conversions_pass()
    pm.add_tritongpu_optimize_dot_operands_pass()
    pm.add_tritongpu_pipeline_pass(num_stages)
    pm.add_tritongpu_prefetch_pass()
    pm.add_tritongpu_optimize_dot_operands_pass()
    pm.add_tritongpu_remove_layout_conversions_pass()
    pm.add_tritongpu_decompose_conversions_pass()
    pm.add_tritongpu_reorder_instructions_pass()
    pm.add_cse_pass()
    pm.add_symbol_dce_pass()
    pm.run(mod)
    return mod


def _add_external_libs(mod, libs):
    for name, path in libs.items():
        if len(name) == 0 or len(path) == 0:
            return
    add_external_libs(mod, list(libs.keys()), list(libs.values()))


def ttgir_to_llir(mod, extern_libs, arch):
    if extern_libs:
        _add_external_libs(mod, extern_libs)
    # TODO: separate tritongpu_to_llvmir for different backends
    if _is_cuda(arch):
        return translate_triton_gpu_to_llvmir(mod, arch, False)
    else:
        return translate_triton_gpu_to_llvmir(mod, 0, True)


# PTX translation

@functools.lru_cache()
def ptx_get_version(cuda_version) -> int:
    '''
    Get the highest PTX version supported by the current CUDA driver.
    '''
    assert isinstance(cuda_version, str)
    major, minor = map(int, cuda_version.split('.'))
    if major == 12:
        return 80 + minor
    if major == 11:
        return 70 + minor
    if major == 10:
        return 63 + minor
    raise RuntimeError("Triton only supports CUDA 10.0 or higher")


@functools.lru_cache()
def path_to_ptxas():
    base_dir = os.path.join(os.path.dirname(__file__), os.pardir)
    paths = [
        os.environ.get("TRITON_PTXAS_PATH", ""),
        os.path.join(base_dir, "third_party", "cuda", "bin", "ptxas")
    ]
    for ptxas in paths:
        ptxas_bin = ptxas.split(" ")[0]
        if os.path.exists(ptxas_bin) and os.path.isfile(ptxas_bin):
            result = subprocess.check_output([ptxas_bin, "--version"], stderr=subprocess.STDOUT)
            if result is not None:
                version = re.search(r".*release (\d+\.\d+).*", result.decode("utf-8"), flags=re.MULTILINE)
                if version is not None:
                    return ptxas, version.group(1)
    raise RuntimeError("Cannot find ptxas")


def llir_to_ptx(mod: Any, arch: int, ptx_version: int = None) -> str:
    '''
    Translate TritonGPU module to PTX code.
    :param mod: a TritonGPU dialect module
    :return: PTX code
    '''
    if ptx_version is None:
        _, cuda_version = path_to_ptxas()
        ptx_version = ptx_get_version(cuda_version)
    return translate_llvmir_to_ptx(mod, arch, ptx_version)


def ptx_to_cubin(ptx: str, arch: int):
    '''
    Compile TritonGPU module to cubin.
    :param ptx: ptx code
    :param arch: compute capability
    :return: str
    '''
    ptxas, _ = path_to_ptxas()
    return compile_ptx_to_cubin(ptx, ptxas, arch)


# AMDGCN translation

def get_amdgcn_bitcode_paths(arch):
    gpu_arch_agnostic_bitcode_libraries = ["opencl.bc",
                                           "ocml.bc",
                                           "ockl.bc",
                                           "oclc_finite_only_off.bc",
                                           "oclc_daz_opt_off.bc",
                                           "oclc_correctly_rounded_sqrt_on.bc",
                                           "oclc_unsafe_math_off.bc",
                                           "oclc_wavefrontsize64_on.bc"]

    gfx_arch = arch[1]
    gfx_arch_id = re.search('gfx(\\w+)', gfx_arch).group(1).strip()

    gpu_arch_specific_bitcode_library = 'oclc_isa_version_' + gfx_arch_id + ".bc"
    bitcode_path_dir = os.path.join(Path(__file__).parent.resolve(), "third_party/rocm/lib/bitcode/")

    amdgcn_bitcode_paths = {}
    i = 1
    for bc_lib in gpu_arch_agnostic_bitcode_libraries:
        bc_path = bitcode_path_dir + bc_lib
        if os.path.exists(bc_path):
            amdgcn_bitcode_paths['library_' + str(i)] = bc_path
            i += 1
    bc_gfx_path = bitcode_path_dir + gpu_arch_specific_bitcode_library
    if os.path.exists(bc_gfx_path):
        amdgcn_bitcode_paths['library_' + str(i)] = bc_gfx_path

    return amdgcn_bitcode_paths


def get_amdgpu_arch_fulldetails():
    """
    get the amdgpu full ISA details for compiling:
    i.e., arch_triple: amdgcn-amd-amdhsa; arch_name: gfx906; arch_features: sramecc+:xnack-
    """
    try:
        # TODO: package rocm.cc with Triton
        rocm_path_dir = os.getenv("ROCM_PATH", default="/opt/rocm")
        rocminfo = subprocess.check_output(rocm_path_dir + '/bin/rocminfo').decode()
        gfx_arch_details = re.search('amd.*', rocminfo).group(0).strip().split('--')
        arch_triple = gfx_arch_details[0]
        arch_name_features = gfx_arch_details[1].split(':')
        arch_name = arch_name_features[0]
        arch_features = ""

        if (len(arch_name_features) == 3):
            arch_features = "+" + re.search('\\w+', arch_name_features[1]).group(0) + ","\
                            "-" + re.search('\\w+', arch_name_features[2]).group(0)
        return [arch_triple, arch_name, arch_features]
    except BaseException:
        return None


def llir_to_amdgcn_and_hsaco(mod: Any, gfx_arch: str, gfx_triple: str, gfx_features: str) -> Tuple[str, str]:
    '''
    Translate TritonGPU module to HSACO code based on full details of gpu architecture.
    :param mod: a TritonGPU dialect module
    :return:
        - AMDGCN code
        - Path to HSACO object
    '''
    return translate_llvmir_to_hsaco(mod, gfx_arch, gfx_triple, gfx_features)


# ------------------------------------------------------------------------------
# compiler
# ------------------------------------------------------------------------------


def get_kernel_name(src: str, pattern: str) -> str:
    '''
    Get kernel name from PTX code.
    This kernel name is required when launching the kernel.
    '''
    # There is a name mangling in PTX codegen, so the original kernel names in Triton IR are not available in PTX/cubin.
    assert src
    for line in src.split('\n'):
        line = line.strip()
        if line.startswith(pattern):
            return line.split()[-1]


def convert_type_repr(x):
    match = re.search(r'!tt\.ptr<(.*)>', x)
    if match is not None:
        return '*' + convert_type_repr(match.group(1))
    return x


def make_hash(fn, arch, **kwargs):
    if isinstance(fn, JITFunction):
        configs = kwargs["configs"]
        signature = kwargs["signature"]
        constants = kwargs.get("constants", dict())
        num_warps = kwargs.get("num_warps", 4)
        num_stages = kwargs.get("num_stages", 3)
        debug = kwargs.get("debug", False)
        # Get unique key for the compiled code
        get_conf_key = lambda conf: (sorted(conf.divisible_by_16), sorted(conf.equal_to_1))
        configs_key = [get_conf_key(conf) for conf in configs]
        key = f"{fn.cache_key}-{''.join(signature.values())}-{configs_key}-{constants}-{num_warps}-{num_stages}-{debug}-{arch}"
        return hashlib.md5(key.encode("utf-8")).hexdigest()
    assert isinstance(fn, str)
    return hashlib.md5((Path(fn).read_text() + version_key()).encode("utf-8")).hexdigest()


# - ^\s*tt\.func\s+ : match the start of the string, any leading whitespace, the keyword func,
#   and any following whitespace
# - (public\s+)? : optionally match the keyword public and any following whitespace
# - (@\w+) : match an @ symbol followed by one or more word characters
#   (letters, digits, or underscores), and capture it as group 1 (the function name)
# - (\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\)) : match a pair of parentheses enclosing
#   zero or more arguments separated by commas, and capture it as group 2 (the argument list)
mlir_prototype_pattern = r'^\s*tt\.func\s+(?:public\s+)?(@\w+)(\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\))\s*\{\s*$'
ptx_prototype_pattern = r"\.(?:visible|extern)\s+\.(?:entry|func)\s+(\w+)\s*\(([^)]*)\)"
prototype_pattern = {
    "ttir": mlir_prototype_pattern,
    "ttgir": mlir_prototype_pattern,
    "ptx": ptx_prototype_pattern,
}

mlir_arg_type_pattern = r'%\w+: ([^,^\)\s]+)(?: \{\S+ = \S+ : \S+\})?,?'
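# Illustrative worked example (editorial addition, not in the original file):
# given the hypothetical TTIR line
#     tt.func public @add_kernel(%arg0: !tt.ptr<f32>, %arg1: i32) {
# mlir_prototype_pattern captures '@add_kernel' (group 1) and
# '(%arg0: !tt.ptr<f32>, %arg1: i32)' (group 2); re.findall with
# mlir_arg_type_pattern over group 2 yields ['!tt.ptr<f32>', 'i32'],
# and convert_type_repr('!tt.ptr<f32>') returns '*f32'.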
ptx_arg_type_pattern = r"\.param\s+\.(\w+)"
arg_type_pattern = {
    "ttir": mlir_arg_type_pattern,
    "ttgir": mlir_arg_type_pattern,
    "ptx": ptx_arg_type_pattern,
}

ttgir_num_warps_pattern = r'"triton_gpu.num-warps"\s?=\s?(\d+)\s?:'


def _get_jsonable_constants(constants):
    def _is_jsonable(x):
        try:
            json.dumps(x)
            return True
        except (TypeError, OverflowError):
            return False
    serialized_constants = {}
    for constant in constants:
        if _is_jsonable(constants[constant]):
            serialized_constants[constant] = constants[constant]
    return serialized_constants


def parse_mlir_module(path, context):
    module = ir.parse_mlir_module(path, context)
    # module takes ownership of the context
    module.context = context
    return module


instance_descriptor = namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"], defaults=[set(), set()])


# TODO: architecture descriptor class
def _is_cuda(arch):
    return isinstance(arch, int)


def get_architecture_descriptor(capability):
    try:
        import torch
    except ImportError:
        raise ImportError("Triton requires PyTorch to be installed")
    if capability is None:
        if torch.version.hip is None:
            device = get_current_device()
            capability = get_device_capability(device)
            capability = capability[0] * 10 + capability[1]
        else:
            capability = get_amdgpu_arch_fulldetails()
    return capability


def add_rocm_stages(arch, extern_libs, stages):
    extern_libs.update(get_amdgcn_bitcode_paths(arch))

    for key in list(extern_libs):
        if extern_libs[key] == '' or extern_libs[key] is None:
            extern_libs.pop(key)

    gfx_arch_full_details = arch
    gfx_arch = os.environ.get('MI_GPU_ARCH', gfx_arch_full_details[1])
    if gfx_arch is None:
        raise RuntimeError('gfx_arch is None (not specified)')
    stages["amdgcn"] = (lambda path: Path(path).read_text(),
                        lambda src: llir_to_amdgcn_and_hsaco(src, gfx_arch,
                                                             gfx_arch_full_details[0],
                                                             gfx_arch_full_details[2]))


def add_cuda_stages(arch, extern_libs, stages):
    stages["ptx"] = (lambda path: Path(path).read_text(),
                     lambda src: llir_to_ptx(src, arch))
    stages["cubin"] = (lambda path: Path(path).read_bytes(),
                       lambda src: ptx_to_cubin(src, arch))


def compile(fn, **kwargs):
    # Get device type to decide which backend should be used
    device_type = kwargs.get("device_type", "cuda")
    _device_backend = get_backend(device_type)

    if device_type in ["cuda", "hip"]:
        arch = get_architecture_descriptor(kwargs.get("cc", None))
    else:
        _device_backend = get_backend(device_type)
        assert _device_backend
        arch = _device_backend.get_architecture_descriptor(**kwargs)

    is_cuda = device_type == "cuda" and _is_cuda(arch)
    is_hip = device_type in ["cuda", "hip"] and not is_cuda
    context = ir.context()
    constants = kwargs.get("constants", dict())
    num_warps = kwargs.get("num_warps", 4)
    num_stages = kwargs.get("num_stages", 3 if is_cuda and arch >= 75 else 2)
    extern_libs = kwargs.get("extern_libs", dict())
    if extern_libs is None:
        extern_libs = dict()
    debug = kwargs.get("debug", False)

    # build compilation stages
    stages = dict()
    stages["ast"] = (lambda path: fn, None)
    stages["ttir"] = (lambda path: parse_mlir_module(path, context),
                      lambda src: optimize_ttir(ast_to_ttir(src, signature, configs[0], constants, debug=debug, arch=arch), arch))
    stages["ttgir"] = (lambda path: parse_mlir_module(path, context),
                       lambda src: optimize_ttgir(ttir_to_ttgir(src, num_warps), num_stages, arch))
    stages["llir"] = (lambda path: Path(path).read_text(),
                      lambda src: ttgir_to_llir(src, extern_libs, arch))
    if is_cuda:
        add_cuda_stages(arch, extern_libs, stages)
    elif is_hip:
        add_rocm_stages(arch, extern_libs, stages)
    else:
        _device_backend.add_stages(arch, extern_libs, stages)
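    # Editorial note (not in the original file): at this point `stages` maps each IR
    # name to a (load_from_cache, compile_from_previous_stage) pair. On CUDA the
    # pipeline is ast -> ttir -> ttgir -> llir -> ptx -> cubin; on ROCm the final
    # stages are amdgcn/hsaco instead, and other backends append theirs via add_stages().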
    # find out the signature of the function
    if isinstance(fn, JITFunction):
        configs = kwargs.get("configs", None)
        signature = kwargs["signature"]
        if configs is None:
            configs = [instance_descriptor()]
        assert len(configs) == 1
        kwargs["configs"] = configs
        name = fn.__name__
        first_stage = 0
        if isinstance(signature, str):
            signature = {k: v.strip() for k, v in enumerate(signature.split(","))}
        kwargs["signature"] = signature
    else:
        assert isinstance(fn, str)
        _, ir_name = os.path.basename(fn).split(".")
        src = Path(fn).read_text()
        import re
        match = re.search(prototype_pattern[ir_name], src, re.MULTILINE)
        name, signature = match.group(1), match.group(2)
        types = re.findall(arg_type_pattern[ir_name], signature)
        if ir_name == 'ttgir':
            num_warps_matches = re.findall(ttgir_num_warps_pattern, src)
            assert len(num_warps_matches) == 1, "Expected exactly one match for num_warps"
            assert "num_warps" not in kwargs or int(num_warps_matches[0]) == num_warps, "num_warps in ttgir does not match num_warps in compile"
            num_warps = int(num_warps_matches[0])
        param_tys = [convert_type_repr(ty) for ty in types]
        signature = {k: v for k, v in enumerate(param_tys)}
        first_stage = list(stages.keys()).index(ir_name)

    # cache manager
    if is_cuda or is_hip:
        so_path = make_stub(name, signature, constants)
    else:
        so_path = _device_backend.make_launcher_stub(name, signature, constants)
    # create cache manager
    fn_cache_manager = get_cache_manager(make_hash(fn, arch, **kwargs))
    # determine name and extension type of provided function
    if isinstance(fn, JITFunction):
        name, ext = fn.__name__, "ast"
    else:
        name, ext = os.path.basename(fn).split(".")

    # load metadata if any
    metadata = None
    metadata_filename = f"{name}.json"

    # The group is addressed by the metadata
    metadata_group = fn_cache_manager.get_group(metadata_filename) or {}

    metadata_path = metadata_group.get(metadata_filename)

    if metadata_path is not None:
        with open(metadata_path) as f:
            metadata = json.load(f)
    else:
        metadata = {"num_warps": num_warps,
                    "num_stages": num_stages,
                    "constants": _get_jsonable_constants(constants),
                    "debug": debug,
                    "arch": arch, }
        if ext == "ptx":
            assert "shared" in kwargs, "ptx compilation must provide shared memory size"
            metadata["shared"] = kwargs["shared"]

    # Add device type to meta information
    metadata["device_type"] = device_type

    first_stage = list(stages.keys()).index(ext)
    asm = dict()
    module = fn
    # run compilation pipeline and populate metadata
    for ir_name, (parse, compile_kernel) in list(stages.items())[first_stage:]:
        ir_filename = f"{name}.{ir_name}"

        if ir_name == ext:
            next_module = parse(fn)
        else:
            path = metadata_group.get(ir_filename)
            if path is None:
                next_module = compile_kernel(module)
                if ir_name == "amdgcn":
                    extra_file_name = f"{name}.hsaco_path"
                    metadata_group[ir_filename] = fn_cache_manager.put(next_module[0], ir_filename)
                    metadata_group[extra_file_name] = fn_cache_manager.put(next_module[1], extra_file_name)
                else:
                    metadata_group[ir_filename] = fn_cache_manager.put(next_module, ir_filename)
                    fn_cache_manager.put(next_module, ir_filename)
            else:
                if ir_name == "amdgcn":
                    extra_file_name = f"{name}.hsaco_path"
                    hsaco_path = metadata_group.get(extra_file_name)
                    assert hsaco_path is not None, "Expected to have hsaco_path in metadata when we have the amdgcn"
                    next_module = (parse(path), parse(hsaco_path))
                else:
                    next_module = parse(path)

        if ir_name == "cubin":
            asm[ir_name] = next_module
        elif ir_name == "amdgcn":
            asm[ir_name] = str(next_module[0])
        else:
            asm[ir_name] = str(next_module)
        if ir_name == "llir" and "shared" not in metadata:
            metadata["shared"] = get_shared_memory_size(module)
        if ir_name == "ptx":
            metadata["name"] = get_kernel_name(next_module, pattern='// .globl')
        if ir_name == "amdgcn":
            metadata["name"] = get_kernel_name(next_module[0], pattern='.globl')
            asm["hsaco_path"] = next_module[1]
        if not is_cuda and not is_hip:
            _device_backend.add_meta_info(ir_name, module, next_module, metadata, asm)
        module = next_module
    # write-back metadata, if it didn't come from the cache
    if metadata_path is None:
        metadata_group[metadata_filename] = fn_cache_manager.put(json.dumps(metadata), metadata_filename, binary=False)
        fn_cache_manager.put_group(metadata_filename, metadata_group)

    # return handle to compiled kernel
    return CompiledKernel(fn, so_path, metadata, asm)


class CompiledKernel:

    # Hooks for external tools to monitor the execution of triton kernels
    launch_enter_hook = None
    launch_exit_hook = None

    def __init__(self, fn, so_path, metadata, asm):
        # initialize launcher
        import importlib.util
        spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
        mod = importlib.util.module_from_spec(spec)
        self.fn = fn
        spec.loader.exec_module(mod)
        self.c_wrapper = getattr(mod, "launch")
        # initialize metadata
        self.shared = metadata["shared"] if "shared" in metadata else 0
        self.num_warps = metadata["num_warps"]
        self.num_stages = metadata["num_stages"]
        self.constants = metadata["constants"]
        self.device_type = metadata["device_type"]
        self.device_backend = get_backend(self.device_type) if self.device_type not in ["cuda", "hip"] else None
        # initialize asm dict
        self.asm = asm
        # binaries are lazily initialized
        # because it involves doing runtime things
        # (e.g., checking amount of shared memory on current device)
        self.metadata = metadata
        self.cu_module = None
        self.cu_function = None

    def _init_handles(self):
        if self.cu_module is not None:
            return

        if self.device_type in ["cuda", "hip"]:
            device = get_current_device()
            bin_path = {driver.HIP: "hsaco_path", driver.CUDA: "cubin"}[driver.backend]
            max_shared = driver.utils.get_device_properties(device)["max_shared_mem"]
            fn_load_binary = driver.utils.load_binary
        else:
            assert self.device_backend
            device = self.device_backend.get_current_device()
            bin_path = self.device_backend.get_kernel_bin()
            max_shared = self.device_backend.get_device_properties(device)["max_shared_mem"]
            fn_load_binary = self.device_backend.get_load_binary_fn()

        if self.shared > max_shared:
            raise OutOfResources(self.shared, max_shared, "shared memory")

        mod, func, n_regs, n_spills = fn_load_binary(self.metadata["name"], self.asm[bin_path], self.shared, device)

        self.n_spills = n_spills
        self.n_regs = n_regs
        self.cu_module = mod
        self.cu_function = func

    def __getattribute__(self, name):
        if name == 'c_wrapper':
            self._init_handles()
        return super().__getattribute__(name)

    def __getitem__(self, grid):
        self._init_handles()

        def runner(*args, stream=None):
            if stream is None:
                if self.device_type in ["cuda", "rocm"]:
                    stream = get_cuda_stream()
                else:
                    stream = get_backend(self.device_type).get_stream(None)
            self.c_wrapper(grid[0], grid[1], grid[2], self.num_warps, self.shared, stream, self.cu_function,
                           CompiledKernel.launch_enter_hook, CompiledKernel.launch_exit_hook, self, *args)
        return runner

    def get_sass(self, fun=None):
        if 'sass' in self.asm:
            return self.asm['sass']
        fd, path = tempfile.mkstemp()
        try:
            with open(fd, 'wb') as cubin:
                cubin.write(self.asm['cubin'])
            self.sass = extract(path, fun)
        finally:
            os.remove(path)
        self.asm['sass'] = self.sass
        return self.sass
file_length: 23,660
avg_line_length: 36.797125
max_line_length: 144
extension_type: py
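A minimal sketch of how the entry points of compiler.py above fit together, assuming a CUDA device and a hypothetical @triton.jit function add_kernel (the names and argument values here are illustrative, not from the dataset):

    from triton.compiler import compile

    # compile() builds the stage pipeline (ast -> ttir -> ttgir -> llir -> ptx -> cubin
    # on CUDA), consults the cache manager, and returns a CompiledKernel.
    kernel = compile(
        add_kernel,                                    # a JITFunction (hypothetical)
        signature={0: "*fp32", 1: "*fp32", 2: "i32"},  # arg index -> type string
        constants={"BLOCK": 1024},                     # constexpr args (by name or index)
        num_warps=4,
    )
    # CompiledKernel.__getitem__ returns a launcher; the cubin is loaded lazily on
    # first launch (see _init_handles above).
    kernel[(grid_x, 1, 1)](x_ptr, y_ptr, n_elements)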
repo: triton
file: triton-main/python/triton/compiler/code_generator.py
code:
import ast
import inspect
import re
import sys
import warnings
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

from .. import language
from .._C.libtriton.triton import ir
from ..language import constexpr, tensor
# ideally we wouldn't need any runtime component
from ..runtime import JITFunction
from .errors import (CompilationError, CompileTimeAssertionFailure,
                     UnsupportedLanguageConstruct)


def mangle_ty(ty):
    if ty.is_ptr():
        return 'P' + mangle_ty(ty.element_ty)
    if ty.is_int():
        SIGNED = language.dtype.SIGNEDNESS.SIGNED
        prefix = 'i' if ty.int_signedness == SIGNED else 'u'
        return prefix + str(ty.int_bitwidth)
    if ty.is_fp8():
        return 'fp8'
    if ty.is_fp16():
        return 'fp16'
    if ty.is_bf16():
        return 'bf16'
    if ty.is_fp32():
        return 'fp32'
    if ty.is_fp64():
        return 'fp64'
    if ty.is_block():
        elt = mangle_ty(ty.scalar)
        shape = '_'.join(map(str, ty.shape))
        return f'{elt}S{shape}S'
    if ty.is_void():
        return 'V'
    assert False, "Unsupported type"


def mangle_fn(name, arg_tys, constants):
    # doesn't mangle ret type, which must be a function of arg tys
    mangled_arg_names = '_'.join([mangle_ty(ty) for ty in arg_tys])
    mangled_constants = '_'.join([f'{i}c{repr(constants[i])}' for i in sorted(constants)])
    mangled_constants = mangled_constants.replace('.', '_d_')
    mangled_constants = mangled_constants.replace("'", '_sq_')
    # [ and ] are not allowed in LLVM identifiers
    mangled_constants = mangled_constants.replace('[', '_').replace(']', '_')
    ret = f'{name}__{mangled_arg_names}__{mangled_constants}'
    return ret


def _is_triton_tensor(o: Any) -> bool:
    return isinstance(o, tensor)


def _is_constexpr(o: Any) -> bool:
    return isinstance(o, constexpr)


def _is_triton_scalar(o: Any) -> bool:
    return _is_triton_tensor(o) and (not o.type.is_block() or o.type.numel == 1)


def _unwrap_if_constexpr(o: Any):
    return o.value if isinstance(o, constexpr) else o


def _check_fn_args(node, fn, args):
    if fn.noinline:
        for idx, arg in enumerate(args):
            if not _is_constexpr(arg) and not _is_triton_scalar(arg):
                raise UnsupportedLanguageConstruct(
                    fn.src, node,
                    f'Function {fn.__name__} is marked noinline, but was called with non-scalar argument {fn.arg_names[idx]}:{arg}')


def _get_fn_file_line(fn):
    base_fn = fn
    while not isinstance(base_fn, JITFunction):
        base_fn = base_fn.fn
    file_name = base_fn.fn.__code__.co_filename
    lines, begin_line = inspect.getsourcelines(base_fn.fn)
    for line in lines:
        if line.strip().startswith('@'):
            begin_line += 1
        else:
            break
    return file_name, begin_line


_condition_types = {bool, int, type(None)}  # Python types accepted for conditionals inside kernels


class enter_sub_region:
    def __init__(self, generator):
        self.generator = generator

    def __enter__(self):
        # record lscope & local_defs in the parent scope
        self.liveins = self.generator.lscope.copy()
        self.prev_defs = self.generator.local_defs.copy()
        self.generator.local_defs = {}
        self.insert_block = self.generator.builder.get_insertion_block()
        self.insert_point = self.generator.builder.get_insertion_point()
        return self.liveins, self.insert_block

    def __exit__(self, *args, **kwargs):
        self.generator.builder.restore_insertion_point(self.insert_point)
        self.generator.lscope = self.liveins
        self.generator.local_defs = self.prev_defs


# Check if the given syntax node has an "early" return
class ContainsReturnChecker(ast.NodeVisitor):
    def __init__(self, gscope):
        self.gscope = gscope

    def _visit_stmts(self, body) -> bool:
        for s in body:
            if self.visit(s):
                return True
        return False

    def _visit_function(self, fn) -> bool:
        # Currently we only support JITFunctions defined in the global scope
        if isinstance(fn, JITFunction) and not fn.noinline:
            fn_node = fn.parse()
            return ContainsReturnChecker(self.gscope).visit(fn_node)
        return False

    def generic_visit(self, node) -> bool:
        ret = False
        for _, value in ast.iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, ast.AST):
                        ret = ret or self.visit(item)
            elif isinstance(value, ast.AST):
                ret = ret or self.visit(value)
        return ret

    def visit_Attribute(self, node: ast.Attribute) -> bool:
        # If the left part is a name, it's possible that
        # we call triton native function or a jit function from another module.
        # If the left part is not a name, it must return a tensor or a constexpr
        # whose methods do not contain return statements
        # e.g., (tl.load(x)).to(y)
        # So we only check if the expressions within value have return or not
        if isinstance(node.value, ast.Name):
            if node.value.id in self.gscope:
                value = self.gscope[node.value.id]
                fn = getattr(value, node.attr)
                return self._visit_function(fn)
            return False
        return self.visit(node.value)

    def visit_Name(self, node: ast.Name) -> bool:
        if type(node.ctx) == ast.Store:
            return False
        if node.id in self.gscope:
            fn = self.gscope[node.id]
            return self._visit_function(fn)
        return False

    def visit_Return(self, node: ast.Return) -> bool:
        return True

    def visit_Assign(self, node: ast.Assign) -> bool:
        # There couldn't be an early return
        # x = ...
        return False

    def visit_AugAssign(self, node: ast.AugAssign) -> bool:
        # There couldn't be an early return
        # x += ...
        return False

    def visit_Module(self, node: ast.Module) -> bool:
        return self._visit_stmts(node.body)

    def visit_FunctionDef(self, node: ast.FunctionDef) -> bool:
        return self._visit_stmts(node.body)

    def visit_If(self, node: ast.If) -> bool:
        # TODO: optimize the following case in which we actually don't have
        # a return when static_cond is false:
        #   if dynamic_cond
        #     if static_cond
        #       func_with_return
        #     else
        #       func_without_return
        ret = self._visit_stmts(node.body)
        if node.orelse:
            ret = ret or self._visit_stmts(node.orelse)
        return ret

    def visit_IfExp(self, node: ast.IfExp) -> bool:
        return self.visit(node.body) or self.visit(node.orelse)

    def visit_Call(self, node: ast.Call) -> bool:
        return self.visit(node.func)


class CodeGenerator(ast.NodeVisitor):

    def __init__(self, context, prototype, gscope, attributes, constants, function_name, arch,
                 module=None, is_kernel=False, function_types: Optional[Dict] = None,
                 debug=False, noinline=False, file_name: Optional[str] = None, begin_line=0):
        self.context = context
        self.builder = ir.builder(context)
        self.file_name = file_name
        # node.lineno starts from 1, so we need to subtract 1
        self.begin_line = begin_line - 1
        self.builder.set_loc(file_name, begin_line, 0)
        self.builder.arch = arch
        self.module = self.builder.create_module() if module is None else module
        self.function_ret_types = {} if function_types is None else function_types
        self.prototype = prototype
        self.gscope = gscope
        self.lscope = dict()
        self.attributes = attributes
        self.constants = constants
        self.function_name = function_name
        self.is_kernel = is_kernel
        self.last_node = None
        self.debug = debug
        self.noinline = noinline
        self.scf_stack = []
        self.last_ret_type = None
        # SSA-construction
        # name => language.tensor
        self.local_defs: Dict[str, tensor] = {}
        self.global_uses: Dict[str, tensor] = {}
        self.dereference_name: Callable[[str], Any] = self._define_name_lookup()

    builtin_namespace: Dict[str, Any] = {_.__name__: _ for _ in (range, float, int, isinstance, getattr)}
    builtin_namespace.update((
        ('print', language.core.device_print),
        ('min', language.minimum),
    ))

    def _define_name_lookup(self):

        def local_lookup(name: str, absent):
            value = self.lscope.get(name, absent)  # this needs to be re-fetched from `self` every time, because it gets switched occasionally
            if value is not absent and name not in self.local_defs:
                self.global_uses[name] = value
            return value

        absent_marker = object()

        def name_lookup(name: str) -> Any:
            absent = absent_marker
            for lookup_function in local_lookup, self.gscope.get, self.builtin_namespace.get:
                value = lookup_function(name, absent)
                if value is not absent:
                    return value
            raise NameError(f'{name} is not defined')

        return name_lookup

    def set_value(self, name: str, value: Union[tensor, constexpr]) -> None:
        ''' This function:
            called by visit_Assign() & visit_FunctionDef() to store left value (lvalue)
        1. record local defined name (FIXME: should consider control flow)
        2. store tensor in self.lvalue
        '''
        self.lscope[name] = value
        self.local_defs[name] = value

    def _get_insertion_point_and_loc(self):
        # XXX: this is a hack to get the location of the insertion point.
        # The insertion point's location could be invalid sometimes,
        # so we need to explicitly set the location
        loc = self.builder.get_loc()
        ip = self.builder.get_insertion_point()
        return ip, loc

    def _set_insertion_point_and_loc(self, ip, loc):
        self.builder.restore_insertion_point(ip)
        self.builder.set_loc(loc)

    #
    # AST visitor
    #
    def visit_compound_statement(self, stmts):
        for stmt in stmts:
            ret_type = self.visit(stmt)
            if ret_type is not None and isinstance(stmt, ast.Return):
                self.last_ret_type = ret_type

    def visit_Module(self, node):
        ast.NodeVisitor.generic_visit(self, node)

    def visit_List(self, node):
        ctx = self.visit(node.ctx)
        assert ctx is None
        elts = [self.visit(elt) for elt in node.elts]
        return elts

    # By design, only non-kernel functions can return
    def visit_Return(self, node):
        ret_value = self.visit(node.value)
        # ret_block = self.builder.create_block()
        # post_ret_block = self.builder.create_block()
        # self.builder.create_branch(ret_block)
        # self.builder.set_insertion_point_to_end(ret_block)
        if ret_value is None:
            self.builder.ret([])
            ret_ty = None
        elif isinstance(ret_value, tuple):
            ret_values = [language.core._to_tensor(v, self.builder) for v in ret_value]
            ret_types = [v.type for v in ret_values]
            self.builder.ret([v.handle for v in ret_values])
            ret_ty = tuple(ret_types)
        else:
            ret = language.core._to_tensor(ret_value, self.builder)
            self.builder.ret([ret.handle])
            ret_ty = ret.type
        # self.builder.create_branch(post_ret_block)
        # self.builder.set_insertion_point_to_end(post_ret_block)
        return ret_ty

    def visit_FunctionDef(self, node):
        arg_names, kwarg_names = self.visit(node.args)
        # initialize defaults
        for i, default_value in enumerate(node.args.defaults):
            arg_node = node.args.args[-i - 1]
            annotation = arg_node.annotation
            name = arg_node.arg
            st_target = ast.Name(id=name, ctx=ast.Store())
            if annotation is None:
                init_node = ast.Assign(targets=[st_target], value=default_value)
            else:
                init_node = ast.AnnAssign(target=st_target, value=default_value, annotation=annotation)
            self.visit(init_node)
        # initialize function
        visibility = "public" if self.is_kernel else "private"
        fn = self.builder.get_or_insert_function(self.module, self.function_name,
                                                 self.prototype.to_ir(self.builder), visibility, self.noinline)
        self.module.push_back(fn)
        entry = fn.add_entry_block()
        arg_values = []
        idx = 0
        for i, arg_name in enumerate(arg_names):
            if i in self.constants:
                cst = self.constants[i]
                if not _is_constexpr(cst):
                    cst = constexpr(self.constants[i])
                arg_values.append(cst)
                continue
            else:
                if i in self.attributes:
                    fn.set_arg_attr(idx, "tt.divisibility", self.attributes[i][1])
                arg_values.append(tensor(fn.args(idx), self.prototype.param_types[idx]))
                idx += 1

        insert_pt = self.builder.get_insertion_block()
        for arg_name, arg_value in zip(arg_names, arg_values):
            self.set_value(arg_name, arg_value)
        self.builder.set_insertion_point_to_start(entry)
        # visit function body
        self.visit_compound_statement(node.body)
        # finalize function
        if self.last_ret_type is None:
            self.builder.ret([])
        else:
            # update return type
            if isinstance(self.last_ret_type, tuple):
                self.prototype.ret_types = list(self.last_ret_type)
                fn.reset_type(self.prototype.to_ir(self.builder))
            else:
                self.prototype.ret_types = [self.last_ret_type]
                fn.reset_type(self.prototype.to_ir(self.builder))
        if insert_pt:
            self.builder.set_insertion_point_to_end(insert_pt)
        # Remove dead code
        fn.finalize()

    def visit_arguments(self, node):
        arg_names = []
        for arg in node.args:
            arg_names += [self.visit(arg)]
        kwarg_names = self.visit(node.kwarg)
        return arg_names, kwarg_names

    def visit_arg(self, node):
        ast.NodeVisitor.generic_visit(self, node)
        return node.arg

    def visit_AnnAssign(self, node):
        # extract attributes
        annotation = self.visit(node.annotation)
        target = self.visit(node.target)
        value = self.visit(node.value)
        # constexpr
        if annotation == constexpr:
            if target in self.lscope:
                raise ValueError(f'{target} is already defined.'
                                 f' constexpr cannot be reassigned.')
            if not _is_constexpr(value):
                value = constexpr(value)
            self.lscope[target] = value
            return self.lscope[target]
        # default: call visit_Assign
        return self.visit_Assign(node)

    def visit_Assign(self, node):
        _names = []
        for target in node.targets:
            _names += [self.visit(target)]
        if len(_names) > 1:
            raise UnsupportedLanguageConstruct(None, node, "simultaneous multiple assignment is not supported.")
        names = _names[0]
        values = self.visit(node.value)
        if not isinstance(names, tuple):
            names = [names]
        if not isinstance(values, tuple):
            values = [values]
        native_nontensor_types = (language.dtype, )
        for name, value in zip(names, values):
            # by default, constexpr are assigned into python variable
            value = _unwrap_if_constexpr(value)
            if value is not None and \
               not _is_triton_tensor(value) and \
               not isinstance(value, native_nontensor_types):
                value = language.core._to_tensor(value, self.builder)
            self.set_value(name, value)

    def visit_AugAssign(self, node):
        name = node.target.id
        lhs = ast.Name(id=name, ctx=ast.Load())
        rhs = ast.BinOp(lhs, node.op, node.value)
        assign = ast.Assign(targets=[node.target], value=rhs)
        self.visit(assign)
        return self.dereference_name(name)

    def visit_Name(self, node):
        if type(node.ctx) == ast.Store:
            return node.id
        return self.dereference_name(node.id)

    def visit_Store(self, node):
        ast.NodeVisitor.generic_visit(self, node)

    def visit_Load(self, node):
        ast.NodeVisitor.generic_visit(self, node)

    def visit_Tuple(self, node):
        args = [self.visit(x) for x in node.elts]
        return tuple(args)

    def _apply_binary_method(self, method_name, lhs, rhs):
        # TODO: raise something meaningful if getattr fails below, esp for reverse method
        if _is_triton_tensor(lhs):
            return getattr(lhs, method_name)(rhs, _builder=self.builder)
        if _is_triton_tensor(rhs):
            reverse_method_name = re.sub(r"__(.*)__", r"__r\1__", method_name)
            return getattr(rhs, reverse_method_name)(lhs, _builder=self.builder)
        return getattr(lhs, method_name)(rhs)

    def visit_BinOp(self, node):
        lhs = self.visit(node.left)
        rhs = self.visit(node.right)
        method_name = self._method_name_for_bin_op.get(type(node.op))
        if method_name is None:
            raise UnsupportedLanguageConstruct(
                None, node, "AST binary operator '{}' is not (currently) implemented.".format(node.op.__name__))
        return self._apply_binary_method(method_name, lhs, rhs)

    _method_name_for_bin_op: Dict[Type[ast.operator], str] = {
        ast.Add: '__add__', ast.Sub: '__sub__', ast.Mult: '__mul__', ast.Div: '__truediv__',
        ast.FloorDiv: '__floordiv__', ast.Mod: '__mod__', ast.Pow: '__pow__',
        ast.LShift: '__lshift__', ast.RShift: '__rshift__',
        ast.BitAnd: '__and__', ast.BitOr: '__or__', ast.BitXor: '__xor__',
    }

    def visit_then_else_blocks(self, node, liveins, then_block, else_block):
        # then block
        self.builder.set_insertion_point_to_start(then_block)
        self.visit_compound_statement(node.body)
        then_block = self.builder.get_insertion_block()
        then_defs = self.local_defs.copy()
        # else block
        else_defs = {}
        if node.orelse:
            self.builder.set_insertion_point_to_start(else_block)
            self.lscope = liveins.copy()
            self.local_defs = {}
            self.visit_compound_statement(node.orelse)
            else_defs = self.local_defs.copy()
            else_block = self.builder.get_insertion_block()

        # update block arguments
        names = []
        ret_types = []
        ir_ret_types = []
        # variables in livein whose value is updated in `if`
        for name in liveins:
            # check type
            for defs, block_name in [(then_defs, 'then'), (else_defs, 'else')]:
                if name in defs:
                    assert defs[name].type == liveins[name].type,\
                        f'initial value for `{name}` is of type {liveins[name].type}, '\
                        f'but the {block_name} block redefines it as {defs[name].type}'
            if name in then_defs or name in else_defs:
                names.append(name)
                ret_types.append(then_defs[name].type if name in then_defs else else_defs[name].type)
                ir_ret_types.append(then_defs[name].handle.get_type() if name in then_defs else else_defs[name].handle.get_type())
            # variable defined in then but not in else
            if name in then_defs and name not in else_defs:
                else_defs[name] = liveins[name]
            # variable defined in else but not in then
            if name in else_defs and name not in then_defs:
                then_defs[name] = liveins[name]
        # variables that are both in then and else but not in liveins
        # TODO: could probably be cleaned up
        for name in then_defs.keys() & else_defs.keys():
            if name in names:
                continue
            then_ty = then_defs[name].type
            else_ty = else_defs[name].type
            assert then_ty == else_ty,\
                f'mismatched type for {name} between then block ({then_ty}) '\
                f'and else block ({else_ty})'
            names.append(name)
            ret_types.append(then_ty)
            ir_ret_types.append(then_defs[name].handle.get_type())

        return then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types

    def visit_if_top_level(self, cond, node):
        has_endif_block = True
        with enter_sub_region(self) as sr:
            liveins, ip_block = sr
            then_block = self.builder.create_block()
            else_block = self.builder.create_block()
            # create basic-block after conditional
            endif_block = self.builder.create_block()
            # create branch
            self.builder.set_insertion_point_to_end(ip_block)
            self.builder.create_cond_branch(cond.handle, then_block, else_block)
            # visit then and else blocks
            then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types = \
                self.visit_then_else_blocks(node, liveins, then_block, else_block)
            # then terminator
            self.builder.set_insertion_point_to_end(then_block)
            if then_block.has_return() and else_block.has_return():
                has_endif_block = False
                endif_block.erase()
            if not then_block.has_terminator() and has_endif_block:
                self.builder.create_branch(endif_block, [then_defs[n].handle for n in names])
            # else terminator
            self.builder.set_insertion_point_to_end(else_block)
            if not else_block.has_terminator() and has_endif_block:
                self.builder.create_branch(endif_block, [else_defs[n].handle for n in names])
            if has_endif_block:
                for ty in ir_ret_types:
                    endif_block.add_argument(ty)
        if has_endif_block:
            # change block
            self.builder.set_insertion_point_to_start(endif_block)
            # update value
            for i, name in enumerate(names):
                new_tensor = language.core.tensor(endif_block.arg(i), ret_types[i])
                self.set_value(name, new_tensor)

    # TODO: refactor
    def visit_if_scf(self, cond, node):
        with enter_sub_region(self) as sr:
            liveins, _ = sr
            ip, last_loc = self._get_insertion_point_and_loc()
            then_block = self.builder.create_block()
            else_block = self.builder.create_block() if node.orelse else None
            then_defs, else_defs, then_block, else_block, names, ret_types, _ = \
                self.visit_then_else_blocks(node, liveins, then_block, else_block)
            # create if op
            self._set_insertion_point_and_loc(ip, last_loc)
            if_op = self.builder.create_if_op([ty.to_ir(self.builder) for ty in ret_types], cond.handle, True)
            then_block.merge_block_before(if_op.get_then_block())
            self.builder.set_insertion_point_to_end(if_op.get_then_block())
            if len(names) > 0:
                self.builder.create_yield_op([then_defs[n].handle for n in names])
            if not node.orelse:
                else_block = if_op.get_else_block()
            else:
                else_block.merge_block_before(if_op.get_else_block())
            self.builder.set_insertion_point_to_end(if_op.get_else_block())
            if len(names) > 0:
                self.builder.create_yield_op([else_defs[n].handle for n in names])
        # update values
        for i, name in enumerate(names):
            new_tensor = language.core.tensor(if_op.get_result(i), ret_types[i])
            self.set_value(name, new_tensor)

    def visit_If(self, node):
        cond = self.visit(node.test)
        if _is_triton_tensor(cond):
            cond = cond.to(language.int1, _builder=self.builder)
            contains_return = ContainsReturnChecker(self.gscope).visit(node)
            if self.scf_stack and contains_return:
                raise UnsupportedLanguageConstruct(
                    None, node, "Cannot have `return` statements inside `while` or `for` statements in triton "
                                "(note that this also applies to `return` statements that are inside functions "
                                "transitively called from within `while`/`for` statements)")
            elif self.scf_stack or not contains_return:
                self.visit_if_scf(cond, node)
            else:
                self.visit_if_top_level(cond, node)
        else:
            cond = _unwrap_if_constexpr(cond)
            if type(cond) not in _condition_types:  # not isinstance - we insist the real thing, no subclasses and no ducks
                raise UnsupportedLanguageConstruct(
                    None, node, "`if` conditionals can only accept values of type {{{}}}, not objects of type {}".format(
                        ', '.join(_.__name__ for _ in _condition_types), type(cond).__name__))
            if cond:
                self.visit_compound_statement(node.body)
            else:
                self.visit_compound_statement(node.orelse)

    def visit_IfExp(self, node):
        cond = self.visit(node.test)
        if _is_triton_tensor(cond):
            cond = cond.to(language.int1, _builder=self.builder)
        if _unwrap_if_constexpr(cond):
            return self.visit(node.body)
        else:
            return self.visit(node.orelse)

    def visit_Pass(self, node):
        pass

    def visit_Compare(self, node):
        if not (len(node.comparators) == 1 and len(node.ops) == 1):
            raise UnsupportedLanguageConstruct(None, node, "simultaneous multiple comparison is not supported")
        lhs = self.visit(node.left)
        rhs = self.visit(node.comparators[0])
        lhs_value = _unwrap_if_constexpr(lhs)
        rhs_value = _unwrap_if_constexpr(rhs)
        if type(node.ops[0]) == ast.Is:
            return constexpr(lhs_value is rhs_value)
        if type(node.ops[0]) == ast.IsNot:
            return constexpr(lhs_value is not rhs_value)
        method_name = self._method_name_for_comp_op.get(type(node.ops[0]))
        if method_name is None:
            raise UnsupportedLanguageConstruct(
                None, node, "AST comparison operator '{}' is not (currently) implemented.".format(node.ops[0].__name__))
        return self._apply_binary_method(method_name, lhs, rhs)

    _method_name_for_comp_op: Dict[Type[ast.cmpop], str] = {
        ast.Eq: '__eq__', ast.NotEq: '__ne__', ast.Lt: '__lt__', ast.LtE: '__le__',
        ast.Gt: '__gt__', ast.GtE: '__ge__'
    }

    def visit_UnaryOp(self, node):
        op = self.visit(node.operand)
        fn = self._method_name_for_unary_op.get(type(node.op))
        if fn is None:
            raise UnsupportedLanguageConstruct(
                None, node, "AST unary operator '{}' is not (currently) implemented.".format(node.op.__name__))
        if _is_triton_tensor(op):
            return getattr(op, fn)(_builder=self.builder)
        return getattr(op, fn)()

    _method_name_for_unary_op: Dict[Type[ast.unaryop], str] = {
        ast.USub: '__neg__', ast.UAdd: '__pos__', ast.Not: '__not__', ast.Invert: '__invert__'
    }

    def visit_While(self, node):
        with enter_sub_region(self) as sr:
            liveins, insert_block = sr
            ip, last_loc = self._get_insertion_point_and_loc()

            # loop body (the after region)
            # loop_block = self.builder.create_block()
            dummy = self.builder.create_block()
            self.builder.set_insertion_point_to_start(dummy)
            self.scf_stack.append(node)
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            loop_defs = self.local_defs
            dummy.erase()

            # collect loop-carried values
            names = []
            ret_types = []
            init_args = []
            for name in loop_defs:
                if name in liveins:
                    # We should not def new constexpr
                    assert _is_triton_tensor(loop_defs[name])
                    assert _is_triton_tensor(liveins[name])
                    assert loop_defs[name].type == liveins[name].type
                    # these are loop-carried values
                    names.append(name)
                    ret_types.append(loop_defs[name].type)
                    init_args.append(liveins[name])

            self._set_insertion_point_and_loc(ip, last_loc)
            while_op = self.builder.create_while_op([ty.to_ir(self.builder) for ty in ret_types],
                                                    [arg.handle for arg in init_args])
            # merge the condition region
            before_block = self.builder.create_block_with_parent(while_op.get_before(),
                                                                 [ty.to_ir(self.builder) for ty in ret_types])
            self.builder.set_insertion_point_to_start(before_block)
            for i, name in enumerate(names):
                self.lscope[name] = language.core.tensor(before_block.arg(i), ret_types[i])
                self.local_defs[name] = self.lscope[name]
            cond = self.visit(node.test)
            self.builder.set_insertion_point_to_end(before_block)
            # create ConditionOp: e.g., scf.condition(%cond) %arg0, %arg1, ...
            self.builder.create_condition_op(cond.handle, [before_block.arg(i) for i in range(len(init_args))])
            # merge the loop body
            after_block = self.builder.create_block_with_parent(while_op.get_after(),
                                                                [ty.to_ir(self.builder) for ty in ret_types])

            # generate loop body
            self.builder.set_insertion_point_to_start(after_block)
            for i, name in enumerate(names):
                self.lscope[name] = language.core.tensor(after_block.arg(i), ret_types[i])
                self.local_defs[name] = self.lscope[name]
            self.scf_stack.append(node)
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            loop_defs = self.local_defs
            yields = []
            for name in loop_defs:
                if name in liveins:
                    yields.append(loop_defs[name])
            self.builder.create_yield_op([y.handle for y in yields])

        # WhileOp defines new values, update the symbol table (lscope, local_defs)
        for i, name in enumerate(names):
            new_def = language.core.tensor(while_op.get_result(i), ret_types[i])
            self.lscope[name] = new_def
            self.local_defs[name] = new_def

        for stmt in node.orelse:
            assert False, "Not implemented"
            ast.NodeVisitor.generic_visit(self, stmt)

    def visit_Subscript(self, node):
        assert node.ctx.__class__.__name__ == "Load"
        lhs = self.visit(node.value)
        slices = self.visit(node.slice)
        if _is_triton_tensor(lhs):
            return lhs.__getitem__(slices, _builder=self.builder)
        return lhs[slices]

    def visit_ExtSlice(self, node):
        return [self.visit(dim) for dim in node.dims]

    def visit_For(self, node):
        IteratorClass = self.visit(node.iter.func)
        iter_args = [self.visit(arg) for arg in node.iter.args]
        if IteratorClass == language.static_range:
            iterator = IteratorClass(*iter_args)
            static_range = range(iterator.start.value,
                                 iterator.end.value,
                                 iterator.step.value)
            for i in static_range:
                self.lscope[node.target.id] = constexpr(i)
                self.visit_compound_statement(node.body)
                for stmt in node.orelse:
                    ast.NodeVisitor.generic_visit(self, stmt)
            return

        if IteratorClass is not range:
            raise RuntimeError('Only `range` and `static_range` iterators are currently supported')

        # visit iterator arguments
        # note: only `range` iterator is supported now
        # collect lower bound (lb), upper bound (ub), and step
        lb = iter_args[0] if len(iter_args) > 1 else self.visit(ast.Num(0))
        ub = iter_args[1] if len(iter_args) > 1 else self.visit(node.iter.args[0])
        step = iter_args[2] if len(iter_args) > 2 else self.visit(ast.Num(1))
        # handle negative constant step (not supported by scf.for in MLIR)
        negative_step = False
        if _is_constexpr(step) and step.value < 0:
            step = constexpr(-step.value)
            negative_step = True
            lb, ub = ub, lb
        lb = language.core._to_tensor(lb, self.builder)
        ub = language.core._to_tensor(ub, self.builder)
        step = language.core._to_tensor(step, self.builder)
        # induction variable type
        if not lb.dtype.is_int() or not ub.dtype.is_int() or not step.dtype.is_int():
            raise TypeError(f"For loop bounds and step must all be ints, are ({lb.dtype}, {ub.dtype}, {step.dtype})")
        iv_type = language.semantic.integer_promote_impl(lb.dtype, ub.dtype)
        iv_type = language.semantic.integer_promote_impl(iv_type, step.dtype)
        iv_ir_type = iv_type.to_ir(self.builder)
        iv_is_signed = iv_type.int_signedness == language.core.dtype.SIGNEDNESS.SIGNED
        # lb/ub/step might be constexpr, we need to cast them to tensor
        lb = lb.handle
        ub = ub.handle
        step = step.handle
        # ForOp can only accept IndexType as lb/ub/step. Cast integer to Index
        lb = self.builder.create_int_cast(lb, iv_ir_type, iv_is_signed)
        ub = self.builder.create_int_cast(ub, iv_ir_type, iv_is_signed)
        step = self.builder.create_int_cast(step, iv_ir_type, iv_is_signed)
        # Create placeholder for the loop induction variable
        iv = self.builder.create_undef(iv_ir_type)
        self.set_value(node.target.id, language.core.tensor(iv, iv_type))

        with enter_sub_region(self) as sr:
            liveins, insert_block = sr
            ip, last_loc = self._get_insertion_point_and_loc()

            # create loop body block
            block = self.builder.create_block()
            self.builder.set_insertion_point_to_start(block)
            # dry visit loop body
            self.scf_stack.append(node)
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            block.erase()

            # If a variable (name) is defined in both its parent & itself, then it's
            # a loop-carried variable. (They must be of the same type)
            init_args = []
            yields = []
            names = []
            for name in self.local_defs:
                if name in liveins:
                    assert _is_triton_tensor(self.local_defs[name]), f'{name} is not tensor'
                    assert _is_triton_tensor(liveins[name])
                    assert self.local_defs[name].type == liveins[name].type,\
                        f'Loop-carried variable {name} has initial type {liveins[name].type} '\
                        f'but is re-assigned to {self.local_defs[name].type} in loop! '\
                        f'Please make sure that the type stays consistent.'

                    names.append(name)
                    init_args.append(language.core._to_tensor(liveins[name], self.builder))
                    yields.append(language.core._to_tensor(self.local_defs[name], self.builder))

            # create ForOp
            self._set_insertion_point_and_loc(ip, last_loc)
            for_op = self.builder.create_for_op(lb, ub, step, [arg.handle for arg in init_args])

            self.scf_stack.append(node)
            self.builder.set_insertion_point_to_start(for_op.get_body(0))
            for i, name in enumerate(names):
                self.set_value(name, language.core.tensor(for_op.get_body(0).arg(i + 1), yields[i].type))
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            yields = []
            for name in self.local_defs:
                if name in liveins:
                    yields.append(language.core._to_tensor(self.local_defs[name], self.builder))

            # create YieldOp
            if len(yields) > 0:
                self.builder.create_yield_op([y.handle for y in yields])
            for_op_region = for_op.get_body(0).get_parent()
            assert for_op_region.size() == 1, "We use SCF, so the loop body should only have one block"

            # update induction variable with actual value, and replace all uses
            self.builder.set_insertion_point_to_start(for_op.get_body(0))
            iv = for_op.get_induction_var()
            if negative_step:
                iv = self.builder.create_sub(ub, iv)
                iv = self.builder.create_add(iv, lb)
            self.lscope[node.target.id].handle.replace_all_uses_with(iv)
            self.set_value(node.target.id, language.core.tensor(iv, iv_type))

        # update lscope & local_defs (ForOp defines new values)
        for i, name in enumerate(names):
            self.set_value(name, language.core.tensor(for_op.get_result(i), yields[i].type))

        for stmt in node.orelse:
            assert False, "Don't know what to do with else after for"
            ast.NodeVisitor.generic_visit(self, stmt)

    def visit_Slice(self, node):
        lower = self.visit(node.lower)
        upper = self.visit(node.upper)
        step = self.visit(node.step)
        return slice(lower, upper, step)

    def visit_Index(self, node):
        return self.visit(node.value)

    def visit_keyword(self, node) -> Tuple[str, Any]:
        return node.arg, self.visit(node.value)

    def visit_Assert(self, node) -> Any:
        if not self.debug:
            return
        test = self.visit(node.test)
        msg = self.visit(node.msg)
        # Convert assert to triton's device_assert which happens on the device
        return language.core.device_assert(test, msg, _builder=self.builder)

    def call_JitFunction(self, fn: JITFunction, args, kwargs):
        args = inspect.getcallargs(fn.fn, *args, **kwargs)
        args = [args[name] for name in fn.arg_names]
        args = [arg if _is_triton_tensor(arg) else constexpr(arg) for arg in args]
        # generate function def
        attributes = dict()
        constexprs = [i for i, arg in enumerate(args) if _is_constexpr(arg)]
        constants = {i: args[i] for i in constexprs}
        # generate call
        args = [None if i in constexprs else arg for i, arg in enumerate(args)]
        arg_vals = [arg.handle for arg in args if arg is not None]
        arg_types = [arg.type for arg in args if arg is not None]
        fn_name = mangle_fn(fn.__name__, arg_types, constants)
        # generate function def if necessary
        if not self.module.has_function(fn_name):
            prototype = language.function_type([], arg_types)
            gscope = sys.modules[fn.fn.__module__].__dict__
            # If the callee is not set, we use the same debug setting as the caller
            debug = self.debug if fn.debug is None else fn.debug
            file_name, begin_line = _get_fn_file_line(fn)
            generator = CodeGenerator(self.context, prototype, gscope, attributes, constants, module=self.module,
                                      function_name=fn_name, function_types=self.function_ret_types, debug=debug,
                                      noinline=fn.noinline, file_name=file_name, begin_line=begin_line,
                                      arch=self.builder.arch)
            generator.visit(fn.parse())
            callee_ret_type = generator.last_ret_type
            self.function_ret_types[fn_name] = callee_ret_type
        else:
            callee_ret_type = self.function_ret_types[fn_name]
        symbol = self.module.get_function(fn_name)
        call_op = self.builder.call(symbol, arg_vals)
        if call_op.get_num_results() == 0 or callee_ret_type is None:
            return None
        elif call_op.get_num_results() == 1:
            return tensor(call_op.get_result(0), callee_ret_type)
        else:
            # should return a tuple of tl.tensor
            results = []
            for i in range(call_op.get_num_results()):
                results.append(tensor(call_op.get_result(i), callee_ret_type[i]))
            return tuple(results)

    def visit_Call(self, node):
        fn = _unwrap_if_constexpr(self.visit(node.func))

        static_implementation = self.statically_implemented_functions.get(fn)
        if static_implementation is not None:
            return static_implementation(self, node)

        kws = dict(self.visit(keyword) for keyword in node.keywords)
        args = [self.visit(arg) for arg in node.args]
        if fn is language.core.device_assert:   # TODO: this should not be so hardcoded
            if not self.debug:
                return
        if isinstance(fn, JITFunction):
            _check_fn_args(node, fn, args)
            return self.call_JitFunction(fn, args, kws)
        if (hasattr(fn, '__self__') and _is_triton_tensor(fn.__self__)) or language.core.is_builtin(fn):
            extra_kwargs = dict(_builder=self.builder)
            sig = inspect.signature(fn)
            if '_generator' in sig.parameters:
                extra_kwargs['_generator'] = self
            return fn(*args, **extra_kwargs, **kws)

        if fn in self.builtin_namespace.values():
            args = map(_unwrap_if_constexpr, args)
        return fn(*args, **kws)

    def visit_Constant(self, node):
        return constexpr(node.value)

    def visit_BoolOp(self, node: ast.BoolOp):
        if len(node.values) != 2:
            raise UnsupportedLanguageConstruct(
                None, node, "chained boolean operators (A or B or C) are not supported; use parentheses to split the chain.")
        lhs = self.visit(node.values[0])
        rhs = self.visit(node.values[1])
        method_name = self._method_name_for_bool_op.get(type(node.op))
        if method_name is None:
            raise UnsupportedLanguageConstruct(
                None, node, "AST boolean operator '{}' is not (currently) implemented.".format(node.op.__name__))
        return self._apply_binary_method(method_name, lhs, rhs)

    _method_name_for_bool_op: Dict[Type[ast.boolop], str] = {ast.And: 'logical_and', ast.Or: 'logical_or'}

    if sys.version_info < (3, 8):
        def visit_NameConstant(self, node):
            return constexpr(node.value)

        def visit_Num(self, node):
            return constexpr(node.n)

        def visit_Str(self, node):
            return constexpr(ast.literal_eval(node))

    def visit_Attribute(self, node):
        lhs = self.visit(node.value)
        if _is_triton_tensor(lhs):
            if node.attr == "T":
                return language.semantic.trans(lhs, builder=self.builder)
        return getattr(lhs, node.attr)

    def visit_Expr(self, node):
        ast.NodeVisitor.generic_visit(self, node)

    def visit_NoneType(self, node):
        return None

    def visit_JoinedStr(self, node):
        values = list(node.values)
        for i, value in enumerate(values):
            if isinstance(value, ast.Constant):
                values[i] = str(value.value)
            elif isinstance(value, ast.FormattedValue):
                conversion_code = value.conversion
                evaluated = self.visit(value.value)
                if not _is_constexpr(evaluated):
                    raise UnsupportedLanguageConstruct(
                        None, node,
                        "Cannot evaluate f-string containing non-constexpr conversion values, found conversion of type " + str(type(evaluated)))
                values[i] = ("{}" if conversion_code < 0 else "{!" + chr(conversion_code) + "}").format(evaluated.value)
            else:
                raise AssertionError("encountered unexpected node of type {} in a JoinedStr node".format(type(value)))
        return ''.join(values)

    def visit(self, node):
        if node is None:
            return
        with warnings.catch_warnings():
            # The ast library added visit_Constant and deprecated some other
            # methods but we can't move to that without breaking Python 3.6 and 3.7.
            warnings.simplefilter("ignore", DeprecationWarning)  # python 3.9
            warnings.simplefilter("ignore", PendingDeprecationWarning)  # python 3.8
            self.last_node = node
            last_loc = self.builder.get_loc()
            if hasattr(node, 'lineno') and hasattr(node, 'col_offset'):
                self.builder.set_loc(self.file_name, self.begin_line + node.lineno, node.col_offset)
                last_loc = self.builder.get_loc()
            ret = super().visit(node)
            # Reset the location to the last one before the visit
            if last_loc:
                self.builder.set_loc(last_loc)
            return ret

    def generic_visit(self, node):
        raise UnsupportedLanguageConstruct(None, node, "unsupported AST node type: {}".format(type(node).__name__))

    def execute_static_print(self, node: ast.Call) -> None:
        # TODO: too simplistic? Perhaps do something else with non-constexpr
        kws = {name: _unwrap_if_constexpr(value) for name, value in (self.visit(keyword) for keyword in node.keywords)}
        args = [_unwrap_if_constexpr(self.visit(arg)) for arg in node.args]
        print(*args, **kws)

    def execute_static_assert(self, node: ast.Call) -> None:
        arg_count = len(node.args)
        if not (0 < arg_count <= 2) or len(node.keywords):
            raise TypeError("`static_assert` requires one or two positional arguments only")

        passed = _unwrap_if_constexpr(self.visit(node.args[0]))
        if not isinstance(passed, bool):
            raise NotImplementedError(
                "Assertion condition could not be determined at compile-time. Make sure that it depends only on `constexpr` values")
        if not passed:
            if arg_count == 1:
                message = ""
            else:
                try:
                    message = self.visit(node.args[1])
                except Exception as e:
                    message = "<failed to evaluate assertion message: " + repr(e) + ">"

            raise CompileTimeAssertionFailure(None, node, _unwrap_if_constexpr(message))
        return None

    statically_implemented_functions: Dict[object, Callable[[ast.Call], Any]] = {
        language.core.static_assert: execute_static_assert,
        language.core.static_print: execute_static_print,
    }


def str_to_ty(name):
    if name[0] == "*":
        ty = str_to_ty(name[1:])
        return language.pointer_type(ty)
    tys = {
        "fp8e4": language.float8e4,
        "fp8e5": language.float8e5,
        "fp8e4b15": language.float8e4b15,
        "fp16": language.float16,
        "bf16": language.bfloat16,
        "fp32": language.float32,
        "fp64": language.float64,
        "i1": language.int1,
        "i8": language.int8,
        "i16": language.int16,
        "i32": language.int32,
        "i64": language.int64,
        "u8": language.uint8,
        "u16": language.uint16,
        "u32": language.uint32,
        "u64": language.uint64,
        "B": language.int1,
    }
    return tys[name]


def kernel_suffix(signature, specialization):
    # suffix format:
    # <argid><'c' if equal to 1><'d' if divisible by 16>
    suffix = ''
    for i, _ in enumerate(signature):
        suffix += str(i)
        if i in specialization.equal_to_1:
            suffix += 'c'
        if i in specialization.divisible_by_16:
            suffix += 'd'
    return suffix


def ast_to_ttir(fn, signature, specialization, constants, debug, arch):
    # canonicalize signature
    if isinstance(signature, str):
        signature = {k: v.strip() for k, v in enumerate(signature.split(","))}
    context = ir.context()
    context.load_triton()
    # create kernel prototype
    cst_key = lambda i: fn.arg_names.index(i) if isinstance(i, str) else i
    constants = {cst_key(key): value for key, value in constants.items()}
    # visit kernel AST
    gscope = fn.__globals__.copy()
    function_name = '_'.join([fn.__name__, kernel_suffix(signature.values(), specialization)])
    tys = list(signature.values())
    new_constants = {k: True if k in tys and tys[k] == "i1" else 1 for k in specialization.equal_to_1}
    new_attrs = {k: ("multiple_of", 16) for k in specialization.divisible_by_16}
    all_constants = constants.copy()
    all_constants.update(new_constants)
    arg_types = [str_to_ty(v) for k, v in signature.items() if k not in constants]
    file_name, begin_line = _get_fn_file_line(fn)

    prototype = language.function_type([], arg_types)
    generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants,
                              function_name=function_name, attributes=new_attrs,
                              is_kernel=True, debug=debug, file_name=file_name, begin_line=begin_line,
                              arch=arch)
    try:
        generator.visit(fn.parse())
    except CompilationError as e:
        if e.src is None:
            e.set_source_code(fn.src)
        raise
    except Exception as e:
        node = generator.last_node
        if node is None:
            raise
        raise CompilationError(fn.src, node, repr(e)) from e
    ret = generator.module
    # module takes ownership of the context
    ret.context = context
    return ret
triton-main/python/triton/compiler/__init__.py
from .compiler import CompiledKernel, compile, instance_descriptor
from .errors import CompilationError

__all__ = ["compile", "instance_descriptor", "CompiledKernel", "CompilationError"]
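# Minimal usage sketch (editor's addition): these re-exports define the public
# compiler API, so callers import from `triton.compiler` rather than from the
# private submodules, e.g.:
#
#     from triton.compiler import compile, CompiledKernel, CompilationError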
triton-main/python/triton/language/semantic.py
from __future__ import annotations # remove after python 3.11 import warnings from functools import wraps from typing import List, Optional, Sequence, Tuple, TypeVar from .._C.libtriton.triton import ir from . import core as tl T = TypeVar('T') # Create custom exception that prints message "hello" class IncompatibleTypeErrorImpl(Exception): def __init__(self, type_a, type_b): self.type_a = type_a self.type_b = type_b self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__() super(IncompatibleTypeErrorImpl, self).__init__(self.message) # ===----------------------------------------------------------------------===## # Programming Model # ===----------------------------------------------------------------------===## def program_id(axis: int, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_get_program_id(axis), tl.int32) def num_programs(axis: int, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_get_num_programs(axis), tl.int32) # ===----------------------------------------------------------------------===// # Implicit Casting Utilities # ===----------------------------------------------------------------------===// def integer_promote_impl(a_ty: tl.dtype, b_ty: tl.dtype) -> tl.dtype: a_rank = a_ty.int_bitwidth b_rank = b_ty.int_bitwidth a_sn = a_ty.int_signedness b_sn = b_ty.int_signedness # Rules for signedness taken from "Usual arithmetic conversions" on # https://en.cppreference.com/w/c/language/conversion. if a_sn == b_sn: return a_ty if a_rank > b_rank else b_ty elif a_sn == tl.dtype.SIGNEDNESS.UNSIGNED: return a_ty if a_rank >= b_rank else b_ty elif b_sn == tl.dtype.SIGNEDNESS.UNSIGNED: return b_ty if b_rank >= a_rank else a_ty assert False def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> tl.dtype: # 1) if one operand is double, the other is implicitly # converted to double if a_ty.is_fp64() or b_ty.is_fp64(): return tl.float64 # 2) if one operand is float, the other is implicitly # converted to float if a_ty.is_fp32() or b_ty.is_fp32(): return tl.float32 # 3 ) if one operand is half, the other is implicitly converted to half # unless we're doing / or %, which do not exist natively in PTX for fp16. # Supported PTX op: add, sub, mul, fma, neg, abs, min, max, tanh, ex2, setp if a_ty.is_fp16() or b_ty.is_fp16(): if div_or_mod: return tl.float32 else: return tl.float16 # 4) return bf16 only if both operands are of bf16 if a_ty.is_bf16() or b_ty.is_bf16(): if div_or_mod: return tl.float32 if a_ty.is_bf16() and b_ty.is_bf16(): return tl.bfloat16 return tl.float32 if not a_ty.is_int() or not b_ty.is_int(): assert False # 5 ) both operands are integer and undergo # integer promotion if div_or_mod and a_ty.int_signedness != b_ty.int_signedness: raise ValueError("Cannot use /, #, or % with " + a_ty.__repr__() + " and " + b_ty.__repr__() + " because they have different signedness;" "this is unlikely to result in a useful answer. 
Cast them to the same signedness.") return integer_promote_impl(a_ty, b_ty) # ===----------------------------------------------------------------------===// # Binary Operators # ===----------------------------------------------------------------------===// def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None: if type_a.is_ptr(): if not allow_ptr_a: raise IncompatibleTypeErrorImpl(type_a, type_b) # T* + U* with T != U if type_b.is_ptr() and (type_a != type_b): raise IncompatibleTypeErrorImpl(type_a, type_b) # T* + float if type_b.is_floating(): raise IncompatibleTypeErrorImpl(type_a, type_b) def binary_op_type_checking_impl(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder, allow_lhs_ptr=False, allow_rhs_ptr=False, arithmetic_check=True, div_or_mod=False ) -> Tuple[tl.tensor, tl.tensor]: # implicit broadcasting lhs, rhs = broadcast_impl_value(lhs, rhs, builder) # implicit typecasting lhs_sca_ty = lhs.type.scalar rhs_sca_ty = rhs.type.scalar check_ptr_type_impl(lhs_sca_ty, rhs_sca_ty, allow_lhs_ptr) check_ptr_type_impl(rhs_sca_ty, lhs_sca_ty, allow_rhs_ptr) if arithmetic_check and not lhs_sca_ty.is_ptr() and not rhs_sca_ty.is_ptr(): ret_sca_ty = computation_type_impl(lhs_sca_ty, rhs_sca_ty, div_or_mod) lhs = cast(lhs, ret_sca_ty, builder) rhs = cast(rhs, ret_sca_ty, builder) return lhs, rhs def add(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, True, True) input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar # offset + ptr # ptr + offset if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr(): input, other = other, input if input_scalar_ty.is_ptr(): return tl.tensor(builder.create_addptr(input.handle, other.handle), input.type) # float + float elif input_scalar_ty.is_floating(): return tl.tensor(builder.create_fadd(input.handle, other.handle), input.type) # int + int elif input_scalar_ty.is_int(): return tl.tensor(builder.create_add(input.handle, other.handle), input.type) assert False def sub(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, True, False) scalar_ty = input.type.scalar # ptr - offset if scalar_ty.is_ptr(): return tl.tensor(builder.create_addptr(input.handle, minus(other, builder).handle), input.type) # float - float if scalar_ty.is_floating(): return tl.tensor(builder.create_fsub(input.handle, other.handle), input.type) # int - int elif scalar_ty.is_int(): return tl.tensor(builder.create_sub(input.handle, other.handle), input.type) assert False def mul(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float * float if scalar_ty.is_floating(): return tl.tensor(builder.create_fmul(input.handle, other.handle), input.type) # * int elif scalar_ty.is_int(): return tl.tensor(builder.create_mul(input.handle, other.handle), input.type) assert False def truediv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar # float / int if input_scalar_ty.is_floating() and other_scalar_ty.is_int(): other = cast(other, input_scalar_ty, builder) # int / float elif input_scalar_ty.is_int() and other_scalar_ty.is_floating(): input = cast(input, other_scalar_ty, 
builder) # int / int (cast to tl.float32) elif input_scalar_ty.is_int() and other_scalar_ty.is_int(): input = cast(input, tl.float32, builder) other = cast(other, tl.float32, builder) # float / float (cast to highest exponent type) elif input_scalar_ty.is_floating() and other_scalar_ty.is_floating(): if input_scalar_ty.fp_mantissa_width > other_scalar_ty.fp_mantissa_width: other = cast(other, input_scalar_ty, builder) else: input = cast(input, other_scalar_ty, builder) # unreachable else: assert False return tl.tensor(builder.create_fdiv(input.handle, other.handle), input.type) def floordiv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar if input_scalar_ty.is_int() and other_scalar_ty.is_int(): ret_ty = integer_promote_impl(input_scalar_ty, other_scalar_ty) input = cast(input, ret_ty, builder) other = cast(other, ret_ty, builder) if ret_ty.is_int_signed(): return tl.tensor(builder.create_sdiv(input.handle, other.handle), input.type) else: return tl.tensor(builder.create_udiv(input.handle, other.handle), input.type) assert False def fdiv(input: tl.tensor, other: tl.tensor, ieee_rounding: bool, builder: ir.builder) -> tl.tensor: input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating(): raise ValueError("both operands of fdiv must have floating scalar type") input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True) ret = builder.create_fdiv(input.handle, other.handle) return tl.tensor(ret, input.type) def mod(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar # float % float if scalar_ty.is_floating(): # input - input.div(other, rounding_mode="floor") * other ret = sub(input, mul(floor(fdiv(input, other, False, builder), builder), other, builder), builder) return ret # % int elif scalar_ty.is_int(): if scalar_ty.int_signedness != other_scalar_ty.int_signedness: raise ValueError("Cannot mod " + scalar_ty.__repr__() + " by " + other_scalar_ty.__repr__() + " " "because they have different signedness;" "this is unlikely to result in a useful answer. 
Cast them to the same signedness.") if scalar_ty.is_int_signed(): return tl.tensor(builder.create_srem(input.handle, other.handle), input.type) else: return tl.tensor(builder.create_urem(input.handle, other.handle), input.type) assert False ############## # bitwise ops ############## def bitwise_op_type_checking_impl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]: input, other = binary_op_type_checking_impl(input, other, builder, False, False, False) input_sca_ty = input.type.scalar other_sca_ty = other.type.scalar if not input_sca_ty.is_int() or not other_sca_ty.is_int(): raise IncompatibleTypeErrorImpl(input_sca_ty, other_sca_ty) ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty) if ret_sca_ty != input_sca_ty: input = cast(input, ret_sca_ty, builder) if ret_sca_ty != other_sca_ty: other = cast(other, ret_sca_ty, builder) return input, other def and_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_and(input.handle, other.handle), input.type) def or_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_or(input.handle, other.handle), input.type) def xor_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_xor(input.handle, other.handle), input.type) def logical_and(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: if not input.type.is_int1(): input = bitcast(input, tl.dtype("int1"), builder) if not other.type.is_int1(): other = bitcast(other, tl.dtype("int1"), builder) return and_(input, other, builder) def logical_or(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: if not input.type.is_int1(): input = bitcast(input, tl.dtype("int1"), builder) if not other.type.is_int1(): other = bitcast(other, tl.dtype("int1"), builder) return or_(input, other, builder) def not_(input: tl.tensor, builder: ir.builder): if not input.type.is_int1(): input = bitcast(input, tl.dtype("int1"), builder) return invert(input, builder) def lshr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_lshr(input.handle, other.handle), input.type) def ashr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_ashr(input.handle, other.handle), input.type) def shl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_shl(input.handle, other.handle), input.type) # ===----------------------------------------------------------------------===// # Unary Operators # ===----------------------------------------------------------------------===// def plus(input: tl.tensor) -> tl.tensor: return input def minus(input: tl.tensor, builder: ir.builder) -> tl.tensor: input_sca_ty = input.type.scalar if input_sca_ty.is_ptr(): raise ValueError("wrong type argument to unary minus (" + input_sca_ty.__repr__() + ")") _0 = tl.tensor(builder.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty) return sub(_0, input, builder) def invert(input: 
tl.tensor, builder: tl.tensor) -> tl.tensor: input_sca_ty = input.type.scalar if input_sca_ty.is_ptr() or input_sca_ty.is_floating(): raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")") _1 = tl.tensor(builder.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty) return xor_(input, _1, builder) # ===----------------------------------------------------------------------===// # Comparison Operators # ===----------------------------------------------------------------------===// def _bool_like(v: tl.tensor) -> tl.block_type: if not v.type.is_block(): return tl.int1 shape = v.type.shape return tl.block_type(tl.int1, shape) def greater_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float > float if scalar_ty.is_floating(): return tl.tensor(builder.create_fcmpOGT(input.handle, other.handle), _bool_like(input)) # > int elif scalar_ty.is_int(): if scalar_ty.is_int_signed(): return tl.tensor(builder.create_icmpSGT(input.handle, other.handle), _bool_like(input)) else: return tl.tensor(builder.create_icmpUGT(input.handle, other.handle), _bool_like(input)) assert False def greater_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float >= float if scalar_ty.is_floating(): return tl.tensor(builder.create_fcmpOGE(input.handle, other.handle), _bool_like(input)) # >= int elif scalar_ty.is_int(): if scalar_ty.is_int_signed(): return tl.tensor(builder.create_icmpSGE(input.handle, other.handle), _bool_like(input)) else: return tl.tensor(builder.create_icmpUGE(input.handle, other.handle), _bool_like(input)) assert False def less_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float < float if scalar_ty.is_floating(): return tl.tensor(builder.create_fcmpOLT(input.handle, other.handle), _bool_like(input)) # < int elif scalar_ty.is_int(): if scalar_ty.is_int_signed(): return tl.tensor(builder.create_icmpSLT(input.handle, other.handle), _bool_like(input)) else: return tl.tensor(builder.create_icmpULT(input.handle, other.handle), _bool_like(input)) assert False def less_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float < float if scalar_ty.is_floating(): return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), _bool_like(input)) # < int elif scalar_ty.is_int(): if scalar_ty.is_int_signed(): return tl.tensor(builder.create_icmpSLE(input.handle, other.handle), _bool_like(input)) else: return tl.tensor(builder.create_icmpULE(input.handle, other.handle), _bool_like(input)) assert False def equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float == float if scalar_ty.is_floating(): return tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), _bool_like(input)) # == int elif scalar_ty.is_int(): return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), _bool_like(input)) assert False def not_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, 
other, builder) scalar_ty = input.type.scalar # float == float if scalar_ty.is_floating(): return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), _bool_like(input)) # == int elif scalar_ty.is_int(): return tl.tensor(builder.create_icmpNE(input.handle, other.handle), _bool_like(input)) assert False # ===----------------------------------------------------------------------===// # Block Creation # ===----------------------------------------------------------------------===// def arange(start: int, end: int, builder: ir.builder) -> tl.tensor: if not isinstance(start, int) or not isinstance(end, int): raise ValueError("arange's arguments must be of type tl.constexpr") is_start_int64 = bool(start >> 32) is_end_int64 = bool(end >> 32) if is_start_int64 or is_end_int64: raise ValueError("arange must fit in int32") if end <= start: raise ValueError("arange's end argument must be greater than the start argument") shape = [end - start] ret_ty = tl.block_type(tl.int32, shape) return tl.tensor(builder.create_make_range(start, end), ret_ty) def full(shape: List[int], value, dtype: tl.dtype, builder: ir.builder) -> tl.tensor: if isinstance(value, tl.tensor): assert value.numel.value == 1, "only accepts size-1 tensor" value = cast(value, dtype, builder) ret_ty = tl.block_type(value.dtype, shape) return tl.tensor(builder.create_splat(value.handle, shape), ret_ty) else: # scalar if value == 0: value = builder.get_null_value(dtype.to_ir(builder)) else: get_value_fn = getattr(builder, f"get_{dtype.name}") value = get_value_fn(value) if dtype is None: raise ValueError("dtype must be specified when value is not a tensor") ret_ty = tl.block_type(dtype, shape) return tl.tensor(builder.create_splat(value, shape), ret_ty) # ===----------------------------------------------------------------------===// # Shape Manipulation # ===----------------------------------------------------------------------===// def view(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor: # TODO: disable when TritonToTritonGPU handles views properly # assert len(input.shape) == len(dst_shape) numel = 1 for s in dst_shape: numel *= s if input.type.numel != numel: raise ValueError("cannot view block of different shape") ret_ty = tl.block_type(input.type.scalar, dst_shape) return tl.tensor(builder.create_view(input.handle, dst_shape), ret_ty) def reshape(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor: raise ValueError("`reshape` is not supported yet. Please use `view` instead if applicable. 
" "Note that view may reorder elements in an implementation- and context- dependent way.") def expand_dims(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor: dst_shape = list(input.type.shape) dst_shape.insert(axis, 1) ret_ty = tl.block_type(input.type.scalar, dst_shape) return tl.tensor(builder.create_expand_dims(input.handle, axis), ret_ty) def cat(lhs: tl.tensor, rhs: tl.tensor, can_reorder: bool, builder: ir.builder) -> tl.tensor: assert can_reorder, "current implementation of `cat` always may reorder elements" assert len(lhs.shape) == 1 ret_type = tl.block_type(lhs.type.scalar, [lhs.shape[0] + rhs.shape[0]]) return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_type) def trans(input: tl.tensor, builder: ir.builder) -> tl.tensor: if len(input.shape) != 2: raise ValueError("Only 2D tensors can be transposed") ret_type = tl.block_type(input.type.scalar, [input.shape[1], input.shape[0]]) return tl.tensor(builder.create_trans(input.handle), ret_type) def broadcast_impl_shape(input: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor: if not input.type.is_block(): ret_ty = tl.block_type(input.type, shape) return tl.tensor(builder.create_splat(input.handle, shape), ret_ty) src_shape = input.type.get_block_shapes() if len(src_shape) != len(shape): raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}") if shape == src_shape: return input for i, item in enumerate(src_shape): if shape[i] != item and item != 1: raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})" f" must match the existing size ({item}) at non-singleton dimension" f" {i}: {src_shape}, {shape}") ret_ty = tl.block_type(input.type.scalar, shape) return tl.tensor(builder.create_broadcast(input.handle, shape), ret_ty) def broadcast_impl_value(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> tl.tensor: lhs_ty = lhs.type rhs_ty = rhs.type # make_shape_compatible(block, scalar) if lhs_ty.is_block() and not rhs_ty.is_block(): rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape) rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty) # make_shape_compatible(scalar, block) elif not lhs_ty.is_block() and rhs_ty.is_block(): lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape) lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty) # make_shape_compatible(block, block) elif lhs_ty.is_block() and rhs_ty.is_block(): lhs_shape = lhs_ty.get_block_shapes() rhs_shape = rhs_ty.get_block_shapes() if len(lhs_shape) < len(rhs_shape): # Add new axes to lhs for dim in range(len(lhs_shape), len(rhs_shape)): lhs = tl.tensor(builder.create_expand_dims(lhs.handle, 0), tl.block_type(lhs_ty.scalar, [1] + lhs_shape)) lhs_ty = lhs.type lhs_shape = lhs_ty.get_block_shapes() elif len(rhs_shape) < len(lhs_shape): # Add new axes to rhs for dim in range(len(rhs_shape), len(lhs_shape)): rhs = tl.tensor(builder.create_expand_dims(rhs.handle, 0), tl.block_type(rhs_ty.scalar, [1] + rhs_shape)) rhs_ty = rhs.type rhs_shape = rhs_ty.get_block_shapes() assert len(rhs_shape) == len(lhs_shape) ret_shape = [] for i, left in enumerate(lhs_shape): right = rhs_shape[i] if left == 1: ret_shape.append(right) elif right == 1: ret_shape.append(left) elif left == right: ret_shape.append(left) else: raise ValueError("Cannot make_shape_compatible: incompatible dimensions " "at index " + str(i) + ": " + str(left) + " and " + str(right)) if lhs_shape != ret_shape: ret_ty = tl.block_type(lhs_ty.scalar, ret_shape) lhs = 
tl.tensor(builder.create_broadcast(lhs.handle, ret_shape), ret_ty) if rhs_shape != ret_shape: ret_ty = tl.block_type(rhs_ty.scalar, ret_shape) rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape), ret_ty) # (scalar, scalar) => returns original blocks return lhs, rhs ####### # cast ####### def bitcast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: src_ty = input.type if src_ty.is_block(): dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes()) if src_ty == dst_ty: return input src_sca_ty = src_ty.scalar dst_sca_ty = dst_ty.scalar if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr(): return cast(input, dst_ty, builder) # Bitcast src_bits = src_sca_ty.primitive_bitwidth dst_bits = dst_sca_ty.primitive_bitwidth if src_bits != dst_bits: raise ValueError("Cannot bitcast data-type of size " + str(src_bits) + " to " "data-type of size " + str(dst_bits)) return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) # TODO: architecture descriptor class def _is_cuda(arch): return isinstance(arch, int) def cast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: src_ty = input.type if isinstance(dst_ty, tl.constexpr): dst_ty = dst_ty.value if src_ty.is_block(): dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes()) if src_ty == dst_ty: return input src_sca_ty = src_ty.scalar dst_sca_ty = dst_ty.scalar if _is_cuda(builder.arch) and builder.arch < 89 and \ (src_sca_ty.is_fp8e4() or dst_sca_ty.is_fp8e4()): warnings.warn("Standard tl.float8e4 format will be deprecated on SM < 89. " "Please use tl.float8e4b15.", DeprecationWarning) # Unsupported conversion: if (src_sca_ty.is_fp8e4b15() and not dst_sca_ty.is_fp16()) or \ (dst_sca_ty.is_fp8e4b15() and not src_sca_ty.is_fp16()): raise ValueError('fp8e4b15 can only be converted to/from fp16') # Casting with customized floating types involved: fp8 <=> bf16, fp16, fp32, fp64 if (src_sca_ty.is_fp8() and dst_sca_ty.is_floating()) or \ (src_sca_ty.is_floating() and dst_sca_ty.is_fp8()): return tl.tensor(builder.create_fp_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) # bf16 <=> (not fp32) if (src_sca_ty.is_fp16() and not dst_sca_ty.is_fp32()) or \ (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()): return cast(cast(input, tl.float32, builder), dst_sca_ty, builder) # Standard floating types' casting: truncation # fp64 => fp32, fp16, bf16 # fp32 => fp16, bf16 truncate_fp = src_sca_ty.is_floating() and \ dst_sca_ty.is_floating() and \ src_sca_ty.primitive_bitwidth > dst_sca_ty.primitive_bitwidth if truncate_fp: return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)), dst_ty) # Standard floating types' casting: extension # fp32 => fp64 # fp16 => fp32, fp64 # bf16 => fp32, fp64 ext_fp = src_sca_ty.is_floating() and \ dst_sca_ty.is_floating() and \ src_sca_ty.primitive_bitwidth < dst_sca_ty.primitive_bitwidth if ext_fp: return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)), dst_ty) # Casting between integer types if src_sca_ty.is_int() and dst_sca_ty.is_int() and \ (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness): sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool() if dst_sca_ty.is_bool(): ty = input.dtype.to_ir(builder) _0 = tl.tensor(builder.get_null_value(ty), input.dtype) return not_equal(input, _0, builder) else: return tl.tensor(builder.create_int_cast(input.handle, dst_ty.to_ir(builder), sign_extend), dst_ty) # Casting standard floating 
types to integer types if src_sca_ty.is_standard_floating() and dst_sca_ty.is_int(): if dst_sca_ty.is_bool(): ty = input.dtype.to_ir(builder) _0 = tl.tensor(builder.get_null_value(ty), input.dtype) return not_equal(input, _0, builder) elif dst_sca_ty.is_int_signed(): return tl.tensor(builder.create_fp_to_si(input.handle, dst_ty.to_ir(builder)), dst_ty) else: return tl.tensor(builder.create_fp_to_ui(input.handle, dst_ty.to_ir(builder)), dst_ty) # Casting integer types to standard floating types if src_sca_ty.is_int() and dst_sca_ty.is_standard_floating(): if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed(): return tl.tensor(builder.create_ui_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) else: return tl.tensor(builder.create_si_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) # Casting pointer types to integer types if src_sca_ty.is_ptr() and dst_sca_ty.is_int(): bitwidth = dst_sca_ty.int_bitwidth if bitwidth == 64: return tl.tensor(builder.create_ptr_to_int(input.handle, dst_ty.to_ir(builder)), dst_ty) if bitwidth == 1: return not_equal(cast(input, tl.int64, builder), tl.tensor(builder.get_int64(0), tl.int64), builder) # Casting integer types to pointer types if src_sca_ty.is_int() and dst_sca_ty.is_ptr(): return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty) # Casting pointer types to pointer types if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr(): return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) assert False, f'cannot cast {input} to {dst_ty}' # ===----------------------------------------------------------------------===// # Memory Operators # ===----------------------------------------------------------------------===// def _str_to_load_cache_modifier(cache_modifier): cache = ir.CACHE_MODIFIER.NONE # default if cache_modifier: if cache_modifier == ".ca": cache = ir.CACHE_MODIFIER.CA elif cache_modifier == ".cg": cache = ir.CACHE_MODIFIER.CG else: raise ValueError(f"Cache modifier {cache_modifier} not supported") return cache def _str_to_store_cache_modifier(cache_modifier): cache = ir.CACHE_MODIFIER.NONE # default if cache_modifier: if cache_modifier == ".wb": cache = ir.CACHE_MODIFIER.WB elif cache_modifier == ".cg": cache = ir.CACHE_MODIFIER.CG elif cache_modifier == ".cs": cache = ir.CACHE_MODIFIER.CS elif cache_modifier == ".wt": cache = ir.CACHE_MODIFIER.WT else: raise ValueError(f"Cache modifier {cache_modifier} not supported") return cache def _str_to_eviction_policy(eviction_policy): eviction = ir.EVICTION_POLICY.NORMAL # default if eviction_policy: if eviction_policy == "evict_last": eviction = ir.EVICTION_POLICY.EVICT_LAST elif eviction_policy == "evict_first": eviction = ir.EVICTION_POLICY.EVICT_FIRST else: raise ValueError(f"Eviction policy {eviction_policy} not supported") return eviction def _str_to_padding_option(padding_option): padding = None # default if padding_option: if padding_option == "zero": padding = ir.PADDING_OPTION.PAD_ZERO elif padding_option == "nan": padding = ir.PADDING_OPTION.PAD_NAN else: raise ValueError(f"Padding option {padding_option} not supported") return padding def _str_to_sem(sem_option): sem = ir.MEM_SEMANTIC.ACQUIRE_RELEASE if sem_option: if sem_option == "acquire": sem = ir.MEM_SEMANTIC.ACQUIRE elif sem_option == "release": sem = ir.MEM_SEMANTIC.RELEASE elif sem_option == "acq_rel": sem = ir.MEM_SEMANTIC.ACQUIRE_RELEASE elif sem_option == "relaxed": sem = ir.MEM_SEMANTIC.RELAXED else: raise ValueError(f"Memory semantic {sem_option} not supported") return sem 
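# Illustrative sketch (editor's addition): the `_str_to_*` helpers above map
# user-facing option strings onto the corresponding `ir` enums, fall back to a
# default when the option is empty/None, and raise ValueError on unknown
# strings, e.g.:
#
#     _str_to_load_cache_modifier(".ca")  # ir.CACHE_MODIFIER.CA
#     _str_to_load_cache_modifier(None)   # ir.CACHE_MODIFIER.NONE (default)
#     _str_to_sem("acquire")              # ir.MEM_SEMANTIC.ACQUIRE
#     _str_to_sem(None)                   # ir.MEM_SEMANTIC.ACQUIRE_RELEASE (default)
#     _str_to_padding_option("bogus")     # raises ValueError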
def _canonicalize_boundary_check(boundary_check, block_shape): if boundary_check: if not hasattr(boundary_check, "__iter__"): boundary_check = [boundary_check] boundary_check = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in boundary_check] for dim in boundary_check: assert isinstance(dim, int) and 0 <= dim < len(block_shape) assert len(boundary_check) > 0 assert len(boundary_check) == len(set(boundary_check)), "Duplicate dimension in `boundary_check`" return sorted(boundary_check) return tuple() def _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder): # Load by a block pointer: `pointer_type<block_type<>>` # Block pointer can not have `mask` and `other` arguments if mask or other: raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers") elt_ty = ptr.type.element_ty.element_ty assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`" if elt_ty.is_int() and padding == ir.PADDING_OPTION.PAD_NAN: raise ValueError("Padding option `nan` is not supported for integer block pointers") # `dst_ty` is de-referenced type of the pointer type dst_ty = ptr.type.element_ty # Check `boundary_check` argument boundary_check = _canonicalize_boundary_check(boundary_check, dst_ty.get_block_shapes()) # Build IR return tl.tensor(builder.create_tensor_pointer_load(ptr.handle, boundary_check, padding, cache, eviction, is_volatile), dst_ty) def _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder): # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>` if not ptr.type.scalar.is_ptr(): raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.load`") # Check `mask`, `other`, `boundary_check`, and `padding` arguments if not mask and other: raise ValueError("`other` cannot be provided without `mask`") if padding or boundary_check: raise ValueError("`padding_option` or `boundary_check` argument is not supported for loading a tensor of" "pointers or loading a scalar. 
Because the compiler does not know the boundary; please " "use block pointers (defined by `make_block_ptr`) instead") # For a pointer of scalar, check the type of `mask` and `other` if not ptr.type.is_block(): if mask and mask.type.is_block(): raise ValueError("Mask argument cannot be block type if pointer argument is not a block") if other and other.type.is_block(): raise ValueError("Other argument cannot be block type if pointer argument is not a block") # Make `mask` and `other` into the same shape as `ptr` if ptr.type.is_block(): if mask: mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) if other: other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder) # Get `pointer_type<elt_ty>` and `elt_ty` ptr_ty = ptr.type.scalar elt_ty = ptr_ty.element_ty # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>` if elt_ty == tl.int1: elt_ty = tl.int8 ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) ptr = cast(ptr, ptr_ty, builder) # Cast `other` into `ele_ty` type if other: other = cast(other, elt_ty, builder) # Create loaded result type `dst_ty` if ptr.type.is_block(): shape = ptr.type.get_block_shapes() dst_ty = tl.block_type(elt_ty, shape) else: # Load by de-referencing the pointer of scalar dst_ty = elt_ty # Build IR if not mask: return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile), dst_ty) else: return tl.tensor(builder.create_masked_load(ptr.handle, mask.handle, other.handle if other else None, cache, eviction, is_volatile), dst_ty) def load(ptr: tl.tensor, mask: Optional[tl.tensor], other: Optional[tl.tensor], boundary_check, padding_option: str, cache_modifier: str, eviction_policy: str, is_volatile: bool, builder: ir.builder) -> tl.tensor: # Cache, eviction and padding options cache = _str_to_load_cache_modifier(cache_modifier) eviction = _str_to_eviction_policy(eviction_policy) padding = _str_to_padding_option(padding_option) if ptr.type.is_ptr() and ptr.type.element_ty.is_block(): # Load by a block pointer: `pointer_type<block_type<>>` return _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder) else: # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>` return _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder) def _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder): # Store by a block pointer: `pointer_type<block_type<>>` # Block pointers can not have the `mask` argument if mask: raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers") # Check same shape and element type block_shape = ptr.type.element_ty.get_block_shapes() if not val.type.is_block(): val = broadcast_impl_shape(val, block_shape, builder) assert val.type.is_block(), "Value argument must be block type or a scalar" assert block_shape == val.type.get_block_shapes(), "Block shape and value shape mismatch" assert ptr.type.element_ty.element_ty == val.type.element_ty, "Block element type and value element type mismatch" elt_ty = ptr.type.element_ty.element_ty assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`" # Check `boundary_check` argument boundary_check = _canonicalize_boundary_check(boundary_check, block_shape) # Build IR return tl.tensor(builder.create_tensor_pointer_store(ptr.handle, val.handle, boundary_check, cache, eviction), tl.void) def _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder): # 
Store by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>` if not ptr.type.scalar.is_ptr(): raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.store`") # Check `boundary_check` argument if boundary_check: raise ValueError("`boundary_check` argument is not supported for storing a tensor of pointers or storing a " "scalar. Because the compiler does not know the boundary; please use block pointers " "(defined by `make_block_ptr`) instead") # For a pointer of scalar, check the type of `val` and `mask` if not ptr.type.is_block(): if val.type.is_block(): raise ValueError("Value argument cannot be block type if pointer argument is not a block") if mask and mask.type.is_block(): raise ValueError("Mask argument cannot be block type if pointer argument is not a block") # Make `mask` and `val` into the same shape as `ptr` if ptr.type.is_block(): val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) if mask: mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) ptr_ty = ptr.type.scalar elt_ty = ptr_ty.element_ty # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>` if elt_ty == tl.int1: elt_ty = tl.int8 ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) ptr = cast(ptr, ptr_ty, builder) # Cast to target data type val = cast(val, elt_ty, builder) # Build IR if not mask: return tl.tensor(builder.create_store(ptr.handle, val.handle, cache, eviction), tl.void) if not mask.type.scalar.is_bool(): raise ValueError("Mask must have boolean scalar type") return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, cache, eviction), tl.void) def store(ptr: tl.tensor, val: tl.tensor, mask: Optional[tl.tensor], boundary_check, cache_modifier: str, eviction_policy: str, builder: ir.builder) -> tl.tensor: # Cache and eviction options cache = _str_to_store_cache_modifier(cache_modifier) eviction = _str_to_eviction_policy(eviction_policy) if ptr.type.is_ptr() and ptr.type.element_ty.is_block(): # Store by a block pointer: `pointer_type<block_type<>>` return _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder) else: # Store by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>` return _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder) ######### # atomic ######### def atomic_cas(ptr: tl.tensor, cmp: tl.tensor, val: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: sem = _str_to_sem(sem) element_ty = ptr.type.scalar.element_ty if element_ty.primitive_bitwidth not in [16, 32, 64]: raise ValueError("atomic_cas only supports elements with width {16, 32, 64}") return tl.tensor(builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle, sem), val.type) def atom_red_typechecking_impl(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, op: str, builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]: if not ptr.type.scalar.is_ptr(): raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__()) element_ty = ptr.type.scalar.element_ty if element_ty is tl.float16 and op != 'add': raise ValueError("atomic_" + op + " does not support fp16") if element_ty in [tl.int1, tl.int8, tl.int16, tl.bfloat16]: raise ValueError("atomic_" + op + " does not support " + str(element_ty)) if ptr.type.is_block(): if mask: mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) if val: val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) val = cast(val, 
ptr.type.scalar.element_ty, builder) if not mask: mask_ir = builder.get_int1(True) mask_ty = tl.int1 if ptr.type.is_block(): mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes()) mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes()) mask = tl.tensor(mask_ir, mask_ty) return ptr, val, mask def atomic_max(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'max', builder) sem = _str_to_sem(sem) sca_ty = val.type.scalar # direct call to atomic_max for integers if sca_ty.is_int(): if sca_ty.is_int_signed(): return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, ptr.handle, val.handle, mask.handle, sem), val.type) else: return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, ptr.handle, val.handle, mask.handle, sem), val.type) # for float # return atomic_smax(i_ptr, i_val) if val >= 0 # return atomic_umin(i_ptr, i_val) if val < 0 i_val = bitcast(val, tl.int32, builder) i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder) pos = greater_equal(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) neg = less_than(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle, sem), i_val.type) neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle, sem), i_val.type) return where(pos, pos_ret, neg_ret, builder) def atomic_min(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'min', builder) sem = _str_to_sem(sem) sca_ty = val.type.scalar # direct call to atomic_min for integers if sca_ty.is_int(): if sca_ty.is_int_signed(): return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, ptr.handle, val.handle, mask.handle, sem), val.type) else: return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, ptr.handle, val.handle, mask.handle, sem), val.type) # for float # return atomic_smin(i_ptr, i_val) if val >= 0 # return atomic_umax(i_ptr, i_val) if val < 0 i_val = bitcast(val, tl.int32, builder) i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder) pos = greater_equal(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) neg = less_than(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle, sem), i_val.type) neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle, sem), i_val.type) return where(pos, pos_ret, neg_ret, builder) def atomic_add(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'add', builder) sem = _str_to_sem(sem) sca_ty = val.type.scalar op = ir.ATOMIC_OP.FADD if sca_ty.is_floating() else ir.ATOMIC_OP.ADD return tl.tensor(builder.create_atomic_rmw(op, ptr.handle, val.handle, mask.handle, sem), val.type) def atomic_and(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'and', builder) sem = _str_to_sem(sem) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.AND, ptr.handle, val.handle, mask.handle, sem), val.type) def atomic_or(ptr: tl.tensor, val: 
tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'or', builder) sem = _str_to_sem(sem) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.OR, ptr.handle, val.handle, mask.handle, sem), val.type) def atomic_xor(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xor', builder) sem = _str_to_sem(sem) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XOR, ptr.handle, val.handle, mask.handle, sem), val.type) def atomic_xchg(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xchg', builder) sem = _str_to_sem(sem) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XCHG, ptr.handle, val.handle, mask.handle, sem), val.type) # ===----------------------------------------------------------------------===// # Linear Algebra # ===----------------------------------------------------------------------===// def dot(lhs: tl.tensor, rhs: tl.tensor, allow_tf32: bool, out_dtype: tl.dtype, builder: ir.builder) -> tl.tensor: assert lhs.type.is_block() and rhs.type.is_block() assert lhs.dtype == rhs.dtype, f"First input ({lhs.dtype}) and second input ({rhs.dtype}) must have the same dtype!" assert len(lhs.shape) == 2, f"First input shape ({lhs.shape}) is not two dimensional!" assert len(rhs.shape) == 2, f"Second input shape ({rhs.shape}) is not two dimensional!" assert lhs.shape[1].value == rhs.shape[0].value, f"First input shape ({lhs.shape}) and second input shape {rhs.shape} are not compatible for matmul (second index of first shape ({lhs.shape[1].value}) must be equal to first index of second shape ({rhs.shape[0].value})" assert lhs.shape[0].value >= 16 and lhs.shape[1].value >= 16 \ and rhs.shape[1].value >= 16,\ f"All values in both first input shape ({lhs.shape}) and second input shape ({rhs.shape}) must be >= 16!" if lhs.type.scalar.is_int(): assert lhs.type.scalar == tl.int8, "only int8 supported!" # TODO: This is CUDA specific, check if ROCm has the same limitation assert lhs.shape[1].value >= 32, "small blocks not supported!" 
_0 = builder.get_int32(0) ret_scalar_ty = tl.int32 elif lhs.type.scalar.is_fp32() or lhs.type.scalar.is_bf16(): _0 = builder.get_fp32(0) ret_scalar_ty = tl.float32 else: _0 = builder.get_fp16(0) if out_dtype.is_fp16() else builder.get_fp32(0) ret_scalar_ty = out_dtype M = lhs.type.shape[0] N = rhs.type.shape[1] _0 = builder.create_splat(_0, [M, N]) ret_ty = tl.block_type(ret_scalar_ty, [M, N]) return tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty) # ===----------------------------------------------------------------------===// # Indexing # ===----------------------------------------------------------------------===// def where(condition: tl.tensor, x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: condition = cast(condition, tl.int1, builder) if condition.type.is_block(): condition, x = broadcast_impl_value(condition, x, builder) x, y = broadcast_impl_value(x, y, builder) condition, x = broadcast_impl_value(condition, x, builder) x, y = binary_op_type_checking_impl(x, y, builder, True, True) if not condition.type.is_block(): condition, _ = broadcast_impl_value(condition, x, builder) ret_ty = x.type return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), ret_ty) # ===----------------------------------------------------------------------===// # Reduction # ===----------------------------------------------------------------------=== def reduction( inputs: Sequence[tl.tensor], axis: int, region_builder_fn, builder: ir.builder ) -> Tuple[tl.tensor, ...]: if axis is None: new_inputs = [] for i in range(len(inputs)): new_shape = [inputs[i].numel.value] new_inputs.append(view(inputs[i], new_shape, builder)) inputs = tuple(new_inputs) axis = 0 # get result shape shape = inputs[0].type.shape ret_shape = [s for i, s in enumerate(shape) if i != axis] for t in inputs: assert t.type.shape == shape def wrap_tensor(x, scalar_ty): if ret_shape: res_ty = tl.block_type(scalar_ty, ret_shape) else: # 0d-tensor -> scalar res_ty = scalar_ty return tl.tensor(x, res_ty) reduce_op = builder.create_reduce([t.handle for t in inputs], axis) region_builder_fn(reduce_op) reduce_op.verify() return tuple( wrap_tensor(reduce_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs)) ) # ===----------------------------------------------------------------------=== # Associative Scan # ===----------------------------------------------------------------------=== def associative_scan( inputs: Sequence[tl.tensor], axis: int, region_builder_fn, builder: ir.builder ) -> Tuple[tl.tensor, ...]: if len(inputs) != 1: raise ValueError("Current implementation only support single tensor input") shape = inputs[0].type.shape def wrap_tensor(x, scalar_ty): res_ty = tl.block_type(scalar_ty, shape) return tl.tensor(x, res_ty) scan_op = builder.create_scan([t.handle for t in inputs], axis) region_builder_fn(scan_op) scan_op.verify() return tuple( wrap_tensor(scan_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs)) ) # ===----------------------------------------------------------------------=== # Math # ===----------------------------------------------------------------------=== def _check_dtype(dtypes: List[str]) -> T: """ We following libdevice's convention to check accepted data types for math functions. It is not a good practice to support all data types as accelerators/GPUs don't support many float16 and bfloat16 math operations. We should let the users know that they are using and invoke explicit cast to convert the data type to the supported one. 
""" def wrapper(fn): @wraps(fn) def check(*args, **kwargs): # concatenate args and kwargs all_args = list(args) + list(kwargs.values()) for arg in [a for a in all_args if isinstance(a, tl.tensor)]: if arg.type.scalar.name not in dtypes: raise ValueError(f"Expected dtype {dtypes} but got {arg.type.scalar.name}") return fn(*args, **kwargs) return check return wrapper def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: x, y = binary_op_type_checking_impl(x, y, builder) # FIXME(Keren): not portable, should be fixed from . import math return math.mulhi(x, y, _builder=builder) @_check_dtype(dtypes=["fp32", "fp64"]) def floor(x: tl.tensor, builder: ir.builder) -> tl.tensor: # FIXME(Keren): not portable, should be fixed from . import math return math.floor(x, _builder=builder) @_check_dtype(dtypes=["fp32", "fp64"]) def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_exp(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def log(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_log(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def cos(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_cos(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def sin(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_sin(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_sqrt(x.handle), x.type) def abs(x: tl.tensor, builder: ir.builder) -> tl.tensor: dtype = x.dtype if dtype.is_floating(): return tl.tensor(builder.create_fabs(x.handle), x.type) elif dtype.is_int_signed(): return tl.tensor(builder.create_iabs(x.handle), x.type) elif dtype.is_int_unsigned(): return x # no-op else: assert False, f"Unexpected dtype {dtype}" ## def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to multiple_of does not match the length of values") x.handle.set_attr("tt.divisibility", ir.make_attr(values, x.handle.get_context())) return x def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to max_contiguous does not match the length of values") x.handle.set_attr("tt.contiguity", ir.make_attr(values, x.handle.get_context())) return x def max_constancy(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to max_constancy does not match the length of values") x.handle.set_attr("tt.constancy", ir.make_attr(values, x.handle.get_context())) return x def debug_barrier(builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_barrier(), tl.void) def device_print(prefix: str, args: List[tl.tensor], builder: ir.builder) -> tl.tensor: new_args = [] for arg in args: new_args.append(arg.handle) return tl.tensor(builder.create_print(prefix, new_args), tl.void) def device_assert(cond: tl.tensor, msg: str, file_name: str, func_name, lineno: int, builder: ir.builder) -> tl.tensor: cond_ty = cond.type if not cond_ty.is_block(): cond_ty = tl.block_type(cond_ty.scalar, (1,)) cond = tl.tensor(builder.create_splat(cond.handle, (1,)), cond_ty) return tl.tensor(builder.create_assert(cond.handle, msg, file_name, func_name, lineno), tl.void) def _convert_elem_to_ir_value(builder, elem, require_i64): if isinstance(elem, tl.constexpr): return builder.get_int64(elem.value) if 
require_i64 else builder.get_int32(elem.value) elif isinstance(elem, tl.tensor): assert elem.numel.value == 1, "Expected a scalar in shape/strides/offsets" assert elem.dtype.is_int(), "Expected an integer scalar type in shape/strides/offsets" if elem.dtype != tl.int64 and require_i64: return builder.create_int_cast(elem.handle, builder.get_int64_ty(), elem.dtype.is_int_signed()) elif elem.dtype != tl.int32: return builder.create_int_cast(elem.handle, builder.get_int32_ty(), elem.dtype.is_int_signed()) return elem.handle assert False, f"Unsupported element type in shape/strides/offsets: {type(elem)}" def _convert_to_ir_values(builder, list_like, require_i64=True): if hasattr(list_like, "__iter__"): return [_convert_elem_to_ir_value(builder, elem, require_i64) for elem in list_like] return [_convert_elem_to_ir_value(builder, list_like, require_i64)] def make_block_ptr(base: tl.tensor, shape, strides, offsets, block_shape, order, builder: ir.builder) -> tl.tensor: # Convert dynamic arguments to IR values # NOTES(Chenggang): current `shape/strides` are `int64_t`, while `offsets/block_shape` are `int32_t` shape = _convert_to_ir_values(builder, shape) strides = _convert_to_ir_values(builder, strides) offsets = _convert_to_ir_values(builder, offsets, require_i64=False) # Check `base` type if not base.type.is_ptr() or base.type.element_ty.is_block(): raise ValueError("Expected `base` to be a pointer type (but not a block pointer type or others)") # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>` if base.type.element_ty == tl.int1: base = cast(base, tl.pointer_type(tl.int8, base.type.address_space), builder) # Check whether `block_shape` is static if not hasattr(block_shape, "__iter__"): block_shape = [block_shape] block_shape = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in block_shape] assert all([isinstance(elem, int) and -2**31 <= elem < 2**31 for elem in block_shape]), \ "Expected a list of constant integers (`int32_t` range) in `block_shape`" # Check `order` if not hasattr(order, "__iter__"): order = [order] order = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in order] assert sorted(order) == list(range(len(order))), "Expected a permutation of (0, 1, ..., len(order)-1) in order" # Must have same length assert all([len(block_shape) == len(list_like) for list_like in [shape, strides, offsets, order]]), \ "Expected shape/strides/offsets/block_shape to have the same length" # Build value, the type is: # `pointer_type<blocked<shape, element_type>>` in Python # `tt.ptr<tensor<shape, element_type>>` in MLIR handle = builder.create_make_block_ptr(base.handle, shape, strides, offsets, block_shape, order) return tl.tensor(handle, tl.pointer_type(tl.block_type(base.type.element_ty, block_shape))) def advance(base: tl.tensor, offsets, builder: ir.builder) -> tl.tensor: # Convert dynamic offsets to IR values offsets = _convert_to_ir_values(builder, offsets, require_i64=False) # Advanced block pointer type is the same as before return tl.tensor(builder.create_advance(base.handle, offsets), base.type)
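# Illustrative sketch (editor's addition, not part of the original module):
# user code never calls these `semantic` helpers directly; `tl.arange`,
# `tl.load`, `tl.store`, etc. forward to them with an implicit `_builder`.
# A minimal kernel that bottoms out in `arange`, `add`, `_load_legacy` and
# `_store_legacy` above:
#
#     import triton
#     import triton.language as tl
#
#     @triton.jit
#     def copy_kernel(src_ptr, dst_ptr, n, BLOCK: tl.constexpr):
#         offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
#         x = tl.load(src_ptr + offs, mask=offs < n)
#         tl.store(dst_ptr + offs, x, mask=offs < n)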
triton-main/python/triton/language/core.py
from __future__ import annotations from contextlib import contextmanager from enum import Enum from functools import wraps from typing import Callable, List, Sequence, TypeVar from .._C.libtriton.triton import ir from ..runtime.jit import jit from . import math, semantic T = TypeVar('T') TRITON_MAX_TENSOR_NUMEL = 131072 TRITON_BUILTIN = "__triton_builtin__" def builtin(fn: T) -> T: """Mark a function as a builtin.""" assert callable(fn) @wraps(fn) def wrapper(*args, **kwargs): if "_builder" not in kwargs or kwargs["_builder"] is None: raise ValueError( "Did you forget to add @triton.jit ? " "(`_builder` argument must be provided outside of JIT functions.)" ) return fn(*args, **kwargs) setattr(wrapper, TRITON_BUILTIN, True) return wrapper def is_builtin(fn) -> bool: """Is this a registered triton builtin function?""" return getattr(fn, TRITON_BUILTIN, False) def _to_tensor(x, builder): if isinstance(x, bool): return tensor(builder.get_int1(x), int1) # Note: compile-time const integers are represented by unsigned values elif isinstance(x, int): if -2**31 <= x < 2**31: return tensor(builder.get_int32(x), int32) elif 2**31 <= x < 2**32: return tensor(builder.get_int32(x), uint32) elif -2**63 <= x < 2**63: return tensor(builder.get_int64(x), int64) elif 2**63 <= x < 2**64: return tensor(builder.get_int64(x), uint64) else: raise RuntimeError(f'Nonrepresentable integer {x}.') elif isinstance(x, float): min_float32 = 2 ** -126 max_float32 = (2 - 2**-23) * 2**127 abs_x = __builtins__['abs'](x) if abs_x == float("inf") or\ abs_x == 0.0 or \ x != x or \ min_float32 <= abs_x <= max_float32: return tensor(builder.get_fp32(x), float32) else: return tensor(builder.get_fp64(x), float64) elif isinstance(x, constexpr): return _to_tensor(x.value, builder) elif isinstance(x, tensor): return x assert False, f"cannot convert {x} of type {type(x)} to tensor" class dtype: SINT_TYPES = ['int8', 'int16', 'int32', 'int64'] UINT_TYPES = ['int1', 'uint8', 'uint16', 'uint32', 'uint64'] FP_TYPES = ['fp8e4b15', 'fp8e4', 'fp8e5', 'fp16', 'bf16', 'fp32', 'fp64'] STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64'] OTHER_TYPES = ['void'] class SIGNEDNESS(Enum): SIGNED = 0 UNSIGNED = 1 def __init__(self, name): self.name = name assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name if name in dtype.SINT_TYPES: self.int_signedness = dtype.SIGNEDNESS.SIGNED self.int_bitwidth = int(name.split('int')[-1]) self.primitive_bitwidth = self.int_bitwidth elif name in dtype.UINT_TYPES: self.int_signedness = dtype.SIGNEDNESS.UNSIGNED self.int_bitwidth = int(name.split('int')[-1]) self.primitive_bitwidth = self.int_bitwidth elif name in dtype.FP_TYPES: if name == 'fp8e4b15': self.fp_mantissa_width = 3 self.primitive_bitwidth = 8 self.exponent_bias = 15 elif name == 'fp8e4': self.fp_mantissa_width = 3 self.primitive_bitwidth = 8 self.exponent_bias = 7 elif name == 'fp8e5': self.fp_mantissa_width = 2 self.primitive_bitwidth = 8 self.exponent_bias = 15 elif name == 'fp16': self.fp_mantissa_width = 10 self.primitive_bitwidth = 16 self.exponent_bias = 15 elif name == 'bf16': self.fp_mantissa_width = 7 self.primitive_bitwidth = 16 self.exponent_bias = 127 elif name == 'fp32': self.fp_mantissa_width = 23 self.primitive_bitwidth = 32 self.exponent_bias = 127 elif name == 'fp64': self.fp_mantissa_width = 53 self.primitive_bitwidth = 64 self.exponent_bias = 1023 else: raise RuntimeError(f'Unsupported floating-point type {name}') elif name == 'void': self.primitive_bitwidth = 0 def is_fp8(self): return 'fp8' 
in self.name def is_fp8e4(self): return self.name == 'fp8e4' def is_fp8e4b15(self): return self.name == 'fp8e4b15' def is_fp16(self): return self.name == 'fp16' def is_bf16(self): return self.name == 'bf16' def is_fp32(self): return self.name == 'fp32' def is_fp64(self): return self.name == 'fp64' def is_int1(self): return self.name == 'int1' def is_int8(self): return self.name == 'int8' def is_int16(self): return self.name == 'int16' def is_int32(self): return self.name == 'int32' def is_int64(self): return self.name == 'int64' def is_uint8(self): return self.name == 'uint8' def is_uint16(self): return self.name == 'uint16' def is_uint32(self): return self.name == 'uint32' def is_uint64(self): return self.name == 'uint64' def is_floating(self): return self.name in dtype.FP_TYPES def is_standard_floating(self): return self.name in dtype.STANDARD_FP_TYPES def is_int_signed(self): return self.name in dtype.SINT_TYPES def is_int_unsigned(self): return self.name in dtype.UINT_TYPES def is_int(self): return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES def is_bool(self): return self.is_int1() @staticmethod def is_void(): raise RuntimeError("Not implemented") @staticmethod def is_block(): return False @staticmethod def is_ptr(): return False def __eq__(self, other: dtype): if not isinstance(other, dtype): return False return self.name == other.name def __ne__(self, other: dtype): return not self.__eq__(other) def __hash__(self): return hash((self.name,)) @property def scalar(self): return self def to_ir(self, builder: ir.builder) -> ir.type: if self.name == 'void': return builder.get_void_ty() elif self.name == 'int1': return builder.get_int1_ty() elif self.name in ('int8', 'uint8'): return builder.get_int8_ty() elif self.name in ('int16', 'uint16'): return builder.get_int16_ty() elif self.name in ('int32', 'uint32'): return builder.get_int32_ty() elif self.name in ('int64', 'uint64'): return builder.get_int64_ty() elif self.name == 'fp8e5': return builder.get_fp8e5_ty() elif self.name == 'fp8e4': return builder.get_fp8e4_ty() elif self.name == 'fp8e4b15': return builder.get_fp8e4b15_ty() elif self.name == 'fp16': return builder.get_half_ty() elif self.name == 'bf16': return builder.get_bf16_ty() elif self.name == 'fp32': return builder.get_float_ty() elif self.name == 'fp64': return builder.get_double_ty() raise ValueError(f'failed to convert {self} to ir type') def __str__(self): return self.name @property def cache_key_part(self) -> str: """See cache_key_part() in triton.cc.""" return self.name def __repr__(self): return f'triton.language.{self.name}' class pointer_type(dtype): def __init__(self, element_ty: dtype, address_space: int = 1): if not isinstance(element_ty, dtype): raise TypeError(f'element_ty is a {type(element_ty).__name__}.') self.element_ty = element_ty self.address_space = address_space self.name = self.__str__() def to_ir(self, builder: ir.builder) -> ir.pointer_type: return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1) def __str__(self): return f'pointer<{self.element_ty}>' def __repr__(self): return self.__str__() def is_ptr(self): return True def __eq__(self, other: pointer_type) -> bool: if not isinstance(other, pointer_type): return False return self.element_ty == other.element_ty and self.address_space == other.address_space def __ne__(self, other: pointer_type) -> bool: return not self.__eq__(other) @property def scalar(self): return self class block_type(dtype): def __init__(self, element_ty: dtype, shape: List): self.element_ty = element_ty # Note that
block_type's shape is a list of int # while tensor's shape is a list of constexpr. # shape can be empty ([]) when an input is a 0D tensor. if not shape: raise TypeError('0d block_type is forbidden') if isinstance(shape[0], constexpr): shape = [s.value for s in shape] self.shape = shape self.numel = 1 for s in self.shape: self.numel *= s if self.numel > TRITON_MAX_TENSOR_NUMEL: raise ValueError(f"numel ({self.numel}) exceeds triton maximum tensor numel ({TRITON_MAX_TENSOR_NUMEL})") self.name = self.__str__() def to_ir(self, builder: ir.builder) -> ir.block_type: return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape) def __str__(self): return f'<{self.shape}, {self.element_ty}>' def __repr__(self): return self.__str__() def is_block(self): return True def get_block_shapes(self) -> List[int]: return self.shape def __eq__(self, other: block_type) -> bool: if not isinstance(other, block_type): return False return self.element_ty == other.element_ty and self.shape == other.shape def __ne__(self, other: block_type) -> bool: return not self.__eq__(other) @property def scalar(self): return self.element_ty class function_type(dtype): def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None: self.ret_types = ret_types self.param_types = param_types def __str__(self): return f'fn ({self.param_types}) -> {self.ret_types}' def to_ir(self, builder: ir.builder): ir_param_types = [ty.to_ir(builder) for ty in self.param_types] ret_types = [ret_type.to_ir(builder) for ret_type in self.ret_types] return builder.get_function_ty(ir_param_types, ret_types) # scalar types void = dtype('void') int1 = dtype('int1') int8 = dtype('int8') int16 = dtype('int16') int32 = dtype('int32') int64 = dtype('int64') uint8 = dtype('uint8') uint16 = dtype('uint16') uint32 = dtype('uint32') uint64 = dtype('uint64') float8e5 = dtype('fp8e5') float8e4 = dtype('fp8e4') float8e4b15 = dtype('fp8e4b15') float16 = dtype('fp16') bfloat16 = dtype('bf16') float32 = dtype('fp32') float64 = dtype('fp64') # pointer types pi32_t = pointer_type(int32) # ----------------------- # constexpr # ----------------------- class constexpr: """ This class is used to store a value that is known at compile-time. 
""" def __init__(self, value): if isinstance(value, constexpr): self.value = value.value else: self.value = value def __repr__(self) -> str: return f"constexpr[{self.value}]" def __index__(self): return self.value def __add__(self, other): return constexpr(self.value + other.value) def __radd__(self, other): return constexpr(other.value + self.value) def __sub__(self, other): return constexpr(self.value - other.value) def __rsub__(self, other): return constexpr(other.value - self.value) def __mul__(self, other): return constexpr(self.value * other.value) def __mod__(self, other): return constexpr(self.value % other.value) def __rmul__(self, other): return constexpr(other.value * self.value) def __truediv__(self, other): return constexpr(self.value / other.value) def __rtruediv__(self, other): return constexpr(other.value / self.value) def __floordiv__(self, other): return constexpr(self.value // other.value) def __rfloordiv__(self, other): return constexpr(other.value // self.value) def __gt__(self, other): return constexpr(self.value > other.value) def __rgt__(self, other): return constexpr(other.value > self.value) def __ge__(self, other): return constexpr(self.value >= other.value) def __rge__(self, other): return constexpr(other.value >= self.value) def __lt__(self, other): return constexpr(self.value < other.value) def __rlt__(self, other): return constexpr(other.value < self.value) def __le__(self, other): return constexpr(self.value <= other.value) def __rle__(self, other): return constexpr(other.value <= self.value) def __eq__(self, other): return constexpr(self.value == other.value) def __ne__(self, other): return constexpr(self.value != other.value) def __bool__(self): return bool(self.value) def __neg__(self): return constexpr(-self.value) def __and__(self, other): return constexpr(self.value & other.value) def logical_and(self, other): return constexpr(self.value and other.value) def __or__(self, other): return constexpr(self.value | other.value) def __xor__(self, other): return constexpr(self.value ^ other.value) def logical_or(self, other): return constexpr(self.value or other.value) def __pos__(self): return constexpr(+self.value) def __invert__(self): return constexpr(~self.value) def __pow__(self, other): return constexpr(self.value ** other.value) def __rshift__(self, other): return constexpr(self.value >> other.value) def __lshift__(self, other): return constexpr(self.value << other.value) def __not__(self): return constexpr(not self.value) def __call__(self, *args, **kwds): return self.value(*args, **kwds) class tensor: def __init__(self, handle, type: dtype): # IR handle self.handle = handle # Block shape self.shape = (1, ) if type.is_block(): self.shape = type.shape self.numel = 1 for s in self.shape: self.numel *= s self.numel = constexpr(self.numel) self.type = type # Tensor type (can be block_type) # Following the practice in pytorch, dtype is scalar type self.dtype = type.scalar self.shape = [constexpr(s) for s in self.shape] def __str__(self) -> str: # ex. 
"float32[3,4]" return str(self.dtype) + '[' + ','.join(str(s) for s in self.shape) + ']' @builtin def __add__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.add(self, other, _builder) def __radd__(self, other, _builder=None): return self.__add__(other, _builder=_builder) @builtin def __sub__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.sub(self, other, _builder) def __rsub__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.sub(other, self, _builder) @builtin def __mul__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.mul(self, other, _builder) def __rmul__(self, other, _builder=None): return self.__mul__(other, _builder=_builder) @builtin def __truediv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.truediv(self, other, _builder) def __rtruediv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.truediv(other, self, _builder) @builtin def __floordiv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.floordiv(self, other, _builder) @builtin def __rfloordiv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.floordiv(other, self, _builder) @builtin def __mod__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.mod(self, other, _builder) @builtin def __rmod__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.mod(other, self, _builder) # unary operators @builtin def __neg__(self, _builder=None): return semantic.minus(self, _builder) @builtin def __invert__(self, _builder=None): return semantic.invert(self, _builder) # bitwise operators @builtin def __and__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.and_(self, other, _builder) @builtin def __rand__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.and_(other, self, _builder) @builtin def __or__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.or_(self, other, _builder) @builtin def __ror__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.or_(other, self, _builder) @builtin def __xor__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.xor_(self, other, _builder) @builtin def __rxor__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.xor_(other, self, _builder) @builtin def __lshift__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.shl(self, other, _builder) @builtin def __rlshift__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.shl(other, self, _builder) @builtin def __rshift__(self, other, _builder=None): other = _to_tensor(other, _builder) if self.dtype.is_int_signed(): return semantic.ashr(self, other, _builder) else: return semantic.lshr(self, other, _builder) @builtin def __rrshift__(self, other, _builder=None): other = _to_tensor(other, _builder) if self.dtype.is_int_signed(): return semantic.ashr(other, self, _builder) else: return semantic.lshr(other, self, _builder) # comparison operators # > @builtin def __gt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_than(self, other, _builder) @builtin def __rgt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_than(other, self, 
_builder) # >= @builtin def __ge__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_equal(self, other, _builder) @builtin def __rge__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_equal(other, self, _builder) # < @builtin def __lt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_than(self, other, _builder) @builtin def __rlt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_than(other, self, _builder) # <= @builtin def __le__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_equal(self, other, _builder) @builtin def __rle__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_equal(other, self, _builder) # == @builtin def __eq__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.equal(self, other, _builder) @builtin def __ne__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.not_equal(self, other, _builder) @builtin def logical_and(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.logical_and(self, other, _builder) @builtin def logical_or(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.logical_or(self, other, _builder) # note: __not__ isn't actually a magic method in python # but it's ok because our ASTVisitor handles it @builtin def __not__(self, _builder=None): return semantic.not_(self, _builder) @builtin def __getitem__(self, slices, _builder=None): if isinstance(slices, slice): slices = [slices] ret = self for dim, sl in enumerate(slices): if isinstance(sl, constexpr) and sl.value is None: ret = semantic.expand_dims(ret, dim, _builder) elif isinstance(sl, slice) and sl.start is None and sl.stop is None and sl.step is None: pass else: assert False, f"unsupported tensor index: {sl}" return ret @property def T(self): assert False, "Transposition must be created by the AST Visitor" @builtin def to(self, dtype, bitcast=False, _builder=None): if isinstance(bitcast, constexpr): bitcast = bitcast.value if bitcast: return semantic.bitcast(self, dtype, _builder) return semantic.cast(self, dtype, _builder) # ----------------------- # SPMD Programming Model # ----------------------- def _constexpr_to_value(v): if isinstance(v, constexpr): return v.value return v @builtin def program_id(axis, _builder=None): """ Returns the id of the current program instance along the given :code:`axis`. :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. :type axis: int """ # if axis == -1: # pid0 = program_id(0, _builder) # pid1 = program_id(1, _builder) # pid2 = program_id(2, _builder) # npg0 = num_programs(0, _builder) # npg1 = num_programs(0, _builder) # return pid0 + pid1*npg0 + pid2*npg0*npg1 axis = _constexpr_to_value(axis) return semantic.program_id(axis, _builder) @builtin def num_programs(axis, _builder=None): """ Returns the number of program instances launched along the given :code:`axis`. :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. :type axis: int """ axis = _constexpr_to_value(axis) return semantic.num_programs(axis, _builder) # ----------------------- # Block Initialization # ----------------------- @builtin def arange(start, end, _builder=None): """ Returns contiguous values within the left-closed and right-open interval [:code:`start`, :code:`end`). 
\ End - Start must be less than or equal to TRITON_MAX_TENSOR_NUMEL = 131072 :param start: Start of the interval. Must be a power of two. :type start: int32 :param end: End of the interval. Must be a power of two > start. :type end: int32 """ start = _constexpr_to_value(start) end = _constexpr_to_value(end) return semantic.arange(start, end, _builder) def _shape_check_impl(shape): shape = _constexpr_to_value(shape) for i, d in enumerate(shape): if not isinstance(d, constexpr): raise TypeError(f"Shape element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`") return [_constexpr_to_value(x) for x in shape] @builtin def full(shape, value, dtype, _builder=None): """ Returns a tensor filled with the scalar value for the given :code:`shape` and :code:`dtype`. :param shape: Shape of the new array, e.g., (8, 16) or (8, ) :param value: A scalar value to fill the array with :type shape: tuple of ints :param dtype: Data-type of the new array, e.g., :code:`tl.float16` :type dtype: DType """ shape = _shape_check_impl(shape) value = _constexpr_to_value(value) dtype = _constexpr_to_value(dtype) return semantic.full(shape, value, dtype, _builder) # ----------------------- # Shape Manipulation # ----------------------- @builtin def broadcast(input, other, _builder=None): """ Tries to broadcast the two given blocks to a common compatible shape. :param input: The first input tensor. :type input: Block :param other: The second input tensor. :type other: Block """ return semantic.broadcast_impl_value(input, other, _builder) @builtin def broadcast_to(input, shape, _builder=None): """ Tries to broadcast the given tensor to a new :code:`shape`. :param input: The input tensor. :type input: Block :param shape: The desired shape. :type shape: Tuple[int] """ shape = _shape_check_impl(shape) return semantic.broadcast_impl_shape(input, shape, _builder) @builtin def trans(input, _builder=None): return semantic.trans(input, _builder) @builtin def cat(input, other, can_reorder=False, _builder=None): """ Concatenate the given blocks :param input: The first input tensor. :type input: :param other: The second input tensor. :type other: :param can_reorder: Compiler hint. If true, the compiler is allowed to reorder elements while concatenating inputs. Only use if the order does not matter (e.g., result is only used in reduction ops) """ return semantic.cat(input, other, can_reorder, _builder) @builtin def view(input, shape, _builder=None): """ Returns a tensor with the same elements as `input` but a different shape. The order of the elements may not be preserved. :param input: The input tensor. :type input: :param shape: The desired shape. :type shape: Tuple[int] """ shape = _shape_check_impl(shape) return semantic.view(input, shape, _builder) @builtin def reshape(input, shape, _builder=None): shape = _shape_check_impl(shape) return semantic.reshape(input, shape, _builder) def _wrap_axis(axis, ndim): if not (-ndim <= axis < ndim): raise ValueError(f"invalid axis {axis}. Expected {-ndim} <= axis < {ndim}") return axis if axis >= 0 else axis + ndim @builtin def expand_dims(input, axis, _builder=None): """ Expand the shape of a tensor by inserting new length-1 dimensions. Axis indices are with respect to the resulting tensor, so ``result.shape[axis]`` will be 1 for each axis. :param input: The input tensor.
:type input: tl.tensor :param axis: The indices to add new axes :type axis: int | Sequence[int] """ axis = _constexpr_to_value(axis) axes = list(axis) if isinstance(axis, Sequence) else [axis] new_ndim = len(input.shape) + len(axes) axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes] if len(set(axes)) != len(axes): raise ValueError(f"expand_dims received duplicate axes, normalized axes = {axes}") ret = input for a in sorted(axes): ret = semantic.expand_dims(ret, a, _builder) return ret # ----------------------- # Linear Algebra # ----------------------- @builtin def dot(input, other, allow_tf32=True, out_dtype=float32, _builder=None): """ Returns the matrix product of two blocks. The two blocks must be two-dimensional and have compatible inner dimensions. :param input: The first tensor to be multiplied. :type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} :param other: The second tensor to be multiplied. :type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} """ allow_tf32 = _constexpr_to_value(allow_tf32) out_dtype = _constexpr_to_value(out_dtype) return semantic.dot(input, other, allow_tf32, out_dtype, _builder) # ----------------------- # Non-Atomic Memory Operations # ----------------------- @builtin def load(pointer, mask=None, other=None, boundary_check=tuple(), padding_option="", cache_modifier="", eviction_policy="", volatile=False, _builder=None): """ Return a tensor of data whose values are loaded from memory at locations defined by `pointer`: (1) `pointer` could be a single element pointer, then a scalar will be loaded - `mask` and `other` must be scalar too - `other` is implicitly typecast to `pointer.dtype.element_ty` - `boundary_check` and `padding_option` must be empty (2) `pointer` could be an element-wise tensor of pointers, in which case: - `mask` and `other` are implicitly broadcast to `pointer.shape` - `other` is implicitly typecast to `pointer.dtype.element_ty` - `boundary_check` and `padding_option` must be empty (3) `pointer` could be a block pointer defined by `make_block_ptr`, in which case: - `mask` and `other` must be None - `boundary_check` and `padding_option` can be specified to control the behavior of out-of-bound access :param pointer: Pointer to the data to be loaded :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` :param mask: if `mask[idx]` is false, do not load the data at address `pointer[idx]` (must be `None` with block pointers) :type mask: Block of `triton.int1`, optional :param other: if `mask[idx]` is false, return `other[idx]` :type other: Block, optional :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check :type boundary_check: tuple of ints, optional :param padding_option: should be one of {"", "zero", "nan"}, the padding to apply for out-of-bound accesses :param cache_modifier: changes cache option in NVIDIA PTX :type cache_modifier: str, optional :param eviction_policy: changes eviction policy in NVIDIA PTX :type eviction_policy: str, optional :param volatile: changes volatile option in NVIDIA PTX :type volatile: bool, optional """ # `mask` and `other` can be constexpr if _constexpr_to_value(mask) is not None: mask = _to_tensor(mask, _builder) if _constexpr_to_value(other) is not None: other = _to_tensor(other, _builder) padding_option = _constexpr_to_value(padding_option) cache_modifier = _constexpr_to_value(cache_modifier) eviction_policy = _constexpr_to_value(eviction_policy) volatile =
_constexpr_to_value(volatile) return semantic.load(pointer, mask, other, boundary_check, padding_option, cache_modifier, eviction_policy, volatile, _builder) @builtin def store(pointer, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="", _builder=None): """ Store a tensor of data into memory locations defined by `pointer`: (1) `pointer` could be a single element pointer, then a scalar will be stored - `mask` must be scalar too - `boundary_check` and `padding_option` must be empty (2) `pointer` could be element-wise tensor of pointers, in which case: - `mask` is implicitly broadcast to `pointer.shape` - `boundary_check` must be empty (3) or `pointer` could be a block pointer defined by `make_block_ptr`, in which case: - `mask` must be None - `boundary_check` can be specified to control the behavior of out-of-bound access `value` is implicitly broadcast to `pointer.shape` and typecast to `pointer.dtype.element_ty`. :param pointer: The memory location where the elements of `value` are stored :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` :param value: The tensor of elements to be stored :type value: Block :param mask: If `mask[idx]` is false, do not store `value[idx]` at `pointer[idx]` :type mask: Block of triton.int1, optional :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check :type boundary_check: tuple of ints, optional :param cache_modifier: changes cache option in NVIDIA PTX :type cache_modifier: str, optional :param eviction_policy: changes eviction policy in NVIDIA PTX :type eviction_policy: str, optional """ # `value` can be constexpr value = _to_tensor(value, _builder) if _constexpr_to_value(mask) is not None: mask = _to_tensor(mask, _builder) cache_modifier = _constexpr_to_value(cache_modifier) eviction_policy = _constexpr_to_value(eviction_policy) return semantic.store(pointer, value, mask, boundary_check, cache_modifier, eviction_policy, _builder) @builtin def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None): """ Returns a pointer to a block in a parent tensor :param base: The base pointer to the parent tensor :param shape: The shape of the parent tensor :param strides: The strides of the parent tensor :param offsets: The offsets to the block :param block_shape: The shape of the block :param order: The order of the original data format """ return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder) @builtin def advance(base: tensor, offsets, _builder=None): """ Advance a block pointer :param base: the block pointer to advance :param offsets: the offsets to advance, a tuple by dimension """ return semantic.advance(base, offsets, _builder) # ----------------------- # Atomic Memory Operations # ----------------------- def _add_atomic_docstr(name: str) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ Performs an atomic {name} at the memory location specified by :code:`pointer`. Return the data stored at :code:`pointer` before the atomic operation. :param pointer: The memory locations to compare-and-swap. :type pointer: Block of dtype=triton.PointerDType :param cmp: The values expected to be found in the atomic object :type cmp: Block of dtype=`pointer.dtype.element_ty` :param val: The values to copy in case the expected value matches the contained value. 
:type val: Block of dtype=`pointer.dtype.element_ty` """ func.__doc__ = docstr.format(name=name) return func return _decorator @builtin @_add_atomic_docstr("compare-and-swap") def atomic_cas(pointer, cmp, val, sem=None, _builder=None): cmp = _to_tensor(cmp, _builder) val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_cas(pointer, cmp, val, sem, _builder) @builtin @_add_atomic_docstr("exchange") def atomic_xchg(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_xchg(pointer, val, mask, sem, _builder) @builtin @_add_atomic_docstr("add") def atomic_add(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_add(pointer, val, mask, sem, _builder) @builtin @_add_atomic_docstr("max") def atomic_max(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_max(pointer, val, mask, sem, _builder) @builtin @_add_atomic_docstr("min") def atomic_min(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_min(pointer, val, mask, sem, _builder) @builtin @_add_atomic_docstr("logical and") def atomic_and(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_and(pointer, val, mask, sem, _builder) @builtin @_add_atomic_docstr("logical or") def atomic_or(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_or(pointer, val, mask, sem, _builder) @builtin @_add_atomic_docstr("logical xor") def atomic_xor(pointer, val, mask=None, sem=None, _builder=None): val = _to_tensor(val, _builder) sem = _constexpr_to_value(sem) return semantic.atomic_xor(pointer, val, mask, sem, _builder) # ----------------------- # Conditioning # ----------------------- @builtin def where(condition, x, y, _builder=None): """ Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`. Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`. If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead. The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`. :code:`x` and :code:`y` must have the same data type. :param condition: When True (nonzero), yield x, otherwise yield y. :type condition: Block of triton.bool :param x: values selected at indices where condition is True. :param y: values selected at indices where condition is False. """ condition = _to_tensor(condition, _builder) x = _to_tensor(x, _builder) y = _to_tensor(y, _builder) return semantic.where(condition, x, y, _builder) # ----------------------- # Math # ----------------------- @builtin def umulhi(x, y, _builder=None): x = _to_tensor(x, _builder) y = _to_tensor(y, _builder) return semantic.umulhi(x, y, _builder) @builtin def fdiv(x, y, ieee_rounding=False, _builder=None): ieee_rounding = _constexpr_to_value(ieee_rounding) return semantic.fdiv(x, y, ieee_rounding, _builder) def _add_math_1arg_docstr(name: str) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ Computes the element-wise {name} of :code:`x`. 
:param x: the input values :type x: Block """ func.__doc__ = docstr.format(name=name) return func return _decorator @builtin @_add_math_1arg_docstr("exponential") def exp(x, _builder=None): return semantic.exp(x, _builder) @builtin @_add_math_1arg_docstr("natural logarithm") def log(x, _builder=None): return semantic.log(x, _builder) @builtin @_add_math_1arg_docstr("cosine") def cos(x, _builder=None): return semantic.cos(x, _builder) @builtin @_add_math_1arg_docstr("sine") def sin(x, _builder=None): return semantic.sin(x, _builder) @builtin @_add_math_1arg_docstr("square root") def sqrt(x, _builder=None): return semantic.sqrt(x, _builder) @builtin @_add_math_1arg_docstr("absolute value") def abs(x, _builder=None): return semantic.abs(x, _builder) # ----------------------- # Reductions # ----------------------- def _add_reduction_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` :param input: the input values :param axis: the dimension along which the reduction should be done""" if return_indices_arg is not None: docstr += f""" :param {return_indices_arg}: if true, return index corresponding to the {name} value""" if tie_break_arg is not None: docstr += f""" :param {tie_break_arg}: if true, return the left-most indices in case of ties for values that aren't NaN""" func.__doc__ = docstr.format(name=name) return func return _decorator @contextmanager def _insertion_guard(builder): ip = builder.get_insertion_point() yield builder.restore_insertion_point(ip) @builtin def reduce(input, axis, combine_fn, _builder=None, _generator=None): """Applies the combine_fn to all elements in :code:`input` tensors along the provided :code:`axis` :param input: the input tensor, or tuple of tensors :param axis: the dimension along which the reduction should be done :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) """ if isinstance(input, tensor): return reduce((input,), axis, combine_fn, _builder=_builder, _generator=_generator)[0] def make_combine_region(reduce_op): in_scalar_tys = [t.type.scalar for t in input] prototype = function_type(in_scalar_tys, in_scalar_tys * 2) region = reduce_op.get_region(0) with _insertion_guard(_builder): param_types = [ty.to_ir(_builder) for ty in prototype.param_types] block = _builder.create_block_with_parent(region, param_types) args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] results = _generator.call_JitFunction(combine_fn, args, kwargs={}) if isinstance(results, tensor): handles = [results.handle] else: handles = [r.handle for r in results] _builder.create_reduce_ret(*handles) if axis is not None: axis = _constexpr_to_value(axis) return semantic.reduction(input, axis, make_combine_region, _builder) @builtin def _promote_reduction_input(t, _builder=None): scalar_ty = t.type.scalar # input is extended to 32-bits if necessary # this increases numerical accuracy and can be done pretty much for free # on GPUs if scalar_ty.is_int() and scalar_ty.int_bitwidth < 32: return t.to(int32, _builder=_builder) # hardware doesn't support FMAX, FMIN, CMP for bfloat16 if scalar_ty is bfloat16: return t.to(float32, _builder=_builder) return t @builtin def _reduce_with_indices(input, axis, combine_fn, _builder=None, _generator=None): axis = _constexpr_to_value(axis) n = input.shape[axis] index = arange(0, n, 
_builder=_builder) if len(input.shape) > 1: # Broadcast index across the non-reduced axes axes_to_expand = [constexpr(d) for d in range(len(input.shape))] del axes_to_expand[axis] index = expand_dims(index, axes_to_expand, _builder=_builder) index = broadcast_to(index, input.shape, _builder=_builder) rvalue, rindices = reduce((input, index), axis, combine_fn, _builder=_builder, _generator=_generator) return rvalue, rindices @jit def minimum(x, y): """ Computes the element-wise minimum of :code:`x` and :code:`y`. :param x: the first input tensor :type x: Block :param y: the second input tensor :type y: Block """ return where(x < y, x, y) @jit def maximum(x, y): """ Computes the element-wise maximum of :code:`x` and :code:`y`. :param x: the first input tensor :type x: Block :param y: the second input tensor :type y: Block """ return where(x > y, x, y) # max and argmax @jit def _argmax_combine(value1, index1, value2, index2, tie_break_left): if tie_break_left: tie = value1 == value2 and index1 < index2 else: tie = False gt = value1 > value2 or tie v_ret = where(gt, value1, value2) i_ret = where(gt, index1, index2) return v_ret, i_ret @jit def _argmax_combine_tie_break_left(value1, index1, value2, index2): return _argmax_combine(value1, index1, value2, index2, True) @jit def _argmax_combine_tie_break_fast(value1, index1, value2, index2): return _argmax_combine(value1, index1, value2, index2, False) @jit def _fast_max(x, y): return math.max(x, y) @jit @_add_reduction_docstr("maximum", return_indices_arg="return_indices", tie_break_arg="return_indices_tie_break_left") def max(input, axis=None, return_indices=False, return_indices_tie_break_left=True): input = _promote_reduction_input(input) if return_indices: if return_indices_tie_break_left: return _reduce_with_indices(input, axis, _argmax_combine_tie_break_left) else: return _reduce_with_indices(input, axis, _argmax_combine_tie_break_fast) else: if constexpr(input.dtype.primitive_bitwidth) < 32: if constexpr(input.dtype.is_floating()): input = input.to(float32) else: assert input.dtype.is_int() input = input.to(int32) return reduce(input, axis, _fast_max) @jit @_add_reduction_docstr("maximum index", tie_break_arg="tie_break_left") def argmax(input, axis, tie_break_left=True): (_, ret) = max(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left) return ret # min and argmin @jit def _argmin_combine(value1, index1, value2, index2, tie_break_left): if tie_break_left: tie = value1 == value2 and index1 < index2 else: tie = False lt = value1 < value2 or tie value_ret = where(lt, value1, value2) index_ret = where(lt, index1, index2) return value_ret, index_ret @jit def _argmin_combine_tie_break_left(value1, index1, value2, index2): return _argmin_combine(value1, index1, value2, index2, True) @jit def _argmin_combine_tie_break_fast(value1, index1, value2, index2): return _argmin_combine(value1, index1, value2, index2, False) @jit def _fast_min(x, y): return math.min(x, y) @jit @_add_reduction_docstr("minimum", return_indices_arg="return_indices", tie_break_arg="return_indices_tie_break_left") def min(input, axis=None, return_indices=False, return_indices_tie_break_left=True): input = _promote_reduction_input(input) if return_indices: if return_indices_tie_break_left: return _reduce_with_indices(input, axis, _argmin_combine_tie_break_left) else: return _reduce_with_indices(input, axis, _argmin_combine_tie_break_fast) else: if constexpr(input.dtype.primitive_bitwidth) < 32: if constexpr(input.dtype.is_floating()): input = input.to(float32) else: assert input.dtype.is_int() input = input.to(int32) return reduce(input, axis, _fast_min) @jit @_add_reduction_docstr("minimum index", tie_break_arg="tie_break_left") def argmin(input, axis, tie_break_left=True): _, ret = min(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left) return ret @jit def _sum_combine(a, b): return a + b # sum @jit @_add_reduction_docstr("sum") def sum(input, axis=None): input = _promote_reduction_input(input) return reduce(input, axis, _sum_combine) @jit def _xor_combine(a, b): return a ^ b # xor sum @builtin @_add_reduction_docstr("xor sum") def xor_sum(input, axis=None, _builder=None, _generator=None): scalar_ty = input.type.scalar if not scalar_ty.is_int(): raise ValueError("xor_sum only supported for integers") input = _promote_reduction_input(input, _builder=_builder) return reduce(input, axis, _xor_combine, _builder=_builder, _generator=_generator) # ----------------------- # Scans # ----------------------- def _add_scan_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` :param input: the input values :param axis: the dimension along which the scan should be done""" func.__doc__ = docstr.format(name=name) return func return _decorator @builtin def associative_scan(input, axis, combine_fn, _builder=None, _generator=None): """Applies the combine_fn to each element with a carry in :code:`input` tensors along the provided :code:`axis` and updates the carry :param input: the input tensor, or tuple of tensors :param axis: the dimension along which the scan should be done :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) """ if isinstance(input, tensor): return associative_scan((input,), axis, combine_fn, _builder=_builder, _generator=_generator)[0] def make_combine_region(scan_op): in_scalar_tys = [t.type.scalar for t in input] prototype = function_type(in_scalar_tys, in_scalar_tys * 2) region = scan_op.get_region(0) with _insertion_guard(_builder): param_types = [ty.to_ir(_builder) for ty in prototype.param_types] block = _builder.create_block_with_parent(region, param_types) args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] results = _generator.call_JitFunction(combine_fn, args, kwargs={}) if isinstance(results, tensor): handles = [results.handle] else: handles = [r.handle for r in results] _builder.create_scan_ret(*handles) axis = _constexpr_to_value(axis) return semantic.associative_scan(input, axis, make_combine_region, _builder) # cumsum @jit @_add_scan_docstr("cumsum") def cumsum(input, axis=0): # todo rename this to a generic function name input = _promote_reduction_input(input) return associative_scan(input, axis, _sum_combine) # cumprod @jit def _prod_combine(a, b): return a * b @jit @_add_scan_docstr("cumprod") def cumprod(input, axis=0): # todo rename this to a generic function name input = _promote_reduction_input(input) return associative_scan(input, axis, _prod_combine) # ----------------------- # Compiler Hint Ops # ----------------------- @builtin def debug_barrier(_builder=None): ''' Insert a barrier to synchronize all threads in a block.
''' return semantic.debug_barrier(_builder) @builtin def multiple_of(input, values, _builder=None): """ Let the compiler know that the values in :code:`input` are all multiples of :code:`values`. """ if isinstance(values, constexpr): values = [values] for i, d in enumerate(values): if not isinstance(d, constexpr): raise TypeError(f"values element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`") values = [x.value for x in values] return semantic.multiple_of(input, values) @builtin def max_contiguous(input, values, _builder=None): """ Let the compiler know that the first :code:`values` values in :code:`input` are contiguous. """ if isinstance(values, constexpr): values = [values] for i, d in enumerate(values): if not isinstance(d, constexpr): raise TypeError(f"values element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`") values = [x.value for x in values] return semantic.max_contiguous(input, values) @builtin def max_constancy(input, values, _builder=None): """ Let the compiler know that the first :code:`values` values in :code:`input` are constant. e.g. if :code:`values` is [4], then each group of 4 values in :code:`input` should all be equal, for example [0, 0, 0, 0, 1, 1, 1, 1]. """ if isinstance(values, constexpr): values = [values] for i, d in enumerate(values): if not isinstance(d, constexpr): raise TypeError(f"values element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`") values = [x.value for x in values] return semantic.max_constancy(input, values) # ----------------------- # Debugging functions # ----------------------- @builtin def static_print(*values, sep: str = " ", end: str = "\n", file=None, flush=False, _builder=None): ''' Print the values at compile time. The parameters are the same as the builtin :code:`print`. NOTE: Calling the Python builtin :code:`print` is not the same as calling this, it instead maps to :code:`device_print`, which has special requirements for the arguments. .. highlight:: python .. code-block:: python tl.static_print(f"{BLOCK_SIZE=}") ''' pass @builtin def static_assert(cond, msg="", _builder=None): ''' Assert the condition at compile time. Does not require that the :code:`TRITON_DEBUG` environment variable is set. .. highlight:: python .. code-block:: python tl.static_assert(BLOCK_SIZE == 1024) ''' pass @builtin def device_print(prefix, *args, _builder=None): ''' Print the values at runtime from the device. String formatting does not work for runtime values, so you should provide the values you want to print as arguments. The first value must be a string, all following values must be scalars or tensors. Calling the Python builtin :code:`print` is the same as calling this function, and the requirements for the arguments will match this function (not the normal requirements for :code:`print`). .. highlight:: python .. code-block:: python tl.device_print("pid", pid) print("pid", pid) :param prefix: a prefix to print before the values. This is required to be a string literal. :param args: the values to print. They can be any tensor or scalar.
''' import string prefix = _constexpr_to_value(prefix) assert isinstance(prefix, str), f"{prefix} is not a string" b_ascii = True for ch in prefix: if ch not in string.printable: b_ascii = False break assert b_ascii, f"{prefix} is not an ascii string" new_args = [] for arg in args: new_args.append(_to_tensor(arg, _builder)) return semantic.device_print(prefix, new_args, _builder) @builtin def device_assert(cond, msg="", _builder=None): ''' Assert the condition at runtime from the device. Requires that the environment variable :code:`TRITON_DEBUG` is set to a value besides :code:`0` in order for this to have any effect. Using the Python :code:`assert` statement is the same as calling this function, except that the second argument must be provided and must be a string, e.g. :code:`assert pid == 0, "pid != 0"`. The environment variable must be set for this :code:`assert` statement to have any effect. .. highlight:: python .. code-block:: python tl.device_assert(pid == 0) assert pid == 0, f"pid != 0" :param cond: the condition to assert. This is required to be a boolean tensor. :param msg: the message to print if the assertion fails. This is required to be a string literal. ''' msg = _constexpr_to_value(msg) import inspect frame = inspect.currentframe() module = inspect.getmodule(frame) # The triton function module doesn't have the name attribute. # We use this trick to find the caller. while hasattr(module, "__name__"): frame = frame.f_back module = inspect.getmodule(frame) lineno = 0 func_name = 'unknown' file_name = 'unknown' if frame is not None: func_name = frame.f_code.co_name file_name = frame.f_back.f_code.co_filename # TODO: The line number currently indicates the line # where the triton function is called but not where the # device_assert is called. Need to enhance this. lineno = frame.f_back.f_lineno return semantic.device_assert(_to_tensor(cond, _builder), msg, file_name, func_name, lineno, _builder) # ----------------------- # Iterators # ----------------------- class static_range: """ Iterator that behaves like :code:`range` but is unrolled at compile time. .. highlight:: python .. code-block:: python @triton.jit def kernel(...): for i in tl.static_range(10): ... :note: This is a special iterator used to implement similar semantics to Python's :code:`range` in the context of :code:`triton.jit` functions. In addition, it also guides the compiler to unroll the loop aggressively. :param arg1: the start value. :param arg2: the end value. :param step: the step value.
""" def __init__(self, arg1, arg2=None, step=None): assert isinstance(arg1, constexpr) if step is None: self.step = constexpr(1) else: assert isinstance(step, constexpr) self.step = step if arg2 is None: self.start = constexpr(0) self.end = arg1 else: assert isinstance(arg2, constexpr) self.start = arg1 self.end = arg2 def __iter__(self): raise RuntimeError("static_range can only be used in @triton.jit'd functions") def __next__(self): raise RuntimeError("static_range can only be used in @triton.jit'd functions") # ----------------------- # Extern functions # ----------------------- def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, is_pure: bool, _builder=None): ''' Dispatch a function to a library :param func: the function to dispatch :param lib_name: the name of the library :param lib_path: the path of the library :param args: the arguments of the function :param arg_type_symbol_dict: the type of the arguments :param ret_shape: the shape of the return value :param _builder: the builder :return: the return value of the function ''' if len(arg_type_symbol_dict) == 0: raise ValueError("arg_type_symbol_dict is empty") num_args = len(list(arg_type_symbol_dict.keys())[0]) if len(args) != num_args: raise ValueError(f"length of input args does not match." f"Expect {len(args)}, got {num_args}") arg_types = [] arg_list = [] for arg in args: if isinstance(arg, tensor): arg_types.append(arg.dtype) arg_list.append(arg.handle) else: arg_types.append(type(arg)) arg_list.append(arg) arg_types = tuple(arg_types) if arg_types not in arg_type_symbol_dict: raise ValueError(f"input arg type does not match." f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}") else: symbol = arg_type_symbol_dict[arg_types][0] ret_type = arg_type_symbol_dict[arg_types][1] if ret_shape: ret_type = block_type(ret_type, ret_shape) return tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder), is_pure), ret_type) def extern_elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, is_pure: bool, _builder=None): ''' Dispatch an elementwise function to a library :param lib_name: the name of the library :param lib_path: the path of the library :param args: the arguments of the function :param arg_type_symbol_dict: the type of the arguments :param is_pure: whether the function is pure :param _builder: the builder :return: the return value of the function ''' dispatch_args = args.copy() all_scalar = True ret_shape = None arg_types = [] for i in range(len(dispatch_args)): dispatch_args[i] = _to_tensor(dispatch_args[i], _builder) arg_types.append(dispatch_args[i].dtype) if dispatch_args[i].type.is_block(): all_scalar = False if len(arg_types) > 0: arg_types = tuple(arg_types) arithmetic_check = True # If there's a type tuple that is not supported by the library, we will do arithmetic check if arg_types in arg_type_symbol_dict: arithmetic_check = False broadcast_arg = dispatch_args[0] # Get the broadcast shape over all the arguments for i, item in enumerate(dispatch_args): _, broadcast_arg = semantic.binary_op_type_checking_impl( item, broadcast_arg, _builder, arithmetic_check=arithmetic_check) # Change the shape of each argument based on the broadcast shape for i in range(len(dispatch_args)): dispatch_args[i], _ = semantic.binary_op_type_checking_impl( dispatch_args[i], broadcast_arg, _builder, arithmetic_check=arithmetic_check) if not all_scalar: ret_shape = broadcast_arg.shape func = getattr(_builder, 
"create_extern_elementwise") return dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, is_pure, _builder) def extern(fn): """A decorator for external functions.""" return builtin(fn)
62,163
31.175983
137
py
triton
triton-main/python/triton/language/math.py
import functools import os from . import core @functools.lru_cache() def libdevice_path(): import torch third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "third_party") if torch.version.hip is None: default = os.path.join(third_party_dir, "cuda", "lib", "libdevice.10.bc") else: default = '' return os.getenv("TRITON_LIBDEVICE_PATH", default) @core.extern def clz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_clz", core.dtype("int32")), (core.dtype("int64"),): ("__nv_clzll", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def popc(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_popc", core.dtype("int32")), (core.dtype("int64"),): ("__nv_popcll", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def byte_perm(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("int32"), core.dtype("int32"), core.dtype("int32"),): ("__nv_byte_perm", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def min(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_min", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umin", core.dtype("uint32")), (core.dtype("int64"), core.dtype("int64"),): ("__nv_llmin", core.dtype("int64")), (core.dtype("uint64"), core.dtype("uint64"),): ("__nv_ullmin", core.dtype("uint64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fminf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmin", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def max(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_max", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umax", core.dtype("uint32")), (core.dtype("int64"), core.dtype("int64"),): ("__nv_llmax", core.dtype("int64")), (core.dtype("uint64"), core.dtype("uint64"),): ("__nv_ullmax", core.dtype("uint64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaxf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmax", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def mulhi(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_mulhi", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umulhi", core.dtype("uint32")), (core.dtype("int64"), core.dtype("int64"),): ("__nv_mul64hi", core.dtype("int64")), (core.dtype("uint64"), core.dtype("uint64"),): ("__nv_umul64hi", core.dtype("uint64")), }, is_pure=True, _builder=_builder) @core.extern def mul24(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_mul24", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umul24", core.dtype("uint32")), }, is_pure=True, _builder=_builder) @core.extern def brev(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_brev", core.dtype("int32")), 
(core.dtype("int64"),): ("__nv_brevll", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def sad(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("int32"), core.dtype("int32"), core.dtype("uint32"),): ("__nv_sad", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32"),): ("__nv_usad", core.dtype("uint32")), }, is_pure=True, _builder=_builder) @core.extern def abs(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_abs", core.dtype("int32")), (core.dtype("int64"),): ("__nv_llabs", core.dtype("int64")), (core.dtype("fp32"),): ("__nv_fabsf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_fabs", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def floor(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_floorf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_floor", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rcp64h(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_rcp64h", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rsqrt(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_rsqrtf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_rsqrt", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ceil(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_ceil", core.dtype("fp64")), (core.dtype("fp32"),): ("__nv_ceilf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def trunc(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_trunc", core.dtype("fp64")), (core.dtype("fp32"),): ("__nv_truncf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def exp2(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_exp2f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_exp2", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def saturatef(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_saturatef", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fma_rn(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rn", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def fma_rz(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rz", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def fma_rd(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, 
arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rd", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def fma_ru(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ru", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def fast_dividef(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fast_fdividef", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def div_rn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rn", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def div_rz(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rz", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def div_rd(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rd", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def div_ru(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_ru", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rcp_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_frcp_rn", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_drcp_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rcp_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_frcp_rz", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_drcp_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rcp_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_frcp_rd", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_drcp_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rcp_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_frcp_ru", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_drcp_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sqrt_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fsqrt_rn", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_dsqrt_rn", 
core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sqrt_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fsqrt_rz", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_dsqrt_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sqrt_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fsqrt_rd", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_dsqrt_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sqrt_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fsqrt_ru", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_dsqrt_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sqrt(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_sqrtf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_sqrt", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def add_rn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rn", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def add_rz(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rz", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def add_rd(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rd", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def add_ru(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_ru", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def mul_rn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rn", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def mul_rz(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rz", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def mul_rd(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rd", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def mul_ru(arg0, arg1, _builder=None): return 
core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_ru", core.dtype("fp64")), (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def double2float_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2float_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def double2float_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2float_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def double2float_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2float_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def double2float_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2float_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def double2int_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2int_rn", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2int_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2int_rz", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2int_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2int_rd", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2int_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2int_ru", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2uint_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2uint_rn", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2uint_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2uint_rz", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2uint_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2uint_rd", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2uint_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2uint_ru", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def int2double_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_int2double_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def uint2double_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint32"),): ("__nv_uint2double_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def float2int_rn(arg0, 
_builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2int_rn", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2int_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2int_rz", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2int_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2int_rd", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2int_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2int_ru", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2uint_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2uint_rn", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2uint_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2uint_rz", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2uint_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2uint_rd", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2uint_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2uint_ru", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def int2float_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_int2float_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def int2float_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_int2float_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def int2float_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_int2float_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def int2float_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_int2float_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def uint2float_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint32"),): ("__nv_uint2float_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def uint2float_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint32"),): ("__nv_uint2float_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def uint2float_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint32"),): ("__nv_uint2float_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def uint2float_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint32"),): 
("__nv_uint2float_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def hiloint2double(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_hiloint2double", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def double2loint(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2loint", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def double2hiint(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2hiint", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def float2ll_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ll_rn", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ll_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ll_rz", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ll_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ll_rd", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ll_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ll_ru", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ull_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ull_rn", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ull_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ull_rz", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ull_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ull_rd", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def float2ull_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float2ull_ru", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ll_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ll_rn", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ll_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ll_rz", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ll_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ll_rd", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ll_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ll_ru", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def 
double2ull_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ull_rn", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ull_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ull_rz", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ull_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ull_rd", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def double2ull_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double2ull_ru", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def ll2float_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2float_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ll2float_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2float_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ll2float_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2float_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ll2float_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2float_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ull2float_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2float_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ull2float_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2float_rz", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ull2float_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2float_rd", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ull2float_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2float_ru", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ll2double_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2double_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ll2double_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2double_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ll2double_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_ll2double_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ll2double_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], 
{(core.dtype("int64"),): ("__nv_ll2double_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ull2double_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2double_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ull2double_rz(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2double_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ull2double_rd(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2double_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ull2double_ru(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint64"),): ("__nv_ull2double_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def int_as_float(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_int_as_float", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def float_as_int(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float_as_int", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def uint_as_float(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("uint32"),): ("__nv_uint_as_float", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def float_as_uint(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_float_as_uint", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def longlong_as_double(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int64"),): ("__nv_longlong_as_double", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def double_as_longlong(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_double_as_longlong", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def fast_sinf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_sinf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_cosf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_cosf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_log2f(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_log2f", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_logf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_logf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_expf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_expf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def 
fast_tanf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_tanf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_exp10f(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_exp10f", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_log10f(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_fast_log10f", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def fast_powf(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fast_powf", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def hadd(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_hadd", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_uhadd", core.dtype("uint32")), }, is_pure=True, _builder=_builder) @core.extern def rhadd(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("int32"),): ("__nv_rhadd", core.dtype("int32")), (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_urhadd", core.dtype("uint32")), }, is_pure=True, _builder=_builder) @core.extern def sub_rn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rn", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sub_rz(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rz", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rz", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sub_rd(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rd", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rd", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sub_ru(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_ru", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_ru", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rsqrt_rn(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_frsqrt_rn", core.dtype("fp32")), }, is_pure=True, _builder=_builder) @core.extern def ffs(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("int32"),): ("__nv_ffs", core.dtype("int32")), (core.dtype("int64"),): ("__nv_ffsll", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def rint(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_rintf", 
core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_rint", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def llrint(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_llrintf", core.dtype("int64")), (core.dtype("fp64"),): ("__nv_llrint", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def nearbyint(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_nearbyintf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_nearbyint", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def isnan(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_isnanf", core.dtype("int32")), (core.dtype("fp64"),): ("__nv_isnand", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def signbit(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_signbitf", core.dtype("int32")), (core.dtype("fp64"),): ("__nv_signbitd", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def copysign(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_copysignf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_copysign", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def finitef(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_finitef", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def isinf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_isinff", core.dtype("int32")), (core.dtype("fp64"),): ("__nv_isinfd", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def nextafter(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_nextafterf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_nextafter", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sin(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_sinf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_sin", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def cos(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_cosf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_cos", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sinpi(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_sinpif", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_sinpi", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def cospi(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_cospif", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_cospi", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def tan(arg0, _builder=None): return 
core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_tanf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_tan", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def log2(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_log2f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_log2", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def exp(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_expf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_exp", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def exp10(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_exp10f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_exp10", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def cosh(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_coshf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_cosh", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def sinh(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_sinhf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_sinh", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def tanh(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_tanhf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_tanh", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def atan2(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_atan2f", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_atan2", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def atan(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_atanf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_atan", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def asin(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_asinf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_asin", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def acos(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_acosf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_acos", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def log(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_logf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_log", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def log10(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_log10f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_log10", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def log1p(arg0, 
_builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_log1pf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_log1p", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def acosh(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_acoshf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_acosh", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def asinh(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_asinhf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_asinh", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def atanh(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_atanhf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_atanh", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def expm1(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_expm1f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_expm1", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def hypot(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_hypotf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_hypot", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rhypot(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rhypotf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rhypot", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def norm3d(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm3df", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm3d", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rnorm3d(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm3df", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm3d", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def norm4d(arg0, arg1, arg2, arg3, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, arg3, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm4df", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm4d", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rnorm4d(arg0, arg1, arg2, arg3, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, arg3, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm4df", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): 
("__nv_rnorm4d", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def cbrt(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_cbrtf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_cbrt", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def rcbrt(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_rcbrtf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_rcbrt", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def j0(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_j0f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_j0", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def j1(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_j1f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_j1", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def y0(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_y0f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_y0", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def y1(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_y1f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_y1", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def yn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("fp32"),): ("__nv_ynf", core.dtype("fp32")), (core.dtype("int32"), core.dtype("fp64"),): ("__nv_yn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def jn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("int32"), core.dtype("fp32"),): ("__nv_jnf", core.dtype("fp32")), (core.dtype("int32"), core.dtype("fp64"),): ("__nv_jn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def cyl_bessel_i0(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_cyl_bessel_i0f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_cyl_bessel_i0", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def cyl_bessel_i1(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_cyl_bessel_i1f", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_cyl_bessel_i1", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def erf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_erff", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_erf", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def erfinv(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_erfinvf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_erfinv", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def erfc(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), 
[arg0, ], {(core.dtype("fp32"),): ("__nv_erfcf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_erfc", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def erfcx(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_erfcxf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_erfcx", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def erfcinv(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_erfcinvf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_erfcinv", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def normcdfinv(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_normcdfinvf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_normcdfinv", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def normcdf(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_normcdff", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_normcdf", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def lgamma(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_lgammaf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_lgamma", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ldexp(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_ldexpf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("int32"),): ("__nv_ldexp", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def scalbn(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_scalbnf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("int32"),): ("__nv_scalbn", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def fmod(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmodf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmod", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def remainder(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_remainderf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_remainder", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def fma(arg0, arg1, arg2, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ], {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def pow(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_powif", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("int32"),): ("__nv_powi", core.dtype("fp64")), (core.dtype("fp32"), 
core.dtype("fp32"),): ("__nv_powf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_pow", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def tgamma(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_tgammaf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_tgamma", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def round(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_roundf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_round", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def llround(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_llroundf", core.dtype("int64")), (core.dtype("fp64"),): ("__nv_llround", core.dtype("int64")), }, is_pure=True, _builder=_builder) @core.extern def fdim(arg0, arg1, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ], {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdimf", core.dtype("fp32")), (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fdim", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def ilogb(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_ilogbf", core.dtype("int32")), (core.dtype("fp64"),): ("__nv_ilogb", core.dtype("int32")), }, is_pure=True, _builder=_builder) @core.extern def logb(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp32"),): ("__nv_logbf", core.dtype("fp32")), (core.dtype("fp64"),): ("__nv_logb", core.dtype("fp64")), }, is_pure=True, _builder=_builder) @core.extern def isfinited(arg0, _builder=None): return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ], {(core.dtype("fp64"),): ("__nv_isfinited", core.dtype("int32")), }, is_pure=True, _builder=_builder)
75,127
47.943322
157
py
triton
triton-main/python/triton/language/standard.py
from __future__ import annotations from ..runtime.jit import jit from . import core # ----------------------- # Standard library # ----------------------- @jit def cdiv(x, div): """ Computes the ceiling division of :code:`x` by :code:`div` :param x: the input number :type input: Block :param div: the divisor :param div: Block """ return (x + div - 1) // div @jit @core._add_math_1arg_docstr("sigmoid") def sigmoid(x): return 1 / (1 + core.exp(-x)) @jit @core._add_math_1arg_docstr("softmax") def softmax(x, ieee_rounding=False): z = x - core.max(x, 0) num = core.exp(z) den = core.sum(num, 0) return core.fdiv(num, den, ieee_rounding) @jit def ravel(x): """ Returns a contiguous flattened view of :code:`x`. :param x: the input tensor :type x: Block """ return core.view(x, [x.numel]) @jit def swizzle2d(i, j, size_i, size_j, size_g): """ Transforms indices of a row-major size_i*size_j matrix into those of one where indices are row major for each group of size_j rows. For example, for size_i = size_j = 4 and size_g = 2, it will transform [[0 , 1 , 2 , 3 ], [4 , 5 , 6 , 7 ], [8 , 9 , 10, 11], [12, 13, 14, 15]] into [[0, 2, 4 , 6 ], [1, 3, 5 , 7 ], [8, 10, 12, 14], [9, 11, 13, 15]] """ # "unrolled index in array" ij = i * size_j + j # number of elements in `size_g` groups # of `size_j` columns size_gj = size_g * size_j # index of the group in which (i,j) is group_id = ij // size_gj # row-index of the first element of this group off_i = group_id * size_g # last group may have fewer rows size_g = core.minimum(size_i - off_i, size_g) # new row and column indices new_i = off_i + (ij % size_g) new_j = (ij % size_gj) // size_g return new_i, new_j @jit def zeros(shape, dtype): """ Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`. :param shape: Shape of the new array, e.g., (8, 16) or (8, ) :type shape: tuple of ints :param dtype: Data-type of the new array, e.g., :code:`tl.float16` :type dtype: DType """ return core.full(shape, 0, dtype) @jit def zeros_like(input): return zeros(input.shape, input.dtype)
2,320
22.444444
98
py
triton
triton-main/python/triton/language/random.py
from ..runtime.jit import jit from . import core as tl PHILOX_KEY_A: tl.constexpr = 0x9E3779B9 PHILOX_KEY_B: tl.constexpr = 0xBB67AE85 PHILOX_ROUND_A: tl.constexpr = 0xD2511F53 PHILOX_ROUND_B: tl.constexpr = 0xCD9E8D57 N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox # ------------------- # randint # ------------------- @jit def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1). """ for _ in tl.static_range(n_rounds): # for _ in range(n_rounds): # update random state A = PHILOX_ROUND_A B = PHILOX_ROUND_B _c0, _c2 = c0, c2 c0 = tl.umulhi(B, _c2) ^ c1 ^ k0 c2 = tl.umulhi(A, _c0) ^ c3 ^ k1 c1 = B * _c2 c3 = A * _c0 # raise key k0 = k0 + PHILOX_KEY_A k1 = k1 + PHILOX_KEY_B return c0, c1, c2, c3 @jit def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): seed = seed.to(tl.uint64) seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32) seed_lo = (seed & 0xffffffff).to(tl.uint32) c0 = c0.to(tl.uint32, bitcast=True) c1 = c1.to(tl.uint32, bitcast=True) c2 = c2.to(tl.uint32, bitcast=True) c3 = c3.to(tl.uint32, bitcast=True) return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds) @jit def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Given a :code:`seed` scalar and an :code:`offset` block, returns a single block of random :code:`int32`. If you need multiple streams of random numbers, using `randint4x` is likely to be faster than calling `randint` 4 times. :param seed: The seed for generating random numbers. :param offsets: The offsets to generate random numbers for. """ ret, _, _, _ = randint4x(seed, offset, n_rounds) return ret @jit def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Given a :code:`seed` scalar and an :code:`offset` block, returns four blocks of random :code:`int32`. This is the maximally efficient entry point to Triton's Philox pseudo-random number generator. :param seed: The seed for generating random numbers. :param offsets: The offsets to generate random numbers for. """ # _0 = tl.zeros(offset.shape, offset.dtype) _0 = offset * 0 return philox(seed, offset, _0, _0, _0, n_rounds) # ------------------- # rand # ------------------- # @jit # def uint32_to_uniform_float(x): # """ # Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1). # """ # two_to_the_minus_32: tl.constexpr = 2.328306e-10 # return x * two_to_the_minus_32 @jit def uint32_to_uniform_float(x): """ Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1). """ x = x.to(tl.int32, bitcast=True) # maximum value such that `MAX_INT * scale < 1.0` (with float rounding) scale = 4.6566127342e-10 x = tl.where(x < 0, -x - 1, x) return x * scale @jit def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Given a :code:`seed` scalar and an :code:`offset` block, returns a block of random :code:`float32` in :math:`U(0, 1)`. :param seed: The seed for generating random numbers. :param offsets: The offsets to generate random numbers for. """ offset = offset.to(tl.uint32, bitcast=True) source = randint(seed, offset, n_rounds) return uint32_to_uniform_float(source) @jit def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Given a :code:`seed` scalar and an :code:`offsets` block, returns a 4 blocks of random :code:`float32` in :math:`U(0, 1)`. :param seed: The seed for generating random numbers. 
:param offsets: The offsets to generate random numbers for. """ offsets = offsets.to(tl.uint32, bitcast=True) i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds) u1 = uint32_to_uniform_float(i1) u2 = uint32_to_uniform_float(i2) u3 = uint32_to_uniform_float(i3) u4 = uint32_to_uniform_float(i4) return u1, u2, u3, u4 # ------------------- # randn # ------------------- @jit def pair_uniform_to_normal(u1, u2): """Box-Muller transform""" u1 = tl.maximum(1.0e-7, u1) th = 6.283185307179586 * u2 r = tl.sqrt(-2.0 * tl.log(u1)) return r * tl.cos(th), r * tl.sin(th) @jit def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Given a :code:`seed` scalar and an :code:`offset` block, returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`. :param seed: The seed for generating random numbers. :param offsets: The offsets to generate random numbers for. """ i1, i2, _, _ = randint4x(seed, offset, n_rounds) u1 = uint32_to_uniform_float(i1) u2 = uint32_to_uniform_float(i2) n1, _ = pair_uniform_to_normal(u1, u2) return n1 @jit def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): """ Given a :code:`seed` scalar and an :code:`offset` block, returns a 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`. :param seed: The seed for generating random numbers. :param offsets: The offsets to generate random numbers for. """ u1, u2, u3, u4 = rand4x(seed, offset, n_rounds) n1, n2 = pair_uniform_to_normal(u1, u2) n3, n4 = pair_uniform_to_normal(u3, u4) return n1, n2, n3, n4
5,569
30.117318
109
py
triton
triton-main/python/triton/language/__init__.py
"""isort:skip_file""" # Import order is significant here. from . import math from . import extra from .standard import ( cdiv, sigmoid, softmax, ravel, swizzle2d, zeros, zeros_like, ) from .core import ( TRITON_MAX_TENSOR_NUMEL, abs, advance, arange, argmin, argmax, associative_scan, atomic_add, atomic_and, atomic_cas, atomic_max, atomic_min, atomic_or, atomic_xchg, atomic_xor, bfloat16, block_type, broadcast, broadcast_to, cat, constexpr, cos, cumprod, cumsum, debug_barrier, device_assert, device_print, dot, dtype, exp, expand_dims, full, fdiv, float16, float32, float64, float8e4b15, float8e4, float8e5, function_type, int1, int16, int32, int64, int8, load, log, make_block_ptr, max, max_constancy, max_contiguous, maximum, min, minimum, multiple_of, num_programs, pi32_t, pointer_type, program_id, reduce, reshape, sin, sqrt, static_assert, static_print, store, sum, static_range, tensor, trans, # triton, uint16, uint32, uint64, uint8, umulhi, view, void, where, xor_sum, ) from .random import ( pair_uniform_to_normal, philox, philox_impl, rand, rand4x, randint, randint4x, randn, randn4x, uint32_to_uniform_float, ) __all__ = [ "TRITON_MAX_TENSOR_NUMEL", "abs", "advance", "arange", "argmin", "argmax", "associative_scan", "atomic_add", "atomic_and", "atomic_cas", "atomic_max", "atomic_min", "atomic_or", "atomic_xchg", "atomic_xor", "bfloat16", "block_type", "broadcast", "broadcast_to", "builtin", "cat", "cdiv", "constexpr", "cos", "cumprod", "cumsum", "debug_barrier", "device_assert", "device_print", "dot", "dtype", "exp", "expand_dims", "extra", "fdiv", "float16", "float32", "float64", "float8e4b15", "float8e4", "float8e5", "full", "function_type", "int1", "int16", "int32", "int64", "int8", "ir", "math", "load", "log", "make_block_ptr", "max", "max_constancy", "max_contiguous", "maximum", "min", "minimum", "multiple_of", "num_programs", "pair_uniform_to_normal", "philox", "philox_impl", "pi32_t", "pointer_type", "program_id", "rand", "rand4x", "randint", "randint4x", "randn", "randn4x", "ravel", "reduce", "reshape", "sigmoid", "sin", "softmax", "sqrt", "static_range", "static_assert", "static_print", "store", "sum", "swizzle2d", "tensor", "trans", "triton", "uint16", "uint32", "uint32_to_uniform_float", "uint64", "uint8", "umulhi", "view", "void", "where", "xor_sum", "zeros", "zeros_like", ]
3,122
13.593458
35
py
triton
triton-main/python/triton/language/extra/cuda.py
import os

from .. import core

__path__ = os.path.dirname(os.path.abspath(__file__))


@core.extern
def globaltimer(_builder=None):
    return core.extern_elementwise("cuda", os.path.join(__path__, "cuda.bc"), [],
                                   {tuple(): ("globaltimer", core.dtype("int64")), },
                                   is_pure=False, _builder=_builder)


@core.extern
def smid(_builder=None):
    return core.extern_elementwise("cuda", os.path.join(__path__, "cuda.bc"), [],
                                   {tuple(): ("smid", core.dtype("int32")), },
                                   is_pure=True, _builder=_builder)
641
31.1
82
py
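A hedged sketch of how these externs can be invoked from a kernel — the kernel and buffer names below are illustrative, and it assumes `triton.language.extra` is re-exported as shown in the package `__init__` files nearby. It records the ID of the streaming multiprocessor each program instance ran on:

import torch
import triton
import triton.language as tl


@triton.jit
def _record_smid(out_ptr):
    # one int32 per program instance: the SM it was scheduled on
    pid = tl.program_id(0)
    tl.store(out_ptr + pid, tl.extra.cuda.smid())


out = torch.empty(128, device="cuda", dtype=torch.int32)
_record_smid[(128,)](out)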
triton
triton-main/python/triton/language/extra/__init__.py
from . import cuda

__all__ = ['cuda']
39
9
18
py
triton
triton-main/python/triton/ops/flash_attention.py
""" Fused Attention =============== This is a Triton implementation of the Flash Attention algorithm (see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf) Sequence Parallel implementation inspired by HazyResearch (see https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attn_triton.py) """ import torch from .. import cdiv, jit from .. import language as tl @jit def _fwd_kernel( Q, K, V, sm_scale, L, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, MODE: tl.constexpr, ): start_m = tl.program_id(0) off_hz = tl.program_id(1) qvk_offset = off_hz * stride_qh Q_block_ptr = tl.make_block_ptr( base=Q + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0) ) K_block_ptr = tl.make_block_ptr( base=K + qvk_offset, shape=(BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1) ) V_block_ptr = tl.make_block_ptr( base=V + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0) ) O_block_ptr = tl.make_block_ptr( base=Out + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0) ) # initialize offsets offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) # initialize pointer to m and l m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # causal check on every loop iteration can be expensive # and peeling the last iteration of the loop does not work well with ptxas # so we have a mode to do the causal check in a separate kernel entirely if MODE == 0: # entire non-causal attention lo, hi = 0, N_CTX if MODE == 1: # entire causal attention lo, hi = 0, (start_m + 1) * BLOCK_M if MODE == 2: # off band-diagonal lo, hi = 0, start_m * BLOCK_M if MODE == 3: # on band-diagonal l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m m_i = tl.load(m_ptrs) l_i = tl.load(l_ptrs) acc += tl.load(O_block_ptr).to(tl.float32) lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M # credits to: Adam P. 
Goucher (https://github.com/apgoucher): # scale sm_scale by 1/log_2(e) and use # 2^x instead of exp in the loop because CSE and LICM # don't work as expected with `exp` in the loop qk_scale = sm_scale * 1.44269504 # load q: it will stay in SRAM throughout q = tl.load(Q_block_ptr) q = (q * qk_scale).to(K.dtype.element_ty) # advance block pointers to first iteration of the loop K_block_ptr = tl.advance(K_block_ptr, (0, lo)) V_block_ptr = tl.advance(V_block_ptr, (lo, 0)) # loop over k, v and update accumulator for start_n in range(lo, hi, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) # -- compute qk ---- k = tl.load(K_block_ptr) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, k, allow_tf32=True) if MODE == 1 or MODE == 3: qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) # -- compute m_ij, p, l_ij m_ij = tl.max(qk, 1) p = tl.math.exp2(qk - m_ij[:, None]) l_ij = tl.sum(p, 1) # -- update m_i and l_i m_i_new = tl.maximum(m_i, m_ij) alpha = tl.math.exp2(m_i - m_i_new) beta = tl.math.exp2(m_ij - m_i_new) l_i *= alpha l_i_new = l_i + beta * l_ij # scale p p_scale = beta / l_i_new p = p * p_scale[:, None] # scale acc acc_scale = l_i / l_i_new acc = acc * acc_scale[:, None] # update acc v = tl.load(V_block_ptr) p = p.to(V.dtype.element_ty) acc += tl.dot(p, v, allow_tf32=True) # update m_i and l_i l_i = l_i_new m_i = m_i_new # update pointers K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) # write back l and m l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m tl.store(l_ptrs, l_i) tl.store(m_ptrs, m_i) # write back O tl.store(O_block_ptr, acc.to(K.dtype.element_ty)) @jit def _bwd_preprocess( Out, DO, L, NewDO, Delta, BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr, ): off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) off_n = tl.arange(0, D_HEAD) # load o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) denom = tl.load(L + off_m).to(tl.float32) # compute do = do / denom[:, None] delta = tl.sum(o * do, axis=1) # write-back tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do) tl.store(Delta + off_m, delta) @jit def _bwd_kernel_one_col_block( Q, K, V, sm_scale, qk_scale, Out, DO, DQ, DK, DV, L, M, D, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, Z, H, N_CTX, off_hz, start_n, num_block, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, MODE: tl.constexpr, ): if SEQUENCE_PARALLEL: DQ += stride_dqa.to(tl.int64) * start_n if MODE == 0: lo = 0 else: lo = start_n * BLOCK_M # initialize row/col offsets offs_qm = lo + tl.arange(0, BLOCK_M) offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M) offs_m = tl.arange(0, BLOCK_N) offs_k = tl.arange(0, BLOCK_DMODEL) # initialize pointers to value-like data q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) v_ptrs = V + (offs_n[:, None] * stride_vk + offs_k[None, :] * stride_vn) do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) # pointer to row-wise quantities in value-like data D_ptrs = D + off_hz * N_CTX m_ptrs = M + off_hz * N_CTX # initialize dv and dk dv = tl.zeros([BLOCK_M,
BLOCK_DMODEL], dtype=tl.float32) dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # k and v stay in SRAM throughout k = tl.load(k_ptrs) v = tl.load(v_ptrs) # loop over rows for start_m in range(lo, num_block * BLOCK_M, BLOCK_M): offs_m_curr = start_m + offs_m # load q, k, v, do on-chip q = tl.load(q_ptrs) # recompute p = softmax(qk, dim=-1).T # NOTE: `do` is pre-divided by `l`; no normalization here if MODE == 1: qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), float(0.), float("-inf")) else: qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, tl.trans(k)) qk *= qk_scale m = tl.load(m_ptrs + offs_m_curr) p = tl.math.exp2(qk - m[:, None]) # compute dv do = tl.load(do_ptrs) dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do, allow_tf32=True) # compute dp = dot(v, do) Di = tl.load(D_ptrs + offs_m_curr) # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] dp = tl.dot(do, tl.trans(v), allow_tf32=True) # compute ds = p * (dp - delta[:, None]) ds = (p * (dp - Di[:, None]) * sm_scale).to(Q.dtype.element_ty) # compute dk = dot(ds.T, q) dk += tl.dot(tl.trans(ds), q, allow_tf32=True) # compute dq if not SEQUENCE_PARALLEL: dq = tl.load(dq_ptrs) dq += tl.dot(ds, k, allow_tf32=True) tl.store(dq_ptrs, dq) elif SEQUENCE_PARALLEL: # dq = tl.dot(ds, k, allow_tf32=True) dq = tl.trans(tl.dot(tl.trans(k), tl.trans(ds), allow_tf32=True)) tl.store(dq_ptrs, dq) # increment pointers dq_ptrs += BLOCK_M * stride_qm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_qm # write-back dv_ptrs = DV + (offs_n[:, None] * stride_vk + offs_k[None, :] * stride_vn) dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) tl.store(dv_ptrs, dv) tl.store(dk_ptrs, dk) @jit def _bwd_kernel( # fmt: off Q, K, V, sm_scale, Out, DO, DQ, DK, DV, L, M, D, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, Z, H, N_CTX, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, MODE: tl.constexpr, # fmt: on ): qk_scale = sm_scale * 1.44269504 off_hz = tl.program_id(0) off_z = off_hz // H off_h = off_hz % H # offset pointers for batch/head Q += off_z * stride_qz + off_h * stride_qh K += off_z * stride_kz + off_h * stride_kh V += off_z * stride_vz + off_h * stride_vh DO += off_z * stride_qz + off_h * stride_qh DQ += off_z * stride_qz + off_h * stride_qh DK += off_z * stride_kz + off_h * stride_kh DV += off_z * stride_vz + off_h * stride_vh num_block_n = tl.cdiv(N_CTX, BLOCK_N) if not SEQUENCE_PARALLEL: for start_n in range(0, num_block_n): _bwd_kernel_one_col_block( Q, K, V, sm_scale, qk_scale, Out, DO, DQ, DK, DV, L, M, D, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, Z, H, N_CTX, off_hz, start_n, num_block_n, BLOCK_M=BLOCK_M, BLOCK_DMODEL=BLOCK_DMODEL, BLOCK_N=BLOCK_N, SEQUENCE_PARALLEL=SEQUENCE_PARALLEL, MODE=MODE, ) else: start_n = tl.program_id(1) _bwd_kernel_one_col_block( Q, K, V, sm_scale, qk_scale, Out, DO, DQ, DK, DV, L, M, D, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, Z, H, N_CTX, off_hz, start_n, num_block_n, BLOCK_M=BLOCK_M, BLOCK_DMODEL=BLOCK_DMODEL, BLOCK_N=BLOCK_N, SEQUENCE_PARALLEL=SEQUENCE_PARALLEL, MODE=MODE, ) class _attention(torch.autograd.Function): @staticmethod def forward(ctx, q, k, v, causal, sm_scale, 
sequence_parallel=False): # only support for Ampere now capability = torch.cuda.get_device_capability() if capability[0] < 8: raise RuntimeError("Flash attention currently only supported for compute capability >= 80") BLOCK = 128 # shape constraints Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] assert Lq == Lk and Lk == Lv assert Lk in {16, 32, 64, 128} o = torch.empty_like(q) grid = (cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1], 1) L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) num_warps = 4 if Lk <= 64 else 8 if causal: modes = [1] if q.shape[2] <= 2048 else [2, 3] else: modes = [0] for mode in modes: _fwd_kernel[grid]( q, k, v, sm_scale, L, m, o, q.stride(0), q.stride(1), q.stride(2), q.stride(3), k.stride(0), k.stride(1), k.stride(2), k.stride(3), v.stride(0), v.stride(1), v.stride(2), v.stride(3), o.stride(0), o.stride(1), o.stride(2), o.stride(3), q.shape[0], q.shape[1], q.shape[2], BLOCK_M=128, BLOCK_N=BLOCK, BLOCK_DMODEL=Lk, MODE=mode, num_warps=num_warps, num_stages=2) ctx.save_for_backward(q, k, v, o, L, m) ctx.grid = grid ctx.sm_scale = sm_scale ctx.BLOCK_DMODEL = Lk ctx.causal = causal ctx.sequence_parallel = sequence_parallel return o @staticmethod def backward(ctx, do): BLOCK = 128 q, k, v, o, l, m = ctx.saved_tensors sequence_parallel = ctx.sequence_parallel seq_len_kv = k.shape[2] do = do.contiguous() if sequence_parallel: replicas = cdiv(seq_len_kv, BLOCK) new_dq_shape = (replicas,) + q.shape dq = torch.zeros(new_dq_shape, device=q.device, dtype=q.dtype) else: dq = torch.zeros_like(q, dtype=torch.float32) dk = torch.empty_like(k) dv = torch.empty_like(v) do_scaled = torch.empty_like(do) delta = torch.empty_like(l) if ctx.causal: mode = 1 else: mode = 0 _bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )]( o, do, l, do_scaled, delta, BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL, ) _bwd_kernel[(ctx.grid[1], cdiv(seq_len_kv, BLOCK) if sequence_parallel else 1)]( q, k, v, ctx.sm_scale, o, do_scaled, dq, dk, dv, l, m, delta, o.numel(), q.stride(0), q.stride(1), q.stride(2), q.stride(3), k.stride(0), k.stride(1), k.stride(2), k.stride(3), v.stride(0), v.stride(1), v.stride(2), v.stride(3), q.shape[0], q.shape[1], q.shape[2], BLOCK_M=BLOCK, BLOCK_N=BLOCK, BLOCK_DMODEL=ctx.BLOCK_DMODEL, SEQUENCE_PARALLEL=sequence_parallel, MODE=mode, num_warps=8, num_stages=1, ) if len(dq.shape) == 5: dq = dq.sum(dim=0) return dq, dk, dv, None, None, None attention = _attention.apply
15,056
35.107914
113
py
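A hedged usage sketch for the `attention` entry point above; the tensor sizes are illustrative. Per the forward pass, inputs are shaped (Z, H, N_CTX, D_HEAD), D_HEAD must be one of {16, 32, 64, 128}, and the kernel requires compute capability >= 8.0:

import torch
from triton.ops import attention

Z, H, N_CTX, D_HEAD = 4, 16, 1024, 64
q = torch.randn(Z, H, N_CTX, D_HEAD, device="cuda", dtype=torch.float16, requires_grad=True)
k = torch.randn_like(q, requires_grad=True)
v = torch.randn_like(q, requires_grad=True)
sm_scale = D_HEAD ** -0.5
o = attention(q, k, v, True, sm_scale)  # positional args: q, k, v, causal, sm_scale
o.sum().backward()                      # exercises _bwd_preprocess and _bwd_kernel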
triton
triton-main/python/triton/ops/cross_entropy.py
import torch from .. import heuristics, jit from .. import language as tl from .. import next_power_of_2 def num_warps(N): if N < 2048: return 4 elif N < 8192: return 8 return 16 @heuristics({'num_warps': lambda nargs: num_warps(nargs['N'])}) @heuristics({'BLOCK': lambda nargs: next_power_of_2(nargs['N'])}) @jit def _forward(LOGITS, PROBS, IDX, LOSS, N, BLOCK: tl.constexpr): row = tl.program_id(0) cols = tl.arange(0, BLOCK) idx = tl.load(IDX + row) # pointers to logit and probs LOGITS = LOGITS + row * N + cols WRIT_PROBS = PROBS + row * N + cols READ_PROBS = PROBS + row * N + idx # write-back negative log-probs logits = tl.load(LOGITS, mask=cols < N, other=-float('inf')) logits = logits.to(tl.float32) logits = logits - tl.max(logits, 0) probs = tl.log(tl.sum(tl.exp(logits), 0)) - logits tl.store(WRIT_PROBS, probs, mask=cols < N) # There is a bug in the compiler, which fails to insert a barrier here. # We add it explicitly for now. Will be fixed soon. tl.debug_barrier() # write-back loss probs = tl.load(READ_PROBS) tl.store(LOSS + row, probs) @heuristics({'num_warps': lambda nargs: num_warps(nargs['N'])}) @heuristics({'BLOCK': lambda nargs: next_power_of_2(nargs['N'])}) @jit def _backward(PROBS, IDX, DPROBS, N, BLOCK: tl.constexpr): row = tl.program_id(0) cols = tl.arange(0, BLOCK) idx = tl.load(IDX + row) # pointers to probs PROBS = PROBS + row * N + cols # We know d(-log(p[i])/dlogit[k] = -id_mat[i,k] + p[k] # and we have -log(p[k]) stored in PROBS, so this is easy probs = -tl.load(PROBS, mask=cols < N, other=float('inf')) probs = tl.exp(probs.to(tl.float32)) delta = cols == idx # write result in-place in PROBS dout = tl.load(DPROBS + row) din = (probs - delta) * dout tl.store(PROBS, din.to(PROBS.dtype.element_ty), mask=cols < N) class _cross_entropy(torch.autograd.Function): @classmethod def forward(cls, ctx, logits, indices): # make sure we can use triton assert (indices.dtype == torch.int64), "Indices are expected to be of type long." # make kernel device, dtype = logits.device, logits.dtype n_cols = logits.shape[-1] # run the kernel result = torch.empty_like(indices, dtype=dtype, device=device) neg_logprobs = torch.empty_like(logits, dtype=dtype, device=device) grid = lambda opt: (logits.numel() // n_cols, ) _forward[grid](logits, neg_logprobs, indices, result, n_cols) # save for backward ctx.save_for_backward(neg_logprobs, indices) return result @classmethod def backward(cls, ctx, dneg_logprobs): """We know d(-log(p[i])/dlogit[k] = -id_mat[i,k] + p[k] so we initialize the gradient as neg_logprobs, so we can just exponentiate to get p[k], which is most of what we need... neg_logprobs will be modified in place to become the gradient we want """ # load saved tensors neg_logprobs, indices = ctx.saved_tensors # run the kernel # neg_logprobs will be modified in place to become our gradient: n_cols = neg_logprobs.shape[-1] grid = lambda opt: (neg_logprobs.numel() // n_cols, ) _backward[grid](neg_logprobs, indices, dneg_logprobs, n_cols) return neg_logprobs, None cross_entropy = _cross_entropy.apply
3,450
34.947917
89
py
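A short usage sketch (shapes illustrative). The forward kernel returns one negative log-probability per row, and the assertion in `forward` requires int64 indices:

import torch
from triton.ops import cross_entropy

logits = torch.randn(128, 1000, device="cuda", dtype=torch.float16, requires_grad=True)
targets = torch.randint(0, 1000, (128,), device="cuda", dtype=torch.int64)
losses = cross_entropy(logits, targets)  # shape (128,): -log p[target] per row
losses.mean().backward()                 # gradient rebuilt from the saved log-probs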
triton
triton-main/python/triton/ops/matmul.py
import torch from .. import Config, autotune, cdiv, heuristics, jit from .. import language as tl from .matmul_perf_model import early_config_prune, estimate_matmul_time _ordered_datatypes = [torch.float16, torch.bfloat16, torch.float32] def get_higher_dtype(a, b): if a is b: return a assert a in _ordered_datatypes assert b in _ordered_datatypes for d in _ordered_datatypes: if a is d: return b if b is d: return a def init_to_zero(name): return lambda nargs: nargs[name].zero_() def get_configs_io_bound(): configs = [] for num_stages in [2, 3, 4, 5, 6]: for block_m in [16, 32]: for block_k in [32, 64]: for block_n in [32, 64, 128, 256]: num_warps = 2 if block_n <= 64 else 4 configs.append( Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1}, num_stages=num_stages, num_warps=num_warps)) # split_k for split_k in [2, 4, 8, 16]: configs.append(Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k}, num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C'))) return configs @autotune( configs=[ # basic configs for compute-bound matmuls Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2), # good for int8 Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2), ] + get_configs_io_bound(), key=['M', 'N', 'K'], prune_configs_by={ 'early_config_prune': early_config_prune, 'perf_model': estimate_matmul_time, 'top_k': 10 }, ) @heuristics({ 'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0, }) @jit def _kernel(A, B, C, M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, dot_out_dtype: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, ): # matrix multiplication pid = tl.program_id(0) pid_z = tl.program_id(1) grid_m = tl.cdiv(M, BLOCK_M) grid_n = tl.cdiv(N, BLOCK_N) # 
re-order program ID for better L2 performance width = GROUP_M * grid_n group_id = pid // width group_size = min(grid_m - group_id * GROUP_M, GROUP_M) pid_m = group_id * GROUP_M + (pid % group_size) pid_n = (pid % width) // (group_size) # do matrix multiplication rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) # pointers A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype) for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): if EVEN_K: a = tl.load(A) b = tl.load(B) else: k_remaining = K - k * (BLOCK_K * SPLIT_K) _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty) a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0) b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0) a = a.to(C.dtype.element_ty) b = b.to(C.dtype.element_ty) acc += tl.dot(a, b, out_dtype=dot_out_dtype) A += BLOCK_K * SPLIT_K * stride_ak B += BLOCK_K * SPLIT_K * stride_bk acc = acc.to(C.dtype.element_ty) # rematerialize rm and rn to save registers rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) mask = (rm < M)[:, None] & (rn < N)[None, :] # handles write-back with reduction-splitting if SPLIT_K == 1: tl.store(C, acc, mask=mask) else: tl.atomic_add(C, acc, mask=mask) class _matmul(torch.autograd.Function): kernel = _kernel _locks = {} @staticmethod def _call(a, b, dot_out_dtype): device = a.device # handle non-contiguous inputs if necessary if a.stride(0) > 1 and a.stride(1) > 1: a = a.contiguous() if b.stride(0) > 1 and b.stride(1) > 1: b = b.contiguous() # checks constraints assert a.shape[1] == b.shape[0], "incompatible dimensions" M, K = a.shape _, N = b.shape # allocates output if a.dtype in [tl.float8e4, tl.float8e4b15, tl.float8e5] or\ b.dtype in [tl.float8e4, tl.float8e4b15, tl.float8e5]: c_dtype = torch.float16 else: c_dtype = get_higher_dtype(a.dtype, b.dtype) c = torch.empty((M, N), device=device, dtype=c_dtype) if dot_out_dtype is None: if c_dtype in [torch.float16, torch.float32, torch.bfloat16]: dot_out_dtype = tl.float32 else: dot_out_dtype = tl.int32 else: assert isinstance(dot_out_dtype, torch.dtype), "dot_out_dtype must be a torch.dtype" if dot_out_dtype == torch.float16: dot_out_dtype = tl.float16 elif dot_out_dtype in [torch.float32, torch.bfloat16]: dot_out_dtype = tl.float32 else: dot_out_dtype = tl.int32 # launch kernel grid = lambda META: (cdiv(M, META['BLOCK_M']) * cdiv(N, META['BLOCK_N']), META['SPLIT_K']) _kernel[grid](a, b, c, M, N, K, a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1), dot_out_dtype=dot_out_dtype, GROUP_M=8) return c @staticmethod def forward(ctx, a, b, dot_out_dtype=None): return _matmul._call(a, b, dot_out_dtype=dot_out_dtype) matmul = _matmul.apply
8,026
41.696809
127
py
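A usage sketch for the autotuned kernel above (sizes illustrative). The first call for a given (M, N, K) key triggers autotuning over the config list, including the pruning hooks; later calls with the same shapes reuse the selected config:

import torch
from triton.ops import matmul

a = torch.randn(512, 512, device="cuda", dtype=torch.float16)
b = torch.randn(512, 512, device="cuda", dtype=torch.float16)
c = matmul(a, b)  # dot_out_dtype defaults to tl.float32 for fp16 inputs
torch.testing.assert_close(c, a @ b, atol=1e-2, rtol=1e-2)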
triton
triton-main/python/triton/ops/__init__.py
# from .conv import _conv, conv
from . import blocksparse
from .cross_entropy import _cross_entropy, cross_entropy
from .flash_attention import attention
from .matmul import _matmul, matmul

__all__ = [
    "blocksparse",
    "_cross_entropy",
    "cross_entropy",
    "_matmul",
    "matmul",
    "attention",
]
313
19.933333
56
py
triton
triton-main/python/triton/ops/matmul_perf_model.py
import heapq import torch from .. import cdiv from .._C.libtriton.triton import runtime from ..runtime import driver from ..testing import get_dram_gbps, get_max_simd_tflops, get_max_tensorcore_tflops def get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype): ''' return compute throughput in TOPS ''' total_warps = num_ctas * min(num_warps, 4) num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs tflops = min(num_subcores, total_warps) / num_subcores * get_max_tensorcore_tflops(dtype, backend, device) return tflops def get_simd_tflops(backend, device, num_ctas, num_warps, dtype): ''' return compute throughput in TOPS ''' total_warps = num_ctas * min(num_warps, 4) num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs tflops = min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, backend, device) return tflops def get_tflops(backend, device, num_ctas, num_warps, dtype): capability = torch.cuda.get_device_capability(device) if capability[0] < 8 and dtype == torch.float32: return get_simd_tflops(backend, device, num_ctas, num_warps, dtype) return get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype) def estimate_matmul_time( # backend, device, num_warps, num_stages, A, B, C, M, N, K, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, debug=False, **kwargs ): ''' return estimated running time in ms = max(compute, loading) + store ''' backend = runtime.backend.CUDA device = torch.cuda.current_device() dtype = A.dtype dtsize = A.element_size() num_cta_m = cdiv(M, BLOCK_M) num_cta_n = cdiv(N, BLOCK_N) num_cta_k = SPLIT_K num_ctas = num_cta_m * num_cta_n * num_cta_k # If the input is smaller than the block size M, N = max(M, BLOCK_M), max(N, BLOCK_N) # time to compute total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS tput = get_tflops(backend, device, num_ctas, num_warps, dtype) compute_ms = total_ops / tput # time to load data num_sm = driver.utils.get_device_properties(device)["multiprocessor_count"] active_cta_ratio = min(1, num_ctas / num_sm) active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5% dram_bw = get_dram_gbps(backend, device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # in GB/s l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?) 
# assume 80% of (following) loads are in L2 cache load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1)) load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1) load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1)) load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1) # total total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024) # loading time in ms load_ms = total_dram / dram_bw + total_l2 / l2_bw # estimate storing time store_bw = dram_bw * 0.6 # :o store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB if SPLIT_K == 1: store_ms = store_c_dram / store_bw else: reduce_bw = store_bw store_ms = store_c_dram / reduce_bw # c.zero_() zero_ms = M * N * 2 / (1024 * 1024) / store_bw store_ms += zero_ms total_time_ms = max(compute_ms, load_ms) + store_ms if debug: print(f'Total time: {total_time_ms}ms, compute time: {compute_ms}ms, ' f'loading time: {load_ms}ms, store time: {store_ms}ms, ' f'Activate CTAs: {active_cta_ratio*100}%') return total_time_ms def early_config_prune(configs, named_args): device = torch.cuda.current_device() capability = torch.cuda.get_device_capability() # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages dtsize = named_args['A'].element_size() dtype = named_args['A'].dtype # 1. make sure we have enough smem pruned_configs = [] for config in configs: kw = config.kwargs BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \ kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], config.num_stages max_shared_memory = driver.utils.get_device_properties(device)["max_shared_mem"] required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize if required_shared_memory <= max_shared_memory: pruned_configs.append(config) configs = pruned_configs # Some dtypes do not allow atomic_add if dtype not in [torch.float16, torch.float32]: configs = [config for config in configs if config.kwargs['SPLIT_K'] == 1] # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps) configs_map = {} for config in configs: kw = config.kwargs BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = \ kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], kw['SPLIT_K'], config.num_warps, config.num_stages key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps) if key in configs_map: configs_map[key].append((config, num_stages)) else: configs_map[key] = [(config, num_stages)] pruned_configs = [] for k, v in configs_map.items(): BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k if capability[0] >= 8: # compute cycles (only works for ampere GPUs) mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) mma_cycles = mmas / min(4, num_warps) * 8 ldgsts_latency = 300 # Does this matter? optimal_num_stages = ldgsts_latency / mma_cycles # nearest stages, prefer large #stages nearest = heapq.nsmallest(2, v, key=lambda x: 10 + abs(x[1] - optimal_num_stages) if (x[1] - optimal_num_stages) < 0 else x[1] - optimal_num_stages) for n in nearest: pruned_configs.append(n[0]) else: # Volta & Turing only supports num_stages <= 2 random_config = v[0][0] random_config.num_stages = 2 pruned_configs.append(random_config) return pruned_configs
6,369
39.062893
117
py
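To make the units in `estimate_matmul_time` concrete: `total_ops` is counted in giga-ops, and dividing giga-ops by a throughput in TFLOP/s yields milliseconds directly (1e9 / 1e12 s = 1e-3 s). A worked instance of the compute-time term, assuming a 312 TFLOP/s fp16 tensor-core peak — the throughput figure is an assumption for illustration, not a value the model hardcodes:

# compute-time term of the model for a 4096^3 fp16 matmul
M = N = K = 4096
total_ops = 2 * M * N * K / (1024 * 1024 * 1024)  # = 128 giga-ops
tput = 312                                        # assumed TFLOP/s (A100-class fp16 peak)
compute_ms = total_ops / tput                     # ~0.41 ms compute-bound lower bound
print(f"compute-bound estimate: {compute_ms:.3f} ms")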
triton
triton-main/python/triton/ops/blocksparse/softmax.py
import torch from ... import jit from ... import language as tl from ... import next_power_of_2 def num_warps(n): if n <= 128: return 1 if n <= 256: return 2 if n <= 512: return 4 if n <= 4096: return 8 return 16 @jit def _blocksparse_softmax_fwd( Out, A, stride_xz, LUT, R, extent, stride_zr, stride_hr, # relative attention scale, is_causal, ROW_SIZE: tl.constexpr, BLOCK_SIZE: tl.constexpr, IS_DENSE: tl.constexpr, ): h = tl.program_id(0) m = tl.program_id(1) z = tl.program_id(2) # create index ranges hm = h * tl.num_programs(1) + m lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE # extract information from LUT header = LUT + (hm // BLOCK_SIZE) * 2 size = tl.load(header + 0) offset = tl.load(header + 1) # pointer offset off_a = z * stride_xz off_a += (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE # block indx off_a += (m % BLOCK_SIZE) * BLOCK_SIZE # row indx # do not need to read column indices in the dense case if IS_DENSE: ns = tl.arange(0, ROW_SIZE) else: off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE start_n = tl.load(LUT + off_lut + block_n, mask=block_n < size, other=0) ns = start_n * BLOCK_SIZE + lane_n # load X mask = block_n < size a = tl.load(A + off_a + lane_n, mask=mask, other=-float("inf")) a = a.to(tl.float32) # compute out = a out *= scale # apply relative attention if R is not None: R += z * stride_zr R += h * stride_hr off_lo = (extent - m - 1) + ns mask_lo = (off_lo >= 0) & (off_lo < extent) rel_logits = tl.load(R + m * extent + off_lo, mask=mask_lo, other=0.0) out += rel_logits out = out.to(tl.float32) # apply causal mask out = tl.where((ns > m) & is_causal, -float("inf"), out) # computation out = tl.softmax(out) # write-back tl.store(Out + off_a + lane_n, out, mask=mask) @jit def _blocksparse_softmax_bwd( DA, stride_zdx, DOut, stride_zdout, Out, stride_zout, scale, LUT, DR, extent, stride_zr, stride_hr, stride_er, is_causal, ROW_SIZE: tl.constexpr, BLOCK_SIZE: tl.constexpr, IS_DENSE: tl.constexpr, ): h = tl.program_id(0) m = tl.program_id(1) z = tl.program_id(2) # create index ranges hm = h * tl.num_programs(1) + m lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE # extract information from LUT header = LUT + (hm // BLOCK_SIZE) * 2 size = tl.load(header + 0) offset = tl.load(header + 1) # row-col offset off_mn = (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE off_mn += (m % BLOCK_SIZE) * BLOCK_SIZE mask = block_n < size # pointers As = Out + z * stride_zout + off_mn DOuts = DOut + z * stride_zdout + off_mn # do not need to read column indices in the dense case if IS_DENSE: ns = tl.arange(0, ROW_SIZE) else: off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE start_n = tl.load(LUT + off_lut + block_n, mask=mask, other=0) ns = start_n * BLOCK_SIZE + lane_n # load data a = tl.load(As + lane_n, mask=mask, other=0.0) a = a.to(tl.float32) dout = tl.load(DOuts + lane_n, mask=mask, other=0.0) dout = dout.to(tl.float32) # compute a = tl.where((ns > m) & is_causal & (a == a), 0., a) da = a * (dout - tl.sum(a * dout, 0)) # apply relative attention if DR is not None: DR += z * stride_zr DR += h * stride_hr off_lo = (extent - m - 1) + ns mask_lo = (off_lo >= 0) & (off_lo < extent) & mask tl.store(DR + m * extent + off_lo, da, mask=mask_lo) da = da * scale # convert da # write-back DAs = DA + z * stride_zdx + off_mn tl.store(DAs + lane_n, da, mask=mask) class _softmax(torch.autograd.Function): @staticmethod def make_lut(layout, block, device): _empty = 
torch.tensor([], dtype=torch.int64, device=layout.device) sizes = _empty.clone() # sizes along rows for h in range(layout.shape[0]): sizes = torch.cat((sizes, layout[h, :, :].sum(-1))) total_sizes = sizes * block # offsets in block format offsets = torch.zeros_like(sizes) offsets[1:] = torch.cumsum(sizes[:-1], dim=0) # block indices columns = layout.nonzero(as_tuple=False)[:, 2] header = torch.stack((sizes, offsets), dim=1).view(-1) lut = torch.cat((header, columns)).type(torch.int32).to(device) return lut, int(total_sizes.max()) @staticmethod def forward( ctx, a, scale, rel_logits, is_causal, spdims, block, lut, maxlut, is_dense ): if scale is not None and isinstance(scale, torch.Tensor): assert scale.device.type == "cpu" scale = scale.item() M = a.shape[0] grid = [spdims[0], spdims[1] * block, M] rel_shape = (1, 1, 1, 1) if rel_logits is None else rel_logits.shape rel_strides = (1, 1, 1, 1) if rel_logits is None else rel_logits.stride() # enqueue kernel out = torch.empty_like(a) _blocksparse_softmax_fwd[grid]( out, a, a.stride(0), lut, rel_logits, rel_shape[-1], rel_strides[0], rel_strides[1], # relative attn scale, is_causal, BLOCK_SIZE=block, ROW_SIZE=next_power_of_2(maxlut), IS_DENSE=is_dense, num_warps=num_warps(maxlut) ) # save to context # ctx.mark_dirty(x) ctx.save_for_backward(out, lut) ctx.spdims = spdims ctx.block = block ctx.maxlut = maxlut ctx.scale = scale ctx.rel_shape = rel_shape ctx.rel_strides = rel_strides ctx.rel_dtype = a.dtype ctx.is_dense = is_dense ctx.is_causal = is_causal return out @staticmethod def backward(ctx, dout): # retrieve from context out, lut = ctx.saved_tensors # relative logits gradients dr = None if ctx.needs_input_grad[3]: dr = torch.zeros(ctx.rel_shape, dtype=ctx.rel_dtype, device=out.device) # run kernel M = out.shape[0] grid = (ctx.spdims[0], ctx.spdims[1] * ctx.block, M) da = torch.empty_like(dout) _blocksparse_softmax_bwd[grid]( da, da.stride(0), dout, dout.stride(0), out, out.stride(0), ctx.scale, lut, dr, ctx.rel_shape[-1], ctx.rel_strides[0], ctx.rel_strides[1], ctx.rel_strides[2], ctx.is_causal, BLOCK_SIZE=ctx.block, ROW_SIZE=next_power_of_2(ctx.maxlut), IS_DENSE=ctx.is_dense, num_warps=num_warps(ctx.maxlut) ) return (da, None, None, dr, None, None, None, None, None, None, None, None, None, None, None, None, None, None ) class softmax: def __init__(self, layout, block, device, is_dense=False): self.spdims = layout.shape self.layout = layout self.block = block self.lut, self.maxlut = _softmax.make_lut(self.layout, self.block, device) self.is_dense = is_dense def __call__(self, a, *, scale=1.0, rel_logits=None, is_causal=False): if rel_logits is not None and rel_logits.dtype != a.dtype: raise ValueError(f"relative position embedding must be {a.dtype}") a = _softmax.apply( a, scale, rel_logits, is_causal, self.spdims, self.block, self.lut, self.maxlut, self.is_dense, ) return a
7,905
31.804979
94
py
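A hedged usage sketch for the `softmax` wrapper above; the layout and sizes are illustrative. The input is expected in block-compressed form, shape (batch, nnz_blocks, block, block), where nnz_blocks equals `layout.sum()`:

import torch
from triton.ops.blocksparse import softmax

block, H, n_blk = 16, 2, 4
# per-head block-sparsity pattern; lower-triangular here for a causal mask
layout = torch.tril(torch.ones(H, n_blk, n_blk, dtype=torch.int64))
sparse_softmax = softmax(layout, block, device="cuda")
a = torch.randn(8, int(layout.sum()), block, block, device="cuda", dtype=torch.float16)
p = sparse_softmax(a, scale=0.125, is_causal=True)  # same block-compressed shape as a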
triton
triton-main/python/triton/ops/blocksparse/matmul.py
import torch from ... import cdiv, heuristics, jit from ... import language as tl # ******************************************************** # -------------------------------------------------------- # Sparse = Dense x Dense (SDD) # This operation uses super-blocking to make sure that # it's done efficiently when small blocks can be grouped # together # -------------------------------------------------------- # ******************************************************** @heuristics({ 'EVEN_K': lambda nargs: nargs['K'] % nargs['TILE_K'] == 0, }) @jit def _sdd_kernel( A, B, C, stride_za, stride_ha, stride_ma, stride_ak, stride_zb, stride_hb, stride_bk, stride_nb, stride_zc, stride_hc, stride_mc, stride_nc, K, grid_offset, lut, TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr, BLOCK: tl.constexpr, EVEN_K: tl.constexpr ): # ------------ # # - Prologue - # # ------------ # block_id = tl.program_id(0) + grid_offset lut += block_id * 3 # offsets off_z = tl.program_id(2) # batch off_h = tl.load(lut + 0) # head # initialize pointers to A start_am = tl.load(lut + 1) offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK) offs_ak = tl.arange(0, TILE_K) a_ptrs = A \ + off_z * stride_za \ + off_h * stride_ha \ + offs_am[:, None] * stride_ma \ + offs_ak[None, :] * stride_ak # initialize pointers to B start_bn = tl.load(lut + 2) offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK) offs_bk = tl.arange(0, TILE_K) b_ptrs = B \ + off_z * stride_zb \ + off_h * stride_hb \ + offs_bn[None, :] * stride_nb \ + offs_bk[:, None] * stride_bk # ---------------- # # Inner Loop # # ---------------- # acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32) for k in range(K, 0, -TILE_K): if EVEN_K: a = tl.load(a_ptrs) b = tl.load(b_ptrs) else: a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.) b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.) 
acc += tl.dot(a, b, out_dtype=tl.float32) a_ptrs += TILE_K * stride_ak b_ptrs += TILE_K * stride_bk c = acc.to(C.dtype.element_ty) # ---------------- # # Epilogue # # ---------------- # offs_cm = tl.arange(0, TILE_M) % BLOCK offs_cn = tl.arange(0, TILE_N) % BLOCK pc = C \ + off_z * stride_zc \ + block_id * stride_hc \ + offs_cm[:, None] * stride_mc \ + offs_cn[None, :] * stride_nc tl.store(pc, c, mask=True) def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, widths, out=None): if a.stride(2) != 1 and a.stride(3) != 1: a = a.contiguous() if b.stride(2) != 1 and b.stride(3) != 1: b = b.contiguous() # (A * B)^T = B^T * A^T if trans_c: a, b = b, a trans_a, trans_b = not trans_b, not trans_a # shape constraints a_dim = -2 if trans_a else -1 b_dim = -1 if trans_b else -2 Ka, Kb = a.shape[a_dim], b.shape[b_dim] if Ka != Kb: raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})") # allocate output if out is None: c = torch.empty((a.shape[0], lut.shape[0], block, block), dtype=a.dtype, device=a.device) else: assert out.shape == (a.shape[0], lut.shape[0], block, block) c = out grid = [c.shape[1], 1, c.shape[0]] _sdd_kernel[grid]( a, b, c, a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3), b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3), c.stride(0), c.stride(1), c.stride(2), c.stride(3), Ka, 0, lut, TILE_M=block, TILE_N=block, TILE_K=32, BLOCK=block, num_stages=4, num_warps=4, ) return c def sdd_lut(layout, block, device): lut = layout.nonzero(as_tuple=False).to(device).int() lut = lut.contiguous() return lut, None # ----------------------------- # Dense = Sparse x Dense (DSD) # This operation uses a look-up table that contains pre-computed pointer increments # in order to minimize computations in the inner loop of the matmul kernel. 
# ----------------------------- @jit def _dsd_kernel( A, B, C, stride_az, stride_ha, stride_am, stride_ak, stride_zb, stride_hb, stride_bk, stride_bn, stride_zc, stride_hc, stride_cm, stride_cn, DS0, DS1, lut, TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, BLOCK: tl.constexpr ): # ------------ # # - Prologue - # # ------------ # pid_m = tl.program_id(0) pid_n = tl.program_id(1) num_pid_m = tl.num_programs(0) num_pid_n = tl.num_programs(1) pid_n, pid_m = tl.swizzle2d(pid_n, pid_m, num_pid_n, num_pid_m, GROUP_SIZE_M) pidz = tl.program_id(2) header = lut + pid_n * 4 offset = tl.load(header + 0) K = tl.load(header + 1) column = tl.load(header + 2) off_h = tl.load(header + 3) pinc = lut + offset # initialize pointers to A (sparse) block_id = tl.load(pinc + 1) block_id = tl.multiple_of(block_id, 8) # compiler hint offs_am = tl.arange(0, TILE_M) offs_ak = tl.arange(0, TILE_K) pa = A + pidz * stride_az \ + block_id * stride_ha \ + offs_am[:, None] * stride_am \ + offs_ak[None, :] * stride_ak # initialize pointers to B (dense) offs_bn = pid_m * TILE_N + tl.arange(0, TILE_N) offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn % DS0, TILE_N), TILE_N) start_bk = tl.load(pinc) start_bk = tl.multiple_of(start_bk, 8) # compiler hint offs_bk = start_bk + tl.arange(0, TILE_K) pb = B + pidz * stride_zb \ + off_h * stride_hb \ + offs_bn[None, :] * stride_bn \ + offs_bk[:, None] * stride_bk # ---------------- # # Inner Loop # # ---------------- # acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32) pinc += 2 inc_a = tl.load(pinc + 1) inc_a = tl.multiple_of(inc_a, 8) inc_b = tl.load(pinc) inc_b = tl.multiple_of(inc_b, 8) for k in range(K, 0, -TILE_K): a = tl.load(pa) b = tl.load(pb) acc += tl.dot(a, b, out_dtype=tl.float32) pa += inc_a pb += inc_b * stride_bk pinc += 2 inc_a = tl.load(pinc + 1) inc_a = tl.multiple_of(inc_a, 8) inc_b = tl.load(pinc) inc_b = tl.multiple_of(inc_b, 8) c = acc.to(C.dtype.element_ty) # initialize pointers to C offs_cm = column * TILE_M + tl.arange(0, TILE_M) offs_cn = pid_m * TILE_N + tl.arange(0, TILE_N) pc = C \ + off_h * stride_hc \ + pidz * stride_zc \ + offs_cm[:, None] * stride_cm \ + offs_cn[None, :] * stride_cn tl.store(pc, c, mask=offs_cn[None, :] < DS0) def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None): if a.stride(2) != 1 and a.stride(3) != 1: a = a.contiguous() if b.stride(2) != 1 and b.stride(3) != 1: b = b.contiguous() # shapes / dtypes AS1 = block * spdims[2 if trans_a else 1] BS0 = b.size(0) BS1 = b.size(1) BS3 = b.size(2 if trans_b else 3) dtype = a.dtype # allocate output CS0 = BS0 CS1 = BS1 CS2 = BS3 if trans_c else AS1 CS3 = AS1 if trans_c else BS3 if out is None: c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device) else: assert out.shape == (CS0, CS1, CS2, CS3) c = out # meta-parameter heuristics TILE_N = 128 # compute output grid = lambda meta: [cdiv(BS3, meta['TILE_N']), width, BS0] _dsd_kernel[grid]( a, b, c, a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3), b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3), c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3), BS3, AS1, lut, TILE_M=block, TILE_N=TILE_N, TILE_K=min(block, 32), BLOCK=block, num_stages=4, num_warps=4, GROUP_SIZE_M=4, ) # exit() return c def dsd_lut(layout, block, step, trans, device): """ Generates the look-up table for incrementing pointers in the DSD/DDS matmul. 
Example (BLOCK=32, STEP=16) [[1, 0, 0, 1, 0], [0, 1, 1, 0, 1], [1, 0, 1, 0, 0]] Then the offsets for A are [0 , 16, 32, 48] <- row 0 \\----/ \\----/ col=0 col=3 [64, 80, 96, 112, 128, 144] <- row 1 \\----/ \\----/ \\------/ col=1 col=2 col=3 [160, 176, 192, 208] which leads to increments table [0, 16, 16, 16, || 64, 16, 16, 16, 16, 16, || 160, 16, 16, 16] Because B is dense, the offsets are [0, 16, 96, 112] <- row 0 [32, 48, 64, 80] <- row 1 [0, 16, 64, 80] <- row 2 """ sizes = torch.sum(layout, 2 if trans else 1) head_id, col_id = torch.ones_like(sizes).nonzero(as_tuple=True) sizes = sizes.flatten() segments = sizes * step # pointer increments if trans: nnz = layout.nonzero(as_tuple=False) else: nnz = layout.transpose(1, 2).nonzero(as_tuple=False) num_blocks = nnz.size(0) offsets = torch.zeros_like(sizes) offsets[1:] = torch.cumsum(sizes[:-1], dim=0) offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets)) # ------------------------------- # dense input pointer increments # ------------------------------- # Note that the inner loop matmul kernel may have a fixed step size (e.g., TILE_K) # that is smaller than the block size, so we need to do a bit of extra work # to handle this case B_idx = nnz[:, 2] * block B_incs = B_idx.clone() B_incs[1:] -= B_idx[:-1] div = block // step B_incs = B_incs.view(-1, 1).repeat(1, div) B_incs[:, 1:] = step B_incs[:, 0] -= (div - 1) * step # first increment for each reduction is actually the offset B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]] B_incs = B_incs.view(-1) # ------------------------------- # sparse input pointer increments # ------------------------------- # same as above, except that the increments are in the sparse memory layout if trans: A_idx = torch.arange(num_blocks, device=layout.device) else: A_idx = torch.tensor([], dtype=torch.int64, device=layout.device) current_offset = 0 for z in range(layout.size(0)): layoutw = layout[z, :, :].clone().long() msum = layoutw.sum() layoutw[layoutw > 0] = 1 + torch.arange(msum, device=layout.device) A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1)) current_offset += msum A_incs = A_idx * block * block A_incs[1:] -= A_idx[:-1] * block * block A_incs = A_incs.view(-1, 1).repeat(1, div) if trans: A_incs[:, 1:] = step A_incs[:, 0] -= (div - 1) * step else: A_incs[:, 1:] = step * block A_incs[:, 0] -= (div - 1) * step * block A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]] A_incs = A_incs.view(-1) # create header width = col_id.size(0) offsets = offsets * 2 * div + 4 * width segments = segments * div header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous() # create increments incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous() # pad by a factor 2*MAX_NUM_STAGES # to accommodate pre-fetching inside the kernel pad = torch.zeros(20, device=incs.device, dtype=incs.dtype) incs = torch.cat((incs, pad)) # create lut lut = torch.cat((header, incs)) lut = lut.type(torch.int32).to(device) # create locks return lut, width # ----------------------------- # Dense = Dense x Sparse (DDS) # ----------------------------- # AB = (B^T A^T)^T def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None): return dsd_matmul(b, a, not trans_b, not trans_a, not trans_c, spdims, block, lut, width, out=out) ############## # MAIN API # ############## class _matmul(torch.autograd.Function): fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul} @staticmethod def forward( ctx, a, b, 
trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_width, da_lut, da_width, db_lut, db_width, out ): c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_width, out=out) # save for backward ctx.save_for_backward(a, b) ctx.da_lut = da_lut ctx.da_width = da_width ctx.db_lut = db_lut ctx.db_width = db_width ctx.mode = mode ctx.spdims = spdims ctx.block = block ctx.trans_a = trans_a ctx.trans_b = trans_b ctx.trans_c = trans_c ctx.has_out = out is not None return c @staticmethod def backward(ctx, dc): # saved for backward a, b = ctx.saved_tensors da, db = None, None mode = ctx.mode # gradients w.r.t. a if ctx.needs_input_grad[0]: mode_da = mode[1] + mode[0] + mode[2] da = _matmul.fn[mode_da]( dc, b, ctx.trans_c, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_width, ) # gradients w.r.t. b if ctx.needs_input_grad[1]: mode_db = mode[2] + mode[1] + mode[0] db = _matmul.fn[mode_db]( a, dc, not ctx.trans_a, ctx.trans_c, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_width, ) dout = dc if ctx.has_out else None return da, db, None, None, None,\ None, None, None, None,\ None, None, None, None, None, dout class matmul: def __init__(self, layout, block, mode, device, trans_a=False, trans_b=False, trans_c=False): if mode not in ['sdd', 'dsd', 'dds']: raise NotImplementedError('Supported modes are: sdd, dsd, dds') self.block = block self.mode = mode self.trans_a = trans_a self.trans_b = trans_b self.trans_c = trans_c self.layout = layout self.spdims = layout.shape step = min(block, 32) if self.mode == 'sdd': self.c_lut, self.c_width = sdd_lut(layout, block, device) self.da_lut, self.da_width = dsd_lut(layout, block, step, True, device) self.db_lut, self.db_width = dsd_lut(layout, block, step, False, device) if self.mode == 'dsd': self.c_lut, self.c_width = dsd_lut(layout, block, step, not self.trans_a, device) self.da_lut, self.da_width = sdd_lut(layout, block, device) self.db_lut, self.db_width = dsd_lut(layout, block, step, self.trans_a, device) if self.mode == 'dds': self.c_lut, self.c_width = dsd_lut(layout, block, step, self.trans_b, device) self.da_lut, self.da_width = dsd_lut(layout, block, step, not self.trans_b, device) self.db_lut, self.db_width = sdd_lut(layout, block, device) def __call__(self, a, b, out=None): c = _matmul.apply( a, b, self.trans_a, self.trans_b, self.trans_c, self.mode, self.spdims, self.block, self.c_lut, self.c_width, self.da_lut, self.da_width, self.db_lut, self.db_width, out ) return c
15,615
34.652968
114
py
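A hedged sketch of the 'sdd' mode above, the attention-scores pattern (sparse output from two dense inputs); the layout and shapes are illustrative, and `trans_b=True` makes the kernel compute q @ k^T:

import torch
from triton.ops.blocksparse import matmul

block, H, n_blk, d = 16, 2, 4, 64
layout = torch.tril(torch.ones(H, n_blk, n_blk, dtype=torch.int64))
sdd = matmul(layout, block, 'sdd', device="cuda", trans_a=False, trans_b=True)
q = torch.randn(8, H, n_blk * block, d, device="cuda", dtype=torch.float16)
k = torch.randn_like(q)
scores = sdd(q, k)  # block-compressed output: (8, layout.sum(), block, block)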
triton
triton-main/python/triton/ops/blocksparse/__init__.py
from .matmul import matmul
from .softmax import softmax

__all__ = [
    "matmul",
    "softmax",
]
100
11.625
28
py
triton
triton-main/test/lit.cfg.py
# -*- Python -*- import os import platform import re import subprocess import tempfile import lit.formats import lit.util from lit.llvm import llvm_config from lit.llvm.subst import FindTool, ToolSubst # Configuration file for the 'lit' test runner # name: The name of this test suite config.name = 'TRITON' config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) # suffixes: A list of file extensions to treat as test files. config.suffixes = ['.mlir'] # test_source_root: The root path where tests are located. config.test_source_root = os.path.dirname(__file__) # test_exec_root: The root path where tests should be run. config.test_exec_root = os.path.join(config.triton_obj_root, 'test') config.substitutions.append(('%PATH%', config.environment['PATH'])) config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) llvm_config.with_system_environment( ['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP']) # llvm_config.use_default_substitutions() # excludes: A list of directories to exclude from the testsuite. The 'Inputs' # subdirectories contain auxiliary inputs for various tests in their parent # directories. config.excludes = [ 'Inputs', 'Examples', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt'] # test_source_root: The root path where tests are located. config.test_source_root = os.path.dirname(__file__) # test_exec_root: The root path where tests should be run. config.test_exec_root = os.path.join(config.triton_obj_root, 'test') config.triton_tools_dir = os.path.join(config.triton_obj_root, 'bin') config.filecheck_dir = os.path.join(config.triton_obj_root, 'bin', 'FileCheck') tool_dirs = [ config.triton_tools_dir, config.llvm_tools_dir, config.filecheck_dir] # Tweak the PATH to include the tools dir. for d in tool_dirs: llvm_config.with_environment('PATH', d, append_path=True) tools = [ 'triton-opt', ToolSubst('%PYTHON', config.python_executable, unresolved='ignore'), ] llvm_config.add_tool_substitutions(tools, tool_dirs) # TODO: what's this? llvm_config.with_environment('PYTHONPATH', [ os.path.join(config.mlir_binary_dir, 'python_packages', 'triton'), ], append_path=True)
2,187
28.567568
79
py
triton
triton-main/docs/conf.py
# -*- coding: utf-8 -*- # # Triton documentation build configuration file, created by # sphinx-quickstart on Mon Feb 10 01:19:09 2020. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ import os import sys import sphinx_rtd_theme from sphinx_gallery.sorting import FileNameSortKey def process_sig(app, what, name, obj, options, signature, return_annotation): if signature and '_builder' in signature: signature = signature.split('_builder')[0] + ")" return (signature, return_annotation) def setup(app): """Customize function args retrieving to get args under decorator.""" import os import sphinx app.connect("autodoc-process-signature", process_sig) os.system("pip install -e ../python") def forward_jit_fn(func): old = func def wrapped(obj, **kwargs): import triton if isinstance(obj, triton.runtime.JITFunction): obj = obj.fn return old(obj) return wrapped old_documenter = sphinx.ext.autosummary.get_documenter def documenter(app, obj, parent): import triton if isinstance(obj, triton.runtime.JITFunction): obj = obj.fn return old_documenter(app, obj, parent) sphinx.ext.autosummary.get_documenter = documenter sphinx.util.inspect.unwrap_all = forward_jit_fn( sphinx.util.inspect.unwrap_all) sphinx.util.inspect.signature = forward_jit_fn( sphinx.util.inspect.signature) sphinx.util.inspect.object_description = forward_jit_fn( sphinx.util.inspect.object_description) # Auto Doc sys.path.insert(0, os.path.abspath('../python/')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.autosummary', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx_multiversion'] autosummary_generate = True # versioning config smv_tag_whitelist = r'^(v2.1.0)$' smv_branch_whitelist = r'^main$' smv_remote_whitelist = None smv_released_pattern = r'^tags/.*$' smv_outputdir_format = '{ref.name}' smv_prefer_remote_refs = False # Sphinx gallery extensions += ['sphinx_gallery.gen_gallery'] sphinx_gallery_conf = { 'examples_dirs': '../python/tutorials/', 'gallery_dirs': 'getting-started/tutorials', 'filename_pattern': '', # XXX: Temporarily disable fused attention tutorial on V100 'ignore_pattern': r'__init__\.py', 'within_subsection_order': FileNameSortKey, 'reference_url': { 'sphinx_gallery': None, } } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] html_sidebars = { '**': [ '_templates/versions.html', ], } # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Triton' copyright = '2020, Philippe Tillet' author = 'Philippe Tillet' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = [ 'css/custom.css', ] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'Tritondoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Triton.tex', 'Triton Documentation', 'Philippe Tillet', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, 'triton', 'Triton Documentation', [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Triton', 'Triton Documentation', author, 'Triton', 'One line description of project.', 'Miscellaneous'), ]
7,261
29.008264
84
py
G-PATE
G-PATE-master/main.py
import os
import scipy.misc
import numpy as np
from model import DCGAN
from utils import pp, visualize, to_json, show_all_variables, mkdir
import tensorflow as tf
import argparse
from gen_data import batch2str
import sys
import pickle

# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

flags = tf.app.flags
flags.DEFINE_integer("epoch", 1000, "Epochs for training the teacher models")
flags.DEFINE_integer("g_epoch", 500, "Epochs for training the student models")
flags.DEFINE_float("learning_rate", 1e-3, "Learning rate for adam")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 30, "The size of batch images [64]")
flags.DEFINE_integer("input_height", 32, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", 32, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", 32, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", 32, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "slt", "The name of dataset [cinic, celebA, mnist, lsun, fire-small]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("checkpoint_name", "checkpoint", "checkpoint model name [checkpoint]")
flags.DEFINE_string("data_dir", "../../data", "Root directory of dataset [data]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("pretrain", True, "True for loading the pretrained models, False for not load [True]")
flags.DEFINE_boolean("load_d", True, "True for loading the pretrained models w/ discriminator, False for not load [True]")
flags.DEFINE_boolean("crop", False, "True for cropping")
flags.DEFINE_integer("orders", 200, "RDP orders")
flags.DEFINE_integer("proj_mat", 1, "Number of projection matrices")
flags.DEFINE_integer("z_dim", 100, "Dimension of z")
flags.DEFINE_integer("y_dim", 10, "Dimension of y")
flags.DEFINE_boolean("tanh", False, "Use tanh as activation func")
flags.DEFINE_boolean("random_proj", True, "Apply random projection for gradient aggregation")
flags.DEFINE_boolean("pca", False, "Apply pca for gradient aggregation")
flags.DEFINE_boolean("non_private", False, "Do not apply differential privacy")
flags.DEFINE_boolean("increasing_dim", False, "Increase the projection dimension for each epoch")
flags.DEFINE_boolean("wgan", False, "Train wgan")
flags.DEFINE_boolean("small", False, "Use a smaller discriminator")
flags.DEFINE_float("sigma", 2000.0, "Scale of gaussian noise for gradient aggregation")
flags.DEFINE_float("sigma_thresh", 4500.0, "Scale of gaussian noise for thresh gnmax")
flags.DEFINE_float("pca_sigma", 1.0, "Scale of gaussian noise for dp pca")
flags.DEFINE_float("step_size", 1e-4, "Step size for gradient aggregation")
flags.DEFINE_float("delta", 1e-5, "delta for differential privacy")
flags.DEFINE_integer("g_step", 1, "steps of the generator")
flags.DEFINE_integer("d_step", 1, "steps of the discriminator")
flags.DEFINE_integer("pca_dim", 10, "principal dimensions for pca")
flags.DEFINE_float("thresh", 0.5, "threshold for thresh-GNMax")
flags.DEFINE_float("max_eps", 1, "maximum epsilon")
flags.DEFINE_boolean("random_label", False, "random labels for training data, only used when pretraining some models")
flags.DEFINE_boolean("shuffle", False, "Evenly distribute dataset")
flags.DEFINE_boolean("save_epoch", True, "Save each epoch per 0.1 eps")
# new flags
flags.DEFINE_integer("batch_teachers", 1, "Number of teacher models in one batch")
flags.DEFINE_integer("teachers_batch", 1, "Number of teacher batches")
flags.DEFINE_string("teacher_dir", "teacher", "Directory name to save the teacher [teacher]")
flags.DEFINE_string("generator_dir", "generator", "Directory name to save the generator")
flags.DEFINE_integer("sample_step", 10, "Number of steps between image sampling")
FLAGS = flags.FLAGS


def main(_):
    pp.pprint(flags.FLAGS.flag_values_dict())

    # if FLAGS.input_width is None:
    #     FLAGS.input_width = FLAGS.input_height
    # if FLAGS.output_width is None:
    #     FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    if FLAGS.thresh == 0:
        thresh = None
    else:
        thresh = FLAGS.thresh

    if FLAGS.wgan:
        FLAGS.learning_rate = 5e-5
        FLAGS.step_size = 5e-4

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(
            sess,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            y_dim=FLAGS.y_dim,
            z_dim=FLAGS.z_dim,
            dataset_name=FLAGS.dataset,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            data_dir=FLAGS.data_dir,
            # parameters to tune
            batch_teachers=FLAGS.batch_teachers,
            pca=FLAGS.pca,
            random_proj=FLAGS.random_proj,
            thresh=thresh,
            dp_delta=FLAGS.delta,
            pca_dim=FLAGS.pca_dim,
            teachers_batch=FLAGS.teachers_batch,
            teacher_dir=os.path.join(FLAGS.checkpoint_dir, FLAGS.teacher_dir),
            generator_dir=FLAGS.generator_dir,
            non_private=FLAGS.non_private,
            input_height=FLAGS.input_height,
            input_width=FLAGS.input_width,
            output_height=FLAGS.output_height,
            output_width=FLAGS.output_width,
            wgan=FLAGS.wgan,
            small=FLAGS.small,
            config=FLAGS
        )

        show_all_variables()

        if FLAGS.train:
            epsilon, delta = dcgan.train_together(FLAGS)
            filename = '%.2fepsilon-%.2fdelta.data' % (epsilon, delta)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir, FLAGS.checkpoint_name)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            filename = 'private.data'

        outpath = os.path.join(FLAGS.checkpoint_dir, FLAGS.sample_dir)
        if not os.path.exists(outpath):
            os.makedirs(outpath)
        outfile = os.path.join(outpath, filename)

        n_batch = 100000 // FLAGS.batch_size + 1
        data = dcgan.gen_data(n_batch)
        data = data[:100000]
        import joblib
        joblib.dump(data, outfile)


if __name__ == '__main__':
    tf.app.run()
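# A hypothetical invocation of this trainer (all values below are
# illustrative, not recommended settings from the repo). Depending on the
# TensorFlow 1.x version, booleans are passed as `--train True` or as
# `--train`/`--notrain`:
#
#   python main.py --dataset mnist --train True \
#       --batch_teachers 20 --teachers_batch 2 \
#       --sigma 2000 --sigma_thresh 4500 --max_eps 1 --delta 1e-5 \
#       --checkpoint_dir checkpoint/mnist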
6,951
43.564103
118
py
G-PATE
G-PATE-master/pate_core.py
# core method from 'Scalable Private Learning with PATE' # Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Core functions for RDP analysis in PATE framework. This library comprises the core functions for doing differentially private analysis of the PATE architecture and its various Noisy Max and other mechanisms. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl import app import numpy as np import scipy.stats def _logaddexp(x): """Addition in the log space. Analogue of numpy.logaddexp for a list.""" m = max(x) return m + math.log(sum(np.exp(x - m))) def _log1mexp(x): """Numerically stable computation of log(1-exp(x)).""" if x < -1: return math.log1p(-math.exp(x)) elif x < 0: return math.log(-math.expm1(x)) elif x == 0: return -np.inf else: raise ValueError("Argument must be non-positive.") def compute_eps_from_delta(orders, rdp, delta): """Translates between RDP and (eps, delta)-DP. Args: orders: A list (or a scalar) of orders. rdp: A list of RDP guarantees (of the same length as orders). delta: Target delta. Returns: Pair of (eps, optimal_order). Raises: ValueError: If input is malformed. """ if len(orders) != len(rdp): raise ValueError("Input lists must have the same length.") eps = np.array(rdp) - math.log(delta) / (np.array(orders) - 1) idx_opt = np.argmin(eps) return eps[idx_opt], orders[idx_opt] ##################### # RDP FOR THE GNMAX # ##################### def compute_logq_gaussian(counts, sigma): """Returns an upper bound on ln Pr[outcome != argmax] for GNMax. Implementation of Proposition 7. Args: counts: A numpy array of scores. sigma: The standard deviation of the Gaussian noise in the GNMax mechanism. Returns: logq: Natural log of the probability that outcome is different from argmax. """ n = len(counts) variance = sigma**2 idx_max = np.argmax(counts) counts_normalized = counts[idx_max] - counts counts_rest = counts_normalized[np.arange(n) != idx_max] # exclude one index # Upper bound q via a union bound rather than a more precise calculation. logq = _logaddexp( scipy.stats.norm.logsf(counts_rest, scale=math.sqrt(2 * variance))) # A sketch of a more accurate estimate, which is currently disabled for two # reasons: # 1. Numerical instability; # 2. Not covered by smooth sensitivity analysis. # covariance = variance * (np.ones((n - 1, n - 1)) + np.identity(n - 1)) # logq = np.log1p(-statsmodels.sandbox.distributions.extras.mvnormcdf( # counts_rest, np.zeros(n - 1), covariance, maxpts=1e4)) return min(logq, math.log(1 - (1 / n))) def rdp_data_independent_gaussian(sigma, orders): """Computes a data-independent RDP curve for GNMax. Implementation of Proposition 8. Args: sigma: Standard deviation of Gaussian noise. orders: An array_like list of Renyi orders. Returns: Upper bound on RPD for all orders. A scalar if orders is a scalar. 
Raises: ValueError: If the input is malformed. """ if sigma < 0 or np.any(orders <= 1): # not defined for alpha=1 raise ValueError("Inputs are malformed.") variance = sigma**2 if np.isscalar(orders): return orders / variance else: return np.atleast_1d(orders) / variance def rdp_gaussian(logq, sigma, orders): """Bounds RDP from above of GNMax given an upper bound on q (Theorem 6). Args: logq: Natural logarithm of the probability of a non-argmax outcome. sigma: Standard deviation of Gaussian noise. orders: An array_like list of Renyi orders. Returns: Upper bound on RPD for all orders. A scalar if orders is a scalar. Raises: ValueError: If the input is malformed. """ if logq > 0 or sigma < 0 or np.any(orders <= 1): # not defined for alpha=1 raise ValueError("Inputs are malformed.") if np.isneginf(logq): # If the mechanism's output is fixed, it has 0-DP. if np.isscalar(orders): return 0. else: return np.full_like(orders, 0., dtype=np.float) variance = sigma**2 # Use two different higher orders: mu_hi1 and mu_hi2 computed according to # Proposition 10. mu_hi2 = math.sqrt(variance * -logq) mu_hi1 = mu_hi2 + 1 orders_vec = np.atleast_1d(orders) ret = orders_vec / variance # baseline: data-independent bound # Filter out entries where data-dependent bound does not apply. mask = np.logical_and(mu_hi1 > orders_vec, mu_hi2 > 1) rdp_hi1 = mu_hi1 / variance rdp_hi2 = mu_hi2 / variance log_a2 = (mu_hi2 - 1) * rdp_hi2 # Make sure q is in the increasing wrt q range and A is positive. if (np.any(mask) and logq <= log_a2 - mu_hi2 * (math.log(1 + 1 / (mu_hi1 - 1)) + math.log(1 + 1 / (mu_hi2 - 1))) and -logq > rdp_hi2): # Use log1p(x) = log(1 + x) to avoid catastrophic cancellations when x ~ 0. log1q = _log1mexp(logq) # log1q = log(1-q) log_a = (orders - 1) * ( log1q - _log1mexp((logq + rdp_hi2) * (1 - 1 / mu_hi2))) log_b = (orders - 1) * (rdp_hi1 - logq / (mu_hi1 - 1)) # Use logaddexp(x, y) = log(e^x + e^y) to avoid overflow for large x, y. log_s = np.logaddexp(log1q + log_a, logq + log_b) ret[mask] = np.minimum(ret, log_s / (orders - 1))[mask] assert np.all(ret >= 0) if np.isscalar(orders): return np.asscalar(ret) else: return ret def is_data_independent_always_opt_gaussian(num_teachers, num_classes, sigma, orders): """Tests whether data-ind bound is always optimal for GNMax. Args: num_teachers: Number of teachers. num_classes: Number of classes. sigma: Standard deviation of the Gaussian noise. orders: An array_like list of Renyi orders. Returns: Boolean array of length |orders| (a scalar if orders is a scalar). True if the data-independent bound is always the same as the data-dependent bound. """ unanimous = np.array([num_teachers] + [0] * (num_classes - 1)) logq = compute_logq_gaussian(unanimous, sigma) rdp_dep = rdp_gaussian(logq, sigma, orders) rdp_ind = rdp_data_independent_gaussian(sigma, orders) return np.isclose(rdp_dep, rdp_ind) ################################### # RDP FOR THE THRESHOLD MECHANISM # ################################### def compute_logpr_answered(t, sigma, counts): """Computes log of the probability that a noisy threshold is crossed. Args: t: The threshold. sigma: The stdev of the Gaussian noise added to the threshold. counts: An array of votes. Returns: Natural log of the probability that max is larger than a noisy threshold. """ # Compared to the paper, max(counts) is rounded to the nearest integer. This # is done to facilitate computation of smooth sensitivity for the case of # the interactive mechanism, where votes are not necessarily integer. 
return scipy.stats.norm.logsf(t - round(max(counts)), scale=sigma) def compute_rdp_data_independent_threshold(sigma, orders): # The input to the threshold mechanism has stability 1, compared to # GNMax, which has stability = 2. Hence the sqrt(2) factor below. return rdp_data_independent_gaussian(2**.5 * sigma, orders) def compute_rdp_threshold(log_pr_answered, sigma, orders): logq = min(log_pr_answered, _log1mexp(log_pr_answered)) # The input to the threshold mechanism has stability 1, compared to # GNMax, which has stability = 2. Hence the sqrt(2) factor below. return rdp_gaussian(logq, 2**.5 * sigma, orders) def is_data_independent_always_opt_threshold(num_teachers, threshold, sigma, orders): """Tests whether data-ind bound is always optimal for the threshold mechanism. Args: num_teachers: Number of teachers. threshold: The cut-off threshold. sigma: Standard deviation of the Gaussian noise. orders: An array_like list of Renyi orders. Returns: Boolean array of length |orders| (a scalar if orders is a scalar). True if the data-independent bound is always the same as the data-dependent bound. """ # Since the data-dependent bound depends only on max(votes), it suffices to # check whether the data-dependent bounds are better than data-independent # bounds in the extreme cases when max(votes) is minimal or maximal. # For both Confident GNMax and Interactive GNMax it holds that # 0 <= max(votes) <= num_teachers. # The upper bound is trivial in both cases. # The lower bound is trivial for Confident GNMax (and a stronger one, based on # the pigeonhole principle, is possible). # For Interactive GNMax (Algorithm 2), the lower bound follows from the # following argument. Since the votes vector is the difference between the # actual teachers' votes and the student's baseline, we need to argue that # max(n_j - M * p_j) >= 0. # The bound holds because sum_j n_j = sum M * p_j = M. Thus, # sum_j (n_j - M * p_j) = 0, and max_j (n_j - M * p_j) >= 0 as needed. logq1 = compute_logpr_answered(threshold, sigma, [0]) logq2 = compute_logpr_answered(threshold, sigma, [num_teachers]) rdp_dep1 = compute_rdp_threshold(logq1, sigma, orders) rdp_dep2 = compute_rdp_threshold(logq2, sigma, orders) rdp_ind = compute_rdp_data_independent_threshold(sigma, orders) return np.isclose(rdp_dep1, rdp_ind) and np.isclose(rdp_dep2, rdp_ind) ############################# # RDP FOR THE LAPLACE NOISE # ############################# def compute_logq_laplace(counts, lmbd): """Computes an upper bound on log Pr[outcome != argmax] for LNMax. Args: counts: A list of scores. lmbd: The lambda parameter of the Laplace distribution ~exp(-|x| / lambda). Returns: logq: Natural log of the probability that outcome is different from argmax. """ # For noisy max, we only get an upper bound via the union bound. See Lemma 4 # in https://arxiv.org/abs/1610.05755. # # Pr[ j beats i*] = (2+gap(j,i*))/ 4 exp(gap(j,i*) # proof at http://mathoverflow.net/questions/66763/ idx_max = np.argmax(counts) counts_normalized = (counts - counts[idx_max]) / lmbd counts_rest = np.array( [counts_normalized[i] for i in range(len(counts)) if i != idx_max]) logq = _logaddexp(np.log(2 - counts_rest) + math.log(.25) + counts_rest) return min(logq, math.log(1 - (1 / len(counts)))) def rdp_pure_eps(logq, pure_eps, orders): """Computes the RDP value given logq and pure privacy eps. Implementation of https://arxiv.org/abs/1610.05755, Theorem 3. The bound used is the min of three terms. The first term is from https://arxiv.org/pdf/1605.02065.pdf. 
The second term is based on the fact that when event has probability (1-q) for q close to zero, q can only change by exp(eps), which corresponds to a much smaller multiplicative change in (1-q) The third term comes directly from the privacy guarantee. Args: logq: Natural logarithm of the probability of a non-optimal outcome. pure_eps: eps parameter for DP orders: array_like list of moments to compute. Returns: Array of upper bounds on rdp (a scalar if orders is a scalar). """ orders_vec = np.atleast_1d(orders) q = math.exp(logq) log_t = np.full_like(orders_vec, np.inf) if q <= 1 / (math.exp(pure_eps) + 1): logt_one = math.log1p(-q) + ( math.log1p(-q) - _log1mexp(pure_eps + logq)) * ( orders_vec - 1) logt_two = logq + pure_eps * (orders_vec - 1) log_t = np.logaddexp(logt_one, logt_two) ret = np.minimum( np.minimum(0.5 * pure_eps * pure_eps * orders_vec, log_t / (orders_vec - 1)), pure_eps) if np.isscalar(orders): return np.asscalar(ret) else: return ret def main(argv): del argv # Unused. if __name__ == "__main__": app.run(main)
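# A minimal end-to-end sketch (not part of the original file) of the GNMax
# accounting pipeline above: one hypothetical teacher-vote histogram is turned
# into an RDP curve and then into an (eps, delta) guarantee. All numbers are
# made up for illustration.
def _demo_gnmax_accounting():
    votes = np.array([81., 5., 3., 2., 2., 2., 2., 1., 1., 1.])  # 100 teachers, 10 classes
    sigma = 40.0
    orders = np.hstack([1.5, np.arange(2, 50)])
    logq = compute_logq_gaussian(votes, sigma)      # Proposition 7 upper bound on ln q
    rdp = rdp_gaussian(logq, sigma, orders)         # per-order RDP cost of one query
    eps, opt_order = compute_eps_from_delta(orders, rdp, delta=1e-5)
    return eps, opt_order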
12,472
32.350267
87
py
G-PATE
G-PATE-master/download.py
""" Modification of https://github.com/stanfordnlp/treelstm/blob/master/scripts/download.py Downloads the following: - Celeb-A dataset - LSUN dataset - MNIST dataset """ from __future__ import print_function import os import sys import gzip import json import shutil import zipfile import argparse import requests import subprocess from tqdm import tqdm from six.moves import urllib parser = argparse.ArgumentParser(description='Download dataset for DCGAN.') parser.add_argument('datasets', metavar='N', type=str, nargs='+', choices=['celebA', 'lsun', 'mnist','fashion'], help='name of dataset to download [celebA, lsun, mnist]') def download(url, dirpath): filename = url.split('/')[-1] filepath = os.path.join(dirpath, filename) u = urllib.request.urlopen(url) f = open(filepath, 'wb') filesize = int(u.headers["Content-Length"]) print("Downloading: %s Bytes: %s" % (filename, filesize)) downloaded = 0 block_sz = 8192 status_width = 70 while True: buf = u.read(block_sz) if not buf: print('') break else: print('', end='\r') downloaded += len(buf) f.write(buf) status = (("[%-" + str(status_width + 1) + "s] %3.2f%%") % ('=' * int(float(downloaded) / filesize * status_width) + '>', downloaded * 100. / filesize)) print(status, end='') sys.stdout.flush() f.close() return filepath def download_file_from_google_drive(id, destination): URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params={ 'id': id }, stream=True) token = get_confirm_token(response) if token: params = { 'id' : id, 'confirm' : token } response = session.get(URL, params=params, stream=True) save_response_content(response, destination) def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination, chunk_size=32*1024): total_size = int(response.headers.get('content-length', 0)) with open(destination, "wb") as f: for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True, desc=destination): if chunk: # filter out keep-alive new chunks f.write(chunk) def unzip(filepath): print("Extracting: " + filepath) dirpath = os.path.dirname(filepath) with zipfile.ZipFile(filepath) as zf: zf.extractall(dirpath) os.remove(filepath) def _list_categories(tag): url = 'http://lsun.cs.princeton.edu/htbin/list.cgi?tag=' + tag f = urllib.request.urlopen(url) return json.loads(f.read()) def _download_lsun(out_dir, category, set_name, tag): url = 'http://lsun.cs.princeton.edu/htbin/download.cgi?tag={tag}' \ '&category={category}&set={set_name}'.format(**locals()) print(url) if set_name == 'test': out_name = 'test_lmdb.zip' else: out_name = '{category}_{set_name}_lmdb.zip'.format(**locals()) out_path = os.path.join(out_dir, out_name) cmd = ['curl', url, '-o', out_path] print('Downloading', category, set_name, 'set') subprocess.call(cmd) def download_lsun(dirpath): data_dir = os.path.join(dirpath, 'lsun') if os.path.exists(data_dir): print('Found LSUN - skip') return else: os.mkdir(data_dir) tag = 'latest' #categories = _list_categories(tag) categories = ['bedroom'] for category in categories: _download_lsun(data_dir, category, 'train', tag) _download_lsun(data_dir, category, 'val', tag) _download_lsun(data_dir, '', 'test', tag) def download_fashion_mnist(dirpath): data_dir = os.path.join(dirpath, 'fashion_mnist') if os.path.exists(data_dir): print('Found MNIST - skip') return else: os.mkdir(data_dir) url_base = 
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' file_names = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'] for file_name in file_names: url = (url_base+file_name).format(**locals()) print(url) out_path = os.path.join(data_dir,file_name) cmd = ['curl', url, '-o', out_path] print('Downloading ', file_name) subprocess.call(cmd) cmd = ['gzip', '-d', out_path] print('Decompressing ', file_name) subprocess.call(cmd) def download_mnist(dirpath): data_dir = os.path.join(dirpath, 'mnist') if os.path.exists(data_dir): print('Found MNIST - skip') return else: os.mkdir(data_dir) url_base = 'http://yann.lecun.com/exdb/mnist/' file_names = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'] for file_name in file_names: url = (url_base+file_name).format(**locals()) print(url) out_path = os.path.join(data_dir,file_name) cmd = ['curl', url, '-o', out_path] print('Downloading ', file_name) subprocess.call(cmd) cmd = ['gzip', '-d', out_path] print('Decompressing ', file_name) subprocess.call(cmd) def prepare_data_dir(path = './data'): if not os.path.exists(path): os.mkdir(path) if __name__ == '__main__': args = parser.parse_args() prepare_data_dir() if 'lsun' in args.datasets: download_lsun('./data') if 'mnist' in args.datasets: download_mnist('./data') if 'fashion' in args.datasets: download_fashion_mnist('./data')
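# The download helpers above all follow one pattern: stream the HTTP response
# in fixed-size chunks and write to disk as you go. A compact sketch of the
# same pattern for an arbitrary URL (not part of the original script; the
# `url` argument is a placeholder, not a real dataset mirror):
def fetch_url(url, dirpath, chunk_size=32 * 1024):
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    filepath = os.path.join(dirpath, url.split('/')[-1])
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(filepath, 'wb') as f:
            for chunk in r.iter_content(chunk_size):
                if chunk:  # filter out keep-alive chunks
                    f.write(chunk)
    return filepath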
5,501
28.902174
112
py
G-PATE
G-PATE-master/rdp_utils.py
import numpy as np import math import sys from sklearn.preprocessing import normalize from pate_core import * from numpy import linalg as LA EPS = sys.float_info.epsilon # Algorithm 1 in 'Scalable Private Learning with PATE' def gnmax_thresh_aggregator(counts, thresh_cnt, sigma_thresh, sigma, orders): log_pr_answered = compute_logpr_answered(thresh_cnt, sigma_thresh, counts) rdp_budget = compute_rdp_threshold(log_pr_answered, sigma_thresh, orders) # print("Threshold budget:" + str(rdp_budget)) if np.random.normal(np.max(counts), sigma_thresh) >= thresh_cnt: logq = compute_logq_gaussian(counts, sigma) res = np.argmax(np.random.normal(counts, sigma)) g_rdp_budget = rdp_gaussian(logq, sigma, orders) rdp_budget += g_rdp_budget else: # do not return result if teacher models do not agree res = -1 return res, rdp_budget def gnmax_aggregator(counts, sigma, orders): logq = compute_logq_gaussian(counts, sigma) dir_index = np.argmax(np.random.normal(counts, sigma)) rdp_budget = rdp_gaussian(logq, sigma, orders) return dir_index, rdp_budget def rdp_percentile(arr_list, q, orders, vmin, vmax, lmbd, axis=0): arr_length = len(arr_list) arr_size = arr_list[0].size input_shape = arr_list[0].shape arr_reshaped = np.vstack([arr.reshape([1, arr_size]) for arr in arr_list]) arr_ordered = np.sort(arr_reshaped, axis=0) arr_ordered = arr_ordered.clip(min=vmin, max=vmax) arr_ordered_new = np.vstack([np.ones([1, arr_size]) * vmin, arr_ordered, np.ones([1, arr_size]) * vmax]) arr_ordered_new[np.abs(arr_ordered_new) < sys.float_info.epsilon] = 0 n_teachers, n_feature = arr_reshaped.shape arr_prob = np.zeros([n_teachers + 1, n_feature]) for i in range(arr_length + 1): diff = arr_ordered_new[i + 1, :] - arr_ordered_new[i, :] diff = diff.clip(min=0) arr_prob[i] = diff * np.exp(-0.5 / lmbd * abs(i - q / 100 * arr_length)) # arr_prob[i] = np.exp(np.log(diff) - 0.5/lmbd * abs(i - q/100 * arr_length)) # arr_prob = normalize(arr_prob, norm='l1', axis=0) if np.min(arr_prob) < 0: print(arr_prob) exit() low = np.zeros([1, arr_size]) high = np.zeros([1, arr_size]) for i in range(arr_size): prob = arr_prob[:, i] / np.sum(arr_prob[:, i]) rindex = np.random.choice(arr_length + 1, p=prob) # print(rindex) low[0, i] = arr_ordered_new[rindex, i] high[0, i] = arr_ordered_new[rindex + 1, i] output_q = np.random.uniform(low=low, high=high, size=[1, arr_size]) output_q = output_q.reshape(input_shape) rdp_budget = arr_size * np.multiply( 1 / (orders - 1), np.log( np.multiply(np.divide(orders, 2 * orders - 1), np.exp((orders - 1) / lmbd)) \ + np.multiply(np.divide(orders - 1, 2 * orders - 1), np.exp(-orders / lmbd)) ) ) return output_q, rdp_budget def rdp_winsorized_mean(arr_list, step_size, sigma_mean, sigma_percentile, orders, pca_mat=None): vmin = -step_size vmax = step_size flatten_arr = np.asarray([arr.flatten() for arr in arr_list]) n_teachers, n_features = flatten_arr.shape if pca_mat is not None: # project to principal components flatten_arr = np.matmul(flatten_arr, pca_mat) n_features = flatten_arr.shape[1] q25, q25_budget = rdp_percentile(flatten_arr, 25, orders, vmin=vmin, vmax=vmax, lmbd=sigma_percentile) q75, q75_budget = rdp_percentile(flatten_arr, 75, orders, vmin=vmin, vmax=vmax, lmbd=sigma_percentile) arr_mean = np.mean(flatten_arr.clip(min=q25, max=q75), axis=0) arr_mean[np.sign(q75) != np.sign(q25)] = 0 # when 75 percentile is smaller, update the model with the average of 75 and 25 percentile # quantile_mean = (q75 + q25) / 2 arr_mean[q75 < q25] = 0 update_index = np.nonzero(np.logical_and(np.sign(q75) == np.sign(q25), q75 
> q25))

    q_range = q75 - q25
    sensitivity = LA.norm(q_range[update_index] / len(arr_list))

    gaussian_noise, mean_budget = gaussian_rdp(arr_mean[update_index], sensitivity, orders, sigma_mean)
    arr_mean[update_index] += gaussian_noise
    arr_mean[update_index] = arr_mean[update_index].clip(min=q25[update_index], max=q75[update_index])

    # for testing only
    # update_ratio = gaussian_noise.size / arr_mean.size
    # print("Update ratio: %.8f, norm: %.8f" % (update_ratio, sensitivity))

    rdp_budget = q25_budget + q75_budget + mean_budget

    if pca_mat is not None:
        # project res direction back to original axis
        arr_mean = np.matmul(arr_mean, np.transpose(pca_mat))

    return arr_mean.reshape(arr_list[0].shape), rdp_budget


def gradient_voting_nonprivate(output_list, step_size, nbins=10):
    n = len(output_list)
    flatten_arr = np.asarray([arr.flatten() for arr in output_list])
    n_teachers, n_features = flatten_arr.shape
    flatten_arr = flatten_arr.clip(min=-step_size, max=step_size)
    bins = np.arange(-step_size, step_size, (step_size * 2 / nbins))
    bins = np.hstack([bins, step_size])
    result = np.zeros([1, n_features])
    for i in range(n_features):
        votes_arr, _ = np.histogram(flatten_arr[:, i], bins)
        res_idx = np.argmax(votes_arr)
        result[:, i] = (bins[res_idx] + bins[res_idx + 1]) / 2
    return result.reshape(output_list[0].shape)


def gradient_voting_rdp(output_list, step_size, sigma, sigma_thresh, orders, pca_mat=None, nbins=10, thresh=0.9):
    import time
    st = time.time()
    n = len(output_list)
    use_gpu = False  # turn it on if you are running a huge matrix and the bottleneck lies on CPU matmul
    if use_gpu:
        # have to use torch==1.2.0 and torchvision==0.4.0 to run tensorflow-gpu==1.4.0
        import torch
        flatten_arr = torch.tensor([arr.flatten() for arr in output_list], device='cuda:0')
    else:
        flatten_arr = np.asarray([arr.flatten() for arr in output_list])
    n_teachers, n_features = flatten_arr.shape

    if pca_mat is not None:
        # project to principal components
        if use_gpu:
            pca_mat_tensor = torch.from_numpy(pca_mat).float().to('cuda:0')
            flatten_arr = torch.matmul(flatten_arr, pca_mat_tensor)
            flatten_arr = flatten_arr.cpu().numpy()
        else:
            flatten_arr = np.matmul(flatten_arr, pca_mat)
        n_features = flatten_arr.shape[1]

    flatten_arr = flatten_arr.clip(min=-step_size, max=step_size)
    bins = np.arange(-step_size, step_size, (step_size * 2 / nbins))
    bins = np.hstack([bins, step_size])
    result = np.zeros([1, n_features])
    rdp_budget = 0
    skipped_cnt = 0
    for i in range(n_features):
        votes_arr, _ = np.histogram(flatten_arr[:, i], bins)
        print(votes_arr)
        res_idx, cur_budget = gnmax_thresh_aggregator(votes_arr, thresh * n_teachers, sigma_thresh, sigma, orders)
        rdp_budget += cur_budget
        if res_idx < 0:
            skipped_cnt += 1
        else:
            result[:, i] = (bins[res_idx] + bins[res_idx + 1]) / 2
    print("Skipped %d features out of %d" % (skipped_cnt, n_features))

    if pca_mat is not None:
        # project res direction back to original axis
        result = np.matmul(result, np.transpose(pca_mat))
    return result.reshape(output_list[0].shape), rdp_budget


def gradient_voting_rdp_multiproj(output_list, step_size, sigma, sigma_thresh, orders, pca_mats=None, nbins=10, thresh=0.9):
    n = len(output_list)
    flatten_arr = np.asarray([arr.flatten() for arr in output_list])
    n_teachers, n_features = flatten_arr.shape
    print("flatten arr shape", flatten_arr.shape)
    if pca_mats is not None:
        # project to principal components
        split_flatten_arr = np.split(flatten_arr, len(pca_mats), axis=1)
        reduced_flatten_arr = []
        for pca_mat, arr in zip(pca_mats, split_flatten_arr):
            print("arr shape", arr.shape)
            print("pca shape", pca_mat.shape)
            arr = np.matmul(arr, pca_mat)
            reduced_flatten_arr.append(arr)
        flatten_arr = np.concatenate(reduced_flatten_arr, axis=1)
        n_features = flatten_arr.shape[1]

    flatten_arr = flatten_arr.clip(min=-step_size, max=step_size)
    bins = np.arange(-step_size, step_size, (step_size * 2 / nbins))
    bins = np.hstack([bins, step_size])
    result = np.zeros([1, n_features])
    rdp_budget = 0
    skipped_cnt = 0
    for i in range(n_features):
        votes_arr, _ = np.histogram(flatten_arr[:, i], bins)
        print(votes_arr)
        res_idx, cur_budget = gnmax_thresh_aggregator(votes_arr, thresh * n_teachers, sigma_thresh, sigma, orders)
        rdp_budget += cur_budget
        if res_idx < 0:
            skipped_cnt += 1
        else:
            result[:, i] = (bins[res_idx] + bins[res_idx + 1]) / 2
    print("Skipped %d features out of %d" % (skipped_cnt, n_features))

    if pca_mats is not None:  # fixed: was `pca_mat`, which is undefined in this function
        # project res direction back to original axis
        split_results = np.split(result, len(pca_mats), axis=1)
        final_results = []
        for split_result, pca_mat in zip(split_results, pca_mats):
            final_results.append(np.matmul(split_result, np.transpose(pca_mat)))
        final_results = np.concatenate(final_results, axis=1)
    else:
        final_results = result  # fixed: return the unprojected result when no projection is used
    return final_results.reshape(output_list[0].shape), rdp_budget


def gradient_sign_rdp(output_list, step_size, sigma, sigma_thresh, orders, pca_mat=None, thresh=0.9):
    n = len(output_list)
    flatten_arr = np.asarray([arr.flatten() for arr in output_list])
    n_teachers, n_features = flatten_arr.shape

    if pca_mat is not None:
        # project to principal components
        flatten_arr = np.matmul(flatten_arr, pca_mat)
        n_features = flatten_arr.shape[1]

    # first line for positive votes, second line for negative votes
    votes_arr = np.zeros([2, n_features])
    votes_sign = np.sign(flatten_arr)
    # counts for positive votes
    # (fixed: the original boolean-mask indexing collapsed all features into a single scalar count)
    votes_arr[0, :] = np.sum(votes_sign > 0, axis=0)
    # counts for negative votes
    votes_arr[1, :] = np.sum(votes_sign < 0, axis=0)

    res_dir = np.zeros([1, n_features])
    rdp_budget = 0
    skipped_cnt = 0
    for i in range(n_features):
        dir_index, cur_budget = gnmax_thresh_aggregator(votes_arr[:, i], thresh * n_teachers, sigma_thresh, sigma, orders)
        if dir_index == 0:
            res_dir[0, i] = step_size
        elif dir_index == 1:
            res_dir[0, i] = -step_size
        else:
            skipped_cnt += 1
        rdp_budget += cur_budget
    print("Skipped %d features out of %d" % (skipped_cnt, n_features))

    if pca_mat is not None:
        # project res direction back to original axis
        res_dir = np.matmul(res_dir, np.transpose(pca_mat))
    return res_dir.reshape(output_list[0].shape), rdp_budget


def gradient_rdp(output_list, step_size, sigma, orders, pca_mat=None, thresh=None, sigma_thresh=1):
    n = len(output_list)
    flatten_arr = np.asarray([arr.flatten() for arr in output_list])
    n_teachers, n_features = flatten_arr.shape

    if pca_mat is not None:
        # project to principal components
        flatten_arr = np.matmul(flatten_arr, pca_mat)
        n_features = flatten_arr.shape[1]

    # first half votes for positive direction, second half votes for negative direction
    votes_arr = np.zeros([n_teachers, n_features * 2])
    max_index = np.argmax(np.abs(flatten_arr), axis=1)
    for i in range(n_teachers):
        if flatten_arr[i, max_index[i]] > 0:
            votes_arr[i, max_index[i]] = 1
        else:
            votes_arr[i, max_index[i] + n_features] = 1

    votes_count = np.sum(votes_arr, axis=0)
    if thresh is None:
        dir_index, rdp_budget = gnmax_aggregator(votes_count, sigma, orders)
    else:
        dir_index, rdp_budget = gnmax_thresh_aggregator(votes_count, thresh * n_teachers, sigma_thresh, sigma, orders)

    max_votes = np.max(votes_count)
    selected_votes = votes_count[dir_index]
    # print("Max cnt: %d, selected cnt: %d" % (max_votes, selected_votes))

    res_dir = np.zeros([1, n_features])
    if dir_index < n_features and dir_index >= 0:
        res_dir[0, dir_index] = step_size
    elif dir_index >= n_features:
        res_dir[0, dir_index - n_features] = -step_size
    else:
        print("Teachers don't agree. Skip...")

    if pca_mat is not None:
        # project res direction back to original axis
        res_dir = np.matmul(res_dir, np.transpose(pca_mat))
    return res_dir.reshape(output_list[0].shape), rdp_budget


def gaussian_rdp(arr, sensitivity, orders, sigma):
    gaussian_noise = np.random.normal(loc=np.zeros(arr.shape), scale=sigma * sensitivity, size=arr.shape)
    # Table 2 @ https://arxiv.org/pdf/1702.07476.pdf
    rdp_budget = [o / ((2 * sigma) ** 2) for o in orders]
    return gaussian_noise, rdp_budget
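# A small sketch (not part of the original file) of the two GNMax entry
# points above on synthetic votes; the counts, sigmas and threshold are made
# up for illustration.
def _demo_aggregators():
    orders = np.hstack([1.5, np.arange(2, 64)])
    votes = np.array([42., 3., 5.])  # e.g. 50 teachers voting over 3 bins
    idx, budget = gnmax_aggregator(votes, sigma=10.0, orders=orders)
    # the thresholded variant only answers when a noisy max clears
    # thresh_cnt, returning -1 (and a smaller budget) otherwise
    idx_t, budget_t = gnmax_thresh_aggregator(votes, thresh_cnt=0.9 * 50,
                                              sigma_thresh=20.0, sigma=10.0,
                                              orders=orders)
    return idx, idx_t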
12,952
36.436416
124
py
G-PATE
G-PATE-master/dp_pca.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Combined sanitizer.py and dp_pca.py under tensorflow/models/research/differential-privacy """ """Differentially private optimizers. """ import tensorflow as tf import collections from sklearn.preprocessing import normalize from rdp_utils import gaussian_rdp import numpy as np def ComputeDPPrincipalProjection(data, projection_dims, orders, sigma): """Compute differentially private projection. Args: data: the input data, each row is a data vector. projection_dims: the projection dimension. sigma: sigma for gaussian noise Returns: A projection matrix with projection_dims columns. """ # Normalize each row. normalized_data = normalize(data, norm='l2', axis=1) covar = np.matmul(np.transpose(normalized_data), normalized_data) # Since the data is already normalized, there is no need to clip # the covariance matrix. gaussian_noise, rdp_budget = gaussian_rdp(covar.reshape([1,-1]), 1.0, orders, sigma) saned_covar = covar + gaussian_noise.reshape(covar.shape) # Symmetrize saned_covar. This also reduces the noise variance. saned_covar = 0.5 * (saned_covar + np.transpose(saned_covar)) # Compute the eigen decomposition of the covariance matrix, and # return the top projection_dims eigen vectors, represented as columns of # the projection matrix. eigvals, eigvecs = np.linalg.eig(saned_covar) topk_indices = eigvals.argsort()[::-1][:projection_dims] topk_indices = np.reshape(topk_indices, [projection_dims]) # Gather and return the corresponding eigenvectors. return np.transpose(np.take(np.transpose(eigvecs), topk_indices, axis=0)), rdp_budget
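# A minimal usage sketch (not part of the original file): fit a DP projection
# on synthetic data and reduce it to 10 dimensions. The data, sigma and
# orders below are made up for illustration.
def _demo_dp_pca():
    orders = np.hstack([1.5, np.arange(2, 32)])
    data = np.random.randn(500, 64)
    proj, rdp_budget = ComputeDPPrincipalProjection(data, 10, orders, sigma=1.0)
    reduced = np.matmul(data, proj)  # shape (500, 10)
    return reduced.shape, np.min(rdp_budget)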
2,314
37.583333
97
py
G-PATE
G-PATE-master/utils.py
""" Some codes from https://github.com/Newmu/dcgan_code """ from __future__ import division import math import json import random import pprint import scipy.misc import numpy as np from time import gmtime, strftime from six.moves import xrange import os import tensorflow as tf import tensorflow.contrib.slim as slim pp = pprint.PrettyPrinter() get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1]) def mkdir(dir_name): dirs = dir_name.split('/') cur_dir = '' for d in dirs: cur_dir = os.path.join(cur_dir, d) if not os.path.exists(cur_dir): print("mkdir %s" % cur_dir) os.mkdir(cur_dir) else: print("%s exists" % cur_dir) def show_all_variables(): model_vars = tf.trainable_variables() slim.model_analyzer.analyze_vars(model_vars, print_info=True) def get_image(image_path, input_height, input_width, resize_height=64, resize_width=64, crop=True, grayscale=False): image = imread(image_path, grayscale) return transform(image, input_height, input_width, resize_height, resize_width, crop) def save_images(images, size, image_path): return imsave(inverse_transform(images), size, image_path) def imread(path, grayscale = False): if (grayscale): return scipy.misc.imread(path, flatten = True).astype(np.float) else: return scipy.misc.imread(path).astype(np.float) def merge_images(images, size): return inverse_transform(images) def merge(images, size): h, w = images.shape[1], images.shape[2] if (images.shape[3] in (3,4)): c = images.shape[3] img = np.zeros((h * size[0], w * size[1], c)) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w, :] = image return img elif images.shape[3]==1: img = np.zeros((h * size[0], w * size[1])) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0] return img else: raise ValueError('in merge(images,size) images parameter ' 'must have dimensions: HxW or HxWx3 or HxWx4') def imsave(images, size, path): image = np.squeeze(merge(images, size)) return scipy.misc.imsave(path, image) def center_crop(x, crop_h, crop_w, resize_h=64, resize_w=64): if crop_w is None: crop_w = crop_h h, w = x.shape[:2] j = int(round((h - crop_h)/2.)) i = int(round((w - crop_w)/2.)) return scipy.misc.imresize( x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w]) def transform(image, input_height, input_width, resize_height=64, resize_width=64, crop=True): if crop: cropped_image = center_crop( image, input_height, input_width, resize_height, resize_width) else: cropped_image = scipy.misc.imresize(image, [resize_height, resize_width]) return np.array(cropped_image)/127.5 - 1. def inverse_transform(images): return (images+1.)/2. 
def to_json(output_path, *layers): with open(output_path, "w") as layer_f: lines = "" for w, b, bn in layers: layer_idx = w.name.split('/')[0].split('h')[1] B = b.eval() if "lin/" in w.name: W = w.eval() depth = W.shape[1] else: W = np.rollaxis(w.eval(), 2, 0) depth = W.shape[0] biases = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(B)]} if bn != None: gamma = bn.gamma.eval() beta = bn.beta.eval() gamma = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(gamma)]} beta = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(beta)]} else: gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []} beta = {"sy": 1, "sx": 1, "depth": 0, "w": []} if "lin/" in w.name: fs = [] for w in W.T: fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": ['%.2f' % elem for elem in list(w)]}) lines += """ var layer_%s = { "layer_type": "fc", "sy": 1, "sx": 1, "out_sx": 1, "out_sy": 1, "stride": 1, "pad": 0, "out_depth": %s, "in_depth": %s, "biases": %s, "gamma": %s, "beta": %s, "filters": %s };""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs) else: fs = [] for w_ in W: fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": ['%.2f' % elem for elem in list(w_.flatten())]}) lines += """ var layer_%s = { "layer_type": "deconv", "sy": 5, "sx": 5, "out_sx": %s, "out_sy": %s, "stride": 2, "pad": 1, "out_depth": %s, "in_depth": %s, "biases": %s, "gamma": %s, "beta": %s, "filters": %s };""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2), W.shape[0], W.shape[3], biases, gamma, beta, fs) layer_f.write(" ".join(lines.replace("'","").split())) def make_gif(images, fname, duration=2, true_image=False): import moviepy.editor as mpy def make_frame(t): try: x = images[int(len(images)/duration*t)] except: x = images[-1] if true_image: return x.astype(np.uint8) else: return ((x+1)/2*255).astype(np.uint8) clip = mpy.VideoClip(make_frame, duration=duration) clip.write_gif(fname, fps = len(images) / duration) def visualize(sess, dcgan, config, option): image_frame_dim = int(math.ceil(config.batch_size**.5)) if option == 0: z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim)) samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime())) elif option == 1: values = np.arange(0, 1, 1./config.batch_size) for idx in xrange(dcgan.z_dim): print(" [*] %d" % idx) z_sample = np.random.uniform(-1, 1, size=(config.batch_size , dcgan.z_dim)) for kdx, z in enumerate(z_sample): z[idx] = values[kdx] if config.dataset == "mnist": y = np.random.choice(10, config.batch_size) y_one_hot = np.zeros((config.batch_size, 10)) y_one_hot[np.arange(config.batch_size), y] = 1 samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot}) else: samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx)) elif option == 2: values = np.arange(0, 1, 1./config.batch_size) for idx in [random.randint(0, dcgan.z_dim - 1) for _ in xrange(dcgan.z_dim)]: print(" [*] %d" % idx) z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim)) z_sample = np.tile(z, (config.batch_size, 1)) #z_sample = np.zeros([config.batch_size, dcgan.z_dim]) for kdx, z in enumerate(z_sample): z[idx] = values[kdx] if config.dataset == "mnist": y = np.random.choice(10, config.batch_size) y_one_hot = np.zeros((config.batch_size, 10)) 
y_one_hot[np.arange(config.batch_size), y] = 1 samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot}) else: samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) try: make_gif(samples, './samples/test_gif_%s.gif' % (idx)) except: save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime())) elif option == 3: values = np.arange(0, 1, 1./config.batch_size) for idx in xrange(dcgan.z_dim): print(" [*] %d" % idx) z_sample = np.zeros([config.batch_size, dcgan.z_dim]) for kdx, z in enumerate(z_sample): z[idx] = values[kdx] samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) make_gif(samples, './samples/test_gif_%s.gif' % (idx)) elif option == 4: image_set = [] values = np.arange(0, 1, 1./config.batch_size) for idx in xrange(dcgan.z_dim): print(" [*] %d" % idx) z_sample = np.zeros([config.batch_size, dcgan.z_dim]) for kdx, z in enumerate(z_sample): z[idx] = values[kdx] image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})) make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx)) new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \ for idx in range(64) + range(63, -1, -1)] make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8) def image_manifold_size(num_images): manifold_h = int(np.floor(np.sqrt(num_images))) manifold_w = int(np.ceil(np.sqrt(num_images))) assert manifold_h * manifold_w == num_images return manifold_h, manifold_w
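# A small sketch (not part of the original file) of the grid helpers above:
# tile a synthetic batch of grayscale images into a single mosaic. The batch
# is random noise, purely for illustration.
def _demo_merge():
    batch = np.random.rand(64, 28, 28, 1)
    h, w = image_manifold_size(batch.shape[0])  # 8 x 8 grid for 64 images
    mosaic = merge(batch, (h, w))               # shape (224, 224)
    return mosaic.shape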
9,032
33.215909
131
py
G-PATE
G-PATE-master/model.py
from __future__ import division import os import time import math from glob import glob import tensorflow as tf import numpy as np from six.moves import xrange import json import sys from keras.datasets import cifar10 from ops import * from utils import * from rdp_utils import * from pate_core import * import pickle from keras.utils import np_utils # import pandas as pd import torch import torch.utils.data import torchvision import torchvision.transforms as transforms import scipy from dp_pca import ComputeDPPrincipalProjection from sklearn.random_projection import GaussianRandomProjection from utils import pp, visualize, to_json, show_all_variables, mkdir from gen_data import batch2str from PIL import Image def partition_dataset(data, labels, nb_teachers, teacher_id): """ Simple partitioning algorithm that returns the right portion of the data needed by a given teacher out of a certain nb of teachers :param data: input data to be partitioned :param labels: output data to be partitioned :param nb_teachers: number of teachers in the ensemble (affects size of each partition) :param teacher_id: id of partition to retrieve :return: """ # Sanity check assert(int(teacher_id) < int(nb_teachers)) # This will floor the possible number of batches batch_len = int(len(data) / nb_teachers) # Compute start, end indices of partition start = teacher_id * batch_len end = (teacher_id+1) * batch_len # Slice partition off partition_data = data[start:end] if labels is not None: partition_labels = labels[start:end] else: partition_labels = None return partition_data, partition_labels def conv_out_size_same(size, stride): return int(math.ceil(float(size) / float(stride))) def sigmoid_cross_entropy_with_logits(x, y): try: return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y) except: return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y) class DCGAN(object): def __init__(self, sess, input_height=32, input_width=32, crop=False, batch_size=64, sample_num=64, output_height=32, output_width=32, y_dim=10, z_dim=100, gf_dim=64, df_dim=32, sample_step=800, gfc_dim=1024, dfc_dim=256, c_dim=3, dataset_name='default', input_fname_pattern='*.jpg', checkpoint_dir=None, teacher_dir=None, generator_dir=None, sample_dir=None, data_dir='./data', batch_teachers=10, teachers_batch=2, orders=None, thresh=None, dp_delta=1e-5, pca=False, pca_dim=5, non_private=False, random_proj=False, wgan=False, wgan_scale=10, small=False, config=None): """ Args: sess: TensorFlow session batch_size: The size of batch. Should be specified before training. z_dim: (optional) Dimension of dim for Z. [100] gf_dim: (optional) Dimension of gen filters in first conv layer. [64] df_dim: (optional) Dimension of discrim filters in first conv layer. [64] gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024] dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024] c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3] batch_teachers: Number of teacher models in one batch. Default 10. teachers_batch: Batches of training teacher models. Default 1. 
""" self.config = config self.small = small self.wgan = wgan self.wgan_scale = wgan_scale self.sample_step = sample_step self.pca = pca self.pca_dim = pca_dim self.random_proj = random_proj self.dp_eps_list = [] self.rdp_eps_list = [] self.rdp_order_list = [] self.thresh = thresh self.dp_delta = dp_delta self.sample_dir = sample_dir self.dataset = dataset_name self.batch_teachers = batch_teachers self.teachers_batch = teachers_batch self.overall_teachers = batch_teachers * teachers_batch self.sess = sess self.crop = crop self.batch_size = batch_size self.sample_num = sample_num self.input_height = input_height self.input_width = input_width self.output_height = output_height self.output_width = output_width self.z_dim = z_dim self.y_dim = y_dim self.gf_dim = gf_dim self.df_dim = df_dim self.gfc_dim = gfc_dim self.dfc_dim = dfc_dim # batch normalization : deals with poor initialization helps gradient flow self.d_bn1 = batch_norm(name='d_bn1') self.d_bn2 = batch_norm(name='d_bn2') self.d_bn3 = batch_norm(name='d_bn3') self.g_bn0 = batch_norm(name='g_bn0') self.g_bn1 = batch_norm(name='g_bn1') self.g_bn2 = batch_norm(name='g_bn2') self.g_bn3 = batch_norm(name='g_bn3') self.dataset_name = dataset_name self.input_fname_pattern = input_fname_pattern self.checkpoint_dir = checkpoint_dir self.teacher_dir = teacher_dir self.generator_dir = generator_dir self.data_dir = data_dir if orders is not None: self.orders = np.asarray(orders) else: self.orders = np.hstack([1.1, np.arange(2, config.orders)]) self.rdp_counter = np.zeros(self.orders.shape) # Load the dataset, ignore test data for now if self.dataset_name == 'mnist': self.data_X, self.data_y = self.load_mnist() self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 1) self.input_height = self.input_width = 28 self.output_height = self.output_width = 28 elif self.dataset_name == 'fashion_mnist': self.data_X, self.data_y = self.load_fashion_mnist() self.c_dim = self.data_X[0].shape[-1] # = (self.c_dim == 1) self.input_height = self.input_width = 28 self.output_height = self.output_width = 28 if self.config.random_label: np.random.shuffle(self.data_y) elif self.dataset_name == 'cifar': self.data_X, self.data_y = self.load_cifar() self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 3) elif 'small-celebA-gender' in self.dataset_name: mode = self.dataset_name.split('-')[-1] self.y_dim = 2 self.input_size = self.input_height = self.input_width = 32 self.output_size = self.output_height = self.output_width = 32 self.data_X, self.data_y = self.load_small_celebA_gender(mode) self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 1) if self.config.random_label: np.random.shuffle(self.data_y) elif 'celebA-hair' in self.dataset_name: mode = self.dataset_name.split('-')[-1] self.y_dim = 3 self.input_size = self.input_height = self.input_width = 64 self.output_size = self.output_height = self.output_width = 64 self.data_X, self.data_y = self.load_celebA_hair(mode) self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 1) if self.config.random_label: np.random.shuffle(self.data_y) elif 'celebA-gender' in self.dataset_name: mode = self.dataset_name.split('-')[-1] self.y_dim = 2 self.input_size = self.input_height = self.input_width = 64 self.output_size = self.output_height = self.output_width = 64 self.data_X, self.data_y = self.load_celebA_gender(mode) self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 1) if self.config.random_label: np.random.shuffle(self.data_y) elif 
self.dataset_name == 'cinic': self.data_X, self.data_y = self.load_cinic() self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 3) elif self.dataset_name == 'slt': self.data_X, self.data_y = self.slt() self.c_dim = self.data_X[0].shape[-1] self.grayscale = (self.c_dim == 3) print(self.data_X.shape) elif 'isolet' in self.dataset_name: self.data_X, self.data_y = self.load_isolet() self.train_size, self.input_size = self.data_X.shape self.output_size = self.input_size # self.y_dim = None # self.crop = False if self.pca_dim > self.input_size: self.pca_dim = self.input_size elif 'fire-small' in self.dataset_name: self.data_X = self.load_fire_data() self.data_y = None self.train_size, self.input_size = self.data_X.shape self.output_size = self.input_size self.y_dim = None self.crop = False if self.pca_dim > self.input_size: self.pca_dim = self.input_size elif 'census' in self.dataset_name: self.data_X = self.load_census_data() self.data_y = None self.train_size, self.input_size = self.data_X.shape self.output_size = self.input_size self.y_dim = None self.crop = False if self.pca_dim > self.input_size: self.pca_dim = self.input_size else: raise Exception("Check value of dataset flag") self.train_data_list = [] self.train_label_list = [] # if non_private: # for i in range(self.overall_teachers): # partition_data, partition_labels = partition_dataset(self.data_X, self.data_y, 1, i) # self.train_data_list.append(partition_data) # self.train_label_list.append(partition_labels) # else: if config.shuffle: from sklearn.utils import shuffle self.data_X, self.data_y = shuffle(self.data_X, self.data_y) from collections import defaultdict self.save_dict = defaultdict(lambda: False) for i in range(self.overall_teachers): partition_data, partition_labels = partition_dataset(self.data_X, self.data_y, self.overall_teachers, i) self.train_data_list.append(partition_data) self.train_label_list.append(partition_labels) # print(self.train_label_list) self.train_size = len(self.train_data_list[0]) if self.train_size < self.batch_size: self.batch_size = self.train_size print('adjusted batch size:', self.batch_size) # raise Exception("[!] 
Entire dataset size (%d) is less than the configured batch_size (%d) " % ( # self.train_size, self.batch_size)) self.build_model() def aggregate_results(self, output_list, config, thresh=None, epoch=None): if self.pca: res, rdp_budget = gradient_voting_rdp( output_list, config.step_size, config.sigma, config.sigma_thresh, self.orders, pca_mat=self.pca_components, thresh=thresh ) elif self.random_proj: orig_dim = 1 for dd in self.image_dims: orig_dim = orig_dim * dd if epoch is not None: proj_dim = min(epoch + 1, self.pca_dim) else: proj_dim = self.pca_dim n_data = output_list[0].shape[0] if config.proj_mat > 1: proj_dim_ = proj_dim // config.proj_mat n_data_ = n_data // config.proj_mat orig_dim_ = orig_dim // config.proj_mat print("n_data:", n_data) print("orig_dim:", orig_dim) transformers = [GaussianRandomProjection(n_components=proj_dim_) for _ in range(config.proj_mat)] for transformer in transformers: transformer.fit(np.zeros([n_data_, orig_dim_])) print(transformer.components_.shape) proj_matrices = [np.transpose(transformer.components_) for transformer in transformers] res, rdp_budget = gradient_voting_rdp_multiproj( output_list, config.step_size, config.sigma, config.sigma_thresh, self.orders, pca_mats=proj_matrices, thresh=thresh ) else: transformer = GaussianRandomProjection(n_components=proj_dim) transformer.fit(np.zeros([n_data, orig_dim])) # only the shape of output_list[0] is used proj_matrix = np.transpose(transformer.components_) # proj_matrix = np.random.normal(loc=np.zeros([orig_dim, proj_dim]), scale=1/float(proj_dim), size=[orig_dim, proj_dim]) res, rdp_budget = gradient_voting_rdp( output_list, config.step_size, config.sigma, config.sigma_thresh, self.orders, pca_mat=proj_matrix, thresh=thresh ) else: res, rdp_budget = gradient_voting_rdp(output_list, config.step_size, config.sigma, config.sigma_thresh, self.orders, thresh=thresh) return res, rdp_budget def non_private_aggregation(self, output_list, config): # TODO update nonprivate aggregation sum_arr = np.zeros(output_list[0].shape) for arr in output_list: sum_arr += arr return sum_arr / len(output_list) def load_fire_data(self): dataset_name = os.path.join(self.data_dir, self.dataset_name) dataset_name += '.csv' X = np.loadtxt(dataset_name) seed = 307 np.random.seed(seed) np.random.shuffle(X) return X def load_census_data(self): dataset_name = os.path.join(self.data_dir, self.dataset_name) dataset_name += '.pkl' with open(dataset_name, "rb") as f: X = pickle.load(f) seed = 37 np.random.seed(seed) np.random.shuffle(X) return X def load_isolet(self): dataset_name = os.path.join(self.data_dir, self.dataset_name) dataset_name += '.csv' X = np.loadtxt(dataset_name) # print(X.shape) seed = 37 np.random.seed(seed) np.random.shuffle(X) X = np.hsplit(X, [-1]) x = X[0] # print(X.shape) y = X[1] # print(y.shape) y = np_utils.to_categorical(y, 2) # print(y.shape) return x, y def load_cifar(self): # dataset_name = os.path.join(self.data_dir, self.dataset_name) # dataset_name += '.csv' (x_train, y_train), (x_test, y_test) = cifar10.load_data() y_train = np_utils.to_categorical(y_train, 10) x_train = x_train.reshape(x_train.shape[0], 32, 32, 3) x_train = x_train.astype('float32') / 255. 
        return x_train, y_train

    def slt(self):
        path_to_data = '../../data/stl10_binary/unlabeled_X.bin'
        with open(path_to_data, 'rb') as f:
            # read whole file in uint8 chunks
            everything = np.fromfile(f, dtype=np.uint8)

            # We force the data into 3x96x96 chunks, since the
            # images are stored in "column-major order", meaning
            # that "the first 96*96 values are the red channel,
            # the next 96*96 are green, and the last are blue."
            # The -1 is since the size of the pictures depends
            # on the input file, and this way numpy determines
            # the size on its own.
            images = np.reshape(everything, (-1, 3, 96, 96))

            # Now transpose the images into a standard image format
            # readable by, for example, matplotlib.imshow
            # You might want to comment this line or reverse the shuffle
            # if you will use a learning algorithm like CNN, since they like
            # their channels separated.
            images = np.transpose(images, (0, 3, 2, 1))

        X_resized = np.zeros((100000, 32, 32, 3))
        for i in range(0, 100000):
            img = images[i]
            img = Image.fromarray(img)
            img = np.array(img.resize((32, 32), Image.BICUBIC))  # change the resolution, then convert back to an array
            X_resized[i, :, :, :] = img

        y = np.random.randint(10, size=(100000, 1))
        y = np_utils.to_categorical(y, 10)
        X_resized /= 255
        print(X_resized)
        return X_resized, y

    def load_cinic(self):
        cinic_directory = '../../data/cinic'
        # cinic_mean = [0.47889522, 0.47227842, 0.43047404]
        # cinic_std = [0.24205776, 0.23828046, 0.25874835]
        image_folder = torchvision.datasets.ImageFolder(cinic_directory + '/train/',
                                                        # transform=transforms.Compose([transforms.ToTensor(),
                                                        #     transforms.Normalize(mean=cinic_mean, std=cinic_std)])),
                                                        transform=transforms.ToTensor())
        cinic_train = torch.utils.data.DataLoader(image_folder, batch_size=180000, shuffle=True)
        for batch_ndx, sample in enumerate(cinic_train):
            x = np.asarray(sample[0])
            y = np.asarray(sample[1])
        x = np.reshape(x, [x.shape[0], 32, 32, 3])
        y = np_utils.to_categorical(y, 10)
        return x, y

    def load_celebA_gender(self, mode='train'):
        celebA_directory = '../../data/celebA/'
        import joblib
        if mode == 'train':
            train_x = joblib.load(celebA_directory + 'celebA-trn-x-lg-ups.pkl')
            train_y = joblib.load(celebA_directory + 'celebA-trn-gender-lg-ups.pkl')
            train_y = np_utils.to_categorical(train_y, 2)
            val_x = joblib.load(celebA_directory + 'celebA-val-x-lg-ups.pkl')
            val_y = joblib.load(celebA_directory + 'celebA-val-gender-lg-ups.pkl')
            val_y = np_utils.to_categorical(val_y, 2)
            return np.vstack((train_x, val_x)), np.vstack((train_y, val_y))
        elif mode == 'val':
            val_x = joblib.load(celebA_directory + 'celebA-val-x-lg-ups.pkl')
            val_y = joblib.load(celebA_directory + 'celebA-val-gender-lg-ups.pkl')
            val_y = np_utils.to_categorical(val_y, 2)
            return val_x, val_y
        elif mode == 'tst':
            tst_x = joblib.load(celebA_directory + 'celebA-tst-x.pkl')
            tst_y = joblib.load(celebA_directory + 'celebA-tst-gender.pkl')
            tst_y = np_utils.to_categorical(tst_y, 2)
            return tst_x, tst_y
        else:
            raise Exception("Mode {} not supported".format(mode))

    def load_celebA_hair(self, mode='trn'):
        celebA_directory = '../../data/celebA/'
        import joblib
        if mode == 'trn':
            train_x = joblib.load(celebA_directory + 'celeb-trn-ups-hair-x.pkl')
            train_y = joblib.load(celebA_directory + 'celeb-trn-ups-hair-y.pkl')
            train_y = np_utils.to_categorical(train_y, 3)
            val_x = joblib.load(celebA_directory + 'celeb-val-ups-hair-x.pkl')
            val_y = joblib.load(celebA_directory + 'celeb-val-ups-hair-y.pkl')
            val_y = np_utils.to_categorical(val_y, 3)
            return np.vstack((train_x, val_x)), np.vstack((train_y, val_y))
        elif mode == 'val':
            val_x = joblib.load(celebA_directory + 'celeb-val-ups-hair-x.pkl')
val_y = joblib.load(celebA_directory + 'celeb-val-ups-hair-y.pkl') val_y = np_utils.to_categorical(val_y, 3) return val_x, val_y elif mode == 'tst': tst_x = joblib.load(celebA_directory + 'celeb-tst-ups-hair-x.pkl') tst_y = joblib.load(celebA_directory + 'celeb-tst-ups-hair-y.pkl') tst_y = np_utils.to_categorical(tst_y, 3) return tst_x, tst_y else: raise Exception("Mode {} Not support".format(mode)) def load_small_celebA_gender(self, mode='train'): celebA_directory = '../../data/celebA/' import joblib if mode == 'train': train_x = joblib.load(celebA_directory + 'celebA-trn-x-small-ups.pkl') train_y = joblib.load(celebA_directory + 'celebA-trn-gender-ups.pkl') train_y = np_utils.to_categorical(train_y, 2) val_x = joblib.load(celebA_directory + 'celebA-val-x-small-ups.pkl') val_y = joblib.load(celebA_directory + 'celebA-val-gender-ups.pkl') val_y = np_utils.to_categorical(val_y, 2) return np.vstack((train_x, val_x)), np.vstack((train_y, val_y)) elif mode == 'val': val_x = joblib.load(celebA_directory + 'celebA-val-x-small-ups.pkl') val_y = joblib.load(celebA_directory + 'celebA-val-gender-ups.pkl') val_y = np_utils.to_categorical(val_y, 2) return val_x, val_y elif mode == 'tst': tst_x = joblib.load(celebA_directory + 'celebA-tst-x-small.pkl') tst_y = joblib.load(celebA_directory + 'celebA-tst-gender.pkl') tst_y = np_utils.to_categorical(tst_y, 2) return tst_x, tst_y else: raise Exception("Mode {} Not support".format(mode)) def load_fashion_mnist(self): data_dir = os.path.join(self.data_dir, self.dataset_name) fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte')) loaded = np.fromfile(file=fd, dtype=np.uint8) trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float) fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte')) loaded = np.fromfile(file=fd, dtype=np.uint8) trY = loaded[8:].reshape((60000)).astype(np.int) # fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte')) # loaded = np.fromfile(file=fd,dtype=np.uint8) # teX = loaded[16:].reshape((10000,28,28,1)).astype(np.float) # fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte')) # loaded = np.fromfile(file=fd,dtype=np.uint8) # teY = loaded[8:].reshape((10000)).astype(np.int) trY = np.asarray(trY) # teY = np.asarray(teY) # X = np.concatenate((trX, teX), axis=0) # y = np.concatenate((trY, teY), axis=0).astype(np.int) X = trX y = trY.astype(np.int) seed = 307 np.random.seed(seed) np.random.shuffle(X) np.random.seed(seed) np.random.shuffle(y) y_vec = np.zeros((len(y), self.y_dim), dtype=np.float) for i, label in enumerate(y): y_vec[i, y[i]] = 1.0 return X / 255., y_vec def load_mnist(self): data_dir = os.path.join(self.data_dir, self.dataset_name) fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte')) loaded = np.fromfile(file=fd, dtype=np.uint8) trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float) fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte')) loaded = np.fromfile(file=fd, dtype=np.uint8) trY = loaded[8:].reshape((60000)).astype(np.int) # fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte')) # loaded = np.fromfile(file=fd,dtype=np.uint8) # teX = loaded[16:].reshape((10000,28,28,1)).astype(np.float) # fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte')) # loaded = np.fromfile(file=fd,dtype=np.uint8) # teY = loaded[8:].reshape((10000)).astype(np.int) trY = np.asarray(trY) # teY = np.asarray(teY) # X = np.concatenate((trX, teX), axis=0) # y = np.concatenate((trY, teY), axis=0).astype(np.int) X = trX y = trY.astype(np.int) seed = 307 np.random.seed(seed) np.random.shuffle(X) 
np.random.seed(seed) np.random.shuffle(y) y_vec = np.zeros((len(y), self.y_dim), dtype=np.float) for i, label in enumerate(y): y_vec[i, y[i]] = 1.0 return X / 255., y_vec def build_model(self): if self.crop: image_dims = [self.output_height, self.output_width, self.c_dim] else: image_dims = [self.input_height, self.input_width, self.c_dim] self.inputs = tf.placeholder( tf.float32, [self.batch_size] + [self.input_height, self.input_width, self.c_dim], name='real_images') self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y') self.image_dims = image_dims inputs = self.inputs if self.crop: inputs = tf.image.resize_image_with_crop_or_pad(inputs, target_height=self.output_height, target_width=self.output_width) self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z') self.z_sum = histogram_summary("z", self.z) self.G = self.generator(self.z, self.y) if 'slt' in self.dataset_name or 'cifar' in self.dataset_name: self.G_sum = image_summary("G", self.G, max_outputs=10) self.updated_img = tf.placeholder(tf.float32, [self.batch_size] + image_dims, name='updated_img') self.g_loss = tf.reduce_sum(tf.square(self.updated_img - self.G)) self.g_loss_sum = scalar_summary("g_loss", self.g_loss) self.teachers_list = [] for i in range(self.batch_teachers): with tf.variable_scope("teacher%d" % i) as scope: D, D_logits = self.discriminator(inputs, self.y) scope.reuse_variables() D_, D_logits_ = self.discriminator(self.G, self.y) if self.wgan: # Use WassersteinGAN loss with gradient penalty. Reference: https://github.com/jiamings/wgan/blob/master/wgan_v2.py # Calculate interpolation of real and fake image if 'mnist' in self.dataset_name: alpha = tf.random_uniform([self.batch_size, 1, 1, 1], 0.0, 1.0) alpha = tf.tile(alpha, tf.constant([1, self.input_height, self.input_width, self.c_dim])) else: alpha = tf.random_uniform([self.batch_size, 1], 0.0, 1.0) alpha = tf.tile(alpha, tf.constant([1, self.input_size])) x_hat = tf.math.multiply(alpha, inputs) + tf.math.multiply((1 - alpha), self.G) _, d_hat = self.discriminator(x_hat, self.y) # Calculate gradient penalty for wgan ddx = tf.gradients(d_hat, x_hat)[0] ddx = tf.sqrt(tf.reduce_sum(tf.square(ddx), axis=1)) ddx = tf.reduce_mean(tf.square(ddx - 1.0) ** 2 * self.wgan_scale) if self.wgan: teacher = { 'd_loss': tf.reduce_mean(D_logits_) - tf.reduce_mean(D_logits) + ddx, 'g_loss': -tf.reduce_mean(D_logits_), } else: teacher = { 'd_loss': tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits, tf.ones_like(D))) + \ tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits_, tf.zeros_like(D_))), 'g_loss': tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits_, tf.ones_like(D_))), } teacher.update({ 'd_loss_sum': scalar_summary("d_loss_%d" % i, teacher['d_loss']), 'g_loss_sum': scalar_summary("g_loss_%d" % i, teacher['g_loss']), }) # calculate the change in the images that would minimize generator loss teacher['img_grads'] = -tf.gradients(teacher['g_loss'], self.G)[0] if 'slt' in self.dataset_name: teacher['img_grads_sum'] = image_summary("img_grads", teacher['img_grads'], max_outputs=10) self.teachers_list.append(teacher) t_vars = tf.trainable_variables() g_list = tf.global_variables() add_save = [g for g in g_list if "moving_mean" in g.name] add_save += [g for g in g_list if "moving_variance" in g.name] self.save_vars = t_vars + add_save self.d_vars = [] for i in range(self.batch_teachers): self.d_vars.append([var for var in t_vars if 'teacher%d' % i in var.name]) self.g_vars = [var for var in t_vars if 'g_' in var.name] 
self.g_save_vars = [var for var in t_vars if 'g_' in var.name] self.d_save_vars = [var for var in t_vars if 'd_' in var.name] # print(self.d_save_vars) print(self.save_vars) # self.d_save_vars = {'k': v for k, v in zip(self.d_save_vars, self.d_save_vars)} self.saver = tf.train.Saver(max_to_keep=5, var_list=self.save_vars) self.saver_g = tf.train.Saver(max_to_keep=5, var_list=self.g_save_vars) self.saver_d = tf.train.Saver(max_to_keep=self.teachers_batch, var_list=self.d_save_vars) def get_random_labels(self, batch_size): # print(self.y_dim) y_vec = np.zeros((batch_size, self.y_dim), dtype=np.float) y = np.random.randint(0, self.y_dim, batch_size) for i, label in enumerate(y): y_vec[i, y[i]] = 1.0 return y_vec def train_together(self, config): print("Training teacher models and student model together...") if not config.non_private: assert len(self.train_data_list) == self.overall_teachers else: print(str(len(self.train_data_list))) configs = { 'sigma': config.sigma, 'sigma_thresh': config.sigma_thresh, 'pca': self.pca, 'pca_sigma': config.pca_sigma, 'step_size': config.step_size, 'batch_teachers': self.batch_teachers, 'g_step': config.g_step, 'pca_dim': self.pca_dim, } if not os.path.exists(self.checkpoint_dir): os.makedirs(self.checkpoint_dir) if not os.path.exists(self.teacher_dir): os.makedirs(self.teacher_dir) with open(os.path.join(self.checkpoint_dir, 'configs.json'), 'w') as fp: json.dump(configs, fp) if self.pca: data = self.data_X.reshape([self.data_X.shape[0], -1]) self.pca_components, rdp_budget = ComputeDPPrincipalProjection( data, self.pca_dim, self.orders, config.pca_sigma, ) self.rdp_counter += rdp_budget d_optim_list = [] for i in range(self.batch_teachers): d_optim_list.append(tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize( self.teachers_list[i]['d_loss'], var_list=self.d_vars[i])) g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.g_loss, var_list=self.g_vars) if not config.pretrain: try: tf.global_variables_initializer().run() except: tf.initialize_all_variables().run() else: try: tf.global_variables_initializer().run() except: tf.initialize_all_variables().run() self.load_pretrain(config.checkpoint_dir) # data = self.gen_data(5000) # output_dir = os.path.join(self.checkpoint_dir, self.sample_dir) # if not os.path.exists(output_dir): # os.makedirs(output_dir) # filename = 'private.data_epoch_' + str(-1) + '.pkl' # outfile = os.path.join(output_dir, filename) # mkdir(output_dir) # with open(outfile, 'wb') as f: # pickle.dump(data, f) # current_scope = tf.contrib.framework.get_name_scope() # with tf.variable_scope(current_scope, reuse=True): # biases = tf.get_variable("teacher0/d_h0_conv/biases") # biases = tf.Print(biases, [biases]) # self.sess.run(biases) if 'slt' in self.dataset_name: self.g_sum = merge_summary([self.z_sum, self.G_sum, self.g_loss_sum]) else: self.g_sum = merge_summary([self.z_sum, self.g_loss_sum]) self.d_sum_list = [] for i in range(self.batch_teachers): teacher = self.teachers_list[i] if 'slt' in self.dataset_name: self.d_sum_list.append( merge_summary([teacher['d_loss_sum'], teacher['g_loss_sum'], teacher['img_grads_sum']])) else: self.d_sum_list.append(merge_summary([teacher['d_loss_sum'], teacher['g_loss_sum']])) self.writer = SummaryWriter(os.path.join(self.checkpoint_dir, "logs"), self.sess.graph) sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim)) counter = 0 start_time = time.time() self.save_d(self.teacher_dir, 0, -1) for epoch in xrange(config.epoch): 
print("----------------epoch: %d --------------------" % epoch) print("-------------------train-teachers----------------") batch_idxs = int(min(self.train_size, config.train_size) // self.batch_size) # The idex of each batch print("Train %d idxs" % batch_idxs) for idx in xrange(0, batch_idxs): batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32) errD = 0 # train teacher models in batches, teachers_batch: how many batches of teacher for batch_num in range(self.teachers_batch): could_load, checkpoint_counter = self.load_d(self.teacher_dir, epoch=epoch, batch_num=batch_num) if could_load: counter = checkpoint_counter print("load sucess_this_epoch") else: print('fail_1') could_load, checkpoint_counter = self.load_d(self.teacher_dir, epoch=epoch - 1, batch_num=batch_num) if could_load: counter = checkpoint_counter print("load sucess_previous_epoch") else: print('fail_2') could_load, checkpoint_counter = self.load_d(self.teacher_dir, epoch=0, batch_num=-1) # train each teacher in this batch, batch_teachers: how many teacher in a batch for teacher_id in range(self.batch_teachers): #print("Training teacher model %d" % teacher_id) # data_X = self.data_X if config.non_private else self.train_data_list[teacher_id+batch_num*self.batch_teachers] data_X = self.train_data_list[teacher_id+batch_num*self.batch_teachers] batch_idx = range(idx * self.batch_size, (idx + 1) * self.batch_size) batch_images = data_X[batch_idx] for k in range(config.d_step): if self.y is not None: # data_y = self.data_y if config.non_private else self.train_label_list[teacher_id+batch_num*self.batch_teachers] data_y = self.train_label_list[teacher_id+batch_num*self.batch_teachers] #print(data_y.shape) batch_labels = data_y[batch_idx] _, summary_str = self.sess.run([d_optim_list[teacher_id], self.d_sum_list[teacher_id]], feed_dict={ self.inputs: batch_images, self.z: batch_z, self.y: batch_labels, }) self.writer.add_summary(summary_str, epoch) err = self.teachers_list[teacher_id]['d_loss'].eval({ self.z: batch_z, self.inputs: batch_images, self.y: batch_labels, }) # print(str(batch_num*self.batch_teachers + teacher_id) + "loss:"+str(err)) errD += err else: _, summary_str = self.sess.run([d_optim_list[teacher_id], self.d_sum_list[teacher_id]], feed_dict={ self.inputs: batch_images, self.z: batch_z, }) self.writer.add_summary(summary_str, epoch) err = self.teachers_list[teacher_id]['d_loss'].eval({ self.z: batch_z, self.inputs: batch_images, }) # print(str(batch_num * self.batch_teachers + teacher_id) + "d_loss:" + str(err)) errD += err self.save_d(self.teacher_dir, epoch, batch_num) # print("------------------train-generator-------------------") for k in range(config.g_step): errG = 0 img_grads_list = [] if self.y is not None: batch_labels = self.get_random_labels(self.batch_size) for batch_num in range(self.teachers_batch): could_load, checkpoint_counter = self.load_d(self.teacher_dir, epoch=epoch, batch_num=batch_num) if could_load: counter = checkpoint_counter print("load sucess") else: print('fail') for teacher_id in range(self.batch_teachers): img_grads = self.sess.run(self.teachers_list[teacher_id]['img_grads'], feed_dict={ self.z: batch_z, self.y: batch_labels, }) img_grads_list.append(img_grads) old_img = self.sess.run(self.G, feed_dict={self.z: batch_z, self.y: batch_labels}) else: for batch_num in range(self.teachers_batch): could_load, checkpoint_counter = self.load_d(self.teacher_dir, epoch=epoch, batch_num=batch_num) if could_load: counter = checkpoint_counter print("load sucess") else: 
print('fail') for teacher_id in range(self.batch_teachers): img_grads = self.sess.run(self.teachers_list[teacher_id]['img_grads'], feed_dict={ self.z: batch_z, }) img_grads_list.append(img_grads) old_img = self.sess.run(self.G, feed_dict={self.z: batch_z}) img_grads_agg_list = [] for j in range(self.batch_size): thresh = self.thresh if config.non_private: img_grads_agg_tmp = self.non_private_aggregation([grads[j] for grads in img_grads_list], config) rdp_budget = 0 elif config.increasing_dim: img_grads_agg_tmp, rdp_budget = self.aggregate_results( [grads[j] for grads in img_grads_list], config, thresh=thresh, epoch=epoch) else: img_grads_agg_tmp, rdp_budget = self.aggregate_results( [grads[j] for grads in img_grads_list], config, thresh=thresh) img_grads_agg_list.append(img_grads_agg_tmp) self.rdp_counter += rdp_budget img_grads_agg = np.asarray(img_grads_agg_list) updated_img = old_img + img_grads_agg if config.non_private: eps = 0 order = 0 else: # calculate privacy budget and break if exceeds threshold eps, order = compute_eps_from_delta(self.orders, self.rdp_counter, self.dp_delta) if eps > config.max_eps: print("New budget (eps = %.2f) exceeds threshold of %.2f. Early break (eps = %.2f)." % ( eps, config.max_eps, self.dp_eps_list[-1])) # save privacy budget self.save(config.checkpoint_dir, counter) np.savetxt(self.checkpoint_dir + "/dp_eps.txt", np.asarray(self.dp_eps_list), delimiter=",") np.savetxt(self.checkpoint_dir + "/rdp_eps.txt", np.asarray(self.rdp_eps_list), delimiter=",") np.savetxt(self.checkpoint_dir + "/rdp_order.txt", np.asarray(self.rdp_order_list), delimiter=",") gen_batch = 100000 // self.batch_size + 1 data = self.gen_data(gen_batch) data = data[:100000] import joblib joblib.dump(data, self.checkpoint_dir + '/eps-%.2f.data' % self.dp_eps_list[-1]) sys.exit() self.dp_eps_list.append(eps) self.rdp_order_list.append(order) self.rdp_eps_list.append(self.rdp_counter) # Update G network if self.y is not None: _, summary_str, errG2 = self.sess.run([g_optim, self.g_sum, self.g_loss], feed_dict={ self.z: batch_z, self.updated_img: updated_img, self.y: batch_labels, }) self.writer.add_summary(summary_str, epoch) errG = self.g_loss.eval({ self.z: batch_z, self.updated_img: updated_img, self.y: batch_labels, }) else: _, summary_str = self.sess.run([g_optim, self.g_sum], feed_dict={ self.z: batch_z, self.updated_img: updated_img, }) self.writer.add_summary(summary_str, epoch) errG = self.g_loss.eval({ self.z: batch_z, self.updated_img: updated_img, }) counter += 1 print("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f, g_loss_before: %.8f, dp_eps: %.8f, rdp_order: %d" \ % (epoch, config.epoch, idx, batch_idxs, time.time() - start_time, errD, errG, errG2, eps, order)) # filename = 'epoch'+str(epoch)+'_errD'+str(errD)+'_errG'+str(errG)+'_teachers'+str(self.batch_teachers)+'f.csv' # if epoch % 4 == 0: print('----------------------generate sample----------------------') # data = self.gen_data(500) # output_dir = os.path.join(self.checkpoint_dir, self.sample_dir) # if not os.path.exists(output_dir): # os.makedirs(output_dir) # filename = 'private.data_epoch_' + str(epoch) + '.pkl' # outfile = os.path.join(output_dir, filename) # mkdir(output_dir) # with open(outfile,'wb') as f: # pickle.dump(data, f) filename = 'epoch' + str(epoch) + '_errD' + str(errD) + '_errG' + str(errG) + '_teachers' + str( self.batch_teachers) + 'f.csv' # save each epoch self.save(config.checkpoint_dir, counter) np.savetxt(self.checkpoint_dir + "/dp_eps.txt", np.asarray(self.dp_eps_list), 
delimiter=",") np.savetxt(self.checkpoint_dir + "/rdp_order.txt", np.asarray(self.rdp_order_list), delimiter=",") np.savetxt(self.checkpoint_dir + "/rdp_eps.txt", np.asarray(self.rdp_eps_list), delimiter=",") if config.save_epoch: floor_eps = math.floor(eps * 10) / 10.0 if not self.save_dict[floor_eps]: # get a checkpoint of low eps self.save_dict[floor_eps] = True from shutil import copytree src_dir = os.path.join(config.checkpoint_dir, self.model_dir) dst_dir = os.path.join(config.checkpoint_dir, str(floor_eps)) copytree(src_dir, dst_dir) # # save after training self.save(config.checkpoint_dir, counter) np.savetxt(self.checkpoint_dir + "/dp_eps.txt", np.asarray(self.dp_eps_list), delimiter=",") np.savetxt(self.checkpoint_dir + "/rdp_eps.txt", np.asarray(self.rdp_eps_list), delimiter=",") np.savetxt(self.checkpoint_dir + "/rdp_order.txt", np.asarray(self.rdp_order_list), delimiter=",") return self.dp_eps_list[-1], self.dp_delta def discriminator(self, image, y): yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim]) x = conv_cond_concat(image, yb) h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv')) h0 = conv_cond_concat(h0, yb) if self.wgan: h1 = lrelu(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv')) else: h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv'))) h1 = tf.reshape(h1, [self.batch_size, -1]) h1 = concat([h1, y], 1) if self.wgan: h2 = lrelu(linear(h1, self.dfc_dim, 'd_h2_lin')) else: h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin'))) h2 = concat([h2, y], 1) h3 = linear(h2, 1, 'd_h3_lin') return tf.nn.sigmoid(h3), h3 def generator(self, z, y): with tf.variable_scope("generator") as scope: s_h, s_w = self.output_height, self.output_width s_h2, s_h4 = int(s_h / 2), int(s_h / 4) s_w2, s_w4 = int(s_w / 2), int(s_w / 4) # yb = tf.expand_dims(tf.expand_dims(y, 1),2) yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim]) z = concat([z, y], 1) if self.wgan: h0 = tf.nn.relu(linear(z, self.gfc_dim, 'g_h0_lin')) else: h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'))) h0 = concat([h0, y], 1) if self.wgan: h1 = tf.nn.relu(linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin')) else: h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin'))) h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2]) h1 = conv_cond_concat(h1, yb) if self.wgan: h2 = tf.nn.relu(deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2')) else: h2 = tf.nn.relu( self.g_bn2(deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2'))) h2 = conv_cond_concat(h2, yb) if self.config.tanh: return (1 + tf.nn.tanh(deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))) / 2. 
else: return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3')) def gen_data(self, n_batch, label=None): output_list = [] for i in range(n_batch): batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32) if self.y is not None: if label is None: batch_labels = self.get_random_labels(self.batch_size) else: batch_labels = np.zeros((self.batch_size, self.y_dim), dtype=np.float) batch_labels[:, label] = 1.0 outputs = self.sess.run(self.G, feed_dict={ self.z: batch_z, self.y: batch_labels, }) outputsX = outputs.reshape([self.batch_size, -1]) outputs = np.hstack([outputsX, batch_labels[:, 0:10]]) else: outputs = self.sess.run(self.G, feed_dict={ self.z: batch_z, }) outputsX = outputs.reshape([self.batch_size, -1]) outputs = outputsX output_list.append(outputs) output_arr = np.vstack(output_list) return output_arr @property def model_dir(self): return "{}_{}_{}_{}".format( self.dataset_name, self.batch_size, self.output_height, self.output_width) def print_tensors_in_checkpoint(self, checkpoint_dir, ckpt_name): from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file import os checkpoint_path = os.path.join(checkpoint_dir, ckpt_name) # List ALL tensors example output: v0/Adam (DT_FLOAT) [3,3,1,80] print_tensors_in_checkpoint_file(file_name=checkpoint_path, tensor_name='', all_tensors=True) def load_pretrain(self, checkpoint_dir): print(" [*] Reading checkpoints...") print(checkpoint_dir) save_vars_dict = {x.name[:-2]: x for x in self.save_vars if x.name.startswith('generator')} pretrain_saver = tf.train.Saver(max_to_keep=5, var_list=save_vars_dict) print(self.dataset_name) if 'cifar' in self.dataset_name or 'cinic' in self.dataset_name: ckpt_name = 'DCGAN.model-100' elif 'mnist' in self.dataset_name: ckpt_name = 'CIFAR.model-250' elif 'celebA' in self.dataset_name: ckpt_name = 'CIFAR.model-99' pretrain_saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) import re if self.config.load_d: for i in range(self.batch_teachers): print('loading teacher {}'.format(i)) save_vars_dict = {re.sub(r'teacher[0-9]+', 'teacher0', x.name[:-2]): x for x in self.save_vars if x.name.startswith('teacher{}/'.format(i))} pretrain_saver = tf.train.Saver(max_to_keep=5, var_list=save_vars_dict) pretrain_saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) # save_vars_dict = {x.name: x for x in self.save_vars} # print(save_vars_dict.keys()) counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0)) print(" [*] Success to read {}".format(ckpt_name)) # current_scope = tf.contrib.framework.get_name_scope() # with tf.variable_scope(current_scope, reuse=True): # biases = tf.get_variable("teacher0/d_h0_conv/biases") # biases2 = tf.get_variable("teacher12/d_h0_conv/biases") # biases3 = tf.get_variable("generator/g_h0_lin/Matrix") # biases = tf.Print(biases, [biases, biases2, biases3]) # self.sess.run(biases) return True, counter def load(self, checkpoint_dir, ckpt_name): import re print(" [*] Reading checkpoints...") print(checkpoint_dir) print(ckpt_name) self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0)) print(" [*] Success to read {}".format(ckpt_name)) return True, counter # def load(self, checkpoint_dir): # import re # print(" [*] Reading checkpoints...") # checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) # print(checkpoint_dir) # ckpt = tf.train.get_checkpoint_state(checkpoint_dir) # print(ckpt) # 
print(ckpt.model_checkpoint_path) # if ckpt and ckpt.model_checkpoint_path: # ckpt_name = os.path.basename(ckpt.model_checkpoint_path) # print(ckpt_name) # self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) # counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0)) # print(" [*] Success to read {}".format(ckpt_name)) # return True, counter # else: # print(" [*] Failed to find a checkpoint") # return False, 0 def load_d(self, checkpoint_dir, batch_num, epoch): import re print(" [*] Reading checkpoints...") checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) model_name = "DCGAN_batch_" + str(batch_num) + "_epoch-" + str(epoch) ckpt = os.path.join(checkpoint_dir, model_name) print(ckpt + ".meta") if os.path.isfile(ckpt + ".meta"): # model_name = "DCGAN_batch_" + str(batch_num) + "_epoch_" + str(epoch) # print(model_name) self.saver_d.restore(self.sess, ckpt) counter = int(next(re.finditer("(\d+)(?!.*\d)", model_name)).group(0)) print(" [*] Success to read {}".format(model_name)) return True, counter else: print(" [*] Failed to find a checkpoint") return False, 0 def save(self, checkpoint_dir, step): model_name = "CIFAR.model" checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step) def save_d(self, checkpoint_dir, step, teacher_batch): model_name = "DCGAN_batch_" + str(teacher_batch) + "_epoch" checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) self.saver_d.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step) print("-------------save-dis----------------------") def save_g(self, checkpoint_dir, step): model_name = "DCGAN.model" checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) self.saver_g.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)
57,590
43.575077
156
py
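The aggregate_results method above defers to gradient_voting_rdp and gradient_voting_rdp_multiproj, which live elsewhere in this repo. A minimal NumPy sketch of the idea those calls implement, under simplified assumptions: teachers vote on the sign of each (possibly projected) gradient coordinate, Gaussian noise is added to the vote counts, and the winning direction is scaled by the step size. All names are illustrative and no RDP accounting is done here.

import numpy as np

def noisy_gradient_vote(teacher_grads, step_size, sigma, rng=None):
    """Toy stand-in for gradient_voting_rdp: noisy sign-voting over teachers.

    teacher_grads: list of 1-D arrays, one flattened gradient per teacher.
    Returns an update whose per-coordinate direction is the noisy majority
    sign, scaled by step_size. Privacy accounting is intentionally omitted.
    """
    rng = rng or np.random.default_rng(0)
    votes = np.sum(np.sign(teacher_grads), axis=0).astype(float)  # per-coordinate sign votes
    votes += rng.normal(scale=sigma, size=votes.shape)            # Gaussian mechanism on the counts
    return step_size * np.sign(votes)

# Example: 5 teachers, 4-dimensional gradients
grads = [np.random.randn(4) for _ in range(5)]
print(noisy_gradient_vote(grads, step_size=1e-3, sigma=2.0))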
G-PATE
G-PATE-master/gen_data.py
import numpy as np
import argparse

# Widths of the one-hot blocks for each of the 29 encoded features; blocks of
# width 101 are binned continuous features (bins 0-99 plus slot 100 = missing).
x = [2, 2, 5, 8, 8, 9, 10, 11, 11, 12, 15, 28, 32, 42, 46, 101, 101, 101, 101,
     101, 101, 101, 101, 101, 101, 101, 101, 101, 101]
# [min, max] value ranges used to decode the binned continuous features.
y = [[-1, 2], [-1, 2], [-1, 5], [-1, 8], [-1, 8], [-1, 9], [-1, 10], [-1, 11],
     [-1, 11], [-1, 12], [-1, 15], [-1, 28], [-1, 32], [-1, 42], [-1, 46],
     [1451516400, 1483225200], [1451602843, 1483311589], [1451602962, 1483311743],
     [1451603006, 1483311791], [1451603122, 1483311883], [1451603285, 1483311990],
     [1451604148, 1483312891], [1451604963, 1483314738],
     [-122.513648358854, -122.332574620522], [1, 5], [1451603253, 1483316817],
     [1, 83], [37.6168823239251, 37.8544643401172], [955490400, 1539122400]]

# Turn the per-feature widths into cumulative end offsets into the flat vector.
for i in range(1, 29):
    x[i] += x[i - 1]

FEATURES = [
    'ALS Unit',
    'Final Priority',
    'Call Type Group',
    'Original Priority',
    'Priority',
    'City',
    'Unit Type',
    'Fire Prevention District',
    'Battalion',
    'Supervisor District',
    'Call Final Disposition',
    'Zipcode of Incident',
    'Call Type',
    'Neighborhooods - Analysis Boundaries',
    'Station Area',
    'Watch Date',
    'Received DtTm',
    'Entry DtTm',
    'Dispatch DtTm',
    'Response DtTm',
    'On Scene DtTm',
    'Transport DtTm',
    'Hospital DtTm',
    'Location - Lng',
    'Number of Alarms',
    'Available DtTm',
    'Unit sequence in call dispatch',
    'Location - Lat',
    'Call Date',
    'Unit ID',
    'Box',
    'Address',
]

def data2str(ans, n_dim=29):
    """Decode one flat encoded vector into a comma-separated record."""
    temp = ""
    for i in range(n_dim):
        # slice out the one-hot block for feature i
        if i == 0:
            tmp = ans[:x[i]]
        else:
            tmp = ans[x[i - 1]:x[i]]
        _ = np.argmax(tmp)
        if i == 0:
            temp += str(_)
        else:
            if x[i] - x[i - 1] == 101:
                if _ == 100:
                    # slot 100 encodes a missing value
                    temp += ","
                else:
                    # map the bin index back to the middle of its value range
                    step = float(y[i][1] - y[i][0]) / 100
                    value = y[i][0] + (_ + 0.5) * step
                    if i != 23 and i != 27:
                        temp += "," + str(int(round(value)))
                    else:
                        # features 23 and 27 are longitude/latitude; keep them float
                        temp += "," + str(value)
            else:
                temp += "," + str(_)
    return temp

def batch2str(data, out_file, n_dim=29, n_features=20):
    """Decode a batch of encoded rows and write them out as a CSV file."""
    with open(out_file, "w+") as g:
        g.write(','.join(FEATURES[:n_features]) + "\n")
        for i in range(data.shape[0]):
            g.write(data2str(data[i, :], n_dim=n_dim) + "\n")
2,152
24.630952
502
py
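The decoder above inverts a fixed one-hot/binned encoding. A hypothetical round-trip, assuming the file is importable as a module named gen_data (the slot choices are arbitrary):

import numpy as np
import gen_data  # assumption: gen_data.py is on the path

# Build one synthetic encoded row: slot 1 in feature block 0, slot 0 in
# every later block (gen_data.x holds the cumulative block end offsets).
vec = np.zeros(gen_data.x[-1])
vec[1] = 1.0
for start in gen_data.x[:-1]:
    vec[start] = 1.0

# Decodes to "1,0,0,..." for the small categorical blocks, with bin-center
# values printed for the 101-wide binned features.
print(gen_data.data2str(vec))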
G-PATE
G-PATE-master/ops.py
import math import numpy as np import tensorflow as tf from tensorflow.python.framework import ops from utils import * try: image_summary = tf.image_summary scalar_summary = tf.scalar_summary histogram_summary = tf.histogram_summary merge_summary = tf.merge_summary SummaryWriter = tf.train.SummaryWriter except: image_summary = tf.summary.image scalar_summary = tf.summary.scalar histogram_summary = tf.summary.histogram merge_summary = tf.summary.merge SummaryWriter = tf.summary.FileWriter if "concat_v2" in dir(tf): def concat(tensors, axis, *args, **kwargs): return tf.concat_v2(tensors, axis, *args, **kwargs) else: def concat(tensors, axis, *args, **kwargs): return tf.concat(tensors, axis, *args, **kwargs) class batch_norm(object): def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"): with tf.variable_scope(name): self.epsilon = epsilon self.momentum = momentum self.name = name self.bn = None def __call__(self, x, train=True): return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, is_training=train, scope=self.name) def conv_cond_concat(x, y): """Concatenate conditioning vector on feature map axis.""" x_shapes = x.get_shape() y_shapes = y.get_shape() return concat([ x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3) def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) return conv def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="deconv2d", with_w=False): with tf.variable_scope(name): # filter : [height, width, output_channels, in_channels] w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) try: deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) # Support for verisons of TensorFlow before 0.7.0 except AttributeError: deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) if with_w: return deconv, w, biases else: return deconv def lrelu(x, leak=0.2, name="lrelu"): return tf.maximum(x, leak*x) def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): shape = input_.get_shape().as_list() with tf.variable_scope(scope or "Linear"): try: matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev)) except ValueError as err: msg = "NOTE: Usually, this is due to an issue with the image dimensions. Did you correctly set '--crop' or '--input_height' or '--output_height'?" err.args = err.args + (msg,) raise bias = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(bias_start)) if with_w: return tf.matmul(input_, matrix) + bias, matrix, bias else: return tf.matmul(input_, matrix) + bias
3,925
34.369369
155
py
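conv_cond_concat above tiles a conditioning vector over the feature map's spatial grid before concatenating on the channel axis. A NumPy sketch of the same shape logic, illustrative only and not the TensorFlow implementation:

import numpy as np

def conv_cond_concat_np(x, y):
    """NumPy analogue of ops.conv_cond_concat: broadcast the conditioning
    vector y of shape (N, 1, 1, C_y) over x's spatial grid and concatenate
    along the channel axis."""
    n, h, w, _ = x.shape
    y_tiled = y * np.ones((n, h, w, y.shape[3]))
    return np.concatenate([x, y_tiled], axis=3)

x = np.random.rand(4, 28, 28, 1)                    # feature map
y = np.eye(10)[[3, 1, 4, 1]].reshape(4, 1, 1, 10)   # one-hot labels reshaped like yb
print(conv_cond_concat_np(x, y).shape)              # (4, 28, 28, 11)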
G-PATE
G-PATE-master/dp_utils.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for building and training NN models. """ from __future__ import division import math import numpy import tensorflow as tf def BatchClipByL2norm(t, upper_bound, name=None): """Clip an array of tensors by L2 norm. Shrink each dimension-0 slice of tensor (for matrix it is each row) such that the l2 norm is at most upper_bound. Here we clip each row as it corresponds to each example in the batch. Args: t: the input tensor. upper_bound: the upperbound of the L2 norm. name: optional name. Returns: the clipped tensor. """ assert upper_bound > 0 with tf.name_scope(values=[t, upper_bound], name=name, default_name="batch_clip_by_l2norm") as name: saved_shape = tf.shape(t) batch_size = tf.slice(saved_shape, [0], [1]) t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]])) upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]), tf.constant(1.0/upper_bound)) # Add a small number to avoid divide by 0 l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001) scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound clipped_t = tf.matmul(tf.diag(scale), t2) clipped_t = tf.reshape(clipped_t, saved_shape, name=name) return clipped_t def AddGaussianNoise(t, sigma, name=None): """Add i.i.d. Gaussian noise (0, sigma^2) to every entry of t. Args: t: the input tensor. sigma: the stddev of the Gaussian noise. name: optional name. Returns: the noisy tensor. """ with tf.name_scope(values=[t, sigma], name=name, default_name="add_gaussian_noise") as name: noisy_t = t + tf.random_normal(tf.shape(t), stddev=sigma) return noisy_t
2,431
32.777778
80
py
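The two helpers above compose into the usual clip-then-noise step of differentially private training. An equivalent NumPy sketch that mirrors their math (per-row L2 clipping with the same small epsilon guard, then i.i.d. Gaussian noise):

import numpy as np

def clip_and_noise(batch, clip_bound, sigma, rng=None):
    """NumPy analogue of BatchClipByL2norm followed by AddGaussianNoise:
    rescale each row to an L2 norm of at most clip_bound, then add
    N(0, sigma^2) noise elementwise."""
    rng = rng or np.random.default_rng(0)
    flat = batch.reshape(len(batch), -1)
    norms = np.sqrt((flat ** 2).sum(axis=1) + 1e-6)   # small eps avoids divide-by-zero
    scale = np.minimum(1.0, clip_bound / norms)       # shrink only rows above the bound
    clipped = flat * scale[:, None]
    return clipped + rng.normal(scale=sigma, size=clipped.shape)

batch = np.random.randn(8, 100) * 10
out = clip_and_noise(batch, clip_bound=4.0, sigma=1.0)
print(np.linalg.norm(out, axis=1))  # roughly the clip bound, plus noise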
G-PATE
G-PATE-master/input.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# TODO update load mnist

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import _pickle as cPickle
import gzip
import math
import numpy as np
import os
from scipy.io import loadmat as loadmat
from six.moves import urllib
from six.moves import xrange
import sys
import tarfile

import tensorflow as tf


def create_dir_if_needed(dest_directory):
  """
  Create directory if doesn't exist
  :param dest_directory:
  :return: True if everything went well
  """
  if not tf.gfile.IsDirectory(dest_directory):
    tf.gfile.MakeDirs(dest_directory)
  return True


def maybe_download(file_url, dest_directory):
  """
  Download file_url into dest_directory unless it is already present.
  (Signature assumed: this download logic sat orphaned after
  create_dir_if_needed and referenced these names without defining them.)
  :return: local path to the downloaded file
  """
  filename = file_url.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)

  # Test if file already exists
  if not tf.gfile.Exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                       float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(file_url, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')

  return filepath


def image_whitening(data):
  """
  Subtracts mean of image and divides by adjusted standard variance (for
  stability). Operations are per image but performed for the entire array.
  :param data: 4D array (ID, Height, Width, Channel)
  :return: 4D array (ID, Height, Width, Channel)
  """
  assert len(np.shape(data)) == 4

  # Compute number of pixels in image
  nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]

  # Subtract mean
  mean = np.mean(data, axis=(1, 2, 3))

  ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
  for i in xrange(len(data)):
    data[i, :, :, :] -= mean[i] * ones

  # Compute adjusted standard variance
  adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels),
                           np.std(data, axis=(1, 2, 3)))  # NOLINT(long-line)

  # Divide image
  for i in xrange(len(data)):
    data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]

  print(np.shape(data))

  return data


def ld_mnist(data_dir, dataset_name):
  data_dir = os.path.join(data_dir, dataset_name)

  fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'))
  loaded = np.fromfile(file=fd, dtype=np.uint8)
  trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float)

  fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'))
  loaded = np.fromfile(file=fd, dtype=np.uint8)
  trY = loaded[8:].reshape((60000)).astype(np.float)

  fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'))
  loaded = np.fromfile(file=fd, dtype=np.uint8)
  teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float)

  fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'))
  loaded = np.fromfile(file=fd, dtype=np.uint8)
  teY = loaded[8:].reshape((10000)).astype(np.float)

  trY = np.asarray(trY)
  teY = np.asarray(teY)

  X = np.concatenate((trX, teX), axis=0)
  y = np.concatenate((trY, teY), axis=0).astype(np.int)

  seed = 547
  np.random.seed(seed)
  np.random.shuffle(X)
  np.random.seed(seed)
  np.random.shuffle(y)

  y_dim = 10  # MNIST has 10 classes (the original referenced self.y_dim inside a plain function)
  y_vec = np.zeros((len(y), y_dim), dtype=np.float)
  for i, label in enumerate(y):
    y_vec[i, y[i]] = 1.0

  return X / 255., y_vec


def partition_dataset(data, labels, nb_teachers, teacher_id):
  """
  Simple partitioning algorithm that returns the right portion of the data
  needed by a given teacher out of a certain nb of teachers
  :param data: input data to be partitioned
  :param labels: output data to be partitioned
  :param nb_teachers: number of teachers in the ensemble (affects size of each
                      partition)
  :param teacher_id: id of partition to retrieve
  :return:
  """
  # Sanity check
  assert(len(data) == len(labels))
  assert(int(teacher_id) < int(nb_teachers))

  # This will floor the possible number of batches
  batch_len = int(len(data) / nb_teachers)

  # Compute start, end indices of partition
  start = teacher_id * batch_len
  end = (teacher_id + 1) * batch_len

  # Slice partition off
  partition_data = data[start:end]
  partition_labels = labels[start:end]

  return partition_data, partition_labels
5,028
31.031847
136
py
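partition_dataset gives each teacher a disjoint, equally sized slice of the training set; a small usage example with synthetic data:

import numpy as np
from input import partition_dataset  # assumes the input.py above is importable

data = np.arange(100).reshape(100, 1)
labels = np.arange(100)

# 10 teachers -> each owns a disjoint slice of 10 examples
xs, ys = partition_dataset(data, labels, nb_teachers=10, teacher_id=3)
print(xs[0], xs[-1])  # [30] [39]: teacher 3 owns indices [30, 40)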
G-PATE
G-PATE-master/evaluation/train-classifier-celebA.py
#!/usr/bin/env python
# coding: utf-8

import numpy as np
import argparse
import joblib

import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
tf.keras.backend.set_session(tf.Session(config=config))


def load_celeb():
    celebA_directory = '../../data/celebA/'
    tst_x = joblib.load(celebA_directory + 'celebA-tst-x.pkl')
    tst_y = joblib.load(celebA_directory + 'celebA-tst-gender.pkl')
    print(tst_y.sum(), len(tst_y))
    from keras.utils import np_utils
    tst_y = np_utils.to_categorical(tst_y, 2)
    return tst_x, tst_y


def load_celeb_train():
    celebA_directory = '../../data/celebA/'
    tst_x = joblib.load(celebA_directory + 'celebA-trn-x-lg-ups.pkl')
    tst_y = joblib.load(celebA_directory + 'celebA-trn-gender-lg-ups.pkl')
    print(tst_y.sum(), len(tst_y))
    from keras.utils import np_utils
    tst_y = np_utils.to_categorical(tst_y, 2)
    return tst_x, tst_y


x_test, y_test = load_celeb()


def pipeline():
    parser = argparse.ArgumentParser(description='Train classifier and evaluate their accuracy')
    parser.add_argument('--data', type=str, help='datafile name')
    args = parser.parse_args()

    data = joblib.load(args.data)
    print(args.data)
    print(data.shape)

    x, label = np.hsplit(data, [-2])
    nb_classes = 2
    label = label.reshape((label.shape[0], nb_classes), order='F')
    x = x.reshape(x.shape[0], 64, 64, 3)

    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation, Flatten
    from keras.layers.pooling import MaxPooling2D
    from keras.layers.convolutional import Convolution2D, Conv2D
    from keras.optimizers import Adam
    from keras import optimizers

    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(64, 64, 3), name='Conv2D-1'))
    model.add(MaxPooling2D(pool_size=2, name='MaxPool'))
    model.add(Dropout(0.2, name='Dropout-1'))
    model.add(Conv2D(64, kernel_size=3, activation='relu', name='Conv2D-2'))
    model.add(Dropout(0.25, name='Dropout-2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='Dense'))
    model.add(Dense(nb_classes, activation='softmax', name='Output'))

    sgd = optimizers.sgd(lr=1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    print(x.shape)
    print(label.shape)
    print(x_test.shape)
    print(y_test.shape)

    evals = model.fit(x, label, batch_size=256, epochs=250, validation_data=(x_test, y_test), shuffle=True)
    history = evals.history
    # history keys differ across Keras versions ('acc' vs 'accuracy')
    train_accs = history.get('acc', history.get('accuracy'))
    eval_accs = history.get('val_acc', history.get('val_accuracy'))
    return train_accs, eval_accs


train_accs, eval_accs = pipeline()
print("Max eval acc:", max(eval_accs))
print("Max train acc:", max(train_accs))
2,797
30.438202
107
py
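All of these evaluation scripts recover images and one-hot labels from a single flat matrix with np.hsplit; a self-contained illustration of that layout, with synthetic shapes rather than the real CelebA data:

import numpy as np

n, nb_classes = 6, 2
images = np.random.rand(n, 64 * 64 * 3)            # flattened 64x64x3 images
labels = np.eye(nb_classes)[np.random.randint(0, nb_classes, n)]
data = np.hstack([images, labels])                 # generators save this layout

x, label = np.hsplit(data, [-nb_classes])          # split the one-hot tail back off
print(x.reshape(n, 64, 64, 3).shape, label.shape)  # (6, 64, 64, 3) (6, 2)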
G-PATE
G-PATE-master/evaluation/train-classifier-fmnist.py
#!/usr/bin/env python # coding: utf-8 import numpy as np import argparse import tensorflow as tf config = tf.ConfigProto() config.gpu_options.allow_growth = True # config.gpu_options.per_process_gpu_memory_fraction = 0.3 tf.keras.backend.set_session(tf.Session(config=config)); def pipeline(): parser = argparse.ArgumentParser(description='Train classifier and evaluate their accuracy') parser.add_argument('--data', type=str, help='datafile name') args = parser.parse_args() import joblib data = joblib.load(args.data) print(args.data) x, label = np.hsplit(data, [-10]) nb_classes = 10 label = label.reshape((label.shape[0], nb_classes), order='F') x = x.reshape(x.shape[0], 28, 28, 1) from keras.datasets import fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() from keras.utils import np_utils y_train = np_utils.to_categorical(y_train, 10) y_test = np_utils.to_categorical(y_test, 10) x_train = x_train.reshape(x_train.shape[0], 28, 28, 1) x_train = x_train.astype('float32') / 255. x_test = x_test.reshape(x_test.shape[0], 28, 28, 1) x_test = x_test.astype('float32') / 255. from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.pooling import MaxPooling2D from keras.layers.convolutional import Convolution2D, Conv2D from keras.optimizers import Adam from keras import optimizers model = Sequential() model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1), name='Conv2D-1')) model.add(MaxPooling2D(pool_size=2, name='MaxPool')) model.add(Dropout(0.2, name='Dropout-1')) model.add(Conv2D(64, kernel_size=3, activation='relu', name='Conv2D-2')) model.add(Dropout(0.25, name='Dropout-2')) model.add(Flatten(name='flatten')) model.add(Dense(64, activation='relu', name='Dense')) model.add(Dense(nb_classes, activation='softmax', name='Output')) sgd = optimizers.sgd(lr=2e-3) model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) print(x.shape) print(label.shape) print(x_test.shape) print(y_test.shape) train_accs = [] eval_accs = [] history = model.fit(x, label, batch_size=512, epochs=600, validation_data=(x_test, y_test), shuffle=True) if 'acc' in history.history: train_accs = history.history['acc'] else: train_accs = history.history['accuracy'] if 'val_acc' in history.history: eval_accs = history.history['val_acc'] else: eval_accs = history.history['val_accuracy'] return train_accs, eval_accs train_accs, eval_accs = pipeline() print("Max eval acc:", max(eval_accs)) print("Max train acc:", max(train_accs))
2,846
32.892857
109
py
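The 'acc' / 'accuracy' fallback above guards against the metric-key renaming between Keras versions; a small helper capturing the same pattern (illustrative, not part of the original script):

def get_metric(history, name):
    """Fetch a metric series from a Keras History regardless of whether the
    installed version records it as 'acc' or 'accuracy'."""
    aliases = {
        'acc': ('acc', 'accuracy'),
        'val_acc': ('val_acc', 'val_accuracy'),
    }
    for key in aliases.get(name, (name,)):
        if key in history.history:
            return history.history[key]
    raise KeyError(name)

# usage: train_accs = get_metric(history, 'acc'); eval_accs = get_metric(history, 'val_acc')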
G-PATE
G-PATE-master/evaluation/train-classifier-mnist.py
#!/usr/bin/env python # coding: utf-8 import numpy as np import argparse import tensorflow as tf config = tf.ConfigProto() config.gpu_options.allow_growth = True # config.gpu_options.per_process_gpu_memory_fraction = 0.3 tf.keras.backend.set_session(tf.Session(config=config)); def pipeline(): parser = argparse.ArgumentParser(description='Train classifier and evaluate their accuracy') parser.add_argument('--data', type=str, help='datafile name') args = parser.parse_args() import joblib data = joblib.load(args.data) print(args.data) x, label = np.hsplit(data, [-10]) nb_classes = 10 label = label.reshape((label.shape[0], nb_classes), order='F') x = x.reshape(x.shape[0], 28, 28, 1) from keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() from keras.utils import np_utils y_train = np_utils.to_categorical(y_train, 10) y_test = np_utils.to_categorical(y_test, 10) x_train = x_train.reshape(x_train.shape[0], 28, 28, 1) x_train = x_train.astype('float32') / 255. x_test = x_test.reshape(x_test.shape[0], 28, 28, 1) x_test = x_test.astype('float32') / 255. from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.pooling import MaxPooling2D from keras.layers.convolutional import Convolution2D, Conv2D from keras.optimizers import Adam from keras import optimizers model = Sequential() model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1), name='Conv2D-1')) model.add(MaxPooling2D(pool_size=2, name='MaxPool')) model.add(Dropout(0.2, name='Dropout-1')) model.add(Conv2D(64, kernel_size=3, activation='relu', name='Conv2D-2')) model.add(Dropout(0.25, name='Dropout-2')) model.add(Flatten(name='flatten')) model.add(Dense(64, activation='relu', name='Dense')) model.add(Dense(nb_classes, activation='softmax', name='Output')) sgd = optimizers.sgd(lr=1e-3) model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) print(x.shape) print(label.shape) print(x_test.shape) print(y_test.shape) train_accs = [] eval_accs = [] # for i in range(70): history = model.fit(x, label, batch_size=512, epochs=600, validation_data=(x_test, y_test), shuffle=True) train_accs = history.history['acc'] eval_accs = history.history['val_acc'] return train_accs, eval_accs train_accs, eval_accs = pipeline() print("Max eval acc:", max(eval_accs)) print("Max train acc:", max(train_accs))
2,661
32.696203
109
py
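Before training a classifier on generated data, it can be worth checking that the label block split off by np.hsplit really is one-hot; a short sketch with synthetic labels, not part of the original scripts:

import numpy as np

def check_one_hot(label, atol=1e-6):
    """Verify every row of a label block sums to ~1 and report class counts."""
    assert np.allclose(label.sum(axis=1), 1.0, atol=atol), 'rows are not one-hot'
    return np.bincount(label.argmax(axis=1), minlength=label.shape[1])

label = np.eye(10)[np.random.randint(0, 10, 1000)]
print(check_one_hot(label))  # per-class counts; roughly uniform here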
G-PATE
G-PATE-master/evaluation/train-classifier-hair.py
#!/usr/bin/env python
# coding: utf-8

import numpy as np
import argparse
import joblib

import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
tf.keras.backend.set_session(tf.Session(config=config))


def load_celeb():
    celebA_directory = '../../data/celebA/'
    tst_x = joblib.load(celebA_directory + 'celeb-tst-ups-hair-x.pkl')
    tst_y = joblib.load(celebA_directory + 'celeb-tst-ups-hair-y.pkl')
    print(tst_y.sum(), len(tst_y))
    from keras.utils import np_utils
    tst_y = np_utils.to_categorical(tst_y, 3)
    return tst_x, tst_y


def load_celeb_train():
    celebA_directory = '../../data/celebA/'
    tst_x = joblib.load(celebA_directory + 'celeb-trn-ups-hair-x.pkl')
    tst_y = joblib.load(celebA_directory + 'celeb-trn-ups-hair-y.pkl')
    print(tst_y.sum(), len(tst_y))
    from keras.utils import np_utils
    tst_y = np_utils.to_categorical(tst_y, 3)
    return tst_x, tst_y


x_test, y_test = load_celeb()


def pipeline():
    parser = argparse.ArgumentParser(description='Train classifier and evaluate their accuracy')
    parser.add_argument('--data', type=str, help='datafile name')
    args = parser.parse_args()

    data = joblib.load(args.data)
    print(args.data)
    print(data.shape)

    x, label = np.hsplit(data, [-3])
    nb_classes = 3
    label = label.reshape((label.shape[0], nb_classes), order='F')
    x = x.reshape(x.shape[0], 64, 64, 3)

    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation, Flatten
    from keras.layers.pooling import MaxPooling2D
    from keras.layers.convolutional import Convolution2D, Conv2D
    from keras.optimizers import Adam
    from keras import optimizers

    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(64, 64, 3), name='Conv2D-1'))
    model.add(MaxPooling2D(pool_size=2, name='MaxPool'))
    model.add(Dropout(0.2, name='Dropout-1'))
    model.add(Conv2D(64, kernel_size=3, activation='relu', name='Conv2D-2'))
    model.add(Dropout(0.25, name='Dropout-2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='Dense'))
    model.add(Dense(nb_classes, activation='softmax', name='Output'))

    sgd = optimizers.sgd(lr=1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    print(x.shape)
    print(label.shape)
    print(x_test.shape)
    print(y_test.shape)

    evals = model.fit(x, label, batch_size=256, epochs=250, validation_data=(x_test, y_test), shuffle=True)
    history = evals.history
    # history keys differ across Keras versions ('acc' vs 'accuracy')
    train_accs = history.get('acc', history.get('accuracy'))
    eval_accs = history.get('val_acc', history.get('val_accuracy'))
    return train_accs, eval_accs


train_accs, eval_accs = pipeline()
print("Max eval acc:", max(eval_accs))
print("Max train acc:", max(train_accs))
2,820
29.010638
107
py
G-PATE
G-PATE-master/evaluation/train-classifier-small-celebA.py
#!/usr/bin/env python
# coding: utf-8

import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Train classifier and evaluate their accuracy')
parser.add_argument('--data', type=str, help='datafile name')
args = parser.parse_args()

import joblib
data = joblib.load(args.data)
print(args.data)

import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
tf.keras.backend.set_session(tf.Session(config=config))


def load_celeb():
    celebA_directory = '../../data/celebA/'
    tst_x = joblib.load(celebA_directory + 'celebA-tst-x-small.pkl')
    tst_y = joblib.load(celebA_directory + 'celebA-tst-gender.pkl')
    print(tst_y.sum(), len(tst_y))
    from keras.utils import np_utils
    tst_y = np_utils.to_categorical(tst_y, 2)
    return tst_x, tst_y


x_test, y_test = load_celeb()


def pipeline(data):
    print(data.shape)
    x, label = np.hsplit(data, [-2])
    nb_classes = 2
    label = label.reshape((label.shape[0], nb_classes), order='F')
    x = x.reshape(x.shape[0], 32, 32, 3)

    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation, Flatten
    from keras.layers.pooling import MaxPooling2D
    from keras.layers.convolutional import Convolution2D, Conv2D
    from keras.optimizers import Adam
    from keras import optimizers

    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(32, 32, 3), name='Conv2D-1'))
    model.add(MaxPooling2D(pool_size=2, name='MaxPool'))
    model.add(Dropout(0.2, name='Dropout-1'))
    model.add(Conv2D(64, kernel_size=3, activation='relu', name='Conv2D-2'))
    model.add(Dropout(0.25, name='Dropout-2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='Dense'))
    model.add(Dense(nb_classes, activation='softmax', name='Output'))

    sgd = optimizers.sgd(lr=1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    print(x.shape)
    print(label.shape)
    print(x_test.shape)
    print(y_test.shape)

    evals = model.fit(x, label, batch_size=256, epochs=250, validation_data=(x_test, y_test), shuffle=True)
    return evals.history


hist = pipeline(data)
# the validation-accuracy key depends on the installed Keras version
val_accs = hist.get('val_acc', hist.get('val_accuracy'))
print("Max acc:", max(val_accs))
2,390
27.129412
107
py
kraken
kraken-main/setup.py
#!/usr/bin/env python
from setuptools import setup

setup(
    include_package_data=True,
    setup_requires=['pbr'],
    pbr=True,
)
134
14
30
py
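With pbr=True, setup() defers nearly all package metadata to setup.cfg. A minimal sketch of the kind of setup.cfg pbr consumes; the section and key names follow pbr's conventions, but the values here are illustrative rather than kraken's actual configuration:

# setup.cfg (illustrative values)
[metadata]
name = kraken
summary = OCR engine (placeholder summary)
description-file = README.rst

[files]
packages =
    kraken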
kraken
kraken-main/kraken/rpred.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ kraken.rpred ~~~~~~~~~~~~ Generators for recognition on lines images. """ import logging import numpy as np import bidi.algorithm as bd from abc import ABC, abstractmethod from PIL import Image from functools import partial from collections import defaultdict from typing import List, Tuple, Optional, Generator, Union, Dict, Sequence from kraken.lib.util import get_im_str, is_bitonal from kraken.lib.models import TorchSeqRecognizer from kraken.lib.segmentation import extract_polygons, compute_polygon_section from kraken.lib.exceptions import KrakenInputException from kraken.lib.dataset import ImageInputTransforms import copy __all__ = ['ocr_record', 'BaselineOCRRecord', 'BBoxOCRRecord', 'mm_rpred', 'rpred'] logger = logging.getLogger(__name__) class ocr_record(ABC): """ A record object containing the recognition result of a single line """ base_dir = None def __init__(self, prediction: str, cuts: Sequence[Union[Tuple[int, int], Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int]]]], confidences: Sequence[float], display_order: bool = True) -> None: self._prediction = prediction self._cuts = cuts self._confidences = confidences self._display_order = display_order @property @abstractmethod def type(self): pass def __len__(self) -> int: return len(self._prediction) def __str__(self) -> str: return self._prediction @property def prediction(self) -> str: return self._prediction @property def cuts(self) -> Sequence: return self._cuts @property def confidences(self) -> List[float]: return self._confidences def __iter__(self): self.idx = -1 return self @abstractmethod def __next__(self) -> Tuple[str, Union[Sequence[Tuple[int, int]], Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int]]], float]: pass @abstractmethod def __getitem__(self, key: Union[int, slice]): pass @abstractmethod def display_order(self, base_dir) -> 'ocr_record': pass @abstractmethod def logical_order(self, base_dir) -> 'ocr_record': pass class BaselineOCRRecord(ocr_record): """ A record object containing the recognition result of a single line in baseline format. Attributes: type: 'baselines' to indicate a baseline record prediction: The text predicted by the network as one continuous string. cuts: The absolute bounding polygons for each code point in prediction as a list of tuples [(x0, y0), (x1, y2), ...]. confidences: A list of floats indicating the confidence value of each code point. Notes: When slicing the record the behavior of the cuts is changed from earlier versions of kraken. Instead of returning per-character bounding polygons a single polygons section of the line bounding polygon starting at the first and extending to the last code point emitted by the network is returned. This aids numerical stability when computing aggregated bounding polygons such as for words. 
Individual code point bounding polygons are still accessible through the `cuts` attribute or by iterating over the record code point by code point. """ type = 'baselines' def __init__(self, prediction: str, cuts: Sequence[Tuple[int, int]], confidences: Sequence[float], line: Dict[str, List], display_order: bool = True) -> None: super().__init__(prediction, cuts, confidences, display_order) if 'baseline' not in line: raise TypeError('Invalid argument type (non-baseline line)') self.tags = None if 'tags' not in line else line['tags'] self.line = line['boundary'] self.baseline = line['baseline'] def __repr__(self) -> str: return f'pred: {self.prediction} baseline: {self.baseline} boundary: {self.line} confidences: {self.confidences}' def __next__(self) -> Tuple[str, int, float]: if self.idx + 1 < len(self): self.idx += 1 return (self.prediction[self.idx], compute_polygon_section(self.baseline, self.line, self.cuts[self.idx][0], self.cuts[self.idx][1]), self.confidences[self.idx]) else: raise StopIteration def _get_raw_item(self, key: int): if key < 0: key += len(self) if key >= len(self): raise IndexError('Index (%d) is out of range' % key) return (self.prediction[key], self._cuts[key], self.confidences[key]) def __getitem__(self, key: Union[int, slice]): if isinstance(key, slice): recs = [self._get_raw_item(i) for i in range(*key.indices(len(self)))] prediction = ''.join([x[0] for x in recs]) flat_offsets = sum((tuple(x[1]) for x in recs), ()) cut = compute_polygon_section(self.baseline, self.line, min(flat_offsets), max(flat_offsets)) confidence = np.mean([x[2] for x in recs]) return (prediction, cut, confidence) elif isinstance(key, int): pred, cut, confidence = self._get_raw_item(key) return (pred, compute_polygon_section(self.baseline, self.line, cut[0], cut[1]), confidence) else: raise TypeError('Invalid argument type') @property def cuts(self) -> Sequence[Tuple[int, int]]: return tuple([compute_polygon_section(self.baseline, self.line, cut[0], cut[1]) for cut in self._cuts]) def logical_order(self, base_dir: Optional[str] = None) -> 'BaselineOCRRecord': """ Returns the OCR record in Unicode logical order, i.e. in the order the characters in the line would be read by a human. Args: base_dir: An optional string defining the base direction (also called paragraph direction) for the BiDi algorithm. Valid values are 'L' or 'R'. If None is given the default auto-resolution will be used. """ if self._display_order: return self._reorder(base_dir) else: return self def display_order(self, base_dir: Optional[str] = None) -> 'BaselineOCRRecord': """ Returns the OCR record in Unicode display order, i.e. ordered from left to right inside the line. Args: base_dir: An optional string defining the base direction (also called paragraph direction) for the BiDi algorithm. Valid values are 'L' or 'R'. If None is given the default auto-resolution will be used. """ if self._display_order: return self else: return self._reorder(base_dir) def _reorder(self, base_dir: Optional[str] = None) -> 'BaselineOCRRecord': """ Reorder the record using the BiDi algorithm. 
""" storage = bd.get_empty_storage() if base_dir not in ('L', 'R'): base_level = bd.get_base_level(self._prediction) else: base_level = {'L': 0, 'R': 1}[base_dir] storage['base_level'] = base_level storage['base_dir'] = ('L', 'R')[base_level] bd.get_embedding_levels(self._prediction, storage) bd.explicit_embed_and_overrides(storage) bd.resolve_weak_types(storage) bd.resolve_neutral_types(storage, False) bd.resolve_implicit_levels(storage, False) for i, j in enumerate(zip(self._prediction, self._cuts, self._confidences)): storage['chars'][i]['record'] = j bd.reorder_resolved_levels(storage, False) bd.apply_mirroring(storage, False) prediction = '' cuts = [] confidences = [] for ch in storage['chars']: # code point may have been mirrored prediction = prediction + ch['ch'] cuts.append(ch['record'][1]) confidences.append(ch['record'][2]) line = {'boundary': self.line, 'baseline': self.baseline} rec = BaselineOCRRecord(prediction, cuts, confidences, line) rec.tags = self.tags rec.base_dir = base_dir rec._display_order = not self._display_order return rec class BBoxOCRRecord(ocr_record): """ A record object containing the recognition result of a single line in bbox format. """ type = 'box' def __init__(self, prediction: str, cuts: Sequence[Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int]]], confidences: Sequence[float], line: Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int]], display_order: bool = True) -> None: super().__init__(prediction, cuts, confidences, display_order) if 'baseline' in line: raise TypeError('Invalid argument type (baseline line)') self.line = line def __repr__(self) -> str: return f'pred: {self.prediction} line: {self.line} confidences: {self.confidences}' def __next__(self) -> Tuple[str, int, float]: if self.idx + 1 < len(self): self.idx += 1 return (self.prediction[self.idx], self.cuts[self.idx], self.confidences[self.idx]) else: raise StopIteration def _get_raw_item(self, key: int): if key < 0: key += len(self) if key >= len(self): raise IndexError('Index (%d) is out of range' % key) return (self.prediction[key], self.cuts[key], self.confidences[key]) def __getitem__(self, key: Union[int, slice]): if isinstance(key, slice): recs = [self._get_raw_item(i) for i in range(*key.indices(len(self)))] prediction = ''.join([x[0] for x in recs]) box = [x[1] for x in recs] flat_box = [point for pol in box for point in pol] flat_box = [x for point in flat_box for x in point] min_x, max_x = min(flat_box[::2]), max(flat_box[::2]) min_y, max_y = min(flat_box[1::2]), max(flat_box[1::2]) cut = ((min_x, min_y), (max_x, min_y), (max_x, max_y), (min_x, max_y)) confidence = np.mean([x[2] for x in recs]) return (prediction, cut, confidence) elif isinstance(key, int): return self._get_raw_item(key) else: raise TypeError('Invalid argument type') def logical_order(self, base_dir: Optional[str] = None) -> 'BBoxOCRRecord': """ Returns the OCR record in Unicode logical order, i.e. in the order the characters in the line would be read by a human. Args: base_dir: An optional string defining the base direction (also called paragraph direction) for the BiDi algorithm. Valid values are 'L' or 'R'. If None is given the default auto-resolution will be used. """ if self._display_order: return self._reorder(base_dir) else: return self def display_order(self, base_dir: Optional[str] = None) -> 'BBoxOCRRecord': """ Returns the OCR record in Unicode display order, i.e. ordered from left to right inside the line. 
        Args:
            base_dir: An optional string defining the base direction (also
                      called paragraph direction) for the BiDi algorithm. Valid
                      values are 'L' or 'R'. If None is given the default
                      auto-resolution will be used.
        """
        if self._display_order:
            return self
        else:
            return self._reorder(base_dir)

    def _reorder(self, base_dir: Optional[str] = None) -> 'BBoxOCRRecord':
        storage = bd.get_empty_storage()
        if base_dir not in ('L', 'R'):
            base_level = bd.get_base_level(self.prediction)
        else:
            base_level = {'L': 0, 'R': 1}[base_dir]
        storage['base_level'] = base_level
        storage['base_dir'] = ('L', 'R')[base_level]
        bd.get_embedding_levels(self.prediction, storage)
        bd.explicit_embed_and_overrides(storage)
        bd.resolve_weak_types(storage)
        bd.resolve_neutral_types(storage, False)
        bd.resolve_implicit_levels(storage, False)
        for i, j in enumerate(zip(self.prediction, self.cuts, self.confidences)):
            storage['chars'][i]['record'] = j
        bd.reorder_resolved_levels(storage, False)
        bd.apply_mirroring(storage, False)
        prediction = ''
        cuts = []
        confidences = []
        for ch in storage['chars']:
            # code point may have been mirrored
            prediction = prediction + ch['ch']
            cuts.append(ch['record'][1])
            confidences.append(ch['record'][2])
        # carry over whole line information
        rec = BBoxOCRRecord(prediction, cuts, confidences, self.line)
        rec.base_dir = base_dir
        rec._display_order = not self._display_order
        return rec


class mm_rpred(object):
    """
    Multi-model version of kraken.rpred.rpred
    """
    def __init__(self,
                 nets: Dict[str, TorchSeqRecognizer],
                 im: Image.Image,
                 bounds: dict,
                 pad: int = 16,
                 bidi_reordering: Union[bool, str] = True,
                 tags_ignore: Optional[List[str]] = None) -> None:
        """
        Multi-model version of kraken.rpred.rpred.

        Takes a dictionary of ISO15924 script identifiers->models and a
        script-annotated segmentation to dynamically select appropriate models
        for these lines.

        Args:
            nets (dict): A dict mapping tag values to TorchSeqRecognizer
                         objects. Recommended to be a defaultdict.
            im (PIL.Image.Image): Image to extract text from
            bounds (dict): A dictionary containing a 'boxes' entry with a list
                           of lists of coordinates (script, (x0, y0, x1, y1))
                           of a text line in the image and an entry
                           'text_direction' containing
                           'horizontal-lr/rl/vertical-lr/rl'.
            pad (int): Extra blank padding to the left and right of text line
            bidi_reordering (bool|str): Reorder classes in the ocr_record
                                        according to the Unicode bidirectional
                                        algorithm for correct display. Set to
                                        L|R to override the default text
                                        direction.
            tags_ignore (list): List of tag values to ignore during recognition

        Yields:
            An ocr_record containing the recognized text, absolute character
            positions, and confidence values for each character.

        Raises:
            KrakenInputException if the mapping between segmentation tags and
            networks is incomplete.
        """
        seg_types = set(recognizer.seg_type for recognizer in nets.values())
        if isinstance(nets, defaultdict):
            seg_types.add(nets.default_factory().seg_type)
            self._resolve_tags_to_model = partial(_resolve_tags_to_model, default=nets.default_factory())
        else:
            self._resolve_tags_to_model = _resolve_tags_to_model

        if not tags_ignore:
            tags_ignore = []

        if ('type' in bounds and bounds['type'] not in seg_types) or len(seg_types) > 1:
            logger.warning(f'Recognizers with segmentation types {seg_types} will be '
                           f'applied to segmentation of type {bounds["type"] if "type" in bounds else None}. '
                           f'This will likely result in severely degraded performance')

        one_channel_modes = set(recognizer.nn.one_channel_mode for recognizer in nets.values())
        if '1' in one_channel_modes and len(one_channel_modes) > 1:
            raise KrakenInputException('Mixing binary and non-binary recognition models is not supported.')
        elif '1' in one_channel_modes and not is_bitonal(im):
            logger.warning('Running binary models on non-binary input image '
                           '(mode {}). This will result in severely degraded '
                           'performance'.format(im.mode))

        if 'type' in bounds and bounds['type'] == 'baselines':
            valid_norm = False
            self.len = len(bounds['lines'])
            self.seg_key = 'lines'
            self.next_iter = self._recognize_baseline_line
            self.line_iter = iter(bounds['lines'])
            tags = set()
            for x in bounds['lines']:
                tags.update(x['tags'].values())
        else:
            valid_norm = True
            self.len = len(bounds['boxes'])
            self.seg_key = 'boxes'
            self.next_iter = self._recognize_box_line
            self.line_iter = iter(bounds['boxes'])
            tags = set(x[0] for line in bounds['boxes'] for x in line)

        im_str = get_im_str(im)
        logger.info('Running {} multi-script recognizers on {} with {} lines'.format(len(nets), im_str, self.len))

        filtered_tags = []
        miss = []
        for tag in tags:
            if not isinstance(nets, defaultdict) and (not nets.get(tag) and tag not in tags_ignore):
                miss.append(tag)
            elif tag not in tags_ignore:
                filtered_tags.append(tag)
        tags = filtered_tags

        if miss:
            raise KrakenInputException('Missing models for tags {}'.format(set(miss)))

        # build dictionary for line preprocessing
        self.ts = {}
        for tag in tags:
            logger.debug('Loading line transforms for {}'.format(tag))
            network = nets[tag]
            batch, channels, height, width = network.nn.input
            self.ts[tag] = ImageInputTransforms(batch, height, width, channels, (pad, 0), valid_norm)

        self.im = im
        self.nets = nets
        self.bidi_reordering = bidi_reordering
        self.pad = pad
        self.bounds = bounds
        self.tags_ignore = tags_ignore

    def _recognize_box_line(self, line):
        flat_box = [point for box in line['boxes'][0] for point in box[1]]
        xmin, xmax = min(flat_box[::2]), max(flat_box[::2])
        ymin, ymax = min(flat_box[1::2]), max(flat_box[1::2])
        line_bbox = ((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin))

        prediction = ''
        cuts = []
        confidences = []
        for tag, (box, coords) in zip(map(lambda x: x[0], line['boxes'][0]),
                                      extract_polygons(self.im,
                                                       {'text_direction': line['text_direction'],
                                                        'boxes': map(lambda x: x[1], line['boxes'][0])})):
            self.box = box
            # skip if tag is set to ignore
            if self.tags_ignore is not None and tag in self.tags_ignore:
                logger.warning(f'Ignoring {tag} line segment.')
                continue
            # check if boxes are non-zero in any dimension
            if 0 in box.size:
                logger.warning(f'bbox {coords} with zero dimension. Emitting empty record.')
                return BBoxOCRRecord('', (), (), coords)
            # try conversion into tensor
            try:
                logger.debug('Preparing run.')
                line = self.ts[tag](box)
            except Exception:
                logger.warning(f'Conversion of line {coords} failed. Emitting empty record.')
                return BBoxOCRRecord('', (), (), coords)
            # check if line is non-zero
            if line.max() == line.min():
                logger.warning('Empty run. 
Emitting empty record.') return BBoxOCRRecord('', (), (), coords) _, net = self._resolve_tags_to_model({'type': tag}, self.nets) logger.debug(f'Forward pass with model {tag}.') preds = net.predict(line.unsqueeze(0))[0] # calculate recognized LSTM locations of characters logger.debug('Convert to absolute coordinates') # calculate recognized LSTM locations of characters # scale between network output and network input self.net_scale = line.shape[2]/net.outputs.shape[2] # scale between network input and original line self.in_scale = box.size[0]/(line.shape[2]-2*self.pad) pred = ''.join(x[0] for x in preds) pos = [] conf = [] for _, start, end, c in preds: if self.bounds['text_direction'].startswith('horizontal'): xmin = coords[0] + self._scale_val(start, 0, self.box.size[0]) xmax = coords[0] + self._scale_val(end, 0, self.box.size[0]) pos.append([[xmin, coords[1]], [xmin, coords[3]], [xmax, coords[3]], [xmax, coords[1]]]) else: ymin = coords[1] + self._scale_val(start, 0, self.box.size[1]) ymax = coords[1] + self._scale_val(end, 0, self.box.size[1]) pos.append([[coords[0], ymin], [coords[2], ymin], [coords[2], ymax], [coords[0], ymax]]) conf.append(c) prediction += pred cuts.extend(pos) confidences.extend(conf) rec = BBoxOCRRecord(prediction, cuts, confidences, line_bbox) if self.bidi_reordering: logger.debug('BiDi reordering record.') return rec.logical_order(base_dir=self.bidi_reordering if self.bidi_reordering in ('L', 'R') else None) else: logger.debug('Emitting raw record') return rec.display_order(None) def _recognize_baseline_line(self, line): if self.tags_ignore is not None: for tag in line['lines'][0]['tags'].values(): if tag in self.tags_ignore: logger.info(f'Ignoring line segment with tags {line["lines"][0]["tags"]} based on {tag}.') return BaselineOCRRecord('', [], [], line['lines'][0]) try: box, coords = next(extract_polygons(self.im, line)) except KrakenInputException as e: logger.warning(f'Extracting line failed: {e}') return BaselineOCRRecord('', [], [], line['lines'][0]) self.box = box tag, net = self._resolve_tags_to_model(coords['tags'], self.nets) # check if boxes are non-zero in any dimension if 0 in box.size: logger.warning(f'bbox {coords} with zero dimension. Emitting empty record.') return BaselineOCRRecord('', [], [], coords) # try conversion into tensor try: line = self.ts[tag](box) except Exception as e: logger.warning(f'Tensor conversion failed with {e}. Emitting empty record.') return BaselineOCRRecord('', [], [], coords) # check if line is non-zero if line.max() == line.min(): logger.warning('Empty line after tensor conversion. Emitting empty record.') return BaselineOCRRecord('', [], [], coords) preds = net.predict(line.unsqueeze(0))[0] # calculate recognized LSTM locations of characters # scale between network output and network input self.net_scale = line.shape[2]/net.outputs.shape[2] # scale between network input and original line self.in_scale = box.size[0]/(line.shape[2]-2*self.pad) # XXX: fix bounding box calculation ocr_record for multi-codepoint labels. 
        pred = ''.join(x[0] for x in preds)
        pos = []
        conf = []
        for _, start, end, c in preds:
            pos.append((self._scale_val(start, 0, self.box.size[0]),
                        self._scale_val(end, 0, self.box.size[0])))
            conf.append(c)
        rec = BaselineOCRRecord(pred, pos, conf, coords)
        if self.bidi_reordering:
            logger.debug('BiDi reordering record.')
            return rec.logical_order(base_dir=self.bidi_reordering if self.bidi_reordering in ('L', 'R') else None)
        else:
            logger.debug('Emitting raw record')
            return rec.display_order(None)

    def __next__(self):
        bound = self.bounds
        bound[self.seg_key] = [next(self.line_iter)]
        return self.next_iter(bound)

    def __iter__(self):
        return self

    def __len__(self):
        return self.len

    def _scale_val(self, val, min_val, max_val):
        return int(round(min(max(((val*self.net_scale)-self.pad)*self.in_scale, min_val), max_val-1)))


def rpred(network: TorchSeqRecognizer,
          im: Image.Image,
          bounds: dict,
          pad: int = 16,
          bidi_reordering: Union[bool, str] = True) -> Generator[ocr_record, None, None]:
    """
    Uses a TorchSeqRecognizer and a segmentation to recognize text

    Args:
        network (kraken.lib.models.TorchSeqRecognizer): A TorchSeqRecognizer
                                                        object
        im (PIL.Image.Image): Image to extract text from
        bounds (dict): A dictionary containing a 'boxes' entry with a list of
                       coordinates (x0, y0, x1, y1) of a text line in the image
                       and an entry 'text_direction' containing
                       'horizontal-lr/rl/vertical-lr/rl'.
        pad (int): Extra blank padding to the left and right of text line.
                   Auto-disabled when expected network inputs are incompatible
                   with padding.
        bidi_reordering (bool|str): Reorder classes in the ocr_record according
                                    to the Unicode bidirectional algorithm for
                                    correct display. Set to L|R to change base
                                    text direction.

    Yields:
        An ocr_record containing the recognized text, absolute character
        positions, and confidence values for each character.
    """
    bounds = copy.deepcopy(bounds)
    if 'boxes' in bounds:
        boxes = bounds['boxes']
        rewrite_boxes = []
        for box in boxes:
            rewrite_boxes.append([('default', box)])
        bounds['boxes'] = rewrite_boxes
        bounds['script_detection'] = True
    return mm_rpred(defaultdict(lambda: network), im, bounds, pad, bidi_reordering)


def _resolve_tags_to_model(tags: Dict[str, str],
                           model_map: Dict[str, TorchSeqRecognizer],
                           default: Optional[TorchSeqRecognizer] = None) -> Tuple[str, TorchSeqRecognizer]:
    """
    Resolves a dict of segmentation tags to a (tag, model) pair using a model map.
    """
    for tag in tags.values():
        if tag in model_map:
            return tag, model_map[tag]
    if default:
        # dict views are not iterators, so wrap in iter() before calling next()
        return next(iter(tags.values())), default
    raise KrakenInputException('No model for tags {}'.format(tags))
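# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module): a
# minimal driver for the `rpred` generator over a bounding-box segmentation.
# The model and image paths are hypothetical placeholders; any recognition
# model loadable by kraken.lib.models.load_any will do.
if __name__ == '__main__':
    from kraken.lib import models

    net = models.load_any('model.mlmodel')       # hypothetical model file
    page = Image.open('page.png')                # hypothetical page image
    bounds = {'boxes': [(10, 10, 700, 60)],      # one text line bounding box
              'text_direction': 'horizontal-lr'}
    for record in rpred(net, page, bounds):
        print(record.prediction, min(record.confidences, default=0.0))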
28,893
40.454806
121
py
kraken
kraken-main/kraken/linegen.py
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
linegen
~~~~~~~

An advanced line generation tool using Pango for proper text shaping. The
actual drawing code was adapted from the create_image utility from nototools
available at [0].

Line degradation uses a local model described in [1].

[0] https://github.com/googlei18n/nototools
[1] Kanungo, Tapas, et al. "A statistical, nonparametric methodology for
    document degradation model validation." IEEE Transactions on Pattern
    Analysis and Machine Intelligence 22.11 (2000): 1209-1223.
"""
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.measurements import find_objects
from scipy.ndimage.morphology import distance_transform_cdt, binary_closing
from scipy.ndimage.interpolation import affine_transform, geometric_transform
from PIL import Image, ImageOps

from typing import AnyStr

import logging
import ctypes
import ctypes.util

import numpy as np

from kraken.lib.exceptions import KrakenCairoSurfaceException
from kraken.lib.util import pil2array, array2pil

logger = logging.getLogger(__name__)

pc_lib = ctypes.util.find_library('pangocairo-1.0')
p_lib = ctypes.util.find_library('pango-1.0')
c_lib = ctypes.util.find_library('cairo')

if pc_lib is None:
    raise ImportError("Couldn't load pangocairo line generator dependency. Please install pangocairo, pango, and cairo.")
if p_lib is None:
    raise ImportError("Couldn't load pango line generator dependency. Please install pangocairo, pango, and cairo.")
if c_lib is None:
    raise ImportError("Couldn't load cairo line generator dependency. Please install pangocairo, pango, and cairo.")

pangocairo = ctypes.CDLL(pc_lib)
pango = ctypes.CDLL(p_lib)
cairo = ctypes.CDLL(c_lib)

__all__ = ['LineGenerator', 'ocropy_degrade', 'degrade_line', 'distort_line']


class CairoSurface(ctypes.Structure):
    pass


class CairoContext(ctypes.Structure):
    pass


class PangoFontDescription(ctypes.Structure):
    pass


class PangoLanguage(ctypes.Structure):
    pass


class PangoLayout(ctypes.Structure):
    pass


class PangoContext(ctypes.Structure):
    pass


class PangoRectangle(ctypes.Structure):
    _fields_ = [('x', ctypes.c_int),
                ('y', ctypes.c_int),
                ('width', ctypes.c_int),
                ('height', ctypes.c_int)]


class ensureBytes(object):
    """
    Simple class ensuring the arguments of type char * are actually a series
    of bytes.
""" @classmethod def from_param(cls, value: AnyStr) -> bytes: if isinstance(value, bytes): return value else: return value.encode('utf-8') cairo.cairo_create.argtypes = [ctypes.POINTER(CairoSurface)] cairo.cairo_create.restype = ctypes.POINTER(CairoContext) cairo.cairo_destroy.argtypes = [ctypes.POINTER(CairoContext)] cairo.cairo_image_surface_create.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int] cairo.cairo_image_surface_create.restype = ctypes.POINTER(CairoSurface) cairo.cairo_surface_destroy.argtypes = [ctypes.POINTER(CairoSurface)] cairo.cairo_image_surface_get_data.restype = ctypes.c_void_p cairo.cairo_set_source_rgb.argtypes = [ctypes.POINTER(CairoContext), ctypes.c_double, ctypes.c_double, ctypes.c_double] cairo.cairo_paint.argtypes = [ctypes.POINTER(CairoContext)] pangocairo.pango_cairo_create_context.argtypes = [ctypes.POINTER(CairoContext)] pangocairo.pango_cairo_create_context.restype = ctypes.POINTER(PangoContext) pangocairo.pango_cairo_update_layout.argtypes = [ctypes.POINTER(CairoContext), ctypes.POINTER(PangoLayout)] pangocairo.pango_cairo_show_layout.argtypes = [ctypes.POINTER(CairoContext), ctypes.POINTER(PangoLayout)] pango.pango_language_from_string.argtypes = [ensureBytes] # type: ignore pango.pango_language_from_string.restype = ctypes.POINTER(PangoLanguage) pango.pango_context_set_language.argtypes = [ctypes.POINTER(PangoContext), ctypes.POINTER(PangoLanguage)] pango.pango_font_description_new.restype = ctypes.POINTER(PangoFontDescription) pango.pango_font_description_set_family.argtypes = [ctypes.POINTER(PangoFontDescription), ensureBytes] # type: ignore pango.pango_font_description_set_size.argtypes = [ctypes.POINTER(PangoFontDescription), ctypes.c_int] pango.pango_font_description_set_weight.argtypes = [ctypes.POINTER(PangoFontDescription), ctypes.c_uint] pango.pango_layout_new.restype = ctypes.POINTER(PangoLayout) pango.pango_layout_set_markup.argtypes = [ctypes.POINTER(PangoLayout), ensureBytes, ctypes.c_int] # type: ignore pango.pango_layout_set_font_description.argtypes = [ctypes.POINTER(PangoLayout), ctypes.POINTER(PangoFontDescription)] pango.pango_layout_get_context.argtypes = [ctypes.POINTER(PangoLayout)] pango.pango_layout_get_context.restype = ctypes.POINTER(PangoContext) pango.pango_layout_get_pixel_extents.argtypes = [ctypes.POINTER(PangoLayout), ctypes.POINTER(PangoRectangle), ctypes.POINTER(PangoRectangle)] class LineGenerator(object): """ Produces degraded line images using a single collection of font families. """ def __init__(self, family='Sans', font_size=32, font_weight=400, language=None): self.language = language self.font = pango.pango_font_description_new() # XXX: get PANGO_SCALE programmatically from somewhere logger.debug('Setting font {}, size {}, weight {}'.format(family, font_size, font_weight)) pango.pango_font_description_set_size(self.font, font_size * 1024) pango.pango_font_description_set_family(self.font, family) pango.pango_font_description_set_weight(self.font, font_weight) def render_line(self, text): """ Draws a line onto a Cairo surface which will be converted to an pillow Image. Args: text (unicode): A string which will be rendered as a single line. Returns: PIL.Image of mode 'L'. Raises: KrakenCairoSurfaceException if the Cairo surface couldn't be created (usually caused by invalid dimensions. 
""" logger.info('Rendering line \'{}\''.format(text)) logger.debug('Creating temporary cairo surface') temp_surface = cairo.cairo_image_surface_create(0, 0, 0) width, height = _draw_on_surface(temp_surface, self.font, self.language, text) cairo.cairo_surface_destroy(temp_surface) if width == 0 or height == 0: logger.error('Surface for \'{}\' zero pixels in at least one dimension'.format(text)) raise KrakenCairoSurfaceException('Surface zero pixels in at least one dimension', width, height) logger.debug('Creating sized cairo surface') real_surface = cairo.cairo_image_surface_create(0, width, height) _draw_on_surface(real_surface, self.font, self.language, text) logger.debug('Extracing data from real surface') data = cairo.cairo_image_surface_get_data(real_surface) size = int(4 * width * height) buffer = ctypes.create_string_buffer(size) ctypes.memmove(buffer, data, size) logger.debug('Loading data into PIL image') im = Image.frombuffer("RGBA", (width, height), buffer, "raw", "BGRA", 0, 1) cairo.cairo_surface_destroy(real_surface) logger.debug('Expand and grayscale image') im = im.convert('L') im = ImageOps.expand(im, 5, 255) return im def _draw_on_surface(surface, font, language, text): logger.debug('Creating cairo and pangocairo contexts') cr = cairo.cairo_create(surface) pangocairo_ctx = pangocairo.pango_cairo_create_context(cr) logger.debug('Creating pangocairo layout') layout = pango.pango_layout_new(pangocairo_ctx) pango_ctx = pango.pango_layout_get_context(layout) if language is not None: logger.debug('Setting language {} on context'.format(language)) pango_language = pango.pango_language_from_string(language) pango.pango_context_set_language(pango_ctx, pango_language) logger.debug('Setting font description on layout') pango.pango_layout_set_font_description(layout, font) logger.debug('Filling background of surface') cairo.cairo_set_source_rgb(cr, 1.0, 1.0, 1.0) cairo.cairo_paint(cr) logger.debug('Typsetting text') pango.pango_layout_set_markup(layout, text, -1) logger.debug('Drawing text') cairo.cairo_set_source_rgb(cr, 0.0, 0.0, 0.0) pangocairo.pango_cairo_update_layout(cr, layout) pangocairo.pango_cairo_show_layout(cr, layout) cairo.cairo_destroy(cr) logger.debug('Getting pixel extents') ink_rect = PangoRectangle() logical_rect = PangoRectangle() pango.pango_layout_get_pixel_extents(layout, ctypes.byref(ink_rect), ctypes.byref(logical_rect)) return max(ink_rect.width, logical_rect.width), max(ink_rect.height, logical_rect.height) def ocropy_degrade(im, distort=1.0, dsigma=20.0, eps=0.03, delta=0.3, degradations=((0.5, 0.0, 0.5, 0.0),)): """ Degrades and distorts a line using the same noise model used by ocropus. Args: im (PIL.Image): Input image distort (float): dsigma (float): eps (float): delta (float): degradations (list): list returning 4-tuples corresponding to the degradations argument of ocropus-linegen. Returns: PIL.Image in mode 'L' """ w, h = im.size # XXX: determine correct output shape from transformation matrices instead # of guesstimating. 
logger.debug('Pasting source image into canvas') image = Image.new('L', (int(1.5*w), 4*h), 255) image.paste(im, (int((image.size[0] - w) / 2), int((image.size[1] - h) / 2))) a = pil2array(image.convert('L')) logger.debug('Selecting degradations') (sigma, ssigma, threshold, sthreshold) = degradations[np.random.choice(len(degradations))] sigma += (2 * np.random.rand() - 1) * ssigma threshold += (2 * np.random.rand() - 1) * sthreshold a = a * 1.0 / np.amax(a) if sigma > 0.0: logger.debug('Apply Gaussian filter') a = gaussian_filter(a, sigma) logger.debug('Adding noise') a += np.clip(np.random.randn(*a.shape) * 0.2, -0.25, 0.25) logger.debug('Perform affine transformation and resize') m = np.array([[1 + eps * np.random.randn(), 0.0], [eps * np.random.randn(), 1.0 + eps * np.random.randn()]]) w, h = a.shape c = np.array([w / 2.0, h / 2]) d = c - np.dot(m, c) + np.array([np.random.randn() * delta, np.random.randn() * delta]) a = affine_transform(a, m, offset=d, order=1, mode='constant', cval=a[0, 0]) a = np.array(a > threshold, 'f') [[r, c]] = find_objects(np.array(a == 0, 'i')) r0 = r.start r1 = r.stop c0 = c.start c1 = c.stop a = a[r0 - 5:r1 + 5, c0 - 5:c1 + 5] if distort > 0: logger.debug('Perform geometric transformation') h, w = a.shape hs = np.random.randn(h, w) ws = np.random.randn(h, w) hs = gaussian_filter(hs, dsigma) ws = gaussian_filter(ws, dsigma) hs *= distort / np.amax(hs) ws *= distort / np.amax(ws) def _f(p): return (p[0] + hs[p[0], p[1]], p[1] + ws[p[0], p[1]]) a = geometric_transform(a, _f, output_shape=(h, w), order=1, mode='constant', cval=np.amax(a)) im = array2pil(a).convert('L') return im def degrade_line(im, eta=0.0, alpha=1.5, beta=1.5, alpha_0=1.0, beta_0=1.0): """ Degrades a line image by adding noise. For parameter meanings consult [1]. Args: im (PIL.Image): Input image eta (float): alpha (float): beta (float): alpha_0 (float): beta_0 (float): Returns: PIL.Image in mode '1' """ logger.debug('Inverting and normalizing input image') im = pil2array(im) im = np.amax(im)-im im = im*1.0/np.amax(im) logger.debug('Calculating foreground distance transform') fg_dist = distance_transform_cdt(1-im, metric='taxicab') logger.debug('Calculating flip to white probability') fg_prob = alpha_0 * np.exp(-alpha * (fg_dist**2)) + eta fg_prob[im == 1] = 0 fg_flip = np.random.binomial(1, fg_prob) logger.debug('Calculating background distance transform') bg_dist = distance_transform_cdt(im, metric='taxicab') logger.debug('Calculating flip to black probability') bg_prob = beta_0 * np.exp(-beta * (bg_dist**2)) + eta bg_prob[im == 0] = 0 bg_flip = np.random.binomial(1, bg_prob) # flip logger.debug('Flipping') im -= bg_flip im += fg_flip logger.debug('Binary closing') sel = np.array([[1, 1], [1, 1]]) im = binary_closing(im, sel) logger.debug('Converting to image') return array2pil(255-im.astype('B')*255) def distort_line(im, distort=3.0, sigma=10, eps=0.03, delta=0.3): """ Distorts a line image. Run BEFORE degrade_line as a white border of 5 pixels will be added. Args: im (PIL.Image): Input image distort (float): sigma (float): eps (float): delta (float): Returns: PIL.Image in mode 'L' """ w, h = im.size # XXX: determine correct output shape from transformation matrices instead # of guesstimating. 
logger.debug('Pasting source image into canvas') image = Image.new('L', (int(1.5*w), 4*h), 255) image.paste(im, (int((image.size[0] - w) / 2), int((image.size[1] - h) / 2))) line = pil2array(image.convert('L')) # shear in y direction with factor eps * randn(), scaling with 1 + eps * # randn() in x/y axis (all offset at d) logger.debug('Performing affine transformation') m = np.array([[1 + eps * np.random.randn(), 0.0], [eps * np.random.randn(), 1.0 + eps * np.random.randn()]]) c = np.array([w/2.0, h/2]) d = c - np.dot(m, c) + np.array([np.random.randn() * delta, np.random.randn() * delta]) line = affine_transform(line, m, offset=d, order=1, mode='constant', cval=255) hs = gaussian_filter(np.random.randn(4*h, int(1.5*w)), sigma) ws = gaussian_filter(np.random.randn(4*h, int(1.5*w)), sigma) hs *= distort/np.amax(hs) ws *= distort/np.amax(ws) def _f(p): return (p[0] + hs[p[0], p[1]], p[1] + ws[p[0], p[1]]) logger.debug('Performing geometric transformation') im = array2pil(geometric_transform(line, _f, order=1, mode='nearest')) logger.debug('Cropping canvas to content box') im = im.crop(ImageOps.invert(im).getbbox()) return im
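# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
# rendering a synthetic line and running it through the degradation pipeline.
# Per the docstring above, distort_line should run before degrade_line. The
# output path is a hypothetical placeholder; Pango/Cairo must be installed.
if __name__ == '__main__':
    gen = LineGenerator(family='Sans', font_size=32)
    line = gen.render_line('A synthetic training line')
    line = distort_line(line, distort=3.0, sigma=10)
    line = degrade_line(line, eta=0.0, alpha=1.5, beta=1.5)
    line.save('synthetic_line.png')  # hypothetical output file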
15,140
36.201474
120
py
kraken
kraken-main/kraken/align.py
#
# Copyright 2021 Teklia
# Copyright 2021 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
align
~~~~~

A character alignment module using a network output lattice and ground truth
to accurately determine grapheme locations in input data.
"""
import torch
import logging
import numpy as np

from PIL import Image
from bidi.algorithm import get_display
from dataclasses import dataclass
from typing import List, Dict, Any, Optional, Literal

from kraken import rpred
from kraken.lib.codec import PytorchCodec
from kraken.lib.models import TorchSeqRecognizer
from kraken.lib.exceptions import KrakenInputException, KrakenEncodeException
from kraken.lib.segmentation import compute_polygon_section

logger = logging.getLogger('kraken')


def forced_align(doc: Dict[str, Any], model: TorchSeqRecognizer, base_dir: Optional[Literal['L', 'R']] = None) -> List[rpred.ocr_record]:
    """
    Performs a forced character alignment of text with recognition model
    output activations.

    Args:
        doc: Parsed document.
        model: Recognition model to use for alignment.

    Returns:
        A list of kraken.rpred.ocr_record.
    """
    im = Image.open(doc['image'])
    predictor = rpred.rpred(model, im, doc)
    if 'type' in predictor.bounds and predictor.bounds['type'] == 'baselines':
        rec_class = rpred.BaselineOCRRecord
    records = []
    # enable training mode in last layer to get log_softmax output
    model.nn.nn[-1].training = True

    for idx, line in enumerate(doc['lines']):
        # convert text to display order
        do_text = get_display(line['text'], base_dir=base_dir)
        # encode into labels, ignoring unencodable sequences
        labels = model.codec.encode(do_text).long()
        next(predictor)
        if model.outputs.shape[2] < 2*len(labels):
            logger.warning(f'Could not align line {idx}. 
Output sequence length {model.outputs.shape[2]} < ' f'{2*len(labels)} (length of "{line["text"]}" after encoding).') records.append(rpred.BaselineOCRRecord('', [], [], line)) continue emission = torch.tensor(model.outputs).squeeze().T trellis = get_trellis(emission, labels) path = backtrack(trellis, emission, labels) path = merge_repeats(path, do_text) pred = [] pos = [] conf = [] for seg in path: pred.append(seg.label) pos.append((predictor._scale_val(seg.start, 0, predictor.box.size[0]), predictor._scale_val(seg.end, 0, predictor.box.size[0]))) conf.append(seg.score) records.append(rpred.BaselineOCRRecord(pred, pos, conf, line, display_order=True)) return records """ Copied from the forced alignment with Wav2Vec2 tutorial of pytorch available at: https://github.com/pytorch/audio/blob/main/examples/tutorials/forced_alignment_tutorial.py """ @dataclass class Point: token_index: int time_index: int score: float # Merge the labels @dataclass class Segment: label: str start: int end: int score: float def __repr__(self): return f"{self.label}\t({self.score:4.2f}): [{self.start:5d}, {self.end:5d})" @property def length(self): return self.end - self.start def get_trellis(emission, tokens): # width x labels in log domain num_frame = emission.size(0) num_tokens = len(tokens) # Trellis has extra dimensions for both time axis and tokens. # The extra dim for tokens represents <SoS> (start-of-sentence) # The extra dim for time axis is for simplification of the code. trellis = torch.empty((num_frame + 1, num_tokens + 1)) trellis[0, 0] = 0 trellis[1:, 0] = torch.cumsum(emission[:, 0], 0) trellis[0, -num_tokens:] = -float("inf") trellis[-num_tokens:, 0] = float("inf") for t in range(num_frame): trellis[t + 1, 1:] = torch.maximum( # Score for staying at the same token trellis[t, 1:] + emission[t, 0], # Score for changing to the next token trellis[t, :-1] + emission[t, tokens], ) return trellis def backtrack(trellis, emission, tokens): # Note: # j and t are indices for trellis, which has extra dimensions # for time and tokens at the beginning. # When referring to time frame index `T` in trellis, # the corresponding index in emission is `T-1`. # Similarly, when referring to token index `J` in trellis, # the corresponding index in transcript is `J-1`. j = trellis.size(1) - 1 t_start = torch.argmax(trellis[:, j]).item() path = [] for t in range(t_start, 0, -1): # 1. Figure out if the current position was stay or change # Note (again): # `emission[J-1]` is the emission at time frame `J` of trellis dimension. # Score for token staying the same from time frame J-1 to T. stayed = trellis[t - 1, j] + emission[t - 1, 0] # Score for token changing from C-1 at T-1 to J at T. changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]] # 2. Store the path with frame-wise probability. prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item() # Return token index and time index in non-trellis coordinate. path.append(Point(j - 1, t - 1, prob)) # 3. Update the token if changed > stayed: j -= 1 if j == 0: break else: raise ValueError("Failed to align") return path[::-1] def merge_repeats(path, ground_truth): i1, i2 = 0, 0 segments = [] while i1 < len(path): while i2 < len(path) and path[i1].token_index == path[i2].token_index: i2 += 1 score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1) segments.append( Segment( ground_truth[path[i1].token_index], path[i1].time_index, path[i2 - 1].time_index + 1, score, ) ) i1 = i2 return segments
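# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
# exercising the alignment helpers directly. The emission lattice and the
# label ids are made up; a real caller would take both from a recognition
# model via forced_align above.
if __name__ == '__main__':
    emission = torch.randn(20, 5).log_softmax(-1)  # 20 frames, 5 classes, class 0 = blank
    tokens = [1, 2, 3]                             # hypothetical label ids
    trellis = get_trellis(emission, tokens)
    print(trellis.shape)                           # (frames + 1, tokens + 1)
    # merge_repeats collapses per-frame points into per-character segments;
    # here the path is hand-built to show the (token, frame, score) layout.
    path = [Point(0, 3, 0.9), Point(0, 4, 0.8), Point(1, 9, 0.95), Point(2, 15, 0.9)]
    for seg in merge_repeats(path, 'abc'):
        print(seg)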
6,614
32.75
137
py
kraken
kraken-main/kraken/blla.py
# # Copyright 2019 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ kraken.blla ~~~~~~~~~~~ Trainable layout analysis tools for kraken for line and region detection. The line recognizer uses the baseline paradigm. """ import PIL import torch import logging import numpy as np import pkg_resources import shapely.geometry as geom import torch.nn.functional as F import torchvision.transforms as tf from typing import Optional, Dict, Callable, Union, List, Any, Tuple from scipy.ndimage import gaussian_filter from skimage.filters import sobel from kraken.lib import vgsl, dataset from kraken.lib.util import is_bitonal, get_im_str from kraken.lib.exceptions import KrakenInputException, KrakenInvalidModelException from kraken.lib.segmentation import (polygonal_reading_order, vectorize_lines, vectorize_regions, scale_polygonal_lines, calculate_polygonal_environment, scale_regions) __all__ = ['segment'] logger = logging.getLogger(__name__) def compute_segmentation_map(im: PIL.Image.Image, mask: Optional[np.ndarray] = None, model: vgsl.TorchVGSLModel = None, device: str = 'cpu', autocast: bool = False) -> Dict[str, Any]: """ Args: im: Input image mask: A bi-level mask array of the same size as `im` where 0-valued regions are ignored for segmentation purposes. Disables column detection. model: A TorchVGSLModel containing a segmentation model. device: The target device to run the neural network on. autocast: Runs the model with automatic mixed precision Returns: A dictionary containing the heatmaps ('heatmap', torch.Tensor), class map ('cls_map', Dict[str, Dict[str, int]]), the bounding regions for polygonization purposes ('bounding_regions', List[str]), the scale between the input image and the network output ('scale', float), and the scaled input image to the network ('scal_im', PIL.Image.Image). Raises: KrakenInputException: When given an invalid mask. """ im_str = get_im_str(im) logger.info(f'Segmenting {im_str}') if model.input[1] == 1 and model.one_channel_mode == '1' and not is_bitonal(im): logger.warning('Running binary model on non-binary input image ' '(mode {}). 
This will result in severely degraded '
                       'performance'.format(im.mode))
    model.eval()
    model.to(device)

    batch, channels, height, width = model.input
    padding = model.user_metadata['hyper_params']['padding'] if 'padding' in model.user_metadata['hyper_params'] else (0, 0)
    # expand padding to 4-tuple (left, right, top, bottom)
    if isinstance(padding, int):
        padding = (padding,) * 4
    elif len(padding) == 2:
        padding = (padding[0], padding[0], padding[1], padding[1])

    transforms = dataset.ImageInputTransforms(batch, height, width, channels, padding, valid_norm=False)
    tf_idx, _ = next(filter(lambda x: isinstance(x[1], tf.ToTensor), enumerate(transforms.transforms)))
    res_tf = tf.Compose(transforms.transforms[:tf_idx])
    scal_im = np.array(res_tf(im).convert('L'))

    tensor_im = transforms(im)
    if mask:
        if mask.mode != '1' and not is_bitonal(mask):
            logger.error('Mask is not bitonal')
            raise KrakenInputException('Mask is not bitonal')
        mask = mask.convert('1')
        if mask.size != im.size:
            logger.error(f'Mask size {mask.size} doesn\'t match image size {im.size}')
            raise KrakenInputException(f'Mask size {mask.size} doesn\'t match image size {im.size}')
        logger.info('Masking enabled in segmenter.')
        tensor_im[~transforms(mask).bool()] = 0

    with torch.autocast(device_type=device.split(":")[0], enabled=autocast):
        with torch.no_grad():
            logger.debug('Running network forward pass')
            o, _ = model.nn(tensor_im.unsqueeze(0).to(device))
    logger.debug('Upsampling network output')
    o = F.interpolate(o, size=scal_im.shape)
    # remove padding
    padding = [pad if pad else None for pad in padding]
    padding[1] = -padding[1] if padding[1] else None
    padding[3] = -padding[3] if padding[3] else None
    o = o[:, :, padding[2]:padding[3], padding[0]:padding[1]]
    scal_im = scal_im[padding[2]:padding[3], padding[0]:padding[1]]

    o = o.squeeze().cpu().float().numpy()
    scale = np.divide(im.size, o.shape[:0:-1])
    bounding_regions = model.user_metadata['bounding_regions'] if 'bounding_regions' in model.user_metadata else None
    return {'heatmap': o,
            'cls_map': model.user_metadata['class_mapping'],
            'bounding_regions': bounding_regions,
            'scale': scale,
            'scal_im': scal_im}


def vec_regions(heatmap: torch.Tensor, cls_map: Dict, scale: float, **kwargs) -> Dict[str, List[List[Tuple[int, int]]]]:
    """
    Computes regions from a stack of heatmaps, a class mapping, and scaling
    factor.

    Args:
        heatmap: A stack of heatmaps of shape `NxHxW` output from the network.
        cls_map: Dictionary mapping string identifiers to indices on the stack
                 of heatmaps.
        scale: Scaling factor between heatmap and unscaled input image.

    Returns:
        A dictionary containing a key for each region type with a list of
        regions inside.
    """
    logger.info('Vectorizing regions')
    regions = {}
    for region_type, idx in cls_map['regions'].items():
        logger.debug(f'Vectorizing regions of type {region_type}')
        regions[region_type] = vectorize_regions(heatmap[idx])
    for reg_id, regs in regions.items():
        regions[reg_id] = scale_regions(regs, scale)
    return regions


def vec_lines(heatmap: torch.Tensor,
              cls_map: Dict[str, Dict[str, int]],
              scale: float,
              text_direction: str = 'horizontal-lr',
              reading_order_fn: Callable = polygonal_reading_order,
              regions: List[np.ndarray] = None,
              scal_im: np.ndarray = None,
              suppl_obj: List[np.ndarray] = None,
              topline: Optional[bool] = False,
              raise_on_error: bool = False,
              **kwargs) -> List[Dict[str, Any]]:
    r"""
    Computes lines from a stack of heatmaps, a class mapping, and scaling
    factor.

    Args:
        heatmap: A stack of heatmaps of shape `NxHxW` output from the network. 
cls_map: Dictionary mapping string identifiers to indices on the stack of heatmaps. scale: Scaling factor between heatmap and unscaled input image. text_direction: Text directions used as hints in the reading order algorithm. reading_order_fn: Reading order calculation function. regions: Regions to be used as boundaries during polygonization and atomic blocks during reading order determination for lines contained within. scal_im: A numpy array containing the scaled input image. suppl_obj: Supplementary objects which are used as boundaries during polygonization. topline: True for a topline, False for baseline, or None for a centerline. raise_on_error: Raises error instead of logging them when they are not-blocking Returns: A list of dictionaries containing the baselines, bounding polygons, and line type in reading order: .. code-block:: :force: [{'script': '$baseline_type', baseline': [[x0, y0], [x1, y1], ..., [x_n, y_n]], 'boundary': [[x0, y0, x1, y1], ... [x_m, y_m]]}, {'script': '$baseline_type', baseline': [[x0, ...]], 'boundary': [[x0, ...]]}, {'script': '$baseline_type', baseline': [[x0, ...]], 'boundary': [[x0, ...]]}, ... ] """ st_sep = cls_map['aux']['_start_separator'] end_sep = cls_map['aux']['_end_separator'] logger.info('Vectorizing baselines') baselines = [] for bl_type, idx in cls_map['baselines'].items(): logger.debug(f'Vectorizing lines of type {bl_type}') baselines.extend([(bl_type, x) for x in vectorize_lines(heatmap[(st_sep, end_sep, idx), :, :], text_direction=text_direction[:-3])]) logger.debug('Polygonizing lines') im_feats = gaussian_filter(sobel(scal_im), 0.5) lines = [] reg_pols = [geom.Polygon(x) for x in regions] for bl_idx in range(len(baselines)): bl = baselines[bl_idx] mid_point = geom.LineString(bl[1]).interpolate(0.5, normalized=True) suppl_obj = [x[1] for x in baselines[:bl_idx] + baselines[bl_idx+1:]] for reg_idx, reg_pol in enumerate(reg_pols): if reg_pol.contains(mid_point): suppl_obj.append(regions[reg_idx]) pol = calculate_polygonal_environment( baselines=[bl[1]], im_feats=im_feats, suppl_obj=suppl_obj, topline=topline, raise_on_error=raise_on_error ) if pol[0] is not None: lines.append((bl[0], bl[1], pol[0])) logger.debug('Scaling vectorized lines') sc = scale_polygonal_lines([x[1:] for x in lines], scale) lines = list(zip([x[0] for x in lines], [x[0] for x in sc], [x[1] for x in sc])) logger.debug('Reordering baselines') lines = reading_order_fn(lines=lines, regions=regions, text_direction=text_direction[-2:]) return [{'tags': {'type': bl_type}, 'baseline': bl, 'boundary': pl} for bl_type, bl, pl in lines] def segment(im: PIL.Image.Image, text_direction: str = 'horizontal-lr', mask: Optional[np.ndarray] = None, reading_order_fn: Callable = polygonal_reading_order, model: Union[List[vgsl.TorchVGSLModel], vgsl.TorchVGSLModel] = None, device: str = 'cpu', raise_on_error: bool = False, autocast: bool = False) -> Dict[str, Any]: r""" Segments a page into text lines using the baseline segmenter. Segments a page into text lines and returns the polyline formed by each baseline and their estimated environment. Args: im: Input image. The mode can generally be anything but it is possible to supply a binarized-input-only model which requires accordingly treated images. text_direction: Passed-through value for serialization.serialize. mask: A bi-level mask image of the same size as `im` where 0-valued regions are ignored for segmentation purposes. Disables column detection. reading_order_fn: Function to determine the reading order. 
Has to accept a list of tuples (baselines, polygon) and a text direction (`lr` or `rl`). model: One or more TorchVGSLModel containing a segmentation model. If none is given a default model will be loaded. device: The target device to run the neural network on. raise_on_error: Raises error instead of logging them when they are not-blocking autocast: Runs the model with automatic mixed precision Returns: A dictionary containing the text direction and under the key 'lines' a list of reading order sorted baselines (polylines) and their respective polygonal boundaries. The last and first point of each boundary polygon are connected. .. code-block:: :force: {'text_direction': '$dir', 'type': 'baseline', 'lines': [ {'baseline': [[x0, y0], [x1, y1], ..., [x_n, y_n]], 'boundary': [[x0, y0, x1, y1], ... [x_m, y_m]]}, {'baseline': [[x0, ...]], 'boundary': [[x0, ...]]} ] 'regions': [ {'region': [[x0, y0], [x1, y1], ..., [x_n, y_n]], 'type': 'image'}, {'region': [[x0, ...]], 'type': 'text'} ] } Raises: KrakenInvalidModelException: if the given model is not a valid segmentation model. KrakenInputException: if the mask is not bitonal or does not match the image size. """ if model is None: logger.info('No segmentation model given. Loading default model.') model = vgsl.TorchVGSLModel.load_model(pkg_resources.resource_filename(__name__, 'blla.mlmodel')) if isinstance(model, vgsl.TorchVGSLModel): model = [model] for nn in model: if nn.model_type != 'segmentation': raise KrakenInvalidModelException(f'Invalid model type {nn.model_type} for {nn}') if 'class_mapping' not in nn.user_metadata: raise KrakenInvalidModelException(f'Segmentation model {nn} does not contain valid class mapping') im_str = get_im_str(im) logger.info(f'Segmenting {im_str}') for net in model: if 'topline' in net.user_metadata: loc = {None: 'center', True: 'top', False: 'bottom'}[net.user_metadata['topline']] logger.debug(f'Baseline location: {loc}') rets = compute_segmentation_map(im, mask, net, device, autocast=autocast) regions = vec_regions(**rets) # flatten regions for line ordering/fetch bounding regions line_regs = [] suppl_obj = [] for cls, regs in regions.items(): line_regs.extend(regs) if rets['bounding_regions'] is not None and cls in rets['bounding_regions']: suppl_obj.extend(regs) # convert back to net scale suppl_obj = scale_regions(suppl_obj, 1/rets['scale']) line_regs = scale_regions(line_regs, 1/rets['scale']) lines = vec_lines(**rets, regions=line_regs, reading_order_fn=reading_order_fn, text_direction=text_direction, suppl_obj=suppl_obj, topline=net.user_metadata['topline'] if 'topline' in net.user_metadata else False, raise_on_error=raise_on_error) if len(rets['cls_map']['baselines']) > 1: script_detection = True else: script_detection = False return {'text_direction': text_direction, 'type': 'baselines', 'lines': lines, 'regions': regions, 'script_detection': script_detection}
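# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
# segmenting a page with the packaged default baseline model. The input image
# path is a hypothetical placeholder; any RGB or grayscale page scan works.
if __name__ == '__main__':
    from PIL import Image

    page = Image.open('page.png')  # hypothetical page scan
    seg = segment(page, text_direction='horizontal-lr', device='cpu')
    for line in seg['lines']:
        # each entry carries a baseline polyline and its bounding polygon
        print(line['tags']['type'], line['baseline'][0], line['baseline'][-1])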
15,295
41.371191
140
py
kraken
kraken-main/kraken/pageseg.py
# # Copyright 2015 Benjamin Kiessling # 2014 Thomas M. Breuel # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ kraken.pageseg ~~~~~~~~~~~~~~ Layout analysis methods. """ import logging import numpy as np from typing import Tuple, List, Callable, Optional, Dict, Any, Union from scipy.ndimage.filters import (gaussian_filter, uniform_filter, maximum_filter) from kraken.lib import morph, sl from kraken.lib.util import pil2array, is_bitonal, get_im_str from kraken.lib.exceptions import KrakenInputException from kraken.lib.segmentation import reading_order, topsort __all__ = ['segment'] logger = logging.getLogger(__name__) class record(object): """ Simple dict-like object. """ def __init__(self, **kw): self.__dict__.update(kw) self.label = 0 # type: int self.bounds = [] # type: List self.mask = None # type: np.ndarray def find(condition): "Return the indices where ravel(condition) is true" res, = np.nonzero(np.ravel(condition)) return res def binary_objects(binary: np.ndarray) -> np.ndarray: """ Labels features in an array and segments them into objects. """ labels, _ = morph.label(binary) objects = morph.find_objects(labels) return objects def estimate_scale(binary: np.ndarray) -> float: """ Estimates image scale based on number of connected components. """ objects = binary_objects(binary) bysize = sorted(objects, key=sl.area) scalemap = np.zeros(binary.shape) for o in bysize: if np.amax(scalemap[o]) > 0: continue scalemap[o] = sl.area(o)**0.5 scale = np.median(scalemap[(scalemap > 3) & (scalemap < 100)]) return scale def compute_boxmap(binary: np.ndarray, scale: float, threshold: Tuple[float, int] = (.5, 4), dtype: str = 'i') -> np.ndarray: """ Returns grapheme cluster-like boxes based on connected components. 
""" objects = binary_objects(binary) bysize = sorted(objects, key=sl.area) boxmap = np.zeros(binary.shape, dtype) for o in bysize: if sl.area(o)**.5 < threshold[0]*scale: continue if sl.area(o)**.5 > threshold[1]*scale: continue boxmap[o] = 1 return boxmap def compute_lines(segmentation: np.ndarray, scale: float) -> List[record]: """Given a line segmentation map, computes a list of tuples consisting of 2D slices and masked images.""" logger.debug('Convert segmentation to lines') lobjects = morph.find_objects(segmentation) lines = [] for i, o in enumerate(lobjects): if o is None: continue if sl.dim1(o) < 2*scale or sl.dim0(o) < scale: continue mask = (segmentation[o] == i+1) if np.amax(mask) == 0: continue result = record() result.label = i+1 result.bounds = o result.mask = mask lines.append(result) return lines def compute_separators_morph(binary: np.ndarray, scale: float, sepwiden: int = 10, maxcolseps: int = 2) -> np.ndarray: """Finds vertical black lines corresponding to column separators.""" logger.debug('Finding vertical black column lines') d0 = int(max(5, scale/4)) d1 = int(max(5, scale)) + sepwiden thick = morph.r_dilation(binary, (d0, d1)) vert = morph.rb_opening(thick, (10*scale, 1)) vert = morph.r_erosion(vert, (d0//2, sepwiden)) vert = morph.select_regions(vert, sl.dim1, min=3, nbest=2*maxcolseps) vert = morph.select_regions(vert, sl.dim0, min=20*scale, nbest=maxcolseps) return vert def compute_colseps_conv(binary: np.ndarray, scale: float = 1.0, minheight: int = 10, maxcolseps: int = 2) -> np.ndarray: """ Find column separators by convolution and thresholding. Args: binary: scale: minheight: maxcolseps: Returns: Separators """ logger.debug(f'Finding max {maxcolseps} column separators') # find vertical whitespace by thresholding smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5)) smoothed = uniform_filter(smoothed, (5.0*scale, 1)) thresh = (smoothed < np.amax(smoothed)*0.1) # find column edges by filtering grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0, 1)) grad = uniform_filter(grad, (10.0*scale, 1)) grad = (grad > 0.5*np.amax(grad)) # combine edges and whitespace seps = np.minimum(thresh, maximum_filter(grad, (int(scale), int(5*scale)))) seps = maximum_filter(seps, (int(2*scale), 1)) # select only the biggest column separators seps = morph.select_regions(seps, sl.dim0, min=minheight*scale, nbest=maxcolseps) return seps def compute_black_colseps(binary: np.ndarray, scale: float, maxcolseps: int) -> Tuple[np.ndarray, np.ndarray]: """ Computes column separators from vertical black lines. Args: binary: Numpy array of the binary image scale: maxcolseps: Returns: (colseps, binary): """ logger.debug('Extract vertical black column separators from lines') seps = compute_separators_morph(binary, scale, maxcolseps) colseps = np.maximum(compute_colseps_conv(binary, scale, maxcolseps=maxcolseps), seps) binary = np.minimum(binary, 1-seps) return colseps, binary def compute_white_colseps(binary: np.ndarray, scale: float, maxcolseps: int) -> Tuple[np.ndarray, np.ndarray]: """ Computes column separators either from vertical black lines or whitespace. Args: binary: Numpy array of the binary image scale: Returns: colseps: """ return compute_colseps_conv(binary, scale, maxcolseps=maxcolseps) def norm_max(v: np.ndarray) -> np.ndarray: """ Normalizes the input array by maximum value. 
""" return v/np.amax(v) def compute_gradmaps(binary: np.ndarray, scale: float, gauss: bool = False): """ Use gradient filtering to find baselines Args: binary: scale: gauss: Use gaussian instead of uniform filtering Returns: (bottom, top, boxmap) """ # use gradient filtering to find baselines logger.debug('Computing gradient maps') boxmap = compute_boxmap(binary, scale) cleaned = boxmap*binary if gauss: grad = gaussian_filter(1.0*cleaned, (0.3*scale, 6*scale), order=(1, 0)) else: grad = gaussian_filter(1.0*cleaned, (max(4, 0.3*scale), scale), order=(1, 0)) grad = uniform_filter(grad, (1, 6*scale)) bottom = norm_max((grad < 0)*(-grad)) top = norm_max((grad > 0)*grad) return bottom, top, boxmap def compute_line_seeds(binary: np.ndarray, bottom: np.ndarray, top: np.ndarray, colseps: np.ndarray, scale: float, threshold: float = 0.2) -> np.ndarray: """ Base on gradient maps, computes candidates for baselines and xheights. Then, it marks the regions between the two as a line seed. """ logger.debug('Finding line seeds') vrange = int(scale) bmarked = maximum_filter(bottom == maximum_filter(bottom, (vrange, 0)), (2, 2)) bmarked = bmarked * (bottom > threshold*np.amax(bottom)*threshold)*(1-colseps) tmarked = maximum_filter(top == maximum_filter(top, (vrange, 0)), (2, 2)) tmarked = tmarked * (top > threshold*np.amax(top)*threshold/2)*(1-colseps) tmarked = maximum_filter(tmarked, (1, 20)) seeds = np.zeros(binary.shape, 'i') delta = max(3, int(scale/2)) for x in range(bmarked.shape[1]): transitions = sorted([(y, 1) for y in find(bmarked[:, x])] + [(y, 0) for y in find(tmarked[:, x])])[::-1] transitions += [(0, 0)] for ls in range(len(transitions)-1): y0, s0 = transitions[ls] if s0 == 0: continue seeds[y0-delta:y0, x] = 1 y1, s1 = transitions[ls+1] if s1 == 0 and (y0-y1) < 5*scale: seeds[y1:y0, x] = 1 seeds = maximum_filter(seeds, (1, int(1+scale))) seeds = seeds * (1-colseps) seeds, _ = morph.label(seeds) return seeds def remove_hlines(binary: np.ndarray, scale: float, maxsize: int = 10) -> np.ndarray: """ Removes horizontal black lines that only interfere with page segmentation. Args: binary: scale: maxsize: maximum size of removed lines Returns: numpy.ndarray containing the filtered image. """ logger.debug('Filtering horizontal lines') labels, _ = morph.label(binary) objects = morph.find_objects(labels) for i, b in enumerate(objects): if sl.width(b) > maxsize*scale: labels[b][labels[b] == i+1] = 0 return np.array(labels != 0, 'B') def rotate_lines(lines: np.ndarray, angle: float, offset: int) -> np.ndarray: """ Rotates line bounding boxes around the origin and adding and offset. """ logger.debug(f'Rotate line coordinates by {angle} with offset {offset}') angle = np.radians(angle) r = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) p = np.array(lines).reshape((-1, 2)) offset = np.array([2*offset]) p = p.dot(r).reshape((-1, 4)).astype(int) + offset x = np.sort(p[:, [0, 2]]) y = np.sort(p[:, [1, 3]]) return np.column_stack((x.flatten(), y.flatten())).reshape(-1, 4) def segment(im, text_direction: str = 'horizontal-lr', scale: Optional[float] = None, maxcolseps: float = 2, black_colseps: bool = False, no_hlines: bool = True, pad: Union[int, Tuple[int, int]] = 0, mask: Optional[np.ndarray] = None, reading_order_fn: Callable = reading_order) -> Dict[str, Any]: """ Segments a page into text lines. Segments a page into text lines and returns the absolute coordinates of each line in reading order. 
Args: im: A bi-level page of mode '1' or 'L' text_direction: Principal direction of the text (horizontal-lr/rl/vertical-lr/rl) scale: Scale of the image. Will be auto-determined if set to `None`. maxcolseps: Maximum number of whitespace column separators black_colseps: Whether column separators are assumed to be vertical black lines or not no_hlines: Switch for small horizontal line removal. pad: Padding to add to line bounding boxes. If int the same padding is used both left and right. If a 2-tuple, uses (padding_left, padding_right). mask: A bi-level mask image of the same size as `im` where 0-valued regions are ignored for segmentation purposes. Disables column detection. reading_order_fn: Function to call to order line output. Callable accepting a list of slices (y, x) and a text direction in (`rl`, `lr`). Returns: A dictionary containing the text direction and a list of reading order sorted bounding boxes under the key 'boxes': .. code-block:: {'text_direction': '$dir', 'boxes': [(x1, y1, x2, y2),...]} Raises: KrakenInputException: if the input image is not binarized or the text direction is invalid. """ im_str = get_im_str(im) logger.info(f'Segmenting {im_str}') if im.mode != '1' and not is_bitonal(im): logger.error(f'Image {im_str} is not bi-level') raise KrakenInputException(f'Image {im_str} is not bi-level') # rotate input image for vertical lines if text_direction.startswith('horizontal'): angle = 0 offset = (0, 0) elif text_direction == 'vertical-lr': angle = 270 offset = (0, im.size[1]) elif text_direction == 'vertical-rl': angle = 90 offset = (im.size[0], 0) else: logger.error(f'Invalid text direction \'{text_direction}\'') raise KrakenInputException(f'Invalid text direction {text_direction}') logger.debug(f'Rotating input image by {angle} degrees') im = im.rotate(angle, expand=True) a = pil2array(im) binary = np.array(a > 0.5*(np.amin(a) + np.amax(a)), 'i') binary = 1 - binary _, ccs = morph.label(1 - binary) if ccs > np.dot(*im.size)/(30*30): logger.warning(f'Too many connected components for a page image: {ccs}') return {'text_direction': text_direction, 'boxes': []} if not scale: scale = estimate_scale(binary) if no_hlines: binary = remove_hlines(binary, scale) # emptyish images will cause exceptions here. try: if mask: if mask.mode != '1' and not is_bitonal(mask): logger.error('Mask is not bitonal') raise KrakenInputException('Mask is not bitonal') mask = mask.convert('1') if mask.size != im.size: logger.error(f'Mask size {mask.size} doesn\'t match image size {im.size}') raise KrakenInputException(f'Mask size {mask.size} doesn\'t match image size {im.size}') logger.info('Masking enabled in segmenter. 
Disabling column detection.') mask = mask.rotate(angle, expand=True) colseps = pil2array(mask) elif black_colseps: colseps, binary = compute_black_colseps(binary, scale, maxcolseps) else: colseps = compute_white_colseps(binary, scale, maxcolseps) except ValueError: logger.warning(f'Exception in column finder (probably empty image) for {im_str}') return {'text_direction': text_direction, 'boxes': []} bottom, top, boxmap = compute_gradmaps(binary, scale) seeds = compute_line_seeds(binary, bottom, top, colseps, scale) llabels = morph.propagate_labels(boxmap, seeds, conflict=0) spread = morph.spread_labels(seeds, maxdist=scale) llabels = np.where(llabels > 0, llabels, spread*binary) segmentation = llabels*binary lines = compute_lines(segmentation, scale) order = reading_order_fn([line.bounds for line in lines], text_direction[-2:]) lsort = topsort(order) lines = [lines[i].bounds for i in lsort] lines = [(s2.start, s1.start, s2.stop, s1.stop) for s1, s2 in lines] if isinstance(pad, int): pad = (pad, pad) lines = [(max(x[0]-pad[0], 0), x[1], min(x[2]+pad[1], im.size[0]), x[3]) for x in lines] return {'text_direction': text_direction, 'boxes': rotate_lines(lines, 360-angle, offset).tolist(), 'script_detection': False}
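# A minimal usage sketch for the legacy box segmenter above; 'page.png' is a
# placeholder for any bi-level page image. segment() returns reading-order
# sorted line bounding boxes as absolute (x0, y0, x1, y1) coordinates.
if __name__ == '__main__':
    from PIL import Image

    page = Image.open('page.png').convert('1')
    res = segment(page, text_direction='horizontal-lr', pad=(5, 5))
    for x0, y0, x1, y1 in res['boxes']:
        print(f'line: ({x0}, {y0}) -> ({x1}, {y1})')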
15,329
34.651163
118
py
kraken
kraken-main/kraken/kraken.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ kraken.kraken ~~~~~~~~~~~~~ Command line drivers for recognition functionality. """ import os import warnings import logging import pkg_resources from typing import Dict, Union, List, cast, Any, IO, Callable from pathlib import Path from rich.traceback import install from functools import partial from PIL import Image import click from kraken.lib import log from kraken.lib.progress import KrakenProgressBar, KrakenDownloadProgressBar warnings.simplefilter('ignore', UserWarning) logging.captureWarnings(True) logger = logging.getLogger('kraken') # install rich traceback handler install(suppress=[click]) APP_NAME = 'kraken' SEGMENTATION_DEFAULT_MODEL = pkg_resources.resource_filename(__name__, 'blla.mlmodel') DEFAULT_MODEL = ['en_best.mlmodel'] LEGACY_MODEL_DIR = '/usr/local/share/ocropus' # raise default max image size to 20k * 20k pixels Image.MAX_IMAGE_PIXELS = 20000 ** 2 def message(msg: str, **styles) -> None: if logger.getEffectiveLevel() >= 30: click.secho(msg, **styles) def get_input_parser(type_str: str) -> Callable[[str], Dict[str, Any]]: if type_str == 'alto': from kraken.lib.xml import parse_alto return parse_alto elif type_str == 'page': from kraken.lib.xml import parse_page return parse_page elif type_str == 'xml': from kraken.lib.xml import parse_xml return parse_xml elif type_str == 'image': return Image.open # chainable functions of functional components (binarization/segmentation/recognition) def binarizer(threshold, zoom, escale, border, perc, range, low, high, input, output) -> None: from kraken import binarization ctx = click.get_current_context() if ctx.meta['first_process']: if ctx.meta['input_format_type'] != 'image': input = get_input_parser(ctx.meta['input_format_type'])(input)['image'] ctx.meta['first_process'] = False else: raise click.UsageError('Binarization has to be the initial process.') try: im = Image.open(input) except IOError as e: raise click.BadParameter(str(e)) message('Binarizing\t', nl=False) try: res = binarization.nlbin(im, threshold, zoom, escale, border, perc, range, low, high) if ctx.meta['last_process'] and ctx.meta['output_mode'] != 'native': with click.open_file(output, 'w', encoding='utf-8') as fp: fp = cast(IO[Any], fp) logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output)) res.save(f'{output}.png') from kraken import serialization fp.write(serialization.serialize([], image_name=f'{output}.png', image_size=res.size, template=ctx.meta['output_template'], template_source='custom' if ctx.meta['output_mode'] == 'template' else 'native', processing_steps=ctx.meta['steps'])) else: form = None ext = os.path.splitext(output)[1] if ext in ['.jpg', '.jpeg', '.JPG', '.JPEG', '']: form = 'png' if ext: logger.warning('jpeg does not support 1bpp images. 
Forcing to png.') res.save(output, format=form) ctx.meta['base_image'] = output except Exception: if ctx.meta['raise_failed']: raise message('\u2717', fg='red') ctx.exit(1) message('\u2713', fg='green') def segmenter(legacy, model, text_direction, scale, maxcolseps, black_colseps, remove_hlines, pad, mask, device, input, output) -> None: import json from kraken import pageseg from kraken import blla ctx = click.get_current_context() if ctx.meta['first_process']: if ctx.meta['input_format_type'] != 'image': input = get_input_parser(ctx.meta['input_format_type'])(input)['image'] ctx.meta['first_process'] = False if 'base_image' not in ctx.meta: ctx.meta['base_image'] = input try: im = Image.open(input) except IOError as e: raise click.BadParameter(str(e)) if mask: try: mask = Image.open(mask) except IOError as e: raise click.BadParameter(str(e)) message('Segmenting\t', nl=False) try: if legacy: res = pageseg.segment(im, text_direction, scale, maxcolseps, black_colseps, no_hlines=remove_hlines, pad=pad, mask=mask) else: res = blla.segment(im, text_direction, mask=mask, model=model, device=device, raise_on_error=ctx.meta['raise_failed'], autocast=ctx.meta["autocast"]) except Exception: if ctx.meta['raise_failed']: raise message('\u2717', fg='red') ctx.exit(1) if ctx.meta['last_process'] and ctx.meta['output_mode'] != 'native': with click.open_file(output, 'w', encoding='utf-8') as fp: fp = cast(IO[Any], fp) logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output)) from kraken import serialization fp.write(serialization.serialize_segmentation(res, image_name=ctx.meta['base_image'], image_size=im.size, template=ctx.meta['output_template'], template_source='custom' if ctx.meta['output_mode'] == 'template' else 'native', processing_steps=ctx.meta['steps'])) else: with click.open_file(output, 'w') as fp: fp = cast(IO[Any], fp) json.dump(res, fp) message('\u2713', fg='green') def recognizer(model, pad, no_segmentation, bidi_reordering, tags_ignore, input, output) -> None: import json from kraken import rpred ctx = click.get_current_context() bounds = None if 'base_image' not in ctx.meta: ctx.meta['base_image'] = input if ctx.meta['first_process']: if ctx.meta['input_format_type'] != 'image': doc = get_input_parser(ctx.meta['input_format_type'])(input) ctx.meta['base_image'] = doc['image'] doc['text_direction'] = 'horizontal-lr' if doc['base_dir'] and bidi_reordering is True: message(f'Setting base text direction for BiDi reordering to {doc["base_dir"]} (from XML input file)') bidi_reordering = doc['base_dir'] bounds = doc try: im = Image.open(ctx.meta['base_image']) except IOError as e: raise click.BadParameter(str(e)) if not bounds and ctx.meta['base_image'] != input: with click.open_file(input, 'r') as fp: try: fp = cast(IO[Any], fp) bounds = json.load(fp) except ValueError as e: raise click.UsageError(f'{input} invalid segmentation: {str(e)}') elif not bounds: if no_segmentation: bounds = {'script_detection': False, 'text_direction': 'horizontal-lr', 'boxes': [(0, 0) + im.size]} else: raise click.UsageError('No line segmentation given. Add one with the input or run `segment` first.') elif no_segmentation: logger.warning('no_segmentation mode enabled but segmentation defined. 
Ignoring --no-segmentation option.') tags = set() # script detection if 'script_detection' in bounds and bounds['script_detection']: it = rpred.mm_rpred(model, im, bounds, pad, bidi_reordering=bidi_reordering, tags_ignore=tags_ignore) else: it = rpred.rpred(model['default'], im, bounds, pad, bidi_reordering=bidi_reordering) preds = [] with KrakenProgressBar() as progress: pred_task = progress.add_task('Processing', total=len(it), visible=True if not ctx.meta['verbose'] else False) for pred in it: preds.append(pred) progress.update(pred_task, advance=1) ctx = click.get_current_context() with click.open_file(output, 'w', encoding='utf-8') as fp: fp = cast(IO[Any], fp) message(f'Writing recognition results for {ctx.meta["orig_file"]}\t', nl=False) logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output)) if ctx.meta['output_mode'] != 'native': from kraken import serialization fp.write(serialization.serialize(records=preds, image_name=ctx.meta['base_image'], image_size=Image.open(ctx.meta['base_image']).size, writing_mode=ctx.meta['text_direction'], scripts=tags, regions=bounds['regions'] if 'regions' in bounds else None, template=ctx.meta['output_template'], template_source='custom' if ctx.meta['output_mode'] == 'template' else 'native', processing_steps=ctx.meta['steps'])) else: fp.write('\n'.join(s.prediction for s in preds)) message('\u2713', fg='green') @click.group(chain=True) @click.version_option() @click.option('-i', '--input', type=(click.Path(exists=True, dir_okay=False, path_type=Path), # type: ignore click.Path(writable=True, dir_okay=False, path_type=Path)), multiple=True, help='Input-output file pairs. Each input file (first argument) is mapped to one ' 'output file (second argument), e.g. `-i input.png output.txt`') @click.option('-I', '--batch-input', multiple=True, help='Glob expression to add multiple files at once.') @click.option('-o', '--suffix', default='', show_default=True, help='Suffix for output files from batch and PDF inputs.') @click.option('-v', '--verbose', default=0, count=True, show_default=True) @click.option('-f', '--format-type', type=click.Choice(['image', 'alto', 'page', 'pdf', 'xml']), default='image', help='Sets the default input type. In image mode inputs are image ' 'files, alto/page expects XML files in the respective format, pdf ' 'expects PDF files with numbered suffixes added to output file ' 'names as needed.') @click.option('-p', '--pdf-format', default='{src}_{idx:06d}', show_default=True, help='Format for output of PDF files. valid fields ' 'are `src` (source file), `idx` (page number), and `uuid` (v4 uuid). ' '`-o` suffixes are appended to this format string.') @click.option('-h', '--hocr', 'serializer', help='Switch between hOCR, ALTO, abbyyXML, PageXML or "native" ' 'output. Native are plain image files for image, JSON for ' 'segmentation, and text for transcription output.', flag_value='hocr') @click.option('-a', '--alto', 'serializer', flag_value='alto') @click.option('-y', '--abbyy', 'serializer', flag_value='abbyyxml') @click.option('-x', '--pagexml', 'serializer', flag_value='pagexml') @click.option('-n', '--native', 'serializer', flag_value='native', default=True, show_default=True) @click.option('-t', '--template', type=click.Path(exists=True, dir_okay=False), help='Explicitly set jinja template for output serialization. 
Overrides -h/-a/-y/-x/-n.') @click.option('-d', '--device', default='cpu', show_default=True, help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('-r', '--raise-on-error/--no-raise-on-error', default=False, show_default=True, help='Raises the exception that caused processing to fail in the case of an error') @click.option('-2', '--autocast', default=False, show_default=True, flag_value=True, help='On compatible devices, uses autocast for `segment` which lowers the memory usage.') def cli(input, batch_input, suffix, verbose, format_type, pdf_format, serializer, template, device, raise_on_error, autocast): """ Base command for recognition functionality. Inputs are defined as one or more pairs `-i input_file output_file` followed by one or more chainable processing commands. Likewise, verbosity is set on all subcommands with the `-v` switch. """ ctx = click.get_current_context() if device != 'cpu': import torch try: torch.ones(1, device=device) except AssertionError as e: if raise_on_error: raise logger.error(f'Device {device} not available: {e.args[0]}.') ctx.exit(1) ctx.meta['device'] = device ctx.meta['input_format_type'] = format_type if format_type != 'pdf' else 'image' ctx.meta['raise_failed'] = raise_on_error if not template: ctx.meta['output_mode'] = serializer ctx.meta['output_template'] = serializer else: ctx.meta['output_mode'] = 'template' ctx.meta['output_template'] = template ctx.meta['verbose'] = verbose ctx.meta['steps'] = [] ctx.meta["autocast"] = autocast log.set_logger(logger, level=30 - min(10 * verbose, 20)) @cli.result_callback() def process_pipeline(subcommands, input, batch_input, suffix, verbose, format_type, pdf_format, **args): """ Helper function calling the partials returned by each subcommand and placing their respective outputs in temporary files. """ import glob import uuid import tempfile ctx = click.get_current_context() input = list(input) # expand batch inputs if batch_input and suffix: for batch_expr in batch_input: for in_file in glob.glob(batch_expr, recursive=True): input.append((in_file, '{}{}'.format(os.path.splitext(in_file)[0], suffix))) # parse pdfs if format_type == 'pdf': import pyvips if not batch_input: logger.warning('PDF inputs not added with batch option. Manual output filename will be ignored and `-o` utilized.') new_input = [] num_pages = 0 for (fpath, _) in input: doc = pyvips.Image.new_from_file(fpath, dpi=300, n=-1, access="sequential") if 'n-pages' in doc.get_fields(): num_pages += doc.get('n-pages') with KrakenProgressBar() as progress: pdf_parse_task = progress.add_task('Extracting PDF pages', total=num_pages, visible=True if not ctx.meta['verbose'] else False) for (fpath, _) in input: try: doc = pyvips.Image.new_from_file(fpath, dpi=300, n=-1, access="sequential") if 'n-pages' not in doc.get_fields(): logger.warning(f'{fpath} does not contain pages. 
Skipping.') continue n_pages = doc.get('n-pages') dest_dict = {'idx': -1, 'src': fpath, 'uuid': None} for i in range(0, n_pages): dest_dict['idx'] += 1 dest_dict['uuid'] = str(uuid.uuid4()) fd, filename = tempfile.mkstemp(suffix='.png') os.close(fd) doc = pyvips.Image.new_from_file(fpath, dpi=300, page=i, access="sequential") logger.info(f'Saving temporary image {fpath}:{dest_dict["idx"]} to {filename}') doc.write_to_file(filename) new_input.append((filename, pdf_format.format(**dest_dict) + suffix)) progress.update(pdf_parse_task, advance=1) except pyvips.error.Error: num_pages -= n_pages progress.update(pdf_parse_task, total=num_pages) logger.warning(f'{fpath} is not a PDF file. Skipping.') input = new_input ctx.meta['steps'].insert(0, {'category': 'preprocessing', 'description': 'PDF image extraction', 'settings': {}}) for io_pair in input: ctx.meta['first_process'] = True ctx.meta['last_process'] = False ctx.meta['orig_file'] = io_pair[0] if 'base_image' in ctx.meta: del ctx.meta['base_image'] try: tmps = [tempfile.mkstemp() for _ in subcommands[1:]] for tmp in tmps: os.close(tmp[0]) fc = [io_pair[0]] + [tmp[1] for tmp in tmps] + [io_pair[1]] for idx, (task, input, output) in enumerate(zip(subcommands, fc, fc[1:])): if len(fc) - 2 == idx: ctx.meta['last_process'] = True task(input=input, output=output) except Exception as e: logger.error(f'Failed processing {io_pair[0]}: {str(e)}') if ctx.meta['raise_failed']: raise finally: for f in fc[1:-1]: os.unlink(f) # clean up temporary PDF image files if format_type == 'pdf': logger.debug(f'unlinking {fc[0]}') os.unlink(fc[0]) @cli.command('binarize') @click.pass_context @click.option('--threshold', show_default=True, default=0.5, type=click.FLOAT) @click.option('--zoom', show_default=True, default=0.5, type=click.FLOAT) @click.option('--escale', show_default=True, default=1.0, type=click.FLOAT) @click.option('--border', show_default=True, default=0.1, type=click.FLOAT) @click.option('--perc', show_default=True, default=80, type=click.IntRange(1, 100)) @click.option('--range', show_default=True, default=20, type=click.INT) @click.option('--low', show_default=True, default=5, type=click.IntRange(1, 100)) @click.option('--high', show_default=True, default=90, type=click.IntRange(1, 100)) def binarize(ctx, threshold, zoom, escale, border, perc, range, low, high): """ Binarizes page images. 
""" ctx.meta['steps'].append({'category': 'preprocessing', 'description': 'Image binarization', 'settings': {'threshold': threshold, 'zoom': zoom, 'escale': escale, 'border': border, 'perc': perc, 'range': range, 'low': low, 'high': high}}) return partial(binarizer, threshold, zoom, escale, border, perc, range, low, high) @cli.command('segment') @click.pass_context @click.option('-i', '--model', default=None, show_default=True, help='Baseline detection model to use') @click.option('-x/-bl', '--boxes/--baseline', default=True, show_default=True, help='Switch between legacy box segmenter and neural baseline segmenter') @click.option('-d', '--text-direction', default='horizontal-lr', show_default=True, type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']), help='Sets principal text direction') @click.option('--scale', show_default=True, default=None, type=click.FLOAT) @click.option('-m', '--maxcolseps', show_default=True, default=2, type=click.INT) @click.option('-b/-w', '--black-colseps/--white_colseps', show_default=True, default=False) @click.option('-r/-l', '--remove_hlines/--hlines', show_default=True, default=True) @click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0), help='Left and right padding around lines') @click.option('-m', '--mask', show_default=True, default=None, type=click.File(mode='rb', lazy=True), help='Segmentation mask ' 'suppressing page areas for line detection. 0-valued image ' 'regions are ignored for segmentation purposes. Disables column ' 'detection.') def segment(ctx, model, boxes, text_direction, scale, maxcolseps, black_colseps, remove_hlines, pad, mask): """ Segments page images into text lines. """ if model and boxes: logger.warning(f'Baseline model ({model}) given but legacy segmenter selected. Forcing to -bl.') boxes = False if boxes is False: if not model: model = SEGMENTATION_DEFAULT_MODEL ctx.meta['steps'].append({'category': 'processing', 'description': 'Baseline and region segmentation', 'settings': {'model': os.path.basename(model), 'text_direction': text_direction}}) # first try to find the segmentation model by its given name, # then look in the kraken config folder location = None search = [model, os.path.join(click.get_app_dir(APP_NAME), model)] for loc in search: if os.path.isfile(loc): location = loc break if not location: raise click.BadParameter(f'No model for {model} found') from kraken.lib.vgsl import TorchVGSLModel message(f'Loading ANN {model}\t', nl=False) try: model = TorchVGSLModel.load_model(location) model.to(ctx.meta['device']) except Exception: if ctx.meta['raise_failed']: raise message('\u2717', fg='red') ctx.exit(1) message('\u2713', fg='green') else: ctx.meta['steps'].append({'category': 'processing', 'description': 'bounding box segmentation', 'settings': {'text_direction': text_direction, 'scale': scale, 'maxcolseps': maxcolseps, 'black_colseps': black_colseps, 'remove_hlines': remove_hlines, 'pad': pad}}) return partial(segmenter, boxes, model, text_direction, scale, maxcolseps, black_colseps, remove_hlines, pad, mask, ctx.meta['device']) def _validate_mm(ctx, param, value): """ Maps model mappings to a dictionary. 
""" model_dict = {'ignore': []} # type: Dict[str, Union[str, List[str]]] if len(value) == 1 and len(value[0].split(':')) == 1: model_dict['default'] = value[0] return model_dict try: for m in value: k, v = m.split(':') if v == 'ignore': model_dict['ignore'].append(k) # type: ignore else: model_dict[k] = os.path.expanduser(v) except Exception: raise click.BadParameter('Mappings must be in format tag:model') return model_dict @cli.command('ocr') @click.pass_context @click.option('-m', '--model', default=DEFAULT_MODEL, multiple=True, show_default=True, callback=_validate_mm, help='Path to an recognition model or mapping of the form ' '$tag1:$model1. Add multiple mappings to run multi-model ' 'recognition based on detected tags. Use the default keyword ' 'for adding a catch-all model. Recognition on tags can be ' 'ignored with the model value ignore.') @click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right ' 'padding around lines') @click.option('-n', '--reorder/--no-reorder', show_default=True, default=True, help='Reorder code points to logical order') @click.option('--base-dir', show_default=True, default='auto', type=click.Choice(['L', 'R', 'auto']), help='Set base text ' 'direction. This should be set to the direction used during the ' 'creation of the training data. If set to `auto` it will be ' 'overridden by any explicit value given in the input files.') @click.option('-s', '--no-segmentation', default=False, show_default=True, is_flag=True, help='Enables non-segmentation mode treating each input image as a whole line.') @click.option('-d', '--text-direction', default='horizontal-tb', show_default=True, type=click.Choice(['horizontal-tb', 'vertical-lr', 'vertical-rl']), help='Sets principal text direction in serialization output') @click.option('--threads', default=1, show_default=True, type=click.IntRange(1), help='Number of threads to use for OpenMP parallelization.') def ocr(ctx, model, pad, reorder, base_dir, no_segmentation, text_direction, threads): """ Recognizes text in line images. 
""" from kraken.lib import models if ctx.meta['input_format_type'] != 'image' and no_segmentation: raise click.BadParameter('no_segmentation mode is incompatible with page/alto inputs') if reorder and base_dir != 'auto': reorder = base_dir # first try to find the OCR model by its given name, then # in the kraken config folder, then in LEGACY_MODEL_DIR nm = {} # type: Dict[str, models.TorchSeqRecognizer] ign_tags = model.pop('ignore') for k, v in model.items(): search = [v, os.path.join(click.get_app_dir(APP_NAME), v), os.path.join(LEGACY_MODEL_DIR, v)] location = None for loc in search: if os.path.isfile(loc): location = loc break if not location: raise click.BadParameter(f'No model for {v} found') message(f'Loading ANN {v}\t', nl=False) try: rnn = models.load_any(location, device=ctx.meta['device']) nm[k] = rnn except Exception: if ctx.meta['raise_failed']: raise message('\u2717', fg='red') ctx.exit(1) message('\u2713', fg='green') if 'default' in nm: from collections import defaultdict nn = defaultdict(lambda: nm['default']) # type: Dict[str, models.TorchSeqRecognizer] nn.update(nm) nm = nn # thread count is global so setting it once is sufficient nm[k].nn.set_num_threads(threads) ctx.meta['steps'].append({'category': 'processing', 'description': 'Text line recognition', 'settings': {'text_direction': text_direction, 'models': ' '.join(os.path.basename(v) for v in model.values()), 'pad': pad, 'bidi_reordering': reorder}}) # set output mode ctx.meta['text_direction'] = text_direction return partial(recognizer, model=nm, pad=pad, no_segmentation=no_segmentation, bidi_reordering=reorder, tags_ignore=ign_tags) @cli.command('show') @click.pass_context @click.argument('model_id') def show(ctx, model_id): """ Retrieves model metadata from the repository. """ from kraken import repo from kraken.lib.util import make_printable, is_printable desc = repo.get_description(model_id) chars = [] combining = [] for char in sorted(desc['graphemes']): if not is_printable(char): combining.append(make_printable(char)) else: chars.append(char) message( 'name: {}\n\n{}\n\n{}\nscripts: {}\nalphabet: {} {}\naccuracy: {:.2f}%\nlicense: {}\nauthor(s): {}\ndate: {}'.format( model_id, desc['summary'], desc['description'], ' '.join( desc['script']), ''.join(chars), ', '.join(combining), desc['accuracy'], desc['license']['id'], '; '.join( x['name'] for x in desc['creators']), desc['publication_date'])) ctx.exit(0) @cli.command('list') @click.pass_context def list_models(ctx): """ Lists models in the repository. """ from kraken import repo with KrakenProgressBar() as progress: download_task = progress.add_task('Retrieving model list', total=0, visible=True if not ctx.meta['verbose'] else False) model_list = repo.get_listing(lambda total, advance: progress.update(download_task, total=total, advance=advance)) for id, metadata in model_list.items(): message('{} ({}) - {}'.format(id, ', '.join(metadata['type']), metadata['summary'])) ctx.exit(0) @cli.command('get') @click.pass_context @click.argument('model_id') def get(ctx, model_id): """ Retrieves a model from the repository. 
""" from kraken import repo try: os.makedirs(click.get_app_dir(APP_NAME)) except OSError: pass with KrakenDownloadProgressBar() as progress: download_task = progress.add_task('Processing', total=0, visible=True if not ctx.meta['verbose'] else False) filename = repo.get_model(model_id, click.get_app_dir(APP_NAME), lambda total, advance: progress.update(download_task, total=total, advance=advance)) message(f'Model name: {filename}') ctx.exit(0) if __name__ == '__main__': cli()
30,865
42.351124
139
py
kraken
kraken-main/kraken/transcribe.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Utility functions for ground truth transcription. """ from kraken.lib.exceptions import KrakenInputException from kraken.lib.util import get_im_str from typing import List from jinja2 import Environment, PackageLoader from io import BytesIO import uuid import base64 import logging logger = logging.getLogger(__name__) class TranscriptionInterface(object): def __init__(self, font=None, font_style=None): logger.info('Initializing transcription object.') logger.debug('Initializing jinja environment.') env = Environment(loader=PackageLoader('kraken', 'templates'), autoescape=True) logger.debug('Loading transcription template.') self.tmpl = env.get_template('layout.html') self.pages = [] # type: List[dict] self.font = {'font': font, 'style': font_style} self.text_direction = 'horizontal-tb' self.page_idx = 1 self.line_idx = 1 self.seg_idx = 1 def add_page(self, im, segmentation=None, records=None): """ Adds an image to the transcription interface, optionally filling in information from a list of ocr_record objects. Args: im (PIL.Image): Input image segmentation (dict): Output of the segment method. records (list): A list of ocr_record objects. """ im_str = get_im_str(im) logger.info('Adding page {} with {} lines'.format(im_str, len(segmentation['boxes']) if segmentation else len(records))) page = {} fd = BytesIO() im.save(fd, format='png', optimize=True) page['index'] = self.page_idx self.page_idx += 1 logger.debug('Base64 encoding image') page['img'] = 'data:image/png;base64,' + base64.b64encode(fd.getvalue()).decode('ascii') page['lines'] = [] if records: logger.debug('Adding records.') self.text_direction = segmentation['text_direction'] for record, bbox in zip(records, segmentation['boxes']): page['lines'].append({'index': self.line_idx, 'text': record.prediction, 'left': 100*int(bbox[0]) / im.size[0], 'top': 100*int(bbox[1]) / im.size[1], 'width': 100*(bbox[2] - bbox[0])/im.size[0], 'height': 100*(int(bbox[3]) - int(bbox[1]))/im.size[1], 'bbox': '{}, {}, {}, {}'.format(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))}) self.line_idx += 1 elif segmentation: logger.debug('Adding segmentations.') self.text_direction = segmentation['text_direction'] for bbox in segmentation['boxes']: page['lines'].append({'index': self.line_idx, 'left': 100*int(bbox[0]) / im.size[0], 'top': 100*int(bbox[1]) / im.size[1], 'width': 100*(bbox[2] - bbox[0])/im.size[0], 'height': 100*(int(bbox[3]) - int(bbox[1]))/im.size[1], 'bbox': '{}, {}, {}, {}'.format(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))}) self.line_idx += 1 else: raise KrakenInputException('Neither segmentations nor records given') self.pages.append(page) def write(self, fd): """ Writes the HTML file to a file descriptor. Args: fd (File): File descriptor (mode='wb') to write to. """ logger.info('Rendering and writing transcription.') fd.write(self.tmpl.render(uuid=str(uuid.uuid4()), pages=self.pages, font=self.font, text_direction=self.text_direction).encode('utf-8'))
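# A short usage sketch for the interface above, assuming a bi-level page
# image 'page.png' (a placeholder) and the legacy segmenter for line detection.
if __name__ == '__main__':
    from PIL import Image
    from kraken.pageseg import segment

    im = Image.open('page.png').convert('1')
    ti = TranscriptionInterface(font='serif')
    ti.add_page(im, segmentation=segment(im))
    # write() encodes the rendered template itself, so open in binary mode
    with open('transcription.html', 'wb') as fp:
        ti.write(fp)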
4,922
42.955357
119
py
kraken
kraken-main/kraken/__init__.py
""" entry point for kraken functionality """
45
10.5
36
py
kraken
kraken-main/kraken/binarization.py
# # Copyright 2015 Benjamin Kiessling # 2014 Thomas M. Breuel # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ kraken.binarization ~~~~~~~~~~~~~~~~~~~ An adaptive binarization algorithm. """ import warnings import logging import numpy as np from PIL import Image from kraken.lib.util import pil2array, array2pil, is_bitonal, get_im_str from scipy.ndimage import affine_transform, percentile_filter, gaussian_filter, binary_dilation from scipy.ndimage import zoom as _zoom from kraken.lib.exceptions import KrakenInputException __all__ = ['nlbin'] logger = logging.getLogger(__name__) def nlbin(im: Image.Image, threshold: float = 0.5, zoom: float = 0.5, escale: float = 1.0, border: float = 0.1, perc: int = 80, range: int = 20, low: int = 5, high: int = 90) -> Image.Image: """ Performs binarization using non-linear processing. Args: im: Input image threshold: zoom: Zoom for background page estimation escale: Scale for estimating a mask over the text region border: Ignore this much of the border perc: Percentage for filters range: Range for filters low: Percentile for black estimation high: Percentile for white estimation Returns: PIL.Image.Image containing the binarized image Raises: KrakenInputException: When trying to binarize an empty image. """ im_str = get_im_str(im) logger.info(f'Binarizing {im_str}') if is_bitonal(im): logger.info(f'Skipping binarization because {im_str} is bitonal.') return im # convert to grayscale first logger.debug(f'Converting {im_str} to grayscale') im = im.convert('L') raw = pil2array(im) logger.debug('Scaling and normalizing') # rescale image to between -1 or 0 and 1 raw = raw/float(np.iinfo(raw.dtype).max) # perform image normalization if np.amax(raw) == np.amin(raw): logger.warning(f'Trying to binarize empty image {im_str}') raise KrakenInputException('Image is empty') image = raw-np.amin(raw) image /= np.amax(image) logger.debug('Interpolation and percentile filtering') with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) m = _zoom(image, zoom) m = percentile_filter(m, perc, size=(range, 2)) m = percentile_filter(m, perc, size=(2, range)) mh, mw = m.shape oh, ow = image.shape scale = np.diag([mh * 1.0/oh, mw * 1.0/ow]) m = affine_transform(m, scale, output_shape=image.shape) w, h = np.minimum(np.array(image.shape), np.array(m.shape)) flat = np.clip(image[:w, :h]-m[:w, :h]+1, 0, 1) # estimate low and high thresholds d0, d1 = flat.shape o0, o1 = int(border*d0), int(border*d1) est = flat[o0:d0-o0, o1:d1-o1] logger.debug('Threshold estimates {}'.format(est)) # by default, we use only regions that contain # significant variance; this makes the percentile # based low and high estimates more reliable logger.debug('Refine estimates') v = est-gaussian_filter(est, escale*20.0) v = gaussian_filter(v**2, escale*20.0)**0.5 v = (v > 0.3*np.amax(v)) v = binary_dilation(v, structure=np.ones((int(escale * 50), 1))) v = binary_dilation(v, structure=np.ones((1, int(escale * 50)))) est = est[v] lo = np.percentile(est.ravel(), low) hi = np.percentile(est.ravel(), high) flat 
-= lo flat /= (hi-lo) flat = np.clip(flat, 0, 1) logger.debug(f'Thresholding at {threshold}') bin = np.array(255*(flat > threshold), 'B') return array2pil(bin)
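# A minimal usage sketch for the binarizer above; 'page.jpg' stands in for
# any grayscale or color page scan.
if __name__ == '__main__':
    from PIL import Image

    im = Image.open('page.jpg')
    bw = nlbin(im, threshold=0.5)
    bw.save('page.bw.png')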
4,189
33.344262
95
py
kraken
kraken-main/kraken/repo.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Accessors to the model repository on zenodo. """ import os import json import urllib import logging import requests from os import PathLike from pathlib import Path from contextlib import closing from typing import Callable, Any, Union from kraken.lib.exceptions import KrakenRepoException __all__ = ['get_model', 'get_description', 'get_listing', 'publish_model'] logger = logging.getLogger(__name__) MODEL_REPO = 'https://zenodo.org/api/' SUPPORTED_MODELS = set(['kraken_pytorch']) def publish_model(model_file: Union[str, PathLike] = None, metadata: dict = None, access_token: str = None, callback: Callable[[int, int], Any] = lambda: None, private: bool = False) -> str: """ Publishes a model to the repository. Args: model_file: Path to read model from. metadata: Metadata dictionary access_token: Zenodo API access token callback: Function called with octet-wise progress. private: When True, skips the community inclusion request, so the model will not be publicly discoverable. """ model_file = Path(model_file) fp = open(model_file, 'rb') _metadata = json.dumps(metadata) total = model_file.stat().st_size + len(_metadata) + 3 headers = {"Content-Type": "application/json"} r = requests.post(f'{MODEL_REPO}deposit/depositions', params={'access_token': access_token}, json={}, headers=headers) r.raise_for_status() callback(total, 1) deposition_id = r.json()['id'] data = {'filename': 'metadata.json'} files = {'file': ('metadata.json', _metadata)} r = requests.post(f'{MODEL_REPO}deposit/depositions/{deposition_id}/files', params={'access_token': access_token}, data=data, files=files) r.raise_for_status() callback(total, len(_metadata)) data = {'filename': metadata['name']} files = {'file': fp} r = requests.post(f'{MODEL_REPO}deposit/depositions/{deposition_id}/files', params={'access_token': access_token}, data=data, files=files) r.raise_for_status() callback(total, model_file.stat().st_size) # fill zenodo metadata data = {'metadata': { 'title': metadata['summary'], 'upload_type': 'publication', 'publication_type': 'other', 'description': metadata['description'], 'creators': metadata['authors'], 'access_right': 'open', 'keywords': ['kraken_pytorch'], 'license': metadata['license'] } } if not private: data['metadata']['communities'] = [{'identifier': 'ocr_models'}] # add link to training data to metadata if 'source' in metadata: data['metadata']['related_identifiers'] = [{'relation': 'isSupplementTo', 'identifier': metadata['source']}] r = requests.put(f'{MODEL_REPO}deposit/depositions/{deposition_id}', params={'access_token': access_token}, data=json.dumps(data), headers=headers) r.raise_for_status() callback(total, 1) r = requests.post(f'{MODEL_REPO}deposit/depositions/{deposition_id}/actions/publish', params={'access_token': access_token}) r.raise_for_status() callback(total, 1) return r.json()['doi'] def get_model(model_id: str, path: str, callback: Callable[[int, int], Any] = lambda total, advance: None) -> str: """ Retrieves a model and 
saves it to a path. Args: model_id (str): DOI of the model path (str): Destination to write model to. callback (func): Function called for every 1024 octet chunk received. Returns: The identifier the model can be called through on the command line. Will usually be the file name of the model. """ logger.info(f'Saving model {model_id} to {path}') r = requests.get(f'{MODEL_REPO}records', params={'q': f'doi:"{model_id}"'}) r.raise_for_status() callback(0, 0) resp = r.json() if resp['hits']['total'] != 1: logger.error(f'Found {resp["hits"]["total"]} models when querying for id \'{model_id}\'') raise KrakenRepoException(f'Found {resp["hits"]["total"]} models when querying for id \'{model_id}\'') metadata = resp['hits']['hits'][0] model_url = [x['links']['self'] for x in metadata['files'] if x['type'] == 'mlmodel'][0] # callable model identifier nat_id = os.path.basename(urllib.parse.urlparse(model_url).path) spath = os.path.join(path, nat_id) logger.debug(f'downloading model file {model_url} to {spath}') with closing(requests.get(model_url, stream=True)) as r: file_size = int(r.headers['Content-length']) with open(spath, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): callback(file_size, len(chunk)) f.write(chunk) return nat_id def get_description(model_id: str, callback: Callable[..., Any] = lambda: None) -> dict: """ Fetches the metadata for a single model from the zenodo repository. Args: model_id (str): DOI of the model. callback (callable): Optional function called once per HTTP request. Returns: Dict """ logger.info(f'Retrieving metadata for {model_id}') r = requests.get(f'{MODEL_REPO}records', params={'q': f'doi:"{model_id}"'}) r.raise_for_status() callback() resp = r.json() if resp['hits']['total'] != 1: logger.error(f'Found {resp["hits"]["total"]} models when querying for id \'{model_id}\'') raise KrakenRepoException(f'Found {resp["hits"]["total"]} models when querying for id \'{model_id}\'') record = resp['hits']['hits'][0] metadata = record['metadata'] if 'keywords' not in metadata: logger.error('No keywords included on deposit') raise KrakenRepoException('No keywords included on deposit.') model_type = SUPPORTED_MODELS.intersection(metadata['keywords']) if not model_type: msg = 'Unsupported model type(s): {}'.format(', '.join(metadata['keywords'])) logger.error(msg) raise KrakenRepoException(msg) meta_json = None for file in record['files']: if file['key'] == 'metadata.json': callback() r = requests.get(file['links']['self']) r.raise_for_status() callback() try: meta_json = r.json() except Exception: msg = f'Metadata for \'{record["metadata"]["title"]}\' ({record["metadata"]["doi"]}) not in JSON format' logger.error(msg) raise KrakenRepoException(msg) if not meta_json: msg = 'No metadata.json found for \'{}\' ({})'.format(record['metadata']['title'], record['metadata']['doi']) logger.error(msg) raise KrakenRepoException(msg) # merge metadata.json into DataCite metadata.update({'graphemes': meta_json['graphemes'], 'summary': meta_json['summary'], 'script': meta_json['script'], 'link': record['links']['latest'], 'type': [x.split('_')[1] for x in model_type], 'accuracy': meta_json['accuracy']}) return metadata def get_listing(callback: Callable[[int, int], Any] = lambda total, advance: None) -> dict: """ Fetches a listing of all kraken models from the zenodo repository. Args: callback (Callable): Function called after each HTTP request. Returns: Dict mapping model DOIs to their metadata records. 
""" logger.info('Retrieving model list') records = [] r = requests.get('{}{}'.format(MODEL_REPO, 'records'), params={'communities': 'ocr_models'}) r.raise_for_status() callback(1, 1) resp = r.json() if not resp['hits']['total']: logger.error('No models found in community \'ocr_models\'') raise KrakenRepoException('No models found in repository \'ocr_models\'') logger.debug('Total of {} records in repository'.format(resp['hits']['total'])) total = resp['hits']['total'] callback(total, 0) records.extend(resp['hits']['hits']) while 'next' in resp['links']: logger.debug('Fetching next page') r = requests.get(resp['links']['next']) r.raise_for_status() resp = r.json() logger.debug('Found {} new records'.format(len(resp['hits']['hits']))) records.extend(resp['hits']['hits']) logger.debug('Retrieving model metadata') models = {} # fetch metadata.jsn for each model for record in records: if 'keywords' not in record['metadata']: continue model_type = SUPPORTED_MODELS.intersection(record['metadata']['keywords']) if not model_type: continue for file in record['files']: if file['key'] == 'metadata.json': callback(total, 1) r = requests.get(file['links']['self']) r.raise_for_status() try: metadata = r.json() except Exception: msg = f'Metadata for \'{record["metadata"]["title"]}\' ({record["metadata"]["doi"]}) not in JSON format' logger.error(msg) raise KrakenRepoException(msg) # merge metadata.jsn into DataCite key = record['metadata']['doi'] models[key] = record['metadata'] models[key].update({'graphemes': metadata['graphemes'], 'summary': metadata['summary'], 'script': metadata['script'], 'link': record['links']['latest'], 'type': [x.split('_')[1] for x in model_type]}) return models
10,633
39.280303
124
py
kraken
kraken-main/kraken/serialization.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. from jinja2 import Environment, PackageLoader, FunctionLoader import regex import logging import datetime import shapely.geometry as geom from os import PathLike from pkg_resources import get_distribution from collections import Counter from kraken.rpred import BaselineOCRRecord, BBoxOCRRecord, ocr_record from kraken.lib.util import make_printable from kraken.lib.segmentation import is_in_region from typing import Union, List, Tuple, Iterable, Optional, Sequence, Dict, Any, Literal logger = logging.getLogger(__name__) __all__ = ['serialize', 'serialize_segmentation', 'render_report'] def _rescale(val: Sequence[float], low: float, high: float) -> List[float]: """ Rescales a list of confidence value between 0 and 1 to an interval [low, high]. Args: val (float): List of values in interval (0,1) low (float): Lower bound of rescaling interval high (float): Upper bound of rescaling interval Returns: Rescaled value (float). """ return [(high - low) * x + low for x in val] def max_bbox(boxes: Iterable[Sequence[int]]) -> Tuple[int, int, int, int]: """ Calculates the minimal bounding box containing all contained in an iterator. Args: boxes (iterator): An iterator returning tuples of the format ((x0, y0), (x1, y1), ... (xn, yn)). Returns: A box (x0, y0, x1, y1) covering all bounding boxes in the input argument. """ flat_box = [point for pol in boxes for point in pol] flat_box = [x for point in flat_box for x in point] xmin, xmax = min(flat_box[::2]), max(flat_box[::2]) ymin, ymax = min(flat_box[1::2]), max(flat_box[1::2]) o = xmin, ymin, xmax, ymax # type: ignore return o def serialize(records: Sequence[ocr_record], image_name: Union[PathLike, str] = None, image_size: Tuple[int, int] = (0, 0), writing_mode: Literal['horizontal-tb', 'vertical-lr', 'vertical-rl'] = 'horizontal-tb', scripts: Optional[Iterable[str]] = None, regions: Optional[Dict[str, List[List[Tuple[int, int]]]]] = None, template: [PathLike, str] = 'alto', template_source: Literal['native', 'custom'] = 'native', processing_steps: Optional[List[Dict[str, Union[Dict, str, float, int, bool]]]] = None) -> str: """ Serializes a list of ocr_records into an output document. Serializes a list of predictions and their corresponding positions by doing some hOCR-specific preprocessing and then renders them through one of several jinja2 templates. Note: Empty records are ignored for serialization purposes. Args: records: List of kraken.rpred.ocr_record image_name: Name of the source image image_size: Dimensions of the source image writing_mode: Sets the principal layout of lines and the direction in which blocks progress. Valid values are horizontal-tb, vertical-rl, and vertical-lr. scripts: List of scripts contained in the OCR records regions: Dictionary mapping region types to a list of region polygons. template: Selector for the serialization format. May be 'hocr', 'alto', 'page' or any template found in the template directory. 
If template_source is set to `custom` a path to a template is expected. template_source: Switch to enable loading of custom templates from outside the kraken package. processing_steps: A list of dictionaries describing the processing kraken performed on the inputs:: {'category': 'preprocessing', 'description': 'natural language description of process', 'settings': {'arg0': 'foo', 'argX': 'bar'} } Returns: The rendered template """ logger.info(f'Serialize {len(records)} records from {image_name} with template {template}.') page = {'entities': [], 'size': image_size, 'name': image_name, 'writing_mode': writing_mode, 'scripts': scripts, 'date': datetime.datetime.now(datetime.timezone.utc).isoformat(), 'base_dir': [rec.base_dir for rec in records][0] if len(records) else None} # type: dict metadata = {'processing_steps': processing_steps, 'version': get_distribution('kraken').version} seg_idx = 0 char_idx = 0 region_map = {} idx = 0 if regions is not None: for id, regs in regions.items(): for reg in regs: region_map[idx] = (id, geom.Polygon(reg), reg) idx += 1 # build region and line type dict types = [] for line in records: if hasattr(line, 'tags') and line.tags is not None: types.extend(line.tags.values()) page['types'] = list(set(types)) if regions is not None: page['types'].extend(list(regions.keys())) is_in_reg = -1 for idx, record in enumerate(records): if record.type == 'baselines': l_obj = geom.LineString(record.baseline) else: l_obj = geom.LineString(record.line) reg = list(filter(lambda x: is_in_region(l_obj, x[1][1]), region_map.items())) if len(reg) == 0: cur_ent = page['entities'] elif reg[0][0] != is_in_reg: reg = reg[0] is_in_reg = reg[0] region = {'index': reg[0], 'bbox': [int(x) for x in reg[1][1].bounds], 'boundary': [list(x) for x in reg[1][2]], 'region_type': reg[1][0], 'lines': [], 'type': 'region' } page['entities'].append(region) cur_ent = region['lines'] # set field to indicate the availability of baseline segmentation in # addition to bounding boxes if record.type == 'baselines': page['seg_type'] = 'baselines' line = {'index': idx, 'bbox': max_bbox([record.line]), 'cuts': record.cuts, 'confidences': record.confidences, 'recognition': [], 'boundary': [list(x) for x in record.line], 'type': 'line' } if hasattr(record, 'tags') and record.tags is not None: line['tags'] = record.tags if record.type == 'baselines': line['baseline'] = [list(x) for x in record.baseline] splits = regex.split(r'(\s+)', record.prediction) line_offset = 0 logger.debug(f'Record contains {len(splits)} segments') for segment in splits: if len(segment) == 0: continue seg_cuts = record.cuts[line_offset:line_offset + len(segment)] seg_bbox = max_bbox(seg_cuts) seg_struct = {'bbox': seg_bbox, 'confidences': record.confidences[line_offset:line_offset + len(segment)], 'cuts': seg_cuts, 'text': segment, 'recognition': [{'bbox': max_bbox([cut]), 'boundary': cut, 'confidence': conf, 'text': char, 'index': cid} for conf, cut, char, cid in zip(record.confidences[line_offset:line_offset + len(segment)], seg_cuts, segment, range(char_idx, char_idx + len(segment)))], 'index': seg_idx} # compute convex hull of all characters in segment if record.type == 'baselines': seg_struct['boundary'] = record[line_offset:line_offset + len(segment)][1] line['recognition'].append(seg_struct) char_idx += len(segment) seg_idx += 1 line_offset += len(segment) cur_ent.append(line) # No records but there are regions -> serialize all regions if not records and regions: logger.debug(f'No lines given but {len(region_map)}. 
Serialize all regions.') for reg in region_map.items(): region = {'index': reg[0], 'bbox': [int(x) for x in reg[1][1].bounds], 'boundary': [list(x) for x in reg[1][2]], 'region_type': reg[1][0], 'lines': [], 'type': 'region' } page['entities'].append(region) if template_source == 'native': logger.debug('Initializing native jinja environment.') loader = PackageLoader('kraken', 'templates') elif template_source == 'custom': def _load_template(name): return open(template, 'r').read(), name, lambda: True loader = FunctionLoader(_load_template) env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True, autoescape=True) env.tests['whitespace'] = str.isspace env.filters['rescale'] = _rescale logger.debug('Retrieving template.') tmpl = env.get_template(template) logger.debug('Rendering data.') return tmpl.render(page=page, metadata=metadata) def serialize_segmentation(segresult: Dict[str, Any], image_name: Union[PathLike, str] = None, image_size: Tuple[int, int] = (0, 0), template: Union[PathLike, str] = 'alto', template_source: Literal['native', 'custom'] = 'native', processing_steps: Optional[List[Dict[str, Union[Dict, str, float, int, bool]]]] = None) -> str: """ Serializes a segmentation result into an output document. Args: segresult: Result of blla.segment image_name: Name of the source image image_size: Dimensions of the source image template: Selector for the serialization format. Any value accepted by `serialize` is valid. template_source: Enables/disables loading of external templates. Returns: (str) rendered template. """ if 'type' in segresult and segresult['type'] == 'baselines': records = [BaselineOCRRecord('', (), (), bl) for bl in segresult['lines']] else: records = [] for line in segresult['boxes']: xmin, xmax = min(line[::2]), max(line[::2]) ymin, ymax = min(line[1::2]), max(line[1::2]) records.append(BBoxOCRRecord('', (), (), ((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)))) return serialize(records, image_name=image_name, image_size=image_size, regions=segresult['regions'] if 'regions' in segresult else None, template=template, template_source=template_source, processing_steps=processing_steps) def render_report(model: str, chars: int, errors: int, char_confusions: Counter, scripts: Counter, insertions: Counter, deletions: int, substitutions: Counter) -> str: """ Renders an accuracy report. Args: model (str): Model name. errors (int): Number of errors on test set. char_confusions (dict): Dictionary mapping a tuple (gt, pred) to a number of occurrences. scripts (dict): Dictionary counting character per script. insertions (dict): Dictionary counting insertion operations per Unicode script deletions (int): Number of deletions substitutions (dict): Dictionary counting substitution operations per Unicode script. Returns: A string containing the rendered report. 
""" logger.info(f'Serializing report for {model}.') report = {'model': model, 'chars': chars, 'errors': errors, 'accuracy': (chars-errors)/chars * 100, 'insertions': sum(insertions.values()), 'deletions': deletions, 'substitutions': sum(substitutions.values()), 'scripts': sorted([{'script': k, 'count': v, 'errors': insertions[k] + substitutions[k], 'accuracy': 100 * (v-(insertions[k] + substitutions[k]))/v} for k, v in scripts.items()], key=lambda x: x['accuracy'], reverse=True), 'counts': sorted([{'correct': make_printable(k[0]), 'generated': make_printable(k[1]), 'errors': v} for k, v in char_confusions.items() if k[0] != k[1]], key=lambda x: x['errors'], reverse=True)} logger.debug('Initializing jinja environment.') env = Environment(loader=PackageLoader('kraken', 'templates'), trim_blocks=True, lstrip_blocks=True, autoescape=True) logger.debug('Retrieving template.') tmpl = env.get_template('report') logger.debug('Rendering data.') return tmpl.render(report=report)
14,465
41.422287
123
py
kraken
kraken-main/kraken/ketos/pretrain.py
# # Copyright 2022 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ kraken.ketos.pretrain ~~~~~~~~~~~~~~~~~~~~~ Command line driver for unsupervised recognition pretraining """ import click import logging from PIL import Image from kraken.lib.default_specs import (RECOGNITION_PRETRAIN_HYPER_PARAMS, RECOGNITION_SPEC) from .util import _validate_manifests, _expand_gt, message, to_ptl_device logging.captureWarnings(True) logger = logging.getLogger('kraken') # raise default max image size to 20k * 20k pixels Image.MAX_IMAGE_PIXELS = 20000 ** 2 @click.command('pretrain') @click.pass_context @click.option('-B', '--batch-size', show_default=True, type=click.INT, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['batch_size'], help='batch sample size') @click.option('--pad', show_default=True, type=click.INT, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['pad'], help='Left and right padding around lines') @click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file') @click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC, help='VGSL spec of the network to train.') @click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training') @click.option('-F', '--freq', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['freq'], type=click.FLOAT, help='Model saving and report generation frequency in epochs ' 'during training. If frequency is >1 it must be an integer, ' 'i.e. running validation every n-th epoch.') @click.option('-q', '--quit', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['quit'], type=click.Choice(['early', 'dumb']), help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs') @click.option('-N', '--epochs', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['epochs'], help='Number of epochs to train for') @click.option('--min-epochs', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['min_epochs'], help='Minimal number of epochs to train for when using early stopping.') @click.option('--lag', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['lag'], help='Number of evaluations (--report frequency) to wait before stopping training without improvement') @click.option('--min-delta', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['min_delta'], type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. By default the delta is scaled by the best loss.') @click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('--precision', show_default=True, default='32', type=click.Choice(['64', '32', 'bf16', '16']), help='Numerical precision to use for training. 
Default is 32-bit single-precision.') @click.option('--optimizer', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['optimizer'], type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer') @click.option('-r', '--lrate', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['lrate'], help='Learning rate') @click.option('-m', '--momentum', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['momentum'], help='Momentum') @click.option('-w', '--weight-decay', show_default=True, type=float, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['weight_decay'], help='Weight decay') @click.option('--warmup', show_default=True, type=float, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['warmup'], help='Number of samples to ramp up to `lrate` initial learning rate.') @click.option('--schedule', show_default=True, type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']), default=RECOGNITION_PRETRAIN_HYPER_PARAMS['schedule'], help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--epoch` option.') @click.option('-g', '--gamma', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['gamma'], help='Decay factor for exponential, step, and reduceonplateau learning rate schedules') @click.option('-ss', '--step-size', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['step_size'], help='Number of validation runs between learning rate decay for exponential and step LR schedules') @click.option('--sched-patience', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['rop_patience'], help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.') @click.option('--cos-max', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['cos_t_max'], help='Epoch of minimal learning rate for cosine LR scheduler.') @click.option('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set') @click.option('--fixed-splits/--ignore-fixed-splits', show_default=True, default=False, help='Whether to honor fixed splits in binary datasets.') @click.option('-t', '--training-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with additional paths to training data') @click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with paths to evaluation data. Overrides the `-p` parameter') @click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.') @click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False, help='When loading an existing model, retrieve hyperparameters from the model') @click.option('--repolygonize/--no-repolygonize', show_default=True, default=False, help='Repolygonizes line data in ALTO/PageXML ' 'files. This ensures that the trained model is compatible with the ' 'segmenter in kraken even if the original image files either do ' 'not contain anything but transcriptions and baseline information ' 'or the polygon data was created using a different method. Will ' 'be ignored in `path` mode. 
Note that this option will be slow ' 'and will not scale input images to the same size as the segmenter ' 'does.') @click.option('--force-binarization/--no-binarization', show_default=True, default=False, help='Forces input images to be binary, otherwise ' 'the appropriate color format will be auto-determined through the ' 'network specification. Will be ignored in `path` mode.') @click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path', help='Sets the training data format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both line definitions and a ' 'link to source images. In `path` mode arguments are image files ' 'sharing a prefix up to the last extension with `.gt.txt` text files ' 'containing the transcription. In binary mode files are datasets ' 'files containing pre-extracted text lines.') @click.option('--augment/--no-augment', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['augment'], help='Enable image augmentation') @click.option('-mw', '--mask-width', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['mask_width'], help='Width of sampled masks at scale of the sampled tensor, e.g. ' '4X subsampling in convolutional layers with mask width 3 results ' 'in an effective mask width of 12.') @click.option('-mp', '--mask-probability', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['mask_prob'], help='Probability of a particular position being the start position of a mask.') @click.option('-nn', '--num-negatives', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['num_negatives'], help='Number of negative samples for the contrastive loss.') @click.option('-lt', '--logit-temp', show_default=True, default=RECOGNITION_PRETRAIN_HYPER_PARAMS['logit_temp'], help='Multiplicative factor for the logits used in contrastive loss.') @click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False)) def pretrain(ctx, batch_size, pad, output, spec, load, freq, quit, epochs, min_epochs, lag, min_delta, device, precision, optimizer, lrate, momentum, weight_decay, warmup, schedule, gamma, step_size, sched_patience, cos_max, partition, fixed_splits, training_files, evaluation_files, workers, load_hyper_parameters, repolygonize, force_binarization, format_type, augment, mask_probability, mask_width, num_negatives, logit_temp, ground_truth): """ Trains a model from image-text pairs. 
""" if not (0 <= freq <= 1) and freq % 1.0 != 0: raise click.BadOptionUsage('freq', 'freq needs to be either in the interval [0,1.0] or a positive integer.') if augment: try: import albumentations # NOQA except ImportError: raise click.BadOptionUsage('augment', 'augmentation needs the `albumentations` package installed.') import shutil from kraken.lib.train import KrakenTrainer from kraken.lib.pretrain import PretrainDataModule, RecognitionPretrainModel hyper_params = RECOGNITION_PRETRAIN_HYPER_PARAMS.copy() hyper_params.update({'freq': freq, 'pad': pad, 'batch_size': batch_size, 'quit': quit, 'epochs': epochs, 'min_epochs': min_epochs, 'lag': lag, 'min_delta': min_delta, 'optimizer': optimizer, 'lrate': lrate, 'momentum': momentum, 'weight_decay': weight_decay, 'warmup': warmup, 'schedule': schedule, 'gamma': gamma, 'step_size': step_size, 'rop_patience': sched_patience, 'cos_t_max': cos_max, 'augment': augment, 'mask_prob': mask_probability, 'mask_width': mask_width, 'num_negatives': num_negatives, 'logit_temp': logit_temp}) # disable automatic partition when given evaluation set explicitly if evaluation_files: partition = 1 ground_truth = list(ground_truth) # merge training_files into ground_truth list if training_files: ground_truth.extend(training_files) if len(ground_truth) == 0: raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.') try: accelerator, device = to_ptl_device(device) except Exception as e: raise click.BadOptionUsage('device', str(e)) if hyper_params['freq'] > 1: val_check_interval = {'check_val_every_n_epoch': int(hyper_params['freq'])} else: val_check_interval = {'val_check_interval': hyper_params['freq']} model = RecognitionPretrainModel(hyper_params=hyper_params, output=output, spec=spec, model=load, load_hyper_parameters=load_hyper_parameters) data_module = PretrainDataModule(batch_size=hyper_params.pop('batch_size'), pad=hyper_params.pop('pad'), augment=hyper_params.pop('augment'), training_data=ground_truth, evaluation_data=evaluation_files, partition=partition, binary_dataset_split=fixed_splits, num_workers=workers, height=model.height, width=model.width, channels=model.channels, repolygonize=repolygonize, force_binarization=force_binarization, format_type=format_type) model.len_train_set = len(data_module.train_dataloader()) trainer = KrakenTrainer(accelerator=accelerator, devices=device, precision=precision, max_epochs=hyper_params['epochs'] if hyper_params['quit'] == 'dumb' else -1, min_epochs=hyper_params['min_epochs'], enable_progress_bar=True if not ctx.meta['verbose'] else False, deterministic=ctx.meta['deterministic'], **val_check_interval) trainer.fit(model, datamodule=data_module) if model.best_epoch == -1: logger.warning('Model did not improve during training.') ctx.exit(1) if quit == 'early': message(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel') logger.info(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel') shutil.copy(f'{model.best_model}', f'{output}_best.mlmodel')
15,667
52.474403
137
py
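A note on the `--freq` handling in `pretrain` above: values in (0, 1] select PyTorch Lightning's fractional `val_check_interval`, while integer values above 1 select whole-epoch validation via `check_val_every_n_epoch`. A minimal sketch of that dispatch, isolated from the command (the function name is illustrative, the logic is taken from the code above):

# Minimal sketch: map a kraken-style `freq` value to PyTorch Lightning
# Trainer keyword arguments, mirroring the dispatch in `pretrain` above.
def val_interval_kwargs(freq: float) -> dict:
    if not (0 <= freq <= 1) and freq % 1.0 != 0:
        raise ValueError('freq must be in [0, 1] or a positive integer')
    if freq > 1:
        # validate every n-th epoch
        return {'check_val_every_n_epoch': int(freq)}
    # validate after this fraction of an epoch
    return {'val_check_interval': freq}

assert val_interval_kwargs(5) == {'check_val_every_n_epoch': 5}
assert val_interval_kwargs(0.25) == {'val_check_interval': 0.25}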
kraken
kraken-main/kraken/ketos/linegen.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.linegen
~~~~~~~~~~~~~~~~~~~~

Command line driver for synthetic recognition training data generation.
"""
import click


@click.command('linegen', deprecated=True)
@click.pass_context
@click.option('-f', '--font', default='sans', help='Font family to render texts in.')
@click.option('-n', '--maxlines', type=click.INT, default=0, help='Maximum number of lines to generate')
@click.option('-e', '--encoding', default='utf-8', help='Decode text files with given codec.')
@click.option('-u', '--normalization', type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
              default=None, help='Normalize ground truth')
@click.option('-ur', '--renormalize', type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
              default=None, help='Renormalize text for rendering purposes.')
@click.option('--reorder/--no-reorder', default=False, help='Reorder code points to display order')
@click.option('-fs', '--font-size', type=click.INT, default=32, help='Font size to render texts in.')
@click.option('-fw', '--font-weight', type=click.INT, default=400, help='Font weight to render texts in.')
@click.option('-l', '--language', help='RFC-3066 language tag for language-dependent font shaping')
@click.option('-ll', '--max-length', type=click.INT, default=None,
              help='Discard lines above length (in Unicode codepoints).')
@click.option('--strip/--no-strip', help='Remove whitespace from start and end of lines.')
@click.option('-D', '--disable-degradation', is_flag=True, help="Don't degrade output lines.")
@click.option('-a', '--alpha', type=click.FLOAT, default=1.5,
              help='Mean of folded normal distribution for sampling foreground pixel flip probability')
@click.option('-b', '--beta', type=click.FLOAT, default=1.5,
              help='Mean of folded normal distribution for sampling background pixel flip probability')
@click.option('-d', '--distort', type=click.FLOAT, default=1.0,
              help='Mean of folded normal distribution to take distortion values from')
@click.option('-ds', '--distortion-sigma', type=click.FLOAT, default=20.0,
              help='Mean of folded normal distribution to take standard deviations for the Gaussian kernel from')
@click.option('--legacy/--no-legacy', default=False, help='Use ocropy-style degradations')
@click.option('-o', '--output', type=click.Path(), default='training_data', help='Output directory')
@click.argument('text', nargs=-1, type=click.Path(exists=True))
def line_generator(ctx, font, maxlines, encoding, normalization, renormalize,
                   reorder, font_size, font_weight, language, max_length, strip,
                   disable_degradation, alpha, beta, distort, distortion_sigma,
                   legacy, output, text):
    """
    Generates artificial text line training data.
    """
    import os
    import errno
    import logging
    import numpy as np
    import unicodedata

    from typing import Set
    from bidi.algorithm import get_display

    from kraken.lib.progress import KrakenProgressBar
    from kraken.lib.exceptions import KrakenCairoSurfaceException

    from .util import message
    from kraken import linegen
    from kraken.lib.util import make_printable

    logging.captureWarnings(True)
    logger = logging.getLogger('kraken')

    lines: Set[str] = set()
    if not text:
        return

    with KrakenProgressBar() as progress:
        read_task = progress.add_task('Reading texts', total=len(text), visible=True if not ctx.meta['verbose'] else False)
        for t in text:
            with click.open_file(t, encoding=encoding) as fp:
                logger.info('Reading {}'.format(t))
                for line in fp:
                    lines.add(line.rstrip('\r\n'))
            progress.update(read_task, advance=1)
    if normalization:
        lines = set([unicodedata.normalize(normalization, line) for line in lines])
    if strip:
        lines = set([line.strip() for line in lines])
    if max_length:
        lines = set([line for line in lines if len(line) < max_length])
    logger.info('Read {} lines'.format(len(lines)))
    message('Read {} unique lines'.format(len(lines)))
    if maxlines and maxlines < len(lines):
        message('Sampling {} lines\t'.format(maxlines), nl=False)
        llist = list(lines)
        lines = set(llist[idx] for idx in np.random.randint(0, len(llist), maxlines))
        message('\u2713', fg='green')
    try:
        os.makedirs(output)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    # calculate the alphabet and print it for verification purposes
    alphabet: Set[str] = set()
    for line in lines:
        alphabet.update(line)
    chars = []
    combining = []
    for char in sorted(alphabet):
        k = make_printable(char)
        if k != char:
            combining.append(k)
        else:
            chars.append(k)
    message('Σ (len: {})'.format(len(alphabet)))
    message('Symbols: {}'.format(''.join(chars)))
    if combining:
        message('Combining Characters: {}'.format(', '.join(combining)))
    lg = linegen.LineGenerator(font, font_size, font_weight, language)
    with KrakenProgressBar() as progress:
        gen_task = progress.add_task('Writing images', total=len(lines), visible=True if not ctx.meta['verbose'] else False)
        for idx, line in enumerate(lines):
            logger.info(line)
            try:
                if renormalize:
                    im = lg.render_line(unicodedata.normalize(renormalize, line))
                else:
                    im = lg.render_line(line)
            except KrakenCairoSurfaceException as e:
                logger.info('{}: {} {}'.format(e.message, e.width, e.height))
                continue
            if not disable_degradation and not legacy:
                im = linegen.degrade_line(im, alpha=alpha, beta=beta)
                im = linegen.distort_line(im, abs(np.random.normal(distort)), abs(np.random.normal(distortion_sigma)))
            elif legacy:
                im = linegen.ocropy_degrade(im)
            im.save('{}/{:06d}.png'.format(output, idx))
            with open('{}/{:06d}.gt.txt'.format(output, idx), 'wb') as fp:
                if reorder:
                    fp.write(get_display(line).encode('utf-8'))
                else:
                    fp.write(line.encode('utf-8'))
            progress.update(gen_task, advance=1)
7,173
42.743902
124
py
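The degradation branch in `line_generator` above draws distortion strengths as `abs(np.random.normal(...))`, i.e. from a folded normal distribution centered on the CLI values, which guarantees non-negative parameters. A small self-contained illustration of that sampling (the values are the CLI defaults from above; the seeded generator is added here only for reproducibility):

import numpy as np

rng = np.random.default_rng(42)
distort, distortion_sigma = 1.0, 20.0  # CLI defaults from `linegen` above

# Folded normal: take the absolute value of a normal draw so the
# distortion strength and Gaussian kernel sigma are always non-negative.
samples = np.abs(rng.normal(distort, size=5))
sigmas = np.abs(rng.normal(distortion_sigma, size=5))
print(samples, sigmas)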
kraken
kraken-main/kraken/ketos/dataset.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.dataset
~~~~~~~~~~~~~~~~~~~~

Command line driver for dataset compilation.
"""
import click

from .util import _validate_manifests


@click.command('compile')
@click.pass_context
@click.option('-o', '--output', show_default=True, type=click.Path(), default='dataset.arrow',
              help='Output dataset file')
@click.option('--workers', show_default=True, default=1,
              help='Number of parallel workers for text line extraction.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml',
              show_default=True,
              help='Sets the training data format. In ALTO and PageXML mode all '
                   'data is extracted from xml files containing both baselines and a '
                   'link to source images. In `path` mode arguments are image files '
                   'sharing a prefix up to the last extension with JSON `.path` files '
                   'containing the baseline information.')
@click.option('-F', '--files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with additional paths to training data.')
@click.option('--random-split', type=float, nargs=3, default=None, show_default=True,
              help='Creates a fixed random split of the input data with the '
                   'proportions (train, validation, test). Overrides the save split option.')
@click.option('--force-type', type=click.Choice(['bbox', 'baseline']), default=None, show_default=True,
              help='Forces the dataset type to a specific value. Can be used to '
                   '"convert" a line strip-type collection to a baseline-style '
                   'dataset, e.g. to disable centerline normalization.')
@click.option('--save-splits/--ignore-splits', show_default=True, default=True,
              help='Whether to serialize explicit splits contained in XML '
                   'files. Is ignored in `path` mode.')
@click.option('--skip-empty-lines/--keep-empty-lines', show_default=True, default=True,
              help='Whether to keep or skip empty text lines. Text-less '
                   'datasets are useful for unsupervised pretraining but '
                   'loading datasets with many empty lines for recognition '
                   'training is inefficient.')
@click.option('--recordbatch-size', show_default=True, default=100,
              help='Minimum number of records per RecordBatch written to the '
                   'output file. Larger batches require more transient memory '
                   'but slightly improve reading performance.')
@click.argument('ground_truth', nargs=-1, type=click.Path(exists=True, dir_okay=False))
def compile(ctx, output, workers, format_type, files, random_split, force_type,
            save_splits, skip_empty_lines, recordbatch_size, ground_truth):
    """
    Precompiles a binary dataset from a collection of XML files.
    """
    from .util import message
    from kraken.lib.progress import KrakenProgressBar

    ground_truth = list(ground_truth)

    if files:
        ground_truth.extend(files)

    if not ground_truth:
        raise click.UsageError('No training data was provided to the compile command. Use the `ground_truth` argument.')

    from kraken.lib import arrow_dataset

    force_type = {'bbox': 'kraken_recognition_bbox',
                  'baseline': 'kraken_recognition_baseline',
                  None: None}[force_type]

    with KrakenProgressBar() as progress:
        extract_task = progress.add_task('Extracting lines', total=0, start=False, visible=True if not ctx.meta['verbose'] else False)
        arrow_dataset.build_binary_dataset(ground_truth,
                                           output,
                                           format_type,
                                           workers,
                                           save_splits,
                                           random_split,
                                           force_type,
                                           recordbatch_size,
                                           skip_empty_lines,
                                           lambda advance, total: progress.update(extract_task, total=total, advance=advance))

    message(f'Output file written to {output}')
4,912
50.177083
134
py
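The `compile` command above serializes extracted lines into an Arrow file in record batches. A hedged sketch of inspecting such a file with pyarrow; the column layout of kraken's datasets is not shown in this file, so the sketch assumes nothing beyond the Arrow IPC file format and sticks to schema inspection:

import pyarrow as pa

# Open the dataset produced by `ketos compile -o dataset.arrow ...`
# and inspect its schema; record batches can be read individually,
# so large files need not be loaded into memory at once.
with pa.memory_map('dataset.arrow', 'r') as source:
    reader = pa.ipc.open_file(source)
    print(reader.schema)
    print('record batches:', reader.num_record_batches)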
kraken
kraken-main/kraken/ketos/segmentation.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.segmentation
~~~~~~~~~~~~~~~~~~~~~~~~~

Command line driver for segmentation training and evaluation.
"""
import click
import pathlib
import logging

from PIL import Image

from kraken.lib.progress import KrakenProgressBar
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.default_specs import SEGMENTATION_HYPER_PARAMS, SEGMENTATION_SPEC

from kraken.ketos.util import _validate_manifests, _expand_gt, message, to_ptl_device

logging.captureWarnings(True)
logger = logging.getLogger('kraken')

# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2


def _validate_merging(ctx, param, value):
    """
    Maps baseline/region merging to a dict of merge structures.
    """
    if not value:
        return None
    merge_dict = {}  # type: Dict[str, str]
    try:
        for m in value:
            k, v = m.split(':')
            merge_dict[v] = k  # type: ignore
    except Exception:
        raise click.BadParameter('Mappings must be in format target:src')
    return merge_dict


@click.command('segtrain')
@click.pass_context
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('-s', '--spec', show_default=True, default=SEGMENTATION_SPEC,
              help='VGSL spec of the baseline labeling network')
@click.option('--line-width', show_default=True, default=SEGMENTATION_HYPER_PARAMS['line_width'],
              help='The height of each baseline in the target after scaling')
@click.option('--pad', show_default=True, type=(int, int), default=(0, 0),
              help='Padding (left/right, top/bottom) around the page image')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True),
              help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=SEGMENTATION_HYPER_PARAMS['freq'], type=click.FLOAT,
              help='Model saving and report generation frequency in epochs '
                   'during training. If frequency is >1 it must be an integer, '
                   'i.e. running validation every n-th epoch.')
@click.option('-q', '--quit', show_default=True, default=SEGMENTATION_HYPER_PARAMS['quit'],
              type=click.Choice(['early', 'dumb']),
              help='Stop condition for training. Set to `early` for early stopping or `dumb` for a fixed number of epochs.')
@click.option('-N', '--epochs', show_default=True, default=SEGMENTATION_HYPER_PARAMS['epochs'],
              help='Number of epochs to train for')
@click.option('--min-epochs', show_default=True, default=SEGMENTATION_HYPER_PARAMS['min_epochs'],
              help='Minimal number of epochs to train for when using early stopping.')
@click.option('--lag', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lag'],
              help='Number of evaluations (--report frequency) to wait before stopping training without improvement')
@click.option('--min-delta', show_default=True, default=SEGMENTATION_HYPER_PARAMS['min_delta'], type=click.FLOAT,
              help='Minimum improvement between epochs to reset early stopping. By default it scales the delta by the best loss.')
@click.option('-d', '--device', show_default=True, default='cpu',
              help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--precision', show_default=True, default='32', type=click.Choice(['64', '32', 'bf16', '16']),
              help='Numerical precision to use for training. Default is 32-bit single precision.')
@click.option('--optimizer', show_default=True, default=SEGMENTATION_HYPER_PARAMS['optimizer'],
              type=click.Choice(['Adam', 'SGD', 'RMSprop', 'Lamb']), help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lrate'],
              help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=SEGMENTATION_HYPER_PARAMS['momentum'],
              help='Momentum')
@click.option('-w', '--weight-decay', show_default=True, default=SEGMENTATION_HYPER_PARAMS['weight_decay'],
              help='Weight decay')
@click.option('--warmup', show_default=True, type=float, default=SEGMENTATION_HYPER_PARAMS['warmup'],
              help='Number of steps to ramp up to `lrate` initial learning rate.')
@click.option('--schedule', show_default=True,
              type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']),
              default=SEGMENTATION_HYPER_PARAMS['schedule'],
              help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--step-size` option.')
@click.option('-g', '--gamma', show_default=True, default=SEGMENTATION_HYPER_PARAMS['gamma'],
              help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')
@click.option('-ss', '--step-size', show_default=True, default=SEGMENTATION_HYPER_PARAMS['step_size'],
              help='Number of validation runs between learning rate decay for exponential and step LR schedules')
@click.option('--sched-patience', show_default=True, default=SEGMENTATION_HYPER_PARAMS['rop_patience'],
              help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')
@click.option('--cos-max', show_default=True, default=SEGMENTATION_HYPER_PARAMS['cos_t_max'],
              help='Epoch of minimal learning rate for cosine LR scheduler.')
@click.option('-p', '--partition', show_default=True, default=0.9,
              help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--workers', show_default=True, default=1,
              help='Number of OpenMP threads and workers when running on CPU.')
@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
              help='When loading an existing model, retrieve hyper-parameters from the model')
@click.option('--force-binarization/--no-binarization', show_default=True, default=False,
              help='Forces input images to be binary, otherwise the appropriate '
                   'color format will be auto-determined through the network '
                   'specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml',
              help='Sets the training data format. In ALTO and PageXML mode all '
                   'data is extracted from xml files containing both baselines and a '
                   'link to source images. In `path` mode arguments are image files '
                   'sharing a prefix up to the last extension with JSON `.path` files '
                   'containing the baseline information.')
@click.option('--suppress-regions/--no-suppress-regions', show_default=True, default=False,
              help='Disables region segmentation training.')
@click.option('--suppress-baselines/--no-suppress-baselines', show_default=True, default=False,
              help='Disables baseline segmentation training.')
@click.option('-vr', '--valid-regions', show_default=True, default=None, multiple=True,
              help='Valid region types in training data. May be used multiple times.')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
              help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mr', '--merge-regions', show_default=True, default=None,
              help='Region merge mapping. One or more mappings of the form `$target:$src` where $src is merged into $target.',
              multiple=True, callback=_validate_merging)
@click.option('-mb', '--merge-baselines', show_default=True, default=None,
              help='Baseline type merge mapping. Same syntax as `--merge-regions`',
              multiple=True, callback=_validate_merging)
@click.option('-br', '--bounding-regions', show_default=True, default=None, multiple=True,
              help='Regions treated as boundaries for polygonization purposes. May be used multiple times.')
@click.option('--augment/--no-augment', show_default=True, default=SEGMENTATION_HYPER_PARAMS['augment'],
              help='Enable image augmentation')
@click.option('--resize', show_default=True, default='fail',
              type=click.Choice([
                  'add', 'union',   # Deprecation: `add` is deprecated, `union` is the new value
                  'both', 'new',    # Deprecation: `both` is deprecated, `new` is the new value
                  'fail'
              ]),
              help='Output layer resizing option. If set to `union` new classes will be '
                   'added, `new` will set the layer to match exactly '
                   'the training data classes, `fail` will abort if training data and model '
                   'classes do not match.')
@click.option('-tl', '--topline', 'topline', show_default=True, flag_value='topline',
              help='Switch for the baseline location in the scripts. '
                   'Set to topline if the data is annotated with a hanging baseline, as is '
                   'common with Hebrew, Bengali, Devanagari, etc. Set to '
                   'centerline for scripts annotated with a central line.')
@click.option('-cl', '--centerline', 'topline', flag_value='centerline')
@click.option('-bl', '--baseline', 'topline', flag_value='baseline', default='baseline')
@click.option('--logger', 'pl_logger', show_default=True, type=click.Choice(['tensorboard']), default=None,
              help='Logger used by PyTorch Lightning to track metrics such as loss and accuracy.')
@click.option('--log-dir', show_default=True, type=click.Path(exists=True, dir_okay=True, writable=True),
              help='Path to directory where the logger will store the logs. If not set, a directory will be created in the current working directory.')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def segtrain(ctx, output, spec, line_width, pad, load, freq, quit, epochs,
             min_epochs, lag, min_delta, device, precision, optimizer, lrate,
             momentum, weight_decay, warmup, schedule, gamma, step_size,
             sched_patience, cos_max, partition, training_files,
             evaluation_files, workers, load_hyper_parameters,
             force_binarization, format_type, suppress_regions,
             suppress_baselines, valid_regions, valid_baselines, merge_regions,
             merge_baselines, bounding_regions, augment, resize, topline,
             pl_logger, log_dir, ground_truth):
    """
    Trains a baseline labeling model for layout analysis.
    """
    import shutil
    from kraken.lib.train import SegmentationModel, KrakenTrainer

    if resize != 'fail' and not load:
        raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')

    if not (0 <= freq <= 1) and freq % 1.0 != 0:
        raise click.BadOptionUsage('freq', 'freq needs to be either in the interval [0,1.0] or a positive integer.')

    if augment:
        try:
            import albumentations  # NOQA
        except ImportError:
            raise click.BadOptionUsage('augment', 'augmentation needs the `albumentations` package installed.')

    if pl_logger == 'tensorboard':
        try:
            import tensorboard  # NOQA
        except ImportError:
            raise click.BadOptionUsage('logger', 'tensorboard logger needs the `tensorboard` package installed.')

    if log_dir is None:
        log_dir = pathlib.Path.cwd()

    logger.info('Building ground truth set from {} document images'.format(len(ground_truth) + len(training_files)))

    # populate hyperparameters from command line args
    hyper_params = SEGMENTATION_HYPER_PARAMS.copy()
    hyper_params.update({'line_width': line_width, 'padding': pad, 'freq': freq,
                         'quit': quit, 'epochs': epochs, 'min_epochs': min_epochs,
                         'lag': lag, 'min_delta': min_delta, 'optimizer': optimizer,
                         'lrate': lrate, 'momentum': momentum,
                         'weight_decay': weight_decay, 'warmup': warmup,
                         'schedule': schedule, 'augment': augment, 'gamma': gamma,
                         'step_size': step_size, 'rop_patience': sched_patience,
                         'cos_t_max': cos_max,
                         })

    # disable automatic partition when given evaluation set explicitly
    if evaluation_files:
        partition = 1
    ground_truth = list(ground_truth)

    # merge training_files into ground_truth list
    if training_files:
        ground_truth.extend(training_files)

    if len(ground_truth) == 0:
        raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')

    loc = {'topline': True, 'baseline': False, 'centerline': None}
    topline = loc[topline]

    try:
        accelerator, device = to_ptl_device(device)
    except Exception as e:
        raise click.BadOptionUsage('device', str(e))

    if hyper_params['freq'] > 1:
        val_check_interval = {'check_val_every_n_epoch': int(hyper_params['freq'])}
    else:
        val_check_interval = {'val_check_interval': hyper_params['freq']}

    model = SegmentationModel(hyper_params,
                              output=output,
                              spec=spec,
                              model=load,
                              training_data=ground_truth,
                              evaluation_data=evaluation_files,
                              partition=partition,
                              num_workers=workers,
                              load_hyper_parameters=load_hyper_parameters,
                              force_binarization=force_binarization,
                              format_type=format_type,
                              suppress_regions=suppress_regions,
                              suppress_baselines=suppress_baselines,
                              valid_regions=valid_regions,
                              valid_baselines=valid_baselines,
                              merge_regions=merge_regions,
                              merge_baselines=merge_baselines,
                              bounding_regions=bounding_regions,
                              resize=resize,
                              topline=topline)

    message('Training line types:')
    for k, v in model.train_set.dataset.class_mapping['baselines'].items():
        message(f'  {k}\t{v}\t{model.train_set.dataset.class_stats["baselines"][k]}')
    message('Training region types:')
    for k, v in model.train_set.dataset.class_mapping['regions'].items():
        message(f'  {k}\t{v}\t{model.train_set.dataset.class_stats["regions"][k]}')

    if len(model.train_set) == 0:
        raise click.UsageError('No valid training data was provided to the train command. Use `-t` or the `ground_truth` argument.')

    trainer = KrakenTrainer(accelerator=accelerator,
                            devices=device,
                            precision=precision,
                            max_epochs=hyper_params['epochs'] if hyper_params['quit'] == 'dumb' else -1,
                            min_epochs=hyper_params['min_epochs'],
                            enable_progress_bar=True if not ctx.meta['verbose'] else False,
                            deterministic=ctx.meta['deterministic'],
                            pl_logger=pl_logger,
                            log_dir=log_dir,
                            **val_check_interval)

    trainer.fit(model)

    if model.best_epoch == -1:
        logger.warning('Model did not improve during training.')
        ctx.exit(1)

    if quit == 'early':
        message(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel')
        logger.info(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel')
        shutil.copy(f'{model.best_model}', f'{output}_best.mlmodel')


@click.command('segtest')
@click.pass_context
@click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True),
              multiple=False, help='Model(s) to evaluate')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data.')
@click.option('-d', '--device', show_default=True, default='cpu',
              help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--workers', show_default=True, default=1,
              help='Number of OpenMP threads when running on CPU.')
@click.option('--force-binarization/--no-binarization', show_default=True, default=False,
              help='Forces input images to be binary, otherwise the appropriate '
                   'color format will be auto-determined through the network '
                   'specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml',
              help='Sets the training data format. In ALTO and PageXML mode all '
                   'data is extracted from xml files containing both baselines and a '
                   'link to source images. In `path` mode arguments are image files '
                   'sharing a prefix up to the last extension with JSON `.path` files '
                   'containing the baseline information.')
@click.option('--suppress-regions/--no-suppress-regions', show_default=True, default=False,
              help='Disables region segmentation training.')
@click.option('--suppress-baselines/--no-suppress-baselines', show_default=True, default=False,
              help='Disables baseline segmentation training.')
@click.option('-vr', '--valid-regions', show_default=True, default=None, multiple=True,
              help='Valid region types in training data. May be used multiple times.')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
              help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mr', '--merge-regions', show_default=True, default=None,
              help='Region merge mapping. One or more mappings of the form `$target:$src` where $src is merged into $target.',
              multiple=True, callback=_validate_merging)
@click.option('-mb', '--merge-baselines', show_default=True, default=None,
              help='Baseline type merge mapping. Same syntax as `--merge-regions`',
              multiple=True, callback=_validate_merging)
@click.option('-br', '--bounding-regions', show_default=True, default=None, multiple=True,
              help='Regions treated as boundaries for polygonization purposes. May be used multiple times.')
@click.option('--threshold', type=click.FloatRange(.01, .99), default=.3, show_default=True,
              help='Threshold for heatmap binarization. Training threshold is .3, prediction is .5')
@click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def segtest(ctx, model, evaluation_files, device, workers, threshold,
            force_binarization, format_type, test_set, suppress_regions,
            suppress_baselines, valid_regions, valid_baselines, merge_regions,
            merge_baselines, bounding_regions):
    """
    Evaluate on a test set.
    """
    if not model:
        raise click.UsageError('No model to evaluate given.')

    from torch.utils.data import DataLoader

    import torch
    import torch.nn.functional as F

    from kraken.lib.train import BaselineSet, ImageInputTransforms
    from kraken.lib.vgsl import TorchVGSLModel

    logger.info('Building test set from {} documents'.format(len(test_set) + len(evaluation_files)))

    message('Loading model {}\t'.format(model), nl=False)
    nn = TorchVGSLModel.load_model(model)
    message('\u2713', fg='green')

    test_set = list(test_set)
    if evaluation_files:
        test_set.extend(evaluation_files)

    if len(test_set) == 0:
        raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')

    _batch, _channels, _height, _width = nn.input
    transforms = ImageInputTransforms(_batch, _height, _width, _channels, 0,
                                      valid_norm=False, force_binarization=force_binarization)

    if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
        logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
        torch.multiprocessing.set_sharing_strategy('file_system')

    if not valid_regions:
        valid_regions = None
    if not valid_baselines:
        valid_baselines = None

    if suppress_regions:
        valid_regions = []
        merge_regions = None
    if suppress_baselines:
        valid_baselines = []
        merge_baselines = None

    test_set = BaselineSet(test_set,
                           line_width=nn.user_metadata["hyper_params"]["line_width"],
                           im_transforms=transforms,
                           mode=format_type,
                           augmentation=False,
                           valid_baselines=valid_baselines,
                           merge_baselines=merge_baselines,
                           valid_regions=valid_regions,
                           merge_regions=merge_regions)
    test_set.class_mapping = nn.user_metadata["class_mapping"]
    test_set.num_classes = sum([len(classDict) for classDict in test_set.class_mapping.values()])

    baselines_diff = set(test_set.class_stats["baselines"].keys()).difference(test_set.class_mapping["baselines"].keys())
    regions_diff = set(test_set.class_stats["regions"].keys()).difference(test_set.class_mapping["regions"].keys())
    if baselines_diff:
        message(f'Model baseline types missing in test set: {", ".join(sorted(list(baselines_diff)))}')
    if regions_diff:
        message(f'Model region types missing in the test set: {", ".join(sorted(list(regions_diff)))}')

    try:
        accelerator, device = to_ptl_device(device)
        if device:
            device = f'{accelerator}:{device}'
        else:
            device = accelerator
    except Exception as e:
        raise click.BadOptionUsage('device', str(e))

    ds_loader = DataLoader(test_set, batch_size=1, num_workers=workers, pin_memory=True)

    nn.to(device)
    nn.eval()
    nn.set_num_threads(1)

    pages = []
    lines_idx = list(test_set.class_mapping["baselines"].values())
    regions_idx = list(test_set.class_mapping["regions"].values())

    with KrakenProgressBar() as progress:
        batches = len(ds_loader)
        pred_task = progress.add_task('Evaluating', total=batches, visible=True if not ctx.meta['verbose'] else False)
        for batch in ds_loader:
            x, y = batch['image'], batch['target']
            try:
                pred, _ = nn.nn(x)
                # scale target to output size
                y = F.interpolate(y, size=(pred.size(2), pred.size(3))).squeeze(0).bool()
                pred = pred.squeeze() > threshold
                pred = pred.view(pred.size(0), -1)
                y = y.view(y.size(0), -1)
                pages.append({
                    'intersections': (y & pred).sum(dim=1, dtype=torch.double),
                    'unions': (y | pred).sum(dim=1, dtype=torch.double),
                    'corrects': torch.eq(y, pred).sum(dim=1, dtype=torch.double),
                    'cls_cnt': y.sum(dim=1, dtype=torch.double),
                    'all_n': torch.tensor(y.size(1), dtype=torch.double, device=device)
                })
                if lines_idx:
                    y_baselines = y[lines_idx].sum(dim=0, dtype=torch.bool)
                    pred_baselines = pred[lines_idx].sum(dim=0, dtype=torch.bool)
                    pages[-1]["baselines"] = {
                        'intersections': (y_baselines & pred_baselines).sum(dim=0, dtype=torch.double),
                        'unions': (y_baselines | pred_baselines).sum(dim=0, dtype=torch.double),
                    }
                if regions_idx:
                    y_regions_idx = y[regions_idx].sum(dim=0, dtype=torch.bool)
                    pred_regions_idx = pred[regions_idx].sum(dim=0, dtype=torch.bool)
                    pages[-1]["regions"] = {
                        'intersections': (y_regions_idx & pred_regions_idx).sum(dim=0, dtype=torch.double),
                        'unions': (y_regions_idx | pred_regions_idx).sum(dim=0, dtype=torch.double),
                    }
            except FileNotFoundError as e:
                batches -= 1
                progress.update(pred_task, total=batches)
                logger.warning('{} {}. Skipping.'.format(e.strerror, e.filename))
            except KrakenInputException as e:
                batches -= 1
                progress.update(pred_task, total=batches)
                logger.warning(str(e))
            progress.update(pred_task, advance=1)

    # Accuracy / pixel
    corrects = torch.stack([x['corrects'] for x in pages], -1).sum(dim=-1)
    all_n = torch.stack([x['all_n'] for x in pages]).sum()  # number of pixels over all pages
    class_pixel_accuracy = corrects / all_n
    mean_accuracy = torch.mean(class_pixel_accuracy)

    intersections = torch.stack([x['intersections'] for x in pages], -1).sum(dim=-1)
    unions = torch.stack([x['unions'] for x in pages], -1).sum(dim=-1)
    smooth = torch.finfo(torch.float).eps
    class_iu = (intersections + smooth) / (unions + smooth)
    mean_iu = torch.mean(class_iu)

    # per-class pixel counts; the frequency-weighted IoU weights each class IoU
    # by that class's share of ground truth pixels
    cls_cnt = torch.stack([x['cls_cnt'] for x in pages], -1).sum(dim=-1)
    freq_iu = torch.sum(cls_cnt / cls_cnt.sum() * class_iu)

    message(f"Mean Accuracy: {mean_accuracy.item():.3f}")
    message(f"Mean IOU: {mean_iu.item():.3f}")
    message(f"Frequency-weighted IOU: {freq_iu.item():.3f}")

    # Baseline accuracies
    if lines_idx:
        line_intersections = torch.stack([x["baselines"]['intersections'] for x in pages]).sum()
        line_unions = torch.stack([x["baselines"]['unions'] for x in pages]).sum()
        smooth = torch.finfo(torch.float).eps
        line_iu = (line_intersections + smooth) / (line_unions + smooth)
        message(f"Class-independent Baseline IOU: {line_iu.item():.3f}")

    # Region accuracies
    if regions_idx:
        region_intersections = torch.stack([x["regions"]['intersections'] for x in pages]).sum()
        region_unions = torch.stack([x["regions"]['unions'] for x in pages]).sum()
        smooth = torch.finfo(torch.float).eps
        region_iu = (region_intersections + smooth) / (region_unions + smooth)
        message(f"Class-independent Region IOU: {region_iu.item():.3f}")

    from rich.console import Console
    from rich.table import Table

    table = Table('Category', 'Class Name', 'Pixel Accuracy', 'IOU', 'Object Count')
    class_iu = class_iu.tolist()
    class_pixel_accuracy = class_pixel_accuracy.tolist()
    for (cat, class_name), iu, pix_acc in zip(
            [(cat, key) for (cat, subcategory) in test_set.class_mapping.items() for key in subcategory],
            class_iu,
            class_pixel_accuracy):
        table.add_row(cat, class_name, f'{pix_acc:.3f}', f'{iu:.3f}',
                      f'{test_set.class_stats[cat][class_name]}' if cat != "aux" else 'N/A')

    console = Console()
    console.print(table)
29,987
48.98
151
py
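The `segtest` metrics above reduce per-page intersections and unions across the test set and then apply an epsilon-smoothed IoU so that classes which never occur do not produce NaN. That formula, isolated as a small function (the function name is illustrative; the arithmetic is taken directly from the code above):

import torch

def smoothed_iou(intersections: torch.Tensor, unions: torch.Tensor) -> torch.Tensor:
    # eps keeps classes that never occur (union == 0) from producing NaN
    smooth = torch.finfo(torch.float).eps
    return (intersections + smooth) / (unions + smooth)

iou = smoothed_iou(torch.tensor([10., 0.]), torch.tensor([20., 0.]))
print(iou)  # tensor([0.5000, 1.0000]) -- an absent class counts as perfect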
kraken
kraken-main/kraken/ketos/util.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.util
~~~~~~~~~~~~~~~~~

Command line driver helpers.
"""
import os
import glob
import click
import logging

from typing import List, Tuple, Union

logging.captureWarnings(True)
logger = logging.getLogger('kraken')


def _validate_manifests(ctx, param, value):
    images = []
    for manifest in value:
        for entry in manifest.readlines():
            im_p = entry.rstrip('\r\n')
            if os.path.isfile(im_p):
                images.append(im_p)
            else:
                logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
    return images


def _expand_gt(ctx, param, value):
    images = []
    for expression in value:
        images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
    return images


def message(msg, **styles):
    if logger.getEffectiveLevel() >= 30:
        click.secho(msg, **styles)


def to_ptl_device(device: str) -> Tuple[str, Union[str, List[int]]]:
    # the second element is either the literal 'auto' or a list of device
    # indices, matching what PyTorch Lightning expects for `devices`
    if any([device == x for x in ['cpu', 'mps']]):
        return device, 'auto'
    elif any([device.startswith(x) for x in ['tpu', 'cuda', 'hpu', 'ipu']]):
        dev, idx = device.split(':')
        if dev == 'cuda':
            dev = 'gpu'
        return dev, [int(idx)]
    raise Exception(f'Invalid device {device} specified')
1,889
28.076923
95
py
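A quick illustration of what `to_ptl_device` above returns for common device strings; the outputs follow directly from the function body, and the snippet assumes only that the kraken package is importable:

# `to_ptl_device` maps kraken-style device strings to the
# (accelerator, devices) pair expected by PyTorch Lightning.
from kraken.ketos.util import to_ptl_device

print(to_ptl_device('cpu'))     # ('cpu', 'auto')
print(to_ptl_device('cuda:1'))  # ('gpu', [1])
print(to_ptl_device('tpu:0'))   # ('tpu', [0])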
kraken
kraken-main/kraken/ketos/__init__.py
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos
~~~~~~~~~~~~

Command line drivers for training functionality.
"""
import click
import logging

from PIL import Image
from rich.traceback import install

from kraken.lib import log

from .dataset import compile
from .linegen import line_generator
from .pretrain import pretrain
from .recognition import train, test
from .repo import publish
from .segmentation import segtrain, segtest
from .transcription import extract, transcription

APP_NAME = 'kraken'

logging.captureWarnings(True)
logger = logging.getLogger('kraken')

# install rich traceback handler
install(suppress=[click])

# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2


@click.group()
@click.version_option()
@click.pass_context
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
              help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
                   'ensure reproducible random splits of data')
@click.option('-r', '--deterministic/--no-deterministic', default=False,
              help="Enables deterministic training. If no seed is given and enabled the seed will be set to 42.")
def cli(ctx, verbose, seed, deterministic):
    ctx.meta['deterministic'] = False if not deterministic else 'warn'
    if seed:
        from pytorch_lightning import seed_everything
        seed_everything(seed, workers=True)
    elif deterministic:
        from pytorch_lightning import seed_everything
        seed_everything(42, workers=True)

    ctx.meta['verbose'] = verbose
    log.set_logger(logger, level=30 - min(10 * verbose, 20))


cli.add_command(compile)
cli.add_command(pretrain)
cli.add_command(train)
cli.add_command(test)
cli.add_command(segtrain)
cli.add_command(segtest)
cli.add_command(publish)

# deprecated commands
cli.add_command(line_generator)
cli.add_command(extract)
cli.add_command(transcription)

if __name__ == '__main__':
    cli()
2,546
28.275862
113
py
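The `-v` handling in `cli` above lowers the log level by 10 per repetition of the flag, clamped at two steps below WARNING. The arithmetic in isolation (the helper name is illustrative):

# 30 (WARNING) by default, 20 (INFO) with -v, 10 (DEBUG) with -vv or more
def level_for(verbose: int) -> int:
    return 30 - min(10 * verbose, 20)

assert [level_for(v) for v in (0, 1, 2, 5)] == [30, 20, 10, 10]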
kraken
kraken-main/kraken/ketos/transcription.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.transcription
~~~~~~~~~~~~~~~~~~~~~~~~~~

Legacy command line drivers for recognition training data annotation.
"""
import os
import uuid
import click
import logging
import unicodedata

from typing import IO, Any, cast
from bidi.algorithm import get_display

from kraken.lib.progress import KrakenProgressBar

from .util import message

logging.captureWarnings(True)
logger = logging.getLogger('kraken')


@click.command('extract', deprecated=True)
@click.pass_context
@click.option('-b', '--binarize/--no-binarize', show_default=True, default=True,
              help='Binarize color/grayscale images')
@click.option('-u', '--normalization', show_default=True,
              type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
              help='Normalize ground truth')
@click.option('-s', '--normalize-whitespace/--no-normalize-whitespace', show_default=True,
              default=True, help='Normalizes unicode whitespace')
@click.option('-n', '--reorder/--no-reorder', default=False, show_default=True,
              help='Reorder transcribed lines to display order')
@click.option('-r', '--rotate/--no-rotate', default=True, show_default=True,
              help='Rotate vertical lines to horizontal (disable with --no-rotate)')
@click.option('-o', '--output', type=click.Path(), default='training', show_default=True,
              help='Output directory')
@click.option('--format', default='{idx:06d}', show_default=True,
              help='Format for extractor output. Valid fields are `src` (source file), `idx` (line number), and `uuid` (v4 uuid)')
@click.argument('transcriptions', nargs=-1, type=click.File(lazy=True))
def extract(ctx, binarize, normalization, normalize_whitespace, reorder,
            rotate, output, format, transcriptions):
    """
    Extracts image-text pairs from a transcription environment created using
    ``ketos transcribe``.
    """
    import regex
    import base64

    from io import BytesIO
    from PIL import Image
    from lxml import html, etree

    from kraken import binarization

    try:
        os.mkdir(output)
    except Exception:
        pass

    text_transforms = []
    if normalization:
        text_transforms.append(lambda x: unicodedata.normalize(normalization, x))
    if normalize_whitespace:
        text_transforms.append(lambda x: regex.sub(r'\s', ' ', x))
    if reorder:
        text_transforms.append(get_display)

    idx = 0
    manifest = []

    with KrakenProgressBar() as progress:
        read_task = progress.add_task('Reading transcriptions', total=len(transcriptions), visible=True if not ctx.meta['verbose'] else False)
        for fp in transcriptions:
            logger.info('Reading {}'.format(fp.name))
            doc = html.parse(fp)
            etree.strip_tags(doc, etree.Comment)
            td = doc.find(".//meta[@itemprop='text_direction']")
            if td is None:
                td = 'horizontal-lr'
            else:
                td = td.attrib['content']

            im = None
            dest_dict = {'output': output, 'idx': 0, 'src': fp.name, 'uuid': str(uuid.uuid4())}
            for section in doc.xpath('//section'):
                img = section.xpath('.//img')[0].get('src')
                fd = BytesIO(base64.b64decode(img.split(',')[1]))
                im = Image.open(fd)
                if not im:
                    logger.info('Skipping {} because image not found'.format(fp.name))
                    break
                if binarize:
                    im = binarization.nlbin(im)
                for line in section.iter('li'):
                    if line.get('contenteditable') and (not u''.join(line.itertext()).isspace() and u''.join(line.itertext())):
                        dest_dict['idx'] = idx
                        dest_dict['uuid'] = str(uuid.uuid4())
                        logger.debug('Writing line {:06d}'.format(idx))
                        l_img = im.crop([int(x) for x in line.get('data-bbox').split(',')])
                        if rotate and td.startswith('vertical'):
                            # rotate the cropped line into horizontal orientation;
                            # the original discarded the result of im.rotate()
                            l_img = l_img.rotate(90, expand=True)
                        l_img.save(('{output}/' + format + '.png').format(**dest_dict))
                        manifest.append((format + '.png').format(**dest_dict))
                        text = u''.join(line.itertext()).strip()
                        for func in text_transforms:
                            text = func(text)
                        with open(('{output}/' + format + '.gt.txt').format(**dest_dict), 'wb') as t:
                            t.write(text.encode('utf-8'))
                        idx += 1
            progress.update(read_task, advance=1)

    logger.info('Extracted {} lines'.format(idx))
    with open('{}/manifest.txt'.format(output), 'w') as fp:
        fp.write('\n'.join(manifest))


@click.command('transcribe', deprecated=True)
@click.pass_context
@click.option('-d', '--text-direction', default='horizontal-lr',
              type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']),
              help='Sets principal text direction', show_default=True)
@click.option('--scale', default=None, type=click.FLOAT)
@click.option('--bw/--orig', default=True, show_default=True,
              help='Put nonbinarized images in output')
@click.option('-m', '--maxcolseps', default=2, type=click.INT, show_default=True)
@click.option('-b/-w', '--black_colseps/--white_colseps', default=False, show_default=True)
@click.option('-f', '--font', default='', help='Font family to use')
@click.option('-fs', '--font-style', default=None, help='Font style to use')
@click.option('-p', '--prefill', default=None, help='Use given model for prefill mode.')
@click.option('--pad', show_default=True, type=(int, int), default=(0, 0),
              help='Left and right padding around lines')
@click.option('-l', '--lines', type=click.Path(exists=True), show_default=True,
              help='JSON file containing line coordinates')
@click.option('-o', '--output', type=click.File(mode='wb'), default='transcription.html',
              help='Output file', show_default=True)
@click.argument('images', nargs=-1, type=click.File(mode='rb', lazy=True))
def transcription(ctx, text_direction, scale, bw, maxcolseps, black_colseps,
                  font, font_style, prefill, pad, lines, output, images):
    """
    Creates transcription environments for ground truth generation.
    """
    import json

    from PIL import Image

    from kraken import rpred
    from kraken import pageseg
    from kraken import transcribe
    from kraken import binarization

    from kraken.lib import models

    ti = transcribe.TranscriptionInterface(font, font_style)

    if len(images) > 1 and lines:
        raise click.UsageError('--lines option is incompatible with multiple image files')

    if prefill:
        logger.info('Loading model {}'.format(prefill))
        message('Loading ANN', nl=False)
        prefill = models.load_any(prefill)
        message('\u2713', fg='green')

    with KrakenProgressBar() as progress:
        read_task = progress.add_task('Reading images', total=len(images), visible=True if not ctx.meta['verbose'] else False)
        for fp in images:
            logger.info('Reading {}'.format(fp.name))
            im = Image.open(fp)
            if im.mode not in ['1', 'L', 'P', 'RGB']:
                logger.warning('Input {} is in {} color mode. Converting to RGB'.format(fp.name, im.mode))
                im = im.convert('RGB')
            logger.info('Binarizing page')
            im_bin = binarization.nlbin(im)
            im_bin = im_bin.convert('1')
            logger.info('Segmenting page')
            if not lines:
                res = pageseg.segment(im_bin, text_direction, scale, maxcolseps, black_colseps, pad=pad)
            else:
                with click.open_file(lines, 'r') as fp:
                    try:
                        fp = cast(IO[Any], fp)
                        res = json.load(fp)
                    except ValueError as e:
                        raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))
            if prefill:
                it = rpred.rpred(prefill, im_bin, res.copy())
                preds = []
                logger.info('Recognizing')
                for pred in it:
                    logger.debug('{}'.format(pred.prediction))
                    preds.append(pred)
                ti.add_page(im, res, records=preds)
            else:
                ti.add_page(im, res)
            fp.close()
            progress.update(read_task, advance=1)

    logger.info('Writing transcription to {}'.format(output.name))
    message('Writing output ', nl=False)
    ti.write(output)
    message('\u2713', fg='green')
9,437
40.946667
142
py
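`extract` above builds a list of text transforms (Unicode normalization, whitespace folding, BiDi reordering) and folds them over each transcribed line. The composition pattern in isolation, with stand-in transforms rather than kraken's exact ones (the second lambda collapses whitespace runs, whereas the code above replaces each whitespace character individually):

import unicodedata

# Stand-in pipeline mirroring `text_transforms` in `extract` above.
text_transforms = [
    lambda x: unicodedata.normalize('NFC', x),
    lambda x: ' '.join(x.split()),  # collapse whitespace runs
]

text = 'A\u0301  bc'  # 'Á' as base letter + combining accent, double space
for func in text_transforms:
    text = func(text)
print(text)  # 'Á bc'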
kraken
kraken-main/kraken/ketos/repo.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.repo
~~~~~~~~~~~~~~~~~

Command line driver for publishing models to the model repository.
"""
import os
import click
import logging

from kraken.lib.progress import KrakenDownloadProgressBar

from .util import message

logging.captureWarnings(True)
logger = logging.getLogger('kraken')


@click.command('publish')
@click.pass_context
@click.option('-i', '--metadata', show_default=True, type=click.File(mode='r', lazy=True),
              help='Metadata for the model. Will be prompted from the user if not given')
@click.option('-a', '--access-token', prompt=True, help='Zenodo access token')
@click.option('-p', '--private/--public', default=False,
              help='Disables Zenodo community inclusion request. Allows upload '
                   'of models that will not show up on `kraken list` output')
@click.argument('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False))
def publish(ctx, metadata, access_token, private, model):
    """
    Publishes a model on the zenodo model repository.
    """
    import json
    import pkg_resources

    from jsonschema import validate
    from jsonschema.exceptions import ValidationError

    from kraken import repo
    from kraken.lib import models

    with pkg_resources.resource_stream('kraken', 'metadata.schema.json') as fp:
        schema = json.load(fp)

    nn = models.load_any(model)

    if not metadata:
        author = click.prompt('author')
        affiliation = click.prompt('affiliation')
        summary = click.prompt('summary')
        description = click.edit('Write long form description (training data, transcription standards) of the model here')
        accuracy_default = None
        # take last accuracy measurement in model metadata
        if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']:
            accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100
        accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default)
        script = [click.prompt('script',
                               type=click.Choice(sorted(schema['properties']['script']['items']['enum'])),
                               show_choices=True)]
        license = click.prompt('license',
                               type=click.Choice(sorted(schema['properties']['license']['enum'])),
                               show_choices=True)
        metadata = {
            'authors': [{'name': author, 'affiliation': affiliation}],
            'summary': summary,
            'description': description,
            'accuracy': accuracy,
            'license': license,
            'script': script,
            'name': os.path.basename(model),
            'graphemes': ['a']
        }
        while True:
            try:
                validate(metadata, schema)
            except ValidationError as e:
                message(e.message)
                metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str)
                continue
            break
    else:
        metadata = json.load(metadata)
        validate(metadata, schema)
    metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())]
    with KrakenDownloadProgressBar() as progress:
        upload_task = progress.add_task('Uploading', total=0, visible=True if not ctx.meta['verbose'] else False)
        oid = repo.publish_model(model, metadata, access_token,
                                 lambda total, advance: progress.update(upload_task, total=total, advance=advance),
                                 private)
    message('model PID: {}'.format(oid))
4,240
37.207207
155
py
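The metadata prompt loop in `publish` above leans on `jsonschema.validate` raising a `ValidationError` whose `path` names the offending field, then re-prompts and retries until the document validates. A compact sketch of the same retry pattern with a toy schema; the schema and the canned answers here are illustrative, not kraken's real metadata schema:

from jsonschema import validate
from jsonschema.exceptions import ValidationError

schema = {'type': 'object',
          'properties': {'accuracy': {'type': 'number'}},
          'required': ['accuracy']}
metadata = {'accuracy': 'not-a-number'}
fixes = iter([97.2])  # stand-in for interactive click.prompt answers

while True:
    try:
        validate(metadata, schema)
    except ValidationError as e:
        # e.path names the failing field; fix it and retry, as above
        metadata[e.path[-1]] = next(fixes)
        continue
    break
print(metadata)  # {'accuracy': 97.2}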
kraken
kraken-main/kraken/ketos/recognition.py
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.train
~~~~~~~~~~~~~~~~~~

Command line driver for recognition training and evaluation.
"""
import click
import logging
import pathlib

from typing import List

from kraken.lib.progress import KrakenProgressBar
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.default_specs import RECOGNITION_HYPER_PARAMS, RECOGNITION_SPEC

from .util import _validate_manifests, _expand_gt, message, to_ptl_device

logging.captureWarnings(True)
logger = logging.getLogger('kraken')


@click.command('train')
@click.pass_context
@click.option('-B', '--batch-size', show_default=True, type=click.INT,
              default=RECOGNITION_HYPER_PARAMS['batch_size'], help='Batch sample size')
@click.option('--pad', show_default=True, type=click.INT, default=16,
              help='Left and right padding around lines')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model',
              help='Output model file')
@click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC,
              help='VGSL spec of the network to train. CTC layer will be added automatically.')
@click.option('-a', '--append', show_default=True, default=None, type=click.INT,
              help='Removes layers before argument and then appends spec. Only works when loading an existing model')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True),
              help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=RECOGNITION_HYPER_PARAMS['freq'], type=click.FLOAT,
              help='Model saving and report generation frequency in epochs '
                   'during training. If frequency is >1 it must be an integer, '
                   'i.e. running validation every n-th epoch.')
@click.option('-q', '--quit', show_default=True, default=RECOGNITION_HYPER_PARAMS['quit'],
              type=click.Choice(['early', 'dumb']),
              help='Stop condition for training. Set to `early` for early stopping or `dumb` for a fixed number of epochs.')
@click.option('-N', '--epochs', show_default=True, default=RECOGNITION_HYPER_PARAMS['epochs'],
              help='Number of epochs to train for')
@click.option('--min-epochs', show_default=True, default=RECOGNITION_HYPER_PARAMS['min_epochs'],
              help='Minimal number of epochs to train for when using early stopping.')
@click.option('--lag', show_default=True, default=RECOGNITION_HYPER_PARAMS['lag'],
              help='Number of evaluations (--report frequency) to wait before stopping training without improvement')
@click.option('--min-delta', show_default=True, default=RECOGNITION_HYPER_PARAMS['min_delta'], type=click.FLOAT,
              help='Minimum improvement between epochs to reset early stopping. By default the delta is scaled by the best loss.')
@click.option('-d', '--device', show_default=True, default='cpu',
              help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--precision', show_default=True, default='32', type=click.Choice(['64', '32', 'bf16', '16']),
              help='Numerical precision to use for training. Default is 32-bit single precision.')
@click.option('--optimizer', show_default=True, default=RECOGNITION_HYPER_PARAMS['optimizer'],
              type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=RECOGNITION_HYPER_PARAMS['lrate'],
              help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=RECOGNITION_HYPER_PARAMS['momentum'],
              help='Momentum')
@click.option('-w', '--weight-decay', show_default=True, type=float,
              default=RECOGNITION_HYPER_PARAMS['weight_decay'], help='Weight decay')
@click.option('--warmup', show_default=True, type=int, default=RECOGNITION_HYPER_PARAMS['warmup'],
              help='Number of steps to ramp up to `lrate` initial learning rate.')
@click.option('--freeze-backbone', show_default=True, type=int,
              default=RECOGNITION_HYPER_PARAMS['freeze_backbone'],
              help='Number of samples to keep the backbone (everything but last layer) frozen.')
@click.option('--schedule', show_default=True,
              type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']),
              default=RECOGNITION_HYPER_PARAMS['schedule'],
              help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--epoch` option.')
@click.option('-g', '--gamma', show_default=True, default=RECOGNITION_HYPER_PARAMS['gamma'],
              help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')
@click.option('-ss', '--step-size', show_default=True, default=RECOGNITION_HYPER_PARAMS['step_size'],
              help='Number of validation runs between learning rate decay for exponential and step LR schedules')
@click.option('--sched-patience', show_default=True, default=RECOGNITION_HYPER_PARAMS['rop_patience'],
              help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')
@click.option('--cos-max', show_default=True, default=RECOGNITION_HYPER_PARAMS['cos_t_max'],
              help='Epoch of minimal learning rate for cosine LR scheduler.')
@click.option('-p', '--partition', show_default=True, default=0.9,
              help='Ground truth data partition ratio between train/validation set')
@click.option('--fixed-splits/--ignore-fixed-split', show_default=True, default=False,
              help='Whether to honor fixed splits in binary datasets.')
@click.option('-u', '--normalization', show_default=True,
              type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
              default=RECOGNITION_HYPER_PARAMS['normalization'], help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace', show_default=True,
              default=RECOGNITION_HYPER_PARAMS['normalize_whitespace'], help='Normalizes unicode whitespace')
@click.option('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True),
              help='Load a codec JSON definition (invalid if loading existing model)')
@click.option('--resize', show_default=True, default='fail',
              type=click.Choice([
                  'add', 'union',   # Deprecation: `add` is deprecated, `union` is the new value
                  'both', 'new',    # Deprecation: `both` is deprecated, `new` is the new value
                  'fail'
              ]),
              help='Codec/output layer resizing option. If set to `union` code '
                   'points will be added, `new` will set the layer to match exactly '
                   'the training data, `fail` will abort if training data and model '
                   'codec do not match.')
@click.option('--reorder/--no-reorder', show_default=True, default=True,
              help='Reordering of code points to display order')
@click.option('--base-dir', show_default=True, default='auto',
              type=click.Choice(['L', 'R', 'auto']),
              help='Set base text direction. This should be set to the direction '
                   'used during the creation of the training data. If set to '
                   '`auto` it will be overridden by any explicit value given in '
                   'the input files.')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--workers', show_default=True, default=1,
              help='Number of OpenMP threads and workers when running on CPU.')
@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
              help='When loading an existing model, retrieve hyperparameters from the model')
@click.option('--repolygonize/--no-repolygonize', show_default=True, default=False,
              help='Repolygonizes line data in ALTO/PageXML files. This ensures '
                   'that the trained model is compatible with the segmenter in '
                   'kraken even if the original image files either do not contain '
                   'anything but transcriptions and baseline information or the '
                   'polygon data was created using a different method. Will be '
                   'ignored in `path` mode. Note that this option will be slow '
                   'and will not scale input images to the same size as the segmenter does.')
@click.option('--force-binarization/--no-binarization', show_default=True, default=False,
              help='Forces input images to be binary, otherwise the appropriate '
                   'color format will be auto-determined through the network '
                   'specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path',
              help='Sets the training data format. In ALTO and PageXML mode all '
                   'data is extracted from xml files containing both line definitions and a '
                   'link to source images. In `path` mode arguments are image files '
                   'sharing a prefix up to the last extension with `.gt.txt` text files '
                   'containing the transcription. In binary mode files are dataset '
                   'files containing pre-extracted text lines.')
@click.option('--augment/--no-augment', show_default=True,
              default=RECOGNITION_HYPER_PARAMS['augment'], help='Enable image augmentation')
@click.option('--logger', 'pl_logger', show_default=True, type=click.Choice(['tensorboard']), default=None,
              help='Logger used by PyTorch Lightning to track metrics such as loss and accuracy.')
@click.option('--log-dir', show_default=True, type=click.Path(exists=True, dir_okay=True, writable=True),
              help='Path to directory where the logger will store the logs. If not set, a directory will be created in the current working directory.')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, batch_size, pad, output, spec, append, load, freq, quit, epochs,
          min_epochs, lag, min_delta, device, precision, optimizer, lrate,
          momentum, weight_decay, warmup, freeze_backbone, schedule, gamma,
          step_size, sched_patience, cos_max, partition, fixed_splits,
          normalization, normalize_whitespace, codec, resize, reorder,
          base_dir, training_files, evaluation_files, workers,
          load_hyper_parameters, repolygonize, force_binarization, format_type,
          augment, pl_logger, log_dir, ground_truth):
    """
    Trains a model from image-text pairs.
""" if not load and append: raise click.BadOptionUsage('append', 'append option requires loading an existing model') if resize != 'fail' and not load: raise click.BadOptionUsage('resize', 'resize option requires loading an existing model') if not (0 <= freq <= 1) and freq % 1.0 != 0: raise click.BadOptionUsage('freq', 'freq needs to be either in the interval [0,1.0] or a positive integer.') if augment: try: import albumentations # NOQA except ImportError: raise click.BadOptionUsage('augment', 'augmentation needs the `albumentations` package installed.') if pl_logger == 'tensorboard': try: import tensorboard # NOQA except ImportError: raise click.BadOptionUsage('logger', 'tensorboard logger needs the `tensorboard` package installed.') if log_dir is None: log_dir = pathlib.Path.cwd() import json import shutil from kraken.lib.train import RecognitionModel, KrakenTrainer hyper_params = RECOGNITION_HYPER_PARAMS.copy() hyper_params.update({'freq': freq, 'pad': pad, 'batch_size': batch_size, 'quit': quit, 'epochs': epochs, 'min_epochs': min_epochs, 'lag': lag, 'min_delta': min_delta, 'optimizer': optimizer, 'lrate': lrate, 'momentum': momentum, 'weight_decay': weight_decay, 'warmup': warmup, 'freeze_backbone': freeze_backbone, 'schedule': schedule, 'gamma': gamma, 'step_size': step_size, 'rop_patience': sched_patience, 'cos_t_max': cos_max, 'normalization': normalization, 'normalize_whitespace': normalize_whitespace, 'augment': augment, }) # disable automatic partition when given evaluation set explicitly if evaluation_files: partition = 1 ground_truth = list(ground_truth) # merge training_files into ground_truth list if training_files: ground_truth.extend(training_files) if len(ground_truth) == 0: raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.') if reorder and base_dir != 'auto': reorder = base_dir if codec: logger.debug(f'Loading codec file from {codec}') codec = json.load(codec) try: accelerator, device = to_ptl_device(device) except Exception as e: raise click.BadOptionUsage('device', str(e)) if hyper_params['freq'] > 1: val_check_interval = {'check_val_every_n_epoch': int(hyper_params['freq'])} else: val_check_interval = {'val_check_interval': hyper_params['freq']} model = RecognitionModel(hyper_params=hyper_params, output=output, spec=spec, append=append, model=load, reorder=reorder, training_data=ground_truth, evaluation_data=evaluation_files, partition=partition, binary_dataset_split=fixed_splits, num_workers=workers, load_hyper_parameters=load_hyper_parameters, repolygonize=repolygonize, force_binarization=force_binarization, format_type=format_type, codec=codec, resize=resize) trainer = KrakenTrainer(accelerator=accelerator, devices=device, precision=precision, max_epochs=hyper_params['epochs'] if hyper_params['quit'] == 'dumb' else -1, min_epochs=hyper_params['min_epochs'], freeze_backbone=hyper_params['freeze_backbone'], enable_progress_bar=True if not ctx.meta['verbose'] else False, deterministic=ctx.meta['deterministic'], pl_logger=pl_logger, log_dir=log_dir, **val_check_interval) try: trainer.fit(model) except KrakenInputException as e: if e.args[0].startswith('Training data and model codec alphabets mismatch') and resize == 'fail': raise click.BadOptionUsage('resize', 'Mismatched training data for loaded model. 
Set option `--resize` to `new` or `add`') else: raise e if model.best_epoch == -1: logger.warning('Model did not improve during training.') ctx.exit(1) if quit == 'early': message(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel') logger.info(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel') shutil.copy(f'{model.best_model}', f'{output}_best.mlmodel') @click.command('test') @click.pass_context @click.option('-B', '--batch-size', show_default=True, type=click.INT, default=RECOGNITION_HYPER_PARAMS['batch_size'], help='Batch sample size') @click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True), multiple=True, help='Model(s) to evaluate') @click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with paths to evaluation data.') @click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('--pad', show_default=True, type=click.INT, default=16, help='Left and right ' 'padding around lines') @click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.') @click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order') @click.option('--base-dir', show_default=True, default='auto', type=click.Choice(['L', 'R', 'auto']), help='Set base text ' 'direction. This should be set to the direction used during the ' 'creation of the training data. If set to `auto` it will be ' 'overridden by any explicit value given in the input files.') @click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None, help='Ground truth normalization') @click.option('-n', '--normalize-whitespace/--no-normalize-whitespace', show_default=True, default=True, help='Normalizes unicode whitespace') @click.option('--repolygonize/--no-repolygonize', show_default=True, default=False, help='Repolygonizes line data in ALTO/PageXML ' 'files. This ensures that the trained model is compatible with the ' 'segmenter in kraken even if the original image files either do ' 'not contain anything but transcriptions and baseline information ' 'or the polygon data was created using a different method. Will ' 'be ignored in `path` mode. Note, that this option will be slow ' 'and will not scale input images to the same size as the segmenter ' 'does.') @click.option('--force-binarization/--no-binarization', show_default=True, default=False, help='Forces input images to be binary, otherwise ' 'the appropriate color format will be auto-determined through the ' 'network specification. Will be ignored in `path` mode.') @click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path', help='Sets the training data format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines and a ' 'link to source images. In `path` mode arguments are image files ' 'sharing a prefix up to the last extension with JSON `.path` files ' 'containing the baseline information. 
In `binary` mode files are ' 'collections of pre-extracted text line images.') @click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False)) def test(ctx, batch_size, model, evaluation_files, device, pad, workers, reorder, base_dir, normalization, normalize_whitespace, repolygonize, force_binarization, format_type, test_set): """ Evaluate on a test set. """ if not model: raise click.UsageError('No model to evaluate given.') import numpy as np from torch.utils.data import DataLoader from kraken.serialization import render_report from kraken.lib import models from kraken.lib.xml import preparse_xml_data from kraken.lib.dataset import (global_align, compute_confusions, PolygonGTDataset, GroundTruthDataset, ImageInputTransforms, ArrowIPCRecognitionDataset, collate_sequences) logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files))) nn = {} for p in model: message('Loading model {}\t'.format(p), nl=False) nn[p] = models.load_any(p) message('\u2713', fg='green') test_set = list(test_set) # set number of OpenMP threads next(iter(nn.values())).nn.set_num_threads(1) if evaluation_files: test_set.extend(evaluation_files) if len(test_set) == 0: raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.') if format_type in ['xml', 'page', 'alto']: if repolygonize: message('Repolygonizing data') test_set = preparse_xml_data(test_set, format_type, repolygonize) valid_norm = False DatasetClass = PolygonGTDataset elif format_type == 'binary': DatasetClass = ArrowIPCRecognitionDataset if repolygonize: logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.') test_set = [{'file': file} for file in test_set] valid_norm = False else: DatasetClass = GroundTruthDataset if force_binarization: logger.warning('Forced binarization enabled in `path` mode. Will be ignored.') force_binarization = False if repolygonize: logger.warning('Repolygonization enabled in `path` mode. Will be ignored.') test_set = [{'image': img} for img in test_set] valid_norm = True if len(test_set) == 0: raise click.UsageError('No evaluation data was provided to the test command. 
Use `-e` or the `test_set` argument.') if reorder and base_dir != 'auto': reorder = base_dir acc_list = [] for p, net in nn.items(): algn_gt: List[str] = [] algn_pred: List[str] = [] chars = 0 error = 0 message('Evaluating {}'.format(p)) logger.info('Evaluating {}'.format(p)) batch, channels, height, width = net.nn.input ts = ImageInputTransforms(batch, height, width, channels, (pad, 0), valid_norm, force_binarization) ds = DatasetClass(normalization=normalization, whitespace_normalization=normalize_whitespace, reorder=reorder, im_transforms=ts) for line in test_set: try: ds.add(**line) except KrakenInputException as e: logger.info(e) # don't encode validation set as the alphabets may not match causing encoding failures ds.no_encode() ds_loader = DataLoader(ds, batch_size=batch_size, num_workers=workers, pin_memory=True, collate_fn=collate_sequences) with KrakenProgressBar() as progress: batches = len(ds_loader) pred_task = progress.add_task('Evaluating', total=batches, visible=True if not ctx.meta['verbose'] else False) for batch in ds_loader: im = batch['image'] text = batch['target'] lens = batch['seq_lens'] try: pred = net.predict_string(im, lens) for x, y in zip(pred, text): chars += len(y) c, algn1, algn2 = global_align(y, x) algn_gt.extend(algn1) algn_pred.extend(algn2) error += c except FileNotFoundError as e: batches -= 1 progress.update(pred_task, total=batches) logger.warning('{} {}. Skipping.'.format(e.strerror, e.filename)) except KrakenInputException as e: batches -= 1 progress.update(pred_task, total=batches) logger.warning(str(e)) progress.update(pred_task, advance=1) acc_list.append((chars - error) / chars) confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred) rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs) logger.info(rep) message(rep) logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100)) message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
26,702
52.087475
151
py
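The `--freq` plumbing in the train command above is easy to misread: values in (0, 1] become a fraction of an epoch between validation runs, while values above 1 must be whole epochs. A pure-Python sketch of exactly that dispatch:

def freq_to_val_interval(freq: float) -> dict:
    # Mirrors the branch in `train`: freq > 1 selects whole-epoch
    # validation, freq in (0, 1] a fraction of an epoch.
    if freq > 1:
        return {'check_val_every_n_epoch': int(freq)}
    return {'val_check_interval': freq}

assert freq_to_val_interval(3) == {'check_val_every_n_epoch': 3}
assert freq_to_val_interval(0.25) == {'val_check_interval': 0.25}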
kraken
kraken-main/kraken/contrib/heatmap_overlay.py
#! /usr/bin/env python
"""
Produces semi-transparent neural segmenter output overlays
"""
import click


@click.command()
@click.option('-i', '--model', default=None, show_default=True, type=click.Path(exists=True),
              help='Baseline detection model to use.')
@click.argument('files', nargs=-1)
def cli(model, files):
    """
    Applies a BLLA baseline segmentation model and outputs the raw heatmaps of
    the first baseline class.
    """
    import torch
    from PIL import Image
    from kraken.lib import vgsl, dataset
    import torch.nn.functional as F
    from os.path import splitext
    import torchvision.transforms as tf

    model = vgsl.TorchVGSLModel.load_model(model)
    model.eval()
    batch, channels, height, width = model.input

    transforms = dataset.ImageInputTransforms(batch, height, width, channels, 0, valid_norm=False)

    torch.set_num_threads(1)

    for img in files:
        print(img)
        im = Image.open(img)
        xs = transforms(im)
        with torch.no_grad():
            o, _ = model.nn(xs.unsqueeze(0))
            o = F.interpolate(o, size=xs.shape[1:])
            o = o.squeeze().numpy()
        scal_im = tf.ToPILImage()(1-xs)
        heat = Image.fromarray((o[2]*255).astype('uint8'))
        heat.save(splitext(img)[0] + '.heat.png')
        overlay = Image.new('RGBA', scal_im.size, (0, 130, 200, 255))
        bl = Image.composite(overlay, scal_im.convert('RGBA'), heat)
        heat = Image.fromarray((o[1]*255).astype('uint8'))
        overlay = Image.new('RGBA', scal_im.size, (230, 25, 75, 255))
        bl = Image.composite(overlay, bl, heat)
        heat = Image.fromarray((o[0]*255).astype('uint8'))
        overlay = Image.new('RGBA', scal_im.size, (60, 180, 75, 255))
        Image.composite(overlay, bl, heat).save(splitext(img)[0] + '.overlay.png')
        del o
        del im


if __name__ == '__main__':
    cli()
1,903
31.271186
98
py
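The overlay trick in the script above is `Image.composite(overlay, base, mask)` with a class heatmap reused as the alpha mask. A self-contained sketch with a synthetic heatmap (assumes only Pillow and numpy; the gradient stands in for a real segmenter output):

import numpy as np
from PIL import Image

# Synthetic 64x64 "heatmap": a horizontal gradient in [0, 255].
o = np.tile(np.linspace(0, 255, 64, dtype=np.uint8), (64, 1))
heat = Image.fromarray(o)                      # mode 'L', used as alpha mask
base = Image.new('RGBA', heat.size, (255, 255, 255, 255))
overlay = Image.new('RGBA', heat.size, (0, 130, 200, 255))

# Where the mask is bright the overlay colour wins; elsewhere the base shows.
out = Image.composite(overlay, base, heat)
out.save('synthetic.overlay.png')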
kraken
kraken-main/kraken/contrib/recognition_boxes.py
#!/usr/bin/env python
"""
Draws transparent character bounding boxes over images given a legacy
segmenter model.
"""
import os
import sys

from PIL import Image, ImageDraw

from kraken.pageseg import segment
from kraken.binarization import nlbin
from kraken.rpred import rpred
from itertools import cycle
from kraken.lib import models

cmap = cycle([(230, 25, 75, 127),
              (60, 180, 75, 127),
              (255, 225, 25, 127),
              (0, 130, 200, 127),
              (245, 130, 48, 127),
              (145, 30, 180, 127),
              (70, 240, 240, 127)])

net = models.load_any(sys.argv[1])

for fname in sys.argv[2:]:
    im = Image.open(fname)
    print(fname)
    im = nlbin(im)
    res = segment(im, maxcolseps=0)
    pred = rpred(net, im, res)
    im = im.convert('RGBA')
    tmp = Image.new('RGBA', im.size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(tmp)
    for line in pred:
        for box in line.cuts:
            draw.rectangle(box, fill=next(cmap))
    im = Image.alpha_composite(im, tmp)
    im.save('high_{}'.format(os.path.basename(fname)))
1,081
24.761905
70
py
kraken
kraken-main/kraken/contrib/print_word_spreader.py
#!/usr/bin/env python #2020, Bruce Robertson #Master file at https://github.com/brobertson/Lace2-tools/blob/master/normalize_hocr.py import html, os, sys, argparse from statistics import mean from lxml import etree from PIL import Image #a custom exception to indicate when a page or other element doesn't #have a bounding box where we would expect it. class BboxError(Exception): pass #parse the arguments parser = argparse.ArgumentParser(description='''Convert kraken hocr output so that word bounding boxes are very likely to enclose the words, plus some space. This removes all spans of class ocrx_word that have single space text content. Its output is namespaced XHTML.''') parser.add_argument('--inputDir', help='Path to directory where source files are found', required=True) parser.add_argument('--outputDir', help='Path to directory where output is stored', required=True) parser.add_argument('--imageDir', help='Path to directory where images corresponding to the html files are stored.', required=False) parser.add_argument('-c', '--confidenceSummary', default=False, action="store_true", help="store summaries of word confidence in xhtml data- attributes and cut all material after the first ; from the word span title attribute, making their mouseover popups less obtrusive.") parser.add_argument('-f', '--fixBigWordSpans', default=False, action="store_true", help="fix word_span elements whose bbox area is greater than a sixth of the whole page area by assigning them the bbox of the previous word.") parser.add_argument('-s', '--shareSpaceSpans', default=False, action="store_true", help="normalize hocr output from kraken, which assigns a word to every space and gives it a bbox. This removes those space words and assigns their area to the words on either side, with some space in between, generating output more like Ocropus and tesseract.") parser.add_argument("-v", "--verbose", help="increase output verbosity", default=False, action="store_true") args = parser.parse_args() def get_bbox_val(span, position): try: parts = html.unescape(span.get('title')).split(';') bbox_string = "" for part in parts: part = part.strip() if part.startswith('bbox'): bbox_string = part if bbox_string == "": if (args.verbose): print("couldn't find the bbox part!") return int(bbox_string.split(' ')[position+1]) except Exception as e: #print("Exception getting title element on span {}".format(etree.tostring(span))) if (args.verbose): print(e) print("... 
therefore raising BboxError") raise BboxError def get_bbox_area(span): try: width = get_bbox_val(span,2) - get_bbox_val(span,0) height = get_bbox_val(span,3) - get_bbox_val(span,1) area = width * height if (args.verbose): print("this element's area is " + str(area)) return area except Exception as e: #print("Exception getting area on span {}".format(etree.tostring(span))) raise def set_bbox_value(span, position, val): try: parts = span.get('title').split(';') except Exception as e: print("Exception getting title element on span id {}.".format(span.get('id'))) raise BboxError bbox_parts = parts[0].split(' ') bbox_parts[position + 1] = str(val) bbox_out = ' '.join(bbox_parts) parts[0] = bbox_out parts_out = ';'.join(parts) span.set('title', parts_out) def share_space_spans(treeIn): right_max_fudge_factor = 7 left_max_fudge_factor = 5 space_spans = treeIn.xpath("//html:span[@class='ocrx_word'][text()=' ']",namespaces={'html':"http://www.w3.org/1999/xhtml"}) #print('space spans: {}'.format(len(space_spans))) for space_span in space_spans: try: previous_span = space_span.getprevious() except Exception as e: print("Exception on parsing previous span with space id {}".format(space_span.get('id'))) print(e) raise try: next_span = space_span.getnext() except Exception as e: print("Exception on parsing next span with space id {}".format(space_span.get('id'))) print(e) raise #check that we have both if ((not previous_span is None) and (not next_span is None)): #this means that there is both a previous and a next if (args.verbose) : print("***") print("space_span title: {}".format(space_span.get('title'))) print("previous span title: {}".format(previous_span.get('title'))) print("next span title: {}".format(next_span.get('title'))) left_pos = get_bbox_val(previous_span,2) right_pos = get_bbox_val(next_span,0) middle = int((left_pos + right_pos) / 2) third = int((right_pos - left_pos) / 3) left_fudge = min(third,left_max_fudge_factor) right_fudge = min(third,right_max_fudge_factor) if (args.verbose) : print("left side: {0}; right side: {1}; middle: {2}".format(left_pos, right_pos, middle)) set_bbox_value(previous_span, 2, left_pos + left_fudge) set_bbox_value(next_span, 0, right_pos - right_fudge) if (args.verbose): print(previous_span.text) print("previous_span new title: {}".format(previous_span.get('title'))) print("next_span new title: {}".format(next_span.get('title'))) #now remove the space span, no matter what space_span.getparent().remove(space_span) def confidence_summary(treeIn): word_spans = treeIn.xpath("//html:span[@class='ocrx_word']",namespaces={'html':"http://www.w3.org/1999/xhtml"}) for word_span in word_spans: try: #this gets the confidence values for each letter and represents them as a string list word_data = word_span.get('title').split(';') confs_string = word_data[1].split(' ')[2:] bbox_only = word_data[0] #convert to floats for math operations confs = [float(i) for i in confs_string] minimum = round(min(confs),2) average = round(mean(confs),2) #add attributes with these summary values word_span.set('data-min-confidence',str(minimum)) word_span.set('data-average-confidence',str(average)) word_span.set('title', bbox_only) except Exception as e: #there's not much to do if this goes wrong pass def push_edge_spans_to_borders_of_line(treeIn): first_spans = treeIn.xpath("//html:span[@class='ocr_line']/html:span[@class='ocrx_word'][1]",namespaces={'html':"http://www.w3.org/1999/xhtml"}) for span in first_spans: if (args.verbose): print("first span title: {}".format(span.get('title'))) 
parent = span.getparent() line_l_edge = get_bbox_val(parent, 0) line_l_edge = line_l_edge + 1 if (args.verbose): print("line_l_edge {}".format(line_l_edge)) set_bbox_value(span, 0, line_l_edge) last_spans = treeIn.xpath("//html:span[@class='ocr_line']/html:span[@class='ocrx_word'][last()]",namespaces={'html':"http://www.w3.org/1999/xhtml"}) for span in last_spans: parent = span.getparent() line_r_edge = get_bbox_val(parent,2) line_r_edge = line_r_edge - 1 set_bbox_value(span,2,line_r_edge) def fix_word_span_area(treeIn): word_spans = treeIn.xpath("//html:span[@class='ocrx_word'] | //html:span[@class='ocr_word']",namespaces={'html':"http://www.w3.org/1999/xhtml"}) image_area = get_bbox_area(treeIn.xpath("//html:div[@class='ocr_page'][1]",namespaces={'html':"http://www.w3.org/1999/xhtml"})[0]) #print("image area: {}".format(image_area)) for span in word_spans: area = get_bbox_area(span) #print("word area:",area) if (area > image_area / 6): original_area = span.get('title') my_next = span.getnext() my_previous = span.getprevious() if (my_next): span.set('title',my_next.get('title')) elif (my_previous): span.set('title',my_previous.get('title')) else: span.set('title',span.getparent().get('title')) print("big word area, changing title attribute {}".format(original_area)) print(etree.tostring(span)) #span.getparent().remove(span) def clean_ocr_page_title(xhtml, file_name): ocr_page = xhtml.xpath("//html:div[@class='ocr_page'][1]",namespaces={'html':"http://www.w3.org/1999/xhtml"})[0] #print(ocr_page) ocr_page_title = ocr_page.get('title') #print(ocr_page_title) sections = ocr_page_title.split(';') #print(sections) new_sections = "image " + (file_name.rsplit('.', 1)[0] + '.png') + "; " + sections[0] #print(new_sections) ocr_page.set('title',new_sections) return xhtml def rewrite_ocr_page_title(xhtml, file_name, image_x, image_y): ocr_page = xhtml.xpath("//html:div[@class='ocr_page'][1]",namespaces={'html':"http://www.w3.org/1999/xhtml"})[0] image_file_name = (file_name.rsplit('.', 1)[0] + '.png') image_path = os.path.join(args.imageDir, image_file_name) image = Image.open(image_path) image_x, image_y = image.size new_title = "bbox 0 0 " + str(image_x) + " " + str(image_y) + ";image " + image_file_name ocr_page.set('title',new_title) return xhtml if not(os.path.isdir(args.inputDir)): print('Input directory "'+args.inputDir+'" does not exist.\n\tExiting ...') sys.exit(1) #Create the output directory if it doesn't exist try: if not os.path.exists(args.outputDir): os.makedirs(args.outputDir, exist_ok=True) except Exception as e: print("Error on creating output directory '" + args.outputDir + "':\n\t" + str(e) + "\n\tExiting ...") sys.exit(1) if (args.verbose): print("Input dir:", args.inputDir) print("Output dir:", args.outputDir) #everything looks good. 
Let's loop over the html files in inputDir xslt_to_xhtml = etree.XML('''\ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" xmlns:html='http://www.w3.org/1999/xhtml'> <xsl:template match="*"> <xsl:element name="html:{local-name(.)}"> <xsl:apply-templates select="@*|*|text()"/> </xsl:element> </xsl:template> <xsl:template match="@*"> <xsl:attribute name="{name(.)}"><xsl:value-of select="."/></xsl:attribute> </xsl:template> </xsl:stylesheet>''') transform_to_xhtml = etree.XSLT(xslt_to_xhtml) EXTENSIONS = ('.hocr','.html', '.htm') XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" for root, dirs, files in os.walk(args.inputDir): for file_name in files: if file_name.endswith(EXTENSIONS): print(file_name) with open(os.path.join(args.inputDir,file_name)) as file: # Use file to refer to the file try: tree = etree.parse(file) find_xhtml_body = etree.ETXPath("//{%s}body" % XHTML_NAMESPACE) results = find_xhtml_body(tree) xhtml = transform_to_xhtml(tree) if (args.fixBigWordSpans): try: fix_word_span_area(xhtml) except BboxError: if (args.verbose): print("we get a bbox error while trying to fix word span areas on " + file_name + " so we'll try to rewrite the page bbox using imageDir") try: #todo check that args.imageDir is set if not(os.path.isdir(args.imageDir)): print("To correct these files, the imageDir argument has to be set to a valid directory.") exit(1) #remove '.html' and add '.png' image_file_name = file_name[:-5] + '.png' image_path = os.path.join(args.imageDir, image_file_name) image = Image.open(image_path) image_x, image_y = image.size rewrite_ocr_page_title(xhtml, file_name, image_x, image_y) fix_word_span_area(xhtml) except Exception: print("something went wrong trying to fix the page bbox on this file. Aborting.") raise clean_ocr_page_title(xhtml, file_name) if (args.shareSpaceSpans): share_space_spans(xhtml) if (args.confidenceSummary): confidence_summary(xhtml) #push_edge_spans_to_borders_of_line(xhtml) xhtml.write(os.path.join(args.outputDir,file_name),pretty_print=True, xml_declaration=True, encoding="utf-8") except Exception as e: print("This exception was thrown on file {}".format(file_name)) print(e)
13,880
49.111913
344
py
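The bbox accessors in the script above all revolve around parsing the hOCR `title` attribute, which packs `bbox x0 y0 x1 y1` together with other `;`-separated fields. A standalone sketch of that parsing (pure Python; the sample title string is made up for illustration):

def parse_bbox(title: str) -> list:
    # The title attribute holds ';'-separated fields; the bbox field
    # is 'bbox x0 y0 x1 y1'.
    for part in title.split(';'):
        part = part.strip()
        if part.startswith('bbox'):
            return [int(v) for v in part.split(' ')[1:]]
    raise ValueError('no bbox field in title')

title = "bbox 120 45 310 78; x_wconf 91"   # hypothetical hOCR word title
x0, y0, x1, y1 = parse_bbox(title)
assert (x1 - x0) * (y1 - y0) == 190 * 33   # width * height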
kraken
kraken-main/kraken/contrib/repolygonize.py
#!/usr/bin/env python """ Reads in a bunch of ALTO documents and repolygonizes the lines contained with the kraken polygonizer. """ import click @click.command() @click.option('-f', '--format-type', type=click.Choice(['alto', 'page']), default='alto', help='Sets the input document format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines, polygons, and a ' 'link to source images.') @click.option('-tl', '--topline', 'topline', show_default=True, flag_value='topline', help='Switch for the baseline location in the scripts. ' 'Set to topline if the data is annotated with a hanging baseline, as is ' 'common with Hebrew, Bengali, Devanagari, etc. Set to ' ' centerline for scripts annotated with a central line.') @click.option('-cl', '--centerline', 'topline', flag_value='centerline') @click.option('-bl', '--baseline', 'topline', flag_value='baseline', default='baseline') @click.argument('files', nargs=-1) def cli(format_type, topline, files): """ A small script repolygonizing line boundaries in ALTO or PageXML files. """ if len(files) == 0: ctx = click.get_current_context() click.echo(ctx.get_help()) ctx.exit() from lxml import etree from os.path import splitext from itertools import groupby from kraken.lib import xml from PIL import Image from kraken.lib.segmentation import calculate_polygonal_environment def _repl_alto(fname, polygons): with open(fname, 'rb') as fp: doc = etree.parse(fp) lines = doc.findall('.//{*}TextLine') idx = 0 for line in lines: if line.get('BASELINE') is None: continue pol = line.find('./{*}Shape/{*}Polygon') if pol is not None: if polygons[idx] is not None: pol.attrib['POINTS'] = ' '.join([str(coord) for pt in polygons[idx] for coord in pt]) else: pol.attrib['POINTS'] = '' idx += 1 with open(splitext(fname)[0] + '_rewrite.xml', 'wb') as fp: doc.write(fp, encoding='UTF-8', xml_declaration=True) def _parse_page_coords(coords): points = [x for x in coords.split(' ')] points = [int(c) for point in points for c in point.split(',')] pts = zip(points[::2], points[1::2]) return [k for k, g in groupby(pts)] def _repl_page(fname, polygons): with open(fname, 'rb') as fp: doc = etree.parse(fp) lines = doc.findall('.//{*}TextLine') idx = 0 for line in lines: base = line.find('./{*}Baseline') if base is not None and not base.get('points').isspace() and len(base.get('points')): try: _parse_page_coords(base.get('points')) except Exception: continue else: continue pol = line.find('./{*}Coords') if pol is not None: if polygons[idx] is not None: pol.attrib['points'] = ' '.join([','.join([str(x) for x in pt]) for pt in polygons[idx]]) else: pol.attrib['points'] = '' idx += 1 with open(splitext(fname)[0] + '_rewrite.xml', 'wb') as fp: doc.write(fp, encoding='UTF-8', xml_declaration=True) if format_type == 'page': parse_fn = xml.parse_page repl_fn = _repl_page else: parse_fn = xml.parse_alto repl_fn = _repl_alto topline = {'topline': True, 'baseline': False, 'centerline': None}[topline] for doc in files: click.echo(f'Processing {doc} ') seg = parse_fn(doc) im = Image.open(seg['image']).convert('L') baselines = [] for x in seg['lines']: bl = x['baseline'] if x['baseline'] is not None else [0, 0] baselines.append(bl) o = calculate_polygonal_environment(im, baselines, scale=(1800, 0), topline=topline) repl_fn(doc, o) if __name__ == '__main__': cli()
4,374
38.0625
113
py
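`_parse_page_coords` above turns a PageXML points string (`'x1,y1 x2,y2 …'`) into integer pairs and drops consecutive duplicates with `groupby`. The same logic in isolation:

from itertools import groupby

def parse_page_coords(coords: str) -> list:
    points = coords.split(' ')
    flat = [int(c) for point in points for c in point.split(',')]
    pts = zip(flat[::2], flat[1::2])
    # groupby collapses runs of identical consecutive points
    return [k for k, _ in groupby(pts)]

assert parse_page_coords('0,0 10,0 10,0 10,5') == [(0, 0), (10, 0), (10, 5)]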
kraken
kraken-main/kraken/contrib/segmentation_overlay.py
#!/usr/bin/env python """ Draws a transparent overlay of baseline segmenter output over a list of image files. """ import re import os import click import unicodedata from itertools import cycle from collections import defaultdict cmap = cycle([(230, 25, 75, 127), (60, 180, 75, 127)]) bmap = (0, 130, 200, 255) def slugify(value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. """ value = unicodedata.normalize('NFKD', value) value = re.sub(r'[^\w\s-]', '', value).strip().lower() value = re.sub(r'[-\s]+', '-', value) return value @click.command() @click.option('-f', '--format-type', type=click.Choice(['xml', 'alto', 'page']), default='xml', help='Sets the input document format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines, polygons, and a ' 'link to source images.') @click.option('-i', '--model', default=None, show_default=True, type=click.Path(exists=True), help='Baseline detection model to use. Overrides format type and expects image files as input.') @click.option('-d', '--text-direction', default='horizontal-lr', show_default=True, type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']), help='Sets principal text direction') @click.option('--repolygonize/--no-repolygonize', show_default=True, default=False, help='Repolygonizes line data in ALTO/PageXML ' 'files. This ensures that the trained model is compatible with the ' 'segmenter in kraken even if the original image files either do ' 'not contain anything but transcriptions and baseline information ' 'or the polygon data was created using a different method. Will ' 'be ignored in `path` mode. Note, that this option will be slow ' 'and will not scale input images to the same size as the segmenter ' 'does.') @click.argument('files', nargs=-1) def cli(format_type, model, text_direction, repolygonize, files): """ A script producing overlays of lines and regions from either ALTO or PageXML files or run a model to do the same. 
""" if len(files) == 0: ctx = click.get_current_context() click.echo(ctx.get_help()) ctx.exit() from PIL import Image, ImageDraw from kraken.lib import vgsl, xml, segmentation from kraken import blla if model is None: if format_type == 'xml': fn = xml.parse_xml elif format_type == 'alto': fn = xml.parse_alto else: fn = xml.parse_page for doc in files: click.echo(f'Processing {doc} ', nl=False) data = fn(doc) if repolygonize: im = Image.open(data['image']).convert('L') lines = data['lines'] polygons = segmentation.calculate_polygonal_environment(im, [x['baseline'] for x in lines], scale=(1200, 0)) data['lines'] = [{'boundary': polygon, 'baseline': orig['baseline'], 'text': orig['text'], 'tags': orig['tags']} for orig, polygon in zip(lines, polygons)] # reorder lines by type lines = defaultdict(list) for line in data['lines']: lines[line['tags']['type']].append(line) im = Image.open(data['image']).convert('RGBA') for t, ls in lines.items(): tmp = Image.new('RGBA', im.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(tmp) for idx, line in enumerate(ls): c = next(cmap) if line['boundary']: draw.polygon([tuple(x) for x in line['boundary']], fill=c, outline=c[:3]) if line['baseline']: draw.line([tuple(x) for x in line['baseline']], fill=bmap, width=2, joint='curve') draw.text(line['baseline'][0], str(idx), fill=(0, 0, 0, 255)) base_image = Image.alpha_composite(im, tmp) base_image.save(f'high_{os.path.basename(doc)}_lines_{slugify(t)}.png') for t, regs in data['regions'].items(): tmp = Image.new('RGBA', im.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(tmp) for reg in regs: c = next(cmap) try: draw.polygon(reg, fill=c, outline=c[:3]) except Exception: pass base_image = Image.alpha_composite(im, tmp) base_image.save(f'high_{os.path.basename(doc)}_regions_{slugify(t)}.png') click.secho('\u2713', fg='green') else: net = vgsl.TorchVGSLModel.load_model(model) for doc in files: click.echo(f'Processing {doc} ', nl=False) im = Image.open(doc) res = blla.segment(im, model=net, text_direction=text_direction) # reorder lines by type lines = defaultdict(list) for line in res['lines']: lines[line['tags']['type']].append(line) im = im.convert('RGBA') for t, ls in lines.items(): tmp = Image.new('RGBA', im.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(tmp) for idx, line in enumerate(ls): c = next(cmap) draw.polygon([tuple(x) for x in line['boundary']], fill=c, outline=c[:3]) draw.line([tuple(x) for x in line['baseline']], fill=bmap, width=2, joint='curve') draw.text(line['baseline'][0], str(idx), fill=(0, 0, 0, 255)) base_image = Image.alpha_composite(im, tmp) base_image.save(f'high_{os.path.basename(doc)}_lines_{slugify(t)}.png') for t, regs in res['regions'].items(): tmp = Image.new('RGBA', im.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(tmp) for reg in regs: c = next(cmap) try: draw.polygon([tuple(x) for x in reg], fill=c, outline=c[:3]) except Exception: pass base_image = Image.alpha_composite(im, tmp) base_image.save(f'high_{os.path.basename(doc)}_regions_{slugify(t)}.png') click.secho('\u2713', fg='green') if __name__ == '__main__': cli()
6,737
43.622517
124
py
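The `slugify` helper in the overlay script above is what keeps the per-type output filenames filesystem-safe: NFKD-normalize, drop anything that is not a word character, space, or hyphen, then collapse whitespace runs into single hyphens. Its behaviour in isolation:

import re
import unicodedata

def slugify(value: str) -> str:
    value = unicodedata.normalize('NFKD', value)
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)

assert slugify('Default Line!') == 'default-line'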
kraken
kraken-main/kraken/contrib/generate_scripts.py
#!/usr/bin/env python3
"""
Script fetching the latest unicode Scripts.txt and dumping it as json.
"""
from urllib import request
import json
import regex

uri = 'http://www.unicode.org/Public/UNIDATA/Scripts.txt'

re = regex.compile(r'^(?P<start>[0-9A-F]{4,6})(..(?P<end>[0-9A-F]{4,6}))?\s+; (?P<name>[A-Za-z]+)')

with open('scripts.json', 'w') as fp, request.urlopen(uri) as req:
    d = []
    for line in req:
        line = line.decode('utf-8')
        if line.startswith('#') or line.strip() == '':
            continue
        m = re.match(line)
        if m:
            print(line)
            start = int(m.group('start'), base=16)
            end = start
            if m.group('end'):
                end = int(m.group('end'), base=16)
            name = m.group('name')
            if len(d) > 0 and d[-1][2] == name and (start - 1 == d[-1][1] or start - 1 == d[-1][0]):
                print('merging {} and ({}, {}, {})'.format(d[-1], start, end, name))
                d[-1] = (d[-1][0], end, name)
            else:
                d.append((start, end if end != start else None, name))
    json.dump(d, fp)
1,125
33.121212
100
py
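The JSON produced by the script above is a list of `(start, end-or-null, name)` code point ranges. A sketch of consuming it to look up the script of a character (the two sample ranges are abbreviated from real Scripts.txt data):

def script_of(cp: int, ranges) -> str:
    # A null/None end marks a single-codepoint range.
    for start, end, name in ranges:
        if start <= cp <= (end if end is not None else start):
            return name
    return 'Unknown'

ranges = [(0x0041, 0x005A, 'Latin'),   # A-Z
          (0x0391, 0x03A1, 'Greek')]   # Alpha-Rho
assert script_of(ord('K'), ranges) == 'Latin'
assert script_of(0x0394, ranges) == 'Greek'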
kraken
kraken-main/kraken/contrib/forced_alignment_overlay.py
#!/usr/bin/env python """ Draws a transparent overlay of the forced alignment output over the input image. """ import re import os import click import unicodedata from lxml import etree from itertools import cycle from unicodedata import normalize cmap = cycle([(230, 25, 75, 127), (60, 180, 75, 127), (255, 225, 25, 127), (0, 130, 200, 127), (245, 130, 48, 127), (145, 30, 180, 127), (70, 240, 240, 127)]) def slugify(value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. """ value = unicodedata.normalize('NFKD', value) value = re.sub(r'[^\w\s-]', '', value).strip().lower() value = re.sub(r'[-\s]+', '-', value) return value def _repl_alto(fname, cuts): with open(fname, 'rb') as fp: doc = etree.parse(fp) lines = doc.findall('.//{*}TextLine') char_idx = 0 for line, line_cuts in zip(lines, cuts): idx = 0 for el in line: if el.tag.endswith('Shape'): continue elif el.tag.endswith('SP'): idx += 1 elif el.tag.endswith('String'): str_len = len(el.get('CONTENT')) # clear out all for chld in el: if chld.tag.endswith('Glyph'): el.remove(chld) for char in line_cuts[idx:str_len]: glyph = etree.SubElement(el, 'Glyph') glyph.set('ID', f'char_{char_idx}') char_idx += 1 glyph.set('CONTENT', char[0]) glyph.set('GC', str(char[2])) pol = etree.SubElement(etree.SubElement(glyph, 'Shape'), 'Polygon') pol.set('POINTS', ' '.join([str(coord) for pt in char[1] for coord in pt])) idx += str_len with open(f'{os.path.basename(fname)}_algn.xml', 'wb') as fp: doc.write(fp, encoding='UTF-8', xml_declaration=True) def _repl_page(fname, cuts): with open(fname, 'rb') as fp: doc = etree.parse(fp) lines = doc.findall('.//{*}TextLine') for line, line_cuts in zip(lines, cuts): glyphs = line.findall('../{*}Glyph/{*}Coords') for glyph, cut in zip(glyphs, line_cuts): glyph.attrib['points'] = ' '.join([','.join([str(x) for x in pt]) for pt in cut]) with open(f'{os.path.basename(fname)}_algn.xml', 'wb') as fp: doc.write(fp, encoding='UTF-8', xml_declaration=True) @click.command() @click.option('-f', '--format-type', type=click.Choice(['alto', 'page']), default='page', help='Sets the input document format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines, polygons, and a ' 'link to source images.') @click.option('-i', '--model', default=None, show_default=True, type=click.Path(exists=True), help='Transcription model to use.') @click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None, help='Ground truth normalization') @click.option('-o', '--output', type=click.Choice(['xml', 'overlay']), show_default=True, default='overlay', help='Output mode. Either page or ' 'alto for xml output, overlay for image overlays.') @click.argument('files', nargs=-1) def cli(format_type, model, normalization, output, files): """ A script producing overlays of lines and regions from either ALTO or PageXML files or run a model to do the same. 
""" if len(files) == 0: ctx = click.get_current_context() click.echo(ctx.get_help()) ctx.exit() from PIL import Image, ImageDraw from kraken.lib import models, xml from kraken import align if format_type == 'alto': fn = xml.parse_alto repl_fn = _repl_alto else: fn = xml.parse_page repl_fn = _repl_page click.echo(f'Loading model {model}') net = models.load_any(model) for doc in files: click.echo(f'Processing {doc} ', nl=False) data = fn(doc) if normalization: for line in data["lines"]: line["text"] = normalize(normalization, line["text"]) im = Image.open(data['image']).convert('RGBA') records = align.forced_align(data, net) if output == 'overlay': tmp = Image.new('RGBA', im.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(tmp) for record in records: for pol in record.cuts: c = next(cmap) draw.polygon([tuple(x) for x in pol], fill=c, outline=c[:3]) base_image = Image.alpha_composite(im, tmp) base_image.save(f'high_{os.path.basename(doc)}_algn.png') else: repl_fn(doc, records) click.secho('\u2713', fg='green') if __name__ == '__main__': cli()
5,190
36.345324
108
py
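The `-u` option of the alignment script above runs each ground truth line through `unicodedata.normalize` before alignment; whether a precomposed character matches the model output can hinge on that. A two-line illustration:

from unicodedata import normalize

s = 'e\u0301'                            # 'e' + combining acute accent
assert normalize('NFC', s) == '\u00e9'   # composed é
assert normalize('NFD', '\u00e9') == s   # and back again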
kraken
kraken-main/kraken/contrib/set_seg_options.py
#!/usr/bin/env python
"""
A script setting the metadata of segmentation models.
"""
import click
import shutil


@click.command()
@click.option('-b', '--bounding-region', multiple=True,
              help='Sets region identifiers which bound line bounding polygons')
@click.option('--topline/--baseline', default=False,
              help='Sets model line type to baseline or topline')
@click.option('--pad', show_default=True, type=(int, int), default=(0, 0),
              help='Padding (left/right, top/bottom) around the page image')
@click.argument('model', nargs=1, type=click.Path(exists=True))
def cli(bounding_region, topline, pad, model):
    """
    A script setting the metadata of segmentation models.
    """
    from kraken.lib import vgsl

    net = vgsl.TorchVGSLModel.load_model(model)
    if net.model_type != 'segmentation':
        print('Model is not a segmentation model.')
        return

    print('detectable line and region types:')
    for k, v in net.user_metadata['class_mapping']['baselines'].items():
        print(f' {k}\t{v}')
    print('Training region types:')
    for k, v in net.user_metadata['class_mapping']['regions'].items():
        print(f' {k}\t{v}')

    print(f'existing bounding regions: {net.user_metadata["bounding_regions"]}')
    if bounding_region:
        br = set(net.user_metadata["bounding_regions"])
        br_new = set(bounding_region)
        print(f'removing: {br.difference(br_new)}')
        print(f'adding: {br_new.difference(br)}')
        net.user_metadata["bounding_regions"] = bounding_region

    print(f'Model is {"topline" if "topline" in net.user_metadata and net.user_metadata["topline"] else "baseline"}')
    print(f'Setting to {"topline" if topline else "baseline"}')
    net.user_metadata['topline'] = topline

    print(f"Model has padding {net.user_metadata['hyper_params']['padding'] if 'padding' in net.user_metadata['hyper_params'] else (0, 0)}")
    print(f'Setting to {pad}')
    net.user_metadata['hyper_params']['padding'] = pad

    shutil.copy(model, f'{model}.bak')
    net.save_model(model)


if __name__ == '__main__':
    cli()
2,098
35.824561
140
py
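The bounding-region update in the script above is plain set arithmetic: report what the new value removes and adds relative to the stored metadata, then overwrite. The same diff in isolation (region names are hypothetical):

old = {'text', 'table'}          # stored bounding regions
new = {'text', 'marginalia'}     # replacement from the CLI

assert old - new == {'table'}        # removed
assert new - old == {'marginalia'}   # added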
kraken
kraken-main/kraken/contrib/extract_lines.py
#! /usr/bin/env python import click @click.command() @click.option('-f', '--format-type', type=click.Choice(['xml', 'alto', 'page', 'binary']), default='xml', help='Sets the input document format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines, polygons, and a ' 'link to source images.') @click.option('-i', '--model', default=None, show_default=True, type=click.Path(exists=True), help='Baseline detection model to use. Overrides format type and expects image files as input.') @click.option('--repolygonize/--no-repolygonize', show_default=True, default=False, help='Repolygonizes line data in ALTO/PageXML ' 'files. This ensures that the trained model is compatible with the ' 'segmenter in kraken even if the original image files either do ' 'not contain anything but transcriptions and baseline information ' 'or the polygon data was created using a different method. Will ' 'be ignored in `path` mode. Note, that this option will be slow ' 'and will not scale input images to the same size as the segmenter ' 'does.') @click.argument('files', nargs=-1) def cli(format_type, model, repolygonize, files): """ A small script extracting rectified line polygons as defined in either ALTO or PageXML files or run a model to do the same. """ if len(files) == 0: ctx = click.get_current_context() click.echo(ctx.get_help()) ctx.exit() from PIL import Image from os.path import splitext from kraken import blla from kraken.lib import segmentation, vgsl, xml import io import json import pyarrow as pa if model is None: for doc in files: click.echo(f'Processing {doc} ', nl=False) if format_type != 'binary': data = xml.preparse_xml_data([doc], format_type, repolygonize=repolygonize) if len(data) > 0: bounds = {'type': 'baselines', 'lines': [{'boundary': t['boundary'], 'baseline': t['baseline'], 'text': t['text']} for t in data]} for idx, (im, box) in enumerate(segmentation.extract_polygons(Image.open(data[0]['image']), bounds)): click.echo('.', nl=False) im.save('{}.{}.jpg'.format(splitext(data[0]['image'])[0], idx)) with open('{}.{}.gt.txt'.format(splitext(data[0]['image'])[0], idx), 'w') as fp: fp.write(box['text']) else: with pa.memory_map(doc, 'rb') as source: ds_table = pa.ipc.open_file(source).read_all() raw_metadata = ds_table.schema.metadata if not raw_metadata or b'lines' not in raw_metadata: raise ValueError(f'{doc} does not contain a valid metadata record.') metadata = json.loads(raw_metadata[b'lines']) for idx in range(metadata['counts']['all']): sample = ds_table.column('lines')[idx].as_py() im = Image.open(io.BytesIO(sample['im'])) im.save('{}.{}.jpg'.format(splitext(doc)[0], idx)) with open('{}.{}.gt.txt'.format(splitext(doc)[0], idx), 'w') as fp: fp.write(sample['text']) else: net = vgsl.TorchVGSLModel.load_model(model) for doc in files: click.echo(f'Processing {doc} ', nl=False) full_im = Image.open(doc) bounds = blla.segment(full_im, model=net) for idx, (im, box) in enumerate(segmentation.extract_polygons(full_im, bounds)): click.echo('.', nl=False) im.save('{}.{}.jpg'.format(splitext(doc)[0], idx)) if __name__ == '__main__': cli()
3,955
49.075949
150
py
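In `binary` mode the extraction script above relies on kraken's Arrow IPC dataset layout: the line count sits in a JSON blob under the `lines` key of the schema metadata, and each row's `lines` column holds a struct with `im` (encoded image bytes) and `text`. A hedged sketch of that read path, mirroring the calls used above (assumes pyarrow and an existing dataset file in this layout):

import io
import json

import pyarrow as pa
from PIL import Image

def iter_binary_lines(path: str):
    # Layout assumed from the extraction code above; not a general
    # Arrow convention.
    with pa.memory_map(path, 'rb') as source:
        table = pa.ipc.open_file(source).read_all()
    meta = json.loads(table.schema.metadata[b'lines'])
    for idx in range(meta['counts']['all']):
        sample = table.column('lines')[idx].as_py()
        yield Image.open(io.BytesIO(sample['im'])), sample['text']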
kraken
kraken-main/kraken/contrib/baselineset_overlay.py
#! /usr/bin/env python
"""
Produces semi-transparent neural segmenter output overlays
"""
import click


@click.command()
@click.argument('files', nargs=-1)
def cli(files):
    import torch
    from PIL import Image
    from os.path import splitext
    import torchvision.transforms as tf
    from kraken.lib import dataset

    batch, channels, height, width = 1, 3, 1200, 0
    transforms = dataset.ImageInputTransforms(batch, height, width, channels, 0, valid_norm=False)
    torch.set_num_threads(1)

    ds = dataset.BaselineSet(files, im_transforms=transforms, mode='xml')

    for idx, batch in enumerate(ds):
        img = ds.imgs[idx]
        print(img)
        im = Image.open(img)
        res_tf = tf.Compose(transforms.transforms[:2])
        scal_im = res_tf(im)
        o = batch['target'].numpy()
        heat = Image.fromarray((o[ds.class_mapping['baselines']['default']]*255).astype('uint8'))
        heat.save(splitext(img)[0] + '.heat.png')
        overlay = Image.new('RGBA', scal_im.size, (0, 130, 200, 255))
        bl = Image.composite(overlay, scal_im.convert('RGBA'), heat)
        heat = Image.fromarray((o[ds.class_mapping['aux']['_start_separator']]*255).astype('uint8'))
        overlay = Image.new('RGBA', scal_im.size, (230, 25, 75, 255))
        bl = Image.composite(overlay, bl, heat)
        heat = Image.fromarray((o[ds.class_mapping['aux']['_end_separator']]*255).astype('uint8'))
        overlay = Image.new('RGBA', scal_im.size, (60, 180, 75, 255))
        Image.composite(overlay, bl, heat).save(splitext(img)[0] + '.overlay.png')
        del o
        del im


if __name__ == '__main__':
    cli()
1,640
33.1875
100
py
kraken
kraken-main/kraken/contrib/hyperparameters/tune_pretraining.py
#!/usr/bin/env python """ A script for a grid search over pretraining hyperparameters. """ import click from functools import partial from ray import tune from ray.tune.integration.pytorch_lightning import TuneReportCallback from kraken.lib.default_specs import RECOGNITION_PRETRAIN_HYPER_PARAMS, RECOGNITION_SPEC from kraken.lib.pretrain.model import PretrainDataModule, RecognitionPretrainModel from kraken.ketos.util import _validate_manifests import pytorch_lightning as pl from pytorch_lightning import seed_everything config = {'lrate': tune.loguniform(1e-8, 1e-2), 'num_negatives': tune.qrandint(1, 4, 1), 'mask_prob': tune.loguniform(0.01, 0.2), 'mask_width': tune.qrandint(2, 8, 2)} resources_per_trial = {"cpu": 8, "gpu": 0.5} def train_tune(config, training_data=None, epochs=100, spec=RECOGNITION_SPEC): hyper_params = RECOGNITION_PRETRAIN_HYPER_PARAMS.copy() hyper_params.update(config) model = RecognitionPretrainModel(hyper_params=hyper_params, output='./model', spec=spec) data_module = PretrainDataModule(batch_size=hyper_params.pop('batch_size'), pad=hyper_params.pop('pad'), augment=hyper_params.pop('augment'), training_data=training_data, num_workers=resources_per_trial['cpu'], height=model.height, width=model.width, channels=model.channels, format_type='binary') callback = TuneReportCallback({'loss': 'CE'}, on='validation_end') trainer = pl.Trainer(max_epochs=epochs, accelerator='gpu', devices=1, callbacks=[callback], enable_progress_bar=False) trainer.fit(model, datamodule=data_module) @click.command() @click.option('-v', '--verbose', default=0, count=True) @click.option('-s', '--seed', default=42, type=click.INT, help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to ' 'ensure reproducible random splits of data') @click.option('-o', '--output', show_default=True, type=click.Path(), default='pretrain_hyper', help='output directory') @click.option('-n', '--num-samples', show_default=True, type=int, default=100, help='Number of samples to train') @click.option('-N', '--epochs', show_default=True, type=int, default=10, help='Maximum number of epochs to train per sample') @click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC, help='VGSL spec of the network to train.') @click.option('-t', '--training-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with additional paths to training data') @click.argument('files', nargs=-1) def cli(verbose, seed, output, num_samples, epochs, spec, training_files, files): files = list(files) if training_files: files.extend(training_files) if not files: raise click.UsageError('No training data was provided to the search command. Use `-t` or the `files` argument.') seed_everything(seed, workers=True) analysis = tune.run(partial(train_tune, training_data=files, epochs=epochs, spec=spec), local_dir=output, num_samples=num_samples, resources_per_trial=resources_per_trial, config=config) click.echo("Best hyperparameters found were: ", analysis.get_best_config(metric='accuracy', mode='max')) if __name__ == '__main__': cli()
3,872
43.011364
142
py
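The search above is the standard legacy `tune.run` pattern: a trainable that reports a metric, a config of sampled distributions, and `get_best_config` at the end. A toy, self-contained version of the same pattern (assumes ray[tune] with the same API generation as the script above; the quadratic stands in for a real training run):

from ray import tune

def trainable(config):
    # Stand-in objective: distance of the sampled lrate from 1e-4.
    loss = (config['lrate'] - 1e-4) ** 2
    tune.report(loss=loss)

analysis = tune.run(trainable,
                    config={'lrate': tune.loguniform(1e-8, 1e-2)},
                    num_samples=8)
print(analysis.get_best_config(metric='loss', mode='min'))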
kraken
kraken-main/kraken/contrib/hyperparameters/tune_training.py
#!/usr/bin/env python
"""
A script for a grid search over pretraining hyperparameters.
"""
import sys
from functools import partial

from ray import tune
from ray.tune.integration.pytorch_lightning import TuneReportCallback

from kraken.lib.default_specs import RECOGNITION_PRETRAIN_HYPER_PARAMS, RECOGNITION_SPEC
from kraken.lib.pretrain.model import PretrainDataModule, RecognitionPretrainModel

from ray.tune.schedulers import ASHAScheduler

import pytorch_lightning as pl

config = {'lrate': tune.loguniform(1e-8, 1e-2),
          'num_negatives': tune.qrandint(2, 100, 8),
          'mask_prob': tune.loguniform(0.01, 0.2),
          'mask_width': tune.qrandint(2, 8, 2)}

resources_per_trial = {"cpu": 8, "gpu": 0.5}


def train_tune(config, training_data=None, epochs=100):
    hyper_params = RECOGNITION_PRETRAIN_HYPER_PARAMS.copy()
    hyper_params.update(config)

    model = RecognitionPretrainModel(hyper_params=hyper_params,
                                     output='model',
                                     spec=RECOGNITION_SPEC)

    data_module = PretrainDataModule(batch_size=hyper_params.pop('batch_size'),
                                     pad=hyper_params.pop('pad'),
                                     augment=hyper_params.pop('augment'),
                                     training_data=training_data,
                                     num_workers=resources_per_trial['cpu'],
                                     height=model.height,
                                     width=model.width,
                                     channels=model.channels,
                                     format_type='binary')

    callback = TuneReportCallback({'loss': 'CE'}, on='validation_end')
    trainer = pl.Trainer(max_epochs=epochs,
                         gpus=1,
                         callbacks=[callback],
                         enable_progress_bar=False)
    # the data module has to be handed to fit(), otherwise it is never used
    trainer.fit(model, datamodule=data_module)


analysis = tune.run(partial(train_tune, training_data=sys.argv[2:]),
                    local_dir=sys.argv[1],
                    num_samples=100,
                    resources_per_trial=resources_per_trial,
                    config=config)

# only 'loss' is reported by the TuneReportCallback above
print("Best hyperparameters found were: ", analysis.get_best_config(metric='loss', mode='min'))
2,190
37.438596
164
py
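Both scripts rely on the metric name wired through TuneReportCallback matching the one queried from the analysis object. A hypothetical toy trainable (not part of kraken) showing that round trip in isolation:

from ray import tune

def trainable(config):
    # stand-in objective; the real scripts report the Lightning CE loss as 'loss'
    tune.report(loss=(config['x'] - 3) ** 2)

analysis = tune.run(trainable,
                    config={'x': tune.uniform(0, 10)},
                    num_samples=8)
# metric/mode must match what the trainable reports
print(analysis.get_best_config(metric='loss', mode='min'))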
kraken
kraken-main/kraken/lib/lstm.py
# flake8: noqa
from typing import Dict

from scipy.special import expit

initial_range = 0.1


class Codec(object):
    """Translate between integer codes and characters."""
    def init(self, charset):
        charset = sorted(list(set(charset)))
        self.code2char = {}  # type: Dict[int, str]
        self.char2code = {}  # type: Dict[str, int]
        for code, char in enumerate(charset):
            self.code2char[code] = char
            self.char2code[char] = code
        return self

    def size(self):
        """The total number of codes (use this for the number of output
        classes when training a classifier)."""
        return len(list(self.code2char.keys()))

    def encode(self, s):
        "Encode the string `s` into a code sequence."
        dflt = self.char2code["~"]
        return [self.char2code.get(c, dflt) for c in s]

    def decode(self, l):
        "Decode a code sequence into a string."
        s = [self.code2char.get(c, "~") for c in l]
        return s


class Network:
    def predict(self, xs):
        """Prediction is the same as forward propagation."""
        return self.forward(xs)


class Softmax(Network):
    """A logistic regression network."""
    def __init__(self, Nh, No, initial_range=0.1, rand=None):
        pass

    def ninputs(self):
        pass

    def noutputs(self):
        pass

    def forward(self, ys):
        pass

    def backward(self, deltas):
        pass


class LSTM(Network):
    """A standard LSTM network. This is a direct implementation of all the
    forward and backward propagation formulas, mainly for speed. (There is
    another, more abstract implementation as well, but that's significantly
    slower in Python due to function call overhead.)"""
    def __init__(self, ni, ns, initial=0.1, maxlen=5000):
        pass

    def init_weights(self, initial):
        pass

    def allocate(self, n):
        pass

    def reset(self, n):
        pass

    def forward(self, xs):
        pass


################################################################
# combination classifiers
################################################################


class Stacked(Network):
    """Stack two networks on top of each other."""
    def __init__(self, nets):
        self.nets = nets

    def forward(self, xs):
        pass


class Reversed(Network):
    """Run a network on the time-reversed input."""
    def __init__(self, net):
        self.net = net

    def forward(self, xs):
        pass


class Parallel(Network):
    """Run multiple networks in parallel on the same input."""
    def __init__(self, *nets):
        self.nets = nets

    def forward(self, xs):
        pass


def BIDILSTM(Ni, Ns, No):
    """A bidirectional LSTM, constructed from regular and reversed LSTMs."""
    lstm1 = LSTM(Ni, Ns)
    lstm2 = Reversed(LSTM(Ni, Ns))
    bidi = Parallel(lstm1, lstm2)
    logreg = Softmax(2*Ns, No)
    stacked = Stacked([bidi, logreg])
    return stacked


class SeqRecognizer(Network):
    """Perform sequence recognition using BIDILSTM and alignment."""
    def __init__(self, ninput, nstates, noutput=-1, codec=None, normalize=None):
        self.Ni = ninput
        if codec:
            noutput = codec.size()
        self.No = noutput
        self.lstm = BIDILSTM(ninput, nstates, noutput)
        self.codec = codec

    def translate_back(self, output):
        pass

    def translate_back_locations(self, output):
        pass

    def predictSequence(self, xs):
        "Predict an integer sequence of codes."
        pass

    def l2s(self, l):
        "Convert a code sequence into a unicode string after recognition."
        l = self.codec.decode(l)
        return u"".join(l)

    def predictString(self, xs):
        "Predict output as a string. This uses codec and normalizer."
        pass
3,744
28.488189
82
py
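A short round trip through the legacy Codec above; note that init() is an explicit initializer rather than __init__, and the charset must contain '~', which serves as the default/unknown code (sketch assuming the class above is in scope):

codec = Codec().init("~abc")
labels = codec.encode("cab")           # [2, 0, 1]: codes follow sorted charset order
print("".join(codec.decode(labels)))   # 'cab'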
kraken
kraken-main/kraken/lib/exceptions.py
""" kraken.lib.exceptions ~~~~~~~~~~~~~~~~~~~~~ All custom exceptions raised by kraken's modules and packages. Packages should always define their exceptions here. """ class KrakenCodecException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenStopTrainingException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenEncodeException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenRecordException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenInvalidModelException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenInputException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenRepoException(Exception): def __init__(self, message=None): Exception.__init__(self, message) class KrakenCairoSurfaceException(Exception): """ Raised when the Cairo surface couldn't be created. Attributes: message (str): Error message width (int): Width of the surface height (int): Height of the surface """ def __init__(self, message: str, width: int, height: int) -> None: self.message = message self.width = width self.height = height def __repr__(self) -> str: return repr(self.message)
1,521
21.382353
78
py
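These wrappers exist so callers can catch kraken-specific failures narrowly; the typical pattern elsewhere in the codebase (e.g. preparse_xml_data in kraken.lib.xml) is to skip the offending item and continue:

from kraken.lib.exceptions import KrakenInputException

def safe_parse(parse_fn, path):
    # skip unparseable files instead of aborting a whole run
    try:
        return parse_fn(path)
    except KrakenInputException as e:
        print(f'skipping {path}: {e}')
        return None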
kraken
kraken-main/kraken/lib/xml.py
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
ALTO/Page data loaders for segmentation training
"""
import re
import logging

from pathlib import Path
from itertools import groupby
from lxml import etree
from PIL import Image

from typing import Union, Dict, Any, Sequence, Tuple
from os import PathLike
from collections import defaultdict

from kraken.lib.segmentation import calculate_polygonal_environment
from kraken.lib.exceptions import KrakenInputException

logger = logging.getLogger(__name__)

__all__ = ['parse_xml', 'parse_page', 'parse_alto', 'preparse_xml_data']

# fallback mapping between PAGE region types and tags
page_regions = {'TextRegion': 'text',
                'ImageRegion': 'image',
                'LineDrawingRegion': 'line drawing',
                'GraphicRegion': 'graphic',
                'TableRegion': 'table',
                'ChartRegion': 'chart',
                'MapRegion': 'map',
                'SeparatorRegion': 'separator',
                'MathsRegion': 'maths',
                'ChemRegion': 'chem',
                'MusicRegion': 'music',
                'AdvertRegion': 'advert',
                'NoiseRegion': 'noise',
                'UnknownRegion': 'unknown',
                'CustomRegion': 'custom'}

# same for ALTO
alto_regions = {'TextBlock': 'text',
                'IllustrationType': 'illustration',
                'GraphicalElementType': 'graphic',
                'ComposedBlock': 'composed'}


def preparse_xml_data(filenames: Sequence[Union[str, PathLike]],
                      format_type: str = 'xml',
                      repolygonize: bool = False) -> Dict[str, Any]:
    """
    Loads training data from a set of xml files.

    Extracts line information from Page/ALTO xml files for training of
    recognition models.

    Args:
        filenames: List of XML files.
        format_type: Either `page`, `alto` or `xml` for autodetermination.
        repolygonize: (Re-)calculates polygon information using the kraken
                      algorithm.

    Returns:
        A list of dicts {'text': text, 'baseline': [[x0, y0], ...],
        'boundary': [[x0, y0], ...], 'image': PIL.Image}.
    """
    training_pairs = []
    if format_type == 'xml':
        parse_fn = parse_xml
    elif format_type == 'alto':
        parse_fn = parse_alto
    elif format_type == 'page':
        parse_fn = parse_page
    else:
        raise ValueError(f'invalid format {format_type} for preparse_xml_data')

    for fn in filenames:
        try:
            data = parse_fn(fn)
        except KrakenInputException as e:
            logger.warning(e)
            continue
        try:
            with open(data['image'], 'rb') as fp:
                Image.open(fp)
        except FileNotFoundError as e:
            logger.warning(f'Could not open file {e.filename} in {fn}')
            continue
        if repolygonize:
            logger.info('repolygonizing {} lines in {}'.format(len(data['lines']), data['image']))
            data['lines'] = _repolygonize(data['image'], data['lines'])
        for line in data['lines']:
            training_pairs.append({'image': data['image'], **line})
    return training_pairs


def _repolygonize(im: Image.Image, lines: Sequence[Dict[str, Any]]):
    """
    Helper function taking an output of the lib.xml parse_* functions and
    recalculating the contained polygonization.

    Args:
        im (Image.Image): Input image
        lines (list): List of dicts [{'boundary': [[x0, y0], ...],
                      'baseline': [[x0, y0], ...], 'text': 'abcvsd'}, {...]

    Returns:
        A data structure `lines` with a changed polygonization.
    """
    im = Image.open(im).convert('L')
    polygons = calculate_polygonal_environment(im, [x['baseline'] for x in lines])
    return [{'boundary': polygon,
             'baseline': orig['baseline'],
             'text': orig['text'],
             'script': orig['script']} for orig, polygon in zip(lines, polygons)]


def parse_xml(filename: Union[str, PathLike]) -> Dict[str, Any]:
    """
    Parses either a PageXML or ALTO file with autodetermination of the file
    format.

    Args:
        filename: path to an XML file.

    Returns:
        A dict::

            {'image': impath,
             'lines': [{'boundary': [[x0, y0], ...],
                        'baseline': [[x0, y0], ...],
                        'text': apdjfqpf',
                        'tags': {'type': 'default', ...}},
                       ...
                       {...}],
             'regions': {'region_type_0': [[[x0, y0], ...], ...], ...}}
    """
    with open(filename, 'rb') as fp:
        try:
            doc = etree.parse(fp)
        except etree.XMLSyntaxError as e:
            raise KrakenInputException(f'Parsing {filename} failed: {e}')
    if doc.getroot().tag.endswith('alto'):
        return parse_alto(filename)
    elif doc.getroot().tag.endswith('PcGts'):
        return parse_page(filename)
    else:
        raise KrakenInputException(f'Unknown XML format in {filename}')


def parse_page(filename: Union[str, PathLike]) -> Dict[str, Any]:
    """
    Parses a PageXML file, returns the baselines defined in it, and loads the
    referenced image.

    Args:
        filename: path to a PageXML file.

    Returns:
        A dict::

            {'image': impath,
             'lines': [{'boundary': [[x0, y0], ...],
                        'baseline': [[x0, y0], ...],
                        'text': apdjfqpf',
                        'tags': {'type': 'default', ...}},
                       ...
                       {...}],
             'regions': {'region_type_0': [[[x0, y0], ...], ...], ...}}
    """
    def _parse_page_custom(s):
        o = {}
        s = s.strip()
        l_chunks = [l_chunk for l_chunk in s.split('}') if l_chunk.strip()]
        if l_chunks:
            for chunk in l_chunks:
                tag, vals = chunk.split('{')
                tag_vals = {}
                vals = [val.strip() for val in vals.split(';') if val.strip()]
                for val in vals:
                    key, *val = val.split(':')
                    tag_vals[key] = ":".join(val)
                o[tag.strip()] = tag_vals
        return o

    def _parse_coords(coords):
        points = [x for x in coords.split(' ')]
        points = [int(c) for point in points for c in point.split(',')]
        pts = zip(points[::2], points[1::2])
        return [k for k, g in groupby(pts)]

    with open(filename, 'rb') as fp:
        base_dir = Path(filename).parent
        try:
            doc = etree.parse(fp)
        except etree.XMLSyntaxError as e:
            raise KrakenInputException('Parsing {} failed: {}'.format(filename, e))
        image = doc.find('.//{*}Page')
        if image is None or image.get('imageFilename') is None:
            raise KrakenInputException('No valid image filename found in PageXML file {}'.format(filename))
        try:
            base_direction = {'left-to-right': 'L',
                              'right-to-left': 'R',
                              'top-to-bottom': 'L',
                              'bottom-to-top': 'R',
                              None: None}[image.get('readingDirection')]
        except KeyError:
            logger.warning(f'Invalid value {image.get("readingDirection")} encountered in page-level reading direction.')
            base_direction = None
        lines = doc.findall('.//{*}TextLine')
        data = {'image': base_dir.joinpath(image.get('imageFilename')),
                'lines': [],
                'type': 'baselines',
                'base_dir': base_direction,
                'regions': {}}
        # find all image regions
        regions = []
        for x in page_regions.keys():
            regions.extend(doc.findall('.//{{*}}{}'.format(x)))
        # parse region type and coords
        region_data = defaultdict(list)
        for region in regions:
            coords = region.find('{*}Coords')
            if coords is not None and not coords.get('points').isspace() and len(coords.get('points')):
                try:
                    coords = _parse_coords(coords.get('points'))
                except Exception:
                    logger.warning('Region {} without coordinates'.format(region.get('id')))
                    continue
            else:
                logger.warning('Region {} without coordinates'.format(region.get('id')))
                continue
            rtype = region.get('type')
            # parse transkribus-style custom field if possible
            custom_str = region.get('custom')
            if not rtype and custom_str:
                cs = _parse_page_custom(custom_str)
                if 'structure' in cs and 'type' in cs['structure']:
                    rtype = cs['structure']['type']
            # fall back to default region type if nothing is given
            if not rtype:
                rtype = page_regions[region.tag.split('}')[-1]]
            region_data[rtype].append(coords)
        data['regions'] = region_data
        # parse line information
        tag_set = set(('default',))
        for line in lines:
            pol = line.find('./{*}Coords')
            boundary = None
            if pol is not None and not pol.get('points').isspace() and len(pol.get('points')):
                try:
                    boundary = _parse_coords(pol.get('points'))
                except Exception:
                    logger.info('TextLine {} without polygon'.format(line.get('id')))
            else:
                logger.info('TextLine {} without polygon'.format(line.get('id')))
            base = line.find('./{*}Baseline')
            baseline = None
            if base is not None and not base.get('points').isspace() and len(base.get('points')):
                try:
                    baseline = _parse_coords(base.get('points'))
                except Exception:
                    logger.info('TextLine {} without baseline'.format(line.get('id')))
                    continue
            else:
                logger.info('TextLine {} without baseline'.format(line.get('id')))
                continue
            text = ''
            manual_transcription = line.find('./{*}TextEquiv')
            if manual_transcription is not None:
                transcription = manual_transcription
            else:
                transcription = line
            for el in transcription.findall('.//{*}Unicode'):
                if el.text:
                    text += el.text
            # retrieve line tags if custom string is set and contains them
            tags = {'type': 'default'}
            split_type = None
            custom_str = line.get('custom')
            if custom_str:
                cs = _parse_page_custom(custom_str)
                if 'structure' in cs and 'type' in cs['structure']:
                    tags['type'] = cs['structure']['type']
                    tag_set.add(tags['type'])
                # retrieve data split if encoded in custom string.
                if 'split' in cs and 'type' in cs['split'] and cs['split']['type'] in ['train', 'validation', 'test']:
                    split_type = cs['split']['type']
                    tags['split'] = split_type
                    tag_set.add(split_type)
            data['lines'].append({'baseline': baseline,
                                  'boundary': boundary,
                                  'text': text,
                                  'split': split_type,
                                  'tags': tags})
        if len(tag_set) > 1:
            data['script_detection'] = True
        else:
            data['script_detection'] = False
        return data


def parse_alto(filename: Union[str, PathLike]) -> Dict[str, Any]:
    """
    Parses an ALTO file, returns the baselines defined in it, and loads the
    referenced image.

    Args:
        filename: path to an ALTO file.

    Returns:
        A dict::

            {'image': impath,
             'lines': [{'boundary': [[x0, y0], ...],
                        'baseline': [[x0, y0], ...],
                        'text': apdjfqpf',
                        'tags': {'type': 'default', ...}},
                       ...
                       {...}],
             'regions': {'region_type_0': [[[x0, y0], ...], ...], ...}}
    """
    def _parse_pointstype(coords: str) -> Sequence[Tuple[float, float]]:
        """
        ALTO's PointsType is underspecified so a variety of serializations are valid:

            x0, y0 x1, y1 ...
            x0 y0 x1 y1 ...
            (x0, y0) (x1, y1) ...
            (x0 y0) (x1 y1) ...

        Returns:
            A list of tuples [(x0, y0), (x1, y1), ...]
        """
        float_re = re.compile(r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?')
        points = [float(point.group()) for point in float_re.finditer(coords)]
        if len(points) % 2:
            raise ValueError(f'Odd number of points in points sequence: {points}')
        pts = zip(points[::2], points[1::2])
        return [k for k, g in groupby(pts)]

    with open(filename, 'rb') as fp:
        base_dir = Path(filename).parent
        try:
            doc = etree.parse(fp)
        except etree.XMLSyntaxError as e:
            raise KrakenInputException('Parsing {} failed: {}'.format(filename, e))
        image = doc.find('.//{*}fileName')
        if image is None or not image.text:
            raise KrakenInputException('No valid filename found in ALTO file')
        lines = doc.findall('.//{*}TextLine')
        data = {'image': base_dir.joinpath(image.text),
                'lines': [],
                'type': 'baselines',
                'base_dir': None,
                'regions': {}}
        # find all image regions
        regions = []
        for x in alto_regions.keys():
            regions.extend(doc.findall('./{{*}}Layout/{{*}}Page/{{*}}PrintSpace/{{*}}{}'.format(x)))
        # find overall dimensions to filter out dummy TextBlocks
        ps = doc.find('./{*}Layout/{*}Page/{*}PrintSpace')
        x_min = int(float(ps.get('HPOS')))
        y_min = int(float(ps.get('VPOS')))
        width = int(float(ps.get('WIDTH')))
        height = int(float(ps.get('HEIGHT')))
        page_boundary = [(x_min, y_min),
                         (x_min, y_min + height),
                         (x_min + width, y_min + height),
                         (x_min + width, y_min)]
        # parse tagrefs
        cls_map = {}
        tags = doc.find('.//{*}Tags')
        if tags is not None:
            for x in ['StructureTag', 'LayoutTag', 'OtherTag']:
                for tag in tags.findall('./{{*}}{}'.format(x)):
                    cls_map[tag.get('ID')] = (x[:-3].lower(), tag.get('LABEL'))
        # parse region type and coords
        region_data = defaultdict(list)
        for region in regions:
            # try to find shape object
            coords = region.find('./{*}Shape/{*}Polygon')
            if coords is not None:
                boundary = _parse_pointstype(coords.get('POINTS'))
            elif (region.get('HPOS') is not None and region.get('VPOS') is not None and
                  region.get('WIDTH') is not None and region.get('HEIGHT') is not None):
                # use rectangular definition
                x_min = int(float(region.get('HPOS')))
                y_min = int(float(region.get('VPOS')))
                width = int(float(region.get('WIDTH')))
                height = int(float(region.get('HEIGHT')))
                boundary = [(x_min, y_min),
                            (x_min, y_min + height),
                            (x_min + width, y_min + height),
                            (x_min + width, y_min)]
            else:
                continue
            rtype = region.get('TYPE')
            # fall back to default region type if nothing is given
            tagrefs = region.get('TAGREFS')
            if tagrefs is not None and rtype is None:
                for tagref in tagrefs.split():
                    ttype, rtype = cls_map.get(tagref, (None, None))
                    if rtype is not None and ttype:
                        break
            if rtype is None:
                rtype = alto_regions[region.tag.split('}')[-1]]
            if boundary == page_boundary and rtype == 'text':
                logger.info('Skipping TextBlock with same size as page image.')
                continue
            region_data[rtype].append(boundary)
        data['regions'] = region_data
        tag_set = set(('default',))
        for line in lines:
            if line.get('BASELINE') is None:
                logger.info('TextLine {} without baseline'.format(line.get('ID')))
                continue
            pol = line.find('./{*}Shape/{*}Polygon')
            boundary = None
            if pol is not None:
                try:
                    boundary = _parse_pointstype(pol.get('POINTS'))
                except ValueError:
                    logger.info('TextLine {} without polygon'.format(line.get('ID')))
            else:
                logger.info('TextLine {} without polygon'.format(line.get('ID')))
            baseline = None
            try:
                baseline = _parse_pointstype(line.get('BASELINE'))
            except ValueError:
                logger.info('TextLine {} without baseline'.format(line.get('ID')))
            text = ''
            for el in line.xpath(".//*[local-name() = 'String'] | .//*[local-name() = 'SP']"):
                text += el.get('CONTENT') if el.get('CONTENT') else ' '
            # find line type
            tags = {'type': 'default'}
            split_type = None
            tagrefs = line.get('TAGREFS')
            if tagrefs is not None:
                for tagref in tagrefs.split():
                    ttype, ltype = cls_map.get(tagref, (None, None))
                    if ltype is not None:
                        tag_set.add(ltype)
                        if ttype == 'other':
                            tags['type'] = ltype
                        else:
                            tags[ttype] = ltype
                        if ltype in ['train', 'validation', 'test']:
                            split_type = ltype
            data['lines'].append({'baseline': baseline,
                                  'boundary': boundary,
                                  'text': text,
                                  'tags': tags,
                                  'split': split_type})
        if len(tag_set) > 1:
            data['tags'] = True
        else:
            data['tags'] = False
        return data
19,114
38.906054
121
py
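A minimal consumption sketch for the parsers above; the file name is hypothetical and parse_xml autodetects ALTO vs. PageXML from the root element:

from kraken.lib.xml import parse_xml

data = parse_xml('page_0001.xml')  # hypothetical input file
print(data['image'], data['type'])
for line in data['lines'][:3]:
    print(line['tags']['type'], line['baseline'], line['text'][:40])
for rtype, polys in data['regions'].items():
    print(rtype, len(polys))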
kraken
kraken-main/kraken/lib/codec.py
#
# Copyright 2017 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Pytorch compatible codec with many-to-many mapping between labels and
graphemes.
"""
import logging
import numpy as np

from collections import Counter
from typing import List, Tuple, Set, Union, Dict, Sequence

from torch import IntTensor

from kraken.lib.exceptions import KrakenEncodeException, KrakenCodecException

__all__ = ['PytorchCodec']

logger = logging.getLogger(__name__)


class PytorchCodec(object):
    """
    Builds a codec converting between graphemes/code points and integer
    label sequences.

    charset may either be a string, a list or a dict. In the first case each
    code point will be assigned a label, in the second case each string in
    the list will be assigned a label, and in the final case each key string
    will be mapped to the value sequence of integers. In the first two cases
    labels will be assigned automatically. When a mapping is manually
    provided the label codes need to be a prefix-free code.

    As 0 is the blank label in a CTC output layer, output labels and input
    dictionaries are/should be 1-indexed.

    Args:
        charset: Input character set.
        strict: Flag indicating if encoding/decoding errors should be ignored
                or cause an exception.

    Raises:
        KrakenCodecException: If the character set contains duplicate entries
                              or the mapping is non-singular or
                              non-prefix-free.
    """
    def __init__(self, charset: Union[Dict[str, Sequence[int]], Sequence[str], str], strict=False):
        if isinstance(charset, dict):
            self.c2l = charset
        else:
            cc = Counter(charset)
            if len(cc) < len(charset):
                raise KrakenCodecException(f'Duplicate entry in codec definition string: {cc}')
            self.c2l = {k: [v] for v, k in enumerate(sorted(charset), start=1)}
        self.c_sorted = sorted(self.c2l.keys(), key=len, reverse=True)
        self.l2c = {tuple(v): k for k, v in self.c2l.items()}  # type: Dict[Tuple[int], str]
        self.l2c_single = {k[0]: v for k, v in self.l2c.items() if len(k) == 1}
        self.strict = strict

        if not self.is_valid:
            raise KrakenCodecException('Codec is not valid (non-singular/non-prefix free).')

    def __len__(self) -> int:
        """
        Total number of input labels the codec can decode.
        """
        return len(self.l2c.keys())

    @property
    def is_valid(self) -> bool:
        """
        Returns True if the codec is prefix-free (in label space) and
        non-singular (in both directions).
        """
        # quick test for non-singularity
        if len(self.l2c.keys()) != len(self.c2l.keys()):
            return False
        for i, code_1 in enumerate(sorted(self.l2c.keys())):
            for j, code_2 in enumerate(sorted(self.l2c.keys())):
                if i != j and code_1[:len(code_2)] == code_2:
                    return False
        return True

    @property
    def max_label(self) -> int:
        """
        Returns the maximum label value.
        """
        return max(label for labels in self.c2l.values() for label in labels)

    def encode(self, s: str) -> IntTensor:
        """
        Encodes a string into a sequence of labels.

        If the code is non-singular we greedily encode the longest sequence
        first.

        Args:
            s: Input unicode string

        Returns:
            Encoded label sequence

        Raises:
            KrakenEncodeException: if a subsequence is not encodable and the
                                   codec is set to strict mode.
        """
        labels = []  # type: List[int]
        idx = 0
        while idx < len(s):
            encodable_suffix = False
            for code in self.c_sorted:
                if len(code) == 1:
                    break
                if s[idx:].startswith(code):
                    labels.extend(self.c2l[code])
                    idx += len(code)
                    encodable_suffix = True
                    break
            if not encodable_suffix and s[idx] in self.c2l:
                labels.extend(self.c2l[s[idx]])
                idx += 1
                encodable_suffix = True
            if not encodable_suffix:
                if self.strict:
                    raise KrakenEncodeException(f'Non-encodable sequence {s[idx:idx+5]}... encountered.')
                logger.warning(f'Non-encodable sequence {s[idx:idx+5]}... encountered. Advancing one code point.')
                idx += 1
        return IntTensor(labels)

    def decode(self, labels: Sequence[Tuple[int, int, int, float]]) -> List[Tuple[str, int, int, float]]:
        """
        Decodes a labelling.

        Given a labelling with cuts and confidences returns a string with the
        cuts and confidences aggregated across label-code point
        correspondences. When decoding multilabels to code points the
        resulting cuts are min/max, confidences are averaged.

        Args:
            labels: Input containing tuples (label, start, end, confidence).

        Returns:
            A list of tuples (code point, start, end, confidence)
        """
        start = [x for _, x, _, _ in labels]
        end = [x for _, _, x, _ in labels]
        con = [x for _, _, _, x in labels]
        labels = tuple(x for x, _, _, _ in labels)
        decoded = []
        idx = 0
        while idx < len(labels):
            decodable_suffix = False
            if int(labels[idx]) in self.l2c_single:
                code = self.l2c_single[int(labels[idx])]
                decoded.extend([(c, s, e, u) for c, s, e, u in zip(code,
                                                                   len(code) * [start[idx]],
                                                                   len(code) * [end[idx]],
                                                                   len(code) * [con[idx]])])
                idx += 1
                decodable_suffix = True
            else:
                for code in self.l2c.keys():
                    if code == labels[idx:idx+len(code)]:
                        decoded.extend([(c, s, e, u) for c, s, e, u in zip(self.l2c[code],
                                                                           len(self.l2c[code]) * [start[idx]],
                                                                           len(self.l2c[code]) * [end[idx + len(code) - 1]],
                                                                           len(self.l2c[code]) * [np.mean(con[idx:idx + len(code)])])])
                        idx += len(code)
                        decodable_suffix = True
                        break
            if not decodable_suffix:
                if self.strict:
                    raise KrakenEncodeException(f'Non-decodable sequence {labels[idx:idx+5]}... encountered.')
                logger.debug(f'Non-decodable sequence {labels[idx:idx+5]}... encountered. Advancing one label.')
                idx += 1
        return decoded

    def merge(self, codec: 'PytorchCodec') -> Tuple['PytorchCodec', Set]:
        """
        Transforms this codec (c1) into another (c2) reusing as many labels as
        possible.

        The resulting codec is able to encode the same code point sequences
        while not necessarily having the same labels for them as c2. Retains
        matching character -> label mappings from both codecs, removes
        mappings not in c2, and adds mappings not in c1. Compound labels in
        c2 for code point sequences not in c1 containing labels also in use
        in c1 are added as separate labels.

        Args:
            codec: PytorchCodec to merge with

        Returns:
            A merged codec and a list of labels that were removed from the
            original codec.
        """
        # find character sequences not encodable (exact match) by new codec.
        # get labels for these sequences as deletion candidates
        rm_candidates = {cseq: enc for cseq, enc in self.c2l.items() if cseq not in codec.c2l}
        c2l_cand = self.c2l.copy()
        for x in rm_candidates.keys():
            c2l_cand.pop(x)
        # remove labels from candidate list that are in use for other decodings
        rm_labels = [label for v in rm_candidates.values() for label in v]
        for v in c2l_cand.values():
            for label in rm_labels:
                if label in v:
                    rm_labels.remove(label)
        # iteratively remove labels, decrementing subsequent labels to close
        # (new) holes in the codec.
        offset_rm_labels = [v-idx for idx, v in enumerate(sorted(set(rm_labels)))]
        for rlabel in offset_rm_labels:
            c2l_cand = {k: [label-1 if label > rlabel else label for label in v] for k, v in c2l_cand.items()}
        # add mappings not in original codec
        add_list = {cseq: enc for cseq, enc in codec.c2l.items() if cseq not in self.c2l}
        # renumber
        start_idx = max((0,) + tuple(label for v in c2l_cand.values() for label in v)) + 1
        add_labels = {k: v for v, k in enumerate(sorted(set(label for v in add_list.values() for label in v)), start_idx)}
        for k, v in add_list.items():
            c2l_cand[k] = [add_labels[label] for label in v]
        return PytorchCodec(c2l_cand, self.strict), set(rm_labels)

    def add_labels(self, charset: Union[Dict[str, Sequence[int]], Sequence[str], str]) -> 'PytorchCodec':
        """
        Adds additional characters/labels to the codec.

        charset may either be a string, a list or a dict. In the first case
        each code point will be assigned a label, in the second case each
        string in the list will be assigned a label, and in the final case
        each key string will be mapped to the value sequence of integers. In
        the first two cases labels will be assigned automatically.

        As 0 is the blank label in a CTC output layer, output labels and
        input dictionaries are/should be 1-indexed.

        Args:
            charset: Input character set.
        """
        if isinstance(charset, dict):
            c2l = self.c2l.copy()
            c2l.update(charset)
        else:
            c2l = self.c2l.copy()
            c2l.update({k: [v] for v, k in enumerate(sorted(charset), start=self.max_label+1)})
        return PytorchCodec(c2l, self.strict)

    def __repr__(self):
        return f'PytorchCodec({self.c2l})'
11,105
40.909434
135
py
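A round trip through PytorchCodec illustrating the 1-indexed automatic labels and the greedy longest-match encoding for manual many-to-many mappings (the 'ch' compound is illustrative):

from kraken.lib.codec import PytorchCodec

codec = PytorchCodec('abc')        # automatic 1-indexed labels: a=1, b=2, c=3
print(codec.encode('abca'))        # IntTensor([1, 2, 3, 1])

codec = PytorchCodec({'a': [1], 'ch': [2, 3]})  # manual prefix-free mapping
print(codec.encode('cha'))         # IntTensor([2, 3, 1]): 'ch' encoded as a unit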
kraken
kraken-main/kraken/lib/progress.py
# Copyright Benjamin Kiessling
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handlers for rich-based progress bars.
"""
from typing import Any, Dict, Optional, Union
from numbers import Number
from dataclasses import dataclass

import pytorch_lightning as pl

from pytorch_lightning.callbacks.progress.rich_progress import CustomProgress, RichProgressBar, MetricsTextColumn

from rich import get_console, reconfigure
from rich.console import Console, RenderableType
from rich.progress import BarColumn, Progress, ProgressColumn, Task, TextColumn, TimeRemainingColumn, TimeElapsedColumn, DownloadColumn
from rich.text import Text
from rich.style import Style
from rich.default_styles import DEFAULT_STYLES

__all__ = ['KrakenProgressBar', 'KrakenDownloadProgressBar', 'KrakenTrainProgressBar']


class BatchesProcessedColumn(ProgressColumn):
    def __init__(self):
        super().__init__()

    def render(self, task) -> RenderableType:
        total = task.total if task.total != float("inf") else "--"
        return Text(f"{int(task.completed)}/{total}", style='magenta')


class EarlyStoppingColumn(ProgressColumn):
    """
    A column containing text.
    """
    def __init__(self, trainer):
        self._trainer = trainer
        super().__init__()

    def render(self, task) -> Text:
        text = f'early_stopping: ' \
               f'{self._trainer.early_stopping_callback.wait_count}/{self._trainer.early_stopping_callback.patience} ' \
               f'{self._trainer.early_stopping_callback.best_score:.5f}'
        return Text(text, justify="left")


class KrakenProgressBar(Progress):
    """
    Adaptation of the default rich progress bar to fit with kraken/ketos
    output.
    """
    def __init__(self, *args, **kwargs):
        columns = [TextColumn("[progress.description]{task.description}"),
                   BarColumn(),
                   TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
                   BatchesProcessedColumn(),
                   TimeRemainingColumn(),
                   TimeElapsedColumn()]
        kwargs['refresh_per_second'] = 1
        super().__init__(*columns, *args, **kwargs)


class KrakenDownloadProgressBar(Progress):
    """
    Adaptation of the default rich progress bar to fit with kraken/ketos
    download output.
    """
    def __init__(self, *args, **kwargs):
        columns = [TextColumn("[progress.description]{task.description}"),
                   BarColumn(),
                   TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
                   DownloadColumn(),
                   TimeRemainingColumn(),
                   TimeElapsedColumn()]
        kwargs['refresh_per_second'] = 1
        super().__init__(*columns, *args, **kwargs)


class KrakenTrainProgressBar(RichProgressBar):
    """
    Adaptation of the default ptl rich progress bar to fit with kraken
    (segtrain, train) output.

    Args:
        refresh_rate: Determines at which rate (in number of batches) the
                      progress bars get updated. Set it to ``0`` to disable
                      the display.
        leave: Leaves the finished progress bar in the terminal at the end of
               the epoch. Default: False
        console_kwargs: Args for constructing a `Console`
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, theme=RichProgressBarTheme())

    def _init_progress(self, trainer):
        if self.is_enabled and (self.progress is None or self._progress_stopped):
            self._reset_progress_bar_ids()
            reconfigure(**self._console_kwargs)
            self._console = get_console()
            self._console.clear_live()
            self._metric_component = MetricsTextColumn(trainer, self.theme.metrics)
            columns = self.configure_columns(trainer)
            columns.append(self._metric_component)

            if trainer.early_stopping_callback:
                self._early_stopping_component = EarlyStoppingColumn(trainer)
                columns.append(self._early_stopping_component)

            self.progress = CustomProgress(
                *columns,
                auto_refresh=False,
                disable=self.is_disabled,
                console=self._console,
            )
            self.progress.start()
            # progress has started
            self._progress_stopped = False

    def _get_train_description(self, current_epoch: int) -> str:
        return f"stage {current_epoch}/" \
               f"{self.trainer.max_epochs if self.trainer.model.hparams['quit'] == 'fixed' else '∞'}"


@dataclass
class RichProgressBarTheme:
    """Styles to associate to different base components.

    Args:
        description: Style for the progress bar description. For eg., Epoch x,
                     Testing, etc.
        progress_bar: Style for the bar in progress.
        progress_bar_finished: Style for the finished progress bar.
        progress_bar_pulse: Style for the progress bar when `IterableDataset`
                            is being processed.
        batch_progress: Style for the progress tracker (i.e 10/50 batches
                        completed).
        time: Style for the processed time and estimate time remaining.
        processing_speed: Style for the speed of the batches being processed.
        metrics: Style for the metrics

    https://rich.readthedocs.io/en/stable/style.html
    """
    description: Union[str, Style] = DEFAULT_STYLES['progress.description']
    progress_bar: Union[str, Style] = DEFAULT_STYLES['bar.complete']
    progress_bar_finished: Union[str, Style] = DEFAULT_STYLES['bar.finished']
    progress_bar_pulse: Union[str, Style] = DEFAULT_STYLES['bar.pulse']
    batch_progress: Union[str, Style] = DEFAULT_STYLES['progress.description']
    time: Union[str, Style] = DEFAULT_STYLES['progress.elapsed']
    processing_speed: Union[str, Style] = DEFAULT_STYLES['progress.data.speed']
    metrics: Union[str, Style] = DEFAULT_STYLES['progress.description']
6,470
39.698113
135
py
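The bars above are plain rich.progress.Progress subclasses with kraken's column layout baked in, so they can be driven directly; a small sketch assuming only that rich is installed:

import time
from kraken.lib.progress import KrakenProgressBar

with KrakenProgressBar() as progress:
    task = progress.add_task('processing', total=10)
    for _ in range(10):
        time.sleep(0.1)  # stand-in for real work
        progress.update(task, advance=1)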
kraken
kraken-main/kraken/lib/ctc_decoder.py
#
# Copyright 2017 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Decoders for softmax outputs of CTC trained networks.

Decoders extract label sequences out of the raw output matrix of the line
recognition network. There are multiple different approaches implemented
here, from a simple greedy decoder, to the legacy ocropy thresholding
decoder, and a more complex beam search decoder.

Extracted label sequences are converted into the code point domain using
kraken.lib.codec.PytorchCodec.
"""
import collections
import numpy as np

from typing import List, Tuple

from scipy.special import logsumexp
from scipy.ndimage import measurements

from itertools import groupby

__all__ = ['beam_decoder', 'greedy_decoder', 'blank_threshold_decoder']


def beam_decoder(outputs: np.ndarray, beam_size: int = 3) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence using
    same-prefix-merge beam search decoding as described in [0].

    [0] Hannun, Awni Y., et al. "First-pass large vocabulary continuous speech
    recognition using bi-directional recurrent DNNs." arXiv preprint
    arXiv:1408.2873 (2014).

    Args:
        output: (C, W) shaped softmax output tensor
        beam_size: Size of the beam

    Returns:
        A list with tuples (class, start, end, prob). max is the maximum value
        of the softmax layer in the region.
    """
    c, w = outputs.shape
    probs = np.log(outputs)
    beam = [(tuple(), (0.0, float('-inf')))]  # type: List[Tuple[Tuple, Tuple[float, float]]]

    # loop over each time step
    for t in range(w):
        next_beam = collections.defaultdict(lambda: 2*(float('-inf'),))  # type: dict
        # p_b -> prob for prefix ending in blank
        # p_nb -> prob for prefix not ending in blank
        for prefix, (p_b, p_nb) in beam:
            # only update ending-in-blank-prefix probability for blank
            n_p_b, n_p_nb = next_beam[prefix]
            n_p_b = logsumexp((n_p_b, p_b + probs[0, t], p_nb + probs[0, t]))
            next_beam[prefix] = (n_p_b, n_p_nb)
            # loop over non-blank classes
            for s in range(1, c):
                # only update the not-ending-in-blank-prefix probability for prefix+s
                l_end = prefix[-1][0] if prefix else None
                n_prefix = prefix + ((s, t, t),)
                n_p_b, n_p_nb = next_beam[n_prefix]
                if s == l_end:
                    # substitute the previous non-blank-ending-prefix
                    # probability for repeated labels
                    n_p_nb = logsumexp((n_p_nb, p_b + probs[s, t]))
                else:
                    n_p_nb = logsumexp((n_p_nb, p_b + probs[s, t], p_nb + probs[s, t]))
                next_beam[n_prefix] = (n_p_b, n_p_nb)
                # If s is repeated at the end we also update the unchanged
                # prefix. This is the merging case.
                if s == l_end:
                    n_p_b, n_p_nb = next_beam[prefix]
                    n_p_nb = logsumexp((n_p_nb, p_nb + probs[s, t]))
                    # rewrite both new and old prefix positions
                    next_beam[prefix[:-1] + ((prefix[-1][0], prefix[-1][1], t),)] = (n_p_b, n_p_nb)
                    next_beam[n_prefix[:-1] + ((n_prefix[-1][0], n_prefix[-1][1], t),)] = next_beam.pop(n_prefix)

        # Sort and trim the beam before moving on to the
        # next time-step.
        beam = sorted(next_beam.items(), key=lambda x: logsumexp(x[1]), reverse=True)
        beam = beam[:beam_size]

    return [(c, start, end, max(outputs[c, start:end+1])) for (c, start, end) in beam[0][0]]


def greedy_decoder(outputs: np.ndarray) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence using greedy/best
    path decoding as described in [0].

    [0] Graves, Alex, et al. "Connectionist temporal classification: labelling
    unsegmented sequence data with recurrent neural networks." Proceedings of
    the 23rd international conference on Machine learning. ACM, 2006.

    Args:
        output: (C, W) shaped softmax output tensor

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    labels = np.argmax(outputs, 0)
    seq_len = outputs.shape[1]
    mask = np.eye(outputs.shape[0], dtype='bool')[labels].T
    classes = []
    for label, group in groupby(zip(np.arange(seq_len), labels, outputs[mask]), key=lambda x: x[1]):
        lgroup = list(group)
        if label != 0:
            classes.append((label, lgroup[0][0], lgroup[-1][0], max(x[2] for x in lgroup)))
    return classes


def blank_threshold_decoder(outputs: np.ndarray, threshold: float = 0.5) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence in the same way as
    the original ocropy/clstm decoder.

    Thresholds on class 0, then assigns the maximum (non-zero) class to each
    region.

    Args:
        output: (C, W) shaped softmax output tensor
        threshold: Threshold for 0 class when determining possible label
                   locations.

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    outputs = outputs.T
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask)+1))
    p = 0
    start = None
    x = []
    for idx, val in enumerate(labels):
        if val != 0 and start is None:
            start = idx
            p += 1
        if val == 0 and start is not None:
            if maxima[p-1][1] == 0:
                start = None
            else:
                x.append((maxima[p-1][1], start, idx, outputs[maxima[p-1]]))
                start = None
    # append last non-zero region to list if no zero region occurs after it
    if start:
        x.append((maxima[p-1][1], start, len(outputs), outputs[maxima[p-1]]))
    # filter on each tuple's class; the original tested the constant `x[0]`
    # which made the filter a no-op
    return [y for y in x if y[0] != 0]
6,701
39.131737
113
py
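A toy invocation of greedy_decoder on a fabricated (C, W) softmax matrix; class 0 is the CTC blank and consecutive identical labels collapse into one region:

import numpy as np
from kraken.lib.ctc_decoder import greedy_decoder

outputs = np.array([[0.9, 0.1, 0.1, 0.8, 0.1],    # class 0 (blank)
                    [0.05, 0.8, 0.1, 0.1, 0.1],   # class 1
                    [0.05, 0.1, 0.8, 0.1, 0.8]])  # class 2

print(greedy_decoder(outputs))
# [(1, 1, 1, 0.8), (2, 2, 2, 0.8), (2, 4, 4, 0.8)]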
kraken
kraken-main/kraken/lib/vgsl.py
""" VGSL plumbing """ import re import json import torch import logging import warnings from torch import nn from os import PathLike from typing import Sequence, List, Tuple, Union, Optional, Iterable, Callable, Dict, Any from kraken.lib import layers from kraken.lib.codec import PytorchCodec from kraken.lib.exceptions import KrakenInvalidModelException # filter out coreml warnings coming from their conversion routines (which we don't use). with warnings.catch_warnings(): warnings.filterwarnings(action='ignore', message='has not been tested with coremltools') warnings.filterwarnings(action='ignore', message='is not supported') from coremltools.models import MLModel from coremltools.models import datatypes from coremltools.models.neural_network import NeuralNetworkBuilder from google.protobuf.message import DecodeError # all tensors are ordered NCHW, the "feature" dimension is C, so the output of # an LSTM will be put into C same as the filters of a CNN. __all__ = ['TorchVGSLModel'] logger = logging.getLogger(__name__) class VGSLBlock(object): def __init__(self, block: str, layer: str, name: str, idx: int): if name: name = name[1:-1] else: name = '{}_{}'.format(re.sub(r'\W+', '_', layer), idx) block = re.sub(r'\{.+\}', '', block) lsplits = re.split(r'(^[^\d]+)', block) lsplits.insert(-1, '{{{}}}'.format(name)) self._block = ''.join(lsplits) self._name = name self._layer = layer def __str__(self): return self._block @property def name(self): return self._name @property def layer(self): return self._layer class TorchVGSLModel(object): """ Class building a torch module from a VSGL spec. The initialized class will contain a variable number of layers and a loss function. Inputs and outputs are always 4D tensors in order (batch, channels, height, width) with channels always being the feature dimension. Importantly this means that a recurrent network will be fed the channel vector at each step along its time axis, i.e. either put the non-time-axis dimension into the channels dimension or use a summarizing RNN squashing the time axis to 1 and putting the output into the channels dimension respectively. Attributes: input: Expected input tensor as a 4-tuple. nn: Stack of layers parsed from the spec. criterion: Fully parametrized loss function. user_metadata: dict with user defined metadata. Is flushed into model file during saving/overwritten by loading operations. one_channel_mode: Field indicating the image type used during training of one-channel images. Is '1' for models trained on binarized images, 'L' for grayscale, and None otherwise. """ def __init__(self, spec: str) -> None: """ Constructs a torch module from a (subset of) VSGL spec. Args: spec: Model definition similar to tesseract as follows: ============ FUNCTIONAL OPS ============ C[T](s|t|r|l|rl|m)[{name}]<y>,<x>,<d>[,<y_stride>,<x_stride>][,<y_dilation>,<x_dilation>] Convolves using a y,x window, with no shrinkage, SAME infill, d outputs, with s|t|r|l|m non-linear layer, T for transposed convolution. (s|t|r|l|m) specifies the type of non-linearity: s = sigmoid t = tanh r = relu lr = leaky relu l = linear (i.e., None) m = softmax L(f|r|b)(x|y)[s][{name}]<n> LSTM cell with n outputs. f runs the LSTM forward only. r runs the LSTM reversed only. b runs the LSTM bidirectionally. x runs the LSTM in the x-dimension (on data with or without the y-dimension). y runs the LSTM in the y-dimension (data must have a y dimension). 
s (optional) summarizes the output in the requested dimension, outputting only the final step, collapsing the dimension to a single element. Examples: Lfx128 runs a forward-only LSTM in the x-dimension with 128 outputs, treating any y dimension independently. Lfys64 runs a forward-only LSTM in the y-dimension with 64 outputs and collapses the y-dimension to 1 element. Do[{name}][<p>,<d>] Insert a dropout layer operating in <d> dimensions with probability <p>. Defaults to 1D with 0.5 probability. Gn[{name}]<n> A group normalization layer with n groups ============ PLUMBING OPS ============ [...] Execute ... networks in series (layers). (...) Execute ... networks in parallel. I[{name}] Identity function to build residual connections in parallel layers. Mp[{name}]<y>,<x>[<y_stride>,<x_stride>] Maxpool the input, reducing the (y,x) rectangle to a single vector value. S[{name}]<d>(<a>x<b>)<e>,<f> Splits one dimension, moves one part to another dimension. """ self.spec = spec self.named_spec = [] # type: List[str] self.ops = [self.build_addition, self.build_identity, self.build_rnn, self.build_dropout, self.build_maxpool, self.build_conv, self.build_output, self.build_reshape, self.build_wav2vec2, self.build_groupnorm, self.build_series, self.build_parallel] self.codec = None # type: Optional[PytorchCodec] self.criterion = None # type: Any self.nn = layers.MultiParamSequential() self.user_metadata = {'accuracy': [], 'metrics': [], 'seg_type': None, 'one_channel_mode': None, 'model_type': None, 'hyper_params': {}} # type: dict[str, Any] self._aux_layers = nn.ModuleDict() self.idx = -1 spec = spec.strip() if spec[0] != '[' or spec[-1] != ']': raise ValueError('Non-sequential models not supported') spec = spec[1:-1] blocks = spec.split(' ') self.named_spec.append(blocks[0]) pattern = re.compile(r'(\d+),(\d+),(\d+),(\d+)') m = pattern.match(blocks.pop(0)) if not m: raise ValueError('Invalid input spec.') batch, height, width, channels = [int(x) for x in m.groups()] self.input = (batch, channels, height, width) named_spec, self.nn, self.output = self._parse(self.input, blocks) self.named_spec.extend(str(x) for x in named_spec) self.init_weights() def _parse(self, input: Tuple[int, int, int, int], blocks: Sequence[str], parallel=False, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> None: """ Parses VGSL spec and appends layers to nn """ logger.debug('layer\t\ttype\tparams') named_spec = [] if not parallel: nn = layers.MultiParamSequential() else: nn = layers.MultiParamParallel() prev_oshape = None channels = 0 idx = 0 while idx < len(blocks): oshape = None layer = None for op in self.ops: oshape, name, layer = op(input, blocks, idx, target_output_shape=target_output_shape if parallel or idx == len(blocks) - 1 else None) if oshape: break if oshape: if not parallel: input = oshape else: if prev_oshape and prev_oshape[2:] != oshape[2:]: raise ValueError('Output shape in parallel block not equal!') else: prev_oshape = oshape target_output_shape = oshape channels += oshape[1] named_spec.extend(name) # type: ignore idx += len(name) nn.add_module(' '.join(n.name for n in name), layer) else: raise ValueError('{} invalid layer definition'.format(blocks[idx])) if parallel: return named_spec, nn, (oshape[0], channels, *oshape[2:]) else: return named_spec, nn, oshape def append(self, idx: int, spec: str) -> None: """ Splits a model at layer `idx` and append layers `spec`. New layers are initialized using the init_weights method. 
Args: idx (int): Index of layer to append spec to starting with 1. To select the whole layer stack set idx to None. spec (str): VGSL spec without input block to append to model. """ self.nn = self.nn[:idx] self.idx = idx-1 spec = spec[1:-1] blocks = spec.split(' ') self.named_spec = self.named_spec[:idx+1] named_spec, nn, self.output = self._parse(self.nn[-1].output_shape, blocks) self.named_spec.extend(str(x) for x in named_spec) for module in nn.named_children(): self.nn.add_module(*module) self.spec = '[' + ' '.join(self.named_spec) + ']' self.init_weights(slice(idx, -1)) def to(self, device: Union[str, torch.device]) -> None: self.nn = self.nn.to(device) if self.criterion: self.criterion = self.criterion.to(device) def eval(self) -> None: """ Sets the model to evaluation/inference mode, disabling dropout and gradient calculation. """ self.nn.eval() torch.set_grad_enabled(False) def train(self) -> None: """ Sets the model to training mode (enables dropout layers and disables softmax on CTC layers). """ self.nn.train() # set last layer back to eval mode if not CTC output layer # (log_softmax/softmax switch). if not self.criterion: self.nn[-1].eval() torch.set_grad_enabled(True) def set_num_threads(self, num: int) -> None: """ Sets number of OpenMP threads to use. """ torch.set_num_threads(num) @classmethod def load_model(cls, path: Union[str, PathLike]): """ Deserializes a VGSL model from a CoreML file. Args: path: CoreML file Returns: A TorchVGSLModel instance. Raises: KrakenInvalidModelException if the model data is invalid (not a string, protobuf file, or without appropriate metadata). FileNotFoundError if the path doesn't point to a file. """ if isinstance(path, PathLike): path = path.as_posix() try: mlmodel = MLModel(path) except TypeError as e: raise KrakenInvalidModelException(str(e)) from e except DecodeError as e: raise KrakenInvalidModelException('Failure parsing model protobuf: {}'.format(str(e))) from e if 'vgsl' not in mlmodel.user_defined_metadata: raise KrakenInvalidModelException('No VGSL spec in model metadata') vgsl_spec = mlmodel.user_defined_metadata['vgsl'] nn = cls(vgsl_spec) def _deserialize_layers(name, layer): logger.debug(f'Deserializing layer {name} with type {type(layer)}') if type(layer) in (layers.MultiParamParallel, layers.MultiParamSequential): for name, l in layer.named_children(): _deserialize_layers(name, l) else: layer.deserialize(name, mlmodel.get_spec()) try: _deserialize_layers('', nn.nn) except Exception as exc: raise KrakenInvalidModelException('Failed parsing out layers from model weights') from exc if 'aux_layers' in mlmodel.user_defined_metadata: logger.info('Deserializing auxiliary layers.') nn.aux_layers = {k: cls(v).nn.get_submodule(k) for k, v in json.loads(mlmodel.user_defined_metadata['aux_layers']).items()} if 'codec' in mlmodel.user_defined_metadata: nn.add_codec(PytorchCodec(json.loads(mlmodel.user_defined_metadata['codec']))) nn.user_metadata = {'accuracy': [], 'metrics': [], 'seg_type': 'bbox', 'one_channel_mode': '1', 'model_type': None, 'hyper_params': {}} # type: dict[str, str] if 'kraken_meta' in mlmodel.user_defined_metadata: nn.user_metadata.update(json.loads(mlmodel.user_defined_metadata['kraken_meta'])) return nn @property def one_channel_mode(self): return self.user_metadata['one_channel_mode'] @one_channel_mode.setter def one_channel_mode(self, val: str): if val not in ['1', 'L', None]: raise ValueError('one_channel_mode {} is not one of [1, L, None]'.format(val)) self.user_metadata['one_channel_mode'] = val @property def 
model_type(self): return self.user_metadata['model_type'] @model_type.setter def model_type(self, val: str): if val not in ['recognition', 'segmentation']: raise ValueError('model_type {} is not one of [recognition, segmentation]'.format(val)) self.user_metadata['model_type'] = val @property def seg_type(self): return self.user_metadata['seg_type'] @seg_type.setter def seg_type(self, val: str): if val not in ['bbox', 'baselines', None]: raise ValueError('segmentation type {} is not one of [bbox, baselines, None]'.format(val)) self.user_metadata['seg_type'] = val @property def hyper_params(self, **kwargs): return self.user_metadata['hyper_params'] @hyper_params.setter def hyper_params(self, val: Dict[str, Any]): self.user_metadata['hyper_params'].update(val) @property def aux_layers(self, **kwargs): return self._aux_layers @aux_layers.setter def aux_layers(self, val: Dict[str, torch.nn.Module]): self._aux_layers.update(val) def save_model(self, path: str): """ Serializes the model into path. Args: path: Target destination """ inputs = [('input', datatypes.Array(*self.input))] outputs = [('output', datatypes.Array(*self.output))] net_builder = NeuralNetworkBuilder(inputs, outputs) input = 'input' prev_device = next(self.nn.parameters()).device try: self.nn.to('cpu') def _serialize_layer(net, input, net_builder): for name, l in net.named_children(): logger.debug(f'Serializing layer {name} with type {type(l)}') if type(l) in (layers.MultiParamParallel, layers.MultiParamSequential): _serialize_layer(l, input, net_builder) else: l.serialize(name, input, net_builder) _serialize_layer(self.nn, input, net_builder) if self.aux_layers: prev_aux_device = next(self.aux_layers.parameters()).device try: logger.debug(f'Serializing {len(self.aux_layers)} auxiliary layers') self.aux_layers.to('cpu') _serialize_layer(self.aux_layers, input, net_builder) finally: self.aux_layers.to(prev_aux_device) mlmodel = MLModel(net_builder.spec) mlmodel.short_description = 'kraken model' mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']' if self.codec: mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l) if self.user_metadata: mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata) if self.aux_layers: mlmodel.user_defined_metadata['aux_layers'] = json.dumps({k: v.get_spec(k) for k, v in self.aux_layers.items()}) mlmodel.save(path) finally: self.nn.to(prev_device) def add_codec(self, codec: PytorchCodec) -> None: """ Adds a PytorchCodec to the model. """ self.codec = codec def init_weights(self, idx: slice = slice(0, None)) -> None: """ Initializes weights for all or a subset of layers in the graph. LSTM/GRU layers are orthogonally initialized, convolutional layers uniformly from (-0.1,0.1). Args: idx (slice): A slice object representing the indices of layers to initialize. 
""" def _wi(m): if isinstance(m, torch.nn.Linear): torch.nn.init.xavier_uniform_(m.weight.data) torch.nn.init.constant_(m.bias.data, 0) elif isinstance(m, torch.nn.LSTM): for p in m.parameters(): # weights if p.data.dim() == 2: torch.nn.init.orthogonal_(p.data) # initialize biases to 1 (jozefowicz 2015) else: torch.nn.init.constant_(p.data[len(p)//4:len(p)//2], 1.0) elif isinstance(m, torch.nn.GRU): for p in m.parameters(): torch.nn.init.orthogonal_(p.data) elif isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d): for p in m.parameters(): torch.nn.init.uniform_(p.data, -0.1, 0.1) self.nn[idx].apply(_wi) def resize_output(self, output_size: int, del_indices: Optional[Iterable] = None) -> None: """ Resizes an output layer. Args: output_size (int): New size/output channels of last layer del_indices (list): list of outputs to delete from layer """ if type(self.nn[-1]) not in [layers.ActConv2D, layers.LinSoftmax]: raise ValueError('last layer is neither linear nor convolutional layer') logger.debug('Resizing output layer to {}'.format(output_size)) self.nn[-1].resize(output_size, del_indices) pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)') m = pattern.match(self.named_spec[-1]) if not m: raise ValueError('Output specification is not parsable') aug = m.group('aug') if m.group('aug') else '' self.named_spec[-1] = 'O{}{}{}{}{}'.format(m.group('name'), m.group('dim'), m.group('type'), aug, output_size) self.spec = '[' + ' '.join(self.named_spec) + ']' def build_rnn(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds an LSTM/GRU layer returning number of outputs and layer. 
""" pattern = re.compile(r'(?P<type>L|G)(?P<dir>f|r|b)(?P<dim>x|y)(?P<sum>s)?(?P<legacy>c|o)?(?P<name>{\w+})?(?P<out>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None type = m.group('type') direction = m.group('dir') dim = m.group('dim') == 'y' summarize = m.group('sum') == 's' legacy = None if m.group('legacy') == 'c': legacy = 'clstm' elif m.group('legacy') == 'o': legacy = 'ocropy' hidden = int(m.group(7)) fn = layers.TransposedSummarizingRNN(input[1], hidden, direction, dim, summarize, legacy) self.idx += 1 logger.debug(f'{self.idx}\t\trnn\tdirection {direction} transposed {dim} ' f'summarize {summarize} out {hidden} legacy {legacy}') return fn.get_shape(input), [VGSLBlock(blocks[idx], type, m.group('name'), self.idx)], fn def build_dropout(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>Do)(?P<name>{\w+})?(?P<p>(\d+(\.\d*)?|\.\d+))?(,(?P<dim>\d+))?') m = pattern.match(blocks[idx]) if not m: return None, None, None prob = float(m.group('p')) if m.group('p') else 0.5 dim = int(m.group('dim')) if m.group('dim') else 1 fn = layers.Dropout(prob, dim) self.idx += 1 logger.debug('{}\t\tdropout\tprobability {} dims {}'.format(self.idx, prob, dim)) return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_addition(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>A)(?P<name>{\w+})?(?P<dim>\d+),(?P<chunk_size>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None dim_map = {0: 0, 1: 2, 2: 3, 3: 1} dim = int(m.group('dim')) chunk_size = int(m.group('chunk_size')) if dim > 3: raise ValueError(f'Invalid dimension {dim} in addition block') dim = dim_map[dim] fn = layers.Addition(dim=dim, chunk_size=chunk_size) self.idx += 1 logger.debug(f'{self.idx}\t\taddition dim: {dim} chunk: {chunk_size}') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_identity(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>I)(?P<name>{\w+})?') m = pattern.match(blocks[idx]) if not m: return None, None, None fn = layers.Identity() self.idx += 1 logger.debug(f'{self.idx}\t\tidentity') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_groupnorm(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>Gn)(?P<name>{\w+})?(?P<groups>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None groups = int(m.group('groups')) fn = layers.GroupNorm(input[1], groups) self.idx += 1 logger.debug('{}\t\tgroupnorm\tgroups {}'.format(self.idx, groups)) return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_wav2vec2(self, input: Tuple[int, int, int, int], blocks: List[str], idx: 
int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a Wav2Vec2 masking layer. """ pattern = re.compile(r'(?P<type>W)(?P<name>{\w+})(?P<final_dim>\d+),(?P<mask_width>\d+),(?P<mask_prob>(\d+(\.\d*)?|\.\d+)),(?P<num_negatives>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None final_dim = int(m.group('final_dim')) mask_width = int(m.group('mask_width')) mask_prob = float(m.group('mask_prob')) num_negatives = int(m.group('num_negatives')) from kraken.lib import pretrain fn = pretrain.layers.Wav2Vec2Mask(input[1], final_dim, mask_width, mask_prob, num_negatives) self.idx += 1 logger.debug(f'{self.idx}\t\twav2vec2\tmask width {mask_width}, prob ' f'{mask_prob}, negative samples {num_negatives}') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_conv(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a 2D convolution layer. """ pattern = re.compile(r'(?P<type>C)(?P<trans>T)?(?P<nl>s|t|r|l|lr|m)(?P<name>{\w+})?(\d+),' r'(\d+),(?P<out>\d+)(,(?P<stride_y>\d+),(?P<stride_x>\d+))?(,(?P<dilation_y>\d+),(?P<dilation_x>\d+))?') m = pattern.match(blocks[idx]) if not m: return None, None, None transposed = m.group('trans') is not None kernel_size = (int(m.group(5)), int(m.group(6))) filters = int(m.group('out')) stride = (int(m.group('stride_y')), int(m.group('stride_x'))) if m.group('stride_x') else (1, 1) dilation = (int(m.group('dilation_y')), int(m.group('dilation_x'))) if m.group('dilation_x') else (1, 1) nl = m.group('nl') fn = layers.ActConv2D(input[1], filters, kernel_size, stride, nl, dilation, transposed) self.idx += 1 logger.debug(f'{self.idx}\t\t{"transposed " if transposed else ""}conv\tkernel {kernel_size[0]} x {kernel_size[1]} ' f'filters {filters} stride {stride} dilation {dilation} activation {nl}') return fn.get_shape(input, target_output_shape), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_maxpool(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a maxpool layer. 
""" pattern = re.compile(r'(?P<type>Mp)(?P<name>{\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?') m = pattern.match(blocks[idx]) if not m: return None, None, None kernel_size = (int(m.group(3)), int(m.group(4))) stride = (kernel_size[0] if not m.group(5) else int(m.group(5)), kernel_size[1] if not m.group(6) else int(m.group(6))) fn = layers.MaxPool(kernel_size, stride) self.idx += 1 logger.debug(f'{self.idx}\t\tmaxpool\tkernel {kernel_size[0]} x {kernel_size[1]} stride {stride[0]} x {stride[1]}') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_reshape(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a reshape layer """ pattern = re.compile(r'(?P<type>S)(?P<name>{\w+})?(?P<dim>\d+)\((?P<part_a>\d+)x' r'(?P<part_b>\d+)\)(?P<high>\d+),(?P<low>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None src_dim = int(m.group('dim')) part_a = int(m.group('part_a')) part_b = int(m.group('part_b')) high = int(m.group('high')) low = int(m.group('low')) dim_map = {0: 0, 1: 2, 2: 3, 3: 1} if part_a == 0: part_a = -1 elif part_b == 0: part_b = -1 if src_dim != high and src_dim != low: raise ValueError('Either high ({}) or low ({}) must be source dimension ({})'.format(high, low, src_dim)) if part_a == 0 or part_b == 0: raise ValueError('Expected non-zero size for part_a ({}) or part_b ({})'.format(part_a, part_b)) if part_a == -1 and part_b == -1: raise ValueError('Only one size may be -1') self.idx += 1 logger.debug('{}\t\treshape from {} {} x {} to {}/{}'.format(self.idx, src_dim, part_a, part_b, high, low)) src_dim = dim_map[src_dim] high = dim_map[high] low = dim_map[low] fn = layers.Reshape(src_dim, part_a, part_b, high, low) return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_output(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds an output layer. 
""" pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None dim = int(m.group('dim')) nl = m.group('type') outdim = int(m.group('out')) if dim == 0: raise ValueError('categorical output not supported, yet.') if nl == 'c' and dim == 2: raise ValueError('CTC not supported for heatmap output') if nl in ['l', 's'] and int(m.group('out')) >= 1: self.criterion = nn.BCEWithLogitsLoss() elif nl == 'c': self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True) else: raise ValueError('unsupported output specification') # heatmap output if dim == 2: act = 's' if nl == 'l' else 'm' fn = layers.ActConv2D(input[1], outdim, (1, 1), (1, 1), act) self.idx += 1 logger.debug('{}\t\tconv\tkernel 1 x 1 filters {} stride 1 activation {}'.format(self.idx, outdim, nl)) return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn else: aug = True if m.group('aug') else False lin = layers.LinSoftmax(input[1], int(m.group('out')), aug) self.idx += 1 logger.debug('{}\t\tlinear\taugmented {} out {}'.format(self.idx, aug, m.group('out'))) return lin.get_shape(input), [VGSLBlock(blocks[idx], m.group(1), m.group('name'), self.idx)], lin def _bracket_count(self, block: str) -> int: rst = 0 for c in block: if c == "[": rst += 1 elif c != "(": break for c in block[::-1]: if c == "]": rst -= 1 elif c != ")": break return rst def _parenthesis_count(self, block: str) -> int: rst = 0 for c in block: if c == "(": rst += 1 elif c != "[": break for c in block[::-1]: if c == ")": rst -= 1 elif c != "]": break return rst def build_series(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a serial block of layers. """ if not blocks[idx] or blocks[idx][0] != '[': return None, None, None # single layer in serial block if blocks[idx][0] == '[' and blocks[idx][-1] == ']': named_spec, nn, oshape = self._parse(input, [blocks[idx][1:-1]]) named_spec[0]._block = '[' + named_spec[0]._block + ']' return oshape, named_spec, nn # multiple layers in serial block block_depth = 0 for bl_idx, block in enumerate(blocks[idx:]): block_depth += self._bracket_count(block) if block_depth == 0: break if block_depth: raise ValueError('Unbalanced parentheses in VGSL spec') named_spec, nn, oshape = self._parse(input, [blocks[idx][1:]] + blocks[idx+1:idx+bl_idx] + [blocks[idx+bl_idx][:-1]], target_output_shape=target_output_shape) named_spec[0]._block = '[' + named_spec[0]._block named_spec[-1]._block = named_spec[-1]._block + ']' return oshape, named_spec, nn def build_parallel(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a block of parallel layers. 
""" if not blocks[idx] or blocks[idx][0] != '(': return None, None, None # single layer in parallel block if blocks[idx][0] == '(' and blocks[idx][-1] == ')': named_spec, nn, oshape = self._parse(input, [blocks[idx][1:-1]], parallel=True) named_spec[0]._block = '(' + named_spec[0]._block + ')' return oshape, named_spec, nn block_depth = 0 for bl_idx, block in enumerate(blocks[idx:]): block_depth += self._parenthesis_count(block) if block_depth == 0: break if block_depth: raise ValueError('Unbalanced parentheses in VGSL spec') named_spec, nn, oshape = self._parse(input, [blocks[idx][1:]] + blocks[idx+1:idx+bl_idx] + [blocks[idx+bl_idx][:-1]], parallel=True, target_output_shape=target_output_shape) named_spec[0]._block = '(' + named_spec[0]._block named_spec[-1]._block = named_spec[-1]._block + ')' return oshape, named_spec, nn
35,478
43.740227
181
py
kraken
kraken-main/kraken/lib/morph.py
""" Various add-ons to the SciPy morphology package """ import numpy as np from scipy.ndimage import label as _label from scipy.ndimage import distance_transform_edt from scipy.ndimage import find_objects as _find_objects from scipy.ndimage import filters def label(image: np.ndarray, **kw) -> np.ndarray: """ Redefine the scipy.ndimage.measurements.label function to work with a wider range of data types. The default function is inconsistent about the data types it accepts on different platforms. """ try: return _label(image, **kw) except Exception: pass types = ["int32", "uint32", "int64", "uint64", "int16", "uint16"] for t in types: try: return _label(np.array(image, dtype=t), **kw) except Exception: pass # let it raise the same exception as before return _label(image, **kw) def find_objects(image: np.ndarray, **kw) -> np.ndarray: """ Redefine the scipy.ndimage.measurements.find_objects function to work with a wider range of data types. The default function is inconsistent about the data types it accepts on different platforms. """ try: return _find_objects(image, **kw) except Exception: pass types = ["int32", "uint32", "int64", "uint64", "int16", "uint16"] for t in types: try: return _find_objects(np.array(image, dtype=t), **kw) except Exception: pass # let it raise the same exception as before return _find_objects(image, **kw) def r_dilation(image, size, origin=0): """Dilation with rectangular structuring element using maximum_filter""" return filters.maximum_filter(image, size, origin=origin) def r_erosion(image, size, origin=0): """Erosion with rectangular structuring element using maximum_filter""" return filters.minimum_filter(image, size, origin=origin) def rb_dilation(image, size, origin=0): """Binary dilation using linear filters.""" output = np.zeros(image.shape, 'f') filters.uniform_filter(image, size, output=output, origin=origin, mode='constant', cval=0) return np.array(output > 0, 'i') def rb_erosion(image, size, origin=0): """Binary erosion using linear filters.""" output = np.zeros(image.shape, 'f') filters.uniform_filter(image, size, output=output, origin=origin, mode='constant', cval=1) return np.array(output == 1, 'i') def rb_opening(image, size, origin=0): """Binary opening using linear filters.""" image = rb_erosion(image, size, origin=origin) return rb_dilation(image, size, origin=origin) def spread_labels(labels, maxdist=9999999): """Spread the given labels to the background""" distances, features = distance_transform_edt(labels == 0, return_distances=1, return_indices=1) indexes = features[0] * labels.shape[1] + features[1] spread = labels.ravel()[indexes.ravel()].reshape(*labels.shape) spread *= (distances < maxdist) return spread def correspondences(labels1, labels2): """Given two labeled images, compute an array giving the correspondences between labels in the two images.""" q = 100000 combo = labels1 * q + labels2 result = np.unique(combo) result = np.array([result // q, result % q]) return result def propagate_labels(image, labels, conflict=0): """Given an image and a set of labels, apply the labels to all the regions in the image that overlap a label. 
Assign the value `conflict` to any labels that have a conflict.""" rlabels, _ = label(image) cors = correspondences(rlabels, labels) outputs = np.zeros(np.amax(rlabels) + 1, 'i') oops = -(1 << 30) for o, i in cors.T: if outputs[o] != 0: outputs[o] = oops else: outputs[o] = i outputs[outputs == oops] = conflict outputs[0] = 0 return outputs[rlabels] def select_regions(binary, f, min=0, nbest=100000): """Given a scoring function f over slice tuples (as returned by find_objects), keeps at most nbest regions whose scores is higher than min.""" labels, n = label(binary) objects = find_objects(labels) scores = [f(o) for o in objects] best = np.argsort(scores) keep = np.zeros(len(objects) + 1, 'i') if nbest > 0: for i in best[-nbest:]: if scores[i] <= min: continue keep[i+1] = 1 return keep[labels]
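# Illustrative usage sketch (an addition, not part of the original module):
# keep large connected components of a binary image and propagate seed
# labels into them. All names below are hypothetical.
#
# import numpy as np
# binary = np.random.rand(64, 64) > 0.9            # hypothetical binary input
# area = lambda o: (o[0].stop - o[0].start) * (o[1].stop - o[1].start)
# kept = select_regions(binary, area, min=4, nbest=100)
# seeds = np.zeros((64, 64), dtype='i')            # hypothetical seed labels
# seeds[10:20, 10:20] = 1
# result = propagate_labels(kept, seeds, conflict=0)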
4,601
32.591241
79
py
kraken
kraken-main/kraken/lib/lineest.py
import warnings

from PIL import Image

import numpy as np

from kraken.lib.util import pil2array, array2pil
from scipy.ndimage import affine_transform, gaussian_filter, uniform_filter

__all__ = ['CenterNormalizer', 'dewarp']


def scale_to_h(img, target_height, order=1, dtype=np.dtype('f'), cval=0):
    h, w = img.shape
    scale = target_height * 1.0 / h
    target_width = int(scale * w)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        output = affine_transform(1.0 * img, np.ones(2) / scale, order=order,
                                  output_shape=(target_height, target_width),
                                  mode='constant', cval=cval)
    output = np.array(output, dtype=dtype)
    return output


class CenterNormalizer(object):
    def __init__(self, target_height=48, params=(4, 1.0, 0.3)):
        self.target_height = target_height
        self.range, self.smoothness, self.extra = params

    def setHeight(self, target_height):
        self.target_height = target_height

    def measure(self, line):
        h, w = line.shape
        # XXX: this filter is awfully slow
        smoothed = gaussian_filter(line, (h * 0.5, h * self.smoothness),
                                   mode='constant')
        smoothed += 0.001 * uniform_filter(smoothed, (h * 0.5, w),
                                           mode='constant')
        self.shape = (h, w)
        a = np.argmax(smoothed, axis=0)
        a = gaussian_filter(a, h * self.extra)
        self.center = np.array(a, 'i')
        deltas = np.abs(np.arange(h)[:, np.newaxis] - self.center[np.newaxis, :])
        self.mad = np.mean(deltas[line != 0])
        self.r = int(1 + self.range * self.mad)

    def dewarp(self, img, cval=0, dtype=np.dtype('f')):
        if img.shape != self.shape:
            raise Exception('Measured and dewarped image shapes differ')
        h, w = img.shape
        padded = np.vstack([cval * np.ones((h, w)), img, cval * np.ones((h, w))])
        center = self.center + h
        dewarped = [padded[center[i] - self.r:center[i] + self.r, i]
                    for i in range(w)]
        dewarped = np.array(dewarped, dtype=dtype).T
        return dewarped

    def normalize(self, img, order=1, dtype=np.dtype('f'), cval=0):
        dewarped = self.dewarp(img, cval=cval, dtype=dtype)
        if dewarped.shape[0] == 0:
            dewarped = img
        scaled = scale_to_h(dewarped, self.target_height, order=order,
                            dtype=dtype, cval=cval)
        return scaled


def dewarp(normalizer: CenterNormalizer, im: Image.Image) -> Image.Image:
    """
    Dewarps an image of a line using a kraken.lib.lineest.CenterNormalizer
    instance.

    Args:
        normalizer (kraken.lib.lineest.CenterNormalizer): A line normalizer
                                                          instance
        im (PIL.Image.Image): Image to dewarp

    Returns:
        PIL.Image containing the dewarped image.
    """
    line = pil2array(im)
    temp = np.amax(line) - line
    temp = temp * 1.0 / np.amax(temp)
    normalizer.measure(temp)
    line = normalizer.normalize(line, cval=np.amax(line))
    return array2pil(line)
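# Illustrative usage sketch (an addition, not part of the original module):
# dewarping a single line image with the module-level helper. The file name
# is hypothetical.
#
# from PIL import Image
# lnorm = CenterNormalizer(target_height=48)
# im = Image.open('line.png').convert('L')
# dewarped = dewarp(lnorm, im)
# dewarped.save('line_dewarped.png')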
3,119
35.705882
79
py
kraken
kraken-main/kraken/lib/segmentation.py
# # Copyright 2019 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Processing for baseline segmenter output """ import PIL import logging import numpy as np import shapely.geometry as geom from collections import defaultdict from PIL import Image from scipy.ndimage import maximum_filter, binary_erosion from scipy.ndimage.morphology import distance_transform_cdt from scipy.spatial.distance import pdist, squareform from shapely.ops import nearest_points, unary_union from shapely.validation import explain_validity from skimage import draw, filters from skimage.graph import MCP_Connect from skimage.filters import apply_hysteresis_threshold, sobel from skimage.measure import approximate_polygon, subdivide_polygon, regionprops, label from skimage.morphology import skeletonize from skimage.transform import PiecewiseAffineTransform, SimilarityTransform, AffineTransform, warp from typing import List, Tuple, Union, Dict, Any, Sequence, Optional from kraken.lib import default_specs from kraken.lib.exceptions import KrakenInputException from scipy.signal import convolve2d from scipy.ndimage import gaussian_filter logger = logging.getLogger('kraken') __all__ = ['reading_order', 'denoising_hysteresis_thresh', 'vectorize_lines', 'calculate_polygonal_environment', 'polygonal_reading_order', 'scale_polygonal_lines', 'scale_regions', 'compute_polygon_section', 'extract_polygons'] def reading_order(lines: Sequence[Tuple[slice, slice]], text_direction: str = 'lr') -> np.ndarray: """Given the list of lines (a list of 2D slices), computes the partial reading order. The output is a binary 2D array such that order[i,j] is true if line i comes before line j in reading order.""" logger.info('Compute reading order on {} lines in {} direction'.format(len(lines), text_direction)) order = np.zeros((len(lines), len(lines)), 'B') def _x_overlaps(u, v): return u[1].start < v[1].stop and u[1].stop > v[1].start def _above(u, v): return u[0].start < v[0].start def _left_of(u, v): return u[1].stop < v[1].start def _separates(w, u, v): if w == u or w == v: return 0 if w[0].stop < min(u[0].start, v[0].start): return 0 if w[0].start > max(u[0].stop, v[0].stop): return 0 if w[1].start < u[1].stop and w[1].stop > v[1].start: return 1 return 0 if text_direction == 'rl': def horizontal_order(u, v): return not _left_of(u, v) else: horizontal_order = _left_of for i, u in enumerate(lines): for j, v in enumerate(lines): if _x_overlaps(u, v): if _above(u, v): order[i, j] = 1 else: if [w for w in lines if _separates(w, u, v)] == []: if horizontal_order(u, v): order[i, j] = 1 return order def topsort(order: np.ndarray) -> List[int]: """Given a binary array defining a partial order (o[i,j]==True means i<j), compute a topological sort. 
This is a quick and dirty implementation that works for up to a few thousand elements.""" logger.info('Perform topological sort on partially ordered lines') n = len(order) visited = np.zeros(n) L = [] def _visit(k): if visited[k]: return visited[k] = 1 a, = np.nonzero(np.ravel(order[:, k])) for line in a: _visit(line) L.append(k) for k in range(n): _visit(k) return L def denoising_hysteresis_thresh(im, low, high, sigma): im = gaussian_filter(im, sigma) return apply_hysteresis_threshold(im, low, high) def moore_neighborhood(current, backtrack): operations = np.array([[-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]) neighbors = (current + operations).astype(int) for i, point in enumerate(neighbors): if np.all(point == backtrack): # we return the sorted neighborhood return np.concatenate((neighbors[i:], neighbors[:i])) return 0 def boundary_tracing(region): """ Find coordinates of the region's boundary. The region must not have isolated points. Code copied from https://github.com/machine-shop/deepwings/blob/master/deepwings/method_features_extraction/image_processing.py#L185 Args: region: object obtained with skimage.measure.regionprops(). Returns: List of coordinates of pixels in the boundary. """ # creating the binary image coords = region.coords maxs = np.amax(coords, axis=0) binary = np.zeros((maxs[0] + 2, maxs[1] + 2)) x = coords[:, 1] y = coords[:, 0] binary[tuple([y, x])] = 1 # initialization # starting point is the most upper left point idx_start = 0 while True: # asserting that the starting point is not isolated start = [y[idx_start], x[idx_start]] focus_start = binary[start[0]-1:start[0]+2, start[1]-1:start[1]+2] if np.sum(focus_start) > 1: break idx_start += 1 # Determining backtrack pixel for the first element if (binary[start[0] + 1, start[1]] == 0 and binary[start[0]+1, start[1]-1] == 0): backtrack_start = [start[0]+1, start[1]] else: backtrack_start = [start[0], start[1] - 1] current = start backtrack = backtrack_start boundary = [] counter = 0 while True: neighbors_current = moore_neighborhood(current, backtrack) y = neighbors_current[:, 0] x = neighbors_current[:, 1] idx = np.argmax(binary[tuple([y, x])]) boundary.append(current) backtrack = neighbors_current[idx-1] current = neighbors_current[idx] counter += 1 if (np.all(current == start) and np.all(backtrack == backtrack_start)): break return np.array(boundary) def _extend_boundaries(baselines, bin_bl_map): # find baseline blob boundaries labelled = label(bin_bl_map) boundaries = [] for x in regionprops(labelled): try: # skip lines with very small bounding polygons if x.area < 6: logger.info(f'Skipping baseline extension for very small blob of area {x.area}') continue b = boundary_tracing(x) if len(b) > 3: boundaries.append(geom.Polygon(b).simplify(0.01).buffer(0)) except Exception as e: logger.warning(f'Boundary tracing failed in baseline elongation: {e}') continue # extend lines to polygon boundary for bl in baselines: ls = geom.LineString(bl) try: boundary_pol = next(filter(lambda x: x.contains(ls), boundaries)) except Exception: continue # 'left' side if boundary_pol.contains(geom.Point(bl[0])): l_point = boundary_pol.boundary.intersection(geom.LineString([(bl[0][0]-10*(bl[1][0]-bl[0][0]), bl[0][1]-10*(bl[1][1]-bl[0][1])), bl[0]])) if l_point.geom_type != 'Point': bl[0] = np.array(nearest_points(geom.Point(bl[0]), boundary_pol)[1].coords[0], 'int').tolist() else: bl[0] = np.array(l_point.coords[0], 'int').tolist() # 'right' side if boundary_pol.contains(geom.Point(bl[-1])): r_point = 
boundary_pol.boundary.intersection(geom.LineString([(bl[-1][0]-10*(bl[-2][0]-bl[-1][0]), bl[-1][1]-10*(bl[-2][1]-bl[-1][1])), bl[-1]])) if r_point.geom_type != 'Point': bl[-1] = np.array(nearest_points(geom.Point(bl[-1]), boundary_pol)[1].coords[0], 'int').tolist() else: bl[-1] = np.array(r_point.coords[0], 'int').tolist() return baselines class LineMCP(MCP_Connect): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.connections = dict() self.scores = defaultdict(lambda: np.inf) def create_connection(self, id1, id2, pos1, pos2, cost1, cost2): k = (min(id1, id2), max(id1, id2)) s = cost1 + cost2 if self.scores[k] > s: self.connections[k] = (pos1, pos2, s) self.scores[k] = s def get_connections(self): results = [] for k, (pos1, pos2, s) in self.connections.items(): results.append(np.concatenate([self.traceback(pos1), self.traceback(pos2)[::-1]])) return results def goal_reached(self, int_index, float_cumcost): return 2 if float_cumcost else 0 def vectorize_lines(im: np.ndarray, threshold: float = 0.17, min_length=5, text_direction: str = 'horizontal'): """ Vectorizes lines from a binarized array. Args: im (np.ndarray): Array of shape (3, H, W) with the first dimension being probabilities for (start_separators, end_separators, baseline). threshold (float): Threshold for baseline blob detection. min_length (int): Minimal length of output baselines. text_direction (str): Base orientation of the text line (horizontal or vertical). Returns: [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ] A list of lists containing the points of all baseline polylines. """ if text_direction not in ['horizontal', 'vertical']: raise ValueError(f'Invalid text direction "{text_direction}"') # split into baseline and separator map st_map = im[0] end_map = im[1] bl_map = im[2] bl_map = filters.sato(bl_map, black_ridges=False, mode='constant') bin_bl_map = bl_map > threshold # skeletonize line_skel = skeletonize(bin_bl_map) # find end points kernel = np.array([[1, 1, 1], [1, 10, 1], [1, 1, 1]]) line_extrema = np.transpose(np.where((convolve2d(line_skel, kernel, mode='same') == 11) * line_skel)) mcp = LineMCP(~line_skel) try: mcp.find_costs(line_extrema) except ValueError: return [] lines = [approximate_polygon(line, 3).tolist() for line in mcp.get_connections()] # extend baselines to blob boundary lines = _extend_boundaries(lines, bin_bl_map) # orient lines f_st_map = maximum_filter(st_map, size=20) f_end_map = maximum_filter(end_map, size=20) oriented_lines = [] for bl in lines: l_end = tuple(bl[0]) r_end = tuple(bl[-1]) if f_st_map[l_end] - f_end_map[l_end] > 0.2 and f_st_map[r_end] - f_end_map[r_end] < -0.2: pass elif f_st_map[l_end] - f_end_map[l_end] < -0.2 and f_st_map[r_end] - f_end_map[r_end] > 0.2: bl = bl[::-1] else: if text_direction == 'horizontal': logger.debug('Insufficient marker confidences in output. Defaulting to horizontal upright line.') if bl[0][1] > bl[-1][1]: bl = bl[::-1] else: logger.debug('Insufficient marker confidences in output. Defaulting to top-to-bottom line.') if bl[0][0] > bl[-1][0]: bl = bl[::-1] if geom.LineString(bl).length >= min_length: oriented_lines.append([x[::-1] for x in bl]) return oriented_lines def vectorize_regions(im: np.ndarray, threshold: float = 0.5): """ Vectorizes lines from a binarized array. Args: im (np.ndarray): Array of shape (H, W) with the first dimension being a probability distribution over the region. threshold (float): Threshold for binarization Returns: [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... 
] A list of lists containing the region polygons. """ bin = im > threshold labelled = label(bin) boundaries = [] for x in regionprops(labelled): boundary = boundary_tracing(x) if len(boundary) > 2: boundaries.append(geom.Polygon(boundary)) # merge regions that overlap boundaries = unary_union(boundaries) # simplify them afterwards if boundaries.geom_type == 'Polygon': boundaries = [boundaries.boundary.simplify(10)] else: boundaries = [x.boundary.simplify(10) for x in boundaries.geoms] return [np.array(x.coords, dtype=np.uint)[:, [1, 0]].tolist() for x in boundaries] def _rotate(image, angle, center, scale, cval=0): """ Rotate function taken mostly from scikit image. Main difference is that this one allows dimensional scaling and records the final translation to ensure no image content is lost. This is needed to rotate the seam back into the original image. """ rows, cols = image.shape[0], image.shape[1] tform1 = SimilarityTransform(translation=center) tform2 = SimilarityTransform(rotation=angle) tform3 = SimilarityTransform(translation=-center) tform4 = AffineTransform(scale=(1/scale, 1)) tform = tform4 + tform3 + tform2 + tform1 corners = np.array([ [0, 0], [0, rows - 1], [cols - 1, rows - 1], [cols - 1, 0] ]) corners = tform.inverse(corners) minc = corners[:, 0].min() minr = corners[:, 1].min() maxc = corners[:, 0].max() maxr = corners[:, 1].max() out_rows = maxr - minr + 1 out_cols = maxc - minc + 1 output_shape = np.around((out_rows, out_cols)) # fit output image in new shape translation = (minc, minr) tform5 = SimilarityTransform(translation=translation) tform = tform5 + tform tform.params[2] = (0, 0, 1) return tform, warp(image, tform, output_shape=output_shape, order=0, cval=cval, clip=False, preserve_range=True) def line_regions(line, regions): """ Filters a list of regions by line association. Args: line (list): Polyline representing the line. regions (list): List of region polygons Returns: A list of regions that contain the line mid-point. """ mid_point = geom.LineString(line).interpolate(0.5, normalized=True) reg_pols = [geom.Polygon(x) for x in regions] regs = [] for reg_idx, reg_pol in enumerate(reg_pols): if reg_pol.contains(mid_point): regs.append(regions[reg_idx]) return regs def _ray_intersect_boundaries(ray, direction, aabb): """ Simplified version of [0] for 2d and AABB anchored at (0,0). [0] http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms """ dir_fraction = np.empty(2, dtype=ray.dtype) dir_fraction[direction == 0.0] = np.inf dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0]) t1 = (-ray[0]) * dir_fraction[0] t2 = (aabb[0] - ray[0]) * dir_fraction[0] t3 = (-ray[1]) * dir_fraction[1] t4 = (aabb[1] - ray[1]) * dir_fraction[1] tmin = max(min(t1, t2), min(t3, t4)) tmax = min(max(t1, t2), max(t3, t4)) t = min(x for x in [tmin, tmax] if x >= 0) return ray + (direction * t) def _calc_seam(baseline, polygon, angle, im_feats, bias=150): """ Calculates seam between baseline and ROI boundary on one side. Adds a baseline-distance-weighted bias to the feature map, masks out the bounding polygon and rotates the line so it is roughly level. 
""" MASK_VAL = 99999 r, c = draw.polygon(polygon[:, 1], polygon[:, 0]) c_min, c_max = int(polygon[:, 0].min()), int(polygon[:, 0].max()) r_min, r_max = int(polygon[:, 1].min()), int(polygon[:, 1].max()) patch = im_feats[r_min:r_max+2, c_min:c_max+2].copy() # bias feature matrix by distance from baseline mask = np.ones_like(patch) for line_seg in zip(baseline[:-1] - (c_min, r_min), baseline[1:] - (c_min, r_min)): line_locs = draw.line(line_seg[0][1], line_seg[0][0], line_seg[1][1], line_seg[1][0]) mask[line_locs] = 0 dist_bias = distance_transform_cdt(mask) # absolute mask mask = np.ones_like(patch, dtype=bool) mask[r-r_min, c-c_min] = False # dilate mask to compensate for aliasing during rotation mask = binary_erosion(mask, border_value=True, iterations=2) # combine weights with features patch[mask] = MASK_VAL patch += (dist_bias*(np.mean(patch[patch != MASK_VAL])/bias)) extrema = baseline[(0, -1), :] - (c_min, r_min) # scale line image to max 600 pixel width scale = min(1.0, 600/(c_max-c_min)) tform, rotated_patch = _rotate(patch, angle, center=extrema[0], scale=scale, cval=MASK_VAL) # ensure to cut off padding after rotation x_offsets = np.sort(np.around(tform.inverse(extrema)[:, 0]).astype('int')) rotated_patch = rotated_patch[:, x_offsets[0]:x_offsets[1]+1] # infinity pad for seamcarve rotated_patch = np.pad(rotated_patch, ((1, 1), (0, 0)), mode='constant', constant_values=np.inf) r, c = rotated_patch.shape # fold into shape (c, r-2 3) A = np.lib.stride_tricks.as_strided(rotated_patch, (c, r-2, 3), (rotated_patch.strides[1], rotated_patch.strides[0], rotated_patch.strides[0])) B = rotated_patch[1:-1, 1:].swapaxes(0, 1) backtrack = np.zeros_like(B, dtype='int') T = np.empty((B.shape[1]), 'f') R = np.arange(-1, len(T)-1) for i in np.arange(c-1): A[i].min(1, T) backtrack[i] = A[i].argmin(1) + R B[i] += T # backtrack seam = [] j = np.argmin(rotated_patch[1:-1, -1]) for i in range(c-2, -2, -1): seam.append((i+x_offsets[0]+1, j)) j = backtrack[i, j] seam = np.array(seam)[::-1] seam_mean = seam[:, 1].mean() seam_std = seam[:, 1].std() seam[:, 1] = np.clip(seam[:, 1], seam_mean-seam_std, seam_mean+seam_std) # rotate back seam = tform(seam).astype('int') # filter out seam points in masked area of original patch/in padding seam = seam[seam.min(axis=1) >= 0, :] m = (seam < mask.shape[::-1]).T seam = seam[np.logical_and(m[0], m[1]), :] seam = seam[np.invert(mask[seam.T[1], seam.T[0]])] seam += (c_min, r_min) return seam def _extract_patch(env_up, env_bottom, baseline, offset_baseline, end_points, dir_vec, topline, offset, im_feats, bounds): """ Calculate a line image patch from a ROI and the original baseline. 
""" upper_polygon = np.concatenate((baseline, env_up[::-1])) bottom_polygon = np.concatenate((baseline, env_bottom[::-1])) upper_offset_polygon = np.concatenate((offset_baseline, env_up[::-1])) bottom_offset_polygon = np.concatenate((offset_baseline, env_bottom[::-1])) angle = np.arctan2(dir_vec[1], dir_vec[0]) roi_polygon = unary_union([geom.Polygon(upper_polygon), geom.Polygon(bottom_polygon)]) if topline: upper_seam = _calc_seam(baseline, upper_polygon, angle, im_feats) bottom_seam = _calc_seam(offset_baseline, bottom_offset_polygon, angle, im_feats) else: upper_seam = _calc_seam(offset_baseline, upper_offset_polygon, angle, im_feats) bottom_seam = _calc_seam(baseline, bottom_polygon, angle, im_feats) upper_seam = geom.LineString(upper_seam).simplify(5) bottom_seam = geom.LineString(bottom_seam).simplify(5) # ugly workaround against GEOM parallel_offset bug creating a # MultiLineString out of offset LineString if upper_seam.parallel_offset(offset//2, side='right').geom_type == 'MultiLineString' or offset == 0: upper_seam = np.array(upper_seam.coords, dtype=int) else: upper_seam = np.array(upper_seam.parallel_offset(offset//2, side='right').coords, dtype=int)[::-1] if bottom_seam.parallel_offset(offset//2, side='left').geom_type == 'MultiLineString' or offset == 0: bottom_seam = np.array(bottom_seam.coords, dtype=int) else: bottom_seam = np.array(bottom_seam.parallel_offset(offset//2, side='left').coords, dtype=int) # offsetting might produce bounds outside the image. Clip it to the image bounds. polygon = np.concatenate(([end_points[0]], upper_seam, [end_points[-1]], bottom_seam[::-1])) polygon = geom.Polygon(polygon) if not polygon.is_valid: polygon = np.concatenate(([end_points[-1]], upper_seam, [end_points[0]], bottom_seam)) polygon = geom.Polygon(polygon) if not polygon.is_valid: raise Exception(f'Invalid bounding polygon computed: {explain_validity(polygon)}') polygon = np.array(roi_polygon.intersection(polygon).boundary.coords, dtype=int) return polygon def _calc_roi(line, bounds, baselines, suppl_obj, p_dir): # interpolate baseline ls = geom.LineString(line) ip_line = [line[0]] dist = 10 while dist < ls.length: ip_line.append(np.array(ls.interpolate(dist).coords[0])) dist += 10 ip_line.append(line[-1]) ip_line = np.array(ip_line) upper_bounds_intersects = [] bottom_bounds_intersects = [] for point in ip_line: upper_bounds_intersects.append(_ray_intersect_boundaries(point, (p_dir*(-1, 1))[::-1], bounds+1).astype('int')) bottom_bounds_intersects.append(_ray_intersect_boundaries(point, (p_dir*(1, -1))[::-1], bounds+1).astype('int')) # build polygon between baseline and bbox intersects upper_polygon = geom.Polygon(ip_line.tolist() + upper_bounds_intersects) bottom_polygon = geom.Polygon(ip_line.tolist() + bottom_bounds_intersects) # select baselines at least partially in each polygon side_a = [geom.LineString(upper_bounds_intersects)] side_b = [geom.LineString(bottom_bounds_intersects)] for adj_line in baselines + suppl_obj: adj_line = geom.LineString(adj_line) if upper_polygon.intersects(adj_line): side_a.append(adj_line) elif bottom_polygon.intersects(adj_line): side_b.append(adj_line) side_a = unary_union(side_a).buffer(1).boundary side_b = unary_union(side_b).buffer(1).boundary def _find_closest_point(pt, intersects): spt = geom.Point(pt) if intersects.is_empty: raise Exception(f'No intersection with boundaries. 
Shapely intersection object: {intersects.wkt}') if intersects.geom_type == 'MultiPoint': return min([p for p in intersects.geoms], key=lambda x: spt.distance(x)) elif intersects.geom_type == 'Point': return intersects elif intersects.geom_type == 'GeometryCollection' and len(intersects.geoms) > 0: t = min([p for p in intersects.geoms], key=lambda x: spt.distance(x)) if t == 'Point': return t else: return nearest_points(spt, t)[1] else: raise Exception(f'No intersection with boundaries. Shapely intersection object: {intersects.wkt}') env_up = [] env_bottom = [] # find orthogonal (to linear regression) intersects with adjacent objects to complete roi for point, upper_bounds_intersect, bottom_bounds_intersect in zip(ip_line, upper_bounds_intersects, bottom_bounds_intersects): upper_limit = _find_closest_point(point, geom.LineString( [point, upper_bounds_intersect]).intersection(side_a)) bottom_limit = _find_closest_point(point, geom.LineString( [point, bottom_bounds_intersect]).intersection(side_b)) env_up.append(upper_limit.coords[0]) env_bottom.append(bottom_limit.coords[0]) env_up = np.array(env_up, dtype='uint') env_bottom = np.array(env_bottom, dtype='uint') return env_up, env_bottom def calculate_polygonal_environment(im: PIL.Image.Image = None, baselines: Sequence[Sequence[Tuple[int, int]]] = None, suppl_obj: Sequence[Sequence[Tuple[int, int]]] = None, im_feats: np.ndarray = None, scale: Tuple[int, int] = None, topline: bool = False, raise_on_error: bool = False): """ Given a list of baselines and an input image, calculates a polygonal environment around each baseline. Args: im (PIL.Image): grayscale input image (mode 'L') baselines (sequence): List of lists containing a single baseline per entry. suppl_obj (sequence): List of lists containing additional polylines that should be considered hard boundaries for polygonizaton purposes. Can be used to prevent polygonization into non-text areas such as illustrations or to compute the polygonization of a subset of the lines in an image. im_feats (numpy.array): An optional precomputed seamcarve energy map. Overrides data in `im`. The default map is `gaussian_filter(sobel(im), 2)`. scale (tuple): A 2-tuple (h, w) containing optional scale factors of the input. Values of 0 are used for aspect-preserving scaling. `None` skips input scaling. topline (bool): Switch to change default baseline location for offset calculation purposes. If set to False, baselines are assumed to be on the bottom of the text line and will be offset upwards, if set to True, baselines are on the top and will be offset downwards. If set to None, no offset will be applied. raise_on_error: Raises error instead of logging them when they are not-blocking Returns: List of lists of coordinates. If no polygonization could be compute for a baseline `None` is returned instead. 
""" if scale is not None and (scale[0] > 0 or scale[1] > 0): w, h = im.size oh, ow = scale if oh == 0: oh = int(h * ow/w) elif ow == 0: ow = int(w * oh/h) im = im.resize((ow, oh)) scale = np.array((ow/w, oh/h)) # rescale baselines baselines = [(np.array(bl) * scale).astype('int').tolist() for bl in baselines] # rescale suppl_obj if suppl_obj is not None: suppl_obj = [(np.array(bl) * scale).astype('int').tolist() for bl in suppl_obj] if im_feats is None: bounds = np.array(im.size, dtype=float) - 1 im = np.array(im.convert('L')) # compute image gradient im_feats = gaussian_filter(sobel(im), 0.5) else: bounds = np.array(im_feats.shape[::-1], dtype=float) - 1 polygons = [] if suppl_obj is None: suppl_obj = [] for idx, line in enumerate(baselines): try: end_points = (line[0], line[-1]) line = geom.LineString(line) offset = default_specs.SEGMENTATION_HYPER_PARAMS['line_width'] if topline is not None else 0 offset_line = line.parallel_offset(offset, side='left' if topline else 'right') line = np.array(line.coords, dtype=float) offset_line = np.array(offset_line.coords, dtype=float) # parallel_offset on the right reverses the coordinate order if not topline: offset_line = offset_line[::-1] # calculate magnitude-weighted average direction vector lengths = np.linalg.norm(np.diff(line.T), axis=0) p_dir = np.mean(np.diff(line.T) * lengths/lengths.sum(), axis=1) p_dir = (p_dir.T / np.sqrt(np.sum(p_dir**2, axis=-1))) env_up, env_bottom = _calc_roi(line, bounds, baselines[:idx] + baselines[idx+1:], suppl_obj, p_dir) polygons.append(_extract_patch(env_up, env_bottom, line.astype('int'), offset_line.astype('int'), end_points, p_dir, topline, offset, im_feats, bounds)) except Exception as e: if raise_on_error: raise logger.warning(f'Polygonizer failed on line {idx}: {e}') polygons.append(None) if scale is not None: polygons = [(np.array(pol)/scale).astype('uint').tolist() if pol is not None else None for pol in polygons] return polygons def polygonal_reading_order(lines: Sequence[Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]], text_direction: str = 'lr', regions: Optional[Sequence[List[Tuple[int, int]]]] = None) -> Sequence[Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]]: """ Given a list of baselines and regions, calculates the correct reading order and applies it to the input. Args: lines (Sequence): List of tuples containing the baseline and its polygonization. regions (Sequence): List of region polygons. text_direction (str): Set principal text direction for column ordering. Can be 'lr' or 'rl' Returns: A reordered input. 
""" bounds = [] if regions is not None: r = [geom.Polygon(reg) for reg in regions] else: r = [] region_lines = [[] for _ in range(len(r))] indizes = {} for line_idx, line in enumerate(lines): s_line = geom.LineString(line[1]) in_region = False for idx, reg in enumerate(r): if is_in_region(s_line, reg): region_lines[idx].append((line_idx, (slice(s_line.bounds[1], s_line.bounds[3]), slice(s_line.bounds[0], s_line.bounds[2])))) in_region = True break if not in_region: bounds.append((slice(s_line.bounds[1], s_line.bounds[3]), slice(s_line.bounds[0], s_line.bounds[2]))) indizes[line_idx] = ('line', line) # order everything in regions intra_region_order = [[] for _ in range(len(r))] for idx, reg in enumerate(r): if len(region_lines[idx]) > 0: order = reading_order([x[1] for x in region_lines[idx]], text_direction) lsort = topsort(order) intra_region_order[idx] = [region_lines[idx][i][0] for i in lsort] reg = reg.bounds bounds.append((slice(reg[1], reg[3]), slice(reg[0], reg[2]))) indizes[line_idx+idx+1] = ('region', idx) # order unassigned lines and regions order = reading_order(bounds, text_direction) lsort = topsort(order) sidz = sorted(indizes.keys()) lsort = [sidz[i] for i in lsort] ordered_lines = [] for i in lsort: if indizes[i][0] == 'line': ordered_lines.append(indizes[i][1]) else: ordered_lines.extend(lines[x] for x in intra_region_order[indizes[i][1]]) return ordered_lines def is_in_region(line, region) -> bool: """ Tests if a line is inside a region, i.e. if the mid point of the baseline is inside the region. Args: line (geom.LineString): line to test region (geom.Polygon): region to test against Returns: False if line is not inside region, True otherwise. """ l_obj = line.interpolate(0.5, normalized=True) return region.contains(l_obj) def scale_regions(regions: Sequence[Tuple[List[int], List[int]]], scale: Union[float, Tuple[float, float]]) -> Sequence[Tuple[List, List]]: """ Scales baselines/polygon coordinates by a certain factor. Args: lines (Sequence): List of tuples containing the baseline and it's polygonization. scale (float or tuple of floats): Scaling factor """ if isinstance(scale, float): scale = (scale, scale) scaled_regions = [] for region in regions: scaled_regions.append((np.array(region) * scale).astype('uint').tolist()) return scaled_regions def scale_polygonal_lines(lines: Sequence[Tuple[List, List]], scale: Union[float, Tuple[float, float]]) -> Sequence[Tuple[List, List]]: """ Scales baselines/polygon coordinates by a certain factor. Args: lines (Sequence): List of tuples containing the baseline and it's polygonization. scale (float or tuple of floats): Scaling factor """ if isinstance(scale, float): scale = (scale, scale) scaled_lines = [] for line in lines: bl, pl = line scaled_lines.append(((np.array(bl) * scale).astype('int').tolist(), (np.array(pl) * scale).astype('int').tolist())) return scaled_lines def _test_intersect(bp, uv, bs): """ Returns the intersection points of a ray with direction `uv` from `bp` with a polygon `bs`. 
""" u = bp - np.roll(bs, 2) v = bs - np.roll(bs, 2) points = [] for dir in ((1, -1), (-1, 1)): w = (uv * dir * (1, -1))[::-1] z = np.dot(v, w) t1 = np.cross(v, u) / (z + np.finfo(float).eps) t2 = np.dot(u, w) / (z + np.finfo(float).eps) t1 = t1[np.logical_and(t2 >= 0.0, t2 <= 1.0)] points.extend(bp + (t1[np.where(t1 >= 0)[0].min()] * (uv * dir))) return np.array(points) def compute_polygon_section(baseline: Sequence[Tuple[int, int]], boundary: Sequence[Tuple[int, int]], dist1: int, dist2: int) -> Tuple[Tuple[int, int]]: """ Given a baseline, polygonal boundary, and two points on the baseline return the rectangle formed by the orthogonal cuts on that baseline segment. The resulting polygon is not garantueed to have a non-zero area. The distance can be larger than the actual length of the baseline if the baseline endpoints are inside the bounding polygon. In that case the baseline will be extrapolated to the polygon edge. Args: baseline (list): A polyline ((x1, y1), ..., (xn, yn)) boundary (list): A bounding polygon around the baseline (same format as baseline). dist1 (int): Absolute distance along the baseline of the first point. dist2 (int): Absolute distance along the baseline of the second point. Returns: A sequence of polygon points. """ # find baseline segments the points are in if dist1 == 0: dist1 = np.finfo(float).eps if dist2 == 0: dist2 = np.finfo(float).eps boundary_pol = geom.Polygon(boundary) bl = np.array(baseline) # extend first/last segment of baseline if not on polygon boundary if boundary_pol.contains(geom.Point(bl[0])): logger.debug(f'Extending leftmost end of baseline {bl} to polygon boundary') l_point = boundary_pol.boundary.intersection(geom.LineString( [(bl[0][0]-10*(bl[1][0]-bl[0][0]), bl[0][1]-10*(bl[1][1]-bl[0][1])), bl[0]])) # intersection is incidental with boundary so take closest point instead if l_point.geom_type != 'Point': bl[0] = np.array(nearest_points(geom.Point(bl[0]), boundary_pol)[1].coords[0], 'int') else: bl[0] = np.array(l_point.coords[0], 'int') if boundary_pol.contains(geom.Point(bl[-1])): logger.debug(f'Extending rightmost end of baseline {bl} to polygon boundary') r_point = boundary_pol.boundary.intersection(geom.LineString( [(bl[-1][0]-10*(bl[-2][0]-bl[-1][0]), bl[-1][1]-10*(bl[-2][1]-bl[-1][1])), bl[-1]])) if r_point.geom_type != 'Point': bl[-1] = np.array(nearest_points(geom.Point(bl[-1]), boundary_pol)[1].coords[0], 'int') else: bl[-1] = np.array(r_point.coords[0], 'int') dist1 = min(geom.LineString(bl).length - np.finfo(float).eps, dist1) dist2 = min(geom.LineString(bl).length - np.finfo(float).eps, dist2) dists = np.cumsum(np.diag(np.roll(squareform(pdist(bl)), 1))) segs_idx = np.searchsorted(dists, [dist1, dist2]) segs = np.dstack((bl[segs_idx-1], bl[segs_idx])) # compute unit vector of segments (NOT orthogonal) norm_vec = (segs[..., 1] - segs[..., 0]) norm_vec_len = np.sqrt(np.sum(norm_vec**2, axis=1)) unit_vec = norm_vec / np.tile(norm_vec_len, (2, 1)).T # find point start/end point on segments seg_dists = (dist1, dist2) - dists[segs_idx-1] seg_points = segs[..., 0] + (seg_dists * unit_vec.T).T # get intersects bounds = np.array(boundary) try: points = [_test_intersect(point, uv[::-1], bounds).round() for point, uv in zip(seg_points, unit_vec)] except ValueError: logger.debug('No intercepts with polygon (possibly misshaped polygon)') return seg_points.astype('int').tolist() o = np.int_(points[0]).reshape(-1, 2).tolist() o.extend(np.int_(np.roll(points[1], 2)).reshape(-1, 2).tolist()) return tuple(o) def extract_polygons(im: Image.Image, 
bounds: Dict[str, Any]) -> Image.Image: """ Yields the subimages of image im defined in the list of bounding polygons with baselines preserving order. Args: im: Input image bounds: A list of dicts in baseline:: {'type': 'baselines', 'lines': [{'baseline': [[x_0, y_0], ... [x_n, y_n]], 'boundary': [[x_0, y_0], ... [x_n, y_n]]}, ....] } or bounding box format:: {'boxes': [[x_0, y_0, x_1, y_1], ...], 'text_direction': 'horizontal-lr'} Yields: The extracted subimage """ if 'type' in bounds and bounds['type'] == 'baselines': # select proper interpolation scheme depending on shape if im.mode == '1': order = 0 im = im.convert('L') else: order = 1 im = np.array(im) for line in bounds['lines']: if line['boundary'] is None: raise KrakenInputException('No boundary given for line') pl = np.array(line['boundary']) baseline = np.array(line['baseline']) c_min, c_max = int(pl[:, 0].min()), int(pl[:, 0].max()) r_min, r_max = int(pl[:, 1].min()), int(pl[:, 1].max()) if (pl < 0).any() or (pl.max(axis=0)[::-1] >= im.shape[:2]).any(): raise KrakenInputException('Line polygon outside of image bounds') if (baseline < 0).any() or (baseline.max(axis=0)[::-1] >= im.shape[:2]).any(): raise KrakenInputException('Baseline outside of image bounds') # fast path for straight baselines requiring only rotation if len(baseline) == 2: baseline = baseline.astype(float) # calculate direction vector lengths = np.linalg.norm(np.diff(baseline.T), axis=0) p_dir = np.mean(np.diff(baseline.T) * lengths/lengths.sum(), axis=1) p_dir = (p_dir.T / np.sqrt(np.sum(p_dir**2, axis=-1))) angle = np.arctan2(p_dir[1], p_dir[0]) patch = im[r_min:r_max+1, c_min:c_max+1].copy() offset_polygon = pl - (c_min, r_min) r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0]) mask = np.zeros(patch.shape[:2], dtype=bool) mask[r, c] = True patch[mask != True] = 0 extrema = offset_polygon[(0, -1), :] # scale line image to max 600 pixel width tform, rotated_patch = _rotate(patch, angle, center=extrema[0], scale=1.0, cval=0) i = Image.fromarray(rotated_patch.astype('uint8')) # normal slow path with piecewise affine transformation else: if len(pl) > 50: pl = approximate_polygon(pl, 2) full_polygon = subdivide_polygon(pl, preserve_ends=True) pl = geom.MultiPoint(full_polygon) bl = zip(baseline[:-1:], baseline[1::]) bl = [geom.LineString(x) for x in bl] cum_lens = np.cumsum([0] + [line.length for line in bl]) # distance of intercept from start point and number of line segment control_pts = [] for point in pl.geoms: npoint = np.array(point.coords)[0] line_idx, dist, intercept = min(((idx, line.project(point), np.array(line.interpolate(line.project(point)).coords)) for idx, line in enumerate(bl)), key=lambda x: np.linalg.norm(npoint-x[2])) # absolute distance from start of line line_dist = cum_lens[line_idx] + dist intercept = np.array(intercept) # side of line the point is at side = np.linalg.det(np.array([[baseline[line_idx+1][0]-baseline[line_idx][0], npoint[0]-baseline[line_idx][0]], [baseline[line_idx+1][1]-baseline[line_idx][1], npoint[1]-baseline[line_idx][1]]])) side = np.sign(side) # signed perpendicular distance from the rectified distance per_dist = side * np.linalg.norm(npoint-intercept) control_pts.append((line_dist, per_dist)) # calculate baseline destination points bl_dst_pts = baseline[0] + np.dstack((cum_lens, np.zeros_like(cum_lens)))[0] # calculate bounding polygon destination points pol_dst_pts = np.array([baseline[0] + (line_dist, per_dist) for line_dist, per_dist in control_pts]) # extract bounding box patch c_dst_min, c_dst_max = 
int(pol_dst_pts[:, 0].min()), int(pol_dst_pts[:, 0].max()) r_dst_min, r_dst_max = int(pol_dst_pts[:, 1].min()), int(pol_dst_pts[:, 1].max()) output_shape = np.around((r_dst_max - r_dst_min + 1, c_dst_max - c_dst_min + 1)) patch = im[r_min:r_max+1, c_min:c_max+1].copy() # offset src points by patch shape offset_polygon = full_polygon - (c_min, r_min) offset_baseline = baseline - (c_min, r_min) # offset dst point by dst polygon shape offset_bl_dst_pts = bl_dst_pts - (c_dst_min, r_dst_min) offset_pol_dst_pts = pol_dst_pts - (c_dst_min, r_dst_min) # mask out points outside bounding polygon mask = np.zeros(patch.shape[:2], dtype=bool) r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0]) mask[r, c] = True patch[mask != True] = 0 # estimate piecewise transform src_points = np.concatenate((offset_baseline, offset_polygon)) dst_points = np.concatenate((offset_bl_dst_pts, offset_pol_dst_pts)) tform = PiecewiseAffineTransform() tform.estimate(src_points, dst_points) o = warp(patch, tform.inverse, output_shape=output_shape, preserve_range=True, order=order) i = Image.fromarray(o.astype('uint8')) yield i.crop(i.getbbox()), line else: if bounds['text_direction'].startswith('vertical'): angle = 90 else: angle = 0 for box in bounds['boxes']: if isinstance(box, tuple): box = list(box) if (box < [0, 0, 0, 0] or box[::2] >= [im.size[0], im.size[0]] or box[1::2] >= [im.size[1], im.size[1]]): logger.error('bbox {} is outside of image bounds {}'.format(box, im.size)) raise KrakenInputException('Line outside of image bounds') yield im.crop(box).rotate(angle, expand=True), box
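# Illustrative usage sketch (an addition, not part of the original module):
# extracting line images from a baseline-style segmentation result. The
# image path and coordinates are hypothetical and must lie inside the image.
#
# from PIL import Image
# im = Image.open('page.png')
# bounds = {'type': 'baselines',
#           'lines': [{'baseline': [[10, 40], [400, 42]],
#                      'boundary': [[8, 10], [402, 10], [402, 60], [8, 60]]}]}
# for line_im, line in extract_polygons(im, bounds):
#     line_im.save('line_0.png')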
45,374
41.052827
152
py
kraken
kraken-main/kraken/lib/log.py
#
# Copyright 2018 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.lib.log
~~~~~~~~~~~~~~

Handlers and formatters for logging.
"""
import logging

from rich.logging import RichHandler


def set_logger(logger=None, level=logging.ERROR):
    if logger is None:
        # guard the default: the previous code dereferenced None here
        logger = logging.getLogger('kraken')
    logger.addHandler(RichHandler(rich_tracebacks=True))
    logger.setLevel(level)
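# Illustrative usage sketch (an addition, not part of the original module):
#
# import logging
# logger = logging.getLogger('kraken')
# set_logger(logger, level=logging.INFO)
# logger.info('now printed through the RichHandler')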
848
28.275862
69
py
kraken
kraken-main/kraken/lib/arrow_dataset.py
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Arrow IPC format dataset builders.
"""
__all__ = ['build_binary_dataset']

import io
import json
import numpy as np
import pyarrow as pa
import tempfile

from PIL import Image, UnidentifiedImageError
from functools import partial
from collections import Counter
from typing import Optional, List, Union, Callable, Tuple, Dict
from multiprocessing import Pool
from kraken.lib import functional_im_transforms as F_t
from kraken.lib.segmentation import extract_polygons
from kraken.lib.xml import parse_xml, parse_alto, parse_page
from kraken.lib.util import is_bitonal, make_printable
from kraken.lib.exceptions import KrakenInputException

from os import extsep, PathLike

import logging

logger = logging.getLogger(__name__)


def _extract_line(xml_record, skip_empty_lines: bool = True):
    lines = []
    try:
        im = Image.open(xml_record['image'])
    except (FileNotFoundError, UnidentifiedImageError):
        # return a 2-tuple like the success path so callers can unpack it
        return lines, None
    if is_bitonal(im):
        im = im.convert('1')
    seg_key = 'lines' if 'lines' in xml_record else 'boxes'
    recs = xml_record.pop(seg_key)
    for idx, rec in enumerate(recs):
        try:
            line_im, line = next(extract_polygons(im, {**xml_record, seg_key: [rec]}))
        except KrakenInputException as e:
            logger.warning(f'Invalid line {idx} in {im.filename}: {e}')
            continue
        except Exception as e:
            logger.warning(f'Unexpected exception {e} from line {idx} in {im.filename}')
            continue
        if not line['text'] and skip_empty_lines:
            continue
        fp = io.BytesIO()
        line_im.save(fp, format='png')
        lines.append({'text': line['text'], 'im': fp.getvalue()})
    return lines, im.mode


def _extract_path_line(xml_record, skip_empty_lines: bool = True):
    try:
        im = Image.open(xml_record['image'])
    except (FileNotFoundError, UnidentifiedImageError):
        # return a 2-tuple like the success path so callers can unpack it
        return [], None
    if not xml_record['lines'][0]['text'] and skip_empty_lines:
        return [], None
    if is_bitonal(im):
        im = im.convert('1')
    fp = io.BytesIO()
    im.save(fp, format='png')
    line = {'text': xml_record['lines'][0]['text'], 'im': fp.getvalue()}
    return [line], im.mode


def parse_path(path: Union[str, PathLike],
               suffix: str = '.gt.txt',
               split=F_t.default_split,
               skip_empty_lines: bool = True):
    with open(F_t.suffix_split(path, split=split, suffix=suffix), 'r', encoding='utf-8') as fp:
        gt = fp.read().strip('\n\r')
        if not gt and skip_empty_lines:
            raise KrakenInputException(f'No text for ground truth line {path}.')
    return {'image': path, 'lines': [{'text': gt}]}


def build_binary_dataset(files: Optional[List[Union[str, PathLike, Dict]]] = None,
                         output_file: Union[str, PathLike] = None,
                         format_type: str = 'xml',
                         num_workers: int = 0,
                         ignore_splits: bool = False,
                         random_split: Optional[Tuple[float, float, float]] = None,
                         force_type: Optional[str] = None,
                         recordbatch_size: int = 100,
                         skip_empty_lines: bool = True,
                         callback: Callable[[int, int], None] = lambda chunk, lines: None) -> None:
    """
    Parses XML files and dumps the baseline-style line images and text into a
    binary dataset.

    Args:
        files: List of XML input files.
output_file: Path to the output file. format_type: One of `xml`, `alto`, `page`, `path`, or None. In `None` mode, the files argument is expected to be a list of dictionaries in the output format of the `kraken.lib.xml.parse_{alto,page,xml}` functions. num_workers: Number of workers for parallelized extraction of line images. Set to `0` to disable parallelism. ignore_splits: Switch to disable serialization of the explicit train/validation/test splits contained in the source files. random_split: Serializes a random split into the dataset with the proportions (train, val, test). force_type: Forces a dataset type. Can be `kraken_recognition_baseline` or `kraken_recognition_bbox`. recordbatch_size: Minimum number of records per RecordBatch written to the output file. Larger batches require more transient memory but slightly improve reading performance. skip_empty_lines: Do not compile empty text lines into the dataset. callback: Function called every time a new recordbatch is flushed into the Arrow IPC file. """ logger.info('Parsing XML files') extract_fn = partial(_extract_line, skip_empty_lines=skip_empty_lines) parse_fn = None if format_type == 'xml': parse_fn = parse_xml elif format_type == 'alto': parse_fn = parse_alto elif format_type == 'page': parse_fn = parse_page elif format_type == 'path': if not ignore_splits: logger.warning('ignore_splits is False and format_type is path. Will not serialize splits.') parse_fn = partial(parse_path, skip_empty_lines=skip_empty_lines) extract_fn = partial(_extract_path_line, skip_empty_lines=skip_empty_lines) elif format_type is None: pass else: raise ValueError(f'invalid format {format_type} for parse_(xml,alto,page,path)') if force_type and force_type not in ['kraken_recognition_baseline', 'kraken_recognition_bbox']: raise ValueError(f'force_type set to invalid value {force_type}') docs = [] if parse_fn: for doc in files: try: data = parse_fn(doc) except (FileNotFoundError, KrakenInputException, ValueError): logger.warning(f'Invalid input file {doc}') continue try: name_ext = str(data['image']).split(extsep, 1) if name_ext[1] == 'gt.txt': data['image'] = name_ext[0] + '.png' with open(data['image'], 'rb') as fp: Image.open(fp) except (FileNotFoundError, UnidentifiedImageError) as e: logger.warning(f'Could not open file {e.filename} in {doc}') continue docs.append(data) logger.info(f'Parsed {len(docs)} files.') else: docs = files.copy() logger.info(f'Got {len(docs)} preparsed files.') logger.info('Assembling dataset alphabet.') alphabet = Counter() num_lines = 0 for doc in docs: for line in doc['lines']: num_lines += 1 alphabet.update(line['text']) callback(0, num_lines) for k, v in sorted(alphabet.items(), key=lambda x: x[1], reverse=True): char = make_printable(k) if char == k: char = '\t' + char logger.info(f'{char}\t{v}') if force_type: ds_type = force_type else: ds_type = 'kraken_recognition_baseline' if format_type != 'path' else 'kraken_recognition_bbox' metadata = {'lines': {'type': ds_type, 'alphabet': alphabet, 'text_type': 'raw', 'image_type': 'raw', 'splits': ['train', 'eval', 'test'], 'im_mode': '1', 'counts': Counter({'all': 0, 'train': 0, 'validation': 0, 'test': 0 } ), } } ty = pa.struct([('text', pa.string()), ('im', pa.binary())]) schema = pa.schema([('lines', ty), ('train', pa.bool_()), ('validation', pa.bool_()), ('test', pa.bool_())]) def _make_record_batch(line_cache): ar = pa.array(line_cache, type=ty) if random_split: indices = np.random.choice(4, len(line_cache), p=(0.0,) + random_split) else: indices = 
np.zeros(len(line_cache)) tr_ind = np.zeros(len(line_cache), dtype=bool) tr_ind[indices == 1] = True val_ind = np.zeros(len(line_cache), dtype=bool) val_ind[indices == 2] = True test_ind = np.zeros(len(line_cache), dtype=bool) test_ind[indices == 3] = True train_mask = pa.array(tr_ind) val_mask = pa.array(val_ind) test_mask = pa.array(test_ind) rbatch = pa.RecordBatch.from_arrays([ar, train_mask, val_mask, test_mask], schema=schema) return rbatch, (len(line_cache), int(sum(indices == 1)), int(sum(indices == 2)), int(sum(indices == 3))) line_cache = [] logger.info('Writing lines to temporary file.') with tempfile.TemporaryDirectory() as tmp_output_dir: tmp_file = tmp_output_dir + '/dataset.arrow' with pa.OSFile(tmp_file, 'wb') as sink: with pa.ipc.new_file(sink, schema) as writer: if num_workers and num_workers > 1: logger.info(f'Spinning up processing pool with {num_workers} workers.') with Pool(num_workers) as pool: for page_lines, im_mode in pool.imap_unordered(extract_fn, docs): if page_lines: line_cache.extend(page_lines) # comparison RGB(A) > L > 1 if im_mode > metadata['lines']['im_mode']: metadata['lines']['im_mode'] = im_mode if len(line_cache) >= recordbatch_size: logger.info(f'Flushing {len(line_cache)} lines into {tmp_file}.') rbatch, counts = _make_record_batch(line_cache) metadata['lines']['counts'].update({'all': counts[0], 'train': counts[1], 'validation': counts[2], 'test': counts[3]}) writer.write(rbatch) callback(len(line_cache), num_lines) line_cache = [] else: for page_lines, im_mode in map(extract_fn, docs): if page_lines: line_cache.extend(page_lines) # comparison RGB(A) > L > 1 if im_mode > metadata['lines']['im_mode']: metadata['lines']['im_mode'] = im_mode if len(line_cache) >= recordbatch_size: logger.info(f'Flushing {len(line_cache)} lines into {tmp_file}.') rbatch, counts = _make_record_batch(line_cache) metadata['lines']['counts'].update({'all': counts[0], 'train': counts[1], 'validation': counts[2], 'test': counts[3]}) writer.write(rbatch) callback(len(line_cache), num_lines) line_cache = [] if line_cache: logger.info(f'Flushing last {len(line_cache)} lines into {tmp_file}.') rbatch, counts = _make_record_batch(line_cache) metadata['lines']['counts'].update({'all': counts[0], 'train': counts[1], 'validation': counts[2], 'test': counts[3]}) writer.write(rbatch) callback(len(line_cache), num_lines) logger.info('Dataset metadata') logger.info(f"type: {metadata['lines']['type']}\n" f"text_type: {metadata['lines']['text_type']}\n" f"image_type: {metadata['lines']['image_type']}\n" f"splits: {metadata['lines']['splits']}\n" f"im_mode: {metadata['lines']['im_mode']}\n" f"lines: {metadata['lines']['counts']}\n") with pa.memory_map(tmp_file, 'rb') as source: logger.info(f'Rewriting output ({output_file}) to update metadata.') ds = pa.ipc.open_file(source).read_all() metadata['lines']['counts'] = dict(metadata['lines']['counts']) metadata['lines'] = json.dumps(metadata['lines']) schema = schema.with_metadata(metadata) with pa.OSFile(output_file, 'wb') as sink: with pa.ipc.new_file(sink, schema) as writer: writer.write(ds)
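One quirk worth flagging in the helpers above: the failure paths of `_extract_line` and `_extract_path_line` return three values (`lines, None, None`) while the success paths and the consuming loops (`for page_lines, im_mode in ...`) expect two, so the error returns presumably want to be `return lines, None` and `return [], None`. Reading a finished dataset back requires nothing beyond pyarrow; a minimal sketch, assuming a file written by `build_binary_dataset` (the path `dataset.arrow` is a placeholder):

import io
import json

import pyarrow as pa
from PIL import Image

with pa.memory_map('dataset.arrow', 'rb') as source:
    ds = pa.ipc.open_file(source)
    # dataset-level metadata is stored as a JSON string under the 'lines' key
    meta = json.loads(ds.schema.metadata[b'lines'])
    print(meta['type'], meta['counts'])
    table = ds.read_all()

# each row holds a {'text': str, 'im': png bytes} struct plus three split flags
row = table['lines'][0].as_py()
line_im = Image.open(io.BytesIO(row['im']))
print(row['text'], line_im.size, table['train'][0].as_py())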
13,976
43.942122
112
py
kraken
kraken-main/kraken/lib/default_specs.py
#
# Copyright 2020 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Default VGSL specs and hyperparameters
"""

SEGMENTATION_SPEC = '[1,1800,0,3 Cr7,7,64,2,2 Gn32 Cr3,3,128,2,2 Gn32 Cr3,3,128 Gn32 Cr3,3,256 Gn32 Cr3,3,256 Gn32 Lbx32 Lby32 Cr1,1,32 Gn32 Lby32 Lbx32]'  # NOQA
RECOGNITION_SPEC = '[1,120,0,1 Cr3,13,32 Do0.1,2 Mp2,2 Cr3,13,32 Do0.1,2 Mp2,2 Cr3,9,64 Do0.1,2 Mp2,2 Cr3,9,64 Do0.1,2 S1(1x0)1,3 Lbx200 Do0.1,2 Lbx200 Do0.1,2 Lbx200 Do]'  # NOQA

RECOGNITION_PRETRAIN_HYPER_PARAMS = {'pad': 16,
                                     'freq': 1.0,
                                     'batch_size': 64,
                                     'quit': 'early',
                                     'epochs': -1,
                                     'min_epochs': 100,
                                     'lag': 5,
                                     'min_delta': None,
                                     'optimizer': 'Adam',
                                     'lrate': 1e-6,
                                     'momentum': 0.9,
                                     'weight_decay': 0.01,
                                     'schedule': 'cosine',
                                     'completed_epochs': 0,
                                     'augment': False,
                                     # lr scheduler params
                                     # step/exp decay
                                     'step_size': 10,
                                     'gamma': 0.1,
                                     # reduce on plateau
                                     'rop_factor': 0.1,
                                     'rop_patience': 5,
                                     # cosine
                                     'cos_t_max': 100,
                                     # masking parameters
                                     'mask_width': 4,
                                     'mask_prob': 0.5,
                                     'num_negatives': 100,
                                     'logit_temp': 0.1,
                                     'warmup': 32000,
                                     }

RECOGNITION_HYPER_PARAMS = {'pad': 16,
                            'freq': 1.0,
                            'batch_size': 1,
                            'quit': 'early',
                            'epochs': -1,
                            'min_epochs': 0,
                            'lag': 10,
                            'min_delta': None,
                            'optimizer': 'Adam',
                            'lrate': 1e-3,
                            'momentum': 0.9,
                            'weight_decay': 0.0,
                            'schedule': 'constant',
                            'normalization': None,
                            'normalize_whitespace': True,
                            'completed_epochs': 0,
                            'augment': False,
                            # lr scheduler params
                            # step/exp decay
                            'step_size': 10,
                            'gamma': 0.1,
                            # reduce on plateau
                            'rop_factor': 0.1,
                            'rop_patience': 5,
                            # cosine
                            'cos_t_max': 50,
                            'warmup': 0,
                            'freeze_backbone': 0,
                            }

SEGMENTATION_HYPER_PARAMS = {'line_width': 8,
                             'padding': (0, 0),
                             'freq': 1.0,
                             'quit': 'dumb',
                             'epochs': 50,
                             'min_epochs': 0,
                             'lag': 10,
                             'min_delta': None,
                             'optimizer': 'Adam',
                             'lrate': 2e-4,
                             'momentum': 0.9,
                             'weight_decay': 1e-5,
                             'schedule': 'constant',
                             'completed_epochs': 0,
                             'augment': False,
                             # lr scheduler params
                             # step/exp decay
                             'step_size': 10,
                             'gamma': 0.1,
                             # reduce on plateau
                             'rop_factor': 0.1,
                             'rop_patience': 5,
                             # cosine
                             'cos_t_max': 50,
                             'warmup': 0,
                             }
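The leading block of a VGSL spec encodes the expected input tensor as `batch,height,width,channels`, with `0` marking a free dimension, and the hyperparameter dicts are plain dictionaries meant to be copied and overridden. A small sketch mirroring the input parsing done in `RecognitionModel` further below:

import re

from kraken.lib import default_specs

spec = default_specs.RECOGNITION_SPEC.strip()
blocks = spec[1:-1].split(' ')
m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
batch, height, width, channels = (int(x) for x in m.groups())
print(batch, height, width, channels)  # 1 120 0 1: fixed 120px height, free width, 1 channel

# overriding training defaults without mutating the module-level dict
hp = default_specs.RECOGNITION_HYPER_PARAMS.copy()
hp.update({'batch_size': 16, 'schedule': 'cosine', 'cos_t_max': 30})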
5,152
45.845455
178
py
kraken
kraken-main/kraken/lib/layers.py
""" Layers for VGSL models """ import torch import numpy as np from typing import List, Tuple, Optional, Iterable from torch.nn import Module, Sequential from torch.nn import functional as F from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence from coremltools.proto import NeuralNetwork_pb2 # all tensors are ordered NCHW, the "feature" dimension is C, so the output of # an LSTM will be put into C same as the filters of a CNN. __all__ = ['Addition', 'MaxPool', 'Reshape', 'Dropout', 'TransposedSummarizingRNN', 'LinSoftmax', 'ActConv2D'] class MultiParamSequential(Sequential): """ Sequential variant accepting multiple arguments. """ def forward(self, *inputs, output_shape: Optional[Tuple[int, int]] = None): modules = self._modules.values() i = 0 for module in modules: if type(inputs) == tuple: inputs = module(*inputs, output_shape=output_shape if i == len(modules) - 1 else None) else: inputs = module(inputs, output_shape=output_shape if i == len(modules) - 1 else None) i += 1 return inputs class MultiParamParallel(Module): """ Parallel module. """ def forward(self, *inputs, output_shape: Optional[Tuple[int, int]] = None): outputs = [] seq_lens = None for module in self._modules.values(): if type(inputs) == tuple: output, seq_lens = module(*inputs, output_shape=output_shape) outputs.append(output) else: outputs.append(module(inputs, output_shape=output_shape)) if output_shape is None: output_shape = outputs[-1].shape[2:] return torch.cat(outputs, dim=1), seq_lens def PeepholeLSTMCell(input: torch.Tensor, hidden: Tuple[torch.Tensor, torch.Tensor], w_ih: torch.Tensor, w_hh: torch.Tensor, w_ip: torch.Tensor, w_fp: torch.Tensor, w_op: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ An LSTM cell with peephole connections without biases. Mostly ripped from the pytorch autograd lstm implementation. 
""" hx, cx = hidden gates = F.linear(input, w_ih) + F.linear(hx, w_hh) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) peep_i = w_ip.unsqueeze(0).expand_as(cx) * cx ingate = ingate + peep_i peep_f = w_fp.unsqueeze(0).expand_as(cx) * cx forgetgate = forgetgate + peep_f ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) cy = (forgetgate * cx) + (ingate * cellgate) peep_o = w_op.unsqueeze(0).expand_as(cy) * cy outgate = outgate + peep_o hy = outgate * F.tanh(cy) return hy, cy def StackedRNN(inners, num_layers: int, num_directions: int): num_directions = len(inners) total_layers = num_layers * num_directions def forward(input, hidden, weight): next_hidden = [] for i in range(num_layers): all_output = [] for j, inner in enumerate(inners): length = i * num_directions + j hy, output = inner(input, hidden[length], weight[length]) next_hidden.append(hy) all_output.append(output) input = torch.cat(all_output, input.dim() - 1) next_h, next_c = zip(*next_hidden) next_hidden = [ torch.cat(next_h, 0).view(total_layers, *next_h[0].size()), torch.cat(next_c, 0).view(total_layers, *next_c[0].size()) ] return next_hidden, input return forward def Recurrent(inner, reverse: bool = False): def forward(input, hidden, weight): output = [] steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0)) for i in steps: hidden = inner(input[i], hidden, *weight) # hack to handle LSTM output.append(hidden[0] if isinstance(hidden, tuple) else hidden) if reverse: output.reverse() output = torch.cat(output, 0).view(input.size(0), *output[0].size()) return hidden, output return forward class PeepholeBidiLSTM(Module): def __init__(self, input_size: int, hidden_size: int) -> None: super().__init__() self.input_size = input_size self.hidden_size = hidden_size self._all_weights = [] # type: List[List[str]] gate_size = 4 * hidden_size for direction in range(2): w_ih = torch.nn.Parameter(torch.Tensor(gate_size, input_size)) w_hh = torch.nn.Parameter(torch.Tensor(gate_size, hidden_size)) w_ci = torch.nn.Parameter(torch.Tensor(hidden_size)) w_cf = torch.nn.Parameter(torch.Tensor(hidden_size)) w_co = torch.nn.Parameter(torch.Tensor(hidden_size)) layer_params = (w_ih, w_hh, w_ci, w_cf, w_co) suffix = '_reverse' if direction == 1 else '' param_names = ['weight_ih_l0{}', 'weight_hh_l0{}', 'weight_ip_l0{}', 'weight_fp_l0{}', 'weight_op_l0{}'] param_names = [x.format(suffix) for x in param_names] for name, param in zip(param_names, layer_params): setattr(self, name, param) self._all_weights.append(param_names) def forward(self, input: torch.Tensor, output_shape: Optional[List[int]] = None) -> torch.Tensor: layer = (Recurrent(PeepholeLSTMCell), Recurrent(PeepholeLSTMCell, reverse=True)) func = StackedRNN(layer, 1, 2) input = input.transpose(0, 1) hidden = (torch.zeros(2, input.shape[1], self.hidden_size).to(input.device), torch.zeros(2, input.shape[1], self.hidden_size).to(input.device)) hidden, output = func(input, hidden, self.all_weights) output = output.transpose(0, 1) return output, hidden @property def all_weights(self): return [[getattr(self, weight) for weight in weights] for weights in self._all_weights] class Addition(Module): """ An addition module """ def __init__(self, dim: int, chunk_size: int) -> None: """ An addition module Shape: - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H` height, and `W` width. 
- Outputs output :math:`(N, C, H, W)` """ self.dim = dim self.chunk_size = chunk_size super().__init__() def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[List[int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: out = inputs.unfold(self.dim, self.chunk_size, self.chunk_size) out = out.sum(self.dim, keepdim=True) out = out.transpose(-1, self.dim).squeeze(-1) return out, seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: """ Calculates the output shape from input 4D tuple NCHW. """ input = list(input) input[self.dim] = self.chunk_size self.output_shape = tuple(input) return self.output_shape def deserialize(self, name, spec): """ Noop for deserialization """ pass def serialize(self, name, input, builder): params = NeuralNetwork_pb2.CustomLayerParams() params.className = 'addition' params.description = 'An addition layer' params.parameters['dim'].intValue = self.dim params.parameters['chunk_size'].intValue = self.chunk_size builder.add_custom(name, input_names=[input], output_names=[name], custom_proto_spec=params) return name class Identity(Module): """ A placeholder identity operator. """ def __init__(self) -> None: """ A placeholder identity operator (mostly used for residual connections and similar). Shape: - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H` height, and `W` width. - Outputs output :math:`(N, C, H, W)` """ super().__init__() def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: return inputs, seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: self.output_shape = input return input def deserialize(self, name, spec): """ Noop for deserialization """ pass def serialize(self, name, input, builder): params = NeuralNetwork_pb2.CustomLayerParams() params.className = 'identity' params.description = 'An identity layer' builder.add_custom(name, input_names=[input], output_names=[name], custom_proto_spec=params) return name class Reshape(Module): """ Reshapes input and moves it into other dimensions. """ def __init__(self, src_dim: int, part_a: int, part_b: int, high: int, low: int) -> None: """ A wrapper around reshape with serialization and layer arithmetic. Args: src_dim (int): Source dimension part_a (int): Size of split dim to move to `high` part_b (int): Size of split dim to move to `low` high (int): Target dimension 1 low (int): Target dimension 2 Shape: - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H` height, and `W` width. 
- Outputs output :math:`(N, C, H, W)` """ super().__init__() self.src_dim = src_dim self.part_a = part_a self.part_b = part_b self.high = high self.low = low def forward(self, input: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: initial_len = input.shape[3] # split dimension src_dim into part_a x part_b input = input.reshape(input.shape[:self.src_dim] + (self.part_a, self.part_b) + input.shape[self.src_dim + 1:]) dest = self.low src_dim = self.src_dim if self.high != src_dim: dest = self.high else: src_dim += 1 # rotate dimension permutation list perm = list(range(len(input.shape))) step = 1 if dest > src_dim else -1 for x in range(src_dim, dest, step): perm[x], perm[x + step] = perm[x + step], perm[x] input = input.permute(perm) o = input.reshape(input.shape[:dest] + (input.shape[dest] * input.shape[dest + 1],) + input.shape[dest + 2:]) if seq_len is not None: seq_len = (seq_len * (float(initial_len)/o.shape[3])).int() return o, seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: input_shape = torch.zeros([x if x else 1 for x in input]) with torch.no_grad(): o, _ = self.forward(input_shape) self.output_shape = tuple(o.shape) return self.output_shape # type: ignore def deserialize(self, name, spec): """ Noop for deserialization """ pass def serialize(self, name: str, input: str, builder) -> str: params = NeuralNetwork_pb2.CustomLayerParams() params.className = 'reshape' params.description = 'A generalized reshape layer' params.parameters['src_dim'].intValue = self.src_dim params.parameters['part_a'].intValue = self.part_a params.parameters['part_b'].intValue = self.part_b params.parameters['high'].intValue = self.high params.parameters['low'].intValue = self.low builder.add_custom(name, input_names=[input], output_names=[name], custom_proto_spec=params) return name class MaxPool(Module): """ A simple wrapper for MaxPool layers """ def __init__(self, kernel_size: Tuple[int, int], stride: Tuple[int, int]) -> None: """ A wrapper around MaxPool layers with serialization and layer arithmetic. """ super().__init__() self.kernel_size = kernel_size self.stride = stride self.layer = torch.nn.MaxPool2d(kernel_size, stride) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: o = self.layer(inputs) if seq_len is not None: seq_len = torch.floor((seq_len-(self.kernel_size[1]-1)-1).float()/self.stride[1]+1).int() return o, seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: self.output_shape = (input[0], input[1], int(np.floor((input[2]-(self.kernel_size[0]-1)-1)/self.stride[0]+1) if input[2] != 0 else 0), int(np.floor((input[3]-(self.kernel_size[1]-1)-1)/self.stride[1]+1) if input[3] != 0 else 0)) return self.output_shape def deserialize(self, name, spec) -> None: """ Noop for MaxPool deserialization """ pass def serialize(self, name: str, input: str, builder) -> str: builder.add_pooling(name, self.kernel_size[0], self.kernel_size[1], self.stride[0], self.stride[1], layer_type='MAX', padding_type='SAME', input_name=input, output_name=name) return name class Dropout(Module): """ A simple wrapper for dropout layers """ def __init__(self, p: float, dim: int) -> None: """ A wrapper around dropout layers with serialization and layer arithmetic. 
""" super().__init__() self.p = p self.dim = dim if dim == 1: self.layer = torch.nn.Dropout(p) elif dim == 2: self.layer = torch.nn.Dropout2d(p) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: return self.layer(inputs), seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: self.output_shape = input return input def deserialize(self, name, spec): """ Noop for deserialization """ pass def serialize(self, name, input, builder): params = NeuralNetwork_pb2.CustomLayerParams() params.className = 'dropout' params.description = 'An n-dimensional dropout layer' params.parameters['dim'].intValue = self.dim params.parameters['p'].doubleValue = self.p builder.add_custom(name, input_names=[input], output_names=[name], custom_proto_spec=params) return name class TransposedSummarizingRNN(Module): """ An RNN wrapper allowing time axis transpositions and other """ def __init__(self, input_size: int, hidden_size: int, direction: str = 'b', transpose: bool = True, summarize: bool = True, legacy: Optional[str] = None) -> None: """ A wrapper around torch.nn.LSTM optionally transposing inputs and returning only the last column of output. Args: input_size: hidden_size: direction (str): transpose (bool): Transpose width/height dimension summarize (bool): Only return the last time step. legacy (str): Set to `clstm` for clstm rnns and `ocropy` for ocropus models. Shape: - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H` height, and `W` width. - Outputs output :math:`(N, hidden_size * num_directions, H, S)` S (or H) being 1 if summarize (and transpose) are true """ super().__init__() self.transpose = transpose self.summarize = summarize self.legacy = legacy self.input_size = input_size if self.legacy is not None: self.input_size += 1 self.hidden_size = hidden_size self.bidi = direction == 'b' self.output_size = hidden_size if not self.bidi else 2*hidden_size if legacy == 'ocropy': self.layer = PeepholeBidiLSTM(self.input_size, hidden_size) else: self.layer = torch.nn.LSTM(self.input_size, hidden_size, bidirectional=self.bidi, batch_first=True, bias=False if legacy else True) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # NCHW -> HNWC inputs = inputs.permute(2, 0, 3, 1) if self.transpose: # HNWC -> WNHC inputs = inputs.transpose(0, 2) if self.legacy is not None: ones = torch.ones(inputs.shape[:3] + (1,)) inputs = torch.cat([ones, inputs], dim=3) # HNWC -> (H*N)WC siz = inputs.size() inputs = inputs.contiguous().view(-1, siz[2], siz[3]) if not self.transpose and seq_len is not None: if inputs.shape[0] != len(seq_len): raise Exception(f'Height has to be 1 (not f{inputs.shape[0]} for batching/multi-sequences.') seq_len = seq_len.cpu() inputs = pack_padded_sequence(inputs, seq_len, batch_first=True, enforce_sorted=False) # (H*N)WO o, _ = self.layer(inputs) if not self.transpose and seq_len is not None: o, seq_len = pad_packed_sequence(o, batch_first=True) # resize to HNWO o = o.view(siz[0], siz[1], siz[2], self.output_size) if self.summarize: # HN1O o = o[:, :, -1, :].unsqueeze(2) if self.transpose: o = o.transpose(0, 2) # HNWO -> NOHW if seq_len is not None and seq_len.max() > o.shape[2]: raise Exception('Do not use summarizing layer in x-axis with batching/sequences') return o.permute(1, 3, 0, 2), seq_len def 
get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: """ Calculates the output shape from input 4D tuple (batch, channel, input_size, seq_len). """ if self.summarize: if self.transpose: layer = (1, input[3]) else: layer = (input[2], 1) else: layer = (input[2], input[3]) self.output_shape = (input[0], self.output_size) + layer return self.output_shape # type: ignore def deserialize(self, name: str, spec) -> None: """ Sets the weights of an initialized layer from a coreml spec. """ nn = [x for x in spec.neuralNetwork.layers if x.name == name][0] arch = nn.WhichOneof('layer') layer = getattr(nn, arch) if arch == 'permute': nn = [x for x in spec.neuralNetwork.layers if x.input == nn.output][0] arch = nn.WhichOneof('layer') layer = getattr(nn, arch) def _deserialize_weights(params, layer, direction): # ih_matrix weight_ih = torch.Tensor([params.inputGateWeightMatrix.floatValue, # wi params.forgetGateWeightMatrix.floatValue, # wf params.blockInputWeightMatrix.floatValue, # wz/wg params.outputGateWeightMatrix.floatValue]) # wo # hh_matrix weight_hh = torch.Tensor([params.inputGateRecursionMatrix.floatValue, # wi params.forgetGateRecursionMatrix.floatValue, # wf params.blockInputRecursionMatrix.floatValue, # wz/wg params.outputGateRecursionMatrix.floatValue]) # wo if direction == 'fwd': layer.weight_ih_l0 = torch.nn.Parameter(weight_ih.resize_as_(layer.weight_ih_l0.data)) layer.weight_hh_l0 = torch.nn.Parameter(weight_hh.resize_as_(layer.weight_hh_l0.data)) elif direction == 'bwd': layer.weight_ih_l0_reverse = torch.nn.Parameter(weight_ih.resize_as_(layer.weight_ih_l0.data)) layer.weight_hh_l0_reverse = torch.nn.Parameter(weight_hh.resize_as_(layer.weight_hh_l0.data)) def _deserialize_biases(params, layer, direction): # ih biases biases = torch.Tensor([params.inputGateBiasVector.floatValue, # bi params.forgetGateBiasVector.floatValue, # bf params.blockInputBiasVector.floatValue, # bz/bg params.outputGateBiasVector.floatValue]) # bo if direction == 'fwd': layer.bias_hh_l0 = torch.nn.Parameter(biases.resize_as_(layer.bias_hh_l0.data)) # no ih_biases layer.bias_ih_l0 = torch.nn.Parameter(torch.zeros(layer.bias_ih_l0.size())) elif direction == 'bwd': layer.bias_hh_l0_reverse = torch.nn.Parameter(biases.resize_as_(layer.bias_hh_l0.data)) # no ih_biases layer.bias_ih_l0_reverse = torch.nn.Parameter(torch.zeros(layer.bias_ih_l0.size())) fwd_params = layer.weightParams if arch == 'uniDirectionalLSTM' else layer.weightParams[0] _deserialize_weights(fwd_params, self.layer, 'fwd') if not self.legacy: _deserialize_biases(fwd_params, self.layer, 'fwd') # get backward weights if arch == 'biDirectionalLSTM': bwd_params = layer.weightParams[1] _deserialize_weights(bwd_params, self.layer, 'bwd') if not self.legacy: _deserialize_biases(bwd_params, self.layer, 'bwd') def serialize(self, name: str, input: str, builder) -> str: """ Serializes the module using a NeuralNetworkBuilder. """ # coreml weight order is IFOG while pytorch uses IFGO # it also uses a single bias while pytorch splits them for some reason def _reorder_indim(tensor, splits=4, idx=[0, 1, 3, 2]): """ Splits the first dimension into `splits` chunks, reorders them according to idx, and convert them to a numpy array. 
""" s = tensor.chunk(splits) return [s[i].data.numpy() for i in idx] if self.transpose: ninput = '{}_transposed'.format(name) builder.add_permute(name=name, dim=[0, 1, 3, 2], input_name=input, output_name=ninput) input = ninput name = ninput if self.bidi: builder.add_bidirlstm(name=name, W_h=_reorder_indim(self.layer.weight_hh_l0), W_x=_reorder_indim(self.layer.weight_ih_l0), b=_reorder_indim((self.layer.bias_ih_l0 + self.layer.bias_hh_l0) ) if not self.legacy else None, W_h_back=_reorder_indim(self.layer.weight_hh_l0_reverse), W_x_back=_reorder_indim(self.layer.weight_ih_l0_reverse), b_back=_reorder_indim((self.layer.bias_ih_l0_reverse + self.layer.bias_hh_l0_reverse)) if not self.legacy else None, hidden_size=self.hidden_size, input_size=self.input_size, input_names=[input], output_names=[name], peep=[self.layer.weight_ip_l0.data.numpy(), self.layer.weight_fp_l0.data.numpy(), self.layer.weight_op_l0.data.numpy()] if self.legacy == 'ocropy' else None, peep_back=[self.layer.weight_ip_l0_reverse.data.numpy(), self.layer.weight_fp_l0_reverse.data.numpy(), self.layer.weight_op_l0_reverse.data.numpy()] if self.legacy == 'ocropy' else None, output_all=not self.summarize) else: builder.add_unilstm(name=name, W_h=_reorder_indim(self.layer.weight_hh_l0), W_x=_reorder_indim(self.layer.weight_ih_l0), b=_reorder_indim((self.layer.bias_ih_l0 + self.layer.bias_hh_l0)) if not self.legacy else None, hidden_size=self.hidden_size, input_size=self.input_size, input_names=[input], output_names=[name], peep=[self.layer.weight_ip_l0.data.numpy(), self.layer.weight_fp_l0.data.numpy(), self.layer.weight_op_l0.data.numpy()] if self.legacy == 'ocropy' else None, output_all=not self.summarize) return name class LinSoftmax(Module): """ A wrapper for linear projection + softmax dealing with dimensionality mangling. """ def __init__(self, input_size: int, output_size: int, augmentation: bool = False) -> None: """ Args: input_size: Number of inputs in the feature dimension output_size: Number of outputs in the feature dimension augmentation (bool): Enables 1-augmentation of input vectors Shape: - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H` height, and `W` width. - Outputs output :math:`(N, output_size, H, S)` """ super().__init__() self.input_size = input_size self.output_size = output_size self.augmentation = augmentation if self.augmentation: self.input_size += 1 self.lin = torch.nn.Linear(self.input_size, output_size) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # move features (C) to last dimension for linear activation # NCHW -> NWHC inputs = inputs.transpose(1, 3) # augment with ones along the input (C) axis if self.augmentation: inputs = torch.cat([torch.ones(inputs.shape[:3] + (1,)), inputs], dim=3) o = self.lin(inputs) # switch between log softmax (needed by ctc) and regular (for inference). if not self.training: o = F.softmax(o, dim=3) else: o = F.log_softmax(o, dim=3) # and swap again return o.transpose(1, 3), seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: """ Calculates the output shape from input 4D tuple NCHW. """ self.output_shape = (input[0], self.output_size, input[2], input[3]) return self.output_shape def deserialize(self, name: str, spec) -> None: """ Sets the weights of an initialized module from a CoreML protobuf spec. 
""" # extract conv parameters lin = [x for x in spec.neuralNetwork.layers if x.name == '{}_lin'.format(name)][0].innerProduct weights = torch.Tensor(lin.weights.floatValue).resize_as_(self.lin.weight.data) bias = torch.Tensor(lin.bias.floatValue) self.lin.weight = torch.nn.Parameter(weights) self.lin.bias = torch.nn.Parameter(bias) def serialize(self, name: str, input: str, builder): """ Serializes the module using a NeuralNetworkBuilder. """ lin_name = '{}_lin'.format(name) softmax_name = '{}_softmax'.format(name) builder.add_inner_product(lin_name, self.lin.weight.data.numpy(), self.lin.bias.data.numpy(), self.input_size, self.output_size, has_bias=True, input_name=input, output_name=lin_name) builder.add_softmax(softmax_name, lin_name, name) return name def resize(self, output_size: int, del_indices: Optional[Iterable[int]] = None) -> None: """ Resizes the linear layer with minimal disturbance to the existing weights. First removes the weight and bias at the output positions in del_indices, then resizes both tensors to a new output size. Args: output_size (int): Desired output size after resizing del_indices (list): List of connection to outputs to remove. """ if not del_indices: del_indices = [] old_shape = self.lin.weight.size(0) self.output_size = output_size idx = torch.tensor([x for x in range(old_shape) if x not in del_indices]) weight = self.lin.weight.index_select(0, idx) rweight = torch.zeros((output_size - weight.size(0), weight.size(1))) torch.nn.init.xavier_uniform_(rweight) weight = torch.cat([weight, rweight]) bias = self.lin.bias.index_select(0, idx) bias = torch.cat([bias, torch.zeros(output_size - bias.size(0))]) self.lin = torch.nn.Linear(self.input_size, output_size) self.lin.weight = torch.nn.Parameter(weight) self.lin.bias = torch.nn.Parameter(bias) class ActConv2D(Module): """ A wrapper for convolution + activation with automatic padding ensuring no dropped columns. 
""" def __init__(self, in_channels: int, out_channels: int, kernel_size: Tuple[int, int], stride: Tuple[int, int], nl: str = 'l', dilation: Tuple[int, int] = (1, 1), transposed: bool = False) -> None: super().__init__() self.in_channels = in_channels self.kernel_size = kernel_size self.out_channels = out_channels self.stride = stride self.dilation = dilation self.padding = tuple((dilation[i] * (kernel_size[i] - 1)) // 2 for i in range(2)) self.transposed = transposed if nl == 's': self.nl = torch.sigmoid self.nl_name = 'SIGMOID' elif nl == 't': self.nl = torch.tanh self.nl_name = 'TANH' elif nl == 'm': self.nl = torch.nn.Softmax(dim=1) self.nl_name = 'SOFTMAX' elif nl == 'r': self.nl = torch.relu self.nl_name = 'RELU' elif nl == 'lr': self.nl = torch.nn.LeakyReLU() self.nl_name = 'LEAKYRELU' else: self.nl_name = 'LINEAR' self.nl = lambda x: x if self.transposed: self.co = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, dilation=self.dilation) else: self.co = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, dilation=self.dilation) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if self.transposed: o = self.co(inputs, output_size=output_shape) else: o = self.co(inputs) # return logits for sigmoid activation during training if not (self.nl_name == 'SIGMOID' and self.training): o = self.nl(o) if seq_len is not None: if self.transposed: seq_len = torch.floor( ((seq_len - 1) * self.stride[1]\ - 2 * self.padding[1]\ + self.dilation[1] * (self.kernel_size[1] - 1) + 1)) else: seq_len = torch.clamp(torch.floor( (seq_len+2*self.padding[1]-self.dilation[1]*(self.kernel_size[1]-1)-1).float()/self.stride[1]+1), min=1).int() return o, seq_len def get_shape(self, input: Tuple[int, int, int, int], target_shape: Optional[Tuple[int, int, int, int]] = None) -> Tuple[int, int, int, int]: if self.transposed: """ For transposed convolution, there is some flexibilty. """ min_y = int((input[2] - 1) * self.stride[0] - 2 * self.padding[0] + self.dilation[0] * (self.kernel_size[0] - 1) + 1 if input[2] != 0 else 0) target_y = min_y if not target_shape or target_shape[2] == 0 else target_shape[2] min_x = int((input[3] - 1) * self.stride[1] - 2 * self.padding[1] + self.dilation[1] * (self.kernel_size[1] - 1) + 1 if input[3] != 0 else 0) target_x = min_x if not target_shape or target_shape[3] == 0 else target_shape[3] self.output_shape = (input[0], self.out_channels, min(min_y + self.stride[0] - 1, max(target_y, min_y)), min(min_x + self.stride[1] - 1, max(target_x, min_x))) else: self.output_shape = (input[0], self.out_channels, int(max(np.floor((input[2]+2*self.padding[0]-self.dilation[0]*(self.kernel_size[0]-1)-1) / self.stride[0]+1), 1) if input[2] != 0 else 0), int(max(np.floor((input[3]+2*self.padding[1]-self.dilation[1]*(self.kernel_size[1]-1)-1)/self.stride[1]+1), 1) if input[3] != 0 else 0)) return self.output_shape def deserialize(self, name: str, spec) -> None: """ Sets the weight of an initialized model from a CoreML protobuf spec. 
""" conv = [x for x in spec.neuralNetwork.layers if x.name == '{}_conv'.format(name)][0].convolution if self.transposed: self.co.weight = torch.nn.Parameter(torch.Tensor(conv.weights.floatValue).view(self.in_channels, self.out_channels, *self.kernel_size)) else: self.co.weight = torch.nn.Parameter(torch.Tensor(conv.weights.floatValue).view(self.out_channels, self.in_channels, *self.kernel_size)) self.co.bias = torch.nn.Parameter(torch.Tensor(conv.bias.floatValue)) def serialize(self, name: str, input: str, builder) -> str: """ Serializes the module using a NeuralNetworkBuilder. """ conv_name = '{}_conv'.format(name) act_name = '{}_act'.format(name) W = self.co.weight.permute(2, 3, 0, 1).data.numpy() if self.transposed else self.co.weight.permute(2, 3, 1, 0).data.numpy() builder.add_convolution(name=conv_name, kernel_channels=self.in_channels, output_channels=self.out_channels, height=self.kernel_size[0], width=self.kernel_size[1], stride_height=self.stride[0], stride_width=self.stride[1], dilation_factors=self.dilation, border_mode='same', groups=1, W=W, b=self.co.bias.data.numpy(), has_bias=True, is_deconv=self.transposed, input_name=input, output_name=conv_name) if self.nl_name != 'SOFTMAX': builder.add_activation(act_name, self.nl_name, conv_name, name, params=None if self.nl_name != 'LEAKYRELU' else [self.nl.negative_slope]) else: builder.add_softmax(act_name, conv_name, name) return name def resize(self, output_size: int, del_indices: Optional[Iterable[int]] = None) -> None: """ Resizes the convolutional filters of the layer First removes the filters at output positions in del_indices, then resizes both tensors to a new output size. Args: output_size (int): Desired output dimensionality after resizing del_indices (list): List of connection to outputs to remove. """ if not del_indices: del_indices = [] old_shape = self.co.weight.size(0) self.out_channels = output_size idx = torch.tensor([x for x in range(old_shape) if x not in del_indices]) weight = self.co.weight.index_select(0, idx) rweight = torch.zeros((output_size - weight.size(0), weight.size(1), weight.size(2), weight.size(3))) if rweight.shape[0] > 0: torch.nn.init.xavier_uniform_(rweight) weight = torch.cat([weight, rweight], dim=0) bias = self.co.bias.index_select(0, idx) bias = torch.cat([bias, torch.zeros(output_size - bias.size(0))]) if self.transposed: self.co = torch.nn.ConvTranspose2d(self.in_channels, self.out_channels, self.kernel_size, stride=self.stride, padding=self.padding) else: self.co = torch.nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, stride=self.stride, padding=self.padding) self.co.weight = torch.nn.Parameter(weight) self.co.bias = torch.nn.Parameter(bias) class GroupNorm(Module): """ A group normalization layer """ def __init__(self, in_channels: int, num_groups: int) -> None: super().__init__() self.in_channels = in_channels self.num_groups = num_groups self.layer = torch.nn.GroupNorm(num_groups, in_channels) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor], output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: t = inputs.dtype # XXX: verify that pytorch AMP casts the inputs to float32 correctly at # some point. 
o = self.layer(inputs.type(torch.float32)) return o.type(t), seq_len def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: self.output_shape = input return self.output_shape # type: ignore def deserialize(self, name: str, spec) -> None: """ Sets the weight of an initialized model from a CoreML protobuf spec. """ gn = [x for x in spec.neuralNetwork.layers if x.name == '{}'.format(name)][0].custom self.layer.weight = torch.nn.Parameter(torch.Tensor(gn.weights[0].floatValue).resize_as_(self.layer.weight)) self.layer.bias = torch.nn.Parameter(torch.Tensor(gn.weights[1].floatValue).resize_as_(self.layer.bias)) def serialize(self, name: str, input: str, builder) -> str: """ Serializes the module using a NeuralNetworkBuilder. """ params = NeuralNetwork_pb2.CustomLayerParams() params.className = 'groupnorm' params.description = 'A Group Normalization layer' params.parameters['in_channels'].intValue = self.in_channels params.parameters['num_groups'].intValue = self.num_groups weight = params.weights.add() weight.floatValue.extend(self.layer.weight.data.numpy()) bias = params.weights.add() bias.floatValue.extend(self.layer.bias.data.numpy()) builder.add_custom(name, input_names=[input], output_names=[name], custom_proto_spec=params) return name
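The shape and sequence-length bookkeeping threaded through these layers is easiest to see on a concrete example. A minimal sketch using only constructors defined in this module; the commented values follow the padding/stride arithmetic in `forward` and `get_shape`:

import torch

from kraken.lib.layers import ActConv2D, MaxPool

# 3x3 ReLU convolution; padding is chosen so no columns are dropped
conv = ActConv2D(in_channels=1, out_channels=32, kernel_size=(3, 3),
                 stride=(1, 1), nl='r')
pool = MaxPool(kernel_size=(2, 2), stride=(2, 2))

x = torch.randn(2, 1, 48, 200)      # NCHW input
seq_len = torch.tensor([200, 160])  # valid width of each sample

o, olens = conv(x, seq_len)         # -> (2, 32, 48, 200), lengths unchanged
o, olens = pool(o, olens)           # -> (2, 32, 24, 100), lengths halved

# layer arithmetic without pushing data through the module
print(conv.get_shape((2, 1, 48, 200)))  # (2, 32, 48, 200)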
41,375
42.416579
200
py
kraken
kraken-main/kraken/lib/util.py
""" Ocropus's magic PIL-numpy array conversion routines. They express slightly different behavior from PIL.Image.toarray(). """ import torch import unicodedata import numpy as np from PIL import Image from typing import Union __all__ = ['pil2array', 'array2pil', 'is_bitonal', 'make_printable', 'get_im_str'] def pil2array(im: Image.Image, alpha: int = 0) -> np.ndarray: if im.mode == '1': return np.array(im.convert('L')) return np.array(im) def array2pil(a: np.ndarray) -> Image.Image: if a.dtype == np.dtype("B"): if a.ndim == 2: return Image.frombytes("L", (a.shape[1], a.shape[0]), a.tobytes()) elif a.ndim == 3: return Image.frombytes("RGB", (a.shape[1], a.shape[0]), a.tobytes()) else: raise Exception("bad image rank") elif a.dtype == np.dtype('float32'): return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tobytes()) else: raise Exception("unknown image type") def is_bitonal(im: Union[Image.Image, torch.Tensor]) -> bool: """ Tests a PIL image or torch tensor for bitonality. Args: im: Image to test Returns: True if the image contains only two different color values. False otherwise. """ if isinstance(im, Image.Image): return im.getcolors(2) is not None and len(im.getcolors(2)) == 2 elif isinstance(im, torch.Tensor): return len(im.int().unique()) == 2 def get_im_str(im: Image.Image) -> str: return im.filename if hasattr(im, 'filename') else str(im) def is_printable(char: str) -> bool: """ Determines if a chode point is printable/visible when printed. Args: char (str): Input code point. Returns: True if printable, False otherwise. """ letters = ('LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu') numbers = ('Nd', 'Nl', 'No') punctuation = ('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps') symbol = ('Sc', 'Sk', 'Sm', 'So') printable = letters + numbers + punctuation + symbol return unicodedata.category(char) in printable def make_printable(char: str) -> str: """ Takes a Unicode code point and return a printable representation of it. Args: char (str): Input code point Returns: Either the original code point, the name of the code point if it is a combining mark, whitespace etc., or the hex code if it is a control symbol. """ if not char or is_printable(char): return char elif unicodedata.category(char) in ('Cc', 'Cs', 'Co'): return '0x{:x}'.format(ord(char)) else: return unicodedata.name(char)
2,723
27.375
82
py
kraken
kraken-main/kraken/lib/models.py
""" kraken.lib.models ~~~~~~~~~~~~~~~~~ Wrapper around TorchVGSLModel including a variety of forward pass helpers for sequence classification. """ from os import PathLike from os.path import expandvars, expanduser, abspath import torch import numpy as np import kraken.lib.lineest import kraken.lib.ctc_decoder from typing import List, Tuple, Optional, Union from kraken.lib.vgsl import TorchVGSLModel from kraken.lib.exceptions import KrakenInvalidModelException, KrakenInputException __all__ = ['TorchSeqRecognizer', 'load_any'] import logging logger = logging.getLogger(__name__) class TorchSeqRecognizer(object): """ A wrapper class around a TorchVGSLModel for text recognition. """ def __init__(self, nn: TorchVGSLModel, decoder=kraken.lib.ctc_decoder.greedy_decoder, train: bool = False, device: str = 'cpu'): """ Constructs a sequence recognizer from a VGSL model and a decoder. Args: nn: Neural network used for recognition. decoder: Decoder function used for mapping softmax activations to labels and positions. train: Enables or disables gradient calculation and dropout. device: Device to run model on. Attributes: nn: Neural network used for recognition. codec: PytorchCodec extracted from the recognition model. decoder: Decoder function used for mapping softmax activations to labels and positions. train: Enables or disables gradient calculation and dropout. device: Device to run model on. one_channel_mode: flag indicating if the model expects binary or grayscale input images. seg_type: flag indicating if the model expects baseline- or bounding box-derived text line images. Raises: ValueError: Is raised when the model type is not a sequence recognizer. """ self.nn = nn self.kind = '' if train is True: self.nn.train() elif train is False: self.nn.eval() self.codec = self.nn.codec self.decoder = decoder self.train = train self.device = device if nn.model_type not in [None, 'recognition']: raise ValueError(f'Models of type {nn.model_type} are not supported by TorchSeqRecognizer') self.one_channel_mode = nn.one_channel_mode self.seg_type = nn.seg_type if self.device: self.nn.to(device) def to(self, device): """ Moves model to device and automatically loads input tensors onto it. """ self.device = device self.nn.to(device) def forward(self, line: torch.Tensor, lens: torch.Tensor = None) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: """ Performs a forward pass on a torch tensor of one or more lines with shape (N, C, H, W) and returns a numpy array (N, W, C). Args: line: NCHW line tensor lens: Optional tensor containing sequence lengths if N > 1 Returns: Tuple with (N, W, C) shaped numpy array and final output sequence lengths. Raises: KrakenInputException: Is raised if the channel dimension isn't of size 1 in the network output. """ if self.device: line = line.to(self.device) o, olens = self.nn.nn(line, lens) if o.size(2) != 1: raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(o.size())) self.outputs = o.detach().squeeze(2).cpu().numpy() if olens is not None: olens = olens.cpu().numpy() return self.outputs, olens def predict(self, line: torch.Tensor, lens: Optional[torch.Tensor] = None) -> List[List[Tuple[str, int, int, float]]]: """ Performs a forward pass on a torch tensor of a line with shape (N, C, H, W) and returns the decoding as a list of tuples (string, start, end, confidence). Args: line: NCHW line tensor lens: Optional tensor containing sequence lengths if N > 1 Returns: List of decoded sequences. 
""" o, olens = self.forward(line, lens) dec_seqs = [] if olens is not None: for seq, seq_len in zip(o, olens): locs = self.decoder(seq[:, :seq_len]) dec_seqs.append(self.codec.decode(locs)) else: locs = self.decoder(o[0]) dec_seqs.append(self.codec.decode(locs)) return dec_seqs def predict_string(self, line: torch.Tensor, lens: Optional[torch.Tensor] = None) -> List[str]: """ Performs a forward pass on a torch tensor of a line with shape (N, C, H, W) and returns a string of the results. Args: line: NCHW line tensor lens: Optional tensor containing the sequence lengths of the input batch. """ o, olens = self.forward(line, lens) dec_strs = [] if olens is not None: for seq, seq_len in zip(o, olens): locs = self.decoder(seq[:, :seq_len]) dec_strs.append(''.join(x[0] for x in self.codec.decode(locs))) else: locs = self.decoder(o[0]) dec_strs.append(''.join(x[0] for x in self.codec.decode(locs))) return dec_strs def predict_labels(self, line: torch.tensor, lens: torch.Tensor = None) -> List[List[Tuple[int, int, int, float]]]: """ Performs a forward pass on a torch tensor of a line with shape (N, C, H, W) and returns a list of tuples (class, start, end, max). Max is the maximum value of the softmax layer in the region. """ o, olens = self.forward(line, lens) oseqs = [] if olens is not None: for seq, seq_len in zip(o, olens): oseqs.append(self.decoder(seq[:, :seq_len])) else: oseqs.append(self.decoder(o[0])) return oseqs def load_any(fname: Union[PathLike, str], train: bool = False, device: str = 'cpu') -> TorchSeqRecognizer: """ Loads anything that was, is, and will be a valid ocropus model and instantiates a shiny new kraken.lib.lstm.SeqRecognizer from the RNN configuration in the file. Currently it recognizes the following kinds of models: * protobuf models containing VGSL segmentation and recognition networks. Additionally an attribute 'kind' will be added to the SeqRecognizer containing a string representation of the source kind. Current known values are: * vgsl for VGSL models Args: fname: Path to the model train: Enables gradient calculation and dropout layers in model. device: Target device Returns: A kraken.lib.models.TorchSeqRecognizer object. Raises: KrakenInvalidModelException: if the model is not loadable by any parser. """ nn = None fname = abspath(expandvars(expanduser(fname))) logger.info('Loading model from {}'.format(fname)) try: nn = TorchVGSLModel.load_model(str(fname)) except Exception as e: raise KrakenInvalidModelException('File {} not loadable by any parser.'.format(fname)) from e seq = TorchSeqRecognizer(nn, train=train, device=device) seq.kind = 'vgsl' return seq def validate_hyper_parameters(hyper_params): """ Validate some model's hyper parameters and modify them in place if need be. """ if (hyper_params['quit'] == 'dumb' and hyper_params['completed_epochs'] >= hyper_params['epochs']): logger.warning('Maximum epochs reached (might be loaded from given model), starting again from 0.') hyper_params['completed_epochs'] = 0
8,040
35.058296
122
py
kraken
kraken-main/kraken/lib/functional_im_transforms.py
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Named functions for all the transforms that were lambdas in the past to
facilitate pickling.
"""
import torch
import regex
import unicodedata
import bidi.algorithm as bd

from os import PathLike
from pathlib import Path
from PIL import Image
from PIL.Image import Resampling
from typing import Tuple, Optional, Callable, Any, Union

from kraken.binarization import nlbin
from kraken.lib.lineest import dewarp, CenterNormalizer


def pil_to_mode(im: Image.Image, mode: str) -> Image.Image:
    return im.convert(mode)


def pil_to_bin(im: Image.Image) -> Image.Image:
    return nlbin(im)


def dummy(x: Any) -> Any:
    return x


def pil_dewarp(im: Image.Image, lnorm: CenterNormalizer) -> Image.Image:
    return dewarp(lnorm, im)


def pil_fixed_resize(im: Image.Image, scale: Tuple[int, int]) -> Image.Image:
    return _fixed_resize(im, scale, Resampling.LANCZOS)


def tensor_invert(im: torch.Tensor) -> torch.Tensor:
    return im.max() - im


def tensor_permute(im: torch.Tensor, perm: Tuple[int, ...]) -> torch.Tensor:
    return im.permute(*perm)


def _fixed_resize(img: Image.Image, size: Tuple[int, int], interpolation: int = Resampling.LANCZOS):
    """
    Doesn't do the annoying runtime scale dimension switching the default
    pytorch transform does.

    Args:
        img (PIL.Image.Image): image to resize
        size (tuple): Tuple (height, width)
    """
    w, h = img.size
    oh, ow = size
    if oh == 0:
        oh = int(h * ow/w)
    elif ow == 0:
        ow = int(w * oh/h)
    img = img.resize((ow, oh), interpolation)
    return img


def text_normalize(text: str, normalization: str) -> str:
    return unicodedata.normalize(normalization, text)


def text_whitespace_normalize(text: str) -> str:
    return regex.sub(r'\s', ' ', text).strip()


def text_reorder(text: str, base_dir: Optional[str] = None) -> str:
    return bd.get_display(text, base_dir=base_dir)


def default_split(x: Union[PathLike, str]) -> str:
    x = Path(x)
    while x.suffixes:
        x = x.with_suffix('')
    return str(x)


def suffix_split(x: Union[PathLike, str], split: Callable[[Union[PathLike, str]], str], suffix: str) -> str:
    return split(x) + suffix
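A few of these named transforms at work; a short illustrative sketch (the `data/0001` paths are placeholders):

from kraken.lib import functional_im_transforms as F_t

# default_split strips *all* suffixes so image and transcription share a stem
F_t.default_split('data/0001.nrm.png')  # 'data/0001'
F_t.suffix_split('data/0001.png', split=F_t.default_split,
                 suffix='.gt.txt')      # 'data/0001.gt.txt'

F_t.text_whitespace_normalize(' foo\nbar ')  # 'foo bar'
F_t.text_normalize('e\u0301', 'NFC')         # 'é' (composed form)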
2,764
25.84466
108
py
kraken
kraken-main/kraken/lib/__init__.py
0
0
0
py
kraken
kraken-main/kraken/lib/train.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Training loop interception helpers """ import re import torch import logging import warnings import numpy as np import torch.nn.functional as F import pytorch_lightning as pl from os import PathLike from functools import partial from torch.multiprocessing import Pool from torchmetrics import CharErrorRate, WordErrorRate from torchmetrics.classification import MultilabelAccuracy, MultilabelJaccardIndex from torch.optim import lr_scheduler from typing import Callable, Dict, Optional, Sequence, Union, Any, Literal from pytorch_lightning.callbacks import Callback, EarlyStopping, BaseFinetuning, LearningRateMonitor from kraken.lib import models, vgsl, default_specs, progress from kraken.lib.xml import preparse_xml_data from kraken.lib.util import make_printable from kraken.lib.codec import PytorchCodec from kraken.lib.dataset import (ArrowIPCRecognitionDataset, BaselineSet, GroundTruthDataset, PolygonGTDataset, ImageInputTransforms, collate_sequences) from kraken.lib.models import validate_hyper_parameters from kraken.lib.exceptions import KrakenInputException, KrakenEncodeException from torch.utils.data import DataLoader, random_split, Subset logger = logging.getLogger(__name__) def _star_fun(fun, kwargs): try: return fun(**kwargs) except FileNotFoundError as e: logger.warning(f'{e.strerror}: {e.filename}. Skipping.') except KrakenInputException as e: logger.warning(str(e)) return None def _validation_worker_init_fn(worker_id): """ Fix random seeds so that augmentation always produces the same results when validating. Temporarily increase the logging level for lightning because otherwise it will display a message at info level about the seed being changed. 
""" from pytorch_lightning import seed_everything level = logging.getLogger("lightning_fabric.utilities.seed").level logging.getLogger("lightning_fabric.utilities.seed").setLevel(logging.WARN) seed_everything(42) logging.getLogger("lightning_fabric.utilities.seed").setLevel(level) class KrakenTrainer(pl.Trainer): def __init__(self, enable_progress_bar: bool = True, enable_summary: bool = True, min_epochs: int = 5, max_epochs: int = 100, freeze_backbone=-1, pl_logger: Union[pl.loggers.logger.Logger, str, None] = None, log_dir: Optional[PathLike] = None, *args, **kwargs): kwargs['enable_checkpointing'] = False kwargs['enable_progress_bar'] = enable_progress_bar kwargs['min_epochs'] = min_epochs kwargs['max_epochs'] = max_epochs kwargs['callbacks'] = ([] if 'callbacks' not in kwargs else kwargs['callbacks']) if not isinstance(kwargs['callbacks'], list): kwargs['callbacks'] = [kwargs['callbacks']] if pl_logger: if 'logger' in kwargs and isinstance(kwargs['logger'], pl.loggers.logger.Logger): logger.debug('Experiment logger has been provided outside KrakenTrainer as `logger`') elif isinstance(pl_logger, pl.loggers.logger.Logger): logger.debug('Experiment logger has been provided outside KrakenTrainer as `pl_logger`') kwargs['logger'] = pl_logger elif pl_logger == 'tensorboard': logger.debug('Creating default experiment logger') kwargs['logger'] = pl.loggers.TensorBoardLogger(log_dir) else: logger.error('`pl_logger` was set, but %s is not an accepted value', pl_logger) raise ValueError(f'{pl_logger} is not acceptable as logger') kwargs['callbacks'].append(LearningRateMonitor(logging_interval='step')) else: kwargs['logger'] = False if enable_progress_bar: progress_bar_cb = progress.KrakenTrainProgressBar(leave=True) kwargs['callbacks'].append(progress_bar_cb) if enable_summary: from pytorch_lightning.callbacks import RichModelSummary summary_cb = RichModelSummary(max_depth=2) kwargs['callbacks'].append(summary_cb) kwargs['enable_model_summary'] = False if freeze_backbone > 0: kwargs['callbacks'].append(KrakenFreezeBackbone(freeze_backbone)) kwargs['callbacks'].extend([KrakenSetOneChannelMode(), KrakenSaveModel()]) super().__init__(*args, **kwargs) self.automatic_optimization = False def fit(self, *args, **kwargs): with warnings.catch_warnings(): warnings.filterwarnings(action='ignore', category=UserWarning, message='The dataloader,') super().fit(*args, **kwargs) class KrakenFreezeBackbone(BaseFinetuning): """ Callback freezing all but the last layer for fixed number of iterations. """ def __init__(self, unfreeze_at_iterations=10): super().__init__() self.unfreeze_at_iteration = unfreeze_at_iterations def freeze_before_training(self, pl_module): pass def finetune_function(self, pl_module, current_epoch, optimizer): pass def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: self.freeze(pl_module.net[:-1]) def on_train_batch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch, batch_idx) -> None: """ Called for each training batch. 
""" if trainer.global_step == self.unfreeze_at_iteration: for opt_idx, optimizer in enumerate(trainer.optimizers): num_param_groups = len(optimizer.param_groups) self.unfreeze_and_add_param_group(modules=pl_module.net[:-1], optimizer=optimizer, train_bn=True,) current_param_groups = optimizer.param_groups self._store(pl_module, opt_idx, num_param_groups, current_param_groups) def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: """Called when the epoch begins.""" pass class KrakenSetOneChannelMode(Callback): """ Callback that sets the one_channel_mode of the model after the first epoch. """ def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: # fill one_channel_mode after 1 iteration over training data set if not trainer.sanity_checking and trainer.current_epoch == 0 and trainer.model.nn.model_type == 'recognition': ds = getattr(pl_module, 'train_set', None) if not ds and trainer.datamodule: ds = trainer.datamodule.train_set im_mode = ds.dataset.im_mode if im_mode in ['1', 'L']: logger.info(f'Setting model one_channel_mode to {im_mode}.') trainer.model.nn.one_channel_mode = im_mode class KrakenSaveModel(Callback): """ Kraken's own serialization callback instead of pytorch's. """ def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: if not trainer.sanity_checking: trainer.model.nn.hyper_params['completed_epochs'] += 1 metric = float(trainer.logged_metrics['val_metric']) if 'val_metric' in trainer.logged_metrics else -1.0 trainer.model.nn.user_metadata['accuracy'].append((trainer.global_step, metric)) trainer.model.nn.user_metadata['metrics'].append((trainer.global_step, {k: float(v) for k, v in trainer.logged_metrics.items()})) logger.info('Saving to {}_{}.mlmodel'.format(trainer.model.output, trainer.current_epoch)) trainer.model.nn.save_model(f'{trainer.model.output}_{trainer.current_epoch}.mlmodel') trainer.model.best_model = f'{trainer.model.output}_{trainer.model.best_epoch}.mlmodel' class RecognitionModel(pl.LightningModule): def __init__(self, hyper_params: Dict[str, Any] = None, output: str = 'model', spec: str = default_specs.RECOGNITION_SPEC, append: Optional[int] = None, model: Optional[Union[PathLike, str]] = None, reorder: Union[bool, str] = True, training_data: Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]] = None, evaluation_data: Optional[Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]]] = None, partition: Optional[float] = 0.9, binary_dataset_split: bool = False, num_workers: int = 1, load_hyper_parameters: bool = False, repolygonize: bool = False, force_binarization: bool = False, format_type: Literal['path', 'alto', 'page', 'xml', 'binary'] = 'path', codec: Optional[Dict] = None, resize: Literal['fail', 'both', 'new', 'add', 'union'] = 'fail'): """ A LightningModule encapsulating the training setup for a text recognition model. Setup parameters (load, training_data, evaluation_data, ....) are named, model hyperparameters (everything in `kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS`) are in in the `hyper_params` argument. Args: hyper_params (dict): Hyperparameter dictionary containing all fields from kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS **kwargs: Setup parameters, i.e. CLI parameters of the train() command. 
""" super().__init__() hyper_params_ = default_specs.RECOGNITION_HYPER_PARAMS.copy() if model: logger.info(f'Loading existing model from {model} ') self.nn = vgsl.TorchVGSLModel.load_model(model) if self.nn.model_type not in [None, 'recognition']: raise ValueError(f'Model {model} is of type {self.nn.model_type} while `recognition` is expected.') if load_hyper_parameters: hp = self.nn.hyper_params else: hp = {} hyper_params_.update(hp) else: self.nn = None if hyper_params: hyper_params_.update(hyper_params) self.save_hyperparameters(hyper_params_) self.reorder = reorder self.append = append self.model = model self.num_workers = num_workers if resize == "add": resize = "union" warnings.warn("'add' value for resize has been deprecated. Use 'union' instead.", DeprecationWarning) elif resize == "both": resize = "new" warnings.warn("'both' value for resize has been deprecated. Use 'new' instead.", DeprecationWarning) self.resize = resize self.format_type = format_type self.output = output self.best_epoch = -1 self.best_metric = 0.0 self.best_model = None DatasetClass = GroundTruthDataset valid_norm = True if format_type in ['xml', 'page', 'alto']: logger.info(f'Parsing {len(training_data)} XML files for training data') training_data = preparse_xml_data(training_data, format_type, repolygonize) if evaluation_data: logger.info(f'Parsing {len(evaluation_data)} XML files for validation data') evaluation_data = preparse_xml_data(evaluation_data, format_type, repolygonize) if binary_dataset_split: logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.') binary_dataset_split = False DatasetClass = PolygonGTDataset valid_norm = False elif format_type == 'binary': DatasetClass = ArrowIPCRecognitionDataset if repolygonize: logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.') valid_norm = False logger.info(f'Got {len(training_data)} binary dataset files for training data') training_data = [{'file': file} for file in training_data] if evaluation_data: logger.info(f'Got {len(evaluation_data)} binary dataset files for validation data') evaluation_data = [{'file': file} for file in evaluation_data] elif format_type == 'path': if force_binarization: logger.warning('Forced binarization enabled in `path` mode. Will be ignored.') force_binarization = False if repolygonize: logger.warning('Repolygonization enabled in `path` mode. Will be ignored.') if binary_dataset_split: logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.') binary_dataset_split = False logger.info(f'Got {len(training_data)} line strip images for training data') training_data = [{'image': im} for im in training_data] if evaluation_data: logger.info(f'Got {len(evaluation_data)} line strip images for validation data') evaluation_data = [{'image': im} for im in evaluation_data] valid_norm = True # format_type is None. Determine training type from length of training data entry elif not format_type: if len(training_data[0]) >= 4: DatasetClass = PolygonGTDataset valid_norm = False else: if force_binarization: logger.warning('Forced binarization enabled with box lines. Will be ignored.') force_binarization = False if repolygonize: logger.warning('Repolygonization enabled with box lines. Will be ignored.') if binary_dataset_split: logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. 
Will be ignored.') binary_dataset_split = False else: raise ValueError(f'format_type {format_type} not in [alto, page, xml, path, binary].') spec = spec.strip() if spec[0] != '[' or spec[-1] != ']': raise ValueError(f'VGSL spec {spec} not bracketed') self.spec = spec # preparse input sizes from vgsl string to seed ground truth data set # sizes and dimension ordering. if not self.nn: blocks = spec[1:-1].split(' ') m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0]) if not m: raise ValueError(f'Invalid input spec {blocks[0]}') batch, height, width, channels = [int(x) for x in m.groups()] else: batch, channels, height, width = self.nn.input self.transforms = ImageInputTransforms(batch, height, width, channels, (self.hparams.pad, 0), valid_norm, force_binarization) self.example_input_array = torch.Tensor(batch, channels, height if height else 32, width if width else 400) if 'file_system' in torch.multiprocessing.get_all_sharing_strategies(): logger.debug('Setting multiprocessing tensor sharing strategy to file_system') torch.multiprocessing.set_sharing_strategy('file_system') if evaluation_data: train_set = self._build_dataset(DatasetClass, training_data) self.train_set = Subset(train_set, range(len(train_set))) val_set = self._build_dataset(DatasetClass, evaluation_data) self.val_set = Subset(val_set, range(len(val_set))) elif binary_dataset_split: train_set = self._build_dataset(DatasetClass, training_data, split_filter='train') self.train_set = Subset(train_set, range(len(train_set))) val_set = self._build_dataset(DatasetClass, training_data, split_filter='validation') self.val_set = Subset(val_set, range(len(val_set))) logger.info(f'Found {len(self.train_set)} (train) / {len(self.val_set)} (val) samples in pre-encoded dataset') else: train_set = self._build_dataset(DatasetClass, training_data) train_len = int(len(train_set)*partition) val_len = len(train_set) - train_len logger.info(f'No explicit validation data provided. Splitting off ' f'{val_len} (of {len(train_set)}) samples to validation ' 'set. (Will disable alphabet mismatch detection.)') self.train_set, self.val_set = random_split(train_set, (train_len, val_len)) if len(self.train_set) == 0 or len(self.val_set) == 0: raise ValueError('No valid training data was provided to the train ' 'command. 
Please add valid XML, line, or binary data.') logger.info(f'Training set {len(self.train_set)} lines, validation set ' f'{len(self.val_set)} lines, alphabet {len(train_set.alphabet)} ' 'symbols') alpha_diff_only_train = set(self.train_set.dataset.alphabet).difference(set(self.val_set.dataset.alphabet)) alpha_diff_only_val = set(self.val_set.dataset.alphabet).difference(set(self.train_set.dataset.alphabet)) if alpha_diff_only_train: logger.warning(f'alphabet mismatch: chars in training set only: ' f'{alpha_diff_only_train} (not included in accuracy test ' 'during training)') if alpha_diff_only_val: logger.warning(f'alphabet mismatch: chars in validation set only: {alpha_diff_only_val} (not trained)') logger.info('grapheme\tcount') for k, v in sorted(train_set.alphabet.items(), key=lambda x: x[1], reverse=True): char = make_printable(k) if char == k: char = '\t' + char logger.info(f'{char}\t{v}') if codec: logger.info('Instantiating codec') self.codec = PytorchCodec(codec) for k, v in self.codec.c2l.items(): char = make_printable(k) if char == k: char = '\t' + char logger.info(f'{char}\t{v}') else: self.codec = None logger.info('Encoding training set') self.val_cer = CharErrorRate() self.val_wer = WordErrorRate() def _build_dataset(self, DatasetClass, training_data, **kwargs): dataset = DatasetClass(normalization=self.hparams.normalization, whitespace_normalization=self.hparams.normalize_whitespace, reorder=self.reorder, im_transforms=self.transforms, augmentation=self.hparams.augment, **kwargs) if (self.num_workers and self.num_workers > 1) and self.format_type != 'binary': with Pool(processes=self.num_workers) as pool: for im in pool.imap_unordered(partial(_star_fun, dataset.parse), training_data, 5): logger.debug(f'Adding sample {im} to training set') if im: dataset.add(**im) else: for im in training_data: try: dataset.add(**im) except KrakenInputException as e: logger.warning(str(e)) if self.format_type == 'binary' and self.hparams.normalization: logger.debug('Rebuilding dataset using unicode normalization') dataset.rebuild_alphabet() return dataset def forward(self, x, seq_lens=None): return self.net(x, seq_lens) def training_step(self, batch, batch_idx): input, target = batch['image'], batch['target'] # sequence batch if 'seq_lens' in batch: seq_lens, label_lens = batch['seq_lens'], batch['target_lens'] target = (target, label_lens) o = self.net(input, seq_lens) else: o = self.net(input) seq_lens = o[1] output = o[0] target_lens = target[1] target = target[0] # height should be 1 by now if output.size(2) != 1: raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(output.size(2))) output = output.squeeze(2) # NCW -> WNC loss = self.nn.criterion(output.permute(2, 0, 1), # type: ignore target, seq_lens, target_lens) self.log('train_loss', loss, on_step=True, on_epoch=False, prog_bar=False, logger=True) return loss def validation_step(self, batch, batch_idx): pred = self.rec_nn.predict_string(batch['image'], batch['seq_lens']) idx = 0 decoded_targets = [] for offset in batch['target_lens']: decoded_targets.append(''.join([x[0] for x in self.val_codec.decode([(x, 0, 0, 0) for x in batch['target'][idx:idx+offset]])])) idx += offset self.val_cer.update(pred, decoded_targets) self.val_wer.update(pred, decoded_targets) if self.logger and self.trainer.state.stage != 'sanity_check' and self.hparams.batch_size * batch_idx < 16: for i in range(self.hparams.batch_size): count = self.hparams.batch_size * batch_idx + i if count < 16: self.logger.experiment.add_image(f'Validation 
#{count}, target: {decoded_targets[i]}', batch['image'][i], self.global_step, dataformats="CHW") self.logger.experiment.add_text(f'Validation #{count}, target: {decoded_targets[i]}', pred[i], self.global_step) def on_validation_epoch_end(self): accuracy = 1.0 - self.val_cer.compute() word_accuracy = 1.0 - self.val_wer.compute() if accuracy > self.best_metric: logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {accuracy} ({self.current_epoch})') self.best_epoch = self.current_epoch self.best_metric = accuracy logger.info(f'validation run: total chars {self.val_cer.total} errors {self.val_cer.errors} accuracy {accuracy}') self.log('val_accuracy', accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.log('val_word_accuracy', word_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.log('val_metric', accuracy, on_step=False, on_epoch=True, prog_bar=False, logger=True) self.val_cer.reset() self.val_wer.reset() def setup(self, stage: Optional[str] = None): # finalize models in case of appending/loading if stage in [None, 'fit']: # Log a few sample images before the datasets are encoded. # This is only possible for Arrow datasets, because the # other dataset types can only be accessed after encoding if self.logger and isinstance(self.train_set.dataset, ArrowIPCRecognitionDataset) : for i in range(min(len(self.train_set), 16)): idx = np.random.randint(len(self.train_set)) sample = self.train_set[idx] self.logger.experiment.add_image(f'train_set sample #{i}: {sample["target"]}', sample['image']) if self.append: self.train_set.dataset.encode(self.codec) # now we can create a new model self.spec = '[{} O1c{}]'.format(self.spec[1:-1], self.train_set.dataset.codec.max_label + 1) logger.info(f'Appending {self.spec} to existing model {self.nn.spec} after {self.append}') self.nn.append(self.append, self.spec) self.nn.add_codec(self.train_set.dataset.codec) logger.info(f'Assembled model spec: {self.nn.spec}') elif self.model: self.spec = self.nn.spec # prefer explicitly given codec over network codec if mode is 'new' codec = self.codec if (self.codec and self.resize == 'new') else self.nn.codec codec.strict = True try: self.train_set.dataset.encode(codec) except KrakenEncodeException: alpha_diff = set(self.train_set.dataset.alphabet).difference( set(codec.c2l.keys()) ) if self.resize == 'fail': raise KrakenInputException(f'Training data and model codec alphabets mismatch: {alpha_diff}') elif self.resize == 'union': logger.info(f'Resizing codec to include ' f'{len(alpha_diff)} new code points') # Construct two codecs: # 1. training codec containing only the vocabulary in the training dataset # 2. validation codec = training codec + validation set vocabulary # This keep the codec in the model from being 'polluted' by non-trained characters. train_codec = codec.add_labels(alpha_diff) self.nn.add_codec(train_codec) logger.info(f'Resizing last layer in network to {train_codec.max_label+1} outputs') self.nn.resize_output(train_codec.max_label + 1) self.train_set.dataset.encode(train_codec) elif self.resize == 'new': logger.info(f'Resizing network or given codec to ' f'{len(self.train_set.dataset.alphabet)} ' f'code sequences') # same codec procedure as above, just with merging. self.train_set.dataset.encode(None) train_codec, del_labels = codec.merge(self.train_set.dataset.codec) # Switch codec. 
self.nn.add_codec(train_codec) logger.info(f'Deleting {len(del_labels)} output classes from network ' f'({len(codec)-len(del_labels)} retained)') self.nn.resize_output(train_codec.max_label + 1, del_labels) self.train_set.dataset.encode(train_codec) else: raise ValueError(f'invalid resize parameter value {self.resize}') self.nn.codec.strict = False self.spec = self.nn.spec else: self.train_set.dataset.encode(self.codec) logger.info(f'Creating new model {self.spec} with {self.train_set.dataset.codec.max_label+1} outputs') self.spec = '[{} O1c{}]'.format(self.spec[1:-1], self.train_set.dataset.codec.max_label + 1) self.nn = vgsl.TorchVGSLModel(self.spec) # initialize weights self.nn.init_weights() self.nn.add_codec(self.train_set.dataset.codec) val_diff = set(self.val_set.dataset.alphabet).difference( set(self.train_set.dataset.codec.c2l.keys()) ) logger.info(f'Adding {len(val_diff)} dummy labels to validation set codec.') val_codec = self.nn.codec.add_labels(val_diff) self.val_set.dataset.encode(val_codec) self.val_codec = val_codec if self.nn.one_channel_mode and self.train_set.dataset.im_mode != self.nn.one_channel_mode: logger.warning(f'Neural network has been trained on mode {self.nn.one_channel_mode} images, ' f'training set contains mode {self.train_set.dataset.im_mode} data. Consider setting `force_binarization`') if self.format_type != 'path' and self.nn.seg_type == 'bbox': logger.warning('Neural network has been trained on bounding box image information but training set is polygonal.') self.nn.hyper_params = self.hparams self.nn.model_type = 'recognition' if not self.nn.seg_type: logger.info(f'Setting seg_type to {self.train_set.dataset.seg_type}.') self.nn.seg_type = self.train_set.dataset.seg_type self.rec_nn = models.TorchSeqRecognizer(self.nn, train=None, device=None) self.net = self.nn.nn torch.set_num_threads(max(self.num_workers, 1)) def train_dataloader(self): return DataLoader(self.train_set, batch_size=self.hparams.batch_size, num_workers=self.num_workers, pin_memory=True, shuffle=True, collate_fn=collate_sequences) def val_dataloader(self): return DataLoader(self.val_set, shuffle=False, batch_size=self.hparams.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=collate_sequences, worker_init_fn=_validation_worker_init_fn) def configure_callbacks(self): callbacks = [] if self.hparams.quit == 'early': callbacks.append(EarlyStopping(monitor='val_accuracy', mode='max', patience=self.hparams.lag, stopping_threshold=1.0)) return callbacks # configuration of optimizers and learning rate schedulers # -------------------------------------------------------- # # All schedulers are created internally with a frequency of step to enable # batch-wise learning rate warmup. In lr_scheduler_step() calls to the # scheduler are then only performed at the end of the epoch. def configure_optimizers(self): return _configure_optimizer_and_lr_scheduler(self.hparams, self.nn.nn.parameters(), len_train_set=len(self.train_set), loss_tracking_mode='max') def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure): # update params optimizer.step(closure=optimizer_closure) # linear warmup between 0 and the initial learning rate `lrate` in `warmup` # steps. 
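        # Concretely (editor's illustration): with warmup=1000 and
        # lrate=1e-3, global_step 99 gives lr_scale = min(1.0, 100/1000)
        # = 0.1, i.e. an effective learning rate of 1e-4; from step 999
        # onwards lr_scale saturates at 1.0 and lr_scheduler_step() takes
        # over.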
if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup: lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup) for pg in optimizer.param_groups: pg["lr"] = lr_scale * self.hparams.lrate def lr_scheduler_step(self, scheduler, metric): if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup: # step OneCycleLR each batch if not in warmup phase if isinstance(scheduler, lr_scheduler.OneCycleLR): scheduler.step() # step every other scheduler epoch-wise elif self.trainer.is_last_batch: if metric is None: scheduler.step() else: scheduler.step(metric) class SegmentationModel(pl.LightningModule): def __init__(self, hyper_params: Dict = None, load_hyper_parameters: bool = False, progress_callback: Callable[[str, int], Callable[[None], None]] = lambda string, length: lambda: None, message: Callable[[str], None] = lambda *args, **kwargs: None, output: str = 'model', spec: str = default_specs.SEGMENTATION_SPEC, model: Optional[Union[PathLike, str]] = None, training_data: Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]] = None, evaluation_data: Optional[Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]]] = None, partition: Optional[float] = 0.9, num_workers: int = 1, force_binarization: bool = False, format_type: Literal['path', 'alto', 'page', 'xml'] = 'path', suppress_regions: bool = False, suppress_baselines: bool = False, valid_regions: Optional[Sequence[str]] = None, valid_baselines: Optional[Sequence[str]] = None, merge_regions: Optional[Dict[str, str]] = None, merge_baselines: Optional[Dict[str, str]] = None, bounding_regions: Optional[Sequence[str]] = None, resize: Literal['fail', 'both', 'new', 'add', 'union'] = 'fail', topline: Union[bool, None] = False): """ A LightningModule encapsulating the training setup for a page segmentation model. Setup parameters (load, training_data, evaluation_data, ....) are named, model hyperparameters (everything in `kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS`) are in in the `hyper_params` argument. Args: hyper_params (dict): Hyperparameter dictionary containing all fields from kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS **kwargs: Setup parameters, i.e. CLI parameters of the segtrain() command. """ super().__init__() self.best_epoch = -1 self.best_metric = 0.0 self.best_model = None self.model = model self.num_workers = num_workers if resize == "add": resize = "union" warnings.warn("'add' value for resize has been deprecated. Use 'union' instead.", DeprecationWarning) elif resize == "both": resize = "new" warnings.warn("'both' value for resize has been deprecated. 
Use 'new' instead.", DeprecationWarning) self.resize = resize self.format_type = format_type self.output = output self.bounding_regions = bounding_regions self.topline = topline hyper_params_ = default_specs.SEGMENTATION_HYPER_PARAMS.copy() if model: logger.info(f'Loading existing model from {model}') self.nn = vgsl.TorchVGSLModel.load_model(model) if self.nn.model_type not in [None, 'segmentation']: raise ValueError(f'Model {model} is of type {self.nn.model_type} while `segmentation` is expected.') if load_hyper_parameters: hp = self.nn.hyper_params else: hp = {} hyper_params_.update(hp) batch, channels, height, width = self.nn.input else: self.nn = None spec = spec.strip() if spec[0] != '[' or spec[-1] != ']': raise ValueError(f'VGSL spec "{spec}" not bracketed') self.spec = spec blocks = spec[1:-1].split(' ') m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0]) if not m: raise ValueError(f'Invalid input spec {blocks[0]}') batch, height, width, channels = [int(x) for x in m.groups()] if hyper_params: hyper_params_.update(hyper_params) validate_hyper_parameters(hyper_params_) self.save_hyperparameters(hyper_params_) if not training_data: raise ValueError('No training data provided. Please add some.') transforms = ImageInputTransforms(batch, height, width, channels, self.hparams.padding, valid_norm=False, force_binarization=force_binarization) self.example_input_array = torch.Tensor(batch, channels, height if height else 400, width if width else 300) # set multiprocessing tensor sharing strategy if 'file_system' in torch.multiprocessing.get_all_sharing_strategies(): logger.debug('Setting multiprocessing tensor sharing strategy to file_system') torch.multiprocessing.set_sharing_strategy('file_system') if not valid_regions: valid_regions = None if not valid_baselines: valid_baselines = None if suppress_regions: valid_regions = [] merge_regions = None if suppress_baselines: valid_baselines = [] merge_baselines = None train_set = BaselineSet(training_data, line_width=self.hparams.line_width, im_transforms=transforms, mode=format_type, augmentation=self.hparams.augment, valid_baselines=valid_baselines, merge_baselines=merge_baselines, valid_regions=valid_regions, merge_regions=merge_regions) if format_type is None: for page in training_data: train_set.add(**page) if evaluation_data: val_set = BaselineSet(evaluation_data, line_width=self.hparams.line_width, im_transforms=transforms, mode=format_type, augmentation=False, valid_baselines=valid_baselines, merge_baselines=merge_baselines, valid_regions=valid_regions, merge_regions=merge_regions) if format_type is None: for page in evaluation_data: val_set.add(**page) train_set = Subset(train_set, range(len(train_set))) val_set = Subset(val_set, range(len(val_set))) else: train_len = int(len(train_set)*partition) val_len = len(train_set) - train_len logger.info(f'No explicit validation data provided. Splitting off ' f'{val_len} (of {len(train_set)}) samples to validation ' 'set.') train_set, val_set = random_split(train_set, (train_len, val_len)) if len(train_set) == 0: raise ValueError('No valid training data provided. Please add some.') if len(val_set) == 0: raise ValueError('No valid validation data provided. 
Please add some.') # overwrite class mapping in validation set val_set.dataset.num_classes = train_set.dataset.num_classes val_set.dataset.class_mapping = train_set.dataset.class_mapping self.train_set = train_set self.val_set = val_set def forward(self, x): return self.nn.nn(x) def training_step(self, batch, batch_idx): input, target = batch['image'], batch['target'] output, _ = self.nn.nn(input) output = F.interpolate(output, size=(target.size(2), target.size(3))) loss = self.nn.criterion(output, target) self.log('train_loss', loss, on_step=True, on_epoch=False, prog_bar=False, logger=True) return loss def validation_step(self, batch, batch_idx): x, y = batch['image'], batch['target'] pred, _ = self.nn.nn(x) # scale target to output size y = F.interpolate(y, size=(pred.size(2), pred.size(3))).int() self.val_px_accuracy.update(pred, y) self.val_mean_accuracy.update(pred, y) self.val_mean_iu.update(pred, y) self.val_freq_iu.update(pred, y) def on_validation_epoch_end(self): pixel_accuracy = self.val_px_accuracy.compute() mean_accuracy = self.val_mean_accuracy.compute() mean_iu = self.val_mean_iu.compute() freq_iu = self.val_freq_iu.compute() if mean_iu > self.best_metric: logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {mean_iu} ({self.current_epoch})') self.best_epoch = self.current_epoch self.best_metric = mean_iu logger.info(f'validation run: accuracy {pixel_accuracy} mean_acc {mean_accuracy} mean_iu {mean_iu} freq_iu {freq_iu}') self.log('val_accuracy', pixel_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.log('val_mean_acc', mean_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.log('val_mean_iu', mean_iu, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.log('val_freq_iu', freq_iu, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.log('val_metric', mean_iu, on_step=False, on_epoch=True, prog_bar=True, logger=True) self.val_px_accuracy.reset() self.val_mean_accuracy.reset() self.val_mean_iu.reset() self.val_freq_iu.reset() def setup(self, stage: Optional[str] = None): # finalize models in case of appending/loading if stage in [None, 'fit']: if not self.model: self.spec = f'[{self.spec[1:-1]} O2l{self.train_set.dataset.num_classes}]' logger.info(f'Creating model {self.spec} with {self.train_set.dataset.num_classes} outputs') nn = vgsl.TorchVGSLModel(self.spec) if self.bounding_regions is not None: nn.user_metadata['bounding_regions'] = self.bounding_regions nn.user_metadata['topline'] = self.topline self.nn = nn else: if self.train_set.dataset.class_mapping['baselines'].keys() != self.nn.user_metadata['class_mapping']['baselines'].keys() or \ self.train_set.dataset.class_mapping['regions'].keys() != self.nn.user_metadata['class_mapping']['regions'].keys(): bl_diff = set(self.train_set.dataset.class_mapping['baselines'].keys()).symmetric_difference( set(self.nn.user_metadata['class_mapping']['baselines'].keys())) regions_diff = set(self.train_set.dataset.class_mapping['regions'].keys()).symmetric_difference( set(self.nn.user_metadata['class_mapping']['regions'].keys())) if self.resize == 'fail': raise ValueError(f'Training data and model class mapping differ (bl: {bl_diff}, regions: {regions_diff}') elif self.resize == 'union': new_bls = self.train_set.dataset.class_mapping['baselines'].keys() - self.nn.user_metadata['class_mapping']['baselines'].keys() new_regions = self.train_set.dataset.class_mapping['regions'].keys() - self.nn.user_metadata['class_mapping']['regions'].keys() 
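                        # Worked example (editor's illustration): with six
                        # existing output classes (indices 0-5, so cls_idx
                        # below becomes 5), two new baseline types and one
                        # new region type, resize_output() grows the layer
                        # to 5 + 2 + 1 + 1 = 9 units and the new classes
                        # are appended at indices 6, 7 and 8.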
cls_idx = max(max(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else -1, max(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else -1) logger.info(f'Adding {len(new_bls) + len(new_regions)} missing types to network output layer.') self.nn.resize_output(cls_idx + len(new_bls) + len(new_regions) + 1) for c in new_bls: cls_idx += 1 self.nn.user_metadata['class_mapping']['baselines'][c] = cls_idx for c in new_regions: cls_idx += 1 self.nn.user_metadata['class_mapping']['regions'][c] = cls_idx elif self.resize == 'new': logger.info('Fitting network exactly to training set.') new_bls = self.train_set.dataset.class_mapping['baselines'].keys() - self.nn.user_metadata['class_mapping']['baselines'].keys() new_regions = self.train_set.dataset.class_mapping['regions'].keys() - self.nn.user_metadata['class_mapping']['regions'].keys() del_bls = self.nn.user_metadata['class_mapping']['baselines'].keys() - self.train_set.dataset.class_mapping['baselines'].keys() del_regions = self.nn.user_metadata['class_mapping']['regions'].keys() - self.train_set.dataset.class_mapping['regions'].keys() logger.info(f'Adding {len(new_bls) + len(new_regions)} missing ' f'types and removing {len(del_bls) + len(del_regions)} to network output layer ') cls_idx = max(max(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else -1, max(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else -1) del_indices = [self.nn.user_metadata['class_mapping']['baselines'][x] for x in del_bls] del_indices.extend(self.nn.user_metadata['class_mapping']['regions'][x] for x in del_regions) self.nn.resize_output(cls_idx + len(new_bls) + len(new_regions) - len(del_bls) - len(del_regions) + 1, del_indices) # delete old baseline/region types cls_idx = min(min(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else np.inf, min(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else np.inf) bls = {} for k, v in sorted(self.nn.user_metadata['class_mapping']['baselines'].items(), key=lambda item: item[1]): if k not in del_bls: bls[k] = cls_idx cls_idx += 1 regions = {} for k, v in sorted(self.nn.user_metadata['class_mapping']['regions'].items(), key=lambda item: item[1]): if k not in del_regions: regions[k] = cls_idx cls_idx += 1 self.nn.user_metadata['class_mapping']['baselines'] = bls self.nn.user_metadata['class_mapping']['regions'] = regions # add new baseline/region types cls_idx -= 1 for c in new_bls: cls_idx += 1 self.nn.user_metadata['class_mapping']['baselines'][c] = cls_idx for c in new_regions: cls_idx += 1 self.nn.user_metadata['class_mapping']['regions'][c] = cls_idx else: raise ValueError(f'invalid resize parameter value {self.resize}') # backfill train_set/val_set mapping if key-equal as the actual # numbering in the train_set might be different self.train_set.dataset.class_mapping = self.nn.user_metadata['class_mapping'] self.val_set.dataset.class_mapping = self.nn.user_metadata['class_mapping'] # updates model's hyper params with user-defined ones self.nn.hyper_params = self.hparams # change topline/baseline switch loc = {None: 'centerline', True: 'topline', False: 'baseline'} if 'topline' not in self.nn.user_metadata: logger.warning(f'Setting baseline 
location to {loc[self.topline]} from unset model.') elif self.nn.user_metadata['topline'] != self.topline: from_loc = loc[self.nn.user_metadata['topline']] logger.warning(f'Changing baseline location from {from_loc} to {loc[self.topline]}.') self.nn.user_metadata['topline'] = self.topline logger.info('Training line types:') for k, v in self.train_set.dataset.class_mapping['baselines'].items(): logger.info(f' {k}\t{v}\t{self.train_set.dataset.class_stats["baselines"][k]}') logger.info('Training region types:') for k, v in self.train_set.dataset.class_mapping['regions'].items(): logger.info(f' {k}\t{v}\t{self.train_set.dataset.class_stats["regions"][k]}') if len(self.train_set) == 0: raise ValueError('No valid training data was provided to the train command. Please add valid XML data.') # set model type metadata field and dump class_mapping self.nn.model_type = 'segmentation' self.nn.user_metadata['class_mapping'] = self.val_set.dataset.class_mapping # for model size/trainable parameter output self.net = self.nn.nn torch.set_num_threads(max(self.num_workers, 1)) # set up validation metrics after output classes have been determined self.val_px_accuracy = MultilabelAccuracy(average='micro', num_labels=self.train_set.dataset.num_classes) self.val_mean_accuracy = MultilabelAccuracy(average='macro', num_labels=self.train_set.dataset.num_classes) self.val_mean_iu = MultilabelJaccardIndex(average='macro', num_labels=self.train_set.dataset.num_classes) self.val_freq_iu = MultilabelJaccardIndex(average='weighted', num_labels=self.train_set.dataset.num_classes) def train_dataloader(self): return DataLoader(self.train_set, batch_size=1, num_workers=self.num_workers, shuffle=True, pin_memory=True) def val_dataloader(self): return DataLoader(self.val_set, shuffle=False, batch_size=1, num_workers=self.num_workers, pin_memory=True) def configure_callbacks(self): callbacks = [] if self.hparams.quit == 'early': callbacks.append(EarlyStopping(monitor='val_mean_iu', mode='max', patience=self.hparams.lag, stopping_threshold=1.0)) return callbacks # configuration of optimizers and learning rate schedulers # -------------------------------------------------------- # # All schedulers are created internally with a frequency of step to enable # batch-wise learning rate warmup. In lr_scheduler_step() calls to the # scheduler are then only performed at the end of the epoch. def configure_optimizers(self): return _configure_optimizer_and_lr_scheduler(self.hparams, self.nn.nn.parameters(), len_train_set=len(self.train_set), loss_tracking_mode='max') def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure): # update params optimizer.step(closure=optimizer_closure) # linear warmup between 0 and the initial learning rate `lrate` in `warmup` # steps. 
if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup: lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup) for pg in optimizer.param_groups: pg["lr"] = lr_scale * self.hparams.lrate def lr_scheduler_step(self, scheduler, metric): if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup: # step OneCycleLR each batch if not in warmup phase if isinstance(scheduler, lr_scheduler.OneCycleLR): scheduler.step() # step every other scheduler epoch-wise elif self.trainer.is_last_batch: scheduler.step() def _configure_optimizer_and_lr_scheduler(hparams, params, len_train_set=None, loss_tracking_mode='max'): # XXX: Warmup is not configured here because it needs to be manually done in optimizer_step() logger.debug(f'Constructing {hparams.optimizer} optimizer (lr: {hparams.lrate}, momentum: {hparams.momentum})') if hparams.optimizer == 'Adam': optim = torch.optim.Adam(params, lr=hparams.lrate, weight_decay=hparams.weight_decay) else: optim = getattr(torch.optim, hparams.optimizer)(params, lr=hparams.lrate, momentum=hparams.momentum, weight_decay=hparams.weight_decay) lr_sched = {} if hparams.schedule == 'exponential': lr_sched = {'scheduler': lr_scheduler.ExponentialLR(optim, hparams.gamma, last_epoch=hparams.completed_epochs-1), 'interval': 'step'} elif hparams.schedule == 'cosine': lr_sched = {'scheduler': lr_scheduler.CosineAnnealingLR(optim, hparams.gamma, last_epoch=hparams.completed_epochs-1), 'interval': 'step'} elif hparams.schedule == 'step': lr_sched = {'scheduler': lr_scheduler.StepLR(optim, hparams.step_size, hparams.gamma, last_epoch=hparams.completed_epochs-1), 'interval': 'step'} elif hparams.schedule == 'reduceonplateau': lr_sched = {'scheduler': lr_scheduler.ReduceLROnPlateau(optim, mode=loss_tracking_mode, factor=hparams.rop_factor, patience=hparams.rop_patience), 'interval': 'step'} elif hparams.schedule == '1cycle': if hparams.epochs <= 0: raise ValueError('1cycle learning rate scheduler selected but ' 'number of epochs is less than 0 ' f'({hparams.epochs}).') last_epoch = hparams.completed_epochs*len_train_set if hparams.completed_epochs else -1 lr_sched = {'scheduler': lr_scheduler.OneCycleLR(optim, max_lr=hparams.lrate, epochs=hparams.epochs, steps_per_epoch=len_train_set, last_epoch=last_epoch), 'interval': 'step'} elif hparams.schedule != 'constant': raise ValueError(f'Unsupported learning rate scheduler {hparams.schedule}.') ret = {'optimizer': optim} if lr_sched: ret['lr_scheduler'] = lr_sched if hparams.schedule == 'reduceonplateau': lr_sched['monitor'] = 'val_metric' lr_sched['strict'] = False lr_sched['reduce_on_plateau'] = True return ret
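
# --- Illustrative sketch (editor's addition, hedged) ------------------------
# A minimal, self-contained example of the dictionary shape produced by
# _configure_optimizer_and_lr_scheduler(). The SimpleNamespace fields below
# merely mirror the hyperparameter names accessed above; the concrete values
# are assumptions for illustration, not kraken defaults.
if __name__ == '__main__':
    import torch
    from types import SimpleNamespace
    _hp = SimpleNamespace(optimizer='Adam', lrate=1e-3, momentum=0.9,
                          weight_decay=0.0, schedule='exponential',
                          gamma=0.95, completed_epochs=0)
    _params = [torch.nn.Parameter(torch.zeros(1))]
    _ret = _configure_optimizer_and_lr_scheduler(_hp, _params)
    # Expected keys: 'optimizer' plus, for any non-constant schedule,
    # 'lr_scheduler' wrapping {'scheduler': ..., 'interval': 'step'}.
    print(sorted(_ret.keys()))  # ['lr_scheduler', 'optimizer']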
57,490
49.742277
171
py
kraken
kraken-main/kraken/lib/sl.py
import numpy as np


def dim0(s):
    """Dimension of the slice list for dimension 0."""
    return s[0].stop-s[0].start


def dim1(s):
    """Dimension of the slice list for dimension 1."""
    return s[1].stop-s[1].start


def area(a):
    """Return the area of the slice list (ignores anything past a[:2])."""
    return np.prod([max(x.stop-x.start, 0) for x in a[:2]])


def width(s):
    """Width (extent along dimension 1) of the slice list."""
    return s[1].stop-s[1].start


def height(s):
    """Height (extent along dimension 0) of the slice list."""
    return s[0].stop-s[0].start


def aspect(a):
    """Aspect ratio (height/width) of the slice list."""
    return height(a)*1.0/width(a)


def xcenter(s):
    """Center of the slice list along dimension 1."""
    return np.mean([s[1].stop, s[1].start])


def ycenter(s):
    """Center of the slice list along dimension 0."""
    return np.mean([s[0].stop, s[0].start])


def center(s):
    """(y, x) center point of the slice list."""
    return (ycenter(s), xcenter(s))
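
# --- Usage sketch (editor's addition) ----------------------------------------
# The helpers above operate on (row_slice, column_slice) pairs such as those
# returned by e.g. scipy.ndimage.find_objects(). A quick self-contained check:
if __name__ == '__main__':
    s = (slice(10, 30), slice(5, 45))    # 20 rows x 40 columns
    assert dim0(s) == height(s) == 20
    assert dim1(s) == width(s) == 40
    assert area(s) == 800
    assert aspect(s) == 0.5
    assert center(s) == (20.0, 25.0)     # (y, x) center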
697
16.02439
73
py
kraken
kraken-main/kraken/lib/dataset/utils.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Utility functions for data loading and training of VGSL networks. """ import json import torch import numbers import pkg_resources import torch.nn.functional as F from functools import partial from torchvision import transforms from collections import Counter from typing import Dict, List, Tuple, Sequence, Any, Union from kraken.lib.models import TorchSeqRecognizer from kraken.lib.exceptions import KrakenInputException from kraken.lib.lineest import CenterNormalizer from kraken.lib import functional_im_transforms as F_t __all__ = ['ImageInputTransforms', 'collate_sequences'] import logging logger = logging.getLogger(__name__) class ImageInputTransforms(transforms.Compose): def __init__(self, batch: int, height: int, width: int, channels: int, pad: Union[int, Tuple[int, int], Tuple[int, int, int, int]], valid_norm: bool = True, force_binarization: bool = False) -> None: """ Container for image input transforms for recognition and segmentation networks. Args: batch: mini-batch size height: height of input image in pixels width: width of input image in pixels channels: color channels of input pad: Amount of padding on horizontal ends of image valid_norm: Enables/disables baseline normalization as a valid preprocessing step. If disabled we will fall back to standard scaling. force_binarization: Forces binarization of input images using the nlbin algorithm. 
""" super().__init__(None) self._scale = (height, width) # type: Tuple[int, int] self._valid_norm = valid_norm self._force_binarization = force_binarization self._batch = batch self._channels = channels self.pad = pad self._create_transforms() def _create_transforms(self) -> None: height = self._scale[0] width = self._scale[1] self._center_norm = False self._mode = 'RGB' if self._channels == 3 else 'L' if height == 1 and width == 0 and self._channels > 3: perm = (1, 0, 2) self._scale = (self._channels, 0) self._channels = 1 if self._valid_norm: self._center_norm = True self._mode = 'L' elif height > 1 and width == 0 and self._channels in (1, 3): perm = (0, 1, 2) if self._valid_norm and self._channels == 1: self._center_norm = True elif height == 0 and width > 1 and self._channels in (1, 3): perm = (0, 1, 2) # fixed height and width image => bicubic scaling of the input image, disable padding elif height > 0 and width > 0 and self._channels in (1, 3): perm = (0, 1, 2) self._pad = 0 elif height == 0 and width == 0 and self._channels in (1, 3): perm = (0, 1, 2) self._pad = 0 else: raise KrakenInputException(f'Invalid input spec {self._batch}, {height}, {width}, {self._channels}, {self._pad}.') if self._mode != 'L' and self._force_binarization: raise KrakenInputException(f'Invalid input spec {self._batch}, {height}, {width}, {self._channels}, {self._pad} in ' 'combination with forced binarization.') self.transforms = [] self.transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=self._mode))) if self._force_binarization: self.transforms.append(transforms.Lambda(F_t.pil_to_bin)) if self._scale != (0, 0): if self._center_norm: lnorm = CenterNormalizer(self._scale[0]) self.transforms.append(transforms.Lambda(partial(F_t.pil_dewarp, lnorm=lnorm))) self.transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=self._mode))) else: self.transforms.append(transforms.Lambda(partial(F_t.pil_fixed_resize, scale=self._scale))) if self._pad: self.transforms.append(transforms.Pad(self._pad, fill=255)) self.transforms.append(transforms.ToTensor()) # invert self.transforms.append(transforms.Lambda(F_t.tensor_invert)) self.transforms.append(transforms.Lambda(partial(F_t.tensor_permute, perm=perm))) @property def batch(self) -> int: """ Batch size attribute. Ignored. """ return self._batch @batch.setter def batch(self, batch: int) -> None: self._batch = batch @property def channels(self) -> int: """ Channels attribute. Can be either 1 (binary/grayscale), 3 (RGB). """ if self._channels not in [1, 3] and self._scale[0] == self._channels: return 1 else: return self._channels @channels.setter def channels(self, channels: int) -> None: self._channels = channels self._create_transforms() @property def height(self) -> int: """ Desired output image height. If set to 0, image will be rescaled proportionally with width, if 1 and `channels` is larger than 3 output will be grayscale and of the height set with the channels attribute. """ if self._scale == (1, 0) and self.channels > 3: return self._channels else: return self._scale[0] @height.setter def height(self, height: int) -> None: self._scale = (height, self.scale[1]) self._create_transforms() @property def width(self) -> int: """ Desired output image width. If set to 0, image will be rescaled proportionally with height. """ return self._scale[1] @width.setter def width(self, width: int) -> None: self._scale = (self._scale[0], width) self._create_transforms() @property def mode(self) -> str: """ Imaginary PIL.Image.Image mode of the output tensor. 
Possible values are RGB, L, and 1. """ return self._mode if not self.force_binarization else '1' @property def scale(self) -> Tuple[int, int]: """ Desired output shape (height, width) of the image. If any value is set to 0, image will be rescaled proportionally with height, width, if 1 and `channels` is larger than 3 output will be grayscale and of the height set with the channels attribute. """ if self._scale == (1, 0) and self.channels > 3: return (self._channels, self._scale[1]) else: return self._scale @scale.setter def scale(self, scale: Tuple[int, int]) -> None: self._scale = scale self._create_transforms() @property def pad(self) -> int: """ Amount of padding around left/right end of image. """ return self._pad @pad.setter def pad(self, pad: Union[int, Tuple[int, int], Tuple[int, int, int, int]]) -> None: if not isinstance(pad, (numbers.Number, tuple, list)): raise TypeError('Got inappropriate padding arg') self._pad = pad self._create_transforms() @property def valid_norm(self) -> bool: """ Switch allowing/disallowing centerline normalization. Even if enabled won't be applied to 3-channel images. """ return self._valid_norm @valid_norm.setter def valid_norm(self, valid_norm: bool) -> None: self._valid_norm = valid_norm self._create_transforms() @property def centerline_norm(self) -> bool: """ Attribute indicating if centerline normalization will be applied to input images. """ return self._center_norm @property def force_binarization(self) -> bool: """ Switch enabling/disabling forced binarization. """ return self._force_binarization @force_binarization.setter def force_binarization(self, force_binarization: bool) -> None: self._force_binarization = force_binarization self._create_transforms() def global_align(seq1: Sequence[Any], seq2: Sequence[Any]) -> Tuple[int, List[str], List[str]]: """ Computes a global alignment of two strings. Args: seq1 (Sequence[Any]): seq2 (Sequence[Any]): Returns a tuple (distance, list(algn1), list(algn2)) """ # calculate cost and direction matrix cost = [[0] * (len(seq2) + 1) for x in range(len(seq1) + 1)] for i in range(1, len(cost)): cost[i][0] = i for i in range(1, len(cost[0])): cost[0][i] = i direction = [[(0, 0)] * (len(seq2) + 1) for x in range(len(seq1) + 1)] direction[0] = [(0, x) for x in range(-1, len(seq2))] for i in range(-1, len(direction) - 1): direction[i + 1][0] = (i, 0) for i in range(1, len(cost)): for j in range(1, len(cost[0])): delcost = ((i - 1, j), cost[i - 1][j] + 1) addcost = ((i, j - 1), cost[i][j - 1] + 1) subcost = ((i - 1, j - 1), cost[i - 1][j - 1] + (seq1[i - 1] != seq2[j - 1])) best = min(delcost, addcost, subcost, key=lambda x: x[1]) cost[i][j] = best[1] direction[i][j] = best[0] d = cost[-1][-1] # backtrace algn1: List[Any] = [] algn2: List[Any] = [] i = len(direction) - 1 j = len(direction[0]) - 1 while direction[i][j] != (-1, 0): k, m = direction[i][j] if k == i - 1 and m == j - 1: algn1.insert(0, seq1[i - 1]) algn2.insert(0, seq2[j - 1]) elif k < i: algn1.insert(0, seq1[i - 1]) algn2.insert(0, '') elif m < j: algn1.insert(0, '') algn2.insert(0, seq2[j - 1]) i, j = k, m return d, algn1, algn2 def compute_confusions(algn1: Sequence[str], algn2: Sequence[str]): """ Compute confusion matrices from two globally aligned strings. 
    Args:
        algn1 (Sequence[str]): globally aligned sequence 1
        algn2 (Sequence[str]): globally aligned sequence 2

    Returns:
        A tuple (counts, scripts, ins, dels, subs) with `counts` being
        per-character confusions, `scripts` per-script counts, `ins` a
        dict of per-script insertions, `dels` an integer count of
        deletions, and `subs` per-script substitutions.
    """
    counts: Dict[Tuple[str, str], int] = Counter()
    with pkg_resources.resource_stream(__name__, 'scripts.json') as fp:
        script_map = json.load(fp)

    def _get_script(c):
        for s, e, n in script_map:
            if ord(c) == s or (e and s <= ord(c) <= e):
                return n
        return 'Unknown'

    scripts: Dict[Tuple[str, str], int] = Counter()
    ins: Dict[Tuple[str, str], int] = Counter()
    dels: int = 0
    subs: Dict[Tuple[str, str], int] = Counter()
    for u, v in zip(algn1, algn2):
        counts[(u, v)] += 1
    for k, v in counts.items():
        if k[0] == '':
            dels += v
        else:
            script = _get_script(k[0])
            scripts[script] += v
            if k[1] == '':
                ins[script] += v
            elif k[0] != k[1]:
                subs[script] += v
    return counts, scripts, ins, dels, subs


def collate_sequences(batch):
    """
    Sorts batch elements by image width (descending) and right-pads every
    image to the widest sample; integer targets are concatenated, string
    targets are kept as a list.
    """
    sorted_batch = sorted(batch, key=lambda x: x['image'].shape[2], reverse=True)
    seqs = [x['image'] for x in sorted_batch]
    seq_lens = torch.LongTensor([seq.shape[2] for seq in seqs])
    max_len = seqs[0].shape[2]
    seqs = torch.stack([F.pad(seq, pad=(0, max_len-seq.shape[2])) for seq in seqs])
    if isinstance(sorted_batch[0]['target'], str):
        labels = [x['target'] for x in sorted_batch]
    else:
        labels = torch.cat([x['target'] for x in sorted_batch]).long()
    label_lens = torch.LongTensor([len(x['target']) for x in sorted_batch])
    return {'image': seqs, 'target': labels, 'seq_lens': seq_lens, 'target_lens': label_lens}
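
# --- Usage sketch (editor's addition) ----------------------------------------
# global_align() returns the edit distance together with both sequences padded
# with '' at gap positions so they line up element by element. Exact gap
# placement can vary with tie-breaking, so only stable properties are checked:
if __name__ == '__main__':
    d, algn1, algn2 = global_align('kitten', 'sitting')
    assert d == 3                      # classic Levenshtein distance
    assert len(algn1) == len(algn2)    # gaps are padded with ''
    for u, v in zip(algn1, algn2):
        print(repr(u), '->', repr(v))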
12,846
34.00545
128
py
kraken
kraken-main/kraken/lib/dataset/segmentation.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Utility functions for data loading and training of VGSL networks. """ import json import torch import traceback import numpy as np import torch.nn.functional as F import shapely.geometry as geom from os import path, PathLike from PIL import Image from shapely.ops import split from itertools import groupby from torchvision import transforms from collections import defaultdict from torch.utils.data import Dataset from typing import Dict, List, Tuple, Sequence, Callable, Any, Union, Literal, Optional from skimage.draw import polygon from kraken.lib.xml import parse_alto, parse_page, parse_xml from kraken.lib.exceptions import KrakenInputException __all__ = ['BaselineSet'] import logging logger = logging.getLogger(__name__) class BaselineSet(Dataset): """ Dataset for training a baseline/region segmentation model. """ def __init__(self, imgs: Sequence[Union[PathLike, str]] = None, suffix: str = '.path', line_width: int = 4, padding: Tuple[int, int, int, int] = (0, 0, 0, 0), im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]), mode: Optional[Literal['path', 'alto', 'page', 'xml']] = 'path', augmentation: bool = False, valid_baselines: Sequence[str] = None, merge_baselines: Dict[str, Sequence[str]] = None, valid_regions: Sequence[str] = None, merge_regions: Dict[str, Sequence[str]] = None): """ Reads a list of image-json pairs and creates a data set. Args: imgs: suffix: Suffix to attach to image base name to load JSON files from. line_width: Height of the baseline in the scaled input. padding: Tuple of ints containing the left/right, top/bottom padding of the input images. target_size: Target size of the image as a (height, width) tuple. mode: Either path, alto, page, xml, or None. In alto, page, and xml mode the baseline paths and image data is retrieved from an ALTO/PageXML file. In `None` mode data is iteratively added through the `add` method. augmentation: Enable/disable augmentation. valid_baselines: Sequence of valid baseline identifiers. If `None` all are valid. merge_baselines: Sequence of baseline identifiers to merge. Note that merging occurs after entities not in valid_* have been discarded. valid_regions: Sequence of valid region identifiers. If `None` all are valid. merge_regions: Sequence of region identifiers to merge. Note that merging occurs after entities not in valid_* have been discarded. 
""" super().__init__() self.mode = mode self.im_mode = '1' self.pad = padding self.aug = None self.targets = [] # n-th entry contains semantic of n-th class self.class_mapping = {'aux': {'_start_separator': 0, '_end_separator': 1}, 'baselines': {}, 'regions': {}} # keep track of samples that failed to load self.failed_samples = set() self.class_stats = {'baselines': defaultdict(int), 'regions': defaultdict(int)} self.num_classes = 2 self.mbl_dict = merge_baselines if merge_baselines is not None else {} self.mreg_dict = merge_regions if merge_regions is not None else {} self.valid_baselines = valid_baselines self.valid_regions = valid_regions if mode in ['alto', 'page', 'xml']: if mode == 'alto': fn = parse_alto elif mode == 'page': fn = parse_page elif mode == 'xml': fn = parse_xml im_paths = [] self.targets = [] for img in imgs: try: data = fn(img) im_paths.append(data['image']) lines = defaultdict(list) for line in data['lines']: if valid_baselines is None or set(line['tags'].values()).intersection(valid_baselines): tags = set(line['tags'].values()).intersection(valid_baselines) if valid_baselines else line['tags'].values() for tag in tags: lines[self.mbl_dict.get(tag, tag)].append(line['baseline']) self.class_stats['baselines'][self.mbl_dict.get(tag, tag)] += 1 regions = defaultdict(list) for k, v in data['regions'].items(): if valid_regions is None or k in valid_regions: regions[self.mreg_dict.get(k, k)].extend(v) self.class_stats['regions'][self.mreg_dict.get(k, k)] += len(v) data['regions'] = regions self.targets.append({'baselines': lines, 'regions': data['regions']}) except KrakenInputException as e: logger.warning(e) continue # get line types imgs = im_paths # calculate class mapping line_types = set() region_types = set() for page in self.targets: for line_type in page['baselines'].keys(): line_types.add(line_type) for reg_type in page['regions'].keys(): region_types.add(reg_type) idx = -1 for idx, line_type in enumerate(line_types): self.class_mapping['baselines'][line_type] = idx + self.num_classes self.num_classes += idx + 1 idx = -1 for idx, reg_type in enumerate(region_types): self.class_mapping['regions'][reg_type] = idx + self.num_classes self.num_classes += idx + 1 elif mode == 'path': pass elif mode is None: imgs = [] else: raise Exception('invalid dataset mode') if augmentation: import cv2 cv2.setNumThreads(0) from albumentations import ( Compose, ToFloat, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, HueSaturationValue, ) self.aug = Compose([ ToFloat(), OneOf([ MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1), ], p=0.2), ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2), OneOf([ OpticalDistortion(p=0.3), ElasticTransform(p=0.1), ], p=0.2), HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3), ], p=0.5) self.imgs = imgs self.line_width = line_width self.transforms = im_transforms self.seg_type = None def add(self, image: Union[PathLike, str, Image.Image], baselines: List[List[List[Tuple[int, int]]]] = None, regions: Dict[str, List[List[Tuple[int, int]]]] = None, *args, **kwargs): """ Adds a page to the dataset. Args: im: Path to the whole page image baseline: A list containing dicts with a list of coordinates and tags [{'baseline': [[x0, y0], ..., [xn, yn]], 'tags': ('script_type',)}, ...] regions: A dict containing list of lists of coordinates {'region_type_0': [[x0, y0], ..., [xn, yn]]], 'region_type_1': ...}. 
""" if self.mode: raise Exception(f'The `add` method is incompatible with dataset mode {self.mode}') baselines_ = defaultdict(list) for line in baselines: if self.valid_baselines is None or set(line['tags'].values()).intersection(self.valid_baselines): tags = set(line['tags'].values()).intersection(self.valid_baselines) if self.valid_baselines else line['tags'].values() for tag in tags: baselines_[tag].append(line['baseline']) self.class_stats['baselines'][tag] += 1 if tag not in self.class_mapping['baselines']: self.num_classes += 1 self.class_mapping['baselines'][tag] = self.num_classes - 1 regions_ = defaultdict(list) for k, v in regions.items(): reg_type = self.mreg_dict.get(k, k) if self.valid_regions is None or reg_type in self.valid_regions: regions_[reg_type].extend(v) self.class_stats['baselines'][reg_type] += len(v) if reg_type not in self.class_mapping['regions']: self.num_classes += 1 self.class_mapping['regions'][reg_type] = self.num_classes - 1 self.targets.append({'baselines': baselines_, 'regions': regions_}) self.imgs.append(image) def __getitem__(self, idx): im = self.imgs[idx] if self.mode != 'path': target = self.targets[idx] else: with open('{}.path'.format(path.splitext(im)[0]), 'r') as fp: target = json.load(fp) if not isinstance(im, Image.Image): try: logger.debug(f'Attempting to load {im}') im = Image.open(im) im, target = self.transform(im, target) return {'image': im, 'target': target} except Exception: self.failed_samples.add(idx) idx = np.random.randint(0, len(self.imgs)) logger.debug(traceback.format_exc()) logger.info(f'Failed. Replacing with sample {idx}') return self[idx] im, target = self.transform(im, target) return {'image': im, 'target': target} @staticmethod def _get_ortho_line(lineseg, point, line_width, offset): lineseg = np.array(lineseg) norm_vec = lineseg[1, ...] - lineseg[0, ...] 
norm_vec_len = np.sqrt(np.sum(norm_vec**2)) unit_vec = norm_vec / norm_vec_len ortho_vec = unit_vec[::-1] * ((1, -1), (-1, 1)) if offset == 'l': point -= unit_vec * line_width else: point += unit_vec * line_width return (ortho_vec * 10 + point).astype('int').tolist() def transform(self, image, target): orig_size = image.size image = self.transforms(image) scale = (image.shape[2] - 2*self.pad[1])/orig_size[0] t = torch.zeros((self.num_classes,) + tuple(np.subtract(image.shape[1:], (2*self.pad[1], 2*self.pad[0])))) start_sep_cls = self.class_mapping['aux']['_start_separator'] end_sep_cls = self.class_mapping['aux']['_end_separator'] for key, lines in target['baselines'].items(): try: cls_idx = self.class_mapping['baselines'][key] except KeyError: # skip lines of classes not present in the training set continue for line in lines: # buffer out line to desired width line = [k for k, g in groupby(line)] line = np.array(line)*scale shp_line = geom.LineString(line) split_offset = min(5, shp_line.length/2) line_pol = np.array(shp_line.buffer(self.line_width/2, cap_style=2).boundary.coords, dtype=int) rr, cc = polygon(line_pol[:, 1], line_pol[:, 0], shape=image.shape[1:]) t[cls_idx, rr, cc] = 1 split_pt = shp_line.interpolate(split_offset).buffer(0.001) # top start_sep = np.array((split(shp_line, split_pt).geoms[0].buffer(self.line_width, cap_style=3).boundary.coords), dtype=int) rr_s, cc_s = polygon(start_sep[:, 1], start_sep[:, 0], shape=image.shape[1:]) t[start_sep_cls, rr_s, cc_s] = 1 t[start_sep_cls, rr, cc] = 0 split_pt = shp_line.interpolate(-split_offset).buffer(0.001) # top end_sep = np.array((split(shp_line, split_pt).geoms[-1].buffer(self.line_width, cap_style=3).boundary.coords), dtype=int) rr_s, cc_s = polygon(end_sep[:, 1], end_sep[:, 0], shape=image.shape[1:]) t[end_sep_cls, rr_s, cc_s] = 1 t[end_sep_cls, rr, cc] = 0 for key, regions in target['regions'].items(): try: cls_idx = self.class_mapping['regions'][key] except KeyError: # skip regions of classes not present in the training set continue for region in regions: region = np.array(region)*scale rr, cc = polygon(region[:, 1], region[:, 0], shape=image.shape[1:]) t[cls_idx, rr, cc] = 1 target = F.pad(t, self.pad) if self.aug: image = image.permute(1, 2, 0).numpy() target = target.permute(1, 2, 0).numpy() o = self.aug(image=image, mask=target) image = torch.tensor(o['image']).permute(2, 0, 1) target = torch.tensor(o['mask']).permute(2, 0, 1) return image, target def __len__(self): return len(self.imgs)
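
# --- Usage sketch (editor's addition) ----------------------------------------
# Minimal example of constructing a BaselineSet from ALTO files. The file name
# is a placeholder and the ImageInputTransforms parameters are illustrative
# assumptions; each sample is a dict of a (C, H, W) image tensor and a
# (num_classes, H, W) target heatmap.
if __name__ == '__main__':
    from kraken.lib.dataset.utils import ImageInputTransforms
    _tf = ImageInputTransforms(1, 1200, 1200, 3, 0, valid_norm=False)
    ds = BaselineSet(['page_0001.xml'], im_transforms=_tf, mode='alto')
    sample = ds[0]    # {'image': Tensor, 'target': Tensor}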
14,843
44.673846
137
py
kraken
kraken-main/kraken/lib/dataset/__init__.py
# # Copyright 2022 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Top-level module containing datasets for recognition and segmentation training. """ from .recognition import ArrowIPCRecognitionDataset, PolygonGTDataset, GroundTruthDataset # NOQA from .segmentation import BaselineSet # NOQA from .utils import ImageInputTransforms, collate_sequences, global_align, compute_confusions # NOQA
910
42.380952
99
py
kraken
kraken-main/kraken/lib/dataset/recognition.py
# # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Utility functions for data loading and training of VGSL networks. """ import io import json import torch import traceback import numpy as np import pyarrow as pa from PIL import Image from os import PathLike from functools import partial from torchvision import transforms from collections import Counter from torch.utils.data import Dataset from typing import Dict, List, Tuple, Callable, Optional, Any, Union, Literal from kraken.lib.util import is_bitonal from kraken.lib.codec import PytorchCodec from kraken.lib.segmentation import extract_polygons from kraken.lib.exceptions import KrakenInputException, KrakenEncodeException from kraken.lib import functional_im_transforms as F_t __all__ = ['DefaultAugmenter', 'ArrowIPCRecognitionDataset', 'PolygonGTDataset', 'GroundTruthDataset'] import logging logger = logging.getLogger(__name__) class DefaultAugmenter(): def __init__(self): import cv2 cv2.setNumThreads(0) from albumentations import ( Compose, ToFloat, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, PixelDropout ) self._transforms = Compose([ ToFloat(), PixelDropout(p=0.2), OneOf([ MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1), ], p=0.2), ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=1, p=0.2), OneOf([ OpticalDistortion(p=0.3), ElasticTransform(alpha=64, sigma=25, alpha_affine=0.25, p=0.1), ], p=0.2), ], p=0.5) def __call__(self, image): return self._transforms(image=image) class ArrowIPCRecognitionDataset(Dataset): """ Dataset for training a recognition model from a precompiled dataset in Arrow IPC format. """ def __init__(self, normalization: Optional[str] = None, whitespace_normalization: bool = True, skip_empty_lines: bool = True, reorder: Union[bool, Literal['L', 'R']] = True, im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]), augmentation: bool = False, split_filter: Optional[str] = None) -> None: """ Creates a dataset from a precompiled dataset in Arrow IPC format. Args: normalization: Unicode normalization for gt whitespace_normalization: Normalizes unicode whitespace and strips whitespace. skip_empty_lines: Whether to return samples without text. reorder: Whether to rearrange code points in "display"/LTR order. Set to L|R to change the default text direction. im_transforms: Function taking a PIL.Image and returning a tensor suitable for forward passes. augmentation: Enables augmentation. split_filter: Enables filtering of the dataset according to mask values in the set split. If set to `None` all rows are sampled, if set to `train`, `validation`, or `test` only rows with the appropriate flag set in the file will be considered. 
""" self.alphabet = Counter() # type: Counter self.text_transforms = [] # type: List[Callable[[str], str]] self.failed_samples = set() self.transforms = im_transforms self.aug = None self._split_filter = split_filter self._num_lines = 0 self.arrow_table = None self.codec = None self.skip_empty_lines = skip_empty_lines self.seg_type = None # built text transformations if normalization: self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization)) if whitespace_normalization: self.text_transforms.append(F_t.text_whitespace_normalize) if reorder: if reorder in ('L', 'R'): self.text_transforms.append(partial(F_t.text_reorder, base_dir=reorder)) else: self.text_transforms.append(F_t.text_reorder) if augmentation: self.aug = DefaultAugmenter() self.im_mode = self.transforms.mode def add(self, file: Union[str, PathLike]) -> None: """ Adds an Arrow IPC file to the dataset. Args: file: Location of the precompiled dataset file. """ # extract metadata and update alphabet with pa.memory_map(file, 'rb') as source: ds_table = pa.ipc.open_file(source).read_all() raw_metadata = ds_table.schema.metadata if not raw_metadata or b'lines' not in raw_metadata: raise ValueError(f'{file} does not contain a valid metadata record.') metadata = json.loads(raw_metadata[b'lines']) if metadata['type'] == 'kraken_recognition_baseline': if not self.seg_type: self.seg_type = 'baselines' if self.seg_type != 'baselines': raise ValueError(f'File {file} has incompatible type {metadata["type"]} for dataset with type {self.seg_type}.') elif metadata['type'] == 'kraken_recognition_bbox': if not self.seg_type: self.seg_type = 'bbox' if self.seg_type != 'bbox': raise ValueError(f'File {file} has incompatible type {metadata["type"]} for dataset with type {self.seg_type}.') else: raise ValueError(f'Unknown type {metadata["type"]} of dataset.') if self._split_filter and metadata['counts'][self._split_filter] == 0: logger.warning(f'No explicit split for "{self._split_filter}" in dataset {file} (with splits {metadata["counts"].items()}).') return if metadata['im_mode'] > self.im_mode and self.transforms.mode >= metadata['im_mode']: logger.info(f'Upgrading "im_mode" from {self.im_mode} to {metadata["im_mode"]}.') self.im_mode = metadata['im_mode'] # centerline normalize raw bbox dataset if self.seg_type == 'bbox' and metadata['image_type'] == 'raw': self.transforms.valid_norm = True self.alphabet.update(metadata['alphabet']) num_lines = metadata['counts'][self._split_filter] if self._split_filter else metadata['counts']['all'] if self._split_filter: ds_table = ds_table.filter(ds_table.column(self._split_filter)) if self.skip_empty_lines: logger.debug('Getting indices of empty lines after text transformation.') self.skip_empty_lines = False mask = np.ones(len(ds_table), dtype=bool) for index in range(len(ds_table)): try: text = self._apply_text_transform(ds_table.column('lines')[index].as_py(),) except KrakenInputException: mask[index] = False continue num_lines = np.count_nonzero(mask) logger.debug(f'Filtering out {np.count_nonzero(~mask)} empty lines') if np.any(~mask): ds_table = ds_table.filter(pa.array(mask)) self.skip_empty_lines = True if not self.arrow_table: self.arrow_table = ds_table else: self.arrow_table = pa.concat_tables([self.arrow_table, ds_table]) self._num_lines += num_lines def rebuild_alphabet(self): """ Recomputes the alphabet depending on the given text transformation. 
""" self.alphabet = Counter() for index in range(len(self)): try: text = self._apply_text_transform(self.arrow_table.column('lines')[index].as_py(),) self.alphabet.update(text) except KrakenInputException: continue def _apply_text_transform(self, sample) -> str: """ Applies text transform to a sample. """ text = sample['text'] for func in self.text_transforms: text = func(text) if not text: logger.debug(f'Text line "{sample["text"]}" is empty after transformations') if not self.skip_empty_lines: raise KrakenInputException('empty text line') return text def encode(self, codec: Optional[PytorchCodec] = None) -> None: """ Adds a codec to the dataset. """ if codec: self.codec = codec logger.info(f'Trying to encode dataset with codec {codec}') for index in range(self._num_lines): try: text = self._apply_text_transform( self.arrow_table.column('lines')[index].as_py(), ) self.codec.encode(text) except KrakenEncodeException as e: raise e except KrakenInputException: pass else: self.codec = PytorchCodec(''.join(self.alphabet.keys())) def no_encode(self) -> None: """ Creates an unencoded dataset. """ pass def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: try: sample = self.arrow_table.column('lines')[index].as_py() logger.debug(f'Loading sample {index}') im = Image.open(io.BytesIO(sample['im'])) im = self.transforms(im) if self.aug: im = im.permute((1, 2, 0)).numpy() o = self.aug(image=im) im = torch.tensor(o['image'].transpose(2, 0, 1)) text = self._apply_text_transform(sample) except Exception: self.failed_samples.add(index) idx = np.random.randint(0, len(self)) logger.debug(traceback.format_exc()) logger.info(f'Failed. Replacing with sample {idx}') return self[idx] return {'image': im, 'target': self.codec.encode(text) if self.codec is not None else text} def __len__(self) -> int: return self._num_lines class PolygonGTDataset(Dataset): """ Dataset for training a line recognition model from polygonal/baseline data. """ def __init__(self, normalization: Optional[str] = None, whitespace_normalization: bool = True, skip_empty_lines: bool = True, reorder: Union[bool, Literal['L', 'R']] = True, im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]), augmentation: bool = False) -> None: """ Creates a dataset for a polygonal (baseline) transcription model. Args: normalization: Unicode normalization for gt whitespace_normalization: Normalizes unicode whitespace and strips whitespace. skip_empty_lines: Whether to return samples without text. reorder: Whether to rearrange code points in "display"/LTR order. Set to L|R to change the default text direction. im_transforms: Function taking an PIL.Image and returning a tensor suitable for forward passes. augmentation: Enables augmentation. 
""" self._images = [] # type: Union[List[Image], List[torch.Tensor]] self._gt = [] # type: List[str] self.alphabet = Counter() # type: Counter self.text_transforms = [] # type: List[Callable[[str], str]] self.transforms = im_transforms self.aug = None self.skip_empty_lines = skip_empty_lines self.failed_samples = set() self.seg_type = 'baselines' # built text transformations if normalization: self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization)) if whitespace_normalization: self.text_transforms.append(F_t.text_whitespace_normalize) if reorder: if reorder in ('L', 'R'): self.text_transforms.append(partial(F_t.text_reorder, base_dir=reorder)) else: self.text_transforms.append(F_t.text_reorder) if augmentation: self.aug = DefaultAugmenter() self.im_mode = '1' def add(self, *args, **kwargs): """ Adds a line to the dataset. Args: im (path): Path to the whole page image text (str): Transcription of the line. baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]]. boundary (list): A polygon mask for the line. """ if 'preparse' not in kwargs or not kwargs['preparse']: kwargs = self.parse(*args, **kwargs) self._images.append((kwargs['image'], kwargs['baseline'], kwargs['boundary'])) self._gt.append(kwargs['text']) self.alphabet.update(kwargs['text']) def parse(self, image: Union[PathLike, str, Image.Image], text: str, baseline: List[Tuple[int, int]], boundary: List[Tuple[int, int]], *args, **kwargs): """ Parses a sample for the dataset and returns it. This function is mainly uses for parallelized loading of training data. Args: im (path): Path to the whole page image text (str): Transcription of the line. baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]]. boundary (list): A polygon mask for the line. """ orig_text = text for func in self.text_transforms: text = func(text) if not text and self.skip_empty_lines: raise KrakenInputException(f'Text line "{orig_text}" is empty after transformations') if not baseline: raise KrakenInputException('No baseline given for line') if not boundary: raise KrakenInputException('No boundary given for line') return {'text': text, 'image': image, 'baseline': baseline, 'boundary': boundary, 'preparse': True} def encode(self, codec: Optional[PytorchCodec] = None) -> None: """ Adds a codec to the dataset and encodes all text lines. Has to be run before sampling from the dataset. """ if codec: self.codec = codec else: self.codec = PytorchCodec(''.join(self.alphabet.keys())) self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]] for im, gt in zip(self._images, self._gt): self.training_set.append((im, self.codec.encode(gt))) def no_encode(self) -> None: """ Creates an unencoded dataset. 
""" self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]] for im, gt in zip(self._images, self._gt): self.training_set.append((im, gt)) def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: item = self.training_set[index] try: logger.debug(f'Attempting to load {item[0]}') im = item[0][0] if not isinstance(im, Image.Image): im = Image.open(im) im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': item[0][1], 'boundary': item[0][2]}]})) im = self.transforms(im) if im.shape[0] == 3: im_mode = 'RGB' elif im.shape[0] == 1: im_mode = 'L' if is_bitonal(im): im_mode = '1' if im_mode > self.im_mode: logger.info(f'Upgrading "im_mode" from {self.im_mode} to {im_mode}') self.im_mode = im_mode if self.aug: im = im.permute((1, 2, 0)).numpy() o = self.aug(image=im) im = torch.tensor(o['image'].transpose(2, 0, 1)) return {'image': im, 'target': item[1]} except Exception: self.failed_samples.add(index) idx = np.random.randint(0, len(self.training_set)) logger.debug(traceback.format_exc()) logger.info(f'Failed. Replacing with sample {idx}') return self[idx] def __len__(self) -> int: return len(self._images) class GroundTruthDataset(Dataset): """ Dataset for training a line recognition model. All data is cached in memory. """ def __init__(self, split: Callable[[Union[PathLike, str]], str] = F_t.default_split, suffix: str = '.gt.txt', normalization: Optional[str] = None, whitespace_normalization: bool = True, skip_empty_lines: bool = True, reorder: Union[bool, str] = True, im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]), augmentation: bool = False) -> None: """ Reads a list of image-text pairs and creates a ground truth set. Args: split: Function for generating the base name without extensions from paths suffix: Suffix to attach to image base name for text retrieval mode: Image color space. Either RGB (color) or L (grayscale/bw). Only L is compatible with vertical scaling/dewarping. scale: Target height or (width, height) of dewarped line images. Vertical-only scaling is through CenterLineNormalizer, resizing with Lanczos interpolation. Set to 0 to disable. normalization: Unicode normalization for gt whitespace_normalization: Normalizes unicode whitespace and strips whitespace. skip_empty_lines: Whether to return samples without text. reorder: Whether to rearrange code points in "display"/LTR order. Set to L|R to change the default text direction. im_transforms: Function taking an PIL.Image and returning a tensor suitable for forward passes. augmentation: Enables augmentation. 
""" self.suffix = suffix self.split = partial(F_t.suffix_split, split=split, suffix=suffix) self._images = [] # type: Union[List[Image], List[torch.Tensor]] self._gt = [] # type: List[str] self.alphabet = Counter() # type: Counter self.text_transforms = [] # type: List[Callable[[str], str]] self.transforms = im_transforms self.skip_empty_lines = skip_empty_lines self.aug = None self.failed_samples = set() self.seg_type = 'bbox' # built text transformations if normalization: self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization)) if whitespace_normalization: self.text_transforms.append(F_t.text_whitespace_normalize) if reorder: if reorder in ('L', 'R'): self.text_transforms.append(partial(F_t.text_reorder, base_dir=reorder)) else: self.text_transforms.append(F_t.text_reorder) if augmentation: self.aug = DefaultAugmenter() self.im_mode = '1' def add(self, *args, **kwargs) -> None: """ Adds a line-image-text pair to the dataset. Args: image (str): Input image path """ if 'preparse' not in kwargs or not kwargs['preparse']: kwargs = self.parse(*args, **kwargs) self._images.append(kwargs['image']) self._gt.append(kwargs['text']) self.alphabet.update(kwargs['text']) def parse(self, image: Union[PathLike, str, Image.Image], *args, **kwargs) -> Dict: """ Parses a sample for this dataset. This is mostly used to parallelize populating the dataset. Args: image (str): Input image path """ with open(self.split(image), 'r', encoding='utf-8') as fp: text = fp.read().strip('\n\r') for func in self.text_transforms: text = func(text) if not text and self.skip_empty_lines: raise KrakenInputException(f'Text line is empty ({fp.name})') return {'image': image, 'text': text, 'preparse': True} def encode(self, codec: Optional[PytorchCodec] = None) -> None: """ Adds a codec to the dataset and encodes all text lines. Has to be run before sampling from the dataset. """ if codec: self.codec = codec else: self.codec = PytorchCodec(''.join(self.alphabet.keys())) self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]] for im, gt in zip(self._images, self._gt): self.training_set.append((im, self.codec.encode(gt))) def no_encode(self) -> None: """ Creates an unencoded dataset. """ self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]] for im, gt in zip(self._images, self._gt): self.training_set.append((im, gt)) def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: item = self.training_set[index] try: logger.debug(f'Attempting to load {item[0]}') im = item[0] if not isinstance(im, Image.Image): im = Image.open(im) im = self.transforms(im) if im.shape[0] == 3: im_mode = 'RGB' elif im.shape[0] == 1: im_mode = 'L' if is_bitonal(im): im_mode = '1' if im_mode > self.im_mode: logger.info(f'Upgrading "im_mode" from {self.im_mode} to {im_mode}') self.im_mode = im_mode if self.aug: im = im.permute((1, 2, 0)).numpy() o = self.aug(image=im) im = torch.tensor(o['image'].transpose(2, 0, 1)) return {'image': im, 'target': item[1]} except Exception: self.failed_samples.add(index) idx = np.random.randint(0, len(self.training_set)) logger.debug(traceback.format_exc()) logger.info(f'Failed. Replacing with sample {idx}') return self[idx] def __len__(self) -> int: return len(self._images)
23,940
40.277586
137
py
kraken
kraken-main/kraken/lib/pretrain/model.py
# # Copyright 2022 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Pytorch-lightning modules for recognition model pretraining. Pretraining is based on an image inpainting surrogate task that aims to reconstruct randomly sampled masked patches from the initial convolutional feature maps that have been replaced with a learnable embedding. The model is trained with a contrastive loss where negative samples are randomly generated from the unmasked parts of the sequence. Apart from an improved sampling method the implementation is mostly a faithful adaptation of: Vogler, Nikolai, et al. "Lacuna Reconstruction: Self-supervised Pre-training for Low-Resource Historical Document Transcription." arXiv preprint arXiv:2112.08692 (2021). """ import re import math import torch import logging import numpy as np import torch.nn.functional as F import pytorch_lightning as pl from os import PathLike from itertools import chain from functools import partial from torch.optim import lr_scheduler from torch.multiprocessing import Pool from typing import Dict, Optional, Sequence, Union, Any from pytorch_lightning.callbacks import EarlyStopping from pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda from kraken.lib import vgsl, default_specs, layers from kraken.lib.xml import preparse_xml_data from kraken.lib.codec import PytorchCodec from kraken.lib.dataset import (ArrowIPCRecognitionDataset, GroundTruthDataset, PolygonGTDataset, ImageInputTransforms, collate_sequences) from kraken.lib.exceptions import KrakenInputException from kraken.lib.train import _configure_optimizer_and_lr_scheduler from kraken.lib.pretrain.layers import Wav2Vec2Mask from torch.utils.data import DataLoader, random_split, Subset logger = logging.getLogger(__name__) def _star_fun(fun, kwargs): try: return fun(**kwargs) except FileNotFoundError as e: logger.warning(f'{e.strerror}: {e.filename}. Skipping.') except KrakenInputException as e: logger.warning(str(e)) return None class PretrainDataModule(pl.LightningDataModule): def __init__(self, training_data: Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]] = None, evaluation_data: Optional[Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]]] = None, partition: Optional[float] = 0.9, binary_dataset_split: bool = False, batch_size: int = 4, height: int = 48, width: int = 0, channels: int = 1, num_workers: int = 1, repolygonize: bool = False, force_binarization: bool = False, format_type: str = 'path', pad: int = 16, augment: bool = default_specs.RECOGNITION_PRETRAIN_HYPER_PARAMS['augment']): """ A LightningDataModule encapsulating text-less training data for unsupervised recognition model pretraining. 
Args: training_data: evaluation_data: partition: binary_dataset_split: batch_size: num_workers: force_binarization: format_type: augment: """ super().__init__() self.save_hyperparameters() DatasetClass = GroundTruthDataset valid_norm = True if format_type in ['xml', 'page', 'alto']: logger.info(f'Parsing {len(training_data)} XML files for training data') training_data = preparse_xml_data(training_data, format_type, repolygonize) if evaluation_data: logger.info(f'Parsing {len(evaluation_data)} XML files for validation data') evaluation_data = preparse_xml_data(evaluation_data, format_type, repolygonize) if binary_dataset_split: logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.') binary_dataset_split = False DatasetClass = PolygonGTDataset valid_norm = False elif format_type == 'binary': DatasetClass = ArrowIPCRecognitionDataset if repolygonize: logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.') valid_norm = False logger.info(f'Got {len(training_data)} binary dataset files for training data') training_data = [{'file': file} for file in training_data] if evaluation_data: logger.info(f'Got {len(evaluation_data)} binary dataset files for validation data') evaluation_data = [{'file': file} for file in evaluation_data] elif format_type == 'path': if force_binarization: logger.warning('Forced binarization enabled in `path` mode. Will be ignored.') force_binarization = False if repolygonize: logger.warning('Repolygonization enabled in `path` mode. Will be ignored.') if binary_dataset_split: logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.') binary_dataset_split = False logger.info(f'Got {len(training_data)} line strip images for training data') training_data = [{'image': im} for im in training_data] if evaluation_data: logger.info(f'Got {len(evaluation_data)} line strip images for validation data') evaluation_data = [{'image': im} for im in evaluation_data] valid_norm = True # format_type is None. Determine training type from length of training data entry elif not format_type: if len(training_data[0]) >= 4: DatasetClass = PolygonGTDataset valid_norm = False else: if force_binarization: logger.warning('Forced binarization enabled with box lines. Will be ignored.') force_binarization = False if repolygonize: logger.warning('Repolygonization enabled with box lines. Will be ignored.') if binary_dataset_split: logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. 
Will be ignored.') binary_dataset_split = False else: raise ValueError(f'format_type {format_type} not in [alto, page, xml, path, binary].') self.transforms = ImageInputTransforms(batch_size, height, width, channels, (pad, 0), valid_norm, force_binarization) if evaluation_data: train_set = self._build_dataset(DatasetClass, training_data) self.train_set = Subset(train_set, range(len(train_set))) val_set = self._build_dataset(DatasetClass, evaluation_data) self.val_set = Subset(val_set, range(len(val_set))) elif binary_dataset_split: train_set = self._build_dataset(DatasetClass, training_data, split_filter='train') self.train_set = Subset(train_set, range(len(train_set))) val_set = self._build_dataset(DatasetClass, training_data, split_filter='validation') self.val_set = Subset(val_set, range(len(val_set))) logger.info(f'Found {len(self.train_set)} (train) / {len(self.val_set)} (val) samples in pre-encoded dataset') else: train_set = self._build_dataset(DatasetClass, training_data) train_len = int(len(train_set)*partition) val_len = len(train_set) - train_len logger.info(f'No explicit validation data provided. Splitting off ' f'{val_len} (of {len(train_set)}) samples to validation ' 'set. (Will disable alphabet mismatch detection.)') self.train_set, self.val_set = random_split(train_set, (train_len, val_len)) if len(self.train_set) == 0 or len(self.val_set) == 0: raise ValueError('No valid training data was provided to the train ' 'command. Please add valid XML, line, or binary data.') logger.info(f'Training set {len(self.train_set)} lines, validation set ' f'{len(self.val_set)} lines') def _build_dataset(self, DatasetClass, training_data, **kwargs): dataset = DatasetClass(im_transforms=self.transforms, augmentation=self.hparams.augment, skip_empty_lines=False, **kwargs) if (self.hparams.num_workers and self.hparams.num_workers > 1) and self.hparams.format_type != 'binary': with Pool(processes=self.hparams.num_workers) as pool: for im in pool.imap_unordered(partial(_star_fun, dataset.parse), training_data, 5): logger.debug(f'Adding sample {im} to training set') if im: dataset.add(**im) else: for im in training_data: try: dataset.add(**im) except KrakenInputException as e: logger.warning(str(e)) return dataset def train_dataloader(self): return DataLoader(self.train_set, collate_fn=collate_sequences, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, pin_memory=True) def val_dataloader(self): return DataLoader(self.val_set, collate_fn=collate_sequences, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, pin_memory=True) def setup(self, stage: Optional[str] = None): self.train_set.dataset.no_encode() self.val_set.dataset.no_encode() class RecognitionPretrainModel(pl.LightningModule): def __init__(self, hyper_params: Dict[str, Any] = None, output: str = 'model', spec: str = default_specs.RECOGNITION_SPEC, model: Optional[Union[PathLike, str]] = None, load_hyper_parameters: bool = False, len_train_set: int = -1): """ A LightningModule encapsulating the unsupervised pretraining setup for a text recognition model. Setup parameters (load, training_data, evaluation_data, ....) are named, model hyperparameters (everything in `kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS`) are in the `hyper_params` argument. Args: hyper_params (dict): Hyperparameter dictionary containing all fields from kraken.lib.default_specs.RECOGNITION_PRETRAIN_HYPER_PARAMS **kwargs: Setup parameters, i.e. CLI parameters of the train() command. 
""" super().__init__() hyper_params_ = default_specs.RECOGNITION_PRETRAIN_HYPER_PARAMS if model: logger.info(f'Loading existing model from {model} ') self.nn = vgsl.TorchVGSLModel.load_model(model) if self.nn.model_type not in [None, 'recognition']: raise ValueError(f'Model {model} is of type {self.nn.model_type} while `recognition` is expected.') if load_hyper_parameters: hp = self.nn.hyper_params else: hp = {} hyper_params_.update(hp) else: self.nn = None if hyper_params: hyper_params_.update(hyper_params) self.save_hyperparameters(hyper_params_) self.model = model self.output = output self.len_train_set = len_train_set self.best_epoch = 0 self.best_metric = math.inf self.best_model = None self.val_ce = [] spec = spec.strip() if spec[0] != '[' or spec[-1] != ']': raise ValueError(f'VGSL spec {spec} not bracketed') self.spec = spec # preparse input sizes from vgsl string to seed ground truth data set # sizes and dimension ordering. if not self.nn: blocks = spec[1:-1].split(' ') m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0]) if not m: raise ValueError(f'Invalid input spec {blocks[0]}') self.batch, self.height, self.width, self.channels = [int(x) for x in m.groups()] else: self.batch, self.channels, self.height, self.width = self.nn.input if 'file_system' in torch.multiprocessing.get_all_sharing_strategies(): logger.debug('Setting multiprocessing tensor sharing strategy to file_system') torch.multiprocessing.set_sharing_strategy('file_system') logger.info('Encoding training set') def forward(self, x, seq_lens): return self.net(x, seq_lens) def _step(self, batch, batch_idx): try: # sequence batch if 'seq_lens' in batch: output = self.features(batch['image'], batch['seq_lens']) else: output = self.features(batch['image']) # height should be 1 by now if output[0].size(2) != 1: raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(output[0].size(2))) mask_output = self.wav2vec2mask(*output) # run contextual encoder, i.e. recurrent layers output, seq_lens = self.encoder(mask_output['output'], mask_output['seq_len']) # unmasked features in encoder output domain y = mask_output['unmasked_samples'] # negative samples negatives = mask_output['negative_samples'] N, C, H, W = output.shape output = output.transpose(1, 3).reshape(-1, W, C) # masked features after encoder x = output[mask_output['mask']].reshape_as(y) mask_n_neg = torch.cat([y.unsqueeze(0), negatives], dim=0) logits = torch.cosine_similarity(x.float(), mask_n_neg.float(), dim=-1).type_as(x) targets = logits.new_zeros(logits.size(1) * logits.size(2), dtype=torch.long) logits = logits.transpose(0, 2) logits = logits.reshape(-1, logits.size(-1)) logits /= self.hparams.logit_temp loss = F.cross_entropy(logits, targets) return logits, targets, loss except RuntimeError as e: if is_oom_error(e): logger.warning('Out of memory error in trainer. 
Skipping batch and freeing caches.') garbage_collection_cuda() else: raise def validation_step(self, batch, batch_idx): o = self._step(batch, batch_idx) if o is not None: logits, targets, loss = o with torch.no_grad(): if logits.numel() == 0: corr = 0 count = 0 else: _max = logits.argmax(-1) == 0 _min = logits.argmin(-1) == 0 both = _max & _min corr = _max.long().sum().item() - both.long().sum().item() self.val_ce.append(loss.cpu()) self.log('CE', loss, on_step=True, on_epoch=True) def on_validation_epoch_end(self): ce = np.mean(self.val_ce) self.val_ce.clear() if ce < self.best_metric: logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {ce} ({self.current_epoch})') self.best_epoch = self.current_epoch self.best_metric = ce logger.info(f'validation run: cross_entropy: {ce}') self.log('val_ce', ce, on_step=False, on_epoch=True, prog_bar=True, logger=True) def training_step(self, batch, batch_idx): o = self._step(batch, batch_idx) if o is not None: _, _, loss = o self.log('CE', loss) return loss def configure_optimizers(self): return _configure_optimizer_and_lr_scheduler(self.hparams, chain(self.features.parameters(), self.wav2vec2mask.parameters(), self.encoder.parameters()), len_train_set=self.len_train_set, loss_tracking_mode='min') def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure): # update params optimizer.step(closure=optimizer_closure) # linear warmup between 0 and the initial learning rate `lrate` in `warmup` # steps. if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup: lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup) for pg in optimizer.param_groups: pg["lr"] = lr_scale * self.hparams.lrate def lr_scheduler_step(self, scheduler, metric): if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup: # step OneCycleLR each batch if not in warmup phase if isinstance(scheduler, lr_scheduler.OneCycleLR): scheduler.step() # step every other scheduler epoch-wise elif self.trainer.is_last_batch: scheduler.step() def setup(self, stage: Optional[str] = None): # finalize models in case of appending/loading if stage in [None, 'fit']: if self.model: self.spec = self.nn.spec else: logger.info(f'Creating new model {self.spec}') self.nn = vgsl.TorchVGSLModel(self.spec) # initialize weights self.nn.init_weights() self.net = self.nn.nn for idx, layer in enumerate(self.net.children()): if isinstance(layer, layers.TransposedSummarizingRNN): break self.features = self.net[:idx] if self.model and 'wav2vec2mask' in self.nn.aux_layers: logger.info('Extracting wav2vec2mask layer from model: mask width ' f'{self.nn.aux_layers["wav2vec2mask"].mask_width}, prob ' f'{self.nn.aux_layers["wav2vec2mask"].mask_prob}, negative samples ' f'{self.nn.aux_layers["wav2vec2mask"].num_negatives}') self.wav2vec2mask = self.nn.aux_layers['wav2vec2mask'] logger.info("Overriding masking hyperparameters with the model's: ") self.hparams.mask_width = self.wav2vec2mask.mask_width self.hparams.mask_prob = self.wav2vec2mask.mask_prob self.hparams.num_negatives = self.wav2vec2mask.num_negatives else: logger.info(f'Instantiating new wav2vec2mask layer: mask width ' f'{self.hparams.mask_width}, prob ' f'{self.hparams.mask_prob}, negative samples ' f'{self.hparams.num_negatives}') self.wav2vec2mask = Wav2Vec2Mask(self.net[idx-1].output_shape[1], self.net[-1].output_shape[1], self.hparams.mask_width, self.hparams.mask_prob, self.hparams.num_negatives) self.nn.aux_layers = {'wav2vec2mask': self.wav2vec2mask} # 
add dummy codec and output layer if not self.nn.codec and not isinstance(self.net[-1], layers.LinSoftmax): logger.info('Adding dummy codec and output layer to model') self.nn.add_codec(PytorchCodec(' ')) self.nn.append(len(self.net), "[O1c2]") self.encoder = self.net[idx:] self.nn.hyper_params = self.hparams self.nn.model_type = 'recognition' def configure_callbacks(self): callbacks = [] if self.hparams.quit == 'early': callbacks.append(EarlyStopping(monitor='CE', mode='min', patience=self.hparams.lag, stopping_threshold=0.0)) return callbacks
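# --- Illustrative sketch (not part of kraken): the contrastive objective in
# `_step` above reduced to plain tensor operations. The unmasked (true) features
# are prepended to the negatives, cosine similarities act as logits, and the
# correct class is always index 0. All shapes here are invented.
import torch
import torch.nn.functional as F

x = torch.randn(8, 16, 64)             # encoder output at the masked steps (N, T, C)
y = torch.randn(8, 16, 64)             # unmasked target features at the same steps
negatives = torch.randn(4, 8, 16, 64)  # (num_negatives, N, T, C)

mask_n_neg = torch.cat([y.unsqueeze(0), negatives], dim=0)               # (1+neg, N, T, C)
logits = torch.cosine_similarity(x.float(), mask_n_neg.float(), dim=-1)  # (1+neg, N, T)
targets = logits.new_zeros(logits.size(1) * logits.size(2), dtype=torch.long)
logits = logits.transpose(0, 2)                                          # (T, N, 1+neg)
logits = logits.reshape(-1, logits.size(-1))                             # (N*T, 1+neg)
loss = F.cross_entropy(logits / 0.1, targets)                            # 0.1 stands in for logit_temp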
21,793
44.404167
133
py
kraken
kraken-main/kraken/lib/pretrain/layers.py
""" Layers for VGSL models """ import torch from typing import Tuple, Optional from torch.nn import Module, Embedding, Linear from kraken.lib.vgsl import VGSLBlock from kraken.lib.pretrain.util import compute_mask_indices, sample_negatives # all tensors are ordered NCHW, the "feature" dimension is C, so the output of # an LSTM will be put into C same as the filters of a CNN. __all__ = ['Wav2Vec2Mask'] class Wav2Vec2Mask(Module): """ A layer for Wav2Vec2-style masking. Needs to be placed just before recurrent/contextual layers. """ def __init__(self, context_encoder_input_dim: int, final_dim: int, mask_width: int, mask_prob: float, num_negatives: int) -> None: """ Args: context_encoder_input_dim: size of the `C` input dimension final_dim: size of the decoder `C` output dimension just before the final linear projection. mask_width: width of the non-overlapping masked areas. mask_prob: probability of masking at each time step num_negatives: number of negative samples with width mask_width * num_masks Shape: - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H` height, and `W` width. - Outputs output :math:`(N, C, H, W)` """ super().__init__() self.context_encoder_input_dim = context_encoder_input_dim self.final_dim = final_dim self.mask_width = mask_width self.mask_prob = mask_prob self.num_negatives = num_negatives # mask embedding replacing the masked out areas self.mask_emb = Embedding(1, context_encoder_input_dim) self.project_q = Linear(context_encoder_input_dim, final_dim) def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: N, C, H, W = inputs.shape if H != 1: raise Exception(f'Height has to be 1, not {H} for Wav2Vec2 masking layer.') # NCHW -> NWC inputs = inputs.transpose(1, 3).reshape(-1, W, C) mask_indices = compute_mask_indices((N, W), self.mask_prob, self.mask_width) mask_indices = torch.from_numpy(mask_indices).to(inputs.device) unmasked_features = inputs.clone() # mask out inputs[mask_indices] = self.mask_emb.weight # project into same dimensionality as final recurrent layer unmasked_features = self.project_q(unmasked_features) unmasked_samples = unmasked_features[mask_indices].view(unmasked_features.size(0), -1, unmasked_features.size(-1)) # negative samples negative_samples = sample_negatives(unmasked_samples, unmasked_samples.size(1), self.num_negatives) # NWC -> NCHW inputs = inputs.permute(0, 2, 1).unsqueeze(2) return {'output': inputs, 'unmasked_samples': unmasked_samples, 'negative_samples': negative_samples, 'seq_len': seq_len, 'mask': mask_indices} def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: """ Calculates the output shape from input 4D tuple NCHW. """ return input def get_spec(self, name) -> "VGSLBlock": """ Generates a VGSL spec block from the layer instance. """ return f'[1,{self.final_dim},0,{self.context_encoder_input_dim} W{{{name}}}{self.final_dim},{self.mask_width},{self.mask_prob},{self.num_negatives}]' def deserialize(self, name: str, spec) -> None: """ Sets the weights of an initialized module from a CoreML protobuf spec. 
""" # extract embedding parameters emb = [x for x in spec.neuralNetwork.layers if x.name == '{}_wave2vec2_emb'.format(name)][0].embedding weights = torch.Tensor(emb.weights.floatValue).resize_as_(self.mask_emb.weight.data) self.mask_emb.weight = torch.nn.Parameter(weights) # extract linear projection parameters lin = [x for x in spec.neuralNetwork.layers if x.name == '{}_wave2vec2_lin'.format(name)][0].innerProduct weights = torch.Tensor(lin.weights.floatValue).resize_as_(self.project_q.weight.data) bias = torch.Tensor(lin.bias.floatValue) self.project_q.weight = torch.nn.Parameter(weights) self.project_q.bias = torch.nn.Parameter(bias) def serialize(self, name: str, input: str, builder): """ Serializes the module using a NeuralNetworkBuilder. """ wave2vec2_emb_name = f'{name}_wave2vec2_emb' builder.add_embedding(wave2vec2_emb_name, self.mask_emb.weight.data.numpy(), None, self.context_encoder_input_dim, self.mask_width, has_bias=False, input_name=input, output_name=wave2vec2_emb_name) wave2vec2_lin_name = f'{name}_wave2vec2_lin' builder.add_inner_product(wave2vec2_lin_name, self.project_q.weight.data.numpy(), self.project_q.bias.data.numpy(), self.context_encoder_input_dim, self.final_dim, has_bias=True, input_name=input, output_name=wave2vec2_lin_name) return name
5,457
41.640625
157
py
kraken
kraken-main/kraken/lib/pretrain/util.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Sequence, Union, Tuple import torch import random import numpy as np def positive_integers_with_sum(n, total): ls = [0] rv = [] while len(ls) < n: c = random.randint(0, total) ls.append(c) ls = sorted(ls) ls.append(total) for i in range(1, len(ls)): rv.append(ls[i] - ls[i-1]) return rv def compute_masks(mask_prob: float, mask_width: int, num_neg_samples: int, seq_lens: Union[torch.Tensor, Sequence[int]]): """ Samples num_mask non-overlapping random masks of length mask_width in sequence of length seq_len. Args: mask_prob: Probability of each individual token being chosen as start of a masked sequence. Overall number of masks num_masks is mask_prob * sum(seq_lens) / mask_width. mask_width: width of each mask num_neg_samples: Number of samples from unmasked sequence parts (gets multiplied by num_mask) seq_lens: sequence lengths Returns: An index array containing 1 for masked bits, 2 for negative samples, the number of masks, and the actual number of negative samples. """ mask_samples = np.zeros(sum(seq_lens)) num_masks = int(mask_prob * sum(seq_lens.numpy()) // mask_width) num_neg_samples = num_masks * num_neg_samples num_masks += num_neg_samples indices = [x+mask_width for x in positive_integers_with_sum(num_masks, sum(seq_lens)-num_masks*mask_width)] start = 0 mask_slices = [] for i in indices: i_start = random.randint(start, i+start-mask_width) mask_slices.append(slice(i_start, i_start+mask_width)) start += i neg_idx = random.sample(range(len(mask_slices)), num_neg_samples) neg_slices = [mask_slices.pop(idx) for idx in sorted(neg_idx, reverse=True)] mask_samples[np.r_[tuple(mask_slices)]] = 1 mask_samples[np.r_[tuple(neg_slices)]] = 2 return mask_samples, num_masks - num_neg_samples, num_neg_samples def buffered_arange(max): if not hasattr(buffered_arange, "buf"): buffered_arange.buf = torch.LongTensor() if max > buffered_arange.buf.numel(): buffered_arange.buf.resize_(max) torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max] def sample_negatives(y, num_samples, num_neg_samples: int): B, W, C = y.shape y = y.view(-1, C) # BTC => (BxT)C with torch.no_grad(): tszs = (buffered_arange(num_samples).unsqueeze(-1).expand(-1, num_neg_samples).flatten()) neg_idxs = torch.randint(low=0, high=W - 1, size=(B, num_neg_samples * num_samples)) neg_idxs[neg_idxs >= tszs] += 1 for i in range(1, B): neg_idxs[i] += i * W negs = y[neg_idxs.view(-1)] negs = negs.view(B, num_samples, num_neg_samples, C).permute(2, 0, 1, 3) # to NxBxTxC return negs def compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int = 4, mask_min_space: int = 2) -> np.ndarray: """ Computes random mask spans for a given shape Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller. 
""" bsz, all_sz = shape mask = np.full((bsz, all_sz), False) all_num_mask = int(mask_prob * all_sz / float(mask_length) + np.random.rand()) mask_idcs = [] for i in range(bsz): # import ipdb; ipdb.set_trace() sz = all_sz num_mask = all_num_mask lengths = np.full(num_mask, mask_length) if sum(lengths) == 0: lengths[0] = min(mask_length, sz - 1) mask_idc = [] def arrange(s, e, length, keep_length): span_start = np.random.randint(s, e - length) mask_idc.extend(span_start + i for i in range(length)) new_parts = [] if span_start - s - mask_min_space >= keep_length: new_parts.append((s, span_start - mask_min_space + 1)) if e - span_start - keep_length - mask_min_space > keep_length: new_parts.append((span_start + length + mask_min_space, e)) return new_parts parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): lens = np.fromiter( (e - s if e - s >= length + mask_min_space else 0 for s, e in parts), np.int, ) l_sum = np.sum(lens) if l_sum == 0: break probs = lens / np.sum(lens) c = np.random.choice(len(parts), p=probs) s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = np.asarray(mask_idc) mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) # make sure all masks are the same length in the batch by removing masks # if they are greater than the min length mask min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): if len(mask_idc) > min_len: mask_idc = np.random.choice(mask_idc, min_len, replace=False) assert len(mask_idc) == min_len mask[i, mask_idc] = True return mask
5,805
33.975904
128
py
kraken
kraken-main/kraken/lib/pretrain/__init__.py
# # Copyright 2022 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """ Tools for unsupervised pretraining of recognition models. """ from .model import PretrainDataModule, RecognitionPretrainModel # NOQA
718
34.95
70
py
kraken
kraken-main/tests/test_layers.py
# -*- coding: utf-8 -*- import unittest import torch from kraken.lib import layers class TestLayers(unittest.TestCase): """ Testing custom layer implementations. """ def setUp(self): torch.set_grad_enabled(False) def test_maxpool(self): """ Test maximum pooling layer. """ mp = layers.MaxPool((3, 3), (2, 2)) o = mp(torch.randn(1, 2, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 15, 31)) def test_1d_dropout(self): """ Test 1d dropout layer. """ do = layers.Dropout(0.2, 1) o = do(torch.randn(1, 2, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 32, 64)) def test_2d_dropout(self): """ Test 2d dropout layer. """ do = layers.Dropout(0.2, 2) o = do(torch.randn(1, 2, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 32, 64)) def test_forward_rnn_layer_x(self): """ Test unidirectional RNN layer in x-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, False) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 32, 64)) def test_forward_rnn_layer_y(self): """ Test unidirectional RNN layer in y-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'f', True, False) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 32, 64)) def test_forward_rnn_layer_x_summarize(self): """ Test unidirectional summarizing RNN layer in x-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, True) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 32, 1)) def test_forward_rnn_layer_y_summarize(self): """ Test unidirectional summarizing RNN layer in y-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'f', True, True) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 2, 1, 64)) def test_bidi_rnn_layer_x(self): """ Test bidirectional RNN layer in x-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'b', False, False) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 4, 32, 64)) def test_bidi_rnn_layer_y(self): """ Test bidirectional RNN layer in y-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'b', True, False) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 4, 32, 64)) def test_bidi_rnn_layer_x_summarize(self): """ Test bidirectional summarizing RNN layer in x-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'b', False, True) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 4, 32, 1)) def test_bidi_rnn_layer_y_summarize(self): """ Test bidirectional summarizing RNN layer in y-dimension. """ rnn = layers.TransposedSummarizingRNN(10, 2, 'b', True, True) o = rnn(torch.randn(1, 10, 32, 64)) self.assertEqual(o[0].shape, (1, 4, 1, 64)) def test_linsoftmax(self): """ Test basic function of linear layer. """ lin = layers.LinSoftmax(20, 10) o = lin(torch.randn(1, 20, 12, 24)) self.assertEqual(o[0].shape, (1, 10, 12, 24)) def test_linsoftmax_train(self): """ Test function of linear layer in training mode (log_softmax) """ lin = layers.LinSoftmax(20, 10).train() o = lin(torch.randn(1, 20, 12, 24)) self.assertLess(o[0].max(), 0) def test_linsoftmax_test(self): """ Test function of linear layer in eval mode (softmax) """ lin = layers.LinSoftmax(20, 10).eval() o = lin(torch.randn(1, 20, 12, 24)) self.assertGreaterEqual(o[0].min(), 0) def test_linsoftmax_aug(self): """ Test basic function of linear layer with 1-augmentation. 
""" lin = layers.LinSoftmax(20, 10, True) o = lin(torch.randn(1, 20, 12, 24)) self.assertEqual(o[0].shape, (1, 10, 12, 24)) def test_linsoftmax_aug_train(self): """ Test function of linear layer in training mode (log_softmax) with 1-augmentation """ lin = layers.LinSoftmax(20, 10, True).train() o = lin(torch.randn(1, 20, 12, 24)) self.assertLess(o[0].max(), 0) def test_linsoftmax_aug_test(self): """ Test function of linear layer in eval mode (softmax) with 1-augmentation """ lin = layers.LinSoftmax(20, 10, True).eval() o = lin(torch.randn(1, 20, 12, 24)) self.assertGreaterEqual(o[0].min(), 0) def test_actconv2d_lin(self): """ Test convolutional layer without activation. """ conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'l') o = conv(torch.randn(1, 5, 24, 12)) self.assertEqual(o[0].shape, (1, 12, 24, 12)) def test_actconv2d_train_sigmoid(self): """ Test convolutional layer with sigmoid activation. """ conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 's') o = conv(torch.randn(1, 5, 24, 12)) conv.train() self.assertFalse(0 <= o[0].min() <= 1) self.assertFalse(0 <= o[0].max() <= 1) def test_actconv2d_eval_sigmoid(self): """ Test convolutional layer with sigmoid activation. """ conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 's') conv.eval() o = conv(torch.randn(1, 5, 24, 12)) self.assertTrue(0 <= o[0].min() <= 1) self.assertTrue(0 <= o[0].max() <= 1) def test_actconv2d_tanh(self): """ Test convolutional layer with tanh activation. """ conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 't') o = conv(torch.randn(1, 5, 24, 12)) self.assertTrue(-1 <= o[0].min() <= 1) self.assertTrue(-1 <= o[0].max() <= 1) def test_actconv2d_softmax(self): """ Test convolutional layer with softmax activation. """ conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'm') o = conv(torch.randn(1, 5, 24, 12)) self.assertTrue(0 <= o[0].min() <= 1) self.assertTrue(0 <= o[0].max() <= 1) def test_actconv2d_relu(self): """ Test convolutional layer with relu activation. """ conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'r') o = conv(torch.randn(1, 5, 24, 12)) self.assertLessEqual(0, o[0].min()) self.assertLessEqual(0, o[0].max()) def test_linsoftmax_resize_add(self): """ Tests resizing of a fully connected layer. """ lin = layers.LinSoftmax(20, 10) w_cp = lin.lin.weight.clone() b_cp = lin.lin.bias.clone() lin.resize(25) self.assertTrue(w_cp.eq(lin.lin.weight[:10, :]).all()) self.assertTrue(b_cp.eq(lin.lin.bias[:10]).all()) self.assertTrue(lin.lin.weight.shape[0] == 25) self.assertTrue(lin.lin.bias.shape[0] == 25) def test_linsoftmax_resize_remove(self): """ Tests resizing of a fully connected layer. """ lin = layers.LinSoftmax(20, 10) w_cp = lin.lin.weight.clone() b_cp = lin.lin.bias.clone() lin.resize(5, (1, 5, 6, 7, 9)) self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(lin.lin.weight).all()) self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(lin.lin.bias).all()) def test_linsoftmax_resize_both(self): """ Tests resizing of a fully connected layer. """ lin = layers.LinSoftmax(20, 10) w_cp = lin.lin.weight.clone() b_cp = lin.lin.bias.clone() lin.resize(25, (1, 5, 6, 7, 9)) self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(lin.lin.weight[:5, :]).all()) self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(lin.lin.bias[:5]).all()) self.assertTrue(lin.lin.weight.shape[0] == 25) self.assertTrue(lin.lin.bias.shape[0] == 25) def test_conv_resize_add(self): """ Tests resizing of a convolutional output layer. 
""" conv = layers.ActConv2D(20, 10, (1, 1), (1, 1)) w_cp = conv.co.weight.clone() b_cp = conv.co.bias.clone() conv.resize(25) self.assertTrue(w_cp.eq(conv.co.weight[:10, :]).all()) self.assertTrue(b_cp.eq(conv.co.bias[:10]).all()) self.assertTrue(conv.co.weight.shape[0] == 25) self.assertTrue(conv.co.bias.shape[0] == 25) def test_conv_resize_remove(self): """ Tests resizing of a convolutional output layer. """ conv = layers.ActConv2D(20, 10, (1, 1), (1, 1)) w_cp = conv.co.weight.clone() b_cp = conv.co.bias.clone() conv.resize(5, (1, 5, 6, 7, 9)) self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(conv.co.weight).all()) self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(conv.co.bias).all()) def test_conv_resize_both(self): """ Tests resizing of a convolutional output layer. """ conv = layers.ActConv2D(20, 10, (1, 1), (1, 1)) w_cp = conv.co.weight.clone() b_cp = conv.co.bias.clone() conv.resize(25, (1, 5, 6, 7, 9)) self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(conv.co.weight[:5, :]).all()) self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(conv.co.bias[:5]).all()) self.assertTrue(conv.co.weight.shape[0] == 25) self.assertTrue(conv.co.bias.shape[0] == 25)
9,708
33.675
88
py
kraken
kraken-main/tests/test_transcribe.py
# -*- coding: utf-8 -*- import os import json import unittest from PIL import Image from io import BytesIO from lxml import etree from pathlib import Path from kraken.transcribe import TranscriptionInterface thisfile = Path(__file__).resolve().parent resources = thisfile / 'resources' class TestTranscriptionInterface(unittest.TestCase): """ Test of the transcription interface generation """ def test_transcription_generation(self): """ Tests creation of transcription interfaces with segmentation. """ tr = TranscriptionInterface() with open(resources / 'segmentation.json') as fp: seg = json.load(fp) with Image.open(resources / 'input.jpg') as im: tr.add_page(im, seg) fp = BytesIO() tr.write(fp) # this will never raise an exception, so we need a better validator etree.HTML(fp.getvalue())
924
25.428571
77
py