/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package com.facebook.buck_project_builder.targets;
import com.facebook.buck_project_builder.BuilderException;
import com.facebook.buck_project_builder.DebugOutput;
import com.facebook.buck_project_builder.FileSystem;
import com.facebook.buck_project_builder.SimpleLogger;
import com.facebook.buck_project_builder.cache.BuilderCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.lang.StackTraceElement;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
public final class BuildTargetsBuilder {
private final long startTime;
private final String buckRoot;
private final String outputDirectory;
private final @Nullable String projectName;
private final ImmutableList<String> targets;
/** key: output path, value: source path */
private final ImmutableMap<Path, Path> sources;
private final ImmutableSet<String> unsupportedGeneratedSources;
private final ImmutableSet<String> pythonWheelUrls;
private final ImmutableSortedSet<ThriftLibraryTarget> thriftLibraryTargets;
private final ImmutableSet<String> swigLibraryBuildCommands;
private final ImmutableSet<String> antlr4LibraryBuildCommands;
private final Set<String> conflictingFiles = new HashSet<>();
private final Set<String> unsupportedFiles = new HashSet<>();
public BuildTargetsBuilder(
long startTime,
String buckRoot,
String outputDirectory,
@Nullable String projectName,
ImmutableList<String> targets,
ImmutableMap<Path, Path> sources,
ImmutableSet<String> unsupportedGeneratedSources,
ImmutableSet<String> pythonWheelUrls,
ImmutableSortedSet<ThriftLibraryTarget> thriftLibraryTargets,
ImmutableSet<String> swigLibraryBuildCommands,
ImmutableSet<String> antlr4LibraryBuildCommands) {
this.startTime = startTime;
this.buckRoot = buckRoot;
this.outputDirectory = outputDirectory;
this.projectName = projectName;
this.targets = targets;
this.sources = sources;
this.unsupportedGeneratedSources = unsupportedGeneratedSources;
this.pythonWheelUrls = pythonWheelUrls;
this.thriftLibraryTargets = thriftLibraryTargets;
this.swigLibraryBuildCommands = swigLibraryBuildCommands;
this.antlr4LibraryBuildCommands = antlr4LibraryBuildCommands;
}
private static void logCodeGenerationIOException(IOException exception) {
SimpleLogger.error("IOException during python code generation: " + exception.getMessage());
for (StackTraceElement element : exception.getStackTrace()) {
SimpleLogger.warning(element.toString());
}
}
private void buildPythonSources() {
SimpleLogger.info("Building " + this.sources.size() + " python sources...");
long start = System.currentTimeMillis();
this.sources
.entrySet()
.forEach(mapping -> FileSystem.addSymbolicLink(mapping.getKey(), mapping.getValue()));
long time = System.currentTimeMillis() - start;
SimpleLogger.info("Built python sources in " + time + "ms.");
}
private void buildPythonWheels(String buckRoot, @Nullable String projectName) throws IOException {
if (pythonWheelUrls.isEmpty()) {
return;
}
SimpleLogger.info("Building " + this.pythonWheelUrls.size() + " python wheels...");
long start = System.currentTimeMillis();
new File(BuilderCache.getWheelCachePath(buckRoot, projectName)).mkdirs();
try {
FileUtils.cleanDirectory(new File(BuilderCache.getWheelCachePath(buckRoot, projectName)));
} catch (IOException exception) {
// Silently fail if the cache wasn't found.
}
File outputDirectoryFile = new File(outputDirectory);
this.pythonWheelUrls
.parallelStream()
.forEach(
url -> {
try {
ImmutableSet<String> conflictingFiles =
FileSystem.unzipRemoteFile(
url,
BuilderCache.getWheelCachePath(buckRoot, projectName),
outputDirectoryFile);
this.conflictingFiles.addAll(conflictingFiles);
} catch (IOException firstException) {
try {
ImmutableSet<String> conflictingFiles =
FileSystem.unzipRemoteFile(
url,
BuilderCache.getWheelCachePath(buckRoot, projectName),
outputDirectoryFile);
this.conflictingFiles.addAll(conflictingFiles);
} catch (IOException secondException) {
SimpleLogger.error(
String.format(
"Cannot fetch and unzip remote python dependency at `%s` after 1 retry.",
url));
SimpleLogger.error("First IO Exception: " + firstException);
SimpleLogger.error("Second IO Exception: " + secondException);
}
}
});
long time = System.currentTimeMillis() - start;
SimpleLogger.info("Built python wheels in " + time + "ms.");
}
private <T> void runBuildCommands(
Collection<T> commands, String buildRuleType, CommandRunner<T> commandRunner)
throws BuilderException {
int numberOfSuccessfulRuns =
commands
.parallelStream()
.mapToInt(
command -> {
boolean buildIsSuccessful = commandRunner.run(command);
if (buildIsSuccessful) {
return 1;
}
SimpleLogger.error("Failed to build: " + command);
return 0;
})
.sum();
if (numberOfSuccessfulRuns < commands.size()) {
throw new BuilderException(
String.format(
"Failed to build some %s targets. Read the log above for more information.",
buildRuleType));
}
}
private void buildThriftLibraries() throws BuilderException, IOException {
if (this.thriftLibraryTargets.isEmpty()) {
return;
}
new File(BuilderCache.getThriftCachePath(buckRoot, projectName)).mkdirs();
try {
FileUtils.cleanDirectory(new File(BuilderCache.getThriftCachePath(buckRoot, projectName)));
} catch (IOException exception) {
// Silently fail if there was nothing to clean.
}
int totalNumberOfThriftLibraries = this.thriftLibraryTargets.size();
SimpleLogger.info("Building " + totalNumberOfThriftLibraries + " thrift libraries...");
AtomicInteger numberOfBuiltThriftLibraries = new AtomicInteger(0);
long start = System.currentTimeMillis();
    // First pass: build thrift libraries in the cache path.
runBuildCommands(
this.thriftLibraryTargets,
"thrift_library",
target -> {
boolean successfullyBuilt = target.build(this.buckRoot);
if (successfullyBuilt) {
int builtThriftLibrariesSoFar = numberOfBuiltThriftLibraries.addAndGet(1);
if (builtThriftLibrariesSoFar % 100 == 0) {
              // Log progress for every 100 built thrift libraries.
SimpleLogger.info(
String.format(
"Built %d/%d thrift libraries.",
builtThriftLibrariesSoFar, totalNumberOfThriftLibraries));
}
}
return successfullyBuilt;
});
    // Second pass: establish symbolic links.
this.thriftLibraryTargets.forEach(
command -> {
String baseModulePath = command.getBaseModulePath();
try {
Path generatedCodeRoot =
Paths.get(
BuilderCache.getThriftCachePath(buckRoot, projectName),
DigestUtils.md5Hex(baseModulePath));
Files.walk(Paths.get(generatedCodeRoot.toString(), baseModulePath))
.forEach(
absolutePath -> {
if (absolutePath.toFile().isDirectory()) {
return;
}
if (absolutePath.endsWith("__init__.py")
|| absolutePath.endsWith("__init__.pyi")) {
return;
}
String relativePath = generatedCodeRoot.relativize(absolutePath).toString();
FileSystem.addSymbolicLink(
Paths.get(this.outputDirectory, relativePath), absolutePath);
});
} catch (IOException exception) {
SimpleLogger.warning(
String.format(
"Cannot find generated python code because the namespace directive in the thrift file"
+ " does not match the base_module %s specified in the TARGETS file.",
baseModulePath));
}
});
long time = System.currentTimeMillis() - start;
SimpleLogger.info("Built thrift libraries in " + time + "ms.");
}
private void buildSwigLibraries() throws BuilderException {
if (this.swigLibraryBuildCommands.isEmpty()) {
return;
}
SimpleLogger.info("Building " + this.swigLibraryBuildCommands.size() + " swig libraries...");
long start = System.currentTimeMillis();
    // Swig command contains buck run, so it's better not to run it in parallel.
    this.swigLibraryBuildCommands
        .stream()
.forEach(
command -> {
try {
GeneratedBuildRuleRunner.runBuilderCommand(command, this.buckRoot);
} catch (IOException exception) {
logCodeGenerationIOException(exception);
}
});
long time = System.currentTimeMillis() - start;
SimpleLogger.info("Built swig libraries in " + time + "ms.");
}
private void buildAntlr4Libraries() throws BuilderException {
if (this.antlr4LibraryBuildCommands.isEmpty()) {
return;
}
SimpleLogger.info(
"Building " + this.antlr4LibraryBuildCommands.size() + " ANTLR4 libraries...");
long start = System.currentTimeMillis();
runBuildCommands(
this.antlr4LibraryBuildCommands,
"antlr4_library",
command -> {
try {
return GeneratedBuildRuleRunner.runBuilderCommand(command, this.buckRoot);
} catch (IOException exception) {
logCodeGenerationIOException(exception);
return false;
}
});
long time = System.currentTimeMillis() - start;
SimpleLogger.info("Built ANTLR4 libraries in " + time + "ms.");
}
private void generateEmptyStubs() {
SimpleLogger.info("Generating empty stubs...");
long start = System.currentTimeMillis();
Path outputPath = Paths.get(outputDirectory);
this.unsupportedGeneratedSources.forEach(
source -> {
String pyiSource = source.endsWith(".py") ? source + "i" : source;
File outputFile = new File(pyiSource);
if (outputFile.exists()) {
          // Do not generate stubs for files that have already been handled.
return;
}
String relativeUnsupportedFilename =
outputPath.relativize(Paths.get(source)).normalize().toString();
this.unsupportedFiles.add(relativeUnsupportedFilename);
outputFile.getParentFile().mkdirs();
try {
FileUtils.write(outputFile, "# pyre-placeholder-stub\n", Charset.defaultCharset());
} catch (IOException exception) {
logCodeGenerationIOException(exception);
}
});
long time = System.currentTimeMillis() - start;
SimpleLogger.info("Generate empty stubs in " + time + "ms.");
}
public DebugOutput buildTargets(String buckRoot, @Nullable String projectName)
throws BuilderException, IOException {
SimpleLogger.info("Building...");
this.buildThriftLibraries();
this.buildSwigLibraries();
this.buildAntlr4Libraries();
this.buildPythonSources();
this.buildPythonWheels(buckRoot, projectName);
this.generateEmptyStubs();
return new DebugOutput(this.conflictingFiles, this.unsupportedFiles);
}
private interface CommandRunner<T> {
boolean run(T command);
}
}
{
"info" : {
"version" : 1,
"author" : "xcode"
},
"properties" : {
"filename" : "48---j67Ng9Z.png"
}
}
set(LLVM_LINK_COMPONENTS
Support
Symbolize
)
add_llvm_tool(sanstats
sanstats.cpp
DEPENDS
intrinsics_gen
)
package higherkindness.droste
import cats.Functor
import cats.syntax.functor._
import data.Attr
object Gather {
def cata[F[_], A]: Gather[F, A, A] =
(a, fa) => a
def zygo[F[_]: Functor, A, B](algebra: Algebra[F, B]): Gather[F, (B, A), A] =
(a, fa) => (algebra(fa.map(_._1)), a)
def para[F[_]: Functor, A, B](
implicit embed: Embed[F, B]): Gather[F, (B, A), A] =
zygo(embed.algebra)
def histo[F[_], A]: Gather[F, Attr[F, A], A] =
Attr(_, _)
def zip[F[_]: Functor, Ax, Ay, Sx, Sy](
x: Gather[F, Sx, Ax],
y: Gather[F, Sy, Ay]
): Gather[F, (Sx, Sy), (Ax, Ay)] =
(a, fs) => (x(a._1, fs.map(_._1)), y(a._2, fs.map(_._2)))
}
<?xml version="1.0" encoding="utf-8"?>
<SurfaceView xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="match_parent"
android:layout_height="match_parent">
</SurfaceView>
"""Whimpy test script for the al module
Roger E. Masse
"""
from test.test_support import verbose, import_module
al = import_module('al', deprecated=True)
alattrs = ['__doc__', '__name__', 'getdefault', 'getminmax', 'getname', 'getparams',
'newconfig', 'openport', 'queryparams', 'setparams']
# This is a very unobtrusive test for the existence of the al module and all its
# attributes. More comprehensive examples can be found in Demo/al
def test_main():
# touch all the attributes of al without doing anything
if verbose:
print 'Touching al module attributes...'
for attr in alattrs:
if verbose:
print 'touching: ', attr
getattr(al, attr)
if __name__ == '__main__':
test_main()
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
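# Example of the fast path above (a sketch; results assume inet_pton is
# available): when "host" is already an IP literal, a full addrinfo tuple
# is synthesized without calling getaddrinfo():
#
#     _ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
#     # -> (AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80))
#
#     _ipaddr_info('example.com', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
#     # -> None (not an IP literal, so getaddrinfo() must be used)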
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
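# Worked example (sketch): with the default first_address_family_count=1,
# grouping by family and round-robin merging turns
#     [v6_a, v6_b, v4_a]  into  [v6_a, v4_a, v6_b]
# so Happy Eyeballs can try one address per family before repeating a family.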
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
            # This never happens if the peer disconnects after sending the
            # whole content; thus a disconnection is always an exception
            # from the user's perspective.
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return ()
return tuple(trsock.TransportSocket(s) for s in self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # calls go through.
await tasks.sleep(0, loop=self._loop)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except exceptions.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
        # exceeds this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
"""Schedule the shutdown of the default executor."""
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
self._check_running()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
self._check_running()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
# Only check when the default executor is being used
self._check_default_executor()
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(
thread_name_prefix='asyncio'
)
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
        raise exceptions.SendfileNotAvailableError(
            f"syscall sendfile is not available for socket {sock!r} "
            f"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
"""Create, bind and connect one socket."""
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else: # all bind attempts failed
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given Internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleaving addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
                    'host and port were not specified and no sock was specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method breaks backwards
                # compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
        offset tells from where to start reading the file. If specified,
        count is the total number of bytes to transmit, as opposed to
        sending the file until EOF is reached. The file position is updated
        on return, and also in case of error, in which case file.tell()
        can be used to figure out the number of bytes which were sent.
        fallback set to True makes asyncio manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or an SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
"""Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
# have a chance to get called before "ssl_protocol.connection_made()".
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=_unset, reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if sock.type != socket.SOCK_DGRAM:
raise ValueError(
f'A UDP Socket was expected, got {sock!r}')
if (local_addr or remote_addr or
family or proto or flags or
reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_address=reuse_address, reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
raise ValueError(
f'socket modifier keyword arguments can not be used '
f'when sock is specified. ({problems})')
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
for addr in (local_addr, remote_addr):
if addr is not None and not isinstance(addr, str):
raise TypeError('string is expected')
if local_addr and local_addr[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
os.remove(local_addr)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX '
'socket %r: %r',
local_addr, err)
addr_pairs_info = (((family, proto),
(local_addr, remote_addr)), )
else:
# join address by (family, protocol)
addr_infos = {} # Using order preserving dict
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = await self._ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
# bpo-37228
if reuse_address is not _unset:
if reuse_address:
raise ValueError("Passing `reuse_address=True` is no "
"longer supported, as the usage of "
"SO_REUSEPORT in UDP poses a significant "
"security concern.")
else:
warnings.warn("The *reuse_address* parameter has been "
"deprecated as of 3.5.10 and is scheduled "
"for removal in 3.11.", DeprecationWarning,
stacklevel=2)
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
if not allow_broadcast:
await self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
async def create_server(
self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None,
ssl_handshake_timeout=None,
start_serving=True):
"""Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and ssl is None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.abc.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = await tasks.gather(*fs, loop=self)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower())) from None
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
sockets = [sock]
for sock in sockets:
sock.setblocking(False)
server = Server(self, sockets, protocol_factory,
ssl, backlog, ssl_handshake_timeout)
if start_serving:
server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # calls go through.
await tasks.sleep(0, loop=self)
if self._debug:
logger.info("%r is serving", server)
return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
"""Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
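    # Usage sketch (hedged; 'MySubprocessProtocol' is a hypothetical
    # asyncio.SubprocessProtocol subclass): cmd is a single shell string,
    # unlike subprocess_exec() below.
    #
    #     transport, protocol = await loop.subprocess_shell(
    #         MySubprocessProtocol, 'echo hello | wc -c')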
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
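    # Usage sketch (hedged; 'MySubprocessProtocol' is a hypothetical
    # asyncio.SubprocessProtocol subclass): program and args are passed
    # separately, so no shell quoting is involved.
    #
    #     transport, protocol = await loop.subprocess_exec(
    #         MySubprocessProtocol, 'echo', 'hello',
    #         stdin=None, stderr=None)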
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
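    # Usage sketch (hedged): a custom handler must accept (loop, context)
    # and may delegate back to the default implementation.
    #
    #     def my_handler(loop, context):
    #         print('caught:', context['message'])
    #         loop.default_exception_handler(context)
    #
    #     loop.set_exception_handler(my_handler)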
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context and
self._current_handle is not None and
self._current_handle._source_traceback):
context['handle_traceback'] = \
self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append(f'{key}: {value}')
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
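    # Usage sketch (hedged; 'exc' and 'sock' stand for local variables at
    # the call site): library code typically reports errors it cannot raise
    # by building a context dict and delegating to the loop.
    #
    #     loop.call_exception_handler({
    #         'message': 'socket cleanup failed',
    #         'exception': exc,
    #         'socket': sock,
    #     })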
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
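    # Illustrative sketch (hedged, standalone): the two timer-compaction
    # strategies above in miniature, bulk rebuild versus lazy head removal.
    #
    #     import heapq
    #     when = [3.0, 1.0, 2.0]         # deadlines of surviving handles
    #     heapq.heapify(when)            # O(n) bulk rebuild after filtering
    #     soonest = heapq.heappop(when)  # lazy removal from the heap's head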
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
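    # Usage sketch (hedged): enabling debug mode also turns on coroutine
    # origin tracking (see _set_coroutine_origin_tracking above).
    #
    #     loop = asyncio.get_event_loop()
    #     loop.set_debug(True)
    #     loop.slow_callback_duration = 0.01  # warn on callbacks over 10 ms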
package com.lianggzone.springboot.actuator.endpoint;
import java.util.Map;
import org.springframework.boot.actuate.endpoint.Endpoint;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * <h3>Summary:</h3><p>EndpointConfig</p>
 * <h3>Function:</h3><p>Endpoint configuration class</p>
 * <h3>History:</h3>
 * <li>2017-01-13 v0.1: initial version</li>
* @author 粱桂钊
* @since 0.1
*/
@Configuration
public class EndpointConfig {
@Bean
public static Endpoint<Map<String, Object>> servertime() {
return new ServerTimeEndpoint();
}
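    // Hedged sketch of the referenced ServerTimeEndpoint (its source is not
    // shown in this file; this is an assumed minimal implementation against
    // the Spring Boot 1.x Endpoint API, typically exposed at /servertime).
    static class ServerTimeEndpoint
            extends org.springframework.boot.actuate.endpoint.AbstractEndpoint<Map<String, Object>> {
        ServerTimeEndpoint() {
            super("servertime");
        }
        @Override
        public Map<String, Object> invoke() {
            Map<String, Object> result = new java.util.HashMap<>();
            result.put("server_time", java.time.LocalDateTime.now().toString());
            return result;
        }
    }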
}
print-dependency-info non-existant
// (c) Copyright Fernando Luis Cacciola Carballal 2000-2004
// Use, modification, and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See library home page at http://www.boost.org/libs/numeric/conversion
//
// Contact the author at: [email protected]
//
#ifndef BOOST_NUMERIC_CONVERSION_SIGN_MIXTURE_ENUM_FLC_12NOV2002_HPP
#define BOOST_NUMERIC_CONVERSION_SIGN_MIXTURE_ENUM_FLC_12NOV2002_HPP
namespace boost { namespace numeric
{
enum sign_mixture_enum
{
unsigned_to_unsigned
,signed_to_signed
,signed_to_unsigned
,unsigned_to_signed
} ;
} } // namespace boost::numeric
#endif
//
///////////////////////////////////////////////////////////////////////////////////////////////
# -*- coding: utf-8 -*-
"""
Created on 2017-10-17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def inception_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Defines the default arg scope for inception models.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: If `True`, batch_norm is applied after each convolution.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the inception models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
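# Usage sketch (hedged): the scope is meant to wrap model construction so
# that every conv/fc layer picks up the regularizer and batch-norm settings.
# 'inception_v3' stands in for any slim model function.
#
#     with slim.arg_scope(inception_arg_scope(weight_decay=1e-4)):
#         logits, end_points = inception_v3(images, num_classes=1001)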
subroutine smd_pdb_natoms(filename,nt)
implicit none
#include "errquit.fh"
#include "util.fh"
character*(*) filename
integer nt
c
character*(4) buffer
integer un
c
if(.not.util_get_io_unit(un))
> call errquit("cannot get file number",0,0)
c
open(unit=un,status="old",form="formatted",file=filename)
nt = 0
100 continue
read(un,'(A4)',end=200) buffer
if(buffer(1:4).eq."ATOM") then
nt = nt + 1
end if
goto 100
200 continue
close(un)
end
subroutine smd_pdb_read_coords(filename,nt,c)
implicit none
#include "mafdecls.fh"
#include "errquit.fh"
#include "util.fh"
#include "inp.fh"
character*(*) filename
integer nt
double precision c(nt,3)
c
character*(180) buffer
character*(4) tag
integer i
integer un
character*(30) pname
pname = "smd_pdb_read_coords"
c
if(.not.util_get_io_unit(un))
> call errquit(pname//"cannot get file number",0,0)
c
open(unit=un,status="old",form="formatted",file=filename)
i = 0
100 continue
read(un,'(A180)',end=200) buffer
call util_flush(34)
if(buffer(1:4).eq."ATOM") then
i = i +1
if(i.gt.nt)
> call errquit(pname//"insufficient array size",0,0)
read(buffer,*) tag,tag,tag,tag,tag,
> c(i,1),c(i,2),c(i,3)
end if
goto 100
200 continue
close(un)
if(i.ne.nt)
> call errquit(pname//"incompatible array sizes",0,0)
return
end
subroutine smd_pdb_read_atomres(filename,nt,ta,tr,ir)
implicit none
#include "mafdecls.fh"
#include "errquit.fh"
#include "util.fh"
#include "inp.fh"
character*(*) filename
integer nt
character*16 ta(nt)
character*16 tr(nt)
integer ir(nt)
c
character*(180) buffer
character*(4) tag
integer i
integer un
character*(30) pname
pname = "smd_pdb_read_atomres"
c
if(.not.util_get_io_unit(un))
> call errquit(pname//"cannot get file number",0,0)
c
open(unit=un,status="old",form="formatted",file=filename)
i = 0
100 continue
read(un,'(A180)',end=200) buffer
if(buffer(1:4).eq."ATOM") then
i = i +1
if(i.gt.nt)
> call errquit(pname//"insufficient array size",0,0)
read(buffer,*) tag,tag,ta(i),tr(i),ir(i),
> tag,tag,tag
end if
goto 100
200 continue
close(un)
if(i.ne.nt)
> call errquit(pname//"incompatible array sizes",0,0)
return
end
subroutine smd_pdb_read(filename,nt,ta,tr,ir,c)
implicit none
#include "mafdecls.fh"
#include "errquit.fh"
#include "util.fh"
#include "inp.fh"
character*(*) filename
integer nt
character*16 ta(nt)
character*16 tr(nt)
integer ir(nt)
double precision c(3,nt)
c
character*(180) buffer
character*(4) tag
integer i
integer un
character*(30) pname
pname = "smd_pdb_read"
c
if(.not.util_get_io_unit(un))
> call errquit(pname//"cannot get file number",0,0)
c
open(unit=un,status="old",form="formatted",file=filename)
i = 0
100 continue
read(un,'(A180)',end=200) buffer
call util_flush(34)
if(buffer(1:4).eq."ATOM") then
i = i +1
if(i.gt.nt)
> call errquit(pname//"insufficient array size",0,0)
read(buffer,*) tag,tag,ta(i),tr(i),ir(i),
> c(1,i),c(2,i),c(3,i)
call util_flush(6)
end if
goto 100
200 continue
close(un)
if(i.ne.nt)
> call errquit(pname//"incompatible array sizes",0,0)
return
end
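c
c     Usage sketch (hedged; illustrative call sequence, not part of the
c     original file): size the arrays with smd_pdb_natoms first, then read.
c
c        call smd_pdb_natoms("system.pdb",nt)
c        (allocate ta(nt), tr(nt), ir(nt), c(3,nt) here)
c        call smd_pdb_read("system.pdb",nt,ta,tr,ir,c)
c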
subroutine smd_pdb_nres(filename,nr)
implicit none
#include "mafdecls.fh"
#include "errquit.fh"
#include "util.fh"
#include "inp.fh"
character*(*) filename
integer nr
c
character*(180) buffer
character*(4) tag
integer ir0,ir
integer un
c
if(.not.util_get_io_unit(un))
> call errquit("cannot get file number",0,0)
c
open(unit=un,status="old",form="formatted",file=filename)
c reset residue arrays to be the size of number of residues only
nr = 0
ir0 = 0
100 continue
read(un,'(A180)',end=200) buffer
if(buffer(1:4).eq."ATOM") then
read(buffer,*) tag,tag,tag,tag,ir
if(ir0.ne.ir) then
nr = nr + 1
ir0=ir
end if
end if
goto 100
200 continue
close(un)
end
subroutine smd_pdb_read_res(filename,nt,nr,tr,ir,nm)
implicit none
#include "mafdecls.fh"
#include "errquit.fh"
#include "util.fh"
#include "inp.fh"
character*(*) filename
integer nt,nr
character*16 tr(nr)
integer ir(nt)
integer nm(nr)
c
character*(30) pname
character*(180) buffer
character*(4) tag
character*(16) rtag,rtag0
integer ir0,nr0
integer ncenter
integer un
c
pname = "sg_read_res"
c
if(.not.util_get_io_unit(un))
> call errquit(pname//"cannot get file number",0,0)
c
open(unit=un,status="old",form="formatted",file=filename)
ncenter = 0
nr0 = 0
rtag0 = " "
ir0 = 0
100 continue
read(un,'(A180)',end=200) buffer
if(buffer(1:4).eq."ATOM") then
ncenter = ncenter + 1
read(buffer,*) tag,tag,tag,rtag,ir(ncenter)
write(*,*) "ir",ncenter,ir(ncenter)
if(ir0.ne.ir(ncenter)) then
ir0=ir(ncenter)
nr0 = nr0 + 1
if(ncenter.gt.nt)
> call errquit(pname//"insufficient array size",0,0)
if(nr0.gt.nr)
> call errquit(pname//"insufficient array size",0,0)
tr(nr0) = rtag
rtag0=rtag
end if
ir(ncenter) = nr0
nm(nr0) = nm(nr0) + 1
end if
goto 100
200 continue
close(un)
write(*,*) "ncenter,nt",ncenter,nt
if(ncenter.ne.nt)
> call errquit(pname//"incompatible atom array sizes",
> ncenter,nt)
if(nr.ne.nr0)
> call errquit(pname//"incompatible residue array sizes",nr0,nr)
return
end
c $Id$
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm64,linux
package unix
const (
SizeofPtr = 0x8
SizeofShort = 0x2
SizeofInt = 0x4
SizeofLong = 0x8
SizeofLongLong = 0x8
PathMax = 0x1000
)
type (
_C_short int16
_C_int int32
_C_long int64
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int64
}
type Timeval struct {
Sec int64
Usec int64
}
type Timex struct {
Modes uint32
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
Constant int64
Precision int64
Tolerance int64
Time Timeval
Tick int64
Ppsfreq int64
Jitter int64
Shift int32
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
_ [44]byte
}
type Time_t int64
type Tms struct {
Utime int64
Stime int64
Cutime int64
Cstime int64
}
type Utimbuf struct {
Actime int64
Modtime int64
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int64
Ixrss int64
Idrss int64
Isrss int64
Minflt int64
Majflt int64
Nswap int64
Inblock int64
Oublock int64
Msgsnd int64
Msgrcv int64
Nsignals int64
Nvcsw int64
Nivcsw int64
}
type Rlimit struct {
Cur uint64
Max uint64
}
type _Gid_t uint32
type Stat_t struct {
Dev uint64
Ino uint64
Mode uint32
Nlink uint32
Uid uint32
Gid uint32
Rdev uint64
_ uint64
Size int64
Blksize int32
_ int32
Blocks int64
Atim Timespec
Mtim Timespec
Ctim Timespec
_ [2]int32
}
type StatxTimestamp struct {
Sec int64
Nsec uint32
_ int32
}
type Statx_t struct {
Mask uint32
Blksize uint32
Attributes uint64
Nlink uint32
Uid uint32
Gid uint32
Mode uint16
_ [1]uint16
Ino uint64
Size uint64
Blocks uint64
Attributes_mask uint64
Atime StatxTimestamp
Btime StatxTimestamp
Ctime StatxTimestamp
Mtime StatxTimestamp
Rdev_major uint32
Rdev_minor uint32
Dev_major uint32
Dev_minor uint32
_ [14]uint64
}
type Dirent struct {
Ino uint64
Off int64
Reclen uint16
Type uint8
Name [256]int8
_ [5]byte
}
type Fsid struct {
Val [2]int32
}
type Flock_t struct {
Type int16
Whence int16
Start int64
Len int64
Pid int32
_ [4]byte
}
type FscryptPolicy struct {
Version uint8
Contents_encryption_mode uint8
Filenames_encryption_mode uint8
Flags uint8
Master_key_descriptor [8]uint8
}
type FscryptKey struct {
Mode uint32
Raw [64]uint8
Size uint32
}
type KeyctlDHParams struct {
Private int32
Prime int32
Base int32
}
const (
FADV_NORMAL = 0x0
FADV_RANDOM = 0x1
FADV_SEQUENTIAL = 0x2
FADV_WILLNEED = 0x3
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
)
type RawSockaddrInet4 struct {
Family uint16
Port uint16
Addr [4]byte /* in_addr */
Zero [8]uint8
}
type RawSockaddrInet6 struct {
Family uint16
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Family uint16
Path [108]int8
}
type RawSockaddrLinklayer struct {
Family uint16
Protocol uint16
Ifindex int32
Hatype uint16
Pkttype uint8
Halen uint8
Addr [8]uint8
}
type RawSockaddrNetlink struct {
Family uint16
Pad uint16
Pid uint32
Groups uint32
}
type RawSockaddrHCI struct {
Family uint16
Dev uint16
Channel uint16
}
type RawSockaddrL2 struct {
Family uint16
Psm uint16
Bdaddr [6]uint8
Cid uint16
Bdaddr_type uint8
_ [1]byte
}
type RawSockaddrRFCOMM struct {
Family uint16
Bdaddr [6]uint8
Channel uint8
_ [1]byte
}
type RawSockaddrCAN struct {
Family uint16
Ifindex int32
Addr [8]byte
}
type RawSockaddrALG struct {
Family uint16
Type [14]uint8
Feat uint32
Mask uint32
Name [64]uint8
}
type RawSockaddrVM struct {
Family uint16
Reserved1 uint16
Port uint32
Cid uint32
Zero [4]uint8
}
type RawSockaddrXDP struct {
Family uint16
Flags uint16
Ifindex uint32
Queue_id uint32
Shared_umem_fd uint32
}
type RawSockaddrPPPoX [0x1e]byte
type RawSockaddr struct {
Family uint16
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [96]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint64
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPMreqn struct {
Multiaddr [4]byte /* in_addr */
Address [4]byte /* in_addr */
Ifindex int32
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type PacketMreq struct {
Ifindex int32
Type uint16
Alen uint16
Address [8]uint8
}
type Msghdr struct {
Name *byte
Namelen uint32
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
_ [4]byte
}
type Cmsghdr struct {
Len uint64
Level int32
Type int32
}
type Inet4Pktinfo struct {
Ifindex int32
Spec_dst [4]byte /* in_addr */
Addr [4]byte /* in_addr */
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Data [8]uint32
}
type Ucred struct {
Pid int32
Uid uint32
Gid uint32
}
type TCPInfo struct {
State uint8
Ca_state uint8
Retransmits uint8
Probes uint8
Backoff uint8
Options uint8
Rto uint32
Ato uint32
Snd_mss uint32
Rcv_mss uint32
Unacked uint32
Sacked uint32
Lost uint32
Retrans uint32
Fackets uint32
Last_data_sent uint32
Last_ack_sent uint32
Last_data_recv uint32
Last_ack_recv uint32
Pmtu uint32
Rcv_ssthresh uint32
Rtt uint32
Rttvar uint32
Snd_ssthresh uint32
Snd_cwnd uint32
Advmss uint32
Reordering uint32
Rcv_rtt uint32
Rcv_space uint32
Total_retrans uint32
}
type CanFilter struct {
Id uint32
Mask uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x70
SizeofSockaddrUnix = 0x6e
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
SizeofSockaddrL2 = 0xe
SizeofSockaddrRFCOMM = 0xa
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
SizeofSockaddrXDP = 0x10
SizeofSockaddrPPPoX = 0x1e
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
SizeofIPv6Mreq = 0x14
SizeofPacketMreq = 0x10
SizeofMsghdr = 0x38
SizeofCmsghdr = 0x10
SizeofInet4Pktinfo = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
SizeofCanFilter = 0x8
)
const (
NDA_UNSPEC = 0x0
NDA_DST = 0x1
NDA_LLADDR = 0x2
NDA_CACHEINFO = 0x3
NDA_PROBES = 0x4
NDA_VLAN = 0x5
NDA_PORT = 0x6
NDA_VNI = 0x7
NDA_IFINDEX = 0x8
NDA_MASTER = 0x9
NDA_LINK_NETNSID = 0xa
NDA_SRC_VNI = 0xb
NTF_USE = 0x1
NTF_SELF = 0x2
NTF_MASTER = 0x4
NTF_PROXY = 0x8
NTF_EXT_LEARNED = 0x10
NTF_OFFLOADED = 0x20
NTF_ROUTER = 0x80
NUD_INCOMPLETE = 0x1
NUD_REACHABLE = 0x2
NUD_STALE = 0x4
NUD_DELAY = 0x8
NUD_PROBE = 0x10
NUD_FAILED = 0x20
NUD_NOARP = 0x40
NUD_PERMANENT = 0x80
NUD_NONE = 0x0
IFA_UNSPEC = 0x0
IFA_ADDRESS = 0x1
IFA_LOCAL = 0x2
IFA_LABEL = 0x3
IFA_BROADCAST = 0x4
IFA_ANYCAST = 0x5
IFA_CACHEINFO = 0x6
IFA_MULTICAST = 0x7
IFA_FLAGS = 0x8
IFA_RT_PRIORITY = 0x9
IFA_TARGET_NETNSID = 0xa
IFLA_UNSPEC = 0x0
IFLA_ADDRESS = 0x1
IFLA_BROADCAST = 0x2
IFLA_IFNAME = 0x3
IFLA_MTU = 0x4
IFLA_LINK = 0x5
IFLA_QDISC = 0x6
IFLA_STATS = 0x7
IFLA_COST = 0x8
IFLA_PRIORITY = 0x9
IFLA_MASTER = 0xa
IFLA_WIRELESS = 0xb
IFLA_PROTINFO = 0xc
IFLA_TXQLEN = 0xd
IFLA_MAP = 0xe
IFLA_WEIGHT = 0xf
IFLA_OPERSTATE = 0x10
IFLA_LINKMODE = 0x11
IFLA_LINKINFO = 0x12
IFLA_NET_NS_PID = 0x13
IFLA_IFALIAS = 0x14
IFLA_NUM_VF = 0x15
IFLA_VFINFO_LIST = 0x16
IFLA_STATS64 = 0x17
IFLA_VF_PORTS = 0x18
IFLA_PORT_SELF = 0x19
IFLA_AF_SPEC = 0x1a
IFLA_GROUP = 0x1b
IFLA_NET_NS_FD = 0x1c
IFLA_EXT_MASK = 0x1d
IFLA_PROMISCUITY = 0x1e
IFLA_NUM_TX_QUEUES = 0x1f
IFLA_NUM_RX_QUEUES = 0x20
IFLA_CARRIER = 0x21
IFLA_PHYS_PORT_ID = 0x22
IFLA_CARRIER_CHANGES = 0x23
IFLA_PHYS_SWITCH_ID = 0x24
IFLA_LINK_NETNSID = 0x25
IFLA_PHYS_PORT_NAME = 0x26
IFLA_PROTO_DOWN = 0x27
IFLA_GSO_MAX_SEGS = 0x28
IFLA_GSO_MAX_SIZE = 0x29
IFLA_PAD = 0x2a
IFLA_XDP = 0x2b
IFLA_EVENT = 0x2c
IFLA_NEW_NETNSID = 0x2d
IFLA_IF_NETNSID = 0x2e
IFLA_TARGET_NETNSID = 0x2e
IFLA_CARRIER_UP_COUNT = 0x2f
IFLA_CARRIER_DOWN_COUNT = 0x30
IFLA_NEW_IFINDEX = 0x31
IFLA_MIN_MTU = 0x32
IFLA_MAX_MTU = 0x33
IFLA_MAX = 0x33
IFLA_INFO_KIND = 0x1
IFLA_INFO_DATA = 0x2
IFLA_INFO_XSTATS = 0x3
IFLA_INFO_SLAVE_KIND = 0x4
IFLA_INFO_SLAVE_DATA = 0x5
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
RT_SCOPE_HOST = 0xfe
RT_SCOPE_NOWHERE = 0xff
RT_TABLE_UNSPEC = 0x0
RT_TABLE_COMPAT = 0xfc
RT_TABLE_DEFAULT = 0xfd
RT_TABLE_MAIN = 0xfe
RT_TABLE_LOCAL = 0xff
RT_TABLE_MAX = 0xffffffff
RTA_UNSPEC = 0x0
RTA_DST = 0x1
RTA_SRC = 0x2
RTA_IIF = 0x3
RTA_OIF = 0x4
RTA_GATEWAY = 0x5
RTA_PRIORITY = 0x6
RTA_PREFSRC = 0x7
RTA_METRICS = 0x8
RTA_MULTIPATH = 0x9
RTA_FLOW = 0xb
RTA_CACHEINFO = 0xc
RTA_TABLE = 0xf
RTA_MARK = 0x10
RTA_MFC_STATS = 0x11
RTA_VIA = 0x12
RTA_NEWDST = 0x13
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
RTA_EXPIRES = 0x17
RTA_PAD = 0x18
RTA_UID = 0x19
RTA_TTL_PROPAGATE = 0x1a
RTA_IP_PROTO = 0x1b
RTA_SPORT = 0x1c
RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
RTN_BROADCAST = 0x3
RTN_ANYCAST = 0x4
RTN_MULTICAST = 0x5
RTN_BLACKHOLE = 0x6
RTN_UNREACHABLE = 0x7
RTN_PROHIBIT = 0x8
RTN_THROW = 0x9
RTN_NAT = 0xa
RTN_XRESOLVE = 0xb
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
RTNLGRP_NEIGH = 0x3
RTNLGRP_TC = 0x4
RTNLGRP_IPV4_IFADDR = 0x5
RTNLGRP_IPV4_MROUTE = 0x6
RTNLGRP_IPV4_ROUTE = 0x7
RTNLGRP_IPV4_RULE = 0x8
RTNLGRP_IPV6_IFADDR = 0x9
RTNLGRP_IPV6_MROUTE = 0xa
RTNLGRP_IPV6_ROUTE = 0xb
RTNLGRP_IPV6_IFINFO = 0xc
RTNLGRP_IPV6_PREFIX = 0x12
RTNLGRP_IPV6_RULE = 0x13
RTNLGRP_ND_USEROPT = 0x14
SizeofNlMsghdr = 0x10
SizeofNlMsgerr = 0x14
SizeofRtGenmsg = 0x1
SizeofNlAttr = 0x4
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
SizeofNdUseroptmsg = 0x10
SizeofNdMsg = 0xc
)
type NlMsghdr struct {
Len uint32
Type uint16
Flags uint16
Seq uint32
Pid uint32
}
type NlMsgerr struct {
Error int32
Msg NlMsghdr
}
type RtGenmsg struct {
Family uint8
}
type NlAttr struct {
Len uint16
Type uint16
}
type RtAttr struct {
Len uint16
Type uint16
}
type IfInfomsg struct {
Family uint8
_ uint8
Type uint16
Index int32
Flags uint32
Change uint32
}
type IfAddrmsg struct {
Family uint8
Prefixlen uint8
Flags uint8
Scope uint8
Index uint32
}
type RtMsg struct {
Family uint8
Dst_len uint8
Src_len uint8
Tos uint8
Table uint8
Protocol uint8
Scope uint8
Type uint8
Flags uint32
}
type RtNexthop struct {
Len uint16
Flags uint8
Hops uint8
Ifindex int32
}
type NdUseroptmsg struct {
Family uint8
Pad1 uint8
Opts_len uint16
Ifindex int32
Icmp_type uint8
Icmp_code uint8
Pad2 uint16
Pad3 uint32
}
type NdMsg struct {
Family uint8
Pad1 uint8
Pad2 uint16
Ifindex int32
State uint16
Flags uint8
Type uint8
}
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
)
type SockFilter struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type SockFprog struct {
Len uint16
Filter *SockFilter
}
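// exampleAttachFilter is a hedged, illustrative sketch (not part of the
// generated declarations above): it attaches a minimal classic-BPF program
// that accepts every packet, using SockFilter/SockFprog as defined above.
// The one-instruction program itself is an assumption for illustration.
func exampleAttachFilter(fd int) error {
	insns := []SockFilter{
		// BPF_RET | BPF_K: return K bytes of the packet (0xffff accepts all).
		{Code: BPF_RET | BPF_K, K: 0xffff},
	}
	prog := SockFprog{
		Len:    uint16(len(insns)),
		Filter: &insns[0],
	}
	return SetsockoptSockFprog(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog)
}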
type InotifyEvent struct {
Wd int32
Mask uint32
Cookie uint32
Len uint32
}
const SizeofInotifyEvent = 0x10
type PtraceRegs struct {
Regs [31]uint64
Sp uint64
Pc uint64
Pstate uint64
}
type FdSet struct {
Bits [16]int64
}
type Sysinfo_t struct {
Uptime int64
Loads [3]uint64
Totalram uint64
Freeram uint64
Sharedram uint64
Bufferram uint64
Totalswap uint64
Freeswap uint64
Procs uint16
Pad uint16
Totalhigh uint64
Freehigh uint64
Unit uint32
_ [0]int8
_ [4]byte
}
type Utsname struct {
Sysname [65]byte
Nodename [65]byte
Release [65]byte
Version [65]byte
Machine [65]byte
Domainname [65]byte
}
type Ustat_t struct {
Tfree int32
Tinode uint64
Fname [6]int8
Fpack [6]int8
_ [4]byte
}
type EpollEvent struct {
Events uint32
PadFd int32
Fd int32
Pad int32
}
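// exampleEpollWaitOnce is a hedged, illustrative sketch (not part of the
// generated declarations): it shows the EpollEvent layout above in use with
// this package's EpollCreate1/EpollCtl/EpollWait wrappers.
func exampleEpollWaitOnce(fd int) (int, error) {
	epfd, err := EpollCreate1(EPOLL_CLOEXEC)
	if err != nil {
		return 0, err
	}
	defer Close(epfd)
	ev := EpollEvent{Events: EPOLLIN, Fd: int32(fd)}
	if err := EpollCtl(epfd, EPOLL_CTL_ADD, fd, &ev); err != nil {
		return 0, err
	}
	events := make([]EpollEvent, 8)
	// Block for up to 1000 ms waiting for readiness on fd.
	return EpollWait(epfd, events, 1000)
}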
const (
AT_EMPTY_PATH = 0x1000
AT_FDCWD = -0x64
AT_NO_AUTOMOUNT = 0x800
AT_REMOVEDIR = 0x200
AT_STATX_SYNC_AS_STAT = 0x0
AT_STATX_FORCE_SYNC = 0x2000
AT_STATX_DONT_SYNC = 0x4000
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
AT_EACCESS = 0x200
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLIN = 0x1
POLLPRI = 0x2
POLLOUT = 0x4
POLLRDHUP = 0x2000
POLLERR = 0x8
POLLHUP = 0x10
POLLNVAL = 0x20
)
type Sigset_t struct {
Val [16]uint64
}
const _C__NSIG = 0x41
type SignalfdSiginfo struct {
Signo uint32
Errno int32
Code int32
Pid uint32
Uid uint32
Fd int32
Tid uint32
Band uint32
Overrun uint32
Trapno uint32
Status int32
Int int32
Ptr uint64
Utime uint64
Stime uint64
Addr uint64
Addr_lsb uint16
_ uint16
Syscall int32
Call_addr uint64
Arch uint32
_ [28]uint8
}
const PERF_IOC_FLAG_GROUP = 0x1
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Line uint8
Cc [19]uint8
Ispeed uint32
Ospeed uint32
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
type Taskstats struct {
Version uint16
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
Blkio_delay_total uint64
Swapin_count uint64
Swapin_delay_total uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
_ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
Ac_minflt uint64
Ac_majflt uint64
Coremem uint64
Virtmem uint64
Hiwater_rss uint64
Hiwater_vm uint64
Read_char uint64
Write_char uint64
Read_syscalls uint64
Write_syscalls uint64
Read_bytes uint64
Write_bytes uint64
Cancelled_write_bytes uint64
Nvcsw uint64
Nivcsw uint64
Ac_utimescaled uint64
Ac_stimescaled uint64
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
}
const (
TASKSTATS_CMD_UNSPEC = 0x0
TASKSTATS_CMD_GET = 0x1
TASKSTATS_CMD_NEW = 0x2
TASKSTATS_TYPE_UNSPEC = 0x0
TASKSTATS_TYPE_PID = 0x1
TASKSTATS_TYPE_TGID = 0x2
TASKSTATS_TYPE_STATS = 0x3
TASKSTATS_TYPE_AGGR_PID = 0x4
TASKSTATS_TYPE_AGGR_TGID = 0x5
TASKSTATS_TYPE_NULL = 0x6
TASKSTATS_CMD_ATTR_UNSPEC = 0x0
TASKSTATS_CMD_ATTR_PID = 0x1
TASKSTATS_CMD_ATTR_TGID = 0x2
TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
)
type CGroupStats struct {
Sleeping uint64
Running uint64
Stopped uint64
Uninterruptible uint64
Io_wait uint64
}
const (
CGROUPSTATS_CMD_UNSPEC = 0x3
CGROUPSTATS_CMD_GET = 0x4
CGROUPSTATS_CMD_NEW = 0x5
CGROUPSTATS_TYPE_UNSPEC = 0x0
CGROUPSTATS_TYPE_CGROUP_STATS = 0x1
CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0
CGROUPSTATS_CMD_ATTR_FD = 0x1
)
type Genlmsghdr struct {
Cmd uint8
Version uint8
Reserved uint16
}
const (
CTRL_CMD_UNSPEC = 0x0
CTRL_CMD_NEWFAMILY = 0x1
CTRL_CMD_DELFAMILY = 0x2
CTRL_CMD_GETFAMILY = 0x3
CTRL_CMD_NEWOPS = 0x4
CTRL_CMD_DELOPS = 0x5
CTRL_CMD_GETOPS = 0x6
CTRL_CMD_NEWMCAST_GRP = 0x7
CTRL_CMD_DELMCAST_GRP = 0x8
CTRL_CMD_GETMCAST_GRP = 0x9
CTRL_ATTR_UNSPEC = 0x0
CTRL_ATTR_FAMILY_ID = 0x1
CTRL_ATTR_FAMILY_NAME = 0x2
CTRL_ATTR_VERSION = 0x3
CTRL_ATTR_HDRSIZE = 0x4
CTRL_ATTR_MAXATTR = 0x5
CTRL_ATTR_OPS = 0x6
CTRL_ATTR_MCAST_GROUPS = 0x7
CTRL_ATTR_OP_UNSPEC = 0x0
CTRL_ATTR_OP_ID = 0x1
CTRL_ATTR_OP_FLAGS = 0x2
CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
CTRL_ATTR_MCAST_GRP_NAME = 0x1
CTRL_ATTR_MCAST_GRP_ID = 0x2
)
type cpuMask uint64
const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
const (
BDADDR_BREDR = 0x0
BDADDR_LE_PUBLIC = 0x1
BDADDR_LE_RANDOM = 0x2
)
type PerfEventAttr struct {
Type uint32
Size uint32
Config uint64
Sample uint64
Sample_type uint64
Read_format uint64
Bits uint64
Wakeup uint32
Bp_type uint32
Ext1 uint64
Ext2 uint64
Branch_sample_type uint64
Sample_regs_user uint64
Sample_stack_user uint32
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
Sample_max_stack uint16
_ uint16
}
type PerfEventMmapPage struct {
Version uint32
Compat_version uint32
Lock uint32
Index uint32
Offset int64
Time_enabled uint64
Time_running uint64
Capabilities uint64
Pmc_width uint16
Time_shift uint16
Time_mult uint32
Time_offset uint64
Time_zero uint64
Size uint32
_ [948]uint8
Data_head uint64
Data_tail uint64
Data_offset uint64
Data_size uint64
Aux_head uint64
Aux_tail uint64
Aux_offset uint64
Aux_size uint64
}
const (
PerfBitDisabled uint64 = CBitFieldMaskBit0
PerfBitInherit = CBitFieldMaskBit1
PerfBitPinned = CBitFieldMaskBit2
PerfBitExclusive = CBitFieldMaskBit3
PerfBitExcludeUser = CBitFieldMaskBit4
PerfBitExcludeKernel = CBitFieldMaskBit5
PerfBitExcludeHv = CBitFieldMaskBit6
PerfBitExcludeIdle = CBitFieldMaskBit7
PerfBitMmap = CBitFieldMaskBit8
PerfBitComm = CBitFieldMaskBit9
PerfBitFreq = CBitFieldMaskBit10
PerfBitInheritStat = CBitFieldMaskBit11
PerfBitEnableOnExec = CBitFieldMaskBit12
PerfBitTask = CBitFieldMaskBit13
PerfBitWatermark = CBitFieldMaskBit14
PerfBitPreciseIPBit1 = CBitFieldMaskBit15
PerfBitPreciseIPBit2 = CBitFieldMaskBit16
PerfBitMmapData = CBitFieldMaskBit17
PerfBitSampleIDAll = CBitFieldMaskBit18
PerfBitExcludeHost = CBitFieldMaskBit19
PerfBitExcludeGuest = CBitFieldMaskBit20
PerfBitExcludeCallchainKernel = CBitFieldMaskBit21
PerfBitExcludeCallchainUser = CBitFieldMaskBit22
PerfBitMmap2 = CBitFieldMaskBit23
PerfBitCommExec = CBitFieldMaskBit24
PerfBitUseClockID = CBitFieldMaskBit25
PerfBitContextSwitch = CBitFieldMaskBit26
)
const (
PERF_TYPE_HARDWARE = 0x0
PERF_TYPE_SOFTWARE = 0x1
PERF_TYPE_TRACEPOINT = 0x2
PERF_TYPE_HW_CACHE = 0x3
PERF_TYPE_RAW = 0x4
PERF_TYPE_BREAKPOINT = 0x5
PERF_COUNT_HW_CPU_CYCLES = 0x0
PERF_COUNT_HW_INSTRUCTIONS = 0x1
PERF_COUNT_HW_CACHE_REFERENCES = 0x2
PERF_COUNT_HW_CACHE_MISSES = 0x3
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4
PERF_COUNT_HW_BRANCH_MISSES = 0x5
PERF_COUNT_HW_BUS_CYCLES = 0x6
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8
PERF_COUNT_HW_REF_CPU_CYCLES = 0x9
PERF_COUNT_HW_CACHE_L1D = 0x0
PERF_COUNT_HW_CACHE_L1I = 0x1
PERF_COUNT_HW_CACHE_LL = 0x2
PERF_COUNT_HW_CACHE_DTLB = 0x3
PERF_COUNT_HW_CACHE_ITLB = 0x4
PERF_COUNT_HW_CACHE_BPU = 0x5
PERF_COUNT_HW_CACHE_NODE = 0x6
PERF_COUNT_HW_CACHE_OP_READ = 0x0
PERF_COUNT_HW_CACHE_OP_WRITE = 0x1
PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0
PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1
PERF_COUNT_SW_CPU_CLOCK = 0x0
PERF_COUNT_SW_TASK_CLOCK = 0x1
PERF_COUNT_SW_PAGE_FAULTS = 0x2
PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3
PERF_COUNT_SW_CPU_MIGRATIONS = 0x4
PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5
PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
PERF_SAMPLE_TIME = 0x4
PERF_SAMPLE_ADDR = 0x8
PERF_SAMPLE_READ = 0x10
PERF_SAMPLE_CALLCHAIN = 0x20
PERF_SAMPLE_ID = 0x40
PERF_SAMPLE_CPU = 0x80
PERF_SAMPLE_PERIOD = 0x100
PERF_SAMPLE_STREAM_ID = 0x200
PERF_SAMPLE_RAW = 0x400
PERF_SAMPLE_BRANCH_STACK = 0x800
PERF_SAMPLE_BRANCH_USER = 0x1
PERF_SAMPLE_BRANCH_KERNEL = 0x2
PERF_SAMPLE_BRANCH_HV = 0x4
PERF_SAMPLE_BRANCH_ANY = 0x8
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
PERF_SAMPLE_BRANCH_IN_TX = 0x100
PERF_SAMPLE_BRANCH_NO_TX = 0x200
PERF_SAMPLE_BRANCH_COND = 0x400
PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
PERF_SAMPLE_BRANCH_CALL = 0x2000
PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
PERF_RECORD_MMAP = 0x1
PERF_RECORD_LOST = 0x2
PERF_RECORD_COMM = 0x3
PERF_RECORD_EXIT = 0x4
PERF_RECORD_THROTTLE = 0x5
PERF_RECORD_UNTHROTTLE = 0x6
PERF_RECORD_FORK = 0x7
PERF_RECORD_READ = 0x8
PERF_RECORD_SAMPLE = 0x9
PERF_RECORD_MMAP2 = 0xa
PERF_RECORD_AUX = 0xb
PERF_RECORD_ITRACE_START = 0xc
PERF_RECORD_LOST_SAMPLES = 0xd
PERF_RECORD_SWITCH = 0xe
PERF_RECORD_SWITCH_CPU_WIDE = 0xf
PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
PERF_CONTEXT_USER = -0x200
PERF_CONTEXT_GUEST = -0x800
PERF_CONTEXT_GUEST_KERNEL = -0x880
PERF_CONTEXT_GUEST_USER = -0xa00
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
CBitFieldMaskBit0 = 0x1
CBitFieldMaskBit1 = 0x2
CBitFieldMaskBit2 = 0x4
CBitFieldMaskBit3 = 0x8
CBitFieldMaskBit4 = 0x10
CBitFieldMaskBit5 = 0x20
CBitFieldMaskBit6 = 0x40
CBitFieldMaskBit7 = 0x80
CBitFieldMaskBit8 = 0x100
CBitFieldMaskBit9 = 0x200
CBitFieldMaskBit10 = 0x400
CBitFieldMaskBit11 = 0x800
CBitFieldMaskBit12 = 0x1000
CBitFieldMaskBit13 = 0x2000
CBitFieldMaskBit14 = 0x4000
CBitFieldMaskBit15 = 0x8000
CBitFieldMaskBit16 = 0x10000
CBitFieldMaskBit17 = 0x20000
CBitFieldMaskBit18 = 0x40000
CBitFieldMaskBit19 = 0x80000
CBitFieldMaskBit20 = 0x100000
CBitFieldMaskBit21 = 0x200000
CBitFieldMaskBit22 = 0x400000
CBitFieldMaskBit23 = 0x800000
CBitFieldMaskBit24 = 0x1000000
CBitFieldMaskBit25 = 0x2000000
CBitFieldMaskBit26 = 0x4000000
CBitFieldMaskBit27 = 0x8000000
CBitFieldMaskBit28 = 0x10000000
CBitFieldMaskBit29 = 0x20000000
CBitFieldMaskBit30 = 0x40000000
CBitFieldMaskBit31 = 0x80000000
CBitFieldMaskBit32 = 0x100000000
CBitFieldMaskBit33 = 0x200000000
CBitFieldMaskBit34 = 0x400000000
CBitFieldMaskBit35 = 0x800000000
CBitFieldMaskBit36 = 0x1000000000
CBitFieldMaskBit37 = 0x2000000000
CBitFieldMaskBit38 = 0x4000000000
CBitFieldMaskBit39 = 0x8000000000
CBitFieldMaskBit40 = 0x10000000000
CBitFieldMaskBit41 = 0x20000000000
CBitFieldMaskBit42 = 0x40000000000
CBitFieldMaskBit43 = 0x80000000000
CBitFieldMaskBit44 = 0x100000000000
CBitFieldMaskBit45 = 0x200000000000
CBitFieldMaskBit46 = 0x400000000000
CBitFieldMaskBit47 = 0x800000000000
CBitFieldMaskBit48 = 0x1000000000000
CBitFieldMaskBit49 = 0x2000000000000
CBitFieldMaskBit50 = 0x4000000000000
CBitFieldMaskBit51 = 0x8000000000000
CBitFieldMaskBit52 = 0x10000000000000
CBitFieldMaskBit53 = 0x20000000000000
CBitFieldMaskBit54 = 0x40000000000000
CBitFieldMaskBit55 = 0x80000000000000
CBitFieldMaskBit56 = 0x100000000000000
CBitFieldMaskBit57 = 0x200000000000000
CBitFieldMaskBit58 = 0x400000000000000
CBitFieldMaskBit59 = 0x800000000000000
CBitFieldMaskBit60 = 0x1000000000000000
CBitFieldMaskBit61 = 0x2000000000000000
CBitFieldMaskBit62 = 0x4000000000000000
CBitFieldMaskBit63 = 0x8000000000000000
)
type SockaddrStorage struct {
Family uint16
_ [118]int8
_ uint64
}
type TCPMD5Sig struct {
Addr SockaddrStorage
Flags uint8
Prefixlen uint8
Keylen uint16
_ uint32
Key [80]uint8
}
type HDDriveCmdHdr struct {
Command uint8
Number uint8
Feature uint8
Count uint8
}
type HDGeometry struct {
Heads uint8
Sectors uint8
Cylinders uint16
Start uint64
}
type HDDriveID struct {
Config uint16
Cyls uint16
Reserved2 uint16
Heads uint16
Track_bytes uint16
Sector_bytes uint16
Sectors uint16
Vendor0 uint16
Vendor1 uint16
Vendor2 uint16
Serial_no [20]uint8
Buf_type uint16
Buf_size uint16
Ecc_bytes uint16
Fw_rev [8]uint8
Model [40]uint8
Max_multsect uint8
Vendor3 uint8
Dword_io uint16
Vendor4 uint8
Capability uint8
Reserved50 uint16
Vendor5 uint8
TPIO uint8
Vendor6 uint8
TDMA uint8
Field_valid uint16
Cur_cyls uint16
Cur_heads uint16
Cur_sectors uint16
Cur_capacity0 uint16
Cur_capacity1 uint16
Multsect uint8
Multsect_valid uint8
Lba_capacity uint32
Dma_1word uint16
Dma_mword uint16
Eide_pio_modes uint16
Eide_dma_min uint16
Eide_dma_time uint16
Eide_pio uint16
Eide_pio_iordy uint16
Words69_70 [2]uint16
Words71_74 [4]uint16
Queue_depth uint16
Words76_79 [4]uint16
Major_rev_num uint16
Minor_rev_num uint16
Command_set_1 uint16
Command_set_2 uint16
Cfsse uint16
Cfs_enable_1 uint16
Cfs_enable_2 uint16
Csf_default uint16
Dma_ultra uint16
Trseuc uint16
TrsEuc uint16
CurAPMvalues uint16
Mprc uint16
Hw_config uint16
Acoustic uint16
Msrqs uint16
Sxfert uint16
Sal uint16
Spg uint32
Lba_capacity_2 uint64
Words104_125 [22]uint16
Last_lun uint16
Word127 uint16
Dlf uint16
Csfo uint16
Words130_155 [26]uint16
Word156 uint16
Words157_159 [3]uint16
Cfa_power uint16
Words161_175 [15]uint16
Words176_205 [30]uint16
Words206_254 [49]uint16
Integrity_word uint16
}
type Statfs_t struct {
Type int64
Bsize int64
Blocks uint64
Bfree uint64
Bavail uint64
Files uint64
Ffree uint64
Fsid Fsid
Namelen int64
Frsize int64
Flags int64
Spare [4]int64
}
const (
ST_MANDLOCK = 0x40
ST_NOATIME = 0x400
ST_NODEV = 0x4
ST_NODIRATIME = 0x800
ST_NOEXEC = 0x8
ST_NOSUID = 0x2
ST_RDONLY = 0x1
ST_RELATIME = 0x1000
ST_SYNCHRONOUS = 0x10
)
type TpacketHdr struct {
Status uint64
Len uint32
Snaplen uint32
Mac uint16
Net uint16
Sec uint32
Usec uint32
_ [4]byte
}
type Tpacket2Hdr struct {
Status uint32
Len uint32
Snaplen uint32
Mac uint16
Net uint16
Sec uint32
Nsec uint32
Vlan_tci uint16
Vlan_tpid uint16
_ [4]uint8
}
type Tpacket3Hdr struct {
Next_offset uint32
Sec uint32
Nsec uint32
Snaplen uint32
Len uint32
Status uint32
Mac uint16
Net uint16
Hv1 TpacketHdrVariant1
_ [8]uint8
}
type TpacketHdrVariant1 struct {
Rxhash uint32
Vlan_tci uint32
Vlan_tpid uint16
_ uint16
}
type TpacketBlockDesc struct {
Version uint32
To_priv uint32
Hdr [40]byte
}
type TpacketBDTS struct {
Sec uint32
Usec uint32
}
type TpacketHdrV1 struct {
Block_status uint32
Num_pkts uint32
Offset_to_first_pkt uint32
Blk_len uint32
Seq_num uint64
Ts_first_pkt TpacketBDTS
Ts_last_pkt TpacketBDTS
}
type TpacketReq struct {
Block_size uint32
Block_nr uint32
Frame_size uint32
Frame_nr uint32
}
type TpacketReq3 struct {
Block_size uint32
Block_nr uint32
Frame_size uint32
Frame_nr uint32
Retire_blk_tov uint32
Sizeof_priv uint32
Feature_req_word uint32
}
type TpacketStats struct {
Packets uint32
Drops uint32
}
type TpacketStatsV3 struct {
Packets uint32
Drops uint32
Freeze_q_cnt uint32
}
type TpacketAuxdata struct {
Status uint32
Len uint32
Snaplen uint32
Mac uint16
Net uint16
Vlan_tci uint16
Vlan_tpid uint16
}
const (
TPACKET_V1 = 0x0
TPACKET_V2 = 0x1
TPACKET_V3 = 0x2
)
const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
SizeofTpacketStats = 0x8
SizeofTpacketStatsV3 = 0xc
)
const (
NF_INET_PRE_ROUTING = 0x0
NF_INET_LOCAL_IN = 0x1
NF_INET_FORWARD = 0x2
NF_INET_LOCAL_OUT = 0x3
NF_INET_POST_ROUTING = 0x4
NF_INET_NUMHOOKS = 0x5
)
const (
NF_NETDEV_INGRESS = 0x0
NF_NETDEV_NUMHOOKS = 0x1
)
const (
NFPROTO_UNSPEC = 0x0
NFPROTO_INET = 0x1
NFPROTO_IPV4 = 0x2
NFPROTO_ARP = 0x3
NFPROTO_NETDEV = 0x5
NFPROTO_BRIDGE = 0x7
NFPROTO_IPV6 = 0xa
NFPROTO_DECNET = 0xc
NFPROTO_NUMPROTO = 0xd
)
type Nfgenmsg struct {
Nfgen_family uint8
Version uint8
Res_id uint16
}
const (
NFNL_BATCH_UNSPEC = 0x0
NFNL_BATCH_GENID = 0x1
)
const (
NFT_REG_VERDICT = 0x0
NFT_REG_1 = 0x1
NFT_REG_2 = 0x2
NFT_REG_3 = 0x3
NFT_REG_4 = 0x4
NFT_REG32_00 = 0x8
NFT_REG32_01 = 0x9
NFT_REG32_02 = 0xa
NFT_REG32_03 = 0xb
NFT_REG32_04 = 0xc
NFT_REG32_05 = 0xd
NFT_REG32_06 = 0xe
NFT_REG32_07 = 0xf
NFT_REG32_08 = 0x10
NFT_REG32_09 = 0x11
NFT_REG32_10 = 0x12
NFT_REG32_11 = 0x13
NFT_REG32_12 = 0x14
NFT_REG32_13 = 0x15
NFT_REG32_14 = 0x16
NFT_REG32_15 = 0x17
NFT_CONTINUE = -0x1
NFT_BREAK = -0x2
NFT_JUMP = -0x3
NFT_GOTO = -0x4
NFT_RETURN = -0x5
NFT_MSG_NEWTABLE = 0x0
NFT_MSG_GETTABLE = 0x1
NFT_MSG_DELTABLE = 0x2
NFT_MSG_NEWCHAIN = 0x3
NFT_MSG_GETCHAIN = 0x4
NFT_MSG_DELCHAIN = 0x5
NFT_MSG_NEWRULE = 0x6
NFT_MSG_GETRULE = 0x7
NFT_MSG_DELRULE = 0x8
NFT_MSG_NEWSET = 0x9
NFT_MSG_GETSET = 0xa
NFT_MSG_DELSET = 0xb
NFT_MSG_NEWSETELEM = 0xc
NFT_MSG_GETSETELEM = 0xd
NFT_MSG_DELSETELEM = 0xe
NFT_MSG_NEWGEN = 0xf
NFT_MSG_GETGEN = 0x10
NFT_MSG_TRACE = 0x11
NFT_MSG_NEWOBJ = 0x12
NFT_MSG_GETOBJ = 0x13
NFT_MSG_DELOBJ = 0x14
NFT_MSG_GETOBJ_RESET = 0x15
NFT_MSG_MAX = 0x19
NFTA_LIST_UNPEC = 0x0
NFTA_LIST_ELEM = 0x1
NFTA_HOOK_UNSPEC = 0x0
NFTA_HOOK_HOOKNUM = 0x1
NFTA_HOOK_PRIORITY = 0x2
NFTA_HOOK_DEV = 0x3
NFT_TABLE_F_DORMANT = 0x1
NFTA_TABLE_UNSPEC = 0x0
NFTA_TABLE_NAME = 0x1
NFTA_TABLE_FLAGS = 0x2
NFTA_TABLE_USE = 0x3
NFTA_CHAIN_UNSPEC = 0x0
NFTA_CHAIN_TABLE = 0x1
NFTA_CHAIN_HANDLE = 0x2
NFTA_CHAIN_NAME = 0x3
NFTA_CHAIN_HOOK = 0x4
NFTA_CHAIN_POLICY = 0x5
NFTA_CHAIN_USE = 0x6
NFTA_CHAIN_TYPE = 0x7
NFTA_CHAIN_COUNTERS = 0x8
NFTA_CHAIN_PAD = 0x9
NFTA_RULE_UNSPEC = 0x0
NFTA_RULE_TABLE = 0x1
NFTA_RULE_CHAIN = 0x2
NFTA_RULE_HANDLE = 0x3
NFTA_RULE_EXPRESSIONS = 0x4
NFTA_RULE_COMPAT = 0x5
NFTA_RULE_POSITION = 0x6
NFTA_RULE_USERDATA = 0x7
NFTA_RULE_PAD = 0x8
NFTA_RULE_ID = 0x9
NFT_RULE_COMPAT_F_INV = 0x2
NFT_RULE_COMPAT_F_MASK = 0x2
NFTA_RULE_COMPAT_UNSPEC = 0x0
NFTA_RULE_COMPAT_PROTO = 0x1
NFTA_RULE_COMPAT_FLAGS = 0x2
NFT_SET_ANONYMOUS = 0x1
NFT_SET_CONSTANT = 0x2
NFT_SET_INTERVAL = 0x4
NFT_SET_MAP = 0x8
NFT_SET_TIMEOUT = 0x10
NFT_SET_EVAL = 0x20
NFT_SET_OBJECT = 0x40
NFT_SET_POL_PERFORMANCE = 0x0
NFT_SET_POL_MEMORY = 0x1
NFTA_SET_DESC_UNSPEC = 0x0
NFTA_SET_DESC_SIZE = 0x1
NFTA_SET_UNSPEC = 0x0
NFTA_SET_TABLE = 0x1
NFTA_SET_NAME = 0x2
NFTA_SET_FLAGS = 0x3
NFTA_SET_KEY_TYPE = 0x4
NFTA_SET_KEY_LEN = 0x5
NFTA_SET_DATA_TYPE = 0x6
NFTA_SET_DATA_LEN = 0x7
NFTA_SET_POLICY = 0x8
NFTA_SET_DESC = 0x9
NFTA_SET_ID = 0xa
NFTA_SET_TIMEOUT = 0xb
NFTA_SET_GC_INTERVAL = 0xc
NFTA_SET_USERDATA = 0xd
NFTA_SET_PAD = 0xe
NFTA_SET_OBJ_TYPE = 0xf
NFT_SET_ELEM_INTERVAL_END = 0x1
NFTA_SET_ELEM_UNSPEC = 0x0
NFTA_SET_ELEM_KEY = 0x1
NFTA_SET_ELEM_DATA = 0x2
NFTA_SET_ELEM_FLAGS = 0x3
NFTA_SET_ELEM_TIMEOUT = 0x4
NFTA_SET_ELEM_EXPIRATION = 0x5
NFTA_SET_ELEM_USERDATA = 0x6
NFTA_SET_ELEM_EXPR = 0x7
NFTA_SET_ELEM_PAD = 0x8
NFTA_SET_ELEM_OBJREF = 0x9
NFTA_SET_ELEM_LIST_UNSPEC = 0x0
NFTA_SET_ELEM_LIST_TABLE = 0x1
NFTA_SET_ELEM_LIST_SET = 0x2
NFTA_SET_ELEM_LIST_ELEMENTS = 0x3
NFTA_SET_ELEM_LIST_SET_ID = 0x4
NFT_DATA_VALUE = 0x0
NFT_DATA_VERDICT = 0xffffff00
NFTA_DATA_UNSPEC = 0x0
NFTA_DATA_VALUE = 0x1
NFTA_DATA_VERDICT = 0x2
NFTA_VERDICT_UNSPEC = 0x0
NFTA_VERDICT_CODE = 0x1
NFTA_VERDICT_CHAIN = 0x2
NFTA_EXPR_UNSPEC = 0x0
NFTA_EXPR_NAME = 0x1
NFTA_EXPR_DATA = 0x2
NFTA_IMMEDIATE_UNSPEC = 0x0
NFTA_IMMEDIATE_DREG = 0x1
NFTA_IMMEDIATE_DATA = 0x2
NFTA_BITWISE_UNSPEC = 0x0
NFTA_BITWISE_SREG = 0x1
NFTA_BITWISE_DREG = 0x2
NFTA_BITWISE_LEN = 0x3
NFTA_BITWISE_MASK = 0x4
NFTA_BITWISE_XOR = 0x5
NFT_BYTEORDER_NTOH = 0x0
NFT_BYTEORDER_HTON = 0x1
NFTA_BYTEORDER_UNSPEC = 0x0
NFTA_BYTEORDER_SREG = 0x1
NFTA_BYTEORDER_DREG = 0x2
NFTA_BYTEORDER_OP = 0x3
NFTA_BYTEORDER_LEN = 0x4
NFTA_BYTEORDER_SIZE = 0x5
NFT_CMP_EQ = 0x0
NFT_CMP_NEQ = 0x1
NFT_CMP_LT = 0x2
NFT_CMP_LTE = 0x3
NFT_CMP_GT = 0x4
NFT_CMP_GTE = 0x5
NFTA_CMP_UNSPEC = 0x0
NFTA_CMP_SREG = 0x1
NFTA_CMP_OP = 0x2
NFTA_CMP_DATA = 0x3
NFT_RANGE_EQ = 0x0
NFT_RANGE_NEQ = 0x1
NFTA_RANGE_UNSPEC = 0x0
NFTA_RANGE_SREG = 0x1
NFTA_RANGE_OP = 0x2
NFTA_RANGE_FROM_DATA = 0x3
NFTA_RANGE_TO_DATA = 0x4
NFT_LOOKUP_F_INV = 0x1
NFTA_LOOKUP_UNSPEC = 0x0
NFTA_LOOKUP_SET = 0x1
NFTA_LOOKUP_SREG = 0x2
NFTA_LOOKUP_DREG = 0x3
NFTA_LOOKUP_SET_ID = 0x4
NFTA_LOOKUP_FLAGS = 0x5
NFT_DYNSET_OP_ADD = 0x0
NFT_DYNSET_OP_UPDATE = 0x1
NFT_DYNSET_F_INV = 0x1
NFTA_DYNSET_UNSPEC = 0x0
NFTA_DYNSET_SET_NAME = 0x1
NFTA_DYNSET_SET_ID = 0x2
NFTA_DYNSET_OP = 0x3
NFTA_DYNSET_SREG_KEY = 0x4
NFTA_DYNSET_SREG_DATA = 0x5
NFTA_DYNSET_TIMEOUT = 0x6
NFTA_DYNSET_EXPR = 0x7
NFTA_DYNSET_PAD = 0x8
NFTA_DYNSET_FLAGS = 0x9
NFT_PAYLOAD_LL_HEADER = 0x0
NFT_PAYLOAD_NETWORK_HEADER = 0x1
NFT_PAYLOAD_TRANSPORT_HEADER = 0x2
NFT_PAYLOAD_CSUM_NONE = 0x0
NFT_PAYLOAD_CSUM_INET = 0x1
NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1
NFTA_PAYLOAD_UNSPEC = 0x0
NFTA_PAYLOAD_DREG = 0x1
NFTA_PAYLOAD_BASE = 0x2
NFTA_PAYLOAD_OFFSET = 0x3
NFTA_PAYLOAD_LEN = 0x4
NFTA_PAYLOAD_SREG = 0x5
NFTA_PAYLOAD_CSUM_TYPE = 0x6
NFTA_PAYLOAD_CSUM_OFFSET = 0x7
NFTA_PAYLOAD_CSUM_FLAGS = 0x8
NFT_EXTHDR_F_PRESENT = 0x1
NFT_EXTHDR_OP_IPV6 = 0x0
NFT_EXTHDR_OP_TCPOPT = 0x1
NFTA_EXTHDR_UNSPEC = 0x0
NFTA_EXTHDR_DREG = 0x1
NFTA_EXTHDR_TYPE = 0x2
NFTA_EXTHDR_OFFSET = 0x3
NFTA_EXTHDR_LEN = 0x4
NFTA_EXTHDR_FLAGS = 0x5
NFTA_EXTHDR_OP = 0x6
NFTA_EXTHDR_SREG = 0x7
NFT_META_LEN = 0x0
NFT_META_PROTOCOL = 0x1
NFT_META_PRIORITY = 0x2
NFT_META_MARK = 0x3
NFT_META_IIF = 0x4
NFT_META_OIF = 0x5
NFT_META_IIFNAME = 0x6
NFT_META_OIFNAME = 0x7
NFT_META_IIFTYPE = 0x8
NFT_META_OIFTYPE = 0x9
NFT_META_SKUID = 0xa
NFT_META_SKGID = 0xb
NFT_META_NFTRACE = 0xc
NFT_META_RTCLASSID = 0xd
NFT_META_SECMARK = 0xe
NFT_META_NFPROTO = 0xf
NFT_META_L4PROTO = 0x10
NFT_META_BRI_IIFNAME = 0x11
NFT_META_BRI_OIFNAME = 0x12
NFT_META_PKTTYPE = 0x13
NFT_META_CPU = 0x14
NFT_META_IIFGROUP = 0x15
NFT_META_OIFGROUP = 0x16
NFT_META_CGROUP = 0x17
NFT_META_PRANDOM = 0x18
NFT_RT_CLASSID = 0x0
NFT_RT_NEXTHOP4 = 0x1
NFT_RT_NEXTHOP6 = 0x2
NFT_RT_TCPMSS = 0x3
NFT_HASH_JENKINS = 0x0
NFT_HASH_SYM = 0x1
NFTA_HASH_UNSPEC = 0x0
NFTA_HASH_SREG = 0x1
NFTA_HASH_DREG = 0x2
NFTA_HASH_LEN = 0x3
NFTA_HASH_MODULUS = 0x4
NFTA_HASH_SEED = 0x5
NFTA_HASH_OFFSET = 0x6
NFTA_HASH_TYPE = 0x7
NFTA_META_UNSPEC = 0x0
NFTA_META_DREG = 0x1
NFTA_META_KEY = 0x2
NFTA_META_SREG = 0x3
NFTA_RT_UNSPEC = 0x0
NFTA_RT_DREG = 0x1
NFTA_RT_KEY = 0x2
NFT_CT_STATE = 0x0
NFT_CT_DIRECTION = 0x1
NFT_CT_STATUS = 0x2
NFT_CT_MARK = 0x3
NFT_CT_SECMARK = 0x4
NFT_CT_EXPIRATION = 0x5
NFT_CT_HELPER = 0x6
NFT_CT_L3PROTOCOL = 0x7
NFT_CT_SRC = 0x8
NFT_CT_DST = 0x9
NFT_CT_PROTOCOL = 0xa
NFT_CT_PROTO_SRC = 0xb
NFT_CT_PROTO_DST = 0xc
NFT_CT_LABELS = 0xd
NFT_CT_PKTS = 0xe
NFT_CT_BYTES = 0xf
NFT_CT_AVGPKT = 0x10
NFT_CT_ZONE = 0x11
NFT_CT_EVENTMASK = 0x12
NFTA_CT_UNSPEC = 0x0
NFTA_CT_DREG = 0x1
NFTA_CT_KEY = 0x2
NFTA_CT_DIRECTION = 0x3
NFTA_CT_SREG = 0x4
NFT_LIMIT_PKTS = 0x0
NFT_LIMIT_PKT_BYTES = 0x1
NFT_LIMIT_F_INV = 0x1
NFTA_LIMIT_UNSPEC = 0x0
NFTA_LIMIT_RATE = 0x1
NFTA_LIMIT_UNIT = 0x2
NFTA_LIMIT_BURST = 0x3
NFTA_LIMIT_TYPE = 0x4
NFTA_LIMIT_FLAGS = 0x5
NFTA_LIMIT_PAD = 0x6
NFTA_COUNTER_UNSPEC = 0x0
NFTA_COUNTER_BYTES = 0x1
NFTA_COUNTER_PACKETS = 0x2
NFTA_COUNTER_PAD = 0x3
NFTA_LOG_UNSPEC = 0x0
NFTA_LOG_GROUP = 0x1
NFTA_LOG_PREFIX = 0x2
NFTA_LOG_SNAPLEN = 0x3
NFTA_LOG_QTHRESHOLD = 0x4
NFTA_LOG_LEVEL = 0x5
NFTA_LOG_FLAGS = 0x6
NFTA_QUEUE_UNSPEC = 0x0
NFTA_QUEUE_NUM = 0x1
NFTA_QUEUE_TOTAL = 0x2
NFTA_QUEUE_FLAGS = 0x3
NFTA_QUEUE_SREG_QNUM = 0x4
NFT_QUOTA_F_INV = 0x1
NFT_QUOTA_F_DEPLETED = 0x2
NFTA_QUOTA_UNSPEC = 0x0
NFTA_QUOTA_BYTES = 0x1
NFTA_QUOTA_FLAGS = 0x2
NFTA_QUOTA_PAD = 0x3
NFTA_QUOTA_CONSUMED = 0x4
NFT_REJECT_ICMP_UNREACH = 0x0
NFT_REJECT_TCP_RST = 0x1
NFT_REJECT_ICMPX_UNREACH = 0x2
NFT_REJECT_ICMPX_NO_ROUTE = 0x0
NFT_REJECT_ICMPX_PORT_UNREACH = 0x1
NFT_REJECT_ICMPX_HOST_UNREACH = 0x2
NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3
NFTA_REJECT_UNSPEC = 0x0
NFTA_REJECT_TYPE = 0x1
NFTA_REJECT_ICMP_CODE = 0x2
NFT_NAT_SNAT = 0x0
NFT_NAT_DNAT = 0x1
NFTA_NAT_UNSPEC = 0x0
NFTA_NAT_TYPE = 0x1
NFTA_NAT_FAMILY = 0x2
NFTA_NAT_REG_ADDR_MIN = 0x3
NFTA_NAT_REG_ADDR_MAX = 0x4
NFTA_NAT_REG_PROTO_MIN = 0x5
NFTA_NAT_REG_PROTO_MAX = 0x6
NFTA_NAT_FLAGS = 0x7
NFTA_MASQ_UNSPEC = 0x0
NFTA_MASQ_FLAGS = 0x1
NFTA_MASQ_REG_PROTO_MIN = 0x2
NFTA_MASQ_REG_PROTO_MAX = 0x3
NFTA_REDIR_UNSPEC = 0x0
NFTA_REDIR_REG_PROTO_MIN = 0x1
NFTA_REDIR_REG_PROTO_MAX = 0x2
NFTA_REDIR_FLAGS = 0x3
NFTA_DUP_UNSPEC = 0x0
NFTA_DUP_SREG_ADDR = 0x1
NFTA_DUP_SREG_DEV = 0x2
NFTA_FWD_UNSPEC = 0x0
NFTA_FWD_SREG_DEV = 0x1
NFTA_OBJREF_UNSPEC = 0x0
NFTA_OBJREF_IMM_TYPE = 0x1
NFTA_OBJREF_IMM_NAME = 0x2
NFTA_OBJREF_SET_SREG = 0x3
NFTA_OBJREF_SET_NAME = 0x4
NFTA_OBJREF_SET_ID = 0x5
NFTA_GEN_UNSPEC = 0x0
NFTA_GEN_ID = 0x1
NFTA_GEN_PROC_PID = 0x2
NFTA_GEN_PROC_NAME = 0x3
NFTA_FIB_UNSPEC = 0x0
NFTA_FIB_DREG = 0x1
NFTA_FIB_RESULT = 0x2
NFTA_FIB_FLAGS = 0x3
NFT_FIB_RESULT_UNSPEC = 0x0
NFT_FIB_RESULT_OIF = 0x1
NFT_FIB_RESULT_OIFNAME = 0x2
NFT_FIB_RESULT_ADDRTYPE = 0x3
NFTA_FIB_F_SADDR = 0x1
NFTA_FIB_F_DADDR = 0x2
NFTA_FIB_F_MARK = 0x4
NFTA_FIB_F_IIF = 0x8
NFTA_FIB_F_OIF = 0x10
NFTA_FIB_F_PRESENT = 0x20
NFTA_CT_HELPER_UNSPEC = 0x0
NFTA_CT_HELPER_NAME = 0x1
NFTA_CT_HELPER_L3PROTO = 0x2
NFTA_CT_HELPER_L4PROTO = 0x3
NFTA_OBJ_UNSPEC = 0x0
NFTA_OBJ_TABLE = 0x1
NFTA_OBJ_NAME = 0x2
NFTA_OBJ_TYPE = 0x3
NFTA_OBJ_DATA = 0x4
NFTA_OBJ_USE = 0x5
NFTA_TRACE_UNSPEC = 0x0
NFTA_TRACE_TABLE = 0x1
NFTA_TRACE_CHAIN = 0x2
NFTA_TRACE_RULE_HANDLE = 0x3
NFTA_TRACE_TYPE = 0x4
NFTA_TRACE_VERDICT = 0x5
NFTA_TRACE_ID = 0x6
NFTA_TRACE_LL_HEADER = 0x7
NFTA_TRACE_NETWORK_HEADER = 0x8
NFTA_TRACE_TRANSPORT_HEADER = 0x9
NFTA_TRACE_IIF = 0xa
NFTA_TRACE_IIFTYPE = 0xb
NFTA_TRACE_OIF = 0xc
NFTA_TRACE_OIFTYPE = 0xd
NFTA_TRACE_MARK = 0xe
NFTA_TRACE_NFPROTO = 0xf
NFTA_TRACE_POLICY = 0x10
NFTA_TRACE_PAD = 0x11
NFT_TRACETYPE_UNSPEC = 0x0
NFT_TRACETYPE_POLICY = 0x1
NFT_TRACETYPE_RETURN = 0x2
NFT_TRACETYPE_RULE = 0x3
NFTA_NG_UNSPEC = 0x0
NFTA_NG_DREG = 0x1
NFTA_NG_MODULUS = 0x2
NFTA_NG_TYPE = 0x3
NFTA_NG_OFFSET = 0x4
NFT_NG_INCREMENTAL = 0x0
NFT_NG_RANDOM = 0x1
)
type RTCTime struct {
Sec int32
Min int32
Hour int32
Mday int32
Mon int32
Year int32
Wday int32
Yday int32
Isdst int32
}
type RTCWkAlrm struct {
Enabled uint8
Pending uint8
Time RTCTime
}
type RTCPLLInfo struct {
Ctrl int32
Value int32
Max int32
Min int32
Posmult int32
Negmult int32
Clock int64
}
type BlkpgIoctlArg struct {
Op int32
Flags int32
Datalen int32
Data *byte
}
type BlkpgPartition struct {
Start int64
Length int64
Pno int32
Devname [64]uint8
Volname [64]uint8
_ [4]byte
}
const (
BLKPG = 0x1269
BLKPG_ADD_PARTITION = 0x1
BLKPG_DEL_PARTITION = 0x2
BLKPG_RESIZE_PARTITION = 0x3
)
const (
NETNSA_NONE = 0x0
NETNSA_NSID = 0x1
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
type XDPRingOffset struct {
Producer uint64
Consumer uint64
Desc uint64
}
type XDPMmapOffsets struct {
Rx XDPRingOffset
Tx XDPRingOffset
Fr XDPRingOffset
Cr XDPRingOffset
}
type XDPUmemReg struct {
Addr uint64
Len uint64
Size uint32
Headroom uint32
}
type XDPStatistics struct {
Rx_dropped uint64
Rx_invalid_descs uint64
Tx_invalid_descs uint64
}
type XDPDesc struct {
Addr uint64
Len uint32
Options uint32
}
const (
NCSI_CMD_UNSPEC = 0x0
NCSI_CMD_PKG_INFO = 0x1
NCSI_CMD_SET_INTERFACE = 0x2
NCSI_CMD_CLEAR_INTERFACE = 0x3
NCSI_ATTR_UNSPEC = 0x0
NCSI_ATTR_IFINDEX = 0x1
NCSI_ATTR_PACKAGE_LIST = 0x2
NCSI_ATTR_PACKAGE_ID = 0x3
NCSI_ATTR_CHANNEL_ID = 0x4
NCSI_PKG_ATTR_UNSPEC = 0x0
NCSI_PKG_ATTR = 0x1
NCSI_PKG_ATTR_ID = 0x2
NCSI_PKG_ATTR_FORCED = 0x3
NCSI_PKG_ATTR_CHANNEL_LIST = 0x4
NCSI_CHANNEL_ATTR_UNSPEC = 0x0
NCSI_CHANNEL_ATTR = 0x1
NCSI_CHANNEL_ATTR_ID = 0x2
NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3
NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4
NCSI_CHANNEL_ATTR_VERSION_STR = 0x5
NCSI_CHANNEL_ATTR_LINK_STATE = 0x6
NCSI_CHANNEL_ATTR_ACTIVE = 0x7
NCSI_CHANNEL_ATTR_FORCED = 0x8
NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
type ScmTimestamping struct {
Ts [3]Timespec
}
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
SOF_TIMESTAMPING_RX_HARDWARE = 0x4
SOF_TIMESTAMPING_RX_SOFTWARE = 0x8
SOF_TIMESTAMPING_SOFTWARE = 0x10
SOF_TIMESTAMPING_SYS_HARDWARE = 0x20
SOF_TIMESTAMPING_RAW_HARDWARE = 0x40
SOF_TIMESTAMPING_OPT_ID = 0x80
SOF_TIMESTAMPING_TX_SCHED = 0x100
SOF_TIMESTAMPING_TX_ACK = 0x200
SOF_TIMESTAMPING_OPT_CMSG = 0x400
SOF_TIMESTAMPING_OPT_TSONLY = 0x800
SOF_TIMESTAMPING_OPT_STATS = 0x1000
SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000
SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
SCM_TSTAMP_SND = 0x0
SCM_TSTAMP_SCHED = 0x1
SCM_TSTAMP_ACK = 0x2
)
type SockExtendedErr struct {
Errno uint32
Origin uint8
Type uint8
Code uint8
Pad uint8
Info uint32
Data uint32
}
type FanotifyEventMetadata struct {
Event_len uint32
Vers uint8
Reserved uint8
Metadata_len uint16
Mask uint64
Fd int32
Pid int32
}
type FanotifyResponse struct {
Fd int32
Response uint32
}
const (
CRYPTO_MSG_BASE = 0x10
CRYPTO_MSG_NEWALG = 0x10
CRYPTO_MSG_DELALG = 0x11
CRYPTO_MSG_UPDATEALG = 0x12
CRYPTO_MSG_GETALG = 0x13
CRYPTO_MSG_DELRNG = 0x14
CRYPTO_MSG_GETSTAT = 0x15
)
const (
CRYPTOCFGA_UNSPEC = 0x0
CRYPTOCFGA_PRIORITY_VAL = 0x1
CRYPTOCFGA_REPORT_LARVAL = 0x2
CRYPTOCFGA_REPORT_HASH = 0x3
CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
CRYPTOCFGA_REPORT_AEAD = 0x5
CRYPTOCFGA_REPORT_COMPRESS = 0x6
CRYPTOCFGA_REPORT_RNG = 0x7
CRYPTOCFGA_REPORT_CIPHER = 0x8
CRYPTOCFGA_REPORT_AKCIPHER = 0x9
CRYPTOCFGA_REPORT_KPP = 0xa
CRYPTOCFGA_REPORT_ACOMP = 0xb
CRYPTOCFGA_STAT_LARVAL = 0xc
CRYPTOCFGA_STAT_HASH = 0xd
CRYPTOCFGA_STAT_BLKCIPHER = 0xe
CRYPTOCFGA_STAT_AEAD = 0xf
CRYPTOCFGA_STAT_COMPRESS = 0x10
CRYPTOCFGA_STAT_RNG = 0x11
CRYPTOCFGA_STAT_CIPHER = 0x12
CRYPTOCFGA_STAT_AKCIPHER = 0x13
CRYPTOCFGA_STAT_KPP = 0x14
CRYPTOCFGA_STAT_ACOMP = 0x15
)
type CryptoUserAlg struct {
Name [64]int8
Driver_name [64]int8
Module_name [64]int8
Type uint32
Mask uint32
Refcnt uint32
Flags uint32
}
type CryptoStatAEAD struct {
Type [64]int8
Encrypt_cnt uint64
Encrypt_tlen uint64
Decrypt_cnt uint64
Decrypt_tlen uint64
Err_cnt uint64
}
type CryptoStatAKCipher struct {
Type [64]int8
Encrypt_cnt uint64
Encrypt_tlen uint64
Decrypt_cnt uint64
Decrypt_tlen uint64
Verify_cnt uint64
Sign_cnt uint64
Err_cnt uint64
}
type CryptoStatCipher struct {
Type [64]int8
Encrypt_cnt uint64
Encrypt_tlen uint64
Decrypt_cnt uint64
Decrypt_tlen uint64
Err_cnt uint64
}
type CryptoStatCompress struct {
Type [64]int8
Compress_cnt uint64
Compress_tlen uint64
Decompress_cnt uint64
Decompress_tlen uint64
Err_cnt uint64
}
type CryptoStatHash struct {
Type [64]int8
Hash_cnt uint64
Hash_tlen uint64
Err_cnt uint64
}
type CryptoStatKPP struct {
Type [64]int8
Setsecret_cnt uint64
Generate_public_key_cnt uint64
Compute_shared_secret_cnt uint64
Err_cnt uint64
}
type CryptoStatRNG struct {
Type [64]int8
Generate_cnt uint64
Generate_tlen uint64
Seed_cnt uint64
Err_cnt uint64
}
type CryptoStatLarval struct {
Type [64]int8
}
type CryptoReportLarval struct {
Type [64]int8
}
type CryptoReportHash struct {
Type [64]int8
Blocksize uint32
Digestsize uint32
}
type CryptoReportCipher struct {
Type [64]int8
Blocksize uint32
Min_keysize uint32
Max_keysize uint32
}
type CryptoReportBlkCipher struct {
Type [64]int8
Geniv [64]int8
Blocksize uint32
Min_keysize uint32
Max_keysize uint32
Ivsize uint32
}
type CryptoReportAEAD struct {
Type [64]int8
Geniv [64]int8
Blocksize uint32
Maxauthsize uint32
Ivsize uint32
}
type CryptoReportComp struct {
Type [64]int8
}
type CryptoReportRNG struct {
Type [64]int8
Seedsize uint32
}
type CryptoReportAKCipher struct {
Type [64]int8
}
type CryptoReportKPP struct {
Type [64]int8
}
type CryptoReportAcomp struct {
Type [64]int8
}
const (
BPF_REG_0 = 0x0
BPF_REG_1 = 0x1
BPF_REG_2 = 0x2
BPF_REG_3 = 0x3
BPF_REG_4 = 0x4
BPF_REG_5 = 0x5
BPF_REG_6 = 0x6
BPF_REG_7 = 0x7
BPF_REG_8 = 0x8
BPF_REG_9 = 0x9
BPF_REG_10 = 0xa
BPF_MAP_CREATE = 0x0
BPF_MAP_LOOKUP_ELEM = 0x1
BPF_MAP_UPDATE_ELEM = 0x2
BPF_MAP_DELETE_ELEM = 0x3
BPF_MAP_GET_NEXT_KEY = 0x4
BPF_PROG_LOAD = 0x5
BPF_OBJ_PIN = 0x6
BPF_OBJ_GET = 0x7
BPF_PROG_ATTACH = 0x8
BPF_PROG_DETACH = 0x9
BPF_PROG_TEST_RUN = 0xa
BPF_PROG_GET_NEXT_ID = 0xb
BPF_MAP_GET_NEXT_ID = 0xc
BPF_PROG_GET_FD_BY_ID = 0xd
BPF_MAP_GET_FD_BY_ID = 0xe
BPF_OBJ_GET_INFO_BY_FD = 0xf
BPF_PROG_QUERY = 0x10
BPF_RAW_TRACEPOINT_OPEN = 0x11
BPF_BTF_LOAD = 0x12
BPF_BTF_GET_FD_BY_ID = 0x13
BPF_TASK_FD_QUERY = 0x14
BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
BPF_MAP_TYPE_UNSPEC = 0x0
BPF_MAP_TYPE_HASH = 0x1
BPF_MAP_TYPE_ARRAY = 0x2
BPF_MAP_TYPE_PROG_ARRAY = 0x3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
BPF_MAP_TYPE_PERCPU_HASH = 0x5
BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
BPF_MAP_TYPE_STACK_TRACE = 0x7
BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
BPF_MAP_TYPE_LRU_HASH = 0x9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
BPF_MAP_TYPE_LPM_TRIE = 0xb
BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
BPF_MAP_TYPE_DEVMAP = 0xe
BPF_MAP_TYPE_SOCKMAP = 0xf
BPF_MAP_TYPE_CPUMAP = 0x10
BPF_MAP_TYPE_XSKMAP = 0x11
BPF_MAP_TYPE_SOCKHASH = 0x12
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
BPF_MAP_TYPE_QUEUE = 0x16
BPF_MAP_TYPE_STACK = 0x17
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
BPF_PROG_TYPE_SCHED_CLS = 0x3
BPF_PROG_TYPE_SCHED_ACT = 0x4
BPF_PROG_TYPE_TRACEPOINT = 0x5
BPF_PROG_TYPE_XDP = 0x6
BPF_PROG_TYPE_PERF_EVENT = 0x7
BPF_PROG_TYPE_CGROUP_SKB = 0x8
BPF_PROG_TYPE_CGROUP_SOCK = 0x9
BPF_PROG_TYPE_LWT_IN = 0xa
BPF_PROG_TYPE_LWT_OUT = 0xb
BPF_PROG_TYPE_LWT_XMIT = 0xc
BPF_PROG_TYPE_SOCK_OPS = 0xd
BPF_PROG_TYPE_SK_SKB = 0xe
BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
BPF_PROG_TYPE_SK_MSG = 0x10
BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
BPF_PROG_TYPE_LIRC_MODE2 = 0x14
BPF_PROG_TYPE_SK_REUSEPORT = 0x15
BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
BPF_CGROUP_INET_INGRESS = 0x0
BPF_CGROUP_INET_EGRESS = 0x1
BPF_CGROUP_INET_SOCK_CREATE = 0x2
BPF_CGROUP_SOCK_OPS = 0x3
BPF_SK_SKB_STREAM_PARSER = 0x4
BPF_SK_SKB_STREAM_VERDICT = 0x5
BPF_CGROUP_DEVICE = 0x6
BPF_SK_MSG_VERDICT = 0x7
BPF_CGROUP_INET4_BIND = 0x8
BPF_CGROUP_INET6_BIND = 0x9
BPF_CGROUP_INET4_CONNECT = 0xa
BPF_CGROUP_INET6_CONNECT = 0xb
BPF_CGROUP_INET4_POST_BIND = 0xc
BPF_CGROUP_INET6_POST_BIND = 0xd
BPF_CGROUP_UDP4_SENDMSG = 0xe
BPF_CGROUP_UDP6_SENDMSG = 0xf
BPF_LIRC_MODE2 = 0x10
BPF_FLOW_DISSECTOR = 0x11
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
BPF_STACK_BUILD_ID_IP = 0x2
BPF_ADJ_ROOM_NET = 0x0
BPF_HDR_START_MAC = 0x0
BPF_HDR_START_NET = 0x1
BPF_LWT_ENCAP_SEG6 = 0x0
BPF_LWT_ENCAP_SEG6_INLINE = 0x1
BPF_OK = 0x0
BPF_DROP = 0x2
BPF_REDIRECT = 0x7
BPF_SOCK_OPS_VOID = 0x0
BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
BPF_SOCK_OPS_RWND_INIT = 0x2
BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
BPF_SOCK_OPS_NEEDS_ECN = 0x6
BPF_SOCK_OPS_BASE_RTT = 0x7
BPF_SOCK_OPS_RTO_CB = 0x8
BPF_SOCK_OPS_RETRANS_CB = 0x9
BPF_SOCK_OPS_STATE_CB = 0xa
BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
BPF_TCP_ESTABLISHED = 0x1
BPF_TCP_SYN_SENT = 0x2
BPF_TCP_SYN_RECV = 0x3
BPF_TCP_FIN_WAIT1 = 0x4
BPF_TCP_FIN_WAIT2 = 0x5
BPF_TCP_TIME_WAIT = 0x6
BPF_TCP_CLOSE = 0x7
BPF_TCP_CLOSE_WAIT = 0x8
BPF_TCP_LAST_ACK = 0x9
BPF_TCP_LISTEN = 0xa
BPF_TCP_CLOSING = 0xb
BPF_TCP_NEW_SYN_RECV = 0xc
BPF_TCP_MAX_STATES = 0xd
BPF_FIB_LKUP_RET_SUCCESS = 0x0
BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
BPF_FIB_LKUP_RET_PROHIBIT = 0x3
BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
BPF_FD_TYPE_TRACEPOINT = 0x1
BPF_FD_TYPE_KPROBE = 0x2
BPF_FD_TYPE_KRETPROBE = 0x3
BPF_FD_TYPE_UPROBE = 0x4
BPF_FD_TYPE_URETPROBE = 0x5
)
SMITH
WILSON
BROWN
WILLIAMS
TAYLOR
JONES
ANDERSON
THOMPSON
WALKER
LEE
================
Duktape releases
================
Release information can be found at https://duktape.org/download.html.
Raw release metadata is maintained in https://github.com/svaarala/duktape/tree/master/releases.
#import "GPUImageParallelCoordinateLineTransformFilter.h"
NSString *const kGPUImageHoughAccumulationVertexShaderString = SHADER_STRING
(
attribute vec4 position;
void main()
{
gl_Position = position;
}
);
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
NSString *const kGPUImageHoughAccumulationFragmentShaderString = SHADER_STRING
(
const lowp float scalingFactor = 1.0 / 256.0;
void main()
{
gl_FragColor = vec4(0.004, 0.004, 0.004, 1.0);
}
);
// highp - 16-bit, floating point range: -2^62 to 2^62, integer range: -2^16 to 2^16
// NOTE: See below for where I'm tacking on the required extension as a prefix
NSString *const kGPUImageHoughAccumulationFBOReadFragmentShaderString = SHADER_STRING
(
const lowp float scalingFactor = 0.004;
// const lowp float scalingFactor = 0.1;
void main()
{
mediump vec4 fragmentData = gl_LastFragData[0];
fragmentData.r = fragmentData.r + scalingFactor;
fragmentData.g = scalingFactor * floor(fragmentData.r) + fragmentData.g;
fragmentData.b = scalingFactor * floor(fragmentData.g) + fragmentData.b;
fragmentData.a = scalingFactor * floor(fragmentData.b) + fragmentData.a;
fragmentData = fract(fragmentData);
gl_FragColor = vec4(fragmentData.rgb, 1.0);
}
);
#else
NSString *const kGPUImageHoughAccumulationFragmentShaderString = SHADER_STRING
(
const float scalingFactor = 1.0 / 256.0;
void main()
{
gl_FragColor = vec4(0.004, 0.004, 0.004, 1.0);
}
);
NSString *const kGPUImageHoughAccumulationFBOReadFragmentShaderString = SHADER_STRING
(
const float scalingFactor = 1.0 / 256.0;
void main()
{
// gl_FragColor = vec4(scalingFactor, scalingFactor, scalingFactor, 1.0);
gl_FragColor = vec4(0.004, 0.004, 0.004, 1.0);
}
);
#endif
@interface GPUImageParallelCoordinateLineTransformFilter()
// Rendering
- (void)generateLineCoordinates;
@end
@implementation GPUImageParallelCoordinateLineTransformFilter
#pragma mark -
#pragma mark Initialization and teardown
- (id)init;
{
NSString *fragmentShaderToUse = nil;
if ([GPUImageContext deviceSupportsFramebufferReads])
{
fragmentShaderToUse = [NSString stringWithFormat:@"#extension GL_EXT_shader_framebuffer_fetch : require\n %@",kGPUImageHoughAccumulationFBOReadFragmentShaderString];
}
else
{
fragmentShaderToUse = kGPUImageHoughAccumulationFragmentShaderString;
}
if (!(self = [super initWithVertexShaderFromString:kGPUImageHoughAccumulationVertexShaderString fragmentShaderFromString:fragmentShaderToUse]))
{
return nil;
}
return self;
}
// TODO: have this be regenerated on change of image size
- (void)dealloc;
{
free(rawImagePixels);
free(lineCoordinates);
}
- (void)initializeAttributes;
{
[filterProgram addAttribute:@"position"];
}
#pragma mark -
#pragma mark Rendering
#define MAXLINESCALINGFACTOR 4
- (void)generateLineCoordinates;
{
unsigned int imageByteSize = inputTextureSize.width * inputTextureSize.height * 4;
rawImagePixels = (GLubyte *)malloc(imageByteSize);
maxLinePairsToRender = (inputTextureSize.width * inputTextureSize.height) / MAXLINESCALINGFACTOR;
lineCoordinates = calloc(maxLinePairsToRender * 8, sizeof(GLfloat));
}
- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex;
{
if (lineCoordinates == NULL)
{
[self generateLineCoordinates];
}
[self renderToTextureWithVertices:NULL textureCoordinates:NULL];
[self informTargetsAboutNewFrameAtTime:frameTime];
}
- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates;
{
// we need a normal color texture for this filter
NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture format for this filter must be GL_RGBA.");
NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE.");
if (self.preventRendering)
{
[firstInputFramebuffer unlock];
return;
}
// Grab the edge points from the previous frame and create the parallel coordinate lines for them
// This would be a great place to have a working histogram pyramid implementation
[GPUImageContext useImageProcessingContext];
[firstInputFramebuffer activateFramebuffer];
glFinish();
glReadPixels(0, 0, inputTextureSize.width, inputTextureSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels);
CGFloat xAspectMultiplier = 1.0, yAspectMultiplier = 1.0;
// if (inputTextureSize.width > inputTextureSize.height)
// {
// yAspectMultiplier = inputTextureSize.height / inputTextureSize.width;
// }
// else
// {
// xAspectMultiplier = inputTextureSize.width / inputTextureSize.height;
// }
// CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent();
unsigned int imageByteSize = inputTextureSize.width * inputTextureSize.height * 4;
unsigned int imageWidth = inputTextureSize.width * 4;
linePairsToRender = 0;
unsigned int currentByte = 0;
unsigned int lineStorageIndex = 0;
unsigned int maxLineStorageIndex = maxLinePairsToRender * 8 - 8;
GLfloat minY = 100, maxY = -100, minX = 100, maxX = -100;
while (currentByte < imageByteSize)
{
GLubyte colorByte = rawImagePixels[currentByte];
if (colorByte > 0)
{
unsigned int xCoordinate = currentByte % imageWidth;
unsigned int yCoordinate = currentByte / imageWidth;
CGFloat normalizedXCoordinate = (-1.0 + 2.0 * (CGFloat)(xCoordinate / 4) / inputTextureSize.width) * xAspectMultiplier;
CGFloat normalizedYCoordinate = (-1.0 + 2.0 * (CGFloat)(yCoordinate) / inputTextureSize.height) * yAspectMultiplier;
minY = MIN(minY, normalizedYCoordinate);
maxY = MAX(maxY, normalizedYCoordinate);
minX = MIN(minX, normalizedXCoordinate);
maxX = MAX(maxX, normalizedXCoordinate);
// NSLog(@"Parallel line coordinates: (%f, %f) - (%f, %f) - (%f, %f)", -1.0, -normalizedYCoordinate, 0.0, normalizedXCoordinate, 1.0, normalizedYCoordinate);
// T space coordinates, (-d, -y) to (0, x)
lineCoordinates[lineStorageIndex++] = -1.0;
lineCoordinates[lineStorageIndex++] = -normalizedYCoordinate;
lineCoordinates[lineStorageIndex++] = 0.0;
lineCoordinates[lineStorageIndex++] = normalizedXCoordinate;
// S space coordinates, (0, x) to (d, y)
lineCoordinates[lineStorageIndex++] = 0.0;
lineCoordinates[lineStorageIndex++] = normalizedXCoordinate;
lineCoordinates[lineStorageIndex++] = 1.0;
lineCoordinates[lineStorageIndex++] = normalizedYCoordinate;
linePairsToRender++;
linePairsToRender = MIN(linePairsToRender, maxLinePairsToRender);
lineStorageIndex = MIN(lineStorageIndex, maxLineStorageIndex);
}
currentByte += 8;
}
// NSLog(@"Line pairs to render: %d out of max: %d", linePairsToRender, maxLinePairsToRender);
// CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime);
// NSLog(@"Line generation processing time : %f ms", 1000.0 * currentFrameTime);
outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO];
[outputFramebuffer activateFramebuffer];
if (usingNextFrameForImageCapture)
{
[outputFramebuffer lock];
}
[GPUImageContext setActiveShaderProgram:filterProgram];
[self setUniformsForProgramAtIndex:0];
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
if (![GPUImageContext deviceSupportsFramebufferReads])
{
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
glEnable(GL_BLEND);
}
else
{
}
glLineWidth(1);
glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, lineCoordinates);
glDrawArrays(GL_LINES, 0, (linePairsToRender * 4));
if (![GPUImageContext deviceSupportsFramebufferReads])
{
glDisable(GL_BLEND);
}
[firstInputFramebuffer unlock];
if (usingNextFrameForImageCapture)
{
dispatch_semaphore_signal(imageCaptureSemaphore);
}
}
@end
# -*- coding: utf-8 -*-
"""
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
"""
For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_
.. versionadded:: 2.1
"""
name = 'TrafficScript'
aliases = ['rts','trafficscript']
filenames = ['*.rts']
tokens = {
'root' : [
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"', String, 'escapable-string'),
(r'(0x[0-9a-fA-F]+|\d+)', Number),
(r'\d+\.\d+', Number.Float),
(r'\$[a-zA-Z](\w|_)*', Name.Variable),
(r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
(r'[a-zA-Z][\w.]*', Name.Function),
(r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
(r'(>=|<=|==|!=|'
r'&&|\|\||'
r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
r'>>|<<|'
r'\+\+|--|=>)', Operator),
(r'[ \t\r]+', Text),
(r'#[^\n]*', Comment),
],
'escapable-string' : [
(r'\\[tsn]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
}
# Type Compatibility Based on the Kind of Typing
Every time a value is assigned, the compiler checks it for compatibility with the expected type. Like a bouncer working the door of a nightclub, the compiler decides whether or not to let a given value through. Such decisions are made according to rules that depend not only on the club (the programming language) but also on the kind of event being held. This chapter and the next will help us understand the logic the compiler relies on when determining compatibility. But first, let's derive the rule of compatibility itself.
## Type Compatibility
_Type compatibility_ is a set of rules by which a program, analyzing two types, decides whether one type can substitute for the other without breaking the execution of the program. Simply put, type compatibility is the mechanism by which types are matched against each other.
There are several basic mechanisms, and which one applies depends on the situation in which the types need to be matched. One such mechanism consists of the set of rules that make up the notion of typing. Out of the existing multitude of rules, several groups can be singled out, and they form three kinds of typing: _nominative_, _structural_, and _duck_ typing.
To make the differences between them more apparent, all three will be examined using a single example, whose diagram is shown below.

## Nominative Typing
_Nominative typing_ establishes type compatibility based on type identifiers (references). Simply put, when checking compatibility the compiler examines the type hierarchy for inheritance and interface implementation. That is, type `B` is compatible with type `A` only when `A` is one of its ancestors (`extends`). Moreover, type `B` is compatible with an interface `IA` only if `B`, or one of its ancestors, implements it explicitly (`implements`).

As the image above suggests, when checking the compatibility of type `Bird` with type `Animal`, the compiler walks the tree looking for a reference to the type `Animal`; having found it, it concludes that the types are compatible. The same process is required to establish the compatibility of type `Bird` with the interface type `INameable`. The complete compatibility picture is shown in the diagram below.

Nominative typing is found exclusively in statically typed languages.
Languages with nominative typing include _Java_, _C#_, and others.
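Although TypeScript compares types structurally, a nominative-style check can be approximated in it, because private class members are compared by their declaration site. The sketch below is only an illustration: the `Animal` name is taken from the diagram, while the `__brand` field and the `Robot` class are hypothetical.

```ts
// A minimal sketch of a nominative-style check in TypeScript.
// Private members are compared by declaration site, so two
// structurally identical classes remain distinct types.
class Animal {
  private __brand = "Animal"; // hypothetical brand field
  constructor(public name: string) {}
}

class Robot {
  private __brand = "Robot"; // same shape, different identity
  constructor(public name: string) {}
}

const pet: Animal = new Animal("Rex"); // OK
// const impostor: Animal = new Robot("R2-D2");
// ^ compile error: separate declarations of a private property
```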
## Structural Typing
_Structural typing_ is a principle that determines type compatibility based not on the inheritance hierarchy or on explicit interface implementation, but on the types' descriptions.

The compiler considers types compatible if the type being matched has all the traits of the type it is matched against. To be compatible, the matched type must have the same keys, with the same (identical or compatible) types, as the type against which the matching is performed. The complete compatibility picture for structural typing is shown in the diagram below.

Structural typing is found exclusively in languages with explicit typing (see the chapter ["A Digression on Typing - Strong and Weak Typing"](../007.(Экскурс%20в%20типизацию)%20Сильная%20и%20слабая%20типизация)).
Languages with structural typing include _TypeScript_, _Scala_, and the like.
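A minimal TypeScript sketch of a structural check follows. The `INameable` and `Bird` names are borrowed from the diagram; their member lists are assumed for illustration.

```ts
// Structural compatibility: Bird never mentions INameable, yet any
// Bird is assignable to it, because its shape contains a matching
// `name: string` member.
interface INameable {
  name: string;
}

class Bird {
  constructor(public name: string, public wingspan: number) {}
}

const labelled: INameable = new Bird("sparrow", 0.25); // OK, shape matches
const literal: INameable = { name: "anonymous" };      // OK as well
```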
## Duck Typing
_Duck typing_, like structural typing, is a principle that determines type compatibility based not on the inheritance hierarchy or on explicit interface implementation, but on the types' descriptions. Duck typing is no different from structural typing, except that it is found only in languages with _dynamic binding_ (dynamic typing).
The term "duck typing" comes from the English expression _duck test_, which in the original reads:
_If it looks like a duck, swims like a duck, and quacks like a duck, then it probably is a duck_.
Since duck typing does not differ from structural typing, the diagrams from the previous section, the one devoted to structural typing, can serve as the compatibility examples.
Languages with duck typing include _Python_, _JavaScript_, and others.
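To keep all the sketches in one language, here is a TypeScript rendering of how the duck test plays out at run time in a dynamically typed setting such as JavaScript. The `quack` capability is a hypothetical illustration.

```ts
// Duck typing happens at run time: we only ask whether the value
// currently has the capability we need, not what its declared type is.
function tryQuack(candidate: unknown): string {
  const maybeDuck = candidate as { quack?: () => string };
  if (maybeDuck != null && typeof maybeDuck.quack === "function") {
    return maybeDuck.quack(); // quacks like a duck, so treat it as one
  }
  return "not a duck";
}

console.log(tryQuack({ quack: () => "quack!" })); // "quack!"
console.log(tryQuack({ name: "Bird" }));          // "not a duck"
```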
#import "GPUImageSoftEleganceFilter.h"
#import "GPUImagePicture.h"
#import "GPUImageLookupFilter.h"
#import "GPUImageGaussianBlurFilter.h"
#import "GPUImageAlphaBlendFilter.h"
@implementation GPUImageSoftEleganceFilter
- (id)init;
{
if (!(self = [super init]))
{
return nil;
}
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
UIImage *image1 = [UIImage imageNamed:@"lookup_soft_elegance_1.png"];
UIImage *image2 = [UIImage imageNamed:@"lookup_soft_elegance_2.png"];
#else
NSImage *image1 = [NSImage imageNamed:@"lookup_soft_elegance_1.png"];
NSImage *image2 = [NSImage imageNamed:@"lookup_soft_elegance_2.png"];
#endif
NSAssert(image1 && image2,
@"To use GPUImageSoftEleganceFilter you need to add lookup_soft_elegance_1.png and lookup_soft_elegance_2.png from GPUImage/framework/Resources to your application bundle.");
lookupImageSource1 = [[GPUImagePicture alloc] initWithImage:image1];
GPUImageLookupFilter *lookupFilter1 = [[GPUImageLookupFilter alloc] init];
[self addFilter:lookupFilter1];
[lookupImageSource1 addTarget:lookupFilter1 atTextureLocation:1];
[lookupImageSource1 processImage];
GPUImageGaussianBlurFilter *gaussianBlur = [[GPUImageGaussianBlurFilter alloc] init];
gaussianBlur.blurRadiusInPixels = 10.0;
[lookupFilter1 addTarget:gaussianBlur];
[self addFilter:gaussianBlur];
GPUImageAlphaBlendFilter *alphaBlend = [[GPUImageAlphaBlendFilter alloc] init];
alphaBlend.mix = 0.14;
[lookupFilter1 addTarget:alphaBlend];
[gaussianBlur addTarget:alphaBlend];
[self addFilter:alphaBlend];
lookupImageSource2 = [[GPUImagePicture alloc] initWithImage:image2];
GPUImageLookupFilter *lookupFilter2 = [[GPUImageLookupFilter alloc] init];
[alphaBlend addTarget:lookupFilter2];
[lookupImageSource2 addTarget:lookupFilter2];
[lookupImageSource2 processImage];
[self addFilter:lookupFilter2];
self.initialFilters = [NSArray arrayWithObjects:lookupFilter1, nil];
self.terminalFilter = lookupFilter2;
return self;
}
#pragma mark -
#pragma mark Accessors
@end
/*=========================================================================
ILBM.HPP
Author: Gary Liddon @ Watford
Created: 30/03/92
Purpose: Crappy ilbm reader
Copyright (c) 1992 - 1997 Gary Liddon
===========================================================================*/
/* -----------------------------------------------------------
Includes
-------- */
/* Std Lib
------- */
#include <string.h>
/* Glib
---- */
#include "gutils.h"
/* Local
----- */
#include "ilbm.hpp"
#include "iff.hpp"
/* Graphics
-------- */
/*----------------------------------------------------------------------
Typedefs && Defines
------------------- */
/*----------------------------------------------------------------------
Structure definitions
---------------------
/*----------------------------------------------------------------------
Positional Vars
--------------- */
/*----------------------------------------------------------------------
Function Prototypes
------------------- */
/*----------------------------------------------------------------------
Vars
---- */
/*----------------------------------------------------------------------
Data
---- */
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
nilbm::nilbm(char const *name) : niff(name,ILBM)
{
if (err_no==NOT_FORM)
{
if (open(name,PBM))
{
err_no=NO_ERROR;
}
}
if (!err_no)
starta();
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
nilbm::nilbm(FILE *fp) : niff(fp,ILBM)
{
if (err_no==PASSED_ERR)
if (mount_form(PBM))
err_no=NO_ERROR;
starta();
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
nilbm::~nilbm()
{
DiscardCmap();
DiscardBmap();
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
void nilbm::starta()
{
cmap=NULL;
Bmap=NULL;
if (err_no)
return;
if ((!file_opened) && (!form_mounted))
return;
GetBmHeadFromDisk();
if (goto_hunk(CMAP))
cmap=get_hunk();
Bmap=GetBmapFromDisk();
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
U8 *nilbm::TakeBmap()
{
U8* Retp=Bmap;
Bmap=NULL;
return Retp;
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
U8 *nilbm::TakeCmap()
{
U8* Retp=cmap;
cmap=NULL;
return Retp;
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
void nilbm::DiscardBmap()
{
if (Bmap)
delete [] Bmap; // Bmap comes from new U8[], so use the array form
Bmap=NULL;
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
void nilbm::DiscardCmap()
{
if (cmap)
delete cmap;
cmap=NULL;
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
void nilbm::GetBmHeadFromDisk()
{
if (goto_hunk(BMHD))
{
GetIntelLong();
GetIntelLong();
w=GetIntelWord();
h=GetIntelWord();
GetIntelWord();
GetIntelWord();
planes=fgetc(fp);
fgetc(fp);
comp=fgetc(fp);
rh = h;
}
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
U8 *nilbm::GetBmapFromDisk()
{
U8 *buffa=NULL;
U8 *Ptr;
if (file_opened)
{
if (goto_hunk(BODY))
{
long temp;
fread(&temp,1,sizeof(ULONG),fp);
fread(&temp,1,sizeof(ULONG),fp);
if (!(buffa=new U8[w*h]))
Error(ERR_FATAL,"Allocating ILBM body (%ld)",(long)w*(long)h);
U8 *line_buffer;
int NormWidth=((w+7)&(0xfff8));
int bwidth=NormWidth/8;
if (!(line_buffer=new U8[NormWidth*4])) // MA: increased for safety in case of bad encoding
return NULL;
else
{
int z,dest; // Init source count
for (int line=0;line<(h);line++)
{
dest=0; // Destination count
memset(line_buffer,0,NormWidth); // Clear out buffer
if (form_name==PBM)
{
if (comp)
{
dest=0;
while (dest < w)
{
s8 val = fgetc(fp);
if (val<0)
{
val=(0-val)+1;
U8 ch=fgetc(fp);
while (val--)
{
line_buffer[dest]=ch;
dest++;
}
}
else if (val>=0)
{
val++;
while (val--)
{
line_buffer[dest]=fgetc(fp);
dest++;
}
}
}
}
else
{
int WidthToRead = GU_AlignVal(w, 2);
fread(line_buffer,WidthToRead,1,fp);
}
}
else
{
for (int p=0;p<planes;p++)
{
dest=0;
if (comp)
{
while (dest < bwidth)
{
S8 val = fgetc(fp);
Ptr=&line_buffer[dest*8];
if (val<0)
{
val=(0-val)+1;
U8 ch=fgetc(fp);
while (val--)
{
for (z=7;z>=0;z--)
*Ptr++ |= ((ch>>(z))&1)<<p;
dest++;
}
}
else if (val>=0)
{
val++;
while (val--)
{
U8 ch=fgetc(fp);
for (z=7;z>=0;z--)
*Ptr++ |= ((ch>>(z))&1)<<p;
dest++;
}
}
}
}
else
{
for (int x=0;x<bwidth;x++)
{
Ptr=&line_buffer[dest*8];
U8 ch=fgetc(fp);
for (z=7;z>=0;z--)
*Ptr++ |= ((ch>>(z))&1)<<p;
dest++;
}
}
}
}
memcpy(&buffa[line*w],line_buffer,w);
}
}
delete [] line_buffer; // allocated with new[], so use the array form of delete
}
}
return buffa;
}
/*----------------------------------------------------------------------
Function:
Purpose:
Params:
Returns:
---------------------------------------------------------------------- */
void nilbm::SavePbm(char *Name,U8 *Palette,U8 *Body,int W,int H)
{
if (Palette && Body)
{
IFF_FILE outfile;
if (outfile.write(Name) != 0)
{
int i, j;
outfile.form("PBM ");
outfile.chunk("BMHD");
outfile.writeword(W);
outfile.writeword(H);
outfile.writeword(0); // xpos
outfile.writeword(0); // ypos
outfile.writechar(8); // num-planes
outfile.writechar(mskNone); // masking
outfile.writechar(cmpNone); // compression
outfile.writechar(0); // Pad byte
outfile.writeword(0); // Transparent colour
outfile.writechar(1); // xAspect
outfile.writechar(1); // yAspect
outfile.writeword(320);
outfile.writeword(200);
outfile.endchunk();
if (256) // always true; a fixed 256-colour palette appears to be assumed
{
outfile.chunk("CMAP");
for (i = 0; i < 3*256; i++)
outfile.writechar(Palette[i]);
outfile.endchunk();
}
U8 *ptr;
outfile.chunk("BODY");
for (i = 0; i < H; i++)
{
// Save out a planar scanline
ptr = &Body[(long) i * (long) W];
j = W;
while (j--)
outfile.writechar(*ptr++);
if (W&1)
{
int l = W&1;
for (int k=0; k<l; k++)
outfile.writechar(0xfe);
}
}
outfile.endchunk();
outfile.endform();
outfile.close();
}
else
Error(ERR_FATAL,"Error trying to write %s",Name);
}
else
Error(ERR_FATAL,"Image not defined so can't write %s",Name);
}
/*===========================================================================
end */
'use strict';
var Type = require('../type');
var YAML_DATE_REGEXP = new RegExp(
'^([0-9][0-9][0-9][0-9])' + // [1] year
'-([0-9][0-9])' + // [2] month
'-([0-9][0-9])$'); // [3] day
var YAML_TIMESTAMP_REGEXP = new RegExp(
'^([0-9][0-9][0-9][0-9])' + // [1] year
'-([0-9][0-9]?)' + // [2] month
'-([0-9][0-9]?)' + // [3] day
'(?:[Tt]|[ \\t]+)' + // ...
'([0-9][0-9]?)' + // [4] hour
':([0-9][0-9])' + // [5] minute
':([0-9][0-9])' + // [6] second
'(?:\\.([0-9]*))?' + // [7] fraction
'(?:[ \\t]*(Z|([-+])([0-9][0-9]?)' + // [8] tz [9] tz_sign [10] tz_hour
'(?::([0-9][0-9]))?))?$'); // [11] tz_minute
function resolveYamlTimestamp(data) {
if (data === null) return false;
if (YAML_DATE_REGEXP.exec(data) !== null) return true;
if (YAML_TIMESTAMP_REGEXP.exec(data) !== null) return true;
return false;
}
function constructYamlTimestamp(data) {
var match, year, month, day, hour, minute, second, fraction = 0,
delta = null, tz_hour, tz_minute, date;
match = YAML_DATE_REGEXP.exec(data);
if (match === null) match = YAML_TIMESTAMP_REGEXP.exec(data);
if (match === null) throw new Error('Date resolve error');
// match: [1] year [2] month [3] day
year = +(match[1]);
month = +(match[2]) - 1; // JS month starts with 0
day = +(match[3]);
if (!match[4]) { // no hour
return new Date(Date.UTC(year, month, day));
}
// match: [4] hour [5] minute [6] second [7] fraction
hour = +(match[4]);
minute = +(match[5]);
second = +(match[6]);
if (match[7]) {
fraction = match[7].slice(0, 3);
while (fraction.length < 3) { // milliseconds
fraction += '0';
}
fraction = +fraction;
}
// match: [8] tz [9] tz_sign [10] tz_hour [11] tz_minute
if (match[9]) {
tz_hour = +(match[10]);
tz_minute = +(match[11] || 0);
delta = (tz_hour * 60 + tz_minute) * 60000; // delta in milliseconds
if (match[9] === '-') delta = -delta;
}
date = new Date(Date.UTC(year, month, day, hour, minute, second, fraction));
if (delta) date.setTime(date.getTime() - delta);
return date;
}
function representYamlTimestamp(object /*, style*/) {
return object.toISOString();
}
module.exports = new Type('tag:yaml.org,2002:timestamp', {
kind: 'scalar',
resolve: resolveYamlTimestamp,
construct: constructYamlTimestamp,
instanceOf: Date,
represent: representYamlTimestamp
});
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package com.android.systemui.shared.system;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* Offloads work from other threads by running it in a background thread.
*/
public class BackgroundExecutor {
private static final BackgroundExecutor sInstance = new BackgroundExecutor();
private final ExecutorService mExecutorService = Executors.newFixedThreadPool(2);
/**
* @return the static instance of the background executor.
*/
public static BackgroundExecutor get() {
return sInstance;
}
/**
* Runs the given {@param callable} on one of the background executor threads.
*/
public <T> Future<T> submit(Callable<T> callable) {
return mExecutorService.submit(callable);
}
/**
* Runs the given {@param runnable} on one of the background executor threads.
*/
public Future<?> submit(Runnable runnable) {
return mExecutorService.submit(runnable);
}
/**
* Runs the given {@param runnable} on one of the background executor threads. Return
* {@param result} when the future is resolved.
*/
public <T> Future<T> submit(Runnable runnable, T result) {
return mExecutorService.submit(runnable, result);
}
}
/****************************************************************************
**
** Copyright (C) 2015 The Qt Company Ltd.
** Contact: http://www.qt.io/licensing/
**
** This file is part of the QtGui module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see http://www.qt.io/terms-conditions. For further
** information use the contact form at http://www.qt.io/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 or version 3 as published by the Free
** Software Foundation and appearing in the file LICENSE.LGPLv21 and
** LICENSE.LGPLv3 included in the packaging of this file. Please review the
** following information to ensure the GNU Lesser General Public License
** requirements will be met: https://www.gnu.org/licenses/lgpl.html and
** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** As a special exception, The Qt Company gives you certain additional
** rights. These rights are described in The Qt Company LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/
/*
A simple model that uses a QStringList as its data source.
*/
#include "qstringlistmodel.h"
#ifndef QT_NO_STRINGLISTMODEL
QT_BEGIN_NAMESPACE
/*!
\class QStringListModel
\brief The QStringListModel class provides a model that supplies strings to views.
\ingroup model-view
QStringListModel is an editable model that can be used for simple
cases where you need to display a number of strings in a view
widget, such as a QListView or a QComboBox.
The model provides all the standard functions of an editable
model, representing the data in the string list as a model with
one column and a number of rows equal to the number of items in
the list.
Model indexes corresponding to items are obtained with the
\l{QAbstractListModel::index()}{index()} function, and item flags
are obtained with flags(). Item data is read with the data()
function and written with setData(). The number of rows (and
number of items in the string list) can be found with the
rowCount() function.
The model can be constructed with an existing string list, or
strings can be set later with the setStringList() convenience
function. Strings can also be inserted in the usual way with the
insertRows() function, and removed with removeRows(). The contents
of the string list can be retrieved with the stringList()
convenience function.
An example usage of QStringListModel:
\snippet doc/src/snippets/qstringlistmodel/main.cpp 0
\sa QAbstractListModel, QAbstractItemModel, {Model Classes}
*/
/*!
Constructs a string list model with the given \a parent.
*/
QStringListModel::QStringListModel(QObject *parent)
: QAbstractListModel(parent)
{
}
/*!
Constructs a string list model containing the specified \a strings
with the given \a parent.
*/
QStringListModel::QStringListModel(const QStringList &strings, QObject *parent)
: QAbstractListModel(parent), lst(strings)
{
}
/*!
Returns the number of rows in the model. This value corresponds to the
number of items in the model's internal string list.
In most models, the optional \a parent argument is used to specify
the parent of the rows to be counted. Because this is a list, the
result will always be 0 if a valid parent is specified.
\sa insertRows(), removeRows(), QAbstractItemModel::rowCount()
*/
int QStringListModel::rowCount(const QModelIndex &parent) const
{
if (parent.isValid())
return 0;
return lst.count();
}
/*!
Returns data for the specified \a role, from the item with the
given \a index.
If the view requests an invalid index, an invalid variant is returned.
\sa setData()
*/
QVariant QStringListModel::data(const QModelIndex &index, int role) const
{
if (index.row() < 0 || index.row() >= lst.size())
return QVariant();
if (role == Qt::DisplayRole || role == Qt::EditRole)
return lst.at(index.row());
return QVariant();
}
/*!
Returns the flags for the item with the given \a index.
Valid items are enabled, selectable, editable, drag enabled and drop enabled.
\sa QAbstractItemModel::flags()
*/
Qt::ItemFlags QStringListModel::flags(const QModelIndex &index) const
{
if (!index.isValid())
return QAbstractItemModel::flags(index) | Qt::ItemIsDropEnabled;
return QAbstractItemModel::flags(index) | Qt::ItemIsEditable | Qt::ItemIsDragEnabled | Qt::ItemIsDropEnabled;
}
/*!
Sets the data for the specified \a role in the item with the given
\a index in the model, to the provided \a value.
The dataChanged() signal is emitted if the item is changed.
\sa Qt::ItemDataRole, data()
*/
bool QStringListModel::setData(const QModelIndex &index, const QVariant &value, int role)
{
if (index.row() >= 0 && index.row() < lst.size()
&& (role == Qt::EditRole || role == Qt::DisplayRole)) {
lst.replace(index.row(), value.toString());
emit dataChanged(index, index);
return true;
}
return false;
}
/*!
Inserts \a count rows into the model, beginning at the given \a row.
The \a parent index of the rows is optional and is only used for
consistency with QAbstractItemModel. By default, a null index is
specified, indicating that the rows are inserted in the top level of
the model.
\sa QAbstractItemModel::insertRows()
*/
bool QStringListModel::insertRows(int row, int count, const QModelIndex &parent)
{
if (count < 1 || row < 0 || row > rowCount(parent))
return false;
beginInsertRows(QModelIndex(), row, row + count - 1);
for (int r = 0; r < count; ++r)
lst.insert(row, QString());
endInsertRows();
return true;
}
/*!
Removes \a count rows from the model, beginning at the given \a row.
The \a parent index of the rows is optional and is only used for
consistency with QAbstractItemModel. By default, a null index is
specified, indicating that the rows are removed in the top level of
the model.
\sa QAbstractItemModel::removeRows()
*/
bool QStringListModel::removeRows(int row, int count, const QModelIndex &parent)
{
if (count <= 0 || row < 0 || (row + count) > rowCount(parent))
return false;
beginRemoveRows(QModelIndex(), row, row + count - 1);
for (int r = 0; r < count; ++r)
lst.removeAt(row);
endRemoveRows();
return true;
}
static bool ascendingLessThan(const QPair<QString, int> &s1, const QPair<QString, int> &s2)
{
return s1.first < s2.first;
}
static bool descendingLessThan(const QPair<QString, int> &s1, const QPair<QString, int> &s2)
{
return s1.first > s2.first;
}
/*!
\reimp
*/
void QStringListModel::sort(int, Qt::SortOrder order)
{
emit layoutAboutToBeChanged();
QList<QPair<QString, int> > list;
for (int i = 0; i < lst.count(); ++i)
list.append(QPair<QString, int>(lst.at(i), i));
if (order == Qt::AscendingOrder)
qSort(list.begin(), list.end(), ascendingLessThan);
else
qSort(list.begin(), list.end(), descendingLessThan);
lst.clear();
QVector<int> forwarding(list.count());
for (int i = 0; i < list.count(); ++i) {
lst.append(list.at(i).first);
forwarding[list.at(i).second] = i;
}
QModelIndexList oldList = persistentIndexList();
QModelIndexList newList;
for (int i = 0; i < oldList.count(); ++i)
newList.append(index(forwarding.at(oldList.at(i).row()), 0));
changePersistentIndexList(oldList, newList);
emit layoutChanged();
}
/*!
Returns the string list used by the model to store data.
*/
QStringList QStringListModel::stringList() const
{
return lst;
}
/*!
Sets the model's internal string list to \a strings. The model will
notify any attached views that its underlying data has changed.
\sa dataChanged()
*/
void QStringListModel::setStringList(const QStringList &strings)
{
emit beginResetModel();
lst = strings;
emit endResetModel();
}
/*!
\reimp
*/
Qt::DropActions QStringListModel::supportedDropActions() const
{
return QAbstractItemModel::supportedDropActions() | Qt::MoveAction;
}
QT_END_NAMESPACE
#endif // QT_NO_STRINGLISTMODEL
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="9532" systemVersion="15G31" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES">
<dependencies>
<deployment identifier="iOS"/>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="9530"/>
</dependencies>
<objects>
<placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner"/>
<placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/>
<view contentMode="scaleToFill" id="iN0-l3-epB" customClass="CarouseView">
<rect key="frame" x="0.0" y="0.0" width="100" height="124"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<subviews>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="鱼塘|山东大学" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="xbN-tX-qqn">
<rect key="frame" x="0.0" y="103" width="100" height="21"/>
<constraints>
<constraint firstAttribute="height" constant="21" id="feh-3k-MlT"/>
</constraints>
<fontDescription key="fontDescription" type="system" pointSize="13"/>
<color key="textColor" red="0.1298714280128479" green="0.12068501859903336" blue="0.033356234431266785" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
<nil key="highlightedColor"/>
</label>
<view contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="wSC-t9-lt7" customClass="UIImageView">
<rect key="frame" x="0.0" y="0.0" width="100" height="100"/>
<color key="backgroundColor" red="0.65836408179999995" green="1" blue="0.3742346043" alpha="1" colorSpace="calibratedRGB"/>
</view>
</subviews>
<color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
<constraints>
<constraint firstItem="wSC-t9-lt7" firstAttribute="leading" secondItem="iN0-l3-epB" secondAttribute="leading" id="3pn-d2-Ger"/>
<constraint firstAttribute="trailing" secondItem="xbN-tX-qqn" secondAttribute="trailing" id="8lr-sG-uQl"/>
<constraint firstItem="wSC-t9-lt7" firstAttribute="top" secondItem="iN0-l3-epB" secondAttribute="top" id="Uhr-tC-omi"/>
<constraint firstItem="xbN-tX-qqn" firstAttribute="top" secondItem="wSC-t9-lt7" secondAttribute="bottom" constant="3" id="fjh-V4-aNO"/>
<constraint firstAttribute="bottom" secondItem="xbN-tX-qqn" secondAttribute="bottom" id="giS-mY-n2S"/>
<constraint firstAttribute="trailing" secondItem="wSC-t9-lt7" secondAttribute="trailing" id="qjj-2d-cKR"/>
<constraint firstItem="xbN-tX-qqn" firstAttribute="leading" secondItem="iN0-l3-epB" secondAttribute="leading" id="xhw-bK-szd"/>
</constraints>
<freeformSimulatedSizeMetrics key="simulatedDestinationMetrics"/>
<connections>
<outlet property="imageView" destination="wSC-t9-lt7" id="3Gd-S7-iSK"/>
</connections>
<point key="canvasLocation" x="224" y="106"/>
</view>
</objects>
</document>
#ifdef _WIN32
#include <windows.h>
#else
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <dlfcn.h>
#endif
#include "onnx/onnxifi_loader.h"
/* The ONNXIFI_LOADER_LOGGING macro enables/disables logging. It's OFF by default. */
#ifndef ONNXIFI_LOADER_LOGGING
#define ONNXIFI_LOADER_LOGGING 0
#endif
#if ONNXIFI_LOADER_LOGGING
#if defined(__ANDROID__)
#include <android/log.h>
/* Tag used for logging on Android */
#define ONNXIFI_LOADER_ANDROID_LOG_TAG "ONNXIFI-LOADER"
#else
#include <stdio.h>
#endif
#endif
#if defined(__APPLE__)
#define ONNXIFI_LIBRARY_NAME "libonnxifi.dylib"
#elif defined(_WIN32)
#define ONNXIFI_LIBRARY_NAME L"onnxifi.dll"
#else
#define ONNXIFI_LIBRARY_NAME "libonnxifi.so"
#endif
/* Order must match declaration order in onnxifi_library structure */
static const char onnxifi_function_names[] =
"onnxGetBackendIDs\0"
"onnxReleaseBackendID\0"
"onnxGetBackendInfo\0"
"onnxGetBackendCompatibility\0"
"onnxInitBackend\0"
"onnxReleaseBackend\0"
"onnxInitEvent\0"
"onnxSignalEvent\0"
"onnxGetEventState\0"
"onnxWaitEvent\0"
"onnxReleaseEvent\0"
"onnxInitGraph\0"
"onnxSetGraphIO\0"
"onnxRunGraph\0"
"onnxReleaseGraph\0";
int ONNXIFI_ABI onnxifi_load(
uint32_t flags,
#ifdef _WIN32
const wchar_t* path,
#else
const char* path,
#endif
struct onnxifi_library* onnx)
{
size_t i;
const char* function_name;
if (onnx == NULL) {
return 0;
}
#ifdef _WIN32
ZeroMemory(onnx, sizeof(struct onnxifi_library));
#else
memset(onnx, 0, sizeof(struct onnxifi_library));
#endif
if (!(flags & ONNXIFI_LOADER_FLAG_VERSION_1_0)) {
/* Unknown ONNXIFI version requested */
return 0;
}
if (path == NULL) {
path = ONNXIFI_LIBRARY_NAME;
}
#ifdef _WIN32
onnx->handle = (void*) LoadLibraryExW(path, NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
#else
/* Clear libdl error state */
dlerror();
onnx->handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
#endif
if (onnx->handle == NULL) {
#if ONNXIFI_LOADER_LOGGING
#if defined(__ANDROID__)
__android_log_print(
ANDROID_LOG_ERROR,
ONNXIFI_LOADER_ANDROID_LOG_TAG,
"failed to load %s: %s",
path, dlerror());
#elif defined(_WIN32)
fprintf(
stderr,
"Error: failed to load %S: error %u\n",
path, (unsigned long) GetLastError());
#else
fprintf(stderr, "Error: failed to load %s: %s\n",
path, dlerror());
#endif
#endif /* ONNXIFI_LOADER_LOGGING */
goto failed;
}
function_name = onnxifi_function_names;
for (i = 0; i < ONNXIFI_LOADER_FUNCTION_COUNT; i++) {
#ifdef _WIN32
onnx->functions[i] = GetProcAddress((HMODULE) onnx->handle, function_name);
#else
onnx->functions[i] = dlsym(onnx->handle, function_name);
#endif
if (onnx->functions[i] == NULL) {
#if ONNXIFI_LOADER_LOGGING
#if defined(__ANDROID__)
__android_log_print(
ANDROID_LOG_ERROR,
ONNXIFI_LOADER_ANDROID_LOG_TAG,
"failed to find function %s in %s: %s",
function_name,
ONNXIFI_LIBRARY_NAME,
dlerror());
#elif defined(_WIN32)
fprintf(
stderr,
"Error: failed to find function %s in %s: error %u\n",
function_name,
ONNXIFI_LIBRARY_NAME,
(unsigned long) GetLastError());
#else
fprintf(
stderr,
"Error: failed to find function %s in %s: %s\n",
function_name,
ONNXIFI_LIBRARY_NAME,
dlerror());
#endif
#endif /* ONNXIFI_LOADER_LOGGING */
goto failed;
}
#ifdef _WIN32
function_name += lstrlenA(function_name);
#else
function_name += strlen(function_name);
#endif
/* Skip null-terminator */
function_name += 1;
}
onnx->flags = flags & ONNXIFI_LOADER_FLAG_VERSION_MASK;
return 1;
failed:
onnxifi_unload(onnx);
return 0;
}
void ONNXIFI_ABI onnxifi_unload(struct onnxifi_library* onnx) {
if (onnx != NULL) {
if (onnx->handle != NULL) {
#ifdef _WIN32
if (FreeLibrary((HMODULE) onnx->handle) == FALSE) {
#if ONNXIFI_LOADER_LOGGING
fprintf(
stderr,
"Error: failed to unload library %s: error %u\n",
ONNXIFI_LIBRARY_NAME,
(unsigned long) GetLastError());
#endif /* ONNXIFI_LOADER_LOGGING */
}
#else /* !defined(_WIN32) */
/* Clear libdl error state */
dlerror();
if (dlclose(onnx->handle) != 0) {
#if ONNXIFI_LOADER_LOGGING
#if defined(__ANDROID__)
__android_log_print(
ANDROID_LOG_ERROR,
ONNXIFI_LOADER_ANDROID_LOG_TAG,
"failed to unload %s: %s",
ONNXIFI_LIBRARY_NAME,
dlerror());
#else
fprintf(
stderr,
"Error: failed to unload %s: %s\n",
ONNXIFI_LIBRARY_NAME,
dlerror());
#endif
#endif /* ONNXIFI_LOADER_LOGGING */
}
#endif /* !defined(_WIN32) */
}
#ifdef _WIN32
ZeroMemory(onnx, sizeof(struct onnxifi_library));
#else
memset(onnx, 0, sizeof(struct onnxifi_library));
#endif
}
}
//
// Copyright (c) 2010-2013 Yves Langisch. All rights reserved.
// http://cyberduck.ch/
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// Bug fixes, suggestions and comments should be sent to:
// [email protected]
//
using ch.cyberduck.core;
using ch.cyberduck.core.pool;
using ch.cyberduck.core.transfer;
namespace Ch.Cyberduck.Ui.Controller
{
public class UploadPromptController : TransferPromptController
{
public UploadPromptController(WindowController parent, Transfer transfer, SessionPool source, SessionPool destination)
: base(parent, transfer, source, destination)
{
TransferPromptModel = new UploadPromptModel(this, source, destination, Transfer);
}
protected override string TransferName
{
get { return "Upload"; }
}
}
}
import { Component } from '@angular/core';
import { Observable } from 'rxjs';
import { map } from 'rxjs/operators';
import { CustomerOrdersDataService } from '../services';
import { Customer } from '../model';
import { CustomerVmPlus } from './customer-vm-plus';
/**
* Master/Detail following the Container/Presenter pattern.
* Master: customer list of Customer ViewModels
* Detail: detail about a selected Customer ViewModel
*/
@Component({
selector: 'app-vm-plus-container',
styleUrls: ['../view-model.css'],
template: `
<!-- Notice that there are no more event bindings! -->
<button (click)="addCustomer()" class="btn btn-primary button-row">Add Customer</button>
<div *ngIf="vms$ | async as vms" class="row">
<!-- Customer List -->
<div class="col-md-2">
<app-vm-plus-customer-list [vms]="vms"></app-vm-plus-customer-list>
</div>
<!-- Customer Details -->
<div class="col-md-5">
<app-vm-plus-customer-details [vm]="selectedVm"></app-vm-plus-customer-details>
</div>
</div>
`
})
export class VmPlusContainerComponent {
vms$: Observable<CustomerVmPlus[]>;
selectedVm: CustomerVmPlus;
constructor(private dataService: CustomerOrdersDataService) {
this.createVm$();
}
/** Create observable of Customer ViewModels from cached customers */
createVm$() {
this.vms$ = this.dataService.customers$.pipe(
map(customers => customers.map(customer => this.toCustomerVmPlus(customer)))
);
}
addCustomer() {
this.selectedVm = this.toCustomerVmPlus();
}
cancel() {
this.selectedVm = null;
}
save(vm: CustomerVmPlus) {
this.selectedVm = null;
const customer = vm.toCustomer();
customer.id == null ? this.dataService.addCustomer(customer) : this.dataService.updateCustomer(customer);
}
selected(vm: CustomerVmPlus) {
this.selectedVm = vm.clone();
}
private toCustomerVmPlus(customer?: Customer) {
const vm = CustomerVmPlus.create(
customer,
// Bind to container's action callbacks, replacing presenter emitters.
// Question: should we do this just because we can?
this.cancel.bind(this),
this.save.bind(this),
this.selected.bind(this)
);
return vm;
}
}
package com.jakewharton.confundus.gradle
import com.google.common.truth.Truth.assertThat
import java.io.File
import org.gradle.testkit.runner.GradleRunner
import org.junit.Test
class ConfundusPluginTest {
private val fixturesDir = File("src/test/fixture")
private fun versionProperty() = "-PconfundusVersion=$confundusVersion"
@Test fun jvm() {
val fixtureDir = File(fixturesDir, "jvm")
val gradleRoot = File(fixtureDir, "gradle").also { it.mkdir() }
File("../gradle/wrapper").copyRecursively(File(gradleRoot, "wrapper"), true)
val result = GradleRunner.create()
.withProjectDir(fixtureDir)
.withArguments(
"clean", "compileKotlin", "compileTestKotlin", "--stacktrace", versionProperty()
)
.build()
assertThat(result.output).contains("BUILD SUCCESSFUL")
}
@Test fun jvmIr() {
val fixtureDir = File(fixturesDir, "jvm-ir")
val gradleRoot = File(fixtureDir, "gradle").also { it.mkdir() }
File("../gradle/wrapper").copyRecursively(File(gradleRoot, "wrapper"), true)
val result = GradleRunner.create()
.withProjectDir(fixtureDir)
.withArguments(
"clean", "compileKotlin", "compileTestKotlin", "--stacktrace", versionProperty()
)
.build()
assertThat(result.output).contains("BUILD SUCCESSFUL")
}
@Test fun mpp() {
val fixtureDir = File(fixturesDir, "mpp")
val gradleRoot = File(fixtureDir, "gradle").also { it.mkdir() }
File("../gradle/wrapper").copyRecursively(File(gradleRoot, "wrapper"), true)
val result = GradleRunner.create()
.withProjectDir(fixtureDir)
.withArguments(
"clean",
"compileKotlinJvm", "compileTestKotlinJvm",
"compileKotlinJvm2", "compileTestKotlinJvm2",
"--stacktrace", versionProperty()
)
.build()
assertThat(result.output).contains("BUILD SUCCESSFUL")
}
}
using WowPacketParser.Enums;
using WowPacketParser.Misc;
using WowPacketParser.Parsing;
namespace WowPacketParserModule.V1_13_2_31446.Parsers
{
public static class ActionBarHandler
{
[Parser(Opcode.CMSG_SET_ACTION_BUTTON)]
public static void HandleSetActionButton(Packet packet)
{
short action = packet.ReadInt16();
short type = packet.ReadInt16();
packet.AddValue("Action ", action);
packet.AddValue("Type ", type);
packet.ReadByte("Slot Id");
}
}
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
// +build !nacl,!plan9,!windows
package ipv6
import (
"net"
"golang.org/x/net/internal/socket"
)
func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) {
c.rawOpt.RLock()
m := socket.Message{
Buffers: [][]byte{b},
OOB: NewControlMessage(c.rawOpt.cflags),
}
c.rawOpt.RUnlock()
switch c.PacketConn.(type) {
case *net.UDPConn:
if err := c.RecvMsg(&m, 0); err != nil {
return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}
}
case *net.IPConn:
if err := c.RecvMsg(&m, 0); err != nil {
return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}
}
default:
return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType}
}
var cm *ControlMessage
if m.NN > 0 {
cm = new(ControlMessage)
if err := cm.Parse(m.OOB[:m.NN]); err != nil {
return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}
}
cm.Src = netAddrToIP16(m.Addr)
}
return m.N, cm, m.Addr, nil
}
func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) {
m := socket.Message{
Buffers: [][]byte{b},
OOB: cm.Marshal(),
Addr: dst,
}
err := c.SendMsg(&m, 0)
if err != nil {
err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err}
}
return m.N, err
}
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.addthis.hydra.task.map;
import com.addthis.bundle.core.Bundle;
import com.addthis.bundle.util.AutoField;
import com.addthis.bundle.util.CachingField;
import com.addthis.bundle.util.FullAutoField;
import com.addthis.bundle.value.ValueObject;
import com.addthis.hydra.data.filter.value.ValueFilter;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* This section specifies how fields of the input source are transformed into a mapped bundle.
* <p/>
* <p>Fields are moved from a specified field in the job {@link StreamMapper#source source}
* to a destination field in the mapped bundle. By default null values are not written into
* the mapped bundle. This behavior can be changed by setting the toNull field to true.</p>
* <p/>
* <p>Example:</p>
* <pre>fields:[
* "TIME"
* "SOURCE"
* {from:"INITIAL_NAME", to:"NEW_NAME"}
* ]</pre>
*
* @user-reference
*/
public final class FieldFilter {
/** The name of the bundle field source. This is required. */
final AutoField from;
/** The name of the bundle field destination. */
final AutoField to;
/** Optionally apply a filter onto the field. */
@JsonProperty ValueFilter filter;
/** If true then emit null values to the destination field. The default is false. */
@JsonProperty boolean toNull;
// Default constructor required for codec deserialization
@SuppressWarnings("unused")
private FieldFilter(@JsonProperty(value = "from", required = true) AutoField from,
@JsonProperty(value = "to") AutoField to) {
this.from = from;
if (to == null) {
this.to = cloneFrom(from);
} else {
this.to = to;
}
}
// can't find a good way to copy the json value for "from", copy constructors fail for abstract types,
// and clone has its own host of problems. We could just re-use the same object, but that would be even
// more wasteful than the caching we perform for unchanging formats. This is a decent stop-gap solution.
private static AutoField cloneFrom(AutoField from) {
if (from instanceof FullAutoField) {
return new FullAutoField(((FullAutoField) from).baseAutoField, ((FullAutoField) from).subNames);
} else if (from instanceof CachingField) {
return new CachingField(((CachingField) from).name);
} else {
throw new IllegalArgumentException("can not use implicit relation (from = to) for AutoField type: "
+ from.getClass());
}
}
public FieldFilter(String copyFieldName) {
this.from = AutoField.newAutoField(copyFieldName);
this.to = AutoField.newAutoField(copyFieldName);
}
public void mapField(Bundle in, Bundle out) {
ValueObject inVal = from.getValue(in);
if (filter != null) {
inVal = filter.filter(inVal, in);
}
if (inVal != null || toNull) {
to.setValue(out, inVal);
}
}
}
#!/bin/bash
######################################################################
#
# Clear all log files on all nodes or a selected node
#
# Usage:
# ./clrLogs [node-nr]
#
# Set CLUSTER_NR to the number of the cluster you're working with
# Default value of CLUSTER_NR is 0
######################################################################
CLUSTER_NR_SEL=${CLUSTER_NR:-0}
CLUSTER_BASE_NODE_NR=$(expr $CLUSTER_NR_SEL \* 5)
PI_USER=akkapi
ALL_NODES="0 1 2 3 4"
SELECTED_NODE=$1
NODES=${SELECTED_NODE:-$ALL_NODES}
for i in $NODES; do
node=$(( CLUSTER_BASE_NODE_NR + i ))
echo "Clearing logs on node-${node}"
ssh ${PI_USER}@node-${node} "sudo cp /dev/null neopixel.log"
done
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:gravity="center_horizontal"
android:orientation="vertical"
android:paddingTop="13dp" >
<TextView
android:id="@+id/timestamp"
style="@style/chat_text_date_style"
android:layout_width="wrap_content"
android:layout_height="wrap_content" />
<RelativeLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginTop="@dimen/margin_chat_activity" >
<ImageView
android:id="@+id/iv_userhead"
android:layout_width="@dimen/size_avatar"
android:layout_height="@dimen/size_avatar"
android:layout_alignParentRight="true"
android:layout_alignParentTop="true"
android:layout_marginRight="@dimen/margin_chat_activity"
android:background="@drawable/ease_default_avatar"
android:scaleType="fitXY" />
<RelativeLayout
android:id="@+id/bubble"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginRight="@dimen/margin_chat_activity"
android:layout_toLeftOf="@id/iv_userhead"
android:background="@drawable/ease_chatto_bg" >
<ImageView
android:layout_marginLeft="4dp"
android:id="@+id/iv_call_icon"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_centerVertical="true"
android:src="@drawable/ease_chat_voice_call_self" />
<TextView
android:id="@+id/tv_chatcontent"
style="@style/chat_content_date_style"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_centerVertical="true"
android:layout_toRightOf="@id/iv_call_icon"
android:background="@null"
android:minHeight="40dp"
android:padding="6dp"
/>
</RelativeLayout>
<ImageView
android:id="@+id/msg_status"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_centerVertical="true"
android:layout_toLeftOf="@id/bubble"
android:clickable="true"
android:src="@drawable/ease_msg_state_failed_resend"
android:visibility="gone" />
<TextView
android:id="@+id/tv_ack"
style="@style/chat_text_name_style"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_centerVertical="true"
android:layout_toLeftOf="@id/bubble"
android:text="@string/text_ack_msg"
android:textSize="12sp"
android:visibility="invisible" />
</RelativeLayout>
</LinearLayout>
#pragma once
/*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include <inttypes.h>
#include <io.h>
var http = require('http');
// Minimal HTTP server: buffer the request body, log it, then reply.
http.createServer(function(req, res){
  var content = '';
  // accumulate body chunks as they arrive
  req.on('data', function(chunk){
    content += chunk;
  });
  // once the body is complete, log it and send the response
  req.on('end', function(){
    console.log('body: ' + content);
    res.end('hello');
  });
}).listen(3000);
# multi instance flors
An ideal application would sit in a single Ruby process and handle all the work (HTTP requests, background jobs, etc.) with ease. For an application used by a small to medium team, this might be sufficient.
Often, it's necessary to prepare for growth and split the application across multiple Ruby processes.
The usual flor integrator is building a web application and wants her/his users to take part in workflow executions. Seemingly snappy responses are necessary. One [initial technique](#separating-user-interfacing-from-workflow-processing) is to separate HTTP handling from workflow processing, one process for each.
Ruby people have developed numerous server libraries for efficient HTTP handling. Some of them ([Passenger](https://www.phusionpassenger.com), [Unicorn](https://bogomips.org/unicorn), ...) manage pools of Ruby processes to distribute the work; when using them, one has to be aware of this pooling (see the sketch below).
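For instance, under a preforking server each worker process must end up with its own connection to the flor database. A minimal sketch, assuming Unicorn's `after_fork` hook and assuming the flor unit exposes its underlying Sequel database as `FLOR.storage.db` (both are assumptions here, to be checked against your setup):
```ruby
#
# config/unicorn.rb (hypothetical fragment)

after_fork do |server, worker|

  # drop any database connection inherited from the master process,
  # so that this worker lazily opens its own
  #
  FLOR.storage.db.disconnect if defined?(FLOR)
end
```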
[Another technique](#extending-the-workflow-processing-capacity) is to have more than one active flor instances to deal with more executions at once.
## separating user interfacing from workflow processing
One could decide not to have flor working in the same Ruby process (or the same group of Ruby processes) as the HTTP handling side. Still, access to flor is necessary.
The classical way to do this is to have a "passive" (non-started) flor unit on the HTTP handling side.
A Rails application could thus have an initializer dedicated to a passive flor:
```ruby
#
# config/initializers/flor.rb
FLOR = Flor::Unit.new('../flor/etc/conf.json')
FLOR.conf['unit'] = 'web'
#FLOR.start # no!
```
While the workflow handling side would use an "active" (started) flor unit sharing the same configuration:
```ruby
FLOR = Flor::Unit.new('../flor/etc/conf.json')
FLOR.conf['unit'] = 'flow'
FLOR.start # yes!
```
(Note that I'm using a `FLOR` constant; feel free to use another constant name or another way to point at your flor instance in your Ruby process.)
One could use [foreman](https://github.com/ddollar/foreman) or an equivalent to manage such a multi process setup.
Flow/execution launching/cancelling/signalling thus happens on the web handling side, while the processing happens on its own side.
One has to keep in mind that a passive flor does no message handling: it merely places messages in the flor database for the active flor(s) on the other side to pick up, so it never sees the message circulation that is, in fact, the flow execution. See [below](#launchingwaiting-in-a-multi-instance-setup) for `#launch` / `#wait:` gotchas in multi instance setups.
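To make that concrete, here is a minimal sketch of launching from the passive "web" unit above; the flow source and domain are made up, and the exact `wait:` values are the ones documented in [launching.md](launching.md):
```ruby
flow =
  %q{
    sequence
      task 'accounting'
      task 'approval'
  }

# on the passive side, #launch merely queues a "launch" message in the
# flor database; an active unit must be running somewhere to process it
#
exid = FLOR.launch(flow, domain: 'org.example')

# gotcha: waiting blocks until the awaited message has been processed,
# which, on a purely passive unit, only ever happens thanks to an active
# unit on the other side -- no active unit, no answer
#
#r = FLOR.launch(flow, wait: true) # careful!
```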
## extending the workflow processing capacity
TODO
## launching/waiting in a multi instance setup
(already brushed in [launching.md](launching.md))
TODO
"""
MIT License
Copyright (c) 2017 Zeke Barge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import subprocess
# Constants
XBt_TO_XBT = 100000000
VERSION = 'v1.1'
try:
    VERSION = subprocess.check_output(
        ["git", "describe", "--tags"], stderr=subprocess.DEVNULL).decode().rstrip()
except Exception:
    # git not available (or not a git checkout), keep the default VERSION
    pass
// +build !linux !cgo !seccomp
package seccomp
import (
"errors"
"github.com/opencontainers/runc/libcontainer/configs"
)
var ErrSeccompNotEnabled = errors.New("seccomp: config provided but seccomp not supported")
// InitSeccomp does nothing because seccomp is not supported.
func InitSeccomp(config *configs.Seccomp) error {
if config != nil {
return ErrSeccompNotEnabled
}
return nil
}
// IsEnabled returns false, because it is not supported.
func IsEnabled() bool {
return false
}
package org.wasabifx.wasabi.protocol.websocket
/**
 * A websocket channel: a request path paired with the configuration
 * lambda to apply to the channel's ChannelHandler.
 *
 * Created by swishy on 5/11/13.
 */
data class Channel (val path: String, val handler: ChannelHandler.() -> Unit)
@Echo OFF
cd data
python export_tables.py
ECHO Finished exporting tables
cd ..
sqlite3 mh4u.db < sqlcode
ECHO Finished importing tables
pause
/*
* Copyright (C) 2017 Andreas Steffen
* HSR Hochschule fuer Technik Rapperswil
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include "swima_event.h"
#include "swima_data_model.h"
typedef struct private_swima_event_t private_swima_event_t;
/**
* Private data of a swima_event_t object.
*
*/
struct private_swima_event_t {
/**
* Public swima_event_t interface.
*/
swima_event_t public;
/**
* Event ID
*/
uint32_t eid;
/**
* Timestamp
*/
chunk_t timestamp;
/**
* Action
*/
uint8_t action;
/**
* Software [Identifier] record
*/
swima_record_t *sw_record;
/**
* Reference count
*/
refcount_t ref;
};
METHOD(swima_event_t, get_eid, uint32_t,
private_swima_event_t *this, chunk_t *timestamp)
{
if (timestamp)
{
*timestamp = this->timestamp;
}
return this->eid;
}
METHOD(swima_event_t, get_action, uint8_t,
private_swima_event_t *this)
{
return this->action;
}
METHOD(swima_event_t, get_sw_record, swima_record_t*,
private_swima_event_t *this)
{
return this->sw_record;
}
METHOD(swima_event_t, get_ref, swima_event_t*,
private_swima_event_t *this)
{
ref_get(&this->ref);
return &this->public;
}
METHOD(swima_event_t, destroy, void,
private_swima_event_t *this)
{
if (ref_put(&this->ref))
{
this->sw_record->destroy(this->sw_record);
free(this->timestamp.ptr);
free(this);
}
}
/**
* See header
*/
swima_event_t *swima_event_create(uint32_t eid, chunk_t timestamp,
uint8_t action, swima_record_t *sw_record)
{
private_swima_event_t *this;
INIT(this,
.public = {
.get_eid = _get_eid,
.get_action = _get_action,
.get_sw_record = _get_sw_record,
.get_ref = _get_ref,
.destroy = _destroy,
},
.eid = eid,
.timestamp = chunk_clone(timestamp),
.action = action,
.sw_record = sw_record,
.ref = 1,
);
return &this->public;
}
<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:msxsl="urn:schemas-microsoft-com:xslt" exclude-result-prefixes="msxsl">
<xsl:template match="/">
<html>
<head>
<title>7-Zip Sfx Configurations</title>
<style type="text/css">
h1 { padding: 10px; padding-width: 100% }
th { width: 25%; border: 1px solid silver; padding: 10px }
table { width: 800px }
thead { align: center; background: silver; font-weight: bold }
td.r1 { background-color: white }
td.r2 { background-color: skyblue }
</style>
</head>
<body>
<xsl:for-each select="sfxConfigs/config">
<table border="1">
<caption>
<h1>
<xsl:value-of select="@name"/>
</h1>
</caption>
<xsl:if test=".!=''">
<thead>
<th>Command</th>
<td align="center">Description</td>
</thead>
<tbody>
<xsl:for-each select="id">
<tr>
<xsl:if test="position() mod 2 = 0">
<td align="center">
<xsl:value-of select="@command"/>
</td>
<td>
<xsl:value-of select="@description"/>
</td>
</xsl:if>
<xsl:if test="position() mod 2 = 1">
<td align="center" bgcolor="#d1def0">
<xsl:value-of select="@command"/>
</td>
<td bgcolor="#d1def0">
<xsl:value-of select="@description"/>
</td>
</xsl:if>
</tr>
</xsl:for-each>
</tbody>
</xsl:if>
</table>
<br/>Applicable to modules: <xsl:value-of select="@modules"/>.<br/>
</xsl:for-each>
</body>
</html>
</xsl:template>
</xsl:stylesheet>
from __future__ import absolute_import
import os
import sys
# Make sure we import from working tree
pynagbase = os.path.dirname(os.path.realpath(__file__ + "/.."))
sys.path.insert(0, pynagbase)
import unittest2 as unittest
from mock import patch
import shutil
import tempfile
import pynag.Utils as utils
import pynag.Model
from pynag.Utils import PynagError
from tests import tests_dir
import pynag.Utils.misc
class testUtils(unittest.TestCase):
def setUp(self):
        # Utils should work fine with just about any data, but let's use
        # dataset01
os.chdir(tests_dir)
os.chdir('dataset01')
pynag.Model.config = None
pynag.Model.cfg_file = './nagios/nagios.cfg'
pynag.Model.ObjectDefinition.objects.get_all()
self.tmp_dir = tempfile.mkdtemp() # Will be deleted after test runs
os.environ['LANG'] = 'en_US@UTF8'
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def testCompareFilterWithGrep(self):
""" test pynag.Utils.grep() by comparing it with pynag.Model.Service.objects.filter()
# TODO: Currently pynag.Model.Service.objects.filter() has some bugs, so some tests here fail.
"""
self._compare_search_expressions(use='generic-service')
self._compare_search_expressions(register=1, use='generic-service')
self._compare_search_expressions(host_name__exists=True)
self._compare_search_expressions(host_name__exists=False)
self._compare_search_expressions(host_name__notcontains='l')
self._compare_search_expressions(host_name__notcontains='this value cannot possibly exist')
self._compare_search_expressions(host_name__startswith='l')
self._compare_search_expressions(host_name__endswith='m')
self._compare_search_expressions(host_name__isnot='examplehost for testing purposes')
def testGrep(self):
""" Test cases based on gradecke's testing """
host = pynag.Model.string_to_class['host']()
host['use'] = "generic-host"
host['name'] = "ABC"
host['_code'] = "ABC"
host['_function'] = "Server,Production"
host2 = pynag.Model.string_to_class['host']()
host2['use'] = "generic-host"
host2['name'] = "XYZ"
host2['_code'] = "XYZ"
host2['_function'] = "Switch,Production"
hosts = host, host2
result = pynag.Utils.grep(hosts, **{'_code__contains': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__contains': 'BC'})
self.assertEqual(1, len(result))
        # An empty "contains" pattern matches every host that has the field...
        result = pynag.Utils.grep(hosts, **{'_code__contains': ''})
        self.assertEqual(2, len(result))
        # ...but a nonexistent field never matches
        result = pynag.Utils.grep(hosts, **{'nonexistant__contains': ''})
        self.assertEqual(0, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notcontains': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notcontains': 'BC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__startswith': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__startswith': 'AB'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notstartswith': 'AB'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__endswith': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__endswith': 'BC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notendswith': 'YZ'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__exists': True})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'_code__exists': False})
self.assertEqual(0, len(result))
result = pynag.Utils.grep(hosts, **{'_function__has_field': 'Server'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(
hosts, **{'_function__has_field': 'Production'})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'name__notcontains': 'A'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'name__regex': 'A.C'})
self.assertEqual(1, len(result))
self.assertEqual('ABC', result[0].name)
result = pynag.Utils.grep(hosts, **{'name__in': ['ABC', 'BCD']})
self.assertEqual(1, len(result))
self.assertEqual('ABC', result[0].name)
result = pynag.Utils.grep(hosts, **{'name__notin': ['ABC', 'BCD']})
self.assertEqual(1, len(result))
self.assertEqual('XYZ', result[0].name)
result = pynag.Utils.grep(hosts, **{'search': 'Switch'})
self.assertEqual(1, len(result))
self.assertEqual('XYZ', result[0].name)
def _compare_search_expressions(self, **expression):
# print "Testing search expression %s" % expression
all_services = pynag.Model.Service.objects.all
result1 = pynag.Model.Service.objects.filter(**expression)
result2 = pynag.Utils.grep(all_services, **expression)
self.assertEqual(
result1, result2, msg="Search output from pynag.Utils.grep() does not match pynag.Model.Service.objects.filter() when using parameters %s\nFilter: %s\nGrep: %s" %
(expression, result1, result2))
return len(result1)
def test_run_command_file_not_found(self):
command = '/bin/doesnotexist'
expected_msg = '\* Could not run command \(return code= %s\)\n' % 127
expected_msg += '\* Error was:\n.*: %s: (not found|No such file or directory)\n' % command
expected_msg += '\* Command was:\n%s\n' % command
expected_msg += '\* Output was:\n\n'
expected_msg += 'Check if y/our path is correct: %s' % os.getenv(
'PATH')
self.assertRaisesRegexp(
utils.PynagError, expected_msg, utils.runCommand, command, raise_error_on_fail=True)
def test_gitrepo_init_empty(self):
from getpass import getuser
from platform import node
emptyish = [None, '', ' ', '\n ']
for x in emptyish:
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=x,
author_email=x
)
self.assertEquals(repo.author_name, 'Pynag User')
expected_email = '%s@%s' % (getuser(), node())
self.assertEquals(repo.author_email, expected_email)
def test_gitrepo_init_with_author(self):
tempfile.mkstemp(dir=self.tmp_dir)
author_name = 'Git Owner'
author_email = '[email protected]'
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=author_name,
author_email=author_email
)
self.assertEquals(repo.author_name, author_name)
self.assertEquals(repo.author_email, author_email)
self.assertEquals(len(repo.log()), 1)
self.assertEquals(repo.log()[0]['author_name'], author_name)
self.assertEquals(repo.log()[0]['author_email'], author_email)
def test_gitrepo_init_with_files(self):
tempfile.mkstemp(dir=self.tmp_dir)
        # If pynag's default author email would come out malformed
        # (hostname ending in '.(none)'), adjust it for the test
        author_email = None
        from getpass import getuser
        from platform import node
        nodename = node()
        if nodename.endswith('.(none)'):
            nodename = nodename[:-7] + '.example.com'
        author_email = '%s@%s' % (getuser(), nodename)
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=None,
author_email=author_email
)
# Check that there is an initial commit
expected_email = '%s@%s' % (getuser(), nodename)
self.assertEquals(len(repo.log()), 1)
self.assertEquals(repo.log()[0]['comment'], 'Initial Commit')
self.assertEquals(repo.log()[0]['author_name'], 'Pynag User')
self.assertEquals(repo.log()[0]['author_email'], expected_email)
# Test kwargs functionality
self.assertEquals(
repo.log(author_email=expected_email)[0]['author_email'], expected_email)
self.assertEquals(
repo.log(comment__contains='Initial')[0]['comment'], 'Initial Commit')
self.assertEquals(len(repo.log(comment__contains='nothing')), 0)
# Test show method
initial_hash = repo.log()[0]['hash']
initial_hash_valid_commits = repo.get_valid_commits()[0]
self.assertEquals(initial_hash, initial_hash_valid_commits)
gitrunpatcher = patch('pynag.Utils.GitRepo._run_command')
validcommitspatcher = patch('pynag.Utils.GitRepo.get_valid_commits')
gitrunpatch = gitrunpatcher.start()
validcommitspatch = validcommitspatcher.start()
validcommitspatch.return_value = [initial_hash]
repo.show(initial_hash)
gitrunpatch.assert_called_once_with('git show %s' % initial_hash)
gitrunpatcher.stop()
validcommitspatcher.stop()
        # showing an unknown commit id should raise
        self.assertRaisesRegexp(
            PynagError, 'is not a valid commit id', repo.show, 'badc0ffee')
# Add file
tempfile.mkstemp(dir=self.tmp_dir)
self.assertEquals(len(repo.get_uncommited_files()), 1)
self.assertEquals(repo.is_up_to_date(), False)
# Commit file
repo.commit(filelist=repo.get_uncommited_files()[0]['filename'])
self.assertEquals(repo.is_up_to_date(), True)
self.assertEquals(len(repo.get_uncommited_files()), 0)
self.assertEquals(len(repo.get_valid_commits()), 2)
log_entry = repo.log()[0]
self.assertEquals(log_entry['comment'], 'commited by pynag')
def test_gitrepo_deprecated_methods(self):
"""
Delete this class as deprecated methods are removed.
"""
repo = utils.GitRepo(directory=self.tmp_dir, auto_init=True)
testfilename = 'testfile.name.txt'
add_method_patcher = patch('pynag.Utils.GitRepo.add')
add_method_patch = add_method_patcher.start()
repo._git_add(testfilename)
add_method_patch.assert_called_once_with(testfilename)
add_method_patcher.stop()
commit_method_mocker = patch('pynag.Utils.GitRepo.commit')
commit_method_mock = commit_method_mocker.start()
repo._git_commit(filename=testfilename, message='test')
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename])
commit_method_mock.reset_mock()
repo._git_commit(
filename=None, message='test', filelist=[testfilename])
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename])
commit_method_mock.reset_mock()
repo._git_commit(
filename=testfilename, message='test', filelist=[testfilename])
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename, testfilename])
commit_method_mocker.stop()
def test_gitrepo_diff(self):
""" Test git diff works as expected """
# Create repo and write one test commit
git = utils.GitRepo(directory=self.tmp_dir, auto_init=True)
tmp_filename = "%s/%s" % (self.tmp_dir, 'testfile.txt')
open(tmp_filename, 'w').write('test data\n')
git.commit()
# First try diff with no changes made:
diff = git.diff()
self.assertEquals(diff, '')
# Now append to our file and see the difference:
extra_data = 'extra data\n'
open(tmp_filename, 'a').write(extra_data)
# Call diff with no params, check if extra_data is in the diff
diff = git.diff()
self.assertTrue(diff.find(extra_data) > 0)
# Call diff with filename as parameter, check if extra_data is in the
# diff
diff = git.diff(commit_id_or_filename=tmp_filename)
self.assertTrue(diff.find(extra_data) > 0)
# Call commit again and confirm there is no diff
git.commit()
diff = git.diff()
self.assertEquals(diff, '')
# Call a diff against first commit, see if we find our changes in the
# commit.
all_commits = git.get_valid_commits()
first_commit = all_commits.pop()
diff = git.diff(commit_id_or_filename=first_commit)
self.assertTrue(diff.find(extra_data) > 0)
# Revert latest change, and make sure diff is gone.
last_commit = all_commits.pop(0)
git.revert(last_commit)
diff = git.diff(commit_id_or_filename=first_commit)
self.assertTrue(diff.find(extra_data) == -1)
# At last try to diff against an invalid commit id
try:
git.diff('invalid commit id')
self.assertTrue(
False, "we wanted exception when calling diff on invalid commit id")
except PynagError:
pass
def test_send_nsca(self):
""" test pynag.Utils.send_nsca
By its very nature, send_nsca binary itself does not allow for much testing,
however we can still test if the function is working as expected
"""
        # Run send_nsca normally for a smoke test (we don't know much about what send_nsca will do with our packet)
        # This test will only fail if there are unhandled tracebacks in the
        # code somewhere
try:
pynag.Utils.send_nsca(code=0, message="test", nscahost="localhost")
except OSError as e:
# We don't care about the result if we have error because send_nsca
# is not installed
if e.errno != 2:
raise e
result = pynag.Utils.send_nsca(
code=0, message="match", nscahost="match", hostname="test", service=None, nscabin="/bin/grep", nscaconf="-")
self.assertEqual(0, result[0])
self.assertEqual('(standard input):1\n', result[1])
result = pynag.Utils.send_nsca(
code=0, message="match", nscahost="nomatch", hostname="test", service=None, nscabin="/bin/grep", nscaconf="-")
self.assertEqual(1, result[0])
self.assertEqual('(standard input):0\n', result[1])
class testFakeNagiosEnvironment(unittest.TestCase):
def setUp(self):
self.environment = pynag.Utils.misc.FakeNagiosEnvironment()
self.environment.create_minimal_environment()
def tearDown(self):
self.environment.terminate()
def testMinimal(self):
""" Minimal Test of our FakeNagiosEnvironment """
nagios = pynag.Utils.misc.FakeNagiosEnvironment()
nagios.create_minimal_environment()
nagios.config.parse()
self.assertTrue(os.path.isfile(nagios.config.cfg_file))
self.assertTrue(os.path.isdir(nagios.objects_dir))
def testModelUpdates(self):
""" Test backup and restores of Model global variables """
nagios = self.environment
original_config = pynag.Model.config
original_cfg_file = pynag.Model.cfg_file
original_dir = pynag.Model.pynag_directory
# Update model, and check if updates succeeded
nagios.update_model()
self.assertEqual(pynag.Model.config, nagios.config)
self.assertEqual(pynag.Model.cfg_file, nagios.config.cfg_file)
self.assertEqual(pynag.Model.pynag_directory, nagios.objects_dir)
# See if we can restore our model
nagios.restore_model()
self.assertEqual(pynag.Model.config, original_config)
self.assertEqual(pynag.Model.cfg_file, original_cfg_file)
self.assertEqual(pynag.Model.pynag_directory, original_dir)
def testStartStop(self):
""" Try to start and stop our nagios environment """
self.environment.start()
pid = open(os.path.join(self.environment.tempdir, "nagios.pid")).read()
pid = int(pid)
try:
os.kill(pid, 0)
except OSError:
self.assertTrue(False, "Did not find a running process with process_id=%s" % pid)
self.environment.stop()
try:
os.kill(pid, 0)
self.assertTrue(False, "Seems like process with process_id=%s is still running" % pid)
except OSError:
pass
def testOpenDecorator(self):
""" Makes sure the fake nagios environment cannot go outside its directory """
# Try to open a regular file
self.environment.config.open(self.environment.config.cfg_file).close()
self.assertTrue(True, "Successfully opened nagios.cfg")
try:
self.environment.config.open("/etc/passwd").close()
self.assertTrue(False, "I was able to open a file outside my tempdir!")
except PynagError:
pass
def testUpdateModel_NoRestore(self):
self.environment.update_model()
def testLivestatus(self):
host_name = "localhost"
self.environment.update_model()
pynag.Model.Host(host_name=host_name, use="generic-host").save()
self.environment.guess_livestatus_path()
self.environment.configure_livestatus()
self.environment.start()
livestatus = self.environment.get_livestatus()
hosts = livestatus.get_hosts(name=host_name)
self.assertTrue(hosts, "Could not find a host called %s" % (host_name))
def testImports(self):
""" Test FakeNagiosEnvironment.import_config() """
host1 = "host1"
host2 = "host2"
tempdir = tempfile.mkdtemp()
tempfile1 = tempfile.mktemp(suffix='.cfg')
tempfile2 = os.path.join(tempdir, 'file2.cfg')
with open(tempfile1, 'w') as f:
f.write("define host {\nname host1\n}")
with open(tempfile2, 'w') as f:
f.write("define host {\nname host2\n}")
self.environment.import_config(tempdir)
self.environment.import_config(tempfile1)
self.environment.update_model()
host1 = pynag.Model.Host.objects.filter(name=host1)
host2 = pynag.Model.Host.objects.filter(name=host2)
self.assertTrue(host1)
self.assertTrue(host2)
if __name__ == "__main__":
unittest.main()
# vim: sts=4 expandtab autoindent
{
"Entries": [
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/providers/Microsoft.Storage?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Byb3ZpZGVycy9NaWNyb3NvZnQuU3RvcmFnZT9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"0c5e991a-9472-4a87-8b7e-3fdca0a7a3a1"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11999"
],
"x-ms-request-id": [
"c92b957d-66fa-4716-a78f-3d173fa398f2"
],
"x-ms-correlation-request-id": [
"c92b957d-66fa-4716-a78f-3d173fa398f2"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070034Z:c92b957d-66fa-4716-a78f-3d173fa398f2"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:00:33 GMT"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Expires": [
"-1"
],
"Content-Length": [
"13639"
]
},
"ResponseBody": "{\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/providers/Microsoft.Storage\",\r\n \"namespace\": \"Microsoft.Storage\",\r\n \"authorizations\": [\r\n {\r\n \"applicationId\": \"a6aa9161-5291-40bb-8c5c-923b567bee3b\",\r\n \"roleDefinitionId\": \"070ab87f-0efc-4423-b18b-756f3bdb0236\"\r\n },\r\n {\r\n \"applicationId\": \"e406a681-f3d4-42a8-90b6-c2b029497af1\"\r\n }\r\n ],\r\n \"resourceTypes\": [\r\n {\r\n \"resourceType\": \"deletedAccounts\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"locations/deletedAccounts\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\",\r\n \"2016-01-01\",\r\n \"2015-06-15\",\r\n \"2015-05-01-preview\"\r\n ],\r\n \"capabilities\": 
\"CrossResourceGroupResourceMove, CrossSubscriptionResourceMove, SystemAssignedResourceIdentity\"\r\n },\r\n {\r\n \"resourceType\": \"operations\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\",\r\n \"2016-01-01\",\r\n \"2015-06-15\",\r\n \"2015-05-01-preview\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"locations/asyncoperations\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\",\r\n \"2016-01-01\",\r\n \"2015-06-15\",\r\n \"2015-05-01-preview\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/listAccountSas\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n 
\"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/listServiceSas\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/blobServices\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/tableServices\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": 
\"storageAccounts/queueServices\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/fileServices\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"locations\",\r\n \"locations\": [],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-07-01\",\r\n \"2016-01-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"locations/usages\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n 
\"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"locations/deleteVirtualNetworkOrSubnets\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-07-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"usages\",\r\n \"locations\": [],\r\n \"apiVersions\": [\r\n \"2020-08-01-preview\",\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\",\r\n \"2016-01-01\",\r\n \"2015-06-15\",\r\n \"2015-05-01-preview\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"checkNameAvailability\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-03-01-preview\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\",\r\n \"2016-05-01\",\r\n \"2016-01-01\",\r\n \"2015-06-15\",\r\n \"2015-05-01-preview\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"locations/checkNameAvailability\",\r\n \"locations\": [\r\n \"East US\",\r\n \"East US 2\",\r\n \"West US\",\r\n \"West Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"Central US\",\r\n \"North Europe\",\r\n \"Brazil South\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France 
Central\",\r\n \"Australia Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 (Stage)\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2019-06-01\",\r\n \"2019-04-01\",\r\n \"2018-11-01\",\r\n \"2018-07-01\",\r\n \"2018-02-01\",\r\n \"2017-10-01\",\r\n \"2017-06-01\",\r\n \"2016-12-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/services\",\r\n \"locations\": [\r\n \"East US\",\r\n \"West US\",\r\n \"East US 2 (Stage)\",\r\n \"West Europe\",\r\n \"North Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"East US 2\",\r\n \"Central US\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"Brazil South\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2014-04-01\"\r\n ]\r\n },\r\n {\r\n \"resourceType\": \"storageAccounts/services/metricDefinitions\",\r\n \"locations\": [\r\n \"East US\",\r\n \"West US\",\r\n \"East US 2 (Stage)\",\r\n \"West Europe\",\r\n \"North Europe\",\r\n \"East Asia\",\r\n \"Southeast Asia\",\r\n \"Japan East\",\r\n \"Japan West\",\r\n \"North Central US\",\r\n \"South Central US\",\r\n \"East US 2\",\r\n \"Central US\",\r\n \"Australia East\",\r\n \"Australia Southeast\",\r\n \"Brazil South\",\r\n \"South India\",\r\n \"Central India\",\r\n \"West India\",\r\n \"Canada East\",\r\n \"Canada Central\",\r\n \"West US 2\",\r\n \"West Central US\",\r\n \"UK South\",\r\n \"UK West\",\r\n \"Korea Central\",\r\n \"Korea South\",\r\n \"France Central\",\r\n \"South Africa North\",\r\n \"UAE North\",\r\n \"Switzerland North\",\r\n \"Germany West Central\",\r\n \"Norway East\",\r\n \"East US 2 EUAP\",\r\n \"Central US EUAP\"\r\n ],\r\n \"apiVersions\": [\r\n \"2014-04-01\"\r\n ]\r\n }\r\n ],\r\n \"registrationState\": \"Registered\"\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourcegroups/pstestrg1793?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlZ3JvdXBzL3BzdGVzdHJnMTc5Mz9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "PUT",
"RequestBody": "{\r\n \"location\": \"West US\"\r\n}",
"RequestHeaders": {
"x-ms-client-request-id": [
"a62b64f6-f6f9-40e0-ad42-ee9b7938b69d"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Content-Length": [
"29"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-ratelimit-remaining-subscription-writes": [
"1199"
],
"x-ms-request-id": [
"0eb9cc32-78cc-4c85-a617-55c2708aa640"
],
"x-ms-correlation-request-id": [
"0eb9cc32-78cc-4c85-a617-55c2708aa640"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070036Z:0eb9cc32-78cc-4c85-a617-55c2708aa640"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:00:35 GMT"
],
"Content-Length": [
"177"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793\",\r\n \"name\": \"pstestrg1793\",\r\n \"location\": \"westus\",\r\n \"properties\": {\r\n \"provisioningState\": \"Succeeded\"\r\n }\r\n}",
"StatusCode": 201
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/providers/Microsoft.Storage/checkNameAvailability?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Byb3ZpZGVycy9NaWNyb3NvZnQuU3RvcmFnZS9jaGVja05hbWVBdmFpbGFiaWxpdHk/YXBpLXZlcnNpb249MjAxOS0wNi0wMQ==",
"RequestMethod": "POST",
"RequestBody": "{\r\n \"name\": \"stopstestrg1793\",\r\n \"type\": \"Microsoft.Storage/storageAccounts\"\r\n}",
"RequestHeaders": {
"x-ms-client-request-id": [
"555348b0-9bea-4f8e-99d4-4a02fd0695a7"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Content-Length": [
"81"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"d6837c39-fad0-4db0-863c-2fe6feebe631"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11999"
],
"x-ms-correlation-request-id": [
"feb4b554-a55e-4957-ab6f-af880cadbf8e"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070037Z:feb4b554-a55e-4957-ab6f-af880cadbf8e"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:00:37 GMT"
],
"Content-Length": [
"22"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"nameAvailable\": true\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5Mz9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "PUT",
"RequestBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\"\r\n },\r\n \"kind\": \"StorageV2\",\r\n \"location\": \"West US\"\r\n}",
"RequestHeaders": {
"x-ms-client-request-id": [
"80c0eef9-c1b4-4c14-bb6a-8d2ed170cf61"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Content-Length": [
"99"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"Location": [
"https://management.azure.com/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/providers/Microsoft.Storage/locations/westus/asyncoperations/d94d1a7f-eaba-402d-b824-7616d9c103a9?monitor=true&api-version=2019-06-01"
],
"Retry-After": [
"17"
],
"x-ms-request-id": [
"d94d1a7f-eaba-402d-b824-7616d9c103a9"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-writes": [
"1199"
],
"x-ms-correlation-request-id": [
"33187919-1f16-4fc9-b21c-0b82e45dffe6"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070045Z:33187919-1f16-4fc9-b21c-0b82e45dffe6"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:00:44 GMT"
],
"Content-Type": [
"text/plain; charset=utf-8"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 202
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/providers/Microsoft.Storage/locations/westus/asyncoperations/d94d1a7f-eaba-402d-b824-7616d9c103a9?monitor=true&api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Byb3ZpZGVycy9NaWNyb3NvZnQuU3RvcmFnZS9sb2NhdGlvbnMvd2VzdHVzL2FzeW5jb3BlcmF0aW9ucy9kOTRkMWE3Zi1lYWJhLTQwMmQtYjgyNC03NjE2ZDljMTAzYTk/bW9uaXRvcj10cnVlJmFwaS12ZXJzaW9uPTIwMTktMDYtMDE=",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"54a22b9e-d50e-43d2-959e-a628cddbc29d"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11998"
],
"x-ms-correlation-request-id": [
"1717abf8-39d5-40a4-8649-b4ec673349ef"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070103Z:1717abf8-39d5-40a4-8649-b4ec673349ef"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:02 GMT"
],
"Content-Length": [
"1317"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"kind\": \"StorageV2\",\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793\",\r\n \"name\": \"stopstestrg1793\",\r\n \"type\": \"Microsoft.Storage/storageAccounts\",\r\n \"location\": \"westus\",\r\n \"tags\": {},\r\n \"properties\": {\r\n \"privateEndpointConnections\": [],\r\n \"networkAcls\": {\r\n \"bypass\": \"AzureServices\",\r\n \"virtualNetworkRules\": [],\r\n \"ipRules\": [],\r\n \"defaultAction\": \"Allow\"\r\n },\r\n \"supportsHttpsTrafficOnly\": true,\r\n \"encryption\": {\r\n \"services\": {\r\n \"file\": {\r\n \"keyType\": \"Account\",\r\n \"enabled\": true,\r\n \"lastEnabledTime\": \"2020-09-21T07:00:44.5268263Z\"\r\n },\r\n \"blob\": {\r\n \"keyType\": \"Account\",\r\n \"enabled\": true,\r\n \"lastEnabledTime\": \"2020-09-21T07:00:44.5268263Z\"\r\n }\r\n },\r\n \"keySource\": \"Microsoft.Storage\"\r\n },\r\n \"accessTier\": \"Hot\",\r\n \"provisioningState\": \"Succeeded\",\r\n \"creationTime\": \"2020-09-21T07:00:44.4642762Z\",\r\n \"primaryEndpoints\": {\r\n \"dfs\": \"https://stopstestrg1793.dfs.core.windows.net/\",\r\n \"web\": \"https://stopstestrg1793.z22.web.core.windows.net/\",\r\n \"blob\": \"https://stopstestrg1793.blob.core.windows.net/\",\r\n \"queue\": \"https://stopstestrg1793.queue.core.windows.net/\",\r\n \"table\": \"https://stopstestrg1793.table.core.windows.net/\",\r\n \"file\": \"https://stopstestrg1793.file.core.windows.net/\"\r\n },\r\n \"primaryLocation\": \"westus\",\r\n \"statusOfPrimary\": \"available\",\r\n \"secondaryLocation\": \"eastus\",\r\n \"statusOfSecondary\": \"available\"\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5Mz9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"fbe57731-3581-480b-93bb-5cf178502791"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"7b8d10ef-1532-41da-9abf-33f2c17db849"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11997"
],
"x-ms-correlation-request-id": [
"00d30a96-15b2-4934-83bb-e65a92b715bf"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070103Z:00d30a96-15b2-4934-83bb-e65a92b715bf"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:03 GMT"
],
"Content-Length": [
"1317"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"kind\": \"StorageV2\",\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793\",\r\n \"name\": \"stopstestrg1793\",\r\n \"type\": \"Microsoft.Storage/storageAccounts\",\r\n \"location\": \"westus\",\r\n \"tags\": {},\r\n \"properties\": {\r\n \"privateEndpointConnections\": [],\r\n \"networkAcls\": {\r\n \"bypass\": \"AzureServices\",\r\n \"virtualNetworkRules\": [],\r\n \"ipRules\": [],\r\n \"defaultAction\": \"Allow\"\r\n },\r\n \"supportsHttpsTrafficOnly\": true,\r\n \"encryption\": {\r\n \"services\": {\r\n \"file\": {\r\n \"keyType\": \"Account\",\r\n \"enabled\": true,\r\n \"lastEnabledTime\": \"2020-09-21T07:00:44.5268263Z\"\r\n },\r\n \"blob\": {\r\n \"keyType\": \"Account\",\r\n \"enabled\": true,\r\n \"lastEnabledTime\": \"2020-09-21T07:00:44.5268263Z\"\r\n }\r\n },\r\n \"keySource\": \"Microsoft.Storage\"\r\n },\r\n \"accessTier\": \"Hot\",\r\n \"provisioningState\": \"Succeeded\",\r\n \"creationTime\": \"2020-09-21T07:00:44.4642762Z\",\r\n \"primaryEndpoints\": {\r\n \"dfs\": \"https://stopstestrg1793.dfs.core.windows.net/\",\r\n \"web\": \"https://stopstestrg1793.z22.web.core.windows.net/\",\r\n \"blob\": \"https://stopstestrg1793.blob.core.windows.net/\",\r\n \"queue\": \"https://stopstestrg1793.queue.core.windows.net/\",\r\n \"table\": \"https://stopstestrg1793.table.core.windows.net/\",\r\n \"file\": \"https://stopstestrg1793.file.core.windows.net/\"\r\n },\r\n \"primaryLocation\": \"westus\",\r\n \"statusOfPrimary\": \"available\",\r\n \"secondaryLocation\": \"eastus\",\r\n \"statusOfSecondary\": \"available\"\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzP2FwaS12ZXJzaW9uPTIwMTktMDYtMDE=",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"fe642110-c77c-409a-9c3a-46db13f8fbfc"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"b7ba9fbe-8142-4839-9fe2-b0d909ca398a"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11996"
],
"x-ms-correlation-request-id": [
"0540b017-de33-4400-84b9-555d8406a9e6"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070103Z:0540b017-de33-4400-84b9-555d8406a9e6"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:03 GMT"
],
"Content-Length": [
"1329"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"value\": [\r\n {\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"kind\": \"StorageV2\",\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793\",\r\n \"name\": \"stopstestrg1793\",\r\n \"type\": \"Microsoft.Storage/storageAccounts\",\r\n \"location\": \"westus\",\r\n \"tags\": {},\r\n \"properties\": {\r\n \"privateEndpointConnections\": [],\r\n \"networkAcls\": {\r\n \"bypass\": \"AzureServices\",\r\n \"virtualNetworkRules\": [],\r\n \"ipRules\": [],\r\n \"defaultAction\": \"Allow\"\r\n },\r\n \"supportsHttpsTrafficOnly\": true,\r\n \"encryption\": {\r\n \"services\": {\r\n \"file\": {\r\n \"keyType\": \"Account\",\r\n \"enabled\": true,\r\n \"lastEnabledTime\": \"2020-09-21T07:00:44.5268263Z\"\r\n },\r\n \"blob\": {\r\n \"keyType\": \"Account\",\r\n \"enabled\": true,\r\n \"lastEnabledTime\": \"2020-09-21T07:00:44.5268263Z\"\r\n }\r\n },\r\n \"keySource\": \"Microsoft.Storage\"\r\n },\r\n \"accessTier\": \"Hot\",\r\n \"provisioningState\": \"Succeeded\",\r\n \"creationTime\": \"2020-09-21T07:00:44.4642762Z\",\r\n \"primaryEndpoints\": {\r\n \"dfs\": \"https://stopstestrg1793.dfs.core.windows.net/\",\r\n \"web\": \"https://stopstestrg1793.z22.web.core.windows.net/\",\r\n \"blob\": \"https://stopstestrg1793.blob.core.windows.net/\",\r\n \"queue\": \"https://stopstestrg1793.queue.core.windows.net/\",\r\n \"table\": \"https://stopstestrg1793.table.core.windows.net/\",\r\n \"file\": \"https://stopstestrg1793.file.core.windows.net/\"\r\n },\r\n \"primaryLocation\": \"westus\",\r\n \"statusOfPrimary\": \"available\",\r\n \"secondaryLocation\": \"eastus\",\r\n \"statusOfSecondary\": \"available\"\r\n }\r\n }\r\n ]\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "PUT",
"RequestBody": "{\r\n \"properties\": {\r\n \"defaultServiceVersion\": \"2018-03-28\"\r\n }\r\n}",
"RequestHeaders": {
"x-ms-client-request-id": [
"cdc3fad7-1e3a-4316-a7b6-0d0abdf99564"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Content-Length": [
"71"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"a2854b41-a92c-4191-8f3c-21abbe065b95"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-writes": [
"1198"
],
"x-ms-correlation-request-id": [
"a762ee02-9a8b-4c2e-bace-988ddab83cbf"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070104Z:a762ee02-9a8b-4c2e-bace-988ddab83cbf"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:04 GMT"
],
"Content-Length": [
"294"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"defaultServiceVersion\": \"2018-03-28\"\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "PUT",
"RequestBody": "{\r\n \"properties\": {\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": true,\r\n \"days\": 3\r\n }\r\n }\r\n}",
"RequestHeaders": {
"x-ms-client-request-id": [
"7ebd2a32-2c5f-489d-881b-a5fe6531aff7"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Content-Length": [
"108"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"aec413d0-cf30-4077-9f45-0c0464479638"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-writes": [
"1197"
],
"x-ms-correlation-request-id": [
"e9633690-1b86-4302-9012-0f79e843fd46"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070105Z:e9633690-1b86-4302-9012-0f79e843fd46"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:05 GMT"
],
"Content-Length": [
"307"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": true,\r\n \"days\": 3\r\n }\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "PUT",
"RequestBody": "{\r\n \"properties\": {\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": false\r\n }\r\n }\r\n}",
"RequestHeaders": {
"x-ms-client-request-id": [
"a31c70e4-98f6-4ac0-90bf-87df98538a73"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
],
"Content-Type": [
"application/json; charset=utf-8"
],
"Content-Length": [
"91"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"8d589ba0-bfc5-4260-8408-18ffd0e30632"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-writes": [
"1196"
],
"x-ms-correlation-request-id": [
"69722484-1d60-4b51-bec3-e5749cbd53d2"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070106Z:69722484-1d60-4b51-bec3-e5749cbd53d2"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:06 GMT"
],
"Content-Length": [
"299"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": false\r\n }\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"2126acfc-7451-4214-8ed6-3b049ae79cfc"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"e823ef87-f238-4245-8b0d-b4aa6266e367"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11995"
],
"x-ms-correlation-request-id": [
"78e0f55b-f777-4a01-9ed6-6a496f4859e8"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070104Z:78e0f55b-f777-4a01-9ed6-6a496f4859e8"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:04 GMT"
],
"Content-Length": [
"408"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"cors\": {\r\n \"corsRules\": []\r\n },\r\n \"defaultServiceVersion\": \"2018-03-28\",\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": false\r\n }\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"f73a2730-15b3-44da-8af9-9ccecfb2e60f"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"c656b40b-80cf-4fdd-b015-17aec9d17e1a"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11994"
],
"x-ms-correlation-request-id": [
"d8d8f84c-d6ea-4f76-a5f1-61619a9c6184"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070105Z:d8d8f84c-d6ea-4f76-a5f1-61619a9c6184"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:04 GMT"
],
"Content-Length": [
"408"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"cors\": {\r\n \"corsRules\": []\r\n },\r\n \"defaultServiceVersion\": \"2018-03-28\",\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": false\r\n }\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"e9080315-8f65-4370-8a4b-29eb05b773d2"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"4210189d-6631-4db2-8f50-dc7c8a735154"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11993"
],
"x-ms-correlation-request-id": [
"9064ac17-e661-4c8a-8b12-d8e490acfd42"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070106Z:9064ac17-e661-4c8a-8b12-d8e490acfd42"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:05 GMT"
],
"Content-Length": [
"416"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"cors\": {\r\n \"corsRules\": []\r\n },\r\n \"defaultServiceVersion\": \"2018-03-28\",\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": true,\r\n \"days\": 3\r\n }\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5My9ibG9iU2VydmljZXMvZGVmYXVsdD9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"cc3716a3-d481-427f-af67-714c8c213266"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"8a0da551-6d10-4e3c-8f42-c71d4f1a907e"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11992"
],
"x-ms-correlation-request-id": [
"1299a74c-a4b6-4924-97c8-6c3fc585cde7"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070106Z:1299a74c-a4b6-4924-97c8-6c3fc585cde7"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:06 GMT"
],
"Content-Length": [
"408"
],
"Content-Type": [
"application/json"
],
"Expires": [
"-1"
]
},
"ResponseBody": "{\r\n \"sku\": {\r\n \"name\": \"Standard_GRS\",\r\n \"tier\": \"Standard\"\r\n },\r\n \"id\": \"/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793/blobServices/default\",\r\n \"name\": \"default\",\r\n \"type\": \"Microsoft.Storage/storageAccounts/blobServices\",\r\n \"properties\": {\r\n \"cors\": {\r\n \"corsRules\": []\r\n },\r\n \"defaultServiceVersion\": \"2018-03-28\",\r\n \"deleteRetentionPolicy\": {\r\n \"enabled\": false\r\n }\r\n }\r\n}",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourceGroups/pstestrg1793/providers/Microsoft.Storage/storageAccounts/stopstestrg1793?api-version=2019-06-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlR3JvdXBzL3BzdGVzdHJnMTc5My9wcm92aWRlcnMvTWljcm9zb2Z0LlN0b3JhZ2Uvc3RvcmFnZUFjY291bnRzL3N0b3BzdGVzdHJnMTc5Mz9hcGktdmVyc2lvbj0yMDE5LTA2LTAx",
"RequestMethod": "DELETE",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"540d8a83-842f-4cb9-ae23-6938c72a1c9e"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Storage.StorageManagementClient/17.2.0.0"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-request-id": [
"d9fc68f6-9f67-4d64-b621-b3faf17f1e61"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"Server": [
"Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0"
],
"x-ms-ratelimit-remaining-subscription-deletes": [
"14999"
],
"x-ms-correlation-request-id": [
"b5339d3b-6768-4783-ad2b-21f26c12355c"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070112Z:b5339d3b-6768-4783-ad2b-21f26c12355c"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:12 GMT"
],
"Content-Type": [
"text/plain; charset=utf-8"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/resourcegroups/pstestrg1793?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL3Jlc291cmNlZ3JvdXBzL3BzdGVzdHJnMTc5Mz9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "DELETE",
"RequestBody": "",
"RequestHeaders": {
"x-ms-client-request-id": [
"4db23633-c893-4c24-97d7-1f69074e9b24"
],
"Accept-Language": [
"en-US"
],
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"Location": [
"https://management.azure.com/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01"
],
"Retry-After": [
"15"
],
"x-ms-ratelimit-remaining-subscription-deletes": [
"14999"
],
"x-ms-request-id": [
"0b951d8f-e993-49ee-bfbc-3453427299ae"
],
"x-ms-correlation-request-id": [
"0b951d8f-e993-49ee-bfbc-3453427299ae"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070115Z:0b951d8f-e993-49ee-bfbc-3453427299ae"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:15 GMT"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 202
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL29wZXJhdGlvbnJlc3VsdHMvZXlKcWIySkpaQ0k2SWxKRlUwOVZVa05GUjFKUFZWQkVSVXhGVkVsUFRrcFBRaTFRVTFSRlUxUlNSekUzT1RNdFYwVlRWRlZUSWl3aWFtOWlURzlqWVhScGIyNGlPaUozWlhOMGRYTWlmUT9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"Location": [
"https://management.azure.com/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01"
],
"Retry-After": [
"15"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11998"
],
"x-ms-request-id": [
"8a15983a-4710-41e5-9d20-3c7ae7678970"
],
"x-ms-correlation-request-id": [
"8a15983a-4710-41e5-9d20-3c7ae7678970"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070131Z:8a15983a-4710-41e5-9d20-3c7ae7678970"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:30 GMT"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 202
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL29wZXJhdGlvbnJlc3VsdHMvZXlKcWIySkpaQ0k2SWxKRlUwOVZVa05GUjFKUFZWQkVSVXhGVkVsUFRrcFBRaTFRVTFSRlUxUlNSekUzT1RNdFYwVlRWRlZUSWl3aWFtOWlURzlqWVhScGIyNGlPaUozWlhOMGRYTWlmUT9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"Location": [
"https://management.azure.com/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01"
],
"Retry-After": [
"15"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11997"
],
"x-ms-request-id": [
"0596cc8e-00b4-4671-ad3b-ec61e49c4530"
],
"x-ms-correlation-request-id": [
"0596cc8e-00b4-4671-ad3b-ec61e49c4530"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070146Z:0596cc8e-00b4-4671-ad3b-ec61e49c4530"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:01:46 GMT"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 202
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL29wZXJhdGlvbnJlc3VsdHMvZXlKcWIySkpaQ0k2SWxKRlUwOVZVa05GUjFKUFZWQkVSVXhGVkVsUFRrcFBRaTFRVTFSRlUxUlNSekUzT1RNdFYwVlRWRlZUSWl3aWFtOWlURzlqWVhScGIyNGlPaUozWlhOMGRYTWlmUT9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11996"
],
"x-ms-request-id": [
"9ddcb8c6-2fd7-45f4-bfc0-7f132611cc06"
],
"x-ms-correlation-request-id": [
"9ddcb8c6-2fd7-45f4-bfc0-7f132611cc06"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070201Z:9ddcb8c6-2fd7-45f4-bfc0-7f132611cc06"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:02:01 GMT"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 200
},
{
"RequestUri": "/subscriptions/45b60d85-fd72-427a-a708-f994d26e593e/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1QU1RFU1RSRzE3OTMtV0VTVFVTIiwiam9iTG9jYXRpb24iOiJ3ZXN0dXMifQ?api-version=2016-09-01",
"EncodedRequestUri": "L3N1YnNjcmlwdGlvbnMvNDViNjBkODUtZmQ3Mi00MjdhLWE3MDgtZjk5NGQyNmU1OTNlL29wZXJhdGlvbnJlc3VsdHMvZXlKcWIySkpaQ0k2SWxKRlUwOVZVa05GUjFKUFZWQkVSVXhGVkVsUFRrcFBRaTFRVTFSRlUxUlNSekUzT1RNdFYwVlRWRlZUSWl3aWFtOWlURzlqWVhScGIyNGlPaUozWlhOMGRYTWlmUT9hcGktdmVyc2lvbj0yMDE2LTA5LTAx",
"RequestMethod": "GET",
"RequestBody": "",
"RequestHeaders": {
"User-Agent": [
"FxVersion/4.6.29220.03",
"OSName/Windows",
"OSVersion/Microsoft.Windows.10.0.19041.",
"Microsoft.Azure.Management.Internal.Resources.ResourceManagementClient/1.3.23"
]
},
"ResponseHeaders": {
"Cache-Control": [
"no-cache"
],
"Pragma": [
"no-cache"
],
"x-ms-ratelimit-remaining-subscription-reads": [
"11995"
],
"x-ms-request-id": [
"db284fe1-2802-4d84-bf1b-db5dd788123d"
],
"x-ms-correlation-request-id": [
"db284fe1-2802-4d84-bf1b-db5dd788123d"
],
"x-ms-routing-request-id": [
"SOUTHEASTASIA:20200921T070202Z:db284fe1-2802-4d84-bf1b-db5dd788123d"
],
"Strict-Transport-Security": [
"max-age=31536000; includeSubDomains"
],
"X-Content-Type-Options": [
"nosniff"
],
"Date": [
"Mon, 21 Sep 2020 07:02:01 GMT"
],
"Expires": [
"-1"
],
"Content-Length": [
"0"
]
},
"ResponseBody": "",
"StatusCode": 200
}
],
"Names": {
"Test-StorageBlobServiceProperties": [
"pstestrg1793"
]
},
"Variables": {
"SubscriptionId": "45b60d85-fd72-427a-a708-f994d26e593e"
}
}
/**
* Copyright (C) 2018-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
* as published by MongoDB, Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Server Side Public License for more details.
*
* You should have received a copy of the Server Side Public License
* along with this program. If not, see
* <http://www.mongodb.com/licensing/server-side-public-license>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the Server Side Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
#include "mongo/db/jsobj.h"
#include "mongo/s/catalog/type_mongos.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/time_support.h"
namespace {
using namespace mongo;
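// Validation tests for MongosType: each "Missing*" case builds a BSON document with
// one field omitted and checks whether parsing fails (NoSuchKey/TypeMismatch) or the
// field is treated as optional.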
TEST(Validity, MissingName) {
BSONObj obj = BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1))
<< MongosType::uptime(100) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingPing) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::uptime(100) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingUp) {
BSONObj obj =
BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingWaiting) {
BSONObj obj =
BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingMongoVersion) {
BSONObj obj =
BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
<< MongosType::waiting(false) << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
MongosType& mtype = mongosTypeResult.getValue();
/**
* Note: mongoVersion should eventually become mandatory, but is optional now
* for backward compatibility reasons.
*/
ASSERT_OK(mtype.validate());
}
TEST(Validity, MissingConfigVersion) {
BSONObj obj =
BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
<< MongosType::waiting(false) << MongosType::mongoVersion("x.x.x")
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
MongosType& mtype = mongosTypeResult.getValue();
/**
* Note: configVersion should eventually become mandatory, but is optional now
* for backward compatibility reasons.
*/
ASSERT_OK(mtype.validate());
}
TEST(Validity, MissingAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
<< MongosType::uptime(100) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
MongosType& mType = mongosTypeResult.getValue();
// advisoryHostFQDNs is optional
ASSERT_OK(mType.validate());
}
TEST(Validity, EmptyAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
<< MongosType::uptime(100) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
MongosType& mType = mongosTypeResult.getValue();
ASSERT_OK(mType.validate());
ASSERT_EQUALS(mType.getAdvisoryHostFQDNs().size(), 0UL);
}
TEST(Validity, BadTypeAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
<< MongosType::uptime(100) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo" << 0 << "baz")));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::TypeMismatch, mongosTypeResult.getStatus());
}
TEST(Validity, Valid) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
<< MongosType::uptime(100) << MongosType::waiting(false)
<< MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo"
<< "bar"
<< "baz")));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
MongosType& mType = mongosTypeResult.getValue();
ASSERT_OK(mType.validate());
ASSERT_EQUALS(mType.getName(), "localhost:27017");
ASSERT_EQUALS(mType.getPing(), Date_t::fromMillisSinceEpoch(1));
ASSERT_EQUALS(mType.getUptime(), 100);
ASSERT_EQUALS(mType.getWaiting(), false);
ASSERT_EQUALS(mType.getMongoVersion(), "x.x.x");
ASSERT_EQUALS(mType.getConfigVersion(), 0);
ASSERT_EQUALS(mType.getAdvisoryHostFQDNs().size(), 3UL);
ASSERT_EQUALS(mType.getAdvisoryHostFQDNs()[0], "foo");
ASSERT_EQUALS(mType.getAdvisoryHostFQDNs()[1], "bar");
ASSERT_EQUALS(mType.getAdvisoryHostFQDNs()[2], "baz");
}
TEST(Validity, BadType) {
BSONObj obj = BSON(MongosType::name() << 0);
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::TypeMismatch, mongosTypeResult.getStatus());
}
} // unnamed namespace
package epic.dense
import breeze.linalg._
import breeze.linalg.operators.OpMulMatrix
import epic.features.SegmentedIndex
import epic.framework.Feature
import breeze.util.Index
import scala.runtime.ScalaRunTime
import scala.util.Random
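/**
 * Output transform whose i-th output is a low-rank bilinear form of the inner
 * transform's activations: y_i = (L_i h) . (R_i h), with one (L_i, R_i) pair of
 * rank-numRanks projection matrices per output.
 */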
case class LowRankQuadraticTransform[FV](numOutputs: Int, numRanks: Int, numLeftInputs: Int, numRightInputs: Int, innerTransform: Transform[FV, DenseVector[Double]]) extends OutputTransform[FV, DenseVector[Double]] {
val neurons = (0 until numOutputs).map(i => new LowRankQuadraticTransformNeuron(numRanks, numLeftInputs, numRightInputs))
val neuronIndex = SegmentedIndex(neurons.map(_.index):_*)
val index = SegmentedIndex(neuronIndex, innerTransform.index)
def extractLayerAndPenultimateLayer(weights: DenseVector[Double], forTrain: Boolean) = {
val subTransforms = neurons.indices.map(i => neurons(i).extractLayer(weights(neuronIndex.componentOffset(i) until neuronIndex.componentOffset(i) + neuronIndex.indices(i).size)))
val innerLayer = innerTransform.extractLayer(weights(index.componentOffset(1) to -1), forTrain)
new OutputLayer(subTransforms, innerLayer) -> innerLayer
}
// def extractLayer(weights: DenseVector[Double]) = {
// val subTransforms = (0 until neurons.size).map(i => neurons(i).extractLayer(weights(neuronIndex.componentOffset(i) until neuronIndex.componentOffset(i) + neuronIndex.indices(i).size)))
// new Layer(subTransforms, innerTransform.extractLayer(weights(index.componentOffset(1) to -1)))
// }
def initialWeightVector(initWeightsScale: Double, rng: Random, outputLayer: Boolean, spec: String) = {
val subVects = DenseVector.vertcat(neurons.map(_.initialWeightVector(initWeightsScale, rng, outputLayer, spec)):_*)
DenseVector.vertcat(subVects, innerTransform.initialWeightVector(initWeightsScale, rng, outputLayer, spec))
}
def clipHiddenWeightVectors(weights: DenseVector[Double], norm: Double, outputLayer: Boolean) {
innerTransform.clipHiddenWeightVectors(weights(index.componentOffset(1) to -1), norm, outputLayer)
}
def getInterestingWeightIndicesForGradientCheck(offset: Int): Seq[Int] = {
(offset until offset + Math.min(10, index.indices(0).size)) ++ innerTransform.getInterestingWeightIndicesForGradientCheck(offset + index.indices(0).size)
}
case class OutputLayer(sublayers: Seq[LRQTNLayer], innerLayer: innerTransform.Layer) extends OutputTransform.OutputLayer[FV,DenseVector[Double]] {
override val index = LowRankQuadraticTransform.this.index
val neuronIndex = LowRankQuadraticTransform.this.neuronIndex
def activations(fv: FV) = {
val innerActivations = innerLayer.activations(fv)
DenseVector(Array.tabulate(sublayers.size)(i => sublayers(i).activations(innerActivations)(0)))
}
def activationsFromPenultimateDot(innerLayerActivations: DenseVector[Double], sparseIdx: Int): Double = {
sublayers(sparseIdx).activations(innerLayerActivations)(0)
}
def tallyDerivative(deriv: DenseVector[Double], _scale: =>Vector[Double], fv: FV) = {
val innerActivations = innerLayer.activations(fv)
sublayers.indices.foreach { i =>
sublayers(i).tallyDerivative(deriv(neuronIndex.componentOffset(i) until neuronIndex.componentOffset(i) + neuronIndex.indices(i).size), _scale(i), innerActivations)
}
}
def applyBatchNormalization(inputs: scala.collection.GenTraversable[FV]) = innerLayer.applyBatchNormalization(inputs)
}
}
/**
 * Kept as a separate class because nesting it previously caused issues.
*/
case class LowRankQuadraticTransformNeuron(numRanks: Int, numLeftInputs: Int, numRightInputs: Int) {
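  // Parameter vector is two stacked weight blocks: a numRanks x numLeftInputs left
  // matrix followed by a numRanks x numRightInputs right matrix (the `false` flag on
  // AffineTransform.Index appears to disable the bias term).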
val index = SegmentedIndex(new AffineTransform.Index(numRanks, numLeftInputs, false), new AffineTransform.Index(numRanks, numRightInputs, false))
def extractLayer(weights: DenseVector[Double]) = {
val lhsSize = numRanks * numLeftInputs
val rhsSize = numRanks * numRightInputs
val lhsMat = weights(0 until lhsSize).asDenseMatrix.reshape(numRanks, numLeftInputs, view = View.Require)
val rhsMat = weights(lhsSize until (lhsSize + rhsSize)).asDenseMatrix.reshape(numRanks, numRightInputs, view = View.Require)
new LRQTNLayer(lhsMat, rhsMat, index, numRanks, numLeftInputs, numRightInputs)
}
def initialWeightVector(initWeightsScale: Double, rng: Random, outputLayer: Boolean, spec: String) = {
if (spec == "magic") {
DenseVector.vertcat(AffineTransform.getMagicAffineWeights(index.indices(0).size, numLeftInputs, numRanks, initWeightsScale, rng),
AffineTransform.getMagicAffineWeights(index.indices(1).size, numRightInputs, numRanks, initWeightsScale, rng))
} else {
DenseVector.vertcat(AffineTransform.getGaussianAffineWeights(index.indices(0).size, initWeightsScale, rng),
AffineTransform.getGaussianAffineWeights(index.indices(1).size, initWeightsScale, rng))
}
}
def clipHiddenWeightVectors(weights: DenseVector[Double], norm: Double, outputLayer: Boolean) {
}
}
case class LRQTNLayer(lhsWeights: DenseMatrix[Double], rhsWeights: DenseMatrix[Double], index: Index[Feature], numRanks: Int, numLeftInputs: Int, numRightInputs: Int) {
val lhsWeightst = lhsWeights.t
val rhsWeightst = rhsWeights.t
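  // Bilinear low-rank form: project fv through both matrices and take the dot
  // product, i.e. (L fv) . (R fv) = fv^T L^T R fv.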
def activations(fv: DenseVector[Double]) = {
val lhsProj = lhsWeights * fv
val rhsProj = rhsWeights * fv
val dotProd = lhsProj.dot(rhsProj)
// println(dotProd + " " + lhsProj.data.toSeq + " " + rhsProj.data.toSeq)
DenseVector(dotProd)
}
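  // Gradient of scale * (L v).(R v): dL = scale * (R v) v^T and dR = scale * (L v) v^T,
  // accumulated into the corresponding slices of deriv below.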
def tallyDerivative(deriv: DenseVector[Double], _scale: =>Vector[Double], fv: DenseVector[Double]) = {
// println("SCALE: " + _scale)
val scale = _scale(0)
if (Math.abs(scale) > 1e-6) {
val lhsSize = numRanks * numLeftInputs
val rhsSize = numRanks * numRightInputs
// println(deriv.size + " " + lhsSize + " " + numRanks + " " + numLeftInputs + " " + rhsSize)
val lhsDeriv = deriv(0 until lhsSize).asDenseMatrix.reshape(numRanks, numLeftInputs, view = View.Require)
val rhsDeriv = deriv(lhsSize until lhsSize + rhsSize).asDenseMatrix.reshape(numRanks, numRightInputs, view = View.Require)
val innerActs = fv
val lhsProj = lhsWeights * innerActs
val rhsProj = rhsWeights * innerActs
// Smart way
lhsDeriv += rhsProj * innerActs.t * scale
rhsDeriv += lhsProj * innerActs.t * scale
// Dumb way
// for (r <- 0 until lhsWeights.rows) {
// for (i <- 0 until lhsWeights.cols) {
// lhsDeriv(r, i) += scale * innerActs(i) * rhsProj(r)
// }
// for (i <- 0 until rhsWeights.cols) {
// rhsDeriv(r, i) += scale * innerActs(i) * lhsProj(r)
// }
// }
require(deriv.size == lhsSize + rhsSize, "Backpropagating through LowRankQuadraticTransform is not currently supported")
}
}
}
//}
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.keycloak.models.sessions.infinispan.entities.wildfly;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
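/**
 * Adapts {@link LoginFailureEntity}'s Infinispan externalizer for use in
 * WildFly-managed Infinispan caches.
 */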
public class LoginFailureEntityWFExternalizer extends InfinispanExternalizerAdapter<LoginFailureEntity> {
public LoginFailureEntityWFExternalizer() {
super(LoginFailureEntity.class, new LoginFailureEntity.ExternalizerImpl());
}
}
/*
* Copyright (c) 2016-2020 VMware, Inc. All Rights Reserved.
* This software is released under MIT license.
* The full license information can be found in LICENSE in the root directory of this project.
*/
import { Component } from '@angular/core';
import { ClrDatagridRowDetail } from './datagrid-row-detail';
import { DATAGRID_SPEC_PROVIDERS, TestContext } from './helpers.spec';
import { DatagridIfExpandService } from './datagrid-if-expanded.service';
import { ClrCommonStringsService } from '../../utils/i18n/common-strings.service';
export default function (): void {
describe('ClrDatagridRowDetail component', function () {
let context: TestContext<ClrDatagridRowDetail, FullTest>;
beforeEach(function () {
context = this.create(ClrDatagridRowDetail, FullTest, DATAGRID_SPEC_PROVIDERS);
});
it('projects content', function () {
expect(context.clarityElement.textContent.trim()).toMatch('Hello world');
});
it('adds the .datagrid-row-flex class to the host', function () {
expect(context.clarityElement.classList.contains('datagrid-row-flex')).toBe(true);
});
it('adds the .datagrid-row-detail class to the host', function () {
expect(context.clarityElement.classList.contains('datagrid-row-detail')).toBe(true);
});
it("adds the .datagrid-container class to the host if it doesn't contain cells", function () {
expect(context.clarityElement.classList.contains('datagrid-container')).toBe(true);
context.testComponent.cell = true;
context.detectChanges();
expect(context.clarityElement.classList.contains('datagrid-container')).toBe(false);
});
it('updates the Expand provider with the [clrDgReplace] input', function () {
const expand: DatagridIfExpandService = context.getClarityProvider(DatagridIfExpandService);
let expandState = false;
expand.replace.subscribe(state => {
expandState = state;
});
expect(expandState).toBe(false);
context.testComponent.replace = true;
context.detectChanges();
expect(expandState).toBe(true);
});
it('should add helper text', function () {
const commonStrings = new ClrCommonStringsService();
const rows: HTMLElement[] = context.clarityElement.querySelectorAll('.clr-sr-only');
const first = [
commonStrings.keys.dategridExpandableBeginningOf,
commonStrings.keys.dategridExpandableRowContent,
commonStrings.keys.dategridExpandableRowsHelperText,
];
const last = [commonStrings.keys.dategridExpandableEndOf, commonStrings.keys.dategridExpandableRowContent];
expect(rows[0].innerText).toBe(first.join(' '));
expect(rows[1].innerText).toBe(last.join(' '));
});
it('should add id to the root element', function () {
expect(context.clarityElement.getAttribute('id')).not.toBeNull();
});
});
}
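// Host component for the specs above; projects either plain text or a datagrid cell
// into clr-dg-row-detail depending on the `cell` flag.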
@Component({
template: `
<clr-dg-row-detail [clrDgReplace]="replace">
<ng-container *ngIf="!cell">Hello world</ng-container>
<clr-dg-cell *ngIf="cell">This is a cell</clr-dg-cell>
</clr-dg-row-detail>
`,
})
class FullTest {
public replace = false;
public cell = false;
}
https://www.caymus.com/
https://www.totalwine.com/wine/red-wine/cabernet-sauvignon/caymus-cabernet/p/164328010
https://drizly.com/wine/red-wine/cabernet-sauvignon/caymus-vineyards-cabernet-sauvignon/p2469
https://www.wagnerfamilyofwine.com/
https://www.wagnerfamilyofwine.com/Caymus_Vineyards
https://www.wine.com/product/caymus-napa-valley-cabernet-sauvignon-2017/527400
https://www.wine.com/list/wine/caymus/7155-8626
https://www.wine-searcher.com/find/caymus+cab+sauv+napa+valley+county+north+coast+california+usa
https://en.wikipedia.org/wiki/Caymus_Vineyards
https://www.vinfolio.com/producers/california-/caymus
https://applejack.com/Caymus-Cabernet-Sauvignon-Napa-Valley-750-ml
https://napavintners.com/vineyards/caymus-vineyards/
/*
* Copyright 2017-2019 original authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.micronaut.spring.annotation.context
import io.micronaut.context.annotation.Factory
import io.micronaut.spring.context.MicronautApplicationContext
import org.springframework.beans.factory.NoSuchBeanDefinitionException
import org.springframework.beans.factory.support.AbstractBeanDefinition
import org.springframework.beans.factory.support.RootBeanDefinition
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import org.springframework.context.annotation.Bean
import spock.lang.Specification
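/**
 * Verifies that a Spring AnnotationConfigApplicationContext with a Micronaut parent
 * context can resolve parent-defined beans through by-name and by-type autowiring.
 */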
class ParentApplicationContextSpec extends Specification {
void "test autowire by name beans are able to find beans in parent"() {
given:
def parent = new MicronautApplicationContext()
when:
parent.start()
def child = new AnnotationConfigApplicationContext()
child.setParent(parent)
def definition = new RootBeanDefinition(ChildBean)
definition.setAutowireCandidate(true)
definition.setAutowireMode(AbstractBeanDefinition.AUTOWIRE_BY_NAME)
child.registerBeanDefinition(
"child",
definition
)
child.refresh()
then:
child.getBean("child", ChildBean).myParentBean
child.getBean("child").myParentBean
when:"Accessing a bean that is not there"
child.getBean("notthere")
then:"A no such bean definition exception is thrown"
def e = thrown(NoSuchBeanDefinitionException)
e.message == 'No bean named \'notthere\' available'
when:"Access via the parent"
child.parent.getBean("notthere")
then:"A no such bean definition exception is thrown"
e = thrown(NoSuchBeanDefinitionException)
e.message == 'No bean named \'notthere\' available'
}
void "test autowire by type beans are able to find beans in parent"() {
given:
def parent = new MicronautApplicationContext()
parent.start()
def child = new AnnotationConfigApplicationContext()
child.setParent(parent)
def definition = new RootBeanDefinition(ChildBean)
definition.setAutowireCandidate(true)
definition.setAutowireMode(AbstractBeanDefinition.AUTOWIRE_BY_TYPE)
child.registerBeanDefinition(
"child",
definition
)
child.refresh()
expect:
child.getBean("child", ChildBean).myParentBean
child.getBean("child").myParentBean
}
@Factory
static class MyParentFactory {
@Bean("myParentBean")
MyParentBean myParentBean() {
new MyParentBean()
}
}
static class ChildBean {
MyParentBean myParentBean
}
static class MyParentBean {
}
}
// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm,freebsd
package unix
const (
sizeofPtr = 0x4
sizeofShort = 0x2
sizeofInt = 0x4
sizeofLong = 0x4
sizeofLongLong = 0x8
)
type (
_C_short int16
_C_int int32
_C_long int32
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int32
Pad_cgo_0 [4]byte
}
type Timeval struct {
Sec int64
Usec int32
Pad_cgo_0 [4]byte
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int32
Ixrss int32
Idrss int32
Isrss int32
Minflt int32
Majflt int32
Nswap int32
Inblock int32
Oublock int32
Msgsnd int32
Msgrcv int32
Nsignals int32
Nvcsw int32
Nivcsw int32
}
type Rlimit struct {
Cur int64
Max int64
}
type _Gid_t uint32
const (
S_IFMT = 0xf000
S_IFIFO = 0x1000
S_IFCHR = 0x2000
S_IFDIR = 0x4000
S_IFBLK = 0x6000
S_IFREG = 0x8000
S_IFLNK = 0xa000
S_IFSOCK = 0xc000
S_ISUID = 0x800
S_ISGID = 0x400
S_ISVTX = 0x200
S_IRUSR = 0x100
S_IWUSR = 0x80
S_IXUSR = 0x40
)
type Stat_t struct {
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Birthtimespec Timespec
}
type Statfs_t struct {
Version uint32
Type uint32
Flags uint64
Bsize uint64
Iosize uint64
Blocks uint64
Bfree uint64
Bavail int64
Files uint64
Ffree int64
Syncwrites uint64
Asyncwrites uint64
Syncreads uint64
Asyncreads uint64
Spare [10]uint64
Namemax uint32
Owner uint32
Fsid Fsid
Charspare [80]int8
Fstypename [16]int8
Mntfromname [88]int8
Mntonname [88]int8
}
type Flock_t struct {
Start int64
Len int64
Pid int32
Type int16
Whence int16
Sysid int32
Pad_cgo_0 [4]byte
}
type Dirent struct {
Fileno uint32
Reclen uint16
Type uint8
Namlen uint8
Name [256]int8
}
type Fsid struct {
Val [2]int32
}
const (
PathMax = 0x400
)
const (
FADV_NORMAL = 0x0
FADV_RANDOM = 0x1
FADV_SEQUENTIAL = 0x2
FADV_WILLNEED = 0x3
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
)
type RawSockaddrInet4 struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]int8
}
type RawSockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Len uint8
Family uint8
Path [104]int8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [46]int8
}
type RawSockaddr struct {
Len uint8
Family uint8
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [92]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint32
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPMreqn struct {
Multiaddr [4]byte /* in_addr */
Address [4]byte /* in_addr */
Ifindex int32
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type Msghdr struct {
Name *byte
Namelen uint32
Iov *Iovec
Iovlen int32
Control *byte
Controllen uint32
Flags int32
}
type Cmsghdr struct {
Len uint32
Level int32
Type int32
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Filt [8]uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x6c
SizeofSockaddrUnix = 0x6a
SizeofSockaddrDatalink = 0x36
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
SizeofIPv6Mreq = 0x14
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
)
const (
PTRACE_TRACEME = 0x0
PTRACE_CONT = 0x7
PTRACE_KILL = 0x8
)
type Kevent_t struct {
Ident uint32
Filter int16
Flags uint16
Fflags uint32
Data int32
Udata *byte
}
type FdSet struct {
X__fds_bits [32]uint32
}
const (
sizeofIfMsghdr = 0xa8
SizeofIfMsghdr = 0x70
sizeofIfData = 0x98
SizeofIfData = 0x60
SizeofIfaMsghdr = 0x14
SizeofIfmaMsghdr = 0x10
SizeofIfAnnounceMsghdr = 0x18
SizeofRtMsghdr = 0x5c
SizeofRtMetrics = 0x38
)
type ifMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Data ifData
}
type IfMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Data IfData
}
type ifData struct {
Type uint8
Physical uint8
Addrlen uint8
Hdrlen uint8
Link_state uint8
Vhid uint8
Datalen uint16
Mtu uint32
Metric uint32
Baudrate uint64
Ipackets uint64
Ierrors uint64
Opackets uint64
Oerrors uint64
Collisions uint64
Ibytes uint64
Obytes uint64
Imcasts uint64
Omcasts uint64
Iqdrops uint64
Oqdrops uint64
Noproto uint64
Hwassist uint64
X__ifi_epoch [8]byte
X__ifi_lastchange [16]byte
}
type IfData struct {
Type uint8
Physical uint8
Addrlen uint8
Hdrlen uint8
Link_state uint8
Spare_char1 uint8
Spare_char2 uint8
Datalen uint8
Mtu uint32
Metric uint32
Baudrate uint32
Ipackets uint32
Ierrors uint32
Opackets uint32
Oerrors uint32
Collisions uint32
Ibytes uint32
Obytes uint32
Imcasts uint32
Omcasts uint32
Iqdrops uint32
Noproto uint32
Hwassist uint32
Pad_cgo_0 [4]byte
Epoch int64
Lastchange Timeval
}
type IfaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Metric int32
}
type IfmaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
}
type IfAnnounceMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Name [16]int8
What uint16
}
type RtMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Pad_cgo_0 [2]byte
Flags int32
Addrs int32
Pid int32
Seq int32
Errno int32
Fmask int32
Inits uint32
Rmx RtMetrics
}
type RtMetrics struct {
Locks uint32
Mtu uint32
Hopcount uint32
Expire uint32
Recvpipe uint32
Sendpipe uint32
Ssthresh uint32
Rtt uint32
Rttvar uint32
Pksent uint32
Weight uint32
Filler [3]uint32
}
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfZbuf = 0xc
SizeofBpfProgram = 0x8
SizeofBpfInsn = 0x8
SizeofBpfHdr = 0x20
SizeofBpfZbufHeader = 0x20
)
type BpfVersion struct {
Major uint16
Minor uint16
}
type BpfStat struct {
Recv uint32
Drop uint32
}
type BpfZbuf struct {
Bufa *byte
Bufb *byte
Buflen uint32
}
type BpfProgram struct {
Len uint32
Insns *BpfInsn
}
type BpfInsn struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type BpfHdr struct {
Tstamp Timeval
Caplen uint32
Datalen uint32
Hdrlen uint16
Pad_cgo_0 [6]byte
}
type BpfZbufHeader struct {
Kernel_gen uint32
Kernel_len uint32
User_gen uint32
X_bzh_pad [5]uint32
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
const (
AT_FDCWD = -0x64
AT_REMOVEDIR = 0x800
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x200
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLERR = 0x8
POLLHUP = 0x10
POLLIN = 0x1
POLLINIGNEOF = 0x2000
POLLNVAL = 0x20
POLLOUT = 0x4
POLLPRI = 0x2
POLLRDBAND = 0x80
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
)
type CapRights struct {
Rights [2]uint64
}
type Utsname struct {
Sysname [256]byte
Nodename [256]byte
Release [256]byte
Version [256]byte
Machine [256]byte
}
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package currency
import (
"time"
"golang.org/x/text/language"
)
// This file contains code common to gen.go and the package code.
const (
cashShift = 3
roundMask = 0x7
nonTenderBit = 0x8000
)
// currencyInfo contains information about a currency.
// bits 0..2: index into roundings for standard rounding
// bits 3..5: index into roundings for cash rounding
type currencyInfo byte
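// For example, given a currencyInfo value c, roundings[c&roundMask] is the
// standard rounding and roundings[(c>>cashShift)&roundMask] is the cash
// rounding for that currency.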
// roundingType defines the scale (number of fractional decimals) and increments
// in terms of units of size 10^-scale. For example, for scale == 2 and
// increment == 1, the currency is rounded to units of 0.01.
type roundingType struct {
scale, increment uint8
}
// roundings contains rounding data for currencies. This struct is
// created by hand as it is very unlikely to change much.
var roundings = [...]roundingType{
{2, 1}, // default
{0, 1},
{1, 1},
{3, 1},
{4, 1},
{2, 5}, // cash rounding alternative
{2, 50},
}
// regionToCode returns a 16-bit region code. Only two-letter codes are
// supported. (Three-letter codes are not needed.)
func regionToCode(r language.Region) uint16 {
if s := r.String(); len(s) == 2 {
return uint16(s[0])<<8 | uint16(s[1])
}
return 0
}
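// For example, regionToCode for the region "US" yields 0x5553
// ('U'<<8 | 'S'), and 0 for any region it cannot encode.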
func toDate(t time.Time) uint32 {
y := t.Year()
if y == 1 {
return 0
}
date := uint32(y) << 4
date |= uint32(t.Month())
date <<= 5
date |= uint32(t.Day())
return date
}
func fromDate(date uint32) time.Time {
return time.Date(int(date>>9), time.Month((date>>5)&0xf), int(date&0x1f), 0, 0, 0, 0, time.UTC)
}
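// Worked example: toDate packs a date as year<<9 | month<<5 | day, so
// 15 March 2020 becomes 2020<<9 | 3<<5 | 15 = 1034351, and
// fromDate(1034351) returns 2020-03-15 00:00:00 UTC.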
Filter 1: ON PK Fc 26 Hz Gain 5.9 dB Q 0.69
Filter 2: ON PK Fc 3442 Hz Gain 6.0 dB Q 1.93
Filter 3: ON PK Fc 5203 Hz Gain -7.2 dB Q 4.51
Filter 4: ON PK Fc 8136 Hz Gain -10.9 dB Q 4.00
Filter 5: ON PK Fc 16321 Hz Gain 6.6 dB Q 0.22
Filter 6: ON PK Fc 290 Hz Gain -2.7 dB Q 0.80
Filter 7: ON PK Fc 1426 Hz Gain 2.5 dB Q 0.65
Filter 8: ON PK Fc 1534 Hz Gain -4.1 dB Q 1.60
Filter 9: ON PK Fc 11318 Hz Gain 1.9 dB Q 1.67
Filter 10: ON PK Fc 11565 Hz Gain -1.0 dB Q 0.48
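The ten lines above are an Equalizer APO parametric profile: each `Filter N: ON PK Fc <f> Hz Gain <g> dB Q <q>` line is one peaking biquad. Below is a minimal sketch of mapping one line to filter coefficients via the RBJ audio-EQ-cookbook formulas; the 48 kHz sample rate and the `peaking_biquad` helper are illustrative assumptions, not part of the profile:

import math

def peaking_biquad(fc, gain_db, q, fs=48000.0):
    """RBJ audio-EQ-cookbook peaking filter; returns unnormalized (b, a)
    coefficient triples (divide through by a[0] before use)."""
    a_lin = 10.0 ** (gain_db / 40.0)
    w0 = 2.0 * math.pi * fc / fs
    alpha = math.sin(w0) / (2.0 * q)
    b = [1.0 + alpha * a_lin, -2.0 * math.cos(w0), 1.0 - alpha * a_lin]
    a = [1.0 + alpha / a_lin, -2.0 * math.cos(w0), 1.0 - alpha / a_lin]
    return b, a

# Filter 1 above: 26 Hz, +5.9 dB, Q 0.69
b, a = peaking_biquad(26.0, 5.9, 0.69)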
//===--- llvm-mc-fuzzer.cpp - Fuzzer for the MC layer ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
#include "llvm-c/Target.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptionsCommandFlags.def"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ToolOutputFile.h"
using namespace llvm;
static cl::opt<std::string>
TripleName("triple", cl::desc("Target triple to assemble for, "
"see -version for available targets"));
static cl::opt<std::string>
MCPU("mcpu",
cl::desc("Target a specific cpu type (-mcpu=help for details)"),
cl::value_desc("cpu-name"), cl::init(""));
// This is useful for variable-length instruction sets.
static cl::opt<unsigned> InsnLimit(
"insn-limit",
cl::desc("Limit the number of instructions to process (0 for no limit)"),
cl::value_desc("count"), cl::init(0));
static cl::list<std::string>
MAttrs("mattr", cl::CommaSeparated,
cl::desc("Target specific attributes (-mattr=help for details)"),
cl::value_desc("a1,+a2,-a3,..."));
// The feature string derived from -mattr's values.
std::string FeaturesStr;
static cl::list<std::string>
FuzzerArgs("fuzzer-args", cl::Positional,
cl::desc("Options to pass to the fuzzer"), cl::ZeroOrMore,
cl::PositionalEatsArgs);
static std::vector<char *> ModifiedArgv;
enum OutputFileType {
OFT_Null,
OFT_AssemblyFile,
OFT_ObjectFile
};
static cl::opt<OutputFileType>
FileType("filetype", cl::init(OFT_AssemblyFile),
cl::desc("Choose an output file type:"),
cl::values(
clEnumValN(OFT_AssemblyFile, "asm",
"Emit an assembly ('.s') file"),
clEnumValN(OFT_Null, "null",
"Don't emit anything (for timing purposes)"),
clEnumValN(OFT_ObjectFile, "obj",
"Emit a native object ('.o') file")));
class LLVMFuzzerInputBuffer : public MemoryBuffer
{
public:
LLVMFuzzerInputBuffer(const uint8_t *data_, size_t size_)
: Data(reinterpret_cast<const char *>(data_)),
Size(size_) {
init(Data, Data+Size, false);
}
virtual BufferKind getBufferKind() const {
    return MemoryBuffer_Malloc; // Not disk-backed, so this is the closest
                                // kind, even though the underlying memory
                                // may actually come from an mmap or sbrk.
}
private:
const char *Data;
size_t Size;
};
static int AssembleInput(const char *ProgName, const Target *TheTarget,
SourceMgr &SrcMgr, MCContext &Ctx, MCStreamer &Str,
MCAsmInfo &MAI, MCSubtargetInfo &STI,
MCInstrInfo &MCII, MCTargetOptions &MCOptions) {
static const bool NoInitialTextSection = false;
std::unique_ptr<MCAsmParser> Parser(
createMCAsmParser(SrcMgr, Ctx, Str, MAI));
std::unique_ptr<MCTargetAsmParser> TAP(
TheTarget->createMCAsmParser(STI, *Parser, MCII, MCOptions));
if (!TAP) {
errs() << ProgName
<< ": error: this target '" << TripleName
<< "', does not support assembly parsing.\n";
abort();
}
Parser->setTargetParser(*TAP);
return Parser->Run(NoInitialTextSection);
}
int AssembleOneInput(const uint8_t *Data, size_t Size) {
const bool ShowInst = false;
const bool AsmVerbose = false;
const bool UseDwarfDirectory = true;
Triple TheTriple(Triple::normalize(TripleName));
SourceMgr SrcMgr;
std::unique_ptr<MemoryBuffer> BufferPtr(new LLVMFuzzerInputBuffer(Data, Size));
// Tell SrcMgr about this buffer, which is what the parser will pick up.
SrcMgr.AddNewSourceBuffer(std::move(BufferPtr), SMLoc());
static const std::vector<std::string> NoIncludeDirs;
SrcMgr.setIncludeDirs(NoIncludeDirs);
static std::string ArchName;
std::string Error;
const Target *TheTarget = TargetRegistry::lookupTarget(ArchName, TheTriple,
Error);
if (!TheTarget) {
errs() << "error: this target '" << TheTriple.normalize()
<< "/" << ArchName << "', was not found: '" << Error << "'\n";
abort();
}
std::unique_ptr<MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
if (!MRI) {
errs() << "Unable to create target register info!";
abort();
}
std::unique_ptr<MCAsmInfo> MAI(TheTarget->createMCAsmInfo(*MRI, TripleName));
if (!MAI) {
errs() << "Unable to create target asm info!";
abort();
}
MCObjectFileInfo MOFI;
MCContext Ctx(MAI.get(), MRI.get(), &MOFI, &SrcMgr);
static const bool UsePIC = false;
MOFI.InitMCObjectFileInfo(TheTriple, UsePIC, Ctx);
const unsigned OutputAsmVariant = 0;
std::unique_ptr<MCInstrInfo> MCII(TheTarget->createMCInstrInfo());
MCInstPrinter *IP = TheTarget->createMCInstPrinter(Triple(TripleName), OutputAsmVariant,
*MAI, *MCII, *MRI);
if (!IP) {
errs()
<< "error: unable to create instruction printer for target triple '"
<< TheTriple.normalize() << "' with assembly variant "
<< OutputAsmVariant << ".\n";
abort();
}
const char *ProgName = "llvm-mc-fuzzer";
std::unique_ptr<MCSubtargetInfo> STI(
TheTarget->createMCSubtargetInfo(TripleName, MCPU, FeaturesStr));
MCCodeEmitter *CE = nullptr;
MCAsmBackend *MAB = nullptr;
MCTargetOptions MCOptions = InitMCTargetOptionsFromFlags();
std::string OutputString;
raw_string_ostream Out(OutputString);
auto FOut = llvm::make_unique<formatted_raw_ostream>(Out);
std::unique_ptr<MCStreamer> Str;
if (FileType == OFT_AssemblyFile) {
Str.reset(TheTarget->createAsmStreamer(
Ctx, std::move(FOut), AsmVerbose,
UseDwarfDirectory, IP, CE, MAB, ShowInst));
} else {
assert(FileType == OFT_ObjectFile && "Invalid file type!");
std::error_code EC;
const std::string OutputFilename = "-";
auto Out =
llvm::make_unique<ToolOutputFile>(OutputFilename, EC, sys::fs::F_None);
if (EC) {
errs() << EC.message() << '\n';
abort();
}
// Don't waste memory on names of temp labels.
Ctx.setUseNamesOnTempLabels(false);
std::unique_ptr<buffer_ostream> BOS;
raw_pwrite_stream *OS = &Out->os();
if (!Out->os().supportsSeeking()) {
BOS = make_unique<buffer_ostream>(Out->os());
OS = BOS.get();
}
MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, Ctx);
MCAsmBackend *MAB = TheTarget->createMCAsmBackend(*MRI, TripleName, MCPU,
MCOptions);
Str.reset(TheTarget->createMCObjectStreamer(
TheTriple, Ctx, std::unique_ptr<MCAsmBackend>(MAB), *OS,
std::unique_ptr<MCCodeEmitter>(CE), *STI, MCOptions.MCRelaxAll,
MCOptions.MCIncrementalLinkerCompatible,
/*DWARFMustBeAtTheEnd*/ false));
}
const int Res = AssembleInput(ProgName, TheTarget, SrcMgr, Ctx, *Str, *MAI, *STI,
*MCII, MCOptions);
(void) Res;
return 0;
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
return AssembleOneInput(Data, Size);
}
extern "C" LLVM_ATTRIBUTE_USED int LLVMFuzzerInitialize(int *argc,
char ***argv) {
// The command line is unusual compared to other fuzzers due to the need to
// specify the target. Options like -triple, -mcpu, and -mattr work like
// their counterparts in llvm-mc, while -fuzzer-args collects options for the
// fuzzer itself.
//
// Examples:
//
// Fuzz the big-endian MIPS32R6 disassembler using 100,000 inputs of up to
// 4-bytes each and use the contents of ./corpus as the test corpus:
// llvm-mc-fuzzer -triple mips-linux-gnu -mcpu=mips32r6 -disassemble \
// -fuzzer-args -max_len=4 -runs=100000 ./corpus
//
// Infinitely fuzz the little-endian MIPS64R2 disassembler with the MSA
// feature enabled using up to 64-byte inputs:
// llvm-mc-fuzzer -triple mipsel-linux-gnu -mcpu=mips64r2 -mattr=msa \
// -disassemble -fuzzer-args ./corpus
//
// If your aim is to find instructions that are not tested, then it is
// advisable to constrain the maximum input size to a single instruction
// using -max_len as in the first example. This results in a test corpus of
// individual instructions that test unique paths. Without this constraint,
// there will be considerable redundancy in the corpus.
char **OriginalArgv = *argv;
LLVMInitializeAllTargetInfos();
LLVMInitializeAllTargetMCs();
LLVMInitializeAllAsmParsers();
cl::ParseCommandLineOptions(*argc, OriginalArgv);
// Rebuild the argv without the arguments llvm-mc-fuzzer consumed so that
// the driver can parse its arguments.
//
// FuzzerArgs cannot provide the non-const pointer that OriginalArgv needs.
// Re-use the strings from OriginalArgv instead of copying FuzzerArg to a
// non-const buffer to avoid the need to clean up when the fuzzer terminates.
ModifiedArgv.push_back(OriginalArgv[0]);
for (const auto &FuzzerArg : FuzzerArgs) {
for (int i = 1; i < *argc; ++i) {
if (FuzzerArg == OriginalArgv[i])
ModifiedArgv.push_back(OriginalArgv[i]);
}
}
*argc = ModifiedArgv.size();
*argv = ModifiedArgv.data();
// Package up features to be passed to target/subtarget
// We have to pass it via a global since the callback doesn't
// permit any user data.
if (MAttrs.size()) {
SubtargetFeatures Features;
for (unsigned i = 0; i != MAttrs.size(); ++i)
Features.AddFeature(MAttrs[i]);
FeaturesStr = Features.getString();
}
if (TripleName.empty())
TripleName = sys::getDefaultTargetTriple();
return 0;
}
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
struct packv_params_s
{
	uint64_t size;
packv_var_oft* var_func;
bszid_t bmid;
pack_t pack_schema;
};
typedef struct packv_params_s packv_params_t;
#define bli_cntl_packv_params_var_func( cntl ) \
\
	( ( (packv_params_t*) cntl->params )->var_func )
#define bli_cntl_packv_params_bmid( cntl ) \
\
	( ( (packv_params_t*) cntl->params )->bmid )
#define bli_cntl_packv_params_pack_schema( cntl ) \
\
	( ( (packv_params_t*) cntl->params )->pack_schema )
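// Example usage (illustrative): given a cntl_t* node whose params field
// points at a packv_params_t,
//
//   pack_t schema = bli_cntl_packv_params_pack_schema( cntl );
//
// reads the pack schema stored in that control tree node.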
// -----------------------------------------------------------------------------
cntl_t* bli_packv_cntl_obj_create
(
void_fp var_func,
void_fp packv_var_func,
bszid_t bmid,
pack_t pack_schema,
cntl_t* sub_node
);
// Returns global object of a current environment.
export default (() => {
if (typeof global !== 'undefined' && global.Math === Math) {
return global;
}
if (typeof self !== 'undefined' && self.Math === Math) {
return self;
}
if (typeof window !== 'undefined' && window.Math === Math) {
return window;
}
// eslint-disable-next-line no-new-func
return Function('return this')();
})();
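// Example usage (illustrative; the module path is hypothetical):
//   import globalObject from './global';
//   globalObject.setTimeout(() => {}, 0); // same object in Node, workers and browsers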
comment "linux-fusion needs a Linux kernel to be built"
depends on !BR2_LINUX_KERNEL
config BR2_PACKAGE_LINUX_FUSION
depends on BR2_LINUX_KERNEL
bool "linux-fusion communication layer for DirectFB multi"
help
DirectFB Communication Layer allowing multiple DirectFB
applications to run concurrently
/********************************************************************
* COPYRIGHT:
* Copyright (c) 1997-2009, International Business Machines Corporation and
* others. All Rights Reserved.
********************************************************************/
/********************************************************************************
*
* File CDETST.C
*
* Modification History:
* Name Description
* Madhu Katragadda Ported for C API
*********************************************************************************/
/**
* CollationGermanTest is a third level test class. This tests the locale
* specific primary, secondary and tertiary rules. For example, the ignorable
* character '-' in string "black-bird". The en_US locale uses the default
* collation rules as its sorting sequence.
*/
#include <stdlib.h>
#include "unicode/utypes.h"
#if !UCONFIG_NO_COLLATION
#include "unicode/ucol.h"
#include "unicode/uloc.h"
#include "cintltst.h"
#include "cdetst.h"
#include "ccolltst.h"
#include "callcoll.h"
#include "unicode/ustring.h"
#include "string.h"
static UCollator *myCollation;
const static UChar testSourceCases[][MAX_TOKEN_LEN] =
{
{0x0047/*'G'*/, 0x0072/*'r'*/, 0x00F6, 0x00DF, 0x0065/*'e'*/, 0x0000},
{0x0061/*'a'*/, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000},
{0x0054/*'T'*/, 0x00F6, 0x006e/*'n'*/, 0x0065/*'e'*/, 0x0000},
{0x0054/*'T'*/, 0x00F6, 0x006e/*'n'*/, 0x0065/*'e'*/, 0x0000},
{0x0054/*'T'*/, 0x00F6, 0x006e/*'n'*/, 0x0065/*'e'*/, 0x0000},
{0x0061/*'a'*/, 0x0308, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000},
{0x00E4, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000}, /*doubt in primary here */
{0x00E4, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000}, /*doubt in primary here*/
{0x0053/*'S'*/, 0x0074/*'t'*/, 0x0072/*'r'*/, 0x0061/*'a'*/, 0x00DF, 0x0065/*'e'*/, 0x0000},
{0x0065/*'e'*/, 0x0066/*'f'*/, 0x0067/*'g'*/, 0x0000},
{0x00E4, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000}, /*doubt in primary here*/
{0x0053/*'S'*/, 0x0074/*'t'*/, 0x0072/*'r'*/, 0x0061/*'a'*/, 0x00DF, 0x0065/*'e'*/, 0x0000}
};
const static UChar testTargetCases[][MAX_TOKEN_LEN] =
{
{0x0047/*'G'*/, 0x0072/*'r'*/, 0x006f/*'o'*/, 0x0073/*'s'*/, 0x0073/*'s'*/, 0x0069/*'i'*/, 0x0073/*'s'*/, 0x0074/*'t'*/, 0x0000},
{0x0061/*'a'*/, 0x0308, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000},
{0x0054/*'T'*/, 0x006f/*'o'*/, 0x006e/*'n'*/, 0x0000},
{0x0054/*'T'*/, 0x006f/*'o'*/, 0x0064/*'d'*/, 0x0000},
{0x0054/*'T'*/, 0x006f/*'o'*/, 0x0066/*'f'*/, 0x0075/*'u'*/, 0x0000},
{0x0041/*'A'*/, 0x0308, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000},
{0x0061/*'a'*/, 0x0308, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000}, /*doubt in primary here*/
{0x0061/*'a'*/, 0x0065/*'e'*/, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000}, /*doubt in primary here*/
{0x0053/*'S'*/, 0x0074/*'t'*/, 0x0072/*'r'*/, 0x0061/*'a'*/, 0x0073/*'s'*/, 0x0073/*'s'*/, 0x0065/*'e'*/, 0x0000},
{0x0065/*'e'*/, 0x0066/*'f'*/, 0x0067/*'g'*/, 0x0000},
{0x0061/*'a'*/, 0x0065/*'e'*/, 0x0062/*'b'*/, 0x0063/*'c'*/, 0x0000}, /*doubt in primary here*/
{0x0053/*'S'*/, 0x0074/*'t'*/, 0x0072/*'r'*/, 0x0061/*'a'*/, 0x0073/*'s'*/, 0x0073/*'s'*/, 0x0065/*'e'*/, 0x0000}
};
const static UCollationResult results[][2] =
{
/* Primary*/ /* Tertiary*/
{ UCOL_LESS, UCOL_LESS }, /*should be UCOL_GREATER for primary*/
{ UCOL_EQUAL, UCOL_LESS },
{ UCOL_GREATER, UCOL_GREATER },
{ UCOL_GREATER, UCOL_GREATER },
{ UCOL_GREATER, UCOL_GREATER },
{ UCOL_EQUAL, UCOL_LESS },
{ UCOL_EQUAL, UCOL_EQUAL }, /*should be UCOL_GREATER for primary*/
{ UCOL_LESS, UCOL_LESS }, /*should be UCOL_GREATER for primary*/
{ UCOL_EQUAL, UCOL_GREATER },
{ UCOL_EQUAL, UCOL_EQUAL },
{ UCOL_LESS, UCOL_LESS }, /*should be UCOL_GREATER for primary*/
{ UCOL_EQUAL, UCOL_GREATER }
};
void addGermanCollTest(TestNode** root)
{
addTest(root, &TestTertiary, "tscoll/cdetst/TestTertiary");
addTest(root, &TestPrimary, "tscoll/cdetst/TestPrimary");
}
static void TestTertiary( )
{
int32_t i;
UErrorCode status = U_ZERO_ERROR;
myCollation = ucol_open("de_DE", &status);
if(U_FAILURE(status)){
log_err_status(status, "ERROR: in creation of rule based collator: %s\n", myErrorName(status));
return;
}
log_verbose("Testing German Collation with Tertiary strength\n");
ucol_setAttribute(myCollation, UCOL_NORMALIZATION_MODE, UCOL_ON, &status);
ucol_setStrength(myCollation, UCOL_TERTIARY);
for (i = 0; i < 12 ; i++)
{
doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i][1]);
}
ucol_close(myCollation);
}
static void TestPrimary()
{
int32_t i;
UErrorCode status = U_ZERO_ERROR;
myCollation = ucol_open("de_DE", &status);
if(U_FAILURE(status)){
log_err_status(status, "ERROR: %s: in creation of rule based collator: %s\n", __FILE__, myErrorName(status));
return;
}
log_verbose("Testing German Collation with primary strength\n");
ucol_setStrength(myCollation, UCOL_PRIMARY);
for (i = 0; i < 12 ; i++)
{
doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i][0]);
}
ucol_close(myCollation);
}
#endif /* #if !UCONFIG_NO_COLLATION */
# -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import urllib
import urlparse
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from cStringIO import StringIO
from cookielib import CookieJar
from urllib2 import Request as U2Request
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri
from werkzeug.wsgi import get_host, get_current_url
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [StringIO(), 0, False]
if use_tempfile:
def write(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write = _closure[0].write
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in values.iterlists():
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write(chunk)
else:
if isinstance(value, unicode):
value = value.encode(charset)
write('\r\n\r\n' + value)
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = ', '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in data.iterlists():
for value in values:
yield key, value
else:
for key, values in data.iteritems():
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
if query_string is None and '?' in path:
path, query_string = path.split('?', 1)
self.charset = charset
if isinstance(path, unicode):
path = iri_to_uri(path, charset)
self.path = path
if base_url is not None:
if isinstance(base_url, unicode):
base_url = iri_to_uri(base_url, charset)
else:
base_url = url_fix(base_url, charset)
self.base_url = base_url
if isinstance(query_string, basestring):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, basestring):
self.input_stream = StringIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return urlparse.urlunsplit((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
            scheme = 'http'
            netloc = 'localhost'
            script_root = ''
else:
scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self.method in ('POST', 'PUT'):
if self._files:
return 'multipart/form-data'
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
self.close()
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = self.files.itervalues()
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception, e:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
values = url_encode(self.form, charset=self.charset)
content_length = len(values)
input_stream = StringIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
if isinstance(x, unicode):
x = x.encode(self.charset)
return urllib.unquote(x)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': self.query_string,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_list(self.charset):
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True):
self.application = application
if response_wrapper is None:
response_wrapper = lambda a, s, h: (a, s, h)
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.redirect_client = None
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
# handle redirects
redirect_chain = []
status_code = int(rv[1].split(None, 1)[0])
while status_code in (301, 302, 303, 305, 307) and follow_redirects:
if not self.redirect_client:
# assume that we're not using the user defined response wrapper
# so that we don't need any ugly hacks to get the status
# code from the response.
self.redirect_client = Client(self.application)
self.redirect_client.cookie_jar = self.cookie_jar
redirect = dict(rv[2])['Location']
scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(redirect)
base_url = urlparse.urlunsplit((scheme, netloc, '', '', '')).rstrip('/') + '/'
host = get_host(create_environ('/', base_url, query_string=qs)).split(':', 1)[0]
if get_host(environ).split(':', 1)[0] != host:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
redirect_chain.append((redirect, status_code))
# the redirect request should be a new request, and not be based on
# the old request
redirect_kwargs = {}
redirect_kwargs.update({
'path': script_root,
'base_url': base_url,
'query_string': qs,
'as_tuple': True,
'buffered': buffered,
'follow_redirects': False,
})
environ, rv = self.redirect_client.open(**redirect_kwargs)
status_code = int(rv[1].split(None, 1)[0])
# Prevent loops
if redirect_chain[-1] in redirect_chain[0:-1]:
raise ClientRedirectError("loop detected")
response = self.response_wrapper(*rv)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
localhost:80) or a full path to the request with scheme, netloc port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
raise exc_info[0], exc_info[1], exc_info[2]
response[:] = [status, headers]
return buffer.append
app_iter = app(environ, start_response)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
close_func = getattr(app_iter, 'close', None)
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have
# a response, chain the already received data with the already
# collected data and wrap it in a new `ClosingIterator` if
# we have a close callable.
else:
while not response:
buffer.append(app_iter.next())
if buffer:
close_func = getattr(app_iter, 'close', None)
app_iter = chain(buffer, app_iter)
if close_func is not None:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], response[1]
from werkzeug.wsgi import ClosingIterator
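# Minimal usage sketch (illustrative; ``hello_app`` is a stand-in application,
# not part of werkzeug): run a request against a tiny WSGI app and inspect the
# (app_iter, status, headers) triple returned by the default response wrapper.
if __name__ == '__main__':
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello World!\n']
    client = Client(hello_app)
    app_iter, status, headers = client.get('/hello?x=1')
    print status             # '200 OK'
    print ''.join(app_iter)  # 'Hello World!'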
/*
* Copyright 2017 Magnus Madsen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Console {
///
/// Alias for `Console.StdIn.readLine`.
///
pub def readLine(): Option[String] & Impure = Console/StdIn.readLine(())
///
/// Alias for `Console.StdOut.print`.
///
pub def print(s: String): Unit & Impure = Console/StdOut.print(s)
///
/// Alias for `Console.StdOut.printLine`.
///
pub def printLine(s: String): Unit & Impure = Console/StdOut.printLine(s)
///
/// Operations on the Standard Input Stream.
///
namespace StdIn {
///
/// Returns the next line read from the standard input stream.
///
pub def readLine(): Option[String] & Impure =
import flix.runtime.library.Console:readLine();
let s = readLine();
(if (Object.isNull(s)) None else Some(s))
}
///
/// Operations on the Standard Output Stream.
///
namespace StdOut {
///
/// Prints the given string `s` to the standard output stream.
///
pub def print(s: String): Unit & Impure =
import java.io.PrintStream.print(String);
getOutputStream().print(s)
///
/// Prints the given string `s` and a newline to the standard output stream.
///
pub def printLine(s: String): Unit & Impure =
import java.io.PrintStream.println(String);
getOutputStream().println(s)
///
/// Prints a newline to the standard output stream.
///
pub def newLine(): Unit & Impure =
import java.io.PrintStream.println();
getOutputStream().println()
///
/// Flushes the standard output stream.
///
pub def flush(): Unit & Impure =
import java.io.PrintStream.flush();
getOutputStream().flush()
///
/// Returns the standard output stream.
///
def getOutputStream(): ##java.io.PrintStream & Impure =
import get java.lang.System:out as getOut;
getOut()
}
///
/// Operations on the Standard Error Stream.
///
namespace StdErr {
///
/// Prints the given string `s` to the standard error stream.
///
pub def print(s: String): Unit & Impure =
import java.io.PrintStream.print(String);
getErrorStream().print(s)
///
/// Prints the given string `s` and a newline to the standard error stream.
///
pub def printLine(s: String): Unit & Impure =
import java.io.PrintStream.println(String);
getErrorStream().println(s)
///
/// Prints a newline to the standard error stream.
///
pub def newLine(): Unit & Impure =
import java.io.PrintStream.println();
getErrorStream().println()
///
/// Flushes the standard error stream.
///
pub def flush(): Unit & Impure =
import java.io.PrintStream.flush();
getErrorStream().flush()
///
/// Returns the standard error stream.
///
def getErrorStream(): ##java.io.PrintStream & Impure =
import get java.lang.System:err as getErr;
getErr()
}
///
/// An enum that represents the colors supported by a terminal.
///
pub enum ColorSupport {
///
/// The terminal does not support any colors.
///
case None,
///
/// The terminal supports ANSI colors.
///
case Ansi,
///
/// The terminal supports 24-bit colors.
///
case TrueColor
}
namespace Color {
///
/// Returns the string `s` formatted with the default blue color.
///
pub def blue(s: String): String =
use Console/Color.Default.Blue;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(Blue |> toRGB, s)
///
/// Returns the string `s` formatted with the default cyan color.
///
pub def cyan(s: String): String =
use Console/Color.Default.Cyan;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(Cyan |> toRGB, s)
///
/// Returns the string `s` formatted with the default green color.
///
pub def green(s: String): String =
use Console/Color.Default.Green;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(Green |> toRGB, s)
///
/// Returns the string `s` formatted with the default magenta color.
///
pub def magenta(s: String): String =
use Console/Color.Default.Magenta;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(Magenta |> toRGB, s)
///
/// Returns the string `s` formatted with the default red color.
///
pub def red(s: String): String =
use Console/Color.Default.Red;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(Red |> toRGB, s)
///
/// Returns the string `s` formatted with the default yellow color.
///
pub def yellow(s: String): String =
use Console/Color.Default.Yellow;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(Yellow |> toRGB, s)
///
/// Returns the string `s` formatted with the default white color.
///
pub def white(s: String): String =
use Console/Color.Default.White;
use Console/Color/Default.toRGB;
use Console.rgb;
rgb(White |> toRGB, s)
///
/// Default Colors.
///
pub enum Default {
case Blue
case Cyan
case Green
case Magenta
case Red
case Yellow
case White
}
///
/// Namespace for Default Colors.
///
namespace Default {
///
/// Returns the RGB-triplet for the given color `c`.
///
pub def toRGB(c: Console/Color.Default): (Int32, Int32, Int32) =
use Console/Color.Default;
match c {
case Default.Blue => (113, 190, 242)
case Default.Cyan => (102, 194, 205)
case Default.Green => (168, 204, 140)
case Default.Magenta => (210, 144, 228)
case Default.Red => (232, 131, 136)
case Default.Yellow => (219, 171, 121)
case Default.White => (185, 191, 202)
}
}
///
/// Ansi Colors.
///
pub enum Ansi {
case Black
case Red
case Green
case Yellow
case Blue
case Magenta
case Cyan
case White
case BrightBlack
case BrightRed
case BrightGreen
case BrightYellow
case BrightBlue
case BrightMagenta
case BrightCyan
case BrightWhite
}
///
/// Namespace for Ansi Colors.
///
namespace Ansi {
///
/// Down sample the given color `c` to an ANSI color code.
///
pub def downsample(c: (Int32, Int32, Int32)): Int32 =
use Console/Color.Ansi;
let candidates = Ansi.Black :: Ansi.Red :: Ansi.Green :: Ansi.Yellow :: Ansi.Blue :: Ansi.Magenta ::
Ansi.Cyan :: Ansi.White :: Ansi.BrightBlack :: Ansi.BrightRed :: Ansi.BrightGreen ::
Ansi.BrightYellow :: Ansi.BrightBlue :: Ansi.BrightMagenta :: Ansi.BrightCyan ::
Ansi.BrightWhite :: Nil;
nearest(c, Ansi.Black, Int32.maxValue(), candidates) |> toFG
///
/// Returns the ANSI color nearest to the given `target` color from the given list of `candidates`.
///
def nearest(target: (Int32, Int32, Int32), bestCandidate: Console/Color.Ansi, bestDistance: Int32, candidates: List[Console/Color.Ansi]): Console/Color.Ansi =
match candidates {
case Nil => bestCandidate
case currentCandidate :: remainingCandidates =>
let currentDistance = distanceSquared(target, toRGB(currentCandidate));
if (currentDistance < bestDistance)
nearest(target, currentCandidate, currentDistance, remainingCandidates)
else
nearest(target, bestCandidate, bestDistance, remainingCandidates)
}
///
/// Returns the square of the distance between the two colors `c1` and `c2`.
///
def distanceSquared(c1: (Int32, Int32, Int32), c2: (Int32, Int32, Int32)): Int32 =
let (r1, g1, b1) = c1;
let (r2, g2, b2) = c2;
(r1 - r2) * (r1 - r2) + (g1 - g2) * (g1 - g2) + (b1 - b2) * (b1 - b2)
///
/// Returns the foreground color of the given color `c`.
///
pub def toFG(c: Console/Color.Ansi): Int32 =
use Console/Color.Ansi;
match c {
case Ansi.Black => 30
case Ansi.Red => 31
case Ansi.Green => 32
case Ansi.Yellow => 33
case Ansi.Blue => 34
case Ansi.Magenta => 35
case Ansi.Cyan => 36
case Ansi.White => 37
case Ansi.BrightBlack => 90
case Ansi.BrightRed => 91
case Ansi.BrightGreen => 92
case Ansi.BrightYellow => 93
case Ansi.BrightBlue => 94
case Ansi.BrightMagenta => 95
case Ansi.BrightCyan => 96
case Ansi.BrightWhite => 97
}
///
/// Returns the RGB-triplet for the given color `c`.
///
/// Based on the Ubuntu settings.
///
/// See https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
///
pub def toRGB(c: Console/Color.Ansi): (Int32, Int32, Int32) =
use Console/Color.Ansi;
match c {
case Ansi.Black => ( 1, 1, 1)
case Ansi.Red => (222, 56, 43)
case Ansi.Green => ( 57, 181, 74)
case Ansi.Yellow => (255, 199, 6)
case Ansi.Blue => ( 0, 111, 184)
case Ansi.Magenta => (118, 38, 113)
case Ansi.Cyan => ( 44, 181, 233)
case Ansi.White => (204, 204, 204)
case Ansi.BrightBlack => (128, 128, 128)
case Ansi.BrightRed => (255, 0, 0)
case Ansi.BrightGreen => ( 0, 255, 0)
case Ansi.BrightYellow => (255, 255, 0)
case Ansi.BrightBlue => ( 0, 0, 255)
case Ansi.BrightMagenta => (255, 0, 255)
case Ansi.BrightCyan => ( 0, 255, 255)
case Ansi.BrightWhite => (255, 255, 255)
}
}
///
/// Web Colors.
///
pub enum Web {
case AliceBlue
case AntiqueWhite
case Aqua
case Aquamarine
case Azure
case Beige
case Bisque
case Black
case BlanchedAlmond
case Blue
case BlueViolet
case Brown
case BurlyWood
case CadetBlue
case Chartreuse
case Chocolate
case Coral
case CornflowerBlue
case Cornsilk
case Crimson
case Cyan
case DarkBlue
case DarkCyan
case DarkGoldenRod
case DarkGray
case DarkGrey
case DarkGreen
case DarkKhaki
case DarkMagenta
case DarkOliveGreen
case DarkOrange
case DarkOrchid
case DarkRed
case DarkSalmon
case DarkSeaGreen
case DarkSlateBlue
case DarkSlateGray
case DarkSlateGrey
case DarkTurquoise
case DarkViolet
case DeepPink
case DeepSkyBlue
case DimGray
case DimGrey
case DodgerBlue
case FireBrick
case FloralWhite
case ForestGreen
case Fuchsia
case Gainsboro
case GhostWhite
case Gold
case GoldenRod
case Gray
case Grey
case Green
case GreenYellow
case HoneyDew
case HotPink
case IndianRed
case Indigo
case Ivory
case Khaki
case Lavender
case LavenderBlush
case LawnGreen
case LemonChiffon
case LightBlue
case LightCoral
case LightCyan
case LightGoldenRodYellow
case LightGray
case LightGrey
case LightGreen
case LightPink
case LightSalmon
case LightSeaGreen
case LightSkyBlue
case LightSlateGray
case LightSlateGrey
case LightSteelBlue
case LightYellow
case Lime
case LimeGreen
case Linen
case Magenta
case Maroon
case MediumAquaMarine
case MediumBlue
case MediumOrchid
case MediumPurple
case MediumSeaGreen
case MediumSlateBlue
case MediumSpringGreen
case MediumTurquoise
case MediumVioletRed
case MidnightBlue
case MintCream
case MistyRose
case Moccasin
case NavajoWhite
case Navy
case OldLace
case Olive
case OliveDrab
case Orange
case OrangeRed
case Orchid
case PaleGoldenRod
case PaleGreen
case PaleTurquoise
case PaleVioletRed
case PapayaWhip
case PeachPuff
case Peru
case Pink
case Plum
case PowderBlue
case Purple
case RebeccaPurple
case Red
case RosyBrown
case RoyalBlue
case SaddleBrown
case Salmon
case SandyBrown
case SeaGreen
case SeaShell
case Sienna
case Silver
case SkyBlue
case SlateBlue
case SlateGray
case SlateGrey
case Snow
case SpringGreen
case SteelBlue
case Tan
case Teal
case Thistle
case Tomato
case Turquoise
case Violet
case Wheat
case White
case WhiteSmoke
case Yellow
case YellowGreen
}
///
/// Namespace for Web Colors.
///
namespace Web {
///
/// Returns the RGB-triplet for the given color `c`.
///
pub def toRGB(c: Console/Color.Web): (Int32, Int32, Int32) =
use Console/Color.Web;
match c {
case Web.AliceBlue => (240, 248, 255)
case Web.AntiqueWhite => (250, 235, 215)
case Web.Aqua => ( 0, 255, 255)
case Web.Aquamarine => (127, 255, 212)
case Web.Azure => (240, 255, 255)
case Web.Beige => (245, 245, 220)
case Web.Bisque => (255, 228, 196)
case Web.Black => ( 0, 0, 0)
case Web.BlanchedAlmond => (255, 235, 205)
case Web.Blue => ( 0, 0, 255)
case Web.BlueViolet => (138, 43, 226)
case Web.Brown => (165, 42, 42)
case Web.BurlyWood => (222, 184, 135)
case Web.CadetBlue => ( 95, 158, 160)
case Web.Chartreuse => (127, 255, 0)
case Web.Chocolate => (210, 105, 30)
case Web.Coral => (255, 127, 80)
case Web.CornflowerBlue => (100, 149, 237)
case Web.Cornsilk => (255, 248, 220)
case Web.Crimson => (220, 20, 60)
case Web.Cyan => ( 0, 255, 255)
case Web.DarkBlue => ( 0, 0, 139)
case Web.DarkCyan => ( 0, 139, 139)
case Web.DarkGoldenRod => (184, 134, 11)
case Web.DarkGray => (169, 169, 169)
case Web.DarkGrey => (169, 169, 169)
case Web.DarkGreen => ( 0, 100, 0)
case Web.DarkKhaki => (189, 183, 107)
case Web.DarkMagenta => (139, 0, 139)
case Web.DarkOliveGreen => ( 85, 107, 47)
case Web.DarkOrange => (255, 140, 0)
case Web.DarkOrchid => (153, 50, 204)
case Web.DarkRed => (139, 0, 0)
case Web.DarkSalmon => (233, 150, 122)
case Web.DarkSeaGreen => (143, 188, 143)
case Web.DarkSlateBlue => ( 72, 61, 139)
case Web.DarkSlateGray => ( 47, 79, 79)
case Web.DarkSlateGrey => ( 47, 79, 79)
case Web.DarkTurquoise => ( 0, 206, 209)
case Web.DarkViolet => (148, 0, 211)
case Web.DeepPink => (255, 20, 147)
case Web.DeepSkyBlue => ( 0, 191, 255)
case Web.DimGray => (105, 105, 105)
case Web.DimGrey => (105, 105, 105)
case Web.DodgerBlue => ( 30, 144, 255)
case Web.FireBrick => (178, 34, 34)
case Web.FloralWhite => (255, 250, 240)
case Web.ForestGreen => ( 34, 139, 34)
case Web.Fuchsia => (255, 0, 255)
case Web.Gainsboro => (220, 220, 220)
case Web.GhostWhite => (248, 248, 255)
case Web.Gold => (255, 215, 0)
case Web.GoldenRod => (218, 165, 32)
case Web.Gray => (128, 128, 128)
case Web.Grey => (128, 128, 128)
case Web.Green => ( 0, 128, 0)
case Web.GreenYellow => (173, 255, 47)
case Web.HoneyDew => (240, 255, 240)
case Web.HotPink => (255, 105, 180)
case Web.IndianRed => (205, 92, 92)
case Web.Indigo => ( 75, 0, 130)
case Web.Ivory => (255, 255, 240)
case Web.Khaki => (240, 230, 140)
case Web.Lavender => (230, 230, 250)
case Web.LavenderBlush => (255, 240, 245)
case Web.LawnGreen => (124, 252, 0)
case Web.LemonChiffon => (255, 250, 205)
case Web.LightBlue => (173, 216, 230)
case Web.LightCoral => (240, 128, 128)
case Web.LightCyan => (224, 255, 255)
case Web.LightGoldenRodYellow => (250, 250, 210)
case Web.LightGray => (211, 211, 211)
case Web.LightGrey => (211, 211, 211)
case Web.LightGreen => (144, 238, 144)
case Web.LightPink => (255, 182, 193)
case Web.LightSalmon => (255, 160, 122)
case Web.LightSeaGreen => ( 32, 178, 170)
case Web.LightSkyBlue => (135, 206, 250)
case Web.LightSlateGray => (119, 136, 153)
case Web.LightSlateGrey => (119, 136, 153)
case Web.LightSteelBlue => (176, 196, 222)
case Web.LightYellow => (255, 255, 224)
case Web.Lime => ( 0, 255, 0)
case Web.LimeGreen => ( 50, 205, 50)
case Web.Linen => (250, 240, 230)
case Web.Magenta => (255, 0, 255)
case Web.Maroon => (128, 0, 0)
case Web.MediumAquaMarine => (102, 205, 170)
case Web.MediumBlue => ( 0, 0, 205)
case Web.MediumOrchid => (186, 85, 211)
case Web.MediumPurple => (147, 112, 219)
case Web.MediumSeaGreen => ( 60, 179, 113)
case Web.MediumSlateBlue => (123, 104, 238)
case Web.MediumSpringGreen => ( 0, 250, 154)
case Web.MediumTurquoise => ( 72, 209, 204)
case Web.MediumVioletRed => (199, 21, 133)
case Web.MidnightBlue => ( 25, 25, 112)
case Web.MintCream => (245, 255, 250)
case Web.MistyRose => (255, 228, 225)
case Web.Moccasin => (255, 228, 181)
case Web.NavajoWhite => (255, 222, 173)
case Web.Navy => ( 0, 0, 128)
case Web.OldLace => (253, 245, 230)
case Web.Olive => (128, 128, 0)
case Web.OliveDrab => (107, 142, 35)
case Web.Orange => (255, 165, 0)
case Web.OrangeRed => (255, 69, 0)
case Web.Orchid => (218, 112, 214)
case Web.PaleGoldenRod => (238, 232, 170)
case Web.PaleGreen => (152, 251, 152)
case Web.PaleTurquoise => (175, 238, 238)
case Web.PaleVioletRed => (219, 112, 147)
case Web.PapayaWhip => (255, 239, 213)
case Web.PeachPuff => (255, 218, 185)
case Web.Peru => (205, 133, 63)
case Web.Pink => (255, 192, 203)
case Web.Plum => (221, 160, 221)
case Web.PowderBlue => (176, 224, 230)
case Web.Purple => (128, 0, 128)
case Web.RebeccaPurple => (102, 51, 153)
case Web.Red => (255, 0, 0)
case Web.RosyBrown => (188, 143, 143)
case Web.RoyalBlue => ( 65, 105, 225)
case Web.SaddleBrown => (139, 69, 19)
case Web.Salmon => (250, 128, 114)
case Web.SandyBrown => (244, 164, 96)
case Web.SeaGreen => ( 46, 139, 87)
case Web.SeaShell => (255, 245, 238)
case Web.Sienna => (160, 82, 45)
case Web.Silver => (192, 192, 192)
case Web.SkyBlue => (135, 206, 235)
case Web.SlateBlue => (106, 90, 205)
case Web.SlateGray => (112, 128, 144)
case Web.SlateGrey => (112, 128, 144)
case Web.Snow => (255, 250, 250)
case Web.SpringGreen => ( 0, 255, 127)
case Web.SteelBlue => ( 70, 130, 180)
case Web.Tan => (210, 180, 140)
case Web.Teal => ( 0, 128, 128)
case Web.Thistle => (216, 191, 216)
case Web.Tomato => (255, 99, 71)
case Web.Turquoise => ( 64, 224, 208)
case Web.Violet => (238, 130, 238)
case Web.Wheat => (245, 222, 179)
case Web.White => (255, 255, 255)
case Web.WhiteSmoke => (245, 245, 245)
case Web.Yellow => (255, 255, 0)
case Web.YellowGreen => (154, 205, 50)
}
}
}
///
/// Returns the string `s` formatted with the given RGB color `c`.
///
pub def rgb(c: (Int32, Int32, Int32), s: String): String =
match colorSupport() {
case ColorSupport.None => s
case ColorSupport.Ansi =>
use Console/Color/Ansi.downsample;
let fg = downsample(c);
formatAnsi(fg, s)
case ColorSupport.TrueColor => formatTrueColor(c, s)
}
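// Usage sketch: `rgb((255, 0, 0), "alert")` wraps the string in a 24-bit SGR
// sequence on TrueColor terminals, downsamples to the nearest ANSI code on
// plain ANSI terminals, and returns `s` unchanged when colors are unsupported.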
///
/// Returns the string `s` formatted with the given ANSI color `fg`.
///
def formatAnsi(fg: Int32, s: String): String =
let esc = Console.escape();
esc + "[" + Int32.toString(fg) + "m" + s + esc + "[0m"
///
/// Returns the string `s` formatted with the given RGB color `c`.
///
def formatTrueColor(c: (Int32, Int32, Int32), s: String): String =
let (r, g, b) = c;
let esc = Console.escape();
let red = Int32.toString(r);
let gre = Int32.toString(g);
let blu = Int32.toString(b);
esc + "[38;2;" + red + ";" + gre + ";" + blu + "m" + s + esc + "[0m"
///
/// Returns the escape character as a string.
///
def escape(): String = Char.toString('\u001b')
///
/// Returns the color support of the current terminal.
///
def colorSupport(): ColorSupport =
if (isTrueColorTerminal() or isWindowsTerminal())
ColorSupport.TrueColor
else if (isAnsiTerminal())
ColorSupport.Ansi
else
ColorSupport.None
///
/// Returns `true` if the terminal supports ANSI colors.
///
def isAnsiTerminal(): Bool =
import java.lang.System:getenv(String);
let p = getenv("TERM") as & Pure;
if (Object.isNull(p))
false
else {
let keywords = "256" :: "ansi" :: "xterm" :: "screen" :: Nil;
List.exists(s -> String.contains(p, s), keywords)
}
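// e.g. TERM=xterm-256color matches both "256" and "xterm" above, so such a
// terminal is treated as ANSI-capable.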
///
/// Returns `true` if the terminal supports 24-bit colors.
///
def isTrueColorTerminal(): Bool =
import java.lang.System:getenv(String);
let p = getenv("COLORTERM") as & Pure;
if (Object.isNull(p))
false
else
p == "truecolor"
///
/// Returns `true` if the terminal is the Windows Terminal.
///
def isWindowsTerminal(): Bool =
import java.lang.System:getenv(String);
let p = getenv("WT_SESSION") as & Pure;
not Object.isNull(p)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package math
func log10TrampolineSetup(x float64) float64
func log10Asm(x float64) float64
func cosTrampolineSetup(x float64) float64
func cosAsm(x float64) float64
func coshTrampolineSetup(x float64) float64
func coshAsm(x float64) float64
func sinTrampolineSetup(x float64) float64
func sinAsm(x float64) float64
func sinhTrampolineSetup(x float64) float64
func sinhAsm(x float64) float64
func tanhTrampolineSetup(x float64) float64
func tanhAsm(x float64) float64
func log1pTrampolineSetup(x float64) float64
func log1pAsm(x float64) float64
func atanhTrampolineSetup(x float64) float64
func atanhAsm(x float64) float64
func acosTrampolineSetup(x float64) float64
func acosAsm(x float64) float64
func acoshTrampolineSetup(x float64) float64
func acoshAsm(x float64) float64
func asinTrampolineSetup(x float64) float64
func asinAsm(x float64) float64
func asinhTrampolineSetup(x float64) float64
func asinhAsm(x float64) float64
func erfTrampolineSetup(x float64) float64
func erfAsm(x float64) float64
func erfcTrampolineSetup(x float64) float64
func erfcAsm(x float64) float64
func atanTrampolineSetup(x float64) float64
func atanAsm(x float64) float64
func atan2TrampolineSetup(x, y float64) float64
func atan2Asm(x, y float64) float64
func cbrtTrampolineSetup(x float64) float64
func cbrtAsm(x float64) float64
func logTrampolineSetup(x float64) float64
func logAsm(x float64) float64
func tanTrampolineSetup(x float64) float64
func tanAsm(x float64) float64
func expTrampolineSetup(x float64) float64
func expAsm(x float64) float64
func expm1TrampolineSetup(x float64) float64
func expm1Asm(x float64) float64
func powTrampolineSetup(x, y float64) float64
func powAsm(x, y float64) float64
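// Note: each ...TrampolineSetup stub above is implemented in assembly and
// dispatches between the vectorized ...Asm routine and the portable Go
// fallback based on hasVX below (a sketch of the scheme; the details live in
// the corresponding .s file).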
// hasVectorFacility reports whether the machine has the z/Architecture
// vector facility installed and enabled.
func hasVectorFacility() bool
var hasVX = hasVectorFacility()
// REQUIRES: arm
// RUN: llvm-mc -filetype=obj -triple=armv7a-none-linux %s -o %t.o
// Creates a R_ARM_ABS32 relocation against foo and bar, bar has hidden
// visibility so we expect a R_ARM_RELATIVE
.syntax unified
.globl foo
foo:
.globl bar
.hidden bar
bar:
.data
.word foo
.word bar
// In PIC mode, if R_ARM_TARGET1 represents R_ARM_ABS32 (the default), an
// R_ARM_TARGET1 to a non-preemptable symbol also creates an R_ARM_RELATIVE in
// a writable section.
.word bar(target1)
// RUN: ld.lld -shared -o %t.so %t.o
// RUN: llvm-readobj --symbols --dyn-relocations %t.so | FileCheck %s
// RUN: llvm-readelf -x .data %t.so | FileCheck --check-prefix=HEX %s
// CHECK: Dynamic Relocations {
// CHECK-NEXT: 0x3204 R_ARM_RELATIVE
// CHECK-NEXT: 0x3208 R_ARM_RELATIVE
// CHECK-NEXT: 0x3200 R_ARM_ABS32 foo 0x0
// CHECK-NEXT: }
// CHECK: Symbols [
// CHECK: Symbol {
// CHECK: Name: bar
// CHECK-NEXT: Value: 0x11A8
// CHECK: Symbol {
// CHECK: Name: foo
// CHECK-NEXT: Value: 0x11A8
// HEX: 0x00003200 00000000 a8110000 a8110000
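// The .data words at 0x3200: foo's slot holds 0 pending the symbolic
// R_ARM_ABS32, while both bar slots hold bar's link-time address 0x11a8
// (a8110000 little-endian) to be rebased by the R_ARM_RELATIVE entries.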
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="album">Album</string>
<string name="app_picker_name">应用</string>
<string name="barcode_canner_title">条码扫描器</string>
<string name="bookmark_picker_name">书签</string>
<string name="button_add_calendar">添加事件至日历</string>
<string name="button_add_contact">添加联系人</string>
<string name="button_back">返回</string>
<string name="button_book_search">打开图书搜索</string>
<string name="button_cancel">取消</string>
<string name="button_clipboard_empty">清空剪切板</string>
<string name="button_custom_product_search">自定义搜索</string>
<string name="button_dial">拨号</string>
<string name="button_done">完成</string>
<string name="button_email">发送email</string>
<string name="button_get_directions">获取地址</string>
<string name="button_google_shopper">谷歌购物</string>
<string name="button_mms">发送彩信</string>
<string name="button_ok">确定</string>
<string name="button_open_browser">打开浏览器</string>
<string name="button_product_search">打开产品搜索</string>
<string name="button_search_book_contents">搜索图书内容</string>
<string name="button_share_app">共享应用</string>
<string name="button_share_bookmark">分享书签</string>
<string name="button_share_by_email">通过email分享</string>
<string name="button_share_by_sms">通过短信分享</string>
<string name="button_share_clipboard">分享剪切板</string>
<string name="button_share_contact">分享联系人</string>
<string name="button_show_map">显示地图</string>
<string name="button_sms">发送短信</string>
<string name="button_web_search">网页搜索</string>
<string name="button_wifi">连接到网络</string>
<string name="cancel">Cancel</string>
<string name="cancel_finish">取消</string>
<string name="contents_contact">联系人信息</string>
<string name="contents_email">Email地址</string>
<string name="contents_location">地理坐标</string>
<string name="contents_phone">电话号码</string>
<string name="contents_sms">短信地址</string>
<string name="contents_text">纯文本</string>
<string name="flash">Flashlight</string>
<string name="history_clear_one_history_text">删除</string>
<string name="history_clear_text">删除历史</string>
<string name="history_email_title">条码扫描器历史</string>
<string name="history_empty">空的</string>
<string name="history_empty_detail">没有条码扫描已记录</string>
<string name="history_send">发送历史</string>
<string name="history_title">历史</string>
<string name="input">Input</string>
<string name="input_isbn">手动输入ISBN号</string>
<string name="menu_about">关于</string>
<string name="menu_encode_mecard">使用MECARD</string>
<string name="menu_encode_vcard">使用vCard</string>
<string name="menu_help">帮助</string>
<string name="menu_history">历史</string>
<string name="menu_settings">设置</string>
<string name="menu_share">分享</string>
<string name="msg_about">基于ZXing开源条码引擎</string>
<string name="msg_bulk_mode_scanned">批量扫描:找到条码,已保存</string>
<string name="msg_camera_framework_bug">抱歉,Android相机出现问题。您可能需要重启设备。</string>
<string name="msg_default_format">格式</string>
<string name="msg_default_meta">元数据</string>
<string name="msg_default_mms_subject">Hi</string>
<string name="msg_default_status">Align QR code/barcode within frame to scan</string>
<string name="msg_default_time">时间</string>
<string name="msg_default_type">类型</string>
<string name="msg_encode_contents_failed">无法根据已有数据生成条码。 </string>
<string name="msg_google_books">谷歌图书</string>
<string name="msg_google_product">谷歌购物</string>
<string name="msg_google_shopper_missing">谷歌购物未安装</string>
<string name="msg_install_google_shopper">谷歌购物是一款将条码扫描和商品比价相结合的应用,谷歌购物不使用浏览器,您想尝试吗?</string>
<string name="msg_intent_failed">抱歉,无法打开所需软件。 条码内容可能无效。</string>
<string name="msg_redirect">重定向</string>
<string name="msg_sbc_book_not_searchable">抱歉,无法找到此图书。</string>
<string name="msg_sbc_failed">抱歉,搜索时遇到错误。</string>
<string name="msg_sbc_no_page_returned">无页面返回</string>
<string name="msg_sbc_page">页面</string>
<string name="msg_sbc_searching_book">搜索图书…</string>
<string name="msg_sbc_snippet_unavailable">摘录不可用</string>
<string name="msg_sbc_unknown_page">未知页面</string>
<string name="msg_share_explanation">您可以通过在自己的手机上显示条码,并使用其它手机扫描此条码的方式进行分享</string>
<string name="msg_share_subject_line">这是我所扫描条码的内容</string>
<string name="msg_share_text">或键入一些文字</string>
<string name="msg_sure">你肯定吗?</string>
<string name="msg_unmount_usb">抱歉,SD卡不可访问。</string>
<string name="ok">确定</string>
<string name="preferences_actions_title">找到条码时</string>
<string name="preferences_bulk_mode_summary">连续扫描并保存多个条形码</string>
<string name="preferences_bulk_mode_title">批量扫描模式</string>
<string name="preferences_copy_to_clipboard_title">复制到剪切板</string>
<string formatted="false" name="preferences_custom_product_search_summary">替换:%s=内容,%f=格式,%t=类型</string>
<string name="preferences_custom_product_search_title">自定义搜索网址</string>
<string name="preferences_decode_1D_title">扫描 一维条码</string>
<string name="preferences_decode_Data_Matrix_title">扫描 DM码</string>
<string name="preferences_decode_QR_title">扫描 QR码</string>
<string name="preferences_front_light_summary">前灯可用时,打开前灯</string>
<string name="preferences_front_light_title">前灯</string>
<string name="preferences_general_title">一般设置</string>
<string name="preferences_name">设置</string>
<string name="preferences_play_beep_title">提示音</string>
<string name="preferences_remember_duplicates_summary">在历史记录中保存重复记录</string>
<string name="preferences_remember_duplicates_title">记住重复</string>
<string name="preferences_result_title">结果设置</string>
<string name="preferences_reverse_image_summary">设备上启用了前置摄像头只</string>
<string name="preferences_reverse_image_title">反转摄像机的图像</string>
<string name="preferences_scanning_title">条码类型选择</string>
<string name="preferences_search_country">搜索国家</string>
<string name="preferences_supplemental_summary">尝试检索关于条码内容的更多信息</string>
<string name="preferences_supplemental_title">检索更多信息</string>
<string name="preferences_vibrate_title">振动</string>
<string name="result_address_book">找到联系人信息</string>
<string name="result_calendar">找到日历事件</string>
<string name="result_email_address">找到email地址</string>
<string name="result_geo">找到地理坐标</string>
<string name="result_isbn">找到图书</string>
<string name="result_product">找到产品</string>
<string name="result_sms">找到短信地址</string>
<string name="result_tel">找到电话号码</string>
<string name="result_text">找到纯文本</string>
<string name="result_uri">找到URL</string>
<string name="result_wifi">发现网络配置</string>
<string name="sbc_name">Google图书搜索</string>
<string name="scan_empty">没有发现二维码/条形码</string>
<string name="scan_tips">Align QR code/barcode within frame to scan</string>
<string name="scan_title">二维码扫描</string>
<string name="search">搜 索</string>
<string name="share_name">通过条码分享</string>
<string name="tip">提示</string>
<string name="title_about">条码扫描器</string>
<string name="turn_off_flash">关闭</string>
<string name="turn_on_flash">打开</string>
<string name="wifi_changing_network">请求连接到网络\u2026</string>
<string name="wifi_ssid_label">网络SSID</string>
<string name="wifi_type_label">类型</string>
<string name="zxing_url">http://code.google.com/p/zxing</string>
</resources>
/*
* This file is part of the SDWebImage package.
* (c) Olivier Poitrey <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#import "SDImageCache.h"
#import "SDWebImageDecoder.h"
#import "UIImage+MultiFormat.h"
#import <CommonCrypto/CommonDigest.h>
#import "UIImage+GIF.h"
#import "NSData+ImageContentType.h"
#import "NSImage+WebCache.h"
#import "SDImageCacheConfig.h"
// See https://github.com/rs/SDWebImage/pull/1141 for discussion
@interface AutoPurgeCache : NSCache
@end
@implementation AutoPurgeCache
- (nonnull instancetype)init {
self = [super init];
if (self) {
#if SD_UIKIT
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(removeAllObjects) name:UIApplicationDidReceiveMemoryWarningNotification object:nil];
#endif
}
return self;
}
- (void)dealloc {
#if SD_UIKIT
[[NSNotificationCenter defaultCenter] removeObserver:self name:UIApplicationDidReceiveMemoryWarningNotification object:nil];
#endif
}
@end
FOUNDATION_STATIC_INLINE NSUInteger SDCacheCostForImage(UIImage *image) {
#if SD_MAC
return image.size.height * image.size.width;
#elif SD_UIKIT || SD_WATCH
return image.size.height * image.size.width * image.scale * image.scale;
#endif
}
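// e.g. a 100x100-point image at @2x scale costs 100 * 100 * 2 * 2 = 40,000
// against the memory cache's totalCostLimit, i.e. roughly its decoded pixel count.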
@interface SDImageCache ()
#pragma mark - Properties
@property (strong, nonatomic, nonnull) NSCache *memCache;
@property (strong, nonatomic, nonnull) NSString *diskCachePath;
@property (strong, nonatomic, nullable) NSMutableArray<NSString *> *customPaths;
@property (SDDispatchQueueSetterSementics, nonatomic, nullable) dispatch_queue_t ioQueue;
@end
@implementation SDImageCache {
NSFileManager *_fileManager;
}
#pragma mark - Singleton, init, dealloc
+ (nonnull instancetype)sharedImageCache {
static dispatch_once_t once;
static id instance;
dispatch_once(&once, ^{
instance = [self new];
});
return instance;
}
- (instancetype)init {
return [self initWithNamespace:@"default"];
}
- (nonnull instancetype)initWithNamespace:(nonnull NSString *)ns {
NSString *path = [self makeDiskCachePath:ns];
return [self initWithNamespace:ns diskCacheDirectory:path];
}
- (nonnull instancetype)initWithNamespace:(nonnull NSString *)ns
diskCacheDirectory:(nonnull NSString *)directory {
if ((self = [super init])) {
NSString *fullNamespace = [@"com.hackemist.SDWebImageCache." stringByAppendingString:ns];
// Create IO serial queue
_ioQueue = dispatch_queue_create("com.hackemist.SDWebImageCache", DISPATCH_QUEUE_SERIAL);
_config = [[SDImageCacheConfig alloc] init];
// Init the memory cache
_memCache = [[AutoPurgeCache alloc] init];
_memCache.name = fullNamespace;
// Init the disk cache
if (directory != nil) {
_diskCachePath = [directory stringByAppendingPathComponent:fullNamespace];
} else {
NSString *path = [self makeDiskCachePath:ns];
_diskCachePath = path;
}
dispatch_sync(_ioQueue, ^{
_fileManager = [NSFileManager new];
});
#if SD_UIKIT
// Subscribe to app events
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(clearMemory)
name:UIApplicationDidReceiveMemoryWarningNotification
object:nil];
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deleteOldFiles)
name:UIApplicationWillTerminateNotification
object:nil];
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(backgroundDeleteOldFiles)
name:UIApplicationDidEnterBackgroundNotification
object:nil];
#endif
}
return self;
}
- (void)dealloc {
[[NSNotificationCenter defaultCenter] removeObserver:self];
SDDispatchQueueRelease(_ioQueue);
}
- (void)checkIfQueueIsIOQueue {
const char *currentQueueLabel = dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL);
const char *ioQueueLabel = dispatch_queue_get_label(self.ioQueue);
if (strcmp(currentQueueLabel, ioQueueLabel) != 0) {
NSLog(@"This method should be called from the ioQueue");
}
}
#pragma mark - Cache paths
- (void)addReadOnlyCachePath:(nonnull NSString *)path {
if (!self.customPaths) {
self.customPaths = [NSMutableArray new];
}
if (![self.customPaths containsObject:path]) {
[self.customPaths addObject:path];
}
}
- (nullable NSString *)cachePathForKey:(nullable NSString *)key inPath:(nonnull NSString *)path {
NSString *filename = [self cachedFileNameForKey:key];
return [path stringByAppendingPathComponent:filename];
}
- (nullable NSString *)defaultCachePathForKey:(nullable NSString *)key {
return [self cachePathForKey:key inPath:self.diskCachePath];
}
- (nullable NSString *)cachedFileNameForKey:(nullable NSString *)key {
const char *str = key.UTF8String;
if (str == NULL) {
str = "";
}
unsigned char r[CC_MD5_DIGEST_LENGTH];
CC_MD5(str, (CC_LONG)strlen(str), r);
NSString *filename = [NSString stringWithFormat:@"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%@",
r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8], r[9], r[10],
r[11], r[12], r[13], r[14], r[15], [key.pathExtension isEqualToString:@""] ? @"" : [NSString stringWithFormat:@".%@", key.pathExtension]];
return filename;
}
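// e.g. an illustrative key of @"https://example.com/a.png" yields the
// 32-character MD5 hex digest of the key with the original ".png" extension
// appended.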
- (nullable NSString *)makeDiskCachePath:(nonnull NSString*)fullNamespace {
NSArray<NSString *> *paths = NSSearchPathForDirectoriesInDomains(NSCachesDirectory, NSUserDomainMask, YES);
return [paths[0] stringByAppendingPathComponent:fullNamespace];
}
#pragma mark - Store Ops
- (void)storeImage:(nullable UIImage *)image
forKey:(nullable NSString *)key
completion:(nullable SDWebImageNoParamsBlock)completionBlock {
[self storeImage:image imageData:nil forKey:key toDisk:YES completion:completionBlock];
}
- (void)storeImage:(nullable UIImage *)image
forKey:(nullable NSString *)key
toDisk:(BOOL)toDisk
completion:(nullable SDWebImageNoParamsBlock)completionBlock {
[self storeImage:image imageData:nil forKey:key toDisk:toDisk completion:completionBlock];
}
- (void)storeImage:(nullable UIImage *)image
imageData:(nullable NSData *)imageData
forKey:(nullable NSString *)key
toDisk:(BOOL)toDisk
completion:(nullable SDWebImageNoParamsBlock)completionBlock {
if (!image || !key) {
if (completionBlock) {
completionBlock();
}
return;
}
// if memory cache is enabled
if (self.config.shouldCacheImagesInMemory) {
NSUInteger cost = SDCacheCostForImage(image);
[self.memCache setObject:image forKey:key cost:cost];
}
if (toDisk) {
dispatch_async(self.ioQueue, ^{
NSData *data = imageData;
if (!data && image) {
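// Note: `data` is nil on this branch, so the lookup below yields
// SDImageFormatUndefined, and sd_imageDataAsFormat: falls back to PNG or
// JPEG encoding based on the image's alpha channel (behavior kept as-is
// from upstream).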
SDImageFormat imageFormatFromData = [NSData sd_imageFormatForImageData:data];
data = [image sd_imageDataAsFormat:imageFormatFromData];
}
[self storeImageDataToDisk:data forKey:key];
if (completionBlock) {
dispatch_async(dispatch_get_main_queue(), ^{
completionBlock();
});
}
});
} else {
if (completionBlock) {
completionBlock();
}
}
}
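// Usage sketch (with `url` a hypothetical NSURL):
// [[SDImageCache sharedImageCache] storeImage:image imageData:nil
//     forKey:url.absoluteString toDisk:YES completion:nil];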
- (void)storeImageDataToDisk:(nullable NSData *)imageData forKey:(nullable NSString *)key {
if (!imageData || !key) {
return;
}
[self checkIfQueueIsIOQueue];
if (![_fileManager fileExistsAtPath:_diskCachePath]) {
[_fileManager createDirectoryAtPath:_diskCachePath withIntermediateDirectories:YES attributes:nil error:NULL];
}
// get cache Path for image key
NSString *cachePathForKey = [self defaultCachePathForKey:key];
// transform to NSUrl
NSURL *fileURL = [NSURL fileURLWithPath:cachePathForKey];
[_fileManager createFileAtPath:cachePathForKey contents:imageData attributes:nil];
// disable iCloud backup
if (self.config.shouldDisableiCloud) {
[fileURL setResourceValue:@YES forKey:NSURLIsExcludedFromBackupKey error:nil];
}
}
#pragma mark - Query and Retrieve Ops
- (void)diskImageExistsWithKey:(nullable NSString *)key completion:(nullable SDWebImageCheckCacheCompletionBlock)completionBlock {
dispatch_async(_ioQueue, ^{
BOOL exists = [_fileManager fileExistsAtPath:[self defaultCachePathForKey:key]];
// fallback because of https://github.com/rs/SDWebImage/pull/976 that added the extension to the disk file name
// checking the key with and without the extension
if (!exists) {
exists = [_fileManager fileExistsAtPath:[self defaultCachePathForKey:key].stringByDeletingPathExtension];
}
if (completionBlock) {
dispatch_async(dispatch_get_main_queue(), ^{
completionBlock(exists);
});
}
});
}
- (nullable UIImage *)imageFromMemoryCacheForKey:(nullable NSString *)key {
return [self.memCache objectForKey:key];
}
- (nullable UIImage *)imageFromDiskCacheForKey:(nullable NSString *)key {
UIImage *diskImage = [self diskImageForKey:key];
if (diskImage && self.config.shouldCacheImagesInMemory) {
NSUInteger cost = SDCacheCostForImage(diskImage);
[self.memCache setObject:diskImage forKey:key cost:cost];
}
return diskImage;
}
- (nullable UIImage *)imageFromCacheForKey:(nullable NSString *)key {
// First check the in-memory cache...
UIImage *image = [self imageFromMemoryCacheForKey:key];
if (image) {
return image;
}
// Second check the disk cache...
image = [self imageFromDiskCacheForKey:key];
return image;
}
- (nullable NSData *)diskImageDataBySearchingAllPathsForKey:(nullable NSString *)key {
NSString *defaultPath = [self defaultCachePathForKey:key];
NSData *data = [NSData dataWithContentsOfFile:defaultPath];
if (data) {
return data;
}
// fallback because of https://github.com/rs/SDWebImage/pull/976 that added the extension to the disk file name
// checking the key with and without the extension
data = [NSData dataWithContentsOfFile:defaultPath.stringByDeletingPathExtension];
if (data) {
return data;
}
NSArray<NSString *> *customPaths = [self.customPaths copy];
for (NSString *path in customPaths) {
NSString *filePath = [self cachePathForKey:key inPath:path];
NSData *imageData = [NSData dataWithContentsOfFile:filePath];
if (imageData) {
return imageData;
}
// fallback because of https://github.com/rs/SDWebImage/pull/976 that added the extension to the disk file name
// checking the key with and without the extension
imageData = [NSData dataWithContentsOfFile:filePath.stringByDeletingPathExtension];
if (imageData) {
return imageData;
}
}
return nil;
}
- (nullable UIImage *)diskImageForKey:(nullable NSString *)key {
NSData *data = [self diskImageDataBySearchingAllPathsForKey:key];
if (data) {
UIImage *image = [UIImage sd_imageWithData:data];
image = [self scaledImageForKey:key image:image];
if (self.config.shouldDecompressImages) {
image = [UIImage decodedImageWithImage:image];
}
return image;
}
else {
return nil;
}
}
- (nullable UIImage *)scaledImageForKey:(nullable NSString *)key image:(nullable UIImage *)image {
return SDScaledImageForKey(key, image);
}
- (nullable NSOperation *)queryCacheOperationForKey:(nullable NSString *)key done:(nullable SDCacheQueryCompletedBlock)doneBlock {
if (!key) {
if (doneBlock) {
doneBlock(nil, nil, SDImageCacheTypeNone);
}
return nil;
}
// First check the in-memory cache...
UIImage *image = [self imageFromMemoryCacheForKey:key];
if (image) {
NSData *diskData = nil;
if ([image isGIF]) {
diskData = [self diskImageDataBySearchingAllPathsForKey:key];
}
if (doneBlock) {
doneBlock(image, diskData, SDImageCacheTypeMemory);
}
return nil;
}
NSOperation *operation = [NSOperation new];
dispatch_async(self.ioQueue, ^{
if (operation.isCancelled) {
// do not call the completion if cancelled
return;
}
@autoreleasepool {
NSData *diskData = [self diskImageDataBySearchingAllPathsForKey:key];
UIImage *diskImage = [self diskImageForKey:key];
if (diskImage && self.config.shouldCacheImagesInMemory) {
NSUInteger cost = SDCacheCostForImage(diskImage);
[self.memCache setObject:diskImage forKey:key cost:cost];
}
if (doneBlock) {
dispatch_async(dispatch_get_main_queue(), ^{
doneBlock(diskImage, diskData, SDImageCacheTypeDisk);
});
}
}
});
return operation;
}
#pragma mark - Remove Ops
- (void)removeImageForKey:(nullable NSString *)key withCompletion:(nullable SDWebImageNoParamsBlock)completion {
[self removeImageForKey:key fromDisk:YES withCompletion:completion];
}
- (void)removeImageForKey:(nullable NSString *)key fromDisk:(BOOL)fromDisk withCompletion:(nullable SDWebImageNoParamsBlock)completion {
if (key == nil) {
return;
}
if (self.config.shouldCacheImagesInMemory) {
[self.memCache removeObjectForKey:key];
}
if (fromDisk) {
dispatch_async(self.ioQueue, ^{
[_fileManager removeItemAtPath:[self defaultCachePathForKey:key] error:nil];
if (completion) {
dispatch_async(dispatch_get_main_queue(), ^{
completion();
});
}
});
} else if (completion){
completion();
}
}
# pragma mark - Mem Cache settings
- (void)setMaxMemoryCost:(NSUInteger)maxMemoryCost {
self.memCache.totalCostLimit = maxMemoryCost;
}
- (NSUInteger)maxMemoryCost {
return self.memCache.totalCostLimit;
}
- (NSUInteger)maxMemoryCountLimit {
return self.memCache.countLimit;
}
- (void)setMaxMemoryCountLimit:(NSUInteger)maxCountLimit {
self.memCache.countLimit = maxCountLimit;
}
#pragma mark - Cache clean Ops
- (void)clearMemory {
[self.memCache removeAllObjects];
}
- (void)clearDiskOnCompletion:(nullable SDWebImageNoParamsBlock)completion {
dispatch_async(self.ioQueue, ^{
[_fileManager removeItemAtPath:self.diskCachePath error:nil];
[_fileManager createDirectoryAtPath:self.diskCachePath
withIntermediateDirectories:YES
attributes:nil
error:NULL];
if (completion) {
dispatch_async(dispatch_get_main_queue(), ^{
completion();
});
}
});
}
- (void)deleteOldFiles {
[self deleteOldFilesWithCompletionBlock:nil];
}
- (void)deleteOldFilesWithCompletionBlock:(nullable SDWebImageNoParamsBlock)completionBlock {
dispatch_async(self.ioQueue, ^{
NSURL *diskCacheURL = [NSURL fileURLWithPath:self.diskCachePath isDirectory:YES];
NSArray<NSString *> *resourceKeys = @[NSURLIsDirectoryKey, NSURLContentModificationDateKey, NSURLTotalFileAllocatedSizeKey];
// This enumerator prefetches useful properties for our cache files.
NSDirectoryEnumerator *fileEnumerator = [_fileManager enumeratorAtURL:diskCacheURL
includingPropertiesForKeys:resourceKeys
options:NSDirectoryEnumerationSkipsHiddenFiles
errorHandler:NULL];
NSDate *expirationDate = [NSDate dateWithTimeIntervalSinceNow:-self.config.maxCacheAge];
NSMutableDictionary<NSURL *, NSDictionary<NSString *, id> *> *cacheFiles = [NSMutableDictionary dictionary];
NSUInteger currentCacheSize = 0;
// Enumerate all of the files in the cache directory. This loop has two purposes:
//
// 1. Removing files that are older than the expiration date.
// 2. Storing file attributes for the size-based cleanup pass.
NSMutableArray<NSURL *> *urlsToDelete = [[NSMutableArray alloc] init];
for (NSURL *fileURL in fileEnumerator) {
NSError *error;
NSDictionary<NSString *, id> *resourceValues = [fileURL resourceValuesForKeys:resourceKeys error:&error];
// Skip directories and errors.
if (error || !resourceValues || [resourceValues[NSURLIsDirectoryKey] boolValue]) {
continue;
}
// Remove files that are older than the expiration date;
NSDate *modificationDate = resourceValues[NSURLContentModificationDateKey];
if ([[modificationDate laterDate:expirationDate] isEqualToDate:expirationDate]) {
[urlsToDelete addObject:fileURL];
continue;
}
// Store a reference to this file and account for its total size.
NSNumber *totalAllocatedSize = resourceValues[NSURLTotalFileAllocatedSizeKey];
currentCacheSize += totalAllocatedSize.unsignedIntegerValue;
cacheFiles[fileURL] = resourceValues;
}
for (NSURL *fileURL in urlsToDelete) {
[_fileManager removeItemAtURL:fileURL error:nil];
}
// If our remaining disk cache exceeds a configured maximum size, perform a second
// size-based cleanup pass. We delete the oldest files first.
if (self.config.maxCacheSize > 0 && currentCacheSize > self.config.maxCacheSize) {
// Target half of our maximum cache size for this cleanup pass.
const NSUInteger desiredCacheSize = self.config.maxCacheSize / 2;
// Sort the remaining cache files by their last modification time (oldest first).
NSArray<NSURL *> *sortedFiles = [cacheFiles keysSortedByValueWithOptions:NSSortConcurrent
usingComparator:^NSComparisonResult(id obj1, id obj2) {
return [obj1[NSURLContentModificationDateKey] compare:obj2[NSURLContentModificationDateKey]];
}];
// Delete files until we fall below our desired cache size.
for (NSURL *fileURL in sortedFiles) {
if ([_fileManager removeItemAtURL:fileURL error:nil]) {
NSDictionary<NSString *, id> *resourceValues = cacheFiles[fileURL];
NSNumber *totalAllocatedSize = resourceValues[NSURLTotalFileAllocatedSizeKey];
currentCacheSize -= totalAllocatedSize.unsignedIntegerValue;
if (currentCacheSize < desiredCacheSize) {
break;
}
}
}
}
if (completionBlock) {
dispatch_async(dispatch_get_main_queue(), ^{
completionBlock();
});
}
});
}
#if SD_UIKIT
- (void)backgroundDeleteOldFiles {
Class UIApplicationClass = NSClassFromString(@"UIApplication");
if(!UIApplicationClass || ![UIApplicationClass respondsToSelector:@selector(sharedApplication)]) {
return;
}
UIApplication *application = [UIApplication performSelector:@selector(sharedApplication)];
__block UIBackgroundTaskIdentifier bgTask = [application beginBackgroundTaskWithExpirationHandler:^{
// Clean up any unfinished task business by marking where you
// stopped or ending the task outright.
[application endBackgroundTask:bgTask];
bgTask = UIBackgroundTaskInvalid;
}];
// Start the long-running task and return immediately.
[self deleteOldFilesWithCompletionBlock:^{
[application endBackgroundTask:bgTask];
bgTask = UIBackgroundTaskInvalid;
}];
}
#endif
#pragma mark - Cache Info
- (NSUInteger)getSize {
__block NSUInteger size = 0;
dispatch_sync(self.ioQueue, ^{
NSDirectoryEnumerator *fileEnumerator = [_fileManager enumeratorAtPath:self.diskCachePath];
for (NSString *fileName in fileEnumerator) {
NSString *filePath = [self.diskCachePath stringByAppendingPathComponent:fileName];
NSDictionary<NSString *, id> *attrs = [[NSFileManager defaultManager] attributesOfItemAtPath:filePath error:nil];
size += [attrs fileSize];
}
});
return size;
}
- (NSUInteger)getDiskCount {
__block NSUInteger count = 0;
dispatch_sync(self.ioQueue, ^{
NSDirectoryEnumerator *fileEnumerator = [_fileManager enumeratorAtPath:self.diskCachePath];
count = fileEnumerator.allObjects.count;
});
return count;
}
- (void)calculateSizeWithCompletionBlock:(nullable SDWebImageCalculateSizeBlock)completionBlock {
NSURL *diskCacheURL = [NSURL fileURLWithPath:self.diskCachePath isDirectory:YES];
dispatch_async(self.ioQueue, ^{
NSUInteger fileCount = 0;
NSUInteger totalSize = 0;
NSDirectoryEnumerator *fileEnumerator = [_fileManager enumeratorAtURL:diskCacheURL
includingPropertiesForKeys:@[NSFileSize]
options:NSDirectoryEnumerationSkipsHiddenFiles
errorHandler:NULL];
for (NSURL *fileURL in fileEnumerator) {
NSNumber *fileSize;
[fileURL getResourceValue:&fileSize forKey:NSURLFileSizeKey error:NULL];
totalSize += fileSize.unsignedIntegerValue;
fileCount += 1;
}
if (completionBlock) {
dispatch_async(dispatch_get_main_queue(), ^{
completionBlock(fileCount, totalSize);
});
}
});
}
@end
#!/sbin/openrc-run
# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
: ${GVMD_USER:=gvm}
: ${GVMD_GROUP:=gvm}
: ${GVMD_TIMEOUT:=30}
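# The ${VAR:=default} expansions above only assign these fallbacks when the
# variables are not already set, e.g. from /etc/conf.d/gvmd (assumed OpenRC
# conf.d location).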
name="Greenbone Vulnerability Manager"
command=/usr/bin/gvmd
command_args="--foreground ${GVMD_OPTIONS} ${GVMD_LISTEN_ADDRESS_TCP} ${GVMD_PORT} ${GVMD_SCANNER_HOST} ${GVMD_GNUTLS_PRIORITIES}"
command_background="true"
command_user="${GVMD_USER}:${GVMD_GROUP}"
pidfile="/run/gvmd.pid"
retry="${GVMD_TIMEOUT}"
depend() {
after bootmisc
need localmount net ospd-openvas
}
start_pre() {
/bin/bash /etc/gvm/gvmd-startpre.sh
}
package com.forezp.client.config;
import feign.Retryer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import static java.util.concurrent.TimeUnit.SECONDS;
/**
* Created by fangzhipeng on 2017/6/16.
*/
@Configuration
public class FeignConfig {
@Bean
public Retryer feignRetryer() {
return new Retryer.Default(100, SECONDS.toMillis(1), 5);
}
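// Retryer.Default(period, maxPeriod, maxAttempts): retries start at a 100 ms
// interval, back off exponentially (1.5x per attempt in Feign's default
// retryer) up to the 1 s cap, and stop after at most 5 attempts.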
}
'use strict';
/**
* Class representing an event.
*
* @private
*/
class Event {
/**
* Create a new `Event`.
*
* @param {String} type The name of the event
* @param {Object} target A reference to the target to which the event was dispatched
*/
constructor(type, target) {
this.target = target;
this.type = type;
}
}
/**
* Class representing a message event.
*
* @extends Event
* @private
*/
class MessageEvent extends Event {
/**
* Create a new `MessageEvent`.
*
* @param {(String|Buffer|ArrayBuffer|Buffer[])} data The received data
* @param {WebSocket} target A reference to the target to which the event was dispatched
*/
constructor(data, target) {
super('message', target);
this.data = data;
}
}
/**
* Class representing a close event.
*
* @extends Event
* @private
*/
class CloseEvent extends Event {
/**
* Create a new `CloseEvent`.
*
* @param {Number} code The status code explaining why the connection is being closed
* @param {String} reason A human-readable string explaining why the connection is closing
* @param {WebSocket} target A reference to the target to which the event was dispatched
*/
constructor(code, reason, target) {
super('close', target);
this.wasClean = target._closeFrameReceived && target._closeFrameSent;
this.reason = reason;
this.code = code;
}
}
/**
* Class representing an open event.
*
* @extends Event
* @private
*/
class OpenEvent extends Event {
/**
* Create a new `OpenEvent`.
*
* @param {WebSocket} target A reference to the target to which the event was dispatched
*/
constructor(target) {
super('open', target);
}
}
/**
* Class representing an error event.
*
* @extends Event
* @private
*/
class ErrorEvent extends Event {
/**
* Create a new `ErrorEvent`.
*
* @param {Object} error The error that generated this event
* @param {WebSocket} target A reference to the target to which the event was dispatched
*/
constructor(error, target) {
super('error', target);
this.message = error.message;
this.error = error;
}
}
/**
* This provides methods for emulating the `EventTarget` interface. It's not
* meant to be used directly.
*
* @mixin
*/
const EventTarget = {
/**
* Register an event listener.
*
* @param {String} method A string representing the event type to listen for
* @param {Function} listener The listener to add
* @public
*/
addEventListener(method, listener) {
if (typeof listener !== 'function') return;
function onMessage(data) {
listener.call(this, new MessageEvent(data, this));
}
function onClose(code, message) {
listener.call(this, new CloseEvent(code, message, this));
}
function onError(error) {
listener.call(this, new ErrorEvent(error, this));
}
function onOpen() {
listener.call(this, new OpenEvent(this));
}
if (method === 'message') {
onMessage._listener = listener;
this.on(method, onMessage);
} else if (method === 'close') {
onClose._listener = listener;
this.on(method, onClose);
} else if (method === 'error') {
onError._listener = listener;
this.on(method, onError);
} else if (method === 'open') {
onOpen._listener = listener;
this.on(method, onOpen);
} else {
this.on(method, listener);
}
},
/**
* Remove an event listener.
*
* @param {String} method A string representing the event type to remove
* @param {Function} listener The listener to remove
* @public
*/
removeEventListener(method, listener) {
const listeners = this.listeners(method);
for (let i = 0; i < listeners.length; i++) {
if (listeners[i] === listener || listeners[i]._listener === listener) {
this.removeListener(method, listeners[i]);
}
}
}
};
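// Usage sketch, assuming `ws` is a WebSocket that mixes in EventTarget:
//
//   function onMsg(event) { console.log(event.data); }
//   ws.addEventListener('message', onMsg);
//   ws.removeEventListener('message', onMsg);
//
// The `_listener` back-reference set above is what lets removeEventListener
// match a wrapper against the original callback.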
module.exports = EventTarget;
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <CFNetwork/NSObject-Protocol.h>
@class NSError, NSURLAuthenticationChallenge, NSURLSession;
@protocol NSURLSessionDelegate <NSObject>
@optional
- (void)URLSessionDidFinishEventsForBackgroundURLSession:(NSURLSession *)arg1;
- (void)URLSession:(NSURLSession *)arg1 didReceiveChallenge:(NSURLAuthenticationChallenge *)arg2 completionHandler:(void (^)(long long, NSURLCredential *))arg3;
- (void)URLSession:(NSURLSession *)arg1 didBecomeInvalidWithError:(NSError *)arg2;
@end
--TEST--
phpunit --testdox DataProviderTest ../_files/DataProviderTest.php
--FILE--
<?php
define('PHPUNIT_TESTSUITE', TRUE);
$_SERVER['argv'][1] = '--no-configuration';
$_SERVER['argv'][2] = '--testdox';
$_SERVER['argv'][3] = 'DataProviderTest';
$_SERVER['argv'][4] = dirname(dirname(__FILE__)) . '/_files/DataProviderTest.php';
require_once dirname(dirname(dirname(__FILE__))) . '/PHPUnit/Autoload.php';
PHPUnit_TextUI_Command::main();
?>
--EXPECTF--
PHPUnit %s by Sebastian Bergmann.
DataProvider
[ ] Add
; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info 2>&1 | FileCheck %s
; When merging MustAlias and PartialAlias, merge to PartialAlias
; instead of MayAlias.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; CHECK: PartialAlias: i16* %bigbase0, i8* %phi
define i8 @test0(i8* %base, i1 %x) {
entry:
%baseplusone = getelementptr i8, i8* %base, i64 1
br i1 %x, label %red, label %green
red:
br label %green
green:
%phi = phi i8* [ %baseplusone, %red ], [ %base, %entry ]
store i8 0, i8* %phi
%bigbase0 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase0
%loaded = load i8, i8* %phi
ret i8 %loaded
}
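; In test0, %phi is either %base or %base+1, while the i16 store through
; %bigbase0 covers bytes [%base, %base+2): the accesses always overlap but
; need not coincide, hence PartialAlias rather than MustAlias or MayAlias.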
; CHECK: PartialAlias: i16* %bigbase1, i8* %sel
define i8 @test1(i8* %base, i1 %x) {
entry:
%baseplusone = getelementptr i8, i8* %base, i64 1
%sel = select i1 %x, i8* %baseplusone, i8* %base
store i8 0, i8* %sel
%bigbase1 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase1
%loaded = load i8, i8* %sel
ret i8 %loaded
}
const pug = require('pug');
const fs = require('fs');
const _ = require('lodash');
const defaultParam = {
title: "multipage-cli",
tag: {
meta: [],
headLink: [],
headScript: [],
bodyLink: [],
bodyScript: []
},
vueInit: {},
reactInit: {}
}
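// Usage sketch (with hypothetical paths): require('./build')('src/page.pug',
// { title: 'home' }, 'dist/page.html') deep-merges the data over defaultParam,
// compiles the pug template, and writes the rendered HTML synchronously.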
module.exports = function (tpl, data, targetPath) {
let copy = _.cloneDeep(defaultParam);
const fn = pug.compileFile(tpl, { pretty: true });
data = _.merge(copy, data);
const tplData = fn(data);
fs.writeFileSync(targetPath, tplData);
return true;
}
package cn.edu.buaa.crypto.encryption.re.llw16a.serparams;
import cn.edu.buaa.crypto.utils.PairingUtils;
import it.unisa.dia.gas.jpbc.Element;
import it.unisa.dia.gas.jpbc.Pairing;
import it.unisa.dia.gas.jpbc.PairingParameters;
import it.unisa.dia.gas.plaf.jpbc.pairing.PairingFactory;
import java.util.Arrays;
import java.util.Map;
/**
* Created by Weiran Liu on 2017/1/4.
*
* Liu-Liu-Wu-16 CPA-secure RE ciphertext parameter.
*/
public class RELLW16aCiphertextSerParameter extends RELLW16aHeaderSerParameter {
private transient Element C;
private final byte[] byteArrayC;
public RELLW16aCiphertextSerParameter(
PairingParameters pairingParameters, Element C, Element C0,
Map<String, Element> C1s, Map<String, Element> C2s, Map<String, Element> C3s) {
super(pairingParameters, C0, C1s, C2s, C3s);
this.C = C.getImmutable();
this.byteArrayC = this.C.toBytes();
}
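// byteArrayC mirrors C so that the transient pairing element can be
// rebuilt from raw bytes in readObject after Java deserialization.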
public Element getC() { return this.C.duplicate(); }
@Override
public boolean equals(Object anObject) {
if (this == anObject) {
return true;
}
if (anObject instanceof RELLW16aCiphertextSerParameter) {
RELLW16aCiphertextSerParameter that = (RELLW16aCiphertextSerParameter) anObject;
return PairingUtils.isEqualElement(this.C, that.C)
&& Arrays.equals(this.byteArrayC, that.byteArrayC)
&& super.equals(anObject);
}
return false;
}
private void readObject(java.io.ObjectInputStream objectInputStream)
throws java.io.IOException, ClassNotFoundException {
objectInputStream.defaultReadObject();
Pairing pairing = PairingFactory.getPairing(this.getParameters());
this.C = pairing.getGT().newElementFromBytes(this.byteArrayC);
}
}
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <CoreBluetooth/CBManager.h>
@class NSHashTable, NSLock, NSMapTable, NSMutableArray, NSMutableDictionary, NSNumber;
@protocol CBPeripheralManagerDelegate;
@interface CBPeripheralManager : CBManager
{
struct {
unsigned int willRestoreState:1;
unsigned int didAddService:1;
unsigned int didReceiveReadRequest:1;
unsigned int didReceiveWriteRequests:1;
unsigned int centralDidSubscribeToCharacteristic:1;
unsigned int centralDidUnsubscribeFromCharacteristic:1;
unsigned int didStartAdvertising:1;
unsigned int isReadyToUpdate:1;
unsigned int centralDidConnect:1;
unsigned int centralDidUpdateConnectionParameters:1;
unsigned int didPublishL2CAPChannel:1;
unsigned int didUnpublishL2CAPChannel:1;
unsigned int didOpenL2CAPChannel:1;
unsigned int didStopAdvertisingWithError:1;
unsigned int didUpdateANCSAuthorization:1;
} _delegateFlags;
BOOL _isAdvertising;
BOOL _readyForUpdates;
BOOL _waitingForReady;
id <CBPeripheralManagerDelegate> _delegate;
NSMapTable *_centrals;
NSMutableArray *_services;
NSMutableDictionary *_characteristicIDs;
NSLock *_updateLock;
NSNumber *_multipleAdvertisingSupported;
NSHashTable *_l2capChannels;
unsigned long long _attributeIDGenerator;
}
+ (BOOL)supportsFeatures:(unsigned long long)arg1;
+ (long long)authorizationStatus;
- (void).cxx_destruct;
@property unsigned long long attributeIDGenerator; // @synthesize attributeIDGenerator=_attributeIDGenerator;
@property(readonly, retain, nonatomic) NSHashTable *l2capChannels; // @synthesize l2capChannels=_l2capChannels;
@property(retain, nonatomic) NSNumber *multipleAdvertisingSupported; // @synthesize multipleAdvertisingSupported=_multipleAdvertisingSupported;
@property(readonly, nonatomic) BOOL waitingForReady; // @synthesize waitingForReady=_waitingForReady;
@property(readonly, nonatomic) BOOL readyForUpdates; // @synthesize readyForUpdates=_readyForUpdates;
@property(readonly, retain, nonatomic) NSLock *updateLock; // @synthesize updateLock=_updateLock;
@property(readonly, retain, nonatomic) NSMutableDictionary *characteristicIDs; // @synthesize characteristicIDs=_characteristicIDs;
@property(readonly, retain, nonatomic) NSMutableArray *services; // @synthesize services=_services;
@property(readonly, retain, nonatomic) NSMapTable *centrals; // @synthesize centrals=_centrals;
@property(nonatomic) BOOL isAdvertising; // @synthesize isAdvertising=_isAdvertising;
@property(nonatomic) __weak id <CBPeripheralManagerDelegate> delegate; // @synthesize delegate=_delegate;
- (void)handleMsg:(unsigned short)arg1 args:(id)arg2;
- (BOOL)isMsgAllowedAlways:(unsigned short)arg1;
- (BOOL)isMsgAllowedWhenOff:(unsigned short)arg1;
- (void)handleCentralDidUpdateANCSAuthorization:(id)arg1;
- (void)handleL2CAPChannelUnpublished:(id)arg1;
- (void)handleL2CAPChannelPublished:(id)arg1;
- (void)handleL2CAPChannelClosed:(id)arg1;
- (void)handleL2CAPChannelOpened:(id)arg1;
- (void)handleSupportedFeatures:(id)arg1;
- (void)handleAdvertisingAddressChanged:(id)arg1;
- (void)handleConnectionParametersUpdated:(id)arg1;
- (void)handleSolicitedServicesFound:(id)arg1;
- (void)handleReadyForUpdates:(id)arg1;
- (void)handleAdvertisingStopped:(id)arg1;
- (void)handleAdvertisingStarted:(id)arg1;
- (void)handleNotificationRemoved:(id)arg1;
- (void)handleNotificationAdded:(id)arg1;
- (void)handleSetAttributeValues:(id)arg1;
- (void)handleGetAttributeValue:(id)arg1;
- (void)handleServiceAdded:(id)arg1;
- (void)handleRestoringState:(id)arg1;
@property(readonly, nonatomic, getter=supportsMultipleAdvertising) BOOL isSupportingMultipleAdvertising;
- (BOOL)updateValue:(id)arg1 forCharacteristic:(id)arg2 onSubscribedCentrals:(id)arg3;
- (void)respondToTransaction:(id)arg1 value:(id)arg2 attributeID:(id)arg3 result:(long long)arg4;
- (void)respondToRequest:(id)arg1 withResult:(long long)arg2;
- (void)removeAllServices;
- (void)removeService:(id)arg1;
- (void)addService:(id)arg1;
- (void)stopAdvertising;
- (void)startAdvertising:(id)arg1;
- (void)setDesiredConnectionLatency:(long long)arg1 forCentral:(id)arg2;
- (void)observeValueForKeyPath:(id)arg1 ofObject:(id)arg2 change:(id)arg3 context:(void *)arg4;
- (id)initWithDelegate:(id)arg1 queue:(id)arg2 options:(id)arg3;
- (id)initWithDelegate:(id)arg1 queue:(id)arg2;
- (id)init;
- (void)removeAllL2CAPChannels;
- (id)l2capChannelForPeer:(id)arg1 withPsm:(unsigned short)arg2;
- (void)overrideLocalLeAddress:(id)arg1;
- (void)unpublishL2CAPChannel:(unsigned short)arg1;
- (void)publishL2CAPChannel:(unsigned short)arg1 requiresEncryption:(BOOL)arg2 options:(id)arg3;
- (void)publishL2CAPChannel:(unsigned short)arg1 requiresEncryption:(BOOL)arg2;
- (void)publishL2CAPChannelWithEncryption:(BOOL)arg1;
- (id)centralWithIdentifier:(id)arg1;
- (void)dealloc;
- (void)forEachCentral:(CDUnknownBlockType)arg1;
- (id)peerWithInfo:(id)arg1;
- (id)centralWithInfo:(id)arg1;
@end
#!/bin/bash
set -e
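# Usage sketch: ./release.sh <release-version> <next-version>
# Builds images for the tagged release and for current master, then pushes
# the release, next, and latest tags for each Baragon image.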
RELEASE_VERSION=$1
NEW_VERSION=$2
git checkout "Baragon-$RELEASE_VERSION"
mvn clean package docker:build -DskipTests
git checkout master
mvn clean package docker:build -DskipTests
docker tag hubspot/baragonagent-aurora:$NEW_VERSION hubspot/baragonagent-aurora:latest
docker tag hubspot/baragonagent:$NEW_VERSION hubspot/baragonagent:latest
docker tag hubspot/baragonservice:$NEW_VERSION hubspot/baragonservice:latest
docker push hubspot/baragonagent-aurora:$RELEASE_VERSION && docker push hubspot/baragonagent-aurora:$NEW_VERSION && docker push hubspot/baragonagent-aurora:latest && docker push hubspot/baragonagent:$RELEASE_VERSION && docker push hubspot/baragonagent:$NEW_VERSION && docker push hubspot/baragonagent:latest && docker push hubspot/baragonservice:$RELEASE_VERSION && docker push hubspot/baragonservice:$NEW_VERSION && docker push hubspot/baragonservice:latest
import React from 'react';
import { Text as TextPrimitive, TextProps as TextPrimitiveProps } from '@modulz/primitives';
import { theme } from '../theme';
type Weight = 'normal' | 'medium';
type Size = 0 | 1 | 2 | 3 | 4 | 5;
export type HeadingProps = TextPrimitiveProps & {
weight?: Weight;
size?: Size;
truncate?: boolean;
};
export const Heading = React.forwardRef<HTMLHeadingElement, HeadingProps>((props, forwardedRef) => (
<TextPrimitive
{...props}
ref={forwardedRef}
styleConfig={{
base: {
text: {
normal: {
fontFamily: theme.fonts.normal,
fontWeight: 500,
color: theme.colors.gray800,
},
},
},
variants: {
weight: {
normal: {
text: {
normal: {
fontWeight: 400,
},
},
},
medium: {
text: {
normal: {
fontWeight: 500,
},
},
},
},
size: {
0: {
text: {
normal: {
fontSize: theme.fontSizes[3],
lineHeight: theme.lineHeights[2],
letterSpacing: '-0.005em',
textIndent: '-0.0025em',
},
},
},
1: {
text: {
normal: {
fontSize: theme.fontSizes[5],
lineHeight: theme.lineHeights[2],
letterSpacing: '-0.008em',
textIndent: '-0.004em',
},
},
},
2: {
text: {
normal: {
fontSize: theme.fontSizes[7],
lineHeight: theme.lineHeights[3],
letterSpacing: '-0.013em',
textIndent: '-0.00625em',
},
},
},
3: {
text: {
normal: {
fontSize: theme.fontSizes[8],
lineHeight: theme.lineHeights[4],
letterSpacing: '-0.021em',
textIndent: '-0.0105em',
},
},
},
4: {
text: {
normal: {
fontSize: theme.fontSizes[9],
lineHeight: theme.lineHeights[5],
letterSpacing: '-0.034em',
textIndent: '-0.017em',
},
},
},
5: {
text: {
normal: {
fontSize: theme.fontSizes[10],
lineHeight: theme.lineHeights[9],
letterSpacing: '-0.055em',
textIndent: '-0.0275em',
},
},
},
},
truncate: {
true: {
text: {
normal: {
whiteSpace: 'nowrap',
textOverflow: 'ellipsis',
overflow: 'hidden',
},
},
},
},
},
}}
/>
));
Heading.defaultProps = {
as: 'h1',
truncate: false,
size: 2,
weight: 'medium',
};
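
// A minimal usage sketch (import path assumed; not part of this module):
//
//   import { Heading } from './Heading';
//
//   const PageTitle = () => (
//     <Heading size={3} weight="normal" truncate>
//       Dashboard
//     </Heading>
//   );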
import React from 'react';
import Console, { ConsoleLogTypes } from './';
import { storiesOf } from '@storybook/react';
import { Layout, Content } from '../Dashboard/styles';
const stories = storiesOf('Custom Functions|Console', module);
const containerWrapper = storyFn => (
<Layout>
<Content>{storyFn()}</Content>
</Layout>
);
stories.addDecorator(containerWrapper);
const sampleSource1 = 'IAmSomeSource';
const sampleSource2 = 'IAmSomeOtherSource';
const getSampleMessage = (x: string | number) =>
`I am a sample log ${x} log ${x} log log. I am such a log.`;
const sampleLogs = [
{
source: sampleSource1,
message: getSampleMessage(1),
severity: ConsoleLogTypes.Info,
},
{
source: sampleSource1,
message: getSampleMessage('info'),
severity: ConsoleLogTypes.Info,
},
{
source: sampleSource1,
message: getSampleMessage('log'),
severity: ConsoleLogTypes.Log,
},
{
source: sampleSource2,
message: getSampleMessage('warn'),
severity: ConsoleLogTypes.Warn,
},
{
source: sampleSource2,
message: getSampleMessage('error'),
severity: ConsoleLogTypes.Error,
},
];
const voidFunc = () => {};
export const BasicConsole = () => (
<Console logs={sampleLogs} fetchLogs={voidFunc} clearLogs={voidFunc} />
);
stories
.add('basic', () => <BasicConsole />)
.add('empty', () => <Console clearLogs={voidFunc} fetchLogs={voidFunc} logs={[]} />);
[gd_scene load_steps=6 format=2]
[ext_resource path="res://libgodot.gdnlib" type="GDNativeLibrary" id=1]
[ext_resource path="res://Mob.tscn" type="PackedScene" id=2]
[ext_resource path="res://Player.tscn" type="PackedScene" id=3]
[sub_resource type="NativeScript" id=1]
resource_name = "main.Main"
class_name = "main.Main"
library = ExtResource( 1 )
[sub_resource type="Curve2D" id=2]
bake_interval = 5.0
_data = {
"points": PoolVector2Array( 0, 0, 0, 0, -0.51474, 1.09617, 0, 0, 0, 0, 480.6, 3.53968, 0, 0, 0, 0, 479.785, 721.117, 0, 0, 0, 0, 0.856934, 720.303, 0, 0, 0, 0, -0.51474, 1.09617 )
}
[node name="Main" type="Node" index="0"]
script = SubResource( 1 )
Mob = ExtResource( 2 )
[node name="Player" parent="." index="0" instance=ExtResource( 3 )]
[node name="MobTimer" type="Timer" parent="." index="1"]
process_mode = 1
wait_time = 0.5
one_shot = false
autostart = false
[node name="ScoreTimer" type="Timer" parent="." index="2"]
process_mode = 1
wait_time = 1.0
one_shot = false
autostart = false
[node name="StartTimer" type="Timer" parent="." index="3"]
process_mode = 1
wait_time = 2.0
one_shot = true
autostart = false
[node name="StartPosition" type="Position2D" parent="." index="4"]
position = Vector2( 240, 450 )
_sections_unfolded = [ "Transform" ]
[node name="MobPath" type="Path2D" parent="." index="5"]
curve = SubResource( 2 )
[node name="MobSpawnLocation" type="PathFollow2D" parent="MobPath" index="0"]
position = Vector2( -0.51474, 1.09617 )
rotation = 0.00507879
offset = 0.0
h_offset = 0.0
v_offset = 0.0
rotate = true
cubic_interp = true
loop = true
lookahead = 4.0
[connection signal="hit" from="Player" to="." method="game_over"]
[connection signal="timeout" from="MobTimer" to="." method="_on_Mob_timer_timeout"]
[connection signal="timeout" from="ScoreTimer" to="." method="_on_Score_timer_timeout"]
[connection signal="timeout" from="StartTimer" to="." method="_on_Start_timer_timeout"]
import Vector2d from "./../../../math/vector2.js";
import pool from "./../../../system/pooling.js";
import TMXRenderer from "./TMXRenderer.js";
import TMXLayer from "./../TMXLayer.js";
/**
* @classdesc
 * an Isometric Map Renderer
* @class TMXIsometricRenderer
* @memberOf me
* @extends me.TMXRenderer
* @constructor
* @param {me.TMXTileMap} map the TMX map
*/
class TMXIsometricRenderer extends TMXRenderer {
// constructor
constructor(map) {
super(
map.cols,
map.rows,
map.tilewidth,
map.tileheight
);
this.hTilewidth = this.tilewidth / 2;
this.hTileheight = this.tileheight / 2;
this.originX = this.rows * this.hTilewidth;
}
/**
* return true if the renderer can render the specified layer
* @ignore
*/
canRender(layer) {
return (
(layer.orientation === "isometric") &&
super.canRender(layer)
);
}
/**
* return the bounding rect for this map renderer
* @name me.TMXIsometricRenderer#getBounds
* @public
* @function
* @param {me.TMXLayer} [layer] calculate the bounding rect for a specific layer (will return a new bounds object)
* @return {me.Rect}
*/
getBounds(layer) {
var bounds = layer instanceof TMXLayer ? pool.pull("me.Rect", 0, 0, 0, 0) : this.bounds;
bounds.setShape(
0, 0,
(this.cols + this.rows) * (this.tilewidth / 2),
(this.cols + this.rows) * (this.tileheight / 2)
);
return bounds;
}
/**
* return the tile position corresponding to the specified pixel
* @ignore
*/
pixelToTileCoords(x, y, v) {
var ret = v || new Vector2d();
return ret.set(
(y / this.tileheight) + ((x - this.originX) / this.tilewidth),
(y / this.tileheight) - ((x - this.originX) / this.tilewidth)
);
}
/**
* return the pixel position corresponding of the specified tile
* @ignore
*/
tileToPixelCoords(x, y, v) {
var ret = v || new Vector2d();
return ret.set(
(x - y) * this.hTilewidth + this.originX,
(x + y) * this.hTileheight
);
}
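
    /*
     * Worked example (map values assumed for illustration): with rows = 10,
     * tilewidth = 64 and tileheight = 32, we get hTilewidth = 32,
     * hTileheight = 16 and originX = 10 * 32 = 320. Then:
     *   tileToPixelCoords(2, 1)    -> ((2 - 1) * 32 + 320, (2 + 1) * 16) = (352, 48)
     *   pixelToTileCoords(352, 48) -> (48 / 32 + 32 / 64, 48 / 32 - 32 / 64) = (2, 1)
     * confirming the two conversions are inverses of each other.
     */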
/**
* fix the position of Objects to match
* the way Tiled places them
* @ignore
*/
adjustPosition(obj) {
var tileX = obj.x / this.hTilewidth;
var tileY = obj.y / this.tileheight;
var isoPos = pool.pull("me.Vector2d");
this.tileToPixelCoords(tileX, tileY, isoPos);
obj.x = isoPos.x;
obj.y = isoPos.y;
pool.push(isoPos);
}
/**
 * draw the specified tile
* @ignore
*/
drawTile(renderer, x, y, tmxTile) {
var tileset = tmxTile.tileset;
// draw the tile
tileset.drawTile(
renderer,
((this.cols - 1) * tileset.tilewidth + (x - y) * tileset.tilewidth >> 1),
(-tileset.tilewidth + (x + y) * tileset.tileheight >> 2),
tmxTile
);
}
/**
* draw the tile map
* @ignore
*/
drawTileLayer(renderer, layer, rect) {
// cache a couple of useful references
var tileset = layer.tileset;
// get top-left and bottom-right tile position
var rowItr = this.pixelToTileCoords(
rect.pos.x - tileset.tilewidth,
rect.pos.y - tileset.tileheight,
pool.pull("me.Vector2d")
).floorSelf();
var tileEnd = this.pixelToTileCoords(
rect.pos.x + rect.width + tileset.tilewidth,
rect.pos.y + rect.height + tileset.tileheight,
pool.pull("me.Vector2d")
).ceilSelf();
var rectEnd = this.tileToPixelCoords(tileEnd.x, tileEnd.y, pool.pull("me.Vector2d"));
// Determine the tile and pixel coordinates to start at
var startPos = this.tileToPixelCoords(rowItr.x, rowItr.y, pool.pull("me.Vector2d"));
startPos.x -= this.hTilewidth;
startPos.y += this.tileheight;
/* Determine in which half of the tile the top-left corner of the area we
* need to draw is. If we're in the upper half, we need to start one row
* up due to those tiles being visible as well. How we go up one row
* depends on whether we're in the left or right half of the tile.
*/
var inUpperHalf = startPos.y - rect.pos.y > this.hTileheight;
var inLeftHalf = rect.pos.x - startPos.x < this.hTilewidth;
if (inUpperHalf) {
if (inLeftHalf) {
rowItr.x--;
startPos.x -= this.hTilewidth;
}
else {
rowItr.y--;
startPos.x += this.hTilewidth;
}
startPos.y -= this.hTileheight;
}
// Determine whether the current row is shifted half a tile to the right
var shifted = inUpperHalf ^ inLeftHalf;
        // initialize the columnItr vector
var columnItr = rowItr.clone();
// main drawing loop
for (var y = startPos.y * 2; y - this.tileheight * 2 < rectEnd.y * 2; y += this.tileheight) {
columnItr.setV(rowItr);
for (var x = startPos.x; x < rectEnd.x; x += this.tilewidth) {
var tmxTile = layer.cellAt(columnItr.x, columnItr.y);
// render if a valid tile position
if (tmxTile) {
tileset = tmxTile.tileset;
// offset could be different per tileset
var offset = tileset.tileoffset;
// draw our tile
tileset.drawTile(
renderer,
offset.x + x,
offset.y + y / 2 - tileset.tileheight,
tmxTile
);
}
// Advance to the next column
columnItr.x++;
columnItr.y--;
}
// Advance to the next row
if (!shifted) {
rowItr.x++;
startPos.x += this.hTilewidth;
shifted = true;
}
else {
rowItr.y++;
startPos.x -= this.hTilewidth;
shifted = false;
}
}
pool.push(columnItr);
pool.push(rowItr);
pool.push(tileEnd);
pool.push(rectEnd);
pool.push(startPos);
}
};
export default TMXIsometricRenderer;
using Autofac;
using Autofac.Extras.DynamicProxy;
using LinCms.Cms.Account;
using LinCms.Cms.Files;
using LinCms.Cms.Users;
using LinCms.IRepositories;
using LinCms.Middleware;
using LinCms.Repositories;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
namespace LinCms.Startup.Configuration
{
/// <summary>
    /// Registers repository implementations for their interfaces
/// </summary>
public class RepositoryModule : Autofac.Module
{
protected override void Load(ContainerBuilder builder)
{
Assembly assemblysRepository = Assembly.Load("LinCms.Infrastructure");
builder.RegisterAssemblyTypes(assemblysRepository)
.Where(a => a.Name.EndsWith("Repository"))
.AsImplementedInterfaces()
.InstancePerLifetimeScope();
builder.RegisterGeneric(typeof(AuditBaseRepository<>)).As(typeof(IAuditBaseRepository<>)).InstancePerLifetimeScope();
builder.RegisterGeneric(typeof(AuditBaseRepository<,>)).As(typeof(IAuditBaseRepository<,>)).InstancePerLifetimeScope();
}
}
}
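
// A minimal wiring sketch (generic host setup assumed; not part of this file):
//
//     Host.CreateDefaultBuilder(args)
//         .UseServiceProviderFactory(new AutofacServiceProviderFactory())
//         .ConfigureContainer<ContainerBuilder>(builder =>
//             builder.RegisterModule(new RepositoryModule()));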
import {
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
OnInit,
} from '@angular/core';
import { Client } from '../../../../services/api/client';
@Component({
moduleId: module.id,
selector: 'm-wallet--balance-rewards',
templateUrl: 'balance.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class WalletBalanceRewardsComponent implements OnInit {
inProgress: boolean = false;
balance: number = 0;
constructor(protected client: Client, protected cd: ChangeDetectorRef) {}
ngOnInit() {
this.load();
}
async load() {
this.inProgress = true;
this.detectChanges();
try {
let response: any = await this.client.get(
`api/v2/blockchain/rewards/balance`
);
if (response && typeof response.balance !== 'undefined') {
this.balance = response.balance;
} else {
console.error('No data');
this.balance = 0;
}
} catch (e) {
console.error(e);
this.balance = 0;
} finally {
this.inProgress = false;
this.detectChanges();
}
}
detectChanges() {
this.cd.markForCheck();
this.cd.detectChanges();
}
}
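
// For reference, a hypothetical success payload from
// `api/v2/blockchain/rewards/balance` (shape inferred from load() above):
//   { balance: 42 }
// A missing `balance` field or a failed request resets the balance to 0.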
const assertion = require('./assertion.js');
const AssertionRunner = require('./assertion-runner.js');
const AssertionError = require('./assertion-error.js');
module.exports = assertion;
module.exports.AssertionRunner = AssertionRunner;
module.exports.AssertionError = AssertionError;
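
// A hypothetical usage sketch (consumer code; require path assumed):
//   const assertion = require('./');
//   try { /* evaluate an assertion */ } catch (err) {
//     if (err instanceof assertion.AssertionError) { /* report the failure */ }
//   }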
/*
* Copyright (c) 2015 MediaTek Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
};
static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
struct drm_framebuffer *fb;
int ret;
if (drm_format_num_planes(mode->pixel_format) != 1)
return ERR_PTR(-EINVAL);
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
drm_helper_mode_fill_fb_struct(dev, fb, mode);
fb->obj[0] = obj;
ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
kfree(fb);
return ERR_PTR(ret);
}
return fb;
}
/*
* Wait for any exclusive fence in fb's gem object's reservation object.
*
* Returns -ERESTARTSYS if interrupted, else 0.
*/
int mtk_fb_wait(struct drm_framebuffer *fb)
{
struct drm_gem_object *gem;
struct reservation_object *resv;
long ret;
if (!fb)
return 0;
gem = fb->obj[0];
if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
return 0;
resv = gem->dma_buf->resv;
ret = reservation_object_wait_timeout_rcu(resv, false, true,
MAX_SCHEDULE_TIMEOUT);
/* MAX_SCHEDULE_TIMEOUT on success, -ERESTARTSYS if interrupted */
if (WARN_ON(ret < 0))
return ret;
return 0;
}
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
unsigned int height = cmd->height;
unsigned int size, bpp;
int ret;
if (drm_format_num_planes(cmd->pixel_format) != 1)
return ERR_PTR(-EINVAL);
gem = drm_gem_object_lookup(file, cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
bpp = drm_format_plane_cpp(cmd->pixel_format, 0);
size = (height - 1) * cmd->pitches[0] + width * bpp;
size += cmd->offsets[0];
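	/*
	 * Worked example (values assumed): a tightly packed 1920x1080
	 * XRGB8888 buffer has bpp = 4 and pitches[0] = 7680, so with
	 * offsets[0] = 0 this gives size = 1079 * 7680 + 1920 * 4
	 * = 8294400 bytes, exactly 1920 * 1080 * 4; the check below
	 * rejects GEM objects smaller than this.
	 */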
if (gem->size < size) {
ret = -EINVAL;
goto unreference;
}
fb = mtk_drm_framebuffer_init(dev, cmd, gem);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto unreference;
}
return fb;
unreference:
drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
}
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<!-- Copyright (C) 1988-2019 Free Software Foundation, Inc.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with the
Invariant Sections being "Funding Free Software", the Front-Cover
Texts being (a) (see below), and with the Back-Cover Texts being (b)
(see below). A copy of the license is included in the section entitled
"GNU Free Documentation License".
(a) The FSF's Front-Cover Text is:
A GNU Manual
(b) The FSF's Back-Cover Text is:
You have freedom to copy and modify this GNU Manual, like GNU
software. Copies published by the Free Software Foundation raise
funds for GNU development. -->
<!-- Created by GNU Texinfo 6.3, http://www.gnu.org/software/texinfo/ -->
<head>
<title>Using the GNU Compiler Collection (GCC): Preprocessor Options</title>
<meta name="description" content="Using the GNU Compiler Collection (GCC): Preprocessor Options">
<meta name="keywords" content="Using the GNU Compiler Collection (GCC): Preprocessor Options">
<meta name="resource-type" content="document">
<meta name="distribution" content="global">
<meta name="Generator" content="makeinfo">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<link href="index.html#Top" rel="start" title="Top">
<link href="Option-Index.html#Option-Index" rel="index" title="Option Index">
<link href="index.html#SEC_Contents" rel="contents" title="Table of Contents">
<link href="Invoking-GCC.html#Invoking-GCC" rel="up" title="Invoking GCC">
<link href="Assembler-Options.html#Assembler-Options" rel="next" title="Assembler Options">
<link href="Instrumentation-Options.html#Instrumentation-Options" rel="prev" title="Instrumentation Options">
<style type="text/css">
<!--
a.summary-letter {text-decoration: none}
blockquote.indentedblock {margin-right: 0em}
blockquote.smallindentedblock {margin-right: 0em; font-size: smaller}
blockquote.smallquotation {font-size: smaller}
div.display {margin-left: 3.2em}
div.example {margin-left: 3.2em}
div.lisp {margin-left: 3.2em}
div.smalldisplay {margin-left: 3.2em}
div.smallexample {margin-left: 3.2em}
div.smalllisp {margin-left: 3.2em}
kbd {font-style: oblique}
pre.display {font-family: inherit}
pre.format {font-family: inherit}
pre.menu-comment {font-family: serif}
pre.menu-preformatted {font-family: serif}
pre.smalldisplay {font-family: inherit; font-size: smaller}
pre.smallexample {font-size: smaller}
pre.smallformat {font-family: inherit; font-size: smaller}
pre.smalllisp {font-size: smaller}
span.nolinebreak {white-space: nowrap}
span.roman {font-family: initial; font-weight: normal}
span.sansserif {font-family: sans-serif; font-weight: normal}
ul.no-bullet {list-style: none}
-->
</style>
<link rel="stylesheet" type="text/css" href="/gcc.css">
</head>
<body lang="en">
<a name="Preprocessor-Options"></a>
<div class="header">
<p>
Next: <a href="Assembler-Options.html#Assembler-Options" accesskey="n" rel="next">Assembler Options</a>, Previous: <a href="Instrumentation-Options.html#Instrumentation-Options" accesskey="p" rel="prev">Instrumentation Options</a>, Up: <a href="Invoking-GCC.html#Invoking-GCC" accesskey="u" rel="up">Invoking GCC</a> [<a href="index.html#SEC_Contents" title="Table of contents" rel="contents">Contents</a>][<a href="Option-Index.html#Option-Index" title="Index" rel="index">Index</a>]</p>
</div>
<hr>
<a name="Options-Controlling-the-Preprocessor"></a>
<h3 class="section">3.12 Options Controlling the Preprocessor</h3>
<a name="index-preprocessor-options"></a>
<a name="index-options_002c-preprocessor"></a>
<p>These options control the C preprocessor, which is run on each C source
file before actual compilation.
</p>
<p>If you use the <samp>-E</samp> option, nothing is done except preprocessing.
Some of these options make sense only together with <samp>-E</samp> because
they cause the preprocessor output to be unsuitable for actual
compilation.
</p>
<p>In addition to the options listed here, there are a number of options
to control search paths for include files documented in
<a href="Directory-Options.html#Directory-Options">Directory Options</a>.
Options to control preprocessor diagnostics are listed in
<a href="Warning-Options.html#Warning-Options">Warning Options</a>.
</p>
<dl compact="compact">
<dt><code>-D <var>name</var></code></dt>
<dd><a name="index-D-1"></a>
<p>Predefine <var>name</var> as a macro, with definition <code>1</code>.
</p>
</dd>
<dt><code>-D <var>name</var>=<var>definition</var></code></dt>
<dd><p>The contents of <var>definition</var> are tokenized and processed as if
they appeared during translation phase three in a ‘<samp>#define</samp>’
directive. In particular, the definition is truncated by
embedded newline characters.
</p>
<p>If you are invoking the preprocessor from a shell or shell-like
program you may need to use the shell’s quoting syntax to protect
characters such as spaces that have a meaning in the shell syntax.
</p>
<p>If you wish to define a function-like macro on the command line, write
its argument list with surrounding parentheses before the equals sign
(if any). Parentheses are meaningful to most shells, so you should
quote the option. With <code>sh</code> and <code>csh</code>,
<samp>-D'<var>name</var>(<var>args…</var>)=<var>definition</var>'</samp> works.
</p>
<p><samp>-D</samp> and <samp>-U</samp> options are processed in the order they
are given on the command line. All <samp>-imacros <var>file</var></samp> and
<samp>-include <var>file</var></samp> options are processed after all
<samp>-D</samp> and <samp>-U</samp> options.
</p>
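<p>For example, <code>gcc -DDEBUG -DVERSION=2 -E foo.c</code> (an
illustrative invocation) preprocesses <samp>foo.c</samp> with
<code>DEBUG</code> defined as <code>1</code> and <code>VERSION</code>
defined as <code>2</code>.
</p>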
</dd>
<dt><code>-U <var>name</var></code></dt>
<dd><a name="index-U"></a>
<p>Cancel any previous definition of <var>name</var>, either built in or
provided with a <samp>-D</samp> option.
</p>
</dd>
<dt><code>-include <var>file</var></code></dt>
<dd><a name="index-include"></a>
<p>Process <var>file</var> as if <code>#include "file"</code> appeared as the first
line of the primary source file. However, the first directory searched
for <var>file</var> is the preprocessor’s working directory <em>instead of</em>
the directory containing the main source file. If not found there, it
is searched for in the remainder of the <code>#include "…"</code> search
chain as normal.
</p>
<p>If multiple <samp>-include</samp> options are given, the files are included
in the order they appear on the command line.
</p>
</dd>
<dt><code>-imacros <var>file</var></code></dt>
<dd><a name="index-imacros"></a>
<p>Exactly like <samp>-include</samp>, except that any output produced by
scanning <var>file</var> is thrown away. Macros it defines remain defined.
This allows you to acquire all the macros from a header without also
processing its declarations.
</p>
<p>All files specified by <samp>-imacros</samp> are processed before all files
specified by <samp>-include</samp>.
</p>
</dd>
<dt><code>-undef</code></dt>
<dd><a name="index-undef"></a>
<p>Do not predefine any system-specific or GCC-specific macros. The
standard predefined macros remain defined.
</p>
</dd>
<dt><code>-pthread</code></dt>
<dd><a name="index-pthread"></a>
<p>Define additional macros required for using the POSIX threads library.
You should use this option consistently for both compilation and linking.
This option is supported on GNU/Linux targets, most other Unix derivatives,
and also on x86 Cygwin and MinGW targets.
</p>
</dd>
<dt><code>-M</code></dt>
<dd><a name="index-M"></a>
<a name="index-make"></a>
<a name="index-dependencies_002c-make"></a>
<p>Instead of outputting the result of preprocessing, output a rule
suitable for <code>make</code> describing the dependencies of the main
source file. The preprocessor outputs one <code>make</code> rule containing
the object file name for that source file, a colon, and the names of all
the included files, including those coming from <samp>-include</samp> or
<samp>-imacros</samp> command-line options.
</p>
<p>Unless specified explicitly (with <samp>-MT</samp> or <samp>-MQ</samp>), the
object file name consists of the name of the source file with any
suffix replaced with object file suffix and with any leading directory
parts removed. If there are many included files then the rule is
split into several lines using ‘<samp>\</samp>’-newline. The rule has no
commands.
</p>
<p>This option does not suppress the preprocessor’s debug output, such as
<samp>-dM</samp>. To avoid mixing such debug output with the dependency
rules you should explicitly specify the dependency output file with
<samp>-MF</samp>, or use an environment variable like
<code>DEPENDENCIES_OUTPUT</code> (see <a href="Environment-Variables.html#Environment-Variables">Environment Variables</a>). Debug output
is still sent to the regular output stream as normal.
</p>
<p>Passing <samp>-M</samp> to the driver implies <samp>-E</samp>, and suppresses
warnings with an implicit <samp>-w</samp>.
</p>
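<p>For instance, assuming <samp>test.c</samp> includes only <samp>test.h</samp>,
<code>gcc -M test.c</code> prints a rule of the form
</p>
<div class="smallexample">
<pre class="smallexample">test.o: test.c test.h
</pre></div>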
</dd>
<dt><code>-MM</code></dt>
<dd><a name="index-MM"></a>
<p>Like <samp>-M</samp> but do not mention header files that are found in
system header directories, nor header files that are included,
directly or indirectly, from such a header.
</p>
<p>This implies that the choice of angle brackets or double quotes in an
‘<samp>#include</samp>’ directive does not in itself determine whether that
header appears in <samp>-MM</samp> dependency output.
</p>
<a name="dashMF"></a></dd>
<dt><code>-MF <var>file</var></code></dt>
<dd><a name="index-MF"></a>
<p>When used with <samp>-M</samp> or <samp>-MM</samp>, specifies a
file to write the dependencies to. If no <samp>-MF</samp> switch is given
the preprocessor sends the rules to the same place it would send
preprocessed output.
</p>
<p>When used with the driver options <samp>-MD</samp> or <samp>-MMD</samp>,
<samp>-MF</samp> overrides the default dependency output file.
</p>
<p>If <var>file</var> is <samp>-</samp>, then the dependencies are written to <samp>stdout</samp>.
</p>
</dd>
<dt><code>-MG</code></dt>
<dd><a name="index-MG"></a>
<p>In conjunction with an option such as <samp>-M</samp> requesting
dependency generation, <samp>-MG</samp> assumes missing header files are
generated files and adds them to the dependency list without raising
an error. The dependency filename is taken directly from the
<code>#include</code> directive without prepending any path. <samp>-MG</samp>
also suppresses preprocessed output, as a missing header file renders
this useless.
</p>
<p>This feature is used in automatic updating of makefiles.
</p>
</dd>
<dt><code>-MP</code></dt>
<dd><a name="index-MP"></a>
<p>This option instructs CPP to add a phony target for each dependency
other than the main file, causing each to depend on nothing. These
dummy rules work around errors <code>make</code> gives if you remove header
files without updating the <samp>Makefile</samp> to match.
</p>
<p>This is typical output:
</p>
<div class="smallexample">
<pre class="smallexample">test.o: test.c test.h
test.h:
</pre></div>
</dd>
<dt><code>-MT <var>target</var></code></dt>
<dd><a name="index-MT"></a>
<p>Change the target of the rule emitted by dependency generation. By
default CPP takes the name of the main input file, deletes any
directory components and any file suffix such as ‘<samp>.c</samp>’, and
appends the platform’s usual object suffix. The result is the target.
</p>
<p>An <samp>-MT</samp> option sets the target to be exactly the string you
specify. If you want multiple targets, you can specify them as a single
argument to <samp>-MT</samp>, or use multiple <samp>-MT</samp> options.
</p>
<p>For example, <samp><span class="nolinebreak">-MT</span> '$(objpfx)foo.o'<!-- /@w --></samp> might give
</p>
<div class="smallexample">
<pre class="smallexample">$(objpfx)foo.o: foo.c
</pre></div>
</dd>
<dt><code>-MQ <var>target</var></code></dt>
<dd><a name="index-MQ"></a>
<p>Same as <samp>-MT</samp>, but it quotes any characters which are special to
Make. <samp><span class="nolinebreak">-MQ</span> '$(objpfx)foo.o'<!-- /@w --></samp> gives
</p>
<div class="smallexample">
<pre class="smallexample">$$(objpfx)foo.o: foo.c
</pre></div>
<p>The default target is automatically quoted, as if it were given with
<samp>-MQ</samp>.
</p>
</dd>
<dt><code>-MD</code></dt>
<dd><a name="index-MD"></a>
<p><samp>-MD</samp> is equivalent to <samp>-M -MF <var>file</var></samp>, except that
<samp>-E</samp> is not implied. The driver determines <var>file</var> based on
whether an <samp>-o</samp> option is given. If it is, the driver uses its
argument but with a suffix of <samp>.d</samp>, otherwise it takes the name
of the input file, removes any directory components and suffix, and
applies a <samp>.d</samp> suffix.
</p>
<p>If <samp>-MD</samp> is used in conjunction with <samp>-E</samp>, any
<samp>-o</samp> switch is understood to specify the dependency output file
(see <a href="#dashMF">-MF</a>), but if used without <samp>-E</samp>, each <samp>-o</samp>
is understood to specify a target object file.
</p>
<p>Since <samp>-E</samp> is not implied, <samp>-MD</samp> can be used to generate
a dependency output file as a side effect of the compilation process.
</p>
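<p>For example, <code>gcc -c -MD -o build/foo.o foo.c</code> compiles
<samp>foo.c</samp> to <samp>build/foo.o</samp> and, as a side effect,
writes the dependency rule to <samp>build/foo.d</samp>.
</p>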
</dd>
<dt><code>-MMD</code></dt>
<dd><a name="index-MMD"></a>
<p>Like <samp>-MD</samp> except mention only user header files, not system
header files.
</p>
</dd>
<dt><code>-fpreprocessed</code></dt>
<dd><a name="index-fpreprocessed"></a>
<p>Indicate to the preprocessor that the input file has already been
preprocessed. This suppresses things like macro expansion, trigraph
conversion, escaped newline splicing, and processing of most directives.
The preprocessor still recognizes and removes comments, so that you can
pass a file preprocessed with <samp>-C</samp> to the compiler without
problems. In this mode the integrated preprocessor is little more than
a tokenizer for the front ends.
</p>
<p><samp>-fpreprocessed</samp> is implicit if the input file has one of the
extensions ‘<samp>.i</samp>’, ‘<samp>.ii</samp>’ or ‘<samp>.mi</samp>’. These are the
extensions that GCC uses for preprocessed files created by
<samp>-save-temps</samp>.
</p>
</dd>
<dt><code>-fdirectives-only</code></dt>
<dd><a name="index-fdirectives_002donly"></a>
<p>When preprocessing, handle directives, but do not expand macros.
</p>
<p>The option’s behavior depends on the <samp>-E</samp> and <samp>-fpreprocessed</samp>
options.
</p>
<p>With <samp>-E</samp>, preprocessing is limited to the handling of directives
such as <code>#define</code>, <code>#ifdef</code>, and <code>#error</code>. Other
preprocessor operations, such as macro expansion and trigraph
conversion are not performed. In addition, the <samp>-dD</samp> option is
implicitly enabled.
</p>
<p>With <samp>-fpreprocessed</samp>, predefinition of command line and most
builtin macros is disabled. Macros such as <code>__LINE__</code>, which are
contextually dependent, are handled normally. This enables compilation of
files previously preprocessed with <code>-E -fdirectives-only</code>.
</p>
<p>With both <samp>-E</samp> and <samp>-fpreprocessed</samp>, the rules for
<samp>-fpreprocessed</samp> take precedence. This enables full preprocessing of
files previously preprocessed with <code>-E -fdirectives-only</code>.
</p>
</dd>
<dt><code>-fdollars-in-identifiers</code></dt>
<dd><a name="index-fdollars_002din_002didentifiers"></a>
<a name="fdollars_002din_002didentifiers"></a><p>Accept ‘<samp>$</samp>’ in identifiers.
</p>
</dd>
<dt><code>-fextended-identifiers</code></dt>
<dd><a name="index-fextended_002didentifiers"></a>
<p>Accept universal character names in identifiers. This option is
enabled by default for C99 (and later C standard versions) and C++.
</p>
</dd>
<dt><code>-fno-canonical-system-headers</code></dt>
<dd><a name="index-fno_002dcanonical_002dsystem_002dheaders"></a>
<p>When preprocessing, do not shorten system header paths with canonicalization.
</p>
</dd>
<dt><code>-ftabstop=<var>width</var></code></dt>
<dd><a name="index-ftabstop"></a>
<p>Set the distance between tab stops. This helps the preprocessor report
correct column numbers in warnings or errors, even if tabs appear on the
line. If the value is less than 1 or greater than 100, the option is
ignored. The default is 8.
</p>
</dd>
<dt><code>-ftrack-macro-expansion<span class="roman">[</span>=<var>level</var><span class="roman">]</span></code></dt>
<dd><a name="index-ftrack_002dmacro_002dexpansion"></a>
<p>Track locations of tokens across macro expansions. This allows the
compiler to emit diagnostic about the current macro expansion stack
when a compilation error occurs in a macro expansion. Using this
option makes the preprocessor and the compiler consume more
memory. The <var>level</var> parameter can be used to choose the level of
precision of token location tracking thus decreasing the memory
consumption if necessary. Value ‘<samp>0</samp>’ of <var>level</var> de-activates
this option. Value ‘<samp>1</samp>’ tracks tokens locations in a
degraded mode for the sake of minimal memory overhead. In this mode
all tokens resulting from the expansion of an argument of a
function-like macro have the same location. Value ‘<samp>2</samp>’ tracks
tokens locations completely. This value is the most memory hungry.
When this option is given no argument, the default parameter value is
‘<samp>2</samp>’.
</p>
<p>Note that <code>-ftrack-macro-expansion=2</code> is activated by default.
</p>
</dd>
<dt><code>-fmacro-prefix-map=<var>old</var>=<var>new</var></code></dt>
<dd><a name="index-fmacro_002dprefix_002dmap"></a>
<p>When preprocessing files residing in directory <samp><var>old</var></samp>,
expand the <code>__FILE__</code> and <code>__BASE_FILE__</code> macros as if the
files resided in directory <samp><var>new</var></samp> instead. This can be used
to change an absolute path to a relative path by using <samp>.</samp> for
<var>new</var> which can result in more reproducible builds that are
location independent. This option also affects
<code>__builtin_FILE()</code> during compilation. See also
<samp>-ffile-prefix-map</samp>.
</p>
</dd>
<dt><code>-fexec-charset=<var>charset</var></code></dt>
<dd><a name="index-fexec_002dcharset"></a>
<a name="index-character-set_002c-execution"></a>
<p>Set the execution character set, used for string and character
constants. The default is UTF-8. <var>charset</var> can be any encoding
supported by the system’s <code>iconv</code> library routine.
</p>
</dd>
<dt><code>-fwide-exec-charset=<var>charset</var></code></dt>
<dd><a name="index-fwide_002dexec_002dcharset"></a>
<a name="index-character-set_002c-wide-execution"></a>
<p>Set the wide execution character set, used for wide string and
character constants. The default is UTF-32 or UTF-16, whichever
corresponds to the width of <code>wchar_t</code>. As with
<samp>-fexec-charset</samp>, <var>charset</var> can be any encoding supported
by the system’s <code>iconv</code> library routine; however, you will have
problems with encodings that do not fit exactly in <code>wchar_t</code>.
</p>
</dd>
<dt><code>-finput-charset=<var>charset</var></code></dt>
<dd><a name="index-finput_002dcharset"></a>
<a name="index-character-set_002c-input"></a>
<p>Set the input character set, used for translation from the character
set of the input file to the source character set used by GCC. If the
locale does not specify, or GCC cannot get this information from the
locale, the default is UTF-8. This can be overridden by either the locale
or this command-line option. Currently the command-line option takes
precedence if there’s a conflict. <var>charset</var> can be any encoding
supported by the system’s <code>iconv</code> library routine.
</p>
</dd>
<dt><code>-fpch-deps</code></dt>
<dd><a name="index-fpch_002ddeps"></a>
<p>When using precompiled headers (see <a href="Precompiled-Headers.html#Precompiled-Headers">Precompiled Headers</a>), this flag
causes the dependency-output flags to also list the files from the
precompiled header’s dependencies. If not specified, only the
precompiled header is listed and not the files that were used to
create it, because those files are not consulted when a precompiled
header is used.
</p>
</dd>
<dt><code>-fpch-preprocess</code></dt>
<dd><a name="index-fpch_002dpreprocess"></a>
<p>This option allows use of a precompiled header (see <a href="Precompiled-Headers.html#Precompiled-Headers">Precompiled Headers</a>) together with <samp>-E</samp>. It inserts a special <code>#pragma</code>,
<code>#pragma GCC pch_preprocess "<var>filename</var>"</code> in the output to mark
the place where the precompiled header was found, and its <var>filename</var>.
When <samp>-fpreprocessed</samp> is in use, GCC recognizes this <code>#pragma</code>
and loads the PCH.
</p>
<p>This option is off by default, because the resulting preprocessed output
is only really suitable as input to GCC. It is switched on by
<samp>-save-temps</samp>.
</p>
<p>You should not write this <code>#pragma</code> in your own code, but it is
safe to edit the filename if the PCH file is available in a different
location. The filename may be absolute or it may be relative to GCC’s
current directory.
</p>
</dd>
<dt><code>-fworking-directory</code></dt>
<dd><a name="index-fworking_002ddirectory"></a>
<a name="index-fno_002dworking_002ddirectory"></a>
<p>Enable generation of linemarkers in the preprocessor output that
let the compiler know the current working directory at the time of
preprocessing. When this option is enabled, the preprocessor
emits, after the initial linemarker, a second linemarker with the
current working directory followed by two slashes. GCC uses this
directory, when it’s present in the preprocessed input, as the
directory emitted as the current working directory in some debugging
information formats. This option is implicitly enabled if debugging
information is enabled, but this can be inhibited with the negated
form <samp>-fno-working-directory</samp>. If the <samp>-P</samp> flag is
present in the command line, this option has no effect, since no
<code>#line</code> directives are emitted whatsoever.
</p>
</dd>
<dt><code>-A <var>predicate</var>=<var>answer</var></code></dt>
<dd><a name="index-A"></a>
<p>Make an assertion with the predicate <var>predicate</var> and answer
<var>answer</var>. This form is preferred to the older form <samp>-A
<var>predicate</var>(<var>answer</var>)</samp>, which is still supported, because
it does not use shell special characters.
</p>
</dd>
<dt><code>-A -<var>predicate</var>=<var>answer</var></code></dt>
<dd><p>Cancel an assertion with the predicate <var>predicate</var> and answer
<var>answer</var>.
</p>
</dd>
<dt><code>-C</code></dt>
<dd><a name="index-C"></a>
<p>Do not discard comments. All comments are passed through to the output
file, except for comments in processed directives, which are deleted
along with the directive.
</p>
<p>You should be prepared for side effects when using <samp>-C</samp>; it
causes the preprocessor to treat comments as tokens in their own right.
For example, comments appearing at the start of what would be a
directive line have the effect of turning that line into an ordinary
source line, since the first token on the line is no longer a ‘<samp>#</samp>’.
</p>
</dd>
<dt><code>-CC</code></dt>
<dd><a name="index-CC"></a>
<p>Do not discard comments, including during macro expansion. This is
like <samp>-C</samp>, except that comments contained within macros are
also passed through to the output file where the macro is expanded.
</p>
<p>In addition to the side effects of the <samp>-C</samp> option, the
<samp>-CC</samp> option causes all C++-style comments inside a macro
to be converted to C-style comments. This is to prevent later use
of that macro from inadvertently commenting out the remainder of
the source line.
</p>
<p>The <samp>-CC</samp> option is generally used to support lint comments.
</p>
</dd>
<dt><code>-P</code></dt>
<dd><a name="index-P"></a>
<p>Inhibit generation of linemarkers in the output from the preprocessor.
This might be useful when running the preprocessor on something that is
not C code, and will be sent to a program which might be confused by the
linemarkers.
</p>
<a name="index-traditional-C-language"></a>
<a name="index-C-language_002c-traditional"></a>
</dd>
<dt><code>-traditional</code></dt>
<dt><code>-traditional-cpp</code></dt>
<dd><a name="index-traditional_002dcpp"></a>
<a name="index-traditional"></a>
<p>Try to imitate the behavior of pre-standard C preprocessors, as
opposed to ISO C preprocessors.
See the GNU CPP manual for details.
</p>
<p>Note that GCC does not otherwise attempt to emulate a pre-standard
C compiler, and these options are only supported with the <samp>-E</samp>
switch, or when invoking CPP explicitly.
</p>
</dd>
<dt><code>-trigraphs</code></dt>
<dd><a name="index-trigraphs"></a>
<p>Support ISO C trigraphs.
These are three-character sequences, all starting with ‘<samp>??</samp>’, that
are defined by ISO C to stand for single characters. For example,
‘<samp>??/</samp>’ stands for ‘<samp>\</samp>’, so ‘<samp>'??/n'</samp>’ is a character
constant for a newline.
</p>
<p>The nine trigraphs and their replacements are
</p>
<div class="smallexample">
<pre class="smallexample">Trigraph: ??( ??) ??< ??> ??= ??/ ??' ??! ??-
Replacement: [ ] { } # \ ^ | ~
</pre></div>
<p>By default, GCC ignores trigraphs, but in
standard-conforming modes it converts them. See the <samp>-std</samp> and
<samp>-ansi</samp> options.
</p>
</dd>
<dt><code>-remap</code></dt>
<dd><a name="index-remap"></a>
<p>Enable special code to work around file systems which only permit very
short file names, such as MS-DOS.
</p>
</dd>
<dt><code>-H</code></dt>
<dd><a name="index-H"></a>
<p>Print the name of each header file used, in addition to other normal
activities. Each name is indented to show how deep in the
‘<samp>#include</samp>’ stack it is. Precompiled header files are also
printed, even if they are found to be invalid; an invalid precompiled
header file is printed with ‘<samp>...x</samp>’ and a valid one with ‘<samp>...!</samp>’.
</p>
</dd>
<dt><code>-d<var>letters</var></code></dt>
<dd><a name="index-d"></a>
<p>Says to make debugging dumps during compilation as specified by
<var>letters</var>. The flags documented here are those relevant to the
preprocessor. Other <var>letters</var> are interpreted
by the compiler proper, or reserved for future versions of GCC, and so
are silently ignored. If you specify <var>letters</var> whose behavior
conflicts, the result is undefined.
See <a href="Developer-Options.html#Developer-Options">Developer Options</a>, for more information.
</p>
<dl compact="compact">
<dt><code>-dM</code></dt>
<dd><a name="index-dM"></a>
<p>Instead of the normal output, generate a list of ‘<samp>#define</samp>’
directives for all the macros defined during the execution of the
preprocessor, including predefined macros. This gives you a way of
finding out what is predefined in your version of the preprocessor.
Assuming you have no file <samp>foo.h</samp>, the command
</p>
<div class="smallexample">
<pre class="smallexample">touch foo.h; cpp -dM foo.h
</pre></div>
<p>shows all the predefined macros.
</p>
<p>If you use <samp>-dM</samp> without the <samp>-E</samp> option, <samp>-dM</samp> is
interpreted as a synonym for <samp>-fdump-rtl-mach</samp>.
See <a href="http://gcc.gnu.org/onlinedocs/gcc/Developer-Options.html#Developer-Options">(gcc)Developer Options</a>.
</p>
</dd>
<dt><code>-dD</code></dt>
<dd><a name="index-dD"></a>
<p>Like <samp>-dM</samp> except in two respects: it does <em>not</em> include the
predefined macros, and it outputs <em>both</em> the ‘<samp>#define</samp>’
directives and the result of preprocessing. Both kinds of output go to
the standard output file.
</p>
</dd>
<dt><code>-dN</code></dt>
<dd><a name="index-dN"></a>
<p>Like <samp>-dD</samp>, but emit only the macro names, not their expansions.
</p>
</dd>
<dt><code>-dI</code></dt>
<dd><a name="index-dI"></a>
<p>Output ‘<samp>#include</samp>’ directives in addition to the result of
preprocessing.
</p>
</dd>
<dt><code>-dU</code></dt>
<dd><a name="index-dU"></a>
<p>Like <samp>-dD</samp> except that only macros that are expanded, or whose
definedness is tested in preprocessor directives, are output; the
output is delayed until the use or test of the macro; and
‘<samp>#undef</samp>’ directives are also output for macros tested but
undefined at the time.
</p></dd>
</dl>
</dd>
<dt><code>-fdebug-cpp</code></dt>
<dd><a name="index-fdebug_002dcpp"></a>
<p>This option is only useful for debugging GCC. When used from CPP or with
<samp>-E</samp>, it dumps debugging information about location maps. Every
token in the output is preceded by the dump of the map its location
belongs to.
</p>
<p>When used from GCC without <samp>-E</samp>, this option has no effect.
</p>
</dd>
<dt><code>-Wp,<var>option</var></code></dt>
<dd><a name="index-Wp"></a>
<p>You can use <samp>-Wp,<var>option</var></samp> to bypass the compiler driver
and pass <var>option</var> directly through to the preprocessor. If
<var>option</var> contains commas, it is split into multiple options at the
commas. However, many options are modified, translated or interpreted
by the compiler driver before being passed to the preprocessor, and
<samp>-Wp</samp> forcibly bypasses this phase. The preprocessor’s direct
interface is undocumented and subject to change, so whenever possible
you should avoid using <samp>-Wp</samp> and let the driver handle the
options instead.
</p>
</dd>
<dt><code>-Xpreprocessor <var>option</var></code></dt>
<dd><a name="index-Xpreprocessor"></a>
<p>Pass <var>option</var> as an option to the preprocessor. You can use this to
supply system-specific preprocessor options that GCC does not
recognize.
</p>
<p>If you want to pass an option that takes an argument, you must use
<samp>-Xpreprocessor</samp> twice, once for the option and once for the argument.
</p>
</dd>
<dt><code>-no-integrated-cpp</code></dt>
<dd><a name="index-no_002dintegrated_002dcpp"></a>
<p>Perform preprocessing as a separate pass before compilation.
By default, GCC performs preprocessing as an integrated part of
input tokenization and parsing.
If this option is provided, the appropriate language front end
(<code>cc1</code>, <code>cc1plus</code>, or <code>cc1obj</code> for C, C++,
and Objective-C, respectively) is instead invoked twice,
once for preprocessing only and once for actual compilation
of the preprocessed input.
This option may be useful in conjunction with the <samp>-B</samp> or
<samp>-wrapper</samp> options to specify an alternate preprocessor or
perform additional processing of the program source between
normal preprocessing and compilation.
</p>
</dd>
</dl>
<hr>
<div class="header">
<p>
Next: <a href="Assembler-Options.html#Assembler-Options" accesskey="n" rel="next">Assembler Options</a>, Previous: <a href="Instrumentation-Options.html#Instrumentation-Options" accesskey="p" rel="prev">Instrumentation Options</a>, Up: <a href="Invoking-GCC.html#Invoking-GCC" accesskey="u" rel="up">Invoking GCC</a> [<a href="index.html#SEC_Contents" title="Table of contents" rel="contents">Contents</a>][<a href="Option-Index.html#Option-Index" title="Index" rel="index">Index</a>]</p>
</div>
</body>
</html>
//
// AVObjectUtils.m
// AVOSCloud
//
// Created by Zhu Zeng on 7/4/13.
// Copyright (c) 2013 AVOS. All rights reserved.
//
#import <objc/runtime.h>
#import "AVObjectUtils.h"
#import "AVObject_Internal.h"
#import "AVFile.h"
#import "AVFile_Internal.h"
#import "AVObjectUtils.h"
#import "AVUser_Internal.h"
#import "AVACL_Internal.h"
#import "AVRelation.h"
#import "AVRole_Internal.h"
#import "AVInstallation_Internal.h"
#import "AVPaasClient.h"
#import "AVGeoPoint_Internal.h"
#import "AVRelation_Internal.h"
#import "AVUtils.h"
@implementation AVObjectUtils
#pragma mark - Check type
+(BOOL)isRelation:(NSString *)type
{
return [type isEqualToString:@"Relation"];
}
/// The remote AVObject can be a pointer object or a normal object without the pointer property.
/// When adding an AVObject, we have to check whether it's a pointer or not.
+(BOOL)isRelationDictionary:(NSDictionary *)dict
{
NSString * type = [dict objectForKey:@"__type"];
if ([type isEqualToString:@"Relation"]) {
return YES;
}
return NO;
}
+(BOOL)isPointerDictionary:(NSDictionary *)dict
{
NSString * type = [dict objectForKey:@"__type"];
if ([type isEqualToString:@"Pointer"]) {
return YES;
}
return NO;
}
+(BOOL)isPointer:(NSString *)type
{
return [type isEqualToString:@"Pointer"];
}
+(BOOL)isGeoPoint:(NSString *)type
{
return [type isEqualToString:@"GeoPoint"];
}
+(BOOL)isACL:(NSString *)type
{
return [type isEqualToString:ACLTag];
}
+(BOOL)isDate:(NSString *)type
{
return [type isEqualToString:@"Date"];
}
+(BOOL)isData:(NSString *)type
{
return [type isEqualToString:@"Bytes"];
}
+(BOOL)isFile:(NSString *)type
{
return [type isEqualToString:@"File"];
}
+(BOOL)isFilePointer:(NSDictionary *)dict {
return ([[dict objectForKey:classNameTag] isEqualToString:@"_File"]);
}
+(BOOL)isAVObject:(NSDictionary *)dict
{
    // Should this also check that __type is "Object"?
return ([dict objectForKey:classNameTag] != nil);
}
#pragma mark - Simple Objective-C object from server side dictionary
+(NSDateFormatter *)dateFormatter{
static NSDateFormatter *dateFormatter;
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
dateFormatter = [[NSDateFormatter alloc] init];
[dateFormatter setDateFormat:AV_DATE_FORMAT];
[dateFormatter setTimeZone:[NSTimeZone timeZoneForSecondsFromGMT:0]];
});
return dateFormatter;
}
+(NSString *)stringFromDate:(NSDate *)date
{
NSString *strDate = [[self.class dateFormatter] stringFromDate:date];
return strDate;
}
+(NSDate *)dateFromString:(NSString *)string
{
if (string == nil || [string isKindOfClass:[NSNull class]]) {
return [NSDate date];
}
NSDate *date = [[self.class dateFormatter] dateFromString:string];
return date;
}
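
// Round-trip sketch (the exact AV_DATE_FORMAT pattern is defined elsewhere;
// an ISO-8601 UTC string is assumed here for illustration):
//   [AVObjectUtils stringFromDate:date] -> @"2013-07-04T09:30:00.000Z"
//   [AVObjectUtils dateFromString:@"2013-07-04T09:30:00.000Z"] -> the same NSDate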
+(NSDate *)dateFromDictionary:(NSDictionary *)dict
{
return [AVObjectUtils dateFromString:[dict valueForKey:@"iso"]];
}
+(NSData *)dataFromDictionary:(NSDictionary *)dict
{
NSString * string = [dict valueForKey:@"base64"];
NSData * data = [NSData AVdataFromBase64String:string];
return data;
}
+(AVGeoPoint *)geoPointFromDictionary:(NSDictionary *)dict
{
AVGeoPoint * point = [[AVGeoPoint alloc]init];
point.latitude = [[dict objectForKey:@"latitude"] doubleValue];
point.longitude = [[dict objectForKey:@"longitude"] doubleValue];
return point;
}
+(AVACL *)aclFromDictionary:(NSDictionary *)dict
{
AVACL * acl = [AVACL ACL];
acl.permissionsById = [dict mutableCopy];
return acl;
}
+(NSArray *)arrayFromArray:(NSArray *)array
{
NSMutableArray *newArray = [NSMutableArray arrayWithCapacity:array.count];
for (id obj in [array copy]) {
if ([obj isKindOfClass:[NSDictionary class]]) {
[newArray addObject:[AVObjectUtils objectFromDictionary:obj]];
} else if ([obj isKindOfClass:[NSArray class]]) {
NSArray * sub = [AVObjectUtils arrayFromArray:obj];
[newArray addObject:sub];
} else {
[newArray addObject:obj];
}
}
return newArray;
}
+(NSObject *)objectFromDictionary:(NSDictionary *)dict
{
NSString * type = [dict valueForKey:@"__type"];
if ([AVObjectUtils isRelation:type])
{
return [AVObjectUtils targetObjectFromRelationDictionary:dict];
}
else if ([AVObjectUtils isPointer:type] ||
[AVObjectUtils isAVObject:dict] )
{
// the backend stores AVFile as AVObject, but in sdk AVFile is not subclass
// of AVObject, have to process the situation here.
if ([AVObjectUtils isFilePointer:dict]) {
return [AVFile fileFromDictionary:dict];
}
return [AVObjectUtils avobjectFromDictionary:dict];
}
else if ([AVObjectUtils isFile:type])
{
AVFile * file = [AVFile fileFromDictionary:dict];
return file;
}
else if ([AVObjectUtils isGeoPoint:type])
{
AVGeoPoint * point = [AVObjectUtils geoPointFromDictionary:dict];
return point;
}
else if ([AVObjectUtils isDate:type])
{
NSDate * date = [AVObjectUtils dateFromDictionary:dict];
return date;
}
else if ([AVObjectUtils isData:type])
{
NSData * data = [AVObjectUtils dataFromDictionary:dict];
return data;
}
return dict;
}
+ (NSObject *)objectFromDictionary:(NSDictionary *)dict recursive:(BOOL)recursive {
if (recursive) {
NSMutableDictionary *mutableDict = [dict mutableCopy];
for (NSString *key in [dict allKeys]) {
id object = dict[key];
if ([object isKindOfClass:[NSDictionary class]]) {
object = [self objectFromDictionary:object recursive:YES];
mutableDict[key] = object;
}
}
return [self objectFromDictionary:mutableDict];
} else {
return [self objectFromDictionary:dict];
}
}
+(void)copyDictionary:(NSDictionary *)dict
toTarget:(AVObject *)target
key:(NSString *)key
{
NSString * type = [dict valueForKey:@"__type"];
if ([AVObjectUtils isRelation:type])
{
        // Parse {"__type":"Relation","className":"_User"}; add the first object to determine the relation's target type
AVObject * object = [AVObjectUtils targetObjectFromRelationDictionary:dict];
[target addRelation:object forKey:key submit:NO];
}
else if ([AVObjectUtils isPointer:type])
{
[target setObject:[AVObjectUtils objectFromDictionary:dict] forKey:key submit:NO];
}
else if ([AVObjectUtils isAVObject:dict]) {
[target setObject:[AVObjectUtils objectFromDictionary:dict] forKey:key submit:NO];
}
else if ([AVObjectUtils isFile:type])
{
AVFile * file = [AVFile fileFromDictionary:dict];
[target setObject:file forKey:key submit:NO];
}
else if ([AVObjectUtils isGeoPoint:type])
{
AVGeoPoint * point = [AVGeoPoint geoPointFromDictionary:dict];
[target setObject:point forKey:key submit:NO];
}
else if ([AVObjectUtils isACL:type] ||
[AVObjectUtils isACL:key])
{
[target setObject:[AVObjectUtils aclFromDictionary:dict] forKey:ACLTag submit:NO];
}
else if ([AVObjectUtils isDate:type])
{
NSDate * date = [AVObjectUtils dateFromDictionary:dict];
[target setObject:date forKey:key submit:NO];
}
else if ([AVObjectUtils isData:type])
{
NSData * data = [AVObjectUtils dataFromDictionary:dict];
[target setObject:data forKey:key submit:NO];
}
else
{
id object = [self objectFromDictionary:dict recursive:YES];
[target setObject:object forKey:key submit:NO];
}
}
/// Add object to avobject container.
+(void)addObject:(NSObject *)object
to:(NSObject *)parent
key:(NSString *)key
isRelation:(BOOL)isRelation
{
if ([key hasPrefix:@"_"]) {
// NSLog(@"Ingore key %@", key);
return;
}
if (![parent isKindOfClass:[AVObject class]]) {
return;
}
AVObject * avParent = (AVObject *)parent;
if ([object isKindOfClass:[AVObject class]]) {
if (isRelation) {
[avParent addRelation:(AVObject *)object forKey:key submit:NO];
} else {
[avParent setObject:object forKey:key submit:NO];
}
} else if ([object isKindOfClass:[NSArray class]]) {
for(AVObject * item in [object copy]) {
[avParent addObject:item forKey:key];
}
} else {
[avParent setObject:object forKey:key submit:NO];
}
}
+(NSDate *)dateFromValue:(id)value {
NSDate * date = nil;
if ([value isKindOfClass:[NSDictionary class]]) {
date = [AVObjectUtils dateFromDictionary:value];
} else if ([value isKindOfClass:[NSString class]]) {
date = [AVObjectUtils dateFromString:value];
}
return date;
}
+(void)updateObjectProperty:(AVObject *)target
key:(NSString *)key
value:(NSObject *)value
{
if ([key isEqualToString:@"createdAt"] ) {
target.createdAt = [AVObjectUtils dateFromValue:value];
} else if ([key isEqualToString:@"updatedAt"]) {
target.updatedAt = [AVObjectUtils dateFromValue:value];
} else if ([key isEqualToString:ACLTag]) {
AVACL * acl = [AVObjectUtils aclFromDictionary:(NSDictionary *)value];
[target setObject:acl forKey:key submit:NO];
} else {
if ([value isKindOfClass:[NSDictionary class]]) {
NSDictionary * valueDict = (NSDictionary *)value;
[AVObjectUtils copyDictionary:valueDict toTarget:target key:key];
} else if ([value isKindOfClass:[NSArray class]]) {
NSArray * array = [AVObjectUtils arrayFromArray:(NSArray *)value];
[target setObject:array forKey:key submit:NO];
} else if ([value isEqual:[NSNull null]]) {
[target removeObjectForKey:key];
} else {
[target setObject:value forKey:key submit:NO];
}
}
}
+(void)updateSubObjects:(AVObject *)target
key:(NSString *)key
value:(NSObject *)obj
{
// additional properties, use setObject
if ([obj isKindOfClass:[NSDictionary class]])
{
[AVObjectUtils copyDictionary:(NSDictionary *)obj toTarget:target key:key];
}
else if ([obj isKindOfClass:[NSArray class]])
{
NSArray * array = [AVObjectUtils arrayFromArray:(NSArray *)obj];
[target setObject:array forKey:key submit:NO];
}
else
{
[target setObject:obj forKey:key submit:NO];
}
}
#pragma mark - Update Objective-C object from server side dictionary
+(void)copyDictionary:(NSDictionary *)src
toObject:(AVObject *)target
{
[src enumerateKeysAndObjectsUsingBlock:^(id key, id obj, BOOL *stop) {
if ([target respondsToSelector:NSSelectorFromString(key)]) {
[AVObjectUtils updateObjectProperty:target key:key value:obj];
} else {
[AVObjectUtils updateSubObjects:target key:key value:obj];
}
}];
}
#pragma mark - Server side dictionary representation of objective-c object.
+ (NSMutableDictionary *)dictionaryFromDictionary:(NSDictionary *)dic {
return [self dictionaryFromDictionary:dic topObject:NO];
}
/// topObject is for cloud rpc
+ (NSMutableDictionary *)dictionaryFromDictionary:(NSDictionary *)dic topObject:(BOOL)topObject{
NSMutableDictionary *newDic = [NSMutableDictionary dictionaryWithCapacity:dic.count];
for (NSString *key in [dic allKeys]) {
id obj = [dic objectForKey:key];
[newDic setObject:[AVObjectUtils dictionaryFromObject:obj topObject:topObject] forKey:key];
}
return newDic;
}
+ (NSMutableArray *)dictionaryFromArray:(NSArray *)array {
return [self dictionaryFromArray:array topObject:NO];
}
+ (NSMutableArray *)dictionaryFromArray:(NSArray *)array topObject:(BOOL)topObject
{
NSMutableArray *newArray = [NSMutableArray arrayWithCapacity:array.count];
for (id obj in [array copy]) {
[newArray addObject:[AVObjectUtils dictionaryFromObject:obj topObject:topObject]];
}
return newArray;
}
+(NSDictionary *)dictionaryFromAVObjectPointer:(AVObject *)object
{
NSMutableDictionary * dict = [[NSMutableDictionary alloc] init];
[dict setObject:@"Pointer" forKey:@"__type"];
[dict setObject:[object internalClassName] forKey:classNameTag];
if ([object hasValidObjectId])
{
[dict setObject:object.objectId forKey:@"objectId"];
}
return dict;
}
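
/*
 Example of the emitted pointer dictionary, serialized as JSON (objectId and
 class name are illustrative; classNameTag is assumed to expand to "className"):
 {"__type":"Pointer","className":"Post","objectId":"55a39634e4b0ed48f0c1845c"}
 */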
/*
{
"cid" : "67c35bc8-4183-4db0-8f5a-0ee2b0baa4d4",
"className" : "ddd",
"key" : "myddd"
}
*/
+(NSDictionary *)childDictionaryFromAVObject:(AVObject *)object
withKey:(NSString *)key
{
NSMutableDictionary * dict = [[NSMutableDictionary alloc] init];
[dict setObject:[object internalClassName] forKey:classNameTag];
NSString *cid = [object objectId] != nil ? [object objectId] : [object uuid];
[dict setObject:cid forKey:@"cid"];
[dict setObject:key forKey:@"key"];
return dict;
}
+ (NSSet *)allAVObjectProperties:(Class)objectClass {
NSMutableSet *properties = [NSMutableSet set];
[self allAVObjectProperties:objectClass properties:properties];
return [properties copy];
}
+(void)allAVObjectProperties:(Class)objectClass
properties:(NSMutableSet *)properties {
unsigned int numberOfProperties = 0;
objc_property_t *propertyArray = class_copyPropertyList(objectClass, &numberOfProperties);
for (NSUInteger i = 0; i < numberOfProperties; i++)
{
objc_property_t property = propertyArray[i];
char *readonly = property_copyAttributeValue(property, "R");
if (readonly) {
free(readonly);
continue;
}
NSString *key = [[NSString alloc] initWithUTF8String:property_getName(property)];
[properties addObject:key];
}
if ([objectClass isSubclassOfClass:[AVObject class]] && objectClass != [AVObject class])
{
[AVObjectUtils allAVObjectProperties:[objectClass superclass] properties:properties];
}
free(propertyArray);
}
// Generates an object's JSON dictionary. For an AVObject we generate the
// full JSON dictionary instead of a pointer only. This differs from
// dictionaryFromObject, which generates pointer JSON only for an AVObject.
+ (id)snapshotDictionary:(id)object {
return [self snapshotDictionary:object recursive:YES];
}
+ (id)snapshotDictionary:(id)object recursive:(BOOL)recursive {
if (recursive && [object isKindOfClass:[AVObject class]]) {
return [AVObjectUtils objectSnapshot:object recursive:recursive];
} else {
return [AVObjectUtils dictionaryFromObject:object];
}
}
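// Hedged example of the distinction described above (object names are assumed
// for illustration only): given an AVObject `post` whose "author" field holds
// another AVObject,
//   [AVObjectUtils dictionaryFromObject:post] -> {"__type":"Pointer", ...}
//   [AVObjectUtils snapshotDictionary:post]   -> a full {"__type":"Object", ...}
// snapshot, with the nested author expanded too because recursive defaults
// to YES.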
+ (NSMutableDictionary *)objectSnapshot:(AVObject *)object {
return [self objectSnapshot:object recursive:YES];
}
+ (NSMutableDictionary *)objectSnapshot:(AVObject *)object recursive:(BOOL)recursive {
NSArray * objects = @[object.localData, object.estimatedData];
NSMutableDictionary * result = [NSMutableDictionary dictionary];
[result setObject:@"Object" forKey:kAVTypeTag];
for (NSDictionary *object in objects) {
NSDictionary *dictionary = [object copy];
NSArray *keys = [dictionary allKeys];
for(NSString * key in keys) {
id valueObject = [self snapshotDictionary:dictionary[key] recursive:recursive];
if (valueObject != nil) {
[result setObject:valueObject forKey:key];
}
}
}
NSArray * keys = [object.relationData allKeys];
for(NSString * key in keys) {
NSString * childClassName = [object childClassNameForRelation:key];
id valueObject = [self dictionaryForRelation:childClassName];
if (valueObject != nil) {
[result setObject:valueObject forKey:key];
}
}
NSSet *ignoreKeys = [NSSet setWithObjects:
@"localData",
@"relationData",
@"estimatedData",
@"isPointer",
@"running",
@"operationQueue",
@"requestManager",
@"inSetter",
@"uuid",
@"submit",
@"hasDataForInitial",
@"hasDataForCloud",
@"fetchWhenSave",
@"isNew", // from AVUser
nil];
NSMutableSet * properties = [NSMutableSet set];
[self allAVObjectProperties:[object class] properties:properties];
for (NSString * key in properties) {
if ([ignoreKeys containsObject:key]) {
continue;
}
        id valueObject = [self snapshotDictionary:[object valueForKey:key] recursive:recursive];
        if (valueObject != nil) {
            [result setObject:valueObject forKey:key];
}
}
return result;
}
+(AVObject *)avObjectForClass:(NSString *)className {
if (className == nil) {
return nil;
}
AVObject *object = nil;
Class classObject = [[AVPaasClient sharedInstance] classFor:className];
if (classObject != nil && [classObject isSubclassOfClass:[AVObject class]]) {
if ([classObject respondsToSelector:@selector(object)]) {
object = [classObject performSelector:@selector(object)];
}
} else {
if ([AVObjectUtils isUserClass:className]) {
object = [AVUser user];
} else if ([AVObjectUtils isInstallationClass:className]) {
object = [AVInstallation installation];
} else if ([AVObjectUtils isRoleClass:className]) {
// TODO
object = [AVRole role];
} else {
object = [AVObject objectWithClassName:className];
}
}
return object;
}
+(AVObject *)avObjectFromDictionary:(NSDictionary *)src
className:(NSString *)className {
if (src == nil || className == nil || src.count == 0) {
return nil;
}
AVObject *object = [AVObjectUtils avObjectForClass:className];
[AVObjectUtils copyDictionary:src toObject:object];
if ([AVObjectUtils isPointerDictionary:src]) {
object.isPointer = YES;
}
return object;
}
+(AVObject *)avobjectFromDictionary:(NSDictionary *)dict {
NSString * className = [dict objectForKey:classNameTag];
return [AVObjectUtils avObjectFromDictionary:dict className:className];
}
// create relation target object instead of relation object.
+(AVObject *)targetObjectFromRelationDictionary:(NSDictionary *)dict
{
AVObject * object = [AVObjectUtils avObjectForClass:[dict valueForKey:classNameTag]];
return object;
}
+(NSDictionary *)dictionaryFromGeoPoint:(AVGeoPoint *)point
{
return [AVGeoPoint dictionaryFromGeoPoint:point];
}
+(NSDictionary *)dictionaryFromDate:(NSDate *)date
{
NSString *strDate = [AVObjectUtils stringFromDate:date];
return @{@"__type": @"Date", @"iso":strDate};
}
+(NSDictionary *)dictionaryFromData:(NSData *)data
{
NSString *base64 = [data AVbase64EncodedString];
return @{@"__type": @"Bytes", @"base64":base64};
}
+(NSDictionary *)dictionaryFromFile:(AVFile *)file
{
return [AVFile dictionaryFromFile:file];
}
+(NSDictionary *)dictionaryFromACL:(AVACL *)acl {
return acl.permissionsById;
}
+(NSDictionary *)dictionaryFromRelation:(AVRelation *)relation {
if (relation.targetClass) {
return [AVObjectUtils dictionaryForRelation:relation.targetClass];
}
return nil;
}
+(NSDictionary *)dictionaryForRelation:(NSString *)className {
return @{@"__type": @"Relation", @"className":className};
}
// Generate server side dictionary representation of input NSObject
+ (id)dictionaryFromObject:(id)obj {
return [self dictionaryFromObject:obj topObject:NO];
}
/// topObject means serialize the top-level AVObject in full, with any nested AVObject reduced to a Pointer. Used for cloud rpc.
+ (id)dictionaryFromObject:(id)obj topObject:(BOOL)topObject
{
if ([obj isKindOfClass:[NSDictionary class]]) {
return [AVObjectUtils dictionaryFromDictionary:obj topObject:topObject];
} else if ([obj isKindOfClass:[NSArray class]]) {
return [AVObjectUtils dictionaryFromArray:obj topObject:topObject];
} else if ([obj isKindOfClass:[AVObject class]]) {
if (topObject) {
return [AVObjectUtils objectSnapshot:obj recursive:NO];
} else {
return [AVObjectUtils dictionaryFromAVObjectPointer:obj];
}
} else if ([obj isKindOfClass:[AVGeoPoint class]]) {
return [AVObjectUtils dictionaryFromGeoPoint:obj];
} else if ([obj isKindOfClass:[NSDate class]]) {
return [AVObjectUtils dictionaryFromDate:obj];
} else if ([obj isKindOfClass:[NSData class]]) {
return [AVObjectUtils dictionaryFromData:obj];
} else if ([obj isKindOfClass:[AVFile class]]) {
return [AVObjectUtils dictionaryFromFile:obj];
} else if ([obj isKindOfClass:[AVACL class]]) {
return [AVObjectUtils dictionaryFromACL:obj];
} else if ([obj isKindOfClass:[AVRelation class]]) {
return [AVObjectUtils dictionaryFromRelation:obj];
}
// string or other?
return obj;
}
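// Minimal sketch of the type routing above (literal values are illustrative):
//   NSDate     -> {"__type":"Date","iso":"2020-01-01T00:00:00.000Z"}
//   NSData     -> {"__type":"Bytes","base64":"..."}
//   AVObject   -> a Pointer dictionary, or a one-level snapshot when
//                 topObject is YES (cloud rpc)
//   NSString and other plain types fall through and are returned unchanged.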
+(void)setupRelation:(AVObject *)parent
withDictionary:(NSDictionary *)relationMap
{
for(NSString * key in [relationMap allKeys]) {
NSArray * array = [relationMap objectForKey:key];
for(NSDictionary * item in [array copy]) {
NSObject * object = [AVObjectUtils objectFromDictionary:item];
if ([object isKindOfClass:[AVObject class]]) {
[parent addRelation:(AVObject *)object forKey:key submit:NO];
}
}
}
}
#pragma mark - batch request from operation list
+(BOOL)isUserClass:(NSString *)className
{
return [className isEqualToString:[AVUser userTag]];
}
+(BOOL)isRoleClass:(NSString *)className
{
return [className isEqualToString:[AVRole className]];
}
+(BOOL)isFileClass:(NSString *)className
{
return [className isEqualToString:[AVFile className]];
}
+(BOOL)isInstallationClass:(NSString *)className
{
return [className isEqualToString:[AVInstallation className]];
}
+(NSString *)classEndPoint:(NSString *)className
objectId:(NSString *)objectId
{
if (objectId == nil)
{
return [NSString stringWithFormat:@"classes/%@", className];
}
return [NSString stringWithFormat:@"classes/%@/%@", className, objectId];
}
+(NSString *)userObjectPath:(NSString *)objectId
{
if (objectId == nil)
{
return [AVUser endPoint];
}
return [NSString stringWithFormat:@"%@/%@", [AVUser endPoint], objectId];
}
+(NSString *)roleObjectPath:(NSString *)objectId
{
if (objectId == nil)
{
return [AVRole endPoint];
}
return [NSString stringWithFormat:@"%@/%@", [AVRole endPoint], objectId];
}
+(NSString *)installationObjectPath:(NSString *)objectId
{
if (objectId == nil)
{
return [AVInstallation endPoint];
}
return [NSString stringWithFormat:@"%@/%@", [AVInstallation endPoint], objectId];
}
+(NSString *)objectPath:(NSString *)className
objectId:(NSString *)objectId
{
    //FIXME: a nil className is also acceptable here; we just should not send another request in that case.
//NSAssert(objectClass!=nil, @"className should not be nil!");
if ([AVObjectUtils isUserClass:className])
{
return [AVObjectUtils userObjectPath:objectId];
}
else if ([AVObjectUtils isRoleClass:className])
{
return [AVObjectUtils roleObjectPath:objectId];
}
else if ([AVObjectUtils isInstallationClass:className])
{
return [AVObjectUtils installationObjectPath:objectId];
}
return [AVObjectUtils classEndPoint:className objectId:objectId];
}
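// Example endpoints produced by the routing above (a sketch of the REST
// conventions implied by the helpers; "Todo" is a made-up class name):
//   [AVObjectUtils objectPath:@"Todo" objectId:nil]       -> @"classes/Todo"
//   [AVObjectUtils objectPath:@"Todo" objectId:@"abc123"] -> @"classes/Todo/abc123"
//   user/role/installation class names are redirected to their own endpoints.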
+(NSString *)batchPath {
return @"batch";
}
+(NSString *)batchSavePath
{
return @"batch/save";
}
+(BOOL)safeAdd:(NSDictionary *)dict
toArray:(NSMutableArray *)array
{
if (dict != nil) {
[array addObject:dict];
return YES;
}
return NO;
}
+(BOOL)hasAnyKeys:(id)object {
if ([object isKindOfClass:[NSDictionary class]]) {
NSDictionary * dict = (NSDictionary *)object;
return ([dict count] > 0);
}
return NO;
}
@end
<!DOCTYPE html>
<html>
<head>
<style>
caption { color: green }
</style>
<script>
function boom()
{
var table = document.getElementById("table");
var newCaption = document.createElement('caption');
newCaption.appendChild(document.createTextNode("TEST"));
        newCaption.style.background = "inherit";
table.appendChild(newCaption);
table.appendChild(document.createElement('caption'));
}
</script>
</head>
<body onload="boom();">
<table id="table" style="background: green;"></table>
</body>
</html>
#include <FastBVH.h>
#include <gtest/gtest.h>
#include <list>
#include "Primitive.h"
using namespace FastBVH;
namespace {
//! \brief This is just boilerplate code for google test
//! to be able to run template tests.
template <typename T>
class BuildStrategyTest : public ::testing::Test {
public:
using List = std::list<T>;
static T shared_;
T value_;
};
//! \brief The floating point types to be tested.
using FloatTypes = ::testing::Types<float, double, long double>;
} // namespace
TYPED_TEST_CASE(BuildStrategyTest, FloatTypes);
TYPED_TEST(BuildStrategyTest, build) {
Testing::Primitive<TypeParam> p[3] = {{{1, 2, 3}, {4, 5, 8}}, {{9, 8, 4}, {11, 15, 9}}, {{2, 6, 5}, {3, 7, 6}}};
std::vector<Testing::Primitive<TypeParam>> primitives;
primitives.emplace_back(p[0]);
primitives.emplace_back(p[1]);
primitives.emplace_back(p[2]);
Testing::BoxConverter<TypeParam> box_converter;
BuildStrategy<TypeParam, 0> build_strategy;
auto bvh = build_strategy(primitives, box_converter);
auto nodes = bvh.getNodes();
constexpr TypeParam cmp_bias = 0.001;
EXPECT_EQ(bvh.countLeafs(), 1);
ASSERT_EQ(nodes.size(), 1);
EXPECT_EQ(nodes[0].primitive_count, 3);
EXPECT_NEAR(nodes[0].bbox.min.x, 1, cmp_bias);
EXPECT_NEAR(nodes[0].bbox.min.y, 2, cmp_bias);
EXPECT_NEAR(nodes[0].bbox.min.z, 3, cmp_bias);
EXPECT_NEAR(nodes[0].bbox.max.x, 11, cmp_bias);
EXPECT_NEAR(nodes[0].bbox.max.y, 15, cmp_bias);
EXPECT_NEAR(nodes[0].bbox.max.z, 9, cmp_bias);
}
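// Note (editorial): TYPED_TEST_CASE instantiates BuildStrategyTest once per
// entry in FloatTypes, so the `build` body above runs three times, with
// TypeParam bound to float, double, and long double respectively.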
///////////////////////////////////////////////////////////
/////////////// Welcome to Cello //////////////////////
///////////////////////////////////////////////////////////
JobID by date: 0xCD
[ -dateID 0xCD -figures false -external_directory true -assignment_algorithm abstract_only -verilog /Users/peng/cello/resources/verilog/3-input/0xCD.v -output_or false]
///////////////////////////////////////////////////////////
/////////////// Logic synthesis, Wiring diagram ///////
///////////////////////////////////////////////////////////
fin_ver /Users/peng/cello/resources/verilog/3-input/0xCD.v
Input gates = 3
Logic gates = 4
NOR gates = 4
AND gates = 0
Output gates = 1
----- Logic Circuit #0 -----
OUTPUT 11001101 out 0 (1)
NOR 11001101 ~| 1 (3,2)
NOR 00110000 ~| 2 (7,4)
NOR 00100010 ~| 3 (4,6)
NOT 11001100 ~ 4 (5)
INPUT 00001111 in1 7
INPUT 00110011 in2 5
INPUT 01010101 in3 6
Cello finished playing. Abstract circuit only.
<annotation>
<folder>widerface</folder>
<filename>55--Sports_Coach_Trainer_55_Sports_Coach_Trainer_sportcoaching_55_180.jpg</filename>
<source>
<database>wider face Database</database>
<annotation>PASCAL VOC2007</annotation>
<image>flickr</image>
<flickrid>-1</flickrid>
</source>
<owner>
<flickrid>yanyu</flickrid>
<name>yanyu</name>
</owner>
<size>
<width>1024</width>
<height>1024</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>702</xmin>
<ymin>478</ymin>
<xmax>762</xmax>
<ymax>540</ymax>
</bndbox>
<lm>
<x1>713.634</x1>
<y1>510.054</y1>
<x2>735.772</x2>
<y2>511.996</y2>
<x3>723.732</x3>
<y3>528.696</y3>
<x4>719.46</x4>
<y4>529.473</y4>
<x5>736.161</x5>
<y5>527.92</y5>
<visible>1</visible>
<blur>0.6</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>492</xmin>
<ymin>190</ymin>
<xmax>552</xmax>
<ymax>262</ymax>
</bndbox>
<lm>
<x1>513.835</x1>
<y1>220.089</y1>
<x2>535.929</x2>
<y2>218.737</y2>
<x3>525.107</x3>
<y3>234.969</y3>
<x4>514.286</x4>
<y4>240.83</y4>
<x5>530.969</x5>
<y5>242.634</y5>
<visible>0</visible>
<blur>0.61</blur>
</lm>
<has_lm>1</has_lm>
</object>
</annotation>
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
using gaxgrpc = Google.Api.Gax.Grpc;
using lro = Google.LongRunning;
using wkt = Google.Protobuf.WellKnownTypes;
using grpccore = Grpc.Core;
using moq = Moq;
using st = System.Threading;
using stt = System.Threading.Tasks;
using xunit = Xunit;
namespace Google.Cloud.Gaming.V1Beta.Tests
{
/// <summary>Generated unit tests.</summary>
public sealed class GeneratedRealmsServiceClientTest
{
[xunit::FactAttribute]
public void GetRealmRequestObject()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
GetRealmRequest request = new GetRealmRequest
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
};
Realm expectedResponse = new Realm
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
CreateTime = new wkt::Timestamp(),
UpdateTime = new wkt::Timestamp(),
Labels =
{
{
"key8a0b6e3c",
"value60c16320"
},
},
TimeZone = "time_zone73f23b20",
Etag = "etage8ad7218",
Description = "description2cf9da67",
};
mockGrpcClient.Setup(x => x.GetRealm(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(expectedResponse);
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
Realm response = client.GetRealm(request);
xunit::Assert.Same(expectedResponse, response);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public async stt::Task GetRealmRequestObjectAsync()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
GetRealmRequest request = new GetRealmRequest
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
};
Realm expectedResponse = new Realm
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
CreateTime = new wkt::Timestamp(),
UpdateTime = new wkt::Timestamp(),
Labels =
{
{
"key8a0b6e3c",
"value60c16320"
},
},
TimeZone = "time_zone73f23b20",
Etag = "etage8ad7218",
Description = "description2cf9da67",
};
mockGrpcClient.Setup(x => x.GetRealmAsync(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(new grpccore::AsyncUnaryCall<Realm>(stt::Task.FromResult(expectedResponse), null, null, null, null));
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
Realm responseCallSettings = await client.GetRealmAsync(request, gaxgrpc::CallSettings.FromCancellationToken(st::CancellationToken.None));
xunit::Assert.Same(expectedResponse, responseCallSettings);
Realm responseCancellationToken = await client.GetRealmAsync(request, st::CancellationToken.None);
xunit::Assert.Same(expectedResponse, responseCancellationToken);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public void GetRealm()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
GetRealmRequest request = new GetRealmRequest
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
};
Realm expectedResponse = new Realm
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
CreateTime = new wkt::Timestamp(),
UpdateTime = new wkt::Timestamp(),
Labels =
{
{
"key8a0b6e3c",
"value60c16320"
},
},
TimeZone = "time_zone73f23b20",
Etag = "etage8ad7218",
Description = "description2cf9da67",
};
mockGrpcClient.Setup(x => x.GetRealm(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(expectedResponse);
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
Realm response = client.GetRealm(request.Name);
xunit::Assert.Same(expectedResponse, response);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public async stt::Task GetRealmAsync()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
GetRealmRequest request = new GetRealmRequest
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
};
Realm expectedResponse = new Realm
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
CreateTime = new wkt::Timestamp(),
UpdateTime = new wkt::Timestamp(),
Labels =
{
{
"key8a0b6e3c",
"value60c16320"
},
},
TimeZone = "time_zone73f23b20",
Etag = "etage8ad7218",
Description = "description2cf9da67",
};
mockGrpcClient.Setup(x => x.GetRealmAsync(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(new grpccore::AsyncUnaryCall<Realm>(stt::Task.FromResult(expectedResponse), null, null, null, null));
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
Realm responseCallSettings = await client.GetRealmAsync(request.Name, gaxgrpc::CallSettings.FromCancellationToken(st::CancellationToken.None));
xunit::Assert.Same(expectedResponse, responseCallSettings);
Realm responseCancellationToken = await client.GetRealmAsync(request.Name, st::CancellationToken.None);
xunit::Assert.Same(expectedResponse, responseCancellationToken);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public void GetRealmResourceNames()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
GetRealmRequest request = new GetRealmRequest
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
};
Realm expectedResponse = new Realm
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
CreateTime = new wkt::Timestamp(),
UpdateTime = new wkt::Timestamp(),
Labels =
{
{
"key8a0b6e3c",
"value60c16320"
},
},
TimeZone = "time_zone73f23b20",
Etag = "etage8ad7218",
Description = "description2cf9da67",
};
mockGrpcClient.Setup(x => x.GetRealm(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(expectedResponse);
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
Realm response = client.GetRealm(request.RealmName);
xunit::Assert.Same(expectedResponse, response);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public async stt::Task GetRealmResourceNamesAsync()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
GetRealmRequest request = new GetRealmRequest
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
};
Realm expectedResponse = new Realm
{
RealmName = RealmName.FromProjectLocationRealm("[PROJECT]", "[LOCATION]", "[REALM]"),
CreateTime = new wkt::Timestamp(),
UpdateTime = new wkt::Timestamp(),
Labels =
{
{
"key8a0b6e3c",
"value60c16320"
},
},
TimeZone = "time_zone73f23b20",
Etag = "etage8ad7218",
Description = "description2cf9da67",
};
mockGrpcClient.Setup(x => x.GetRealmAsync(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(new grpccore::AsyncUnaryCall<Realm>(stt::Task.FromResult(expectedResponse), null, null, null, null));
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
Realm responseCallSettings = await client.GetRealmAsync(request.RealmName, gaxgrpc::CallSettings.FromCancellationToken(st::CancellationToken.None));
xunit::Assert.Same(expectedResponse, responseCallSettings);
Realm responseCancellationToken = await client.GetRealmAsync(request.RealmName, st::CancellationToken.None);
xunit::Assert.Same(expectedResponse, responseCancellationToken);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public void PreviewRealmUpdateRequestObject()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
PreviewRealmUpdateRequest request = new PreviewRealmUpdateRequest
{
Realm = new Realm(),
UpdateMask = new wkt::FieldMask(),
PreviewTime = new wkt::Timestamp(),
};
PreviewRealmUpdateResponse expectedResponse = new PreviewRealmUpdateResponse
{
Etag = "etage8ad7218",
TargetState = new TargetState(),
};
mockGrpcClient.Setup(x => x.PreviewRealmUpdate(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(expectedResponse);
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
PreviewRealmUpdateResponse response = client.PreviewRealmUpdate(request);
xunit::Assert.Same(expectedResponse, response);
mockGrpcClient.VerifyAll();
}
[xunit::FactAttribute]
public async stt::Task PreviewRealmUpdateRequestObjectAsync()
{
moq::Mock<RealmsService.RealmsServiceClient> mockGrpcClient = new moq::Mock<RealmsService.RealmsServiceClient>(moq::MockBehavior.Strict);
mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
PreviewRealmUpdateRequest request = new PreviewRealmUpdateRequest
{
Realm = new Realm(),
UpdateMask = new wkt::FieldMask(),
PreviewTime = new wkt::Timestamp(),
};
PreviewRealmUpdateResponse expectedResponse = new PreviewRealmUpdateResponse
{
Etag = "etage8ad7218",
TargetState = new TargetState(),
};
mockGrpcClient.Setup(x => x.PreviewRealmUpdateAsync(request, moq::It.IsAny<grpccore::CallOptions>())).Returns(new grpccore::AsyncUnaryCall<PreviewRealmUpdateResponse>(stt::Task.FromResult(expectedResponse), null, null, null, null));
RealmsServiceClient client = new RealmsServiceClientImpl(mockGrpcClient.Object, null);
PreviewRealmUpdateResponse responseCallSettings = await client.PreviewRealmUpdateAsync(request, gaxgrpc::CallSettings.FromCancellationToken(st::CancellationToken.None));
xunit::Assert.Same(expectedResponse, responseCallSettings);
PreviewRealmUpdateResponse responseCancellationToken = await client.PreviewRealmUpdateAsync(request, st::CancellationToken.None);
xunit::Assert.Same(expectedResponse, responseCancellationToken);
mockGrpcClient.VerifyAll();
}
}
}
package Paws::Neptune::CreateDBCluster;
use Moose;
has AvailabilityZones => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
has BackupRetentionPeriod => (is => 'ro', isa => 'Int');
has CharacterSetName => (is => 'ro', isa => 'Str');
has DatabaseName => (is => 'ro', isa => 'Str');
has DBClusterIdentifier => (is => 'ro', isa => 'Str', required => 1);
has DBClusterParameterGroupName => (is => 'ro', isa => 'Str');
has DBSubnetGroupName => (is => 'ro', isa => 'Str');
has DeletionProtection => (is => 'ro', isa => 'Bool');
has EnableCloudwatchLogsExports => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
has EnableIAMDatabaseAuthentication => (is => 'ro', isa => 'Bool');
has Engine => (is => 'ro', isa => 'Str', required => 1);
has EngineVersion => (is => 'ro', isa => 'Str');
has KmsKeyId => (is => 'ro', isa => 'Str');
has MasterUsername => (is => 'ro', isa => 'Str');
has MasterUserPassword => (is => 'ro', isa => 'Str');
has OptionGroupName => (is => 'ro', isa => 'Str');
has Port => (is => 'ro', isa => 'Int');
has PreferredBackupWindow => (is => 'ro', isa => 'Str');
has PreferredMaintenanceWindow => (is => 'ro', isa => 'Str');
has PreSignedUrl => (is => 'ro', isa => 'Str');
has ReplicationSourceIdentifier => (is => 'ro', isa => 'Str');
has StorageEncrypted => (is => 'ro', isa => 'Bool');
has Tags => (is => 'ro', isa => 'ArrayRef[Paws::Neptune::Tag]');
has VpcSecurityGroupIds => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
use MooseX::ClassAttribute;
class_has _api_call => (isa => 'Str', is => 'ro', default => 'CreateDBCluster');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Neptune::CreateDBClusterResult');
class_has _result_key => (isa => 'Str', is => 'ro', default => 'CreateDBClusterResult');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Neptune::CreateDBCluster - Arguments for method CreateDBCluster on L<Paws::Neptune>
=head1 DESCRIPTION
This class represents the parameters used for calling the method CreateDBCluster on the
L<Amazon Neptune|Paws::Neptune> service. Use the attributes of this class
as arguments to method CreateDBCluster.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to CreateDBCluster.
=head1 SYNOPSIS
my $rds = Paws->service('Neptune');
my $CreateDBClusterResult = $rds->CreateDBCluster(
DBClusterIdentifier => 'MyString',
Engine => 'MyString',
AvailabilityZones => [ 'MyString', ... ], # OPTIONAL
BackupRetentionPeriod => 1, # OPTIONAL
CharacterSetName => 'MyString', # OPTIONAL
DBClusterParameterGroupName => 'MyString', # OPTIONAL
DBSubnetGroupName => 'MyString', # OPTIONAL
DatabaseName => 'MyString', # OPTIONAL
DeletionProtection => 1, # OPTIONAL
EnableCloudwatchLogsExports => [ 'MyString', ... ], # OPTIONAL
EnableIAMDatabaseAuthentication => 1, # OPTIONAL
EngineVersion => 'MyString', # OPTIONAL
KmsKeyId => 'MyString', # OPTIONAL
MasterUserPassword => 'MyString', # OPTIONAL
MasterUsername => 'MyString', # OPTIONAL
OptionGroupName => 'MyString', # OPTIONAL
Port => 1, # OPTIONAL
PreSignedUrl => 'MyString', # OPTIONAL
PreferredBackupWindow => 'MyString', # OPTIONAL
PreferredMaintenanceWindow => 'MyString', # OPTIONAL
ReplicationSourceIdentifier => 'MyString', # OPTIONAL
StorageEncrypted => 1, # OPTIONAL
Tags => [
{
Key => 'MyString',
Value => 'MyString',
},
...
], # OPTIONAL
VpcSecurityGroupIds => [ 'MyString', ... ], # OPTIONAL
);
# Results:
my $DBCluster = $CreateDBClusterResult->DBCluster;
# Returns a L<Paws::Neptune::CreateDBClusterResult> object.
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
For the AWS API documentation, see L<https://docs.aws.amazon.com/goto/WebAPI/rds/CreateDBCluster>
=head1 ATTRIBUTES
=head2 AvailabilityZones => ArrayRef[Str|Undef]
A list of EC2 Availability Zones that instances in the DB cluster can
be created in.
=head2 BackupRetentionPeriod => Int
The number of days for which automated backups are retained. You must
specify a minimum value of 1.
Default: 1
Constraints:
=over
=item *
Must be a value from 1 to 35
=back
=head2 CharacterSetName => Str
I<(Not supported by Neptune)>
=head2 DatabaseName => Str
The name for your database, containing up to 64 alphanumeric characters. If
you do not provide a name, Amazon Neptune will not create a database in the
DB cluster you are creating.
=head2 B<REQUIRED> DBClusterIdentifier => Str
The DB cluster identifier. This parameter is stored as a lowercase
string.
Constraints:
=over
=item *
Must contain from 1 to 63 letters, numbers, or hyphens.
=item *
First character must be a letter.
=item *
Cannot end with a hyphen or contain two consecutive hyphens.
=back
Example: C<my-cluster1>
=head2 DBClusterParameterGroupName => Str
The name of the DB cluster parameter group to associate with this DB
cluster. If this argument is omitted, the default is used.
Constraints:
=over
=item *
If supplied, must match the name of an existing
DBClusterParameterGroup.
=back
=head2 DBSubnetGroupName => Str
A DB subnet group to associate with this DB cluster.
Constraints: Must match the name of an existing DBSubnetGroup. Must not
be default.
Example: C<mySubnetgroup>
=head2 DeletionProtection => Bool
A value that indicates whether the DB cluster has deletion protection
enabled. The database can't be deleted when deletion protection is
enabled. By default, deletion protection is enabled.
=head2 EnableCloudwatchLogsExports => ArrayRef[Str|Undef]
The list of log types that need to be enabled for exporting to
CloudWatch Logs.
=head2 EnableIAMDatabaseAuthentication => Bool
True to enable mapping of AWS Identity and Access Management (IAM)
accounts to database accounts; false otherwise.
Default: C<false>
=head2 B<REQUIRED> Engine => Str
The name of the database engine to be used for this DB cluster.
Valid Values: C<neptune>
=head2 EngineVersion => Str
The version number of the database engine to use. Currently, setting
this parameter has no effect.
Example: C<1.0.1>
=head2 KmsKeyId => Str
The AWS KMS key identifier for an encrypted DB cluster.
The KMS key identifier is the Amazon Resource Name (ARN) for the KMS
encryption key. If you are creating a DB cluster with the same AWS
account that owns the KMS encryption key used to encrypt the new DB
cluster, then you can use the KMS key alias instead of the ARN for the
KMS encryption key.
If an encryption key is not specified in C<KmsKeyId>:
=over
=item *
If C<ReplicationSourceIdentifier> identifies an encrypted source, then
Amazon Neptune will use the encryption key used to encrypt the source.
Otherwise, Amazon Neptune will use your default encryption key.
=item *
If the C<StorageEncrypted> parameter is true and
C<ReplicationSourceIdentifier> is not specified, then Amazon Neptune
will use your default encryption key.
=back
AWS KMS creates the default encryption key for your AWS account. Your
AWS account has a different default encryption key for each AWS Region.
If you create a Read Replica of an encrypted DB cluster in another AWS
Region, you must set C<KmsKeyId> to a KMS key ID that is valid in the
destination AWS Region. This key is used to encrypt the Read Replica in
that AWS Region.
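A minimal sketch of creating an encrypted cluster with an explicit key (the
ARN below is a placeholder, not a real resource):
    my $result = $rds->CreateDBCluster(
      DBClusterIdentifier => 'my-encrypted-cluster',
      Engine              => 'neptune',
      StorageEncrypted    => 1,
      KmsKeyId            => 'arn:aws:kms:us-east-1:123456789012:key/EXAMPLE',
    );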
=head2 MasterUsername => Str
The name of the master user for the DB cluster.
Constraints:
=over
=item *
Must be 1 to 16 letters or numbers.
=item *
First character must be a letter.
=item *
Cannot be a reserved word for the chosen database engine.
=back
=head2 MasterUserPassword => Str
The password for the master database user. This password can contain
any printable ASCII character except "/", """, or "@".
Constraints: Must contain from 8 to 41 characters.
=head2 OptionGroupName => Str
I<(Not supported by Neptune)>
=head2 Port => Int
The port number on which the instances in the DB cluster accept
connections.
Default: C<8182>
=head2 PreferredBackupWindow => Str
The daily time range during which automated backups are created if
automated backups are enabled using the C<BackupRetentionPeriod>
parameter.
The default is a 30-minute window selected at random from an 8-hour
block of time for each AWS Region. To see the time blocks available,
see Adjusting the Preferred Maintenance Window
(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
in the I<Amazon Neptune User Guide.>
Constraints:
=over
=item *
Must be in the format C<hh24:mi-hh24:mi>.
=item *
Must be in Universal Coordinated Time (UTC).
=item *
Must not conflict with the preferred maintenance window.
=item *
Must be at least 30 minutes.
=back
=head2 PreferredMaintenanceWindow => Str
The weekly time range during which system maintenance can occur, in
Universal Coordinated Time (UTC).
Format: C<ddd:hh24:mi-ddd:hh24:mi>
The default is a 30-minute window selected at random from an 8-hour
block of time for each AWS Region, occurring on a random day of the
week. To see the time blocks available, see Adjusting the Preferred
Maintenance Window
(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
in the I<Amazon Neptune User Guide.>
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
Constraints: Minimum 30-minute window.
=head2 PreSignedUrl => Str
This parameter is not currently supported.
=head2 ReplicationSourceIdentifier => Str
The Amazon Resource Name (ARN) of the source DB instance or DB cluster
if this DB cluster is created as a Read Replica.
=head2 StorageEncrypted => Bool
Specifies whether the DB cluster is encrypted.
=head2 Tags => ArrayRef[L<Paws::Neptune::Tag>]
The tags to assign to the new DB cluster.
=head2 VpcSecurityGroupIds => ArrayRef[Str|Undef]
A list of EC2 VPC security groups to associate with this DB cluster.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method CreateDBCluster in L<Paws::Neptune>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
#include "pe_buffer.h"
#include <iostream>
#include "../scanners/artefact_scanner.h"
size_t pesieve::PeBuffer::calcRemoteImgSize(HANDLE processHandle, ULONGLONG modBaseAddr)
{
const size_t hdr_buffer_size = PAGE_SIZE;
BYTE hdr_buffer[hdr_buffer_size] = { 0 };
size_t pe_vsize = 0;
PIMAGE_SECTION_HEADER hdr_ptr = NULL;
if (peconv::read_remote_pe_header(processHandle, (BYTE*)modBaseAddr, hdr_buffer, hdr_buffer_size)) {
hdr_ptr = peconv::get_section_hdr(hdr_buffer, hdr_buffer_size, 0);
}
if (!hdr_ptr) {
pe_vsize = peconv::fetch_region_size(processHandle, (PBYTE)modBaseAddr);
//std::cout << "[!] Image size at: " << std::hex << modBaseAddr << " undetermined, using region size instead: " << pe_vsize << std::endl;
return pe_vsize;
}
pe_vsize = ArtefactScanner::calcImgSize(processHandle, (HMODULE)modBaseAddr, hdr_buffer, hdr_buffer_size, hdr_ptr);
//std::cout << "[!] Image size at: " << std::hex << modBaseAddr << " undetermined, using calculated img size: " << pe_vsize << std::endl;
return pe_vsize;
}
bool pesieve::PeBuffer::readRemote(HANDLE process_hndl, ULONGLONG module_base, size_t pe_vsize)
{
if (pe_vsize == 0) {
// if not size supplied, try with the size fetched from the header
pe_vsize = peconv::get_remote_image_size(process_hndl, (BYTE*)module_base);
}
if (_readRemote(process_hndl, module_base, pe_vsize)) {
return true; //success
}
// try with the calculated size
pe_vsize = calcRemoteImgSize(process_hndl, module_base);
std::cout << "[!] Image size at: " << std::hex << module_base << " undetermined, using calculated size: " << pe_vsize << std::endl;
return _readRemote(process_hndl, module_base, pe_vsize);
}
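// Usage sketch (hedged: PeBuffer construction depends on headers not shown
// here, and hProcess is assumed to be opened with PROCESS_VM_READ):
//   pesieve::PeBuffer buf(/* ... */);
//   if (buf.readRemote(hProcess, moduleBase, 0)) {
//       // pe_vsize == 0 lets the size be fetched from the header, falling
//       // back to the calculated size as implemented above.
//   }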
bool pesieve::PeBuffer::_readRemote(HANDLE process_hndl, ULONGLONG module_base, size_t pe_vsize)
{
if (pe_vsize == 0) {
return false;
}
if (!allocBuffer(pe_vsize)) {
return false;
}
size_t read_size = peconv::read_remote_area(process_hndl, (BYTE*)module_base, vBuf, pe_vsize);
if (read_size != pe_vsize) {
std::cout << "[!] Failed reading Image at: " << std::hex << module_base << " img size: " << pe_vsize << std::endl;
freeBuffer();
return false;
}
this->moduleBase = module_base;
this->relocBase = module_base; //by default set the same as module base
return true;
}
bool pesieve::PeBuffer::resizeBuffer(size_t new_size)
{
if (!vBuf) return false;
BYTE *new_buf = peconv::alloc_aligned(new_size, PAGE_READWRITE);
if (!new_buf) {
return false;
}
//preserve the module base:
ULONGLONG module_base = this->moduleBase;
size_t smaller_size = (vBufSize < new_size) ? vBufSize : new_size;
memcpy(new_buf, this->vBuf, smaller_size);
freeBuffer();
this->moduleBase = module_base;
this->vBuf = new_buf;
this->vBufSize = new_size;
return true;
}
bool pesieve::PeBuffer::resizeLastSection(size_t new_img_size)
{
if (!vBuf) return false;
PIMAGE_SECTION_HEADER last_sec = peconv::get_last_section(vBuf, vBufSize, false);
if (!last_sec) {
return false;
}
if (new_img_size < last_sec->VirtualAddress) {
return false;
}
const size_t new_sec_vsize = new_img_size - last_sec->VirtualAddress;
const size_t new_sec_rsize = new_sec_vsize;
if (last_sec->VirtualAddress + new_sec_vsize > this->vBufSize) {
//buffer too small
return false;
}
if (!peconv::update_image_size(vBuf, new_img_size)) {
return false;
}
last_sec->Misc.VirtualSize = new_sec_vsize;
last_sec->SizeOfRawData = new_sec_rsize;
return true;
}
bool pesieve::PeBuffer::dumpPeToFile(
IN std::string dumpFileName,
IN OUT peconv::t_pe_dump_mode &dumpMode,
IN OPTIONAL const peconv::ExportsMapper* exportsMap,
OUT OPTIONAL peconv::ImpsNotCovered *notCovered
)
{
if (!vBuf || !isValidPe()) return false;
#ifdef _DEBUG
std::cout << "Dumping using relocBase: " << std::hex << relocBase << "\n";
#endif
if (exportsMap != nullptr) {
if (!peconv::fix_imports(this->vBuf, this->vBufSize, *exportsMap, notCovered)) {
std::cerr << "[-] Unable to fix imports!" << std::endl;
}
}
if (dumpMode == peconv::PE_DUMP_AUTO) {
bool is_raw_alignment_valid = peconv::is_valid_sectons_alignment(vBuf, vBufSize, true);
bool is_virtual_alignment_valid = peconv::is_valid_sectons_alignment(vBuf, vBufSize, false);
#ifdef _DEBUG
std::cout << "Is raw alignment valid: " << is_raw_alignment_valid << std::endl;
std::cout << "Is virtual alignment valid: " << is_virtual_alignment_valid << std::endl;
#endif
if (!is_raw_alignment_valid && is_virtual_alignment_valid) {
//in case if raw alignment is invalid and virtual valid, try to dump using Virtual Alignment first
dumpMode = peconv::PE_DUMP_REALIGN;
bool is_dumped = peconv::dump_pe(dumpFileName.c_str(), this->vBuf, this->vBufSize, this->relocBase, dumpMode);
if (is_dumped) {
return is_dumped;
}
dumpMode = peconv::PE_DUMP_AUTO; //revert and try again
}
}
// dump PE in a given dump mode:
return peconv::dump_pe(dumpFileName.c_str(), this->vBuf, this->vBufSize, this->relocBase, dumpMode);
}
bool pesieve::PeBuffer::dumpToFile(IN std::string dumpFileName)
{
if (!vBuf) return false;
return peconv::dump_to_file(dumpFileName.c_str(), vBuf, vBufSize);
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
/**
* This class allows generic access to variable length type-safe parameter
* lists.
*/
public class Options {
public static abstract class StringOption {
private final String value;
protected StringOption(String value) {
this.value = value;
}
public String getValue() {
return value;
}
}
public static abstract class ClassOption {
private final Class<?> value;
protected ClassOption(Class<?> value) {
this.value = value;
}
public Class<?> getValue() {
return value;
}
}
public static abstract class BooleanOption {
private final boolean value;
protected BooleanOption(boolean value) {
this.value = value;
}
public boolean getValue() {
return value;
}
}
public static abstract class IntegerOption {
private final int value;
protected IntegerOption(int value) {
this.value = value;
}
public int getValue() {
return value;
}
}
public static abstract class LongOption {
private final long value;
protected LongOption(long value) {
this.value = value;
}
public long getValue() {
return value;
}
}
public static abstract class PathOption {
private final Path value;
protected PathOption(Path value) {
this.value = value;
}
public Path getValue() {
return value;
}
}
public static abstract class FSDataInputStreamOption {
private final FSDataInputStream value;
protected FSDataInputStreamOption(FSDataInputStream value) {
this.value = value;
}
public FSDataInputStream getValue() {
return value;
}
}
public static abstract class FSDataOutputStreamOption {
private final FSDataOutputStream value;
protected FSDataOutputStreamOption(FSDataOutputStream value) {
this.value = value;
}
public FSDataOutputStream getValue() {
return value;
}
}
public static abstract class ProgressableOption {
private final Progressable value;
protected ProgressableOption(Progressable value) {
this.value = value;
}
public Progressable getValue() {
return value;
}
}
/**
* Find the first option of the required class.
* @param <T> the static class to find
* @param <base> the parent class of the array
* @param cls the dynamic class to find
* @param opts the list of options to look through
* @return the first option that matches
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <base, T extends base> T getOption(Class<T> cls, base [] opts
) throws IOException {
for(base o: opts) {
if (o.getClass() == cls) {
return (T) o;
}
}
return null;
}
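  // Illustrative sketch only (BlockSizeOption is a hypothetical subclass, not
  // defined in this file):
  //   BlockSizeOption block = Options.getOption(BlockSizeOption.class, opts);
  //   long size = (block != null) ? block.getValue() : DEFAULT_BLOCK_SIZE;
  // getOption returns null when no option of the requested class is present.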
/**
* Prepend some new options to the old options
* @param <T> the type of options
* @param oldOpts the old options
* @param newOpts the new options
* @return a new array of options
*/
public static <T> T[] prependOptions(T[] oldOpts, T... newOpts) {
// copy the new options to the front of the array
T[] result = Arrays.copyOf(newOpts, newOpts.length+oldOpts.length);
// now copy the old options
System.arraycopy(oldOpts, 0, result, newOpts.length, oldOpts.length);
return result;
}
}
dictionary Optional {
short field;
};
dictionary Required {
required short field;
};
dictionary Required2: Required {
long field2;
};
// The following is wrong
// but it still shouldn't break the validator
dictionary Recursive : Recursive2 {};
dictionary Recursive2 : Recursive {};
// Do not warn if we don't know about the supertype
dictionary SuperDictUnknown: GuessWhatAmI {};
typedef (DOMString or Optional) OptionalUnion;
typedef (DOMString or Optional?) NullableUnion;
interface mixin Container {
undefined op1(Optional shouldBeOptional);
undefined op2(Required noNeedToBeOptional);
undefined op22(Required2 noNeedToBeOptional);
undefined op3((Optional or boolean) union);
undefined op4(OptionalUnion union);
undefined op5(NullableUnion union);
undefined op6(Recursive recursive);
undefined op7(SuperDictUnknown unknown);
undefined op8(Optional lastRequired, optional DOMString yay);
undefined op9(Optional notLast, DOMString yay);
};
[Exposed=Window]
interface ContainerInterface {
async iterable<DOMString>(Optional shouldBeOptional);
};
msgid ""
msgstr "Content-Type: text/plain; charset=UTF-8"
#: dbgintfcommonstrings.rsrunerrorarithmeticoverflowerror
msgid "Arithmetic overflow error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorassertionfailederror
msgid "Assertion failed error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorbaddriverequeststructleng
msgid "Bad drive request struct length"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorcalltoabstractmethod
msgid "Call to abstract method"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorcannotremovecurrentdirect
msgid "Cannot remove current directory"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorcannotrenameacrossdrives
msgid "Cannot rename across drives"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorcollectionindexoutofrange
msgid "Collection index out of range"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorcollectionoverflowerror
msgid "Collection overflow error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorcrcerrorindata
msgid "CRC error in data"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordevicereadfault
msgid "Device read fault"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordevicewritefault
msgid "Device write fault"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordiskiswriteprotected
msgid "Disk is write-protected"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordiskreaderror
msgid "Disk read error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordiskseekerror
msgid "Disk seek error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordiskwriteerror
msgid "Disk write error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordivisionbyzero
msgid "Division by zero"
msgstr ""
#: dbgintfcommonstrings.rsrunerrordrivenotready
msgid "Drive not ready"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorexceptionstackcorrupted
msgid "Exception stack corrupted"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfileaccessdenied
msgid "File access denied"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfilenotassigned
msgid "File not assigned"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfilenotfound
msgid "File not found"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfilenotopen
msgid "File not open"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfilenotopenforinput
msgid "File not open for input"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfilenotopenforoutput
msgid "File not open for output"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfloatingpointoverflow
msgid "Floating point overflow"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorfloatingpointunderflow
msgid "Floating point underflow"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorgeneralprotectionfault
msgid "General Protection fault"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorhardwarefailure
msgid "Hardware failure"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorheapoverflowerror
msgid "Heap overflow error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvaliddrivenumber
msgid "Invalid drive number"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidenumeration
msgid "Invalid enumeration"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidfileaccesscode
msgid "Invalid file access code"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidfilehandle
msgid "Invalid file handle"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidfloatingpointopera
msgid "Invalid floating point operation"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidfunctionnumber
msgid "Invalid function number"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidnumericformat
msgid "Invalid numeric format"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidpointeroperation
msgid "Invalid pointer operation"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidtypecast
msgid "Invalid typecast"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorinvalidvaluespecified
msgid "Invalid value specified"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorobjectnotinitialized
msgid "Object not initialized"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorpathnotfound
msgid "Path not found"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorprinteroutofpaper
msgid "Printer out of paper"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorrangecheckerror
msgid "Range check error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorsafecallerrorcheck
msgid "Safecall error check"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorsectornotfound
msgid "Sector Not Found"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorstackoverflowerror
msgid "Stack overflow error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorstreamregistrationerror
msgid "Stream registration error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorthreadsnotsupported
msgid "Threads not supported"
msgstr ""
#: dbgintfcommonstrings.rsrunerrortoomanyopenfiles
msgid "Too many open files"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorunhandledexceptionoccurre
msgid "Unhandled exception occurred"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorunknownmediatype
msgid "Unknown media type"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorvararrayboundscheckerror
msgid "Var Array Bounds check error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorvariantarraycreate
msgid "Variant array create"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorvariantdispatcherror
msgid "Variant dispatch error"
msgstr ""
#: dbgintfcommonstrings.rsrunerrorvariantisnotanarray
msgid "Variant is not an array"
msgstr ""
#
# SPDX-License-Identifier: Apache-2.0
#
---
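# Note (editorial sketch): this playbook relies on YAML anchors and aliases --
# `&Org1` defines a node once, `*Org1` reuses it verbatim, and `<<: *Org1`
# merges its keys into another mapping. The organizations defined under
# `organizations` are therefore shared by the gateways, channels, and
# consortium lists below without duplication.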
- name: Deploy blockchain infrastructure
hosts: localhost
vars:
infrastructure:
type: docker
docker:
network: "fabricvscodelocalfabric_network"
labels:
fabric-environment-name: "Local Fabric"
organizations:
- &Org1
msp:
id: "Org1MSP"
admin:
identity: "org1Admin"
secret: "org1Adminpw"
ca: &Org1CA
id: "Org1CA"
admin_identity: "admin"
admin_secret: "adminpw"
tls:
enabled: false
docker:
name: "fabricvscodelocalfabric_ca.org1.example.com"
port: 17050
peers:
- &Org1Peer1
id: "Org1Peer1"
identity: "org1peer1"
secret: "org1peer1pw"
database_type: couchdb
tls:
enabled: false
docker:
name: "fabricvscodelocalfabric_peer0.org1.example.com"
port: 17051
chaincode_name_prefix: "fabricvscodelocalfabric"
chaincode_port: 17052
operations_port: 17053
couchdb:
name: "fabricvscodelocalfabric_couchdb0.org1.example.com"
port: 17054
nodes: "{{ playbook_dir }}/nodes/Org1"
wallet: "{{ playbook_dir }}/wallets/Org1"
gateways: "{{ playbook_dir }}/gateways/Org1"
- &OrdererOrg
msp:
id: "OrdererMSP"
admin:
identity: "ordererAdmin"
secret: "ordererAdminpw"
ca: &OrdererCA
id: "OrdererCA"
admin_identity: "admin"
admin_secret: "adminpw"
tls:
enabled: false
docker:
name: "fabricvscodelocalfabric_ca.orderer.example.com"
port: 17055
orderer: &Orderer
id: "Orderer"
identity: "orderer"
secret: "ordererpw"
tls:
enabled: false
consortium:
members:
- *Org1
docker:
name: "fabricvscodelocalfabric_orderer.example.com"
port: 17056
operations_port: 17057
nodes: "{{ playbook_dir }}/nodes/Orderer"
wallet: "{{ playbook_dir }}/wallets/Orderer"
gateways: "{{ playbook_dir }}/gateways/Orderer"
gateways:
- name: Org1
organization:
<<: *Org1
gateway_peers:
- *Org1Peer1
channels:
- name: mychannel
orderer: *Orderer
members:
- <<: *Org1
committing_peers:
- *Org1Peer1
anchor_peers:
- *Org1Peer1
roles:
    - ibm.blockchain_platform_manager
// Package memory is a storage backend based on memory
package memory
import (
"fmt"
"time"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/index"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/storage"
)
var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")
// Storage is an implementation of git.Storer that stores data in memory and
// is therefore ephemeral. It should be used in controlled environments, since
// the in-memory representation of a repository can fill the machine's memory.
// On the other hand, this storage has the best performance.
type Storage struct {
ConfigStorage
ObjectStorage
ShallowStorage
IndexStorage
ReferenceStorage
ModuleStorage
}
// NewStorage returns a new Storage based on memory
func NewStorage() *Storage {
return &Storage{
ReferenceStorage: make(ReferenceStorage),
ConfigStorage: ConfigStorage{},
ShallowStorage: ShallowStorage{},
ObjectStorage: ObjectStorage{
Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
Commits: make(map[plumbing.Hash]plumbing.EncodedObject),
Trees: make(map[plumbing.Hash]plumbing.EncodedObject),
Blobs: make(map[plumbing.Hash]plumbing.EncodedObject),
Tags: make(map[plumbing.Hash]plumbing.EncodedObject),
},
ModuleStorage: make(ModuleStorage),
}
}
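// Usage sketch (hedged; the git.Init signature below matches go-git v5 but is
// shown only as illustration, and a nil worktree yields a bare repository):
//
//	storer := memory.NewStorage()
//	repo, err := git.Init(storer, nil)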
type ConfigStorage struct {
config *config.Config
}
func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
if err := cfg.Validate(); err != nil {
return err
}
c.config = cfg
return nil
}
func (c *ConfigStorage) Config() (*config.Config, error) {
if c.config == nil {
c.config = config.NewConfig()
}
return c.config, nil
}
type IndexStorage struct {
index *index.Index
}
func (c *IndexStorage) SetIndex(idx *index.Index) error {
c.index = idx
return nil
}
func (c *IndexStorage) Index() (*index.Index, error) {
if c.index == nil {
c.index = &index.Index{Version: 2}
}
return c.index, nil
}
type ObjectStorage struct {
Objects map[plumbing.Hash]plumbing.EncodedObject
Commits map[plumbing.Hash]plumbing.EncodedObject
Trees map[plumbing.Hash]plumbing.EncodedObject
Blobs map[plumbing.Hash]plumbing.EncodedObject
Tags map[plumbing.Hash]plumbing.EncodedObject
}
func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
return &plumbing.MemoryObject{}
}
func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
h := obj.Hash()
o.Objects[h] = obj
switch obj.Type() {
case plumbing.CommitObject:
o.Commits[h] = o.Objects[h]
case plumbing.TreeObject:
o.Trees[h] = o.Objects[h]
case plumbing.BlobObject:
o.Blobs[h] = o.Objects[h]
case plumbing.TagObject:
o.Tags[h] = o.Objects[h]
default:
return h, ErrUnsupportedObjectType
}
return h, nil
}
func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
if _, ok := o.Objects[h]; !ok {
return plumbing.ErrObjectNotFound
}
return nil
}
func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
size int64, err error) {
obj, ok := o.Objects[h]
if !ok {
return 0, plumbing.ErrObjectNotFound
}
return obj.Size(), nil
}
func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
obj, ok := o.Objects[h]
if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
return nil, plumbing.ErrObjectNotFound
}
return obj, nil
}
func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
var series []plumbing.EncodedObject
switch t {
case plumbing.AnyObject:
series = flattenObjectMap(o.Objects)
case plumbing.CommitObject:
series = flattenObjectMap(o.Commits)
case plumbing.TreeObject:
series = flattenObjectMap(o.Trees)
case plumbing.BlobObject:
series = flattenObjectMap(o.Blobs)
case plumbing.TagObject:
series = flattenObjectMap(o.Tags)
}
return storer.NewEncodedObjectSliceIter(series), nil
}
func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject {
objects := make([]plumbing.EncodedObject, 0, len(m))
for _, obj := range m {
objects = append(objects, obj)
}
return objects
}
func (o *ObjectStorage) Begin() storer.Transaction {
return &TxObjectStorage{
Storage: o,
Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
}
}
func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
for h := range o.Objects {
err := fun(h)
if err != nil {
if err == storer.ErrStop {
return nil
}
return err
}
}
return nil
}
func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
return nil, nil
}
func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error {
return nil
}
var errNotSupported = fmt.Errorf("not supported")
func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
return time.Time{}, errNotSupported
}
func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
return errNotSupported
}
type TxObjectStorage struct {
Storage *ObjectStorage
Objects map[plumbing.Hash]plumbing.EncodedObject
}
func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
h := obj.Hash()
tx.Objects[h] = obj
return h, nil
}
func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
obj, ok := tx.Objects[h]
if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
return nil, plumbing.ErrObjectNotFound
}
return obj, nil
}
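// Commit moves every staged object into the backing ObjectStorage. Deleting
// from the map while ranging over it is well-defined in Go.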
func (tx *TxObjectStorage) Commit() error {
for h, obj := range tx.Objects {
delete(tx.Objects, h)
if _, err := tx.Storage.SetEncodedObject(obj); err != nil {
return err
}
}
return nil
}
func (tx *TxObjectStorage) Rollback() error {
tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject)
return nil
}
type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference
func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error {
if ref != nil {
r[ref.Name()] = ref
}
return nil
}
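// CheckAndSetReference updates a reference only if the stored value still
// matches old, i.e. a compare-and-swap.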
func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
if ref == nil {
return nil
}
if old != nil {
tmp := r[ref.Name()]
if tmp != nil && tmp.Hash() != old.Hash() {
return storage.ErrReferenceHasChanged
}
}
r[ref.Name()] = ref
return nil
}
func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
ref, ok := r[n]
if !ok {
return nil, plumbing.ErrReferenceNotFound
}
return ref, nil
}
func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
var refs []*plumbing.Reference
for _, ref := range r {
refs = append(refs, ref)
}
return storer.NewReferenceSliceIter(refs), nil
}
func (r ReferenceStorage) CountLooseRefs() (int, error) {
return len(r), nil
}
func (r ReferenceStorage) PackRefs() error {
return nil
}
func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
delete(r, n)
return nil
}
type ShallowStorage []plumbing.Hash
func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
*s = commits
return nil
}
func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) {
return s, nil
}
type ModuleStorage map[string]*Storage
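// Module returns the storage for the named submodule, creating an empty one
// on first use.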
func (s ModuleStorage) Module(name string) (storage.Storer, error) {
if m, ok := s[name]; ok {
return m, nil
}
m := NewStorage()
s[name] = m
return m, nil
}
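A minimal usage sketch of the storage above, exercising the transaction and reference semantics. It assumes this file is go-git v4's in-memory backend, so the import paths below are go-git v4's and may differ in your tree; everything else uses only the API defined in this file and in go-git's plumbing package.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	s := memory.NewStorage()

	// Stage a blob inside a transaction; it stays invisible to the
	// storage until Commit copies it over.
	tx := s.Begin()
	obj := s.NewEncodedObject()
	obj.SetType(plumbing.BlobObject)
	w, _ := obj.Writer()
	w.Write([]byte("hello"))
	w.Close()
	h, _ := tx.SetEncodedObject(obj)

	fmt.Println(s.HasEncodedObject(h)) // "object not found": still only staged
	tx.Commit()
	fmt.Println(s.HasEncodedObject(h)) // <nil>: visible after Commit

	// References behave like a simple compare-and-swap map.
	s.SetReference(plumbing.NewHashReference("refs/heads/master", h))
	ref, _ := s.Reference("refs/heads/master")
	fmt.Println(ref.Hash() == h) // true
}

Note that all state lives in plain maps, so nothing survives the process: this backend is meant for tests and throwaway clones, not persistence.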
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_172) on Sun Jun 07 15:57:41 CEST 2020 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>simplenlg.server (SimpleNLG 4.5.0 API)</title>
<meta name="date" content="2020-06-07">
<link rel="stylesheet" type="text/css" href="../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title = "simplenlg.server (SimpleNLG 4.5.0 API)";
}
}
catch (err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../index-all.html">Index</a></li>
<li><a href="../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../simplenlg/realiser/english/package-summary.html">Prev Package</a></li>
<li><a href="../../simplenlg/syntax/english/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../index.html?simplenlg/server/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 title="Package" class="title">Package simplenlg.server</h1>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<table class="typeSummary" border="0" cellpadding="3" cellspacing="0"
summary="Class Summary table, listing classes, and an explanation">
<caption><span>Class Summary</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Class</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="../../simplenlg/server/RealisationRequest.html" title="class in simplenlg.server">RealisationRequest</a></td>
<td class="colLast">
<div class="block">This class handles one realisation request.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><a href="../../simplenlg/server/SimpleServer.html" title="class in simplenlg.server">SimpleServer</a></td>
<td class="colLast">
<div class="block">SimpleServer is a program that realises xml requests.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../index-all.html">Index</a></li>
<li><a href="../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../simplenlg/realiser/english/package-summary.html">Prev Package</a></li>
<li><a href="../../simplenlg/syntax/english/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../index.html?simplenlg/server/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy">
<small>Copyright © 2020. All Rights Reserved.</small>
</p>
</body>
</html>
// This file is part of CaesarIA.
//
// CaesarIA is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// CaesarIA is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with CaesarIA. If not, see <http://www.gnu.org/licenses/>.
//
// Copyright 2012-2014 Dalerank, [email protected]
#ifndef __CAESARIA_CONSTRUCTION_EXTENSION_H_INCLUDED__
#define __CAESARIA_CONSTRUCTION_EXTENSION_H_INCLUDED__
#include "core/referencecounted.hpp"
#include "core/scopedptr.hpp"
#include "core/variant_map.hpp"
#include "construction.hpp"
class ConstructionExtension : public ReferenceCounted
{
public:
virtual void save( VariantMap& stream ) const;
virtual void load( const VariantMap& stream );
virtual void timeStep( ConstructionPtr parent, unsigned int time );
virtual bool isDeleted() const { return _isDeleted; }
virtual std::string type() const = 0;
virtual void destroy( ConstructionPtr parent ) {}
void setName( const std::string& name ) { _name = name; }
const std::string& name() const { return _name; }
protected:
ConstructionExtension() : _isDeleted( false )
{}
bool _isDeleted;
DateTime _finishDate;
VariantMap _options;
std::string _name;
};
class WarehouseBuff : public ConstructionExtension
{
public:
static ConstructionExtensionPtr create();
static ConstructionExtensionPtr assignTo(WarehousePtr warehouse, int group, float value, int week2finish );
static ConstructionExtensionPtr uniqueTo(WarehousePtr warehouse, int group, float value, int week2finish, const std::string& name );
virtual void timeStep( ConstructionPtr parent, unsigned int time );
virtual std::string type() const;
float value() const;
int group() const;
private:
WarehouseBuff();
};
class FactoryProgressUpdater : public ConstructionExtension
{
public:
static ConstructionExtensionPtr create();
static ConstructionExtensionPtr assignTo(FactoryPtr factory, float value, int week2finish );
static ConstructionExtensionPtr uniqueTo(FactoryPtr factory, float value, int week2finish, const std::string& name );
virtual void timeStep( ConstructionPtr parent, unsigned int time );
virtual std::string type() const;
private:
FactoryProgressUpdater();
};
class ConstructionParamUpdater : public ConstructionExtension
{
public:
static ConstructionExtensionPtr create();
static ConstructionExtensionPtr assignTo(ConstructionPtr construction, Param paramName, bool relative, int value, int week2finish );
virtual void timeStep(ConstructionPtr parent, unsigned int time);
virtual std::string type() const;
virtual void destroy(ConstructionPtr parent);
private:
ConstructionParamUpdater();
};
class FortCurseByMars : public ConstructionExtension
{
public:
static ConstructionExtensionPtr create();
static ConstructionExtensionPtr assignTo( FortPtr fort, unsigned int monthsCurse );
virtual void timeStep( ConstructionPtr parent, unsigned int time );
virtual std::string type() const;
private:
FortCurseByMars();
};
class ExtensionsFactory
{
public:
virtual ~ExtensionsFactory();
static ExtensionsFactory& instance();
static ConstructionExtensionPtr create(std::string type);
static ConstructionExtensionPtr create(const VariantMap& stream);
private:
ExtensionsFactory();
class Impl;
ScopedPtr<Impl> _d;
};
#endif //__CAESARIA_CONSTRUCTION_EXTENSION_H_INCLUDED__
@using System.Linq
@using Mvc.JQuery.DataTables
@using Newtonsoft.Json
@using Newtonsoft.Json.Linq
@model DataTableConfigVm
<table id="@Model.Id" class="display @(Model.TableClass ?? DataTableConfigVm.DefaultTableClass ?? "")" @Html.Raw(Model.FixedLayout ? "style='table-layout:fixed'":"")>
<thead>
@if (Model.UseColumnFilterPlugin)
{
<tr>
@foreach (var column in Model.Columns)
{
<th >@column.DisplayName</th>
}
</tr>
}
@if (!Model.HideHeaders)
{
<tr>
@foreach (var column in Model.Columns)
{
<th class="@column.CssClassHeader">@column.DisplayName</th>
}
</tr>
}
</thead>
<tbody>
<tr>
<td colspan="@Model.Columns.Count()" class="dataTables_empty">Loading data from server
</td>
</tr>
</tbody>
</table>
<script type="text/javascript">
(function setDataTable() {
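        // Poll every 100ms until jQuery and the DataTables plugin have
        // loaded, since this inline script may run before their script tags.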
if(!window.jQuery || !$.fn.DataTable) {
setTimeout(setDataTable, 100);
return;
}
var $table = $('#@Model.Id');
@{
var options = new JObject();
options["aaSorting"] = new JRaw(Model.ColumnSortingString);
options["bProcessing"] = true;
options["bStateSave"] = Model.StateSave;
options["bServerSide"] = true;
options["bFilter"] = Model.Filter;
options["sDom"] = Model.Dom;
if (Model.LengthMenu != null)
{
options["lengthMenu"] = new JRaw(Model.LengthMenu);
}
if (Model.PageLength.HasValue)
{
options["pageLength"] = Model.PageLength;
}
options["bAutoWidth"] = Model.AutoWidth;
options["sAjaxSource"] = Model.AjaxUrl;
if (Model.TableTools)
{
options["oTableTools"] = new JRaw("{ 'sSwfPath': '//cdn.datatables.net/tabletools/2.2.1/swf/copy_csv_xls_pdf.swf' }");
}
options["fnServerData"] = new JRaw(
"function(sSource, aoData, fnCallback) { " +
" var ajaxOptions = { 'dataType': 'json', 'type': 'POST', 'url': sSource, 'data': aoData, 'success': fnCallback }; " +
(Model.AjaxErrorHandler == null ? "" : ("ajaxOptions['error'] = " + Model.AjaxErrorHandler) + "; ") +
" $.ajax(ajaxOptions);" +
"}");
options["aoColumnDefs"] = new JRaw(Model.ColumnDefsString);
options["aoSearchCols"] = Model.SearchCols;
if (Model.JsOptions.Any())
{
foreach (var jsOption in Model.JsOptions)
{
options[jsOption.Key] = new JRaw(jsOption.Value);
}
}
if (!string.IsNullOrWhiteSpace(Model.Language))
{
options["oLanguage"] = new JRaw(Model.Language);
}
if (!string.IsNullOrWhiteSpace(Model.DrawCallback))
{
options["fnDrawCallback"] = new JRaw(Model.DrawCallback);
}
}
var dt = $table.dataTable(@Html.Raw(options.ToString(Formatting.Indented)));
@if (Model.UseColumnFilterPlugin)
{
@Html.Raw("dt.columnFilter(" + Model.ColumnFilterVm + ");")
}
@if (Model.GlobalJsVariableName != null)
{
@Html.Raw("window['" + Model.GlobalJsVariableName + "'] = dt;")
}
})();
</script>
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"sync"
"go.opencensus.io/trace/internal"
)
// Config represents the global tracing configuration.
type Config struct {
// DefaultSampler is the default sampler used when creating new spans.
DefaultSampler Sampler
// IDGenerator is for internal use only.
IDGenerator internal.IDGenerator
}
var configWriteMu sync.Mutex
// ApplyConfig applies changes to the global tracing configuration.
//
// Fields not provided in the given config are going to be preserved.
func ApplyConfig(cfg Config) {
configWriteMu.Lock()
defer configWriteMu.Unlock()
c := *config.Load().(*Config)
if cfg.DefaultSampler != nil {
c.DefaultSampler = cfg.DefaultSampler
}
if cfg.IDGenerator != nil {
c.IDGenerator = cfg.IDGenerator
}
config.Store(&c)
}
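A minimal sketch of calling ApplyConfig from application code. ProbabilitySampler and StartSpan are part of the public go.opencensus.io/trace API; the span name "example" is illustrative.

package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	// Override only the sampler; per ApplyConfig's contract the existing
	// IDGenerator is preserved because it is not set here.
	trace.ApplyConfig(trace.Config{
		DefaultSampler: trace.ProbabilitySampler(0.1),
	})

	_, span := trace.StartSpan(context.Background(), "example")
	span.End()
}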