Dataset columns:
  code        string  (length 3 – 1.05M)
  repo_name   string  (length 4 – 116)
  path        string  (length 3 – 942)
  language    string  (30 classes)
  license     string  (15 classes)
  size        int32   (3 – 1.05M)
"""Support gathering system information of hosts which are running netdata.""" from datetime import timedelta import logging from netdata import Netdata from netdata.exceptions import NetdataError import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import ( CONF_HOST, CONF_ICON, CONF_NAME, CONF_PORT, CONF_RESOURCES, PERCENTAGE, ) from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1) CONF_DATA_GROUP = "data_group" CONF_ELEMENT = "element" CONF_INVERT = "invert" DEFAULT_HOST = "localhost" DEFAULT_NAME = "Netdata" DEFAULT_PORT = 19999 DEFAULT_ICON = "mdi:desktop-classic" RESOURCE_SCHEMA = vol.Any( { vol.Required(CONF_DATA_GROUP): cv.string, vol.Required(CONF_ELEMENT): cv.string, vol.Optional(CONF_ICON, default=DEFAULT_ICON): cv.icon, vol.Optional(CONF_INVERT, default=False): cv.boolean, } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Required(CONF_RESOURCES): vol.Schema({cv.string: RESOURCE_SCHEMA}), } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Netdata sensor.""" name = config.get(CONF_NAME) host = config.get(CONF_HOST) port = config.get(CONF_PORT) resources = config.get(CONF_RESOURCES) netdata = NetdataData(Netdata(host, port=port)) await netdata.async_update() if netdata.api.metrics is None: raise PlatformNotReady dev = [] for entry, data in resources.items(): icon = data[CONF_ICON] sensor = data[CONF_DATA_GROUP] element = data[CONF_ELEMENT] invert = data[CONF_INVERT] sensor_name = entry try: resource_data = netdata.api.metrics[sensor] unit = ( PERCENTAGE if resource_data["units"] == "percentage" else resource_data["units"] ) except KeyError: _LOGGER.error("Sensor is not available: %s", sensor) continue dev.append( NetdataSensor( netdata, name, sensor, sensor_name, element, icon, unit, invert ) ) dev.append(NetdataAlarms(netdata, name, host, port)) async_add_entities(dev, True) class NetdataSensor(SensorEntity): """Implementation of a Netdata sensor.""" def __init__(self, netdata, name, sensor, sensor_name, element, icon, unit, invert): """Initialize the Netdata sensor.""" self.netdata = netdata self._state = None self._sensor = sensor self._element = element self._sensor_name = self._sensor if sensor_name is None else sensor_name self._name = name self._icon = icon self._unit_of_measurement = unit self._invert = invert @property def name(self): """Return the name of the sensor.""" return f"{self._name} {self._sensor_name}" @property def native_unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit_of_measurement @property def icon(self): """Return the icon to use in the frontend, if any.""" return self._icon @property def native_value(self): """Return the state of the resources.""" return self._state @property def available(self): """Could the resource be accessed during the last update call.""" return self.netdata.available async def async_update(self): """Get the latest data from Netdata REST API.""" await self.netdata.async_update() resource_data = self.netdata.api.metrics.get(self._sensor) self._state = round(resource_data["dimensions"][self._element]["value"], 2) * ( 
-1 if self._invert else 1 ) class NetdataAlarms(SensorEntity): """Implementation of a Netdata alarm sensor.""" def __init__(self, netdata, name, host, port): """Initialize the Netdata alarm sensor.""" self.netdata = netdata self._state = None self._name = name self._host = host self._port = port @property def name(self): """Return the name of the sensor.""" return f"{self._name} Alarms" @property def native_value(self): """Return the state of the resources.""" return self._state @property def icon(self): """Status symbol if type is symbol.""" if self._state == "ok": return "mdi:check" if self._state == "warning": return "mdi:alert-outline" if self._state == "critical": return "mdi:alert" return "mdi:crosshairs-question" @property def available(self): """Could the resource be accessed during the last update call.""" return self.netdata.available async def async_update(self): """Get the latest alarms from Netdata REST API.""" await self.netdata.async_update() alarms = self.netdata.api.alarms["alarms"] self._state = None number_of_alarms = len(alarms) number_of_relevant_alarms = number_of_alarms _LOGGER.debug("Host %s has %s alarms", self.name, number_of_alarms) for alarm in alarms: if alarms[alarm]["recipient"] == "silent": number_of_relevant_alarms = number_of_relevant_alarms - 1 elif alarms[alarm]["status"] == "CLEAR": number_of_relevant_alarms = number_of_relevant_alarms - 1 elif alarms[alarm]["status"] == "UNDEFINED": number_of_relevant_alarms = number_of_relevant_alarms - 1 elif alarms[alarm]["status"] == "UNINITIALIZED": number_of_relevant_alarms = number_of_relevant_alarms - 1 elif alarms[alarm]["status"] == "CRITICAL": self._state = "critical" return self._state = "ok" if number_of_relevant_alarms == 0 else "warning" class NetdataData: """The class for handling the data retrieval.""" def __init__(self, api): """Initialize the data object.""" self.api = api self.available = True @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self): """Get the latest data from the Netdata REST API.""" try: await self.api.get_allmetrics() await self.api.get_alarms() self.available = True except NetdataError: _LOGGER.error("Unable to retrieve data from Netdata") self.available = False
jawilson/home-assistant
homeassistant/components/netdata/sensor.py
Python
apache-2.0
7,027
/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.jboss.netty.channel.socket.nio;

import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.socket.DatagramChannelConfig;
import org.jboss.netty.channel.socket.InternetProtocolFamily;
import org.jboss.netty.util.internal.DetectionUtil;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.SocketAddress;
import java.net.SocketException;
import java.nio.channels.DatagramChannel;
import java.nio.channels.MembershipKey;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.jboss.netty.channel.Channels.*;

/**
 * Provides an NIO based {@link org.jboss.netty.channel.socket.DatagramChannel}.
 */
public class NioDatagramChannel extends AbstractNioChannel<DatagramChannel>
        implements org.jboss.netty.channel.socket.DatagramChannel {

    /**
     * The {@link DatagramChannelConfig}.
     */
    private final NioDatagramChannelConfig config;
    private Map<InetAddress, List<MembershipKey>> memberships;

    NioDatagramChannel(final ChannelFactory factory,
            final ChannelPipeline pipeline, final ChannelSink sink,
            final NioDatagramWorker worker, InternetProtocolFamily family) {
        super(null, factory, pipeline, sink, worker, openNonBlockingChannel(family));
        config = new DefaultNioDatagramChannelConfig(channel);
        fireChannelOpen(this);
    }

    private static DatagramChannel openNonBlockingChannel(InternetProtocolFamily family) {
        try {
            final DatagramChannel channel;

            // check if we are on java 7 or if the family was not specified
            if (DetectionUtil.javaVersion() < 7 || family == null) {
                channel = DatagramChannel.open();
            } else {
                // This block only works on java7++, but we checked before if we have it.
                //
                // Use the ProtocolFamilyConvert for conversion to prevent NoClassDefFoundError.
                //
                // See #368
                switch (family) {
                case IPv4:
                    channel = DatagramChannel.open(ProtocolFamilyConverter.convert(family));
                    break;
                case IPv6:
                    channel = DatagramChannel.open(ProtocolFamilyConverter.convert(family));
                    break;
                default:
                    throw new IllegalArgumentException();
                }
            }

            channel.configureBlocking(false);
            return channel;
        } catch (final IOException e) {
            throw new ChannelException("Failed to open a DatagramChannel.", e);
        }
    }

    @Override
    public NioDatagramWorker getWorker() {
        return (NioDatagramWorker) super.getWorker();
    }

    public boolean isBound() {
        return isOpen() && channel.socket().isBound();
    }

    public boolean isConnected() {
        return channel.isConnected();
    }

    @Override
    protected boolean setClosed() {
        return super.setClosed();
    }

    @Override
    public NioDatagramChannelConfig getConfig() {
        return config;
    }

    DatagramChannel getDatagramChannel() {
        return channel;
    }

    public ChannelFuture joinGroup(InetAddress multicastAddress) {
        try {
            return joinGroup(
                    multicastAddress,
                    NetworkInterface.getByInetAddress(getLocalAddress().getAddress()), null);
        } catch (SocketException e) {
            return failedFuture(this, e);
        }
    }

    public ChannelFuture joinGroup(InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
        return joinGroup(multicastAddress.getAddress(), networkInterface, null);
    }

    /**
     * Joins the specified multicast group at the specified interface using the specified source.
     */
    public ChannelFuture joinGroup(
            InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
        if (DetectionUtil.javaVersion() < 7) {
            throw new UnsupportedOperationException();
        }
        if (multicastAddress == null) {
            throw new NullPointerException("multicastAddress");
        }
        if (networkInterface == null) {
            throw new NullPointerException("networkInterface");
        }

        try {
            MembershipKey key;
            if (source == null) {
                key = channel.join(multicastAddress, networkInterface);
            } else {
                key = channel.join(multicastAddress, networkInterface, source);
            }

            synchronized (this) {
                if (memberships == null) {
                    memberships = new HashMap<InetAddress, List<MembershipKey>>();
                }
                List<MembershipKey> keys = memberships.get(multicastAddress);
                if (keys == null) {
                    keys = new ArrayList<MembershipKey>();
                    memberships.put(multicastAddress, keys);
                }
                keys.add(key);
            }
        } catch (Throwable e) {
            return failedFuture(this, e);
        }
        return succeededFuture(this);
    }

    public ChannelFuture leaveGroup(InetAddress multicastAddress) {
        try {
            return leaveGroup(
                    multicastAddress,
                    NetworkInterface.getByInetAddress(getLocalAddress().getAddress()), null);
        } catch (SocketException e) {
            return failedFuture(this, e);
        }
    }

    public ChannelFuture leaveGroup(InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
        return leaveGroup(multicastAddress.getAddress(), networkInterface, null);
    }

    /**
     * Leave the specified multicast group at the specified interface using the specified source.
     */
    public ChannelFuture leaveGroup(InetAddress multicastAddress,
            NetworkInterface networkInterface, InetAddress source) {
        if (DetectionUtil.javaVersion() < 7) {
            throw new UnsupportedOperationException();
        } else {
            if (multicastAddress == null) {
                throw new NullPointerException("multicastAddress");
            }
            if (networkInterface == null) {
                throw new NullPointerException("networkInterface");
            }

            synchronized (this) {
                if (memberships != null) {
                    List<MembershipKey> keys = memberships.get(multicastAddress);
                    if (keys != null) {
                        Iterator<MembershipKey> keyIt = keys.iterator();
                        while (keyIt.hasNext()) {
                            MembershipKey key = keyIt.next();
                            if (networkInterface.equals(key.networkInterface())) {
                                if (source == null && key.sourceAddress() == null ||
                                        source != null && source.equals(key.sourceAddress())) {
                                    key.drop();
                                    keyIt.remove();
                                }
                            }
                        }
                        if (keys.isEmpty()) {
                            memberships.remove(multicastAddress);
                        }
                    }
                }
            }
            return succeededFuture(this);
        }
    }

    /**
     * Block the given sourceToBlock address for the given multicastAddress on the given networkInterface
     *
     */
    public ChannelFuture block(InetAddress multicastAddress,
            NetworkInterface networkInterface, InetAddress sourceToBlock) {
        if (DetectionUtil.javaVersion() < 7) {
            throw new UnsupportedOperationException();
        } else {
            if (multicastAddress == null) {
                throw new NullPointerException("multicastAddress");
            }
            if (sourceToBlock == null) {
                throw new NullPointerException("sourceToBlock");
            }
            if (networkInterface == null) {
                throw new NullPointerException("networkInterface");
            }

            synchronized (this) {
                if (memberships != null) {
                    List<MembershipKey> keys = memberships.get(multicastAddress);
                    for (MembershipKey key: keys) {
                        if (networkInterface.equals(key.networkInterface())) {
                            try {
                                key.block(sourceToBlock);
                            } catch (IOException e) {
                                return failedFuture(this, e);
                            }
                        }
                    }
                }
            }
            return succeededFuture(this);
        }
    }

    /**
     * Block the given sourceToBlock address for the given multicastAddress
     *
     */
    public ChannelFuture block(InetAddress multicastAddress, InetAddress sourceToBlock) {
        try {
            block(multicastAddress,
                    NetworkInterface.getByInetAddress(getLocalAddress().getAddress()), sourceToBlock);
        } catch (SocketException e) {
            return failedFuture(this, e);
        }
        return succeededFuture(this);
    }

    @Override
    InetSocketAddress getLocalSocketAddress() throws Exception {
        return (InetSocketAddress) channel.socket().getLocalSocketAddress();
    }

    @Override
    InetSocketAddress getRemoteSocketAddress() throws Exception {
        return (InetSocketAddress) channel.socket().getRemoteSocketAddress();
    }

    @Override
    public ChannelFuture write(Object message, SocketAddress remoteAddress) {
        if (remoteAddress == null || remoteAddress.equals(getRemoteAddress())) {
            return super.write(message, null);
        } else {
            return super.write(message, remoteAddress);
        }
    }
}
CliffYuan/netty
src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramChannel.java
Java
apache-2.0
10,921
package org.jgroups.blocks; import org.jgroups.logging.Log; import org.jgroups.Address; import org.jgroups.Global; import org.jgroups.stack.IpAddress; import org.jgroups.util.ShutdownRejectedExecutionHandler; import java.io.IOException; import java.net.*; import java.nio.ByteBuffer; import java.nio.channels.*; import java.nio.channels.spi.SelectorProvider; import java.util.*; import java.util.concurrent.*; /** * Manages incoming and outgoing TCP connections. For each outgoing message to destination P, if there * is not yet a connection for P, one will be created. Subsequent outgoing messages will use this * connection. For incoming messages, one server socket is created at startup. For each new incoming * client connecting, a new thread from a thread pool is allocated and listens for incoming messages * until the socket is closed by the peer.<br>Sockets/threads with no activity will be killed * after some time. * <p/> * Incoming messages from any of the sockets can be received by setting the message listener. * * @author Bela Ban, Scott Marlow, Alex Fu */ public class ConnectionTableNIO extends BasicConnectionTable implements Runnable { private ServerSocketChannel m_serverSocketChannel; private Selector m_acceptSelector; private WriteHandler[] m_writeHandlers; private int m_nextWriteHandler = 0; private final Object m_lockNextWriteHandler = new Object(); private ReadHandler[] m_readHandlers; private int m_nextReadHandler = 0; private final Object m_lockNextReadHandler = new Object(); // thread pool for processing read requests private Executor m_requestProcessors; private volatile boolean serverStopping=false; private final List<Thread> m_backGroundThreads = new LinkedList<Thread>(); // Collection of all created threads private int m_reader_threads = 3; private int m_writer_threads = 3; private int m_processor_threads = 5; // PooledExecutor.createThreads() private int m_processor_minThreads = 5; // PooledExecutor.setMinimumPoolSize() private int m_processor_maxThreads = 5; // PooledExecutor.setMaxThreads() private int m_processor_queueSize=100; // Number of queued requests that can be pending waiting // for a background thread to run the request. 
private long m_processor_keepAliveTime = Long.MAX_VALUE; // PooledExecutor.setKeepAliveTime( milliseconds); // negative value used to mean to wait forever, instead set to Long.MAX_VALUE to wait forever /** * @param srv_port * @throws Exception */ public ConnectionTableNIO(int srv_port) throws Exception { this.srv_port=srv_port; start(); } /** * @param srv_port * @param reaper_interval * @param conn_expire_time * @throws Exception */ public ConnectionTableNIO(int srv_port, long reaper_interval, long conn_expire_time) throws Exception { this.srv_port=srv_port; this.reaper_interval=reaper_interval; this.conn_expire_time=conn_expire_time; start(); } /** * @param r * @param bind_addr * @param external_addr * @param srv_port * @param max_port * @throws Exception */ public ConnectionTableNIO(Receiver r, InetAddress bind_addr, InetAddress external_addr, int external_port, int srv_port, int max_port) throws Exception { setReceiver(r); this.external_addr=external_addr; this.external_port=external_port; this.bind_addr=bind_addr; this.srv_port=srv_port; this.max_port=max_port; use_reaper=true; start(); } public ConnectionTableNIO(Receiver r, InetAddress bind_addr, InetAddress external_addr, int external_port, int srv_port, int max_port, boolean doStart) throws Exception { setReceiver(r); this.external_addr=external_addr; this.external_port=external_port; this.bind_addr=bind_addr; this.srv_port=srv_port; this.max_port=max_port; use_reaper=true; if(doStart) start(); } /** * @param r * @param bind_addr * @param external_addr * @param srv_port * @param max_port * @param reaper_interval * @param conn_expire_time * @throws Exception */ public ConnectionTableNIO(Receiver r, InetAddress bind_addr, InetAddress external_addr, int external_port, int srv_port, int max_port, long reaper_interval, long conn_expire_time ) throws Exception { setReceiver(r); this.bind_addr=bind_addr; this.external_addr=external_addr; this.external_port=external_port; this.srv_port=srv_port; this.max_port=max_port; this.reaper_interval=reaper_interval; this.conn_expire_time=conn_expire_time; use_reaper=true; start(); } public ConnectionTableNIO(Receiver r, InetAddress bind_addr, InetAddress external_addr, int external_port, int srv_port, int max_port, long reaper_interval, long conn_expire_time, boolean doStart ) throws Exception { setReceiver(r); this.bind_addr=bind_addr; this.external_addr=external_addr; this.external_port=external_port; this.srv_port=srv_port; this.max_port=max_port; this.reaper_interval=reaper_interval; this.conn_expire_time=conn_expire_time; use_reaper=true; if(doStart) start(); } public int getReaderThreads() { return m_reader_threads; } public void setReaderThreads(int m_reader_threads) { this.m_reader_threads=m_reader_threads; } public int getWriterThreads() { return m_writer_threads; } public void setWriterThreads(int m_writer_threads) { this.m_writer_threads=m_writer_threads; } public int getProcessorThreads() { return m_processor_threads; } public void setProcessorThreads(int m_processor_threads) { this.m_processor_threads=m_processor_threads; } public int getProcessorMinThreads() { return m_processor_minThreads;} public void setProcessorMinThreads(int m_processor_minThreads) { this.m_processor_minThreads=m_processor_minThreads; } public int getProcessorMaxThreads() { return m_processor_maxThreads;} public void setProcessorMaxThreads(int m_processor_maxThreads) { this.m_processor_maxThreads=m_processor_maxThreads; } public int getProcessorQueueSize() { return m_processor_queueSize; } public void 
setProcessorQueueSize(int m_processor_queueSize) { this.m_processor_queueSize=m_processor_queueSize; } public long getProcessorKeepAliveTime() { return m_processor_keepAliveTime; } public void setProcessorKeepAliveTime(long m_processor_keepAliveTime) { this.m_processor_keepAliveTime=m_processor_keepAliveTime; } /** * Try to obtain correct Connection (or create one if not yet existent) */ BasicConnectionTable.Connection getConnection(Address dest) throws Exception { Connection conn; SocketChannel sock_ch; synchronized (conns) { conn = (Connection) conns.get(dest); if (conn == null) { InetSocketAddress destAddress = new InetSocketAddress(((IpAddress) dest).getIpAddress(), ((IpAddress) dest).getPort()); sock_ch = SocketChannel.open(destAddress); sock_ch.socket().setTcpNoDelay(tcp_nodelay); conn = new Connection(sock_ch, dest); conn.sendLocalAddress(local_addr); // This outbound connection is ready sock_ch.configureBlocking(false); try { if (log.isTraceEnabled()) log.trace("About to change new connection send buff size from " + sock_ch.socket().getSendBufferSize() + " bytes"); sock_ch.socket().setSendBufferSize(send_buf_size); if (log.isTraceEnabled()) log.trace("Changed new connection send buff size to " + sock_ch.socket().getSendBufferSize() + " bytes"); } catch (IllegalArgumentException ex) { if (log.isErrorEnabled()) log.error("exception setting send buffer size to " + send_buf_size + " bytes: " + ex); } try { if (log.isTraceEnabled()) log.trace("About to change new connection receive buff size from " + sock_ch.socket().getReceiveBufferSize() + " bytes"); sock_ch.socket().setReceiveBufferSize(recv_buf_size); if (log.isTraceEnabled()) log.trace("Changed new connection receive buff size to " + sock_ch.socket().getReceiveBufferSize() + " bytes"); } catch (IllegalArgumentException ex) { if (log.isErrorEnabled()) log.error("exception setting receive buffer size to " + send_buf_size + " bytes: " + ex); } int idx; synchronized (m_lockNextWriteHandler) { idx = m_nextWriteHandler = (m_nextWriteHandler + 1) % m_writeHandlers.length; } conn.setupWriteHandler(m_writeHandlers[idx]); // Put the new connection to the queue try { synchronized (m_lockNextReadHandler) { idx = m_nextReadHandler = (m_nextReadHandler + 1) % m_readHandlers.length; } m_readHandlers[idx].add(conn); } catch (InterruptedException e) { if (log.isWarnEnabled()) log.warn("Thread (" +Thread.currentThread().getName() + ") was interrupted, closing connection", e); // What can we do? Remove it from table then. conn.destroy(); throw e; } // Add connection to table addConnection(dest, conn); notifyConnectionOpened(dest); if (log.isTraceEnabled()) log.trace("created socket to " + dest); } return conn; } } public final void start() throws Exception { super.start(); init(); srv_sock=createServerSocket(srv_port, max_port); if (external_addr!=null) { local_addr=new IpAddress(external_addr, external_port == 0? 
srv_sock.getLocalPort() : external_port); } else if (bind_addr != null) local_addr=new IpAddress(bind_addr, srv_sock.getLocalPort()); else local_addr=new IpAddress(srv_sock.getLocalPort()); if(log.isDebugEnabled()) log.debug("server socket created on " + local_addr); // Roland Kurmann 4/7/2003, put in thread_group -- removed bela Nov 2012 acceptor=getThreadFactory().newThread(this, "ConnectionTable.AcceptorThread"); acceptor.setDaemon(true); acceptor.start(); m_backGroundThreads.add(acceptor); // start the connection reaper - will periodically remove unused connections if(use_reaper && reaper == null) { reaper=new Reaper(); reaper.start(); } } protected void init() throws Exception { // use directExector if max thread pool size is less than or equal to zero. if(getProcessorMaxThreads() <= 0) { m_requestProcessors = new Executor() { public void execute(Runnable command) { command.run(); } }; } else { // Create worker thread pool for processing incoming buffers ThreadPoolExecutor requestProcessors = new ThreadPoolExecutor(getProcessorMinThreads(), getProcessorMaxThreads(), getProcessorKeepAliveTime(), TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(getProcessorQueueSize())); requestProcessors.setThreadFactory(new ThreadFactory() { public Thread newThread(Runnable runnable) { Thread new_thread=new Thread(runnable, "ConnectionTableNIO.Thread"); new_thread.setDaemon(true); m_backGroundThreads.add(new_thread); return new_thread; } }); requestProcessors.setRejectedExecutionHandler(new ShutdownRejectedExecutionHandler(requestProcessors.getRejectedExecutionHandler())); m_requestProcessors = requestProcessors; } m_writeHandlers = WriteHandler.create(getThreadFactory(),getWriterThreads(), m_backGroundThreads, log); m_readHandlers = ReadHandler.create(getThreadFactory(),getReaderThreads(), this, m_backGroundThreads, log); } /** * Closes all open sockets, the server socket and all threads waiting for incoming messages */ public void stop() { super.stop(); serverStopping = true; if(reaper != null) reaper.stop(); // Stop the main selector if(m_acceptSelector != null) m_acceptSelector.wakeup(); // Stop selector threads if(m_readHandlers != null) { for (int i = 0; i < m_readHandlers.length; i++) { try { m_readHandlers[i].add(new Shutdown()); } catch (InterruptedException e) { log.error("Thread ("+Thread.currentThread().getName() +") was interrupted, failed to shutdown selector", e); } } } if(m_writeHandlers != null) { for (int i = 0; i < m_writeHandlers.length; i++) { try { m_writeHandlers[i].queue.put(new Shutdown()); m_writeHandlers[i].selector.wakeup(); } catch (InterruptedException e) { log.error("Thread ("+Thread.currentThread().getName() +") was interrupted, failed to shutdown selector", e); } } } // Stop the callback thread pool if(m_requestProcessors instanceof ThreadPoolExecutor) ((ThreadPoolExecutor)m_requestProcessors).shutdownNow(); if(m_requestProcessors instanceof ThreadPoolExecutor){ try{ ((ThreadPoolExecutor) m_requestProcessors).awaitTermination(Global.THREADPOOL_SHUTDOWN_WAIT_TIME, TimeUnit.MILLISECONDS); }catch(InterruptedException e){ } } // then close the connections synchronized(conns) { Iterator it=conns.values().iterator(); while(it.hasNext()) { Connection conn=(Connection)it.next(); conn.destroy(); } conns.clear(); } while(!m_backGroundThreads.isEmpty()) { Thread t =m_backGroundThreads.remove(0); try { t.join(); } catch(InterruptedException e) { log.error("Thread ("+Thread.currentThread().getName() +") was interrupted while waiting on thread " + t.getName() + " to 
finish."); } } m_backGroundThreads.clear(); } /** * Acceptor thread. Continuously accept new connections and assign readhandler/writehandler * to them. */ public void run() { Connection conn; while(m_serverSocketChannel.isOpen() && !serverStopping) { int num; try { num=m_acceptSelector.select(); } catch(IOException e) { if(log.isWarnEnabled()) log.warn("Select operation on listening socket failed", e); continue; // Give up this time } if(num > 0) { Set<SelectionKey> readyKeys=m_acceptSelector.selectedKeys(); for(Iterator<SelectionKey> i=readyKeys.iterator(); i.hasNext();) { SelectionKey key=i.next(); i.remove(); // We only deal with new incoming connections ServerSocketChannel readyChannel=(ServerSocketChannel)key.channel(); SocketChannel client_sock_ch; try { client_sock_ch=readyChannel.accept(); } catch(IOException e) { if(log.isWarnEnabled()) log.warn("Attempt to accept new connection from listening socket failed", e); // Give up this connection continue; } if(log.isTraceEnabled()) log.trace("accepted connection, client_sock=" + client_sock_ch.socket()); try { client_sock_ch.socket().setSendBufferSize(send_buf_size); } catch(IllegalArgumentException ex) { if(log.isErrorEnabled()) log.error("exception setting send buffer size to " + send_buf_size + " bytes: ", ex); } catch(SocketException e) { if(log.isErrorEnabled()) log.error("exception setting send buffer size to " + send_buf_size + " bytes: ", e); } try { client_sock_ch.socket().setReceiveBufferSize(recv_buf_size); } catch(IllegalArgumentException ex) { if(log.isErrorEnabled()) log.error("exception setting receive buffer size to " + send_buf_size + " bytes: ", ex); } catch(SocketException e) { if(log.isErrorEnabled()) log.error("exception setting receive buffer size to " + recv_buf_size + " bytes: ", e); } conn=new Connection(client_sock_ch, null); try { Address peer_addr=conn.readPeerAddress(client_sock_ch.socket()); conn.peer_addr=peer_addr; synchronized(conns) { Connection tmp=(Connection)conns.get(peer_addr); if(tmp != null) { if(peer_addr.compareTo(local_addr) > 0) { if(log.isTraceEnabled()) log.trace("peer's address (" + peer_addr + ") is greater than our local address (" + local_addr + "), replacing our existing connection"); // peer's address is greater, add peer's connection to ConnectionTable, destroy existing connection addConnection(peer_addr, conn); tmp.destroy(); notifyConnectionOpened(peer_addr); } else { if(log.isTraceEnabled()) log.trace("peer's address (" + peer_addr + ") is smaller than our local address (" + local_addr + "), rejecting peer connection request"); conn.destroy(); continue; } } else { addConnection(peer_addr, conn); } } notifyConnectionOpened(peer_addr); client_sock_ch.configureBlocking(false); } catch(IOException e) { if(log.isWarnEnabled()) log.warn("Attempt to configure non-blocking mode failed", e); conn.destroy(); continue; } catch(Exception e) { if(log.isWarnEnabled()) log.warn("Attempt to handshake with other peer failed", e); conn.destroy(); continue; } int idx; synchronized(m_lockNextWriteHandler) { idx=m_nextWriteHandler=(m_nextWriteHandler + 1) % m_writeHandlers.length; } conn.setupWriteHandler(m_writeHandlers[idx]); try { synchronized(m_lockNextReadHandler) { idx=m_nextReadHandler=(m_nextReadHandler + 1) % m_readHandlers.length; } m_readHandlers[idx].add(conn); } catch(InterruptedException e) { if(log.isWarnEnabled()) log.warn("Attempt to configure read handler for accepted connection failed", e); // close connection conn.destroy(); } } // end of iteration } // end of selected key > 0 } // 
end of thread if(m_serverSocketChannel.isOpen()) { try { m_serverSocketChannel.close(); } catch(Exception e) { log.error("exception closing server listening socket", e); } } if(log.isTraceEnabled()) log.trace("acceptor thread terminated"); } /** * Finds first available port starting at start_port and returns server socket. Sets srv_port */ protected ServerSocket createServerSocket(int start_port, int end_port) throws Exception { this.m_acceptSelector = Selector.open(); m_serverSocketChannel = ServerSocketChannel.open(); m_serverSocketChannel.configureBlocking(false); while (true) { try { SocketAddress sockAddr; if (bind_addr == null) { sockAddr=new InetSocketAddress(start_port); m_serverSocketChannel.socket().bind(sockAddr); } else { sockAddr=new InetSocketAddress(bind_addr, start_port); m_serverSocketChannel.socket().bind(sockAddr, backlog); } } catch (BindException bind_ex) { if (start_port == end_port) throw (BindException) ((new BindException("No available port to bind to (start_port=" + start_port + ")")).initCause(bind_ex)); start_port++; continue; } catch (SocketException bind_ex) { if (start_port == end_port) throw (BindException) ((new BindException("No available port to bind to (start_port=" + start_port + ")")).initCause(bind_ex)); start_port++; continue; } catch (IOException io_ex) { log.error("Attempt to bind serversocket failed, port="+start_port+", bind addr=" + bind_addr ,io_ex); throw io_ex; } srv_port = start_port; break; } m_serverSocketChannel.register(this.m_acceptSelector, SelectionKey.OP_ACCEPT); return m_serverSocketChannel.socket(); } protected void runRequest(Address addr, ByteBuffer buf) throws InterruptedException { m_requestProcessors.execute(new ExecuteTask(addr, buf)); } // Represents shutdown private static class Shutdown { } // ReadHandler has selector to deal with read, it runs in seperated thread private static class ReadHandler implements Runnable { private final Selector selector= initHandler(); private final LinkedBlockingQueue<Object> queue= new LinkedBlockingQueue<Object>(); private final ConnectionTableNIO connectTable; private final Log log; ReadHandler(ConnectionTableNIO ct, Log log) { connectTable= ct; this.log=log; } public Selector initHandler() { // Open the selector try { return Selector.open(); } catch (IOException e) { if (log.isErrorEnabled()) log.error(e.toString()); throw new IllegalStateException(e.getMessage()); } } /** * create instances of ReadHandler threads for receiving data. * * @param workerThreads is the number of threads to create. 
*/ private static ReadHandler[] create(org.jgroups.util.ThreadFactory f,int workerThreads, ConnectionTableNIO ct, List<Thread> backGroundThreads, Log log) { ReadHandler[] handlers = new ReadHandler[workerThreads]; for (int looper = 0; looper < workerThreads; looper++) { handlers[looper] = new ReadHandler(ct, log); Thread thread = f.newThread(handlers[looper], "nioReadHandlerThread"); thread.setDaemon(true); thread.start(); backGroundThreads.add(thread); } return handlers; } private void add(Object conn) throws InterruptedException { queue.put(conn); wakeup(); } private void wakeup() { selector.wakeup(); } public void run() { while (true) { // m_s can be closed by the management thread int events; try { events = selector.select(); } catch (IOException e) { if (log.isWarnEnabled()) log.warn("Select operation on socket failed", e); continue; // Give up this time } catch (ClosedSelectorException e) { if (log.isWarnEnabled()) log.warn("Select operation on socket failed" , e); return; // Selector gets closed, thread stops } if (events > 0) { // there are read-ready channels Set readyKeys = selector.selectedKeys(); try { for (Iterator i = readyKeys.iterator(); i.hasNext();) { SelectionKey key = (SelectionKey) i.next(); i.remove(); // Do partial read and handle call back Connection conn = (Connection) key.attachment(); if(conn != null && conn.getSocketChannel() != null) { try { if (conn.getSocketChannel().isOpen()) readOnce(conn); else { // socket connection is already closed, clean up connection state conn.closed(); } } catch (IOException e) { if (log.isTraceEnabled()) log.trace("Read operation on socket failed" , e); // The connection must be bad, cancel the key, close socket, then // remove it from table! key.cancel(); conn.destroy(); conn.closed(); } } } } catch(ConcurrentModificationException e) { if (log.isTraceEnabled()) log.trace("Selection set changed", e); // valid events should still be in the selection set the next time } } // Now we look at the connection queue to get any new connections added Object o; try { o = queue.poll(0L, TimeUnit.MILLISECONDS); // get a connection } catch (InterruptedException e) { if (log.isTraceEnabled()) log.trace("Thread ("+Thread.currentThread().getName() +") was interrupted while polling queue" ,e); // We must give up continue; } if (null == o) continue; if (o instanceof Shutdown) { // shutdown command? try { selector.close(); } catch(IOException e) { if (log.isTraceEnabled()) log.trace("Read selector close operation failed" , e); } return; // stop reading } Connection conn = (Connection) o;// must be a new connection SocketChannel sc = conn.getSocketChannel(); try { sc.register(selector, SelectionKey.OP_READ, conn); } catch (ClosedChannelException e) { if (log.isTraceEnabled()) log.trace("Socket channel was closed while we were trying to register it to selector" , e); // Channel becomes bad. The connection must be bad, // close socket, then remove it from table! 
conn.destroy(); conn.closed(); } } // end of the while true loop } private void readOnce(Connection conn) throws IOException { ConnectionReadState readState = conn.getReadState(); if (!readState.isHeadFinished()) { // a brand new message coming or header is not completed // Begin or continue to read header int size = readHeader(conn); if (0 == size) { // header is not completed return; } } // Begin or continue to read body if (readBody(conn) > 0) { // not finish yet return; } Address addr = conn.getPeerAddress(); ByteBuffer buf = readState.getReadBodyBuffer(); // Clear status readState.bodyFinished(); // Assign worker thread to execute call back try { connectTable.runRequest(addr, buf); } catch (InterruptedException e) { // Cannot do call back, what can we do? // Give up handling the message then log.error("Thread ("+Thread.currentThread().getName() +") was interrupted while assigning executor to process read request" , e); } } /** * Read message header from channel. It doesn't try to complete. If there is nothing in * the channel, the method returns immediately. * * @param conn The connection * @return 0 if header hasn't been read completely, otherwise the size of message body * @throws IOException */ private int readHeader(Connection conn) throws IOException { ConnectionReadState readState = conn.getReadState(); ByteBuffer headBuf = readState.getReadHeadBuffer(); SocketChannel sc = conn.getSocketChannel(); while (headBuf.remaining() > 0) { int num = sc.read(headBuf); if (-1 == num) {// EOS throw new IOException("Peer closed socket"); } if (0 == num) // no more data return 0; } // OK, now we get the whole header, change the status and return message size return readState.headFinished(); } /** * Read message body from channel. It doesn't try to complete. If there is nothing in * the channel, the method returns immediately. * * @param conn The connection * @return remaining bytes for the message * @throws IOException */ private int readBody(Connection conn) throws IOException { ByteBuffer bodyBuf = conn.getReadState().getReadBodyBuffer(); SocketChannel sc = conn.getSocketChannel(); while (bodyBuf.remaining() > 0) { int num = sc.read(bodyBuf); if (-1 == num) // EOS throw new IOException("Couldn't read from socket as peer closed the socket"); if (0 == num) // no more data return bodyBuf.remaining(); } // OK, we finished reading the whole message! 
Flip it (not necessary though) bodyBuf.flip(); return 0; } } private class ExecuteTask implements Runnable { Address m_addr = null; ByteBuffer m_buf = null; public ExecuteTask(Address addr, ByteBuffer buf) { m_addr = addr; m_buf = buf; } public void run() { receive(m_addr, m_buf.array(), m_buf.arrayOffset(), m_buf.limit()); } } private class ConnectionReadState { private final Connection m_conn; // Status for receiving message private boolean m_headFinished = false; private ByteBuffer m_readBodyBuf = null; private final ByteBuffer m_readHeadBuf = ByteBuffer.allocate(Connection.HEADER_SIZE); public ConnectionReadState(Connection conn) { m_conn = conn; } ByteBuffer getReadBodyBuffer() { return m_readBodyBuf; } ByteBuffer getReadHeadBuffer() { return m_readHeadBuf; } void bodyFinished() { m_headFinished = false; m_readHeadBuf.clear(); m_readBodyBuf = null; m_conn.updateLastAccessed(); } /** * Status change for finishing reading the message header (data already in buffer) * * @return message size */ int headFinished() { m_headFinished = true; m_readHeadBuf.flip(); int messageSize = m_readHeadBuf.getInt(); m_readBodyBuf = ByteBuffer.allocate(messageSize); m_conn.updateLastAccessed(); return messageSize; } boolean isHeadFinished() { return m_headFinished; } } class Connection extends BasicConnectionTable.Connection { private SocketChannel sock_ch = null; private WriteHandler m_writeHandler; private SelectorWriteHandler m_selectorWriteHandler; private final ConnectionReadState m_readState; private static final int HEADER_SIZE = 4; final ByteBuffer headerBuffer = ByteBuffer.allocate(HEADER_SIZE); Connection(SocketChannel s, Address peer_addr) { super(s.socket(), peer_addr); sock_ch = s; m_readState = new ConnectionReadState(this); is_running=true; } private ConnectionReadState getReadState() { return m_readState; } private void setupWriteHandler(WriteHandler hdlr) { m_writeHandler = hdlr; m_selectorWriteHandler = hdlr.add(sock_ch); } void doSend(byte[] buffie, int offset, int length) throws Exception { MyFuture result = new MyFuture(); m_writeHandler.write(sock_ch, ByteBuffer.wrap(buffie, offset, length), result, m_selectorWriteHandler); Object ex = result.get(); if (ex instanceof Exception) { if (log.isErrorEnabled()) log.error("failed sending message", (Exception)ex); if (((Exception)ex).getCause() instanceof IOException) throw (IOException) ((Exception)ex).getCause(); throw (Exception)ex; } result.get(); } SocketChannel getSocketChannel() { return sock_ch; } synchronized void closeSocket() { if (sock_ch != null) { try { if(sock_ch.isConnected() && sock_ch.isOpen()) { sock_ch.close(); } } catch (Exception e) { log.error("error closing socket connection", e); } sock_ch = null; } } void closed() { Address peerAddr = getPeerAddress(); synchronized (conns) { conns.remove(peerAddr); } notifyConnectionClosed(peerAddr); } } /** * Handle writing to non-blocking NIO connection. */ private static class WriteHandler implements Runnable { // Create a queue for write requests (unbounded) private final LinkedBlockingQueue<Object> queue= new LinkedBlockingQueue<Object>(); private final Selector selector= initSelector(); private int m_pendingChannels; // count of the number of channels that have pending writes // note that this variable is only accessed by one thread. 
// allocate and reuse the header for all buffer write operations private ByteBuffer m_headerBuffer = ByteBuffer.allocate(Connection.HEADER_SIZE); private final Log log; public WriteHandler(Log log) { this.log=log; } Selector initSelector() { try { return SelectorProvider.provider().openSelector(); } catch (IOException e) { if (log.isErrorEnabled()) log.error(e.toString()); throw new IllegalStateException(e.getMessage()); } } /** * create instances of WriteHandler threads for sending data. * * @param workerThreads is the number of threads to create. */ private static WriteHandler[] create(org.jgroups.util.ThreadFactory f, int workerThreads, List<Thread> backGroundThreads, Log log) { WriteHandler[] handlers = new WriteHandler[workerThreads]; for (int looper = 0; looper < workerThreads; looper++) { handlers[looper] = new WriteHandler(log); Thread thread = f.newThread(handlers[looper], "nioWriteHandlerThread"); thread.setDaemon(true); thread.start(); backGroundThreads.add(thread); } return handlers; } /** * Add a new channel to be handled. * * @param channel */ private SelectorWriteHandler add(SocketChannel channel) { return new SelectorWriteHandler(channel, selector, m_headerBuffer); } /** * Writes buffer to the specified socket connection. This is always performed asynchronously. If you want * to perform a synchrounous write, call notification.`get() which will block until the write operation is complete. * Best practice is to call notification.getException() which may return any exceptions that occured during the write * operation. * * @param channel is where the buffer is written to. * @param buffer is what we write. * @param notification may be specified if you want to know how many bytes were written and know if an exception * occurred. */ private void write(SocketChannel channel, ByteBuffer buffer, MyFuture notification, SelectorWriteHandler hdlr) throws InterruptedException { queue.put(new WriteRequest(channel, buffer, notification, hdlr)); } private static void close(SelectorWriteHandler entry) { entry.cancel(); } private static void handleChannelError( SelectorWriteHandler entry, Throwable error) { // notify callers of the exception and drain all of the send buffers for this channel. do { if (error != null) entry.notifyError(error); } while (entry.next()); close(entry); } // process the write operation private void processWrite(Selector selector) { Set keys = selector.selectedKeys(); Object arr[] = keys.toArray(); for (Object anArr : arr) { SelectionKey key = (SelectionKey) anArr; SelectorWriteHandler entry = (SelectorWriteHandler) key.attachment(); boolean needToDecrementPendingChannels = false; try { if (0 == entry.write()) { // write the buffer and if the remaining bytes is zero, // notify the caller of number of bytes written. entry.notifyObject(entry.getBytesWritten()); // switch to next write buffer or clear interest bit on socket channel. if (!entry.next()) { needToDecrementPendingChannels = true; } } } catch (IOException e) { needToDecrementPendingChannels = true; // connection must of closed handleChannelError(entry, e); } finally { if (needToDecrementPendingChannels) m_pendingChannels--; } } keys.clear(); } public void run() { while (selector.isOpen()) { try { WriteRequest queueEntry; Object o; // When there are no more commands in the Queue, we will hit the blocking code after this loop. 
while (null != (o = queue.poll(0L, TimeUnit.MILLISECONDS))) { if (o instanceof Shutdown) // Stop the thread { try { selector.close(); } catch(IOException e) { if (log.isTraceEnabled()) log.trace("Write selector close operation failed" , e); } return; } queueEntry = (WriteRequest) o; if (queueEntry.getHandler().add(queueEntry)) { // If the add operation returns true, than means that a buffer is available to be written to the // corresponding channel and channel's selection key has been modified to indicate interest in the // 'write' operation. // If the add operation threw an exception, we will not increment m_pendingChannels which // seems correct as long as a new buffer wasn't added to be sent. // Another way to view this is that we don't have to protect m_pendingChannels on the increment // side, only need to protect on the decrement side (this logic of this run() will be incorrect // if m_pendingChannels is set incorrectly). m_pendingChannels++; } try { // process any connections ready to be written to. if (selector.selectNow() > 0) { processWrite(selector); } } catch (IOException e) { // need to understand what causes this error so we can handle it properly if (log.isErrorEnabled()) log.error("SelectNow operation on write selector failed, didn't expect this to occur, please report this", e); return; // if select fails, give up so we don't go into a busy loop. } } // if there isn't any pending work to do, block on queue to get next request. if (m_pendingChannels == 0) { o = queue.take(); if (o instanceof Shutdown){ // Stop the thread try { selector.close(); } catch(IOException e) { if (log.isTraceEnabled()) log.trace("Write selector close operation failed" , e); } return; } queueEntry = (WriteRequest) o; if (queueEntry.getHandler().add(queueEntry)) m_pendingChannels++; } // otherwise do a blocking wait select operation. else { try { if ((selector.select()) > 0) { processWrite(selector); } } catch (IOException e) { // need to understand what causes this error if (log.isErrorEnabled()) log.error("Failure while writing to socket",e); } } } catch (InterruptedException e) { if (log.isErrorEnabled()) log.error("Thread ("+Thread.currentThread().getName() +") was interrupted", e); } catch (Throwable e) // Log throwable rather than terminating this thread. { // We are a daemon thread so we shouldn't prevent the process from terminating if // the controlling thread decides that should happen. if (log.isErrorEnabled()) log.error("Thread ("+Thread.currentThread().getName() +") caught Throwable" , e); } } } } // Wrapper class for passing Write requests. There will be an instance of this class for each socketChannel // mapped to a Selector. public static class SelectorWriteHandler { private final List<WriteRequest> m_writeRequests = new LinkedList<WriteRequest>(); // Collection of writeRequests private boolean m_headerSent = false; private SocketChannel m_channel; private SelectionKey m_key; private Selector m_selector; private int m_bytesWritten = 0; private boolean m_enabled = false; private ByteBuffer m_headerBuffer; SelectorWriteHandler(SocketChannel channel, Selector selector, ByteBuffer headerBuffer) { m_channel = channel; m_selector = selector; m_headerBuffer = headerBuffer; } private void register(Selector selector, SocketChannel channel) throws ClosedChannelException { // register the channel but don't enable OP_WRITE until we have a write request. m_key = channel.register(selector, 0, this); } // return true if selection key is enabled when it wasn't previous to call. 
private boolean enable() { boolean rc = false; try { if (m_key == null) { // register the socket on first access, // we are the only thread using this variable, so no sync needed. register(m_selector, m_channel); } } catch (ClosedChannelException e) { return rc; } if (!m_enabled) { rc = true; try { m_key.interestOps(SelectionKey.OP_WRITE); } catch (CancelledKeyException e) { // channel must of closed return false; } m_enabled = true; } return rc; } private void disable() { if (m_enabled) { try { m_key.interestOps(0); // pass zero which means that we are not interested in being // notified of anything for this channel. } catch (CancelledKeyException eat) // If we finished writing and didn't get an exception, then { // we probably don't need to throw this exception (if they try to write // again, we will then throw an exception). } m_enabled = false; } } private void cancel() { m_key.cancel(); } boolean add(WriteRequest entry) { m_writeRequests.add(entry); return enable(); } WriteRequest getCurrentRequest() { return m_writeRequests.get(0); } SocketChannel getChannel() { return m_channel; } ByteBuffer getBuffer() { return getCurrentRequest().getBuffer(); } MyFuture getCallback() { return getCurrentRequest().getCallback(); } int getBytesWritten() { return m_bytesWritten; } void notifyError(Throwable error) { if (getCallback() != null) getCallback().setException(error); } void notifyObject(Object result) { if (getCallback() != null) getCallback().set(result); } /** * switch to next request or disable write interest bit if there are no more buffers. * * @return true if another request was found to be processed. */ boolean next() { m_headerSent = false; m_bytesWritten = 0; m_writeRequests.remove(0); // remove current entry boolean rc = !m_writeRequests.isEmpty(); if (!rc) // disable select for this channel if no more entries disable(); return rc; } /** * @return bytes remaining to write. This function will only throw IOException, unchecked exceptions are not * expected to be thrown from here. It is very important for the caller to know if an unchecked exception can * be thrown in here. Please correct the following throws list to include any other exceptions and update * caller to handle them. * @throws IOException */ int write() throws IOException { // Send header first. Note that while we are writing the shared header buffer, // no other threads can access the header buffer as we are the only thread that has access to it. 
if (!m_headerSent) { m_headerSent = true; m_headerBuffer.clear(); m_headerBuffer.putInt(getBuffer().remaining()); m_headerBuffer.flip(); do { getChannel().write(m_headerBuffer); } // we should be able to handle writing the header in one action but just in case, just do a busy loop while (m_headerBuffer.remaining() > 0); } m_bytesWritten += (getChannel().write(getBuffer())); return getBuffer().remaining(); } } public static class WriteRequest { private final SocketChannel m_channel; private final ByteBuffer m_buffer; private final MyFuture m_callback; private final SelectorWriteHandler m_hdlr; WriteRequest(SocketChannel channel, ByteBuffer buffer, MyFuture callback, SelectorWriteHandler hdlr) { m_channel = channel; m_buffer = buffer; m_callback = callback; m_hdlr = hdlr; } SelectorWriteHandler getHandler() { return m_hdlr; } SocketChannel getChannel() { return m_channel; } ByteBuffer getBuffer() { return m_buffer; } MyFuture getCallback() { return m_callback; } } private static class NullCallable implements Callable { public Object call() { System.out.println("nullCallable.call invoked"); return null; } } private static final NullCallable NULLCALL = new NullCallable(); public static class MyFuture extends FutureTask { // make FutureTask work like the old FutureResult public MyFuture() { super(NULLCALL); } protected void set(Object o) { super.set(o); } protected void setException(Throwable t) { super.setException(t); } } }
tekcomms/JGroups
src/org/jgroups/blocks/ConnectionTableNIO.java
Java
apache-2.0
52,632
// Copyright 2017 Xiaomi, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import (
    "errors"
    "fmt"
)

func ArrIntToString(arr []int) (result string, err error) {
    result = ""
    for indx, a := range arr {
        if indx == 0 {
            result = fmt.Sprintf("%v", a)
        } else {
            result = fmt.Sprintf("%v,%v", result, a)
        }
    }
    if result == "" {
        err = errors.New(fmt.Sprintf("array is empty, err: %v", arr))
    }
    return
}

func ArrIntToStringMust(arr []int) (result string) {
    result, _ = ArrIntToString(arr)
    return
}

func ArrInt64ToString(arr []int64) (result string, err error) {
    result = ""
    for indx, a := range arr {
        if indx == 0 {
            result = fmt.Sprintf("%v", a)
        } else {
            result = fmt.Sprintf("%v,%v", result, a)
        }
    }
    if result == "" {
        err = errors.New(fmt.Sprintf("array is empty, err: %v", arr))
    }
    return
}

func ArrInt64ToStringMust(arr []int64) (result string) {
    result, _ = ArrInt64ToString(arr)
    return
}

func ArrStringsToString(arr []string) (result string, err error) {
    result = ""
    for indx, a := range arr {
        if indx == 0 {
            result = fmt.Sprintf("\"%v\"", a)
        } else {
            result = fmt.Sprintf("%v,\"%v\"", result, a)
        }
    }
    if result == "" {
        err = errors.New(fmt.Sprintf("array is empty, err: %v", arr))
    }
    return
}

func ArrStringsToStringMust(arr []string) (result string) {
    result, _ = ArrStringsToString(arr)
    return
}
taomaree/falcon-plus
modules/api/app/utils/to_string.go
Go
apache-2.0
1,890
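A minimal usage sketch for the helpers in the Go file above (not part of the dataset row; the import path and the `main` wrapper are assumptions for illustration):

package main

import (
    "fmt"

    // Assumed import path; the dataset row only records the in-repo path
    // modules/api/app/utils/to_string.go for this fork of falcon-plus.
    "github.com/open-falcon/falcon-plus/modules/api/app/utils"
)

func main() {
    // Non-empty int slices are joined with commas.
    s, _ := utils.ArrIntToString([]int{1, 2, 3})
    fmt.Println(s) // 1,2,3

    // String elements are additionally double-quoted.
    fmt.Println(utils.ArrStringsToStringMust([]string{"a", "b"})) // "a","b"

    // An empty input yields an empty string and a non-nil error.
    _, err := utils.ArrInt64ToString(nil)
    fmt.Println(err) // array is empty, err: []
}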
/*
 * index.js
 */
ivarptr/clobaframe-web
source/tools/src/test/resources/webapp/resources/default/js/index.js
JavaScript
apache-2.0
19
import {expect} from 'chai'; import {spec} from 'modules/platformioBidAdapter'; import {newBidder} from 'src/adapters/bidderFactory'; describe('Platform.io Adapter Tests', function () { const slotConfigs = [{ placementCode: '/DfpAccount1/slot1', mediaTypes: { banner: { sizes: [[300, 250]] } }, bidId: 'bid12345', mediaType: 'banner', params: { pubId: '29521', siteId: '26047', placementId: '123', bidFloor: '0.001', ifa: 'IFA', latitude: '40.712775', longitude: '-74.005973' } }, { placementCode: '/DfpAccount2/slot2', mediaTypes: { banner: { sizes: [[728, 90]] } }, bidId: 'bid23456', mediaType: 'banner', params: { pubId: '29521', siteId: '26047', placementId: '1234', bidFloor: '0.000001', } }]; const nativeSlotConfig = [{ placementCode: '/DfpAccount1/slot3', bidId: 'bid12345', mediaType: 'native', nativeParams: { title: { required: true, len: 200 }, body: {}, image: { wmin: 100 }, sponsoredBy: { }, icon: { } }, params: { pubId: '29521', placementId: '123', siteId: '26047' } }]; const videoSlotConfig = [{ placementCode: '/DfpAccount1/slot4', mediaTypes: { video: { playerSize: [[640, 480]] } }, bidId: 'bid12345678', mediaType: 'video', video: { skippable: true }, params: { pubId: '29521', placementId: '1234567', siteId: '26047', } }]; const appSlotConfig = [{ placementCode: '/DfpAccount1/slot5', bidId: 'bid12345', params: { pubId: '29521', placementId: '1234', app: { id: '1111', name: 'app name', bundle: 'io.platform.apps', storeUrl: 'https://platform.io/apps', domain: 'platform.io' } } }]; it('Verify build request', function () { const request = spec.buildRequests(slotConfigs); expect(request.url).to.equal('https://piohbdisp.hb.adx1.com/'); expect(request.method).to.equal('POST'); const ortbRequest = JSON.parse(request.data); // site object expect(ortbRequest.site).to.not.equal(null); expect(ortbRequest.site.publisher).to.not.equal(null); expect(ortbRequest.site.publisher.id).to.equal('29521'); expect(ortbRequest.site.ref).to.equal(window.top.document.referrer); expect(ortbRequest.site.page).to.equal(window.location.href); expect(ortbRequest.imp).to.have.lengthOf(2); // device object expect(ortbRequest.device).to.not.equal(null); expect(ortbRequest.device.ua).to.equal(navigator.userAgent); expect(ortbRequest.device.ifa).to.equal('IFA'); expect(ortbRequest.device.geo.lat).to.equal('40.712775'); expect(ortbRequest.device.geo.lon).to.equal('-74.005973'); // slot 1 expect(ortbRequest.imp[0].tagid).to.equal('123'); expect(ortbRequest.imp[0].banner).to.not.equal(null); expect(ortbRequest.imp[0].banner.w).to.equal(300); expect(ortbRequest.imp[0].banner.h).to.equal(250); expect(ortbRequest.imp[0].bidfloor).to.equal('0.001'); // slot 2 expect(ortbRequest.imp[1].tagid).to.equal('1234'); expect(ortbRequest.imp[1].banner).to.not.equal(null); expect(ortbRequest.imp[1].banner.w).to.equal(728); expect(ortbRequest.imp[1].banner.h).to.equal(90); expect(ortbRequest.imp[1].bidfloor).to.equal('0.000001'); }); it('Verify parse response', function () { const request = spec.buildRequests(slotConfigs); const ortbRequest = JSON.parse(request.data); const ortbResponse = { seatbid: [{ bid: [{ impid: ortbRequest.imp[0].id, price: 1.25, adm: 'This is an Ad', w: 300, h: 250 }] }], cur: 'USD' }; const bids = spec.interpretResponse({ body: ortbResponse }, request); expect(bids).to.have.lengthOf(1); // verify first bid const bid = bids[0]; expect(bid.cpm).to.equal(1.25); expect(bid.ad).to.equal('This is an Ad'); expect(bid.width).to.equal(300); expect(bid.height).to.equal(250); expect(bid.adId).to.equal('bid12345'); 
expect(bid.creativeId).to.equal('bid12345'); expect(bid.netRevenue).to.equal(true); expect(bid.currency).to.equal('USD'); expect(bid.ttl).to.equal(360); }); it('Verify full passback', function () { const request = spec.buildRequests(slotConfigs); const bids = spec.interpretResponse({ body: null }, request) expect(bids).to.have.lengthOf(0); }); it('Verify Native request', function () { const request = spec.buildRequests(nativeSlotConfig); expect(request.url).to.equal('https://piohbdisp.hb.adx1.com/'); expect(request.method).to.equal('POST'); const ortbRequest = JSON.parse(request.data); // native impression expect(ortbRequest.imp[0].tagid).to.equal('123'); const nativePart = ortbRequest.imp[0]['native']; expect(nativePart).to.not.equal(null); expect(nativePart.ver).to.equal('1.1'); expect(nativePart.request).to.not.equal(null); // native request assets const nativeRequest = JSON.parse(ortbRequest.imp[0]['native'].request); expect(nativeRequest).to.not.equal(null); expect(nativeRequest.assets).to.have.lengthOf(5); expect(nativeRequest.assets[0].id).to.equal(1); expect(nativeRequest.assets[1].id).to.equal(2); expect(nativeRequest.assets[2].id).to.equal(3); expect(nativeRequest.assets[3].id).to.equal(4); expect(nativeRequest.assets[4].id).to.equal(5); expect(nativeRequest.assets[0].required).to.equal(1); expect(nativeRequest.assets[0].title).to.not.equal(null); expect(nativeRequest.assets[0].title.len).to.equal(200); expect(nativeRequest.assets[1].title).to.be.undefined; expect(nativeRequest.assets[1].data).to.not.equal(null); expect(nativeRequest.assets[1].data.type).to.equal(2); expect(nativeRequest.assets[1].data.len).to.equal(200); expect(nativeRequest.assets[2].required).to.equal(0); expect(nativeRequest.assets[3].img).to.not.equal(null); expect(nativeRequest.assets[3].img.wmin).to.equal(50); expect(nativeRequest.assets[3].img.hmin).to.equal(50); expect(nativeRequest.assets[3].img.type).to.equal(1); expect(nativeRequest.assets[4].img).to.not.equal(null); expect(nativeRequest.assets[4].img.wmin).to.equal(100); expect(nativeRequest.assets[4].img.hmin).to.equal(150); expect(nativeRequest.assets[4].img.type).to.equal(3); }); it('Verify Native response', function () { const request = spec.buildRequests(nativeSlotConfig); expect(request.url).to.equal('https://piohbdisp.hb.adx1.com/'); expect(request.method).to.equal('POST'); const ortbRequest = JSON.parse(request.data); const nativeResponse = { 'native': { assets: [ { id: 1, title: { text: 'Ad Title' } }, { id: 2, data: { value: 'Test description' } }, { id: 3, data: { value: 'Brand' } }, { id: 4, img: { url: 'https://adx1public.s3.amazonaws.com/creatives_icon.png', w: 100, h: 100 } }, { id: 5, img: { url: 'https://adx1public.s3.amazonaws.com/creatives_image.png', w: 300, h: 300 } } ], link: { url: 'https://brand.com/' } } }; const ortbResponse = { seatbid: [{ bid: [{ impid: ortbRequest.imp[0].id, price: 1.25, nurl: 'https://rtb.adx1.com/log', adm: JSON.stringify(nativeResponse) }] }], cur: 'USD', }; const bids = spec.interpretResponse({ body: ortbResponse }, request); // verify bid const bid = bids[0]; expect(bid.cpm).to.equal(1.25); expect(bid.adId).to.equal('bid12345'); expect(bid.ad).to.be.undefined; expect(bid.mediaType).to.equal('native'); const nativeBid = bid['native']; expect(nativeBid).to.not.equal(null); expect(nativeBid.title).to.equal('Ad Title'); expect(nativeBid.sponsoredBy).to.equal('Brand'); expect(nativeBid.icon.url).to.equal('https://adx1public.s3.amazonaws.com/creatives_icon.png'); 
expect(nativeBid.image.url).to.equal('https://adx1public.s3.amazonaws.com/creatives_image.png'); expect(nativeBid.image.width).to.equal(300); expect(nativeBid.image.height).to.equal(300); expect(nativeBid.icon.width).to.equal(100); expect(nativeBid.icon.height).to.equal(100); expect(nativeBid.clickUrl).to.equal(encodeURIComponent('https://brand.com/')); expect(nativeBid.impressionTrackers).to.have.lengthOf(1); expect(nativeBid.impressionTrackers[0]).to.equal('https://rtb.adx1.com/log'); }); it('Verify Video request', function () { const request = spec.buildRequests(videoSlotConfig); expect(request.url).to.equal('https://piohbdisp.hb.adx1.com/'); expect(request.method).to.equal('POST'); const videoRequest = JSON.parse(request.data); // site object expect(videoRequest.site).to.not.equal(null); expect(videoRequest.site.publisher.id).to.equal('29521'); expect(videoRequest.site.ref).to.equal(window.top.document.referrer); expect(videoRequest.site.page).to.equal(window.location.href); // device object expect(videoRequest.device).to.not.equal(null); expect(videoRequest.device.ua).to.equal(navigator.userAgent); // slot 1 expect(videoRequest.imp[0].tagid).to.equal('1234567'); expect(videoRequest.imp[0].video).to.not.equal(null); expect(videoRequest.imp[0].video.w).to.equal(640); expect(videoRequest.imp[0].video.h).to.equal(480); expect(videoRequest.imp[0].banner).to.equal(null); expect(videoRequest.imp[0].native).to.equal(null); }); it('Verify parse video response', function () { const request = spec.buildRequests(videoSlotConfig); const videoRequest = JSON.parse(request.data); const videoResponse = { seatbid: [{ bid: [{ impid: videoRequest.imp[0].id, price: 1.90, adm: 'https://vid.example.com/9876', crid: '510511_754567308' }] }], cur: 'USD' }; const bids = spec.interpretResponse({ body: videoResponse }, request); expect(bids).to.have.lengthOf(1); // verify first bid const bid = bids[0]; expect(bid.cpm).to.equal(1.90); expect(bid.vastUrl).to.equal('https://vid.example.com/9876'); expect(bid.crid).to.equal('510511_754567308'); expect(bid.width).to.equal(640); expect(bid.height).to.equal(480); expect(bid.adId).to.equal('bid12345678'); expect(bid.netRevenue).to.equal(true); expect(bid.currency).to.equal('USD'); expect(bid.ttl).to.equal(360); }); it('Verifies bidder code', function () { expect(spec.code).to.equal('platformio'); }); it('Verifies supported media types', function () { expect(spec.supportedMediaTypes).to.have.lengthOf(3); expect(spec.supportedMediaTypes[0]).to.equal('banner'); expect(spec.supportedMediaTypes[1]).to.equal('native'); expect(spec.supportedMediaTypes[2]).to.equal('video'); }); it('Verifies if bid request valid', function () { expect(spec.isBidRequestValid(slotConfigs[0])).to.equal(true); expect(spec.isBidRequestValid(slotConfigs[1])).to.equal(true); expect(spec.isBidRequestValid(nativeSlotConfig[0])).to.equal(true); expect(spec.isBidRequestValid(videoSlotConfig[0])).to.equal(true); }); it('Verify app requests', function () { const request = spec.buildRequests(appSlotConfig); const ortbRequest = JSON.parse(request.data); expect(ortbRequest.site).to.equal(null); expect(ortbRequest.app).to.not.be.null; expect(ortbRequest.app.publisher).to.not.equal(null); expect(ortbRequest.app.publisher.id).to.equal('29521'); expect(ortbRequest.app.id).to.equal('1111'); expect(ortbRequest.app.name).to.equal('app name'); expect(ortbRequest.app.bundle).to.equal('io.platform.apps'); expect(ortbRequest.app.storeurl).to.equal('https://platform.io/apps'); 
expect(ortbRequest.app.domain).to.equal('platform.io'); }); it('Verify GDPR', function () { const bidderRequest = { gdprConsent: { gdprApplies: true, consentString: 'serialized_gpdr_data' } }; const request = spec.buildRequests(slotConfigs, bidderRequest); expect(request.url).to.equal('https://piohbdisp.hb.adx1.com/'); expect(request.method).to.equal('POST'); const ortbRequest = JSON.parse(request.data); expect(ortbRequest.user).to.not.equal(null); expect(ortbRequest.user.ext).to.not.equal(null); expect(ortbRequest.user.ext.consent).to.equal('serialized_gpdr_data'); expect(ortbRequest.regs).to.not.equal(null); expect(ortbRequest.regs.ext).to.not.equal(null); expect(ortbRequest.regs.ext.gdpr).to.equal(1); }); });
varashellov/Prebid.js
test/spec/modules/platformioBidAdapter_spec.js
JavaScript
apache-2.0
12,829
/** Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ var Q = require('q'); var cordova_util = require('../util'); var HooksRunner = require('../../hooks/HooksRunner'); var CordovaError = require('cordova-common').CordovaError; var platforms = require('../../platforms/platforms'); var addHelper = require('./addHelper'); var events = require('cordova-common').events; module.exports = platform; module.exports.add = function add (hooksRunner, projectRoot, targets, opts) { return addHelper('add', hooksRunner, projectRoot, targets, opts); }; module.exports.update = function update (hooksRunner, projectRoot, targets, opts) { return addHelper('update', hooksRunner, projectRoot, targets, opts); }; module.exports.remove = require('./remove'); module.exports.check = require('./check'); module.exports.list = require('./list'); module.exports.save = require('./save'); module.exports.getPlatformDetailsFromDir = require('./getPlatformDetailsFromDir'); // Expose the platform parsers on top of this command for (var p in platforms) { module.exports[p] = platforms[p]; } /** * Handles all cordova platform commands. * @param {string} command - Command to execute (add, rm, ls, update, save) * @param {Object[]} targets - Array containing platforms to execute commands on * @param {Object} opts * @returns {Promise} */ function platform (command, targets, opts) { // CB-10519 wrap function code into promise so throwing error // would result in promise rejection instead of uncaught exception return Q().then(function () { var msg; var projectRoot = cordova_util.cdProjectRoot(); var hooksRunner = new HooksRunner(projectRoot); if (arguments.length === 0) command = 'ls'; if (targets && !(targets instanceof Array)) targets = [targets]; // TODO: wouldn't update need a platform, too? what about save? if ((command === 'add' || command === 'rm' || command === 'remove') && (!targets || (targets instanceof Array && targets.length === 0))) { msg = 'You need to qualify `' + command + '` with one or more platforms!'; return Q.reject(new CordovaError(msg)); } opts = opts || {}; opts.platforms = targets; switch (command) { case 'add': return module.exports.add(hooksRunner, projectRoot, targets, opts); case 'rm': case 'remove': return module.exports.remove(hooksRunner, projectRoot, targets, opts); case 'update': case 'up': return module.exports.update(hooksRunner, projectRoot, targets, opts); case 'check': return module.exports.check(hooksRunner, projectRoot); case 'save': events.emit('warn', 'This command has been deprecated and will be removed in the next major release of cordova.'); return module.exports.save(hooksRunner, projectRoot, opts); default: return module.exports.list(hooksRunner, projectRoot, opts); } }); }
purplecabbage/cordova-lib
src/cordova/platform/index.js
JavaScript
apache-2.0
3,797
/* * Copyright (C) 2013 Apple, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #include "JSObject.h" namespace JSC { class WeakMapPrototype : public JSNonFinalObject { public: typedef JSNonFinalObject Base; static WeakMapPrototype* create(VM& vm, JSGlobalObject* globalObject, Structure* structure) { WeakMapPrototype* prototype = new (NotNull, allocateCell<WeakMapPrototype>(vm.heap)) WeakMapPrototype(vm, structure); prototype->finishCreation(vm, globalObject); return prototype; } DECLARE_INFO; static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) { return Structure::create(vm, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), info()); } private: WeakMapPrototype(VM& vm, Structure* structure) : Base(vm, structure) { } void finishCreation(VM&, JSGlobalObject*); }; } // namespace JSC
alibaba/weex
weex_core/Source/include/JavaScriptCore/runtime/WeakMapPrototype.h
C
apache-2.0
2,205
// Copyright 2015 The Mangos Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tlstcp import ( "testing" "github.com/go-mangos/mangos/test" ) var tt = test.NewTranTest(NewTransport(), "tls+tcp://127.0.0.1:3334") func TestTLSListenAndAccept(t *testing.T) { tt.TestListenAndAccept(t) } func TestTLSDuplicateListen(t *testing.T) { tt.TestDuplicateListen(t) } func TestTLSConnRefused(t *testing.T) { tt.TestConnRefused(t) } func TestTLSSendRecv(t *testing.T) { tt.TestSendRecv(t) } func TestTLSAll(t *testing.T) { tt.TestAll(t) }
glycerine/mangos
transport/tlstcp/tls_test.go
Go
apache-2.0
1,060
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/io.h" #include <algorithm> #include <fstream> #include <vector> #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/pybind/pybind.h" DEFINE_string(devices, "", "The devices to be used which is joined by comma."); DEFINE_bool(init_p2p, false, "Whether to init p2p."); namespace paddle { namespace inference { void Init(const std::vector<std::string> argv) { framework::InitGflags(argv); // init devices std::vector<int> devices; std::string token; std::istringstream tokenStream(FLAGS_devices); while (std::getline(tokenStream, token, ',')) { devices.push_back(std::stoi(token)); } framework::InitDevices(FLAGS_init_p2p, devices); } void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s", filename); fin.seekg(0, std::ios::end); contents->clear(); contents->resize(fin.tellg()); fin.seekg(0, std::ios::beg); fin.read(&(contents->at(0)), contents->size()); fin.close(); } bool IsPersistable(const framework::VarDesc* var) { if (var->Persistable() && var->GetType() != framework::proto::VarType::FEED_MINIBATCH && var->GetType() != framework::proto::VarType::FETCH_LIST) { return true; } return false; } void LoadPersistables(framework::Executor* executor, framework::Scope* scope, const framework::ProgramDesc& main_program, const std::string& dirname, const std::string& param_filename) { const framework::BlockDesc& global_block = main_program.Block(0); framework::ProgramDesc* load_program = new framework::ProgramDesc(); framework::BlockDesc* load_block = load_program->MutableBlock(0); std::vector<std::string> paramlist; for (auto* var : global_block.AllVars()) { if (IsPersistable(var)) { VLOG(3) << "persistable variable's name: " << var->Name(); framework::VarDesc* new_var = load_block->Var(var->Name()); new_var->SetShape(var->GetShape()); new_var->SetDataType(var->GetDataType()); new_var->SetType(var->GetType()); new_var->SetLoDLevel(var->GetLoDLevel()); new_var->SetPersistable(true); if (!param_filename.empty()) { paramlist.push_back(new_var->Name()); } else { // append_op framework::OpDesc* op = load_block->AppendOp(); op->SetType("load"); op->SetOutput("Out", {new_var->Name()}); op->SetAttr("file_path", {dirname + "/" + new_var->Name()}); op->CheckAttrs(); } } } if (!param_filename.empty()) { // sort paramlist to have consistent ordering std::sort(paramlist.begin(), paramlist.end()); // append just the load_combine op framework::OpDesc* op = load_block->AppendOp(); op->SetType("load_combine"); op->SetOutput("Out", paramlist); op->SetAttr("file_path", {param_filename}); op->CheckAttrs(); } executor->Run(*load_program, scope, 0, true, true); delete load_program; } std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor, 
framework::Scope* scope, const std::string& dirname) { std::string model_filename = dirname + "/__model__"; std::string program_desc_str; VLOG(3) << "loading model from " << model_filename; ReadBinaryFile(model_filename, &program_desc_str); std::unique_ptr<framework::ProgramDesc> main_program( new framework::ProgramDesc(program_desc_str)); LoadPersistables(executor, scope, *main_program, dirname, ""); return main_program; } std::unique_ptr<framework::ProgramDesc> Load( framework::Executor* executor, framework::Scope* scope, const std::string& prog_filename, const std::string& param_filename) { std::string model_filename = prog_filename; std::string program_desc_str; ReadBinaryFile(model_filename, &program_desc_str); std::unique_ptr<framework::ProgramDesc> main_program( new framework::ProgramDesc(program_desc_str)); LoadPersistables(executor, scope, *main_program, "", param_filename); return main_program; } } // namespace inference } // namespace paddle
pkuyym/Paddle
paddle/fluid/inference/io.cc
C++
apache-2.0
4,981
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.rocketmq.example.namespace; import org.apache.rocketmq.client.producer.DefaultMQProducer; import org.apache.rocketmq.client.producer.SendResult; import org.apache.rocketmq.common.message.Message; public class ProducerWithNamespace { public static void main(String[] args) throws Exception { DefaultMQProducer producer = new DefaultMQProducer("InstanceTest", "pidTest"); producer.setNamesrvAddr("127.0.0.1:9876"); producer.start(); for (int i = 0; i < 100; i++) { Message message = new Message("topicTest", "tagTest", "Hello world".getBytes()); try { SendResult result = producer.send(message); System.out.printf("Topic:%s send success, msgId is:%s%n", message.getTopic(), result.getMsgId()); } catch (Exception e) { e.printStackTrace(); } } } }
Vansee/RocketMQ
example/src/main/java/org/apache/rocketmq/example/namespace/ProducerWithNamespace.java
Java
apache-2.0
1,712
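The record above shows only the producing side. Below is a minimal, hedged Java sketch of a matching consumer for the same topic and tag, using the generic DefaultMQPushConsumer API; the group name and listener body are illustrative, and the namespace-aware consumer constructor (which would mirror the producer's) is not shown because its exact signature depends on the client version.

import java.util.List;

import org.apache.rocketmq.client.consumer.DefaultMQPushConsumer;
import org.apache.rocketmq.client.consumer.listener.ConsumeConcurrentlyContext;
import org.apache.rocketmq.client.consumer.listener.ConsumeConcurrentlyStatus;
import org.apache.rocketmq.client.consumer.listener.MessageListenerConcurrently;
import org.apache.rocketmq.common.message.MessageExt;

public class ConsumerSketch {
    public static void main(String[] args) throws Exception {
        // Group name is illustrative; it only needs to identify the consuming application.
        DefaultMQPushConsumer consumer = new DefaultMQPushConsumer("cidTest");
        consumer.setNamesrvAddr("127.0.0.1:9876");
        // Subscribe to the topic and tag used by ProducerWithNamespace above.
        consumer.subscribe("topicTest", "tagTest");
        consumer.registerMessageListener(new MessageListenerConcurrently() {
            @Override
            public ConsumeConcurrentlyStatus consumeMessage(List<MessageExt> msgs, ConsumeConcurrentlyContext context) {
                for (MessageExt msg : msgs) {
                    System.out.printf("Received %s from %s%n", new String(msg.getBody()), msg.getTopic());
                }
                return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
            }
        });
        consumer.start();
    }
}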
<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=US-ASCII"> <title>Struct as_feature&lt;tag::weighted_mean(immediate)&gt;</title> <link rel="stylesheet" href="../../../../doc/src/boostbook.css" type="text/css"> <meta name="generator" content="DocBook XSL Stylesheets V1.78.1"> <link rel="home" href="../../index.html" title="The Boost C++ Libraries BoostBook Documentation Subset"> <link rel="up" href="../../accumulators/reference.html#header.boost.accumulators.statistics.weighted_mean_hpp" title="Header &lt;boost/accumulators/statistics/weighted_mean.hpp&gt;"> <link rel="prev" href="as_feature_tag_idp26819232.html" title="Struct as_feature&lt;tag::weighted_mean(lazy)&gt;"> <link rel="next" href="as_feature_tag_idp26821184.html" title="Struct template as_feature&lt;tag::weighted_mean_of_variates&lt; VariateType, VariateTag &gt;(lazy)&gt;"> </head> <body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"> <table cellpadding="2" width="100%"><tr> <td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../boost.png"></td> <td align="center"><a href="../../../../index.html">Home</a></td> <td align="center"><a href="../../../../libs/libraries.htm">Libraries</a></td> <td align="center"><a href="http://www.boost.org/users/people.html">People</a></td> <td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td> <td align="center"><a href="../../../../more/index.htm">More</a></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="as_feature_tag_idp26819232.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../accumulators/reference.html#header.boost.accumulators.statistics.weighted_mean_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="as_feature_tag_idp26821184.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a> </div> <div class="refentry"> <a name="boost.accumulators.as_feature_tag_idp26820208"></a><div class="titlepage"></div> <div class="refnamediv"> <h2><span class="refentrytitle">Struct as_feature&lt;tag::weighted_mean(immediate)&gt;</span></h2> <p>boost::accumulators::as_feature&lt;tag::weighted_mean(immediate)&gt;</p> </div> <h2 xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv-title">Synopsis</h2> <div xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv"><pre class="synopsis"><span class="comment">// In header: &lt;<a class="link" href="../../accumulators/reference.html#header.boost.accumulators.statistics.weighted_mean_hpp" title="Header &lt;boost/accumulators/statistics/weighted_mean.hpp&gt;">boost/accumulators/statistics/weighted_mean.hpp</a>&gt; </span> <span class="keyword">struct</span> <a class="link" href="as_feature_tag_idp26820208.html" title="Struct as_feature&lt;tag::weighted_mean(immediate)&gt;">as_feature</a><span class="special">&lt;</span><span class="identifier">tag</span><span class="special">::</span><span class="identifier">weighted_mean</span><span class="special">(</span><span class="identifier">immediate</span><span class="special">)</span><span class="special">&gt;</span> <span class="special">{</span> <span class="comment">// types</span> <span class="keyword">typedef</span> <a class="link" href="tag/immediate_weighted_mean.html" title="Struct 
immediate_weighted_mean">tag::immediate_weighted_mean</a> <a name="boost.accumulators.as_feature_tag_idp26820208.type"></a><span class="identifier">type</span><span class="special">;</span> <span class="special">}</span><span class="special">;</span></pre></div> </div> <table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr> <td align="left"></td> <td align="right"><div class="copyright-footer">Copyright &#169; 2005, 2006 Eric Niebler<p> Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>) </p> </div></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="as_feature_tag_idp26819232.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../accumulators/reference.html#header.boost.accumulators.statistics.weighted_mean_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="as_feature_tag_idp26821184.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a> </div> </body> </html>
ryancoleman/autodock-vina
boost_1_54_0/doc/html/boost/accumulators/as_feature_tag_idp26820208.html
HTML
apache-2.0
4,823
/* * Licensed to Metamarkets Group Inc. (Metamarkets) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Metamarkets licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.druid.query; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import io.druid.java.util.common.IAE; import io.druid.java.util.common.ISE; public class QueryContexts { public static final String PRIORITY_KEY = "priority"; public static final String TIMEOUT_KEY = "timeout"; public static final String MAX_SCATTER_GATHER_BYTES_KEY = "maxScatterGatherBytes"; public static final String DEFAULT_TIMEOUT_KEY = "defaultTimeout"; public static final String CHUNK_PERIOD_KEY = "chunkPeriod"; public static final boolean DEFAULT_BY_SEGMENT = false; public static final boolean DEFAULT_POPULATE_CACHE = true; public static final boolean DEFAULT_USE_CACHE = true; public static final int DEFAULT_PRIORITY = 0; public static final int DEFAULT_UNCOVERED_INTERVALS_LIMIT = 0; public static final long DEFAULT_TIMEOUT_MILLIS = 300_000; // 5 minutes public static final long NO_TIMEOUT = 0; public static <T> boolean isBySegment(Query<T> query) { return isBySegment(query, DEFAULT_BY_SEGMENT); } public static <T> boolean isBySegment(Query<T> query, boolean defaultValue) { return parseBoolean(query, "bySegment", defaultValue); } public static <T> boolean isPopulateCache(Query<T> query) { return isPopulateCache(query, DEFAULT_POPULATE_CACHE); } public static <T> boolean isPopulateCache(Query<T> query, boolean defaultValue) { return parseBoolean(query, "populateCache", defaultValue); } public static <T> boolean isUseCache(Query<T> query) { return isUseCache(query, DEFAULT_USE_CACHE); } public static <T> boolean isUseCache(Query<T> query, boolean defaultValue) { return parseBoolean(query, "useCache", defaultValue); } public static <T> boolean isFinalize(Query<T> query, boolean defaultValue) { return parseBoolean(query, "finalize", defaultValue); } public static <T> boolean isSerializeDateTimeAsLong(Query<T> query, boolean defaultValue) { return parseBoolean(query, "serializeDateTimeAsLong", defaultValue); } public static <T> boolean isSerializeDateTimeAsLongInner(Query<T> query, boolean defaultValue) { return parseBoolean(query, "serializeDateTimeAsLongInner", defaultValue); } public static <T> int getUncoveredIntervalsLimit(Query<T> query) { return getUncoveredIntervalsLimit(query, DEFAULT_UNCOVERED_INTERVALS_LIMIT); } public static <T> int getUncoveredIntervalsLimit(Query<T> query, int defaultValue) { return parseInt(query, "uncoveredIntervalsLimit", defaultValue); } public static <T> int getPriority(Query<T> query) { return getPriority(query, DEFAULT_PRIORITY); } public static <T> int getPriority(Query<T> query, int defaultValue) { return parseInt(query, PRIORITY_KEY, defaultValue); } public static <T> String getChunkPeriod(Query<T> query) { return query.getContextValue(CHUNK_PERIOD_KEY, 
"P0D"); } public static <T> Query<T> withMaxScatterGatherBytes(Query<T> query, long maxScatterGatherBytesLimit) { Object obj = query.getContextValue(MAX_SCATTER_GATHER_BYTES_KEY); if (obj == null) { return query.withOverriddenContext(ImmutableMap.of(MAX_SCATTER_GATHER_BYTES_KEY, maxScatterGatherBytesLimit)); } else { long curr = ((Number) obj).longValue(); if (curr > maxScatterGatherBytesLimit) { throw new IAE( "configured [%s = %s] is more than enforced limit of [%s].", MAX_SCATTER_GATHER_BYTES_KEY, curr, maxScatterGatherBytesLimit ); } else { return query; } } } public static <T> long getMaxScatterGatherBytes(Query<T> query) { return parseLong(query, MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE); } public static <T> boolean hasTimeout(Query<T> query) { return getTimeout(query) != NO_TIMEOUT; } public static <T> long getTimeout(Query<T> query) { return getTimeout(query, getDefaultTimeout(query)); } public static <T> long getTimeout(Query<T> query, long defaultTimeout) { final long timeout = parseLong(query, TIMEOUT_KEY, defaultTimeout); Preconditions.checkState(timeout >= 0, "Timeout must be a non negative value, but was [%s]", timeout); return timeout; } public static <T> Query<T> withTimeout(Query<T> query, long timeout) { return query.withOverriddenContext(ImmutableMap.of(TIMEOUT_KEY, timeout)); } public static <T> Query<T> withDefaultTimeout(Query<T> query, long defaultTimeout) { return query.withOverriddenContext(ImmutableMap.of(QueryContexts.DEFAULT_TIMEOUT_KEY, defaultTimeout)); } static <T> long getDefaultTimeout(Query<T> query) { final long defaultTimeout = parseLong(query, DEFAULT_TIMEOUT_KEY, DEFAULT_TIMEOUT_MILLIS); Preconditions.checkState(defaultTimeout >= 0, "Timeout must be a non negative value, but was [%s]", defaultTimeout); return defaultTimeout; } static <T> long parseLong(Query<T> query, String key, long defaultValue) { Object val = query.getContextValue(key); if (val == null) { return defaultValue; } if (val instanceof String) { return Long.parseLong((String) val); } else if (val instanceof Number) { return ((Number) val).longValue(); } else { throw new ISE("Unknown type [%s]", val.getClass()); } } static <T> int parseInt(Query<T> query, String key, int defaultValue) { Object val = query.getContextValue(key); if (val == null) { return defaultValue; } if (val instanceof String) { return Integer.parseInt((String) val); } else if (val instanceof Number) { return ((Number) val).intValue(); } else { throw new ISE("Unknown type [%s]", val.getClass()); } } static <T> boolean parseBoolean(Query<T> query, String key, boolean defaultValue) { Object val = query.getContextValue(key); if (val == null) { return defaultValue; } if (val instanceof String) { return Boolean.parseBoolean((String) val); } else if (val instanceof Boolean) { return (boolean) val; } else { throw new ISE("Unknown type [%s]. Cannot parse!", val.getClass()); } } }
lizhanhui/data_druid
processing/src/main/java/io/druid/query/QueryContexts.java
Java
apache-2.0
7,039
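A self-contained Java sketch (a hypothetical helper, not Druid code) of the coercion rule the parse* methods above apply to query-context values: a value may arrive as a String or a boxed number, anything else is an error, and a missing key falls back to the supplied default.

import java.util.Map;

final class ContextCoercionSketch {
    // Mirrors QueryContexts.parseLong: String and Number are accepted, other types are rejected.
    static long parseLong(Map<String, Object> context, String key, long defaultValue) {
        Object val = context.get(key);
        if (val == null) {
            return defaultValue;
        }
        if (val instanceof String) {
            return Long.parseLong((String) val);
        }
        if (val instanceof Number) {
            return ((Number) val).longValue();
        }
        throw new IllegalStateException("Unknown type " + val.getClass());
    }

    public static void main(String[] args) {
        System.out.println(parseLong(Map.of("timeout", "5000"), "timeout", 300_000L));  // 5000 (String coerced)
        System.out.println(parseLong(Map.of("timeout", 60_000L), "timeout", 300_000L)); // 60000 (Number accepted)
        System.out.println(parseLong(Map.of(), "timeout", 300_000L));                   // 300000 (default)
    }
}

The int and boolean variants in the record above follow the same pattern.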
<html dir="LTR"> <head> <meta http-equiv="Content-Type" content="text/html; charset=Windows-1252" /> <meta name="vs_targetSchema" content="http://schemas.microsoft.com/intellisense/ie5" /> <title>DefaultRemoteCommand Members</title> <xml> </xml> <link rel="stylesheet" type="text/css" href="MSDN.css" /> </head> <body id="bodyID" class="dtBODY"> <div id="nsbanner"> <div id="bannerrow1"> <table class="bannerparthead" cellspacing="0"> <tr id="hdr"> <td class="runninghead">An NDoc Documented Class Library</td> <td class="product"> </td> </tr> </table> </div> <div id="TitleRow"> <h1 class="dtH1">DefaultRemoteCommand Members </h1> </div> </div> <div id="nstext"> <p> <a href="Selenium.DefaultRemoteCommand.html">DefaultRemoteCommand overview</a> </p> <h4 class="dtH4">Public Static Methods</h4> <div class="tablediv"> <table class="dtTABLE" cellspacing="0"> <tr VALIGN="top"><td width="50%"><img src="pubmethod.gif"></img><img src="static.gif" /><a href="Selenium.DefaultRemoteCommand.Parse.html">Parse</a></td><td width="50%"> Parses a "wiki-style" command string, like this: |type|q|Hello World| </td></tr></table> </div> <h4 class="dtH4">Public Instance Constructors</h4> <div class="tablediv"> <table class="dtTABLE" cellspacing="0"> <tr VALIGN="top"> <td width="50%"> <img src="pubmethod.gif" /> <a href="Selenium.DefaultRemoteCommandConstructor.html">DefaultRemoteCommand Constructor</a> </td> <td width="50%"> Creates a command with the specified arguments </td> </tr> </table> </div> <h4 class="dtH4">Public Instance Properties</h4> <div class="tablediv"> <table class="dtTABLE" cellspacing="0"> <tr VALIGN="top"><td width="50%"><img src="pubproperty.gif"></img><a href="Selenium.DefaultRemoteCommand.Args.html">Args</a></td><td width="50%"> The array of arguments for this command </td></tr> <tr VALIGN="top"><td width="50%"><img src="pubproperty.gif"></img><a href="Selenium.DefaultRemoteCommand.Command.html">Command</a></td><td width="50%"> The name of the Selenium command verb </td></tr> <tr VALIGN="top"><td width="50%"><img src="pubproperty.gif"></img><a href="Selenium.DefaultRemoteCommand.CommandString.html">CommandString</a></td><td width="50%"> The string token that we'll send to the server </td></tr></table> </div> <h4 class="dtH4">Public Instance Methods</h4> <div class="tablediv"> <table class="dtTABLE" cellspacing="0"> <tr VALIGN="top"><td width="50%"><img src="pubmethod.gif"></img><a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassEqualsTopic.htm">Equals</a> (inherited from <b>Object</b>)</td><td width="50%"> </td></tr> <tr VALIGN="top"><td width="50%"><img src="pubmethod.gif"></img><a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassGetHashCodeTopic.htm">GetHashCode</a> (inherited from <b>Object</b>)</td><td width="50%"> </td></tr> <tr VALIGN="top"><td width="50%"><img src="pubmethod.gif"></img><a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassGetTypeTopic.htm">GetType</a> (inherited from <b>Object</b>)</td><td width="50%"> </td></tr> <tr VALIGN="top"><td width="50%"><img src="pubmethod.gif"></img><a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassToStringTopic.htm">ToString</a> (inherited from <b>Object</b>)</td><td width="50%"> </td></tr></table> </div> <h4 class="dtH4">Protected Instance Methods</h4> <div class="tablediv"> <table class="dtTABLE" cellspacing="0"> <tr VALIGN="top"><td width="50%"><img src="protmethod.gif"></img><a 
href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassFinalizeTopic.htm">Finalize</a> (inherited from <b>Object</b>)</td><td width="50%"> </td></tr> <tr VALIGN="top"><td width="50%"><img src="protmethod.gif"></img><a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassMemberwiseCloneTopic.htm">MemberwiseClone</a> (inherited from <b>Object</b>)</td><td width="50%"> </td></tr></table> </div> <h4 class="dtH4">See Also</h4> <p> <a href="Selenium.DefaultRemoteCommand.html">DefaultRemoteCommand Class</a> | <a href="Selenium.html">Selenium Namespace</a></p> </div> </body> </html>
dineshkummarc/gitak-1.0
server/selenium-remote-control-1.0.3/selenium-dotnet-client-driver-1.0.1/doc/Selenium.DefaultRemoteCommandMembers.html
HTML
apache-2.0
4,516
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.hibench.streambench.spark.microbench import com.intel.hibench.streambench.spark.entity.ParamEntity import org.apache.spark.streaming.dstream.DStream import com.intel.hibench.streambench.spark.metrics.LatencyListener import org.apache.spark.streaming.StreamingContext import com.intel.hibench.streambench.spark.util.BenchLogUtil import org.apache.spark.SparkContext import org.apache.spark.SparkContext._ import scala.collection.mutable.Map object MapPool { private var imap: Map[String, Long] = _ def getMap(): Map[String, Long] = synchronized { if (imap == null) imap = Map() imap } def setMap(imap: Map[String, Long]) = synchronized { this.imap = imap } } class Wordcount(subClassParams:ParamEntity,separator:String) extends RunBenchJobWithInit(subClassParams){ override def processStreamData(lines:DStream[String],ssc:StreamingContext){ val sep = separator val wordcount = lines .flatMap(x => x.split(sep)) .map(word => (word, 1)) .reduceByKey(_ + _) wordcount.foreachRDD(rdd=> { rdd.foreachPartition(partitionOfRecords => { val imap = MapPool.getMap partitionOfRecords.foreach{case (word, count) => imap(word) = if (imap.contains(word)) imap(word) + count else count } MapPool.setMap(imap) }) }) } }
eriknor/HiBench
src/streambench/sparkbench/src/main/scala/com/intel/hibench/streambench/spark/microbench/Wordcount.scala
Scala
apache-2.0
2,145
# Copyright 2014-2016 Ivan Kravets <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Builder for Linux Linux i686 / 32-bit """ from SCons.Script import AlwaysBuild, Default, DefaultEnvironment from platformio.util import get_systype env = DefaultEnvironment() env.Replace( _BINPREFIX="", AR="${_BINPREFIX}ar", AS="${_BINPREFIX}as", CC="${_BINPREFIX}gcc", CXX="${_BINPREFIX}g++", OBJCOPY="${_BINPREFIX}objcopy", RANLIB="${_BINPREFIX}ranlib", SIZETOOL="${_BINPREFIX}size", SIZEPRINTCMD='"$SIZETOOL" $SOURCES' ) if get_systype() == "darwin_x86_64": env.Replace( _BINPREFIX="i586-pc-linux-" ) # # Target: Build executable program # target_bin = env.BuildProgram() # # Target: Print binary size # target_size = env.Alias("size", target_bin, "$SIZEPRINTCMD") AlwaysBuild(target_size) # # Target: Define targets # Default([target_bin])
valeros/platformio
platformio/builder/scripts/linux_i686.py
Python
apache-2.0
1,415
package org.eclipse.webdav.internal.kernel.utils; /** * <code>AssertionFailedException</code> is a runtime exception thrown * by some of the methods in <code>Assert</code>. * <p> * This class is not declared public to prevent some misuses; programs that catch * or otherwise depend on assertion failures are susceptible to unexpected * breakage when assertions in the code are added or removed. */ /* package */class AssertionFailedException extends RuntimeException { /** * Comment for <code>serialVersionUID</code> */ private static final long serialVersionUID = 510l; /** Constructs a new exception. */ public AssertionFailedException() { super(); } /** Constructs a new exception with the given message. */ public AssertionFailedException(String detail) { super(detail); } }
ChallenHB/droolsjbpm-tools
drools-eclipse/org.guvnor.eclipse.webdav/src/kernel/org/eclipse/webdav/internal/kernel/utils/AssertionFailedException.java
Java
apache-2.0
858
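The exception class above is only useful together with an Assert companion; the following is a hedged sketch of what such a helper typically looks like. The Assert class name and isTrue method here are illustrative, not taken from the Eclipse WebDAV sources.

final class Assert {
    private Assert() {
        // static helpers only
    }

    // Throws AssertionFailedException when the asserted condition does not hold.
    static void isTrue(boolean condition, String message) {
        if (!condition) {
            throw new AssertionFailedException(message);
        }
    }
}

// Usage: Assert.isTrue(index >= 0, "index must be non-negative");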
/* * Copyright 2018 Lookout, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; import com.amazonaws.services.applicationautoscaling.model.RegisterScalableTargetRequest; import com.amazonaws.services.applicationautoscaling.model.ScalableDimension; import com.amazonaws.services.applicationautoscaling.model.ServiceNamespace; import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.Service; import com.amazonaws.services.ecs.model.UpdateServiceRequest; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ResizeServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import java.util.List; import org.springframework.beans.factory.annotation.Autowired; public class ResizeServiceAtomicOperation extends AbstractEcsAtomicOperation<ResizeServiceDescription, Void> implements AtomicOperation<Void> { @Autowired ContainerInformationService containerInformationService; public ResizeServiceAtomicOperation(ResizeServiceDescription description) { super(description, "RESIZE_ECS_SERVER_GROUP"); } @Override public Void operate(List priorOutputs) { updateTaskStatus("Initializing Resize ECS Server Group Operation..."); Service service = resizeService(); resizeAutoScalingGroup(service); return null; } private Service resizeService() { AmazonECS amazonECS = getAmazonEcsClient(); String serviceName = description.getServerGroupName(); Integer desiredCount = description.getCapacity().getDesired(); String ecsClusterName = containerInformationService.getClusterName( serviceName, description.getAccount(), description.getRegion()); UpdateServiceRequest updateServiceRequest = new UpdateServiceRequest() .withCluster(ecsClusterName) .withService(serviceName) .withDesiredCount(desiredCount); updateTaskStatus(String.format("Resizing %s to %s instances.", serviceName, desiredCount)); Service service = amazonECS.updateService(updateServiceRequest).getService(); updateTaskStatus(String.format("Done resizing %s to %s", serviceName, desiredCount)); return service; } private void resizeAutoScalingGroup(Service service) { AWSApplicationAutoScaling autoScalingClient = getAmazonApplicationAutoScalingClient(); Integer desiredCount = description.getCapacity().getDesired(); String ecsClusterName = containerInformationService.getClusterName( service.getServiceName(), description.getAccount(), description.getRegion()); RegisterScalableTargetRequest request = new RegisterScalableTargetRequest() .withServiceNamespace(ServiceNamespace.Ecs) .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) .withResourceId( String.format("service/%s/%s", ecsClusterName, service.getServiceName())) .withMinCapacity(description.getCapacity().getMin()) .withMaxCapacity(description.getCapacity().getMax()); updateTaskStatus( String.format( "Resizing Scalable Target of %s to 
%s instances", service.getServiceName(), desiredCount)); autoScalingClient.registerScalableTarget(request); updateTaskStatus( String.format( "Done resizing Scalable Target of %s to %s instances", service.getServiceName(), desiredCount)); } }
ajordens/clouddriver
clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperation.java
Java
apache-2.0
4,172
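As a small aside on the convention used by resizeAutoScalingGroup above, ECS scalable targets are addressed by a resource id of the form service/<clusterName>/<serviceName>. The stand-alone Java sketch below only restates that formatting; the cluster and service names are illustrative.

public class EcsScalableTargetIdSketch {
    // Builds the Application Auto Scaling resource id for an ECS service, as in the operation above.
    static String resourceId(String ecsClusterName, String serviceName) {
        return String.format("service/%s/%s", ecsClusterName, serviceName);
    }

    public static void main(String[] args) {
        System.out.println(resourceId("demo-cluster", "demo-service-v001"));
        // prints: service/demo-cluster/demo-service-v001
    }
}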
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. **/ #include "query_optimizer/physical/InsertTuple.hpp" #include <string> #include <vector> #include "query_optimizer/OptimizerTree.hpp" #include "query_optimizer/expressions/ScalarLiteral.hpp" #include "utility/Cast.hpp" namespace quickstep { namespace optimizer { namespace physical { void InsertTuple::getFieldStringItems( std::vector<std::string> *inline_field_names, std::vector<std::string> *inline_field_values, std::vector<std::string> *non_container_child_field_names, std::vector<OptimizerTreeBaseNodePtr> *non_container_child_fields, std::vector<std::string> *container_child_field_names, std::vector<std::vector<OptimizerTreeBaseNodePtr>> *container_child_fields) const { non_container_child_field_names->push_back("input"); non_container_child_fields->push_back(input_); container_child_field_names->push_back("column_values"); container_child_fields->push_back(CastSharedPtrVector<OptimizerTreeBase>(column_values_)); } } // namespace physical } // namespace optimizer } // namespace quickstep
cramja/incubator-quickstep
query_optimizer/physical/InsertTuple.cpp
C++
apache-2.0
1,856
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/sqs/SQS_EXPORTS.h> #include <aws/core/utils/memory/stl/AWSString.h> namespace Aws { namespace SQS { namespace Model { enum class MessageSystemAttributeName { NOT_SET, SenderId, SentTimestamp, ApproximateReceiveCount, ApproximateFirstReceiveTimestamp, SequenceNumber, MessageDeduplicationId, MessageGroupId, AWSTraceHeader }; namespace MessageSystemAttributeNameMapper { AWS_SQS_API MessageSystemAttributeName GetMessageSystemAttributeNameForName(const Aws::String& name); AWS_SQS_API Aws::String GetNameForMessageSystemAttributeName(MessageSystemAttributeName value); } // namespace MessageSystemAttributeNameMapper } // namespace Model } // namespace SQS } // namespace Aws
jt70471/aws-sdk-cpp
aws-cpp-sdk-sqs/include/aws/sqs/model/MessageSystemAttributeName.h
C
apache-2.0
869
define(['./module'], function (services) { 'use strict'; services.factory("PersonaJuridicaService",["Restangular", function(Restangular){ var _personaJuridicaService = Restangular.all("personas/juridicas"); var baseUrl = "personas/juridicas"; return { getModel: function(){ return { "id":undefined, "tipoDocumento": undefined, "numeroDocumento":undefined, "razonSocial":undefined, "nombreComercial":undefined, "representanteLegal": undefined, "fechaConstitucion":undefined, "actividadPrincipal":undefined, "tipoEmpresa":undefined, "finLucro":undefined, "direccion":undefined, "referencia":undefined, "telefono":undefined, "celular":undefined, "email":undefined, "ubigeo":undefined, "accionistas":undefined }; }, getTipoDocumentos: function(){ return Restangular.all(baseUrl+"/tipoDocumentos").getList(); }, findById: function(id){ return Restangular.one(baseUrl, id).get(); }, findByTipoNumeroDocumento: function(idtipodocumento, numeroDocumento){ return Restangular.one(baseUrl + '/buscar').get({idTipoDocumento:idtipodocumento,numeroDocumento:numeroDocumento},{}); }, findByFilterText: function(filterText, offset, limit){ if(arguments.length == 0){ return Restangular.all(baseUrl).getList(); } else if(arguments.length == 1){ return Restangular.all(baseUrl).getList({filterText:filterText},{}); } else if(arguments.length == 2){ return Restangular.all(baseUrl).getList({filterText:filterText,offset:offset},{}); } else if(arguments.length == 3){ return Restangular.all(baseUrl).getList({filterText:filterText,offset:offset,limit:limit},{}); } else if(arguments.length > 2){ return Restangular.all(baseUrl).getList({filterText:filterText,offset:offset,limit:limit},{}); } }, getPersonas: function(offset, limit){ if(arguments.length == 0){ return _personaJuridicaService.getList(); } else if(arguments.length == 1){ return _personaJuridicaService.getList({"offset":offset},{}); } else if(arguments.length == 2){ return _personaJuridicaService.getList({"offset":offset,"limit":limit},{}); } else if(arguments.length > 2){ return _personaJuridicaService.getList({"offset":offset,"limit":limit},{}); } }, count: function(filterText){ if(arguments.length == 0){ return Restangular.one(baseUrl + "/count").get(); } else if(arguments.length == 1){ return Restangular.one(baseUrl + "/count").get({"filterText":filterText},{}); } }, update: function(persona){ return Restangular.one(baseUrl + "/" + persona.id).customPUT(persona,'',{},{}); }, crear: function(personaJuridica){ return _personaJuridicaService.post(personaJuridica); }, remove: function(id){ return Restangular.all(baseUrl + "/" + id).remove(); } } }]) });
Softgreen/SistCoop
SistCoopApp/src/main/webapp/scripts/services/PersonaJuridicaService.js
JavaScript
apache-2.0
4,119
import { Filter } from '../models/filter'; export class FilterParamMaker { static makeParam(currentFilter: string, filter: Filter): string { const aCurrentFilter: Filter[] = Filter.instanceFromString(currentFilter || '[]'); if (aCurrentFilter.length === 0) { aCurrentFilter.push(filter); } else { let searchIndex = -1; for ( let i = 0 ; i < aCurrentFilter.length ; i++ ) { if ( aCurrentFilter[i].equal(filter) ) { searchIndex = i; aCurrentFilter[i] = filter; // replace previous param object break; } } if (searchIndex === -1) { aCurrentFilter.push(filter); } } return '/' + encodeURIComponent('[' + aCurrentFilter.map(f => f.toString()).join(',') + ']'); } }
Xylus/pinpoint
web/src/main/webapp/v2/src/app/core/utils/filter-param-maker.ts
TypeScript
apache-2.0
889
<?php final class ArcanistInspectWorkflow extends ArcanistArcWorkflow { public function getWorkflowName() { return 'inspect'; } public function getWorkflowInformation() { $help = pht(<<<EOTEXT Inspect internal object properties. EOTEXT ); return $this->newWorkflowInformation() ->setSynopsis(pht('Show internal object information.')) ->addExample(pht('**inspect** [__options__] -- __object__')) ->setHelp($help); } public function getWorkflowArguments() { return array( $this->newWorkflowArgument('explore') ->setHelp(pht('Load all object hardpoints.')), $this->newWorkflowArgument('objects') ->setWildcard(true), ); } public function runWorkflow() { $is_explore = $this->getArgument('explore'); $objects = $this->getArgument('objects'); $inspectors = ArcanistRefInspector::getAllInspectors(); foreach ($inspectors as $inspector) { $inspector->setWorkflow($this); } if (!$objects) { echo tsprintf( "%s\n\n", pht('Choose an object to inspect:')); foreach ($inspectors as $inspector) { echo tsprintf( " - %s\n", $inspector->getInspectFunctionName()); } echo tsprintf("\n"); return 0; } $all_refs = array(); foreach ($objects as $description) { $matches = null; $pattern = '/^([\w-]+)(?:\((.*)\))?\z/'; if (!preg_match($pattern, $description, $matches)) { throw new PhutilArgumentUsageException( pht( 'Object specification "%s" is unknown, expected a specification '. 'like "commit(HEAD)".', $description)); } $function = $matches[1]; if (!isset($inspectors[$function])) { ksort($inspectors); throw new PhutilArgumentUsageException( pht( 'Unknown object type "%s", supported types are: %s.', $function, implode(', ', array_keys($inspectors)))); } $inspector = $inspectors[$function]; if (isset($matches[2])) { $arguments = array($matches[2]); } else { $arguments = array(); } $ref = $inspector->newInspectRef($arguments); $all_refs[] = $ref; } if ($is_explore) { $this->exploreRefs($all_refs); } $list = array(); foreach ($all_refs as $ref) { $out = $this->describeRef($ref, 0); $list[] = $out; } $list = phutil_glue($list, "\n"); echo tsprintf('%B', $list); return 0; } private function describeRef(ArcanistRef $ref, $depth) { $indent = str_repeat(' ', $depth); $out = array(); $out[] = tsprintf( "%s+ [%s] %s\n", $indent, get_class($ref), $ref->getRefDisplayName()); $hardpoint_list = $ref->getHardpointList(); foreach ($hardpoint_list->getHardpoints() as $hardpoint) { $lines = $this->describeHardpoint($ref, $hardpoint, $depth + 1); foreach ($lines as $line) { $out[] = $line; } } return $out; } private function describeHardpoint( ArcanistRef $ref, ArcanistHardpoint $hardpoint, $depth) { $indent = str_repeat(' ', $depth); $children = array(); $values = array(); $hardpoint_key = $hardpoint->getHardpointKey(); if ($ref->hasAttachedHardpoint($hardpoint_key)) { $mode = '*'; $value = $ref->getHardpoint($hardpoint_key); if ($value instanceof ArcanistRef) { $children[] = $value; } else if (is_array($value)) { foreach ($value as $key => $child) { if ($child instanceof ArcanistRef) { $children[] = $child; } else { $values[] = $value; } } } else { $values[] = $value; } } else { $mode = 'o'; } $out = array(); $out[] = tsprintf( "%s%s [%s] %s\n", $indent, $mode, get_class($hardpoint), $hardpoint->getHardpointKey()); foreach ($children as $child) { $lines = $this->describeRef($child, $depth + 1); foreach ($lines as $line) { $out[] = $line; } } foreach ($values as $value) { $lines = $this->describeValue($value, $depth + 1); foreach ($lines as $line) { $out[] = $line; } } return $out; } private function describeValue($value, $depth) { $indent = 
str_repeat(' ', $depth); if (is_string($value)) { $display_value = '"'.addcslashes(substr($value, 0, 64), "\n\r\t\\\"").'"'; } else if (is_scalar($value)) { $display_value = phutil_string_cast($value); } else if ($value === null) { $display_value = 'null'; } else { $display_value = phutil_describe_type($value); } $out = array(); $out[] = tsprintf( "%s> %s\n", $indent, $display_value); return $out; } private function exploreRefs(array $refs) { $seen = array(); $look = $refs; while ($look) { $ref_map = $this->getRefsByClass($look); $look = array(); $children = $this->inspectHardpoints($ref_map); foreach ($children as $child) { $hash = spl_object_hash($child); if (isset($seen[$hash])) { continue; } $seen[$hash] = true; $look[] = $child; } } } private function getRefsByClass(array $refs) { $ref_lists = array(); foreach ($refs as $ref) { $ref_lists[get_class($ref)][] = $ref; } foreach ($ref_lists as $ref_class => $refs) { $typical_ref = head($refs); $hardpoint_list = $typical_ref->getHardpointList(); $hardpoints = $hardpoint_list->getHardpoints(); if (!$hardpoints) { unset($ref_lists[$ref_class]); continue; } $hardpoint_keys = mpull($hardpoints, 'getHardpointKey'); $ref_lists[$ref_class] = array( 'keys' => $hardpoint_keys, 'refs' => $refs, ); } return $ref_lists; } private function inspectHardpoints(array $ref_lists) { foreach ($ref_lists as $ref_class => $spec) { $refs = $spec['refs']; $keys = $spec['keys']; $this->loadHardpoints($refs, $keys); } $child_refs = array(); foreach ($ref_lists as $ref_class => $spec) { $refs = $spec['refs']; $keys = $spec['keys']; foreach ($refs as $ref) { foreach ($keys as $key) { $value = $ref->getHardpoint($key); if (!is_array($value)) { $value = array($value); } foreach ($value as $child) { if ($child instanceof ArcanistRef) { $child_refs[] = $child; } } } } } return $child_refs; } }
codelabs-ch/arcanist
src/workflow/ArcanistInspectWorkflow.php
PHP
apache-2.0
6,770
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/lightsail/Lightsail_EXPORTS.h> #include <aws/core/utils/memory/stl/AWSVector.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <utility> namespace Aws { template<typename RESULT_TYPE> class AmazonWebServiceResult; namespace Utils { namespace Json { class JsonValue; } // namespace Json } // namespace Utils namespace Lightsail { namespace Model { class AWS_LIGHTSAIL_API GetActiveNamesResult { public: GetActiveNamesResult(); GetActiveNamesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); GetActiveNamesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); /** * <p>The list of active names returned by the get active names request.</p> */ inline const Aws::Vector<Aws::String>& GetActiveNames() const{ return m_activeNames; } /** * <p>The list of active names returned by the get active names request.</p> */ inline void SetActiveNames(const Aws::Vector<Aws::String>& value) { m_activeNames = value; } /** * <p>The list of active names returned by the get active names request.</p> */ inline void SetActiveNames(Aws::Vector<Aws::String>&& value) { m_activeNames = std::move(value); } /** * <p>The list of active names returned by the get active names request.</p> */ inline GetActiveNamesResult& WithActiveNames(const Aws::Vector<Aws::String>& value) { SetActiveNames(value); return *this;} /** * <p>The list of active names returned by the get active names request.</p> */ inline GetActiveNamesResult& WithActiveNames(Aws::Vector<Aws::String>&& value) { SetActiveNames(std::move(value)); return *this;} /** * <p>The list of active names returned by the get active names request.</p> */ inline GetActiveNamesResult& AddActiveNames(const Aws::String& value) { m_activeNames.push_back(value); return *this; } /** * <p>The list of active names returned by the get active names request.</p> */ inline GetActiveNamesResult& AddActiveNames(Aws::String&& value) { m_activeNames.push_back(std::move(value)); return *this; } /** * <p>The list of active names returned by the get active names request.</p> */ inline GetActiveNamesResult& AddActiveNames(const char* value) { m_activeNames.push_back(value); return *this; } /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline const Aws::String& GetNextPageToken() const{ return m_nextPageToken; } /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline void SetNextPageToken(const Aws::String& value) { m_nextPageToken = value; } /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline void 
SetNextPageToken(Aws::String&& value) { m_nextPageToken = std::move(value); } /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline void SetNextPageToken(const char* value) { m_nextPageToken.assign(value); } /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline GetActiveNamesResult& WithNextPageToken(const Aws::String& value) { SetNextPageToken(value); return *this;} /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline GetActiveNamesResult& WithNextPageToken(Aws::String&& value) { SetNextPageToken(std::move(value)); return *this;} /** * <p>The token to advance to the next page of results from your request.</p> <p>A * next page token is not returned if there are no more results to display.</p> * <p>To get the next page of results, perform another <code>GetActiveNames</code> * request and specify the next page token using the <code>pageToken</code> * parameter.</p> */ inline GetActiveNamesResult& WithNextPageToken(const char* value) { SetNextPageToken(value); return *this;} private: Aws::Vector<Aws::String> m_activeNames; Aws::String m_nextPageToken; }; } // namespace Model } // namespace Lightsail } // namespace Aws
jt70471/aws-sdk-cpp
aws-cpp-sdk-lightsail/include/aws/lightsail/model/GetActiveNamesResult.h
C
apache-2.0
6,025
select c1, c2, c3 from `filter/pushdown/DRILL_6174_test_data` where c2 = false;
Agirish/drill-test-framework
framework/resources/Functional/filter/pushdown/boolean_null/DRILL_6174_filter_push_down_eq_false.sql
SQL
apache-2.0
79
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #define s2n_likely(x) __builtin_expect(!!(x), 1) #define s2n_unlikely(x) __builtin_expect(!!(x), 0) /** * s2n_ensure provides low-level safety check functionality * * This should only consumed directly by s2n_safety. * * Note: This module can be replaced by static analyzer implementation * to insert additional safety checks. */ /** * Ensures `cond` is true, otherwise `action` will be performed */ #define __S2N_ENSURE( cond, action ) do {if ( !(cond) ) { action; }} while (0) #define __S2N_ENSURE_LIKELY( cond, action ) do {if ( s2n_unlikely( !(cond) ) ) { action; }} while (0) #ifdef NDEBUG #define __S2N_ENSURE_DEBUG( cond, action ) do {} while (0) #else #define __S2N_ENSURE_DEBUG( cond, action ) __S2N_ENSURE_LIKELY((cond), action) #endif #define __S2N_ENSURE_PRECONDITION( result ) (s2n_likely(s2n_result_is_ok(result)) ? S2N_RESULT_OK : S2N_RESULT_ERROR) #ifdef NDEBUG #define __S2N_ENSURE_POSTCONDITION( result ) (S2N_RESULT_OK) #else #define __S2N_ENSURE_POSTCONDITION( result ) (s2n_likely(s2n_result_is_ok(result)) ? S2N_RESULT_OK : S2N_RESULT_ERROR) #endif #define __S2N_ENSURE_SAFE_MEMCPY( d , s , n , guard ) \ do { \ __typeof( n ) __tmp_n = ( n ); \ if ( s2n_likely( __tmp_n ) ) { \ void *r = s2n_ensure_memcpy_trace( (d), (s) , (__tmp_n), _S2N_DEBUG_LINE); \ guard(r); \ } \ } while(0) #define __S2N_ENSURE_SAFE_MEMSET( d , c , n , guard ) \ do { \ __typeof( n ) __tmp_n = ( n ); \ if ( s2n_likely( __tmp_n ) ) { \ __typeof( d ) __tmp_d = ( d ); \ guard( __tmp_d ); \ memset( __tmp_d, (c), __tmp_n); \ } \ } while(0) /** * `restrict` is a part of the c99 standard and will work with any C compiler. If you're trying to * compile with a C++ compiler `restrict` is invalid. However some C++ compilers support the behavior * of `restrict` using the `__restrict__` keyword. Therefore if the compiler supports `__restrict__` * use it. * * This is helpful for the benchmarks in tests/benchmark which use Google's Benchmark library and * are all written in C++. * * https://gcc.gnu.org/onlinedocs/gcc/Restricted-Pointers.html * */ #if defined(S2N___RESTRICT__SUPPORTED) extern void* s2n_ensure_memcpy_trace(void *__restrict__ to, const void *__restrict__ from, size_t size, const char *debug_str); #else extern void* s2n_ensure_memcpy_trace(void *restrict to, const void *restrict from, size_t size, const char *debug_str); #endif /** * These macros should not be used in validate functions. * All validate functions are also used in assumptions for CBMC proofs, * which should not contain __CPROVER_*_ok primitives. The use of these primitives * in assumptions may lead to spurious results. * When the code is being verified using CBMC, these properties are formally verified; * When the code is built in debug mode, they are checked as much as possible using assertions. * When the code is built in production mode, non-fatal properties are not checked. * Violations of these properties are undefined behaviour. 
*/ #ifdef CBMC # define S2N_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || __CPROVER_r_ok((base), (len))) # define S2N_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || __CPROVER_w_ok((base), (len))) #else /* the C runtime does not give a way to check these properties, * but we can at least check for nullness. */ # define S2N_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (base) != NULL) # define S2N_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (base) != NULL) #endif /* CBMC */ /** * These macros can safely be used in validate functions. */ #define S2N_MEM_IS_READABLE(base, len) (((len) == 0) || (base) != NULL) #define S2N_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base) != NULL) #define S2N_OBJECT_PTR_IS_READABLE(ptr) ((ptr) != NULL) #define S2N_OBJECT_PTR_IS_WRITABLE(ptr) ((ptr) != NULL) #define S2N_IMPLIES(a, b) (!(a) || (b)) /** * If and only if (iff) is a biconditional logical connective between statements a and b. * Equivalent to (S2N_IMPLIES(a, b) && S2N_IMPLIES(b, a)). */ #define S2N_IFF(a, b) (!!(a) == !!(b)) /** * These macros are used to specify code contracts in CBMC proofs. * Define function contracts. * When the code is being verified using CBMC, these contracts are formally verified; * When the code is built in production mode, contracts are not checked. * Violations of the function contracts are undefined behaviour. */ #ifdef CBMC # define CONTRACT_ASSIGNS(...) __CPROVER_assigns(__VA_ARGS__) # define CONTRACT_ASSIGNS_ERR(...) CONTRACT_ASSIGNS(__VA_ARGS__, s2n_debug_str, s2n_errno) # define CONTRACT_REQUIRES(...) __CPROVER_requires(__VA_ARGS__) # define CONTRACT_ENSURES(...) __CPROVER_ensures(__VA_ARGS__) # define CONTRACT_INVARIANT(...) __CPROVER_loop_invariant(__VA_ARGS__) # define CONTRACT_RETURN_VALUE (__CPROVER_return_value) #else # define CONTRACT_ASSIGNS(...) # define CONTRACT_ASSIGNS_ERR(...) # define CONTRACT_REQUIRES(...) # define CONTRACT_ENSURES(...) # define CONTRACT_INVARIANT(...) # define CONTRACT_RETURN_VALUE #endif
awslabs/s2n
utils/s2n_ensure.h
C
apache-2.0
6,424
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.trino.plugin.phoenix5; import com.google.common.base.CharMatcher; import com.google.common.primitives.Shorts; import com.google.common.primitives.SignedBytes; import io.airlift.slice.Slice; import io.trino.spi.TrinoException; import io.trino.spi.block.Block; import io.trino.spi.block.BlockBuilder; import io.trino.spi.connector.ConnectorSession; import io.trino.spi.type.ArrayType; import io.trino.spi.type.CharType; import io.trino.spi.type.DecimalType; import io.trino.spi.type.Type; import io.trino.spi.type.VarcharType; import org.joda.time.DateTimeZone; import org.joda.time.chrono.ISOChronology; import java.lang.reflect.Array; import java.math.BigDecimal; import java.math.BigInteger; import java.math.MathContext; import java.sql.Date; import static com.google.common.base.Preconditions.checkArgument; import static io.airlift.slice.Slices.utf8Slice; import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.BooleanType.BOOLEAN; import static io.trino.spi.type.DateType.DATE; import static io.trino.spi.type.Decimals.decodeUnscaledValue; import static io.trino.spi.type.Decimals.encodeScaledValue; import static io.trino.spi.type.Decimals.encodeShortScaledValue; import static io.trino.spi.type.DoubleType.DOUBLE; import static io.trino.spi.type.IntegerType.INTEGER; import static io.trino.spi.type.RealType.REAL; import static io.trino.spi.type.SmallintType.SMALLINT; import static io.trino.spi.type.TinyintType.TINYINT; import static io.trino.spi.type.TypeUtils.readNativeValue; import static io.trino.spi.type.TypeUtils.writeNativeValue; import static java.lang.Float.floatToRawIntBits; import static java.lang.Float.intBitsToFloat; import static java.lang.Math.toIntExact; import static java.lang.String.format; import static java.util.Locale.ENGLISH; import static java.util.Objects.requireNonNull; import static java.util.concurrent.TimeUnit.DAYS; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.joda.time.DateTimeZone.UTC; public final class TypeUtils { private TypeUtils() {} public static String getArrayElementPhoenixTypeName(ConnectorSession session, PhoenixClient client, Type elementType) { if (elementType instanceof VarcharType) { return "VARCHAR"; } if (elementType instanceof CharType) { return "CHAR"; } if (elementType instanceof DecimalType) { return "DECIMAL"; } return client.toWriteMapping(session, elementType).getDataType().toUpperCase(ENGLISH); } public static Block jdbcObjectArrayToBlock(ConnectorSession session, Type type, Object[] elements) { BlockBuilder builder = type.createBlockBuilder(null, elements.length); for (Object element : elements) { writeNativeValue(type, builder, jdbcObjectToTrinoNative(session, element, type)); } return builder.build(); } public static Object[] getJdbcObjectArray(ConnectorSession session, Type elementType, Block block) { int positionCount = block.getPositionCount(); Object[] valuesArray = new 
Object[positionCount]; int subArrayLength = 1; for (int i = 0; i < positionCount; i++) { Object objectValue = trinoNativeToJdbcObject(session, elementType, readNativeValue(elementType, block, i)); valuesArray[i] = objectValue; if (objectValue != null && objectValue.getClass().isArray()) { subArrayLength = Math.max(subArrayLength, Array.getLength(objectValue)); } } if (elementType instanceof ArrayType) { handleArrayNulls(valuesArray, subArrayLength); } return valuesArray; } public static Object[] toBoxedArray(Object jdbcArray) { requireNonNull(jdbcArray, "jdbcArray is null"); checkArgument(jdbcArray.getClass().isArray(), "object is not an array: %s", jdbcArray.getClass().getName()); if (!jdbcArray.getClass().getComponentType().isPrimitive()) { return (Object[]) jdbcArray; } int elementCount = Array.getLength(jdbcArray); Object[] elements = new Object[elementCount]; for (int i = 0; i < elementCount; i++) { elements[i] = Array.get(jdbcArray, i); } return elements; } private static void handleArrayNulls(Object[] valuesArray, int length) { for (int i = 0; i < valuesArray.length; i++) { if (valuesArray[i] == null) { valuesArray[i] = new Object[length]; } } } private static Object jdbcObjectToTrinoNative(ConnectorSession session, Object jdbcObject, Type type) { if (jdbcObject == null) { return null; } if (BOOLEAN.equals(type) || BIGINT.equals(type) || DOUBLE.equals(type)) { return jdbcObject; } if (TINYINT.equals(type)) { return (long) (byte) jdbcObject; } if (SMALLINT.equals(type)) { return (long) (short) jdbcObject; } if (INTEGER.equals(type)) { return (long) (int) jdbcObject; } if (type instanceof ArrayType) { return jdbcObjectArrayToBlock(session, ((ArrayType) type).getElementType(), (Object[]) jdbcObject); } if (type instanceof DecimalType) { DecimalType decimalType = (DecimalType) type; BigDecimal value = (BigDecimal) jdbcObject; if (decimalType.isShort()) { return encodeShortScaledValue(value, decimalType.getScale()); } return encodeScaledValue(value, decimalType.getScale()); } if (REAL.equals(type)) { return (long) floatToRawIntBits((float) jdbcObject); } if (DATE.equals(type)) { long localMillis = ((Date) jdbcObject).getTime(); // Convert it to a ~midnight in UTC. 
long utcMillis = ISOChronology.getInstance().getZone().getMillisKeepLocal(UTC, localMillis); // convert to days return MILLISECONDS.toDays(utcMillis); } if (type instanceof VarcharType) { return utf8Slice((String) jdbcObject); } if (type instanceof CharType) { return utf8Slice(CharMatcher.is(' ').trimTrailingFrom((String) jdbcObject)); } throw new TrinoException(NOT_SUPPORTED, format("Unsupported type %s and object type %s", type, jdbcObject.getClass())); } private static Object trinoNativeToJdbcObject(ConnectorSession session, Type type, Object object) { if (object == null) { return null; } if (DOUBLE.equals(type) || BOOLEAN.equals(type) || BIGINT.equals(type)) { return object; } if (type instanceof DecimalType) { DecimalType decimalType = (DecimalType) type; if (decimalType.isShort()) { BigInteger unscaledValue = BigInteger.valueOf((long) object); return new BigDecimal(unscaledValue, decimalType.getScale(), new MathContext(decimalType.getPrecision())); } BigInteger unscaledValue = decodeUnscaledValue((Slice) object); return new BigDecimal(unscaledValue, decimalType.getScale(), new MathContext(decimalType.getPrecision())); } if (REAL.equals(type)) { return intBitsToFloat(toIntExact((long) object)); } if (TINYINT.equals(type)) { return SignedBytes.checkedCast((long) object); } if (SMALLINT.equals(type)) { return Shorts.checkedCast((long) object); } if (INTEGER.equals(type)) { return toIntExact((long) object); } if (DATE.equals(type)) { // convert to midnight in default time zone long millis = DAYS.toMillis((long) object); return new Date(UTC.getMillisKeepLocal(DateTimeZone.getDefault(), millis)); } if (type instanceof VarcharType || type instanceof CharType) { return ((Slice) object).toStringUtf8(); } if (type instanceof ArrayType) { // process subarray of multi-dimensional array return getJdbcObjectArray(session, ((ArrayType) type).getElementType(), (Block) object); } throw new TrinoException(NOT_SUPPORTED, "Unsupported type: " + type); } }
dain/presto
plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/TypeUtils.java
Java
apache-2.0
9,157
<div class="contentbox"> <form id="form1" name="form1" method="post" action=""> <table cellspacing="2" cellpadding="5" width="100%"> <tr> <th colspan="2" align="left"><span style="float:left"><?php echo $id > 0 ? '修改' : '添加';?>文章</span><span style="float:right"><a href="weixin.php?type=wxnewlist">返回内容列表</a></span></th> </tr> <tr> <td class="label" width="15%">所在分类:</td> <td width="85%"> <select name="cat_id" id="cat_id"> <?php if(!empty($catids)){ foreach($catids as $row){ ?> <option value="<?php echo $row['id'];?>" <?php echo $row['id']==$rt['cat_id'] ? 'selected="selected"' : '';?>><?php echo $row['cat_name'];?></option> <?php if(!empty($row['cat_id'])){ foreach($row['cat_id'] as $rows){ ?> <option value="<?php echo $rows['id'];?>" <?php echo $rows['id']==$rt['cat_id'] ? 'selected="selected"' : '';?>>&nbsp;&nbsp;<?php echo $rows['cat_name'];?></option> <?php if(!empty($rows['cat_id'])){ foreach($rows['cat_id'] as $rowss){ ?> <option value="<?php echo $rowss['id'];?>" <?php echo $rowss['id']==$rt['cat_id'] ? 'selected="selected"' : '';?>>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<?php echo $rowss['cat_name'];?></option> <?php }//end foreach }//end if }//end foreach } // end if }//end foreach } ?> </select> </td> </tr> <tr> <td class="label">文章标题:</td> <td><input name="article_title" id="article_title" type="text" size="43" value="<?php echo isset($rt['article_title']) ? $rt['article_title'] : '';?>"><span style="color:#FF0000">*</span><span class="article_title_mes"></span></td> </tr> <tr> <td class="label">关键词:</td> <td><input name="keyword" id="keyword" type="text" size="43" value="<?php echo isset($rt['keyword']) ? $rt['keyword'] : '';?>"></td> </tr> <tr> <td class="label">外部链接:</td> <td><input name="art_url" id="art_url" type="text" size="43" value="<?php echo isset($rt['art_url']) ? $rt['art_url'] : '';?>"></td> </tr> <tr> <td class="label">图文:</td> <td> <input name="article_img" id="articlephoto" type="hidden" value="<?php echo isset($rt['article_img']) ? $rt['article_img'] : '';?>" size="43"/> <br /> <iframe id="iframe_t" name="iframe_t" border="0" src="upload.php?action=<?php echo isset($rt['article_img'])&&!empty($rt['article_img'])? 'show' : '';?>&ty=articlephoto&files=<?php echo isset($rt['article_img']) ? $rt['article_img'] : '';?>" scrolling="no" width="445" frameborder="0" height="25"></iframe> </td> </tr> <tr> <td class="label">文章描述:</td> <td><textarea name="about" id="meta_desc" style="width: 60%; height: 65px; overflow: auto;"><?php echo isset($rt['about']) ? $rt['about'] : '';?></textarea></td> </tr> <tr> <td class="label">状态设置:</td> <td><input id="is_show" name="is_show" value="1" type="checkbox" <?php echo !isset($rt['is_show']) || $rt['is_show']==1 ? 'checked="checked"' : '';?>>审核</td> </tr> <tr> <td class="label">文章内容:</td> <td><textarea name="content" id="content" style="width:95%;height:500px;display:none;"><?php echo isset($rt['content']) ? $rt['content'] : '';?></textarea> <script>KE.show({id : 'content',cssPath : '<?php echo ADMIN_URL.'/css/edit.css';?>'});</script> </td> </tr> <tr> <td>&nbsp;</td> <td align="left"> <input type="hidden" name="type" value="img" /> <input class="new_save" value="<?php echo $type=='newedit' ? 
'修改' : '添加';?>保存" type="Submit" style="cursor:pointer"> </td> </tr> </table> </form> </div>
<?php $thisurl = ADMIN_URL.'weixin.php'; ?>
<script type="text/javascript">
<!--
//jQuery(document).ready(function($){
$('.new_save').click(function(){
    // Validate that the article title is present before allowing the form to submit.
    var art_title = $('#article_title').val();
    if(typeof art_title == 'undefined' || art_title == ""){
        $('.article_title_mes').html("文章标题不能为空!");
        $('.article_title_mes').css('color','#FE0000');
        return false;
    }
    return true;
});
//});
-->
</script>
zhaoshengloveqingqing/fenxiao
admin/app/weixin/template/infos.php
PHP
apache-2.0
4,144
/**
 *
 * Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.wso2.developerstudio.che.ext.esb.client.editor.multipage;

import com.codenvy.ide.api.editor.EditorPartPresenter;
import com.codenvy.ide.api.mvp.View;
import com.codenvy.ide.jseditor.client.texteditor.ConfigurableTextEditor;
import com.google.inject.ImplementedBy;

@ImplementedBy(ESBMultiPageEditorViewImpl.class)
public interface ESBMultiPageEditorView extends View<ESBMultiPageEditorView.ActionDelegate> {

    void showTextEditor();

    void showGraphicalEditor();

    void addTextEditor(ConfigurableTextEditor editor);

    void addGraphicalEditor(EditorPartPresenter editor);

    interface ActionDelegate {
        void onTextEditorChosen();
        void onGraphicalEditorChosen();
    }
}
liurl3/cloud-dev-studio
extensions/artifacts/org.wso2.developerstudio.che.ext.esb/src/main/java/org/wso2/developerstudio/che/ext/esb/client/editor/multipage/ESBMultiPageEditorView.java
Java
apache-2.0
1,344
<div class="trip-add-destination-box" ng-show="editedDestination.isEditing && isAddPOIshowned"> <div class="sticky-container"> <div class="dialog-content"> <lx-text-field label="[['Point of Interest' | translate]]"> <input type="text" ng-model="tempPOI.name" focus-on="add-dialog-show" places-auto-complete types="['establishment']" placeholder="" on-place-changed="placeChanged(place)"> </lx-text-field> </div> </div> <div class="box-action-buttons"> <span flex></span> <a class="btn btn--m btn--flat" lx-ripple ng-click="closeAddPOIBox()" translate> Cancel </a> <a class="btn btn--m btn--primary btn--raised" ng-disabled="!tempPOI.readyToSave()" ng-click="addPOI()" lx-ripple translate> Add New Point of Interest </a> </div> </div>
susanrosito/vacaciones-permanentes
views/components/add-poi-dialog.html
HTML
apache-2.0
912
#editbar { border-left: 1px solid #888; border-top: 1px solid #888; border-right: 1px solid #888; overflow: hidden; font-family: sans-serif; font-size: 13px; }
#editbar .inner { width: 100%; padding: 0; margin: 0; border: none; }
#editbar .current { display: block !important; }
#editbar .menu { overflow: hidden; background: white; background: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#EBF1FF)); background: -moz-linear-gradient(top, #fff, #EBF1FF); }
#editbar .group { float: left; height: 26px; margin: 3px; padding-right: 6px; }
#editbar .group-right { float: right; }
#editbar .group-right .tab.format { margin-top: 4px; }
#editbar .group-separator { border-right: 1px solid #ddd; }
#editbar .button { width: 22px; height: 22px; background: #e7ecfb url(/images/editbar-buttons.png); border: 1px solid #ddd; text-indent: -100px; cursor: pointer; overflow: hidden; padding: 1px; display: block; float: left; margin: 0 2px; -moz-border-radius: 2px; -webkit-border-radius: 2px; -khtml-border-radius: 2px; border-radius: 2px; background-repeat: no-repeat; }
#editbar .button:hover { background-color: #d9dde7; border-color: #aaa; }
#editbar .bold { background-position: -97px 4px; }
#editbar .italic { background-position: -147px 4px; }
#editbar .link { background-position: -197px 4px; }
#editbar .image { background-position: -247px 4px; }
#editbar .ul { background-position: 3px 4px; }
#editbar .ol { background-position: -47px 4px; }
#editbar .tab { float: left; display: block; }
#editbar .tab a { cursor: pointer; display: inline-block; float: left; height: 26px; padding-left: 18px; padding-right: 12px; line-height: 26px; text-decoration: none; background-image: url(/images/twiddle-right.png); background-position: 0 50%; background-repeat: no-repeat; color: blue; }
#editbar .tab a.open { background-image: url(/images/twiddle-down.png); color: #333; }
#editbar .tab a.open:hover { text-decoration: none; }
#editbar .tab a:hover { text-decoration: underline; }
#editbar .sections { clear: both; float: left; width: 100%; overflow: visible; border-top: 1px solid #888; height: 185px; background-color: #E0EEF7; display: none; }
#editbar .sections .toc { float: left; width: 20%; overflow: auto; }
#editbar .sections .toc div { cursor: pointer; padding: 4px 4px 4px 6px; background-color: #E0EEF7; color: blue; }
#editbar .sections .toc div.current { cursor: default; background-color: white; color: #333; }
#editbar .sections .pages { overflow: auto; background-color: white; float: right; width: 80%; height: 185px; }
#editbar .sections .page { display: none; }
#editbar .sections .pages th { color: #999; font-weight: bold; padding: 5px; text-align: left; }
#editbar .sections .pages td { color: black; padding: 5px; border-top: 1px solid #eee; }
#editbar .sections .pages span.invisible { color: #bbb; padding-left: 1px; }
#editbar .sections .pages .shortcodes th { text-align: center; }
#editbar .sections .pages .shortcodes ul { list-style-type: none; }
greghill/TitanFork
doc/css/editbar.css
CSS
apache-2.0
3,829
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.util; import org.apache.calcite.avatica.util.DateTimeUtils; import org.apache.calcite.avatica.util.Spaces; import org.apache.calcite.config.CalciteSystemProperty; import org.apache.calcite.linq4j.Ord; import org.apache.calcite.runtime.CalciteException; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlValuesOperator; import org.apache.calcite.sql.fun.SqlRowOperator; import org.apache.calcite.sql.util.SqlBasicVisitor; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.collect.Collections2; import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import org.apiguardian.api.API; import org.checkerframework.checker.nullness.qual.Nullable; import org.checkerframework.checker.nullness.qual.PolyNull; import org.checkerframework.dataflow.qual.Pure; import org.slf4j.Logger; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.PrintStream; import java.io.PrintWriter; import java.io.Reader; import java.io.StringReader; import java.io.StringWriter; import java.io.UncheckedIOException; import java.io.Writer; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.math.BigDecimal; import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; import java.text.SimpleDateFormat; import java.util.AbstractCollection; import java.util.AbstractList; import java.util.AbstractMap; import java.util.AbstractSet; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collection; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Hashtable; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import 
java.util.Properties; import java.util.RandomAccess; import java.util.Set; import java.util.StringTokenizer; import java.util.TimeZone; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.ObjIntConsumer; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.jar.JarFile; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collector; import static org.apache.calcite.linq4j.Nullness.castNonNull; /** * Miscellaneous utility functions. */ public class Util { private static final int QUICK_DISTINCT = 15; private Util() {} //~ Static fields/initializers --------------------------------------------- /** * System-dependent newline character. * * <p>In general, you should not use this in expected results of tests. * Expected results should be the expected result on Linux (or Mac OS) using * '\n'. Apply {@link Util#toLinux(String)} to Windows actual results, if * necessary, to make them look like Linux actual.</p> */ public static final String LINE_SEPARATOR = System.getProperty("line.separator"); /** * System-dependent file separator, for example, "/" or "\." */ public static final String FILE_SEPARATOR = System.getProperty("file.separator"); /** * Datetime format string for generating a timestamp string to be used as * part of a filename. Conforms to SimpleDateFormat conventions. */ public static final String FILE_TIMESTAMP_FORMAT = "yyyy-MM-dd_HH_mm_ss"; /** * Regular expression for a valid java identifier which contains no * underscores and can therefore be returned intact by {@link #toJavaId}. */ private static final Pattern JAVA_ID_PATTERN = Pattern.compile("[a-zA-Z_$][a-zA-Z0-9$]*"); private static final Charset DEFAULT_CHARSET = Charset.forName(CalciteSystemProperty.DEFAULT_CHARSET.value()); /** * Maps classes to the map of their enum values. Uses a weak map so that * classes are not prevented from being unloaded. */ @SuppressWarnings("unchecked") private static final LoadingCache<Class, Map<String, Enum>> ENUM_CONSTANTS = CacheBuilder.newBuilder() .weakKeys() .build(CacheLoader.from(Util::enumConstants)); //~ Methods ---------------------------------------------------------------- /** * Does nothing with its argument. Returns whether it is ensured that * the call produces a single value * * @param call the expression to evaluate * @return Whether it is ensured that the call produces a single value */ public static boolean isSingleValue(SqlCall call) { if (call.getOperator() instanceof SqlAggFunction) { return true; } else if (call.getOperator() instanceof SqlValuesOperator || call.getOperator() instanceof SqlRowOperator) { List<SqlNode> operands = call.getOperandList(); if (operands.size() == 1) { SqlNode operand = operands.get(0); if (operand instanceof SqlLiteral) { return true; } else if (operand instanceof SqlCall) { return isSingleValue((SqlCall) operand); } } return false; } else { boolean isScalar = true; for (SqlNode operand : call.getOperandList()) { if (operand instanceof SqlLiteral) { continue; } if (!(operand instanceof SqlCall) || !Util.isSingleValue((SqlCall) operand)) { isScalar = false; break; } } return isScalar; } } /** * Does nothing with its argument. Call this method when you have a value * you are not interested in, but you don't want the compiler to warn that * you are not using it. */ public static void discard(@Nullable Object o) { if (false) { discard(o); } } /** * Does nothing with its argument. 
Call this method when you have a value * you are not interested in, but you don't want the compiler to warn that * you are not using it. */ public static void discard(int i) { if (false) { discard(i); } } /** * Does nothing with its argument. Call this method when you have a value * you are not interested in, but you don't want the compiler to warn that * you are not using it. */ public static void discard(boolean b) { if (false) { discard(b); } } /** * Does nothing with its argument. Call this method when you have a value * you are not interested in, but you don't want the compiler to warn that * you are not using it. */ public static void discard(double d) { if (false) { discard(d); } } /** * Records that an exception has been caught but will not be re-thrown. If * the tracer is not null, logs the exception to the tracer. * * @param e Exception * @param logger If not null, logs exception to this logger */ public static void swallow( Throwable e, @Nullable Logger logger) { if (logger != null) { logger.debug("Discarding exception", e); } } /** * Returns whether two lists are equal to each other using shallow * comparisons. * * @param list0 First list * @param list1 Second list * @return Whether lists are same length and all of their elements are * equal using {@code ==} (may be null). */ public static <T> boolean equalShallow( List<? extends T> list0, List<? extends T> list1) { if (list0.size() != list1.size()) { return false; } for (int i = 0; i < list0.size(); i++) { if (list0.get(i) != list1.get(i)) { return false; } } return true; } /** * Combines two integers into a hash code. * * @deprecated Use {@link Objects#hash(Object...)} */ @Deprecated // to be removed before 2.0 public static int hash( int i, int j) { return (i << 4) ^ j; } /** * Computes a hash code from an existing hash code and an object (which may * be null). * * @deprecated Use {@link Objects#hash(Object...)} */ @Deprecated // to be removed before 2.0 public static int hash( int h, @Nullable Object o) { int k = (o == null) ? 0 : o.hashCode(); return ((h << 4) | h) ^ k; } /** * Computes a hash code from an existing hash code and an array of objects * (which may be null). * * @deprecated Use {@link Objects#hash(Object...)} */ @Deprecated // to be removed before 2.0 public static int hashArray( int h, Object[] a) { return h ^ Arrays.hashCode(a); } /** Computes the hash code of a {@code double} value. Equivalent to * {@link Double}{@code .hashCode(double)}, but that method was only * introduced in JDK 1.8. * * @param v Value * @return Hash code * * @deprecated Use {@link org.apache.calcite.runtime.Utilities#hashCode(double)} */ @Deprecated // to be removed before 2.0 public static int hashCode(double v) { long bits = Double.doubleToLongBits(v); return (int) (bits ^ (bits >>> 32)); } /** * Returns a set of the elements which are in <code>set1</code> but not in * <code>set2</code>, without modifying either. */ public static <T> Set<T> minus(Set<T> set1, Set<T> set2) { if (set1.isEmpty()) { return set1; } else if (set2.isEmpty()) { return set1; } else { Set<T> set = new HashSet<>(set1); set.removeAll(set2); return set; } } /** * Computes <code>nlogn(n)</code> using the natural logarithm (or <code> * n</code> if <code>n &lt; {@link Math#E}</code>, so the result is never * negative. */ public static double nLogN(double d) { return (d < Math.E) ? d : (d * Math.log(d)); } /** * Computes <code>nlog(m)</code> using the natural logarithm (or <code> * n</code> if <code>m &lt; {@link Math#E}</code>, so the result is never * negative. 
*/ public static double nLogM(double n, double m) { return (m < Math.E) ? n : (n * Math.log(m)); } /** * Prints an object using reflection. We can handle <code>null</code>; * arrays of objects and primitive values; for regular objects, we print all * public fields. */ public static void print( PrintWriter pw, Object o) { print(pw, o, 0); } @SuppressWarnings("JdkObsolete") public static void print( PrintWriter pw, @Nullable Object o, int indent) { if (o == null) { pw.print("null"); return; } Class clazz = o.getClass(); if (o instanceof String) { printJavaString(pw, (String) o, true); } else if ( (clazz == Integer.class) || (clazz == Boolean.class) || (clazz == Character.class) || (clazz == Byte.class) || (clazz == Short.class) || (clazz == Long.class) || (clazz == Float.class) || (clazz == Double.class) || (clazz == Void.class)) { pw.print(o.toString()); } else if (clazz.isArray()) { // o is an array, but we can't cast to Object[] because it may be // an array of primitives. Object[] a; // for debug if (o instanceof Object[]) { a = (Object[]) o; discard(a); } int n = Array.getLength(o); pw.print("{"); for (int i = 0; i < n; i++) { if (i > 0) { pw.println(","); } else { pw.println(); } for (int j = 0; j < indent; j++) { pw.print("\t"); } print( pw, Array.get(o, i), indent + 1); } pw.print("}"); } else if (o instanceof Iterator) { pw.print(clazz.getName()); Iterator iter = (Iterator) o; pw.print(" {"); int i = 0; while (iter.hasNext()) { if (i++ > 0) { pw.println(","); } print( pw, iter.next(), indent + 1); } pw.print("}"); } else if (o instanceof Enumeration) { pw.print(clazz.getName()); Enumeration e = (Enumeration) o; pw.print(" {"); int i = 0; while (e.hasMoreElements()) { if (i++ > 0) { pw.println(","); } print( pw, e.nextElement(), indent + 1); } pw.print("}"); } else { pw.print(clazz.getName()); pw.print(" {"); Field[] fields = clazz.getFields(); int printed = 0; for (Field field : fields) { if (Modifier.isStatic(field.getModifiers())) { continue; } if (printed++ > 0) { pw.println(","); } else { pw.println(); } for (int j = 0; j < indent; j++) { pw.print("\t"); } pw.print(field.getName()); pw.print("="); Object val; try { val = field.get(o); } catch (IllegalAccessException e) { throw new RuntimeException(e); } print(pw, val, indent + 1); } pw.print("}"); } } /** * Prints a string, enclosing in double quotes (") and escaping if * necessary. For example, <code>printDoubleQuoted(w,"x\"y",false)</code> * prints <code>"x\"y"</code>. * * <p>The appendable where the value is printed must not incur I/O operations. 
This method is * not meant to be used for writing the values to permanent storage.</p> * * @throws IllegalStateException if the print to the specified appendable fails due to I/O */ public static void printJavaString( Appendable appendable, @Nullable String s, boolean nullMeansNull) { try { if (s == null) { if (nullMeansNull) { appendable.append("null"); } } else { String s1 = replace(s, "\\", "\\\\"); String s2 = replace(s1, "\"", "\\\""); String s3 = replace(s2, "\n\r", "\\n"); String s4 = replace(s3, "\n", "\\n"); String s5 = replace(s4, "\r", "\\r"); appendable.append('"'); appendable.append(s5); appendable.append('"'); } } catch (IOException ioe) { throw new IllegalStateException("The specified appendable should not incur I/O.", ioe); } } public static void println( PrintWriter pw, Object o) { print(pw, o, 0); pw.println(); } /** * Formats a {@link BigDecimal} value to a string in scientific notation For * example<br> * * <ul> * <li>A value of 0.00001234 would be formated as <code>1.234E-5</code></li> * <li>A value of 100000.00 would be formated as <code>1.00E5</code></li> * <li>A value of 100 (scale zero) would be formated as * <code>1E2</code></li> * </ul> * * <p>If <code>bd</code> has a precision higher than 20, this method will * truncate the output string to have a precision of 20 (no rounding will be * done, just a truncate). */ public static String toScientificNotation(BigDecimal bd) { final int truncateAt = 20; String unscaled = bd.unscaledValue().toString(); if (bd.signum() < 0) { unscaled = unscaled.substring(1); } int len = unscaled.length(); int scale = bd.scale(); int e = len - scale - 1; StringBuilder ret = new StringBuilder(); if (bd.signum() < 0) { ret.append('-'); } // do truncation unscaled = unscaled.substring( 0, Math.min(truncateAt, len)); ret.append(unscaled.charAt(0)); if (scale == 0) { // trim trailing zeros since they aren't significant int i = unscaled.length(); while (i > 1) { if (unscaled.charAt(i - 1) != '0') { break; } --i; } unscaled = unscaled.substring(0, i); } if (unscaled.length() > 1) { ret.append("."); ret.append(unscaled.substring(1)); } ret.append("E"); ret.append(e); return ret.toString(); } /** * Replaces every occurrence of <code>find</code> in <code>s</code> with * <code>replace</code>. */ public static String replace( String s, String find, String replace) { // let's be optimistic int found = s.indexOf(find); if (found == -1) { return s; } StringBuilder sb = new StringBuilder(s.length()); int start = 0; for (;;) { for (; start < found; start++) { sb.append(s.charAt(start)); } if (found == s.length()) { break; } sb.append(replace); start += find.length(); found = s.indexOf(find, start); if (found == -1) { found = s.length(); } } return sb.toString(); } /** * Creates a file-protocol URL for the given file. */ @Deprecated // to be removed before 2.0 public static URL toURL(File file) throws MalformedURLException { String path = file.getAbsolutePath(); // This is a bunch of weird code that is required to // make a valid URL on the Windows platform, due // to inconsistencies in what getAbsolutePath returns. String fs = System.getProperty("file.separator"); if (fs.length() == 1) { char sep = fs.charAt(0); if (sep != '/') { path = path.replace(sep, '/'); } if (path.charAt(0) != '/') { path = '/' + path; } } path = "file://" + path; return new URL(path); } /** * Gets a timestamp string for use in file names. The generated timestamp * string reflects the current time. 
*/ @Deprecated // to be removed before 2.0 @SuppressWarnings("JavaUtilDate") public static String getFileTimestamp() { SimpleDateFormat sdf = new SimpleDateFormat(FILE_TIMESTAMP_FORMAT, Locale.ROOT); return sdf.format(new java.util.Date()); } /** * Converts double-quoted Java strings to their contents. For example, * <code>"foo\"bar"</code> becomes <code>foo"bar</code>. */ public static String stripDoubleQuotes(String value) { assert value.charAt(0) == '"'; assert value.charAt(value.length() - 1) == '"'; String s5 = value.substring(1, value.length() - 1); String s4 = Util.replace(s5, "\\r", "\r"); String s3 = Util.replace(s4, "\\n", "\n"); String s2 = Util.replace(s3, "\\\"", "\""); String s1 = Util.replace(s2, "\\\\", "\\"); return s1; } /** * Converts an arbitrary string into a string suitable for use as a Java * identifier. * * <p>The mapping is one-to-one (that is, distinct strings will produce * distinct java identifiers). The mapping is also reversible, but the * inverse mapping is not implemented.</p> * * <p>A valid Java identifier must start with a Unicode letter, underscore, * or dollar sign ($). The other characters, if any, can be a Unicode * letter, underscore, dollar sign, or digit.</p> * * <p>This method uses an algorithm similar to URL encoding. Valid * characters are unchanged; invalid characters are converted to an * underscore followed by the hex code of the character; and underscores are * doubled.</p> * * <p>Examples: * * <ul> * <li><code>toJavaId("foo")</code> returns <code>"foo"</code> * <li><code>toJavaId("foo bar")</code> returns <code>"foo_20_bar"</code> * <li><code>toJavaId("foo_bar")</code> returns <code>"foo__bar"</code> * <li><code>toJavaId("0bar")</code> returns <code>"_40_bar"</code> (digits * are illegal as a prefix) * <li><code>toJavaId("foo0bar")</code> returns <code>"foo0bar"</code> * </ul> */ public static String toJavaId( String s, int ordinal) { // If it's already a valid Java id (and doesn't contain any // underscores), return it unchanged. if (JAVA_ID_PATTERN.matcher(s).matches()) { // prepend "ID$" to string so it doesn't clash with java keywords return "ID$" + ordinal + "$" + s; } // Escape underscores and other undesirables. StringBuilder buf = new StringBuilder(s.length() + 10); buf.append("ID$"); buf.append(ordinal); buf.append("$"); for (int i = 0; i < s.length(); i++) { char c = s.charAt(i); if (c == '_') { buf.append("__"); } else if ( (c < 0x7F) /* Normal ascii character */ && !Character.isISOControl(c) && ((i == 0) ? Character.isJavaIdentifierStart(c) : Character.isJavaIdentifierPart(c))) { buf.append(c); } else { buf.append("_"); buf.append(Integer.toString(c, 16)); buf.append("_"); } } return buf.toString(); } /** * Returns true when input string is a valid Java identifier. * @param s input string * @return true when input string is a valid Java identifier */ public static boolean isValidJavaIdentifier(String s) { if (s.isEmpty()) { return false; } if (!Character.isJavaIdentifierStart(s.codePointAt(0))) { return false; } int i = 0; while (i < s.length()) { int codePoint = s.codePointAt(i); if (!Character.isJavaIdentifierPart(codePoint)) { return false; } i += Character.charCount(codePoint); } return true; } public static String toLinux(String s) { return s.replace("\r\n", "\n"); } /** * Materializes the results of a {@link java.util.Iterator} as a * {@link java.util.List}. 
* * @param iter iterator to materialize * @return materialized list */ @Deprecated // to be removed before 2.0 public static <T> List<T> toList(Iterator<T> iter) { List<T> list = new ArrayList<>(); while (iter.hasNext()) { list.add(iter.next()); } return list; } /** * Returns whether s == null or if s.length() == 0. */ public static boolean isNullOrEmpty(@Nullable String s) { return (null == s) || (s.length() == 0); } /** * Converts a list of a string, with commas between elements. * * <p>For example, * <code>commaList(Arrays.asList({"a", "b"}))</code> * returns "a, b". * * @param list List * @return String representation of string */ public static <T> String commaList(List<T> list) { return sepList(list, ", "); } /** Converts a list of a string, with a given separator between elements. */ public static <T> String sepList(List<T> list, String sep) { final int max = list.size() - 1; switch (max) { case -1: return ""; case 0: return String.valueOf(list.get(0)); default: break; } final StringBuilder buf = new StringBuilder(); for (int i = 0;; i++) { buf.append(list.get(i)); if (i == max) { return buf.toString(); } buf.append(sep); } } /** Prints a collection of elements to a StringBuilder, in the same format as * {@link AbstractCollection#toString()}. */ public static <E> StringBuilder printIterable(StringBuilder sb, Iterable<E> iterable) { final Iterator<E> it = iterable.iterator(); if (!it.hasNext()) { return sb.append("[]"); } sb.append('['); for (;;) { final E e = it.next(); sb.append(e); if (!it.hasNext()) { return sb.append(']'); } sb.append(", "); } } /** Prints a set of elements to a StringBuilder, in the same format same as * {@link AbstractCollection#toString()}. * * <p>The 'set' is represented by the number of elements and an action to * perform for each element. * * <p>This method can be a very efficient way to convert a structure to a * string, because the components can write directly to the StringBuilder * rather than constructing intermediate strings. * * @see org.apache.calcite.linq4j.function.Functions#generate */ public static <E> StringBuilder printList(StringBuilder sb, int elementCount, ObjIntConsumer<StringBuilder> consumer) { if (elementCount == 0) { return sb.append("[]"); } sb.append('['); for (int i = 0;;) { consumer.accept(sb, i); if (++i == elementCount) { return sb.append(']'); } sb.append(", "); } } /** * Returns the {@link Charset} object representing the value of * {@link CalciteSystemProperty#DEFAULT_CHARSET}. 
* * @throws java.nio.charset.IllegalCharsetNameException If the given charset * name is illegal * @throws java.nio.charset.UnsupportedCharsetException If no support for * the named charset is * available in this * instance of the Java * virtual machine */ public static Charset getDefaultCharset() { return DEFAULT_CHARSET; } // CHECKSTYLE: IGNORE 1 /** @deprecated Throw new {@link AssertionError} */ @Deprecated // to be removed before 2.0 public static Error newInternal() { return new AssertionError("(unknown cause)"); } // CHECKSTYLE: IGNORE 1 /** @deprecated Throw new {@link AssertionError} */ @Deprecated // to be removed before 2.0 public static Error newInternal(String s) { return new AssertionError(s); } // CHECKSTYLE: IGNORE 1 /** @deprecated Throw new {@link RuntimeException} if checked; throw raw * exception if unchecked or {@link Error} */ @Deprecated // to be removed before 2.0 public static Error newInternal(Throwable e) { return new AssertionError(e); } // CHECKSTYLE: IGNORE 1 /** @deprecated Throw new {@link AssertionError} if applicable; * or {@link RuntimeException} if e is checked; * or raw exception if e is unchecked or {@link Error}. */ @SuppressWarnings("MissingSummary") public static Error newInternal(Throwable e, String s) { return new AssertionError("Internal error: " + s, e); } /** As {@link Throwables}{@code .throwIfUnchecked(Throwable)}, * which was introduced in Guava 20, * but we don't require Guava version 20 yet. */ public static void throwIfUnchecked(Throwable throwable) { Bug.upgrade("Remove when minimum Guava version is 20"); Objects.requireNonNull(throwable, "throwable"); if (throwable instanceof RuntimeException) { throw (RuntimeException) throwable; } if (throwable instanceof Error) { throw (Error) throwable; } } /** * This method rethrows input throwable as is (if its unchecked) or * wraps it with {@link RuntimeException} and throws. * <p>The typical usage would be {@code throw throwAsRuntime(...)}, where {@code throw} statement * is needed so Java compiler knows the execution stops at that line.</p> * * @param throwable input throwable * @return the method never returns, it always throws an unchecked exception */ @API(since = "1.26", status = API.Status.EXPERIMENTAL) public static RuntimeException throwAsRuntime(Throwable throwable) { throwIfUnchecked(throwable); throw new RuntimeException(throwable); } /** * This method rethrows input throwable as is (if its unchecked) with an extra message or * wraps it with {@link RuntimeException} and throws. * <p>The typical usage would be {@code throw throwAsRuntime(...)}, where {@code throw} statement * is needed so Java compiler knows the execution stops at that line.</p> * * @param throwable input throwable * @return the method never returns, it always throws an unchecked exception */ @API(since = "1.26", status = API.Status.EXPERIMENTAL) public static RuntimeException throwAsRuntime(String message, Throwable throwable) { if (throwable instanceof RuntimeException) { throwable.addSuppressed(new Throwable(message)); throw (RuntimeException) throwable; } if (throwable instanceof Error) { throwable.addSuppressed(new Throwable(message)); throw (Error) throwable; } if (throwable instanceof IOException) { return new UncheckedIOException(message, (IOException) throwable); } throw new RuntimeException(message, throwable); } /** * Wraps an exception with {@link RuntimeException} and return it. * If the exception is already an instance of RuntimeException, * returns it directly. 
*/ public static RuntimeException toUnchecked(Exception e) { if (e instanceof RuntimeException) { return (RuntimeException) e; } if (e instanceof IOException) { return new UncheckedIOException((IOException) e); } return new RuntimeException(e); } /** * Returns cause of the given throwable if it is non-null or the throwable itself. * @param throwable input throwable * @return cause of the given throwable if it is non-null or the throwable itself */ @API(since = "1.26", status = API.Status.EXPERIMENTAL) public static Throwable causeOrSelf(Throwable throwable) { Throwable cause = throwable.getCause(); return cause != null ? cause : throwable; } /** * Retrieves messages in a exception and writes them to a string. In the * string returned, each message will appear on a different line. * * @return a non-null string containing all messages of the exception */ @Deprecated // to be removed before 2.0 public static String getMessages(Throwable t) { StringBuilder sb = new StringBuilder(); for (Throwable curr = t; curr != null; curr = curr.getCause()) { String msg = ((curr instanceof CalciteException) || (curr instanceof SQLException)) ? curr.getMessage() : curr.toString(); if (sb.length() > 0) { sb.append("\n"); } sb.append(msg); } return sb.toString(); } /** * Returns the stack trace of a throwable. Called from native code. * * @param t Throwable * @return Stack trace * * @deprecated Use {@link com.google.common.base.Throwables#getStackTraceAsString(Throwable)} */ @Deprecated // to be removed before 2.0 public static String getStackTrace(Throwable t) { final StringWriter sw = new StringWriter(); final PrintWriter pw = new PrintWriter(sw); t.printStackTrace(pw); pw.flush(); return sw.toString(); } // CHECKSTYLE: IGNORE 1 /** @deprecated Use {@link Preconditions#checkArgument} * or {@link Objects#requireNonNull(Object)} */ @Deprecated // to be removed before 2.0 public static void pre(boolean b, String description) { if (!b) { throw new AssertionError("pre-condition failed: " + description); } } // CHECKSTYLE: IGNORE 1 /** @deprecated Use {@link Preconditions#checkArgument} * or {@link Objects#requireNonNull(Object)} */ @Deprecated // to be removed before 2.0 public static void post(boolean b, String description) { if (!b) { throw new AssertionError("post-condition failed: " + description); } } // CHECKSTYLE: IGNORE 1 /** @deprecated Use {@link Preconditions#checkArgument} */ @Deprecated // to be removed before 2.0 public static void permAssert(boolean b, String description) { if (!b) { throw new AssertionError("invariant violated: " + description); } } /** * Returns a {@link java.lang.RuntimeException} indicating that a particular * feature has not been implemented, but should be. * * <p>If every 'hole' in our functionality uses this method, it will be * easier for us to identity the holes. Throwing a * {@link java.lang.UnsupportedOperationException} isn't as good, because * sometimes we actually want to partially implement an API. * * <p>Example usage: * * <blockquote> * <pre><code>class MyVisitor extends BaseVisitor { * void accept(Foo foo) { * // Exception will identify which subclass forgot to override * // this method * throw Util.needToImplement(this); * } * }</code></pre> * </blockquote> * * @param o The object which was the target of the call, or null. Passing * the object gives crucial information if a method needs to be * overridden and a subclass forgot to do so. * @return an {@link UnsupportedOperationException}. 
*/ public static RuntimeException needToImplement(@Nullable Object o) { String description = null; if (o != null) { description = o.getClass().toString() + ": " + o.toString(); } throw new UnsupportedOperationException(description); } /** * Flags a piece of code as needing to be cleaned up before you check in. * * <p>Introduce a call to this method to indicate that a piece of code, or a * javadoc comment, needs work before you check in. If you have an IDE which * can easily trace references, this is an easy way to maintain a to-do * list. * * <p><strong>Checked-in code must never call this method</strong>: you must * remove all calls/references to this method before you check in. * * <p>The <code>argument</code> has generic type and determines the type of * the result. This allows you to use the method inside an expression, for * example * * <blockquote> * <pre><code>int x = Util.deprecated(0, false);</code></pre> * </blockquote> * * <p>but the usual usage is to pass in a descriptive string. * * <p><b>Examples</b> * * <p><b>Example #1: Using <code>deprecated</code> to fail if a piece of * supposedly dead code is reached</b> * * <blockquote> * <pre><code>void foo(int x) { * if (x &lt; 0) { * // If this code is executed, an error will be thrown. * Util.deprecated( * "no longer need to handle negative numbers", true); * bar(x); * } else { * baz(x); * } * }</code></pre> * </blockquote> * * <p><b>Example #2: Using <code>deprecated</code> to comment out dead * code</b> * * <blockquote> * <pre>if (Util.deprecated(false, false)) { * // This code will not be executed, but an error will not be thrown. * baz(); * }</pre> * </blockquote> * * @param argument Arbitrary argument to the method. * @param fail Whether to throw an exception if this method is called * @return The value of the <code>argument</code>. * @deprecated If a piece of code calls this method, it indicates that the * code needs to be cleaned up. */ public static <T> T deprecated(T argument, boolean fail) { if (fail) { throw new UnsupportedOperationException(); } return argument; } /** * Returns whether an array of strings contains a given string among the * first <code>length</code> entries. * * @param a Array of strings * @param length Number of entries to search * @param s String to seek * @return Whether array contains the name */ public static boolean contains( String[] a, int length, String s) { for (int i = 0; i < length; i++) { if (a[i].equals(s)) { return true; } } return false; } /** * Reads all remaining contents from a {@link java.io.Reader} and returns * them as a string. * * @param reader reader to read from * @return reader contents as string */ @Deprecated // to be removed before 2.0 public static String readAllAsString(Reader reader) throws IOException { StringBuilder sb = new StringBuilder(); char[] buf = new char[4096]; for (;;) { int n = reader.read(buf); if (n == -1) { break; } sb.append(buf, 0, n); } return sb.toString(); } /** * Closes a Jar, ignoring any I/O exception. This should only be * used in finally blocks when it's necessary to avoid throwing an exception * which might mask a real exception. * * @param jar jar to close */ @Deprecated // to be removed before 2.0 public static void squelchJar(@Nullable JarFile jar) { try { if (jar != null) { jar.close(); } } catch (IOException ex) { // intentionally suppressed } } /** * Closes an InputStream, ignoring any I/O exception. This should only be * used in finally blocks when it's necessary to avoid throwing an exception * which might mask a real exception. 
* * @param stream stream to close */ @Deprecated // to be removed before 2.0 public static void squelchStream(@Nullable InputStream stream) { try { if (stream != null) { stream.close(); } } catch (IOException ex) { // intentionally suppressed } } /** * Closes an OutputStream, ignoring any I/O exception. This should only be * used in finally blocks when it's necessary to avoid throwing an exception * which might mask a real exception. If you want to make sure that data has * been successfully flushed, do NOT use this anywhere else; use * stream.close() instead. * * @param stream stream to close */ @Deprecated // to be removed before 2.0 public static void squelchStream(@Nullable OutputStream stream) { try { if (stream != null) { stream.close(); } } catch (IOException ex) { // intentionally suppressed } } /** * Closes a Reader, ignoring any I/O exception. This should only be used in * finally blocks when it's necessary to avoid throwing an exception which * might mask a real exception. * * @param reader reader to close */ @Deprecated // to be removed before 2.0 public static void squelchReader(@Nullable Reader reader) { try { if (reader != null) { reader.close(); } } catch (IOException ex) { // intentionally suppressed } } /** * Closes a Writer, ignoring any I/O exception. This should only be used in * finally blocks when it's necessary to avoid throwing an exception which * might mask a real exception. If you want to make sure that data has been * successfully flushed, do NOT use this anywhere else; use writer.close() * instead. * * @param writer writer to close */ @Deprecated // to be removed before 2.0 public static void squelchWriter(@Nullable Writer writer) { try { if (writer != null) { writer.close(); } } catch (IOException ex) { // intentionally suppressed } } /** * Closes a Statement, ignoring any SQL exception. This should only be used * in finally blocks when it's necessary to avoid throwing an exception * which might mask a real exception. * * @param stmt stmt to close */ @Deprecated // to be removed before 2.0 public static void squelchStmt(@Nullable Statement stmt) { try { if (stmt != null) { stmt.close(); } } catch (SQLException ex) { // intentionally suppressed } } /** * Closes a Connection, ignoring any SQL exception. This should only be used * in finally blocks when it's necessary to avoid throwing an exception * which might mask a real exception. * * @param connection connection to close */ @Deprecated // to be removed before 2.0 public static void squelchConnection(@Nullable Connection connection) { try { if (connection != null) { connection.close(); } } catch (SQLException ex) { // intentionally suppressed } } /** * Trims trailing spaces from a string. * * @param s string to be trimmed * @return trimmed string */ @Deprecated // to be removed before 2.0 public static String rtrim(String s) { int n = s.length() - 1; if (n >= 0) { if (s.charAt(n) != ' ') { return s; } while (--n >= 0) { if (s.charAt(n) != ' ') { return s.substring(0, n + 1); } } } return ""; } /** * Pads a string with spaces up to a given length. * * @param s string to be padded * @param len desired length * @return padded string * * @deprecated Use {@link Spaces#padRight(String, int)} */ @Deprecated // to be removed before 2.0 public static String rpad(String s, int len) { if (s.length() >= len) { return s; } StringBuilder sb = new StringBuilder(s); while (sb.length() < len) { sb.append(' '); } return sb.toString(); } /** * Converts an iterable to a string. 
*/ public static <T> String toString( Iterable<T> iterable, String start, String sep, String end) { final StringBuilder buf = new StringBuilder(); buf.append(start); for (Ord<T> ord : Ord.zip(iterable)) { if (ord.i > 0) { buf.append(sep); } buf.append(ord.e); } buf.append(end); return buf.toString(); } /** Converts a list of strings to a string separated by newlines. */ public static String lines(Iterable<String> strings) { return toString(strings, "", "\n", ""); } /** Converts a string into tokens. */ public static Iterable<String> tokenize(final String s, final String delim) { return new Iterable<String>() { final StringTokenizer t = new StringTokenizer(s, delim); @Override public Iterator<String> iterator() { return new Iterator<String>() { @Override public boolean hasNext() { return t.hasMoreTokens(); } @Override public String next() { return t.nextToken(); } @Override public void remove() { throw new UnsupportedOperationException("remove"); } }; } }; } /** * Converts a Java timezone to POSIX format, so that the boost C++ library * can instantiate timezone objects. * * <p><a * href="http://www.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html">POSIX * IEEE 1003.1</a> defines a format for timezone specifications. * * <p>The boost C++ library can read these specifications and instantiate <a * href="http://www.boost.org/doc/html/date_time/local_time.html#date_time.local_time.posix_time_zone"> * posix_time_zone</a> objects from them. The purpose of this method, * therefore, is to allow the C++ code such as the fennel calculator to use * the same notion of timezone as Java code. * * <p>The format is as follows: * * <blockquote>"std offset dst [offset],start[/time],end[/time]" * </blockquote> * * <p>where: * * <ul> * <li>'std' specifies the abbrev of the time zone. * <li>'offset' is the offset from UTC, and takes the form * <code>[+|-]hh[:mm[:ss]] {h=0-23, m/s=0-59}</code></li> * <li>'dst' specifies the abbrev of the time zone during daylight savings * time * <li>The second offset is how many hours changed during DST. Default=1 * <li>'start' and 'end' are the dates when DST goes into (and out of) * effect.<br> * <br> * They can each be one of three forms: * * <ol> * <li>Mm.w.d {month=1-12, week=1-5 (5 is always last), day=0-6} * <li>Jn {n=1-365 Feb29 is never counted} * <li>n {n=0-365 Feb29 is counted in leap years} * </ol> * </li> * * <li>'time' has the same format as 'offset', and defaults to 02:00:00.</li> * </ul> * * <p>For example:</p> * * <ul> * <li>"PST-8PDT01:00:00,M4.1.0/02:00:00,M10.1.0/02:00:00"; or more tersely * <li>"PST-8PDT,M4.1.0,M10.1.0" * </ul> * * <p>(Real format strings do not contain spaces; they are in the above * template only for readability.) * * <p>Boost apparently diverges from the POSIX standard in how it treats the * sign of timezone offsets. The POSIX standard states '<i>If preceded by a * '-', the timezone shall be east of the Prime Meridian; otherwise, it * shall be west</i>', yet boost requires the opposite. For instance, PST * has offset '-8' above. This method generates timezone strings consistent * with boost's expectations. 
* * @param tz Timezone * @param verbose Whether to include fields which can be omitted because * they have their default values * @return Timezone in POSIX format (offset sign reversed, per boost's * idiosyncracies) */ public static String toPosix(TimeZone tz, boolean verbose) { StringBuilder buf = new StringBuilder(); buf.append(tz.getDisplayName(false, TimeZone.SHORT, Locale.ROOT)); appendPosixTime(buf, tz.getRawOffset()); final int dstSavings = tz.getDSTSavings(); if (dstSavings == 0) { return buf.toString(); } buf.append(tz.getDisplayName(true, TimeZone.SHORT, Locale.ROOT)); if (verbose || (dstSavings != 3600000)) { // POSIX allows us to omit DST offset if it is 1:00:00 appendPosixTime(buf, dstSavings); } String patternString = ".*," + "startMode=([0-9]*)," + "startMonth=([0-9]*)," + "startDay=([-0-9]*)," + "startDayOfWeek=([0-9]*)," + "startTime=([0-9]*)," + "startTimeMode=([0-9]*)," + "endMode=([0-9]*)," + "endMonth=([0-9]*)," + "endDay=([-0-9]*)," + "endDayOfWeek=([0-9]*)," + "endTime=([0-9]*)," + "endTimeMode=([0-9]*).*"; Pattern pattern = Pattern.compile(patternString); String tzString = tz.toString(); Matcher matcher = pattern.matcher(tzString); if (!matcher.matches()) { throw new AssertionError("tz.toString not of expected format: " + tzString); } int j = 0; int startMode = groupAsInt(matcher, ++j); int startMonth = groupAsInt(matcher, ++j); int startDay = groupAsInt(matcher, ++j); int startDayOfWeek = groupAsInt(matcher, ++j); int startTime = groupAsInt(matcher, ++j); int startTimeMode = groupAsInt(matcher, ++j); int endMode = groupAsInt(matcher, ++j); int endMonth = groupAsInt(matcher, ++j); int endDay = groupAsInt(matcher, ++j); int endDayOfWeek = groupAsInt(matcher, ++j); int endTime = groupAsInt(matcher, ++j); int endTimeMode = groupAsInt(matcher, ++j); appendPosixDaylightTransition( tz, buf, startMode, startDay, startMonth, startDayOfWeek, startTime, startTimeMode, verbose, false); appendPosixDaylightTransition( tz, buf, endMode, endDay, endMonth, endDayOfWeek, endTime, endTimeMode, verbose, true); return buf.toString(); } private static int groupAsInt(Matcher matcher, int index) { String value = Objects.requireNonNull( matcher.group(index), () -> "no group for index " + index + ", matcher " + matcher); return Integer.parseInt(value); } /** * Writes a daylight savings time transition to a POSIX timezone * description. * * @param tz Timezone * @param buf Buffer to append to * @param mode Transition mode * @param day Day of transition * @param month Month of transition * @param dayOfWeek Day of week of transition * @param time Time of transition in millis * @param timeMode Mode of time transition * @param verbose Verbose * @param isEnd Whether this transition is leaving DST */ private static void appendPosixDaylightTransition( TimeZone tz, StringBuilder buf, int mode, int day, int month, int dayOfWeek, int time, int timeMode, boolean verbose, boolean isEnd) { buf.append(','); int week = day; switch (mode) { case 1: // SimpleTimeZone.DOM_MODE throw Util.needToImplement(0); case 3: // SimpleTimeZone.DOW_GE_DOM_MODE // If the day is 1, 8, 15, 22, we can translate this to case 2. 
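// In DOW_GE_DOM_MODE the transition fires on the first 'dayOfWeek' on or after 'day' of the month, so day 1/8/15/22 corresponds exactly to the 1st/2nd/3rd/4th occurrence of that weekday, which is what DOW_IN_MONTH_MODE (case 2) expresses; any other 'day' value has no POSIX equivalent and is rejected below.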
switch (day) { case 1: week = 1; // 1st week of month break; case 8: week = 2; // 2nd week of month break; case 15: week = 3; // 3rd week of month break; case 22: week = 4; // 4th week of month break; default: throw new AssertionError( "POSIX timezone format cannot represent " + tz); } // fall through case 2: // SimpleTimeZone.DOW_IN_MONTH_MODE buf.append('M'); buf.append(month + 1); // 1 <= m <= 12 buf.append('.'); if (week == -1) { // java represents 'last week' differently from POSIX week = 5; } buf.append(week); // 1 <= n <= 5, 5 means 'last' buf.append('.'); buf.append(dayOfWeek - 1); // 0 <= d <= 6, 0=Sunday break; case 4: // SimpleTimeZone.DOW_LE_DOM_MODE throw Util.needToImplement(0); default: throw new AssertionError("unexpected value: " + mode); } switch (timeMode) { case 0: // SimpleTimeZone.WALL_TIME break; case 1: // SimpleTimeZone.STANDARD_TIME, e.g. Australia/Sydney if (isEnd) { time += tz.getDSTSavings(); } break; case 2: // SimpleTimeZone.UTC_TIME, e.g. Europe/Paris time += tz.getRawOffset(); if (isEnd) { time += tz.getDSTSavings(); } break; default: break; } if (verbose || (time != 7200000)) { // POSIX allows us to omit the time if it is 2am (the default) buf.append('/'); appendPosixTime(buf, time); } } /** * Given a time expressed in milliseconds, append the time formatted as * "hh[:mm[:ss]]". * * @param buf Buffer to append to * @param millis Milliseconds */ private static void appendPosixTime(StringBuilder buf, int millis) { if (millis < 0) { buf.append('-'); millis = -millis; } int hours = millis / 3600000; buf.append(hours); millis -= hours * 3600000; if (millis == 0) { return; } buf.append(':'); int minutes = millis / 60000; if (minutes < 10) { buf.append('0'); } buf.append(minutes); millis -= minutes * 60000; if (millis == 0) { return; } buf.append(':'); int seconds = millis / 1000; if (seconds < 10) { buf.append('0'); } buf.append(seconds); } /** * Parses a locale string. * * <p>The inverse operation of {@link java.util.Locale#toString()}. * * @param localeString Locale string, e.g. "en" or "en_US" * @return Java locale object */ public static Locale parseLocale(String localeString) { String[] strings = localeString.split("_"); switch (strings.length) { case 1: return new Locale(strings[0]); case 2: return new Locale(strings[0], strings[1]); case 3: return new Locale(strings[0], strings[1], strings[2]); default: throw new AssertionError("bad locale string '" + localeString + "'"); } } /** * Converts a list whose members are automatically down-cast to a given * type. * * <p>If a member of the backing list is not an instanceof <code>E</code>, * the accessing method (such as {@link List#get}) will throw a * {@link ClassCastException}. * * <p>All modifications are automatically written to the backing list. Not * synchronized. * * @param list Backing list. * @param clazz Class to cast to. * @return A list whose members are of the desired type. */ public static <E> List<E> cast(List<? super E> list, Class<E> clazz) { return new CastingList<>(list, clazz); } /** * Converts a iterator whose members are automatically down-cast to a given * type. * * <p>If a member of the backing iterator is not an instance of {@code E}, * {@link Iterator#next()}) will throw a * {@link ClassCastException}. * * <p>All modifications are automatically written to the backing iterator. * Not synchronized. * * <p>If the backing iterator has not-nullable elements, * the returned iterator has not-nullable elements. 
* * @param iter Backing iterator * @param clazz Class to cast to * @return An iterator whose members are of the desired type. */ public static <E extends @PolyNull Object> Iterator<E> cast( final Iterator<? extends @PolyNull Object> iter, final Class<E> clazz) { return transform(iter, x -> clazz.cast(castNonNull(x))); } /** * Converts an {@link Iterable} whose members are automatically down-cast to * a given type. * * <p>All modifications are automatically written to the backing iterator. * Not synchronized. * * @param iterable Backing iterable * @param clazz Class to cast to * @return An iterable whose members are of the desired type. */ public static <E> Iterable<E> cast( final Iterable<? super E> iterable, final Class<E> clazz) { // FluentIterable provides toString return new FluentIterable<E>() { @Override public Iterator<E> iterator() { return Util.cast(iterable.iterator(), clazz); } }; } /** * Makes a collection of untyped elements appear as a list of strictly typed * elements, by filtering out those which are not of the correct type. * * <p>The returned object is an {@link Iterable}, * which makes it ideal for use with the 'foreach' construct. For example, * * <blockquote><code>List&lt;Number&gt; numbers = Arrays.asList(1, 2, 3.14, * 4, null, 6E23);<br> * for (int myInt : filter(numbers, Integer.class)) {<br> * &nbsp;&nbsp;&nbsp;&nbsp;print(i);<br> * }</code></blockquote> * * <p>will print 1, 2, 4. * * @param iterable Iterable * @param includeFilter Class whose instances to include */ public static <E> Iterable<E> filter( final Iterable<?> iterable, final Class<E> includeFilter) { // FluentIterable provides toString return new FluentIterable<E>() { @Override public Iterator<E> iterator() { return new Filterator<>(iterable.iterator(), includeFilter); } }; } public static <E> Collection<E> filter( final Collection<?> collection, final Class<E> includeFilter) { return new AbstractCollection<E>() { private int size = -1; @Override public Iterator<E> iterator() { return new Filterator<>(collection.iterator(), includeFilter); } @Override public int size() { if (size == -1) { // Compute size. This is expensive, but the value // collection.size() is not correct since we're // filtering values. (Some java.util algorithms // call next() on the result of iterator() size() times.) int s = 0; for (@SuppressWarnings("unused") E e : this) { s++; } size = s; } return size; } }; } /** * Returns a subset of a list containing only elements of a given type. * * <p>Modifications to the list are NOT written back to the source list. * * @param list List of objects * @param includeFilter Class to filter for * @return List of objects of given class (or a subtype) */ public static <E> List<E> filter( final List<?> list, final Class<E> includeFilter) { List<E> result = new ArrayList<>(); for (Object o : list) { if (includeFilter.isInstance(o)) { result.add(includeFilter.cast(o)); } } return result; } /** * Converts a {@link Properties} object to a <code>{@link Map}&lt;String, * String&gt;</code>. * * <p>This is necessary because {@link Properties} is a dinosaur class. It * ought to extend <code>Map&lt;String,String&gt;</code>, but instead * extends <code>{@link Hashtable}&lt;Object,Object&gt;</code>. 
* * <p>Typical usage, to iterate over a {@link Properties}: * * <blockquote> * <code> * Properties properties;<br> * for (Map.Entry&lt;String, String&gt; entry = * Util.toMap(properties).entrySet()) {<br> * println("key=" + entry.getKey() + ", value=" + entry.getValue());<br> * } * </code> * </blockquote> */ public static Map<String, String> toMap( final Properties properties) { //noinspection unchecked return (Map) properties; } /** * Returns a hashmap with given contents. * * <p>Use this method in initializers. Type parameters are inferred from * context, and the contents are initialized declaratively. For example, * * <blockquote><code>Map&lt;String, Integer&gt; population =<br> * &nbsp;&nbsp;Olap4jUtil.mapOf(<br> * &nbsp;&nbsp;&nbsp;&nbsp;"UK", 65000000,<br> * &nbsp;&nbsp;&nbsp;&nbsp;"USA", 300000000);</code></blockquote> * * @param key First key * @param value First value * @param keyValues Second and sequent key/value pairs * @param <K> Key type * @param <V> Value type * @return Map with given contents */ public static <K, V> Map<K, V> mapOf(K key, V value, Object... keyValues) { final Map<K, V> map = new LinkedHashMap<>(1 + keyValues.length); map.put(key, value); for (int i = 0; i < keyValues.length;) { //noinspection unchecked map.put((K) keyValues[i++], (V) keyValues[i++]); } return map; } /** * Returns an exception indicating that we didn't expect to find this * enumeration here. * * @param value Enumeration value which was not expected * @return an error, to be thrown */ public static <E extends Enum<E>> Error unexpected(E value) { return new AssertionError("Was not expecting value '" + value + "' for enumeration '" + value.getDeclaringClass().getName() + "' in this context"); } /** * Creates a map of the values of an enumeration by name. * * @param clazz Enumeration class * @return map of values */ public static <T extends Enum<T>> Map<String, T> enumConstants( Class<T> clazz) { final T[] ts = clazz.getEnumConstants(); if (ts == null) { throw new AssertionError("not an enum type"); } ImmutableMap.Builder<String, T> builder = ImmutableMap.builder(); for (T t : ts) { builder.put(t.name(), t); } return builder.build(); } /** * Returns the value of an enumeration with a particular name. * * <p>Similar to {@link Enum#valueOf(Class, String)}, but returns {@code * null} rather than throwing {@link IllegalArgumentException}. * * @param clazz Enum class * @param name Name of enum constant * @param <T> Enum class type * @return Enum constant or null */ public static synchronized <T extends Enum<T>> @Nullable T enumVal( Class<T> clazz, String name) { return clazz.cast(ENUM_CONSTANTS.getUnchecked(clazz).get(name)); } /** * Returns the value of an enumeration with a particular or default value if * not found. * * @param default_ Default value (not null) * @param name Name of enum constant * @param <T> Enum class type * @return Enum constant, never null */ public static synchronized <T extends Enum<T>> T enumVal(T default_, @Nullable String name) { final Class<T> clazz = default_.getDeclaringClass(); final T t = clazz.cast(ENUM_CONSTANTS.getUnchecked(clazz).get(name)); if (t == null) { return default_; } return t; } /** * Creates a list that returns every {@code n}th element of a list, * starting at element {@code k}. * * <p>It is OK if the list is empty or its size is not a multiple of * {@code n}.</p> * * <p>For instance, {@code quotientList(list, 2, 0)} returns the even * elements of a list, and {@code quotientList(list, 2, 1)} returns the odd * elements. 
Those lists are the same length only if list has even size.</p> */ public static <E> List<E> quotientList( final List<E> list, final int n, final int k) { if (n <= 0 || k < 0 || k >= n) { throw new IllegalArgumentException( "n must be positive; k must be between 0 and n - 1"); } final int size = (list.size() + n - k - 1) / n; return new AbstractList<E>() { @Override public E get(int index) { return list.get(index * n + k); } @Override public int size() { return size; } }; } /** Given a list with N elements * [e<sub>0</sub>, e<sub>1</sub>, ..., e<sub>N-1</sub>] * (where N is even), returns a list of the N / 2 elements * [ (e<sub>0</sub>, e<sub>1</sub>), * (e<sub>2</sub>, e<sub>3</sub>), ... ]. */ public static <E> List<Pair<E, E>> pairs(final List<E> list) { //noinspection unchecked return Pair.zip(quotientList(list, 2, 0), quotientList(list, 2, 1)); } /** Returns the first value if it is not null, * otherwise the second value. * * <p>The result may be null only if the second argument is not null. * * <p>Equivalent to the Elvis operator ({@code ?:}) of languages such as * Groovy or PHP. */ public static <T extends Object> @PolyNull T first(@Nullable T v0, @PolyNull T v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Double} value, * using a given default value if it is null. */ public static double first(@Nullable Double v0, double v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Float} value, * using a given default value if it is null. */ public static float first(@Nullable Float v0, float v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Integer} value, * using a given default value if it is null. */ public static int first(@Nullable Integer v0, int v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Long} value, * using a given default value if it is null. */ public static long first(@Nullable Long v0, long v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Boolean} value, * using a given default value if it is null. */ public static boolean first(@Nullable Boolean v0, boolean v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Short} value, * using a given default value if it is null. */ public static short first(@Nullable Short v0, short v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Character} value, * using a given default value if it is null. */ public static char first(@Nullable Character v0, char v1) { return v0 != null ? v0 : v1; } /** Unboxes a {@link Byte} value, * using a given default value if it is null. */ public static byte first(@Nullable Byte v0, byte v1) { return v0 != null ? v0 : v1; } public static <T> Iterable<T> orEmpty(@Nullable Iterable<T> v0) { return v0 != null ? v0 : ImmutableList.of(); } /** Returns the first element of a list. * * @throws java.lang.IndexOutOfBoundsException if the list is empty */ public <E> E first(List<E> list) { return list.get(0); } /** Returns the last element of a list. * * @throws java.lang.IndexOutOfBoundsException if the list is empty */ public static <E> E last(List<E> list) { return list.get(list.size() - 1); } /** Returns the first {@code n} elements of a list. */ public static <E> List<E> first(List<E> list, int n) { return list.subList(0, n); } /** Returns every element of a list but its last element. */ public static <E> List<E> skipLast(List<E> list) { return skipLast(list, 1); } /** Returns every element of a list but its last {@code n} elements. 
*/ public static <E> List<E> skipLast(List<E> list, int n) { return list.subList(0, list.size() - n); } /** Returns the last {@code n} elements of a list. */ public static <E> List<E> last(List<E> list, int n) { return list.subList(list.size() - n, list.size()); } /** Returns all but the first element of a list. */ public static <E> List<E> skip(List<E> list) { return skip(list, 1); } /** Returns all but the first {@code n} elements of a list. */ public static <E> List<E> skip(List<E> list, int fromIndex) { return fromIndex == 0 ? list : list.subList(fromIndex, list.size()); } public static List<Integer> range(final int end) { return new AbstractList<Integer>() { @Override public int size() { return end; } @Override public Integer get(int index) { return index; } }; } public static List<Integer> range(final int start, final int end) { return new AbstractList<Integer>() { @Override public int size() { return end - start; } @Override public Integer get(int index) { return start + index; } }; } /** * Returns whether the elements of {@code list} are distinct. */ public static <E> boolean isDistinct(List<E> list) { return firstDuplicate(list) < 0; } /** * Returns the ordinal of the first element in the list which is equal to a * previous element in the list. * * <p>For example, * <code>firstDuplicate(Arrays.asList("a", "b", "c", "b", "a"))</code> * returns 3, the ordinal of the 2nd "b". * * @param list List * @return Ordinal of first duplicate, or -1 if not found */ public static <E> int firstDuplicate(List<E> list) { final int size = list.size(); if (size < 2) { // Lists of size 0 and 1 are always distinct. return -1; } if (size < QUICK_DISTINCT) { // For smaller lists, avoid the overhead of creating a set. Threshold // determined empirically using UtilTest.testIsDistinctBenchmark. for (int i = 1; i < size; i++) { E e = list.get(i); for (int j = i - 1; j >= 0; j--) { E e1 = list.get(j); if (Objects.equals(e, e1)) { return i; } } } return -1; } // we use HashMap here, because it is more efficient than HashSet. final Map<E, Object> set = new HashMap<>(size); for (E e : list) { if (set.put(e, "") != null) { return set.size(); } } return -1; } /** Converts a list into a list with unique elements. * * <p>The order is preserved; the second and subsequent occurrences are * removed. * * <p>If the list is already unique it is returned unchanged. */ public static <E> List<E> distinctList(List<E> list) { // If the list is small, check for duplicates using pairwise comparison. if (list.size() < QUICK_DISTINCT && isDistinct(list)) { return list; } // Lists that have all the same element are common. Avoiding creating a set. if (allSameElement(list)) { return ImmutableList.of(list.get(0)); } return ImmutableList.copyOf(new LinkedHashSet<>(list)); } /** Returns whether all of the elements of a list are equal. * The list is assumed to be non-empty. */ private static <E> boolean allSameElement(List<E> list) { final Iterator<E> iterator = list.iterator(); final E first = iterator.next(); while (iterator.hasNext()) { if (!Objects.equals(first, iterator.next())) { return false; } } return true; } /** Converts an iterable into a list with unique elements. * * <p>The order is preserved; the second and subsequent occurrences are * removed. * * <p>If {@code iterable} is a unique list it is returned unchanged. 
*/ public static <E> List<E> distinctList(Iterable<E> keys) { if (keys instanceof Set) { return ImmutableList.copyOf(keys); } if (keys instanceof List) { @SuppressWarnings("unchecked") final List<E> list = (List) keys; if (isDistinct(list)) { return list; } } return ImmutableList.copyOf(Sets.newLinkedHashSet(keys)); } /** Returns whether two collections have any elements in common. */ public static <E> boolean intersects(Collection<E> c0, Collection<E> c1) { for (E e : c1) { if (c0.contains(e)) { return true; } } return false; } /** Looks for a string within a list of strings, using a given * case-sensitivity policy, and returns the position at which the first match * is found, or -1 if there are no matches. */ public static int findMatch(List<String> strings, String seek, boolean caseSensitive) { if (caseSensitive) { return strings.indexOf(seek); } for (int i = 0; i < strings.size(); i++) { String s = strings.get(i); if (s.equalsIgnoreCase(seek)) { return i; } } return -1; } /** Returns whether a name matches another according to a given * case-sensitivity policy. */ public static boolean matches(boolean caseSensitive, String s0, String s1) { return caseSensitive ? s1.equals(s0) : s1.equalsIgnoreCase(s0); } /** Returns whether one list is a prefix of another. */ public static <E> boolean startsWith(List<E> list0, List<E> list1) { if (list0 == list1) { return true; } final int size = list1.size(); if (list0.size() < size) { return false; } for (int i = 0; i < size; i++) { if (!Objects.equals(list0.get(i), list1.get(i))) { return false; } } return true; } /** Converts ["ab", "c"] to "ab"."c". */ public static String listToString(List<String> list) { final StringBuilder b = new StringBuilder(); for (String s : list) { if (b.length() > 0) { b.append("."); } b.append('"'); b.append(s.replace("\"", "\"\"")); b.append('"'); } return b.toString(); } public static List<String> stringToList(String s) { if (s.isEmpty()) { return ImmutableList.of(); } final ImmutableList.Builder<String> builder = ImmutableList.builder(); final StringBuilder b = new StringBuilder(); int i = 0; for (;;) { char c = s.charAt(i); if (c != '"') { throw new IllegalArgumentException(); } for (;;) { c = s.charAt(++i); if (c == '"') { if (i == s.length() - 1) { break; } ++i; c = s.charAt(i); if (c == '.') { break; } if (c != '"') { throw new IllegalArgumentException(); } } b.append(c); } builder.add(b.toString()); b.setLength(0); if (++i >= s.length()) { break; } } return builder.build(); } /** Converts a number into human-readable form, with 3 digits and a "K", "M" * or "G" multiplier for thousands, millions or billions. * * <p>Examples: -2, 0, 1, 999, 1.00K, 1.99K, 3.45M, 4.56B.</p> */ public static String human(double d) { if (d == 0d) { return "0"; } if (d < 0d) { return "-" + human(-d); } final int digitCount = (int) Math.floor(Math.log10(d)); switch (digitCount) { case 0: case 1: case 2: return Integer.toString((int) d); case 3: case 4: case 5: return digits3(Math.round(d / 10D), digitCount % 3) + "K"; case 6: case 7: case 8: return digits3(Math.round(d / 10000D), digitCount % 3) + "M"; case 9: case 10: case 11: return digits3(Math.round(d / 10000000D), digitCount % 3) + "G"; default: return Double.toString(d); } } private static String digits3(long x, int z) { final String s = Long.toString(x); switch (z) { case 0: return s.charAt(0) + "." + s.substring(1, 3); case 1: return s.substring(0, 2) + "." 
+ s.substring(2, 3); default: return s.substring(0, 3); } } /** Returns a map that is a view onto a collection of values, using the * provided function to convert a value to a key. * * <p>Unlike * {@link com.google.common.collect.Maps#uniqueIndex(Iterable, com.google.common.base.Function)}, * returns a view whose contents change as the collection of values changes. * * @param values Collection of values * @param function Function to map value to key * @param <K> Key type * @param <V> Value type * @return Map that is a view onto the values */ public static <K, V> Map<K, V> asIndexMapJ( final Collection<V> values, final Function<V, K> function) { final Collection<Map.Entry<K, V>> entries = Collections2.transform(values, v -> Pair.of(function.apply(v), v)); final Set<Map.Entry<K, V>> entrySet = new AbstractSet<Map.Entry<K, V>>() { @Override public Iterator<Map.Entry<K, V>> iterator() { return entries.iterator(); } @Override public int size() { return entries.size(); } }; return new AbstractMap<K, V>() { @SuppressWarnings("override.return.invalid") @Override public Set<Entry<K, V>> entrySet() { return entrySet; } }; } @SuppressWarnings({"Guava", "UnnecessaryMethodReference"}) @Deprecated public static <K, V> Map<K, V> asIndexMap( final Collection<V> values, final com.google.common.base.Function<V, K> function) { return asIndexMapJ(values, function::apply); } /** * Prints the given code with line numbering. */ public static void debugCode(PrintStream out, String code) { out.println(); StringReader sr = new StringReader(code); BufferedReader br = new BufferedReader(sr); try { String line; for (int i = 1; (line = br.readLine()) != null; i++) { out.print("/*"); String number = Integer.toString(i); if (number.length() < 4) { Spaces.append(out, 4 - number.length()); } out.print(number); out.print(" */ "); out.println(line); } } catch (IOException e) { // not possible } } /** Returns a copy of a list of lists, making the component lists immutable if * they are not already. */ public static <E> List<List<E>> immutableCopy( Iterable<? extends Iterable<E>> lists) { int n = 0; for (Iterable<E> list : lists) { if (!(list instanceof ImmutableList)) { ++n; } } if (n == 0) { // Lists are already immutable. Furthermore, if the outer list is // immutable we will just return "lists" unchanged. //noinspection unchecked return ImmutableList.copyOf((Iterable<List<E>>) lists); } final ImmutableList.Builder<List<E>> builder = ImmutableList.builder(); for (Iterable<E> list : lists) { builder.add(ImmutableList.copyOf(list)); } return builder.build(); } /** Creates a {@link PrintWriter} to a given output stream using UTF-8 * character set. * * <p>Does not use the default character set. */ public static PrintWriter printWriter(OutputStream out) { return new PrintWriter( new BufferedWriter( new OutputStreamWriter(out, StandardCharsets.UTF_8))); } /** Creates a {@link PrintWriter} to a given file using UTF-8 * character set. * * <p>Does not use the default character set. */ public static PrintWriter printWriter(File file) throws FileNotFoundException { return printWriter(new FileOutputStream(file)); } /** Creates a {@link BufferedReader} to a given input stream using UTF-8 * character set. * * <p>Does not use the default character set. */ public static BufferedReader reader(InputStream in) { return new BufferedReader( new InputStreamReader(in, StandardCharsets.UTF_8)); } /** Creates a {@link BufferedReader} to read a given file using UTF-8 * character set. * * <p>Does not use the default character set. 
*/ public static BufferedReader reader(File file) throws FileNotFoundException { return reader(new FileInputStream(file)); } /** Given an {@link Appendable}, performs an action that requires a * {@link StringBuilder}. Casts the Appendable if possible. */ public static void asStringBuilder(Appendable appendable, Consumer<StringBuilder> consumer) { if (appendable instanceof StringBuilder) { consumer.accept((StringBuilder) appendable); } else { try { final StringBuilder sb = new StringBuilder(); consumer.accept(sb); appendable.append(sb); } catch (IOException e) { throw new RuntimeException(e); } } } /** Creates a {@link Calendar} in the UTC time zone and root locale. * Does not use the time zone or locale. */ public static Calendar calendar() { return Calendar.getInstance(DateTimeUtils.UTC_ZONE, Locale.ROOT); } /** Creates a {@link Calendar} in the UTC time zone and root locale * with a given time. */ public static Calendar calendar(long millis) { Calendar calendar = calendar(); calendar.setTimeInMillis(millis); return calendar; } /** * Returns a {@code Collector} that accumulates the input elements into a * Guava {@link ImmutableList} via a {@link ImmutableList.Builder}. * * <p>It will be obsolete when we move to {@link Bug#upgrade Guava 28.0-jre}. * Guava 21.0 introduced {@code ImmutableList.toImmutableList()}, but it had * a {@link com.google.common.annotations.Beta} tag until 28.0-jre. * * <p>In {@link Bug#upgrade Guava 21.0}, change this method to call * {@code ImmutableList.toImmutableList()}, ignoring the {@code @Beta} tag. * * @param <T> Type of the input elements * * @return a {@code Collector} that collects all the input elements into an * {@link ImmutableList}, in encounter order */ public static <T> Collector<T, ImmutableList.Builder<T>, ImmutableList<T>> toImmutableList() { return Collector.of(ImmutableList::builder, ImmutableList.Builder::add, Util::combine, ImmutableList.Builder::build); } /** Combines a second immutable list builder into a first. */ public static <E> ImmutableList.Builder<E> combine( ImmutableList.Builder<E> b0, ImmutableList.Builder<E> b1) { b0.addAll(b1.build()); return b0; } /** Combines a second array list into a first. */ public static <E> ArrayList<E> combine(ArrayList<E> list0, ArrayList<E> list1) { list0.addAll(list1); return list0; } /** Returns an operator that applies {@code op1} and then {@code op2}. * * <p>As {@link Function#andThen(Function)} but for {@link UnaryOperator}. */ public static <X> UnaryOperator<X> andThen(UnaryOperator<X> op1, UnaryOperator<X> op2) { return op1.andThen(op2)::apply; } /** Transforms a list, applying a function to each element. */ public static <F, T> List<T> transform(List<? extends F> list, java.util.function.Function<? super F, ? extends T> function) { if (list instanceof RandomAccess) { return new RandomAccessTransformingList<>(list, function); } else { return new TransformingList<>(list, function); } } /** Transforms a list, applying a function to each element, also passing in * the element's index in the list. */ public static <F, T> List<T> transformIndexed(List<? extends F> list, BiFunction<? super F, Integer, ? extends T> function) { if (list instanceof RandomAccess) { return new RandomAccessTransformingIndexedList<>(list, function); } else { return new TransformingIndexedList<>(list, function); } } /** Transforms an iterable, applying a function to each element. */ @API(since = "1.27", status = API.Status.EXPERIMENTAL) public static <F, T> Iterable<T> transform(Iterable<? 
extends F> iterable, java.util.function.Function<? super F, ? extends T> function) { // FluentIterable provides toString return new FluentIterable<T>() { @Override public Iterator<T> iterator() { return Util.transform(iterable.iterator(), function); } }; } /** Transforms an iterator. */ @API(since = "1.27", status = API.Status.EXPERIMENTAL) public static <F, T> Iterator<T> transform(Iterator<? extends F> iterator, java.util.function.Function<? super F, ? extends T> function) { return new TransformingIterator<>(iterator, function); } /** Filters an iterable. */ @API(since = "1.27", status = API.Status.EXPERIMENTAL) public static <E> Iterable<E> filter(Iterable<? extends E> iterable, Predicate<? super E> predicate) { // FluentIterable provides toString return new FluentIterable<E>() { @Override public Iterator<E> iterator() { return Util.filter(iterable.iterator(), predicate); } }; } /** Filters an iterator. */ @API(since = "1.27", status = API.Status.EXPERIMENTAL) public static <E> Iterator<E> filter(Iterator<? extends E> iterator, Predicate<? super E> predicate) { return new FilteringIterator<>(iterator, predicate); } /** Returns a list with any elements for which the predicate is true moved to * the head of the list. The algorithm does not modify the list, is stable, * and is idempotent. */ public static <E> List<E> moveToHead(List<? extends E> terms, Predicate<? super E> predicate) { if (alreadyAtFront(terms, predicate)) { //noinspection unchecked return (List<E>) terms; } final List<E> newTerms = new ArrayList<>(terms.size()); for (E term : terms) { if (predicate.test(term)) { newTerms.add(term); } } for (E term : terms) { if (!predicate.test(term)) { newTerms.add(term); } } return newTerms; } /** Returns whether of the elements of a list for which predicate is true * occur before all elements where the predicate is false. (Returns true in * corner cases such as empty list, all true, or all false. */ private static <E> boolean alreadyAtFront(List<? extends E> list, Predicate<? super E> predicate) { boolean prev = true; for (E e : list) { final boolean pass = predicate.test(e); if (pass && !prev) { return false; } prev = pass; } return true; } /** Returns a view of a list, picking the elements of a list with the given * set of ordinals. */ public static <E> List<E> select(List<E> list, List<Integer> ordinals) { return new AbstractList<E>() { @Override public int size() { return ordinals.size(); } @Override public E get(int index) { return list.get(ordinals.get(index)); } }; } /** Returns a map which ignores any write operation. */ public static <K, V> Map<K, V> blackholeMap() { return BlackholeMap.of(); } //~ Inner Classes ---------------------------------------------------------- /** * Exception used to interrupt a tree walk of any kind. */ public static class FoundOne extends ControlFlowException { private final @Nullable Object node; /** Singleton instance. Can be used if you don't care about node. */ @SuppressWarnings("ThrowableInstanceNeverThrown") public static final FoundOne NULL = new FoundOne(null); public FoundOne(@Nullable Object node) { this.node = node; } @Pure public @Nullable Object getNode() { return node; } } /** * Visitor which looks for an OVER clause inside a tree of * {@link SqlNode} objects. 
*/ public static class OverFinder extends SqlBasicVisitor<Void> { public static final OverFinder INSTANCE = new Util.OverFinder(); @Override public Void visit(SqlCall call) { if (call.getKind() == SqlKind.OVER) { throw FoundOne.NULL; } return super.visit(call); } } /** List that returns the same number of elements as a backing list, * applying a transformation function to each one. * * @param <F> Element type of backing list * @param <T> Element type of this list */ private static class TransformingList<F, T> extends AbstractList<T> { private final java.util.function.Function<? super F, ? extends T> function; private final List<? extends F> list; TransformingList(List<? extends F> list, java.util.function.Function<? super F, ? extends T> function) { this.function = function; this.list = list; } @Override public T get(int i) { return function.apply(list.get(i)); } @Override public int size() { return list.size(); } @Override public Iterator<T> iterator() { return listIterator(); } } /** Extension to {@link TransformingList} that implements * {@link RandomAccess}. * * @param <F> Element type of backing list * @param <T> Element type of this list */ private static class RandomAccessTransformingList<F, T> extends TransformingList<F, T> implements RandomAccess { RandomAccessTransformingList(List<? extends F> list, java.util.function.Function<? super F, ? extends T> function) { super(list, function); } } /** List that returns the same number of elements as a backing list, * applying a transformation function to each one. * * @param <F> Element type of backing list * @param <T> Element type of this list */ private static class TransformingIndexedList<F, T> extends AbstractList<T> { private final BiFunction<? super F, Integer, ? extends T> function; private final List<? extends F> list; TransformingIndexedList(List<? extends F> list, BiFunction<? super F, Integer, ? extends T> function) { this.function = function; this.list = list; } @Override public T get(int i) { return function.apply(list.get(i), i); } @Override public int size() { return list.size(); } @Override public Iterator<T> iterator() { return listIterator(); } } /** Extension to {@link TransformingIndexedList} that implements * {@link RandomAccess}. * * @param <F> Element type of backing list * @param <T> Element type of this list */ private static class RandomAccessTransformingIndexedList<F, T> extends TransformingIndexedList<F, T> implements RandomAccess { RandomAccessTransformingIndexedList(List<? extends F> list, BiFunction<? super F, Integer, ? extends T> function) { super(list, function); } } /** Iterator that applies a predicate to each element. * * @param <T> Element type */ private static class FilteringIterator<T> implements Iterator<T> { private static final Object DUMMY = new Object(); final Iterator<? extends T> iterator; private final Predicate<? super T> predicate; T current; FilteringIterator(Iterator<? extends T> iterator, Predicate<? super T> predicate) { this.iterator = iterator; this.predicate = predicate; @SuppressWarnings("method.invocation.invalid") T current = moveNext(); this.current = current; } @Override public boolean hasNext() { return current != DUMMY; } @Override public T next() { final T t = this.current; current = moveNext(); return t; } protected T moveNext() { while (iterator.hasNext()) { T t = iterator.next(); if (predicate.test(t)) { return t; } } return (T) DUMMY; } } /** * An {@link java.util.Iterator} that transforms its elements on-the-fly. 
* * @param <F> The element type of the delegate iterator * @param <T> The element type of this iterator */ private static class TransformingIterator<F, T> implements Iterator<T> { private final Iterator<? extends F> delegate; private final java.util.function.Function<? super F, ? extends T> function; TransformingIterator(Iterator<? extends F> delegate, java.util.function.Function<? super F, ? extends T> function) { this.delegate = delegate; this.function = function; } @Override public boolean hasNext() { return delegate.hasNext(); } @Override public final T next() { return function.apply(delegate.next()); } @Override public void remove() { delegate.remove(); } } }
jcamachor/calcite
core/src/main/java/org/apache/calcite/util/Util.java
Java
apache-2.0
89,578
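A minimal usage sketch for a few of the Util helpers defined in the record above (quotientList, human, listToString, firstDuplicate). The class name UtilSketch and the main method are illustrative only; the behavior noted in the comments follows the implementations shown here, and the sketch assumes org.apache.calcite.util.Util and its Guava dependency are on the classpath.

import org.apache.calcite.util.Util;

import java.util.Arrays;
import java.util.List;

public class UtilSketch {
  public static void main(String[] args) {
    List<String> names = Arrays.asList("a", "b", "c", "d", "e");

    // Every 2nd element starting at ordinal 0: prints [a, c, e]
    System.out.println(Util.quotientList(names, 2, 0));

    // Human-readable numbers with K/M/G multipliers: prints 1.23K
    System.out.println(Util.human(1234));

    // Quoted, dot-separated identifier list: prints "ab"."c"
    System.out.println(Util.listToString(Arrays.asList("ab", "c")));

    // Ordinal of the first element equal to an earlier one, or -1: prints 2
    System.out.println(Util.firstDuplicate(Arrays.asList("x", "y", "x")));
  }
}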
* * * title: <%= hoc_s(:title_stats) %> layout: wide nav: promote_nav * * * <%= view :signup_button %> # Blurbs and Useful Stats ## Use this short blurb in newsletters ### Bring computer science to your school. Start with an Hour of Code Computers are everywhere, but fewer schools teach computer science than 10 years ago. The good news is that we have set out to change this. If you heard about the [Hour of Code](%= resolve_url('/') %) last year, you might know it made history. In the first Hour of Code, 15 million students tried computer science. Last year, that number increased to 60 million students! The [Hour of Code](%= resolve_url('/') %) is a one-hour introduction to computer science, designed to demystify code and show that anybody can learn the basics. [Sign up](%= resolve_url('/') %) to host an Hour of Code this <%= campaign_date('full') %> during Computer Science Education Week. To add your school to the map, go to https://hourofcode.com/<%= @country %> ## Infographics <%= view :stats_carousel %> <%= view :signup_button %>
rvarshney/code-dot-org
i18n/locales/tr-TR/hourofcode/promote/stats.md
Markdown
apache-2.0
1,098
/* * Copyright 1999-2015 dangdang.com. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * </p> */ package com.dangdang.ddframe.job.cloud.scheduler.config.app; import com.dangdang.ddframe.job.cloud.scheduler.fixture.CloudAppConfigurationBuilder; import com.dangdang.ddframe.job.cloud.scheduler.fixture.CloudAppJsonConstants; import com.dangdang.ddframe.job.reg.base.CoordinatorRegistryCenter; import com.google.common.base.Optional; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import java.util.Arrays; import java.util.Collection; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public final class CloudAppConfigurationServiceTest { @Mock private CoordinatorRegistryCenter regCenter; @InjectMocks private CloudAppConfigurationService configService; @Test public void assertAdd() { CloudAppConfiguration appConfig = CloudAppConfigurationBuilder.createCloudAppConfiguration("test_app"); configService.add(appConfig); verify(regCenter).persist("/config/app/test_app", CloudAppJsonConstants.getAppJson("test_app")); } @Test public void assertUpdate() { CloudAppConfiguration appConfig = CloudAppConfigurationBuilder.createCloudAppConfiguration("test_app"); configService.update(appConfig); verify(regCenter).update("/config/app/test_app", CloudAppJsonConstants.getAppJson("test_app")); } @Test public void assertLoadAllWithoutRootNode() { when(regCenter.isExisted("/config/app")).thenReturn(false); assertTrue(configService.loadAll().isEmpty()); verify(regCenter).isExisted("/config/app"); } @Test public void assertLoadAllWithRootNode() { when(regCenter.isExisted("/config/app")).thenReturn(true); when(regCenter.getChildrenKeys(CloudAppConfigurationNode.ROOT)).thenReturn(Arrays.asList("test_app_1", "test_app_2")); when(regCenter.get("/config/app/test_app_1")).thenReturn(CloudAppJsonConstants.getAppJson("test_app_1")); Collection<CloudAppConfiguration> actual = configService.loadAll(); assertThat(actual.size(), is(1)); assertThat(actual.iterator().next().getAppName(), is("test_app_1")); verify(regCenter).isExisted("/config/app"); verify(regCenter).getChildrenKeys("/config/app"); verify(regCenter).get("/config/app/test_app_1"); verify(regCenter).get("/config/app/test_app_2"); } @Test public void assertLoadWithoutConfig() { Optional<CloudAppConfiguration> actual = configService.load("test_app"); assertFalse(actual.isPresent()); } @Test public void assertLoadWithConfig() { when(regCenter.get("/config/app/test_app")).thenReturn(CloudAppJsonConstants.getAppJson("test_app")); Optional<CloudAppConfiguration> actual = configService.load("test_app"); assertTrue(actual.isPresent()); assertThat(actual.get().getAppName(), is("test_app")); } @Test public void assertRemove() { 
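// Removing an app should delete its configuration node under /config/app, as verified below.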
configService.remove("test_app"); verify(regCenter).remove("/config/app/test_app"); } }
Esjob-Cloud-DevOps/elastic-job
elastic-job-cloud/elastic-job-cloud-scheduler/src/test/java/com/dangdang/ddframe/job/cloud/scheduler/config/app/CloudAppConfigurationServiceTest.java
Java
apache-2.0
4,002
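A hedged sketch of one additional round-trip test in the style of the suite above. It would live in the same test class and reuses only the fixture helpers, registry paths, and service calls already exercised by the existing tests; the combined add-then-load scenario itself is illustrative and not part of the original suite.

@Test
public void assertAddThenLoadRoundTrip() {
    CloudAppConfiguration appConfig = CloudAppConfigurationBuilder.createCloudAppConfiguration("test_app");
    // add() persists the serialized config under /config/app/test_app ...
    configService.add(appConfig);
    verify(regCenter).persist("/config/app/test_app", CloudAppJsonConstants.getAppJson("test_app"));
    // ... so a subsequent load("test_app") that reads the same node finds it.
    when(regCenter.get("/config/app/test_app")).thenReturn(CloudAppJsonConstants.getAppJson("test_app"));
    Optional<CloudAppConfiguration> actual = configService.load("test_app");
    assertTrue(actual.isPresent());
    assertThat(actual.get().getAppName(), is("test_app"));
}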
/* * Copyright 2015 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.server.services.jbpm; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import org.jbpm.services.api.DefinitionService; import org.jbpm.services.api.ProcessService; import org.jbpm.services.api.RuntimeDataService; import org.jbpm.services.api.model.ProcessDefinition; import org.jbpm.services.api.model.ProcessInstanceDesc; import org.kie.api.runtime.process.ProcessInstance; import org.kie.api.runtime.process.WorkItem; import org.kie.internal.KieInternalServices; import org.kie.internal.process.CorrelationKey; import org.kie.internal.process.CorrelationKeyFactory; import org.kie.server.api.model.instance.ProcessInstanceList; import org.kie.server.api.model.instance.WorkItemInstance; import org.kie.server.api.model.instance.WorkItemInstanceList; import org.kie.server.services.api.KieServerRegistry; import org.kie.server.services.impl.locator.ContainerLocatorProvider; import org.kie.server.services.impl.marshal.MarshallerHelper; import org.kie.server.services.jbpm.locator.ByProcessInstanceIdContainerLocator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.kie.server.services.jbpm.ConvertUtils.*; public class ProcessServiceBase { public static final Logger logger = LoggerFactory.getLogger(ProcessServiceBase.class); private ProcessService processService; private DefinitionService definitionService; private RuntimeDataService runtimeDataService; private MarshallerHelper marshallerHelper; private KieServerRegistry context; private CorrelationKeyFactory correlationKeyFactory = KieInternalServices.Factory.get().newCorrelationKeyFactory(); public ProcessServiceBase(ProcessService processService, DefinitionService definitionService, RuntimeDataService runtimeDataService, KieServerRegistry context) { this.processService = processService; this.definitionService = definitionService; this.runtimeDataService = runtimeDataService; this.marshallerHelper = new MarshallerHelper(context); this.context = context; } public String startProcess(String containerId, String processId, String marshallingType) { containerId = context.getContainerId(containerId, ContainerLocatorProvider.get().getLocator()); // check validity of deployment and process id definitionService.getProcessDefinition(containerId, processId); logger.debug("Calling start process with id {} on container {} and parameters {}", processId, containerId, null); Long processInstanceId = processService.startProcess(containerId, processId); // return response String response = marshallerHelper.marshal(containerId, marshallingType, processInstanceId); return response; } public String startProcess(String containerId, String processId, String payload, String marshallingType) { containerId = context.getContainerId(containerId, ContainerLocatorProvider.get().getLocator()); // check validity of deployment and process id definitionService.getProcessDefinition(containerId, processId); 
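// Unmarshal the caller-supplied parameters in the requested marshalling format before delegating to ProcessService; the same unmarshal-delegate-marshal pattern recurs throughout this class.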
logger.debug("About to unmarshal parameters from payload: '{}'", payload); Map<String, Object> parameters = marshallerHelper.unmarshal(containerId, payload, marshallingType, Map.class); logger.debug("Calling start process with id {} on container {} and parameters {}", processId, containerId, parameters); Long processInstanceId = processService.startProcess(containerId, processId, parameters); // return response String response = marshallerHelper.marshal(containerId, marshallingType, processInstanceId); return response; } public String startProcessWithCorrelation(String containerId, String processId, String correlationKey, String payload, String marshallingType) { containerId = context.getContainerId(containerId, ContainerLocatorProvider.get().getLocator()); // check validity of deployment and process id definitionService.getProcessDefinition(containerId, processId); logger.debug("About to unmarshal parameters from payload: '{}'", payload); Map<String, Object> parameters = marshallerHelper.unmarshal(containerId, payload, marshallingType, Map.class); String[] correlationProperties = correlationKey.split(":"); CorrelationKey actualCorrelationKey = correlationKeyFactory.newCorrelationKey(Arrays.asList(correlationProperties)); logger.debug("Calling start process with id {} on container {} and parameters {}", processId, containerId, parameters); Long processInstanceId = processService.startProcess(containerId, processId, actualCorrelationKey, parameters); // return response String response = marshallerHelper.marshal(containerId, marshallingType, processInstanceId); return response; } public Object abortProcessInstance(String containerId, Number processInstanceId) { processService.abortProcessInstance(processInstanceId.longValue()); return null; } protected List<Long> convert(List<? 
extends Number> input) { List<Long> result = new ArrayList<Long>(); for (Number n : input) { result.add(n.longValue()); } return result; } public Object abortProcessInstances(String containerId, List<Long> processInstanceIds) { processService.abortProcessInstances(convert(processInstanceIds)); return null; } public void signalProcessInstance(String containerId, Number processInstanceId, String signalName, String marshallingType) { logger.debug("Calling signal '{}' process instance with id {} on container {} and event {}", signalName, processInstanceId, containerId, null); processService.signalProcessInstance(processInstanceId.longValue(), signalName, null); } public void signalProcessInstance(String containerId, Number processInstanceId, String signalName, String eventPayload, String marshallingType) { logger.debug("About to unmarshal event from payload: '{}'", eventPayload); Object event = marshallerHelper.unmarshal(containerId, eventPayload, marshallingType, Object.class, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); logger.debug("Calling signal '{}' process instance with id {} on container {} and event {}", signalName, processInstanceId, containerId, event); processService.signalProcessInstance(processInstanceId.longValue(), signalName, event); } public void signalProcessInstances(String containerId, List<Long> processInstanceIds, String signalName, String marshallingType) { logger.debug("Calling signal '{}' process instances with id {} on container {} and event {}", signalName, processInstanceIds, containerId, null); processService.signalProcessInstances(convert(processInstanceIds), signalName, null); } public void signalProcessInstances(String containerId, List<Long> processInstanceIds, String signalName, String eventPayload, String marshallingType) { List<Long> ids = convert(processInstanceIds); if (ids.isEmpty()) { return; } logger.debug("About to unmarshal event from payload: '{}'", eventPayload); Object event = marshallerHelper.unmarshal(containerId, eventPayload, marshallingType, Object.class, new ByProcessInstanceIdContainerLocator(ids.get(0))); logger.debug("Calling signal '{}' process instances with id {} on container {} and event {}", signalName, processInstanceIds, containerId, event); processService.signalProcessInstances(ids, signalName, event); } public void signal(String containerId, String signalName, String marshallingType) { logger.debug("Calling signal '{}' on container {} and event {}", signalName, containerId, null); processService.signalEvent(containerId, signalName, null); } public void signal(String containerId, String signalName, String eventPayload, String marshallingType) { logger.debug("About to unmarshal event from payload: '{}'", eventPayload); Object event = marshallerHelper.unmarshal(containerId, eventPayload, marshallingType, Object.class); logger.debug("Calling signal '{}' on container {} and event {}", signalName, containerId, event); processService.signalEvent(containerId, signalName, event); } public String getProcessInstance(String containerId, Number processInstanceId, boolean withVars, String marshallingType) { ProcessInstanceDesc instanceDesc = runtimeDataService.getProcessInstanceById(processInstanceId.longValue()); if (instanceDesc == null) { throw new IllegalStateException("Unable to find process instance with id " + processInstanceId); } org.kie.server.api.model.instance.ProcessInstance processInstance = org.kie.server.api.model.instance.ProcessInstance.builder() .id(instanceDesc.getId()) 
.processId(instanceDesc.getProcessId()) .processName(instanceDesc.getProcessName()) .processVersion(instanceDesc.getProcessVersion()) .state(instanceDesc.getState()) .containerId(instanceDesc.getDeploymentId()) .date(instanceDesc.getDataTimeStamp()) .initiator(instanceDesc.getInitiator()) .processInstanceDescription(instanceDesc.getProcessInstanceDescription()) .parentInstanceId(instanceDesc.getParentId()) .correlationKey(instanceDesc.getCorrelationKey()) .build(); if (Boolean.TRUE.equals(withVars) && processInstance.getState().equals(ProcessInstance.STATE_ACTIVE)) { Map<String, Object> variables = processService.getProcessInstanceVariables(processInstanceId.longValue()); processInstance.setVariables(variables); } logger.debug("About to marshal process instance with id '{}' {}", processInstanceId, processInstance); String response = marshallerHelper.marshal(containerId, marshallingType, processInstance, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); return response; } public void setProcessVariable(String containerId, Number processInstanceId, String varName, String variablePayload, String marshallingType) { logger.debug("About to unmarshal variable from payload: '{}'", variablePayload); Object variable = marshallerHelper.unmarshal(containerId, variablePayload, marshallingType, Object.class, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); logger.debug("Setting variable '{}' on process instance with id {} with value {}", varName, processInstanceId, variable); processService.setProcessVariable(processInstanceId.longValue(), varName, variable); } public void setProcessVariables(String containerId, Number processInstanceId, String variablePayload, String marshallingType) { logger.debug("About to unmarshal variables from payload: '{}'", variablePayload); Map<String, Object> variables = marshallerHelper.unmarshal(containerId, variablePayload, marshallingType, Map.class, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); logger.debug("Setting variables '{}' on process instance with id {} with value {}", variables.keySet(), processInstanceId, variables.values()); processService.setProcessVariables(processInstanceId.longValue(), variables); } public String getProcessInstanceVariable(String containerId, Number processInstanceId, String varName, String marshallingType) { Object variable = processService.getProcessInstanceVariable(processInstanceId.longValue(), varName); if (variable == null) { throw new IllegalStateException("Unable to find variable '"+ varName + "' within process instance with id " + processInstanceId); } logger.debug("About to marshal process variable with name '{}' {}", varName, variable); String response = marshallerHelper.marshal(containerId, marshallingType, variable, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); return response; } public String getProcessInstanceVariables(String containerId, Number processInstanceId, String marshallingType) { Map<String, Object> variables = processService.getProcessInstanceVariables(processInstanceId.longValue()); logger.debug("About to marshal process variables {}", variables); String response = marshallerHelper.marshal(containerId, marshallingType, variables, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); return response; } public String getAvailableSignals(String containerId, Number processInstanceId, String marshallingType) { Collection<String> signals = 
processService.getAvailableSignals(processInstanceId.longValue()); logger.debug("About to marshal available signals {}", signals); String response = marshallerHelper.marshal(containerId, marshallingType, signals); return response; } public void completeWorkItem(String containerId, Number processInstanceId, Number workItemId, String resultPayload, String marshallingType) { logger.debug("About to unmarshal work item result from payload: '{}'", resultPayload); Map<String, Object> results = marshallerHelper.unmarshal(containerId, resultPayload, marshallingType, Map.class, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); logger.debug("Completing work item '{}' on process instance id {} with value {}", workItemId, processInstanceId, results); processService.completeWorkItem(workItemId.longValue(), results); } public void abortWorkItem(String containerId, Number processInstanceId, Number workItemId) { logger.debug("Aborting work item '{}' on process instance id {}", workItemId, processInstanceId); processService.abortWorkItem(workItemId.longValue()); } public String getWorkItem(String containerId, Number processInstanceId, Number workItemId, String marshallingType) { WorkItem workItem = processService.getWorkItem(workItemId.longValue()); if (workItem == null) { throw new IllegalStateException("Unable to find work item with id " + workItemId); } WorkItemInstance workItemInstance = WorkItemInstance.builder() .id(workItem.getId()) .nodeInstanceId(((org.drools.core.process.instance.WorkItem) workItem).getNodeInstanceId()) .processInstanceId(workItem.getProcessInstanceId()) .containerId(((org.drools.core.process.instance.WorkItem) workItem).getDeploymentId()) .name(workItem.getName()) .nodeId(((org.drools.core.process.instance.WorkItem) workItem).getNodeId()) .parameters(workItem.getParameters()) .state(workItem.getState()) .build(); logger.debug("About to marshal work item {}", workItemInstance); String response = marshallerHelper.marshal(containerId, marshallingType, workItemInstance, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); return response; } public String getWorkItemByProcessInstance(String containerId, Number processInstanceId, String marshallingType) { List<WorkItem> workItems = processService.getWorkItemByProcessInstance(processInstanceId.longValue()); WorkItemInstance[] instances = new WorkItemInstance[workItems.size()]; int counter = 0; for (WorkItem workItem : workItems) { WorkItemInstance workItemInstance = WorkItemInstance.builder() .id(workItem.getId()) .nodeInstanceId(((org.drools.core.process.instance.WorkItem) workItem).getNodeInstanceId()) .processInstanceId(workItem.getProcessInstanceId()) .containerId(((org.drools.core.process.instance.WorkItem) workItem).getDeploymentId()) .name(workItem.getName()) .nodeId(((org.drools.core.process.instance.WorkItem) workItem).getNodeId()) .parameters(workItem.getParameters()) .state(workItem.getState()) .build(); instances[counter] = workItemInstance; counter++; } WorkItemInstanceList result = new WorkItemInstanceList(instances); logger.debug("About to marshal work items {}", result); String response = marshallerHelper.marshal(containerId, marshallingType, result, new ByProcessInstanceIdContainerLocator(processInstanceId.longValue())); return response; } public ProcessInstanceList getProcessInstancesByParent(long parentProcessInstanceId, List<Integer> status, Integer page, Integer pageSize, String sort, boolean sortOrder) { if (sort == null || sort.isEmpty()) { sort = "ProcessInstanceId"; } 
if (status == null || status.isEmpty()) { status = new ArrayList<Integer>(); status.add(ProcessInstance.STATE_ACTIVE); } Collection<ProcessInstanceDesc> instances = runtimeDataService.getProcessInstancesByParent(parentProcessInstanceId, status, buildQueryContext(page, pageSize, sort, sortOrder)); logger.debug("Found {} process instances , statuses '{}'", instances.size(), status); ProcessInstanceList processInstanceList = convertToProcessInstanceList(instances); logger.debug("Returning result of process instance search: {}", processInstanceList); return processInstanceList; } }
markcoble/droolsjbpm-integration
kie-server-parent/kie-server-services/kie-server-services-jbpm/src/main/java/org/kie/server/services/jbpm/ProcessServiceBase.java
Java
apache-2.0
18,532
package it.pkg; public class Measurement { private String id; private String val; public Measurement(String id, String val) { super(); this.id = id; this.val = val; } public String getId() { return id; } public String getVal() { return val; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("Measurement ["); if (id != null) builder.append("id=").append(id).append(", "); if (val != null) builder.append("val=").append(val); builder.append("]"); return builder.toString(); } }
tarilabs/kjar-cep-archetype
src/test/resources/projects/integrationtestNoEclipse/reference/src/main/java/it/pkg/Measurement.java
Java
apache-2.0
612
<div class="panel panel-default enquiry-page"> <div class="panel-heading"> <a href="/admin/<?php $_(strtolower($modelName)); ?>">&larr; Return to list</a> </div> <!-- /.panel-heading --> <div class="panel-body helpdesk"> <div class="col-xs-6 col-md-6"> <?php /** @var \App\Admin\FieldFormatter $formatter */ $formatter->renderForm(); ?> </div> <div class="col-xs-6 col-md-6"> <div id="helpdesk"> <h4>Messages:</h4> <br/> <div class="messages-panel"> <div class="gwt-HTML"> <ul class="chat" id="enquiry_messages"> <?php foreach ($enquiryMessages as $eMessage): ?> <?php include __DIR__.'/_enquiry_message.php'; ?> <?php endforeach; ?> </ul> </div> </div> <div aria-hidden="true" style="display: none;" class="gwt-HTML errors alert alert-danger js-add-enquiry-message-errors"></div> <form action="/admin/enquiry/<?php echo $item->id(); ?>/add-message" method="POST" class="js-add-enquiry-message-form"> <table class="add-message-form" cellpadding="0" cellspacing="0"> <tbody> <tr> <td style="vertical-align: top;" align="left"> <div class="gwt-Label">Message:</div> </td> </tr> <tr> <td style="vertical-align: top;" align="left"> <textarea required="required" class="form-control" name="message"></textarea></td> </tr> <tr> <td style="vertical-align: top;" align="left"> <table class="buttons-panel" cellpadding="0" cellspacing="0"> <tbody> <tr> <td style="vertical-align: top;" align="right"> <button class="btn btn-primary" type="submit">Submit</button> </td> </tr> </tbody> </table> </td> </tr> </tbody> </table> </form> </div> </div> </div> <!-- /.panel-body --> </div>
AdrienKuhn/hackazon
assets/views/admin/enquiry/edit.php
PHP
apache-2.0
2,837
/* Copyright 2014 Google Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ using System; using DriveProxy.API; using DriveProxy.Forms; using DriveProxy.Service; using DriveProxy.Utils; namespace DriveProxy.Methods { internal class AuthenticateMethodInfo : MethodInfo { public AuthenticateMethodInfo() { try { SetResult(typeof(void)); } catch (Exception exception) { Log.Error(exception); } } public override MethodType Type { get { return MethodType.Authenticate; } } public override string Invoke(string[] args) { try { Invoke(); return ""; } catch (Exception exception) { StatusForm.Exception(exception); Log.Error(exception); return null; } } public void Invoke(bool signout = false) { try { if (signout) { DriveService.Signout(); } DriveService.Authenticate(false); } catch (Exception exception) { Log.Error(exception); } } } }
google/google-drive-proxy
DriveProxy/Methods/AuthenticateMethodInfo.cs
C#
apache-2.0
1,597
/* * Copyright 2014 CyberVision, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kaaproject.kaa.server.common.dao.service; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.runner.RunWith; import org.kaaproject.kaa.server.common.nosql.mongo.dao.MongoDBTestRunner; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = "/mongo-dao-test-context.xml") @DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_CLASS) public class MongoDBTopicServiceImplTest extends TopicServiceImplTest { @BeforeClass public static void init() throws Exception { MongoDBTestRunner.setUp(); } @AfterClass public static void after() throws Exception { MongoDBTestRunner.tearDown(); } }
vzhukovskyi/kaa
server/common/nosql/mongo-dao/src/test/java/org/kaaproject/kaa/server/common/dao/service/MongoDBTopicServiceImplTest.java
Java
apache-2.0
1,481
/* * Copyright 2015-2016 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Target; /** * Marker interface for methods used by test code in the same module. Methods/Constructors annotated * with this method should not be accessed in production code. This annotation should be used * sparingly as it's a code smell to need access to internal data/functionality to properly unit * test a class. Typically there is a better way to test a class. * <p> * TODO: Write a linter that makes sure only test code depends on methods or constructors annotated * with this method */ @Target({ ElementType.CONSTRUCTOR, ElementType.METHOD }) public @interface SdkTestInternalApi { }
mhurne/aws-sdk-java
aws-java-sdk-core/src/main/java/com/amazonaws/annotation/SdkTestInternalApi.java
Java
apache-2.0
1,216
/* * Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.identity.oauth.cache; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; public class SessionDataCacheKeyTest { String sessionDataId = "org.wso2.carbon.identity.oauth.cache.SessionDataCacheKey@54d4dc7"; Integer sessionDataIdHashcode = sessionDataId.hashCode(); @Test public void testGetSessionDataId() throws Exception { SessionDataCacheKey sessionDataCacheKey = new SessionDataCacheKey(sessionDataId); assertEquals(sessionDataCacheKey.getSessionDataId(), sessionDataId, "Get sessionDataId successfully."); } @DataProvider(name = "TestEquals") public Object[][] testequals() { return new Object[][]{ {true}, {false} }; } @Test(dataProvider = "TestEquals") public void testEquals(boolean istrue) throws Exception { Object object = new Object(); SessionDataCacheKey sessionDataCacheKey = new SessionDataCacheKey(sessionDataId); SessionDataCacheKey sessionDataCacheKeySample = new SessionDataCacheKey(sessionDataId); if (istrue) { assertTrue(sessionDataCacheKey.equals(sessionDataCacheKeySample)); } assertTrue(!sessionDataCacheKey.equals(object)); } @Test public void testHashCode() throws Exception { SessionDataCacheKey sessionDataCacheKey = new SessionDataCacheKey(sessionDataId); Integer sessionDataIdHashCodeSample = sessionDataCacheKey.hashCode(); assertEquals(sessionDataIdHashCodeSample , sessionDataIdHashcode, "Get SessionDataHashCode successfully."); } }
darshanasbg/identity-inbound-auth-oauth
components/org.wso2.carbon.identity.oauth/src/test/java/org/wso2/carbon/identity/oauth/cache/SessionDataCacheKeyTest.java
Java
apache-2.0
2,405
/* * Medical Image Registration ToolKit (MIRTK) * * Copyright 2013-2015 Imperial College London * Copyright 2013-2015 Andreas Schuh * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MIRTK_OrderedMap_H #define MIRTK_OrderedMap_H #include <map> namespace mirtk { template <class Key, class T, class Compare = std::less<Key> > using OrderedMap = std::map<Key, T, Compare>; } // namespace mirtk #endif // MIRTK_OrderedMap_H
BioMedIA/MIRTK
Modules/Common/include/mirtk/OrderedMap.h
C
apache-2.0
952
import sys sys.path.insert(1, "../../../") import h2o def bigcatGBM(ip,port): #Log.info("Importing bigcat_5000x2.csv data...\n") bigcat = h2o.import_file(path=h2o.locate("smalldata/gbm_test/bigcat_5000x2.csv")) bigcat["y"] = bigcat["y"].asfactor() #Log.info("Summary of bigcat_5000x2.csv from H2O:\n") #bigcat.summary() # Train H2O GBM Model: #Log.info("H2O GBM with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n") model = h2o.gbm(x=bigcat[["X"]], y = bigcat["y"], distribution="bernoulli", ntrees=1, max_depth=1, nbins=100) model.show() performance = model.model_performance(bigcat) performance.show() # Check AUC and overall prediction error #test_accuracy = performance.accuracy() test_auc = performance.auc() if __name__ == "__main__": h2o.run_test(sys.argv, bigcatGBM)
weaver-viii/h2o-3
h2o-py/tests/testdir_algos/gbm/pyunit_bigcatGBM.py
Python
apache-2.0
829
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package volumemanager import ( "os" "reflect" "strconv" "testing" "time" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/kubelet/config" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/pod" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/sets" utiltesting "k8s.io/kubernetes/pkg/util/testing" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) const ( testHostname = "test-hostname" ) func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient()) node, pod, pv, claim := createObjects() kubeClient := fake.NewSimpleClientset(node, pod, pv, claim) manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient) if err != nil { t.Fatalf("Failed to initialize volume manager: %v", err) } stopCh := runVolumeManager(manager) defer close(stopCh) podManager.SetPods([]*api.Pod{pod}) // Fake node status update go simulateVolumeInUseUpdate( api.UniqueVolumeName(node.Status.VolumesAttached[0].Name), stopCh, manager) err = manager.WaitForAttachAndMount(pod) if err != nil { t.Errorf("Expected success: %v", err) } expectedMounted := pod.Spec.Volumes[0].Name actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID)) if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) { t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted) } expectedInUse := []api.UniqueVolumeName{api.UniqueVolumeName(node.Status.VolumesAttached[0].Name)} actualInUse := manager.GetVolumesInUse() if !reflect.DeepEqual(expectedInUse, actualInUse) { t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse) } } func TestGetExtraSupplementalGroupsForPod(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient()) node, pod, _, claim := createObjects() existingGid := pod.Spec.SecurityContext.SupplementalGroups[0] cases := []struct { gidAnnotation string expected []int64 }{ { gidAnnotation: "777", expected: []int64{777}, }, { gidAnnotation: strconv.FormatInt(existingGid, 10), expected: []int64{}, }, { gidAnnotation: "a", expected: []int64{}, }, { gidAnnotation: "", expected: []int64{}, }, } for _, tc := range cases { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", 
Annotations: map[string]string{ volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation, }, }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, ClaimRef: &api.ObjectReference{ Name: claim.ObjectMeta.Name, }, }, } kubeClient := fake.NewSimpleClientset(node, pod, pv, claim) manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient) if err != nil { t.Errorf("Failed to initialize volume manager: %v", err) continue } stopCh := runVolumeManager(manager) defer func() { close(stopCh) }() podManager.SetPods([]*api.Pod{pod}) // Fake node status update go simulateVolumeInUseUpdate( api.UniqueVolumeName(node.Status.VolumesAttached[0].Name), stopCh, manager) err = manager.WaitForAttachAndMount(pod) if err != nil { t.Errorf("Expected success: %v", err) continue } actual := manager.GetExtraSupplementalGroupsForPod(pod) if !reflect.DeepEqual(tc.expected, actual) { t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual) } } } func newTestVolumeManager( tmpDir string, podManager pod.Manager, kubeClient internalclientset.Interface) (VolumeManager, error) { plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil} plugMgr := &volume.VolumePluginMgr{} plugMgr.InitPlugins([]volume.VolumePlugin{plug}, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil, "" /* rootContext */)) vm, err := NewVolumeManager( true, testHostname, podManager, kubeClient, plugMgr, &containertest.FakeRuntime{}, &mount.FakeMounter{}, "") return vm, err } // createObjects returns objects for making a fake clientset. The pv is // already attached to the node and bound to the claim used by the pod. func createObjects() (*api.Node, *api.Pod, *api.PersistentVolume, *api.PersistentVolumeClaim) { node := &api.Node{ ObjectMeta: api.ObjectMeta{Name: testHostname}, Status: api.NodeStatus{ VolumesAttached: []api.AttachedVolume{ { Name: "fake/pvA", DevicePath: "fake/path", }, }}, Spec: api.NodeSpec{ExternalID: testHostname}, } pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: "nsA", UID: "1234", }, Spec: api.PodSpec{ Volumes: []api.Volume{ { Name: "vol1", VolumeSource: api.VolumeSource{ PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ ClaimName: "claimA", }, }, }, }, SecurityContext: &api.PodSecurityContext{ SupplementalGroups: []int64{555}, }, }, } pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } return node, pod, pv, claim } func simulateVolumeInUseUpdate( volumeName api.UniqueVolumeName, stopCh <-chan struct{}, volumeManager VolumeManager) { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: volumeManager.MarkVolumesAsReportedInUse( []api.UniqueVolumeName{volumeName}) case <-stopCh: return } } } func runVolumeManager(manager VolumeManager) chan struct{} { stopCh := make(chan struct{}) //readyCh := make(chan bool, 1) //readyCh <- true sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true }) go manager.Run(sourcesReady, 
stopCh) return stopCh }
bmoylan/kubernetes
pkg/kubelet/volumemanager/volume_manager_test.go
GO
apache-2.0
7,696
package pl.project13.scala.akka.raft.cluster import akka.testkit.ImplicitSender import concurrent.duration._ import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp} import akka.cluster.Cluster import akka.actor.{RootActorPath, Props} import akka.util.Timeout import clusters._ import pl.project13.scala.akka.raft.protocol._ import pl.project13.scala.akka.raft.{RaftClientActor, ClusterConfiguration} import pl.project13.scala.akka.raft.example.WordConcatRaftActor import org.scalatest.time.{Millis, Span, Seconds} import pl.project13.scala.akka.raft.example.protocol._ abstract class ClusterRaftClientSpec extends RaftClusterSpec(ThreeNodesCluster) with ImplicitSender { implicit val defaultTimeout = { import concurrent.duration._ Timeout(5.seconds) } override implicit val patienceConfig = PatienceConfig( timeout = scaled(Span(2, Seconds)), interval = scaled(Span(1, Millis)) ) def initialParticipants = 3 behavior of s"${classOf[RaftClientActor].getSimpleName}" import ThreeNodesCluster._ it should "interact with cluster raft actors" in within(20.seconds) { Cluster(system).subscribe(testActor, classOf[MemberUp]) expectMsgClass(classOf[CurrentClusterState]) val firstAddress = node(first).address val secondAddress = node(second).address val thirdAddress = node(third).address Cluster(system) join firstAddress (1 to initialParticipants) map { idx => runOn(nodes(idx)) { val raftActor = system.actorOf(Props[WordConcatRaftActor], s"impl-raft-member-$idx") system.actorOf(ClusterRaftActor.props(raftActor, initialParticipants), s"raft-member-$idx") } } receiveN(3).collect { case MemberUp(m) => m.address }.toSet should be( Set(firstAddress, secondAddress, thirdAddress) ) Cluster(system).unsubscribe(testActor) testConductor.enter("all-nodes-up") val member1 = selectActorRef(firstAddress, 1) val member2 = selectActorRef(secondAddress, 2) val member3 = selectActorRef(thirdAddress, 3) val members = member1 :: member2 :: member3 :: Nil awaitLeaderElected(members) testConductor.enter("raft-up") // interact with cluster from each node runOn(second) { val client = system.actorOf(RaftClientActor.props( RootActorPath(firstAddress) / "user" / "raft-member-*", RootActorPath(secondAddress) / "user" / "raft-member-*", RootActorPath(thirdAddress) / "user" / "raft-member-*" ), "raft-client") client ! AppendWord("I") client ! AppendWord("like") client ! AppendWord("tea") client ! GetWords expectMsg("I") expectMsg("like") expectMsg("tea") expectMsg(List("I", "like", "tea")) } testConductor.enter("client-done") } } class ClusterRaftClientSpecMultiJvmNode1 extends ClusterRaftClientSpec class ClusterRaftClientSpecMultiJvmNode2 extends ClusterRaftClientSpec class ClusterRaftClientSpecMultiJvmNode3 extends ClusterRaftClientSpec
ktoso/akka-raft
src/multi-jvm/scala/pl/project13/scala/akka/raft/cluster/ClusterRaftClientSpec.scala
Scala
apache-2.0
3,016
/* Copyright (c) 2004-2010, The Dojo Foundation All Rights Reserved. Available via Academic Free License >= 2.1 OR the modified BSD license. see: http://dojotoolkit.org/license for details */ if(!dojo._hasResource["dijit._editor.html"]){ //_hasResource checks added by build. Do not use _hasResource directly in your code. dojo._hasResource["dijit._editor.html"] = true; dojo.provide("dijit._editor.html"); dijit._editor.escapeXml=function(/*String*/str, /*Boolean?*/noSingleQuotes){ // summary: // Adds escape sequences for special characters in XML: &<>"' // Optionally skips escapes for single quotes str = str.replace(/&/gm, "&amp;").replace(/</gm, "&lt;").replace(/>/gm, "&gt;").replace(/"/gm, "&quot;"); if(!noSingleQuotes){ str = str.replace(/'/gm, "&#39;"); } return str; // string }; dijit._editor.getNodeHtml=function(/* DomNode */node){ var output; switch(node.nodeType){ case 1: //element node var lName = node.nodeName.toLowerCase(); if(!lName || lName.charAt(0) == "/"){ // IE does some strange things with malformed HTML input, like // treating a close tag </span> without an open tag <span>, as // a new tag with tagName of /span. Corrupts output HTML, remove // them. Other browsers don't prefix tags that way, so will // never show up. return ""; } output = '<' + lName; //store the list of attributes and sort it to have the //attributes appear in the dictionary order var attrarray = []; var attr; if(dojo.isIE && node.outerHTML){ var s = node.outerHTML; s = s.substr(0, s.indexOf('>')) .replace(/(['"])[^"']*\1/g, ''); //to make the following regexp safe var reg = /(\b\w+)\s?=/g; var m, key; while((m = reg.exec(s))){ key = m[1]; if(key.substr(0,3) != '_dj'){ if(key == 'src' || key == 'href'){ if(node.getAttribute('_djrealurl')){ attrarray.push([key,node.getAttribute('_djrealurl')]); continue; } } var val, match; switch(key){ case 'style': val = node.style.cssText.toLowerCase(); break; case 'class': val = node.className; break; case 'width': if(lName === "img"){ // This somehow gets lost on IE for IMG tags and the like // and we have to find it in outerHTML, known IE oddity. match=/width=(\S+)/i.exec(s); if(match){ val = match[1]; } break; } case 'height': if(lName === "img"){ // This somehow gets lost on IE for IMG tags and the like // and we have to find it in outerHTML, known IE oddity. match=/height=(\S+)/i.exec(s); if(match){ val = match[1]; } break; } default: val = node.getAttribute(key); } if(val != null){ attrarray.push([key, val.toString()]); } } } }else{ var i = 0; while((attr = node.attributes[i++])){ //ignore all attributes starting with _dj which are //internal temporary attributes used by the editor var n = attr.name; if(n.substr(0,3) != '_dj' /*&& (attr.specified == undefined || attr.specified)*/){ var v = attr.value; if(n == 'src' || n == 'href'){ if(node.getAttribute('_djrealurl')){ v = node.getAttribute('_djrealurl'); } } attrarray.push([n,v]); } } } attrarray.sort(function(a,b){ return a[0] < b[0] ? -1 : (a[0] == b[0] ? 0 : 1); }); var j = 0; while((attr = attrarray[j++])){ output += ' ' + attr[0] + '="' + (dojo.isString(attr[1]) ? dijit._editor.escapeXml(attr[1], true) : attr[1]) + '"'; } if(lName === "script"){ // Browsers handle script tags differently in how you get content, // but innerHTML always seems to work, so insert its content that way // Yes, it's bad to allow script tags in the editor code, but some people // seem to want to do it, so we need to at least return them right. // other plugins/filters can strip them. 
output += '>' + node.innerHTML +'</' + lName + '>'; }else{ if(node.childNodes.length){ output += '>' + dijit._editor.getChildrenHtml(node)+'</' + lName +'>'; }else{ switch(lName){ case 'br': case 'hr': case 'img': case 'input': case 'base': case 'meta': case 'area': case 'basefont': // These should all be singly closed output += ' />'; break; default: // Assume XML style separate closure for everything else. output += '></' + lName + '>'; } } } break; case 4: // cdata case 3: // text // FIXME: output = dijit._editor.escapeXml(node.nodeValue, true); break; case 8: //comment // FIXME: output = '<!--' + dijit._editor.escapeXml(node.nodeValue, true) + '-->'; break; default: output = "<!-- Element not recognized - Type: " + node.nodeType + " Name: " + node.nodeName + "-->"; } return output; }; dijit._editor.getChildrenHtml = function(/* DomNode */dom){ // summary: // Returns the html content of a DomNode and children var out = ""; if(!dom){ return out; } var nodes = dom["childNodes"] || dom; //IE issue. //If we have an actual node we can check parent relationships on for IE, //We should check, as IE sometimes builds invalid DOMS. If no parent, we can't check //And should just process it and hope for the best. var checkParent = !dojo.isIE || nodes !== dom; var node, i = 0; while((node = nodes[i++])){ //IE is broken. DOMs are supposed to be a tree. But in the case of malformed HTML, IE generates a graph //meaning one node ends up with multiple references (multiple parents). This is totally wrong and invalid, but //such is what it is. We have to keep track and check for this because otherise the source output HTML will have dups. //No other browser generates a graph. Leave it to IE to break a fundamental DOM rule. So, we check the parent if we can //If we can't, nothing more we can do other than walk it. if(!checkParent || node.parentNode == dom){ out += dijit._editor.getNodeHtml(node); } } return out; // String }; }
sonatype/owf
web-app/js-lib/dojo-release-1.5.0/dijit/_editor/html.js
JavaScript
apache-2.0
6,422
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:wicket="http://wicket.apache.org"> <wicket:panel> <span wicket:id="usersearch">[USER SEARCH]</span> <div class="searchResult"> <span wicket:id="searchResult">[USER SEARCH RESULT]</span> </div> </wicket:panel> </html>
apache/syncope
client/idrepo/console/src/main/resources/org/apache/syncope/client/console/wizards/UserSelectionWizardStep.html
HTML
apache-2.0
1,052
# Local

## Privilege Management

Application privileges are managed by **Cynara** and the security manager in the **AppFw**. For more details, please refer to the **AppFw** documentation in the Platform part.
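To make the flow concrete, here is a minimal sketch of how a platform service might consult Cynara before granting an application access to a protected resource. It assumes the standard `cynara-client` C API; the client label, session, user id, and privilege name used below are hypothetical placeholders, not values defined by this document.

```c
#include <stdio.h>
#include <cynara-client.h>

/* Minimal sketch: ask Cynara whether an app holds a given privilege.
 * The client label, user and privilege strings are hypothetical examples. */
int main(void)
{
    cynara *cyn = NULL;

    if (cynara_initialize(&cyn, NULL) != CYNARA_API_SUCCESS) {
        fprintf(stderr, "cynara_initialize failed\n");
        return 1;
    }

    /* Label of the requesting app, its session, the user id, and the
     * privilege being checked (all placeholder values for illustration). */
    int ret = cynara_check(cyn,
                           "User::App::example-app",              /* client (hypothetical) */
                           "session-1",                            /* client session        */
                           "1001",                                 /* user                  */
                           "urn:example:permission:read-resource" /* privilege (hypothetical) */);

    if (ret == CYNARA_API_ACCESS_ALLOWED)
        printf("access allowed\n");
    else
        printf("access denied (code %d)\n", ret);

    cynara_finish(cyn);
    return 0;
}
```

In a real service the check would be performed by the security manager on behalf of the application, but the decision point is the same `cynara_check` call shown above.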
automotive-grade-linux/docs-agl
docs/security-blueprint/part-6/2-PrivilegeManagement.md
Markdown
apache-2.0
206
// Copyright 2021 Google LLC All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package cache provides methods to cache layers. package cache import ( "errors" "io" "github.com/google/go-containerregistry/pkg/logs" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" ) // Cache encapsulates methods to interact with cached layers. type Cache interface { // Put writes the Layer to the Cache. // // The returned Layer should be used for future operations, since lazy // cachers might only populate the cache when the layer is actually // consumed. // // The returned layer can be consumed, and the cache entry populated, // by calling either Compressed or Uncompressed and consuming the // returned io.ReadCloser. Put(v1.Layer) (v1.Layer, error) // Get returns the Layer cached by the given Hash, or ErrNotFound if no // such layer was found. Get(v1.Hash) (v1.Layer, error) // Delete removes the Layer with the given Hash from the Cache. Delete(v1.Hash) error } // ErrNotFound is returned by Get when no layer with the given Hash is found. var ErrNotFound = errors.New("layer was not found") // Image returns a new Image which wraps the given Image, whose layers will be // pulled from the Cache if they are found, and written to the Cache as they // are read from the underlying Image. func Image(i v1.Image, c Cache) v1.Image { return &image{ Image: i, c: c, } } type image struct { v1.Image c Cache } func (i *image) Layers() ([]v1.Layer, error) { ls, err := i.Image.Layers() if err != nil { return nil, err } out := make([]v1.Layer, len(ls)) for idx, l := range ls { out[idx] = &lazyLayer{inner: l, c: i.c} } return out, nil } type lazyLayer struct { inner v1.Layer c Cache } func (l *lazyLayer) Compressed() (io.ReadCloser, error) { digest, err := l.inner.Digest() if err != nil { return nil, err } if cl, err := l.c.Get(digest); err == nil { // Layer found in the cache. logs.Progress.Printf("Layer %s found (compressed) in cache", digest) return cl.Compressed() } else if !errors.Is(err, ErrNotFound) { return nil, err } // Not cached, pull and return the real layer. logs.Progress.Printf("Layer %s not found (compressed) in cache, getting", digest) rl, err := l.c.Put(l.inner) if err != nil { return nil, err } return rl.Compressed() } func (l *lazyLayer) Uncompressed() (io.ReadCloser, error) { diffID, err := l.inner.DiffID() if err != nil { return nil, err } if cl, err := l.c.Get(diffID); err == nil { // Layer found in the cache. logs.Progress.Printf("Layer %s found (uncompressed) in cache", diffID) return cl.Uncompressed() } else if !errors.Is(err, ErrNotFound) { return nil, err } // Not cached, pull and return the real layer. 
logs.Progress.Printf("Layer %s not found (uncompressed) in cache, getting", diffID) rl, err := l.c.Put(l.inner) if err != nil { return nil, err } return rl.Uncompressed() } func (l *lazyLayer) Size() (int64, error) { return l.inner.Size() } func (l *lazyLayer) DiffID() (v1.Hash, error) { return l.inner.DiffID() } func (l *lazyLayer) Digest() (v1.Hash, error) { return l.inner.Digest() } func (l *lazyLayer) MediaType() (types.MediaType, error) { return l.inner.MediaType() } func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { l, err := i.c.Get(h) if errors.Is(err, ErrNotFound) { // Not cached, get it and write it. l, err := i.Image.LayerByDigest(h) if err != nil { return nil, err } return i.c.Put(l) } return l, err } func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { l, err := i.c.Get(h) if errors.Is(err, ErrNotFound) { // Not cached, get it and write it. l, err := i.Image.LayerByDiffID(h) if err != nil { return nil, err } return i.c.Put(l) } return l, err }
google/ko
vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go
GO
apache-2.0
4,404
package auth import ( "context" "math/rand" "net/http" "time" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/jsonutil" ) type AuthMethod interface { // Authenticate returns a mount path, header, request body, and error. // The header may be nil if no special header is needed. Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) NewCreds() chan struct{} CredSuccess() Shutdown() } type AuthConfig struct { Logger hclog.Logger MountPath string WrapTTL time.Duration Config map[string]interface{} } // AuthHandler is responsible for keeping a token alive and renewed and passing // new tokens to the sink server type AuthHandler struct { DoneCh chan struct{} OutputCh chan string TemplateTokenCh chan string logger hclog.Logger client *api.Client random *rand.Rand wrapTTL time.Duration enableReauthOnNewCredentials bool enableTemplateTokenCh bool } type AuthHandlerConfig struct { Logger hclog.Logger Client *api.Client WrapTTL time.Duration EnableReauthOnNewCredentials bool EnableTemplateTokenCh bool } func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { ah := &AuthHandler{ DoneCh: make(chan struct{}), // This is buffered so that if we try to output after the sink server // has been shut down, during agent shutdown, we won't block OutputCh: make(chan string, 1), TemplateTokenCh: make(chan string, 1), logger: conf.Logger, client: conf.Client, random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), wrapTTL: conf.WrapTTL, enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, enableTemplateTokenCh: conf.EnableTemplateTokenCh, } return ah } func backoffOrQuit(ctx context.Context, backoff time.Duration) { select { case <-time.After(backoff): case <-ctx.Done(): } } func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) { if am == nil { panic("nil auth method") } ah.logger.Info("starting auth handler") defer func() { am.Shutdown() close(ah.OutputCh) close(ah.DoneCh) close(ah.TemplateTokenCh) ah.logger.Info("auth handler stopped") }() credCh := am.NewCreds() if !ah.enableReauthOnNewCredentials { realCredCh := credCh credCh = nil if realCredCh != nil { go func() { for { select { case <-ctx.Done(): return case <-realCredCh: } } }() } } if credCh == nil { credCh = make(chan struct{}) } var watcher *api.LifetimeWatcher for { select { case <-ctx.Done(): return default: } // Create a fresh backoff value backoff := 2*time.Second + time.Duration(ah.random.Int63()%int64(time.Second*2)-int64(time.Second)) ah.logger.Info("authenticating") path, header, data, err := am.Authenticate(ctx, ah.client) if err != nil { ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } clientToUse := ah.client if ah.wrapTTL > 0 { wrapClient, err := ah.client.Clone() if err != nil { ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } wrapClient.SetWrappingLookupFunc(func(string, string) string { return ah.wrapTTL.String() }) clientToUse = wrapClient } for key, values := range header { for _, value := range values { clientToUse.AddHeader(key, value) } } secret, err := clientToUse.Logical().Write(path, data) // Check errors/sanity if err != nil { ah.logger.Error("error authenticating", "error", err, "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } switch { case ah.wrapTTL > 0: if secret.WrapInfo == nil { 
ah.logger.Error("authentication returned nil wrap info", "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } if secret.WrapInfo.Token == "" { ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) if err != nil { ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") ah.OutputCh <- string(wrappedResp) if ah.enableTemplateTokenCh { ah.TemplateTokenCh <- string(wrappedResp) } am.CredSuccess() select { case <-ctx.Done(): ah.logger.Info("shutdown triggered") continue case <-credCh: ah.logger.Info("auth method found new credentials, re-authenticating") continue } default: if secret == nil || secret.Auth == nil { ah.logger.Error("authentication returned nil auth info", "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } if secret.Auth.ClientToken == "" { ah.logger.Error("authentication returned empty client token", "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } ah.logger.Info("authentication successful, sending token to sinks") ah.OutputCh <- secret.Auth.ClientToken if ah.enableTemplateTokenCh { ah.TemplateTokenCh <- secret.Auth.ClientToken } am.CredSuccess() } if watcher != nil { watcher.Stop() } watcher, err = ah.client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ Secret: secret, }) if err != nil { ah.logger.Error("error creating lifetime watcher, backing off and retrying", "error", err, "backoff", backoff.Seconds()) backoffOrQuit(ctx, backoff) continue } // Start the renewal process ah.logger.Info("starting renewal process") go watcher.Renew() LifetimeWatcherLoop: for { select { case <-ctx.Done(): ah.logger.Info("shutdown triggered, stopping lifetime watcher") watcher.Stop() break LifetimeWatcherLoop case err := <-watcher.DoneCh(): ah.logger.Info("lifetime watcher done channel triggered") if err != nil { ah.logger.Error("error renewing token", "error", err) } break LifetimeWatcherLoop case <-watcher.RenewCh(): ah.logger.Info("renewed auth token") case <-credCh: ah.logger.Info("auth method found new credentials, re-authenticating") break LifetimeWatcherLoop } } } }
ceph/ceph-csi
vendor/github.com/hashicorp/vault/command/agent/auth/auth.go
GO
apache-2.0
6,794
--- layout: global title: DROP DATABASE displayTitle: DROP DATABASE license: | Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --- ### Description Drop a database and delete the directory associated with the database from the file system. An exception will be thrown if the database does not exist in the system. ### Syntax {% highlight sql %} DROP (DATABASE|SCHEMA) [IF EXISTS] dbname [RESTRICT|CASCADE]; {% endhighlight %} ### Parameters <dl> <dt><code><em>DATABASE|SCHEMA</em></code></dt> <dd>`DATABASE` and `SCHEMA` mean the same thing, either of them can be used.</dd> </dl> <dl> <dt><code><em>IF EXISTS</em></code></dt> <dd>If specified, no exception is thrown when the database does not exist.</dd> </dl> <dl> <dt><code><em>RESTRICT</em></code></dt> <dd>If specified, will restrict dropping a non-empty database and is enabled by default.</dd> </dl> <dl> <dt><code><em>CASCADE</em></code></dt> <dd>If specified, will drop all the associated tables and functions.</dd> </dl> ### Example {% highlight sql %} -- Create `inventory_db` Database CREATE DATABASE inventory_db COMMENT 'This database is used to maintain Inventory'; -- Drop the database and it's tables DROP DATABASE inventory_db CASCADE; +---------+ | Result | +---------+ +---------+ -- Drop the database using IF EXISTS DROP DATABASE IF EXISTS inventory_db CASCADE; +---------+ | Result | +---------+ +---------+ {% endhighlight %} ### Related statements - [CREATE DATABASE](sql-ref-syntax-ddl-create-database.html) - [DESCRIBE DATABASE](sql-ref-syntax-aux-describe-database.html) - [SHOW DATABASES](sql-ref-syntax-aux-show-databases.html)
bdrillard/spark
docs/sql-ref-syntax-ddl-drop-database.md
Markdown
apache-2.0
2,374
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <title>Uses of Class org.apache.poi.hssf.util.HSSFColor.ORCHID (POI API Documentation)</title> <link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class org.apache.poi.hssf.util.HSSFColor.ORCHID (POI API Documentation)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../../org/apache/poi/hssf/util/HSSFColor.ORCHID.html" title="class in org.apache.poi.hssf.util">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>PREV</li> <li>NEXT</li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?org/apache/poi/hssf/util//class-useHSSFColor.ORCHID.html" target="_top">FRAMES</a></li> <li><a href="HSSFColor.ORCHID.html" target="_top">NO FRAMES</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class org.apache.poi.hssf.util.HSSFColor.ORCHID" class="title">Uses of Class<br>org.apache.poi.hssf.util.HSSFColor.ORCHID</h2> </div> <div class="classUseContainer">No usage of org.apache.poi.hssf.util.HSSFColor.ORCHID</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../../org/apache/poi/hssf/util/HSSFColor.ORCHID.html" title="class in org.apache.poi.hssf.util">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>PREV</li> <li>NEXT</li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?org/apache/poi/hssf/util//class-useHSSFColor.ORCHID.html" target="_top">FRAMES</a></li> <li><a href="HSSFColor.ORCHID.html" 
target="_top">NO FRAMES</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small> <i>Copyright 2014 The Apache Software Foundation or its licensors, as applicable.</i> </small></p> </body> </html>
RyoSaeba69/Bio-info
mylib/poi-3.11/docs/apidocs/org/apache/poi/hssf/util/class-use/HSSFColor.ORCHID.html
HTML
apache-2.0
4,312
/* * Copyright 2000-2014 Vaadin Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.vaadin.tests.elements; import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; import com.vaadin.testbench.elements.AbstractComponentElement; import com.vaadin.testbench.elements.AbstractLayoutElement; import com.vaadin.testbench.elements.AccordionElement; import com.vaadin.testbench.elements.BrowserFrameElement; import com.vaadin.testbench.elements.ButtonElement; import com.vaadin.testbench.elements.CheckBoxElement; import com.vaadin.testbench.elements.CheckBoxGroupElement; import com.vaadin.testbench.elements.ColorPickerElement; import com.vaadin.testbench.elements.ComboBoxElement; import com.vaadin.testbench.elements.DateFieldElement; import com.vaadin.testbench.elements.FlashElement; import com.vaadin.testbench.elements.FormLayoutElement; import com.vaadin.testbench.elements.GridElement; import com.vaadin.testbench.elements.GridLayoutElement; import com.vaadin.testbench.elements.HorizontalLayoutElement; import com.vaadin.testbench.elements.ImageElement; import com.vaadin.testbench.elements.ListSelectElement; import com.vaadin.testbench.elements.RadioButtonGroupElement; import com.vaadin.testbench.elements.TextAreaElement; import com.vaadin.testbench.elements.TextFieldElement; import com.vaadin.testbench.elements.TwinColSelectElement; import com.vaadin.testbench.elements.VerticalLayoutElement; import com.vaadin.tests.tb3.MultiBrowserTest; /** * * Test class which have test methods for all components added in the testUI * class. Open TestURL is called only once before tests. 
Parent class should * override protected Class<?> getUIClass() to specify which testUI should be * used */ public abstract class ElementComponentGetCaptionBaseTest extends MultiBrowserTest { AbstractLayoutElement mainLayout; @Before public void init() { openTestURL(); } @Test public void getComboboxCaptionTest() { ComboBoxElement elem = mainLayout.$(ComboBoxElement.class).get(0); testCaption(elem, 0); } @Test public void getButtonCaptionTest() { ButtonElement elem = mainLayout.$(ButtonElement.class).get(0); testCaption(elem, 1); } @Test public void getGridCaptionTest() { GridElement elem = mainLayout.$(GridElement.class).get(0); testCaption(elem, 2); } @Test public void getCheckBoxGroupCaptionTest() { CheckBoxGroupElement elem = mainLayout.$(CheckBoxGroupElement.class) .get(0); testCaption(elem, 3); } @Test public void getRadioButtonGroupCaptionTest() { RadioButtonGroupElement elem = mainLayout .$(RadioButtonGroupElement.class).get(0); testCaption(elem, 4); } @Test public void getTwinColSelectCaptionTest() { TwinColSelectElement elem = mainLayout.$(TwinColSelectElement.class) .get(0); testCaption(elem, 5); } @Test public void getListSelectCaptionTest() { ListSelectElement elem = mainLayout.$(ListSelectElement.class).get(0); testCaption(elem, 6); } @Test public void getColorPickerCaptionTest() { ColorPickerElement elem = mainLayout.$(ColorPickerElement.class).get(0); testCaption(elem, 7); } @Test public void getAccordionCaptionTest() { AccordionElement elem = mainLayout.$(AccordionElement.class).get(0); testCaption(elem, 8); } @Test public void getImageCaptionTest() { ImageElement elem = mainLayout.$(ImageElement.class).get(0); testCaption(elem, 9); } @Test public void getFlashCaptionTest() { FlashElement elem = mainLayout.$(FlashElement.class).get(0); testCaption(elem, 10); } @Test public void getBrowserFrameCaptionTest() { BrowserFrameElement elem = mainLayout.$(BrowserFrameElement.class) .get(0); testCaption(elem, 11); } @Test public void getCheckBoxCaptionTest() { CheckBoxElement elem = mainLayout.$(CheckBoxElement.class).get(0); testCaption(elem, 12); } @Test public void getTextFieldCaptionTest() { TextFieldElement elem = mainLayout.$(TextFieldElement.class).get(0); testCaption(elem, 13); } @Test public void getTextAreaCaptionTest() { TextAreaElement elem = mainLayout.$(TextAreaElement.class).get(0); testCaption(elem, 14); } @Test public void getDateFieldCaptionTest() { DateFieldElement elem = mainLayout.$(DateFieldElement.class).get(0); testCaption(elem, 15); } @Test public void getVerticalLayoutCaptionTest() { VerticalLayoutElement elem = mainLayout.$(VerticalLayoutElement.class) .get(0); testCaption(elem, 16); } @Test public void getHorizontalLayoutCaptionTest() { HorizontalLayoutElement elem = mainLayout .$(HorizontalLayoutElement.class).get(0); testCaption(elem, 17); } @Test public void getFormLayoutCaptionTest() { FormLayoutElement elem = mainLayout.$(FormLayoutElement.class).get(0); testCaption(elem, 18); } @Test public void getGridLayoutCaptionTest() { GridLayoutElement elem = mainLayout.$(GridLayoutElement.class).get(0); testCaption(elem, 19); } private void testCaption(AbstractComponentElement elem, int caption_index) { String actual = elem.getCaption(); String expected = ElementComponentGetCaptionBase.DEFAULT_CAPTIONS[caption_index]; assertTrue("Error with class:" + elem.getAttribute("class"), expected.equals(actual)); } }
Darsstar/framework
uitest/src/test/java/com/vaadin/tests/elements/ElementComponentGetCaptionBaseTest.java
Java
apache-2.0
6,314
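The abstract test above leaves getUIClass() and the mainLayout lookup to concrete subclasses. A minimal sketch of such a subclass might look like the following; the class name, the UI class returned, and the layout lookup are illustrative assumptions, not part of the original test suite.

import org.junit.Before;

import com.vaadin.testbench.elements.VerticalLayoutElement;

/** Hypothetical concrete test; names below are assumptions for illustration only. */
public class ElementComponentGetCaptionDefaultUITest
        extends ElementComponentGetCaptionBaseTest {

    @Override
    protected Class<?> getUIClass() {
        // Assumed UI under test; it must add components in the same order
        // that DEFAULT_CAPTIONS is indexed by the caption tests.
        return ElementComponentGetCaptionBase.class;
    }

    @Before
    @Override
    public void init() {
        super.init();
        // Locate the layout that the caption assertions query against
        // (assumed to be the outermost vertical layout of the test UI).
        mainLayout = $(VerticalLayoutElement.class).first();
    }
}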
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for builtin_functions module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.python.autograph.converters import builtin_functions from tensorflow.python.autograph.core import converter_testing from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class BuiltinFunctionsTest(converter_testing.TestCase): def test_len(self): def test_fn(a): return len(a) with self.converted(test_fn, builtin_functions, {'len': len}) as result: with self.test_session() as sess: p = array_ops.placeholder(dtype=dtypes.int32, shape=None) ops = result.test_fn(p) self.assertEqual(sess.run(ops, {p: [0, 0, 0]}), 3) def test_print(self): if six.PY2: return def test_fn(a): return print(a) with self.converted(test_fn, builtin_functions, {'print': print}) as result: with self.test_session() as sess: with self.assertPrints('a\n'): sess.run(result.test_fn('a')) def test_print_multiple_values(self): if six.PY2: return def test_fn(a, b, c): return print(a, b, c) with self.converted(test_fn, builtin_functions, {'print': print}) as result: with self.test_session() as sess: with self.assertPrints('a 1 [2, 3]\n'): sess.run( result.test_fn( constant_op.constant('a'), constant_op.constant(1), [2, 3])) def test_conversion_robust_to_unhashable_callables(self): def test_fn(): return foo() # pylint:disable=undefined-variable with self.converted(test_fn, builtin_functions, {'foo': { 'a': 'b' }.keys}) as result: self.assertListEqual(list(result.test_fn()), ['a']) if __name__ == '__main__': test.main()
snnn/tensorflow
tensorflow/python/autograph/converters/builtin_functions_test.py
Python
apache-2.0
2,636
##
# Copyright (C) 2013 TopCoder Inc., All Rights Reserved.
##
"""
Abstract interface for writer classes.

Defines the contract and responsibilities of concrete writer classes.
"""
__author__ = 'Easyhard'
__version__ = '1.0'


class DataWriter(object):
    """
    Abstract interface for writer classes.

    A writer class should be able to export the internal representation,
    i.e. a subclass of the Entity class, to an external format such as
    XML, CSV, etc.
    """

    def start(self):
        """Give the writer class a chance to initialize.

        Called once at the beginning of exporting."""
        raise NotImplementedError()

    def end(self):
        """
        Notify the writer that writing is finished.

        The writer class should finish any outstanding work in this method.
        """
        raise NotImplementedError()

    def write_entity(self, entity):
        """
        Export all fields returned by entity.get_field_list() each time
        this method is called.
        """
        raise NotImplementedError()
NASA-Tournament-Lab/CoECI-CMS-Healthcare-Fraud-Prevention
partnerclient/hfppnetwork/partner/conversion/datawriter.py
Python
apache-2.0
1,031
/*
 * Copyright (c) 2010-2012 LinkedIn, Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package krati.io.serializer;

import java.nio.ByteOrder;

import krati.io.SerializationException;
import krati.io.Serializer;
import krati.util.Numbers;

/**
 * ShortSerializer
 *
 * @author jwu
 * @since 10/01, 2012
 */
public class ShortSerializer implements Serializer<Short> {
    private final ByteOrder _byteOrder;

    /**
     * Creates a new short Serializer using the BIG_ENDIAN byte order.
     */
    public ShortSerializer() {
        this._byteOrder = ByteOrder.BIG_ENDIAN;
    }

    /**
     * Creates a new short Serializer using the specified byte order.
     */
    public ShortSerializer(ByteOrder byteOrder) {
        this._byteOrder = (byteOrder == null) ? ByteOrder.BIG_ENDIAN : byteOrder;
    }

    @Override
    public byte[] serialize(Short value) throws SerializationException {
        return (_byteOrder == ByteOrder.BIG_ENDIAN) ? Numbers.shortBytesBE(value) : Numbers.shortBytesLE(value);
    }

    @Override
    public Short deserialize(byte[] bytes) throws SerializationException {
        return (_byteOrder == ByteOrder.BIG_ENDIAN) ? Numbers.shortValueBE(bytes) : Numbers.shortValueLE(bytes);
    }

    public short shortValue(byte[] bytes) {
        return (_byteOrder == ByteOrder.BIG_ENDIAN) ? Numbers.shortValueBE(bytes) : Numbers.shortValueLE(bytes);
    }

    public byte[] shortBytes(Short value) throws SerializationException {
        return (_byteOrder == ByteOrder.BIG_ENDIAN) ? Numbers.shortBytesBE(value) : Numbers.shortBytesLE(value);
    }
}
ferdiknight/krati
krati-main/src/main/java/krati/io/serializer/ShortSerializer.java
Java
apache-2.0
2,190
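For context, a short usage sketch of the serializer above; the values are illustrative and the wrapper class is hypothetical.

import java.nio.ByteOrder;

public class ShortSerializerExample {
    public static void main(String[] args) {
        // Big-endian by default; pass a ByteOrder explicitly for little-endian.
        ShortSerializer bigEndian = new ShortSerializer();
        ShortSerializer littleEndian = new ShortSerializer(ByteOrder.LITTLE_ENDIAN);

        byte[] be = bigEndian.serialize((short) 258);    // bytes 0x01 0x02
        byte[] le = littleEndian.serialize((short) 258); // bytes 0x02 0x01

        short roundTripped = bigEndian.shortValue(be);   // 258
        Short boxed = littleEndian.deserialize(le);      // 258
        System.out.println(roundTripped + " " + boxed);
    }
}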
/*
 * Copyright © 2015 Cask Data, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

/**
 * Packages for ETL Application Template.
 */
package co.cask.cdap.etl.realtime.jms;
mpouttuclarke/cdap
cdap-app-templates/cdap-etl/cdap-etl-lib/src/main/java/co/cask/cdap/etl/realtime/jms/package-info.java
Java
apache-2.0
691
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.internal.cache.tier.sockets.command; import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.util.ArrayList; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.ArgumentCaptor; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.apache.geode.CancelCriterion; import org.apache.geode.cache.operations.RegisterInterestOperationContext; import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.LocalRegion; import org.apache.geode.internal.cache.tier.CachedRegionHelper; import org.apache.geode.internal.cache.tier.sockets.AcceptorImpl; import org.apache.geode.internal.cache.tier.sockets.ChunkedMessage; import org.apache.geode.internal.cache.tier.sockets.Message; import org.apache.geode.internal.cache.tier.sockets.Part; import org.apache.geode.internal.cache.tier.sockets.ServerConnection; import org.apache.geode.internal.security.AuthorizeRequest; import org.apache.geode.internal.security.SecurityService; import org.apache.geode.internal.serialization.KnownVersion; import org.apache.geode.security.NotAuthorizedException; import org.apache.geode.security.ResourcePermission.Operation; import org.apache.geode.security.ResourcePermission.Resource; import org.apache.geode.test.junit.categories.ClientServerTest; @Category({ClientServerTest.class}) public class RegisterInterestList66Test { private static final String REGION_NAME = "region1"; private static final String KEY = "key1"; private static final byte[] DURABLE = new byte[8]; private static final byte[] DATA_POLICY = new byte[] {0x01, 0x01}; @Mock private SecurityService securityService; @Mock private Message message; @Mock private ServerConnection serverConnection; @Mock private AuthorizeRequest authzRequest; @Mock private InternalCache cache; @Mock private Part regionNamePart; @Mock private Part interestTypePart; @Mock private Part durablePart; @Mock private Part keyPart; @Mock private Part numberOfKeysPart; @Mock private Part notifyPart; @Mock private Part regionDataPolicyPart; @Mock private RegisterInterestOperationContext registerInterestOperationContext; @Mock private ChunkedMessage chunkedResponseMessage; @InjectMocks private RegisterInterestList66 registerInterestList66; @Before public void setUp() throws 
Exception { registerInterestList66 = new RegisterInterestList66(); MockitoAnnotations.openMocks(this); when(authzRequest.registerInterestListAuthorize(eq(REGION_NAME), any(), any())) .thenReturn(registerInterestOperationContext); when(cache.getRegion(isA(String.class))).thenReturn(uncheckedCast(mock(LocalRegion.class))); when(cache.getCancelCriterion()).thenReturn(mock(CancelCriterion.class)); when(durablePart.getObject()).thenReturn(DURABLE); when(interestTypePart.getInt()).thenReturn(0); when(keyPart.getObject()).thenReturn(mock(ArrayList.class)); when(message.getNumberOfParts()).thenReturn(6); when(message.getPart(eq(0))).thenReturn(regionNamePart); when(message.getPart(eq(1))).thenReturn(interestTypePart); when(message.getPart(eq(2))).thenReturn(durablePart); when(message.getPart(eq(3))).thenReturn(keyPart); when(message.getPart(eq(4))).thenReturn(notifyPart); when(message.getPart(eq(5))).thenReturn(regionDataPolicyPart); when(notifyPart.getObject()).thenReturn(DURABLE); when(numberOfKeysPart.getInt()).thenReturn(1); when(regionDataPolicyPart.getObject()).thenReturn(DATA_POLICY); when(regionNamePart.getCachedString()).thenReturn(REGION_NAME); when(registerInterestOperationContext.getKey()).thenReturn(KEY); when(serverConnection.getCache()).thenReturn(cache); when(serverConnection.getAuthzRequest()).thenReturn(authzRequest); when(serverConnection.getCachedRegionHelper()).thenReturn(mock(CachedRegionHelper.class)); when(serverConnection.getChunkedResponseMessage()).thenReturn(chunkedResponseMessage); when(serverConnection.getClientVersion()).thenReturn(KnownVersion.GFE_81); when(serverConnection.getAcceptor()).thenReturn(mock(AcceptorImpl.class)); } @Test public void noSecurityShouldSucceed() throws Exception { when(securityService.isClientSecurityRequired()).thenReturn(false); registerInterestList66.cmdExecute(message, serverConnection, securityService, 0); verify(chunkedResponseMessage).sendChunk(serverConnection); } @Test public void integratedSecurityShouldSucceedIfAuthorized() throws Exception { when(securityService.isClientSecurityRequired()).thenReturn(true); when(securityService.isIntegratedSecurity()).thenReturn(true); registerInterestList66.cmdExecute(message, serverConnection, securityService, 0); verify(securityService).authorize(Resource.DATA, Operation.READ, REGION_NAME); verify(chunkedResponseMessage).sendChunk(serverConnection); } @Test public void integratedSecurityShouldThrowIfNotAuthorized() throws Exception { when(securityService.isClientSecurityRequired()).thenReturn(true); when(securityService.isIntegratedSecurity()).thenReturn(true); doThrow(new NotAuthorizedException("")).when(securityService).authorize(Resource.DATA, Operation.READ, REGION_NAME); registerInterestList66.cmdExecute(message, serverConnection, securityService, 0); verify(securityService).authorize(Resource.DATA, Operation.READ, REGION_NAME); verify(chunkedResponseMessage).sendChunk(serverConnection); } @Test public void oldSecurityShouldSucceedIfAuthorized() throws Exception { when(securityService.isClientSecurityRequired()).thenReturn(true); when(securityService.isIntegratedSecurity()).thenReturn(false); registerInterestList66.cmdExecute(message, serverConnection, securityService, 0); verify(authzRequest).registerInterestListAuthorize(eq(REGION_NAME), any(), any()); verify(chunkedResponseMessage).sendChunk(serverConnection); } @Test public void oldSecurityShouldFailIfNotAuthorized() throws Exception { when(securityService.isClientSecurityRequired()).thenReturn(true); 
when(securityService.isIntegratedSecurity()).thenReturn(false); doThrow(new NotAuthorizedException("")).when(authzRequest) .registerInterestListAuthorize(eq(REGION_NAME), any(), any()); registerInterestList66.cmdExecute(message, serverConnection, securityService, 0); verify(authzRequest).registerInterestListAuthorize(eq(REGION_NAME), any(), any()); ArgumentCaptor<NotAuthorizedException> argument = ArgumentCaptor.forClass(NotAuthorizedException.class); verify(chunkedResponseMessage).addObjPart(argument.capture()); assertThat(argument.getValue()).isExactlyInstanceOf(NotAuthorizedException.class); verify(chunkedResponseMessage).sendChunk(serverConnection); } }
smgoller/geode
geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66Test.java
Java
apache-2.0
8,181
/*
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.optaplanner.persistence.jpa.api.score.buildin.bendable;

import javax.persistence.AttributeConverter;
import javax.persistence.Converter;

import org.optaplanner.core.api.score.buildin.bendable.BendableScore;

@Converter
public class BendableScoreConverter implements AttributeConverter<BendableScore, String> {

    @Override
    public String convertToDatabaseColumn(BendableScore score) {
        if (score == null) {
            return null;
        }
        return score.toString();
    }

    @Override
    public BendableScore convertToEntityAttribute(String scoreString) {
        if (scoreString == null) {
            return null;
        }
        return BendableScore.parseScore(scoreString);
    }
}
ge0ffrey/optaplanner
optaplanner-persistence/optaplanner-persistence-jpa/src/main/java/org/optaplanner/persistence/jpa/api/score/buildin/bendable/BendableScoreConverter.java
Java
apache-2.0
1,350
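The converter above is typically attached to a JPA-mapped score field; a minimal sketch with a hypothetical entity (name and column length are assumptions) might look like this.

import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;

import org.optaplanner.core.api.score.buildin.bendable.BendableScore;

@Entity
public class GameSolutionEntity { // hypothetical entity name

    @Id
    @GeneratedValue
    private Long id;

    // Persisted as the String form of BendableScore and parsed back on load.
    @Convert(converter = BendableScoreConverter.class)
    @Column(length = 100)
    private BendableScore score;

    public BendableScore getScore() {
        return score;
    }

    public void setScore(BendableScore score) {
        this.score = score;
    }
}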
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.processor.channel; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.camel.AsyncProcessor; import org.apache.camel.CamelContext; import org.apache.camel.CamelContextAware; import org.apache.camel.Channel; import org.apache.camel.Exchange; import org.apache.camel.NamedNode; import org.apache.camel.NamedRoute; import org.apache.camel.Processor; import org.apache.camel.Route; import org.apache.camel.processor.CamelInternalProcessor; import org.apache.camel.processor.WrapProcessor; import org.apache.camel.processor.errorhandler.RedeliveryErrorHandler; import org.apache.camel.processor.interceptor.BacklogDebugger; import org.apache.camel.processor.interceptor.BacklogTracer; import org.apache.camel.spi.Debugger; import org.apache.camel.spi.InterceptStrategy; import org.apache.camel.spi.ManagementInterceptStrategy; import org.apache.camel.spi.MessageHistoryFactory; import org.apache.camel.spi.Tracer; import org.apache.camel.support.OrderedComparator; import org.apache.camel.support.service.ServiceHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * DefaultChannel is the default {@link Channel}. * <p/> * The current implementation is just a composite containing the interceptors and error handler * that beforehand was added to the route graph directly. * <br/> * With this {@link Channel} we can in the future implement better strategies for routing the * {@link Exchange} in the route graph, as we have a {@link Channel} between each and every node * in the graph. */ public class DefaultChannel extends CamelInternalProcessor implements Channel { private static final Logger LOG = LoggerFactory.getLogger(DefaultChannel.class); private Processor errorHandler; // the next processor (non wrapped) private Processor nextProcessor; // the real output to invoke that has been wrapped private Processor output; private NamedNode definition; private ManagementInterceptStrategy.InstrumentationProcessor<?> instrumentationProcessor; private CamelContext camelContext; private Route route; public DefaultChannel(CamelContext camelContext) { super(camelContext); } @Override public Processor getOutput() { // the errorHandler is already decorated with interceptors // so it contain the entire chain of processors, so we can safely use it directly as output // if no error handler provided we use the output // the error handlers, interceptors, etc. woven in at design time return errorHandler != null ? 
errorHandler : output; } @Override public boolean hasNext() { return nextProcessor != null; } @Override public List<Processor> next() { if (!hasNext()) { return null; } List<Processor> answer = new ArrayList<>(1); answer.add(nextProcessor); return answer; } public void setOutput(Processor output) { this.output = output; } @Override public Processor getNextProcessor() { return nextProcessor; } /** * Sets the {@link org.apache.camel.processor.ErrorHandler} this Channel uses. * * @param errorHandler the error handler */ public void setErrorHandler(Processor errorHandler) { this.errorHandler = errorHandler; } @Override public Processor getErrorHandler() { return errorHandler; } @Override public NamedNode getProcessorDefinition() { return definition; } public void clearModelReferences() { this.definition = null; } @Override public Route getRoute() { return route; } @Override protected void doStart() throws Exception { // do not call super as we want to be in control here of the lifecycle // the output has now been created, so assign the output as the processor setProcessor(getOutput()); ServiceHelper.startService(errorHandler, output); } @Override protected void doStop() throws Exception { // do not call super as we want to be in control here of the lifecycle // only stop services if not context scoped (as context scoped is reused by others) ServiceHelper.stopService(output, errorHandler); } @Override protected void doShutdown() throws Exception { // do not call super as we want to be in control here of the lifecycle ServiceHelper.stopAndShutdownServices(output, errorHandler); } /** * Initializes the channel. * If the initialized output definition contained outputs (children) then * the childDefinition will be set so we can leverage fine grained tracing * * @param route the route context * @param definition the route definition the {@link Channel} represents * @param childDefinition the child definition * @throws Exception is thrown if some error occurred */ public void initChannel(Route route, NamedNode definition, NamedNode childDefinition, List<InterceptStrategy> interceptors, Processor nextProcessor, NamedRoute routeDefinition, boolean first) throws Exception { this.route = route; this.definition = definition; this.camelContext = route.getCamelContext(); this.nextProcessor = nextProcessor; // init CamelContextAware as early as possible on nextProcessor if (nextProcessor instanceof CamelContextAware) { ((CamelContextAware) nextProcessor).setCamelContext(camelContext); } // the definition to wrap should be the fine grained, // so if a child is set then use it, if not then its the original output used NamedNode targetOutputDef = childDefinition != null ? 
childDefinition : definition; LOG.trace("Initialize channel for target: {}", targetOutputDef); // setup instrumentation processor for management (jmx) // this is later used in postInitChannel as we need to setup the error handler later as well ManagementInterceptStrategy managed = route.getManagementInterceptStrategy(); if (managed != null) { instrumentationProcessor = managed.createProcessor(targetOutputDef, nextProcessor); } if (route.isMessageHistory()) { // add message history advice MessageHistoryFactory factory = camelContext.getMessageHistoryFactory(); addAdvice(new MessageHistoryAdvice(factory, targetOutputDef)); } // add advice that keeps track of which node is processing addAdvice(new NodeHistoryAdvice(targetOutputDef)); // then wrap the output with the tracer and debugger (debugger first, // as we do not want regular tracer to trace the debugger) if (route.isDebugging()) { if (camelContext.getDebugger() != null) { // use custom debugger Debugger debugger = camelContext.getDebugger(); addAdvice(new DebuggerAdvice(debugger, nextProcessor, targetOutputDef)); } else { // use backlog debugger BacklogDebugger debugger = getOrCreateBacklogDebugger(); camelContext.addService(debugger); addAdvice(new BacklogDebuggerAdvice(debugger, nextProcessor, targetOutputDef)); } } if (route.isBacklogTracing()) { // add jmx backlog tracer BacklogTracer backlogTracer = getOrCreateBacklogTracer(); addAdvice(new BacklogTracerAdvice(backlogTracer, targetOutputDef, routeDefinition, first)); } if (route.isTracing()) { // add logger tracer Tracer tracer = camelContext.getTracer(); addAdvice(new TracingAdvice(tracer, targetOutputDef, routeDefinition, first)); } // sort interceptors according to ordered interceptors.sort(OrderedComparator.get()); // reverse list so the first will be wrapped last, as it would then be first being invoked Collections.reverse(interceptors); // wrap the output with the configured interceptors Processor target = nextProcessor; for (InterceptStrategy strategy : interceptors) { Processor next = target == nextProcessor ? null : nextProcessor; // use the fine grained definition (eg the child if available). Its always possible to get back to the parent Processor wrapped = strategy.wrapProcessorInInterceptors(route.getCamelContext(), targetOutputDef, target, next); if (!(wrapped instanceof AsyncProcessor)) { LOG.warn("Interceptor: " + strategy + " at: " + definition + " does not return an AsyncProcessor instance." + " This causes the asynchronous routing engine to not work as optimal as possible." + " See more details at the InterceptStrategy javadoc." + " Camel will use a bridge to adapt the interceptor to the asynchronous routing engine," + " but its not the most optimal solution. Please consider changing your interceptor to comply."); } if (!(wrapped instanceof WrapProcessor)) { // wrap the target so it becomes a service and we can manage its lifecycle wrapped = new WrapProcessor(wrapped, target); } target = wrapped; } if (route.isStreamCaching()) { addAdvice(new StreamCachingAdvice(camelContext.getStreamCachingStrategy())); } if (route.getDelayer() != null && route.getDelayer() > 0) { addAdvice(new DelayerAdvice(route.getDelayer())); } // sets the delegate to our wrapped output output = target; } /** * Post initializes the channel. * * @throws Exception is thrown if some error occurred */ public void postInitChannel() throws Exception { // if jmx was enabled for the processor then either add as advice or wrap and change the processor // on the error handler. 
See more details in the class javadoc of InstrumentationProcessor if (instrumentationProcessor != null) { boolean redeliveryPossible = false; if (errorHandler instanceof RedeliveryErrorHandler) { redeliveryPossible = ((RedeliveryErrorHandler) errorHandler).determineIfRedeliveryIsEnabled(); if (redeliveryPossible) { // okay we can redeliver then we need to change the output in the error handler // to use us which we then wrap the call so we can capture before/after for redeliveries as well Processor currentOutput = ((RedeliveryErrorHandler) errorHandler).getOutput(); instrumentationProcessor.setProcessor(currentOutput); ((RedeliveryErrorHandler) errorHandler).changeOutput(instrumentationProcessor); } } if (!redeliveryPossible) { // optimise to use advice as we cannot redeliver addAdvice(CamelInternalProcessor.wrap(instrumentationProcessor)); } } } private BacklogTracer getOrCreateBacklogTracer() { BacklogTracer tracer = null; if (camelContext.getRegistry() != null) { // lookup in registry Map<String, BacklogTracer> map = camelContext.getRegistry().findByTypeWithName(BacklogTracer.class); if (map.size() == 1) { tracer = map.values().iterator().next(); } } if (tracer == null) { tracer = camelContext.getExtension(BacklogTracer.class); } if (tracer == null) { tracer = BacklogTracer.createTracer(camelContext); camelContext.setExtension(BacklogTracer.class, tracer); } return tracer; } private BacklogDebugger getOrCreateBacklogDebugger() { BacklogDebugger debugger = null; if (camelContext.getRegistry() != null) { // lookup in registry Map<String, BacklogDebugger> map = camelContext.getRegistry().findByTypeWithName(BacklogDebugger.class); if (map.size() == 1) { debugger = map.values().iterator().next(); } } if (debugger == null) { debugger = camelContext.hasService(BacklogDebugger.class); } if (debugger == null) { // fallback to use the default debugger debugger = BacklogDebugger.createDebugger(camelContext); } return debugger; } @Override public String toString() { // just output the next processor as all the interceptors and error handler is just too verbose return "Channel[" + nextProcessor + "]"; } }
DariusX/camel
core/camel-base/src/main/java/org/apache/camel/processor/channel/DefaultChannel.java
Java
apache-2.0
13,979
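DefaultChannel weaves InterceptStrategy instances around every node in the route graph. A minimal custom strategy is sketched below; it is only an illustration, and because it does not return an AsyncProcessor the channel will bridge it as described in the warning above.

import org.apache.camel.CamelContext;
import org.apache.camel.NamedNode;
import org.apache.camel.Processor;
import org.apache.camel.spi.InterceptStrategy;

/** Hypothetical strategy that logs before delegating to the wrapped node. */
public class LoggingInterceptStrategy implements InterceptStrategy {

    @Override
    public Processor wrapProcessorInInterceptors(CamelContext camelContext, NamedNode definition,
                                                 Processor target, Processor nextTarget) throws Exception {
        return exchange -> {
            // Log the node id and the current message body, then continue with the wrapped processor.
            System.out.println("Before node " + definition.getId()
                    + " with body: " + exchange.getIn().getBody());
            target.process(exchange);
        };
    }
}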
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexableField; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Set; import static java.util.Collections.emptyList; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; public class ParentFieldMapperTests extends ESSingleNodeTestCase { @Override protected Collection<Class<? 
extends Plugin>> getPlugins() { return Collections.singleton(InternalSettingsPlugin.class); } public void testParentSetInDocNotAllowed() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); try { docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().field("_parent", "1122").endObject().bytes(), XContentType.JSON)); fail("Expected failure to parse metadata field"); } catch (MapperParsingException e) { assertTrue(e.getMessage(), e.getMessage().contains("Field [_parent] is a metadata field and cannot be added inside a document")); } } public void testJoinFieldSet() throws Exception { String parentMapping = XContentFactory.jsonBuilder().startObject().startObject("parent_type") .endObject().endObject().string(); String childMapping = XContentFactory.jsonBuilder().startObject().startObject("child_type") .startObject("_parent").field("type", "parent_type").endObject() .endObject().endObject().string(); IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); // Indexing parent doc: DocumentMapper parentDocMapper = indexService.mapperService().documentMapper("parent_type"); ParsedDocument doc = parentDocMapper.parse(SourceToParse.source("test", "parent_type", "1122", new BytesArray("{}"), XContentType.JSON)); assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc())); assertEquals("1122", doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString()); // Indexing child doc: DocumentMapper childDocMapper = indexService.mapperService().documentMapper("child_type"); doc = childDocMapper.parse(SourceToParse.source("test", "child_type", "1", new BytesArray("{}"), XContentType.JSON).parent("1122")); assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc())); assertEquals("1122", doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString()); } public void testJoinFieldNotSet() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("x_field", "x_value") .endObject() .bytes(), XContentType.JSON)); assertEquals(0, getNumberOfFieldWithParentPrefix(doc.rootDoc())); } public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception { Index index = new Index("_index", "testUUID"); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY); NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer, Collections.emptyMap(), Collections.emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); MapperService mapperService = new 
MapperService(indexSettings, indexAnalyzers, xContentRegistry(), similarityService, new IndicesModule(emptyList()).getMapperRegistry(), () -> null); XContentBuilder mappingSource = jsonBuilder().startObject().startObject("some_type") .startObject("properties") .endObject() .endObject().endObject(); mapperService.merge("some_type", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE, false); Set<String> allFields = new HashSet<>(mapperService.simpleMatchToIndexNames("*")); assertTrue(allFields.contains("_parent")); assertFalse(allFields.contains("_parent#null")); MappedFieldType fieldType = mapperService.fullName("_parent"); assertFalse(fieldType.eagerGlobalOrdinals()); } private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) { int numFieldWithParentPrefix = 0; for (IndexableField field : doc) { if (field.name().startsWith("_parent")) { numFieldWithParentPrefix++; } } return numFieldWithParentPrefix; } }
jimczi/elasticsearch
core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
Java
apache-2.0
7,530
/*
 * Licensed to DuraSpace under one or more contributor license agreements.
 * See the NOTICE file distributed with this work for additional information
 * regarding copyright ownership.
 *
 * DuraSpace licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.fcrepo.kernel.api;

/**
 * A utility class for working with common transaction related operations.
 *
 * @author dbernstein
 */
public class TransactionUtils {

    private TransactionUtils() {
    }

    /**
     * Returns the transaction ID if the transaction is both non-null and uncommitted.
     * Otherwise it returns null.
     *
     * @param transaction The transaction
     * @return The transaction ID or null
     */
    public static String openTxId(final Transaction transaction) {
        return transaction == null || transaction.isCommitted() ? null : transaction.getId();
    }
}
fcrepo4/fcrepo4
fcrepo-kernel-api/src/main/java/org/fcrepo/kernel/api/TransactionUtils.java
Java
apache-2.0
1,374
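A brief usage sketch of the helper above; the wrapper class and method are hypothetical names placed in the same package so the Transaction type resolves.

package org.fcrepo.kernel.api;

/** Usage sketch only; the class and method names are illustrative. */
public final class TransactionUtilsExample {

    private TransactionUtilsExample() {
    }

    /** Returns a log-friendly label for a possibly null or already committed transaction. */
    public static String describe(final Transaction transaction) {
        // openTxId is null-safe and returns null once the transaction has been committed.
        final String txId = TransactionUtils.openTxId(transaction);
        return txId == null ? "no open transaction" : "open transaction " + txId;
    }
}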
/* * Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.internal.serialization.impl; import com.hazelcast.nio.BufferObjectDataOutput; import com.hazelcast.nio.ObjectDataOutput; import com.hazelcast.nio.serialization.ClassDefinition; import com.hazelcast.nio.serialization.FieldDefinition; import com.hazelcast.nio.serialization.FieldType; import com.hazelcast.nio.serialization.HazelcastSerializationException; import com.hazelcast.nio.serialization.Portable; import com.hazelcast.nio.serialization.PortableWriter; import java.io.IOException; import java.util.HashSet; import java.util.Set; import static com.hazelcast.nio.Bits.INT_SIZE_IN_BYTES; import static com.hazelcast.nio.Bits.NULL_ARRAY_LENGTH; public class DefaultPortableWriter implements PortableWriter { private final PortableSerializer serializer; private final ClassDefinition cd; private final BufferObjectDataOutput out; private final int begin; private final int offset; private final Set<String> writtenFields; private boolean raw; public DefaultPortableWriter(PortableSerializer serializer, BufferObjectDataOutput out, ClassDefinition cd) throws IOException { this.serializer = serializer; this.out = out; this.cd = cd; this.writtenFields = new HashSet<String>(cd.getFieldCount()); this.begin = out.position(); // room for final offset out.writeZeroBytes(4); out.writeInt(cd.getFieldCount()); this.offset = out.position(); // one additional for raw data int fieldIndexesLength = (cd.getFieldCount() + 1) * INT_SIZE_IN_BYTES; out.writeZeroBytes(fieldIndexesLength); } public int getVersion() { return cd.getVersion(); } @Override public void writeInt(String fieldName, int value) throws IOException { setPosition(fieldName, FieldType.INT); out.writeInt(value); } @Override public void writeLong(String fieldName, long value) throws IOException { setPosition(fieldName, FieldType.LONG); out.writeLong(value); } @Override public void writeUTF(String fieldName, String str) throws IOException { setPosition(fieldName, FieldType.UTF); out.writeUTF(str); } @Override public void writeBoolean(String fieldName, boolean value) throws IOException { setPosition(fieldName, FieldType.BOOLEAN); out.writeBoolean(value); } @Override public void writeByte(String fieldName, byte value) throws IOException { setPosition(fieldName, FieldType.BYTE); out.writeByte(value); } @Override public void writeChar(String fieldName, int value) throws IOException { setPosition(fieldName, FieldType.CHAR); out.writeChar(value); } @Override public void writeDouble(String fieldName, double value) throws IOException { setPosition(fieldName, FieldType.DOUBLE); out.writeDouble(value); } @Override public void writeFloat(String fieldName, float value) throws IOException { setPosition(fieldName, FieldType.FLOAT); out.writeFloat(value); } @Override public void writeShort(String fieldName, short value) throws IOException { setPosition(fieldName, FieldType.SHORT); out.writeShort(value); } @Override public void writePortable(String 
fieldName, Portable portable) throws IOException { FieldDefinition fd = setPosition(fieldName, FieldType.PORTABLE); final boolean isNull = portable == null; out.writeBoolean(isNull); out.writeInt(fd.getFactoryId()); out.writeInt(fd.getClassId()); if (!isNull) { checkPortableAttributes(fd, portable); serializer.writeInternal(out, portable); } } private void checkPortableAttributes(FieldDefinition fd, Portable portable) { if (fd.getFactoryId() != portable.getFactoryId()) { throw new HazelcastSerializationException("Wrong Portable type! Generic portable types are not supported! " + " Expected factory-id: " + fd.getFactoryId() + ", Actual factory-id: " + portable.getFactoryId()); } if (fd.getClassId() != portable.getClassId()) { throw new HazelcastSerializationException("Wrong Portable type! Generic portable types are not supported! " + "Expected class-id: " + fd.getClassId() + ", Actual class-id: " + portable.getClassId()); } } @Override public void writeNullPortable(String fieldName, int factoryId, int classId) throws IOException { setPosition(fieldName, FieldType.PORTABLE); out.writeBoolean(true); out.writeInt(factoryId); out.writeInt(classId); } @Override public void writeByteArray(String fieldName, byte[] values) throws IOException { setPosition(fieldName, FieldType.BYTE_ARRAY); out.writeByteArray(values); } @Override public void writeBooleanArray(String fieldName, boolean[] booleans) throws IOException { setPosition(fieldName, FieldType.BOOLEAN_ARRAY); out.writeBooleanArray(booleans); } @Override public void writeCharArray(String fieldName, char[] values) throws IOException { setPosition(fieldName, FieldType.CHAR_ARRAY); out.writeCharArray(values); } @Override public void writeIntArray(String fieldName, int[] values) throws IOException { setPosition(fieldName, FieldType.INT_ARRAY); out.writeIntArray(values); } @Override public void writeLongArray(String fieldName, long[] values) throws IOException { setPosition(fieldName, FieldType.LONG_ARRAY); out.writeLongArray(values); } @Override public void writeDoubleArray(String fieldName, double[] values) throws IOException { setPosition(fieldName, FieldType.DOUBLE_ARRAY); out.writeDoubleArray(values); } @Override public void writeFloatArray(String fieldName, float[] values) throws IOException { setPosition(fieldName, FieldType.FLOAT_ARRAY); out.writeFloatArray(values); } @Override public void writeShortArray(String fieldName, short[] values) throws IOException { setPosition(fieldName, FieldType.SHORT_ARRAY); out.writeShortArray(values); } @Override public void writeUTFArray(String fieldName, String[] values) throws IOException { setPosition(fieldName, FieldType.UTF_ARRAY); out.writeUTFArray(values); } @Override public void writePortableArray(String fieldName, Portable[] portables) throws IOException { FieldDefinition fd = setPosition(fieldName, FieldType.PORTABLE_ARRAY); final int len = portables == null ? 
NULL_ARRAY_LENGTH : portables.length; out.writeInt(len); out.writeInt(fd.getFactoryId()); out.writeInt(fd.getClassId()); if (len > 0) { final int offset = out.position(); out.writeZeroBytes(len * 4); for (int i = 0; i < portables.length; i++) { Portable portable = portables[i]; checkPortableAttributes(fd, portable); int position = out.position(); out.writeInt(offset + i * INT_SIZE_IN_BYTES, position); serializer.writeInternal(out, portable); } } } private FieldDefinition setPosition(String fieldName, FieldType fieldType) throws IOException { if (raw) { throw new HazelcastSerializationException("Cannot write Portable fields after getRawDataOutput() is called!"); } FieldDefinition fd = cd.getField(fieldName); if (fd == null) { throw new HazelcastSerializationException("Invalid field name: '" + fieldName + "' for ClassDefinition {id: " + cd.getClassId() + ", version: " + cd.getVersion() + "}"); } if (writtenFields.add(fieldName)) { int pos = out.position(); int index = fd.getIndex(); out.writeInt(offset + index * INT_SIZE_IN_BYTES, pos); out.writeShort(fieldName.length()); out.writeBytes(fieldName); out.writeByte(fieldType.getId()); } else { throw new HazelcastSerializationException("Field '" + fieldName + "' has already been written!"); } return fd; } @Override public ObjectDataOutput getRawDataOutput() throws IOException { if (!raw) { int pos = out.position(); // last index int index = cd.getFieldCount(); out.writeInt(offset + index * INT_SIZE_IN_BYTES, pos); } raw = true; return out; } void end() throws IOException { // write final offset int position = out.position(); out.writeInt(begin, position); } }
tombujok/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/DefaultPortableWriter.java
Java
apache-2.0
9,595
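DefaultPortableWriter backs the PortableWriter that a Portable sees during serialization. A minimal Portable class, sketched with hypothetical factory and class ids, would drive it like this.

import java.io.IOException;

import com.hazelcast.nio.serialization.Portable;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;

/** Hypothetical Portable; the factory and class ids are illustrative. */
public class EmployeePortable implements Portable {

    public static final int FACTORY_ID = 1;
    public static final int CLASS_ID = 2;

    private String name;
    private int age;

    public EmployeePortable() {
    }

    public EmployeePortable(String name, int age) {
        this.name = name;
        this.age = age;
    }

    @Override
    public int getFactoryId() {
        return FACTORY_ID;
    }

    @Override
    public int getClassId() {
        return CLASS_ID;
    }

    @Override
    public void writePortable(PortableWriter writer) throws IOException {
        // Each named write is recorded in the field index table kept by DefaultPortableWriter.
        writer.writeUTF("name", name);
        writer.writeInt("age", age);
    }

    @Override
    public void readPortable(PortableReader reader) throws IOException {
        name = reader.readUTF("name");
        age = reader.readInt("age");
    }
}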
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.jmeter.report.core;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.io.Writer;

import org.apache.commons.lang3.Validate;
import org.apache.jorphan.util.JOrphanUtils;

/**
 * Base class for implementing sample writers.<br>
 * <p>
 * Handles buffering and output writer replacement.<br>
 * </p>
 * <p>
 * When a writer is set on the sample writer, any previous writer is flushed and
 * closed before being replaced by the new one.
 * </p>
 *
 * @since 3.0
 */
public abstract class AbstractSampleWriter extends SampleWriter {

    private static final int BUF_SIZE = 10000;

    private static final String CHARSET = "ISO8859-1";

    /** output writer to write samples to */
    protected PrintWriter writer;

    /**
     * Set the new writer on which samples will be written by this sample
     * writer.<br>
     * If a writer already exists on the sample writer, it is flushed and closed
     * before being replaced by the new one.
     *
     * @param writer
     *            The destination writer where samples will be written by this
     *            sample writer
     */
    public void setWriter(Writer writer) {
        Validate.notNull(writer, "writer must not be null.");

        if (this.writer != null) {
            // flush and close previous writer
            JOrphanUtils.closeQuietly(this.writer);
        }
        this.writer = new PrintWriter(new BufferedWriter(writer, BUF_SIZE), false);
    }

    /**
     * Instructs this sample writer to write samples on the specified output
     * with ISO8859-1 encoding.
     *
     * @param out
     *            The output stream on which samples should be written
     */
    public void setOutputStream(OutputStream out) {
        Validate.notNull(out, "out must not be null.");

        try {
            setWriter(new OutputStreamWriter(out, CHARSET));
        } catch (UnsupportedEncodingException e) {
            // ignore, ISO8859-1 is always supported
        }
    }

    /**
     * Set the destination file in which this sample writer will write samples.
     *
     * @param output
     *            The output file that will receive samples written by this
     *            sample writer
     */
    public void setOutputFile(File output) {
        FileOutputStream fos = null;
        try {
            fos = new FileOutputStream(output);
        } catch (Exception e) {
            throw new SampleException(e.getMessage(), e);
        }
        setOutputStream(fos);
    }

    /**
     * This method is guaranteed not to throw any exception. If the writer is
     * already closed then it does nothing.<br>
     * Any buffered data is flushed by this method.
     */
    @Override
    public void close() {
        JOrphanUtils.closeQuietly(writer);
        this.writer = null;
    }

    public void flush() {
        try {
            writer.flush();
        } catch (Exception e) {
            // ignore
        }
    }
}
d0k1/jmeter
src/core/org/apache/jmeter/report/core/AbstractSampleWriter.java
Java
apache-2.0
3,907
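A concrete writer built on the base class above might look like the following sketch. It assumes that SampleWriter (not shown in this excerpt) declares an abstract long write(Sample sample) method, so treat that signature, and the class and file names, as assumptions.

import java.io.File;

import org.apache.jmeter.report.core.AbstractSampleWriter;
import org.apache.jmeter.report.core.Sample;

/** Hypothetical writer that emits one text line per sample; the write signature is assumed. */
public class PlainTextSampleWriter extends AbstractSampleWriter {

    private long count;

    @Override
    public long write(Sample sample) {
        // 'writer' is the buffered PrintWriter managed by AbstractSampleWriter.
        writer.println(sample.toString());
        return ++count;
    }

    public static void main(String[] args) {
        PlainTextSampleWriter out = new PlainTextSampleWriter();
        out.setOutputFile(new File("samples.txt")); // illustrative file name
        // ... call out.write(sample) for each sample produced by the report engine ...
        out.close();
    }
}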
Puppet::Type.newtype(:netapp_lun_map) do
  @doc = "Manage Netapp LUN map creation and deletion."

  apply_to_device
  ensurable

  newparam(:lunmap) do
    desc "LUN map - composite key of format {path}:{lun-id}."
    isnamevar
    validate do |value|
      raise ArgumentError, "#{value} is an invalid LUN map." unless value =~ /(\/\w+){3,4}:\d{1,4}/
      lun_id = value.split(':').last
      raise ArgumentError, "#{lun_id} is an invalid LUN ID." unless lun_id.to_i.between?(1, 4095)
    end
  end

  newparam(:initiatorgroup) do
    desc "Initiator group to map to."
  end

  ## Validate params
  validate do
    raise ArgumentError, 'Initiatorgroup is required' if self[:initiatorgroup].nil?
  end

  ## Autorequire resources
  # Netapp_lun resources
  autorequire(:netapp_lun) do
    self[:lunmap].split(':').first
  end
end
cyrus-mc/puppetlabs-netapp
lib/puppet/type/netapp_lun_map.rb
Ruby
apache-2.0
838
/* * JBoss, Home of Professional Open Source * Copyright 2010, Red Hat, Inc. and/or its affiliates, and individual * contributors by the @authors tag. See the copyright.txt in the * distribution for a full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jboss.weld.tests.interceptors.interceptorsOrderWithEjbInterceptorOnMethod; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.arquillian.junit.Arquillian; import org.jboss.shrinkwrap.api.Archive; import org.jboss.shrinkwrap.api.BeanArchive; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.weld.test.util.Utils; import org.jboss.weld.tests.category.Integration; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; /** * @author Marius Bogoevici */ @RunWith(Arquillian.class) public class InterceptorOrderTest { @Deployment public static Archive<?> deploy() { return ShrinkWrap.create(BeanArchive.class, Utils.getDeploymentNameAsHash(InterceptorOrderTest.class)) .intercept(CdiInterceptor2.class, CdiInterceptor.class) .addPackage(InterceptorOrderTest.class.getPackage()); } @Test @Category(Integration.class) public void testOrder(Processor processor) { Counter.count = 0; SimpleProcessor.count = 0; CdiInterceptor.count = 0; CdiInterceptor2.count = 0; EjbInterceptor.count = 0; EjbInterceptor2.count = 0; int sum = processor.add(8, 13); Assert.assertEquals(21, sum); Assert.assertEquals(1, EjbInterceptor.count); Assert.assertEquals(2, EjbInterceptor2.count); Assert.assertEquals(3, CdiInterceptor2.count); Assert.assertEquals(4, CdiInterceptor.count); Assert.assertEquals(5, SimpleProcessor.count); } @Test @Category(Integration.class) public void testOrder2(Processor processor) { Counter.count = 0; SimpleProcessor.count = 0; CdiInterceptor.count = 0; CdiInterceptor2.count = 0; EjbInterceptor.count = 0; EjbInterceptor2.count = 0; int sum = processor.substract(34, 13); Assert.assertEquals(21, sum); Assert.assertEquals(1, EjbInterceptor.count); Assert.assertEquals(2, CdiInterceptor2.count); Assert.assertEquals(3, CdiInterceptor.count); Assert.assertEquals(4, SimpleProcessor.count); } }
antoinesd/weld-core
tests-arquillian/src/test/java/org/jboss/weld/tests/interceptors/interceptorsOrderWithEjbInterceptorOnMethod/InterceptorOrderTest.java
Java
apache-2.0
3,018
//----------------------------------------------------------------------- // <copyright file="RunnableGraph.cs" company="Akka.NET Project"> // Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com> // Copyright (C) 2013-2016 Akka.NET project <https://github.com/akkadotnet/akka.net> // </copyright> //----------------------------------------------------------------------- using System; using Akka.Streams.Implementation; namespace Akka.Streams.Dsl { /// <summary> /// Flow with attached input and output, can be executed. /// </summary> public interface IRunnableGraph<TMat> : IGraph<ClosedShape, TMat> { /// <summary> /// Transform only the materialized value of this RunnableGraph, leaving all other properties as they were. /// </summary> IRunnableGraph<TMat2> MapMaterializedValue<TMat2>(Func<TMat, TMat2> func); /// <summary> /// Run this flow and return the materialized instance from the flow. /// </summary> TMat Run(IMaterializer materializer); /// <summary> /// Change the attributes of this <see cref="IGraph{TShape}"/> to the given ones /// and seal the list of attributes. This means that further calls will not be able /// to remove these attributes, but instead add new ones. Note that this /// operation has no effect on an empty Flow (because the attributes apply /// only to the contained processing stages). /// </summary> new IRunnableGraph<TMat> WithAttributes(Attributes attributes); /// <summary> /// Add the given attributes to this <see cref="IGraph{TShape}"/>. /// Further calls to <see cref="WithAttributes"/> /// will not remove these attributes. Note that this /// operation has no effect on an empty Flow (because the attributes apply /// only to the contained processing stages). /// </summary> new IRunnableGraph<TMat> AddAttributes(Attributes attributes); /// <summary> /// Add a name attribute to this Graph. /// </summary> new IRunnableGraph<TMat> Named(string name); } public sealed class RunnableGraph<TMat> : IRunnableGraph<TMat> { public RunnableGraph(IModule module) { Module = module; Shape = ClosedShape.Instance; } public ClosedShape Shape { get; } public IModule Module { get; } IGraph<ClosedShape, TMat> IGraph<ClosedShape, TMat>.WithAttributes(Attributes attributes) => WithAttributes(attributes); public IRunnableGraph<TMat> AddAttributes(Attributes attributes) => WithAttributes(Module.Attributes.And(attributes)); public IRunnableGraph<TMat> Named(string name) => AddAttributes(Attributes.CreateName(name)); public IRunnableGraph<TMat> Async() => AddAttributes(new Attributes(Attributes.AsyncBoundary.Instance)); public IRunnableGraph<TMat> WithAttributes(Attributes attributes) => new RunnableGraph<TMat>(Module.WithAttributes(attributes)); IGraph<ClosedShape, TMat> IGraph<ClosedShape, TMat>.AddAttributes(Attributes attributes) => AddAttributes(attributes); IGraph<ClosedShape, TMat> IGraph<ClosedShape, TMat>.Named(string name) => Named(name); IGraph<ClosedShape, TMat> IGraph<ClosedShape, TMat>.Async() => Async(); public IRunnableGraph<TMat2> MapMaterializedValue<TMat2>(Func<TMat, TMat2> func) => new RunnableGraph<TMat2>(Module.TransformMaterializedValue(func)); public TMat Run(IMaterializer materializer) => materializer.Materialize(this); } public static class RunnableGraph { /// <summary> /// A graph with a closed shape is logically a runnable graph, this method makes /// it so also in type. /// </summary> public static RunnableGraph<TMat> FromGraph<TMat>(IGraph<ClosedShape, TMat> g) => g as RunnableGraph<TMat> ?? new RunnableGraph<TMat>(g.Module); } }
alexpantyukhin/akka.net
src/core/Akka.Streams/Dsl/RunnableGraph.cs
C#
apache-2.0
4,123
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te from tvm import relay from tvm.relay.prelude import Prelude from tvm.relay.analysis import unmatched_cases import pytest def test_empty_match_block(): # empty match block will not match anything, so it should return a wildcard pattern v = relay.Var("v") match = relay.Match(v, []) unmatched = unmatched_cases(match) assert len(unmatched) == 1 assert isinstance(unmatched[0], relay.PatternWildcard) def test_trivial_matches(): # a match clause with a wildcard will match anything v = relay.Var("v") match = relay.Match(v, [relay.Clause(relay.PatternWildcard(), v)]) assert len(unmatched_cases(match)) == 0 # same with a pattern var w = relay.Var("w") match = relay.Match(v, [relay.Clause(relay.PatternVar(w), w)]) assert len(unmatched_cases(match)) == 0 def test_single_constructor_adt(): mod = tvm.IRModule() box = relay.GlobalTypeVar("box") a = relay.TypeVar("a") box_ctor = relay.Constructor("box", [a], box) box_data = relay.TypeData(box, [a], [box_ctor]) mod[box] = box_data v = relay.Var("v") match = relay.Match( v, [relay.Clause(relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), v)] ) # with one constructor, having one pattern constructor case is exhaustive assert len(unmatched_cases(match, mod)) == 0 # this will be so if we nest the constructors too nested_pattern = relay.Match( v, [ relay.Clause( relay.PatternConstructor( box_ctor, [ relay.PatternConstructor( box_ctor, [relay.PatternConstructor(box_ctor, [relay.PatternWildcard()])], ) ], ), v, ) ], ) assert len(unmatched_cases(nested_pattern, mod)) == 0 def test_too_specific_match(): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") v = relay.Var("v") match = relay.Match( v, [ relay.Clause( relay.PatternConstructor( cons, [ relay.PatternWildcard(), relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternWildcard()] ), ], ), v, ) ], ) unmatched = unmatched_cases(match, mod) # will not match nil or a list of length 1 nil_found = False single_length_found = False assert len(unmatched) == 2 for case in unmatched: assert isinstance(case, relay.PatternConstructor) if case.constructor == nil: nil_found = True if case.constructor == cons: assert isinstance(case.patterns[1], relay.PatternConstructor) assert case.patterns[1].constructor == nil single_length_found = True assert nil_found and single_length_found # if we add a wildcard, this should work new_match = relay.Match( v, [ relay.Clause( relay.PatternConstructor( cons, [ relay.PatternWildcard(), relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternWildcard()] ), ], ), v, ), relay.Clause(relay.PatternWildcard(), v), ], ) assert len(unmatched_cases(new_match, mod)) == 0 def test_multiple_constructor_clauses(): mod = tvm.IRModule() p = 
Prelude(mod) _, cons, nil = mod.get_type("List") v = relay.Var("v") match = relay.Match( v, [ # list of length exactly 1 relay.Clause( relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])] ), v, ), # list of length exactly 2 relay.Clause( relay.PatternConstructor( cons, [ relay.PatternWildcard(), relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])] ), ], ), v, ), # empty list relay.Clause(relay.PatternConstructor(nil, []), v), # list of length 2 or more relay.Clause( relay.PatternConstructor( cons, [ relay.PatternWildcard(), relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternWildcard()] ), ], ), v, ), ], ) assert len(unmatched_cases(match, mod)) == 0 def test_missing_in_the_middle(): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") v = relay.Var("v") match = relay.Match( v, [ # list of length exactly 1 relay.Clause( relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])] ), v, ), # empty list relay.Clause(relay.PatternConstructor(nil, []), v), # list of length 3 or more relay.Clause( relay.PatternConstructor( cons, [ relay.PatternWildcard(), relay.PatternConstructor( cons, [ relay.PatternWildcard(), relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternWildcard()] ), ], ), ], ), v, ), ], ) # fails to match a list of length exactly two unmatched = unmatched_cases(match, mod) assert len(unmatched) == 1 assert isinstance(unmatched[0], relay.PatternConstructor) assert unmatched[0].constructor == cons assert isinstance(unmatched[0].patterns[1], relay.PatternConstructor) assert unmatched[0].patterns[1].constructor == cons assert isinstance(unmatched[0].patterns[1].patterns[1], relay.PatternConstructor) assert unmatched[0].patterns[1].patterns[1].constructor == nil def test_mixed_adt_constructors(): mod = tvm.IRModule() box = relay.GlobalTypeVar("box") a = relay.TypeVar("a") box_ctor = relay.Constructor("box", [a], box) box_data = relay.TypeData(box, [a], [box_ctor]) mod[box] = box_data p = Prelude(mod) _, cons, nil = p.mod.get_type("List") v = relay.Var("v") box_of_lists_inc = relay.Match( v, [ relay.Clause( relay.PatternConstructor( box_ctor, [ relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternWildcard()] ) ], ), v, ) ], ) # will fail to match a box containing an empty list unmatched = unmatched_cases(box_of_lists_inc, mod) assert len(unmatched) == 1 assert isinstance(unmatched[0], relay.PatternConstructor) assert unmatched[0].constructor == box_ctor assert len(unmatched[0].patterns) == 1 and unmatched[0].patterns[0].constructor == nil box_of_lists_comp = relay.Match( v, [ relay.Clause( relay.PatternConstructor(box_ctor, [relay.PatternConstructor(nil, [])]), v ), relay.Clause( relay.PatternConstructor( box_ctor, [ relay.PatternConstructor( cons, [relay.PatternWildcard(), relay.PatternWildcard()] ) ], ), v, ), ], ) assert len(unmatched_cases(box_of_lists_comp, mod)) == 0 list_of_boxes_inc = relay.Match( v, [ relay.Clause( relay.PatternConstructor( cons, [ relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), relay.PatternWildcard(), ], ), v, ) ], ) # fails to match empty list of boxes unmatched = unmatched_cases(list_of_boxes_inc, mod) assert len(unmatched) == 1 assert isinstance(unmatched[0], relay.PatternConstructor) assert unmatched[0].constructor == nil list_of_boxes_comp = relay.Match( v, [ # exactly one box relay.Clause( relay.PatternConstructor( cons, [ 
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), relay.PatternConstructor(nil, []), ], ), v, ), # exactly two boxes relay.Clause( relay.PatternConstructor( cons, [ relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), relay.PatternConstructor( cons, [ relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), relay.PatternConstructor(nil, []), ], ), ], ), v, ), # exactly three boxes relay.Clause( relay.PatternConstructor( cons, [ relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), relay.PatternConstructor( cons, [ relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), relay.PatternConstructor( cons, [ relay.PatternConstructor( box_ctor, [relay.PatternWildcard()] ), relay.PatternConstructor(nil, []), ], ), ], ), ], ), v, ), # one or more boxes relay.Clause( relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]), v, ), # no boxes relay.Clause(relay.PatternConstructor(nil, []), v), ], ) assert len(unmatched_cases(list_of_boxes_comp, mod)) == 0 def test_tuple_match(): a = relay.Var("a") b = relay.Var("b") clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b) x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause]) assert len(unmatched_cases(x)) == 0 def test_inf_loop_case(): code = """ #[version = "0.0.5"] type Arith[A] { Zero, Const(A), Plus(Arith[A], Arith[A]) } def @shallow_opt[A](%a: Arith[A]) -> Arith[A] { match (%a) { Plus(Zero, %r) => %r, Plus(%l, Zero) => %l, _ => %a } } """ tvm.parser.fromtext(code) # fromtext parse the module, then checked it (which include strictness checking). def test_expanding_ctor_with_no_args(): code = """ #[version = "0.0.5"] type List[A] { Cons(A, List[A]), Nil, } def @expand_on_nil_match(%a: List[(List[()],)]) -> int { match (%a) { Cons((Nil), Nil) => 1, _ => 2, } } """ # exhausion checks: # * hits Cons((Nil), Nil), expands to Cons(*, *), Nil() # Nil() fails Cons((Nil), Nil), passes _ # Cons(*, *) hits Cons((Nil), Nil), expands to Cons((*), Cons(*, *)), Cons((*), Nil()) # Cons((*), Cons(*, *)) fails Cons((Nil), Nil), passes _ # Cons((*), Nil()) hits Cons((Nil), Nil), expands to Cons((Nil), Nil), Cons((Cons(*, *)), Nil) # Cons((Nil), Nil) passes the first pattern # Cons((Cons(*, *)), Nil) fails the first pattern, passes _ # Note Nil() is passed to ExpandWildcardsConstructor many times in the above! tvm.parser.fromtext(code) def test_expanding_empty_tuple(): # same principle as above, but with empty tuple code = """ #[version = "0.0.5"] type List[A] { Cons(A, List[A]), Nil, } def @expand_on_empty_tuple_match(%a: (List[()], ())) -> int { match (%a) { (Cons((), Nil), ()) => 1, _ => 2, } } """ tvm.parser.fromtext(code) if __name__ == "__main__": pytest.main([__file__])
dmlc/tvm
tests/python/relay/test_pass_unmatched_cases.py
Python
apache-2.0
14,624
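For a quick feel of what the analysis reports, here is a minimal sketch assuming the same tvm.relay API exercised by the tests above: a match over the Prelude List type that only handles the empty list, so unmatched_cases should flag the missing cons case.

import tvm
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay.analysis import unmatched_cases

mod = tvm.IRModule()
Prelude(mod)  # registers the List ADT (Cons/Nil) in the module
_, cons, nil = mod.get_type("List")

v = relay.Var("v")
# Only the empty list is handled, so every non-empty list is left unmatched.
match = relay.Match(v, [relay.Clause(relay.PatternConstructor(nil, []), v)])
missing = unmatched_cases(match, mod)
print(len(missing))  # expected: 1, a cons pattern left uncovered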
/**************************************************************************//** * @file os_tick_ostm.c * @brief CMSIS OS Tick implementation for OS Timer * @version V1.0.1 * @date 19. September 2017 ******************************************************************************/ /* * Copyright (c) 2017-2017 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef MBED_CONF_RTOS_PRESENT #include "os_tick.h" #include "irq_ctrl.h" #include <MBRZA1H.h> #include <cmsis.h> // Define OS TImer interrupt priority #ifndef OSTM_IRQ_PRIORITY #define OSTM_IRQ_PRIORITY 0xFFU #endif // Define OS Timer channel and interrupt number #define OSTM (OSTM0) #define OSTM_IRQn ((IRQn_ID_t)OSTMI0TINT_IRQn) static uint32_t OSTM_Clock; // Timer tick frequency static uint8_t OSTM_PendIRQ; // Timer interrupt pending flag // Setup OS Tick. int32_t OS_Tick_Setup (uint32_t freq, IRQHandler_t handler) { uint32_t clock; uint32_t prio; uint32_t bits; if (freq == 0U) { return (-1); } OSTM_PendIRQ = 0U; // Get CPG.FRQCR[IFC] bits clock = (CPG.FRQCR >> 8) & 0x03; // Determine Divider 2 output clock by using SystemCoreClock if (clock == 0x03U) { clock = (SystemCoreClock * 3U); } else if (clock == 0x01U) { clock = (SystemCoreClock * 3U)/2U; } else { clock = SystemCoreClock; } // Determine tick frequency clock = clock / freq; // Save frequency for later OSTM_Clock = clock; // Enable OSTM clock CPG.STBCR5 &= ~(CPG_STBCR5_BIT_MSTP51); // Stop the OSTM counter OSTM.OSTMnTT = 0x01U; // Set interval timer mode and disable interrupts when counting starts OSTM.OSTMnCTL = 0x00U; // Set compare value OSTM.OSTMnCMP = clock - 1U; // Disable corresponding IRQ IRQ_Disable (OSTM_IRQn); IRQ_ClearPending(OSTM_IRQn); // Determine number of implemented priority bits IRQ_SetPriority (OSTM_IRQn, 0xFFU); prio = IRQ_GetPriority (OSTM_IRQn); // At least bits [7:4] must be implemented if ((prio & 0xF0U) == 0U) { return (-1); } for (bits = 0; bits < 4; bits++) { if ((prio & 0x01) != 0) { break; } prio >>= 1; } // Adjust configured priority to the number of implemented priority bits prio = (OSTM_IRQ_PRIORITY << bits) & 0xFFUL; // Set OSTM interrupt priority IRQ_SetPriority(OSTM_IRQn, prio-1U); // Set edge-triggered, non-secure, single CPU targeted IRQ IRQ_SetMode (OSTM_IRQn, IRQ_MODE_TRIG_EDGE); // Register tick interrupt handler function IRQ_SetHandler(OSTM_IRQn, (IRQHandler_t)handler); // Enable corresponding IRQ IRQ_Enable (OSTM_IRQn); return (0); } /// Enable OS Tick. void OS_Tick_Enable (void) { if (OSTM_PendIRQ != 0U) { OSTM_PendIRQ = 0U; IRQ_SetPending (OSTM_IRQn); } // Start the OSTM counter OSTM.OSTMnTS = 0x01U; } /// Disable OS Tick. void OS_Tick_Disable (void) { // Stop the OSTM counter OSTM.OSTMnTT = 0x01U; if (IRQ_GetPending(OSTM_IRQn) != 0) { IRQ_ClearPending (OSTM_IRQn); OSTM_PendIRQ = 1U; } } // Acknowledge OS Tick IRQ. void OS_Tick_AcknowledgeIRQ (void) { IRQ_ClearPending (OSTM_IRQn); } // Get OS Tick IRQ number. int32_t OS_Tick_GetIRQn (void) { return (OSTM_IRQn); } // Get OS Tick clock. 
uint32_t OS_Tick_GetClock (void) { return (OSTM_Clock); } // Get OS Tick interval. uint32_t OS_Tick_GetInterval (void) { return (OSTM.OSTMnCMP + 1U); } // Get OS Tick count value. uint32_t OS_Tick_GetCount (void) { uint32_t cmp = OSTM.OSTMnCMP; return (cmp - OSTM.OSTMnCNT); } // Get OS Tick overflow status. uint32_t OS_Tick_GetOverflow (void) { return (IRQ_GetPending(OSTM_IRQn)); } // Get Cortex-A9 OS Timer interrupt number IRQn_ID_t mbed_get_a9_tick_irqn(){ return OSTM_IRQn; } #endif
c1728p9/mbed-os
targets/TARGET_RENESAS/TARGET_RZ_A1XX/TARGET_RZ_A1H/device/os_tick_ostm.c
C
apache-2.0
4,384
/* * Copyright 2019 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gradle.api.artifacts; import org.gradle.api.capabilities.Capability; import org.gradle.internal.HasInternalProtocol; import java.util.List; /** * Gives access to the resolution details of a single capability conflict. * This class may be used to resolve a capability conflict by either selecting * explicitly one of the candidates, or selecting the one with the highest * version of the capability. * * @since 5.6 */ @HasInternalProtocol public interface CapabilityResolutionDetails { /** * Returns the capability in conflict */ Capability getCapability(); /** * Returns the list of components which are in conflict on this capability */ List<ComponentVariantIdentifier> getCandidates(); /** * Selects a particular candidate to solve the conflict. It is recommended to * provide a human-readable explanation to the choice by calling the {@link #because(String)} method * * @param candidate the selected candidate * @return this details instance * * @since 6.0 */ CapabilityResolutionDetails select(ComponentVariantIdentifier candidate); /** * Selects a particular candidate to solve the conflict. It is recommended to * provide a human-readable explanation to the choice by calling the {@link #because(String)} method * * @param notation the selected candidate * * @return this details instance */ CapabilityResolutionDetails select(Object notation); /** * Automatically selects the candidate module which has the highest version of the * capability. A reason is automatically added so calling {@link #because(String)} would override * the automatic selection description. * * @return this details instance */ CapabilityResolutionDetails selectHighestVersion(); /** * Describes why a particular candidate is selected. * * @param reason the reason why a candidate is selected. * * @return this details instance */ CapabilityResolutionDetails because(String reason); }
gradle/gradle
subprojects/core-api/src/main/java/org/gradle/api/artifacts/CapabilityResolutionDetails.java
Java
apache-2.0
2,713
/* * Licensed to GraphHopper and Peter Karich under one or more contributor * license agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * GraphHopper licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.graphhopper.routing; import com.graphhopper.routing.util.*; import com.graphhopper.storage.*; import com.graphhopper.storage.index.QueryResult; import static com.graphhopper.storage.index.QueryResult.Position.*; import com.graphhopper.util.*; import com.graphhopper.util.shapes.GHPoint; import gnu.trove.map.TIntObjectMap; import java.util.Arrays; import org.junit.After; import org.junit.Test; import static org.junit.Assert.*; import org.junit.Before; /** * * @author Peter Karich */ public class QueryGraphTest { private final EncodingManager encodingManager = new EncodingManager("CAR"); private GraphStorage g; @Before public void setUp() { g = new GraphHopperStorage(new RAMDirectory(), encodingManager, false).create(100); } @After public void tearDown() { g.close(); } void initGraph( Graph g ) { // // /*-*\ // 0 1 // | // 2 NodeAccess na = g.getNodeAccess(); na.setNode(0, 1, 0); na.setNode(1, 1, 2.5); na.setNode(2, 0, 0); g.edge(0, 2, 10, true); g.edge(0, 1, 10, true).setWayGeometry(Helper.createPointList(1.5, 1, 1.5, 1.5)); } @Test public void testOneVirtualNode() { initGraph(g); EdgeExplorer expl = g.createEdgeExplorer(); // snap directly to tower node => pointList could get of size 1?!? 
// a) EdgeIterator iter = expl.setBaseNode(2); iter.next(); QueryGraph queryGraph = new QueryGraph(g); QueryResult res = createLocationResult(1, -1, iter, 0, TOWER); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(0, 0), res.getSnappedPoint()); // b) res = createLocationResult(1, -1, iter, 1, TOWER); queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(1, 0), res.getSnappedPoint()); // c) iter = expl.setBaseNode(1); iter.next(); res = createLocationResult(1.2, 2.7, iter, 0, TOWER); queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(1, 2.5), res.getSnappedPoint()); // node number stays assertEquals(3, queryGraph.getNodes()); // snap directly to pillar node queryGraph = new QueryGraph(g); iter = expl.setBaseNode(1); iter.next(); res = createLocationResult(2, 1.5, iter, 1, PILLAR); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(1.5, 1.5), res.getSnappedPoint()); assertEquals(3, res.getClosestNode()); assertEquals(4, getPoints(queryGraph, 0, 3).getSize()); assertEquals(2, getPoints(queryGraph, 3, 1).getSize()); queryGraph = new QueryGraph(g); res = createLocationResult(2, 1.7, iter, 1, PILLAR); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(1.5, 1.5), res.getSnappedPoint()); assertEquals(3, res.getClosestNode()); assertEquals(4, getPoints(queryGraph, 0, 3).getSize()); assertEquals(2, getPoints(queryGraph, 3, 1).getSize()); // snap to edge which has pillar nodes queryGraph = new QueryGraph(g); res = createLocationResult(1.5, 2, iter, 0, EDGE); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(1.300019, 1.899962), res.getSnappedPoint()); assertEquals(3, res.getClosestNode()); assertEquals(4, getPoints(queryGraph, 0, 3).getSize()); assertEquals(2, getPoints(queryGraph, 3, 1).getSize()); // snap to edge which has no pillar nodes queryGraph = new QueryGraph(g); iter = expl.setBaseNode(2); iter.next(); res = createLocationResult(0.5, 0.1, iter, 0, EDGE); queryGraph.lookup(Arrays.asList(res)); assertEquals(new GHPoint(0.5, 0), res.getSnappedPoint()); assertEquals(3, res.getClosestNode()); assertEquals(2, getPoints(queryGraph, 0, 3).getSize()); assertEquals(2, getPoints(queryGraph, 3, 2).getSize()); } @Test public void testFillVirtualEdges() { initGraph(g); g.getNodeAccess().setNode(3, 0, 1); g.edge(1, 3); final int baseNode = 1; EdgeIterator iter = g.createEdgeExplorer().setBaseNode(baseNode); iter.next(); QueryResult res1 = createLocationResult(2, 1.7, iter, 1, PILLAR); QueryGraph queryGraph = new QueryGraph(g) { @Override void fillVirtualEdges( TIntObjectMap<VirtualEdgeIterator> node2Edge, int towerNode, EdgeExplorer mainExpl ) { super.fillVirtualEdges(node2Edge, towerNode, mainExpl); // ignore nodes should include baseNode == 1 if (towerNode == 3) assertEquals("[3->4]", node2Edge.get(towerNode).toString()); else if (towerNode == 1) assertEquals("[1->4, 1 1-0]", node2Edge.get(towerNode).toString()); else throw new IllegalStateException("not allowed " + towerNode); } }; queryGraph.lookup(Arrays.asList(res1)); EdgeIteratorState state = GHUtility.getEdge(queryGraph, 0, 1); assertEquals(4, state.fetchWayGeometry(3).size()); // fetch virtual edge and check way geometry state = GHUtility.getEdge(queryGraph, 4, 3); assertEquals(2, state.fetchWayGeometry(3).size()); } @Test public void testMultipleVirtualNodes() { initGraph(g); // snap to edge which has pillar nodes EdgeIterator iter = g.createEdgeExplorer().setBaseNode(1); iter.next(); QueryResult res1 = 
createLocationResult(2, 1.7, iter, 1, PILLAR); QueryGraph queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res1)); assertEquals(new GHPoint(1.5, 1.5), res1.getSnappedPoint()); assertEquals(3, res1.getClosestNode()); assertEquals(4, getPoints(queryGraph, 0, 3).getSize()); PointList pl = getPoints(queryGraph, 3, 1); assertEquals(2, pl.getSize()); assertEquals(new GHPoint(1.5, 1.5), pl.toGHPoint(0)); assertEquals(new GHPoint(1, 2.5), pl.toGHPoint(1)); EdgeIteratorState edge = GHUtility.getEdge(queryGraph, 3, 1); assertNotNull(queryGraph.getEdgeProps(edge.getEdge(), 3)); assertNotNull(queryGraph.getEdgeProps(edge.getEdge(), 1)); edge = GHUtility.getEdge(queryGraph, 3, 0); assertNotNull(queryGraph.getEdgeProps(edge.getEdge(), 3)); assertNotNull(queryGraph.getEdgeProps(edge.getEdge(), 0)); // snap again => new virtual node on same edge! iter = g.createEdgeExplorer().setBaseNode(1); iter.next(); res1 = createLocationResult(2, 1.7, iter, 1, PILLAR); QueryResult res2 = createLocationResult(1.5, 2, iter, 0, EDGE); queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res1, res2)); assertEquals(4, res2.getClosestNode()); assertEquals(new GHPoint(1.300019, 1.899962), res2.getSnappedPoint()); assertEquals(3, res1.getClosestNode()); assertEquals(new GHPoint(1.5, 1.5), res1.getSnappedPoint()); assertEquals(4, getPoints(queryGraph, 3, 0).getSize()); assertEquals(2, getPoints(queryGraph, 3, 4).getSize()); assertEquals(2, getPoints(queryGraph, 4, 1).getSize()); assertNull(GHUtility.getEdge(queryGraph, 4, 0)); assertNull(GHUtility.getEdge(queryGraph, 3, 1)); } @Test public void testOneWay() { NodeAccess na = g.getNodeAccess(); na.setNode(0, 0, 0); na.setNode(1, 0, 1); g.edge(0, 1, 10, false); EdgeIteratorState edge = GHUtility.getEdge(g, 0, 1); QueryResult res1 = createLocationResult(0.1, 0.1, edge, 0, EDGE); QueryResult res2 = createLocationResult(0.1, 0.9, edge, 0, EDGE); QueryGraph queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res2, res1)); assertEquals(2, res1.getClosestNode()); assertEquals(new GHPoint(0, 0.1), res1.getSnappedPoint()); assertEquals(3, res2.getClosestNode()); assertEquals(new GHPoint(0, 0.9), res2.getSnappedPoint()); assertEquals(2, getPoints(queryGraph, 0, 2).getSize()); assertEquals(2, getPoints(queryGraph, 2, 3).getSize()); assertEquals(2, getPoints(queryGraph, 3, 1).getSize()); assertNull(GHUtility.getEdge(queryGraph, 3, 0)); assertNull(GHUtility.getEdge(queryGraph, 2, 1)); } @Test public void testVirtEdges() { initGraph(g); EdgeIterator iter = g.createEdgeExplorer().setBaseNode(0); iter.next(); VirtualEdgeIterator vi = new VirtualEdgeIterator(2); vi.add(iter.detach(false)); assertTrue(vi.next()); } @Test public void testUseMeanElevation() { g.close(); g = new GraphHopperStorage(new RAMDirectory(), encodingManager, true).create(100); NodeAccess na = g.getNodeAccess(); na.setNode(0, 0, 0, 0); na.setNode(1, 0, 0.0001, 20); EdgeIteratorState edge = g.edge(0, 1); EdgeIteratorState edgeReverse = edge.detach(true); DistanceCalc2D distCalc = new DistanceCalc2D(); QueryResult qr = new QueryResult(0, 0.00005); qr.setClosestEdge(edge); qr.setWayIndex(0); qr.setSnappedPosition(EDGE); qr.calcSnappedPoint(distCalc); assertEquals(10, qr.getSnappedPoint().getEle(), 1e-1); qr = new QueryResult(0, 0.00005); qr.setClosestEdge(edgeReverse); qr.setWayIndex(0); qr.setSnappedPosition(EDGE); qr.calcSnappedPoint(distCalc); assertEquals(10, qr.getSnappedPoint().getEle(), 1e-1); } @Test public void testLoopStreet_Issue151() { // do query at x should result in 
ignoring only the bottom edge 1-3 not the upper one => getNeighbors are 0, 5, 3 and not only 0, 5 // // 0--1--3--4 // | | // x--- // g.edge(0, 1, 10, true); g.edge(1, 3, 10, true); g.edge(3, 4, 10, true); EdgeIteratorState edge = g.edge(1, 3, 20, true).setWayGeometry(Helper.createPointList(-0.001, 0.001, -0.001, 0.002)); AbstractRoutingAlgorithmTester.updateDistancesFor(g, 0, 0, 0); AbstractRoutingAlgorithmTester.updateDistancesFor(g, 1, 0, 0.001); AbstractRoutingAlgorithmTester.updateDistancesFor(g, 3, 0, 0.002); AbstractRoutingAlgorithmTester.updateDistancesFor(g, 4, 0, 0.003); QueryResult qr = new QueryResult(-0.0005, 0.001); qr.setClosestEdge(edge); qr.setWayIndex(1); qr.calcSnappedPoint(new DistanceCalc2D()); QueryGraph qg = new QueryGraph(g); qg.lookup(Arrays.asList(qr)); EdgeExplorer ee = qg.createEdgeExplorer(); assertEquals(GHUtility.asSet(0, 5, 3), GHUtility.getNeighbors(ee.setBaseNode(1))); } @Test public void testOneWayLoop_Issue162() { // do query at x, where edge is oneway // // |\ // | x // 0<-\ // | // 1 FlagEncoder carEncoder = encodingManager.getSingle(); NodeAccess na = g.getNodeAccess(); na.setNode(0, 0, 0); na.setNode(1, 0, -0.001); g.edge(0, 1, 10, true); // in the case of identical nodes the wayGeometry defines the direction! EdgeIteratorState edge = g.edge(0, 0). setDistance(100). setFlags(carEncoder.setProperties(20, true, false)). setWayGeometry(Helper.createPointList(0.001, 0, 0, 0.001)); QueryResult qr = new QueryResult(0.0011, 0.0009); qr.setClosestEdge(edge); qr.setWayIndex(1); qr.calcSnappedPoint(new DistanceCalc2D()); QueryGraph qg = new QueryGraph(g); qg.lookup(Arrays.asList(qr)); EdgeExplorer ee = qg.createEdgeExplorer(); assertTrue(qr.getClosestNode() > 1); assertEquals(2, GHUtility.count(ee.setBaseNode(qr.getClosestNode()))); EdgeIterator iter = ee.setBaseNode(qr.getClosestNode()); iter.next(); assertTrue(iter.toString(), carEncoder.isBool(iter.getFlags(), FlagEncoder.K_FORWARD)); assertFalse(iter.toString(), carEncoder.isBool(iter.getFlags(), FlagEncoder.K_BACKWARD)); iter.next(); assertFalse(iter.toString(), carEncoder.isBool(iter.getFlags(), FlagEncoder.K_FORWARD)); assertTrue(iter.toString(), carEncoder.isBool(iter.getFlags(), FlagEncoder.K_BACKWARD)); } @Test public void testEdgesShareOneNode() { initGraph(g); EdgeIteratorState iter = GHUtility.getEdge(g, 0, 2); QueryResult res1 = createLocationResult(0.5, 0, iter, 0, EDGE); iter = GHUtility.getEdge(g, 1, 0); QueryResult res2 = createLocationResult(1.5, 2, iter, 0, EDGE); QueryGraph queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res1, res2)); assertEquals(new GHPoint(0.5, 0), res1.getSnappedPoint()); assertEquals(new GHPoint(1.300019, 1.899962), res2.getSnappedPoint()); assertNotNull(GHUtility.getEdge(queryGraph, 0, 4)); assertNotNull(GHUtility.getEdge(queryGraph, 0, 3)); } @Test public void testAvoidDuplicateVirtualNodesIfIdentical() { initGraph(g); EdgeIteratorState edgeState = GHUtility.getEdge(g, 0, 2); QueryResult res1 = createLocationResult(0.5, 0, edgeState, 0, EDGE); QueryResult res2 = createLocationResult(0.5, 0, edgeState, 0, EDGE); QueryGraph queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res1, res2)); assertEquals(new GHPoint(0.5, 0), res1.getSnappedPoint()); assertEquals(new GHPoint(0.5, 0), res2.getSnappedPoint()); assertEquals(3, res1.getClosestNode()); assertEquals(3, res2.getClosestNode()); // force skip due to **tower** node snapping in phase 2, but no virtual edges should be created for res1 edgeState = GHUtility.getEdge(g, 0, 1); res1 = 
createLocationResult(1, 0, edgeState, 0, EDGE); // now create virtual edges edgeState = GHUtility.getEdge(g, 0, 2); res2 = createLocationResult(0.5, 0, edgeState, 0, EDGE); queryGraph = new QueryGraph(g); queryGraph.lookup(Arrays.asList(res1, res2)); // make sure only one virtual node was created assertEquals(queryGraph.getNodes(), g.getNodes() + 1); EdgeIterator iter = queryGraph.createEdgeExplorer().setBaseNode(0); assertEquals(GHUtility.asSet(1, 3), GHUtility.getNeighbors(iter)); } @Test public void testGetEdgeProps() { initGraph(g); EdgeIteratorState e1 = GHUtility.getEdge(g, 0, 2); QueryGraph queryGraph = new QueryGraph(g); QueryResult res1 = createLocationResult(0.5, 0, e1, 0, EDGE); queryGraph.lookup(Arrays.asList(res1)); // get virtual edge e1 = GHUtility.getEdge(queryGraph, res1.getClosestNode(), 0); EdgeIteratorState e2 = queryGraph.getEdgeProps(e1.getEdge(), Integer.MIN_VALUE); assertEquals(e1.getEdge(), e2.getEdge()); } PointList getPoints( Graph g, int base, int adj ) { EdgeIteratorState edge = GHUtility.getEdge(g, base, adj); if (edge == null) throw new IllegalStateException("edge " + base + "-" + adj + " not found"); return edge.fetchWayGeometry(3); } public QueryResult createLocationResult( double lat, double lon, EdgeIteratorState edge, int wayIndex, QueryResult.Position pos ) { if (edge == null) throw new IllegalStateException("Specify edge != null"); QueryResult tmp = new QueryResult(lat, lon); tmp.setClosestEdge(edge); tmp.setWayIndex(wayIndex); tmp.setSnappedPosition(pos); tmp.calcSnappedPoint(new DistanceCalcEarth()); return tmp; } @Test public void testIteration_Issue163() { EdgeFilter outEdgeFilter = new DefaultEdgeFilter(encodingManager.getEncoder("CAR"), false, true); EdgeFilter inEdgeFilter = new DefaultEdgeFilter(encodingManager.getEncoder("CAR"), true, false); EdgeExplorer inExplorer = g.createEdgeExplorer(inEdgeFilter); EdgeExplorer outExplorer = g.createEdgeExplorer(outEdgeFilter); int nodeA = 0; int nodeB = 1; /* init test graph: one directional edge going from A to B, via virtual nodes C and D * * (C)-(D) * / \ * A B */ g.getNodeAccess().setNode(nodeA, 1, 0); g.getNodeAccess().setNode(nodeB, 1, 10); g.edge(nodeA, nodeB, 10, false).setWayGeometry(Helper.createPointList(1.5, 3, 1.5, 7)); // assert the behavior for classic edgeIterator assertEdgeIdsStayingEqual(inExplorer, outExplorer, nodeA, nodeB); // setup query results EdgeIteratorState it = GHUtility.getEdge(g, nodeA, nodeB); QueryResult res1 = createLocationResult(1.5, 3, it, 1, QueryResult.Position.EDGE); QueryResult res2 = createLocationResult(1.5, 7, it, 2, QueryResult.Position.EDGE); QueryGraph q = new QueryGraph(g); q.lookup(Arrays.asList(res1, res2)); int nodeC = res1.getClosestNode(); int nodeD = res2.getClosestNode(); inExplorer = q.createEdgeExplorer(inEdgeFilter); outExplorer = q.createEdgeExplorer(outEdgeFilter); // assert the same behavior for queryGraph assertEdgeIdsStayingEqual(inExplorer, outExplorer, nodeA, nodeC); assertEdgeIdsStayingEqual(inExplorer, outExplorer, nodeC, nodeD); assertEdgeIdsStayingEqual(inExplorer, outExplorer, nodeD, nodeB); } private void assertEdgeIdsStayingEqual( EdgeExplorer inExplorer, EdgeExplorer outExplorer, int startNode, int endNode ) { EdgeIterator it = outExplorer.setBaseNode(startNode); it.next(); assertEquals(startNode, it.getBaseNode()); assertEquals(endNode, it.getAdjNode()); // we expect the edge id to be the same when exploring in backward direction int expectedEdgeId = it.getEdge(); assertFalse(it.next()); // backward iteration, edge id should remain 
the same!! it = inExplorer.setBaseNode(endNode); it.next(); assertEquals(endNode, it.getBaseNode()); assertEquals(startNode, it.getAdjNode()); assertEquals("The edge id is not the same,", expectedEdgeId, it.getEdge()); assertFalse(it.next()); } @Test public void testTurnCostsProperlyPropagated_Issue282() { TurnCostExtension turnExt = new TurnCostExtension(); FlagEncoder encoder = new CarFlagEncoder(5, 5, 15); GraphStorage graphWithTurnCosts = new GraphHopperStorage(new RAMDirectory(), new EncodingManager(encoder), false, turnExt). create(100); NodeAccess na = graphWithTurnCosts.getNodeAccess(); na.setNode(0, .00, .00); na.setNode(1, .00, .01); na.setNode(2, .01, .01); EdgeIteratorState edge0 = graphWithTurnCosts.edge(0, 1, 10, true); EdgeIteratorState edge1 = graphWithTurnCosts.edge(2, 1, 10, true); QueryGraph qGraph = new QueryGraph(graphWithTurnCosts); FastestWeighting weighting = new FastestWeighting(encoder); TurnWeighting turnWeighting = new TurnWeighting(weighting, encoder, (TurnCostExtension) qGraph.getExtension()); assertEquals(0, turnWeighting.calcTurnWeight(edge0.getEdge(), 1, edge1.getEdge()), .1); // now use turn costs and QueryGraph turnExt.addTurnInfo(edge0.getEdge(), 1, edge1.getEdge(), encoder.getTurnFlags(false, 10)); assertEquals(10, turnWeighting.calcTurnWeight(edge0.getEdge(), 1, edge1.getEdge()), .1); QueryResult res1 = createLocationResult(0.000, 0.005, edge0, 0, QueryResult.Position.EDGE); QueryResult res2 = createLocationResult(0.005, 0.010, edge1, 0, QueryResult.Position.EDGE); qGraph.lookup(Arrays.asList(res1, res2)); int fromQueryEdge = GHUtility.getEdge(qGraph, res1.getClosestNode(), 1).getEdge(); int toQueryEdge = GHUtility.getEdge(qGraph, res2.getClosestNode(), 1).getEdge(); assertEquals(10, turnWeighting.calcTurnWeight(fromQueryEdge, 1, toQueryEdge), .1); graphWithTurnCosts.close(); } }
nside/graphhopper
core/src/test/java/com/graphhopper/routing/QueryGraphTest.java
Java
apache-2.0
21,555
{{ partial "event_header.html" . }} {{ $path := split $.Source.File.Path "/" }} {{ $event_slug := index $path 1 }} {{ $e := (index $.Site.Data.events $event_slug) }} <!-- end event header partial --> <div> {{ .Content }} </div> <!-- speaker page code begin --> {{ range $fname, $s := index .Site.Data.speakers (print (chomp $e.year)) (lower $e.city) }} <div class="row"> <div class="col-md-3"> <img alt = "{{ $s.name }}" src = "/events/{{ $event_slug }}/speakers/{{$fname}}.jpg" class="img-responsive" width = "250px"> </div> <div class= "col-md-8"> <h3><a href="/events/{{ $event_slug }}/program/{{$fname}}">{{ $s.name }}</a></h3> {{ if $s.twitter }} <a href="https://twitter.com/{{ $s.twitter }}">@{{ $s.twitter }}</a><br>{{ end }} <br> {{ $s.bio | markdownify }} <hr> </div> </div> {{ end }} <!-- speaker code end --> </div> <!-- closes the col-md-8 div from event_header --> {{ partial "sponsors.html" . }} </div> <!-- closes the row div --> {{ partial "footer.html" . }}
joelaha/devopsdays-web
themes/devopsdays-responsive/layouts/speakers/single.html
HTML
apache-2.0
1,045
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import tabs from horizon.utils import memoized from openstack_dashboard import api from openstack_dashboard.dashboards.project.volumes.backups \ import forms as backup_forms from openstack_dashboard.dashboards.project.volumes.backups \ import tables as backup_tables from openstack_dashboard.dashboards.project.volumes.backups \ import tabs as backup_tabs class CreateBackupView(forms.ModalFormView): form_class = backup_forms.CreateBackupForm template_name = 'project/volumes/backups/create_backup.html' success_url = reverse_lazy("horizon:project:volumes:backups_tab") def get_context_data(self, **kwargs): context = super(CreateBackupView, self).get_context_data(**kwargs) context['volume_id'] = self.kwargs['volume_id'] return context def get_initial(self): return {"volume_id": self.kwargs["volume_id"]} class BackupDetailView(tabs.TabView): tab_group_class = backup_tabs.BackupDetailTabs template_name = 'project/volumes/backups/detail.html' page_title = _("Volume Backup Details: {{ backup.name }}") def get_context_data(self, **kwargs): context = super(BackupDetailView, self).get_context_data(**kwargs) backup = self.get_data() table = backup_tables.BackupsTable(self.request) context["backup"] = backup context["url"] = self.get_redirect_url() context["actions"] = table.render_row_actions(backup) return context @memoized.memoized_method def get_data(self): try: backup_id = self.kwargs['backup_id'] backup = api.cinder.volume_backup_get(self.request, backup_id) except Exception: exceptions.handle(self.request, _('Unable to retrieve backup details.'), redirect=self.get_redirect_url()) return backup def get_tabs(self, request, *args, **kwargs): backup = self.get_data() return self.tab_group_class(request, backup=backup, **kwargs) @staticmethod def get_redirect_url(): return reverse('horizon:project:volumes:index') class RestoreBackupView(forms.ModalFormView): form_class = backup_forms.RestoreBackupForm template_name = 'project/volumes/backups/restore_backup.html' success_url = reverse_lazy('horizon:project:volumes:index') def get_context_data(self, **kwargs): context = super(RestoreBackupView, self).get_context_data(**kwargs) context['backup_id'] = self.kwargs['backup_id'] return context def get_initial(self): backup_id = self.kwargs['backup_id'] backup_name = self.request.GET.get('backup_name') volume_id = self.request.GET.get('volume_id') return { 'backup_id': backup_id, 'backup_name': backup_name, 'volume_id': volume_id, }
orbitfp7/horizon
openstack_dashboard/dashboards/project/volumes/backups/views.py
Python
apache-2.0
3,709
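For orientation, a minimal URLconf sketch showing how these class-based views might be wired up; the regex patterns and URL names here are illustrative assumptions rather than the actual Horizon urls.py, but they supply the volume_id/backup_id kwargs that the views read above.

from django.conf.urls import url

from openstack_dashboard.dashboards.project.volumes.backups import views

urlpatterns = [
    # CreateBackupView reads kwargs['volume_id'] in get_initial()/get_context_data()
    url(r'^(?P<volume_id>[^/]+)/create/$',
        views.CreateBackupView.as_view(), name='create_backup'),
    # BackupDetailView and RestoreBackupView read kwargs['backup_id']
    url(r'^(?P<backup_id>[^/]+)/$',
        views.BackupDetailView.as_view(), name='detail'),
    url(r'^(?P<backup_id>[^/]+)/restore/$',
        views.RestoreBackupView.as_view(), name='restore'),
]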
/* * Copyright 2014 The Error Prone Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.google.errorprone.refaster.testdata.template; import com.google.errorprone.refaster.annotation.AfterTemplate; import com.google.errorprone.refaster.annotation.BeforeTemplate; /** * Template to demonstrate that parentheses in a Refaster @BeforeTemplate are treated as optional. * * @author [email protected] (Louis Wasserman) */ public class ParenthesesOptionalTemplate { @BeforeTemplate public int before(int a, int b) { return (a * b) + 5; } @AfterTemplate public int after(int a, int b) { return 5 + (a * b); } }
cushon/error-prone
core/src/test/java/com/google/errorprone/refaster/testdata/template/ParenthesesOptionalTemplate.java
Java
apache-2.0
1,155
/* * Copyright 2002-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.http.client; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import org.springframework.http.HttpHeaders; import org.springframework.http.HttpStatus; import org.springframework.util.FileCopyUtils; /** * Simple implementation of {@link ClientHttpResponse} that reads the response's body into memory, * thus allowing for multiple invocations of {@link #getBody()}. * * @author Arjen Poutsma * @since 1.0 */ final class BufferingClientHttpResponseWrapper implements ClientHttpResponse { private final ClientHttpResponse response; private byte[] body; BufferingClientHttpResponseWrapper(ClientHttpResponse response) { this.response = response; } public HttpStatus getStatusCode() throws IOException { return this.response.getStatusCode(); } public int getRawStatusCode() throws IOException { return this.response.getRawStatusCode(); } public String getStatusText() throws IOException { return this.response.getStatusText(); } public HttpHeaders getHeaders() { return this.response.getHeaders(); } public InputStream getBody() throws IOException { if (this.body == null) { this.body = FileCopyUtils.copyToByteArray(this.response.getBody()); } return new ByteArrayInputStream(this.body); } public void close() { this.response.close(); } }
bboyfeiyu/spring-android
spring-android-rest-template/src/main/java/org/springframework/http/client/BufferingClientHttpResponseWrapper.java
Java
apache-2.0
1,986
<?php /* * Copyright 2014 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ class Google_Service_Appengine_ListAuthorizedDomainsResponse extends Google_Collection { protected $collection_key = 'domains'; protected $domainsType = 'Google_Service_Appengine_AuthorizedDomain'; protected $domainsDataType = 'array'; public $nextPageToken; /** * @param Google_Service_Appengine_AuthorizedDomain[] */ public function setDomains($domains) { $this->domains = $domains; } /** * @return Google_Service_Appengine_AuthorizedDomain[] */ public function getDomains() { return $this->domains; } public function setNextPageToken($nextPageToken) { $this->nextPageToken = $nextPageToken; } public function getNextPageToken() { return $this->nextPageToken; } }
tsugiproject/tsugi
vendor/google/apiclient-services/src/Google/Service/Appengine/ListAuthorizedDomainsResponse.php
PHP
apache-2.0
1,329
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System.Collections.Immutable; using System.Threading.Tasks; using Microsoft.CodeAnalysis.CodeActions; using Microsoft.CodeAnalysis.Diagnostics; using Microsoft.CodeAnalysis.Editing; using Microsoft.CodeAnalysis.Shared.Extensions; namespace Microsoft.CodeAnalysis.CodeFixes.Qualify { internal abstract class AbstractQualifyMemberAccessCodeFixprovider<TSyntaxNode> : CodeFixProvider where TSyntaxNode : SyntaxNode { public sealed override ImmutableArray<string> FixableDiagnosticIds => ImmutableArray.Create(IDEDiagnosticIds.AddQualificationDiagnosticId); public override async Task RegisterCodeFixesAsync(CodeFixContext context) { var document = context.Document; var span = context.Span; var cancellationToken = context.CancellationToken; var root = await document.GetSyntaxRootAsync(cancellationToken).ConfigureAwait(false); var model = await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false); var token = root.FindToken(span.Start); if (!token.Span.IntersectsWith(span)) { return; } var node = token.GetAncestor<TSyntaxNode>(); if (node == null) { return; } var generator = document.GetLanguageService<SyntaxGenerator>(); var codeAction = new CodeAction.DocumentChangeAction( FeaturesResources.AddQualification, c => document.ReplaceNodeAsync(node, GetReplacementSyntax(node, generator), c), FeaturesResources.AddQualification); context.RegisterCodeFix(codeAction, context.Diagnostics); } public override FixAllProvider GetFixAllProvider() { return BatchFixAllProvider.Instance; } private static SyntaxNode GetReplacementSyntax(SyntaxNode node, SyntaxGenerator generator) { var qualifiedAccess = generator.MemberAccessExpression( generator.ThisExpression(), node.WithLeadingTrivia()) .WithLeadingTrivia(node.GetLeadingTrivia()); return qualifiedAccess; } } }
ericfe-ms/roslyn
src/Features/Core/Portable/CodeFixes/Qualify/AbstractQualifyMemberAccessCodeFixprovider.cs
C#
apache-2.0
2,434
# -*- coding: utf-8 -*- # Copyright (C) 1999-2002 Joel Rosdahl # Copyright © 2011-2013 Jason R. Coombs """ Internet Relay Chat (IRC) protocol client library. This library is intended to encapsulate the IRC protocol at a quite low level. It provides an event-driven IRC client framework. It has a fairly thorough support for the basic IRC protocol, CTCP, DCC chat, but DCC file transfers is not yet supported. In order to understand how to make an IRC client, I'm afraid you more or less must understand the IRC specifications. They are available here: [IRC specifications]. The main features of the IRC client framework are: * Abstraction of the IRC protocol. * Handles multiple simultaneous IRC server connections. * Handles server PONGing transparently. * Messages to the IRC server are done by calling methods on an IRC connection object. * Messages from an IRC server triggers events, which can be caught by event handlers. * Reading from and writing to IRC server sockets are normally done by an internal select() loop, but the select()ing may be done by an external main loop. * Functions can be registered to execute at specified times by the event-loop. * Decodes CTCP tagging correctly (hopefully); I haven't seen any other IRC client implementation that handles the CTCP specification subtilties. * A kind of simple, single-server, object-oriented IRC client class that dispatches events to instance methods is included. Current limitations: * The IRC protocol shines through the abstraction a bit too much. * Data is not written asynchronously to the server, i.e. the write() may block if the TCP buffers are stuffed. * There are no support for DCC file transfers. * The author haven't even read RFC 2810, 2811, 2812 and 2813. * Like most projects, documentation is lacking... .. [IRC specifications] http://www.irchelp.org/irchelp/rfc/ """ from __future__ import absolute_import, division import bisect import re import select import socket import string import time import struct import logging import threading import abc import collections import functools import itertools import six try: import pkg_resources except ImportError: pass from . import connection from . import events from . import functools as irc_functools from . import strings from . import util from . import buffer from . import schedule from . import features log = logging.getLogger(__name__) # set the version tuple try: VERSION = tuple(int(res) for res in re.findall('\d+', pkg_resources.require('irc')[0].version)) except Exception: VERSION = () # TODO # ---- # (maybe) color parser convenience functions # documentation (including all event types) # (maybe) add awareness of different types of ircds # send data asynchronously to the server (and DCC connections) # (maybe) automatically close unused, passive DCC connections after a while # NOTES # ----- # connection.quit() only sends QUIT to the server. # ERROR from the server triggers the error event and the disconnect event. # dropping of the connection triggers the disconnect event. class IRCError(Exception): "An IRC exception" class InvalidCharacters(ValueError): "Invalid characters were encountered in the message" class MessageTooLong(ValueError): "Message is too long" class PrioritizedHandler( collections.namedtuple('Base', ('priority', 'callback'))): def __lt__(self, other): "when sorting prioritized handlers, only use the priority" return self.priority < other.priority class IRC(object): """Class that handles one or several IRC server connections. 
When an IRC object has been instantiated, it can be used to create Connection objects that represent the IRC connections. The responsibility of the IRC object is to provide an event-driven framework for the connections and to keep the connections alive. It runs a select loop to poll each connection's TCP socket and hands over the sockets with incoming data for processing by the corresponding connection. The methods of most interest for an IRC client writer are server, add_global_handler, remove_global_handler, execute_at, execute_delayed, execute_every, process_once, and process_forever. Here is an example: client = irc.client.IRC() server = client.server() server.connect("irc.some.where", 6667, "my_nickname") server.privmsg("a_nickname", "Hi there!") client.process_forever() This will connect to the IRC server irc.some.where on port 6667 using the nickname my_nickname and send the message "Hi there!" to the nickname a_nickname. The methods of this class are thread-safe; accesses to and modifications of its internal lists of connections, handlers, and delayed commands are guarded by a mutex. """ def __do_nothing(*args, **kwargs): pass def __init__(self, on_connect=__do_nothing, on_disconnect=__do_nothing, on_schedule=__do_nothing): """Constructor for IRC objects. on_connect: optional callback invoked when a new connection is made. on_disconnect: optional callback invoked when a socket is disconnected. on_schedule: optional callback, usually supplied by an external event loop, to indicate in float seconds that the client needs to process events that many seconds in the future. An external event loop will implement this callback to schedule a call to process_timeout. The three arguments mainly exist to be able to use an external main loop (for example Tkinter's or PyGTK's main app loop) instead of calling the process_forever method. An alternative is to just call ServerConnection.process_once() once in a while. """ self._on_connect = on_connect self._on_disconnect = on_disconnect self._on_schedule = on_schedule self.connections = [] self.handlers = {} self.delayed_commands = [] # list of DelayedCommands # Modifications to these shared lists and dict need to be thread-safe self.mutex = threading.RLock() self.add_global_handler("ping", _ping_ponger, -42) def server(self): """Creates and returns a ServerConnection object.""" c = ServerConnection(self) with self.mutex: self.connections.append(c) return c def process_data(self, sockets): """Called when there is more data to read on connection sockets. Arguments: sockets -- A list of socket objects. See documentation for IRC.__init__. """ with self.mutex: log.log(logging.DEBUG-2, "process_data()") for s, c in itertools.product(sockets, self.connections): if s == c.socket: c.process_data() def process_timeout(self): """Called when a timeout notification is due. See documentation for IRC.__init__. """ with self.mutex: while self.delayed_commands: command = self.delayed_commands[0] if not command.due(): break command.function() if isinstance(command, schedule.PeriodicCommand): self._schedule_command(command.next()) del self.delayed_commands[0] def process_once(self, timeout=0): """Process data from connections once. Arguments: timeout -- How long the select() call should wait if no data is available. This method should be called periodically to check and process incoming data, if there are any. If that seems boring, look at the process_forever method. 
""" with self.mutex: log.log(logging.DEBUG-2, "process_once()") sockets = [x.socket for x in self.connections if x is not None] sockets = [x for x in sockets if x is not None] if sockets: (i, o, e) = select.select(sockets, [], [], timeout) self.process_data(i) else: time.sleep(timeout) self.process_timeout() def process_forever(self, timeout=0.2): """Run an infinite loop, processing data from connections. This method repeatedly calls process_once. Arguments: timeout -- Parameter to pass to process_once. """ # This loop should specifically *not* be mutex-locked. # Otherwise no other thread would ever be able to change # the shared state of an IRC object running this function. log.debug("process_forever(timeout=%s)", timeout) while 1: self.process_once(timeout) def disconnect_all(self, message=""): """Disconnects all connections.""" with self.mutex: for c in self.connections: c.disconnect(message) def add_global_handler(self, event, handler, priority=0): """Adds a global handler function for a specific event type. Arguments: event -- Event type (a string). Check the values of numeric_events for possible event types. handler -- Callback function taking 'connection' and 'event' parameters. priority -- A number (the lower number, the higher priority). The handler function is called whenever the specified event is triggered in any of the connections. See documentation for the Event class. The handler functions are called in priority order (lowest number is highest priority). If a handler function returns "NO MORE", no more handlers will be called. """ handler = PrioritizedHandler(priority, handler) with self.mutex: event_handlers = self.handlers.setdefault(event, []) bisect.insort(event_handlers, handler) def remove_global_handler(self, event, handler): """Removes a global handler function. Arguments: event -- Event type (a string). handler -- Callback function. Returns 1 on success, otherwise 0. """ with self.mutex: if not event in self.handlers: return 0 for h in self.handlers[event]: if handler == h.callback: self.handlers[event].remove(h) return 1 def execute_at(self, at, function, arguments=()): """Execute a function at a specified time. Arguments: at -- Execute at this time (standard "time_t" time). function -- Function to call. arguments -- Arguments to give the function. """ function = functools.partial(function, *arguments) command = schedule.DelayedCommand.at_time(at, function) self._schedule_command(command) def execute_delayed(self, delay, function, arguments=()): """ Execute a function after a specified time. delay -- How many seconds to wait. function -- Function to call. arguments -- Arguments to give the function. """ function = functools.partial(function, *arguments) command = schedule.DelayedCommand.after(delay, function) self._schedule_command(command) def execute_every(self, period, function, arguments=()): """ Execute a function every 'period' seconds. period -- How often to run (always waits this long for first). function -- Function to call. arguments -- Arguments to give the function. """ function = functools.partial(function, *arguments) command = schedule.PeriodicCommand.after(period, function) self._schedule_command(command) def _schedule_command(self, command): with self.mutex: bisect.insort(self.delayed_commands, command) self._on_schedule(util.total_seconds(command.delay)) def dcc(self, dcctype="chat"): """Creates and returns a DCCConnection object. Arguments: dcctype -- "chat" for DCC CHAT connections or "raw" for DCC SEND (or other DCC types). 
If "chat", incoming data will be split in newline-separated chunks. If "raw", incoming data is not touched. """ with self.mutex: c = DCCConnection(self, dcctype) self.connections.append(c) return c def _handle_event(self, connection, event): """ Handle an Event event incoming on ServerConnection connection. """ with self.mutex: h = self.handlers matching_handlers = sorted( h.get("all_events", []) + h.get(event.type, []) ) for handler in matching_handlers: result = handler.callback(connection, event) if result == "NO MORE": return def _remove_connection(self, connection): """[Internal]""" with self.mutex: self.connections.remove(connection) self._on_disconnect(connection.socket) _rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?") class Connection(object): """ Base class for IRC connections. """ __metaclass__ = abc.ABCMeta @abc.abstractproperty def socket(self): "The socket for this connection" def __init__(self, irclibobj): self.irclibobj = irclibobj ############################## ### Convenience wrappers. def execute_at(self, at, function, arguments=()): self.irclibobj.execute_at(at, function, arguments) def execute_delayed(self, delay, function, arguments=()): self.irclibobj.execute_delayed(delay, function, arguments) def execute_every(self, period, function, arguments=()): self.irclibobj.execute_every(period, function, arguments) class ServerConnectionError(IRCError): pass class ServerNotConnectedError(ServerConnectionError): pass class ServerConnection(Connection): """ An IRC server connection. ServerConnection objects are instantiated by calling the server method on an IRC object. """ buffer_class = buffer.DecodingLineBuffer socket = None def __init__(self, irclibobj): super(ServerConnection, self).__init__(irclibobj) self.connected = False self.features = features.FeatureSet() # save the method args to allow for easier reconnection. @irc_functools.save_method_args def connect(self, server, port, nickname, password=None, username=None, ircname=None, connect_factory=connection.Factory()): """Connect/reconnect to a server. Arguments: server -- Server name. port -- Port number. nickname -- The nickname. password -- Password (if any). username -- The username. ircname -- The IRC name ("realname"). server_address -- The remote host/port of the server. connect_factory -- A callable that takes the server address and returns a connection (with a socket interface). This function can be called to reconnect a closed connection. Returns the ServerConnection object. """ log.debug("connect(server=%r, port=%r, nickname=%r, ...)", server, port, nickname) if self.connected: self.disconnect("Changing servers") self.buffer = self.buffer_class() self.handlers = {} self.real_server_name = "" self.real_nickname = nickname self.server = server self.port = port self.server_address = (server, port) self.nickname = nickname self.username = username or nickname self.ircname = ircname or nickname self.password = password self.connect_factory = connect_factory try: self.socket = self.connect_factory(self.server_address) except socket.error as err: raise ServerConnectionError("Couldn't connect to socket: %s" % err) self.connected = True self.irclibobj._on_connect(self.socket) # Log on... 
if self.password: self.pass_(self.password) self.nick(self.nickname) self.user(self.username, self.ircname) return self def reconnect(self): """ Reconnect with the last arguments passed to self.connect() """ self.connect(*self._saved_connect.args, **self._saved_connect.kwargs) def close(self): """Close the connection. This method closes the connection permanently; after it has been called, the object is unusable. """ # Without this thread lock, there is a window during which # select() can find a closed socket, leading to an EBADF error. with self.irclibobj.mutex: self.disconnect("Closing object") self.irclibobj._remove_connection(self) def get_server_name(self): """Get the (real) server name. This method returns the (real) server name, or, more specifically, what the server calls itself. """ if self.real_server_name: return self.real_server_name else: return "" def get_nickname(self): """Get the (real) nick name. This method returns the (real) nickname. The library keeps track of nick changes, so it might not be the nick name that was passed to the connect() method. """ return self.real_nickname def process_data(self): "read and process input from self.socket" try: reader = getattr(self.socket, 'read', self.socket.recv) new_data = reader(2 ** 14) except socket.error: # The server hung up. self.disconnect("Connection reset by peer") return if not new_data: # Read nothing: connection must be down. self.disconnect("Connection reset by peer") return self.buffer.feed(new_data) for line in self.buffer: log.debug("FROM SERVER: %s", line) if not line: continue prefix = None command = None arguments = None self._handle_event(Event("all_raw_messages", self.get_server_name(), None, [line])) m = _rfc_1459_command_regexp.match(line) if m.group("prefix"): prefix = m.group("prefix") if not self.real_server_name: self.real_server_name = prefix if m.group("command"): command = m.group("command").lower() if m.group("argument"): a = m.group("argument").split(" :", 1) arguments = a[0].split() if len(a) == 2: arguments.append(a[1]) # Translate numerics into more readable strings. command = events.numeric.get(command, command) if command == "nick": if NickMask(prefix).nick == self.real_nickname: self.real_nickname = arguments[0] elif command == "welcome": # Record the nickname in case the client changed nick # in a nicknameinuse callback. 
self.real_nickname = arguments[0] elif command == "featurelist": self.features.load(arguments) if command in ["privmsg", "notice"]: target, message = arguments[0], arguments[1] messages = _ctcp_dequote(message) if command == "privmsg": if is_channel(target): command = "pubmsg" else: if is_channel(target): command = "pubnotice" else: command = "privnotice" for m in messages: if isinstance(m, tuple): if command in ["privmsg", "pubmsg"]: command = "ctcp" else: command = "ctcpreply" m = list(m) log.debug("command: %s, source: %s, target: %s, " "arguments: %s", command, prefix, target, m) self._handle_event(Event(command, NickMask(prefix), target, m)) if command == "ctcp" and m[0] == "ACTION": self._handle_event(Event("action", prefix, target, m[1:])) else: log.debug("command: %s, source: %s, target: %s, " "arguments: %s", command, prefix, target, [m]) self._handle_event(Event(command, NickMask(prefix), target, [m])) else: target = None if command == "quit": arguments = [arguments[0]] elif command == "ping": target = arguments[0] else: target = arguments[0] arguments = arguments[1:] if command == "mode": if not is_channel(target): command = "umode" log.debug("command: %s, source: %s, target: %s, " "arguments: %s", command, prefix, target, arguments) self._handle_event(Event(command, NickMask(prefix), target, arguments)) def _handle_event(self, event): """[Internal]""" self.irclibobj._handle_event(self, event) if event.type in self.handlers: for fn in self.handlers[event.type]: fn(self, event) def is_connected(self): """Return connection status. Returns true if connected, otherwise false. """ return self.connected def add_global_handler(self, *args): """Add global handler. See documentation for IRC.add_global_handler. """ self.irclibobj.add_global_handler(*args) def remove_global_handler(self, *args): """Remove global handler. See documentation for IRC.remove_global_handler. """ self.irclibobj.remove_global_handler(*args) def action(self, target, action): """Send a CTCP ACTION command.""" self.ctcp("ACTION", target, action) def admin(self, server=""): """Send an ADMIN command.""" self.send_raw(" ".join(["ADMIN", server]).strip()) def cap(self, subcommand, *args): """ Send a CAP command according to `the spec <http://ircv3.atheme.org/specification/capability-negotiation-3.1>`_. Arguments: subcommand -- LS, LIST, REQ, ACK, CLEAR, END args -- capabilities, if required for given subcommand Example: .cap('LS') .cap('REQ', 'multi-prefix', 'sasl') .cap('END') """ cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split()) client_subcommands = set(cap_subcommands) - set('NAK') assert subcommand in client_subcommands, "invalid subcommand" def _multi_parameter(args): """ According to the spec:: If more than one capability is named, the RFC1459 designated sentinel (:) for a multi-parameter argument must be present. It's not obvious where the sentinel should be present or if it must be omitted for a single parameter, so follow convention and only include the sentinel prefixed to the first parameter if more than one parameter is present. 
""" if len(args) > 1: return (':' + args[0],) + args[1:] return args args = _multi_parameter(args) self.send_raw(' '.join(('CAP', subcommand) + args)) def ctcp(self, ctcptype, target, parameter=""): """Send a CTCP command.""" ctcptype = ctcptype.upper() self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or "")) def ctcp_reply(self, target, parameter): """Send a CTCP REPLY command.""" #self.notice(target, "\001%s\001" % parameter) def disconnect(self, message=""): """Hang up the connection. Arguments: message -- Quit message. """ if not self.connected: return self.connected = 0 self.quit(message) try: self.socket.shutdown(socket.SHUT_WR) self.socket.close() except socket.error: pass del self.socket self._handle_event(Event("disconnect", self.server, "", [message])) def globops(self, text): """Send a GLOBOPS command.""" self.send_raw("GLOBOPS :" + text) def info(self, server=""): """Send an INFO command.""" self.send_raw(" ".join(["INFO", server]).strip()) def invite(self, nick, channel): """Send an INVITE command.""" self.send_raw(" ".join(["INVITE", nick, channel]).strip()) def ison(self, nicks): """Send an ISON command. Arguments: nicks -- List of nicks. """ self.send_raw("ISON " + " ".join(nicks)) def join(self, channel, key=""): """Send a JOIN command.""" self.send_raw("JOIN %s%s" % (channel, (key and (" " + key)))) def kick(self, channel, nick, comment=""): """Send a KICK command.""" self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment)))) def links(self, remote_server="", server_mask=""): """Send a LINKS command.""" command = "LINKS" if remote_server: command = command + " " + remote_server if server_mask: command = command + " " + server_mask self.send_raw(command) def list(self, channels=None, server=""): """Send a LIST command.""" command = "LIST" if channels: command = command + " " + ",".join(channels) if server: command = command + " " + server self.send_raw(command) def lusers(self, server=""): """Send a LUSERS command.""" self.send_raw("LUSERS" + (server and (" " + server))) def mode(self, target, command): """Send a MODE command.""" self.send_raw("MODE %s %s" % (target, command)) def motd(self, server=""): """Send an MOTD command.""" self.send_raw("MOTD" + (server and (" " + server))) def names(self, channels=None): """Send a NAMES command.""" self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or "")) def nick(self, newnick): """Send a NICK command.""" self.send_raw("NICK " + newnick) def notice(self, target, text): """Send a NOTICE command.""" # Should limit len(text) here! 
self.send_raw("NOTICE %s :%s" % (target, text)) def oper(self, nick, password): """Send an OPER command.""" self.send_raw("OPER %s %s" % (nick, password)) def part(self, channels, message=""): """Send a PART command.""" channels = util.always_iterable(channels) cmd_parts = [ 'PART', ','.join(channels), ] if message: cmd_parts.append(message) self.send_raw(' '.join(cmd_parts)) def pass_(self, password): """Send a PASS command.""" self.send_raw("PASS " + password) def ping(self, target, target2=""): """Send a PING command.""" self.send_raw("PING %s%s" % (target, target2 and (" " + target2))) def pong(self, target, target2=""): """Send a PONG command.""" self.send_raw("PONG %s%s" % (target, target2 and (" " + target2))) def privmsg(self, target, text): """Send a PRIVMSG command.""" self.send_raw("PRIVMSG %s :%s" % (target, text)) def privmsg_many(self, targets, text): """Send a PRIVMSG command to multiple targets.""" target = ','.join(targets) return self.privmsg(target, text) def quit(self, message=""): """Send a QUIT command.""" # Note that many IRC servers don't use your QUIT message # unless you've been connected for at least 5 minutes! self.send_raw("QUIT" + (message and (" :" + message))) def send_raw(self, string): """Send raw string to the server. The string will be padded with appropriate CR LF. """ # The string should not contain any carriage return other than the # one added here. if '\n' in string: raise InvalidCharacters( "Carriage returns not allowed in privmsg(text)") bytes = string.encode('utf-8') + b'\r\n' # According to the RFC http://tools.ietf.org/html/rfc2812#page-6, # clients should not transmit more than 512 bytes. if len(bytes) > 512: raise MessageTooLong( "Messages limited to 512 bytes including CR/LF") if self.socket is None: raise ServerNotConnectedError("Not connected.") sender = getattr(self.socket, 'write', self.socket.send) try: sender(bytes) log.debug("TO SERVER: %s", string) except socket.error: # Ouch! 
self.disconnect("Connection reset by peer.") def squit(self, server, comment=""): """Send an SQUIT command.""" self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment))) def stats(self, statstype, server=""): """Send a STATS command.""" self.send_raw("STATS %s%s" % (statstype, server and (" " + server))) def time(self, server=""): """Send a TIME command.""" self.send_raw("TIME" + (server and (" " + server))) def topic(self, channel, new_topic=None): """Send a TOPIC command.""" if new_topic is None: self.send_raw("TOPIC " + channel) else: self.send_raw("TOPIC %s :%s" % (channel, new_topic)) def trace(self, target=""): """Send a TRACE command.""" self.send_raw("TRACE" + (target and (" " + target))) def user(self, username, realname): """Send a USER command.""" self.send_raw("USER %s 0 * :%s" % (username, realname)) def userhost(self, nicks): """Send a USERHOST command.""" self.send_raw("USERHOST " + ",".join(nicks)) def users(self, server=""): """Send a USERS command.""" self.send_raw("USERS" + (server and (" " + server))) def version(self, server=""): """Send a VERSION command.""" self.send_raw("VERSION" + (server and (" " + server))) def wallops(self, text): """Send a WALLOPS command.""" self.send_raw("WALLOPS :" + text) def who(self, target="", op=""): """Send a WHO command.""" self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o"))) def whois(self, targets): """Send a WHOIS command.""" self.send_raw("WHOIS " + ",".join(targets)) def whowas(self, nick, max="", server=""): """Send a WHOWAS command.""" self.send_raw("WHOWAS %s%s%s" % (nick, max and (" " + max), server and (" " + server))) def set_rate_limit(self, frequency): """ Set a `frequency` limit (messages per second) for this connection. Any attempts to send faster than this rate will block. """ self.send_raw = Throttler(self.send_raw, frequency) def set_keepalive(self, interval): """ Set a keepalive to occur every ``interval`` on this connection. """ pinger = functools.partial(self.ping, 'keep-alive') self.irclibobj.execute_every(period=interval, function=pinger) class Throttler(object): """ Rate-limit a function (or other callable) """ def __init__(self, func, max_rate=float('Inf')): if isinstance(func, Throttler): func = func.func self.func = func self.max_rate = max_rate self.reset() def reset(self): self.last_called = 0 def __call__(self, *args, **kwargs): # ensure at least 1/max_rate seconds from last call elapsed = time.time() - self.last_called must_wait = 1 / self.max_rate - elapsed time.sleep(max(0, must_wait)) self.last_called = time.time() return self.func(*args, **kwargs) class DCCConnectionError(IRCError): pass class DCCConnection(Connection): """This class represents a DCC connection. DCCConnection objects are instantiated by calling the dcc method on an IRC object. """ socket = None def __init__(self, irclibobj, dcctype): super(DCCConnection, self).__init__(irclibobj) self.connected = 0 self.passive = 0 self.dcctype = dcctype self.peeraddress = None self.peerport = None def connect(self, address, port): """Connect/reconnect to a DCC peer. Arguments: address -- Host/IP address of the peer. port -- The port number to connect to. Returns the DCCConnection object. 
""" self.peeraddress = socket.gethostbyname(address) self.peerport = port self.buffer = LineBuffer() self.handlers = {} self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.passive = 0 try: self.socket.connect((self.peeraddress, self.peerport)) except socket.error as x: raise DCCConnectionError("Couldn't connect to socket: %s" % x) self.connected = 1 self.irclibobj._on_connect(self.socket) return self def listen(self): """Wait for a connection/reconnection from a DCC peer. Returns the DCCConnection object. The local IP address and port are available as self.localaddress and self.localport. After connection from a peer, the peer address and port are available as self.peeraddress and self.peerport. """ self.buffer = LineBuffer() self.handlers = {} self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.passive = 1 try: self.socket.bind((socket.gethostbyname(socket.gethostname()), 0)) self.localaddress, self.localport = self.socket.getsockname() self.socket.listen(10) except socket.error as x: raise DCCConnectionError("Couldn't bind socket: %s" % x) return self def disconnect(self, message=""): """Hang up the connection and close the object. Arguments: message -- Quit message. """ if not self.connected: return self.connected = 0 try: self.socket.shutdown(socket.SHUT_WR) self.socket.close() except socket.error: pass del self.socket self.irclibobj._handle_event( self, Event("dcc_disconnect", self.peeraddress, "", [message])) self.irclibobj._remove_connection(self) def process_data(self): """[Internal]""" if self.passive and not self.connected: conn, (self.peeraddress, self.peerport) = self.socket.accept() self.socket.close() self.socket = conn self.connected = 1 log.debug("DCC connection from %s:%d", self.peeraddress, self.peerport) self.irclibobj._handle_event( self, Event("dcc_connect", self.peeraddress, None, None)) return try: new_data = self.socket.recv(2 ** 14) except socket.error: # The server hung up. self.disconnect("Connection reset by peer") return if not new_data: # Read nothing: connection must be down. self.disconnect("Connection reset by peer") return if self.dcctype == "chat": self.buffer.feed(new_data) chunks = list(self.buffer) if len(self.buffer) > 2 ** 14: # Bad peer! Naughty peer! self.disconnect() return else: chunks = [new_data] command = "dccmsg" prefix = self.peeraddress target = None for chunk in chunks: log.debug("FROM PEER: %s", chunk) arguments = [chunk] log.debug("command: %s, source: %s, target: %s, arguments: %s", command, prefix, target, arguments) self.irclibobj._handle_event( self, Event(command, prefix, target, arguments)) def privmsg(self, text): """ Send text to DCC peer. The text will be padded with a newline if it's a DCC CHAT session. """ if self.dcctype == 'chat': text += '\n' bytes = text.encode('utf-8') return self.send_bytes(bytes) def send_bytes(self, bytes): """ Send data to DCC peer. """ try: self.socket.send(bytes) log.debug("TO PEER: %r\n", bytes) except socket.error: self.disconnect("Connection reset by peer.") class SimpleIRCClient(object): """A simple single-server IRC client class. This is an example of an object-oriented wrapper of the IRC framework. A real IRC client can be made by subclassing this class and adding appropriate methods. The method on_join will be called when a "join" event is created (which is done when the server sends a JOIN messsage/command), on_privmsg will be called for "privmsg" events, and so on. 
The handler methods get two arguments: the connection object (same as self.connection) and the event object. Instance attributes that can be used by sub classes: ircobj -- The IRC instance. connection -- The ServerConnection instance. dcc_connections -- A list of DCCConnection instances. """ def __init__(self): self.ircobj = IRC() self.connection = self.ircobj.server() self.dcc_connections = [] self.ircobj.add_global_handler("all_events", self._dispatcher, -10) self.ircobj.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10) def _dispatcher(self, connection, event): """ Dispatch events to on_<event.type> method, if present. """ log.debug("_dispatcher: %s", event.type) do_nothing = lambda c, e: None method = getattr(self, "on_" + event.type, do_nothing) method(connection, event) def _dcc_disconnect(self, c, e): self.dcc_connections.remove(c) def connect(self, *args, **kwargs): """Connect using the underlying connection""" self.connection.connect(*args, **kwargs) def dcc_connect(self, address, port, dcctype="chat"): """Connect to a DCC peer. Arguments: address -- IP address of the peer. port -- Port to connect to. Returns a DCCConnection instance. """ dcc = self.ircobj.dcc(dcctype) self.dcc_connections.append(dcc) dcc.connect(address, port) return dcc def dcc_listen(self, dcctype="chat"): """Listen for connections from a DCC peer. Returns a DCCConnection instance. """ dcc = self.ircobj.dcc(dcctype) self.dcc_connections.append(dcc) dcc.listen() return dcc def start(self): """Start the IRC client.""" self.ircobj.process_forever() class Event(object): "An IRC event." def __init__(self, type, source, target, arguments=None): """ Constructor of Event objects. Arguments: type -- A string describing the event. source -- The originator of the event (a nick mask or a server). target -- The target of the event (a nick or a channel). arguments -- Any event-specific arguments. """ self.type = type self.source = source self.target = target if arguments is None: arguments = [] self.arguments = arguments _LOW_LEVEL_QUOTE = "\020" _CTCP_LEVEL_QUOTE = "\134" _CTCP_DELIMITER = "\001" _low_level_mapping = { "0": "\000", "n": "\n", "r": "\r", _LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE } _low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)") def mask_matches(nick, mask): """Check if a nick matches a mask. Returns true if the nick matches, otherwise false. """ nick = strings.lower(nick) mask = strings.lower(mask) mask = mask.replace("\\", "\\\\") for ch in ".$|[](){}+": mask = mask.replace(ch, "\\" + ch) mask = mask.replace("?", ".") mask = mask.replace("*", ".*") r = re.compile(mask, re.IGNORECASE) return r.match(nick) _special = "-[]\\`^{}" nick_characters = string.ascii_letters + string.digits + _special def _ctcp_dequote(message): """[Internal] Dequote a message according to CTCP specifications. The function returns a list where each element can be either a string (normal message) or a tuple of one or two strings (tagged messages). If a tuple has only one element (ie is a singleton), that element is the tag; otherwise the tuple has two elements: the tag and the data. Arguments: message -- The message to be decoded. """ def _low_level_replace(match_obj): ch = match_obj.group(1) # If low_level_mapping doesn't have the character as key, we # should just return the character. return _low_level_mapping.get(ch, ch) if _LOW_LEVEL_QUOTE in message: # Yup, there was a quote. Release the dequoter, man! 
        message = _low_level_regexp.sub(_low_level_replace, message)

    if _CTCP_DELIMITER not in message:
        return [message]
    else:
        # Split it into parts.  (Does any IRC client actually *use*
        # CTCP stacking like this?)
        chunks = message.split(_CTCP_DELIMITER)

        messages = []
        i = 0
        while i < len(chunks) - 1:
            # Add message if it's non-empty.
            if len(chunks[i]) > 0:
                messages.append(chunks[i])

            if i < len(chunks) - 2:
                # Aye!  CTCP tagged data ahead!
                messages.append(tuple(chunks[i + 1].split(" ", 1)))

            i = i + 2

        if len(chunks) % 2 == 0:
            # Hey, a lonely _CTCP_DELIMITER at the end!  This means
            # that the last chunk, including the delimiter, is a
            # normal message! (This is according to the CTCP
            # specification.)
            messages.append(_CTCP_DELIMITER + chunks[-1])

        return messages

def is_channel(string):
    """Check if a string is a channel name.

    Returns true if the argument is a channel name, otherwise false.
    """
    return string and string[0] in "#&+!"

def ip_numstr_to_quad(num):
    """
    Convert an IP number as an integer given in ASCII
    representation to an IP address string.

    >>> ip_numstr_to_quad('3232235521')
    '192.168.0.1'
    >>> ip_numstr_to_quad(3232235521)
    '192.168.0.1'
    """
    n = int(num)
    packed = struct.pack('>L', n)
    bytes = struct.unpack('BBBB', packed)
    return ".".join(map(str, bytes))

def ip_quad_to_numstr(quad):
    """
    Convert an IP address string (e.g. '192.168.0.1') to an IP
    number as a base-10 integer given in ASCII representation.

    >>> ip_quad_to_numstr('192.168.0.1')
    '3232235521'
    """
    bytes = map(int, quad.split("."))
    packed = struct.pack('BBBB', *bytes)
    return str(struct.unpack('>L', packed)[0])

class NickMask(six.text_type):
    """
    A nickmask (the source of an Event)

    >>> nm = NickMask('pinky!username@example.com')
    >>> print(nm.nick)
    pinky
    >>> print(nm.host)
    example.com
    >>> print(nm.user)
    username
    >>> isinstance(nm, six.text_type)
    True

    >>> nm = 'красный!username@example.com'
    >>> if not six.PY3: nm = nm.decode('utf-8')
    >>> nm = NickMask(nm)
    >>> isinstance(nm.nick, six.text_type)
    True
    """
    @classmethod
    def from_params(cls, nick, user, host):
        return cls('{nick}!{user}@{host}'.format(**vars()))

    @property
    def nick(self):
        return self.split("!")[0]

    @property
    def userhost(self):
        return self.split("!")[1]

    @property
    def host(self):
        return self.split("@")[1]

    @property
    def user(self):
        return self.userhost.split("@")[0]

def _ping_ponger(connection, event):
    "A global handler for the 'ping' event"
    connection.pong(event.target)

# for backward compatibility
LineBuffer = buffer.LineBuffer
DecodingLineBuffer = buffer.DecodingLineBuffer
sonoprob/0x56
bot/py/py-irc-8.5.1/irc/client.py
Python
artistic-2.0
45,833
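The SimpleIRCClient docstring in the file above describes subclassing with on_<event> handler methods. Below is a minimal, hypothetical usage sketch, assuming the module is importable as irc.client; the server, port, nickname and channel are placeholder values, not taken from the source.

# Sketch only: a tiny client built on the SimpleIRCClient class shown above.
import irc.client

class EchoBot(irc.client.SimpleIRCClient):
    def on_welcome(self, connection, event):
        # "welcome" fires once the server has accepted our registration.
        connection.join("#example")

    def on_pubmsg(self, connection, event):
        # For pubmsg events, arguments[0] holds the message text.
        connection.privmsg(event.target, "echo: " + event.arguments[0])

if __name__ == "__main__":
    bot = EchoBot()
    bot.connect("irc.example.net", 6667, "echobot")
    bot.start()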
import unittest from Skoarcery import langoids, terminals, nonterminals, dragonsets, parsetable, emissions from Skoarcery.langoids import Terminal, Nonterminal class Code_Parser_Py(unittest.TestCase): def setUp(self): terminals.init() nonterminals.init() langoids.init() dragonsets.init() parsetable.init() emissions.init() def test_PY_rdpp(self): from Skoarcery.dragonsets import FIRST, FOLLOW from Skoarcery.terminals import Empty fd = open("SkoarPyon/rdpp.py", "w") PY = emissions.PY PY.fd = fd # Header # Imports # class SkoarParseException # class SkoarParser: # __init__ # fail self.code_start() PY.tab += 1 N = nonterminals.nonterminals.values() # precompute desirables PY.method("init_desirables") for A in N: R = A.production_rules PY.nl() PY.cmt(str(A)) # each production for P in R: if P.derives_empty: continue # A -> alpha alpha = P.production desires = FIRST(alpha) if Empty in desires: desires.discard(Empty) desires.update(FOLLOW(A)) i = 0 n = len(desires) PY.dict_set("self.desirables", str(P), "[", end="") for toke in desires: PY.raw(toke.toker_name) i += 1 if i != n: if i % 5 == 0: PY.raw(",\n") PY.stmt(" ", end="") else: PY.raw(", ") else: PY.raw("]\n") PY.end() # write each nonterminal as a function for A in N: R = A.production_rules #PY.cmt(str(A)) PY.stmt("def " + A.name + "(self, parent):") PY.tab += 1 PY.stmt("self.tab += 1") if A.intermediate: PY.var("noad", "parent") else: PY.var("noad", PY.v_new("SkoarNoad", PY.v_sym(A.name), "parent")) PY.nl() #PY.code_line("print('" + A.name + "')") for P in R: if P.derives_empty: continue # A -> alpha alpha = P.production PY.stmt("desires = " + PY.v_dict_get("self.desirables", str(P))) PY.if_("self.toker.sees(desires)") #PY.print(str(P)) for x in alpha: if isinstance(x, Terminal): PY.stmt("noad.add_toke('" + x.toker_name + "', self.toker.burn(" + x.toker_name + "))") #PY.print("burning: " + x.name) else: if x.intermediate: PY.stmt("self." + x.name + "(noad)") else: PY.stmt("noad.add_noad(self." + x.name + "(noad))") else: PY.return_("noad") PY.tab -= 1 PY.nl() if A.derives_empty: PY.cmt("<e>") #PY.print("burning empty") PY.return_("noad") else: PY.cmt("Error State") PY.stmt("self.fail()") PY.tab -= 1 PY.nl() PY.tab -= 1 fd.close() def code_start(self): from Skoarcery.terminals import Empty PY = emissions.PY PY.file_header("rdpp", "PyRDPP - Create Recursive Descent Predictive Parser") s = "from Skoarcery.SkoarPyon.apparatus import SkoarNoad\n"\ "from Skoarcery.SkoarPyon.lex import " T = terminals.tokens.values() n = len(T) i = 0 for t in T: if t == Empty: n -= 1 continue s += t.toker_name i += 1 if i < n: if i % 5 == 0: s += ", \\\n " else: s += ", " PY.raw(s + """ class SkoarParseException(Exception): pass class SkoarParser: def __init__(self, runtime): self.runtime = runtime self.toker = runtime.toker self.tab = 0 self.desirables = dict() self.init_desirables() def fail(self): self.toker.dump() raise SkoarParseException @property def tabby(self): if self.tab == 0: return "" return ("{:>" + str(self.tab * 2) + "}").format(" ") def print(self, line, end): print(self.tabby + line, end=end) """)
sofakid/Skoarcery
Skoarcery/factoary/Code_Parser_Py.py
Python
artistic-2.0
4,932
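The generator above builds each production's "desirables" as FIRST(alpha), extended by FOLLOW(A) when alpha can derive empty — the standard LL(1) predictive-parsing rule. A toy sketch of the FIRST computation follows, written against a hand-rolled grammar table rather than the Skoarcery dragonsets API (which is not shown in this file).

# Toy FIRST-set computation for a tiny expression grammar (illustration only).
EPS = "<e>"
grammar = {                       # A -> alpha | beta ...
    "E":  [["T", "E'"]],
    "E'": [["+", "T", "E'"], [EPS]],
    "T":  [["id"]],
}
nonterminals = set(grammar)

def first(symbols):
    """Terminals that can begin a derivation of `symbols` (EPS if it can vanish)."""
    out = set()
    for sym in symbols:
        if sym not in nonterminals:        # terminal (or EPS)
            out.add(sym)
            return out
        sub = set()
        for production in grammar[sym]:
            sub |= first(production)
        out |= sub - {EPS}
        if EPS not in sub:                 # this nonterminal cannot vanish
            return out
    out.add(EPS)                           # every symbol could derive empty
    return out

# A predictive parser picks A -> alpha when the lookahead is in FIRST(alpha),
# falling back to FOLLOW(A) when alpha can derive empty -- the "desirables" above.
print(first(["E"]))    # {'id'}
print(first(["E'"]))   # {'+', '<e>'}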
program dummy
end program
clementval/claw-compiler
test/claw/sca/issue_578_regression/reference_main_omp.f90
FORTRAN
bsd-2-clause
26
#pragma once #include "clay.hpp" namespace clay { template <class T> inline llvm::raw_ostream &operator<<(llvm::raw_ostream &out, llvm::ArrayRef<T> v) { out << "["; const T *i, *end; bool first = true; for (i = v.begin(), end = v.end(); i != end; ++i) { if (!first) out << ", "; first = false; out << *i; } out << "]"; return out; } // // printer module // llvm::raw_ostream &operator<<(llvm::raw_ostream &out, const Object &obj); llvm::raw_ostream &operator<<(llvm::raw_ostream &out, const Object *obj); llvm::raw_ostream &operator<<(llvm::raw_ostream &out, PVData const &pv); template <class T> llvm::raw_ostream &operator<<(llvm::raw_ostream &out, const Pointer<T> &p) { out << *p; return out; } llvm::raw_ostream &operator<<(llvm::raw_ostream &out, const PatternVar &pvar); void enableSafePrintName(); void disableSafePrintName(); struct SafePrintNameEnabler { SafePrintNameEnabler() { enableSafePrintName(); } ~SafePrintNameEnabler() { disableSafePrintName(); } }; void printNameList(llvm::raw_ostream &out, llvm::ArrayRef<ObjectPtr> x); void printNameList(llvm::raw_ostream &out, llvm::ArrayRef<ObjectPtr> x, llvm::ArrayRef<unsigned> dispatchIndices); void printNameList(llvm::raw_ostream &out, llvm::ArrayRef<TypePtr> x); void printStaticName(llvm::raw_ostream &out, ObjectPtr x); void printName(llvm::raw_ostream &out, ObjectPtr x); void printTypeAndValue(llvm::raw_ostream &out, EValuePtr ev); void printValue(llvm::raw_ostream &out, EValuePtr ev); string shortString(llvm::StringRef in); }
jckarter/clay
compiler/printer.hpp
C++
bsd-2-clause
1,601
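The SafePrintNameEnabler struct above is an RAII enable/disable pair around enableSafePrintName()/disableSafePrintName(). Illustration only, not the clay API: the closest Python analogue is a context manager.

from contextlib import contextmanager

_safe_print_enabled = False

@contextmanager
def safe_print_name():
    global _safe_print_enabled
    _safe_print_enabled = True        # plays the role of enableSafePrintName()
    try:
        yield
    finally:
        _safe_print_enabled = False   # plays the role of disableSafePrintName()

with safe_print_name():
    print("safe names enabled:", _safe_print_enabled)   # True
print("safe names enabled:", _safe_print_enabled)       # False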
cask 'pritunl' do version '1.0.1436.36' sha256 '29274275fa1da414491ed93690e5a5309fe037290f3ee0b99f0a15d517fece85' # github.com/pritunl/pritunl-client-electron was verified as official when first introduced to the cask url "https://github.com/pritunl/pritunl-client-electron/releases/download/#{version}/Pritunl.pkg.zip" appcast 'https://github.com/pritunl/pritunl-client-electron/releases.atom', checkpoint: '8c3a5e68d833c8d3eed26b3e7a24bfd6e440fd3c689f71333151c632b97df91e' name 'Pritunl OpenVPN Client' homepage 'https://client.pritunl.com/' pkg 'Pritunl.pkg' uninstall pkgutil: 'com.pritunl.pkg.Pritunl', launchctl: [ 'com.pritunl.client', 'com.pritunl.service', ], signal: ['TERM', 'com.electron.pritunl'] zap delete: '~/Library/Caches/pritunl', trash: [ '~/Library/Application Support/pritunl', '~/Library/Preferences/com.electron.pritunl*', ] end
muan/homebrew-cask
Casks/pritunl.rb
Ruby
bsd-2-clause
1,042
//////////////////////////////////////////////////////////// // // SFML - Simple and Fast Multimedia Library // Copyright (C) 2007-2016 Laurent Gomila ([email protected]) // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it freely, // subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; // you must not claim that you wrote the original software. // If you use this software in a product, an acknowledgment // in the product documentation would be appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, // and must not be misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. // //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Headers //////////////////////////////////////////////////////////// #include <SFML/Network/UdpSocket.hpp> #include <SFML/Network/IpAddress.hpp> #include <SFML/Network/Packet.hpp> #include <SFML/Network/SocketImpl.hpp> #include <SFML/System/Err.hpp> #include <algorithm> namespace sf { //////////////////////////////////////////////////////////// UdpSocket::UdpSocket() : Socket (Udp), m_buffer(MaxDatagramSize) { } //////////////////////////////////////////////////////////// unsigned short UdpSocket::getLocalPort() const { if (getHandle() != priv::SocketImpl::invalidSocket()) { // Retrieve informations about the local end of the socket sockaddr_in address; priv::SocketImpl::AddrLength size = sizeof(address); if (getsockname(getHandle(), reinterpret_cast<sockaddr*>(&address), &size) != -1) { return ntohs(address.sin_port); } } // We failed to retrieve the port return 0; } //////////////////////////////////////////////////////////// Socket::Status UdpSocket::bind(unsigned short port, const IpAddress& address) { // Create the internal socket if it doesn't exist create(); // Check if the address is valid if ((address == IpAddress::None) || (address == IpAddress::Broadcast)) return Error; // Bind the socket sockaddr_in addr = priv::SocketImpl::createAddress(address.toInteger(), port); if (::bind(getHandle(), reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) == -1) { err() << "Failed to bind socket to port " << port << std::endl; return Error; } return Done; } //////////////////////////////////////////////////////////// void UdpSocket::unbind() { // Simply close the socket close(); } //////////////////////////////////////////////////////////// Socket::Status UdpSocket::send(const void* data, std::size_t size, const IpAddress& remoteAddress, unsigned short remotePort) { // Create the internal socket if it doesn't exist create(); // Make sure that all the data will fit in one datagram if (size > MaxDatagramSize) { err() << "Cannot send data over the network " << "(the number of bytes to send is greater than sf::UdpSocket::MaxDatagramSize)" << std::endl; return Error; } // Build the target address sockaddr_in address = priv::SocketImpl::createAddress(remoteAddress.toInteger(), remotePort); // Send the data (unlike TCP, all the data is always sent in one call) int sent = sendto(getHandle(), static_cast<const char*>(data), static_cast<int>(size), 0, 
reinterpret_cast<sockaddr*>(&address), sizeof(address)); // Check for errors if (sent < 0) return priv::SocketImpl::getErrorStatus(); return Done; } //////////////////////////////////////////////////////////// Socket::Status UdpSocket::receive(void* data, std::size_t size, std::size_t& received, IpAddress& remoteAddress, unsigned short& remotePort) { // First clear the variables to fill received = 0; remoteAddress = IpAddress(); remotePort = 0; // Check the destination buffer if (!data) { err() << "Cannot receive data from the network (the destination buffer is invalid)" << std::endl; return Error; } // Data that will be filled with the other computer's address sockaddr_in address = priv::SocketImpl::createAddress(INADDR_ANY, 0); // Receive a chunk of bytes priv::SocketImpl::AddrLength addressSize = sizeof(address); int sizeReceived = recvfrom(getHandle(), static_cast<char*>(data), static_cast<int>(size), 0, reinterpret_cast<sockaddr*>(&address), &addressSize); // Check for errors if (sizeReceived < 0) return priv::SocketImpl::getErrorStatus(); // Fill the sender informations received = static_cast<std::size_t>(sizeReceived); remoteAddress = IpAddress(ntohl(address.sin_addr.s_addr)); remotePort = ntohs(address.sin_port); return Done; } //////////////////////////////////////////////////////////// Socket::Status UdpSocket::send(Packet& packet, const IpAddress& remoteAddress, unsigned short remotePort) { // UDP is a datagram-oriented protocol (as opposed to TCP which is a stream protocol). // Sending one datagram is almost safe: it may be lost but if it's received, then its data // is guaranteed to be ok. However, splitting a packet into multiple datagrams would be highly // unreliable, since datagrams may be reordered, dropped or mixed between different sources. // That's why SFML imposes a limit on packet size so that they can be sent in a single datagram. // This also removes the overhead associated to packets -- there's no size to send in addition // to the packet's data. // Get the data to send from the packet std::size_t size = 0; const void* data = packet.onSend(size); // Send it return send(data, size, remoteAddress, remotePort); } //////////////////////////////////////////////////////////// Socket::Status UdpSocket::receive(Packet& packet, IpAddress& remoteAddress, unsigned short& remotePort) { // See the detailed comment in send(Packet) above. // Receive the datagram std::size_t received = 0; Status status = receive(&m_buffer[0], m_buffer.size(), received, remoteAddress, remotePort); // If we received valid data, we can copy it to the user packet packet.clear(); if ((status == Done) && (received > 0)) packet.onReceive(&m_buffer[0], received); return status; } } // namespace sf
evanbowman/FLIGHT
deps/SFML-2.4.1/src/SFML/Network/UdpSocket.cpp
C++
bsd-2-clause
6,771
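The comments in UdpSocket::send(Packet&) above explain why each packet must fit in a single datagram: datagrams are delivered whole or not at all and may be reordered or dropped. A plain-Python socket sketch of the same sendto()/recvfrom() semantics follows; this is not SFML, only an illustration of the protocol behaviour.

import socket

receiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
receiver.bind(("127.0.0.1", 0))                 # port 0: let the OS choose
port = receiver.getsockname()[1]

sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sender.sendto(b"hello", ("127.0.0.1", port))    # one call, one datagram

data, peer = receiver.recvfrom(65507)           # max UDP payload size
print(data, peer)                               # b'hello' ('127.0.0.1', <port>)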
/** * @author Greg Rozmarynowycz <[email protected]> */ /** @type HttpError */ const HttpError = require('standard-http-error'); function getCode(error) { let code = error.code; if (!code) { code = parseInt(/HttpError: (\d{3})/.exec(error.message)[1], 10); } return code; } function processWaterlineError(err, res) { const [match, codeString, message] = /HttpError: (\d{3}) (.+)/.exec(err.message) || []; const code = parseInt(codeString, 10); if (match && Number.isFinite(code) && code !== 500) { // don't try to clean up 500 errors const origError = new HttpError(code, message); mapError(origError, res); } else { res.serverError(err); } } function mapError(err, res) { switch (getCode(err)) { case HttpError.UNAUTHORIZED: case HttpError.UNPROCESSABLE_ENTITY: return res.unauthorized(err); case 'E_VALIDATION': case HttpError.BAD_REQUEST: return res.badRequest(err); case HttpError.NOT_FOUND: return res.notFound(err); case 'E_UNKNOWN': return processWaterlineError(err, res); default: return res.serverError(err); } } module.exports = {mapError};
tlr3552/TheLounge
api/lib/responseUtils.js
JavaScript
bsd-2-clause
1,127
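getCode() above falls back to scraping the HTTP status out of the error message when err.code is missing. The same fallback, sketched in Python purely as a language-neutral illustration (the message format is assumed to match the regex used above):

import re

def get_code(message, default=500):
    match = re.search(r"HttpError: (\d{3})", message)
    return int(match.group(1)) if match else default

print(get_code("HttpError: 404 record not found"))   # 404
print(get_code("something unexpected"))               # 500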
import re
from pathlib import Path
from typing import Callable, Optional, Union

from streamlink_cli.compat import is_win32

REPLACEMENT = "_"
SPECIAL_PATH_PARTS = (".", "..")

_UNPRINTABLE = "".join(chr(c) for c in range(32))
_UNSUPPORTED_POSIX = "/"
_UNSUPPORTED_WIN32 = "\x7f\"*/:<>?\\|"

RE_CHARS_POSIX = re.compile(f"[{re.escape(_UNPRINTABLE + _UNSUPPORTED_POSIX)}]+")
RE_CHARS_WIN32 = re.compile(f"[{re.escape(_UNPRINTABLE + _UNSUPPORTED_WIN32)}]+")
if is_win32:
    RE_CHARS = RE_CHARS_WIN32
else:
    RE_CHARS = RE_CHARS_POSIX


def replace_chars(path: str, charmap: Optional[str] = None, replacement: str = REPLACEMENT) -> str:
    if charmap is None:
        pattern = RE_CHARS
    else:
        charmap = charmap.lower()
        if charmap in ("posix", "unix"):
            pattern = RE_CHARS_POSIX
        elif charmap in ("windows", "win32"):
            pattern = RE_CHARS_WIN32
        else:
            raise ValueError("Invalid charmap")

    return pattern.sub(replacement, path)


def replace_path(pathlike: Union[str, Path], mapper: Callable[[str], str]) -> Path:
    def get_part(part):
        newpart = mapper(part)
        return REPLACEMENT if part != newpart and newpart in SPECIAL_PATH_PARTS else newpart

    return Path(*(get_part(part) for part in Path(pathlike).expanduser().parts))
amurzeau/streamlink-debian
src/streamlink_cli/utils/path.py
Python
bsd-2-clause
1,315
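Possible usage of the replace_chars()/replace_path() helpers above; the import path is assumed from the file's location under streamlink_cli/utils/, and the outputs reflect running on a POSIX system.

from streamlink_cli.utils.path import replace_chars, replace_path

print(replace_chars("live/stream:720p", charmap="posix"))    # live_stream:720p
print(replace_chars("live/stream:720p", charmap="windows"))  # live_stream_720p

# replace_path() runs a mapper over each path component and refuses to let the
# result become "." or "..".
print(replace_path("videos/some:title.ts",
                   lambda part: replace_chars(part, "windows")))  # videos/some_title.ts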
//
//  EVConstants.h
//  EverliveSDKSample
//
//  Created by Mehfuz Hossain on 7/15/13.
//  Copyright (c) 2013 Telerik AD. All rights reserved.
//

#import <Foundation/Foundation.h>

@class EVUser;
@class EVResponse;
@class EVFile;

typedef void (^EVResultBlock)(EVResponse *response, NSError *error);
typedef void (^EVObjectFetchResultBlock)(NSArray *result, NSError *error);
typedef void (^EVUserResultBlock)(EVUser *user, NSError *error);
typedef void (^EVObjectResultBlock)(BOOL success, NSError *error);
typedef void (^EVScalarResultBlock)(NSUInteger result, NSError *error);
typedef void (^EVFileResult)(EVFile *result, NSError *error);

extern NSString* const kEverliveServer;
extern NSString* const kEverliveAppId;
extern NSString* const kEverliveMasterKey;

NSString* const kAdfsSoapMessageTemplate;
telerik/backend-services-push-ios
Sample/EverliveSDK.framework/Versions/A/Headers/EVConstants.h
C
bsd-2-clause
811
/* * linux/kernel/seccomp.c * * Copyright 2004-2005 Andrea Arcangeli <[email protected]> * * Copyright (C) 2012 Google, Inc. * Will Drewry <[email protected]> * * This defines a simple but solid secure-computing facility. * * Mode 1 uses a fixed list of allowed system calls. * Mode 2 allows user-defined system call filters in the form * of Berkeley Packet Filters/Linux Socket Filters. */ #include <linux/atomic.h> #include <linux/audit.h> #include <linux/compat.h> #include <linux/sched.h> #include <linux/seccomp.h> /* #define SECCOMP_DEBUG 1 */ #ifdef CONFIG_SECCOMP_FILTER #include <asm/syscall.h> #include <linux/filter.h> #include <linux/ptrace.h> #include <linux/security.h> #include <linux/slab.h> #include <linux/tracehook.h> #include <linux/uaccess.h> /** * struct seccomp_filter - container for seccomp BPF programs * * @usage: reference count to manage the object lifetime. * get/put helpers should be used when accessing an instance * outside of a lifetime-guarded section. In general, this * is only needed for handling filters shared across tasks. * @prev: points to a previously installed, or inherited, filter * @len: the number of instructions in the program * @insns: the BPF program instructions to evaluate * * seccomp_filter objects are organized in a tree linked via the @prev * pointer. For any task, it appears to be a singly-linked list starting * with current->seccomp.filter, the most recently attached or inherited filter. * However, multiple filters may share a @prev node, by way of fork(), which * results in a unidirectional tree existing in memory. This is similar to * how namespaces work. * * seccomp_filter objects should never be modified after being attached * to a task_struct (other than @usage). */ struct seccomp_filter { atomic_t usage; struct seccomp_filter *prev; unsigned short len; /* Instruction count */ struct sock_filter insns[]; }; /* Limit any path through the tree to 256KB worth of instructions. */ #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) /** * get_u32 - returns a u32 offset into data * @data: a unsigned 64 bit value * @index: 0 or 1 to return the first or second 32-bits * * This inline exists to hide the length of unsigned long. If a 32-bit * unsigned long is passed in, it will be extended and the top 32-bits will be * 0. If it is a 64-bit unsigned long, then whatever data is resident will be * properly returned. * * Endianness is explicitly ignored and left for BPF program authors to manage * as per the specific architecture. */ static inline u32 get_u32(u64 data, int index) { return ((u32 *)&data)[index]; } /* Helper for bpf_load below. */ #define BPF_DATA(_name) offsetof(struct seccomp_data, _name) /** * bpf_load: checks and returns a pointer to the requested offset * @off: offset into struct seccomp_data to load from * * Returns the requested 32-bits of data. * seccomp_check_filter() should assure that @off is 32-bit aligned * and not out of bounds. Failure to do so is a BUG. 
*/ u32 seccomp_bpf_load(int off) { struct pt_regs *regs = task_pt_regs(current); if (off == BPF_DATA(nr)) return syscall_get_nr(current, regs); if (off == BPF_DATA(arch)) return syscall_get_arch(current, regs); if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { unsigned long value; int arg = (off - BPF_DATA(args[0])) / sizeof(u64); int index = !!(off % sizeof(u64)); syscall_get_arguments(current, regs, arg, 1, &value); return get_u32(value, index); } if (off == BPF_DATA(instruction_pointer)) return get_u32(KSTK_EIP(current), 0); if (off == BPF_DATA(instruction_pointer) + sizeof(u32)) return get_u32(KSTK_EIP(current), 1); /* seccomp_check_filter should make this impossible. */ BUG(); } /** * seccomp_check_filter - verify seccomp filter code * @filter: filter to verify * @flen: length of filter * * Takes a previously checked filter (by sk_chk_filter) and * redirects all filter code that loads struct sk_buff data * and related data through seccomp_bpf_load. It also * enforces length and alignment checking of those loads. * * Returns 0 if the rule set is legal or -EINVAL if not. */ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) { int pc; for (pc = 0; pc < flen; pc++) { struct sock_filter *ftest = &filter[pc]; u16 code = ftest->code; u32 k = ftest->k; switch (code) { case BPF_S_LD_W_ABS: ftest->code = BPF_S_ANC_SECCOMP_LD_W; /* 32-bit aligned and not out of bounds. */ if (k >= sizeof(struct seccomp_data) || k & 3) return -EINVAL; continue; case BPF_S_LD_W_LEN: ftest->code = BPF_S_LD_IMM; ftest->k = sizeof(struct seccomp_data); continue; case BPF_S_LDX_W_LEN: ftest->code = BPF_S_LDX_IMM; ftest->k = sizeof(struct seccomp_data); continue; /* Explicitly include allowed calls. */ case BPF_S_RET_K: case BPF_S_RET_A: case BPF_S_ALU_ADD_K: case BPF_S_ALU_ADD_X: case BPF_S_ALU_SUB_K: case BPF_S_ALU_SUB_X: case BPF_S_ALU_MUL_K: case BPF_S_ALU_MUL_X: case BPF_S_ALU_DIV_X: case BPF_S_ALU_AND_K: case BPF_S_ALU_AND_X: case BPF_S_ALU_OR_K: case BPF_S_ALU_OR_X: case BPF_S_ALU_LSH_K: case BPF_S_ALU_LSH_X: case BPF_S_ALU_RSH_K: case BPF_S_ALU_RSH_X: case BPF_S_ALU_NEG: case BPF_S_LD_IMM: case BPF_S_LDX_IMM: case BPF_S_MISC_TAX: case BPF_S_MISC_TXA: case BPF_S_ALU_DIV_K: case BPF_S_LD_MEM: case BPF_S_LDX_MEM: case BPF_S_ST: case BPF_S_STX: case BPF_S_JMP_JA: case BPF_S_JMP_JEQ_K: case BPF_S_JMP_JEQ_X: case BPF_S_JMP_JGE_K: case BPF_S_JMP_JGE_X: case BPF_S_JMP_JGT_K: case BPF_S_JMP_JGT_X: case BPF_S_JMP_JSET_K: case BPF_S_JMP_JSET_X: continue; default: return -EINVAL; } } return 0; } /** * seccomp_run_filters - evaluates all seccomp filters against @syscall * @syscall: number of the current system call * * Returns valid seccomp BPF response codes. */ static u32 seccomp_run_filters(int syscall) { struct seccomp_filter *f; u32 ret = SECCOMP_RET_ALLOW; /* Ensure unexpected behavior doesn't result in failing open. */ if (WARN_ON(current->seccomp.filter == NULL)) return SECCOMP_RET_KILL; /* * All filters in the list are evaluated and the lowest BPF return * value always takes priority (ignoring the DATA). */ for (f = current->seccomp.filter; f; f = f->prev) { u32 cur_ret = sk_run_filter(NULL, f->insns); if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) ret = cur_ret; } return ret; } /** * seccomp_attach_filter: Attaches a seccomp filter to current. * @fprog: BPF program to install * * Returns 0 on success or an errno on failure. 
*/ static long seccomp_attach_filter(struct sock_fprog *fprog) { struct seccomp_filter *filter; unsigned long fp_size = fprog->len * sizeof(struct sock_filter); unsigned long total_insns = fprog->len; long ret; if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) return -EINVAL; for (filter = current->seccomp.filter; filter; filter = filter->prev) total_insns += filter->len + 4; /* include a 4 instr penalty */ if (total_insns > MAX_INSNS_PER_PATH) return -ENOMEM; /* * Installing a seccomp filter requires that the task have * CAP_SYS_ADMIN in its namespace or be running with no_new_privs. * This avoids scenarios where unprivileged tasks can affect the * behavior of privileged children. */ if (!current->no_new_privs && security_capable_noaudit(current_cred(), current_user_ns(), CAP_SYS_ADMIN) != 0) return -EACCES; /* Allocate a new seccomp_filter */ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, GFP_KERNEL|__GFP_NOWARN); if (!filter) return -ENOMEM; atomic_set(&filter->usage, 1); filter->len = fprog->len; /* Copy the instructions from fprog. */ ret = -EFAULT; if (copy_from_user(filter->insns, fprog->filter, fp_size)) goto fail; /* Check and rewrite the fprog via the skb checker */ ret = sk_chk_filter(filter->insns, filter->len); if (ret) goto fail; /* Check and rewrite the fprog for seccomp use */ ret = seccomp_check_filter(filter->insns, filter->len); if (ret) goto fail; /* * If there is an existing filter, make it the prev and don't drop its * task reference. */ filter->prev = current->seccomp.filter; current->seccomp.filter = filter; return 0; fail: kfree(filter); return ret; } /** * seccomp_attach_user_filter - attaches a user-supplied sock_fprog * @user_filter: pointer to the user data containing a sock_fprog. * * Returns 0 on success and non-zero otherwise. */ long seccomp_attach_user_filter(char __user *user_filter) { struct sock_fprog fprog; long ret = -EFAULT; #ifdef CONFIG_COMPAT if (is_compat_task()) { struct compat_sock_fprog fprog32; if (copy_from_user(&fprog32, user_filter, sizeof(fprog32))) goto out; fprog.len = fprog32.len; fprog.filter = compat_ptr(fprog32.filter); } else /* falls through to the if below. */ #endif if (copy_from_user(&fprog, user_filter, sizeof(fprog))) goto out; ret = seccomp_attach_filter(&fprog); out: return ret; } /* get_seccomp_filter - increments the reference count of the filter on @tsk */ void get_seccomp_filter(struct task_struct *tsk) { struct seccomp_filter *orig = tsk->seccomp.filter; if (!orig) return; /* Reference count is bounded by the number of total processes. */ atomic_inc(&orig->usage); } /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */ void put_seccomp_filter(struct task_struct *tsk) { struct seccomp_filter *orig = tsk->seccomp.filter; /* Clean up single-reference branches iteratively. */ while (orig && atomic_dec_and_test(&orig->usage)) { struct seccomp_filter *freeme = orig; orig = orig->prev; kfree(freeme); } } /** * seccomp_send_sigsys - signals the task to allow in-process syscall emulation * @syscall: syscall number to send to userland * @reason: filter-supplied reason code to send to userland (via si_errno) * * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info. 
*/ static void seccomp_send_sigsys(int syscall, int reason) { struct siginfo info; memset(&info, 0, sizeof(info)); info.si_signo = SIGSYS; info.si_code = SYS_SECCOMP; info.si_call_addr = (void __user *)KSTK_EIP(current); info.si_errno = reason; info.si_arch = syscall_get_arch(current, task_pt_regs(current)); info.si_syscall = syscall; force_sig_info(SIGSYS, &info, current); } #endif /* CONFIG_SECCOMP_FILTER */ /* * Secure computing mode 1 allows only read/write/exit/sigreturn. * To be fully secure this must be combined with rlimit * to limit the stack allocations too. */ static int mode1_syscalls[] = { __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn, 0, /* null terminated */ }; #ifdef CONFIG_COMPAT static int mode1_syscalls_32[] = { __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, 0, /* null terminated */ }; #endif int __secure_computing(int this_syscall) { int mode = current->seccomp.mode; int exit_sig = 0; int *syscall; u32 ret; switch (mode) { case SECCOMP_MODE_STRICT: syscall = mode1_syscalls; #ifdef CONFIG_COMPAT if (is_compat_task()) syscall = mode1_syscalls_32; #endif do { if (*syscall == this_syscall) return 0; } while (*++syscall); exit_sig = SIGKILL; ret = SECCOMP_RET_KILL; break; #ifdef CONFIG_SECCOMP_FILTER case SECCOMP_MODE_FILTER: { int data; ret = seccomp_run_filters(this_syscall); data = ret & SECCOMP_RET_DATA; ret &= SECCOMP_RET_ACTION; switch (ret) { case SECCOMP_RET_ERRNO: /* Set the low-order 16-bits as a errno. */ syscall_set_return_value(current, task_pt_regs(current), -data, 0); goto skip; case SECCOMP_RET_TRAP: /* Show the handler the original registers. */ syscall_rollback(current, task_pt_regs(current)); /* Let the filter pass back 16 bits of data. */ seccomp_send_sigsys(this_syscall, data); goto skip; case SECCOMP_RET_TRACE: /* Skip these calls if there is no tracer. */ if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) goto skip; /* Allow the BPF to provide the event message */ ptrace_event(PTRACE_EVENT_SECCOMP, data); /* * The delivery of a fatal signal during event * notification may silently skip tracer notification. * Terminating the task now avoids executing a system * call that may not be intended. */ if (fatal_signal_pending(current)) break; return 0; case SECCOMP_RET_ALLOW: return 0; case SECCOMP_RET_KILL: default: break; } exit_sig = SIGSYS; break; } #endif default: BUG(); } #ifdef SECCOMP_DEBUG dump_stack(); #endif __audit_seccomp(this_syscall, exit_sig, ret); do_exit(exit_sig); #ifdef CONFIG_SECCOMP_FILTER skip: audit_seccomp(this_syscall, exit_sig, ret); #endif return -1; } long prctl_get_seccomp(void) { return current->seccomp.mode; } /** * prctl_set_seccomp: configures current->seccomp.mode * @seccomp_mode: requested mode to use * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER * * This function may be called repeatedly with a @seccomp_mode of * SECCOMP_MODE_FILTER to install additional filters. Every filter * successfully installed will be evaluated (in reverse order) for each system * call the task makes. * * Once current->seccomp.mode is non-zero, it may not be changed. * * Returns 0 on success or -EINVAL on failure. 
*/ long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter) { long ret = -EINVAL; if (current->seccomp.mode && current->seccomp.mode != seccomp_mode) goto out; switch (seccomp_mode) { case SECCOMP_MODE_STRICT: ret = 0; #ifdef TIF_NOTSC disable_TSC(); #endif break; #ifdef CONFIG_SECCOMP_FILTER case SECCOMP_MODE_FILTER: ret = seccomp_attach_user_filter(filter); if (ret) goto out; break; #endif default: goto out; } current->seccomp.mode = seccomp_mode; set_thread_flag(TIF_SECCOMP); out: return ret; }
andrewjylee/omniplay
linux-lts-quantal-3.5.0/kernel/seccomp.c
C
bsd-2-clause
14,061
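seccomp_run_filters() above evaluates every attached filter and keeps the numerically lowest action, so the most restrictive verdict wins. A pure-Python sketch of that precedence rule follows; the constant values are assumed to mirror include/uapi/linux/seccomp.h and are not part of the file above.

SECCOMP_RET_KILL   = 0x00000000
SECCOMP_RET_TRAP   = 0x00030000
SECCOMP_RET_ERRNO  = 0x00050000
SECCOMP_RET_ALLOW  = 0x7fff0000
SECCOMP_RET_ACTION = 0x7fff0000    # mask selecting the action bits
SECCOMP_RET_DATA   = 0x0000ffff    # low 16 bits carry filter-supplied data

def run_filters(filter_returns):
    """filter_returns: one BPF return value per attached filter."""
    result = SECCOMP_RET_ALLOW
    for ret in filter_returns:
        if (ret & SECCOMP_RET_ACTION) < (result & SECCOMP_RET_ACTION):
            result = ret
    return result

# One filter allows the call, another wants errno 1 (EPERM); ERRNO < ALLOW, so it wins.
verdict = run_filters([SECCOMP_RET_ALLOW, SECCOMP_RET_ERRNO | 1])
print(hex(verdict & SECCOMP_RET_ACTION), verdict & SECCOMP_RET_DATA)  # 0x50000 1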
/** * Gui.cpp * emptyExample * * Created by Marek Bereza on 19/06/2011. * */ #include "Gui.h" #include "ofxXmlSettings.h" #include "Instantiator.h" xmlgui::Gui::Gui(): xmlgui::Container() { root = NULL; } bool xmlgui::Gui::loadFromXml(string file) { if(root!=NULL) { removeChild(root); delete root; root = NULL; } if(file=="") file = fileName; else fileName = file; TiXmlDocument doc(ofToDataPath(file)); if(!doc.LoadFile()) return false; TiXmlElement* rootElement = doc.FirstChildElement()->ToElement(); // find out what the root tag is string firstTagName = rootElement->Value(); root = (Container*)INSTANTIATE(firstTagName); // this should recurse, calling createControl as it goes. root->loadFromXmlObject(rootElement); addChild(root); setup(root); return true; } void xmlgui::Gui::setRoot(xmlgui::Container *r) { this->root = r; addChild(root); setup(root); } xmlgui::Container *xmlgui::Gui::getRoot() { return root; } void xmlgui::Gui::saveToXml(string file) { //if(file=="") file = fileName; //else fileName = file; root->saveToXml(file); /* if(children.size()>0) { if(children[0]->isContainer()) { printf("Saved first child to container\n"); ((Container*)children[0])->saveToXml(file); } }*/ // Container::saveToXml(file); } xmlgui::Control *xmlgui::Gui::getControlById(string ctrl) { return root->getControlById(ctrl); }
HellicarAndLewis/MulticolouredMagic
addons/xmlgui/src/framework/Gui.cpp
C++
bsd-2-clause
1,401
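Gui::loadFromXml above reads the root tag name and instantiates the matching control through the INSTANTIATE macro, then recurses over the children. A small Python sketch of the same tag-name-to-class registry pattern follows; the class names and registry are illustrative, not the xmlgui API.

import xml.etree.ElementTree as ET

class Control:
    def __init__(self):
        self.children = []

class Container(Control):
    pass

class Slider(Control):
    pass

REGISTRY = {"container": Container, "slider": Slider}   # stands in for INSTANTIATE()

def build(element):
    node = REGISTRY[element.tag]()      # pick the class from the tag name
    for child in element:
        node.children.append(build(child))
    return node

root = build(ET.fromstring("<container><slider/><slider/></container>"))
print(type(root).__name__, len(root.children))   # Container 2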
#ifndef _DAEMONIZE_H_
#define _DAEMONIZE_H_

/**
 * daemonize(spid):
 * Daemonize and write the process ID in decimal to a file named ${spid}.
 */
int daemonize(const char *);

#endif /* !_DAEMONIZE_H_ */
brainwater/spiped
lib/util/daemonize.h
C
bsd-2-clause
205
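The header above only declares daemonize(spid): background the process and record its PID in the named file. A generic Python sketch of the classic double-fork approach follows; it is not the spiped C implementation, and the pidfile path is a placeholder.

import os
import sys

def daemonize(pidfile):
    if os.fork() > 0:            # first fork: the parent returns to the shell
        sys.exit(0)
    os.setsid()                  # new session, no controlling terminal
    if os.fork() > 0:            # second fork: can never reacquire a terminal
        sys.exit(0)
    os.chdir("/")
    with open(pidfile, "w") as fh:
        fh.write("%d\n" % os.getpid())

# daemonize("/tmp/example.pid")  # POSIX-only; would background this process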
!------------------------------------------------------------------------------- !> !! Debug utility module !! !! @par Description !! This module is for dubug. !! !! @author H.Tomita !! !! @par History !! @li 2012-06-29 (H.Yashiro) [NEW] !< module mod_debug !----------------------------------------------------------------------------- ! !++ Used modules ! use mpi use mod_adm, only: & ADM_LOG_FID, & ADM_NSYS, & ADM_MAXFNAME !----------------------------------------------------------------------------- implicit none private !----------------------------------------------------------------------------- ! !++ Public procedure ! public :: DEBUG_dampdata public :: DEBUG_dampascii4D public :: DEBUG_dampascii3D public :: DEBUG_rapstart public :: DEBUG_rapend public :: DEBUG_rapreport !----------------------------------------------------------------------------- ! !++ Public parameters & variables ! !----------------------------------------------------------------------------- ! !++ Private procedure ! private :: DEBUG_rapid !----------------------------------------------------------------------------- ! !++ Private parameters & variables ! integer, private, parameter :: DEBUG_rapnlimit = 100 integer, private, save :: DEBUG_rapnmax = 0 character(len=ADM_NSYS), private, save :: DEBUG_rapname(DEBUG_rapnlimit) real(8), private, save :: DEBUG_raptstr(DEBUG_rapnlimit) real(8), private, save :: DEBUG_rapttot(DEBUG_rapnlimit) integer, private, save :: DEBUG_rapnstr(DEBUG_rapnlimit) integer, private, save :: DEBUG_rapnend(DEBUG_rapnlimit) #ifdef PAPI_OPS ! <-- [add] PAPI R.Yoshida 20121022 !integer(8),public, save :: papi_flpins !total floating point instructions since the first call integer(8),public, save :: papi_flpops !total floating point operations since the first call !real(4), public, save :: papi_real_time_i !total realtime since the first PAPI_flins() call !real(4), public, save :: papi_proc_time_i !total process time since the first PAPI_flins() call real(4), public, save :: papi_real_time_o !total realtime since the first PAPI_flops() call real(4), public, save :: papi_proc_time_o !total process time since the first PAPI_flops() call !real(4), public, save :: papi_mflins !Mflip/s achieved since the previous call real(4), public, save :: papi_mflops !Mflop/s achieved since the previous call integer, public, save :: papi_check #endif !----------------------------------------------------------------------------- contains !----------------------------------------------------------------------------- !> !> Damp all data !> subroutine DEBUG_dampdata( & basename, & !--- [IN] var, & !--- [IN] var_pl ) !--- [IN] use mod_misc, only: & MISC_make_idstr, & MISC_get_available_fid use mod_adm, only: & ADM_PRC_PL, & ADM_prc_me implicit none character(len=*), intent(in) :: basename real(8), intent(in) :: var (:,:,:,:) real(8), intent(in) :: var_pl(:,:,:,:) integer :: shp(4) character(LEN=ADM_MAXFNAME) :: fname integer :: fid !--------------------------------------------------------------------------- shp(:) = shape(var) call MISC_make_idstr(fname,trim(basename),'pe',ADM_prc_me) fid = MISC_get_available_fid() open( unit = fid, & file = trim(fname), & form = 'unformatted', & access = 'direct', & recl = shp(1)*shp(2)*shp(3)*shp(4)*8, & status = 'unknown' ) write(fid,rec=1) var close(fid) if ( ADM_prc_me == ADM_prc_pl ) then shp(:) = shape(var_pl) fname = trim(basename)//'.pl' fid = MISC_get_available_fid() open( unit = fid, & file = trim(fname), & form = 'unformatted', & access = 'direct', & recl = 
       shp(1)*shp(2)*shp(3)*shp(4)*8, &
       status = 'unknown'             )

       write(fid,rec=1) var_pl

       close(fid)
    endif

  end subroutine DEBUG_dampdata

  !-----------------------------------------------------------------------------
  !>
  !> Damp all data
  !>
  subroutine DEBUG_dampascii4D( &
       basename, & !--- [IN]
       var,      & !--- [IN]
       var_pl    ) !--- [IN]
    use mod_misc, only: &
       MISC_make_idstr, &
       MISC_get_available_fid
    use mod_adm, only: &
       ADM_prc_pl, &
       ADM_prc_me
    implicit none

    character(len=*), intent(in) :: basename
    real(8),          intent(in) :: var   (:,:,:,:)
    real(8),          intent(in) :: var_pl(:,:,:,:)

    integer :: shp(4)

    character(LEN=ADM_MAXFNAME) :: fname
    integer                     :: fid
    integer                     :: i1,i2,i3,i4
    !---------------------------------------------------------------------------

    shp(:) = shape(var)

    call MISC_make_idstr(fname,trim(basename),'txt',ADM_prc_me)
    fid = MISC_get_available_fid()
    open( unit   = fid,         &
          file   = trim(fname), &
          form   = 'formatted', &
          status = 'unknown'    )

    do i4 = 1, shp(4)
    do i3 = 1, shp(3)
    do i2 = 1, shp(2)
    do i1 = 1, shp(1)
       write(fid,*) "(",i1,",",i2,",",i3,",",i4,")=",var(i1,i2,i3,i4)
    enddo
    enddo
    enddo
    enddo

    close(fid)

    if ( ADM_prc_me == ADM_prc_pl ) then
       shp(:) = shape(var_pl)

       fname = trim(basename)//'.txtpl'
       fid = MISC_get_available_fid()
       open( unit   = fid,         &
             file   = trim(fname), &
             form   = 'formatted', &
             status = 'unknown'    )

       do i4 = 1, shp(4)
       do i3 = 1, shp(3)
       do i2 = 1, shp(2)
       do i1 = 1, shp(1)
          write(fid,*) "(",i1,",",i2,",",i3,",",i4,")=",var_pl(i1,i2,i3,i4)
       enddo
       enddo
       enddo
       enddo

       close(fid)
    endif

  end subroutine DEBUG_dampascii4D

  !-----------------------------------------------------------------------------
  !>
  !> Damp all data
  !>
  subroutine DEBUG_dampascii3D( &
       basename, & !--- [IN]
       var,      & !--- [IN]
       var_pl    ) !--- [IN]
    use mod_misc, only: &
       MISC_make_idstr, &
       MISC_get_available_fid
    use mod_adm, only: &
       ADM_prc_pl, &
       ADM_prc_me
    implicit none

    character(len=*), intent(in) :: basename
    real(8),          intent(in) :: var   (:,:,:)
    real(8),          intent(in) :: var_pl(:,:,:)

    integer :: shp(3)

    character(LEN=ADM_MAXFNAME) :: fname
    integer                     :: fid
    integer                     :: i1,i2,i3
    !---------------------------------------------------------------------------

    shp(:) = shape(var)

    call MISC_make_idstr(fname,trim(basename),'txt',ADM_prc_me)
    fid = MISC_get_available_fid()
    open( unit   = fid,         &
          file   = trim(fname), &
          form   = 'formatted', &
          status = 'unknown'    )

    do i3 = 1, shp(3)
    do i2 = 1, shp(2)
    do i1 = 1, shp(1)
       write(fid,*) "(",i1,",",i2,",",i3,")=",var(i1,i2,i3)
    enddo
    enddo
    enddo

    close(fid)

    if ( ADM_prc_me == ADM_prc_pl ) then
       shp(:) = shape(var_pl)

       fname = trim(basename)//'.txtpl'
       fid = MISC_get_available_fid()
       open( unit   = fid,         &
             file   = trim(fname), &
             form   = 'formatted', &
             status = 'unknown'    )

       do i3 = 1, shp(3)
       do i2 = 1, shp(2)
       do i1 = 1, shp(1)
          write(fid,*) "(",i1,",",i2,",",i3,")=",var_pl(i1,i2,i3)
       enddo
       enddo
       enddo

       close(fid)
    endif

  end subroutine DEBUG_dampascii3D

  !-----------------------------------------------------------------------------
  function DEBUG_rapid( rapname ) result(id)
    implicit none

    character(len=*), intent(in) :: rapname
    integer :: id
    !---------------------------------------------------------------------------

    if ( DEBUG_rapnmax >= 1 ) then
       do id = 1, DEBUG_rapnmax
          if( trim(rapname) == trim(DEBUG_rapname(id)) ) return
       enddo
    endif

    DEBUG_rapnmax     = DEBUG_rapnmax + 1
    id                = DEBUG_rapnmax
    DEBUG_rapname(id) = trim(rapname)
    DEBUG_raptstr(id) = 0.D0
    DEBUG_rapttot(id) = 0.D0
    DEBUG_rapnstr(id) = 0
    DEBUG_rapnend(id) = 0

  end function DEBUG_rapid

  !-----------------------------------------------------------------------------
  subroutine DEBUG_rapstart( rapname )
    implicit none

    character(len=*), intent(in) :: rapname

    real(8) :: time
    integer :: id
    !---------------------------------------------------------------------------

    id = DEBUG_rapid( rapname )

    time = real(MPI_WTIME(), kind=8)

    DEBUG_raptstr(id) = time
    DEBUG_rapnstr(id) = DEBUG_rapnstr(id) + 1

#ifdef _FAPP_
    call START_COLLECTION( rapname )
#endif

    return
  end subroutine DEBUG_rapstart

  !-----------------------------------------------------------------------------
  subroutine DEBUG_rapend( rapname )
    implicit none

    character(len=*), intent(in) :: rapname

    real(8) :: time
    integer :: id
    !---------------------------------------------------------------------------

    id = DEBUG_rapid( rapname )

    time = real(MPI_WTIME(), kind=8)

    DEBUG_rapttot(id) = DEBUG_rapttot(id) + ( time-DEBUG_raptstr(id) )
    DEBUG_rapnend(id) = DEBUG_rapnend(id) + 1

#ifdef _FAPP_
    call STOP_COLLECTION( rapname )
#endif

    return
  end subroutine DEBUG_rapend

  !-----------------------------------------------------------------------------
  subroutine DEBUG_rapreport
    use mod_adm, only: &
       ADM_COMM_RUN_WORLD, &
       ADM_prc_all,        &
       ADM_prc_me
    implicit none

    real(8) :: sendbuf(1)
    real(8) :: recvbuf(ADM_prc_all)

    real(8) :: globalavg, globalmax, globalmin
#ifdef PAPI_OPS
    real(8) :: globalsum, total_flops
#endif

    integer :: ierr
    integer :: id
    !---------------------------------------------------------------------------

    if ( DEBUG_rapnmax >= 1 ) then

       do id = 1, DEBUG_rapnmax
          if ( DEBUG_rapnstr(id) /= DEBUG_rapnend(id) ) then
             write(*,*) '*** Mismatch Report',id,DEBUG_rapname(id),DEBUG_rapnstr(id),DEBUG_rapnend(id)
          endif
       enddo

       write(ADM_LOG_FID,*)
       write(ADM_LOG_FID,*) '*** Computational Time Report'

       ! do id = 1, DEBUG_rapnmax
       !    write(ADM_LOG_FID,'(1x,A,I3.3,A,A,A,F10.3,A,I7)') &
       !    '*** ID=',id,' : ',DEBUG_rapname(id),' T=',DEBUG_rapttot(id),' N=',DEBUG_rapnstr(id)
       ! enddo

       do id = 1, DEBUG_rapnmax
          sendbuf(1) = DEBUG_rapttot(id)

          call MPI_Allgather( sendbuf,              &
                              1,                    &
                              MPI_DOUBLE_PRECISION, &
                              recvbuf,              &
                              1,                    &
                              MPI_DOUBLE_PRECISION, &
                              ADM_COMM_RUN_WORLD,   &
                              ierr                  )

          globalavg = sum( recvbuf(:) ) / real(ADM_prc_all,kind=8)
          globalmax = maxval( recvbuf(:) )
          globalmin = minval( recvbuf(:) )

          write(ADM_LOG_FID,'(1x,A,I3.3,A,A,A,F10.3,A,F10.3,A,F10.3,A,I7)') &
               '*** ID=',   id,                &
               ' : ',       DEBUG_rapname(id), &
               ' T(avg)=',  globalavg,         &
               ', T(max)=', globalmax,         &
               ', T(min)=', globalmin,         &
               ', N=',      DEBUG_rapnstr(id)
       enddo
    else
       write(ADM_LOG_FID,*)
       write(ADM_LOG_FID,*) '*** Computational Time Report: NO item.'
    endif

#ifdef PAPI_OPS
    ! [add] PAPI R.Yoshida 20121022
    !write(ADM_LOG_FID,*) ' *** Type: Instructions'
    !write(ADM_LOG_FID,*) ' --- Real Time:',papi_real_time_i*2.0d0,' Proc. Time:',papi_proc_time_i*2.0d0
    !write(ADM_LOG_FID,*) ' --- flop inst:',papi_flpins*2,' Gflins/s:',papi_mflins*2.0d0/1.0d3 !GIGA

    write(ADM_LOG_FID,*)
    write(ADM_LOG_FID,*) '********* PAPI report *********'
    write(ADM_LOG_FID,*) '*** Type: Operations'
    write(ADM_LOG_FID,*) '--- Wall clock Time [sec] (this PE):', papi_real_time_o
    write(ADM_LOG_FID,*) '--- Processor Time [sec] (this PE):', papi_proc_time_o
    write(ADM_LOG_FID,*) '--- Floating Operations [FLOP] (this PE):', papi_flpops
    write(ADM_LOG_FID,*) '--- FLOPS by PAPI [MFLOPS] (this PE):', papi_mflops
    write(ADM_LOG_FID,*) '--- FLOP / Time [MFLOPS] (this PE):', papi_flpops / papi_proc_time_o / 1024.D0**2 !GIGA
    write(ADM_LOG_FID,*)

    sendbuf(1) = real(papi_proc_time_o,kind=8)

    call MPI_Allgather( sendbuf,              &
                        1,                    &
                        MPI_DOUBLE_PRECISION, &
                        recvbuf,              &
                        1,                    &
                        MPI_DOUBLE_PRECISION, &
                        ADM_COMM_RUN_WORLD,   &
                        ierr                  )

    globalavg = sum( recvbuf(:) ) / real(ADM_prc_all,kind=8)
    globalmax = maxval( recvbuf(:) )
    globalmin = minval( recvbuf(:) )

    call COMM_Stat_avg( real(papi_proc_time_o,kind=8), globalavg )
    call COMM_Stat_max( real(papi_proc_time_o,kind=8), globalmax )
    call COMM_Stat_min( real(papi_proc_time_o,kind=8), globalmin )

    write(ADM_LOG_FID,'(1x,A,F10.3,A,F10.3,A,F10.3)') &
         '--- Processor Time [sec] (avg)=', globalavg, &
         ', (max)=', globalmax, &
         ', (min)=', globalmin

    sendbuf(1) = real(papi_flpops,kind=8)

    call MPI_Allgather( sendbuf,              &
                        1,                    &
                        MPI_DOUBLE_PRECISION, &
                        recvbuf,              &
                        1,                    &
                        MPI_DOUBLE_PRECISION, &
                        ADM_COMM_RUN_WORLD,   &
                        ierr                  )

    globalsum = sum( recvbuf(:) )
    globalavg = globalsum / real(ADM_prc_all,kind=8)
    globalmax = maxval( recvbuf(:) )
    globalmin = minval( recvbuf(:) )

    total_flops = globalsum / globalmax / 1024.D0**3

    write(ADM_LOG_FID,'(1x,A,F10.3,A,F10.3,A,F10.3)') &
         '--- Floating Operations [GFLOP] (avg)=', globalavg / 1024.D0**3, &
         ', (max)=', globalmax / 1024.D0**3, &
         ', (min)=', globalmin / 1024.D0**3

    write(ADM_LOG_FID,'(1x,A,F10.3)') &
         '--- Total Flops [GFLOPS] (all PE):',total_flops

    call PAPIF_shutdown
#endif

    return
  end subroutine DEBUG_rapreport

end module mod_debug
pyotr777/nicam-dc-mini
src/share/mod_debug.f90
FORTRAN
bsd-2-clause
15,452
/* SPDX-License-Identifier: BSD-2-Clause */
/***********************************************************************;
 * Copyright (c) 2015 - 2017, Intel Corporation
 * All rights reserved.
 ***********************************************************************/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "tss2_tpm2_types.h"
#include "tss2_mu.h"
#include "sysapi_util.h"

TSS2_RC Tss2_Sys_HMAC_Prepare(
    TSS2_SYS_CONTEXT *sysContext,
    TPMI_DH_OBJECT handle,
    const TPM2B_MAX_BUFFER *buffer,
    TPMI_ALG_HASH hashAlg)
{
    _TSS2_SYS_CONTEXT_BLOB *ctx = syscontext_cast(sysContext);
    TSS2_RC rval;

    if (!ctx)
        return TSS2_SYS_RC_BAD_REFERENCE;

    if (IsAlgorithmWeak(hashAlg, 0))
        return TSS2_SYS_RC_BAD_VALUE;

    rval = CommonPreparePrologue(ctx, TPM2_CC_HMAC);
    if (rval)
        return rval;

    rval = Tss2_MU_UINT32_Marshal(handle, ctx->cmdBuffer,
                                  ctx->maxCmdSize,
                                  &ctx->nextData);
    if (rval)
        return rval;

    if (!buffer) {
        ctx->decryptNull = 1;

        rval = Tss2_MU_UINT16_Marshal(0, ctx->cmdBuffer,
                                      ctx->maxCmdSize,
                                      &ctx->nextData);
    } else {
        rval = Tss2_MU_TPM2B_MAX_BUFFER_Marshal(buffer, ctx->cmdBuffer,
                                                ctx->maxCmdSize,
                                                &ctx->nextData);
    }

    if (rval)
        return rval;

    rval = Tss2_MU_UINT16_Marshal(hashAlg, ctx->cmdBuffer,
                                  ctx->maxCmdSize,
                                  &ctx->nextData);
    if (rval)
        return rval;

    ctx->decryptAllowed = 1;
    ctx->encryptAllowed = 1;
    ctx->authAllowed = 1;

    return CommonPrepareEpilogue(ctx);
}

TSS2_RC Tss2_Sys_HMAC_Complete(
    TSS2_SYS_CONTEXT *sysContext,
    TPM2B_DIGEST *outHMAC)
{
    _TSS2_SYS_CONTEXT_BLOB *ctx = syscontext_cast(sysContext);
    TSS2_RC rval;

    if (!ctx)
        return TSS2_SYS_RC_BAD_REFERENCE;

    rval = CommonComplete(ctx);
    if (rval)
        return rval;

    return Tss2_MU_TPM2B_DIGEST_Unmarshal(ctx->cmdBuffer,
                                          ctx->maxCmdSize,
                                          &ctx->nextData, outHMAC);
}

TSS2_RC Tss2_Sys_HMAC(
    TSS2_SYS_CONTEXT *sysContext,
    TPMI_DH_OBJECT handle,
    TSS2L_SYS_AUTH_COMMAND const *cmdAuthsArray,
    const TPM2B_MAX_BUFFER *buffer,
    TPMI_ALG_HASH hashAlg,
    TPM2B_DIGEST *outHMAC,
    TSS2L_SYS_AUTH_RESPONSE *rspAuthsArray)
{
    _TSS2_SYS_CONTEXT_BLOB *ctx = syscontext_cast(sysContext);
    TSS2_RC rval;

    rval = Tss2_Sys_HMAC_Prepare(sysContext, handle, buffer, hashAlg);
    if (rval)
        return rval;

    rval = CommonOneCall(ctx, cmdAuthsArray, rspAuthsArray);
    if (rval)
        return rval;

    return Tss2_Sys_HMAC_Complete(sysContext, outHMAC);
}
01org/TPM2.0-TSS
src/tss2-sys/api/Tss2_Sys_HMAC.c
C
bsd-2-clause
2,992
/* SPDX-License-Identifier: BSD-2-Clause */
/***********************************************************************;
 * Copyright (c) 2015 - 2017, Intel Corporation
 * All rights reserved.
 ***********************************************************************/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "tss2_tpm2_types.h"
#include "tss2_mu.h"
#include "sysapi_util.h"

TSS2_RC Tss2_Sys_PolicyNvWritten_Prepare(
    TSS2_SYS_CONTEXT *sysContext,
    TPMI_SH_POLICY policySession,
    TPMI_YES_NO writtenSet)
{
    _TSS2_SYS_CONTEXT_BLOB *ctx = syscontext_cast(sysContext);
    TSS2_RC rval;

    if (!ctx)
        return TSS2_SYS_RC_BAD_REFERENCE;

    rval = CommonPreparePrologue(ctx, TPM2_CC_PolicyNvWritten);
    if (rval)
        return rval;

    rval = Tss2_MU_UINT32_Marshal(policySession, ctx->cmdBuffer,
                                  ctx->maxCmdSize,
                                  &ctx->nextData);
    if (rval)
        return rval;

    rval = Tss2_MU_UINT8_Marshal(writtenSet, ctx->cmdBuffer,
                                 ctx->maxCmdSize,
                                 &ctx->nextData);
    if (rval)
        return rval;

    ctx->decryptAllowed = 0;
    ctx->encryptAllowed = 0;
    ctx->authAllowed = 1;

    return CommonPrepareEpilogue(ctx);
}

TSS2_RC Tss2_Sys_PolicyNvWritten_Complete (
    TSS2_SYS_CONTEXT *sysContext)
{
    _TSS2_SYS_CONTEXT_BLOB *ctx = syscontext_cast(sysContext);

    if (!ctx)
        return TSS2_SYS_RC_BAD_REFERENCE;

    return CommonComplete(ctx);
}

TSS2_RC Tss2_Sys_PolicyNvWritten(
    TSS2_SYS_CONTEXT *sysContext,
    TPMI_SH_POLICY policySession,
    TSS2L_SYS_AUTH_COMMAND const *cmdAuthsArray,
    TPMI_YES_NO writtenSet,
    TSS2L_SYS_AUTH_RESPONSE *rspAuthsArray)
{
    _TSS2_SYS_CONTEXT_BLOB *ctx = syscontext_cast(sysContext);
    TSS2_RC rval;

    rval = Tss2_Sys_PolicyNvWritten_Prepare(sysContext, policySession, writtenSet);
    if (rval)
        return rval;

    rval = CommonOneCall(ctx, cmdAuthsArray, rspAuthsArray);
    if (rval)
        return rval;

    return Tss2_Sys_PolicyNvWritten_Complete(sysContext);
}
01org/TPM2.0-TSS
src/tss2-sys/api/Tss2_Sys_PolicyNvWritten.c
C
bsd-2-clause
2,116
// This file is part of the Noise++ Editor.
// Copyright (c) 2008, Urs C. Hanselmann
//
// The Noise++ Editor is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Noise++ Editor is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with the Noise++ Editor. If not, see <http://www.gnu.org/licenses/>.
//

#include "EditorTurbulenceModule.h"

std::string EditorTurbulenceModule::FACTORY_NAME = "Turbulence";

EditorTurbulenceModule::EditorTurbulenceModule() : EditorModule(1)
{
}

void EditorTurbulenceModule::fillPropertyGrid (wxPropertyGrid *pg)
{
    pg->Append( wxPropertyCategory(wxT("Source modules")) );
    appendSourceModuleProperty (pg, wxT("Source module"), mSourceModules[0]);

    pg->Append( wxPropertyCategory(wxT("Parameters")) );
    pg->Append( wxFloatProperty(wxT("Power"), wxPG_LABEL, mModule.getPower()) );
    pg->Append( wxFloatProperty(wxT("Frequency"), wxPG_LABEL, mModule.getFrequency()) );
    pg->Append( wxIntProperty(wxT("Roughness"), wxPG_LABEL, mModule.getRoughness()) );
    pg->Append( wxIntProperty(wxT("Seed"), wxPG_LABEL, mModule.getSeed()) );
    appendQualityProperty (pg, mModule.getQuality());
}

void EditorTurbulenceModule::onPropertyChange (wxPropertyGrid *pg, const wxString &name)
{
    if (name == _("Source module"))
    {
        wxString val = pg->GetPropertyValueAsString (name);
        mSourceModules[0] = val;
    }
    else if (name == _("Power"))
    {
        double val = pg->GetPropertyValueAsDouble (name);
        mModule.setPower (val);
    }
    else if (name == _("Frequency"))
    {
        double val = pg->GetPropertyValueAsDouble (name);
        mModule.setFrequency (val);
    }
    else if (name == _("Roughness"))
    {
        int val = pg->GetPropertyValueAsInt (name);
        mModule.setRoughness (val);
    }
    else if (name == _("Seed"))
    {
        int val = pg->GetPropertyValueAsInt (name);
        mModule.setSeed (val);
    }
    else if (name == _("Quality"))
    {
        int val = pg->GetPropertyValueAsInt (name);
        mModule.setQuality (val);
    }
}

bool EditorTurbulenceModule::validate (wxPropertyGrid *pg)
{
    bool valid = true;
    EditorModule *module = NULL;

    module = getSourceModule(0);
    if (module && module->validateTree(this))
    {
        mModule.setSourceModule(0, module->getModule());
    }
    valid = setValid (pg, "Source module", module != NULL && module->validateTree(this) && module->validate(NULL)) && valid;

    valid = setValid (pg, "Power", mModule.getPower() >= 0) && valid;
    valid = setValid (pg, "Roughness", mModule.getRoughness() >= 0) && valid;
    valid = setValid (pg, "Frequency", mModule.getFrequency() > 0) && valid;

    return valid;
}

void EditorTurbulenceModule::writeProperties (TiXmlElement *element)
{
    TiXmlElement *prop;

    writeSourceModules (element);

    prop = new TiXmlElement ("Power");
    prop->SetDoubleAttribute ("value", mModule.getPower());
    element->LinkEndChild (prop);

    prop = new TiXmlElement ("Roughness");
    prop->SetDoubleAttribute ("value", mModule.getRoughness());
    element->LinkEndChild (prop);

    prop = new TiXmlElement ("Frequency");
    prop->SetDoubleAttribute ("value", mModule.getFrequency());
    element->LinkEndChild (prop);

    prop = new TiXmlElement ("Seed");
    prop->SetAttribute ("value", mModule.getSeed());
    element->LinkEndChild (prop);

    prop = new TiXmlElement ("Quality");
    prop->SetAttribute ("value", mModule.getQuality());
    element->LinkEndChild (prop);
}

bool EditorTurbulenceModule::readProperties (TiXmlElement *element)
{
    TiXmlElement *prop;
    double dval;
    int ival;

    if (!readSourceModules (element))
        return false;

    prop = element->FirstChildElement ("Power");
    if (prop == NULL || prop->QueryDoubleAttribute ("value", &dval) != TIXML_SUCCESS)
        return false;
    mModule.setPower (dval);

    prop = element->FirstChildElement ("Frequency");
    if (prop == NULL || prop->QueryDoubleAttribute ("value", &dval) != TIXML_SUCCESS)
        return false;
    mModule.setFrequency (dval);

    prop = element->FirstChildElement ("Roughness");
    if (prop == NULL || prop->QueryIntAttribute ("value", &ival) != TIXML_SUCCESS)
        return false;
    mModule.setRoughness (ival);

    prop = element->FirstChildElement ("Seed");
    if (prop == NULL || prop->QueryIntAttribute ("value", &ival) != TIXML_SUCCESS)
        return false;
    mModule.setSeed (ival);

    prop = element->FirstChildElement ("Quality");
    if (prop != NULL)
    {
        if (prop->QueryIntAttribute ("value", &ival) != TIXML_SUCCESS)
            return false;
        mModule.setQuality (ival);
    }

    return true;
}
ColinGilbert/noisepp-full
editor/modules/EditorTurbulenceModule.cpp
C++
bsd-2-clause
4,774
"""SCons.Tool.Packaging.zip The zip SRC packager. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/packaging/zip.py 3897 2009/01/13 06:45:54 scons" from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot def package(env, target, source, PACKAGEROOT, **kw): bld = env['BUILDERS']['Zip'] bld.set_suffix('.zip') target, source = stripinstallbuilder(target, source, env) target, source = putintopackageroot(target, source, env, PACKAGEROOT) return bld(env, target, source)
rwatson/chromium-capsicum
third_party/scons/scons-local/SCons/Tool/packaging/zip.py
Python
bsd-3-clause
1,658
//
//
//
//
#include<stdio.h>
#include<string.h>

int main(void){
    char list[21];
    int i=0, counter=0, totChar;

    printf("Please enter a string > ");
    scanf("%s", list);
    totChar = strlen(list);

    // Walk the string and count every vowel character (a, e, i, o, u, y);
    // each vowel found is treated as one syllable.
    while(i<totChar){
        if((list[i]=='i')||(list[i]=='a')||(list[i]=='e')||(list[i]=='o')||(list[i]=='u')||(list[i]=='y')){
            counter = counter + 1;
        }
        i++;
    }
    printf("The number of syllables is %d.\n",counter);
    //
    return(0);
}
ProgramRepair/IntroClass
syllables/d5059e2b1493f91b32bb0c2c846d8461c50356f709a91792b6b625e112675de4edac2a09fa627d58c4651c662bbcf2c477660469b9327ed9427b43c25e4e070c/002/syllables.c
C
bsd-3-clause
451
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "xwalk/test/xwalkdriver/xwalk/version.h"

#include "base/strings/stringprintf.h"

namespace {

// This variable must be able to be found and parsed by the upload script.
const int kMinimumSupportedXwalkVersion[] = {29, 0, 1545, 0};

}  // namespace

const int kMinimumSupportedXwalkBuildNo = kMinimumSupportedXwalkVersion[2];

std::string GetMinimumSupportedXwalkVersion() {
  return base::StringPrintf(
      "%d.%d.%d.%d",
      kMinimumSupportedXwalkVersion[0],
      kMinimumSupportedXwalkVersion[1],
      kMinimumSupportedXwalkVersion[2],
      kMinimumSupportedXwalkVersion[3]);
}
hgl888/crosswalk-web-driver
xwalkdriver/xwalk/version.cc
C++
bsd-3-clause
767
/*
 * Copyright (c) 2015-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 */

package com.facebook.imagepipeline.producers;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.Executor;

import android.graphics.Rect;
import android.media.ExifInterface;
import android.util.Pair;

import com.facebook.common.internal.ImmutableMap;
import com.facebook.common.internal.VisibleForTesting;
import com.facebook.common.references.CloseableReference;
import com.facebook.imagepipeline.memory.PooledByteBuffer;
import com.facebook.imagepipeline.memory.PooledByteBufferFactory;
import com.facebook.imagepipeline.request.ImageRequest;
import com.facebook.imageformat.ImageFormat;
import com.facebook.imageutils.JfifUtil;

/**
 * A producer that retrieves exif thumbnails.
 *
 * <p>At present, these thumbnails are retrieved on the java heap before being put into native
 * memory.
 */
public class LocalExifThumbnailProducer implements
    Producer<Pair<CloseableReference<PooledByteBuffer>, ImageTransformMetaData>> {

  @VisibleForTesting static final String PRODUCER_NAME = "LocalExifThumbnailProducer";
  @VisibleForTesting static final String CREATED_THUMBNAIL = "createdThumbnail";

  private final Executor mExecutor;
  private final PooledByteBufferFactory mPooledByteBufferFactory;

  public LocalExifThumbnailProducer(
      Executor executor,
      PooledByteBufferFactory pooledByteBufferFactory) {
    mExecutor = executor;
    mPooledByteBufferFactory = pooledByteBufferFactory;
  }

  @Override
  public void produceResults(
      final Consumer<Pair<CloseableReference<PooledByteBuffer>, ImageTransformMetaData>> consumer,
      final ProducerContext producerContext) {
    final ProducerListener listener = producerContext.getListener();
    final String requestId = producerContext.getId();
    final ImageRequest imageRequest = producerContext.getImageRequest();

    final StatefulProducerRunnable cancellableProducerRunnable =
        new StatefulProducerRunnable<
            Pair<CloseableReference<PooledByteBuffer>, ImageTransformMetaData>>(
            consumer,
            listener,
            PRODUCER_NAME,
            requestId) {

          @Override
          protected Pair<CloseableReference<PooledByteBuffer>, ImageTransformMetaData> getResult()
              throws Exception {
            final ExifInterface exifInterface =
                getExifInterface(imageRequest.getSourceFile().getPath());
            if (!exifInterface.hasThumbnail()) {
              return null;
            }

            byte[] bytes = exifInterface.getThumbnail();
            PooledByteBuffer pooledByteBuffer = mPooledByteBufferFactory.newByteBuffer(bytes);
            ImageTransformMetaData imageTransformMetaData =
                getImageTransformMetaData(pooledByteBuffer, exifInterface);
            return Pair.create(CloseableReference.of(pooledByteBuffer), imageTransformMetaData);
          }

          @Override
          protected void disposeResult(
              Pair<CloseableReference<PooledByteBuffer>, ImageTransformMetaData> result) {
            if (result != null) {
              CloseableReference.closeSafely(result.first);
            }
          }

          @Override
          protected Map<String, String> getExtraMapOnSuccess(
              final Pair<CloseableReference<PooledByteBuffer>, ImageTransformMetaData> result) {
            return ImmutableMap.of(CREATED_THUMBNAIL, Boolean.toString(result != null));
          }
        };
    producerContext.addCallbacks(
        new BaseProducerContextCallbacks() {
          @Override
          public void onCancellationRequested() {
            cancellableProducerRunnable.cancel();
          }
        });
    mExecutor.execute(cancellableProducerRunnable);
  }

  @VisibleForTesting ExifInterface getExifInterface(String path) throws IOException {
    return new ExifInterface(path);
  }

  private ImageTransformMetaData getImageTransformMetaData(
      PooledByteBuffer imageRef,
      ExifInterface exifInterface) {
    ImageTransformMetaData.Builder builder = ImageTransformMetaData.newBuilder()
        .setImageFormat(ImageFormat.JPEG);
    builder.setRotationAngle(getRotationAngle(exifInterface));
    Rect dimensions = JfifUtil.getDimensions(imageRef.getStream());
    if (dimensions != null) {
      builder.setWidth(dimensions.width());
      builder.setHeight(dimensions.height());
    }
    return builder.build();
  }

  // Gets the correction angle based on the image's orientation
  private int getRotationAngle(final ExifInterface exifInterface) {
    return JfifUtil.getAutoRotateAngleFromOrientation(
        Integer.parseInt(exifInterface.getAttribute(ExifInterface.TAG_ORIENTATION)));
  }
}
eity0323/fresco
imagepipeline/src/main/java/com/facebook/imagepipeline/producers/LocalExifThumbnailProducer.java
Java
bsd-3-clause
4,967
require 'test/unit'
require 'mocha/test_unit'
require 'webmock/test_unit'

require_relative '../lib/netki/netki'

class TestCreatePartner < Test::Unit::TestCase
  def setup
    @resp_data = {
      "success" => true,
      "partner" => {
        "id" => 'new_partner_id',
        "name" => 'Test Partner'
      }
    }
  end

  def test_go_right
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/admin/partner/Test%20Partner', 'POST').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.create_new_partner('Test Partner')

    # Asserts
    assert_equal('new_partner_id', ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestGetPartners < Test::Unit::TestCase
  def setup
    @resp_data = {
      "success" => true,
      "partners" => [{
        "id" => 'new_partner_id',
        "name" => 'Test Partner'
      }]
    }
  end

  def test_go_right
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/admin/partner', 'GET').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.get_partners

    # Asserts
    assert_equal([{"id" => 'new_partner_id', "name" => 'Test Partner'}], ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestDeletePartner < Test::Unit::TestCase
  def setup
    @resp_data = {
      "success" => false,
      "message" => 'Error Message'
    }
  end

  def test_go_right
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/admin/partner/Test%20Partner', 'DELETE').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    @obj.delete_partner('Test Partner')

    # Asserts
    assert_true(true)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestCreateDomain < Test::Unit::TestCase
  def setup
    @resp_data = { "success" => true }
  end

  def test_go_right_no_partner_id
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/partner/domain/domain.com', 'POST', '{}').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.create_new_domain('domain.com')

    # Asserts
    assert_true(ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end

  def test_go_right_with_partner_id
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/partner/domain/domain.com', 'POST', '{"partner_id":"sub_partner_id"}').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.create_new_domain('domain.com', 'sub_partner_id')

    # Asserts
    assert_true(ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestGetDomains < Test::Unit::TestCase
  def setup
    @resp_data = {
      "success" => true,
      "domains" => [{ "domain_name" => 'domain.com' }]
    }
  end

  def test_go_right
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/api/domain', 'GET').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.get_domains

    # Asserts
    assert_equal([{"domain_name" => 'domain.com'}], ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestGetDomainStatus < Test::Unit::TestCase
  def setup
    @resp_data = {
      "success" => true,
      "domains" => [{ "domain_name" => 'domain.com', "status" => 'OK' }]
    }
  end

  def test_go_right_empty_domain_arg
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/partner/domain', 'GET').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.get_domain_status

    # Asserts
    assert_equal([{"domain_name" => 'domain.com', "status" => "OK"}], ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end

  def test_go_right_with_domain
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/partner/domain/domain.com', 'GET').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.get_domain_status('domain.com')

    # Asserts
    assert_equal([{"domain_name" => 'domain.com', "status" => "OK"}], ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestGetDomainDnssec < Test::Unit::TestCase
  def setup
    @resp_data = { "success" => true }
  end

  def test_go_right
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/partner/domain/dnssec/domain.com', 'GET').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.get_domain_dnssec('domain.com')

    # Asserts
    assert_equal({"success" => true}, ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end

class TestDeleteDomain < Test::Unit::TestCase
  def setup
    @resp_data = { "success" => true }
  end

  def test_go_right
    # Setup Data
    Netki.expects(:process_request).with('api_key', 'partner_id', 'api_url/v1/partner/domain/domain.com', 'DELETE').returns(@resp_data)

    # Create Netki Object and Call Tested Method
    @obj = Netki::NetkiPartner.new('partner_id', 'api_key', 'api_url')
    ret_val = @obj.delete_domain('domain.com')

    # Asserts
    assert_true(ret_val)

    # Unstub
    Netki.unstub(:process_request)
  end
end
netkicorp/ruby-partner-client
test/test_netki_partner.rb
Ruby
bsd-3-clause
6,274
var MarkerDialog = Backbone.View.extend({
    className: "marker-dialog",

    events: {
        "submit" : "submit",
        "click .save-button" : "submit",
        "click .close-button" : "close"
    },

    initialize: function(options) {
        this.markers = options.markers;
        this.type = options.type;
        this.event = options.event;
    },

    render: function() {
        this.$el.append($("#marker-dialog-template").html());
        $(document.body).append(this.$el);
        this.modal = this.$el.find(".modal");
        this.modal.modal("show");
        return this;
    },

    close: function() {
        this.modal.modal("hide");
    },

    submit : function() {
        this.markers.create({
            type: this.type,
            latitude: this.event.latLng.lat(),
            longitude: this.event.latLng.lng(),
            title: this.$el.find("#title").val(),
            description: this.$el.find("#description").val()
        }, {wait: true});

        this.close();
    }
});
eladkarakuli/anyway
static/js/markerdialog.js
JavaScript
bsd-3-clause
844
<?php
namespace HtProfileImage\Service;

use ZfcUser\Entity\UserInterface;

interface ProfileImageServiceInterface
{
    /**
     * Stores user image if valid
     *
     * @param UserInterface $user
     * @param array $files
     * @return bool
     */
    public function storeImage(UserInterface $user, array $files);

    /**
     * Gets user image
     *
     * @param UserInterface $user
     * @param string $filterAlias Filter Alias
     * @return \Imagine\Image\ImageInterface
     */
    public function getUserImage(UserInterface $user, $filterAlias = null);

    /**
     * Deletes if user image exists
     *
     * @param UserInterface $user
     * @return void
     */
    public function deleteUserImage(UserInterface $user);
}
thiagormoreira/brikbrak
vendor/hrevert/ht-profile-image/src/Service/ProfileImageServiceInterface.php
PHP
bsd-3-clause
797