text
stringlengths
4
5.48M
meta
stringlengths
14
6.54k
#!/bin/sh # This collection of scripts will take settings from /etc/config/meshwizard, /etc/config/freifunk # and /etc/config/profile_<community> and setup the router to participate in wireless mesh networks # Copyright 2011 Manuel Munz <freifunk at somakoma dot de> # Licensed under the Apache License, Version 2.0 (the "License") # You may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 . /lib/functions.sh echo " /* Meshwizard 0.1.0 */ " # config export dir="/usr/bin/meshwizard" . $dir/functions.sh [ -f /proc/net/ipv6_route ] && export has_ipv6=1 # Check which packages we have installed export has_luci=FALSE opkg list_installed |grep luci-mod-admin > /dev/null && export has_luci=TRUE export has_luci_splash=FALSE opkg list_installed |grep luci-app-splash > /dev/null && export has_luci_splash=TRUE # Check whether we want to cleanup/restore uci config before setting new options cleanup=$(uci -q get meshwizard.general.cleanup) [ "$cleanup" == 1 ] && $dir/helpers/restore_default_config.sh # Rename wifi interfaces $dir/helpers/rename-wifi.sh # Get community community=$(uci -q get meshwizard.community.name || uci -q get freifunk.community.name) [ -z "$community" ] && echo "Error: Community is not set in /etc/config/freifunk, aborting now." && exit 1 export community="$community" echo $community # Get a list of networks we need to setup networks=$(uci show meshwizard.netconfig | grep -v "netconfig=" | sed -e 's/meshwizard.netconfig\.\(.*\)\_.*/\1/' |sort|uniq) export networks [ -z "$networks" ] && echo "Error: No networks to setup could be found in /etc/config/meshwizard, aborting now." && exit 1 # Read default values (first from /etc/config/freifunk, then from /etc/config/profile_$community # then /etc/config/meshwizard # last will overwrite first $dir/helpers/read_defaults.sh $community > /tmp/meshwizard.tmp while read line; do export "${line//\"/}" done < /tmp/meshwizard.tmp # Do config $dir/helpers/initial_config.sh $dir/helpers/setup_dnsmasq.sh $dir/helpers/setup_system.sh $dir/helpers/setup_olsrd.sh $dir/helpers/setup_firewall.sh $dir/helpers/setup_ssh.sh $dir/helpers/setup_uhttpd.sh $dir/helpers/setup_widgets.sh if [ "$wan_proto" == "static" ] && [ -n "$wan_ip4addr" ] && [ -n "$wan_netmask" ]; then $dir/helpers/setup_wan_static.sh fi if [ "$wan_proto" == "dhcp" ]; then $dir/helpers/setup_wan_dhcp.sh fi if [ "$lan_proto" == "static" ] && [ -n "$lan_ip4addr" ] && [ -n "$lan_netmask" ]; then $dir/helpers/setup_lan_static.sh fi if [ "$ipv6_enabled" == 1 ] && [ "$has_ipv6" = 1 ]; then $dir/helpers/setup_lan_ipv6.sh # Setup auto-ipv6 if [ -n "$(echo "$ipv6_config" |grep auto-ipv6)" ]; then $dir/helpers/setup_auto-ipv6.sh fi fi # Setup policyrouting if internet sharing is disabled and wan is not used for olsrd # Always disable it first to make sure its disabled when the user decied to share his internet uci set freifunk-policyrouting.pr.enable=0 if [ ! "$general_sharenet" == 1 ] && [ ! "$(uci -q get meshwizard.netconfig.wan_proto)" == "olsr" ]; then $dir/helpers/setup_policyrouting.sh fi # Configure found networks for net in $networks; do # radioX devices need to be renamed netrenamed="${net/radio/wireless}" export netrenamed if [ ! "$net" == "wan" ] && [ ! "$net" == "lan" ]; then $dir/helpers/setup_wifi.sh $net # check if this net supports vap /sbin/wifi # wifi needs to be up for the check export supports_vap="0" type="$(uci -q get wireless.$net.type)" [ -n "$type" ] && $dir/helpers/supports_vap.sh $net $type && export supports_vap=1 if [ "$supports_vap" = 1 ]; then $dir/helpers/setup_wifi_vap.sh $net fi fi $dir/helpers/setup_network.sh $net $dir/helpers/setup_olsrd_interface.sh $net net_dhcp=$(uci -q get meshwizard.netconfig.${net}_dhcp) if [ "$net_dhcp" == 1 ]; then $dir/helpers/setup_dhcp.sh $net fi $dir/helpers/setup_splash.sh $net $dir/helpers/setup_firewall_interface.sh $net if [ -n "$(echo "$ipv6_config" |grep auto-ipv6)" ]; then $dir/helpers/setup_auto-ipv6-interface.sh $net fi done ##### postinstall script [ -f /etc/rc.local.meshkitpostinstall ] && /etc/rc.local.meshkitpostinstall ##### Reboot the router (because simply restarting services gave errors) echo "+ The wizard has finished and the router will reboot now." reboot
{'content_hash': '9bbac7db59d459d160096f636d8233da', 'timestamp': '', 'source': 'github', 'line_count': 137, 'max_line_length': 125, 'avg_line_length': 31.94890510948905, 'alnum_prop': 0.7018505825908157, 'repo_name': 'phi-psi/luci', 'id': 'f7ee7cf9debe374d02d0394931b10389d4fe10f3', 'size': '4377', 'binary': False, 'copies': '12', 'ref': 'refs/heads/master', 'path': 'contrib/package/meshwizard/files/usr/bin/meshwizard/wizard.sh', 'mode': '33261', 'license': 'apache-2.0', 'language': [{'name': 'Awk', 'bytes': '526'}, {'name': 'C', 'bytes': '1265490'}, {'name': 'C#', 'bytes': '42820'}, {'name': 'C++', 'bytes': '36558'}, {'name': 'CSS', 'bytes': '58315'}, {'name': 'Java', 'bytes': '49574'}, {'name': 'JavaScript', 'bytes': '53372'}, {'name': 'Lua', 'bytes': '1648190'}, {'name': 'Perl', 'bytes': '48595'}, {'name': 'Shell', 'bytes': '143241'}, {'name': 'Visual Basic', 'bytes': '33030'}]}
package net.bytebuddy.instrumentation.method.bytecode.bind.annotation; import net.bytebuddy.instrumentation.attribute.annotation.AnnotationDescription; import net.bytebuddy.instrumentation.method.MethodDescription; import net.bytebuddy.instrumentation.method.bytecode.bind.MethodDelegationBinder; import java.lang.annotation.*; /** * Defines a binding priority for a target method. If two target methods can be bound to a source method, * the one with the higher priority will be selected. */ @Documented @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.METHOD) public @interface BindingPriority { /** * The default priority for methods not carrying the * {@link net.bytebuddy.instrumentation.method.bytecode.bind.annotation.BindingPriority} * annotation. */ double DEFAULT = 1d; /** * The binding priority for the annotated method. A method of higher priority will be preferred over a method * of lower priority. * * @return The priority for the annotated method. */ double value(); /** * An ambiguity resolver that considers the priority of a method as defined by the * {@link net.bytebuddy.instrumentation.method.bytecode.bind.annotation.BindingPriority} * annotation. */ enum Resolver implements MethodDelegationBinder.AmbiguityResolver { /** * The singleton instance. */ INSTANCE; /** * Resolves the explicitly stated binding priority of a method or returns the default value if no such * explicit information can be found. * * @param bindingPriority The annotation of the method or {@code null} if no such annotation was found. * @return The factual priority of the method under investigation. */ private static double resolve(AnnotationDescription.Loadable<BindingPriority> bindingPriority) { return bindingPriority == null ? DEFAULT : bindingPriority.loadSilent().value(); } @Override public Resolution resolve(MethodDescription source, MethodDelegationBinder.MethodBinding left, MethodDelegationBinder.MethodBinding right) { double leftPriority = resolve(left.getTarget().getDeclaredAnnotations().ofType(BindingPriority.class)); double rightPriority = resolve(right.getTarget().getDeclaredAnnotations().ofType(BindingPriority.class)); if (leftPriority == rightPriority) { return Resolution.AMBIGUOUS; } else if (leftPriority < rightPriority) { return Resolution.RIGHT; } else { return Resolution.LEFT; } } @Override public String toString() { return "BindingPriority.Resolver." + name(); } } }
{'content_hash': '738848238806dda4ea9ecff7ef923b09', 'timestamp': '', 'source': 'github', 'line_count': 76, 'max_line_length': 117, 'avg_line_length': 38.0, 'alnum_prop': 0.6665512465373962, 'repo_name': 'RobAustin/byte-buddy', 'id': 'a1ba3eed5a314a1aa27ce8e082a13f033106d7bb', 'size': '2888', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'byte-buddy-dep/src/main/java/net/bytebuddy/instrumentation/method/bytecode/bind/annotation/BindingPriority.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Java', 'bytes': '4219030'}]}
'use strict'; goog.provide('Blockly.Msg.zh-hans'); goog.require('Blockly.Msg'); Blockly.Msg.UNION_DEFINE_NAME = "myUnion"; // untranslated Blockly.Msg.PROCEDURES_DEFRETURN_TITLE = Blockly.Msg.PROCEDURES_DEFNORETURN_TITLE; Blockly.Msg.LISTS_GET_SUBLIST_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST; Blockly.Msg.LISTS_SET_INDEX_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST; Blockly.Msg.PROCEDURES_DEFRETURN_PROCEDURE = Blockly.Msg.PROCEDURES_DEFNORETURN_PROCEDURE; Blockly.Msg.VARIABLES_SET_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME; Blockly.Msg.LISTS_CREATE_WITH_ITEM_TITLE = Blockly.Msg.VARIABLES_DEFAULT_NAME; Blockly.Msg.MATH_CHANGE_TITLE_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME; Blockly.Msg.VARIABLES_GET_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME; Blockly.Msg.PROCEDURES_DEFRETURN_DO = Blockly.Msg.PROCEDURES_DEFNORETURN_DO; Blockly.Msg.LISTS_GET_INDEX_HELPURL = Blockly.Msg.LISTS_INDEX_OF_HELPURL; Blockly.Msg.TEXT_CREATE_JOIN_ITEM_TITLE_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME; Blockly.Msg.CONTROLS_IF_MSG_THEN = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO; Blockly.Msg.LISTS_INDEX_OF_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST; Blockly.Msg.PROCEDURES_CALLRETURN_CALL = Blockly.Msg.PROCEDURES_CALLNORETURN_CALL; Blockly.Msg.LISTS_GET_INDEX_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST; Blockly.Msg.CONTROLS_FOR_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO; Blockly.Msg.CONTROLS_FOREACH_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO; Blockly.Msg.CONTROLS_IF_IF_TITLE_IF = Blockly.Msg.CONTROLS_IF_MSG_IF; Blockly.Msg.CONTROLS_WHILEUNTIL_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO; Blockly.Msg.CONTROLS_IF_ELSEIF_TITLE_ELSEIF = Blockly.Msg.CONTROLS_IF_MSG_ELSEIF; Blockly.Msg.TEXT_APPEND_VARIABLE = Blockly.Msg.VARIABLES_DEFAULT_NAME; Blockly.Msg.CONTROLS_IF_ELSE_TITLE_ELSE = Blockly.Msg.CONTROLS_IF_MSG_ELSE;
{'content_hash': '655063eaf7139724879691a289238378', 'timestamp': '', 'source': 'github', 'line_count': 29, 'max_line_length': 90, 'avg_line_length': 62.37931034482759, 'alnum_prop': 0.8009950248756219, 'repo_name': 'cra16/cake-core', 'id': 'a62d8f6124ca542968895bb3f0050583c6f5fb32', 'size': '1868', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'msg/js/zh-hans.js', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '14737'}, {'name': 'HTML', 'bytes': '120656'}, {'name': 'JavaScript', 'bytes': '2463480'}, {'name': 'POV-Ray SDL', 'bytes': '4775'}, {'name': 'Python', 'bytes': '64641'}]}
/* @flow */ import Container from './Container'; import decorators from './decorators'; const container = new Container(); export const { service, inject } = decorators(container); export default container;
{'content_hash': 'c781a96c39f4f729c476e0f27fd56880', 'timestamp': '', 'source': 'github', 'line_count': 9, 'max_line_length': 57, 'avg_line_length': 23.333333333333332, 'alnum_prop': 0.7285714285714285, 'repo_name': 'zackjs/di', 'id': 'ad042bda69183860766aab4d904e1a5e8620dbfd', 'size': '210', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'src/index.js', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'JavaScript', 'bytes': '8442'}]}
package org.elasticsearch.index.snapshots.blobstore; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotException; import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.blobstore.BlobStoreFormat; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat; import org.elasticsearch.repositories.blobstore.LegacyBlobStoreFormat; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.testBlobPrefix; /** * Blob store based implementation of IndexShardRepository */ public class BlobStoreIndexShardRepository extends AbstractComponent implements IndexShardRepository { private static final int BUFFER_SIZE = 4096; private BlobStore blobStore; private BlobPath basePath; private final String repositoryName; private ByteSizeValue chunkSize; private final IndicesService indicesService; private final ClusterService clusterService; private RateLimiter snapshotRateLimiter; private RateLimiter restoreRateLimiter; private RateLimiterListener rateLimiterListener; private RateLimitingInputStream.Listener snapshotThrottleListener; private RateLimitingInputStream.Listener restoreThrottleListener; private boolean compress; private final ParseFieldMatcher parseFieldMatcher; protected static final String LEGACY_SNAPSHOT_PREFIX = "snapshot-"; protected static final String LEGACY_SNAPSHOT_NAME_FORMAT = LEGACY_SNAPSHOT_PREFIX + "%s"; protected static final String SNAPSHOT_PREFIX = "snap-"; protected static final String SNAPSHOT_NAME_FORMAT = SNAPSHOT_PREFIX + "%s.dat"; protected static final String SNAPSHOT_CODEC = "snapshot"; protected static final String SNAPSHOT_INDEX_PREFIX = "index-"; protected static final String SNAPSHOT_INDEX_NAME_FORMAT = SNAPSHOT_INDEX_PREFIX + "%s"; protected static final String SNAPSHOT_INDEX_CODEC = "snapshots"; protected static final String DATA_BLOB_PREFIX = "__"; private ChecksumBlobStoreFormat<BlobStoreIndexShardSnapshot> indexShardSnapshotFormat; private LegacyBlobStoreFormat<BlobStoreIndexShardSnapshot> indexShardSnapshotLegacyFormat; private ChecksumBlobStoreFormat<BlobStoreIndexShardSnapshots> indexShardSnapshotsFormat; @Inject public BlobStoreIndexShardRepository(Settings settings, RepositoryName repositoryName, IndicesService indicesService, ClusterService clusterService) { super(settings); this.parseFieldMatcher = new ParseFieldMatcher(settings); this.repositoryName = repositoryName.name(); this.indicesService = indicesService; this.clusterService = clusterService; } /** * Called by {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} on repository startup * * @param blobStore blob store * @param basePath base path to blob store * @param chunkSize chunk size */ public void initialize(BlobStore blobStore, BlobPath basePath, ByteSizeValue chunkSize, RateLimiter snapshotRateLimiter, RateLimiter restoreRateLimiter, final RateLimiterListener rateLimiterListener, boolean compress) { this.blobStore = blobStore; this.basePath = basePath; this.chunkSize = chunkSize; this.snapshotRateLimiter = snapshotRateLimiter; this.restoreRateLimiter = restoreRateLimiter; this.rateLimiterListener = rateLimiterListener; this.snapshotThrottleListener = nanos -> rateLimiterListener.onSnapshotPause(nanos); this.restoreThrottleListener = nanos -> rateLimiterListener.onRestorePause(nanos); this.compress = compress; indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress()); indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher); indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots.PROTO, parseFieldMatcher, isCompress()); } /** * {@inheritDoc} */ @Override public void snapshot(SnapshotId snapshotId, ShardId shardId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { SnapshotContext snapshotContext = new SnapshotContext(snapshotId, shardId, snapshotStatus); snapshotStatus.startTime(System.currentTimeMillis()); try { snapshotContext.snapshot(snapshotIndexCommit); snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime()); snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE); } catch (Throwable e) { snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime()); snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); snapshotStatus.failure(ExceptionsHelper.detailedMessage(e)); if (e instanceof IndexShardSnapshotFailedException) { throw (IndexShardSnapshotFailedException) e; } else { throw new IndexShardSnapshotFailedException(shardId, e); } } } /** * {@inheritDoc} */ @Override public void restore(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { final RestoreContext snapshotContext = new RestoreContext(snapshotId, version, shardId, snapshotShardId, recoveryState); try { snapshotContext.restore(); } catch (Throwable e) { throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId.getSnapshot() + "]", e); } } /** * {@inheritDoc} */ @Override public IndexShardSnapshotStatus snapshotStatus(SnapshotId snapshotId, Version version, ShardId shardId) { Context context = new Context(snapshotId, version, shardId); BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); IndexShardSnapshotStatus status = new IndexShardSnapshotStatus(); status.updateStage(IndexShardSnapshotStatus.Stage.DONE); status.startTime(snapshot.startTime()); status.files(snapshot.numberOfFiles(), snapshot.totalSize()); // The snapshot is done which means the number of processed files is the same as total status.processedFiles(snapshot.numberOfFiles(), snapshot.totalSize()); status.time(snapshot.time()); return status; } @Override public void verify(String seed) { BlobContainer testBlobContainer = blobStore.blobContainer(basePath.add(testBlobPrefix(seed))); DiscoveryNode localNode = clusterService.localNode(); if (testBlobContainer.blobExists("master.dat")) { try { testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", new BytesArray(seed)); } catch (IOException exp) { throw new RepositoryVerificationException(repositoryName, "store location [" + blobStore + "] is not accessible on the node [" + localNode + "]", exp); } } else { throw new RepositoryVerificationException(repositoryName, "a file written by master to the store [" + blobStore + "] cannot be accessed on the node [" + localNode + "]. " + "This might indicate that the store [" + blobStore + "] is not shared between this node and the master node or " + "that permissions on the store don't allow reading files written by the master node"); } } /** * Delete shard snapshot * * @param snapshotId snapshot id * @param shardId shard id */ public void delete(SnapshotId snapshotId, Version version, ShardId shardId) { Context context = new Context(snapshotId, version, shardId, shardId); context.delete(); } @Override public String toString() { return "BlobStoreIndexShardRepository[" + "[" + repositoryName + "], [" + blobStore + ']' + ']'; } /** * Returns true if metadata files should be compressed * * @return true if compression is needed */ protected boolean isCompress() { return compress; } BlobStoreFormat<BlobStoreIndexShardSnapshot> indexShardSnapshotFormat(Version version) { if (BlobStoreRepository.legacyMetaData(version)) { return indexShardSnapshotLegacyFormat; } else { return indexShardSnapshotFormat; } } /** * Context for snapshot/restore operations */ private class Context { protected final SnapshotId snapshotId; protected final ShardId shardId; protected final BlobContainer blobContainer; protected final Version version; public Context(SnapshotId snapshotId, Version version, ShardId shardId) { this(snapshotId, version, shardId, shardId); } public Context(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId) { this.snapshotId = snapshotId; this.version = version; this.shardId = shardId; blobContainer = blobStore.blobContainer(basePath.add("indices").add(snapshotShardId.getIndex()).add(Integer.toString(snapshotShardId.getId()))); } /** * Delete shard snapshot */ public void delete() { final Map<String, BlobMetaData> blobs; try { blobs = blobContainer.listBlobs(); } catch (IOException e) { throw new IndexShardSnapshotException(shardId, "Failed to list content of gateway", e); } Tuple<BlobStoreIndexShardSnapshots, Integer> tuple = buildBlobStoreIndexShardSnapshots(blobs); BlobStoreIndexShardSnapshots snapshots = tuple.v1(); int fileListGeneration = tuple.v2(); try { indexShardSnapshotFormat(version).delete(blobContainer, snapshotId.getSnapshot()); } catch (IOException e) { logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId); } // Build a list of snapshots that should be preserved List<SnapshotFiles> newSnapshotsList = new ArrayList<>(); for (SnapshotFiles point : snapshots) { if (!point.snapshot().equals(snapshotId.getSnapshot())) { newSnapshotsList.add(point); } } // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index finalize(newSnapshotsList, fileListGeneration + 1, blobs); } /** * Loads information about shard snapshot */ public BlobStoreIndexShardSnapshot loadSnapshot() { try { return indexShardSnapshotFormat(version).read(blobContainer, snapshotId.getSnapshot()); } catch (IOException ex) { throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex); } } /** * Removes all unreferenced files from the repository and writes new index file * * We need to be really careful in handling index files in case of failures to make sure we have index file that * points to files that were deleted. * * * @param snapshots list of active snapshots in the container * @param fileListGeneration the generation number of the snapshot index file * @param blobs list of blobs in the container */ protected void finalize(List<SnapshotFiles> snapshots, int fileListGeneration, Map<String, BlobMetaData> blobs) { BlobStoreIndexShardSnapshots newSnapshots = new BlobStoreIndexShardSnapshots(snapshots); List<String> blobsToDelete = new ArrayList<>(); // delete old index files first for (String blobName : blobs.keySet()) { // delete old file lists if (indexShardSnapshotsFormat.isTempBlobName(blobName) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) { blobsToDelete.add(blobName); } } try { blobContainer.deleteBlobs(blobsToDelete); } catch (IOException e) { // We cannot delete index file - this is fatal, we cannot continue, otherwise we might end up // with references to non-existing files throw new IndexShardSnapshotFailedException(shardId, "error deleting index files during cleanup", e); } blobsToDelete = new ArrayList<>(); // now go over all the blobs, and if they don't exists in a snapshot, delete them for (String blobName : blobs.keySet()) { // delete unused files if (blobName.startsWith(DATA_BLOB_PREFIX)) { if (newSnapshots.findNameFile(FileInfo.canonicalName(blobName)) == null) { blobsToDelete.add(blobName); } } } try { blobContainer.deleteBlobs(blobsToDelete); } catch (IOException e) { logger.debug("[{}] [{}] error deleting some of the blobs [{}] during cleanup", e, snapshotId, shardId, blobsToDelete); } // If we deleted all snapshots - we don't need to create the index file if (snapshots.size() > 0) { try { indexShardSnapshotsFormat.writeAtomic(newSnapshots, blobContainer, Integer.toString(fileListGeneration)); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to write file list", e); } } } /** * Generates blob name * * @param generation the blob number * @return the blob name */ protected String fileNameFromGeneration(long generation) { return DATA_BLOB_PREFIX + Long.toString(generation, Character.MAX_RADIX); } /** * Finds the next available blob number * * @param blobs list of blobs in the repository * @return next available blob number */ protected long findLatestFileNameGeneration(Map<String, BlobMetaData> blobs) { long generation = -1; for (String name : blobs.keySet()) { if (!name.startsWith(DATA_BLOB_PREFIX)) { continue; } name = FileInfo.canonicalName(name); try { long currentGen = Long.parseLong(name.substring(DATA_BLOB_PREFIX.length()), Character.MAX_RADIX); if (currentGen > generation) { generation = currentGen; } } catch (NumberFormatException e) { logger.warn("file [{}] does not conform to the '{}' schema", name, DATA_BLOB_PREFIX); } } return generation; } /** * Loads all available snapshots in the repository * * @param blobs list of blobs in repository * @return tuple of BlobStoreIndexShardSnapshots and the last snapshot index generation */ protected Tuple<BlobStoreIndexShardSnapshots, Integer> buildBlobStoreIndexShardSnapshots(Map<String, BlobMetaData> blobs) { int latest = -1; for (String name : blobs.keySet()) { if (name.startsWith(SNAPSHOT_INDEX_PREFIX)) { try { int gen = Integer.parseInt(name.substring(SNAPSHOT_INDEX_PREFIX.length())); if (gen > latest) { latest = gen; } } catch (NumberFormatException ex) { logger.warn("failed to parse index file name [{}]", name); } } } if (latest >= 0) { try { return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest); } catch (IOException e) { logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest); } } // We couldn't load the index file - falling back to loading individual snapshots List<SnapshotFiles> snapshots = new ArrayList<>(); for (String name : blobs.keySet()) { try { BlobStoreIndexShardSnapshot snapshot = null; if (name.startsWith(SNAPSHOT_PREFIX)) { snapshot = indexShardSnapshotFormat.readBlob(blobContainer, name); } else if (name.startsWith(LEGACY_SNAPSHOT_PREFIX)) { snapshot = indexShardSnapshotLegacyFormat.readBlob(blobContainer, name); } if (snapshot != null) { snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); } } catch (IOException e) { logger.warn("failed to read commit point [{}]", e, name); } } return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1); } } /** * Context for snapshot operations */ private class SnapshotContext extends Context { private final Store store; private final IndexShardSnapshotStatus snapshotStatus; /** * Constructs new context * * @param snapshotId snapshot id * @param shardId shard to be snapshotted * @param snapshotStatus snapshot status to report progress */ public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) { super(snapshotId, Version.CURRENT, shardId); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); store = indexService.getShardOrNull(shardId.id()).store(); this.snapshotStatus = snapshotStatus; } /** * Create snapshot from index commit point * * @param snapshotIndexCommit snapshot commit point */ public void snapshot(IndexCommit snapshotIndexCommit) { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, repositoryName); store.incRef(); try { final Map<String, BlobMetaData> blobs; try { blobs = blobContainer.listBlobs(); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); } long generation = findLatestFileNameGeneration(blobs); Tuple<BlobStoreIndexShardSnapshots, Integer> tuple = buildBlobStoreIndexShardSnapshots(blobs); BlobStoreIndexShardSnapshots snapshots = tuple.v1(); int fileListGeneration = tuple.v2(); final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>(); int indexNumberOfFiles = 0; long indexTotalFilesSize = 0; ArrayList<FileInfo> filesToSnapshot = new ArrayList<>(); final Store.MetadataSnapshot metadata; // TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) here but we should final Collection<String> fileNames; try { metadata = store.getMetadata(snapshotIndexCommit); fileNames = snapshotIndexCommit.getFileNames(); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e); } for (String fileName : fileNames) { if (snapshotStatus.aborted()) { logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); final StoreFileMetaData md = metadata.get(fileName); FileInfo existingFileInfo = null; List<FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName); if (filesInfo != null) { for (FileInfo fileInfo : filesInfo) { try { // in 1.3.3 we added additional hashes for .si / segments_N files // to ensure we don't double the space in the repo since old snapshots // don't have this hash we try to read that hash from the blob store // in a bwc compatible way. maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); } catch (Throwable e) { logger.warn("{} Can't calculate hash from blob for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata()); } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository // we will reuse it for this snapshot existingFileInfo = fileInfo; break; } } } if (existingFileInfo == null) { indexNumberOfFiles++; indexTotalFilesSize += md.length(); // create a new FileInfo BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize); indexCommitPointFiles.add(snapshotFileInfo); filesToSnapshot.add(snapshotFileInfo); } else { indexCommitPointFiles.add(existingFileInfo); } } snapshotStatus.files(indexNumberOfFiles, indexTotalFilesSize); if (snapshotStatus.aborted()) { logger.debug("[{}] [{}] Aborted during initialization", shardId, snapshotId); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.STARTED); for (FileInfo snapshotFileInfo : filesToSnapshot) { try { snapshotFile(snapshotFileInfo); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); } } snapshotStatus.indexVersion(snapshotIndexCommit.getGeneration()); // now create and write the commit point snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE); BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getSnapshot(), snapshotIndexCommit.getGeneration(), indexCommitPointFiles, snapshotStatus.startTime(), // snapshotStatus.startTime() is assigned on the same machine, so it's safe to use with VLong System.currentTimeMillis() - snapshotStatus.startTime(), indexNumberOfFiles, indexTotalFilesSize); //TODO: The time stored in snapshot doesn't include cleanup time. logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); try { indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getSnapshot()); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); } // delete all files that are not referenced by any commit point // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones List<SnapshotFiles> newSnapshotsList = new ArrayList<>(); newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); for (SnapshotFiles point : snapshots) { newSnapshotsList.add(point); } // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index finalize(newSnapshotsList, fileListGeneration + 1, blobs); snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE); } finally { store.decRef(); } } /** * Snapshot individual file * <p> * This is asynchronous method. Upon completion of the operation latch is getting counted down and any failures are * added to the {@code failures} list * * @param fileInfo file to be snapshotted */ private void snapshotFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo) throws IOException { final String file = fileInfo.physicalName(); try (IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata())) { for (int i = 0; i < fileInfo.numberOfParts(); i++) { final long partBytes = fileInfo.partBytes(i); final InputStreamIndexInput inputStreamIndexInput = new InputStreamIndexInput(indexInput, partBytes); InputStream inputStream = snapshotRateLimiter == null ? inputStreamIndexInput : new RateLimitingInputStream(inputStreamIndexInput, snapshotRateLimiter, snapshotThrottleListener); inputStream = new AbortableInputStream(inputStream, fileInfo.physicalName()); blobContainer.writeBlob(fileInfo.partName(i), inputStream, partBytes); } Store.verify(indexInput); snapshotStatus.addProcessedFile(fileInfo.length()); } catch (Throwable t) { failStoreIfCorrupted(t); snapshotStatus.addProcessedFile(0); throw t; } } private void failStoreIfCorrupted(Throwable t) { if (t instanceof CorruptIndexException || t instanceof IndexFormatTooOldException || t instanceof IndexFormatTooNewException) { try { store.markStoreCorrupted((IOException) t); } catch (IOException e) { logger.warn("store cannot be marked as corrupted", e); } } } /** * Checks if snapshot file already exists in the list of blobs * * @param fileInfo file to check * @param blobs list of blobs * @return true if file exists in the list of blobs */ private boolean snapshotFileExistsInBlobs(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Map<String, BlobMetaData> blobs) { BlobMetaData blobMetaData = blobs.get(fileInfo.name()); if (blobMetaData != null) { return blobMetaData.length() == fileInfo.length(); } else if (blobs.containsKey(fileInfo.partName(0))) { // multi part file sum up the size and check int part = 0; long totalSize = 0; while (true) { blobMetaData = blobs.get(fileInfo.partName(part++)); if (blobMetaData == null) { break; } totalSize += blobMetaData.length(); } return totalSize == fileInfo.length(); } // no file, not exact and not multipart return false; } private class AbortableInputStream extends FilterInputStream { private final String fileName; public AbortableInputStream(InputStream delegate, String fileName) { super(delegate); this.fileName = fileName; } @Override public int read() throws IOException { checkAborted(); return in.read(); } @Override public int read(byte[] b, int off, int len) throws IOException { checkAborted(); return in.read(b, off, len); } private void checkAborted() { if (snapshotStatus.aborted()) { logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } } } } /** * This is a BWC layer to ensure we update the snapshots metdata with the corresponding hashes before we compare them. * The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the * comparison of the files on a per-segment / per-commit level. */ private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final FileInfo fileInfo, Store.MetadataSnapshot snapshot) throws Throwable { final StoreFileMetaData metadata; if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { // we have a hash - check if our repo has a hash too otherwise we have // to calculate it. // we might have multiple parts even though the file is small... make sure we read all of it. try (final InputStream stream = new PartSliceStream(blobContainer, fileInfo)) { BytesRefBuilder builder = new BytesRefBuilder(); Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash assert hash.length == 0; hash.bytes = builder.bytes(); hash.offset = 0; hash.length = builder.length(); } } } } private static final class PartSliceStream extends SlicedInputStream { private final BlobContainer container; private final FileInfo info; public PartSliceStream(BlobContainer container, FileInfo info) { super(info.numberOfParts()); this.info = info; this.container = container; } @Override protected InputStream openSlice(long slice) throws IOException { return container.readBlob(info.partName(slice)); } } /** * Context for restore operations */ private class RestoreContext extends Context { private final Store store; private final RecoveryState recoveryState; /** * Constructs new restore context * * @param snapshotId snapshot id * @param shardId shard to be restored * @param snapshotShardId shard in the snapshot that data should be restored from * @param recoveryState recovery state to report progress */ public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { super(snapshotId, version, shardId, snapshotShardId); store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store(); this.recoveryState = recoveryState; } /** * Performs restore operation */ public void restore() throws IOException { store.incRef(); try { logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId); BlobStoreIndexShardSnapshot snapshot = loadSnapshot(); SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()); final Store.MetadataSnapshot recoveryTargetMetadata; try { recoveryTargetMetadata = store.getMetadataOrEmpty(); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { logger.warn("{} Can't read metadata from store", e, shardId); throw new IndexShardRestoreFailedException(shardId, "Can't restore corrupted shard", e); } final List<FileInfo> filesToRecover = new ArrayList<>(); final Map<String, StoreFileMetaData> snapshotMetaData = new HashMap<>(); final Map<String, FileInfo> fileInfos = new HashMap<>(); for (final FileInfo fileInfo : snapshot.indexFiles()) { try { // in 1.3.3 we added additional hashes for .si / segments_N files // to ensure we don't double the space in the repo since old snapshots // don't have this hash we try to read that hash from the blob store // in a bwc compatible way. maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata); } catch (Throwable e) { // if the index is broken we might not be able to read it logger.warn("{} Can't calculate hash from blog for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata()); } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); } final Store.MetadataSnapshot sourceMetaData = new Store.MetadataSnapshot(unmodifiableMap(snapshotMetaData), emptyMap(), 0); final Store.RecoveryDiff diff = sourceMetaData.recoveryDiff(recoveryTargetMetadata); for (StoreFileMetaData md : diff.identical) { FileInfo fileInfo = fileInfos.get(md.name()); recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), true); if (logger.isTraceEnabled()) { logger.trace("[{}] [{}] not_recovering [{}] from [{}], exists in local store and is same", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); } } for (StoreFileMetaData md : Iterables.concat(diff.different, diff.missing)) { FileInfo fileInfo = fileInfos.get(md.name()); filesToRecover.add(fileInfo); recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), false); if (logger.isTraceEnabled()) { if (md == null) { logger.trace("[{}] [{}] recovering [{}] from [{}], does not exists in local store", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); } else { logger.trace("[{}] [{}] recovering [{}] from [{}], exists in local store but is different", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); } } } final RecoveryState.Index index = recoveryState.getIndex(); if (filesToRecover.isEmpty()) { logger.trace("no files to recover, all exists within the local store"); } if (logger.isTraceEnabled()) { logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId, index.totalRecoverFiles(), new ByteSizeValue(index.totalRecoverBytes()), index.reusedFileCount(), new ByteSizeValue(index.reusedFileCount())); } try { for (final FileInfo fileToRecover : filesToRecover) { logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name()); restoreFile(fileToRecover); } } catch (IOException ex) { throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", ex); } final StoreFileMetaData restoredSegmentsFile = sourceMetaData.getSegmentsFile(); if (recoveryTargetMetadata == null) { throw new IndexShardRestoreFailedException(shardId, "Snapshot has no segments file"); } assert restoredSegmentsFile != null; // read the snapshot data persisted final SegmentInfos segmentCommitInfos; try { segmentCommitInfos = Lucene.pruneUnreferencedFiles(restoredSegmentsFile.name(), store.directory()); } catch (IOException e) { throw new IndexShardRestoreFailedException(shardId, "Failed to fetch index version after copying it over", e); } recoveryState.getIndex().updateVersion(segmentCommitInfos.getVersion()); /// now, go over and clean files that are in the store, but were not in the snapshot try { for (String storeFile : store.directory().listAll()) { if (Store.isAutogenerated(storeFile) || snapshotFiles.containPhysicalIndexFile(storeFile)) { continue; //skip write.lock, checksum files and files that exist in the snapshot } try { store.deleteQuiet("restore", storeFile); store.directory().deleteFile(storeFile); } catch (IOException e) { logger.warn("[{}] failed to delete file [{}] during snapshot cleanup", snapshotId, storeFile); } } } catch (IOException e) { logger.warn("[{}] failed to list directory - some of files might not be deleted", snapshotId); } } finally { store.decRef(); } } /** * Restores a file * This is asynchronous method. Upon completion of the operation latch is getting counted down and any failures are * added to the {@code failures} list * * @param fileInfo file to be restored */ private void restoreFile(final FileInfo fileInfo) throws IOException { boolean success = false; try (InputStream partSliceStream = new PartSliceStream(blobContainer, fileInfo)) { final InputStream stream; if (restoreRateLimiter == null) { stream = partSliceStream; } else { stream = new RateLimitingInputStream(partSliceStream, restoreRateLimiter, restoreThrottleListener); } try (final IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) { final byte[] buffer = new byte[BUFFER_SIZE]; int length; while ((length = stream.read(buffer)) > 0) { indexOutput.writeBytes(buffer, 0, length); recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length); } Store.verify(indexOutput); indexOutput.close(); // write the checksum if (fileInfo.metadata().hasLegacyChecksum()) { Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums(); legacyChecksums.add(fileInfo.metadata()); legacyChecksums.write(store); } store.directory().sync(Collections.singleton(fileInfo.physicalName())); success = true; } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { try { store.markStoreCorrupted(ex); } catch (IOException e) { logger.warn("store cannot be marked as corrupted", e); } throw ex; } finally { if (success == false) { store.deleteQuiet(fileInfo.physicalName()); } } } } } public interface RateLimiterListener { void onRestorePause(long nanos); void onSnapshotPause(long nanos); } }
{'content_hash': '687177a8f494b8f1b06efbec671f675d', 'timestamp': '', 'source': 'github', 'line_count': 945, 'max_line_length': 198, 'avg_line_length': 47.67089947089947, 'alnum_prop': 0.6032098381762081, 'repo_name': 'AndreKR/elasticsearch', 'id': '674d1085660edc8d8d9c0360b195a892a8faf7e4', 'size': '45837', 'binary': False, 'copies': '8', 'ref': 'refs/heads/master', 'path': 'core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Batchfile', 'bytes': '11612'}, {'name': 'Emacs Lisp', 'bytes': '3243'}, {'name': 'FreeMarker', 'bytes': '45'}, {'name': 'Groovy', 'bytes': '156235'}, {'name': 'HTML', 'bytes': '3624'}, {'name': 'Java', 'bytes': '30356159'}, {'name': 'Perl', 'bytes': '6923'}, {'name': 'Python', 'bytes': '95030'}, {'name': 'Ruby', 'bytes': '17776'}, {'name': 'Shell', 'bytes': '91401'}]}
// Copyright (c) AlphaSierraPapa for the SharpDevelop Team // // Permission is hereby granted, free of charge, to any person obtaining a copy of this // software and associated documentation files (the "Software"), to deal in the Software // without restriction, including without limitation the rights to use, copy, modify, merge, // publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons // to whom the Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all copies or // substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, // INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE // FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. using System; using System.Diagnostics.Contracts; using ICSharpCode.NRefactory.TypeSystem; namespace ICSharpCode.NRefactory.TypeSystem { public interface IUnresolvedEvent : IUnresolvedMember { bool CanAdd { get; } bool CanRemove { get; } bool CanInvoke { get; } IUnresolvedMethod AddAccessor { get; } IUnresolvedMethod RemoveAccessor { get; } IUnresolvedMethod InvokeAccessor { get; } /// <summary> /// Resolves the member. /// </summary> /// <param name="context"> /// Context for looking up the member. The context must specify the current assembly. /// A <see cref="SimpleTypeResolveContext"/> that specifies the current assembly is sufficient. /// </param> /// <returns> /// Returns the resolved member, or <c>null</c> if the member could not be found. /// </returns> new IEvent Resolve(ITypeResolveContext context); } public interface IEvent : IMember { bool CanAdd { get; } bool CanRemove { get; } bool CanInvoke { get; } IMethod AddAccessor { get; } IMethod RemoveAccessor { get; } IMethod InvokeAccessor { get; } } }
{'content_hash': 'f7cc64eab00cd5e8417d854eb153b485', 'timestamp': '', 'source': 'github', 'line_count': 58, 'max_line_length': 97, 'avg_line_length': 38.310344827586206, 'alnum_prop': 0.7349234923492349, 'repo_name': 'phiresky/Netjs', 'id': '21604068df7c72c1d9c4112578f52a6fbe930a2b', 'size': '2224', 'binary': False, 'copies': '6', 'ref': 'refs/heads/master', 'path': 'Dependencies/NRefactory/ICSharpCode.NRefactory/TypeSystem/IEvent.cs', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C#', 'bytes': '294647'}, {'name': 'HTML', 'bytes': '395'}, {'name': 'Makefile', 'bytes': '517'}, {'name': 'Shell', 'bytes': '75'}, {'name': 'TypeScript', 'bytes': '38292'}]}
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not .. use this file except in compliance with the License. You may obtain a copy of .. the License at .. .. http://www.apache.org/licenses/LICENSE-2.0 .. .. Unless required by applicable law or agreed to in writing, software .. distributed under the License is distributed on an "AS IS" BASIS, WITHOUT .. WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the .. License for the specific language governing permissions and limitations under .. the License. .. _cluster/sharding: ======== Sharding ======== .. _cluster/sharding/scaling-out: Scaling out =========== Normally you start small and grow over time. In the beginning you might do just fine with one node, but as your data and number of clients grows, you need to scale out. For simplicity we will start fresh and small. Start node1 and add a database to it. To keep it simple we will have 2 shards and no replicas. .. code-block:: bash curl -X PUT "http://xxx.xxx.xxx.xxx:5984/small?n=1&q=2" --user daboss If you look in the directory ``data/shards`` you will find the 2 shards. .. code-block:: text data/ +-- shards/ | +-- 00000000-7fffffff/ | | -- small.1425202577.couch | +-- 80000000-ffffffff/ | -- small.1425202577.couch Now, go to the admin panel .. code-block:: text http://xxx.xxx.xxx.xxx:5986/_utils and look in the database ``_dbs``, it is here that the metadata for each database is stored. As the database is called small, there is a document called small there. Let us look in it. Yes, you can get it with curl too: .. code-block:: javascript curl -X GET "http://xxx.xxx.xxx.xxx:5986/_dbs/small" { "_id": "small", "_rev": "1-5e2d10c29c70d3869fb7a1fd3a827a64", "shard_suffix": [ 46, 49, 52, 50, 53, 50, 48, 50, 53, 55, 55 ], "changelog": [ [ "add", "00000000-7fffffff", "[email protected]" ], [ "add", "80000000-ffffffff", "[email protected]" ] ], "by_node": { "[email protected]": [ "00000000-7fffffff", "80000000-ffffffff" ] }, "by_range": { "00000000-7fffffff": [ "[email protected]" ], "80000000-ffffffff": [ "[email protected]" ] } } * ``_id`` The name of the database. * ``_rev`` The current revision of the metadata. * ``shard_suffix`` The numbers after small and before .couch. The number of seconds after UNIX epoch that the database was created. Stored in ASCII. * ``changelog`` Self explaining. Only for admins to read. * ``by_node`` Which shards each node have. * ``by_rage`` On which nodes each shard is. Nothing here, nothing there, a shard in my sleeve ------------------------------------------------- Start node2 and add it to the cluster. Check in ``/_membership`` that the nodes are talking with each other. If you look in the directory ``data`` on node2, you will see that there is no directory called shards. Go to Fauxton and edit the metadata for small, so it looks like this: .. code-block:: javascript { "_id": "small", "_rev": "1-5e2d10c29c70d3869fb7a1fd3a827a64", "shard_suffix": [ 46, 49, 52, 50, 53, 50, 48, 50, 53, 55, 55 ], "changelog": [ [ "add", "00000000-7fffffff", "[email protected]" ], [ "add", "80000000-ffffffff", "[email protected]" ], [ "add", "00000000-7fffffff", "[email protected]" ], [ "add", "80000000-ffffffff", "[email protected]" ] ], "by_node": { "[email protected]": [ "00000000-7fffffff", "80000000-ffffffff" ], "[email protected]": [ "00000000-7fffffff", "80000000-ffffffff" ] }, "by_range": { "00000000-7fffffff": [ "[email protected]", "[email protected]" ], "80000000-ffffffff": [ "[email protected]", "[email protected]" ] } } Then press Save and marvel at the magic. The shards are now on node2 too! We now have ``n=2``! If the shards are large, then you can copy them over manually and only have CouchDB syncing the changes from the last minutes instead. .. _cluster/sharding/move: Moving Shards ============= Add, then delete ---------------- In the world of CouchDB there is no such thing as moving. You can add a new replica to a shard and then remove the old replica, thereby creating the illusion of moving. If you try to uphold this illusion with a database that have ``n=1``, you might find yourself in the following scenario: #. Copy the shard to a new node. #. Update the metadata to use the new node. #. Delete the shard on the old node. #. Lose all writes made between 1 and 2. As the realty "I added a new replica of the shard X on node Y and then I waited for them to sync, before I removed the replica of shard X from node Z." is a bit tedious, people and this documentation tend to use the illusion of moving. Moving ------ When you get to ``n=3`` you should start moving the shards instead of adding more replicas. We will stop on ``n=2`` to keep things simple. Start node number 3 and add it to the cluster. Then create the directories for the shard on node3: .. code-block:: bash mkdir -p data/shards/00000000-7fffffff And copy over ``data/shards/00000000-7fffffff/small.1425202577.couch`` from node1 to node3. Do not move files between the shard directories as that will confuse CouchDB! Edit the database document in ``_dbs`` again. Make it so that node3 have a replica of the shard ``00000000-7fffffff``. Save the document and let CouchDB sync. If we do not do this, then writes made during the copy of the shard and the updating of the metadata will only have ``n=1`` until CouchDB has synced. Then update the metadata document so that node2 no longer have the shard ``00000000-7fffffff``. You can now safely delete ``data/shards/00000000-7fffffff/small.1425202577.couch`` on node 2. The changelog is nothing that CouchDB cares about, it is only for the admins. But for the sake of completeness, we will update it again. Use ``delete`` for recording the removal of the shard ``00000000-7fffffff`` from node2. Start node4, add it to the cluster and do the same as above with shard ``80000000-ffffffff``. All documents added during this operation was saved and all reads responded to without the users noticing anything. .. _cluster/sharding/views: Views ===== The views needs to be moved together with the shards. If you do not, then CouchDB will rebuild them and this will take time if you have a lot of documents. The views are stored in ``data/.shards``. It is possible to not move the views and let CouchDB rebuild the view every time you move a shard. As this can take quite some time, it is not recommended. .. _cluster/sharding/preshard: Reshard? No, Preshard! ====================== Reshard? Nope. It can not be done. So do not create databases with to few shards. If you can not scale out more because you set the number of shards to low, then you need to create a new cluster and migrate over. #. Build a cluster with enough nodes to handle one copy of your data. #. Create a database with the same name, n=1 and with enough shards so you do not have to do this again. #. Set up 2 way replication between the 2 clusters. #. Let it sync. #. Tell clients to use both the clusters. #. Add some nodes to the new cluster and add them as replicas. #. Remove some nodes from the old cluster. #. Repeat 6 and 7 until you have enough nodes in the new cluster to have 3 replicas of every shard. #. Redirect all clients to the new cluster #. Turn off the 2 way replication between the clusters. #. Shut down the old cluster and add the servers as new nodes to the new cluster. #. Relax! Creating more shards than you need and then move the shards around is called presharding. The number of shards you need depends on how much data you are going to store. But creating to many shards increases the complexity without any real gain. You might even get lower performance. As an example of this, we can take the author's (15 year) old lab server. It gets noticeably slower with more than one shard and high load, as the hard drive must seek more. How many shards you should have depends, as always, on your use case and your hardware. If you do not know what to do, use the default of 8 shards.
{'content_hash': 'ed7909f7215a35675995e416045c955c', 'timestamp': '', 'source': 'github', 'line_count': 300, 'max_line_length': 80, 'avg_line_length': 30.78, 'alnum_prop': 0.6268139484513754, 'repo_name': 'kxepal/couchdb-documentation', 'id': '568eb7e5134b5831d028e1ab96cc2fe6ef3675f4', 'size': '9234', 'binary': False, 'copies': '6', 'ref': 'refs/heads/master', 'path': 'src/cluster/sharding.rst', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Batchfile', 'bytes': '6699'}, {'name': 'CSS', 'bytes': '15619'}, {'name': 'Erlang', 'bytes': '718'}, {'name': 'HTML', 'bytes': '36154'}, {'name': 'JavaScript', 'bytes': '31926'}, {'name': 'Makefile', 'bytes': '1813'}, {'name': 'Python', 'bytes': '37640'}]}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.9.1"/> <title>blowbox: Member List</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/searchdata.js"></script> <script type="text/javascript" src="search/search.js"></script> <script type="text/javascript"> $(document).ready(function() { init_search(); }); </script> <link href="doxygen.css" rel="stylesheet" type="text/css" /> <link href="customdoxygen.css" rel="stylesheet" type="text/css"/> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td style="padding-left: 0.5em;"> <div id="projectname">blowbox </div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.9.1 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li class="current"><a href="annotated.html"><span>Classes</span></a></li> <li><a href="files.html"><span>Files</span></a></li> <li> <div id="MSearchBox" class="MSearchBoxInactive"> <span class="left"> <img id="MSearchSelect" src="search/mag_sel.png" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" alt=""/> <input type="text" id="MSearchField" value="Search" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)" onkeyup="searchBox.OnSearchFieldChange(event)"/> </span><span class="right"> <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> </span> </div> </li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="annotated.html"><span>Class&#160;List</span></a></li> <li><a href="classes.html"><span>Class&#160;Index</span></a></li> <li><a href="hierarchy.html"><span>Class&#160;Hierarchy</span></a></li> <li><a href="functions.html"><span>Class&#160;Members</span></a></li> </ul> </div> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> </div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <div id="nav-path" class="navpath"> <ul> <li class="navelem"><b>blowbox</b></li><li class="navelem"><a class="el" href="structblowbox_1_1_mouse_state.html">MouseState</a></li> </ul> </div> </div><!-- top --> <div class="header"> <div class="headertitle"> <div class="title">blowbox::MouseState Member List</div> </div> </div><!--header--> <div class="contents"> <p>This is the complete list of members for <a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a>, including all inherited members.</p> <table class="directory"> <tr bgcolor="#f0f0f0" class="even"><td class="entry"><b>dbl</b> (defined in <a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a>)</td><td class="entry"><a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a></td><td class="entry"></td></tr> <tr bgcolor="#f0f0f0"><td class="entry"><b>down</b> (defined in <a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a>)</td><td class="entry"><a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a></td><td class="entry"></td></tr> <tr bgcolor="#f0f0f0" class="even"><td class="entry"><b>pressed</b> (defined in <a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a>)</td><td class="entry"><a class="el" href="structblowbox_1_1_mouse_state.html">blowbox::MouseState</a></td><td class="entry"></td></tr> </table></div><!-- contents --> <!-- start footer part --> <hr class="footer"/><address class="footer"><small> Generated on Tue Mar 31 2015 19:14:56 for blowbox by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.8.9.1 </small></address> </body> </html>
{'content_hash': '6d0f06fe9ebb8f25e3d5141b9f90132d', 'timestamp': '', 'source': 'github', 'line_count': 109, 'max_line_length': 297, 'avg_line_length': 47.88073394495413, 'alnum_prop': 0.6512741904579421, 'repo_name': 'RikoOphorst/blowbox-old', 'id': 'd4e45edd0e8a7041ada61a427c3ab3a5bfd0ac37', 'size': '5219', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'doc/html/structblowbox_1_1_mouse_state-members.html', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C', 'bytes': '452'}, {'name': 'C++', 'bytes': '327004'}, {'name': 'CSS', 'bytes': '25183'}, {'name': 'FLUX', 'bytes': '3843'}, {'name': 'Lua', 'bytes': '38006'}]}
package com.google.inject; import com.google.inject.binder.AnnotatedElementBuilder; /** * Returns a binder whose configuration information is hidden from its environment by default. See * {@link com.google.inject.PrivateModule PrivateModule} for details. * * @author [email protected] (Jesse Wilson) * @since 2.0 */ public interface PrivateBinder extends Binder { /** Makes the binding for {@code key} available to the enclosing environment */ void expose(Key<?> key); /** * Makes a binding for {@code type} available to the enclosing environment. Use {@link * com.google.inject.binder.AnnotatedElementBuilder#annotatedWith(Class) annotatedWith()} to * expose {@code type} with a binding annotation. */ AnnotatedElementBuilder expose(Class<?> type); /** * Makes a binding for {@code type} available to the enclosing environment. Use {@link * AnnotatedElementBuilder#annotatedWith(Class) annotatedWith()} to expose {@code type} with a * binding annotation. */ AnnotatedElementBuilder expose(TypeLiteral<?> type); @Override PrivateBinder withSource(Object source); @Override PrivateBinder skipSources(Class... classesToSkip); }
{'content_hash': '49b5f8a43c7a11b73b1e317ea1292fa1', 'timestamp': '', 'source': 'github', 'line_count': 38, 'max_line_length': 98, 'avg_line_length': 31.31578947368421, 'alnum_prop': 0.7378151260504202, 'repo_name': 'sonatype/sisu-guice', 'id': '9e7fcd36492f26d3688e4bc1d125361a53f762a8', 'size': '1784', 'binary': False, 'copies': '3', 'ref': 'refs/heads/master', 'path': 'core/src/com/google/inject/PrivateBinder.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Java', 'bytes': '3329313'}, {'name': 'Shell', 'bytes': '1785'}]}
package org.wso2.siddhi.query.api.execution.partition; /** * Created by suho on 7/3/14. */ public interface PartitionType { public String getStreamId(); }
{'content_hash': 'd82c01cc366ed72c182d2f4ecd36af55', 'timestamp': '', 'source': 'github', 'line_count': 12, 'max_line_length': 54, 'avg_line_length': 13.833333333333334, 'alnum_prop': 0.6987951807228916, 'repo_name': 'mt0803/siddhi', 'id': '4cc80427634d9dfdbb5a75fd3a4a3a7d6021b8cf', 'size': '806', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'modules/siddhi-query-api/src/main/java/org/wso2/siddhi/query/api/execution/partition/PartitionType.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'ANTLR', 'bytes': '15263'}, {'name': 'Java', 'bytes': '3491579'}, {'name': 'Scala', 'bytes': '923'}]}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.7.0_45) on Thu Nov 26 10:47:20 GMT 2015 --> <meta http-equiv="Content-Type" content="text/html" charset="UTF-8"> <title>com.github.rvesse.airline.help.ronn (Airline - Help - RONN 2.1.0 API)</title> <meta name="date" content="2015-11-26"> <link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="com.github.rvesse.airline.help.ronn (Airline - Help - RONN 2.1.0 API)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../com/github/rvesse/airline/help/ronn/package-summary.html">Package</a></li> <li>Class</li> <li><a href="package-use.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev Package</li> <li>Next Package</li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?com/github/rvesse/airline/help/ronn/package-summary.html" target="_top">Frames</a></li> <li><a href="package-summary.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h1 title="Package" class="title">Package&nbsp;com.github.rvesse.airline.help.ronn</h1> </div> <div class="contentContainer"> <ul class="blockList"> <li class="blockList"> <table class="packageSummary" border="0" cellpadding="3" cellspacing="0" summary="Class Summary table, listing classes, and an explanation"> <caption><span>Class Summary</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Class</th> <th class="colLast" scope="col">Description</th> </tr> <tbody> <tr class="altColor"> <td class="colFirst"><a href="../../../../../../com/github/rvesse/airline/help/ronn/RonnCommandUsageGenerator.html" title="class in com.github.rvesse.airline.help.ronn">RonnCommandUsageGenerator</a></td> <td class="colLast">Deprecated <div class="block"><i>The RONN format has some know bugs and it is recommended to use classes from the airline-help-man or airline-help-markdown modules instead of classes from this module</i></div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><a href="../../../../../../com/github/rvesse/airline/help/ronn/RonnGlobalUsageGenerator.html" title="class in com.github.rvesse.airline.help.ronn">RonnGlobalUsageGenerator</a>&lt;T&gt;</td> <td class="colLast">Deprecated <div class="block"><i>The RONN format has some know bugs and it is recommended to use classes from the airline-help-man or airline-help-markdown modules instead of classes from this module</i></div> </td> </tr> <tr class="altColor"> <td class="colFirst"><a href="../../../../../../com/github/rvesse/airline/help/ronn/RonnMultiPageGlobalUsageGenerator.html" title="class in com.github.rvesse.airline.help.ronn">RonnMultiPageGlobalUsageGenerator</a>&lt;T&gt;</td> <td class="colLast">Deprecated <div class="block"><i>The RONN format has some know bugs and it is recommended to use classes from the airline-help-man or airline-help-markdown modules instead of classes from this module</i></div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><a href="../../../../../../com/github/rvesse/airline/help/ronn/RonnUsageHelper.html" title="class in com.github.rvesse.airline.help.ronn">RonnUsageHelper</a></td> <td class="colLast">Deprecated <div class="block"><i>The RONN format has some know bugs and it is recommended to use classes from the airline-help-man or airline-help-markdown modules instead of classes from this module</i></div> </td> </tr> </tbody> </table> </li> </ul> </div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../com/github/rvesse/airline/help/ronn/package-summary.html">Package</a></li> <li>Class</li> <li><a href="package-use.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev Package</li> <li>Next Package</li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?com/github/rvesse/airline/help/ronn/package-summary.html" target="_top">Frames</a></li> <li><a href="package-summary.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small>Copyright &#169; 2012&#x2013;2015. All rights reserved.</small></p> </body> </html>
{'content_hash': '9db51b42dd994fd805d0bf8f1316bf6c', 'timestamp': '', 'source': 'github', 'line_count': 161, 'max_line_length': 228, 'avg_line_length': 40.099378881987576, 'alnum_prop': 0.6392503097893433, 'repo_name': 'rvesse/airline', 'id': '16b42692427371f2f582c2415a322b124a228483', 'size': '6456', 'binary': False, 'copies': '1', 'ref': 'refs/heads/main', 'path': 'docs/javadoc/2.1.0/airline-help-ronn/com/github/rvesse/airline/help/ronn/package-summary.html', 'mode': '33261', 'license': 'apache-2.0', 'language': [{'name': 'Batchfile', 'bytes': '91'}, {'name': 'Java', 'bytes': '2176058'}, {'name': 'Shell', 'bytes': '4550'}]}
""" Translated from https://webdocs.cs.ualberta.ca/~sutton/MountainCar/MountainCar1.cp Algorithm described at https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node89.html Some minor adjustments to constants were made to make the program work on environments besides Mountain Car. """ import random import math import numpy as np import gym import sys np.random.seed(0) env = gym.make(sys.argv[1]) outdir = sys.argv[2] initial_epsilon = 0.1 # probability of choosing a random action (changed from original value of 0.0) alpha = 0.5 # learning rate lambda_ = 0.9 # trace decay rate gamma = 1.0 # discount rate N = 30000 # memory for storing parameters (changed from original value of 3000) M = env.action_space.n NUM_TILINGS = 10 NUM_TILES = 8 def main(): env.monitor.start(outdir) epsilon = initial_epsilon theta = np.zeros(N) # parameters (memory) for episode_num in xrange(2000): print episode_num, episode(epsilon, theta, env.spec.timestep_limit) epsilon = epsilon * 0.999 # added epsilon decay env.monitor.close() def episode(epsilon, theta, max_steps): Q = np.zeros(M) # action values e = np.zeros(N) # eligibility traces F = np.zeros((M, NUM_TILINGS), dtype=np.int32) # features for each action def load_F(observation): state_vars = [] for i, var in enumerate(observation): range_ = (env.observation_space.high[i] - env.observation_space.low[i]) # in CartPole, there is no range on the velocities, so default to 1 if range_ == float('inf'): range_ = 1 state_vars.append(var / range_ * NUM_TILES) for a in xrange(M): F[a] = get_tiles(NUM_TILINGS, state_vars, N, a) def load_Q(): for a in xrange(M): Q[a] = 0 for j in xrange(NUM_TILINGS): Q[a] += theta[F[a,j]] observation = env.reset() load_F(observation) load_Q() action = np.argmax(Q) # numpy argmax chooses first in a tie, not random like original implementation if np.random.random() < epsilon: action = env.action_space.sample() step = 0 while True: step += 1 e *= gamma * lambda_ for a in xrange(M): v = 0.0 if a == action: v = 1.0 for j in xrange(NUM_TILINGS): e[F[a,j]] = v observation, reward, done, info = env.step(action) delta = reward - Q[action] load_F(observation) load_Q() next_action = np.argmax(Q) if np.random.random() < epsilon: next_action = env.action_space.sample() if not done: delta += gamma * Q[next_action] theta += alpha / NUM_TILINGS * delta * e load_Q() if done or step > max_steps: break action = next_action return step # translated from https://web.archive.org/web/20030618225322/http://envy.cs.umass.edu/~rich/tiles.html def get_tiles(num_tilings, variables, memory_size, hash_value): num_coordinates = len(variables) + 2 coordinates = [0 for i in xrange(num_coordinates)] coordinates[-1] = hash_value qstate = [0 for i in xrange(len(variables))] base = [0 for i in xrange(len(variables))] tiles = [0 for i in xrange(num_tilings)] for i, variable in enumerate(variables): qstate[i] = int(math.floor(variable * num_tilings)) base[i] = 0 for j in xrange(num_tilings): for i in xrange(len(variables)): if (qstate[i] >= base[i]): coordinates[i] = qstate[i] - ((qstate[i] - base[i]) % num_tilings) else: coordinates[i] = qstate[i] + 1 + ((base[i] - qstate[i] - 1) % num_tilings) - num_tilings base[i] += 1 + (2 * i) coordinates[len(variables)] = j tiles[j] = hash_coordinates(coordinates, memory_size) return tiles rndseq = np.random.randint(0, 2**32-1, 2048) def hash_coordinates(coordinates, memory_size): total = 0 for i, coordinate in enumerate(coordinates): index = coordinate index += (449 * i) index %= 2048 while index < 0: index += 2048 total += rndseq[index] index = total % memory_size while index < 0: index += memory_size return index main()
{'content_hash': 'e599a16d8ed1bffd27a6d4f5cff366ee', 'timestamp': '', 'source': 'github', 'line_count': 146, 'max_line_length': 104, 'avg_line_length': 29.815068493150687, 'alnum_prop': 0.5952216861934299, 'repo_name': 'MikeOuimet/AI-fun', 'id': 'c9cc029c91784b205d03f5a6474c8e13b1d18859', 'size': '4353', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'RLbasics/sarsa-lambda.py', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C', 'bytes': '4915'}, {'name': 'CSS', 'bytes': '68'}, {'name': 'HTML', 'bytes': '3060'}, {'name': 'Python', 'bytes': '87414'}]}
<?xml version="1.0" encoding="UTF-8"?> <project xmlns="http://www.netbeans.org/ns/project/1"> <type>org.netbeans.modules.j2me.project</type> <configuration> <data xmlns="http://www.netbeans.org/ns/j2me-embedded-project/1"> <name>IRClockExtreme</name> <source-roots> <root id="src.dir"/> </source-roots> <test-roots/> </data> </configuration> </project>
{'content_hash': '24f03125773fd5851622a1c1f7560ac7', 'timestamp': '', 'source': 'github', 'line_count': 13, 'max_line_length': 73, 'avg_line_length': 35.23076923076923, 'alnum_prop': 0.5502183406113537, 'repo_name': 'mikan/ir-clock', 'id': '50c95c269c4bdd4eb8e8d7a4086a2beb84d7bfcb', 'size': '458', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'midlet/nbproject/project.xml', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'CSS', 'bytes': '1699'}, {'name': 'Java', 'bytes': '84597'}, {'name': 'Python', 'bytes': '699'}]}
#include <Wire.h> #include <Arduino.h> #include "MAX31725Temp.h" #define TEMP_DATA_REGISTER 0x00 #define TEMP_CONFIG_REGISTER 0x01 #define TEMP_THYST_REGISTER 0x02 #define TEMP_TOS_REGISTER 0x03 /* * Default constructor, uses max supersampling value and default I2C address. * Also init I2C. */ MAX31725::MAX31725(uint8_t _samples) { //Wire.begin(); i2cAddress = TEMPERATURE_DEFAULT_ADDR; if((_samples <= TEMPERATURE_MAX_SAMPLES) && (_samples > 0)) { numSamples = _samples; } else { numSamples = TEMPERATURE_MAX_SAMPLES; } } /* * Constructor with additional I2C address argument. * Allows use of alternate addresses. */ MAX31725::MAX31725(uint8_t _i2cAddress, uint8_t _samples) { Wire.begin(); i2cAddress = _i2cAddress; if((_samples <= TEMPERATURE_MAX_SAMPLES) && (_samples > 0)) { numSamples = _samples; } else { numSamples = TEMPERATURE_MAX_SAMPLES; } } /* * Destructor. Empty but explicit. */ MAX31725::~MAX31725() { } /* * Write to the configuration register of the sensor. * One byte write. * * Inputs: * _newConfig: New configuration byte to send to the sensor. * * Outputs: * Sets current status value in class. * * Returns: * Arduino Wire library status. Should be check by user. */ int MAX31725::write_config_register(uint8_t _newConfig) { Wire.beginTransmission(i2cAddress); Wire.write(TEMP_CONFIG_REGISTER); Wire.write(_newConfig); int status_ = Wire.endTransmission(); if(status_ == 0) { currentConfig = _newConfig; return 0; } else { return status_; } } /* * Write new value to temp hysteresis register * Used for thermal shutdown config. * * Inputs: * _newThyst: New value for the THYST register * * Returns: * Arduino Wire status code. Should be checked by user. */ int MAX31725::write_thyst_register(uint16_t _newThyst) { return write_two_bytes(TEMP_THYST_REGISTER, _newThyst); } /* * Write new value to overtemp shutdown register. * Used for thermal shutdown config. * * Inputs: * _newTos: New value for the TOS register * * Returns: * Arduino Wire status code. Should be checked by user. */ int MAX31725::write_tos_register(uint16_t _newTos) { return write_two_bytes(TEMP_TOS_REGISTER, _newTos); } /* * Read on-chip config register. * Set internal class value and return to user. * * Outputs: Sets currentConfig value in class. * * Returns: current configuration as read from sensor. */ uint8_t MAX31725::read_config_register() { // Set register to read Wire.beginTransmission(i2cAddress); Wire.write(TEMP_CONFIG_REGISTER); int status_ = Wire.endTransmission(false); // Send restart as per datasheet // If sensor didn't respond, return current config. if(status_ != 0) { return currentConfig; } // Read the single byte of the config register Wire.requestFrom(i2cAddress, 1); uint8_t config_ = Wire.read(); // Set the internal value for the class and return said value to the user. currentConfig = config_; return config_; } /* * Read hysteresis register from chip * * Outputs: * _regValue: passed by reference 16-bit value to store register value. * * Returns: * Arduino Wire status code. User should use to validity check. */ uint8_t MAX31725::read_thyst_register(uint16_t& _regValue) { return read_two_bytes(TEMP_THYST_REGISTER, _regValue); } /* * Read overtemp shutdown register from chip * * Outputs: * _regValue: passed by reference 16-bit value to store register value. * * Returns: * Arduino Wire status code. User should use to validity check. */ uint8_t MAX31725::read_tos_register(uint16_t& _regValue) { return read_two_bytes(TEMP_TOS_REGISTER, _regValue); } /* * Read temperature data from chip, store internally. * Supports supersampling. * NOTE: supersampling temporarily disabled due to way chip stores decimal values * * Outputs: changes class internal raw temperature value * * Returns: * Arduino Wire status code; user should use to validity check. */ uint8_t MAX31725::recieve_data() { int status_ = 0; uint16_t runningvalue_ = 0; // Reset raw temp value // Grab data number of times supersampling value is set to for(int i=0; i<numSamples; i++) { uint16_t data_ = 0; status_ = read_two_bytes(TEMP_DATA_REGISTER, data_); // Read the data // If errored, return with failure code. if(status_ != 0) { return status_; } runningvalue_ += data_; // Add to running count } rawTemperature = runningvalue_ / numSamples; // Take the average and set class value. return 0; // Return success } /* * Covert raw temperature to floating point value. * May be optimized later to use lookup table. * NOTE: SHOULD ONLY BE USED IF FLOATING POINT HIT IS OKAY. * * Returns: floating point converted temperature in degrees C. */ float MAX31725::convert_temp() { int8_t intpart_ = (rawTemperature & 0xff00) >> 8; // Convert int part to signed and extract from total data float decimalpart_ = 0.0f; // The decimal part encoding is funky, so here's a terrible bit of code to extract that: uint8_t lsb_ = (uint8_t)((uint16_t)0x00ff & intpart_); if((lsb_ & 0b10000000) >> 7) { decimalpart_ += 0.5f; } if((lsb_ & 0b01000000) >> 6) { decimalpart_ += 0.25f; } if((lsb_ & 0b00100000) >> 5) { decimalpart_ += 0.125f; } if((lsb_ & 0b00010000) >> 4) { decimalpart_ += 0.0625f; } if((lsb_ & 0b00001000) >> 3) { decimalpart_ += 0.03125f; } if((lsb_ & 0b00000100) >> 2) { decimalpart_ += 0.015625f; } if((lsb_ & 0b00000010) >> 1) { decimalpart_ += 0.0078125f; } if(lsb_ & 0b00000001) { decimalpart_ += 0.00390625f; } return (float)intpart_ + decimalpart_; } /* * Setter to modify supersampling value. * Bounds checks for valid range. * * Outputs: Modifys numSamples to new value. */ void MAX31725::set_num_samples(uint8_t _newSamples) { if((_newSamples <= TEMPERATURE_MAX_SAMPLES) && (_newSamples > 0)) { numSamples = _newSamples; } else { numSamples = TEMPERATURE_MAX_SAMPLES; } } /* * Internal two byte transmit function. * * Inputs: * _reg: register to write to * _value: 16 bit value to write * * Returns: * Arduino Wire response code; should be passed to user. */ int MAX31725::write_two_bytes(uint8_t _reg, uint16_t _value) { uint8_t msb_ = ((uint16_t)0xff00 & _value) >> 8; uint8_t lsb_ = (uint8_t)((uint16_t)0x00ff & _value); Wire.beginTransmission(i2cAddress); Wire.write(_reg); Wire.write(msb_); Wire.write(lsb_); return Wire.endTransmission(); } /* * Internal two byte read function. * * Inputs: * _reg: register to read from * * Outputs: * _data: 16-bit pass-by-reference for read data * * Returns: * Arduino Wire response code; should be passed to user. */ int MAX31725::read_two_bytes(uint8_t _reg, uint16_t& _data) { // Set register pointer on chip Wire.beginTransmission(i2cAddress); Wire.write(_reg); int status_ = Wire.endTransmission(false); // Send restart as per datasheet. // If failure, quit and report. if(status_ != 0) { return status_; } Wire.requestFrom(i2cAddress, 2); _data = (Wire.read() << 8) | (Wire.read()); return 0; }
{'content_hash': 'ea220abd52b0fb512e484b5bd9ea27f8', 'timestamp': '', 'source': 'github', 'line_count': 288, 'max_line_length': 111, 'avg_line_length': 25.82638888888889, 'alnum_prop': 0.6468136595859102, 'repo_name': 'UMDBPP/STAR', 'id': 'cf5431f7c642bd7bf1dc9a05d17a8e14e81398f9', 'size': '7438', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'Software/STARduino/src/Sensors/MAX31725Temp.cpp', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Arduino', 'bytes': '11415'}, {'name': 'C', 'bytes': '57055'}, {'name': 'C++', 'bytes': '245429'}, {'name': 'Eagle', 'bytes': '7535355'}, {'name': 'Makefile', 'bytes': '451'}, {'name': 'Matlab', 'bytes': '2035'}, {'name': 'Shell', 'bytes': '59681'}]}
<?php namespace app\models; use Yii; use yii\base\Model; use yii\data\ActiveDataProvider; use app\models\PriceType; /** * PriceTypeSearch represents the model behind the search form about `app\models\PriceType`. */ class PriceTypeSearch extends PriceType { /** * @inheritdoc */ public function rules() { return [ [['id'], 'integer'], [['name'], 'safe'], ]; } /** * @inheritdoc */ public function scenarios() { // bypass scenarios() implementation in the parent class return Model::scenarios(); } /** * Creates data provider instance with search query applied * * @param array $params * * @return ActiveDataProvider */ public function search($params) { $query = PriceType::find(); $dataProvider = new ActiveDataProvider([ 'query' => $query, ]); $this->load($params); if (!$this->validate()) { // uncomment the following line if you do not want to return any records when validation fails // $query->where('0=1'); return $dataProvider; } $query->andFilterWhere([ 'id' => $this->id, ]); $query->andFilterWhere(['like', 'name', $this->name]); return $dataProvider; } }
{'content_hash': '038244b94a50386c47c19bed08f55c05', 'timestamp': '', 'source': 'github', 'line_count': 66, 'max_line_length': 106, 'avg_line_length': 20.78787878787879, 'alnum_prop': 0.5408163265306123, 'repo_name': 'A111ex/parser.ru', 'id': '03886d5226ba8ebc7a85906cb1e0ee51aa56020c', 'size': '1372', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'models/PriceTypeSearch.php', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'ApacheConf', 'bytes': '392'}, {'name': 'Batchfile', 'bytes': '1030'}, {'name': 'CSS', 'bytes': '165594'}, {'name': 'JavaScript', 'bytes': '67546'}, {'name': 'PHP', 'bytes': '244547'}]}
<!-- --> <!DOCTYPE html> <html> <head> <meta charset="utf-8"> <title>WebGL GLSL Conformance Tests</title> <link rel="stylesheet" href="./resources/js-test-style.css"/> <link rel="stylesheet" href="./resources/glsl-feature-tests.css"/> <script src="./resources/js-test-pre.js"></script> <script src="./resources/webgl-test-utils.js"></script> <script src="./resources/glsl-conformance-test.js"></script> </head> <body> <div id="description"></div> <div id="console"></div> <script id="vertexShader" type="text/something-not-javascript"> // implicit cast of int to float with less than or equal to should fail void main() { bool b = 1.0 <= 1; } </script> <script> "use strict"; GLSLConformanceTester.runTest(); var successfullyParsed = true; </script> </body> </html>
{'content_hash': 'a1c06c0bdc56bdd1bc47356afb61fdf8', 'timestamp': '', 'source': 'github', 'line_count': 35, 'max_line_length': 71, 'avg_line_length': 22.114285714285714, 'alnum_prop': 0.6886304909560723, 'repo_name': 'pk-sam/crosswalk-test-suite', 'id': 'fb374f2e683b86a2dc9ee3943f3c22f969b6c1ed', 'size': '1915', 'binary': False, 'copies': '14', 'ref': 'refs/heads/master', 'path': 'webapi/tct-webgl-nonw3c-tests/webgl/khronos/less_than_equal.vert.html', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'C', 'bytes': '28136'}, {'name': 'CSS', 'bytes': '697706'}, {'name': 'CoffeeScript', 'bytes': '18978'}, {'name': 'Cucumber', 'bytes': '63597'}, {'name': 'GLSL', 'bytes': '3495'}, {'name': 'Groff', 'bytes': '12'}, {'name': 'HTML', 'bytes': '39810614'}, {'name': 'Java', 'bytes': '602994'}, {'name': 'JavaScript', 'bytes': '17479410'}, {'name': 'Makefile', 'bytes': '1044'}, {'name': 'PHP', 'bytes': '44946'}, {'name': 'Python', 'bytes': '4304927'}, {'name': 'Shell', 'bytes': '1100341'}, {'name': 'XSLT', 'bytes': '767778'}]}
'use strict'; var path = require('path'); var fs = require('fs'); var SSL_ROOTS_PATH = path.resolve(__dirname, '..', '..', 'etc', 'roots.pem'); var _ = require('lodash'); var ProtoBuf = require('protobufjs'); var client = require('./src/client.js'); var server = require('./src/server.js'); var Metadata = require('./src/metadata.js'); var grpc = require('./src/grpc_extension'); grpc.setDefaultRootsPem(fs.readFileSync(SSL_ROOTS_PATH, 'ascii')); /** * Load a gRPC object from an existing ProtoBuf.Reflect object. * @param {ProtoBuf.Reflect.Namespace} value The ProtoBuf object to load. * @param {Object=} options Options to apply to the loaded object * @return {Object<string, *>} The resulting gRPC object */ exports.loadObject = function loadObject(value, options) { var result = {}; if (value.className === 'Namespace') { _.each(value.children, function(child) { result[child.name] = loadObject(child, options); }); return result; } else if (value.className === 'Service') { return client.makeProtobufClientConstructor(value, options); } else if (value.className === 'Message' || value.className === 'Enum') { return value.build(); } else { return value; } }; var loadObject = exports.loadObject; /** * Load a gRPC object from a .proto file. The options object can provide the * following options: * - convertFieldsToCamelCase: Loads this file with that option on protobuf.js * set as specified. See * https://github.com/dcodeIO/protobuf.js/wiki/Advanced-options for details * - binaryAsBase64: deserialize bytes values as base64 strings instead of * Buffers. Defaults to false * - longsAsStrings: deserialize long values as strings instead of objects. * Defaults to true * - deprecatedArgumentOrder: Use the beta method argument order for client * methods, with optional arguments after the callback. Defaults to false. * This option is only a temporary stopgap measure to smooth an API breakage. * It is deprecated, and new code should not use it. * @param {string|{root: string, file: string}} filename The file to load * @param {string=} format The file format to expect. Must be either 'proto' or * 'json'. Defaults to 'proto' * @param {Object=} options Options to apply to the loaded file * @return {Object<string, *>} The resulting gRPC object */ exports.load = function load(filename, format, options) { if (!format) { format = 'proto'; } var convertFieldsToCamelCaseOriginal = ProtoBuf.convertFieldsToCamelCase; if(options && options.hasOwnProperty('convertFieldsToCamelCase')) { ProtoBuf.convertFieldsToCamelCase = options.convertFieldsToCamelCase; } var builder; try { switch(format) { case 'proto': builder = ProtoBuf.loadProtoFile(filename); break; case 'json': builder = ProtoBuf.loadJsonFile(filename); break; default: throw new Error('Unrecognized format "' + format + '"'); } } finally { ProtoBuf.convertFieldsToCamelCase = convertFieldsToCamelCaseOriginal; } return loadObject(builder.ns, options); }; /** * @see module:src/server.Server */ exports.Server = server.Server; /** * @see module:src/metadata */ exports.Metadata = Metadata; /** * Status name to code number mapping */ exports.status = grpc.status; /** * Propagate flag name to number mapping */ exports.propagate = grpc.propagate; /** * Call error name to code number mapping */ exports.callError = grpc.callError; /** * Write flag name to code number mapping */ exports.writeFlags = grpc.writeFlags; /** * Credentials factories */ exports.credentials = require('./src/credentials.js'); /** * ServerCredentials factories */ exports.ServerCredentials = grpc.ServerCredentials; /** * @see module:src/client.makeClientConstructor */ exports.makeGenericClientConstructor = client.makeClientConstructor; /** * @see module:src/client.getClientChannel */ exports.getClientChannel = client.getClientChannel; /** * @see module:src/client.waitForClientReady */ exports.waitForClientReady = client.waitForClientReady;
{'content_hash': '8a69e1c033a74e4d2be8d3d8a5dea003', 'timestamp': '', 'source': 'github', 'line_count': 147, 'max_line_length': 79, 'avg_line_length': 27.959183673469386, 'alnum_prop': 0.7021897810218978, 'repo_name': 'lipnus/hackpretty', 'id': '66664d94b5a225b3fb90c802c07a35257d8ff71c', 'size': '5678', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'Backend/node_modules/grpc/src/node/index.js', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'HTML', 'bytes': '3839'}, {'name': 'Java', 'bytes': '31928'}, {'name': 'JavaScript', 'bytes': '5370'}]}
/* Spectral by HTML5 UP html5up.net | @n33co Free for personal and commercial use under the CCA 3.0 license (html5up.net/license) */ /* Spotlight */ .spotlight { display: block; } .spotlight .image { display: inline-block; vertical-align: top; } .spotlight .content { padding: 4em 4em 2em 4em ; display: inline-block; } .spotlight:after { clear: both; content: ''; display: block; } /* Features */ .features { display: block; } .features li { float: left; } .features:after { content: ''; display: block; clear: both; } /* Banner + Wrapper (style4) */ #banner, .wrapper.style4 { background-image: url("../../images/banner.jpg"); background-position: center center; background-repeat: no-repeat; background-size: cover; position: relative; } #banner:before, .wrapper.style4:before { background: #000000; content: ''; height: 100%; left: 0; opacity: 0.5; position: absolute; top: 0; width: 100%; } #banner .inner, .wrapper.style4 .inner { position: relative; z-index: 1; } /* Banner */ #banner { padding: 14em 0 12em 0 ; height: auto; } #banner:after { display: none; } /* CTA */ #cta .inner header { float: left; } #cta .inner .actions { float: left; } #cta .inner:after { clear: both; content: ''; display: block; } /* Main */ #main > header { background-image: url("../../images/banner.jpg"); background-position: center center; background-repeat: no-repeat; background-size: cover; position: relative; } #main > header:before { background: #000000; content: ''; height: 100%; left: 0; opacity: 0.5; position: absolute; top: 0; width: 100%; } #main > header > * { position: relative; z-index: 1; }
{'content_hash': '74839000d8be3fcba1f6876b8b6d18d4', 'timestamp': '', 'source': 'github', 'line_count': 34, 'max_line_length': 188, 'avg_line_length': 47.26470588235294, 'alnum_prop': 0.6776602364654636, 'repo_name': 'ac-opensource/ac-opensource.github.io', 'id': '3d1baed791b8275f27802e7a0f623d11e551bd0a', 'size': '1607', 'binary': False, 'copies': '3', 'ref': 'refs/heads/master', 'path': '_site/css/ie9.css', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '245237'}, {'name': 'HTML', 'bytes': '67801'}, {'name': 'JavaScript', 'bytes': '42611'}, {'name': 'Ruby', 'bytes': '3272'}]}
from ConfigParser import ConfigParser from cheeseprism.utils import path from cheeseprism.utils import resource_spec from itertools import count from mock import patch from pprint import pformat as pprint from pyramid.decorator import reify import logging import time import unittest logger = logging.getLogger(__name__) here = path(__file__).parent class FunctionalTests(unittest.TestCase): testdir = here / 'test-indexes' dummy = here / "dummypackage/dist/dummypackage-0.0dev.tar.gz" counter = count() index_parent = here / "test-indexes" pipcache = here / "pipcache" devini = here / 'development.ini' dummy = here / "dummypackage/dist/dummypackage-0.0dev.tar.gz" @classmethod def get_base(cls): return path(resource_spec(cls.index_parent)) base = reify(lambda self: self.get_base()) def setUp(self): self.count = next(self.counter) self.dummy.copy(self.testdir) self.dummypath = self.testdir / self.dummy.name def makeone(self, xtra=None, index_name='test-func-index', count=None): from cheeseprism.wsgiapp import main cp = ConfigParser(dict(here=self.base)) with open(resource_spec(self.devini)) as fp: cp.readfp(fp) defaults = dict((x, cp.get('DEFAULT', x)) for x in cp.defaults()) count = count is None and self.count or count self.idxpath = index_path = self.base / ("%s-%s" %(count, index_name)) settings = { 'cheeseprism.file_root': index_path, 'cheeseprism.data_json': 'data.json' } settings = xtra and dict(settings, **xtra) or settings app = main(defaults, **settings) self.executor = app.registry['cp.executor'] from webtest import TestApp return TestApp(app) def test_async_bulkupdate(self): idxname = 'async_bulk_up' idxpath = self.base / "0-" + idxname idxpath.mkdir_p() self.dummy.copy(idxpath) with patch('cheeseprism.index.async_bulk_update_at_start') as abuaas: testapp = self.makeone({'cheeseprism.async_restart':'true'}, index_name=idxname, count=0) assert abuaas.called res = testapp.get('/index', status=200) assert res def test_root_thead_pip_sync(self): with patch.dict('os.environ', {'PIP_DOWNLOAD_CACHE': resource_spec(self.pipcache)}): testapp = self.makeone({'cheeseprism.futures':'thread', 'cheeseprism.pipcache_mirror':'true'}) res = testapp.get('/index', status=200) assert 'WUT' in res.body def test_root_thread(self): testapp = self.makeone() res = testapp.get('/', status=200) self.failUnless('Cheese' in res.body) def test_request_size_limit(self): testapp = self.makeone({'cheeseprism.temp_file_limit': 100*1024, 'pyramid.includes': __name__ + '.request_size_check'}) res = testapp.get('/sizetest', status=200) assert res.json.get('request_size', False) == 102400 def tearDown(self): logger.debug("teardown: %s", self.count) if self.base.exists(): dirs = self.base.dirs() logger.debug(pprint(dirs)) time.sleep(0.02) logger.debug(pprint([x.rmtree() for x in dirs])) if hasattr(self, 'executor'): self.executor.shutdown() def request_size_check(config): def sizetest(request): return {'request_size': request.request_body_tempfile_limit} config.add_route('sizetest', '/sizetest') config.add_view(sizetest, route_name='sizetest', renderer='json')
{'content_hash': 'b41f09a0e35d0ba9d8cee08c3fe1e8a2', 'timestamp': '', 'source': 'github', 'line_count': 110, 'max_line_length': 92, 'avg_line_length': 33.836363636363636, 'alnum_prop': 0.6163353036002149, 'repo_name': 'whitmo/CheesePrism', 'id': '6a16296ed9245c9f0a42cb7c7a47b2fe650282da', 'size': '3722', 'binary': False, 'copies': '3', 'ref': 'refs/heads/master', 'path': 'tests/test_functional.py', 'mode': '33188', 'license': 'bsd-2-clause', 'language': [{'name': 'CSS', 'bytes': '9543'}, {'name': 'HTML', 'bytes': '6162'}, {'name': 'Python', 'bytes': '107663'}, {'name': 'Shell', 'bytes': '61'}]}
<span class="wiki-builder">This page was generated with Wiki Builder. Do not change the format!</span> ## Info * **URI:** [[/Forum/ChangeLockState/{param1}/{param2}/|https://www.bungie.net/Platform/Forum/ChangeLockState/{param1}/{param2}/]] * **Method:** POST * **Accessibility:** Private * **Service:** [[ForumService|Endpoints#ForumService]] ## Parameters ### Path Parameters Name | Description ---- | ----------- param1 | param2 | ### Query String Parameters None ### JSON POST Parameters None ## Example ## References
{'content_hash': '727390bfbff0d0bb79479405fed90106', 'timestamp': '', 'source': 'github', 'line_count': 26, 'max_line_length': 129, 'avg_line_length': 20.46153846153846, 'alnum_prop': 0.6766917293233082, 'repo_name': 'DestinyDevs/BungieNetPlatform', 'id': 'e0b98d306c4fbccdd9514698e72a5f842b8dd41e', 'size': '532', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'wiki-docs/v1/ForumServicePages/ChangeLockState.md', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'GLSL', 'bytes': '7007'}, {'name': 'HTML', 'bytes': '4966'}, {'name': 'JavaScript', 'bytes': '2424226'}, {'name': 'PHP', 'bytes': '380417'}]}
from os import path from pkg_resources import parse_requirements from setuptools import setup, find_packages name = 'reqlice' # PyPI name here = path.dirname(path.abspath(__file__)) # Get the long description from the relevant file long_description = None try: with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() except Exception: pass def get_requirements(filename): return [str(r) for r in parse_requirements(open(filename).read())] setup( name=name, version='1.0.0', url='https://github.com/5monkeys/reqlice', license='MIT', author='Akseli Nelander', author_email='[email protected]', description='Fetches and annotates the license of pip requirements.', long_description=long_description, packages=find_packages(include='reqlice'), entry_points={ 'console_scripts': [ 'reqlice = reqlice:cli' ] }, install_requires=get_requirements('requirements.txt'), classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python :: 3.5', 'License :: OSI Approved :: MIT License', ] )
{'content_hash': '3e198adfae52a9b157c53830ff30bb1f', 'timestamp': '', 'source': 'github', 'line_count': 43, 'max_line_length': 73, 'avg_line_length': 27.209302325581394, 'alnum_prop': 0.6606837606837607, 'repo_name': '5monkeys/reqlice', 'id': '217816d43088e6a28e75ee20629631394d5036f7', 'size': '1192', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'setup.py', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Python', 'bytes': '11402'}]}
module Rougemine::WikiHelper end
{'content_hash': 'f5f18199c6ac062d68762f5aedf7add5', 'timestamp': '', 'source': 'github', 'line_count': 2, 'max_line_length': 28, 'avg_line_length': 16.5, 'alnum_prop': 0.8484848484848485, 'repo_name': 'opsone/rougemine', 'id': '018b502a042fc9cecd87b1410713ba096128e96e', 'size': '33', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'app/helpers/rougemine/wiki_helper.rb', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Ruby', 'bytes': '13428'}]}
#set( $symbol_pound = '#' ) #set( $symbol_dollar = '$' ) #set( $symbol_escape = '\' ) <assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd"> <includeBaseDirectory>false</includeBaseDirectory> <id>arc42-html-export</id> <formats> <format>zip</format> </formats> <fileSets> <fileSet> <directory>target/generated-docs</directory> <outputDirectory>.</outputDirectory> <includes> <include>**/**</include> </includes> <excludes> <exclude>**/**.cache</exclude> </excludes> </fileSet> </fileSets> </assembly>
{'content_hash': '4468fc6dc771d1bb7b3940aa22864c03', 'timestamp': '', 'source': 'github', 'line_count': 24, 'max_line_length': 147, 'avg_line_length': 37.333333333333336, 'alnum_prop': 0.5837053571428571, 'repo_name': 'plafue/arc42-template-archetype', 'id': '632a38a71dc8e2d7fbd5381de5e6dd3bef85d8db', 'size': '896', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'src/main/resources/archetype-resources/src/assembly/assembly.xml', 'mode': '33188', 'license': 'mit', 'language': []}
<?xml version="1.0" encoding="UTF-8"?> <workingSetManager> <workingSet aggregate="true" factoryID="org.eclipse.ui.internal.WorkingSetFactory" id="1446209756016_0" label="Window Working Set" name="Aggregate for window 1446209756016"/> </workingSetManager>
{'content_hash': '0f8f057f4d365414de7f0eba722776f5', 'timestamp': '', 'source': 'github', 'line_count': 4, 'max_line_length': 174, 'avg_line_length': 63.5, 'alnum_prop': 0.7834645669291339, 'repo_name': 'royhpr/KDDProject', 'id': '72d3827480dcf6b75d1228843b96389f51a235e9', 'size': '254', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': '.metadata/.plugins/org.eclipse.ui.workbench/workingsets.xml', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '11649'}, {'name': 'HTML', 'bytes': '46976575'}, {'name': 'Java', 'bytes': '21827'}]}
yield cannot be used in a finally block when the generator is force-closed --FILE-- <?php function gen() { try { echo "before yield\n"; yield; echo "after yield\n"; } finally { echo "before yield in finally\n"; yield; echo "after yield in finally\n"; } echo "after finally\n"; } $gen = gen(); $gen->rewind(); unset($gen); ?> --EXPECTF-- before yield before yield in finally Fatal error: Cannot yield from finally in a force-closed generator in %s on line %d
{'content_hash': '9468ddaf28d99b328453ee5c5901dd6f', 'timestamp': '', 'source': 'github', 'line_count': 28, 'max_line_length': 83, 'avg_line_length': 18.821428571428573, 'alnum_prop': 0.6110056925996205, 'repo_name': 'glayzzle/tests', 'id': 'aada676a68e8e4b9c340d9076d5dbaaba14fbf8b', 'size': '536', 'binary': False, 'copies': '6', 'ref': 'refs/heads/master', 'path': 'zend/generators/errors/yield_in_force_closed_finally_error.phpt', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'C++', 'bytes': '4786'}, {'name': 'JavaScript', 'bytes': '22986'}, {'name': 'PHP', 'bytes': '960521'}]}
package com.opengamma.financial.analytics.model.volatility.local; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.opengamma.core.id.ExternalSchemes; import com.opengamma.engine.ComputationTarget; import com.opengamma.engine.function.FunctionCompilationContext; import com.opengamma.engine.target.ComputationTargetType; import com.opengamma.engine.value.ValueProperties; import com.opengamma.engine.value.ValuePropertyNames; import com.opengamma.engine.value.ValueRequirement; import com.opengamma.engine.value.ValueRequirementNames; import com.opengamma.financial.analytics.model.InstrumentTypeProperties; import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyNamesAndValues; import com.opengamma.financial.analytics.model.volatility.surface.black.BlackVolatilitySurfacePropertyUtils; /** * */ public abstract class EquityDupireLocalVolatilitySurfaceFunction extends DupireLocalVolatilitySurfaceFunction { private static final Logger s_logger = LoggerFactory.getLogger(EquityDupireLocalVolatilitySurfaceFunction.class); @Override public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) { final String targetScheme = target.getUniqueId().getScheme(); return (targetScheme.equalsIgnoreCase(ExternalSchemes.BLOOMBERG_TICKER.getName()) || targetScheme.equalsIgnoreCase(ExternalSchemes.BLOOMBERG_TICKER_WEAK.getName())); } @Override public ComputationTargetType getTargetType() { return ComputationTargetType.PRIMITIVE; // Bloomberg ticker or weak ticker } @Override protected String getInstrumentType() { return InstrumentTypeProperties.EQUITY_OPTION; } /** * Equity requires an additional three properties. * This is to specify the Funding curve used to build the Equity Forwards. * @return ValueProperties specifying any currency, curve name and curve calculation config */ protected ValueProperties getCurrencyProperties() { final ValueProperties equityProperties = createValueProperties() .withAny(ValuePropertyNames.CURVE_CURRENCY) .withAny(ValuePropertyNames.CURVE) .withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG) .get(); return equityProperties; } /** * Equity requires an additional three properties. * This is to specify the Funding curve used to build the Equity Forwards. * @param desiredValue ValueRequirement containing "CurveCurrency" and "FundingCurve" * @return ValueProperties containing specified values */ protected ValueProperties getCurrencyProperties(final ValueRequirement desiredValue) { final String curveCurrency = desiredValue.getConstraint(ValuePropertyNames.CURVE_CURRENCY); final String fundingCurve = desiredValue.getConstraint(ValuePropertyNames.CURVE); final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG); final ValueProperties equityProperties = createValueProperties() .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency) .with(ValuePropertyNames.CURVE, fundingCurve) .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig) .get(); return equityProperties; } /** * Equity requires an additional three properties. * This is to specify the Funding curve used to build the Equity Forwards. * @param desiredValue ValueRequirement containing "CurveCurrency" and "FundingCurve" * @return ValueProperties containing specified values */ protected ValueProperties getCurrencyPropertiesForVolatilitySurface(final ValueRequirement desiredValue) { final String curveCurrency = desiredValue.getConstraint(ValuePropertyNames.CURVE_CURRENCY); final String fundingCurve = desiredValue.getConstraint(ValuePropertyNames.CURVE); final String curveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.CURVE_CALCULATION_CONFIG); final ValueProperties equityProperties = ValueProperties.builder() .with(ValuePropertyNames.CURVE_CURRENCY, curveCurrency) .with(ValuePropertyNames.CURVE, fundingCurve) .with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig) .get(); return equityProperties; } @Override protected ValueProperties getResultProperties(final String parameterizationType) { final ValueProperties equityProperties = getCurrencyProperties(); return LocalVolatilitySurfaceUtils.addAllDupireLocalVolatilitySurfaceProperties(equityProperties, getInstrumentType(), getBlackSmileInterpolatorName(), parameterizationType).get(); } @Override protected ValueProperties getResultProperties(final ValueRequirement desiredValue, final String parameterizationType) { final ValueProperties equityProperties = getCurrencyProperties(desiredValue); return LocalVolatilitySurfaceUtils.addAllDupireLocalVolatilitySurfaceProperties(equityProperties, getInstrumentType(), getBlackSmileInterpolatorName(), parameterizationType, desiredValue).get(); } @Override protected ValueRequirement getVolatilitySurfaceRequirement(final ComputationTarget target, final ValueRequirement desiredValue) { final ValueProperties equityProperties = getCurrencyPropertiesForVolatilitySurface(desiredValue); final ValueProperties properties = BlackVolatilitySurfacePropertyUtils.addAllBlackSurfaceProperties(equityProperties, getInstrumentType(), desiredValue).get(); return new ValueRequirement(ValueRequirementNames.BLACK_VOLATILITY_SURFACE, target.toSpecification(), properties); } /** * Function producing a local volatility surface using a Black volatility surface with spline interpolation */ public static class Spline extends EquityDupireLocalVolatilitySurfaceFunction { @Override protected String getBlackSmileInterpolatorName() { return BlackVolatilitySurfacePropertyNamesAndValues.SPLINE; } } /** * Function producing a local volatility surface using a Black volatility surface with SABR interpolation */ public static class SABR extends EquityDupireLocalVolatilitySurfaceFunction { @Override protected String getBlackSmileInterpolatorName() { return BlackVolatilitySurfacePropertyNamesAndValues.SABR; } } /** * Function producing a local volatility surface using a Black volatility surface with mixed log-normal interpolation */ public static class MixedLogNormal extends EquityDupireLocalVolatilitySurfaceFunction { @Override protected String getBlackSmileInterpolatorName() { return BlackVolatilitySurfacePropertyNamesAndValues.MIXED_LOG_NORMAL; } } }
{'content_hash': '3bdec99944c18abfe1efc21e1c1ee7d2', 'timestamp': '', 'source': 'github', 'line_count': 149, 'max_line_length': 163, 'avg_line_length': 44.95302013422819, 'alnum_prop': 0.8017318602567931, 'repo_name': 'jerome79/OG-Platform', 'id': 'f5650fcb99ce8e907f443e307e92bf9d8eaebd52', 'size': '6836', 'binary': False, 'copies': '6', 'ref': 'refs/heads/master', 'path': 'projects/OG-Financial/src/main/java/com/opengamma/financial/analytics/model/volatility/local/EquityDupireLocalVolatilitySurfaceFunction.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Batchfile', 'bytes': '4064'}, {'name': 'CSS', 'bytes': '212432'}, {'name': 'GAP', 'bytes': '1490'}, {'name': 'Groovy', 'bytes': '11518'}, {'name': 'HTML', 'bytes': '284313'}, {'name': 'Java', 'bytes': '80970799'}, {'name': 'JavaScript', 'bytes': '1528695'}, {'name': 'PLSQL', 'bytes': '105'}, {'name': 'PLpgSQL', 'bytes': '13175'}, {'name': 'Protocol Buffer', 'bytes': '53119'}, {'name': 'SQLPL', 'bytes': '1004'}, {'name': 'Shell', 'bytes': '10958'}]}
import pluginHooks from './plugin-hooks' import initConstructor from './init-constructor' import { KEY_EVENT_LISTENERS, INITIALIZED_KEY, COELEMENT_DATA_KEY_PREFIX } from './util/const' /** * Initialize component. * @param Constructor The coelement class * @param el The element * @param name The coelement name * @return The created coelement instance */ export default (Constructor: any, el: HTMLElement, name?: string): any => { if (!Constructor[INITIALIZED_KEY]) { initConstructor(Constructor, name) } const coelem = new Constructor() // Assigns element to coelement's .el property coelem.el = el if (name) { // Assigns coelement to element's "hidden" property ;(el as any)[COELEMENT_DATA_KEY_PREFIX + name] = coelem } // Initialize event listeners defined by @emit decorator ;(Constructor[KEY_EVENT_LISTENERS] || []).map((listenerBinder: any) => { listenerBinder(el, coelem, name) }) // Executes plugin hooks pluginHooks.forEach(pluginHook => { pluginHook(el, coelem) }) if (typeof coelem.__mount__ === 'function') { coelem.__mount__() } return coelem }
{'content_hash': 'c9ced6318697cf1de715f11e182508da', 'timestamp': '', 'source': 'github', 'line_count': 46, 'max_line_length': 75, 'avg_line_length': 24.652173913043477, 'alnum_prop': 0.6807760141093474, 'repo_name': 'kt3k/capsid', 'id': 'c6267fb0912c5acfe822e11d810f21f368307ed8', 'size': '1134', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'src/init-component.ts', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'HTML', 'bytes': '549'}, {'name': 'JavaScript', 'bytes': '26474'}]}
package fetcher import ( "errors" "math/rand" "time" "github.com/lab2528/go-oneTime/common" "github.com/lab2528/go-oneTime/core" "github.com/lab2528/go-oneTime/core/types" "github.com/lab2528/go-oneTime/log" "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) const ( arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block maxUncleDist = 7 // Maximum allowed backward distance from the chain head maxQueueDist = 32 // Maximum allowed distance from the chain head to queue hashLimit = 256 // Maximum number of unique blocks a peer may have announced blockLimit = 64 // Maximum number of unique blocks a peer may have delivered ) var ( errTerminated = errors.New("terminated") ) // blockRetrievalFn is a callback type for retrieving a block from the local chain. type blockRetrievalFn func(common.Hash) *types.Block // headerRequesterFn is a callback type for sending a header retrieval request. type headerRequesterFn func(common.Hash) error // bodyRequesterFn is a callback type for sending a body retrieval request. type bodyRequesterFn func([]common.Hash) error // headerVerifierFn is a callback type to verify a block's header for fast propagation. type headerVerifierFn func(header *types.Header) error // blockBroadcasterFn is a callback type for broadcasting a block to connected peers. type blockBroadcasterFn func(block *types.Block, propagate bool) // chainHeightFn is a callback type to retrieve the current chain height. type chainHeightFn func() uint64 // chainInsertFn is a callback type to insert a batch of blocks into the local chain. type chainInsertFn func(types.Blocks) (int, error) // peerDropFn is a callback type for dropping a peer detected as malicious. type peerDropFn func(id string) // announce is the hash notification of the availability of a new block in the // network. type announce struct { hash common.Hash // Hash of the block being announced number uint64 // Number of the block being announced (0 = unknown | old protocol) header *types.Header // Header of the block partially reassembled (new protocol) time time.Time // Timestamp of the announcement origin string // Identifier of the peer originating the notification fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block fetchBodies bodyRequesterFn // Fetcher function to retrieve the body of an announced block } // headerFilterTask represents a batch of headers needing fetcher filtering. type headerFilterTask struct { headers []*types.Header // Collection of headers to filter time time.Time // Arrival time of the headers } // headerFilterTask represents a batch of block bodies (transactions and uncles) // needing fetcher filtering. type bodyFilterTask struct { transactions [][]*types.Transaction // Collection of transactions per block bodies uncles [][]*types.Header // Collection of uncles per block bodies time time.Time // Arrival time of the blocks' contents } // inject represents a schedules import operation. type inject struct { origin string block *types.Block } // Fetcher is responsible for accumulating block announcements from various peers // and scheduling them for retrieval. type Fetcher struct { // Various event channels notify chan *announce inject chan *inject blockFilter chan chan []*types.Block headerFilter chan chan *headerFilterTask bodyFilter chan chan *bodyFilterTask done chan common.Hash quit chan struct{} // Announce states announces map[string]int // Per peer announce counts to prevent memory exhaustion announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching fetching map[common.Hash]*announce // Announced blocks, currently fetching fetched map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval completing map[common.Hash]*announce // Blocks with headers, currently body-completing // Block cache queue *prque.Prque // Queue containing the import operations (block number sorted) queues map[string]int // Per peer block counts to prevent memory exhaustion queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports) // Callbacks getBlock blockRetrievalFn // Retrieves a block from the local chain verifyHeader headerVerifierFn // Checks if a block's headers have a valid proof of work broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers chainHeight chainHeightFn // Retrieves the current chain's height insertChain chainInsertFn // Injects a batch of blocks into the chain dropPeer peerDropFn // Drops a peer for misbehaving // Testing hooks announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62) importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62) } // New creates a block fetcher to retrieve blocks based on hash announcements. func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher { return &Fetcher{ notify: make(chan *announce), inject: make(chan *inject), blockFilter: make(chan chan []*types.Block), headerFilter: make(chan chan *headerFilterTask), bodyFilter: make(chan chan *bodyFilterTask), done: make(chan common.Hash), quit: make(chan struct{}), announces: make(map[string]int), announced: make(map[common.Hash][]*announce), fetching: make(map[common.Hash]*announce), fetched: make(map[common.Hash][]*announce), completing: make(map[common.Hash]*announce), queue: prque.New(), queues: make(map[string]int), queued: make(map[common.Hash]*inject), getBlock: getBlock, verifyHeader: verifyHeader, broadcastBlock: broadcastBlock, chainHeight: chainHeight, insertChain: insertChain, dropPeer: dropPeer, } } // Start boots up the announcement based synchroniser, accepting and processing // hash notifications and block fetches until termination requested. func (f *Fetcher) Start() { go f.loop() } // Stop terminates the announcement based synchroniser, canceling all pending // operations. func (f *Fetcher) Stop() { close(f.quit) } // Notify announces the fetcher of the potential availability of a new block in // the network. func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { block := &announce{ hash: hash, number: number, time: time, origin: peer, fetchHeader: headerFetcher, fetchBodies: bodyFetcher, } select { case f.notify <- block: return nil case <-f.quit: return errTerminated } } // Enqueue tries to fill gaps the the fetcher's future import queue. func (f *Fetcher) Enqueue(peer string, block *types.Block) error { op := &inject{ origin: peer, block: block, } select { case f.inject <- op: return nil case <-f.quit: return errTerminated } } // FilterHeaders extracts all the headers that were explicitly requested by the fetcher, // returning those that should be handled differently. func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header { log.Trace("Filtering headers", "headers", len(headers)) // Send the filter channel to the fetcher filter := make(chan *headerFilterTask) select { case f.headerFilter <- filter: case <-f.quit: return nil } // Request the filtering of the header list select { case filter <- &headerFilterTask{headers: headers, time: time}: case <-f.quit: return nil } // Retrieve the headers remaining after filtering select { case task := <-filter: return task.headers case <-f.quit: return nil } } // FilterBodies extracts all the block bodies that were explicitly requested by // the fetcher, returning those that should be handled differently. func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { log.Trace("Filtering bodies", "txs", len(transactions), "uncles", len(uncles)) // Send the filter channel to the fetcher filter := make(chan *bodyFilterTask) select { case f.bodyFilter <- filter: case <-f.quit: return nil, nil } // Request the filtering of the body list select { case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}: case <-f.quit: return nil, nil } // Retrieve the bodies remaining after filtering select { case task := <-filter: return task.transactions, task.uncles case <-f.quit: return nil, nil } } // Loop is the main fetcher loop, checking and processing various notification // events. func (f *Fetcher) loop() { // Iterate the block fetching until a quit is requested fetchTimer := time.NewTimer(0) completeTimer := time.NewTimer(0) for { // Clean up any expired block fetches for hash, announce := range f.fetching { if time.Since(announce.time) > fetchTimeout { f.forgetHash(hash) } } // Import any queued blocks that could potentially fit height := f.chainHeight() for !f.queue.Empty() { op := f.queue.PopItem().(*inject) if f.queueChangeHook != nil { f.queueChangeHook(op.block.Hash(), false) } // If too high up the chain or phase, continue later number := op.block.NumberU64() if number > height+1 { f.queue.Push(op, -float32(op.block.NumberU64())) if f.queueChangeHook != nil { f.queueChangeHook(op.block.Hash(), true) } break } // Otherwise if fresh and still unknown, try and import hash := op.block.Hash() if number+maxUncleDist < height || f.getBlock(hash) != nil { f.forgetBlock(hash) continue } f.insert(op.origin, op.block) } // Wait for an outside event to occur select { case <-f.quit: // Fetcher terminating, abort all operations return case notification := <-f.notify: // A block was announced, make sure the peer isn't DOSing us propAnnounceInMeter.Mark(1) count := f.announces[notification.origin] + 1 if count > hashLimit { log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit) propAnnounceDOSMeter.Mark(1) break } // If we have a valid block number, check that it's potentially useful if notification.number > 0 { if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist) propAnnounceDropMeter.Mark(1) break } } // All is well, schedule the announce if block's not yet downloading if _, ok := f.fetching[notification.hash]; ok { break } if _, ok := f.completing[notification.hash]; ok { break } f.announces[notification.origin] = count f.announced[notification.hash] = append(f.announced[notification.hash], notification) if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 { f.announceChangeHook(notification.hash, true) } if len(f.announced) == 1 { f.rescheduleFetch(fetchTimer) } case op := <-f.inject: // A direct block insertion was requested, try and fill any pending gaps propBroadcastInMeter.Mark(1) f.enqueue(op.origin, op.block) case hash := <-f.done: // A pending import finished, remove all traces of the notification f.forgetHash(hash) f.forgetBlock(hash) case <-fetchTimer.C: // At least one block's timer ran out, check for needing retrieval request := make(map[string][]common.Hash) for hash, announces := range f.announced { if time.Since(announces[0].time) > arriveTimeout-gatherSlack { // Pick a random peer to retrieve from, reset all others announce := announces[rand.Intn(len(announces))] f.forgetHash(hash) // If the block still didn't arrive, queue for fetching if f.getBlock(hash) == nil { request[announce.origin] = append(request[announce.origin], hash) f.fetching[hash] = announce } } } // Send out all block header requests for peer, hashes := range request { log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes) // Create a closure of the fetch and schedule in on a new thread fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes go func() { if f.fetchingHook != nil { f.fetchingHook(hashes) } for _, hash := range hashes { headerFetchMeter.Mark(1) fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals } }() } // Schedule the next fetch if blocks are still pending f.rescheduleFetch(fetchTimer) case <-completeTimer.C: // At least one header's timer ran out, retrieve everything request := make(map[string][]common.Hash) for hash, announces := range f.fetched { // Pick a random peer to retrieve from, reset all others announce := announces[rand.Intn(len(announces))] f.forgetHash(hash) // If the block still didn't arrive, queue for completion if f.getBlock(hash) == nil { request[announce.origin] = append(request[announce.origin], hash) f.completing[hash] = announce } } // Send out all block body requests for peer, hashes := range request { log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes) // Create a closure of the fetch and schedule in on a new thread if f.completingHook != nil { f.completingHook(hashes) } bodyFetchMeter.Mark(int64(len(hashes))) go f.completing[hashes[0]].fetchBodies(hashes) } // Schedule the next fetch if blocks are still pending f.rescheduleComplete(completeTimer) case filter := <-f.headerFilter: // Headers arrived from a remote peer. Extract those that were explicitly // requested by the fetcher, and return everything else so it's delivered // to other parts of the system. var task *headerFilterTask select { case task = <-filter: case <-f.quit: return } headerFilterInMeter.Mark(int64(len(task.headers))) // Split the batch of headers into unknown ones (to return to the caller), // known incomplete ones (requiring body retrievals) and completed blocks. unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{} for _, header := range task.headers { hash := header.Hash() // Filter fetcher-requested headers from other synchronisation algorithms if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil { // If the delivered header does not match the promised number, drop the announcer if header.Number.Uint64() != announce.number { log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number) f.dropPeer(announce.origin) f.forgetHash(hash) continue } // Only keep if not imported by other means if f.getBlock(hash) == nil { announce.header = header announce.time = task.time // If the block is empty (header only), short circuit into the final import queue if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) { log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) block := types.NewBlockWithHeader(header) block.ReceivedAt = task.time complete = append(complete, block) f.completing[hash] = announce continue } // Otherwise add to the list of blocks needing completion incomplete = append(incomplete, announce) } else { log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) f.forgetHash(hash) } } else { // Fetcher doesn't know about it, add to the return list unknown = append(unknown, header) } } headerFilterOutMeter.Mark(int64(len(unknown))) select { case filter <- &headerFilterTask{headers: unknown, time: task.time}: case <-f.quit: return } // Schedule the retrieved headers for body completion for _, announce := range incomplete { hash := announce.header.Hash() if _, ok := f.completing[hash]; ok { continue } f.fetched[hash] = append(f.fetched[hash], announce) if len(f.fetched) == 1 { f.rescheduleComplete(completeTimer) } } // Schedule the header-only blocks for import for _, block := range complete { if announce := f.completing[block.Hash()]; announce != nil { f.enqueue(announce.origin, block) } } case filter := <-f.bodyFilter: // Block bodies arrived, extract any explicitly requested blocks, return the rest var task *bodyFilterTask select { case task = <-filter: case <-f.quit: return } bodyFilterInMeter.Mark(int64(len(task.transactions))) blocks := []*types.Block{} for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ { // Match up a body to any possible completion request matched := false for hash, announce := range f.completing { if f.queued[hash] == nil { txnHash := types.DeriveSha(types.Transactions(task.transactions[i])) uncleHash := types.CalcUncleHash(task.uncles[i]) if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash { // Mark the body matched, reassemble if still unknown matched = true if f.getBlock(hash) == nil { block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i]) block.ReceivedAt = task.time blocks = append(blocks, block) } else { f.forgetHash(hash) } } } } if matched { task.transactions = append(task.transactions[:i], task.transactions[i+1:]...) task.uncles = append(task.uncles[:i], task.uncles[i+1:]...) i-- continue } } bodyFilterOutMeter.Mark(int64(len(task.transactions))) select { case filter <- task: case <-f.quit: return } // Schedule the retrieved blocks for ordered import for _, block := range blocks { if announce := f.completing[block.Hash()]; announce != nil { f.enqueue(announce.origin, block) } } } } } // rescheduleFetch resets the specified fetch timer to the next announce timeout. func (f *Fetcher) rescheduleFetch(fetch *time.Timer) { // Short circuit if no blocks are announced if len(f.announced) == 0 { return } // Otherwise find the earliest expiring announcement earliest := time.Now() for _, announces := range f.announced { if earliest.After(announces[0].time) { earliest = announces[0].time } } fetch.Reset(arriveTimeout - time.Since(earliest)) } // rescheduleComplete resets the specified completion timer to the next fetch timeout. func (f *Fetcher) rescheduleComplete(complete *time.Timer) { // Short circuit if no headers are fetched if len(f.fetched) == 0 { return } // Otherwise find the earliest expiring announcement earliest := time.Now() for _, announces := range f.fetched { if earliest.After(announces[0].time) { earliest = announces[0].time } } complete.Reset(gatherSlack - time.Since(earliest)) } // enqueue schedules a new future import operation, if the block to be imported // has not yet been seen. func (f *Fetcher) enqueue(peer string, block *types.Block) { hash := block.Hash() // Ensure the peer isn't DOSing us count := f.queues[peer] + 1 if count > blockLimit { log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit) propBroadcastDOSMeter.Mark(1) f.forgetHash(hash) return } // Discard any past or too distant blocks if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist) propBroadcastDropMeter.Mark(1) f.forgetHash(hash) return } // Schedule the block for future importing if _, ok := f.queued[hash]; !ok { op := &inject{ origin: peer, block: block, } f.queues[peer] = count f.queued[hash] = op f.queue.Push(op, -float32(block.NumberU64())) if f.queueChangeHook != nil { f.queueChangeHook(op.block.Hash(), true) } log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size()) } } // insert spawns a new goroutine to run a block insertion into the chain. If the // block's number is at the same height as the current import phase, if updates // the phase states accordingly. func (f *Fetcher) insert(peer string, block *types.Block) { hash := block.Hash() // Run the import on a new thread log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash) go func() { defer func() { f.done <- hash }() // If the parent's unknown, abort insertion parent := f.getBlock(block.ParentHash()) if parent == nil { log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash()) return } // Quickly validate the header and propagate the block if it passes switch err := f.verifyHeader(block.Header()); err { case nil: // All ok, quickly propagate to our peers propBroadcastOutTimer.UpdateSince(block.ReceivedAt) go f.broadcastBlock(block, true) case core.BlockFutureErr: // Weird future block, don't fail, but neither propagate default: // Something went very wrong, drop the peer log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) f.dropPeer(peer) return } // Run the actual import and log any issues if _, err := f.insertChain(types.Blocks{block}); err != nil { log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) return } // If import succeeded, broadcast the block propAnnounceOutTimer.UpdateSince(block.ReceivedAt) go f.broadcastBlock(block, false) // Invoke the testing hook if needed if f.importedHook != nil { f.importedHook(block) } }() } // forgetHash removes all traces of a block announcement from the fetcher's // internal state. func (f *Fetcher) forgetHash(hash common.Hash) { // Remove all pending announces and decrement DOS counters for _, announce := range f.announced[hash] { f.announces[announce.origin]-- if f.announces[announce.origin] == 0 { delete(f.announces, announce.origin) } } delete(f.announced, hash) if f.announceChangeHook != nil { f.announceChangeHook(hash, false) } // Remove any pending fetches and decrement the DOS counters if announce := f.fetching[hash]; announce != nil { f.announces[announce.origin]-- if f.announces[announce.origin] == 0 { delete(f.announces, announce.origin) } delete(f.fetching, hash) } // Remove any pending completion requests and decrement the DOS counters for _, announce := range f.fetched[hash] { f.announces[announce.origin]-- if f.announces[announce.origin] == 0 { delete(f.announces, announce.origin) } } delete(f.fetched, hash) // Remove any pending completions and decrement the DOS counters if announce := f.completing[hash]; announce != nil { f.announces[announce.origin]-- if f.announces[announce.origin] == 0 { delete(f.announces, announce.origin) } delete(f.completing, hash) } } // forgetBlock removes all traces of a queued block from the fetcher's internal // state. func (f *Fetcher) forgetBlock(hash common.Hash) { if insert := f.queued[hash]; insert != nil { f.queues[insert.origin]-- if f.queues[insert.origin] == 0 { delete(f.queues, insert.origin) } delete(f.queued, hash) } }
{'content_hash': '523a660ec01e8085add37a5ab94583cf', 'timestamp': '', 'source': 'github', 'line_count': 717, 'max_line_length': 187, 'avg_line_length': 35.01534170153417, 'alnum_prop': 0.6920656416792799, 'repo_name': 'lab2528/go-oneTime', 'id': '9231ab8bf054d07fda75d134e50b75ab5c0f52be', 'size': '25973', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'eth/fetcher/fetcher.go', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Assembly', 'bytes': '42966'}, {'name': 'C', 'bytes': '585129'}, {'name': 'C++', 'bytes': '79669'}, {'name': 'CSS', 'bytes': '133'}, {'name': 'Go', 'bytes': '5744690'}, {'name': 'HTML', 'bytes': '202'}, {'name': 'Java', 'bytes': '30290'}, {'name': 'JavaScript', 'bytes': '491817'}, {'name': 'M4', 'bytes': '25931'}, {'name': 'Makefile', 'bytes': '11133'}, {'name': 'NSIS', 'bytes': '23228'}, {'name': 'Python', 'bytes': '30192'}, {'name': 'Ruby', 'bytes': '1706'}, {'name': 'Shell', 'bytes': '688'}]}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information namespace DotNetNuke.Web.Api.Auth { using System; using System.Net; using System.Net.Http; using System.Net.Http.Headers; using System.Text; using System.Threading; using DotNetNuke.Entities.Portals; using DotNetNuke.Security.Membership; using DotNetNuke.Web.Api.Internal.Auth; /// <summary> /// Digest authentication message handler. /// </summary> public class DigestAuthMessageHandler : AuthMessageHandlerBase { /// <summary> /// Initializes a new instance of the <see cref="DigestAuthMessageHandler"/> class. /// </summary> /// <param name="includeByDefault">Should this handler be included by default on all routes.</param> /// <param name="forceSsl">Should this handler enforce SSL usage.</param> public DigestAuthMessageHandler(bool includeByDefault, bool forceSsl) : base(includeByDefault, forceSsl) { } /// <inheritdoc/> public override string AuthScheme => DigestAuthentication.AuthenticationScheme; /// <inheritdoc/> public override HttpResponseMessage OnInboundRequest(HttpRequestMessage request, CancellationToken cancellationToken) { if (this.NeedsAuthentication(request)) { var portalSettings = PortalController.Instance.GetCurrentSettings(); if (portalSettings != null) { var isStale = TryToAuthenticate(request, portalSettings.PortalId); if (isStale) { var staleResponse = request.CreateResponse(HttpStatusCode.Unauthorized); this.AddStaleWwwAuthenticateHeader(staleResponse); return staleResponse; } } } return base.OnInboundRequest(request, cancellationToken); } /// <inheritdoc/> public override HttpResponseMessage OnOutboundResponse(HttpResponseMessage response, CancellationToken cancellationToken) { if (response.StatusCode == HttpStatusCode.Unauthorized && this.SupportsDigestAuth(response.RequestMessage)) { this.AddWwwAuthenticateHeader(response); } return base.OnOutboundResponse(response, cancellationToken); } private static string CreateNewNonce() { DateTime nonceTime = DateTime.Now + TimeSpan.FromMinutes(1); string expireStr = nonceTime.ToString("G"); byte[] expireBytes = Encoding.Default.GetBytes(expireStr); string nonce = Convert.ToBase64String(expireBytes); nonce = nonce.TrimEnd(new[] { '=' }); return nonce; } private static bool TryToAuthenticate(HttpRequestMessage request, int portalId) { if (request?.Headers.Authorization == null) { return false; } string authHeader = request?.Headers.Authorization.ToString(); var digestAuthentication = new DigestAuthentication(new DigestAuthenticationRequest(authHeader, request.Method.Method), portalId, request.GetIPAddress()); if (digestAuthentication.IsValid) { SetCurrentPrincipal(digestAuthentication.User, request); } else if (digestAuthentication.IsNonceStale) { return true; } return false; } private void AddStaleWwwAuthenticateHeader(HttpResponseMessage response) { this.AddWwwAuthenticateHeader(response, true); } private void AddWwwAuthenticateHeader(HttpResponseMessage response, bool isStale = false) { var value = string.Format("realm=\"DNNAPI\", nonce=\"{0}\", opaque=\"0000000000000000\", stale={1}, algorithm=MD5, qop=\"auth\"", CreateNewNonce(), isStale); response.Headers.WwwAuthenticate.Add(new AuthenticationHeaderValue(this.AuthScheme, value)); } private bool SupportsDigestAuth(HttpRequestMessage request) { return !IsXmlHttpRequest(request) && MembershipProviderConfig.PasswordRetrievalEnabled; } } }
{'content_hash': 'bee72012fd7fa2a59e63e980eaa806c8', 'timestamp': '', 'source': 'github', 'line_count': 121, 'max_line_length': 170, 'avg_line_length': 38.28099173553719, 'alnum_prop': 0.6070811744386874, 'repo_name': 'dnnsoftware/Dnn.Platform', 'id': '174e490bef94c0ac42b58d472e09e52781230520', 'size': '4634', 'binary': False, 'copies': '5', 'ref': 'refs/heads/develop', 'path': 'DNN Platform/DotNetNuke.Web/Api/Auth/DigestAuthMessageHandler.cs', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'ASP.NET', 'bytes': '522038'}, {'name': 'Batchfile', 'bytes': '289'}, {'name': 'C#', 'bytes': '21895919'}, {'name': 'CSS', 'bytes': '920009'}, {'name': 'HTML', 'bytes': '543211'}, {'name': 'JavaScript', 'bytes': '8406477'}, {'name': 'Less', 'bytes': '566334'}, {'name': 'PowerShell', 'bytes': '5984'}, {'name': 'SCSS', 'bytes': '12527'}, {'name': 'Shell', 'bytes': '1429'}, {'name': 'TSQL', 'bytes': '128041'}, {'name': 'TypeScript', 'bytes': '135977'}, {'name': 'Visual Basic .NET', 'bytes': '114706'}, {'name': 'XSLT', 'bytes': '11388'}]}
var File = require('vinyl'); var fs = require('fs'); module.exports = new File({ cwd: process.cwd(), base: 'test/fixtures', path: 'test/fixtures/example.md', contents: fs.readFileSync('test/fixtures/example.md') });
{'content_hash': 'ce5a281d957b5d8371f147b33821c72f', 'timestamp': '', 'source': 'github', 'line_count': 9, 'max_line_length': 55, 'avg_line_length': 25.0, 'alnum_prop': 0.6666666666666666, 'repo_name': 'gakimball/supercollider', 'id': '3cb832fcef44aa8e57b3c3c4165f3be9133aeb77', 'size': '225', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'test/fixtures/test_file.js', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'JavaScript', 'bytes': '27485'}]}
/** * range is set of integers from one integer including "[" to another excluding * ")" * where beginning must be less then ending * @author saba * */ public class IntegerRange { // integer from where begins the range private int beginning; // integer where (excluding) ends the range private int end; /** * constructor * checks if range is correct * if not throws UncorrectRangeException * @param begining * @param end * @throws UncorrectRangeException */ public IntegerRange(int beginning, int end) throws UncorrectRangeException { if (beginning >= end) { throw new UncorrectRangeException("begining : " + beginning + " end " + end + "is not correct range"); } this.end = end; this.beginning = beginning; } public int getBegining() { return beginning; } public int getEnd() { return end; } /** * checks if number is in this range * @param num * @return */ boolean isInRange(int num) { if (num >= beginning && num < end) { return true; } return false; } /** * intersects 2 ranges returns intersection of this ranges * * @param range * @return * @throws UncorrectRangeException */ public IntegerRange intersectWithRange(IntegerRange range) throws UncorrectRangeException { int resBegining = Math.max(range.getBegining(),beginning); int resEnd = Math.min(range.getEnd(),end); IntegerRange res = new IntegerRange(resBegining,resEnd); return res; } }
{'content_hash': 'eb611984e1d7ef7ed0e2ad973a50f9ae', 'timestamp': '', 'source': 'github', 'line_count': 66, 'max_line_length': 92, 'avg_line_length': 22.09090909090909, 'alnum_prop': 0.6803840877914952, 'repo_name': 'freeuni-sdp/FreeUni-SDP-2014-Lab-Testing', 'id': 'f8e1938f9fb9bd0b73ff25af226c351854ad4094', 'size': '1458', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'Range/src/IntegerRange.java', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Java', 'bytes': '81827'}]}
title: Git Verifying Commits localeTitle: Git Verificando Commits --- ## Git Verificando Commits Quando você cria softwares com pessoas de todo o mundo, às vezes é importante validar que os commits e tags são provenientes de uma fonte identificada. O Git suporta assinatura de commits e tags com o GPG. O GitHub mostra quando os commits e tags são assinados. ![Compromissos verificados](https://cloud.githubusercontent.com/assets/25792/14253743/87b504be-fa41-11e5-9140-6dc8b7203c31.png) Quando você visualizar uma confirmação ou tag assinada, verá um selo indicando se a assinatura pode ser verificada usando qualquer uma das chaves GPG do contribuidor enviadas para o GitHub. Você pode fazer upload de suas chaves GPG visitando a página de configurações de chaves. Muitos projetos de código aberto e empresas querem ter certeza de que um commit é de uma fonte verificada. A verificação de assinatura GPG em commits e tags facilita a visualização de quando uma confirmação ou tag é assinada por uma chave verificada que o GitHub conhece. ![Assinatura Verificada](https://cloud.githubusercontent.com/assets/25792/14290042/5b27dab2-fb12-11e5-9ff9-44116a7780ea.png) ### Mais Informações: * [Assinando seu trabalho](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work)
{'content_hash': 'ff89cf4bee45ae2aec4c8cdf42d336f8', 'timestamp': '', 'source': 'github', 'line_count': 18, 'max_line_length': 278, 'avg_line_length': 71.11111111111111, 'alnum_prop': 0.80703125, 'repo_name': 'pahosler/freecodecamp', 'id': '68e2b49007730aad1a8e2c0241555663d4484f55', 'size': '1308', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'guide/portuguese/git/git-verifying-commits/index.md', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'CSS', 'bytes': '35491'}, {'name': 'HTML', 'bytes': '17600'}, {'name': 'JavaScript', 'bytes': '777274'}]}
/* ** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/mgmt/scan.c#4 $ */ /*! \file "scan.c" \brief This file defines the scan profile and the processing function of scan result for SCAN Module. The SCAN Profile selection is part of SCAN MODULE and responsible for defining SCAN Parameters - e.g. MIN_CHANNEL_TIME, number of scan channels. In this file we also define the process of SCAN Result including adding, searching and removing SCAN record from the list. */ /* ** $Log: scan.c $ ** ** 04 15 2014 eason.tsai ** [ALPS01510349] [6595][KK][HotKnot][Reboot][KE][p2pDevFsmRunEventScanDone]Sender reboot automatically with KE about p2pDevFsmRunEventScanDone. ** add debug msg for scan ** ** 03 12 2014 eason.tsai ** [ALPS01070904] [Need Patch] [Volunteer Patch][MT6630][Driver]MT6630 Wi-Fi Patch ** revise for cfg80211 disconnet because of timeout ** ** 08 09 2013 cp.wu ** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module ** 1. integrate scheduled scan functionality ** 2. condition compilation for linux-3.4 & linux-3.8 compatibility ** 3. correct CMD queue access to reduce lock scope ** ** 08 05 2013 terry.wu ** [BORA00002207] [MT6630 Wi-Fi] TXM & MQM Implementation ** 1. Add SW rate definition ** 2. Add HW default rate selection logic from FW ** ** 04 30 2013 eason.tsai ** [BORA00002255] [MT6630 Wi-Fi][Driver] develop ** update 11ac channel setting ** ** 03 12 2013 tsaiyuan.hsu ** [BORA00002222] MT6630 unified MAC RXM ** remove hif_rx_hdr usage. ** ** 03 08 2013 wh.su ** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code ** Remove non-used compiling flag and code ** ** 02 19 2013 cp.wu ** [BORA00002227] [MT6630 Wi-Fi][Driver] Update for Makefile and HIFSYS modifications ** take use of GET_BSS_INFO_BY_INDEX() and MAX_BSS_INDEX macros ** for correctly indexing of BSS-INFO pointers ** ** 01 30 2013 yuche.tsai ** [BORA00002398] [MT6630][Volunteer Patch] P2P Driver Re-Design for Multiple BSS support ** Code first update. ** ** 01 22 2013 cp.wu ** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module ** modification for ucBssIndex migration ** ** 01 16 2013 cp.wu ** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module ** sync for MT6620/MT6628 main trunk change ** ** 12 27 2012 cp.wu ** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module ** sync. for AP timestamp reset detection ** ** 10 25 2012 cp.wu ** [BORA00002227] [MT6630 Wi-Fi][Driver] Update for Makefile and HIFSYS modifications ** sync with MT6630 HIFSYS update. ** ** 09 17 2012 cm.chang ** [BORA00002149] [MT6630 Wi-Fi] Initial software development ** Duplicate source from MT6620 v2.3 driver branch ** (Davinci label: MT6620_WIFI_Driver_V2_3_120913_1942_As_MT6630_Base) ** ** 08 24 2012 cp.wu ** [WCXRP00001269] [MT6620 Wi-Fi][Driver] cfg80211 porting merge back to DaVinci ** . ** ** 08 24 2012 cp.wu ** [WCXRP00001269] [MT6620 Wi-Fi][Driver] cfg80211 porting merge back to DaVinci ** cfg80211 support merge back from ALPS.JB to DaVinci - MT6620 Driver v2.3 branch. * * 07 17 2012 yuche.tsai * NULL * Let netdev bring up. * * 07 17 2012 yuche.tsai * NULL * Compile no error before trial run. * * 06 25 2012 cp.wu * [WCXRP00001258] [MT6620][MT5931][MT6628][Driver] Do not use stale scan result for deciding connection target * drop off scan result which is older than 5 seconds when choosing which BSS to join * * 03 02 2012 terry.wu * NULL * Sync CFG80211 modification from branch 2,2. * * 01 16 2012 cp.wu * [WCXRP00001169] [MT6620 Wi-Fi][Driver] API and behavior modification for preferred band configuration with corresponding network configuration * correct typo. * * 01 16 2012 cp.wu * [MT6620 Wi-Fi][Driver] API and behavior modification for preferred band configuration with corresponding network configuration * add wlanSetPreferBandByNetwork() for glue layer to invoke for setting preferred band configuration corresponding to network type. * * 12 05 2011 cp.wu * [WCXRP00001131] [MT6620 Wi-Fi][Driver][AIS] Implement connect-by-BSSID path * add CONNECT_BY_BSSID policy * * 11 23 2011 cp.wu * [WCXRP00001123] [MT6620 Wi-Fi][Driver] Add option to disable beacon content change detection * add compile option to disable beacon content change detection. * * 11 04 2011 cp.wu * [WCXRP00001085] [MT6628 Wi-Fi][Driver] deprecate old BSS-DESC if timestamp is reset with received beacon/probe response frames * deprecate old BSS-DESC when timestamp in received beacon/probe response frames showed a smaller value than before * * 10 11 2011 cm.chang * [WCXRP00001031] [All Wi-Fi][Driver] Check HT IE length to avoid wrong SCO parameter * Ignore HT OP IE if its length field is not valid * * 09 30 2011 cp.wu * [WCXRP00001021] [MT5931][Driver] Correct scan result generation for conversion between BSS type and operation mode * correct type casting issue. * * 08 23 2011 yuche.tsai * NULL * Fix multicast address list issue. * * 08 11 2011 cp.wu * [WCXRP00000830] [MT6620 Wi-Fi][Firmware] Use MDRDY counter to detect empty channel for shortening scan time * sparse channel detection: * driver: collect sparse channel information with scan-done event * * 08 10 2011 cp.wu * [WCXRP00000922] [MT6620 Wi-Fi][Driver] traverse whole BSS-DESC list for removing * traverse whole BSS-DESC list because BSSID is not unique anymore. * * 07 12 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * for multiple BSS descriptior detecting issue: * 1) check BSSID for infrastructure network * 2) check SSID for AdHoc network * * 07 12 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * check for BSSID for beacons used to update DTIM * * 07 12 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * do not check BSS descriptor for connected flag due to linksys's hidden SSID will use another BSS descriptor and never connected * * 07 11 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * just pass beacons with the same BSSID. * * 07 11 2011 wh.su * [WCXRP00000849] [MT6620 Wi-Fi][Driver] Remove some of the WAPI define for make sure the value is initialize, for customer not enable WAPI * For make sure wapi initial value is set. * * 06 28 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * Do not check for SSID as beacon content change due to the existence of single BSSID with multiple SSID AP configuration * * 06 27 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * 1. correct logic * 2. replace only BSS-DESC which doesn't have a valid SSID. * * 06 27 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * remove unused temporal variable reference. * * 06 27 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * allow to have a single BSSID with multiple SSID to be presented in scanning result * * 06 02 2011 cp.wu * [WCXRP00000757] [MT6620 Wi-Fi][Driver][SCN] take use of RLM API to filter out BSS in disallowed channels * filter out BSS in disallowed channel by * 1. do not add to scan result array if BSS is at disallowed channel * 2. do not allow to search for BSS-DESC in disallowed channels * * 05 02 2011 cm.chang * [WCXRP00000691] [MT6620 Wi-Fi][Driver] Workaround about AP's wrong HT capability IE to have wrong channel number * Refine range of valid channel number * * 05 02 2011 cp.wu * [MT6620 Wi-Fi][Driver] Take parsed result for channel information instead of hardware channel number passed from firmware domain * take parsed result for generating scanning result with channel information. * * 05 02 2011 cm.chang * [WCXRP00000691] [MT6620 Wi-Fi][Driver] Workaround about AP's wrong HT capability IE to have wrong channel number * Check if channel is valided before record ing BSS channel * * 04 18 2011 terry.wu * [WCXRP00000660] [MT6620 Wi-Fi][Driver] Remove flag CFG_WIFI_DIRECT_MOVED * Remove flag CFG_WIFI_DIRECT_MOVED. * * 04 14 2011 cm.chang * [WCXRP00000634] [MT6620 Wi-Fi][Driver][FW] 2nd BSS will not support 40MHz bandwidth for concurrency * . * * 04 12 2011 eddie.chen * [WCXRP00000617] [MT6620 Wi-Fi][DRV/FW] Fix for sigma * Fix the sta index in processing security frame * Simple flow control for TC4 to avoid mgt frames for PS STA to occupy the TC4 * Add debug message. * * 03 25 2011 yuche.tsai * NULL * Always update Bss Type, for Bss Type for P2P Network is changing every time. * * 03 23 2011 yuche.tsai * NULL * Fix concurrent issue when AIS scan result would overwrite p2p scan result. * * 03 14 2011 cp.wu * [WCXRP00000535] [MT6620 Wi-Fi][Driver] Fixed channel operation when AIS and Tethering are operating concurrently * filtering out other BSS coming from adjacent channels * * 03 11 2011 chinglan.wang * [WCXRP00000537] [MT6620 Wi-Fi][Driver] Can not connect to 802.11b/g/n mixed AP with WEP security. * . * * 03 11 2011 cp.wu * [WCXRP00000535] [MT6620 Wi-Fi][Driver] Fixed channel operation when AIS and Tethering are operating concurrently * When fixed channel operation is necessary, AIS-FSM would scan and only connect for BSS on the specific channel * * 02 24 2011 cp.wu * [WCXRP00000490] [MT6620 Wi-Fi][Driver][Win32] modify kalMsleep() implementation because NdisMSleep() won't sleep long enough for specified interval such as 500ms * implement beacon change detection by checking SSID and supported rate. * * 02 22 2011 yuche.tsai * [WCXRP00000480] [Volunteer Patch][MT6620][Driver] WCS IE format issue * Fix WSC big endian issue. * * 02 21 2011 terry.wu * [WCXRP00000476] [MT6620 Wi-Fi][Driver] Clean P2P scan list while removing P2P * Clean P2P scan list while removing P2P. * * 01 27 2011 yuche.tsai * [WCXRP00000399] [Volunteer Patch][MT6620/MT5931][Driver] Fix scan side effect after P2P module separate. * Fix scan channel extension issue when p2p module is not registered. * * 01 26 2011 cm.chang * [WCXRP00000395] [MT6620 Wi-Fi][Driver][FW] Search STA_REC with additional net type index argument * . * * 01 21 2011 cp.wu * [WCXRP00000380] [MT6620 Wi-Fi][Driver] SSID information should come from buffered BSS_DESC_T rather than using beacon-carried information * SSID should come from buffered prBssDesc rather than beacon-carried information * * 01 14 2011 yuche.tsai * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Statsion Record Client List Issue * Fix compile error. * * 01 14 2011 yuche.tsai * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Statsion Record Client List Issue * Memfree for P2P Descriptor & P2P Descriptor List. * * 01 14 2011 yuche.tsai * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Statsion Record Client List Issue * Free P2P Descriptor List & Descriptor under BSS Descriptor. * * 01 04 2011 cp.wu * [WCXRP00000338] [MT6620 Wi-Fi][Driver] Separate kalMemAlloc into kmalloc and vmalloc implementations to ease physically continous memory demands * 1) correct typo in scan.c * 2) TX descriptors, RX descriptos and management buffer should use virtually continous buffer instead of physically contineous one * * 01 04 2011 cp.wu * [WCXRP00000338] [MT6620 Wi-Fi][Driver] Separate kalMemAlloc into kmalloc and vmalloc implementations to ease physically continous memory demands * separate kalMemAlloc() into virtually-continous and physically-continous type to ease slab system pressure * * 12 31 2010 cp.wu * [WCXRP00000327] [MT6620 Wi-Fi][Driver] Improve HEC WHQA 6972 workaround coverage in driver side * while being unloaded, clear all pending interrupt then set LP-own to firmware * * 12 21 2010 cp.wu * [WCXRP00000280] [MT6620 Wi-Fi][Driver] Enable BSS selection with best RCPI policy in SCN module * SCN: enable BEST RSSI selection policy support * * 11 29 2010 cp.wu * [WCXRP00000210] [MT6620 Wi-Fi][Driver][FW] Set RCPI value in STA_REC for initial TX rate selection of auto-rate algorithm * update ucRcpi of STA_RECORD_T for AIS when * 1) Beacons for IBSS merge is received * 2) Associate Response for a connecting peer is received * * 11 03 2010 wh.su * [WCXRP00000124] [MT6620 Wi-Fi] [Driver] Support the dissolve P2P Group * Refine the HT rate disallow TKIP pairwise cipher . * * 10 12 2010 cp.wu * [WCXRP00000091] [MT6620 Wi-Fi][Driver] Add scanning logic to filter out beacons which is received on the folding frequency * trust HT IE if available for 5GHz band * * 10 11 2010 cp.wu * [WCXRP00000091] [MT6620 Wi-Fi][Driver] Add scanning logic to filter out beacons which is received on the folding frequency * add timing and strenght constraint for filtering out beacons with same SSID/TA but received on different channels * * 10 08 2010 wh.su * [WCXRP00000085] [MT6620 Wif-Fi] [Driver] update the modified p2p state machine * update the frog's new p2p state machine. * * 10 01 2010 yuche.tsai * NULL * [MT6620 P2P] Fix Big Endian Issue when parse P2P device name TLV. * * 09 24 2010 cp.wu * [WCXRP00000052] [MT6620 Wi-Fi][Driver] Eliminate Linux Compile Warning * eliminate unused variables which lead gcc to argue * * 09 08 2010 cp.wu * NULL * use static memory pool for storing IEs of scanning result. * * 09 07 2010 yuche.tsai * NULL * When indicate scan result, append IE buffer information in the scan result. * * 09 03 2010 yuche.tsai * NULL * 1. Update Beacon RX count when running SLT. * 2. Ignore Beacon when running SLT, would not update information from Beacon. * * 09 03 2010 kevin.huang * NULL * Refine #include sequence and solve recursive/nested #include issue * * 08 31 2010 kevin.huang * NULL * Use LINK LIST operation to process SCAN result * * 08 29 2010 yuche.tsai * NULL * 1. Fix P2P Descriptor List to be a link list, to avoid link corrupt after Bss Descriptor Free. * 2.. Fix P2P Device Name Length BE issue. * * 08 23 2010 yuche.tsai * NULL * Add P2P Device Found Indication to supplicant * * 08 20 2010 cp.wu * NULL * reset BSS_DESC_T variables before parsing IE due to peer might have been reconfigured. * * 08 20 2010 yuche.tsai * NULL * Workaround for P2P Descriptor Infinite loop issue. * * 08 16 2010 cp.wu * NULL * Replace CFG_SUPPORT_BOW by CFG_ENABLE_BT_OVER_WIFI. * There is no CFG_SUPPORT_BOW in driver domain source. * * 08 16 2010 yuche.tsai * NULL * Modify code of processing Probe Resonse frame for P2P. * * 08 12 2010 yuche.tsai * NULL * Add function to get P2P descriptor of BSS descriptor directly. * * 08 11 2010 yuche.tsai * NULL * Modify Scan result processing for P2P module. * * 08 05 2010 yuche.tsai * NULL * Update P2P Device Discovery result add function. * * 08 03 2010 cp.wu * NULL * surpress compilation warning. * * 07 26 2010 yuche.tsai * * Add support for Probe Request & Response parsing. * * 07 21 2010 cp.wu * * 1) change BG_SCAN to ONLINE_SCAN for consistent term * 2) only clear scanning result when scan is permitted to do * * 07 21 2010 yuche.tsai * * Fix compile error for SCAN module while disabling P2P feature. * * 07 21 2010 yuche.tsai * * Add P2P Scan & Scan Result Parsing & Saving. * * 07 19 2010 wh.su * * update for security supporting. * * 07 19 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * Add Ad-Hoc support to AIS-FSM * * 07 19 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * SCN module is now able to handle multiple concurrent scanning requests * * 07 15 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * driver no longer generates probe request frames * * 07 14 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * remove timer in DRV-SCN. * * 07 09 2010 cp.wu * * 1) separate AIS_FSM state for two kinds of scanning. (OID triggered scan, and scan-for-connection) * 2) eliminate PRE_BSS_DESC_T, Beacon/PrebResp is now parsed in single pass * 3) implment DRV-SCN module, currently only accepts single scan request, other request will be directly dropped by returning BUSY * * 07 08 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository. * * 07 08 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * take use of RLM module for parsing/generating HT IEs for 11n capability * * 07 05 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * 1) ignore RSN checking when RSN is not turned on. * 2) set STA-REC deactivation callback as NULL * 3) add variable initialization API based on PHY configuration * * 07 05 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * correct BSS_DESC_T initialization after allocated. * * 07 02 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * 1) for event packet, no need to fill RFB. * 2) when wlanAdapterStart() failed, no need to initialize state machines * 3) after Beacon/ProbeResp parsing, corresponding BSS_DESC_T should be marked as IE-parsed * * 07 01 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * add scan uninitialization procedure * * 06 30 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * if beacon/probe-resp is received in 2.4GHz bands and there is ELEM_ID_DS_PARAM_SET IE available, * trust IE instead of RMAC information * * 06 29 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * 1) sync to. CMD/EVENT document v0.03 * 2) simplify DTIM period parsing in scan.c only, bss.c no longer parses it again. * 3) send command packet to indicate FW-PM after * a) 1st beacon is received after AIS has connected to an AP * b) IBSS-ALONE has been created * c) IBSS-MERGE has occured * * 06 28 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * send MMPDU in basic rate. * * 06 25 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * modify Beacon/ProbeResp to complete parsing, * because host software has looser memory usage restriction * * 06 23 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * integrate . * * 06 22 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * comment out RLM APIs by CFG_RLM_MIGRATION. * * 06 21 2010 yuche.tsai * [WPD00003839][MT6620 5931][P2P] Feature migration * Update P2P Function call. * * 06 21 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * RSN/PRIVACY compilation flag awareness correction * * 06 21 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * specify correct value for management frames. * * 06 18 2010 cm.chang * [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver * Provide cnmMgtPktAlloc() and alloc/free function of msg/buf * * 06 18 2010 wh.su * [WPD00003840][MT6620 5931] Security migration * migration from MT6620 firmware. * * 06 17 2010 yuche.tsai * [WPD00003839][MT6620 5931][P2P] Feature migration * Fix compile error when enable P2P function. * * 06 15 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * correct when ADHOC support is turned on. * * 06 15 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * add scan.c. * * 06 04 2010 george.huang * [BORA00000678][MT6620]WiFi LP integration * [PM] Support U-APSD for STA mode * * 05 28 2010 wh.su * [BORA00000680][MT6620] Support the statistic for Microsoft os query * adding the TKIP disallow join a HT AP code. * * 05 14 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Add more chance of JOIN retry for BG_SCAN * * 05 12 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Add Power Management - Legacy PS-POLL support. * * 04 29 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * adjsut the pre-authentication code. * * 04 27 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Add Set Slot Time and Beacon Timeout Support for AdHoc Mode * * 04 24 2010 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * g_aprBssInfo[] depends on CFG_SUPPORT_P2P and CFG_SUPPORT_BOW * * 04 19 2010 kevin.huang * [BORA00000714][WIFISYS][New Feature]Beacon Timeout Support * Add Beacon Timeout Support and will send Null frame to diagnose connection * * 04 13 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Add new HW CH macro support * * 04 06 2010 wh.su * [BORA00000680][MT6620] Support the statistic for Microsoft os query * fixed the firmware return the broadcast frame at wrong tc. * * 03 29 2010 wh.su * [BORA00000605][WIFISYS] Phase3 Integration * let the rsn wapi IE always parsing. * * 03 24 2010 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * Not carry HT cap when being associated with b/g only AP * * 03 18 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Solve the compile warning for 'return non-void' function * * 03 16 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Add AdHoc Mode * * 03 10 2010 kevin.huang * [BORA00000654][WIFISYS][New Feature] CNM Module - Ch Manager Support * * * * * * * * * * * * * * * * * Add Channel Manager for arbitration of JOIN and SCAN Req * * 03 03 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * move the AIS specific variable for security to AIS specific structure. * * 03 01 2010 wh.su * [BORA00000605][WIFISYS] Phase3 Integration * Refine the variable and parameter for security. * * 02 26 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Fix No PKT_INFO_T issue * * 02 26 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Update outgoing ProbeRequest Frame's TX data rate * * 02 23 2010 wh.su * [BORA00000592][MT6620 Wi-Fi] Adding the security related code for driver * refine the scan procedure, reduce the WPA and WAPI IE parsing, and move the parsing to the time for join. * * 02 23 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Add support scan channel 1~14 and update scan result's frequency infou1rwduu`wvpghlqg|n`slk+mpdkb * * 02 04 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Add AAA Module Support, Revise Net Type to Net Type Index for array lookup * * 01 27 2010 wh.su * [BORA00000476][Wi-Fi][firmware] Add the security module initialize code * add and fixed some security function. * * 01 22 2010 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * Support protection and bandwidth switch * * 01 20 2010 kevin.huang * [BORA00000569][WIFISYS] Phase 2 Integration Test * Add PHASE_2_INTEGRATION_WORK_AROUND and CFG_SUPPORT_BCM flags * * 01 11 2010 kevin.huang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * Add Deauth and Disassoc Handler * * 01 08 2010 kevin.huang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * * Refine Beacon processing, add read RF channel from RX Status * * 01 04 2010 tehuang.liu * [BORA00000018]Integrate WIFI part into BORA for the 1st time * For working out the first connection Chariot-verified version * * 12 18 2009 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * . * * Dec 12 2009 mtk01104 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Modify u2EstimatedExtraIELen for probe request * * Dec 9 2009 mtk01104 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add HT cap IE to probe request * * Dec 7 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Fix lint warning * * * Dec 3 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Update the process of SCAN Result by adding more Phy Attributes * * Dec 1 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * adjust the function and code for meet the new define * * Nov 30 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Rename u4RSSI to i4RSSI * * Nov 30 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Report event of scan result to host * * Nov 26 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Fix SCAN Record update * * Nov 24 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Revise MGMT Handler with Retain Status and Integrate with TXM * * Nov 23 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add (Ext)Support Rate Set IE to ProbeReq * * Nov 20 2009 mtk02468 * [BORA00000337] To check in codes for FPGA emulation * Removed the use of SW_RFB->u2FrameLength * * Nov 20 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Fix uninitial aucMacAddress[] for ProbeReq * * Nov 16 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add scanSearchBssDescByPolicy() * * Nov 5 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add Send Probe Request Frame * * Oct 30 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * */ /******************************************************************************* * C O M P I L E R F L A G S ******************************************************************************** */ /******************************************************************************* * E X T E R N A L R E F E R E N C E S ******************************************************************************** */ #include "precomp.h" /******************************************************************************* * C O N S T A N T S ******************************************************************************** */ #define REPLICATED_BEACON_TIME_THRESHOLD (3000) #define REPLICATED_BEACON_FRESH_PERIOD (10000) #define REPLICATED_BEACON_STRENGTH_THRESHOLD (32) #define ROAMING_NO_SWING_RCPI_STEP (10) /******************************************************************************* * D A T A T Y P E S ******************************************************************************** */ /******************************************************************************* * P U B L I C D A T A ******************************************************************************** */ /******************************************************************************* * P R I V A T E D A T A ******************************************************************************** */ /******************************************************************************* * M A C R O S ******************************************************************************** */ /******************************************************************************* * F U N C T I O N D E C L A R A T I O N S ******************************************************************************** */ /******************************************************************************* * F U N C T I O N S ******************************************************************************** */ /*----------------------------------------------------------------------------*/ /*! * @brief This function is used by SCN to initialize its variables * * @param (none) * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID scnInit(IN P_ADAPTER_T prAdapter) { P_SCAN_INFO_T prScanInfo; P_BSS_DESC_T prBSSDesc; PUINT_8 pucBSSBuff; UINT_32 i; ASSERT(prAdapter); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); pucBSSBuff = &prScanInfo->aucScanBuffer[0]; DBGLOG(SCN, INFO, ("->scnInit()\n")); /* 4 <1> Reset STATE and Message List */ prScanInfo->eCurrentState = SCAN_STATE_IDLE; prScanInfo->rLastScanCompletedTime = (OS_SYSTIME) 0; LINK_INITIALIZE(&prScanInfo->rPendingMsgList); /* 4 <2> Reset link list of BSS_DESC_T */ kalMemZero((PVOID) pucBSSBuff, SCN_MAX_BUFFER_SIZE); LINK_INITIALIZE(&prScanInfo->rFreeBSSDescList); LINK_INITIALIZE(&prScanInfo->rBSSDescList); for (i = 0; i < CFG_MAX_NUM_BSS_LIST; i++) { prBSSDesc = (P_BSS_DESC_T) pucBSSBuff; LINK_INSERT_TAIL(&prScanInfo->rFreeBSSDescList, &prBSSDesc->rLinkEntry); pucBSSBuff += ALIGN_4(sizeof(BSS_DESC_T)); } /* Check if the memory allocation consist with this initialization function */ ASSERT(((ULONG) pucBSSBuff - (ULONG) &prScanInfo->aucScanBuffer[0]) == SCN_MAX_BUFFER_SIZE); /* reset freest channel information */ prScanInfo->fgIsSparseChannelValid = FALSE; /* reset NLO state */ prScanInfo->fgNloScanning = FALSE; return; } /* end of scnInit() */ VOID scnFreeAllPendingScanRquests (IN P_ADAPTER_T prAdapter) { P_SCAN_INFO_T prScanInfo; P_MSG_HDR_T prMsgHdr; P_MSG_SCN_SCAN_REQ prScanReqMsg; prScanInfo = &(prAdapter->rWifiVar.rScanInfo); /* check for pending scanning requests */ while (!LINK_IS_EMPTY(&(prScanInfo->rPendingMsgList))) { /* load next message from pending list as scan parameters */ LINK_REMOVE_HEAD(&(prScanInfo->rPendingMsgList), prMsgHdr, P_MSG_HDR_T); if(prMsgHdr) { prScanReqMsg = (P_MSG_SCN_SCAN_REQ)prMsgHdr; DBGLOG(SCN, INFO, ("free scan request eMsgId[%d] ucSeqNum [%d] BSSID[%d]!! \n",prMsgHdr->eMsgId, prScanReqMsg->ucSeqNum, prScanReqMsg->ucBssIndex)); cnmMemFree(prAdapter, prMsgHdr); } else { /* should not deliver to this function */ ASSERT(0); } /* switch to next state */ } } /*----------------------------------------------------------------------------*/ /*! * @brief This function is used by SCN to uninitialize its variables * * @param (none) * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID scnUninit(IN P_ADAPTER_T prAdapter) { P_SCAN_INFO_T prScanInfo; ASSERT(prAdapter); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); DBGLOG(SCN, INFO, ("->scnUninit()\n")); scnFreeAllPendingScanRquests(prAdapter); DBGLOG(SCN, INFO, ("scnFreeAllPendingScanrRquests !! \n")); /* 4 <1> Reset STATE and Message List */ prScanInfo->eCurrentState = SCAN_STATE_IDLE; prScanInfo->rLastScanCompletedTime = (OS_SYSTIME) 0; /* NOTE(Kevin): Check rPendingMsgList ? */ /* 4 <2> Reset link list of BSS_DESC_T */ LINK_INITIALIZE(&prScanInfo->rFreeBSSDescList); LINK_INITIALIZE(&prScanInfo->rBSSDescList); return; } /* end of scnUninit() */ /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to given BSSID * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] aucBSSID Given BSSID. * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByBssid(IN P_ADAPTER_T prAdapter, IN UINT_8 aucBSSID[] ) { return scanSearchBssDescByBssidAndSsid(prAdapter, aucBSSID, FALSE, NULL); } /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to given BSSID * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] aucBSSID Given BSSID. * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with single BSSID cases) * @param[in] prSsid Specified SSID * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByBssidAndSsid(IN P_ADAPTER_T prAdapter, IN UINT_8 aucBSSID[], IN BOOLEAN fgCheckSsid, IN P_PARAM_SSID_T prSsid) { P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_BSS_DESC_T prBssDesc; P_BSS_DESC_T prDstBssDesc = (P_BSS_DESC_T) NULL; ASSERT(prAdapter); ASSERT(aucBSSID); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) { if (fgCheckSsid == FALSE || prSsid == NULL) { return prBssDesc; } else { if (EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prSsid->aucSsid, prSsid->u4SsidLen)) { return prBssDesc; } else if (prDstBssDesc == NULL && prBssDesc->fgIsHiddenSSID == TRUE) { prDstBssDesc = prBssDesc; } else if (prBssDesc->eBSSType == BSS_TYPE_P2P_DEVICE) { /* 20120206 frog: Equal BSSID but not SSID, SSID not hidden, SSID must be updated. */ COPY_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prSsid->aucSsid, (UINT_8) (prSsid->u4SsidLen)); return prBssDesc; } } } } return prDstBssDesc; } /* end of scanSearchBssDescByBssid() */ /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to given Transmitter Address. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] aucSrcAddr Given Source Address(TA). * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByTA(IN P_ADAPTER_T prAdapter, IN UINT_8 aucSrcAddr[] ) { return scanSearchBssDescByTAAndSsid(prAdapter, aucSrcAddr, FALSE, NULL); } /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to given Transmitter Address. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] aucSrcAddr Given Source Address(TA). * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with single BSSID cases) * @param[in] prSsid Specified SSID * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByTAAndSsid(IN P_ADAPTER_T prAdapter, IN UINT_8 aucSrcAddr[], IN BOOLEAN fgCheckSsid, IN P_PARAM_SSID_T prSsid) { P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_BSS_DESC_T prBssDesc; P_BSS_DESC_T prDstBssDesc = (P_BSS_DESC_T) NULL; ASSERT(prAdapter); ASSERT(aucSrcAddr); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if (EQUAL_MAC_ADDR(prBssDesc->aucSrcAddr, aucSrcAddr)) { if (fgCheckSsid == FALSE || prSsid == NULL) { return prBssDesc; } else { if (EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prSsid->aucSsid, prSsid->u4SsidLen)) { return prBssDesc; } else if (prDstBssDesc == NULL && prBssDesc->fgIsHiddenSSID == TRUE) { prDstBssDesc = prBssDesc; } } } } return prDstBssDesc; } /* end of scanSearchBssDescByTA() */ /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to * given eBSSType, BSSID and Transmitter Address * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] eBSSType BSS Type of incoming Beacon/ProbeResp frame. * @param[in] aucBSSID Given BSSID of Beacon/ProbeResp frame. * @param[in] aucSrcAddr Given source address (TA) of Beacon/ProbeResp frame. * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchExistingBssDesc(IN P_ADAPTER_T prAdapter, IN ENUM_BSS_TYPE_T eBSSType, IN UINT_8 aucBSSID[], IN UINT_8 aucSrcAddr[] ) { return scanSearchExistingBssDescWithSsid(prAdapter, eBSSType, aucBSSID, aucSrcAddr, FALSE, NULL); } /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to * given eBSSType, BSSID and Transmitter Address * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] eBSSType BSS Type of incoming Beacon/ProbeResp frame. * @param[in] aucBSSID Given BSSID of Beacon/ProbeResp frame. * @param[in] aucSrcAddr Given source address (TA) of Beacon/ProbeResp frame. * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with single BSSID cases) * @param[in] prSsid Specified SSID * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchExistingBssDescWithSsid(IN P_ADAPTER_T prAdapter, IN ENUM_BSS_TYPE_T eBSSType, IN UINT_8 aucBSSID[], IN UINT_8 aucSrcAddr[], IN BOOLEAN fgCheckSsid, IN P_PARAM_SSID_T prSsid) { P_SCAN_INFO_T prScanInfo; P_BSS_DESC_T prBssDesc, prIBSSBssDesc; ASSERT(prAdapter); ASSERT(aucSrcAddr); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); switch (eBSSType) { case BSS_TYPE_P2P_DEVICE: fgCheckSsid = FALSE; case BSS_TYPE_INFRASTRUCTURE: case BSS_TYPE_BOW_DEVICE: { prBssDesc = scanSearchBssDescByBssidAndSsid(prAdapter, aucBSSID, fgCheckSsid, prSsid); /* if (eBSSType == prBssDesc->eBSSType) */ return prBssDesc; } case BSS_TYPE_IBSS: { prIBSSBssDesc = scanSearchBssDescByBssidAndSsid(prAdapter, aucBSSID, fgCheckSsid, prSsid); prBssDesc = scanSearchBssDescByTAAndSsid(prAdapter, aucSrcAddr, fgCheckSsid, prSsid); /* NOTE(Kevin): * Rules to maintain the SCAN Result: * For AdHoc - * CASE I We have TA1(BSSID1), but it change its BSSID to BSSID2 * -> Update TA1 entry's BSSID. * CASE II We have TA1(BSSID1), and get TA1(BSSID1) again * -> Update TA1 entry's contain. * CASE III We have a SCAN result TA1(BSSID1), and TA2(BSSID2). Sooner or * later, TA2 merge into TA1, we get TA2(BSSID1) * -> Remove TA2 first and then replace TA1 entry's TA with TA2, Still have only one entry of BSSID. * CASE IV We have a SCAN result TA1(BSSID1), and another TA2 also merge into BSSID1. * -> Replace TA1 entry's TA with TA2, Still have only one entry. * CASE V New IBSS * -> Add this one to SCAN result. */ if (prBssDesc) { if ((!prIBSSBssDesc) || /* CASE I */ (prBssDesc == prIBSSBssDesc)) { /* CASE II */ return prBssDesc; } else { /* CASE III */ P_LINK_T prBSSDescList; P_LINK_T prFreeBSSDescList; prBSSDescList = &prScanInfo->rBSSDescList; prFreeBSSDescList = &prScanInfo->rFreeBSSDescList; /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry); return prIBSSBssDesc; } } if (prIBSSBssDesc) { /* CASE IV */ return prIBSSBssDesc; } /* CASE V */ break; /* Return NULL; */ } default: break; } return (P_BSS_DESC_T) NULL; } /* end of scanSearchExistingBssDesc() */ /*----------------------------------------------------------------------------*/ /*! * @brief Delete BSS Descriptors from current list according to given Remove Policy. * * @param[in] u4RemovePolicy Remove Policy. * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID scanRemoveBssDescsByPolicy(IN P_ADAPTER_T prAdapter, IN UINT_32 u4RemovePolicy) { P_CONNECTION_SETTINGS_T prConnSettings; P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_LINK_T prFreeBSSDescList; P_BSS_DESC_T prBssDesc; ASSERT(prAdapter); prConnSettings = &(prAdapter->rWifiVar.rConnSettings); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; prFreeBSSDescList = &prScanInfo->rFreeBSSDescList; /* DBGLOG(SCN, TRACE, ("Before Remove - Number Of SCAN Result = %ld\n", */ /* prBSSDescList->u4NumElem)); */ if (u4RemovePolicy & SCN_RM_POLICY_TIMEOUT) { P_BSS_DESC_T prBSSDescNext; OS_SYSTIME rCurrentTime; GET_CURRENT_SYSTIME(&rCurrentTime); /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) { if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) && (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) { /* Don't remove the one currently we are connected. */ continue; } if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(SCN_BSS_DESC_REMOVE_TIMEOUT_SEC))) { /* DBGLOG(SCN, TRACE, ("Remove TIMEOUT BSS DESC(%#x): MAC: "MACSTR", Current Time = %08lx, Update Time = %08lx\n", */ /* prBssDesc, MAC2STR(prBssDesc->aucBSSID), rCurrentTime, prBssDesc->rUpdateTime)); */ /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry); } } } else if (u4RemovePolicy & SCN_RM_POLICY_OLDEST_HIDDEN) { P_BSS_DESC_T prBssDescOldest = (P_BSS_DESC_T) NULL; /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) && (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) { /* Don't remove the one currently we are connected. */ continue; } if (!prBssDesc->fgIsHiddenSSID) { continue; } if (!prBssDescOldest) { /* 1st element */ prBssDescOldest = prBssDesc; continue; } if (TIME_BEFORE(prBssDesc->rUpdateTime, prBssDescOldest->rUpdateTime)) { prBssDescOldest = prBssDesc; } } if (prBssDescOldest) { /* DBGLOG(SCN, TRACE, ("Remove OLDEST HIDDEN BSS DESC(%#x): MAC: "MACSTR", Update Time = %08lx\n", */ /* prBssDescOldest, MAC2STR(prBssDescOldest->aucBSSID), prBssDescOldest->rUpdateTime)); */ /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDescOldest); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDescOldest->rLinkEntry); } } else if (u4RemovePolicy & SCN_RM_POLICY_SMART_WEAKEST) { P_BSS_DESC_T prBssDescWeakest = (P_BSS_DESC_T) NULL; P_BSS_DESC_T prBssDescWeakestSameSSID = (P_BSS_DESC_T) NULL; UINT_32 u4SameSSIDCount = 0; /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) && (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) { /* Don't remove the one currently we are connected. */ continue; } if ((!prBssDesc->fgIsHiddenSSID) && (EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prConnSettings->aucSSID, prConnSettings->ucSSIDLen))) { u4SameSSIDCount++; if (!prBssDescWeakestSameSSID) { prBssDescWeakestSameSSID = prBssDesc; } else if (prBssDesc->ucRCPI < prBssDescWeakestSameSSID->ucRCPI) { prBssDescWeakestSameSSID = prBssDesc; } } if (!prBssDescWeakest) { /* 1st element */ prBssDescWeakest = prBssDesc; continue; } if (prBssDesc->ucRCPI < prBssDescWeakest->ucRCPI) { prBssDescWeakest = prBssDesc; } } if ((u4SameSSIDCount >= SCN_BSS_DESC_SAME_SSID_THRESHOLD) && (prBssDescWeakestSameSSID)) { prBssDescWeakest = prBssDescWeakestSameSSID; } if (prBssDescWeakest) { /* DBGLOG(SCN, TRACE, ("Remove WEAKEST BSS DESC(%#x): MAC: "MACSTR", Update Time = %08lx\n", */ /* prBssDescOldest, MAC2STR(prBssDescOldest->aucBSSID), prBssDescOldest->rUpdateTime)); */ /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDescWeakest); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDescWeakest->rLinkEntry); } } else if (u4RemovePolicy & SCN_RM_POLICY_ENTIRE) { P_BSS_DESC_T prBSSDescNext; LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) { if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) && (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) { /* Don't remove the one currently we are connected. */ continue; } /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry); } } return; } /* end of scanRemoveBssDescsByPolicy() */ /*----------------------------------------------------------------------------*/ /*! * @brief Delete BSS Descriptors from current list according to given BSSID. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] aucBSSID Given BSSID. * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID scanRemoveBssDescByBssid(IN P_ADAPTER_T prAdapter, IN UINT_8 aucBSSID[] ) { P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_LINK_T prFreeBSSDescList; P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T) NULL; P_BSS_DESC_T prBSSDescNext; ASSERT(prAdapter); ASSERT(aucBSSID); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; prFreeBSSDescList = &prScanInfo->rFreeBSSDescList; /* Check if such BSS Descriptor exists in a valid list */ LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) { if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) { /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry); /* BSSID is not unique, so need to traverse whols link-list */ } } return; } /* end of scanRemoveBssDescByBssid() */ /*----------------------------------------------------------------------------*/ /*! * @brief Delete BSS Descriptors from current list according to given band configuration * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] eBand Given band * @param[in] ucBssIndex AIS - Remove IBSS/Infrastructure BSS * BOW - Remove BOW BSS * P2P - Remove P2P BSS * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID scanRemoveBssDescByBandAndNetwork(IN P_ADAPTER_T prAdapter, IN ENUM_BAND_T eBand, IN UINT_8 ucBssIndex) { P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_LINK_T prFreeBSSDescList; P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T) NULL; P_BSS_DESC_T prBSSDescNext; BOOLEAN fgToRemove; ASSERT(prAdapter); ASSERT(eBand <= BAND_NUM); ASSERT(ucBssIndex <= MAX_BSS_INDEX); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; prFreeBSSDescList = &prScanInfo->rFreeBSSDescList; if (eBand == BAND_NULL) { return; /* no need to do anything, keep all scan result */ } /* Check if such BSS Descriptor exists in a valid list */ LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) { fgToRemove = FALSE; if (prBssDesc->eBand == eBand) { switch (GET_BSS_INFO_BY_INDEX(prAdapter, ucBssIndex)->eNetworkType) { case NETWORK_TYPE_AIS: if ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) || (prBssDesc->eBSSType == BSS_TYPE_IBSS)) { fgToRemove = TRUE; } break; case NETWORK_TYPE_P2P: if (prBssDesc->eBSSType == BSS_TYPE_P2P_DEVICE) { fgToRemove = TRUE; } break; case NETWORK_TYPE_BOW: if (prBssDesc->eBSSType == BSS_TYPE_BOW_DEVICE) { fgToRemove = TRUE; } break; default: ASSERT(0); break; } } if (fgToRemove == TRUE) { /* Remove this BSS Desc from the BSS Desc list */ LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc); /* Return this BSS Desc to the free BSS Desc list. */ LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry); } } return; } /* end of scanRemoveBssDescByBand() */ /*----------------------------------------------------------------------------*/ /*! * @brief Clear the CONNECTION FLAG of a specified BSS Descriptor. * * @param[in] aucBSSID Given BSSID. * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID scanRemoveConnFlagOfBssDescByBssid(IN P_ADAPTER_T prAdapter, IN UINT_8 aucBSSID[] ) { P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T) NULL; ASSERT(prAdapter); ASSERT(aucBSSID); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) { prBssDesc->fgIsConnected = FALSE; prBssDesc->fgIsConnecting = FALSE; /* BSSID is not unique, so need to traverse whols link-list */ } } return; } /* end of scanRemoveConnectionFlagOfBssDescByBssid() */ /*----------------------------------------------------------------------------*/ /*! * @brief Allocate new BSS_DESC_T * * @param[in] prAdapter Pointer to the Adapter structure. * * @return Pointer to BSS Descriptor, if has free space. NULL, if has no space. */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanAllocateBssDesc(IN P_ADAPTER_T prAdapter) { P_SCAN_INFO_T prScanInfo; P_LINK_T prFreeBSSDescList; P_BSS_DESC_T prBssDesc; ASSERT(prAdapter); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prFreeBSSDescList = &prScanInfo->rFreeBSSDescList; LINK_REMOVE_HEAD(prFreeBSSDescList, prBssDesc, P_BSS_DESC_T); if (prBssDesc) { P_LINK_T prBSSDescList; kalMemZero(prBssDesc, sizeof(BSS_DESC_T)); #if CFG_ENABLE_WIFI_DIRECT LINK_INITIALIZE(&(prBssDesc->rP2pDeviceList)); prBssDesc->fgIsP2PPresent = FALSE; #endif /* CFG_ENABLE_WIFI_DIRECT */ prBSSDescList = &prScanInfo->rBSSDescList; /* NOTE(Kevin): In current design, this new empty BSS_DESC_T will be * inserted to BSSDescList immediately. */ LINK_INSERT_TAIL(prBSSDescList, &prBssDesc->rLinkEntry); } return prBssDesc; } /* end of scanAllocateBssDesc() */ /*----------------------------------------------------------------------------*/ /*! * @brief This API parses Beacon/ProbeResp frame and insert extracted BSS_DESC_T * with IEs into prAdapter->rWifiVar.rScanInfo.aucScanBuffer * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] prSwRfb Pointer to the receiving frame buffer. * * @return Pointer to BSS Descriptor * NULL if the Beacon/ProbeResp frame is invalid */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanAddToBssDesc(IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb) { P_BSS_DESC_T prBssDesc = NULL; UINT_16 u2CapInfo; ENUM_BSS_TYPE_T eBSSType = BSS_TYPE_INFRASTRUCTURE; PUINT_8 pucIE; UINT_16 u2IELength; UINT_16 u2Offset = 0; P_WLAN_BEACON_FRAME_T prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T) NULL; P_IE_SSID_T prIeSsid = (P_IE_SSID_T) NULL; P_IE_SUPPORTED_RATE_T prIeSupportedRate = (P_IE_SUPPORTED_RATE_T) NULL; P_IE_EXT_SUPPORTED_RATE_T prIeExtSupportedRate = (P_IE_EXT_SUPPORTED_RATE_T) NULL; UINT_8 ucHwChannelNum = 0; UINT_8 ucIeDsChannelNum = 0; UINT_8 ucIeHtChannelNum = 0; BOOLEAN fgIsValidSsid = FALSE, fgEscape = FALSE; PARAM_SSID_T rSsid; UINT_64 u8Timestamp; BOOLEAN fgIsNewBssDesc = FALSE; UINT_32 i; UINT_8 ucSSIDChar; ASSERT(prAdapter); ASSERT(prSwRfb); prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T) prSwRfb->pvHeader; WLAN_GET_FIELD_16(&prWlanBeaconFrame->u2CapInfo, &u2CapInfo); WLAN_GET_FIELD_64(&prWlanBeaconFrame->au4Timestamp[0], &u8Timestamp); /* decide BSS type */ switch (u2CapInfo & CAP_INFO_BSS_TYPE) { case CAP_INFO_ESS: /* It can also be Group Owner of P2P Group. */ eBSSType = BSS_TYPE_INFRASTRUCTURE; break; case CAP_INFO_IBSS: eBSSType = BSS_TYPE_IBSS; break; case 0: /* The P2P Device shall set the ESS bit of the Capabilities field in the Probe Response fame to 0 and IBSS bit to 0. (3.1.2.1.1) */ eBSSType = BSS_TYPE_P2P_DEVICE; break; #if CFG_ENABLE_BT_OVER_WIFI /* @TODO: add rule to identify BOW beacons */ #endif default: return NULL; } /* 4 <1.1> Pre-parse SSID IE */ pucIE = prWlanBeaconFrame->aucInfoElem; u2IELength = (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) - (UINT_16) OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0]); if (u2IELength > CFG_IE_BUFFER_SIZE) { u2IELength = CFG_IE_BUFFER_SIZE; } IE_FOR_EACH(pucIE, u2IELength, u2Offset) { switch (IE_ID(pucIE)) { case ELEM_ID_SSID: if (IE_LEN(pucIE) <= ELEM_MAX_LEN_SSID) { ucSSIDChar = '\0'; /* D-Link DWL-900AP+ */ if (IE_LEN(pucIE) == 0) { fgIsValidSsid = FALSE; } /* Cisco AP1230A - (IE_LEN(pucIE) == 1) && (SSID_IE(pucIE)->aucSSID[0] == '\0') */ /* Linksys WRK54G/WL520g - (IE_LEN(pucIE) == n) && (SSID_IE(pucIE)->aucSSID[0~(n-1)] == '\0') */ else { for (i = 0; i < IE_LEN(pucIE); i++) { ucSSIDChar |= SSID_IE(pucIE)->aucSSID[i]; } if (ucSSIDChar) { fgIsValidSsid = TRUE; } } /* Update SSID to BSS Descriptor only if SSID is not hidden. */ if (fgIsValidSsid == TRUE) { COPY_SSID(rSsid.aucSsid, rSsid.u4SsidLen, SSID_IE(pucIE)->aucSSID, SSID_IE(pucIE)->ucLength); } } fgEscape = TRUE; break; default: break; } if (fgEscape == TRUE) { break; } } /* 4 <1.2> Replace existing BSS_DESC_T or allocate a new one */ prBssDesc = scanSearchExistingBssDescWithSsid(prAdapter, eBSSType, (PUINT_8) prWlanBeaconFrame->aucBSSID, (PUINT_8) prWlanBeaconFrame->aucSrcAddr, fgIsValidSsid, fgIsValidSsid == TRUE ? &rSsid : NULL); if (prBssDesc == (P_BSS_DESC_T) NULL) { fgIsNewBssDesc = TRUE; do { /* 4 <1.2.1> First trial of allocation */ prBssDesc = scanAllocateBssDesc(prAdapter); if (prBssDesc) { break; } /* 4 <1.2.2> Hidden is useless, remove the oldest hidden ssid. (for passive scan) */ scanRemoveBssDescsByPolicy(prAdapter, (SCN_RM_POLICY_EXCLUDE_CONNECTED | SCN_RM_POLICY_OLDEST_HIDDEN | SCN_RM_POLICY_TIMEOUT)); /* 4 <1.2.3> Second tail of allocation */ prBssDesc = scanAllocateBssDesc(prAdapter); if (prBssDesc) { break; } /* 4 <1.2.4> Remove the weakest one */ /* If there are more than half of BSS which has the same ssid as connection * setting, remove the weakest one from them. * Else remove the weakest one. */ scanRemoveBssDescsByPolicy(prAdapter, (SCN_RM_POLICY_EXCLUDE_CONNECTED | SCN_RM_POLICY_SMART_WEAKEST)); /* 4 <1.2.5> reallocation */ prBssDesc = scanAllocateBssDesc(prAdapter); if (prBssDesc) { break; } /* 4 <1.2.6> no space, should not happen */ /* ASSERT(0); // still no space available ? */ return NULL; } while (FALSE); } else { OS_SYSTIME rCurrentTime; /* WCXRP00000091 */ /* if the received strength is much weaker than the original one, */ /* ignore it due to it might be received on the folding frequency */ GET_CURRENT_SYSTIME(&rCurrentTime); ASSERT(prSwRfb->prRxStatusGroup3); if (prBssDesc->eBSSType != eBSSType) { prBssDesc->eBSSType = eBSSType; } else if (HAL_RX_STATUS_GET_CHNL_NUM(prSwRfb->prRxStatus) != prBssDesc->ucChannelNum && prBssDesc->ucRCPI > HAL_RX_STATUS_GET_RCPI(prSwRfb->prRxStatusGroup3)) { /* for signal strength is too much weaker and previous beacon is not stale */ ASSERT(prSwRfb->prRxStatusGroup3); if ((prBssDesc->ucRCPI - HAL_RX_STATUS_GET_RCPI(prSwRfb->prRxStatusGroup3)) >= REPLICATED_BEACON_STRENGTH_THRESHOLD && rCurrentTime - prBssDesc->rUpdateTime <= REPLICATED_BEACON_FRESH_PERIOD) { return prBssDesc; } /* for received beacons too close in time domain */ else if (rCurrentTime - prBssDesc->rUpdateTime <= REPLICATED_BEACON_TIME_THRESHOLD) { return prBssDesc; } } /* if Timestamp has been reset, re-generate BSS DESC 'cause AP should have reset itself */ if (prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE && u8Timestamp < prBssDesc->u8TimeStamp.QuadPart) { BOOLEAN fgIsConnected, fgIsConnecting; /* set flag for indicating this is a new BSS-DESC */ fgIsNewBssDesc = TRUE; /* backup 2 flags for APs which reset timestamp unexpectedly */ fgIsConnected = prBssDesc->fgIsConnected; fgIsConnecting = prBssDesc->fgIsConnecting; scanRemoveBssDescByBssid(prAdapter, prBssDesc->aucBSSID); prBssDesc = scanAllocateBssDesc(prAdapter); if (!prBssDesc) { return NULL; } /* restore */ prBssDesc->fgIsConnected = fgIsConnected; prBssDesc->fgIsConnecting = fgIsConnecting; } } #if 1 prBssDesc->u2RawLength = prSwRfb->u2PacketLen; kalMemCopy(prBssDesc->aucRawBuf, prWlanBeaconFrame, prBssDesc->u2RawLength); #endif /* NOTE: Keep consistency of Scan Record during JOIN process */ if (fgIsNewBssDesc == FALSE && prBssDesc->fgIsConnecting) { return prBssDesc; } /* 4 <2> Get information from Fixed Fields */ prBssDesc->eBSSType = eBSSType; /* Update the latest BSS type information. */ COPY_MAC_ADDR(prBssDesc->aucSrcAddr, prWlanBeaconFrame->aucSrcAddr); COPY_MAC_ADDR(prBssDesc->aucBSSID, prWlanBeaconFrame->aucBSSID); prBssDesc->u8TimeStamp.QuadPart = u8Timestamp; WLAN_GET_FIELD_16(&prWlanBeaconFrame->u2BeaconInterval, &prBssDesc->u2BeaconInterval); prBssDesc->u2CapInfo = u2CapInfo; /* 4 <2.1> Retrieve IEs for later parsing */ u2IELength = (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) - (UINT_16) OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0]); if (u2IELength > CFG_IE_BUFFER_SIZE) { u2IELength = CFG_IE_BUFFER_SIZE; prBssDesc->fgIsIEOverflow = TRUE; } else { prBssDesc->fgIsIEOverflow = FALSE; } prBssDesc->u2IELength = u2IELength; kalMemCopy(prBssDesc->aucIEBuf, prWlanBeaconFrame->aucInfoElem, u2IELength); /* 4 <2.2> reset prBssDesc variables in case that AP has been reconfigured */ prBssDesc->fgIsERPPresent = FALSE; prBssDesc->fgIsHTPresent = FALSE; prBssDesc->eSco = CHNL_EXT_SCN; prBssDesc->fgIEWAPI = FALSE; prBssDesc->fgIERSN = FALSE; prBssDesc->fgIEWPA = FALSE; prBssDesc->eChannelWidth = CW_20_40MHZ; /*Reset VHT OP IE relative settings*/ prBssDesc->ucCenterFreqS1 = 0; prBssDesc->ucCenterFreqS2 = 0; /* 4 <3.1> Full IE parsing on SW_RFB_T */ pucIE = prWlanBeaconFrame->aucInfoElem; IE_FOR_EACH(pucIE, u2IELength, u2Offset) { switch (IE_ID(pucIE)) { case ELEM_ID_SSID: if ((!prIeSsid) && /* NOTE(Kevin): for Atheros IOT #1 */ (IE_LEN(pucIE) <= ELEM_MAX_LEN_SSID)) { BOOLEAN fgIsHiddenSSID = FALSE; ucSSIDChar = '\0'; prIeSsid = (P_IE_SSID_T) pucIE; /* D-Link DWL-900AP+ */ if (IE_LEN(pucIE) == 0) { fgIsHiddenSSID = TRUE; } /* Cisco AP1230A - (IE_LEN(pucIE) == 1) && (SSID_IE(pucIE)->aucSSID[0] == '\0') */ /* Linksys WRK54G/WL520g - (IE_LEN(pucIE) == n) && (SSID_IE(pucIE)->aucSSID[0~(n-1)] == '\0') */ else { for (i = 0; i < IE_LEN(pucIE); i++) { ucSSIDChar |= SSID_IE(pucIE)->aucSSID[i]; } if (!ucSSIDChar) { fgIsHiddenSSID = TRUE; } } /* Update SSID to BSS Descriptor only if SSID is not hidden. */ if (!fgIsHiddenSSID) { COPY_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, SSID_IE(pucIE)->aucSSID, SSID_IE(pucIE)->ucLength); } } break; case ELEM_ID_SUP_RATES: /* NOTE(Kevin): Buffalo WHR-G54S's supported rate set IE exceed 8. * IE_LEN(pucIE) == 12, "1(B), 2(B), 5.5(B), 6(B), 9(B), 11(B), * 12(B), 18(B), 24(B), 36(B), 48(B), 54(B)" */ /* TP-LINK will set extra and incorrect ie with ELEM_ID_SUP_RATES */ if ((!prIeSupportedRate) && (IE_LEN(pucIE) <= RATE_NUM_SW)) { prIeSupportedRate = SUP_RATES_IE(pucIE); } break; case ELEM_ID_DS_PARAM_SET: if (IE_LEN(pucIE) == ELEM_MAX_LEN_DS_PARAMETER_SET) { ucIeDsChannelNum = DS_PARAM_IE(pucIE)->ucCurrChnl; } break; case ELEM_ID_TIM: if (IE_LEN(pucIE) <= ELEM_MAX_LEN_TIM) { prBssDesc->ucDTIMPeriod = TIM_IE(pucIE)->ucDTIMPeriod; } break; case ELEM_ID_IBSS_PARAM_SET: if (IE_LEN(pucIE) == ELEM_MAX_LEN_IBSS_PARAMETER_SET) { prBssDesc->u2ATIMWindow = IBSS_PARAM_IE(pucIE)->u2ATIMWindow; } break; #if 0 /* CFG_SUPPORT_802_11D */ case ELEM_ID_COUNTRY_INFO: prBssDesc->prIECountry = (P_IE_COUNTRY_T) pucIE; break; #endif case ELEM_ID_ERP_INFO: if (IE_LEN(pucIE) == ELEM_MAX_LEN_ERP) { prBssDesc->fgIsERPPresent = TRUE; } break; case ELEM_ID_EXTENDED_SUP_RATES: if (!prIeExtSupportedRate) { prIeExtSupportedRate = EXT_SUP_RATES_IE(pucIE); } break; case ELEM_ID_RSN: if (rsnParseRsnIE(prAdapter, RSN_IE(pucIE), &prBssDesc->rRSNInfo)) { prBssDesc->fgIERSN = TRUE; prBssDesc->u2RsnCap = prBssDesc->rRSNInfo.u2RsnCap; if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) { rsnCheckPmkidCache(prAdapter, prBssDesc); } } break; case ELEM_ID_HT_CAP: prBssDesc->fgIsHTPresent = TRUE; break; case ELEM_ID_HT_OP: if (IE_LEN(pucIE) != (sizeof(IE_HT_OP_T) - 2)) { break; } if ((((P_IE_HT_OP_T) pucIE)->ucInfo1 & HT_OP_INFO1_SCO) != CHNL_EXT_RES) { prBssDesc->eSco = (ENUM_CHNL_EXT_T) (((P_IE_HT_OP_T) pucIE)->ucInfo1 & HT_OP_INFO1_SCO); } ucIeHtChannelNum = ((P_IE_HT_OP_T) pucIE)->ucPrimaryChannel; break; case ELEM_ID_VHT_CAP: prBssDesc->fgIsVHTPresent = TRUE; break; case ELEM_ID_VHT_OP: if (IE_LEN(pucIE) != (sizeof(IE_VHT_OP_T) - 2)) { break; } prBssDesc->eChannelWidth = (ENUM_CHANNEL_WIDTH_T) (((P_IE_VHT_OP_T) pucIE)->ucVhtOperation[0]); prBssDesc->ucCenterFreqS1 = (ENUM_CHANNEL_WIDTH_T) (((P_IE_VHT_OP_T) pucIE)->ucVhtOperation[1]); prBssDesc->ucCenterFreqS2 = (ENUM_CHANNEL_WIDTH_T) (((P_IE_VHT_OP_T) pucIE)->ucVhtOperation[2]); break; #if CFG_SUPPORT_WAPI case ELEM_ID_WAPI: if (wapiParseWapiIE(WAPI_IE(pucIE), &prBssDesc->rIEWAPI)) { prBssDesc->fgIEWAPI = TRUE; } break; #endif case ELEM_ID_VENDOR: /* ELEM_ID_P2P, ELEM_ID_WMM */ { UINT_8 ucOuiType; UINT_16 u2SubTypeVersion; if (rsnParseCheckForWFAInfoElem (prAdapter, pucIE, &ucOuiType, &u2SubTypeVersion)) { if ((ucOuiType == VENDOR_OUI_TYPE_WPA) && (u2SubTypeVersion == VERSION_WPA) && (rsnParseWpaIE(prAdapter, WPA_IE(pucIE), &prBssDesc->rWPAInfo))) { prBssDesc->fgIEWPA = TRUE; } } #if CFG_ENABLE_WIFI_DIRECT if (prAdapter->fgIsP2PRegistered) { if ((p2pFuncParseCheckForP2PInfoElem(prAdapter, pucIE, &ucOuiType)) && (ucOuiType == VENDOR_OUI_TYPE_P2P)) { prBssDesc->fgIsP2PPresent = TRUE; } } #endif /* CFG_ENABLE_WIFI_DIRECT */ } break; /* no default */ } } /* 4 <3.2> Save information from IEs - SSID */ /* Update Flag of Hidden SSID for used in SEARCH STATE. */ /* NOTE(Kevin): in current driver, the ucSSIDLen == 0 represent * all cases of hidden SSID. * If the fgIsHiddenSSID == TRUE, it means we didn't get the ProbeResp with * valid SSID. */ if (prBssDesc->ucSSIDLen == 0) { prBssDesc->fgIsHiddenSSID = TRUE; } else { prBssDesc->fgIsHiddenSSID = FALSE; } /* 4 <3.3> Check rate information in related IEs. */ if (prIeSupportedRate || prIeExtSupportedRate) { rateGetRateSetFromIEs(prIeSupportedRate, prIeExtSupportedRate, &prBssDesc->u2OperationalRateSet, &prBssDesc->u2BSSBasicRateSet, &prBssDesc->fgIsUnknownBssBasicRate); } /* 4 <4> Update information from HIF RX Header */ { P_HW_MAC_RX_DESC_T prRxStatus; UINT_8 ucRxRCPI; prRxStatus = prSwRfb->prRxStatus; ASSERT(prRxStatus); /* 4 <4.1> Get TSF comparison result */ prBssDesc->fgIsLargerTSF = HAL_RX_STATUS_GET_TCL(prRxStatus); /* 4 <4.2> Get Band information */ prBssDesc->eBand = HAL_RX_STATUS_GET_RF_BAND(prRxStatus); /* 4 <4.2> Get channel and RCPI information */ ucHwChannelNum = HAL_RX_STATUS_GET_CHNL_NUM(prRxStatus); ASSERT(prSwRfb->prRxStatusGroup3); ucRxRCPI = (UINT_8) HAL_RX_STATUS_GET_RCPI(prSwRfb->prRxStatusGroup3); if (BAND_2G4 == prBssDesc->eBand) { /* Update RCPI if in right channel */ if (ucIeDsChannelNum >= 1 && ucIeDsChannelNum <= 14) { /* Receive Beacon/ProbeResp frame from adjacent channel. */ if ((ucIeDsChannelNum == ucHwChannelNum) || (ucRxRCPI > prBssDesc->ucRCPI)) { prBssDesc->ucRCPI = ucRxRCPI; } /* trust channel information brought by IE */ prBssDesc->ucChannelNum = ucIeDsChannelNum; } else if (ucIeHtChannelNum >= 1 && ucIeHtChannelNum <= 14) { /* Receive Beacon/ProbeResp frame from adjacent channel. */ if ((ucIeHtChannelNum == ucHwChannelNum) || (ucRxRCPI > prBssDesc->ucRCPI)) { prBssDesc->ucRCPI = ucRxRCPI; } /* trust channel information brought by IE */ prBssDesc->ucChannelNum = ucIeHtChannelNum; } else { prBssDesc->ucRCPI = ucRxRCPI; prBssDesc->ucChannelNum = ucHwChannelNum; } } /* 5G Band */ else { if (ucIeHtChannelNum >= 1 && ucIeHtChannelNum < 200) { /* Receive Beacon/ProbeResp frame from adjacent channel. */ if ((ucIeHtChannelNum == ucHwChannelNum) || (ucRxRCPI > prBssDesc->ucRCPI)) { prBssDesc->ucRCPI = ucRxRCPI; } /* trust channel information brought by IE */ prBssDesc->ucChannelNum = ucIeHtChannelNum; } else { /* Always update RCPI */ prBssDesc->ucRCPI = ucRxRCPI; prBssDesc->ucChannelNum = ucHwChannelNum; } } } /* 4 <5> Check IE information corret or not */ if (!rlmDomainIsValidRfSetting(prAdapter, prBssDesc->eBand, prBssDesc->ucChannelNum, prBssDesc->eSco, prBssDesc->eChannelWidth, prBssDesc->ucCenterFreqS1, prBssDesc->ucCenterFreqS2)) { /*Dump IE Inforamtion*/ DBGLOG(RLM, WARN, ("ScanAddToBssDesc IE Information\n")); DBGLOG(RLM, WARN, ("IE Length = %d\n", u2IELength)); DBGLOG_MEM8(RLM, WARN, pucIE, u2IELength); /*Error Handling for Non-predicted IE - Fixed to set 20MHz*/ prBssDesc->eChannelWidth = CW_20_40MHZ; prBssDesc->ucCenterFreqS1 = 0; prBssDesc->ucCenterFreqS2 = 0; prBssDesc->eSco = CHNL_EXT_SCN; } /* 4 <6> PHY type setting */ prBssDesc->ucPhyTypeSet = 0; if (BAND_2G4 == prBssDesc->eBand) { /* check if support 11n */ if (prBssDesc->fgIsHTPresent) { prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_HT; } /* if not 11n only */ if (!(prBssDesc->u2BSSBasicRateSet & RATE_SET_BIT_HT_PHY)) { /* check if support 11g */ if ((prBssDesc->u2OperationalRateSet & RATE_SET_OFDM) || prBssDesc->fgIsERPPresent) { prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_ERP; } /* if not 11g only */ if (!(prBssDesc->u2BSSBasicRateSet & RATE_SET_OFDM)) { /* check if support 11b */ if ((prBssDesc->u2OperationalRateSet & RATE_SET_HR_DSSS)) { prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_HR_DSSS; } } } } else { /* (BAND_5G == prBssDesc->eBande) */ /* check if support 11n */ if (prBssDesc->fgIsVHTPresent) { prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_VHT; } if (prBssDesc->fgIsHTPresent) { prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_HT; } /* if not 11n only */ if (!(prBssDesc->u2BSSBasicRateSet & RATE_SET_BIT_HT_PHY)) { /* Support 11a definitely */ prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_OFDM; /* ASSERT(!(prBssDesc->u2OperationalRateSet & RATE_SET_HR_DSSS)); */ } } /* 4 <7> Update BSS_DESC_T's Last Update TimeStamp. */ GET_CURRENT_SYSTIME(&prBssDesc->rUpdateTime); return prBssDesc; } /*----------------------------------------------------------------------------*/ /*! * @brief Convert the Beacon or ProbeResp Frame in SW_RFB_T to scan result for query * * @param[in] prSwRfb Pointer to the receiving SW_RFB_T structure. * * @retval WLAN_STATUS_SUCCESS It is a valid Scan Result and been sent to the host. * @retval WLAN_STATUS_FAILURE It is not a valid Scan Result. */ /*----------------------------------------------------------------------------*/ WLAN_STATUS scanAddScanResult(IN P_ADAPTER_T prAdapter, IN P_BSS_DESC_T prBssDesc, IN P_SW_RFB_T prSwRfb) { P_SCAN_INFO_T prScanInfo; UINT_8 aucRatesEx[PARAM_MAX_LEN_RATES_EX]; P_WLAN_BEACON_FRAME_T prWlanBeaconFrame; PARAM_MAC_ADDRESS rMacAddr; PARAM_SSID_T rSsid; ENUM_PARAM_NETWORK_TYPE_T eNetworkType; PARAM_802_11_CONFIG_T rConfiguration; ENUM_PARAM_OP_MODE_T eOpMode; UINT_8 ucRateLen = 0; UINT_32 i; ASSERT(prAdapter); ASSERT(prSwRfb); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); if (prBssDesc->eBand == BAND_2G4) { if ((prBssDesc->u2OperationalRateSet & RATE_SET_OFDM) || prBssDesc->fgIsERPPresent) { eNetworkType = PARAM_NETWORK_TYPE_OFDM24; } else { eNetworkType = PARAM_NETWORK_TYPE_DS; } } else { ASSERT(prBssDesc->eBand == BAND_5G); eNetworkType = PARAM_NETWORK_TYPE_OFDM5; } if (prBssDesc->eBSSType == BSS_TYPE_P2P_DEVICE) { /* NOTE(Kevin): Not supported by WZC(TBD) */ return WLAN_STATUS_FAILURE; } prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T) prSwRfb->pvHeader; COPY_MAC_ADDR(rMacAddr, prWlanBeaconFrame->aucBSSID); COPY_SSID(rSsid.aucSsid, rSsid.u4SsidLen, prBssDesc->aucSSID, prBssDesc->ucSSIDLen); rConfiguration.u4Length = sizeof(PARAM_802_11_CONFIG_T); rConfiguration.u4BeaconPeriod = (UINT_32) prWlanBeaconFrame->u2BeaconInterval; rConfiguration.u4ATIMWindow = prBssDesc->u2ATIMWindow; rConfiguration.u4DSConfig = nicChannelNum2Freq(prBssDesc->ucChannelNum); rConfiguration.rFHConfig.u4Length = sizeof(PARAM_802_11_CONFIG_FH_T); rateGetDataRatesFromRateSet(prBssDesc->u2OperationalRateSet, 0, aucRatesEx, &ucRateLen); /* NOTE(Kevin): Set unused entries, if any, at the end of the array to 0. * from OID_802_11_BSSID_LIST */ for (i = ucRateLen; i < sizeof(aucRatesEx) / sizeof(aucRatesEx[0]); i++) { aucRatesEx[i] = 0; } switch (prBssDesc->eBSSType) { case BSS_TYPE_IBSS: eOpMode = NET_TYPE_IBSS; break; case BSS_TYPE_INFRASTRUCTURE: case BSS_TYPE_P2P_DEVICE: case BSS_TYPE_BOW_DEVICE: default: eOpMode = NET_TYPE_INFRA; break; } DBGLOG(SCN, TRACE, ("ind %s %d %d\n", prBssDesc->aucSSID, prBssDesc->ucChannelNum, prBssDesc->ucRCPI)); kalIndicateBssInfo(prAdapter->prGlueInfo, (PUINT_8) prSwRfb->pvHeader, prSwRfb->u2PacketLen, prBssDesc->ucChannelNum, RCPI_TO_dBm(prBssDesc->ucRCPI)); nicAddScanResult(prAdapter, rMacAddr, &rSsid, prWlanBeaconFrame->u2CapInfo & CAP_INFO_PRIVACY ? 1 : 0, RCPI_TO_dBm(prBssDesc->ucRCPI), eNetworkType, &rConfiguration, eOpMode, aucRatesEx, prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen, (PUINT_8) ((ULONG) (prSwRfb->pvHeader) + WLAN_MAC_MGMT_HEADER_LEN)); return WLAN_STATUS_SUCCESS; } /* end of scanAddScanResult() */ BOOLEAN scanCheckBssIsLegal(IN P_ADAPTER_T prAdapter, P_BSS_DESC_T prBssDesc) { BOOLEAN fgAddToScanResult = FALSE; ENUM_BAND_T eBand; UINT_8 ucChannel; ASSERT(prAdapter); /* check the channel is in the legal doamin */ if (rlmDomainIsLegalChannel(prAdapter, prBssDesc->eBand, prBssDesc->ucChannelNum) == TRUE) { /* check ucChannelNum/eBand for adjacement channel filtering */ if (cnmAisInfraChannelFixed(prAdapter, &eBand, &ucChannel) == TRUE && (eBand != prBssDesc->eBand || ucChannel != prBssDesc->ucChannelNum)) { fgAddToScanResult = FALSE; } else { fgAddToScanResult = TRUE; } } return fgAddToScanResult; } /*----------------------------------------------------------------------------*/ /*! * @brief Parse the content of given Beacon or ProbeResp Frame. * * @param[in] prSwRfb Pointer to the receiving SW_RFB_T structure. * * @retval WLAN_STATUS_SUCCESS if not report this SW_RFB_T to host * @retval WLAN_STATUS_PENDING if report this SW_RFB_T to host as scan result */ /*----------------------------------------------------------------------------*/ WLAN_STATUS scanProcessBeaconAndProbeResp(IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb) { P_SCAN_INFO_T prScanInfo; P_CONNECTION_SETTINGS_T prConnSettings; P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T) NULL; WLAN_STATUS rStatus = WLAN_STATUS_SUCCESS; P_BSS_INFO_T prAisBssInfo; P_WLAN_BEACON_FRAME_T prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T) NULL; #if CFG_SLT_SUPPORT P_SLT_INFO_T prSltInfo = (P_SLT_INFO_T) NULL; #endif ASSERT(prAdapter); ASSERT(prSwRfb); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); /* 4 <0> Ignore invalid Beacon Frame */ if ((prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) < (TIMESTAMP_FIELD_LEN + BEACON_INTERVAL_FIELD_LEN + CAP_INFO_FIELD_LEN)) { #ifndef _lint ASSERT(0); #endif /* _lint */ return rStatus; } #if CFG_SLT_SUPPORT prSltInfo = &prAdapter->rWifiVar.rSltInfo; if (prSltInfo->fgIsDUT) { DBGLOG(P2P, INFO, ("\n\rBCN: RX\n")); prSltInfo->u4BeaconReceiveCnt++; return WLAN_STATUS_SUCCESS; } else { return WLAN_STATUS_SUCCESS; } #endif prConnSettings = &(prAdapter->rWifiVar.rConnSettings); prAisBssInfo = prAdapter->prAisBssInfo; prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T) prSwRfb->pvHeader; /* 4 <1> Parse and add into BSS_DESC_T */ prBssDesc = scanAddToBssDesc(prAdapter, prSwRfb); if (prBssDesc) { /* 4 <1.1> Beacon Change Detection for Connected BSS */ if (prAisBssInfo->eConnectionState == PARAM_MEDIA_STATE_CONNECTED && ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE && prConnSettings->eOPMode != NET_TYPE_IBSS) || (prBssDesc->eBSSType == BSS_TYPE_IBSS && prConnSettings->eOPMode != NET_TYPE_INFRA)) && EQUAL_MAC_ADDR(prBssDesc->aucBSSID, prAisBssInfo->aucBSSID) && EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prAisBssInfo->aucSSID, prAisBssInfo->ucSSIDLen)) { BOOLEAN fgNeedDisconnect = FALSE; #if CFG_SUPPORT_BEACON_CHANGE_DETECTION /* <1.1.2> check if supported rate differs */ if (prAisBssInfo->u2OperationalRateSet != prBssDesc->u2OperationalRateSet) { fgNeedDisconnect = TRUE; } #endif /* <1.1.3> beacon content change detected, disconnect immediately */ if (fgNeedDisconnect == TRUE) { aisBssBeaconTimeout(prAdapter); } } /* 4 <1.1> Update AIS_BSS_INFO */ if (((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE && prConnSettings->eOPMode != NET_TYPE_IBSS) || (prBssDesc->eBSSType == BSS_TYPE_IBSS && prConnSettings->eOPMode != NET_TYPE_INFRA))) { if (prAisBssInfo->eConnectionState == PARAM_MEDIA_STATE_CONNECTED) { /* *not* checking prBssDesc->fgIsConnected anymore, * due to Linksys AP uses " " as hidden SSID, and would have different BSS descriptor */ if ((!prAisBssInfo->ucDTIMPeriod) && EQUAL_MAC_ADDR(prBssDesc->aucBSSID, prAisBssInfo->aucBSSID) && (prAisBssInfo->eCurrentOPMode == OP_MODE_INFRASTRUCTURE) && ((prWlanBeaconFrame->u2FrameCtrl & MASK_FRAME_TYPE) == MAC_FRAME_BEACON)) { prAisBssInfo->ucDTIMPeriod = prBssDesc->ucDTIMPeriod; /* sync with firmware for beacon information */ nicPmIndicateBssConnected(prAdapter, prAisBssInfo->ucBssIndex); } } #if CFG_SUPPORT_ADHOC if (EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prConnSettings->aucSSID, prConnSettings->ucSSIDLen) && (prBssDesc->eBSSType == BSS_TYPE_IBSS) && (prAisBssInfo->eCurrentOPMode == OP_MODE_IBSS)) { ASSERT(prSwRfb->prRxStatusGroup3); ibssProcessMatchedBeacon(prAdapter, prAisBssInfo, prBssDesc, (UINT_8) HAL_RX_STATUS_GET_RCPI(prSwRfb-> prRxStatusGroup3)); } #endif /* CFG_SUPPORT_ADHOC */ } rlmProcessBcn(prAdapter, prSwRfb, ((P_WLAN_BEACON_FRAME_T) (prSwRfb->pvHeader))->aucInfoElem, (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) - (UINT_16) (OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0]))); mqmProcessBcn(prAdapter, prSwRfb, ((P_WLAN_BEACON_FRAME_T) (prSwRfb->pvHeader))->aucInfoElem, (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) - (UINT_16) (OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0]))); /* 4 <3> Send SW_RFB_T to HIF when we perform SCAN for HOST */ if (prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE || prBssDesc->eBSSType == BSS_TYPE_IBSS) { /* for AIS, send to host */ if (prConnSettings->fgIsScanReqIssued) { BOOLEAN fgAddToScanResult; fgAddToScanResult = scanCheckBssIsLegal(prAdapter, prBssDesc); if (fgAddToScanResult == TRUE) { rStatus = scanAddScanResult(prAdapter, prBssDesc, prSwRfb); } } } #if CFG_ENABLE_WIFI_DIRECT if (prAdapter->fgIsP2PRegistered) { scanP2pProcessBeaconAndProbeResp(prAdapter, prSwRfb, &rStatus, prBssDesc, prWlanBeaconFrame); } #endif } return rStatus; } /* end of scanProcessBeaconAndProbeResp() */ /*----------------------------------------------------------------------------*/ /*! * \brief Search the Candidate of BSS Descriptor for JOIN(Infrastructure) or * MERGE(AdHoc) according to current Connection Policy. * * \return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByPolicy(IN P_ADAPTER_T prAdapter, IN UINT_8 ucBssIndex) { P_CONNECTION_SETTINGS_T prConnSettings; P_BSS_INFO_T prBssInfo; P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo; P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T) NULL; P_BSS_DESC_T prPrimaryBssDesc = (P_BSS_DESC_T) NULL; P_BSS_DESC_T prCandidateBssDesc = (P_BSS_DESC_T) NULL; P_STA_RECORD_T prStaRec = (P_STA_RECORD_T) NULL; P_STA_RECORD_T prPrimaryStaRec; P_STA_RECORD_T prCandidateStaRec = (P_STA_RECORD_T) NULL; OS_SYSTIME rCurrentTime; /* The first one reach the check point will be our candidate */ BOOLEAN fgIsFindFirst = (BOOLEAN) FALSE; BOOLEAN fgIsFindBestRSSI = (BOOLEAN) FALSE; BOOLEAN fgIsFindBestEncryptionLevel = (BOOLEAN) FALSE; /* BOOLEAN fgIsFindMinChannelLoad = (BOOLEAN)FALSE; */ /* TODO(Kevin): Support Min Channel Load */ /* UINT_8 aucChannelLoad[CHANNEL_NUM] = {0}; */ BOOLEAN fgIsFixedChannel; ENUM_BAND_T eBand; UINT_8 ucChannel; ASSERT(prAdapter); prConnSettings = &(prAdapter->rWifiVar.rConnSettings); prBssInfo = GET_BSS_INFO_BY_INDEX(prAdapter, ucBssIndex); prAisSpecBssInfo = &(prAdapter->rWifiVar.rAisSpecificBssInfo); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; GET_CURRENT_SYSTIME(&rCurrentTime); /* check for fixed channel operation */ if (prBssInfo->eNetworkType == NETWORK_TYPE_AIS) { #if CFG_SUPPORT_CHNL_CONFLICT_REVISE fgIsFixedChannel = cnmAisDetectP2PChannel(prAdapter, &eBand, &ucChannel); #else fgIsFixedChannel = cnmAisInfraChannelFixed(prAdapter, &eBand, &ucChannel); #endif } else { fgIsFixedChannel = FALSE; } #if DBG if (prConnSettings->ucSSIDLen < ELEM_MAX_LEN_SSID) { prConnSettings->aucSSID[prConnSettings->ucSSIDLen] = '\0'; } #endif #if 0 DBGLOG(SCN, INFO, ("SEARCH: Num Of BSS_DESC_T = %d, Look for SSID: %s\n", prBSSDescList->u4NumElem, prConnSettings->aucSSID)); #endif /* 4 <1> The outer loop to search for a candidate. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { /* TODO(Kevin): Update Minimum Channel Load Information here */ #if 0 DBGLOG(SCN, INFO, ("SEARCH: [" MACSTR "], SSID:%s\n", MAC2STR(prBssDesc->aucBSSID), prBssDesc->aucSSID)); #endif /* 4 <2> Check PHY Type and attributes */ /* 4 <2.1> Check Unsupported BSS PHY Type */ if (!(prBssDesc->ucPhyTypeSet & (prAdapter->rWifiVar.ucAvailablePhyTypeSet))) { DBGLOG(SCN, INFO, ("SEARCH: Ignore unsupported ucPhyTypeSet = %x\n", prBssDesc->ucPhyTypeSet)); continue; } /* 4 <2.2> Check if has unknown NonHT BSS Basic Rate Set. */ if (prBssDesc->fgIsUnknownBssBasicRate) { continue; } /* 4 <2.3> Check if fixed operation cases should be aware */ if (fgIsFixedChannel == TRUE && (prBssDesc->eBand != eBand || prBssDesc->ucChannelNum != ucChannel)) { continue; } /* 4 <2.4> Check if the channel is legal under regulatory domain */ if (rlmDomainIsLegalChannel(prAdapter, prBssDesc->eBand, prBssDesc->ucChannelNum) == FALSE) { continue; } /* 4 <2.5> Check if this BSS_DESC_T is stale */ if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(SCN_BSS_DESC_STALE_SEC))) { continue; } /* 4 <3> Check if reach the excessive join retry limit */ /* NOTE(Kevin): STA_RECORD_T is recorded by TA. */ prStaRec = cnmGetStaRecByAddress(prAdapter, ucBssIndex, prBssDesc->aucSrcAddr); if (prStaRec) { /* NOTE(Kevin): * The Status Code is the result of a Previous Connection Request, we use this as SCORE for choosing a proper * candidate (Also used for compare see <6>) * The Reason Code is an indication of the reason why AP reject us, we use this Code for "Reject" * a SCAN result to become our candidate(Like a blacklist). */ #if 0 /* TODO(Kevin): */ if (prStaRec->u2ReasonCode != REASON_CODE_RESERVED) { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS with previous Reason Code = %d\n", prStaRec->u2ReasonCode)); continue; } else #endif if (prStaRec->u2StatusCode != STATUS_CODE_SUCCESSFUL) { /* NOTE(Kevin): greedy association - after timeout, we'll still * try to associate to the AP whose STATUS of conection attempt * was not success. * We may also use (ucJoinFailureCount x JOIN_RETRY_INTERVAL_SEC) for * time bound. */ if ((prStaRec->ucJoinFailureCount < JOIN_MAX_RETRY_FAILURE_COUNT) || (CHECK_FOR_TIMEOUT(rCurrentTime, prStaRec->rLastJoinTime, SEC_TO_SYSTIME(JOIN_RETRY_INTERVAL_SEC)))) { /* NOTE(Kevin): Every JOIN_RETRY_INTERVAL_SEC interval, we can retry * JOIN_MAX_RETRY_FAILURE_COUNT times. */ if (prStaRec->ucJoinFailureCount >= JOIN_MAX_RETRY_FAILURE_COUNT) { prStaRec->ucJoinFailureCount = 0; } DBGLOG(SCN, INFO, ("SEARCH: Try to join BSS again which has Status Code = %d (Curr = %ld/Last Join = %ld)\n", prStaRec->u2StatusCode, rCurrentTime, prStaRec->rLastJoinTime)); } else { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS which reach maximum Join Retry Count = %d\n", JOIN_MAX_RETRY_FAILURE_COUNT)); continue; } } } /* 4 <4> Check for various NETWORK conditions */ if (prBssInfo->eNetworkType == NETWORK_TYPE_AIS) { /* 4 <4.1> Check BSS Type for the corresponding Operation Mode in Connection Setting */ /* NOTE(Kevin): For NET_TYPE_AUTO_SWITCH, we will always pass following check. */ if (((prConnSettings->eOPMode == NET_TYPE_INFRA) && (prBssDesc->eBSSType != BSS_TYPE_INFRASTRUCTURE)) || ((prConnSettings->eOPMode == NET_TYPE_IBSS || prConnSettings->eOPMode == NET_TYPE_DEDICATED_IBSS) && (prBssDesc->eBSSType != BSS_TYPE_IBSS))) { DBGLOG(SCN, INFO, ("SEARCH: Ignore eBSSType = %s\n", ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) ? "INFRASTRUCTURE" : "IBSS"))); continue; } /* 4 <4.2> Check AP's BSSID if OID_802_11_BSSID has been set. */ if ((prConnSettings->fgIsConnByBssidIssued) && (prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE)) { if (UNEQUAL_MAC_ADDR(prConnSettings->aucBSSID, prBssDesc->aucBSSID)) { DBGLOG(SCN, INFO, ("SEARCH: Ignore due to BSSID was not matched!\n")); continue; } } #if CFG_SUPPORT_ADHOC /* 4 <4.3> Check for AdHoc Mode */ if (prBssDesc->eBSSType == BSS_TYPE_IBSS) { OS_SYSTIME rCurrentTime; /* 4 <4.3.1> Check if this SCAN record has been updated recently for IBSS. */ /* NOTE(Kevin): Because some STA may change its BSSID frequently after it * create the IBSS - e.g. IPN2220, so we need to make sure we get the new one. * For BSS, if the old record was matched, however it won't be able to pass * the Join Process later. */ GET_CURRENT_SYSTIME(&rCurrentTime); if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME (SCN_ADHOC_BSS_DESC_TIMEOUT_SEC))) { DBGLOG(SCN, LOUD, ("SEARCH: Skip old record of BSS Descriptor - BSSID:[" MACSTR "]\n\n", MAC2STR(prBssDesc->aucBSSID))); continue; } /* 4 <4.3.2> Check Peer's capability */ if (ibssCheckCapabilityForAdHocMode(prAdapter, prBssDesc) == WLAN_STATUS_FAILURE) { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS DESC MAC: " MACSTR ", Capability is not supported for current AdHoc Mode.\n", MAC2STR(prPrimaryBssDesc->aucBSSID))); continue; } /* 4 <4.3.3> Compare TSF */ if (prBssInfo->fgIsBeaconActivated && UNEQUAL_MAC_ADDR(prBssInfo->aucBSSID, prBssDesc->aucBSSID)) { DBGLOG(SCN, LOUD, ("SEARCH: prBssDesc->fgIsLargerTSF = %d\n", prBssDesc->fgIsLargerTSF)); if (!prBssDesc->fgIsLargerTSF) { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS DESC MAC: [" MACSTR "], Smaller TSF\n", MAC2STR(prBssDesc->aucBSSID))); continue; } } } #endif /* CFG_SUPPORT_ADHOC */ } #if 0 /* TODO(Kevin): For IBSS */ /* 4 <2.c> Check if this SCAN record has been updated recently for IBSS. */ /* NOTE(Kevin): Because some STA may change its BSSID frequently after it * create the IBSS, so we need to make sure we get the new one. * For BSS, if the old record was matched, however it won't be able to pass * the Join Process later. */ if (prBssDesc->eBSSType == BSS_TYPE_IBSS) { OS_SYSTIME rCurrentTime; GET_CURRENT_SYSTIME(&rCurrentTime); if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(BSS_DESC_TIMEOUT_SEC))) { DBGLOG(SCAN, TRACE, ("Skip old record of BSS Descriptor - BSSID:[" MACSTR "]\n\n", MAC2STR(prBssDesc->aucBSSID))); continue; } } if ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) && (prAdapter->eConnectionState == MEDIA_STATE_CONNECTED)) { OS_SYSTIME rCurrentTime; GET_CURRENT_SYSTIME(&rCurrentTime); if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(BSS_DESC_TIMEOUT_SEC))) { DBGLOG(SCAN, TRACE, ("Skip old record of BSS Descriptor - BSSID:[" MACSTR "]\n\n", MAC2STR(prBssDesc->aucBSSID))); continue; } } /* 4 <4B> Check for IBSS AdHoc Mode. */ /* Skip if one or more BSS Basic Rate are not supported by current AdHocMode */ if (prPrimaryBssDesc->eBSSType == BSS_TYPE_IBSS) { /* 4 <4B.1> Check if match the Capability of current IBSS AdHoc Mode. */ if (ibssCheckCapabilityForAdHocMode(prAdapter, prPrimaryBssDesc) == WLAN_STATUS_FAILURE) { DBGLOG(SCAN, TRACE, ("Ignore BSS DESC MAC: " MACSTR ", Capability is not supported for current AdHoc Mode.\n", MAC2STR(prPrimaryBssDesc->aucBSSID))); continue; } /* 4 <4B.2> IBSS Merge Decision Flow for SEARCH STATE. */ if (prAdapter->fgIsIBSSActive && UNEQUAL_MAC_ADDR(prBssInfo->aucBSSID, prPrimaryBssDesc->aucBSSID)) { if (!fgIsLocalTSFRead) { NIC_GET_CURRENT_TSF(prAdapter, &rCurrentTsf); DBGLOG(SCAN, TRACE, ("\n\nCurrent TSF : %08lx-%08lx\n\n", rCurrentTsf.u.HighPart, rCurrentTsf.u.LowPart)); } if (rCurrentTsf.QuadPart > prPrimaryBssDesc->u8TimeStamp.QuadPart) { DBGLOG(SCAN, TRACE, ("Ignore BSS DESC MAC: [" MACSTR "], Current BSSID: [" MACSTR "].\n", MAC2STR(prPrimaryBssDesc->aucBSSID), MAC2STR(prBssInfo->aucBSSID))); DBGLOG(SCAN, TRACE, ("\n\nBSS's TSF : %08lx-%08lx\n\n", prPrimaryBssDesc->u8TimeStamp.u.HighPart, prPrimaryBssDesc->u8TimeStamp.u.LowPart)); prPrimaryBssDesc->fgIsLargerTSF = FALSE; continue; } else { prPrimaryBssDesc->fgIsLargerTSF = TRUE; } } } /* 4 <5> Check the Encryption Status. */ if (rsnPerformPolicySelection(prPrimaryBssDesc)) { if (prPrimaryBssDesc->ucEncLevel > 0) { fgIsFindBestEncryptionLevel = TRUE; fgIsFindFirst = FALSE; } } else { /* Can't pass the Encryption Status Check, get next one */ continue; } /* For RSN Pre-authentication, update the PMKID canidate list for same SSID and encrypt status */ /* Update PMKID candicate list. */ if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) { rsnUpdatePmkidCandidateList(prPrimaryBssDesc); if (prAdapter->rWifiVar.rAisBssInfo.u4PmkidCandicateCount) { prAdapter->rWifiVar.rAisBssInfo.fgIndicatePMKID = rsnCheckPmkidCandicate(); } } #endif prPrimaryBssDesc = (P_BSS_DESC_T) NULL; /* 4 <6> Check current Connection Policy. */ switch (prConnSettings->eConnectionPolicy) { case CONNECT_BY_SSID_BEST_RSSI: /* Choose Hidden SSID to join only if the `fgIsEnableJoin...` is TRUE */ if (prAdapter->rWifiVar.fgEnableJoinToHiddenSSID && prBssDesc->fgIsHiddenSSID) { /* NOTE(Kevin): following if () statement means that * If Target is hidden, then we won't connect when user specify SSID_ANY policy. */ if (prConnSettings->ucSSIDLen) { prPrimaryBssDesc = prBssDesc; fgIsFindBestRSSI = TRUE; } } else if (EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prConnSettings->aucSSID, prConnSettings->ucSSIDLen)) { prPrimaryBssDesc = prBssDesc; fgIsFindBestRSSI = TRUE; } break; case CONNECT_BY_SSID_ANY: /* NOTE(Kevin): In this policy, we don't know the desired * SSID from user, so we should exclude the Hidden SSID from scan list. * And because we refuse to connect to Hidden SSID node at the beginning, so * when the JOIN Module deal with a BSS_DESC_T which has fgIsHiddenSSID == TRUE, * then the Connection Settings must be valid without doubt. */ if (!prBssDesc->fgIsHiddenSSID) { prPrimaryBssDesc = prBssDesc; fgIsFindFirst = TRUE; } break; case CONNECT_BY_BSSID: if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, prConnSettings->aucBSSID)) { prPrimaryBssDesc = prBssDesc; } break; default: break; } /* Primary Candidate was not found */ if (prPrimaryBssDesc == NULL) { continue; } /* 4 <7> Check the Encryption Status. */ if (prPrimaryBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) { #if CFG_SUPPORT_WAPI if (prAdapter->rWifiVar.rConnSettings.fgWapiMode) { if (wapiPerformPolicySelection(prAdapter, prPrimaryBssDesc)) { fgIsFindFirst = TRUE; } else { /* Can't pass the Encryption Status Check, get next one */ continue; } } else #endif if (rsnPerformPolicySelection(prAdapter, prPrimaryBssDesc)) { if (prAisSpecBssInfo->fgCounterMeasure) { DBGLOG(RSN, INFO, ("Skip while at counter measure period!!!\n")); continue; } if (prPrimaryBssDesc->ucEncLevel > 0) { fgIsFindBestEncryptionLevel = TRUE; fgIsFindFirst = FALSE; } } else { /* Can't pass the Encryption Status Check, get next one */ continue; } } else { /* Todo:: P2P and BOW Policy Selection */ } prPrimaryStaRec = prStaRec; /* 4 <8> Compare the Candidate and the Primary Scan Record. */ if (!prCandidateBssDesc) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; /* 4 <8.1> Condition - Get the first matched one. */ if (fgIsFindFirst) { break; } } else { /* 4 <6D> Condition - Visible SSID win Hidden SSID. */ if (prCandidateBssDesc->fgIsHiddenSSID) { if (!prPrimaryBssDesc->fgIsHiddenSSID) { prCandidateBssDesc = prPrimaryBssDesc; /* The non Hidden SSID win. */ prCandidateStaRec = prPrimaryStaRec; continue; } } else { if (prPrimaryBssDesc->fgIsHiddenSSID) { continue; } } /* 4 <6E> Condition - Choose the one with better RCPI(RSSI). */ if (fgIsFindBestRSSI) { /* TODO(Kevin): We shouldn't compare the actual value, we should * allow some acceptable tolerance of some RSSI percentage here. */ DBGLOG(SCN, TRACE, ("Candidate [" MACSTR "]: RCPI = %d, joinFailCnt=%d, Primary [" MACSTR"]: RCPI = %d, joinFailCnt=%d\n", MAC2STR(prCandidateBssDesc->aucBSSID), prCandidateBssDesc->ucRCPI, prCandidateBssDesc->ucJoinFailureCount, MAC2STR(prPrimaryBssDesc->aucBSSID), prPrimaryBssDesc->ucRCPI, prPrimaryBssDesc->ucJoinFailureCount)); ASSERT(!(prCandidateBssDesc->fgIsConnected && prPrimaryBssDesc->fgIsConnected)); if (prPrimaryBssDesc->ucJoinFailureCount > SCN_BSS_JOIN_FAIL_THRESOLD) { /* give a chance to do join if join fail before SCN_BSS_DECRASE_JOIN_FAIL_CNT_SEC seconds */ if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rJoinFailTime, SEC_TO_SYSTIME(SCN_BSS_JOIN_FAIL_CNT_RESET_SEC))) { prBssDesc->ucJoinFailureCount -= SCN_BSS_JOIN_FAIL_RESET_STEP; DBGLOG(AIS, INFO, ("decrease join fail count for Bss "MACSTR" to %u, timeout second %d\n", MAC2STR(prBssDesc->aucBSSID), prBssDesc->ucJoinFailureCount, SCN_BSS_JOIN_FAIL_CNT_RESET_SEC)); } } /* NOTE: To prevent SWING, we do roaming only if target AP has at least 5dBm larger than us. */ if (prCandidateBssDesc->fgIsConnected) { if ((prCandidateBssDesc->ucRCPI + ROAMING_NO_SWING_RCPI_STEP <= prPrimaryBssDesc->ucRCPI) && prPrimaryBssDesc->ucJoinFailureCount <= SCN_BSS_JOIN_FAIL_THRESOLD) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } else if (prPrimaryBssDesc->fgIsConnected) { if ((prCandidateBssDesc->ucRCPI < prPrimaryBssDesc->ucRCPI + ROAMING_NO_SWING_RCPI_STEP) || (prCandidateBssDesc->ucJoinFailureCount > SCN_BSS_JOIN_FAIL_THRESOLD)) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } else if (prPrimaryBssDesc->ucJoinFailureCount > SCN_BSS_JOIN_FAIL_THRESOLD) continue; else if (prCandidateBssDesc->ucJoinFailureCount > SCN_BSS_JOIN_FAIL_THRESOLD || prCandidateBssDesc->ucRCPI < prPrimaryBssDesc->ucRCPI) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } #if 0 /* If reach here, that means they have the same Encryption Score, and * both RSSI value are close too. */ /* 4 <6F> Seek the minimum Channel Load for less interference. */ if (fgIsFindMinChannelLoad) { /* TODO(Kevin): Check which one has minimum channel load in its channel */ } #endif } } return prCandidateBssDesc; } /* end of scanSearchBssDescByPolicy() */ VOID scanReportBss2Cfg80211(IN P_ADAPTER_T prAdapter, IN ENUM_BSS_TYPE_T eBSSType, IN P_BSS_DESC_T SpecificprBssDesc) { P_SCAN_INFO_T prScanInfo = NULL; P_LINK_T prBSSDescList = NULL; P_BSS_DESC_T prBssDesc = NULL; RF_CHANNEL_INFO_T rChannelInfo; ASSERT(prAdapter); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; DBGLOG(SCN, TRACE, ("scanReportBss2Cfg80211\n")); if (SpecificprBssDesc) { { /* check BSSID is legal channel */ if (!scanCheckBssIsLegal(prAdapter, SpecificprBssDesc)) { DBGLOG(SCN, TRACE, ("Remove specific SSID[%s %d]\n", SpecificprBssDesc->aucSSID, SpecificprBssDesc->ucChannelNum)); return; } DBGLOG(SCN, TRACE, ("Report Specific SSID[%s]\n", SpecificprBssDesc->aucSSID)); if (eBSSType == BSS_TYPE_INFRASTRUCTURE) { kalIndicateBssInfo(prAdapter->prGlueInfo, (PUINT_8) SpecificprBssDesc->aucRawBuf, SpecificprBssDesc->u2RawLength, SpecificprBssDesc->ucChannelNum, RCPI_TO_dBm(SpecificprBssDesc->ucRCPI)); } else { rChannelInfo.ucChannelNum = SpecificprBssDesc->ucChannelNum; rChannelInfo.eBand = SpecificprBssDesc->eBand; kalP2PIndicateBssInfo(prAdapter->prGlueInfo, (PUINT_8) SpecificprBssDesc->aucRawBuf, SpecificprBssDesc->u2RawLength, &rChannelInfo, RCPI_TO_dBm(SpecificprBssDesc->ucRCPI)); } #if CFG_ENABLE_WIFI_DIRECT SpecificprBssDesc->fgIsP2PReport = FALSE; #endif } } else { /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { #if CFG_AUTO_CHANNEL_SEL_SUPPORT /* Auto Channel Selection:Record the AP Number */ P_PARAM_CHN_LOAD_INFO prChnLoad = NULL; if ((prBssDesc->ucChannelNum <= 48) && (prBssDesc->ucChannelNum >= 1)) { if (prBssDesc->ucChannelNum <= 14) prChnLoad = (P_PARAM_CHN_LOAD_INFO) &(prAdapter->rWifiVar.rChnLoadInfo.rEachChnLoad[prBssDesc->ucChannelNum - 1]); else prChnLoad = (P_PARAM_CHN_LOAD_INFO) &(prAdapter->rWifiVar.rChnLoadInfo.rEachChnLoad[(prBssDesc->ucChannelNum / 4) + 5]); prChnLoad->u2APNum++; prChnLoad->ucChannel=prBssDesc->ucChannelNum; } DBGLOG(SCN, TRACE, ("chNum=%d,apNum=%d\n", prBssDesc->ucChannelNum, prChnLoad->u2APNum)); #endif /* check BSSID is legal channel */ if (!scanCheckBssIsLegal(prAdapter, prBssDesc)) { DBGLOG(SCN, TRACE, ("Remove SSID[%s %d]\n", prBssDesc->aucSSID, prBssDesc->ucChannelNum)); continue; } if ((prBssDesc->eBSSType == eBSSType) #if CFG_ENABLE_WIFI_DIRECT || ((eBSSType == BSS_TYPE_P2P_DEVICE) && (prBssDesc->fgIsP2PReport == TRUE)) #endif ) { DBGLOG(SCN, TRACE, ("Report ALL SSID[%s %d]\n", prBssDesc->aucSSID, prBssDesc->ucChannelNum)); if (eBSSType == BSS_TYPE_INFRASTRUCTURE) { if (prBssDesc->u2RawLength != 0) { kalIndicateBssInfo(prAdapter->prGlueInfo, (PUINT_8) prBssDesc->aucRawBuf, prBssDesc->u2RawLength, prBssDesc->ucChannelNum, RCPI_TO_dBm(prBssDesc->ucRCPI)); kalMemZero(prBssDesc->aucRawBuf, CFG_RAW_BUFFER_SIZE); prBssDesc->u2RawLength = 0; #if CFG_ENABLE_WIFI_DIRECT prBssDesc->fgIsP2PReport = FALSE; #endif } } else { #if CFG_ENABLE_WIFI_DIRECT if (prBssDesc->fgIsP2PReport == TRUE) #endif { rChannelInfo.ucChannelNum = prBssDesc->ucChannelNum; rChannelInfo.eBand = prBssDesc->eBand; kalP2PIndicateBssInfo(prAdapter->prGlueInfo, (PUINT_8) prBssDesc-> aucRawBuf, prBssDesc->u2RawLength, &rChannelInfo, RCPI_TO_dBm(prBssDesc-> ucRCPI)); /* do not clear it then we can pass the bss in Specific report */ /* kalMemZero(prBssDesc->aucRawBuf,CFG_RAW_BUFFER_SIZE); */ /* the BSS entry will not be cleared after scan done. So if we dont receive the BSS in next scan, we cannot pass it. We use u2RawLength for the purpose. */ /* prBssDesc->u2RawLength=0; */ #if CFG_ENABLE_WIFI_DIRECT prBssDesc->fgIsP2PReport = FALSE; #endif } } } } #if CFG_AUTO_CHANNEL_SEL_SUPPORT prAdapter->rWifiVar.rChnLoadInfo.fgDataReadyBit = TRUE; #endif } } #if CFG_SUPPORT_PASSPOINT /*----------------------------------------------------------------------------*/ /*! * @brief Find the corresponding BSS Descriptor according to given BSSID * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] aucBSSID Given BSSID. * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with single BSSID cases) * @param[in] prSsid Specified SSID * * @return Pointer to BSS Descriptor, if found. NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByBssidAndLatestUpdateTime(IN P_ADAPTER_T prAdapter, IN UINT_8 aucBSSID[] ) { P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_BSS_DESC_T prBssDesc; P_BSS_DESC_T prDstBssDesc = (P_BSS_DESC_T) NULL; OS_SYSTIME rLatestUpdateTime = 0; ASSERT(prAdapter); ASSERT(aucBSSID); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; /* Search BSS Desc from current SCAN result list. */ LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) { if (!rLatestUpdateTime || CHECK_FOR_EXPIRATION(prBssDesc->rUpdateTime, rLatestUpdateTime)) { prDstBssDesc = prBssDesc; COPY_SYSTIME(rLatestUpdateTime, prBssDesc->rUpdateTime); } } } return prDstBssDesc; } /* end of scanSearchBssDescByBssid() */ #endif /* CFG_SUPPORT_PASSPOINT */ #if CFG_SUPPORT_AGPS_ASSIST VOID scanReportScanResultToAgps(P_ADAPTER_T prAdapter) { P_LINK_T prBSSDescList = &prAdapter->rWifiVar.rScanInfo.rBSSDescList; P_BSS_DESC_T prBssDesc = NULL; P_AGPS_AP_LIST_T prAgpsApList = kalMemAlloc(sizeof(AGPS_AP_LIST_T), VIR_MEM_TYPE); P_AGPS_AP_INFO_T prAgpsInfo = &prAgpsApList->arApInfo[0]; P_SCAN_INFO_T prScanInfo = &prAdapter->rWifiVar.rScanInfo; UINT_8 ucIndex = 0; LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { if (prBssDesc->rUpdateTime < prScanInfo->rLastScanCompletedTime) continue; COPY_MAC_ADDR(prAgpsInfo->aucBSSID, prBssDesc->aucBSSID); prAgpsInfo->ePhyType = AGPS_PHY_G; prAgpsInfo->u2Channel = prBssDesc->ucChannelNum; prAgpsInfo->i2ApRssi = RCPI_TO_dBm(prBssDesc->ucRCPI); prAgpsInfo++; ucIndex++; if (ucIndex == SCN_AGPS_AP_LIST_MAX_NUM) break; } prAgpsApList->ucNum = ucIndex; GET_CURRENT_SYSTIME(&prScanInfo->rLastScanCompletedTime); //DBGLOG(SCN, INFO, ("num of scan list:%d\n", ucIndex)); kalIndicateAgpsNotify(prAdapter, AGPS_EVENT_WLAN_AP_LIST, (PUINT_8)prAgpsApList, sizeof(AGPS_AP_LIST_T)); kalMemFree(prAgpsApList, VIR_MEM_TYPE, sizeof(AGPS_AP_LIST_T)); } #endif /* CFG_SUPPORT_AGPS_ASSIST */
{'content_hash': '127483d2d19a6fd464eb1f6033ee1f9d', 'timestamp': '', 'source': 'github', 'line_count': 3190, 'max_line_length': 164, 'avg_line_length': 32.62257053291536, 'alnum_prop': 0.6561893413795091, 'repo_name': 'zhengdejin/X1_Code', 'id': '630da0fd20a184bc4dc5ad29120fc66a1cc33348', 'size': '104066', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'kernel-3.10/drivers/misc/mediatek/connectivity/combo/drv_wlan/mt6630/wlan/mgmt/scan.c', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'ASP', 'bytes': '4528'}, {'name': 'Assembly', 'bytes': '10107915'}, {'name': 'Awk', 'bytes': '18681'}, {'name': 'Batchfile', 'bytes': '144'}, {'name': 'C', 'bytes': '519266172'}, {'name': 'C++', 'bytes': '11700846'}, {'name': 'GDB', 'bytes': '18113'}, {'name': 'Lex', 'bytes': '47705'}, {'name': 'M4', 'bytes': '3388'}, {'name': 'Makefile', 'bytes': '1619668'}, {'name': 'Objective-C', 'bytes': '2963724'}, {'name': 'Perl', 'bytes': '570279'}, {'name': 'Perl 6', 'bytes': '3727'}, {'name': 'Python', 'bytes': '92743'}, {'name': 'Roff', 'bytes': '55837'}, {'name': 'Scilab', 'bytes': '21433'}, {'name': 'Shell', 'bytes': '185922'}, {'name': 'SourcePawn', 'bytes': '2711'}, {'name': 'UnrealScript', 'bytes': '6113'}, {'name': 'XS', 'bytes': '1240'}, {'name': 'Yacc', 'bytes': '92226'}]}
import pkg1.pkg2.pkg3.m5 <warning descr="Unused import statement 'import pkg1.pkg2.pkg3.m6'">import pkg1.pkg2.pkg3.m6</warning> <warning descr="Unused import statement 'import pkg1.pkg2.m3'">import pkg1.pkg2.m3</warning> import pkg1.pkg2.m4 import pkg1.m1 import pkg1.m2 print (pkg1.m1) print (pkg1.m2) print (pkg1.pkg2.m4) print (pkg1.pkg2.pkg3)
{'content_hash': '5a7557d21d49e5dafdc2e04288e64256', 'timestamp': '', 'source': 'github', 'line_count': 13, 'max_line_length': 102, 'avg_line_length': 26.923076923076923, 'alnum_prop': 0.76, 'repo_name': 'leafclick/intellij-community', 'id': '0430130d5d007ee68d0348cfd87576db8f717d29', 'size': '350', 'binary': False, 'copies': '13', 'ref': 'refs/heads/master', 'path': 'python/testData/inspections/unusedImport/multipleSubmodules/test1.py', 'mode': '33188', 'license': 'apache-2.0', 'language': []}
(function($) { var ngmod = angular.module("framework.controllers", ["framework.merchantCertService"]); ngmod.controller("merchantCertAddCtrl",function($scope,merchantCertRES,merchantCertGRES){ $scope.project = new Object(); merchantCertGRES.get({ method:"getClient" }, function(projects) { $scope.project=projects.model; $scope.project.status="1"; $scope.project.certType="1"; }); $scope.save = function(invalid) { if(invalid){ alert('错误信息:必填信息不能为空,请检查表单项!'); return; } if($("#certType_sys").attr("checked")=="checked") { $("#td_upload").hide(); // $(".J_pubKeyValue input").val(); // $(".J_aliveTime input").val(); // $(".J_destoryTime input").val(); }else if($("#certType_mer").attr("checked")=="checked") { $(".J_isModifyPrivKey radio").val(); } $scope.project.fileUrl=$("#fileUrl").val(); $scope.project.fileName=$("#fileName").val(); merchantCertRES.save({'method':"add"},$scope.project, function(project) { if(project.respCode=="00"){ $.messager.alert('温馨提示','操作成功','success'); location.href="list.shtml"; }else { $.messager.alert('操作失败',project.respDesc,'error'); } }); }; //单击商户选择时,控制上传控件显示 $scope.Upload_ClickCertType=function(){ $("#mainFrame").css('height','800');; if($("#certType_mer").attr("checked")=="checked") { $("#td_upload").show(); $(".J_pubKeyValue").show(); $(".J_aliveTime").show(); $(".J_destoryTime").show(); $(".J_isModifyPrivKey").hide(); } else if($("#certType_sys").attr("checked")=="checked") { $("#td_upload").hide(); $(".J_pubKeyValue").hide(); $(".J_aliveTime").hide(); $(".J_destoryTime").hide(); } }; }); })(jQuery);
{'content_hash': '3ce11f1d331e7038f760315b22894569', 'timestamp': '', 'source': 'github', 'line_count': 62, 'max_line_length': 93, 'avg_line_length': 36.53225806451613, 'alnum_prop': 0.44724061810154525, 'repo_name': 'leadwit/AdminUI', 'id': '7c87fc48856194830dc741dc8b51b6159c97d859', 'size': '2359', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'service-manager/modules/merchantCert/controller/add-controller.js', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'CSS', 'bytes': '483914'}, {'name': 'JavaScript', 'bytes': '1459826'}]}
package org.apache.lens.lib.query; import java.io.IOException; import org.apache.lens.server.api.driver.LensResultSetMetadata; import org.apache.lens.server.api.query.QueryContext; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.Path; /** * Wraps the formatter {@link FileFormatter}, which can have implementations like {@link HadoopFileFormatter} or * {@link ZipFileFormatter}. */ public abstract class WrappedFileFormatter extends AbstractOutputFormatter { /** * The formatter. */ private AbstractFileFormatter formatter; /* * (non-Javadoc) * * @see org.apache.lens.lib.query.AbstractOutputFormatter#init(org.apache.lens.server.api.query.QueryContext, * org.apache.lens.server.api.driver.LensResultSetMetadata) */ public void init(QueryContext ctx, LensResultSetMetadata metadata) throws IOException { super.init(ctx, metadata); if (ctx.splitResultIntoMultipleFiles()) { formatter = new ZipFileFormatter(); } else { formatter = new HadoopFileFormatter(); } formatter.init(ctx, metadata); } /* * (non-Javadoc) * * @see org.apache.lens.server.api.query.QueryOutputFormatter#writeHeader() */ @Override public void writeHeader() throws IOException { String header = ctx.getResultHeader(); if (!StringUtils.isBlank(header)) { formatter.writeHeader(header); } else { formatter.writeHeader(getHeaderFromSerde()); } } /* * (non-Javadoc) * * @see org.apache.lens.server.api.query.QueryOutputFormatter#writeFooter() */ @Override public void writeFooter() throws IOException { String footer = ctx.getResultFooter(); if (!StringUtils.isBlank(footer)) { formatter.writeFooter(footer); } else { formatter.writeFooter("Total rows:" + getNumRows()); } } /** * Write row. * * @param row the row * @throws IOException Signals that an I/O exception has occurred. */ protected void writeRow(String row) throws IOException { formatter.writeRow(row); } @Override public Integer getNumRows() { return formatter.getNumRows(); } @Override public Long getFileSize() { return formatter.getFileSize(); } /* * (non-Javadoc) * * @see org.apache.lens.server.api.query.QueryOutputFormatter#commit() */ @Override public void commit() throws IOException { formatter.commit(); } /* * (non-Javadoc) * * @see org.apache.lens.server.api.query.QueryOutputFormatter#close() */ @Override public void close() throws IOException { if (formatter != null) { formatter.close(); } } @Override public String getFinalOutputPath() { return formatter.getFinalOutputPath(); } public Path getTmpPath() { return formatter.getTmpPath(); } public String getEncoding() { return formatter.getEncoding(); } }
{'content_hash': '19be36549811cd91abefa8bd3311367e', 'timestamp': '', 'source': 'github', 'line_count': 123, 'max_line_length': 112, 'avg_line_length': 23.447154471544714, 'alnum_prop': 0.6806518723994452, 'repo_name': 'kamaldeep-ebay/lens', 'id': 'e28c17b50665f5ed26254c7393d89c17d3b265f1', 'size': '3692', 'binary': False, 'copies': '3', 'ref': 'refs/heads/master', 'path': 'lens-query-lib/src/main/java/org/apache/lens/lib/query/WrappedFileFormatter.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'HTML', 'bytes': '10377'}, {'name': 'Java', 'bytes': '4547758'}, {'name': 'JavaScript', 'bytes': '282078'}, {'name': 'Shell', 'bytes': '14016'}]}
package com.xthena.auth.support; public class CheckRoleException extends RuntimeException { public CheckRoleException(String message) { super(message); } }
{'content_hash': '42283628b92b111ac25293f4520897ce', 'timestamp': '', 'source': 'github', 'line_count': 7, 'max_line_length': 58, 'avg_line_length': 24.714285714285715, 'alnum_prop': 0.7398843930635838, 'repo_name': 'fuhongliang/xhf', 'id': 'a0a88097bfa0c623385ed5ec32fde9ce814c9c9e', 'size': '173', 'binary': False, 'copies': '3', 'ref': 'refs/heads/master', 'path': 'src/main/java/com/xthena/auth/support/CheckRoleException.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '644181'}, {'name': 'HTML', 'bytes': '178758'}, {'name': 'Java', 'bytes': '11247706'}, {'name': 'JavaScript', 'bytes': '6185245'}]}
[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url] # Jaeger Bindings for Go OpenTracing API Instrumentation library that implements an [OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io). **IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`, it will not compile. We might revisit this in the next major release. * :white_check_mark: `import "github.com/uber/jaeger-client-go"` * :x: `import "github.com/jaegertracing/jaeger-client-go"` ## How to Contribute Please see [CONTRIBUTING.md](CONTRIBUTING.md). ## Installation We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide) and [semantic versioning](http://semver.org/) when including this library into an application. For example, Jaeger backend imports this library like this: ```yaml - package: github.com/uber/jaeger-client-go version: ^2.7.0 ``` If you instead want to use the latest version in `master`, you can pull it via `go get`. Note that during `go get` you may see build errors due to incompatible dependencies, which is why we recommend using semantic versions for dependencies. The error may be fixed by running `make install` (it will install `glide` if you don't have it): ```shell go get -u github.com/uber/jaeger-client-go/ cd $GOPATH/src/github.com/uber/jaeger-client-go/ git submodule update --init --recursive make install ``` ## Initialization See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples) and [config/example_test.go](./config/example_test.go). ### Environment variables The tracer can be initialized with values coming from environment variables. None of the env vars are required and all of them can be overriden via direct setting of the property on the configuration object. Property| Description --- | --- JAEGER_SERVICE_NAME | The service name JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP JAEGER_AGENT_PORT | The port for communicating with agent via UDP JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval (ms) JAEGER_SAMPLER_TYPE | The sampler type JAEGER_SAMPLER_PARAM | The sampler parameter (number) JAEGER_SAMPLER_MANAGER_HOST_PORT | The host name and port when using the remote controlled sampler JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used. JAEGER_RPC_METRICS | Whether to store RPC metrics ### Closing the tracer via `io.Closer` The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance. It is recommended to structure your `main()` so that it calls the `Close()` function on the closer before exiting, e.g. ```go tracer, closer, err := cfg.NewTracer(...) defer closer.Close() ``` This is especially useful for command-line tools that enable tracing, as well as for the long-running apps that support graceful shutdown. For example, if your deployment system sends SIGTERM instead of killing the process and you trap that signal to do a graceful exit, then having `defer closer.Closer()` ensures that all buffered spans are flushed. ### Metrics & Monitoring The tracer emits a number of different metrics, defined in [metrics.go](metrics.go). The monitoring backend is expected to support tag-based metric names, e.g. instead of `statsd`-style string names like `counters.my-service.jaeger.spans.started.sampled`, the metrics are defined by a short name and a collection of key/value tags, for example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go) file for the full list and descriptions of emitted metrics. The monitoring backend is represented by the `metrics.Factory` interface from package [`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation of that interface can be passed as an option to either the Configuration object or the Tracer constructor, for example: ```go import ( "github.com/uber/jaeger-client-go/config" "github.com/uber/jaeger-lib/metrics/prometheus" ) metricsFactory := prometheus.New() tracer, closer, err := config.Configuration{ ServiceName: "your-service-name", }.NewTracer( config.Metrics(metricsFactory), ) ``` By default, a no-op `metrics.NullFactory` is used. ### Logging The tracer can be configured with an optional logger, which will be used to log communication errors, or log spans if a logging reporter option is specified in the configuration. The logging API is abstracted by the [Logger](logger.go) interface. A logger instance implementing this interface can be set on the `Config` object before calling the `New` method. Besides the [zap](https://github.com/uber-go/zap) implementation bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository. ## Instrumentation for Tracing Since this tracer is fully compliant with OpenTracing API 1.0, all code instrumentation should only use the API itself, as described in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation. ## Features ### Reporters A "reporter" is a component that receives the finished spans and reports them to somewhere. Under normal circumstances, the Tracer should use the default `RemoteReporter`, which sends the spans out of process via configurable "transport". For testing purposes, one can use an `InMemoryReporter` that accumulates spans in a buffer and allows to retrieve them for later verification. Also available are `NullReporter`, a no-op reporter that does nothing, a `LoggingReporter` which logs all finished spans using their `String()` method, and a `CompositeReporter` that can be used to combine more than one reporter into one, e.g. to attach a logging reporter to the main remote reporter. ### Span Reporting Transports The remote reporter uses "transports" to actually send the spans out of process. Currently the supported transports include: * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP, * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP. ### Sampling The tracer does not record all spans, but only those that have the sampling bit set in the `flags`. When a new trace is started and a new unique ID is generated, a sampling decision is made whether this trace should be sampled. The sampling decision is propagated to all downstream calls via the `flags` field of the trace context. The following samplers are available: 1. `RemotelyControlledSampler` uses one of the other simpler samplers and periodically updates it by polling an external server. This allows dynamic control of the sampling strategies. 1. `ConstSampler` always makes the same sampling decision for all trace IDs. it can be configured to either sample all traces, or to sample none. 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability for a given trace to be sampled. The actual decision is made by comparing the trace ID with a random number multiplied by the sampling rate. 1. `RateLimitingSampler` can be used to allow only a certain fixed number of traces to be sampled per second. ### Baggage Injection The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added to the span context and propagated throughout the trace. An external process can inject baggage by setting the special HTTP Header `jaeger-baggage` on a request: ```sh curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com ``` Baggage can also be programatically set inside your service: ```go if span := opentracing.SpanFromContext(ctx); span != nil { span.SetBaggageItem("key", "value") } ``` Another service downstream of that can retrieve the baggage in a similar way: ```go if span := opentracing.SpanFromContext(ctx); span != nil { val := span.BaggageItem("key") println(val) } ``` ### Debug Traces (Forced Sampling) #### Programmatically The OpenTracing API defines a `sampling.priority` standard tag that can be used to affect the sampling of a span and its children: ```go import ( "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" ) span := opentracing.SpanFromContext(ctx) ext.SamplingPriority.Set(span, 1) ``` #### Via HTTP Headers Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`, which can be set in the incoming request, e.g. ```sh curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com ``` When Jaeger sees this header in the request that otherwise has no tracing context, it ensures that the new trace started for this request will be sampled in the "debug" mode (meaning it should survive all downsampling that might happen in the collection pipeline), and the root span will have a tag as if this statement was executed: ```go span.SetTag("jaeger-debug-id", "some-correlation-id") ``` This allows using Jaeger UI to find the trace by this tag. ### Zipkin HTTP B3 compatible header propagation Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin). However it is not the default propagation format, see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) how to set it up. ## License [Apache 2.0 License](LICENSE). [doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg [doc]: https://godoc.org/github.com/uber/jaeger-client-go [ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master [ci]: https://travis-ci.org/jaegertracing/jaeger-client-go [cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go [ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg [ot-url]: http://opentracing.io [baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
{'content_hash': 'e39ea37b7708a4e774416c7311748972', 'timestamp': '', 'source': 'github', 'line_count': 260, 'max_line_length': 318, 'avg_line_length': 42.86923076923077, 'alnum_prop': 0.7662838686524314, 'repo_name': 'corystone/hummingbird', 'id': '16b04454e16848818d8eeeca0eadba21a1bc6b4c', 'size': '11146', 'binary': False, 'copies': '8', 'ref': 'refs/heads/master', 'path': 'vendor/github.com/uber/jaeger-client-go/README.md', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Go', 'bytes': '2167451'}, {'name': 'Makefile', 'bytes': '1575'}, {'name': 'Shell', 'bytes': '335'}]}
package org.elasticsearch.action.search; import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.ObjectObjectHashMap; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.Suggest.Suggestion; import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Function; import java.util.function.IntFunction; import java.util.stream.Collectors; import java.util.stream.StreamSupport; public final class SearchPhaseController extends AbstractComponent { private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; private final Function<Boolean, ReduceContext> reduceContextFunction; /** * Constructor. * @param settings Node settings * @param reduceContextFunction A function that builds a context for the reduce of an {@link InternalAggregation} */ public SearchPhaseController(Settings settings, Function<Boolean, ReduceContext> reduceContextFunction) { super(settings); this.reduceContextFunction = reduceContextFunction; } public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) { ObjectObjectHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap(); ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap(); long aggMaxDoc = 0; for (DfsSearchResult lEntry : results) { final Term[] terms = lEntry.terms(); final TermStatistics[] stats = lEntry.termStatistics(); assert terms.length == stats.length; for (int i = 0; i < terms.length; i++) { assert terms[i] != null; if (stats[i] == null) { continue; } TermStatistics existing = termStatistics.get(terms[i]); if (existing != null) { assert terms[i].bytes().equals(existing.term()); termStatistics.put(terms[i], new TermStatistics(existing.term(), existing.docFreq() + stats[i].docFreq(), existing.totalTermFreq() + stats[i].totalTermFreq())); } else { termStatistics.put(terms[i], stats[i]); } } assert !lEntry.fieldStatistics().containsKey(null); final Object[] keys = lEntry.fieldStatistics().keys; final Object[] values = lEntry.fieldStatistics().values; for (int i = 0; i < keys.length; i++) { if (keys[i] != null) { String key = (String) keys[i]; CollectionStatistics value = (CollectionStatistics) values[i]; if (value == null) { continue; } assert key != null; CollectionStatistics existing = fieldStatistics.get(key); if (existing != null) { CollectionStatistics merged = new CollectionStatistics(key, existing.maxDoc() + value.maxDoc(), existing.docCount() + value.docCount(), existing.sumTotalTermFreq() + value.sumTotalTermFreq(), existing.sumDocFreq() + value.sumDocFreq() ); fieldStatistics.put(key, merged); } else { fieldStatistics.put(key, value); } } } aggMaxDoc += lEntry.maxDoc(); } return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc); } /** * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. * * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate * the result. In oder to obtain stable results the shard index (index of the result in the result array) must be the same. * * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result. * Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase. * @param results the search phase results to obtain the sort docs from * @param bufferedTopDocs the pre-consumed buffered top docs * @param topDocsStats the top docs stats to fill * @param from the offset into the search results top docs * @param size the number of hits to return from the merged top docs */ public SortedTopDocs sortDocs(boolean ignoreFrom, Collection<? extends SearchPhaseResult> results, final Collection<TopDocs> bufferedTopDocs, final TopDocsStats topDocsStats, int from, int size) { if (results.isEmpty()) { return SortedTopDocs.EMPTY; } final Collection<TopDocs> topDocs = bufferedTopDocs == null ? new ArrayList<>() : bufferedTopDocs; final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>(); for (SearchPhaseResult sortedResult : results) { // TODO we can move this loop into the reduce call to only loop over this once /* We loop over all results once, group together the completion suggestions if there are any and collect relevant * top docs results. Each top docs gets it's shard index set on all top docs to simplify top docs merging down the road * this allowed to remove a single shared optimization code here since now we don't materialized a dense array of * top docs anymore but instead only pass relevant results / top docs to the merge method*/ QuerySearchResult queryResult = sortedResult.queryResult(); if (queryResult.hasConsumedTopDocs() == false) { // already consumed? final TopDocsAndMaxScore td = queryResult.consumeTopDocs(); assert td != null; topDocsStats.add(td); if (td.topDocs.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet setShardIndex(td.topDocs, queryResult.getShardIndex()); topDocs.add(td.topDocs); } } if (queryResult.hasSuggestHits()) { Suggest shardSuggest = queryResult.suggest(); for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) { suggestion.setShardIndex(sortedResult.getShardIndex()); List<Suggestion<CompletionSuggestion.Entry>> suggestions = groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); suggestions.add(suggestion); } } } final boolean hasHits = (groupedCompletionSuggestions.isEmpty() && topDocs.isEmpty()) == false; if (hasHits) { final TopDocs mergedTopDocs = mergeTopDocs(topDocs, size, ignoreFrom ? 0 : from); final ScoreDoc[] mergedScoreDocs = mergedTopDocs == null ? EMPTY_DOCS : mergedTopDocs.scoreDocs; ScoreDoc[] scoreDocs = mergedScoreDocs; if (groupedCompletionSuggestions.isEmpty() == false) { int numSuggestDocs = 0; List<Suggestion<? extends Entry<? extends Entry.Option>>> completionSuggestions = new ArrayList<>(groupedCompletionSuggestions.size()); for (List<Suggestion<CompletionSuggestion.Entry>> groupedSuggestions : groupedCompletionSuggestions.values()) { final CompletionSuggestion completionSuggestion = CompletionSuggestion.reduceTo(groupedSuggestions); assert completionSuggestion != null; numSuggestDocs += completionSuggestion.getOptions().size(); completionSuggestions.add(completionSuggestion); } scoreDocs = new ScoreDoc[mergedScoreDocs.length + numSuggestDocs]; System.arraycopy(mergedScoreDocs, 0, scoreDocs, 0, mergedScoreDocs.length); int offset = mergedScoreDocs.length; Suggest suggestions = new Suggest(completionSuggestions); for (CompletionSuggestion completionSuggestion : suggestions.filter(CompletionSuggestion.class)) { for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) { scoreDocs[offset++] = option.getDoc(); } } } final boolean isSortedByField; final SortField[] sortFields; if (mergedTopDocs != null && mergedTopDocs instanceof TopFieldDocs) { TopFieldDocs fieldDocs = (TopFieldDocs) mergedTopDocs; isSortedByField = (fieldDocs instanceof CollapseTopFieldDocs && fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) == false; sortFields = fieldDocs.fields; } else { isSortedByField = false; sortFields = null; } return new SortedTopDocs(scoreDocs, isSortedByField, sortFields); } else { // no relevant docs return SortedTopDocs.EMPTY; } } TopDocs mergeTopDocs(Collection<TopDocs> results, int topN, int from) { if (results.isEmpty()) { return null; } assert results.isEmpty() == false; final boolean setShardIndex = false; final TopDocs topDocs = results.stream().findFirst().get(); final TopDocs mergedTopDocs; final int numShards = results.size(); if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them. return topDocs; } else if (topDocs instanceof CollapseTopFieldDocs) { CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) topDocs; final Sort sort = new Sort(firstTopDocs.fields); final CollapseTopFieldDocs[] shardTopDocs = results.toArray(new CollapseTopFieldDocs[numShards]); mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs, setShardIndex); } else if (topDocs instanceof TopFieldDocs) { TopFieldDocs firstTopDocs = (TopFieldDocs) topDocs; final Sort sort = new Sort(firstTopDocs.fields); final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]); mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, setShardIndex); } else { final TopDocs[] shardTopDocs = results.toArray(new TopDocs[numShards]); mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, setShardIndex); } return mergedTopDocs; } private static void setShardIndex(TopDocs topDocs, int shardIndex) { for (ScoreDoc doc : topDocs.scoreDocs) { if (doc.shardIndex != -1) { // once there is a single shard index initialized all others will be initialized too // there are many asserts down in lucene land that this is actually true. we can shortcut it here. return; } doc.shardIndex = shardIndex; } } public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase, int numShards) { final ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards]; if (reducedQueryPhase.isEmptyResult == false) { final ScoreDoc[] sortedScoreDocs = reducedQueryPhase.scoreDocs; // from is always zero as when we use scroll, we ignore from long size = Math.min(reducedQueryPhase.fetchHits, reducedQueryPhase.size); // with collapsing we can have more hits than sorted docs size = Math.min(sortedScoreDocs.length, size); for (int sortedDocsIndex = 0; sortedDocsIndex < size; sortedDocsIndex++) { ScoreDoc scoreDoc = sortedScoreDocs[sortedDocsIndex]; lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc; } } return lastEmittedDocPerShard; } /** * Builds an array, with potential null elements, with docs to load. */ public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { IntArrayList[] docIdsToLoad = new IntArrayList[numShards]; for (ScoreDoc shardDoc : shardDocs) { IntArrayList shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex]; if (shardDocIdsToLoad == null) { shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex] = new IntArrayList(); } shardDocIdsToLoad.add(shardDoc.doc); } return docIdsToLoad; } /** * Enriches search hits and completion suggestion hits from <code>sortedDocs</code> using <code>fetchResultsArr</code>, * merges suggestions, aggregations and profile results * * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named * completion suggestion ordered by suggestion name */ public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.empty(); } ScoreDoc[] sortedDocs = reducedQueryPhase.scoreDocs; SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResults, resultsLookup); if (reducedQueryPhase.suggest != null) { if (!fetchResults.isEmpty()) { int currentOffset = hits.getHits().length; for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { final List<CompletionSuggestion.Entry.Option> suggestionOptions = suggestion.getOptions(); for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex); if (searchResultProvider == null) { // this can happen if we are hitting a shard failure during the fetch phase // in this case we referenced the shard result via the ScoreDoc but never got a // result from fetch. // TODO it would be nice to assert this in the future continue; } FetchSearchResult fetchResult = searchResultProvider.fetchResult(); final int index = fetchResult.counterGetAndIncrement(); assert index < fetchResult.hits().getHits().length : "not enough hits fetched. index [" + index + "] length: " + fetchResult.hits().getHits().length; SearchHit hit = fetchResult.hits().getHits()[index]; CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); hit.score(shardDoc.score); hit.shard(fetchResult.getSearchShardTarget()); suggestOption.setHit(hit); } currentOffset += suggestionOptions.size(); } assert currentOffset == sortedDocs.length : "expected no more score doc slices"; } } return reducedQueryPhase.buildResponse(hits); } private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) { final boolean sorted = reducedQueryPhase.isSortedByField; ScoreDoc[] sortedDocs = reducedQueryPhase.scoreDocs; int sortScoreIndex = -1; if (sorted) { for (int i = 0; i < reducedQueryPhase.sortField.length; i++) { if (reducedQueryPhase.sortField[i].getType() == SortField.Type.SCORE) { sortScoreIndex = i; } } } // clean the fetch counter for (SearchPhaseResult entry : fetchResults) { entry.fetchResult().initCounter(); } int from = ignoreFrom ? 0 : reducedQueryPhase.from; int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.size); // with collapsing we can have more fetch hits than sorted docs numSearchHits = Math.min(sortedDocs.length, numSearchHits); // merge hits List<SearchHit> hits = new ArrayList<>(); if (!fetchResults.isEmpty()) { for (int i = 0; i < numSearchHits; i++) { ScoreDoc shardDoc = sortedDocs[i]; SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex); if (fetchResultProvider == null) { // this can happen if we are hitting a shard failure during the fetch phase // in this case we referenced the shard result via the ScoreDoc but never got a // result from fetch. // TODO it would be nice to assert this in the future continue; } FetchSearchResult fetchResult = fetchResultProvider.fetchResult(); final int index = fetchResult.counterGetAndIncrement(); assert index < fetchResult.hits().getHits().length : "not enough hits fetched. index [" + index + "] length: " + fetchResult.hits().getHits().length; SearchHit searchHit = fetchResult.hits().getHits()[index]; if (sorted == false) { searchHit.score(shardDoc.score); } searchHit.shard(fetchResult.getSearchShardTarget()); if (sorted) { FieldDoc fieldDoc = (FieldDoc) shardDoc; searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.sortValueFormats); if (sortScoreIndex != -1) { searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue()); } } hits.add(searchHit); } } return new SearchHits(hits.toArray(new SearchHit[hits.size()]), reducedQueryPhase.totalHits, reducedQueryPhase.maxScore); } /** * Reduces the given query results and consumes all aggregations and profile results. * @param queryResults a list of non-null query shard results */ public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, boolean isScrollRequest) { return reducedQueryPhase(queryResults, isScrollRequest, true); } /** * Reduces the given query results and consumes all aggregations and profile results. * @param queryResults a list of non-null query shard results */ public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, boolean isScrollRequest, boolean trackTotalHits) { return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest); } /** * Reduces the given query results and consumes all aggregations and profile results. * @param queryResults a list of non-null query shard results * @param bufferedAggs a list of pre-collected / buffered aggregations. if this list is non-null all aggregations have been consumed * from all non-null query results. * @param bufferedTopDocs a list of pre-collected / buffered top docs. if this list is non-null all top docs have been consumed * from all non-null query results. * @param numReducePhases the number of non-final reduce phases applied to the query results. * @see QuerySearchResult#consumeAggs() * @see QuerySearchResult#consumeProfileResult() */ private ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, List<InternalAggregations> bufferedAggs, List<TopDocs> bufferedTopDocs, TopDocsStats topDocsStats, int numReducePhases, boolean isScrollRequest) { assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; numReducePhases++; // increment for this phase boolean timedOut = false; Boolean terminatedEarly = null; if (queryResults.isEmpty()) { // early terminate we have nothing to reduce return new ReducedQueryPhase(topDocsStats.totalHits, topDocsStats.fetchHits, topDocsStats.maxScore, timedOut, terminatedEarly, null, null, null, EMPTY_DOCS, null, null, numReducePhases, false, 0, 0, true); } final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult(); final boolean hasSuggest = firstResult.suggest() != null; final boolean hasProfileResults = firstResult.hasProfileResults(); final boolean consumeAggs; final List<InternalAggregations> aggregationsList; if (bufferedAggs != null) { consumeAggs = false; // we already have results from intermediate reduces and just need to perform the final reduce assert firstResult.hasAggs() : "firstResult has no aggs but we got non null buffered aggs?"; aggregationsList = bufferedAggs; } else if (firstResult.hasAggs()) { // the number of shards was less than the buffer size so we reduce agg results directly aggregationsList = new ArrayList<>(queryResults.size()); consumeAggs = true; } else { // no aggregations aggregationsList = Collections.emptyList(); consumeAggs = false; } // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size()) : Collections.emptyMap(); int from = 0; int size = 0; for (SearchPhaseResult entry : queryResults) { QuerySearchResult result = entry.queryResult(); from = result.from(); size = result.size(); if (result.searchTimedOut()) { timedOut = true; } if (result.terminatedEarly() != null) { if (terminatedEarly == null) { terminatedEarly = result.terminatedEarly(); } else if (result.terminatedEarly()) { terminatedEarly = true; } } if (hasSuggest) { assert result.suggest() != null; for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) { List<Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); suggestionList.add(suggestion); } } if (consumeAggs) { aggregationsList.add((InternalAggregations) result.consumeAggs()); } if (hasProfileResults) { String key = result.getSearchShardTarget().toString(); profileResults.put(key, result.consumeProfileResult()); } } final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); ReduceContext reduceContext = reduceContextFunction.apply(true); final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList, firstResult.pipelineAggregators(), reduceContext); final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults); final SortedTopDocs scoreDocs = this.sortDocs(isScrollRequest, queryResults, bufferedTopDocs, topDocsStats, from, size); return new ReducedQueryPhase(topDocsStats.totalHits, topDocsStats.fetchHits, topDocsStats.maxScore, timedOut, terminatedEarly, suggest, aggregations, shardResults, scoreDocs.scoreDocs, scoreDocs.sortFields, firstResult != null ? firstResult.sortValueFormats() : null, numReducePhases, scoreDocs.isSortedByField, size, from, firstResult == null); } /** * Performs an intermediate reduce phase on the aggregations. For instance with this reduce phase never prune information * that relevant for the final reduce step. For final reduce see {@link #reduceAggs(List, List, ReduceContext)} */ private InternalAggregations reduceAggsIncrementally(List<InternalAggregations> aggregationsList) { ReduceContext reduceContext = reduceContextFunction.apply(false); return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList, null, reduceContext); } private InternalAggregations reduceAggs(List<InternalAggregations> aggregationsList, List<SiblingPipelineAggregator> pipelineAggregators, ReduceContext reduceContext) { InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); if (pipelineAggregators != null) { List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false) .map((p) -> (InternalAggregation) p) .collect(Collectors.toList()); for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext); newAggs.add(newAgg); } return new InternalAggregations(newAggs); } return aggregations; } public static final class ReducedQueryPhase { // the sum of all hits across all reduces shards final long totalHits; // the number of returned hits (doc IDs) across all reduces shards final long fetchHits; // the max score across all reduces hits or {@link Float#NaN} if no hits returned final float maxScore; // <code>true</code> if at least one reduced result timed out final boolean timedOut; // non null and true if at least one reduced result was terminated early final Boolean terminatedEarly; // the reduced suggest results final Suggest suggest; // the reduced internal aggregations final InternalAggregations aggregations; // the reduced profile results final SearchProfileShardResults shardResults; // the number of reduces phases final int numReducePhases; // the searches merged top docs final ScoreDoc[] scoreDocs; // the top docs sort fields used to sort the score docs, <code>null</code> if the results are not sorted final SortField[] sortField; // <code>true</code> iff the result score docs is sorted by a field (not score), this implies that <code>sortField</code> is set. final boolean isSortedByField; // the size of the top hits to return final int size; // <code>true</code> iff the query phase had no results. Otherwise <code>false</code> final boolean isEmptyResult; // the offset into the merged top hits final int from; // sort value formats used to sort / format the result final DocValueFormat[] sortValueFormats; ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly, Suggest suggest, InternalAggregations aggregations, SearchProfileShardResults shardResults, ScoreDoc[] scoreDocs, SortField[] sortFields, DocValueFormat[] sortValueFormats, int numReducePhases, boolean isSortedByField, int size, int from, boolean isEmptyResult) { if (numReducePhases <= 0) { throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases); } this.totalHits = totalHits; this.fetchHits = fetchHits; if (Float.isInfinite(maxScore)) { this.maxScore = Float.NaN; } else { this.maxScore = maxScore; } this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.suggest = suggest; this.aggregations = aggregations; this.shardResults = shardResults; this.numReducePhases = numReducePhases; this.scoreDocs = scoreDocs; this.sortField = sortFields; this.isSortedByField = isSortedByField; this.size = size; this.from = from; this.isEmptyResult = isEmptyResult; this.sortValueFormats = sortValueFormats; } /** * Creates a new search response from the given merged hits. * @see #merge(boolean, ReducedQueryPhase, Collection, IntFunction) */ public InternalSearchResponse buildResponse(SearchHits hits) { return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases); } } /** * A {@link InitialSearchPhase.ArraySearchPhaseResults} implementation * that incrementally reduces aggregation results as shard results are consumed. * This implementation can be configured to batch up a certain amount of results and only reduce them * iff the buffer is exhausted. */ static final class QueryPhaseResultConsumer extends InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult> { private final InternalAggregations[] aggsBuffer; private final TopDocs[] topDocsBuffer; private final boolean hasAggs; private final boolean hasTopDocs; private final int bufferSize; private int index; private final SearchPhaseController controller; private int numReducePhases = 0; private final TopDocsStats topDocsStats = new TopDocsStats(); /** * Creates a new {@link QueryPhaseResultConsumer} * @param controller a controller instance to reduce the query response objects * @param expectedResultSize the expected number of query results. Corresponds to the number of shards queried * @param bufferSize the size of the reduce buffer. if the buffer size is smaller than the number of expected results * the buffer is used to incrementally reduce aggregation results before all shards responded. */ private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize, boolean hasTopDocs, boolean hasAggs) { super(expectedResultSize); if (expectedResultSize != 1 && bufferSize < 2) { throw new IllegalArgumentException("buffer size must be >= 2 if there is more than one expected result"); } if (expectedResultSize <= bufferSize) { throw new IllegalArgumentException("buffer size must be less than the expected result size"); } if (hasAggs == false && hasTopDocs == false) { throw new IllegalArgumentException("either aggs or top docs must be present"); } this.controller = controller; // no need to buffer anything if we have less expected results. in this case we don't consume any results ahead of time. this.aggsBuffer = new InternalAggregations[hasAggs ? bufferSize : 0]; this.topDocsBuffer = new TopDocs[hasTopDocs ? bufferSize : 0]; this.hasTopDocs = hasTopDocs; this.hasAggs = hasAggs; this.bufferSize = bufferSize; } @Override public void consumeResult(SearchPhaseResult result) { super.consumeResult(result); QuerySearchResult queryResult = result.queryResult(); consumeInternal(queryResult); } private synchronized void consumeInternal(QuerySearchResult querySearchResult) { if (index == bufferSize) { if (hasAggs) { InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(aggsBuffer)); Arrays.fill(aggsBuffer, null); aggsBuffer[0] = reducedAggs; } if (hasTopDocs) { TopDocs reducedTopDocs = controller.mergeTopDocs(Arrays.asList(topDocsBuffer), querySearchResult.from() + querySearchResult.size() // we have to merge here in the same way we collect on a shard , 0); Arrays.fill(topDocsBuffer, null); topDocsBuffer[0] = reducedTopDocs; } numReducePhases++; index = 1; } final int i = index++; if (hasAggs) { aggsBuffer[i] = (InternalAggregations) querySearchResult.consumeAggs(); } if (hasTopDocs) { final TopDocsAndMaxScore topDocs = querySearchResult.consumeTopDocs(); // can't be null topDocsStats.add(topDocs); SearchPhaseController.setShardIndex(topDocs.topDocs, querySearchResult.getShardIndex()); topDocsBuffer[i] = topDocs.topDocs; } } private synchronized List<InternalAggregations> getRemainingAggs() { return hasAggs ? Arrays.asList(aggsBuffer).subList(0, index) : null; } private synchronized List<TopDocs> getRemainingTopDocs() { return hasTopDocs ? Arrays.asList(topDocsBuffer).subList(0, index) : null; } @Override public ReducedQueryPhase reduce() { return controller.reducedQueryPhase(results.asList(), getRemainingAggs(), getRemainingTopDocs(), topDocsStats, numReducePhases, false); } /** * Returns the number of buffered results */ int getNumBuffered() { return index; } int getNumReducePhases() { return numReducePhases; } } /** * Returns a new ArraySearchPhaseResults instance. This might return an instance that reduces search responses incrementally. */ InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult> newSearchPhaseResults(SearchRequest request, int numShards) { SearchSourceBuilder source = request.source(); boolean isScrollRequest = request.scroll() != null; final boolean hasAggs = source != null && source.aggregations() != null; final boolean hasTopDocs = source == null || source.size() != 0; final boolean trackTotalHits = source == null || source.trackTotalHits(); if (isScrollRequest == false && (hasAggs || hasTopDocs)) { // no incremental reduce if scroll is used - we only hit a single shard or sometimes more... if (request.getBatchedReduceSize() < numShards) { // only use this if there are aggs and if there are more shards than we should reduce at once return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize(), hasTopDocs, hasAggs); } } return new InitialSearchPhase.ArraySearchPhaseResults(numShards) { @Override public ReducedQueryPhase reduce() { return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHits); } }; } static final class TopDocsStats { final boolean trackTotalHits; long totalHits; TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; long fetchHits; float maxScore = Float.NEGATIVE_INFINITY; TopDocsStats() { this(true); } TopDocsStats(boolean trackTotalHits) { this.trackTotalHits = trackTotalHits; this.totalHits = trackTotalHits ? 0 : -1; } void add(TopDocsAndMaxScore topDocs) { if (trackTotalHits) { totalHits += topDocs.topDocs.totalHits.value; if (topDocs.topDocs.totalHits.relation == Relation.GREATER_THAN_OR_EQUAL_TO) { totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; } } fetchHits += topDocs.topDocs.scoreDocs.length; if (!Float.isNaN(topDocs.maxScore)) { maxScore = Math.max(maxScore, topDocs.maxScore); } } } static final class SortedTopDocs { static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null); final ScoreDoc[] scoreDocs; final boolean isSortedByField; final SortField[] sortFields; SortedTopDocs(ScoreDoc[] scoreDocs, boolean isSortedByField, SortField[] sortFields) { this.scoreDocs = scoreDocs; this.isSortedByField = isSortedByField; this.sortFields = sortFields; } } }
{'content_hash': '5c3a5b91b81d07f8bf6f20f615a19610', 'timestamp': '', 'source': 'github', 'line_count': 773, 'max_line_length': 151, 'avg_line_length': 52.778783958602844, 'alnum_prop': 0.6361831462326585, 'repo_name': 'gfyoung/elasticsearch', 'id': '7d08c9f864e331e26a61e4fc53a73a273422f92f', 'size': '41586', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'ANTLR', 'bytes': '11082'}, {'name': 'Batchfile', 'bytes': '13592'}, {'name': 'Emacs Lisp', 'bytes': '3341'}, {'name': 'FreeMarker', 'bytes': '45'}, {'name': 'Groovy', 'bytes': '330070'}, {'name': 'HTML', 'bytes': '2186'}, {'name': 'Java', 'bytes': '42238459'}, {'name': 'Perl', 'bytes': '7271'}, {'name': 'Python', 'bytes': '54395'}, {'name': 'Shell', 'bytes': '108747'}]}
package ru.job4j.strategy; import org.junit.Test; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; /** * Test. * @author Aleksey Sidorenko (mailto:[email protected]) * @since 14.12.2017 */ public class TriangleTest { /** * Test pic. */ @Test public void whenDrawTriangle() { Shape triangle = new Triangle(); assertThat(triangle.draw(), is(new StringBuilder() .append(" . ") .append(System.lineSeparator()) .append(" - ") .append(System.lineSeparator()) .append(" --- ") .append(System.lineSeparator()) .append(" ----- ") .append(System.lineSeparator()) .append("-------") .toString() ) ); } }
{'content_hash': '05917b3049cd5b1ad962030d5f4edee8', 'timestamp': '', 'source': 'github', 'line_count': 34, 'max_line_length': 73, 'avg_line_length': 33.5, 'alnum_prop': 0.3845478489903424, 'repo_name': 'AlekseySidorenko/aleksey_sidorenko', 'id': '24585ff00a13d95702ac2eb9d10f15fe9da28187', 'size': '1139', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'chapter_002/src/test/java/ru/job4j/strategy/TriangleTest.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Java', 'bytes': '308816'}, {'name': 'Shell', 'bytes': '14'}, {'name': 'TSQL', 'bytes': '2241'}]}
namespace GafferSceneUI { IE_CORE_FORWARDDECLARE( SceneView ) class GAFFERSCENEUI_API ScaleTool : public TransformTool { public : ScaleTool( SceneView *view, const std::string &name = defaultName<ScaleTool>() ); ~ScaleTool() override; IE_CORE_DECLARERUNTIMETYPEDEXTENSION( GafferSceneUI::ScaleTool, ScaleToolTypeId, TransformTool ); /// Scales the current selection as if the handles /// had been dragged interactively. Exists mainly for /// use in the unit tests. void scale( const Imath::V3f &scale ); protected : bool affectsHandles( const Gaffer::Plug *input ) const override; void updateHandles() override; private : // The guts of the scaling logic. This is factored out of the // drag handling so it can be shared with the `scale()` public // method. struct Scale { Imath::V3f originalScale; GafferUI::Style::Axes axes; }; Scale createScale( GafferUI::Style::Axes axes ); void applyScale( const Scale &scale, float s ); // Drag handling. IECore::RunTimeTypedPtr dragBegin( GafferUI::Style::Axes axes ); bool dragMove( const GafferUI::Gadget *gadget, const GafferUI::DragDropEvent &event ); bool dragEnd(); Scale m_drag; static ToolDescription<ScaleTool, SceneView> g_toolDescription; }; } // namespace GafferSceneUI #endif // GAFFERSCENEUI_SCALETOOL_H
{'content_hash': '2517487a309b9ae3eee263f8df0c0980', 'timestamp': '', 'source': 'github', 'line_count': 54, 'max_line_length': 99, 'avg_line_length': 24.666666666666668, 'alnum_prop': 0.722972972972973, 'repo_name': 'ivanimanishi/gaffer', 'id': '900cfa36c3c4d2374bbdc7dcb37202145ace5241', 'size': '3296', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'include/GafferSceneUI/ScaleTool.h', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'C', 'bytes': '39753'}, {'name': 'C++', 'bytes': '6086015'}, {'name': 'CMake', 'bytes': '83446'}, {'name': 'CSS', 'bytes': '28027'}, {'name': 'GLSL', 'bytes': '6236'}, {'name': 'Python', 'bytes': '6120483'}, {'name': 'Shell', 'bytes': '13049'}, {'name': 'Slash', 'bytes': '2870'}]}
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.dialogflow.v2.model; /** * The card response message. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Dialogflow API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class GoogleCloudDialogflowV2IntentMessageCard extends com.google.api.client.json.GenericJson { /** * Optional. The collection of card buttons. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudDialogflowV2IntentMessageCardButton> buttons; /** * Optional. The public URI to an image file for the card. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String imageUri; /** * Optional. The subtitle of the card. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String subtitle; /** * Optional. The title of the card. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String title; /** * Optional. The collection of card buttons. * @return value or {@code null} for none */ public java.util.List<GoogleCloudDialogflowV2IntentMessageCardButton> getButtons() { return buttons; } /** * Optional. The collection of card buttons. * @param buttons buttons or {@code null} for none */ public GoogleCloudDialogflowV2IntentMessageCard setButtons(java.util.List<GoogleCloudDialogflowV2IntentMessageCardButton> buttons) { this.buttons = buttons; return this; } /** * Optional. The public URI to an image file for the card. * @return value or {@code null} for none */ public java.lang.String getImageUri() { return imageUri; } /** * Optional. The public URI to an image file for the card. * @param imageUri imageUri or {@code null} for none */ public GoogleCloudDialogflowV2IntentMessageCard setImageUri(java.lang.String imageUri) { this.imageUri = imageUri; return this; } /** * Optional. The subtitle of the card. * @return value or {@code null} for none */ public java.lang.String getSubtitle() { return subtitle; } /** * Optional. The subtitle of the card. * @param subtitle subtitle or {@code null} for none */ public GoogleCloudDialogflowV2IntentMessageCard setSubtitle(java.lang.String subtitle) { this.subtitle = subtitle; return this; } /** * Optional. The title of the card. * @return value or {@code null} for none */ public java.lang.String getTitle() { return title; } /** * Optional. The title of the card. * @param title title or {@code null} for none */ public GoogleCloudDialogflowV2IntentMessageCard setTitle(java.lang.String title) { this.title = title; return this; } @Override public GoogleCloudDialogflowV2IntentMessageCard set(String fieldName, Object value) { return (GoogleCloudDialogflowV2IntentMessageCard) super.set(fieldName, value); } @Override public GoogleCloudDialogflowV2IntentMessageCard clone() { return (GoogleCloudDialogflowV2IntentMessageCard) super.clone(); } }
{'content_hash': 'edeb9b04a233c92e1a842b9b119088ed', 'timestamp': '', 'source': 'github', 'line_count': 138, 'max_line_length': 182, 'avg_line_length': 30.028985507246375, 'alnum_prop': 0.7084942084942085, 'repo_name': 'googleapis/google-api-java-client-services', 'id': '2b017a073900c3fea0f9df2aa751d163e63d5a00', 'size': '4144', 'binary': False, 'copies': '5', 'ref': 'refs/heads/main', 'path': 'clients/google-api-services-dialogflow/v2/1.27.0/com/google/api/services/dialogflow/v2/model/GoogleCloudDialogflowV2IntentMessageCard.java', 'mode': '33188', 'license': 'apache-2.0', 'language': []}
SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
{'content_hash': 'c8a1c14a03c460cfa12f55184515e87b', 'timestamp': '', 'source': 'github', 'line_count': 13, 'max_line_length': 39, 'avg_line_length': 10.23076923076923, 'alnum_prop': 0.6917293233082706, 'repo_name': 'mdoering/backbone', 'id': 'dceff0006c4d148508551763539abf7548b54a4b', 'size': '191', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'life/Plantae/Magnoliophyta/Magnoliopsida/Fabales/Fabaceae/Calliandra/Calliandra rubescens/ Syn. Anneslia xalapensis/README.md', 'mode': '33188', 'license': 'apache-2.0', 'language': []}
import tensorflow as tf def run_python(): """ Getting the new height and new width for max pooling """ input_height = 4 input_width = 4 input_depth = 5 filter_height = 2 filter_width = 2 s = 2 # stride new_height = (input_height - filter_height)/s + 1 new_width = (input_width - filter_width)/s + 1 print("new height = {}, new width = {} and depth is {}".format(new_height, new_width, input_depth)) def run_tf(): """ Getting the new height and new width for max pooling using TensorFlow """ place_holder = tf.placeholder(tf.float32, (None, 4, 4, 5)) filter_shape = [1, 2, 2, 1] strides = [1, 2, 2, 1] padding = 'VALID' pool = tf.nn.max_pool(place_holder, filter_shape, strides, padding) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(pool, feed_dict={}) # Exception might occur. print(output) if __name__ == '__main__': run_python() run_tf()
{'content_hash': 'ec3fd325a36d17aca3f6d7847866d452', 'timestamp': '', 'source': 'github', 'line_count': 38, 'max_line_length': 103, 'avg_line_length': 26.57894736842105, 'alnum_prop': 0.594059405940594, 'repo_name': 'akshaybabloo/Car-ND', 'id': '3eb8307580f640a1f866589c28988e8fcf7f3406', 'size': '1010', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'Term_1/CNN_5/max_pooling_4.py', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'JavaScript', 'bytes': '551'}, {'name': 'Jupyter Notebook', 'bytes': '16855408'}, {'name': 'Python', 'bytes': '367767'}]}
@interface LNHomeModel : NSObject @property (nonatomic, strong) NSString *cover_image_url; @property (nonatomic, strong) NSNumber *ID; // 系统关键字id @property (nonatomic, strong) NSNumber *published_at; @property (nonatomic, strong) NSString *temp; // 系统关键字template @property (nonatomic, strong) NSNumber *editor_id; @property (nonatomic, strong) NSNumber *created_at; @property (nonatomic, strong) NSString *content_url; @property (nonatomic, strong) NSArray *labels; @property (nonatomic, strong) NSString *url; @property (nonatomic, strong) NSString *type; // 是NSTaggedPointerString类型 @property (nonatomic, strong) NSString *share_msg; @property (nonatomic, strong) NSString *title; @property (nonatomic, strong) NSNumber *updated_at; @property (nonatomic, strong) NSString *short_title; @property (nonatomic, assign) BOOL liked; @property (nonatomic, strong) NSNumber *likes_count; @property (nonatomic, strong) NSNumber *status; @end
{'content_hash': '4a276c5d98c8d81f269406db018c6c05', 'timestamp': '', 'source': 'github', 'line_count': 21, 'max_line_length': 73, 'avg_line_length': 44.666666666666664, 'alnum_prop': 0.767590618336887, 'repo_name': 'liuning1084126465/-A-', 'id': '9ef95d10d55e1c5ff55dcc5386c04c89d619a84f', 'size': '1136', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': '礼物说/礼物说/Classes/Models/LNHomeModel.h', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Objective-C', 'bytes': '683069'}]}
![Quasar Framework logo](https://cdn.rawgit.com/quasarframework/quasar-art/9127036d/dist/svg/quasar-play-logo-inline.svg) # Quasar Play > Quasar Framework Showcase webiste/app; Used on official documentation too. Currently only on [Google Play](https://play.google.com/store/apps/details?id=com.quasarframework.quasarplay&utm_source=global_co&utm_medium=prtnr&utm_content=Mar2515&utm_campaign=PartBadge&pcampaignid=MKT-Other-global-all-co-prtnr-py-PartBadge-Mar2515-1) BUT **way outdated** (uses very old Quasar v0.13). Requiring funding to pay fees on Apple Store and release the iOS counterpart too. Help [here](http://quasar-framework.org/support-quasar-framework.html). Please rate it on [Google Play](https://play.google.com/store/apps/details?id=com.quasarframework.quasarplay) and comment about it in the [Community Forum](http://forum.quasar-framework.org). # Quasar Framework > Build responsive Single Page Apps, **SSR Apps**, PWAs, Hybrid Mobile Apps and Electron Apps, all using the same codebase!, powered with Vue. <a href="https://badge.fury.io/js/quasar-framework"><img src="https://badge.fury.io/js/quasar-framework.svg"></a> ## Supporting Quasar Quasar Framework is an MIT-licensed open source project. Its ongoing development is made possible thanks to the support by these awesome [backers](https://github.com/rstoenescu/quasar-framework/blob/dev/backers.md). If you'd like to join them, check out [Quasar Framework's Patreon campaign](https://www.patreon.com/quasarframework). ## Documentation Head on to the Quasar Framework official website: [http://quasar-framework.org](http://quasar-framework.org) ## Community Forum Head on to the official community forum: [http://forum.quasar-framework.org](http://forum.quasar-framework.org) ## Quasar Repositories * [Quasar Framework](https://github.com/quasarframework/quasar) * [Quasar CLI](https://github.com/quasarframework/quasar-cli) * [Quasar Play App](https://github.com/quasarframework/quasar-play) ## Contributing I'm excited if you want to contribute to Quasar under any form (report bugs, write a plugin, fix an issue, write a new feature). ### Issue Reporting Guidelines **Please use the appropriate Github repo to report issues. See "Related Components" above.** For example, a bug related to CLI should be reported to the CLI repo, one related to build issues to Quasar Framework Templates repo and so on. - The issue list of the repository is **exclusively** for bug reports and feature requests. For anything else please use the [Community Forum](http://forum.quasar-framework.org). - Try to search for your issue, it may have already been fixed in the development branch or it may have a resolution. - Check if the issue is reproducible with the latest stable version of Quasar. If you are using a pre-release, please indicate the specific version you are using. - It is **required** that you clearly describe the steps necessary to reproduce the issue you are running into. Issues with no clear repro steps will not be triaged. If an issue labeled "need repro" receives no further input from the issue author for more than 5 days, it will be closed. - If your issue is resolved but still open, don’t hesitate to close it. In case you found a solution by yourself, it could be helpful to explain how you fixed it. Read more [here](http://quasar-framework.org/guide/contributing.html). ## License Copyright (c) 2016 Razvan Stoenescu [MIT License](http://en.wikipedia.org/wiki/MIT_License)
{'content_hash': '58ef778d2d1e7a6e9d7aeba27f5ef820', 'timestamp': '', 'source': 'github', 'line_count': 57, 'max_line_length': 460, 'avg_line_length': 61.31578947368421, 'alnum_prop': 0.7751072961373391, 'repo_name': 'quasarframework/quasar-play', 'id': '4676b95490d128f6d1cc2157ab7663334df86ef3', 'size': '3497', 'binary': False, 'copies': '2', 'ref': 'refs/heads/dev', 'path': 'README.md', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'CSS', 'bytes': '2403'}, {'name': 'HTML', 'bytes': '958'}, {'name': 'JavaScript', 'bytes': '24661'}, {'name': 'Vue', 'bytes': '423125'}]}
<?php defined('BASEPATH') OR exit('No direct script access allowed'); class Products extends MY_Controller { public function __construct(){ parent::__construct(); $this->load->model('products_model'); } public function index() { //$this->load->view('contracts/index'); } public function roofing_bended_panels() { $this->load->view('products/roofing_bended_panels'); } public function hardware_accessories() { $this->load->view('products/hardware_accessories'); } public function getProductsByCategoryComboBox( $cat_id = null) { $products = $this->products_model->getProductByCategory($cat_id); $products_data = array(); foreach ($products as $p) { $temp = array( 'id' => $p->p_id, 'text' => $p->p_name ); $products_data[] = $temp; } $this->output ->set_content_type('application/json') ->set_output(json_encode($products_data) ); } public function getProductThicknessComboBox( $cat_id = null) {} public function getProductWidthComboBox( $cat_id = null) {} public function getProductsLengthComboBox( $cat_id = null) {} public function getRoofingBendedPanels() { $roofingBendedPanels = $this->products_model->getRoofingBendedPanels(); $resultSet['rows'] = $roofingBendedPanels; $resultSet['total'] = count($roofingBendedPanels); $this->output ->set_content_type('application/json') ->set_output(json_encode($resultSet)); } public function getRoofingBendedPanelsCategory() { $roofingBendedPanelsCategory = $this->products_model->getRoofingBendedPanelsCategory(); $roofingBendedPanelsCategory_data = array(); foreach ($roofingBendedPanelsCategory as $p) { $temp = array( 'id' => $p->cat_id, 'text' => $p->cat_name ); $roofingBendedPanelsCategory_data[] = $temp; } $this->output ->set_content_type('application/json') ->set_output(json_encode($roofingBendedPanelsCategory_data) ); } public function saveProduct() { $post = $_POST; $rbp = $this->products_model->save( $post['data'] ); if ( $rbp ) { $this->output ->set_content_type('application/json') ->set_output( json_encode( array( 'status' => 'success' ) ) ); } } public function deleteByProductId() { $post = $_POST; $deleteRoofingBendedPanel = $this->products_model->deleteByProductId( $post ); if( $deleteRoofingBendedPanel ) { $this->output ->set_content_type('application/json') ->set_output( json_encode( array( 'status' => 'success' ) ) ); } } public function getHardwareAccessories() { $hardwareAccessories = $this->products_model->getHardwareAccessories(); $resultSet['rows'] = $hardwareAccessories; $resultSet['total'] = count($hardwareAccessories); $this->output ->set_content_type('application/json') ->set_output(json_encode($resultSet)); } }
{'content_hash': '16dae67052251d287cb3bf8f35aaa32f', 'timestamp': '', 'source': 'github', 'line_count': 121, 'max_line_length': 95, 'avg_line_length': 27.305785123966942, 'alnum_prop': 0.5699152542372882, 'repo_name': 'AsCEX/hardware', 'id': 'c586632aa8d79cf4261094e9c0995c424bd105b2', 'size': '3304', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'application/controllers/Products.php', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'ApacheConf', 'bytes': '506'}, {'name': 'CSS', 'bytes': '606407'}, {'name': 'HTML', 'bytes': '5633'}, {'name': 'JavaScript', 'bytes': '730369'}, {'name': 'PHP', 'bytes': '1854008'}]}
/** @file ACLoader.h * @brief Declaration of the .ac importer class. */ #ifndef AI_AC3DLOADER_H_INCLUDED #define AI_AC3DLOADER_H_INCLUDED #include <vector> #include <assimp/BaseImporter.h> #include <assimp/types.h> struct aiNode; struct aiMesh; struct aiMaterial; struct aiLight; namespace Assimp { // --------------------------------------------------------------------------- /** AC3D (*.ac) importer class */ class AC3DImporter : public BaseImporter { public: AC3DImporter(); ~AC3DImporter(); // Represents an AC3D material struct Material { Material() : rgb (0.6f,0.6f,0.6f) , spec (1.f,1.f,1.f) , shin (0.f) , trans (0.f) {} // base color of the material aiColor3D rgb; // ambient color of the material aiColor3D amb; // emissive color of the material aiColor3D emis; // specular color of the material aiColor3D spec; // shininess exponent float shin; // transparency. 0 == opaque float trans; // name of the material. optional. std::string name; }; // Represents an AC3D surface struct Surface { Surface() : mat (0) , flags (0) {} unsigned int mat,flags; typedef std::pair<unsigned int, aiVector2D > SurfaceEntry; std::vector< SurfaceEntry > entries; }; // Represents an AC3D object struct Object { Object() : type (World) , name( "" ) , children() , texture( "" ) , texRepeat( 1.f, 1.f ) , texOffset( 0.0f, 0.0f ) , rotation() , translation() , vertices() , surfaces() , numRefs (0) , subDiv (0) , crease() {} // Type description enum Type { World = 0x0, Poly = 0x1, Group = 0x2, Light = 0x4 } type; // name of the object std::string name; // object children std::vector<Object> children; // texture to be assigned to all surfaces of the object std::string texture; // texture repat factors (scaling for all coordinates) aiVector2D texRepeat, texOffset; // rotation matrix aiMatrix3x3 rotation; // translation vector aiVector3D translation; // vertices std::vector<aiVector3D> vertices; // surfaces std::vector<Surface> surfaces; // number of indices (= num verts in verbose format) unsigned int numRefs; // number of subdivisions to be performed on the // imported data unsigned int subDiv; // max angle limit for smoothing float crease; }; public: // ------------------------------------------------------------------- /** Returns whether the class can handle the format of the given file. * See BaseImporter::CanRead() for details. */ bool CanRead( const std::string& pFile, IOSystem* pIOHandler, bool checkSig) const; protected: // ------------------------------------------------------------------- /** Return importer meta information. * See #BaseImporter::GetInfo for the details */ const aiImporterDesc* GetInfo () const; // ------------------------------------------------------------------- /** Imports the given file into the given scene structure. * See BaseImporter::InternReadFile() for details*/ void InternReadFile( const std::string& pFile, aiScene* pScene, IOSystem* pIOHandler); // ------------------------------------------------------------------- /** Called prior to ReadFile(). * The function is a request to the importer to update its configuration * basing on the Importer's configuration property list.*/ void SetupProperties(const Importer* pImp); private: // ------------------------------------------------------------------- /** Get the next line from the file. * @return false if the end of the file was reached*/ bool GetNextLine(); // ------------------------------------------------------------------- /** Load the object section. This method is called recursively to * load subobjects, the method returns after a 'kids 0' was * encountered. * @objects List of output objects*/ void LoadObjectSection(std::vector<Object>& objects); // ------------------------------------------------------------------- /** Convert all objects into meshes and nodes. * @param object Current object to work on * @param meshes Pointer to the list of output meshes * @param outMaterials List of output materials * @param materials Material list * @param Scenegraph node for the object */ aiNode* ConvertObjectSection(Object& object, std::vector<aiMesh*>& meshes, std::vector<aiMaterial*>& outMaterials, const std::vector<Material>& materials, aiNode* parent = NULL); // ------------------------------------------------------------------- /** Convert a material * @param object Current object * @param matSrc Source material description * @param matDest Destination material to be filled */ void ConvertMaterial(const Object& object, const Material& matSrc, aiMaterial& matDest); private: // points to the next data line const char* buffer; // Configuration option: if enabled, up to two meshes // are generated per material: those faces who have // their bf cull flags set are separated. bool configSplitBFCull; // Configuration switch: subdivision surfaces are only // evaluated if the value is true. bool configEvalSubdivision; // counts how many objects we have in the tree. // basing on this information we can find a // good estimate how many meshes we'll have in the final scene. unsigned int mNumMeshes; // current list of light sources std::vector<aiLight*>* mLights; // name counters unsigned int lights, groups, polys, worlds; }; } // end of namespace Assimp #endif // AI_AC3DIMPORTER_H_INC
{'content_hash': 'e226220cd0d3c67eaa024ebe8f838cea', 'timestamp': '', 'source': 'github', 'line_count': 236, 'max_line_length': 78, 'avg_line_length': 27.14406779661017, 'alnum_prop': 0.5298157976896659, 'repo_name': 'google/filament', 'id': 'cab2c3ae54fe098c2a7e19f1668dd7d8315ce2c6', 'size': '8123', 'binary': False, 'copies': '4', 'ref': 'refs/heads/main', 'path': 'third_party/libassimp/code/AC/ACLoader.h', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Assembly', 'bytes': '2833995'}, {'name': 'Batchfile', 'bytes': '4607'}, {'name': 'C', 'bytes': '2796377'}, {'name': 'C#', 'bytes': '1099'}, {'name': 'C++', 'bytes': '7044879'}, {'name': 'CMake', 'bytes': '209759'}, {'name': 'CSS', 'bytes': '17232'}, {'name': 'Dockerfile', 'bytes': '2404'}, {'name': 'F#', 'bytes': '710'}, {'name': 'GLSL', 'bytes': '223763'}, {'name': 'Go', 'bytes': '61019'}, {'name': 'Groovy', 'bytes': '11811'}, {'name': 'HTML', 'bytes': '80095'}, {'name': 'Java', 'bytes': '780083'}, {'name': 'JavaScript', 'bytes': '90947'}, {'name': 'Kotlin', 'bytes': '345783'}, {'name': 'Objective-C', 'bytes': '55990'}, {'name': 'Objective-C++', 'bytes': '314291'}, {'name': 'Python', 'bytes': '76565'}, {'name': 'RenderScript', 'bytes': '1769'}, {'name': 'Ruby', 'bytes': '4436'}, {'name': 'Shell', 'bytes': '76965'}, {'name': 'TypeScript', 'bytes': '3293'}]}
using RethinkDb.Spec; using System; using System.Linq; using System.Linq.Expressions; using System.Reflection; using RethinkDb.Expressions; namespace RethinkDb.QueryTerm { public static class ExpressionUtils { public static Term CreateValueTerm<TReturn>(IQueryConverter queryConverter, Expression<Func<TReturn>> expression) { var termConverter = queryConverter.CreateExpressionConverter<TReturn>(queryConverter); return termConverter.CreateFunctionTerm(expression); } public static Term CreateFunctionTerm<TParameter1, TReturn>(IQueryConverter queryConverter, Expression<Func<TParameter1, TReturn>> expression) { var termConverter = queryConverter.CreateExpressionConverter<TParameter1, TReturn>(queryConverter); return termConverter.CreateFunctionTerm(expression); } public static Term CreateFunctionTerm<TParameter1, TParameter2, TReturn>(IQueryConverter queryConverter, Expression<Func<TParameter1, TParameter2, TReturn>> expression) { var termConverter = queryConverter.CreateExpressionConverter<TParameter1, TParameter2, TReturn>(queryConverter); return termConverter.CreateFunctionTerm(expression); } } }
{'content_hash': 'e885438b437a59974536bddd776e8e28', 'timestamp': '', 'source': 'github', 'line_count': 30, 'max_line_length': 176, 'avg_line_length': 42.36666666666667, 'alnum_prop': 0.7419354838709677, 'repo_name': 'nkreipke/rethinkdb-net', 'id': '77756c5f3f533985fe7902bf52724aa97a9d5658', 'size': '1273', 'binary': False, 'copies': '6', 'ref': 'refs/heads/master', 'path': 'rethinkdb-net/QueryTerm/ExpressionUtils.cs', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'C#', 'bytes': '969541'}, {'name': 'Protocol Buffer', 'bytes': '40726'}, {'name': 'Shell', 'bytes': '5181'}]}
package UI::Status; # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # # JvD Note: you always want to put Utils as the first use. Sh*t don't work if it's after the Mojo lines. use UI::Utils; use Mojo::Base 'Mojolicious::Controller'; use Data::Dumper; sub index { my $self = shift; my @data; my $orderby = "name"; $orderby = $self->param('orderby') if ( defined $self->param('orderby') ); my $rs_data = $self->db->resultset("Status")->search( undef, { order_by => $orderby } ); while ( my $row = $rs_data->next ) { push( @data, { "id" => $row->id, "name" => $row->name, "description" => $row->description, "last_updated" => $row->last_updated, } ); } $self->render( json => \@data ); } sub delete { my $self = shift; my $id = $self->param('id'); if ( !&is_admin($self) ) { $self->flash( alertmsg => "You must be an ADMIN to perform this operation!" ); } else { my $p_name = $self->db->resultset('Status')->search( { id => $id } )->get_column('name')->single(); my $delete = $self->db->resultset('Status')->search( { id => $id } ); $delete->delete(); &log( $self, "Delete status " . $p_name, "UICHANGE" ); } return $self->redirect_to('/misc'); } sub check_status_input { my $self = shift; my $sep = "__NEWLINE__"; # the line separator sub that with \n in the .ep javascript my $err = undef; # First, check permissions if ( !&is_admin($self) ) { $err .= "You must be an ADMIN to perform this operation!" . $sep; return $err; } return $err; } sub update { my $self = shift; my $id = $self->param('id'); my $err = &check_status_input($self); if ( defined($err) ) { $self->flash( alertmsg => $err ); } else { my $update = $self->db->resultset('Status')->find( { id => $self->param('id') } ); $update->name( $self->param('name') ); $update->description( $self->param('description') ); $update->update(); # if the update has failed, we don't even get here, we go to the exception page. &log( $self, "Update status with name " . $self->param('name') . " and id " . $self->param('id'), "UICHANGE" ); } $self->flash( alertmsg => "Success!" ); return $self->redirect_to('/misc'); } sub create { my $self = shift; my $err = &check_status_input($self); if ( defined($err) ) { $self->flash( alertmsg => $err ); } else { my $insert = $self->db->resultset('Status')->create( { name => $self->param('name'), description => $self->param('description'), } ); $insert->insert(); } $self->flash( alertmsg => "Success!" ); return $self->redirect_to('/misc'); } sub is_valid_status { my $self = shift; my $status = shift; my $valid = 0; my $row = $self->db->resultset("Status")->search( { name => $status } )->single; if ( defined($row) ) { return ( $row->id ); } else { return (undef); } } 1;
{'content_hash': '195a6fa7b6f0f4114c51ea28ceb85683', 'timestamp': '', 'source': 'github', 'line_count': 132, 'max_line_length': 113, 'avg_line_length': 25.416666666666668, 'alnum_prop': 0.6032786885245902, 'repo_name': 'naamashoresh/incubator-trafficcontrol', 'id': '71863df0403fb1d5fc386c64ab9f0984c783a62f', 'size': '3355', 'binary': False, 'copies': '23', 'ref': 'refs/heads/master', 'path': 'traffic_ops/app/lib/UI/Status.pm', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'C', 'bytes': '21929'}, {'name': 'CSS', 'bytes': '195099'}, {'name': 'Go', 'bytes': '1160352'}, {'name': 'HTML', 'bytes': '612938'}, {'name': 'Java', 'bytes': '1231693'}, {'name': 'JavaScript', 'bytes': '1767993'}, {'name': 'Makefile', 'bytes': '1047'}, {'name': 'PLSQL', 'bytes': '3450'}, {'name': 'PLpgSQL', 'bytes': '70798'}, {'name': 'Perl', 'bytes': '2614543'}, {'name': 'Perl6', 'bytes': '631229'}, {'name': 'Python', 'bytes': '11054'}, {'name': 'Roff', 'bytes': '4011'}, {'name': 'Ruby', 'bytes': '4090'}, {'name': 'Shell', 'bytes': '161675'}]}
<?php defined('C5_EXECUTE') or die("Access Denied."); class Marketplace { const E_INVALID_BASE_URL = 20; const E_MARKETPLACE_SUPPORT_MANUALLY_DISABLED = 21; const E_GENERAL_CONNECTION_ERROR = 99; protected $isConnected = false; protected $connectionError = false; public static function getInstance() { static $instance; if (!isset($instance)) { $m = __CLASS__; $instance = new $m; } return $instance; } public function __construct() { if (defined('ENABLE_MARKETPLACE_SUPPORT') && ENABLE_MARKETPLACE_SUPPORT == false) { $this->connectionError = Marketplace::E_MARKETPLACE_SUPPORT_MANUALLY_DISABLED; return; } $csToken = Config::get('MARKETPLACE_SITE_TOKEN'); if ($csToken != '') { $fh = Loader::helper('file'); $csiURL = urlencode(BASE_URL . DIR_REL); $url = MARKETPLACE_URL_CONNECT_VALIDATE."?csToken={$csToken}&csiURL=" . $csiURL; $vn = Loader::helper('validation/numbers'); $r = $fh->getContents($url); if ($r == false) { $this->isConnected = true; } else if ($vn->integer($r)) { $this->isConnected = false; $this->connectionError = $r; } else { $this->isConnected = false; $this->connectionError = self::E_GENERAL_CONNECTION_ERROR; } } } public function isConnected() { return $this->isConnected; } public function hasConnectionError() { return $this->connectionError != false; } public function getConnectionError() { return $this->connectionError; } public function generateSiteToken() { $fh = Loader::helper('file'); $token = $fh->getContents(MARKETPLACE_URL_CONNECT_TOKEN_NEW); return $token; } public function getSiteToken() { $token = Config::get('MARKETPLACE_SITE_TOKEN'); return $token; } public function getSitePageURL() { $token = Config::get('MARKETPLACE_SITE_URL_TOKEN'); return MARKETPLACE_BASE_URL_SITE_PAGE . '/' . $token; } public static function downloadRemoteFile($file) { $fh = Loader::helper('file'); $file .= '?csiURL=' . urlencode(BASE_URL . DIR_REL); $pkg = $fh->getContents($file); if (empty($pkg)) { return Package::E_PACKAGE_DOWNLOAD; } $file = time(); // Use the same method as the Archive library to build a temporary file name. $tmpFile = $fh->getTemporaryDirectory() . '/' . $file . '.zip'; $fp = fopen($tmpFile, "wb"); if ($fp) { fwrite($fp, $pkg); fclose($fp); } else { return Package::E_PACKAGE_SAVE; } return $file; } public function getMarketplaceFrame($width = '100%', $height = '530', $completeURL = false) { // if $mpID is passed, we are going to either // a. go to its purchase page // b. pass you through to the page AFTER connecting. $tp = new TaskPermission(); if ($tp->canInstallPackages()) { if (!$this->isConnected()) { $url = MARKETPLACE_URL_CONNECT; if (!$completeURL) { $completeURL = BASE_URL . View::url('/dashboard/settings/marketplace', 'connect_complete'); } $csReferrer = urlencode($completeURL); $csiURL = urlencode(BASE_URL . DIR_REL); if ($this->hasConnectionError()) { $csToken = $this->getSiteToken(); } else { // new connection $csToken = Marketplace::generateSiteToken(); } $url = $url . '?ts=' . time() . '&csiURL=' . $csiURL . '&csToken=' . $csToken . '&csReferrer=' . $csReferrer . '&csName=' . htmlspecialchars(SITE, ENT_QUOTES, APP_CHARSET); } if ($csToken == false) { return '<div class="ccm-error">' . t('Unable to generate a marketplace token. Please ensure that allow_url_fopen is turned on, or that cURL is enabled on your server. If these are both true, It\'s possible your site\'s IP address may be blacklisted for some reason on our server. Please ask your webhost what your site\'s outgoing cURL request IP address is, and email it to us at <a href="mailto:[email protected]">[email protected]</a>.') . '</div>'; } else { return '<iframe id="ccm-marketplace-frame-' . time() . '" frameborder="0" width="' . $width . '" height="' . $height . '" src="' . $url . '"></iframe>'; } } else { return '<div class="ccm-error">' . t('You do not have permission to connect this site to the marketplace.') . '</div>'; } } /** * Runs through all packages on the marketplace, sees if they're installed here, and updates the available version number for them */ public static function checkPackageUpdates() { Loader::model('system_notification'); $items = Marketplace::getAvailableMarketplaceItems(false); foreach($items as $i) { $p = Package::getByHandle($i->getHandle()); if (is_object($p)) { // we only add a notification if it's newer than the last one we know about if (version_compare($p->getPackageVersionUpdateAvailable(), $i->getVersion(), '<') && version_compare($p->getPackageVersion(), $i->getVersion(), '<')) { SystemNotification::add(SystemNotification::SN_TYPE_ADDON_UPDATE, t('An updated version of %s is available.', $i->getName()), t('New Version: %s.', $i->getVersion()), '', View::url('/dashboard/install', 'update'), $i->getRemoteURL()); } $p->updateAvailableVersionNumber($i->getVersion()); } } } public function getAvailableMarketplaceItems($filterInstalled=true) { Loader::model('marketplace_remote_item'); $fh = Loader::helper('file'); if (!$fh) return array(); // Retrieve the URL contents $csToken = Config::get('MARKETPLACE_SITE_TOKEN'); $csiURL = urlencode(BASE_URL . DIR_REL); $url = MARKETPLACE_PURCHASES_LIST_WS."?csToken={$csToken}&csiURL=" . $csiURL . "&csiVersion=" . APP_VERSION; $json = $fh->getContents($url); $addons=array(); $objects = @Loader::helper('json')->decode($json); if (is_array($objects)) { try { foreach($objects as $addon){ $mi = new MarketplaceRemoteItem(); $mi->setPropertiesFromJSONObject($addon); $remoteCID = $mi->getRemoteCollectionID(); if (!empty($remoteCID)) { $addons[$mi->getHandle()] = $mi; } } } catch (Exception $e) {} if ($filterInstalled && is_array($addons)) { Loader::model('package'); $handles = Package::getInstalledHandles(); if (is_array($handles)) { $adlist = array(); foreach($addons as $key=>$ad) { if (!in_array($ad->getHandle(), $handles)) { $adlist[$key] = $ad; } } $addons = $adlist; } } } return $addons; } } ?>
{'content_hash': '173e8f9da3b09ac624fd787700a8ac5e', 'timestamp': '', 'source': 'github', 'line_count': 196, 'max_line_length': 458, 'avg_line_length': 32.40816326530612, 'alnum_prop': 0.6363350125944585, 'repo_name': 'homer6/concrete5', 'id': '411e9699c6257d7214eeacd48de13277bbe3f84e', 'size': '6352', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'concrete/libraries/marketplace.php', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'ActionScript', 'bytes': '172889'}, {'name': 'JavaScript', 'bytes': '761341'}, {'name': 'PHP', 'bytes': '6679489'}, {'name': 'Python', 'bytes': '2003'}]}
package ac.ncic.test.console3; import java.util.List; import java.util.ArrayList; import java.util.Map; import android.os.Bundle; import android.os.AsyncTask; import android.app.ActionBar; import android.app.ListFragment; import android.app.AlertDialog; import android.content.DialogInterface; import android.view.View; import android.view.ViewGroup; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuItem; import android.view.MenuInflater; import android.widget.ListView; import android.widget.BaseAdapter; import android.widget.ArrayAdapter; import android.widget.HorizontalScrollView; import android.widget.TableLayout; import android.widget.TableRow; import android.widget.ImageView; import android.widget.EditText; import android.widget.TextView; import android.widget.Spinner; import android.widget.Toast; import com.woorea.openstack.nova.model.Server; import com.woorea.openstack.nova.model.Server.Addresses.Address; import com.woorea.openstack.nova.model.Servers; import com.woorea.openstack.nova.model.Link; import com.woorea.openstack.nova.model.Flavor; import com.woorea.openstack.nova.model.Flavors; import com.woorea.openstack.nova.model.Image; import com.woorea.openstack.nova.model.Images; import com.woorea.openstack.nova.model.KeyPair; import com.woorea.openstack.nova.model.KeyPairs; import com.woorea.openstack.nova.model.SecurityGroup; import com.woorea.openstack.nova.model.SecurityGroups; import ac.ncic.phpc.ul.OpenStackClientService; public class InstancesFragment extends ListFragment { private ActionBar actionBar; private MenuItem launchMenuItem, refreshMenuItem; private View _thisView; private AlertDialog launchServerDialog, launchedServerDialog; private View launchServerView; private AlertDialog errorInfoDialog; private InstanceListAdapter instanceListAdapter; private final String[] stateMap = new String[] { "NOSTATE", "RUNNING", "BLOCKED", "PAUSED", "SHUTDOWN", "SHUTOFF", "CRASHED", "SUSPENDED", "FAILED", "BUILDING" }; @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { setHasOptionsMenu(true); if (null == actionBar) actionBar = getActivity().getActionBar(); actionBar.setTitle("Instances"); actionBar.removeAllTabs(); if (null == _thisView) { _thisView = super.onCreateView(inflater, container, savedInstanceState); if (null == instanceListAdapter) instanceListAdapter = new InstanceListAdapter(); setListAdapter(instanceListAdapter); } else { //((ViewGroup)_thisView.getParent()).removeView(_thisView); } return _thisView; } @Override public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) { inflater.inflate(R.menu.action_instances, menu); super.onCreateOptionsMenu(menu, inflater); launchMenuItem = menu.findItem(R.id.actionLaunch ); refreshMenuItem = menu.findItem(R.id.actionRefresh); } @Override public boolean onOptionsItemSelected(MenuItem menuItem) { switch (menuItem.getItemId()) { case R.id.actionLaunch: launchMenuItem = menuItem; new prepareAndShowLaunchDialog().execute(); return true; case R.id.actionRefresh: refreshMenuItem = menuItem; new NovaList().execute(); return true; default: return false; } } @Override public void onListItemClick(ListView l, View v, int position, long id) { Toast.makeText(getActivity(), "nova show", Toast.LENGTH_SHORT).show(); showInstanceDetail(instanceListAdapter.getItem(position)); } private void showInstanceDetail(Server server) { Bundle instanceDetail = new Bundle(); StringBuilder sb; instanceDetail.putString("instanceId" , server.getId() ); instanceDetail.putString("instanceName" , server.getName() ); instanceDetail.putString("instanceLibvirtName", server.getInstanceName()); instanceDetail.putString("userId" , server.getUserId() ); instanceDetail.putString("tenantId" , server.getTenantId() ); instanceDetail.putString("instanceStatus" , server.getStatus() ); if (server.getPowerState() != null) instanceDetail.putString("instancePowerState", stateMap[Integer.parseInt(server.getPowerState())]); Map<String, List<Address>> addresses = server.getAddresses().getAddresses(); sb = new StringBuilder(); for (Map.Entry<String, List<Address>> entry : addresses.entrySet()) { sb.append(entry.getKey() + "="); List<Address> subAddresses = entry.getValue(); for (int i = 0; i < subAddresses.size(); i++) { sb.append(subAddresses.get(i).getAddr()); if (i != (subAddresses.size()-1)) sb.append(", "); } sb.append("; "); } if (sb.length() > 1) sb.setLength(sb.length() - 2); instanceDetail.putString("instanceNetworks", sb.toString()); instanceDetail.putString("instanceImage", server.getImage().getName() + " (" + server.getImage().getId() + ")"); instanceDetail.putString("instanceFlavor", server.getFlavor().getName() + " (" + server.getFlavor().getId() + ")"); instanceDetail.putString("instanceKeyName", server.getKeyName()); List<SecurityGroup> serverSecurityGroups = server.getSecurityGroups(); sb = new StringBuilder(); for (int i = 0; i < serverSecurityGroups.size(); i++) { sb.append(serverSecurityGroups.get(i).getName()); if (i != (serverSecurityGroups.size()-1)) sb.append(", "); } instanceDetail.putString("instanceSecurityGroups", sb.toString()); List<String> volumes = server.getOsExtendedVolumesAttached(); sb = new StringBuilder(); for (int i = 0; i < volumes.size(); i++) { sb.append(volumes.get(i)); if (i != (volumes.size()-1)) sb.append(", "); } instanceDetail.putString("instanceVolumes", sb.toString()); instanceDetail.putString("instanceHost", server.getHost()); instanceDetail.putString("instanceZone", server.getAvailabilityZone()); instanceDetail.putString("instanceCreatedAt", server.getCreated()); instanceDetail.putString("instanceUpdatedAt", server.getUpdated()); sb = new StringBuilder(); if (null == server.getAccessIPv4() || "".equals(server.getAccessIPv4())) sb.append("null"); else sb.append(server.getAccessIPv4()); sb.append(", "); if (null == server.getAccessIPv6() || "".equals(server.getAccessIPv6())) sb.append("null"); else sb.append(server.getAccessIPv6()); instanceDetail.putString("instanceAccessIps", sb.toString()); InstanceDetailFragment instanceFragment = new InstanceDetailFragment(); instanceFragment.setArguments(instanceDetail); getFragmentManager().beginTransaction() .replace(R.id.content, instanceFragment, "instance-detail") .addToBackStack("instance-detail") .commit(); } class InstanceListAdapter extends BaseAdapter { private List<Server> servers = new ArrayList<Server>(); @Override public int getCount() { return servers.size(); } @Override public Server getItem(int position) { return servers.get(position); } @Override public long getItemId(int position) { return position; } @Override public View getView(int position, View convertView, ViewGroup parent) { View instanceItemView = convertView; if (null == instanceItemView) { LayoutInflater inflater = getActivity().getLayoutInflater(); instanceItemView = inflater.inflate(R.layout.fragment_instance_item, parent, false); ViewHolder viewHolder = new ViewHolder(); viewHolder.serverImage = (ImageView) instanceItemView.findViewById(R.id.serverImage); viewHolder.serverName = (TextView) instanceItemView.findViewById(R.id.serverName); viewHolder.serverStatus = (TextView) instanceItemView.findViewById(R.id.serverStatus); viewHolder.serverNetworks = (TextView) instanceItemView.findViewById(R.id.serverNetworks); viewHolder.serverPowerState = (TextView) instanceItemView.findViewById(R.id.serverPowerState); instanceItemView.setTag(viewHolder); } ViewHolder holder = (ViewHolder) instanceItemView.getTag(); Server server = getItem(position); String serverName = server.getName(); holder.serverName.setText(serverName); if (serverName.startsWith("Win") || serverName.startsWith("win") || serverName.startsWith("WES") || serverName.startsWith("wes")) holder.serverImage.setImageResource(R.drawable.ic_os_windows); else if (serverName.startsWith("CentOS") || serverName.startsWith("centos")) holder.serverImage.setImageResource(R.drawable.ic_os_centos); else if (serverName.startsWith("Ubuntu") || serverName.startsWith("ubuntu")) holder.serverImage.setImageResource(R.drawable.ic_os_ubuntu); else if (serverName.startsWith("Fedora") || serverName.startsWith("fedora")) holder.serverImage.setImageResource(R.drawable.ic_os_fedora); else if (serverName.startsWith("Debian") || serverName.startsWith("debian")) holder.serverImage.setImageResource(R.drawable.ic_os_debian); else holder.serverImage.setImageResource(R.drawable.ic_os_default); holder.serverStatus.setText(server.getStatus()); Map<String, List<Address>> addresses = server.getAddresses().getAddresses(); StringBuilder sb = new StringBuilder(); for (Map.Entry<String, List<Address>> entry : addresses.entrySet()) { sb.append(entry.getKey() + "="); List<Address> subAddresses = entry.getValue(); for (int i = 0; i < subAddresses.size(); i++) { sb.append(subAddresses.get(i).getAddr()); if (i != (subAddresses.size()-1)) sb.append(", "); } sb.append("; "); } if (sb.length() > 1) sb.setLength(sb.length() - 2); holder.serverNetworks.setText(sb.toString()); holder.serverPowerState.setText(stateMap[Integer.parseInt(server.getPowerState())]); return instanceItemView; } public void updateServers(Servers servers) { this.servers = servers.getList(); notifyDataSetChanged(); } class ViewHolder { public ImageView serverImage; public TextView serverName; public TextView serverStatus; public TextView serverNetworks; public TextView serverPowerState; } } class prepareAndShowLaunchDialog extends AsyncTask<Void, Void, prepareAndShowLaunchDialog.ResultType> { @Override protected void onPreExecute() { Toast.makeText(getActivity(), "Preparing ...", Toast.LENGTH_SHORT).show(); launchMenuItem.setActionView(R.layout.action_refresh_progress); launchMenuItem.expandActionView(); } @Override protected ResultType doInBackground(Void... params) { ResultType result = new ResultType(); try { result.flavors = OpenStackClientService.getInstance().compute().getFlavors(true); result.images = OpenStackClientService.getInstance().compute().getImages(true); result.keyPairs = OpenStackClientService.getInstance().compute().getKeyPairs(); result.securityGroups = OpenStackClientService.getInstance().compute().getSecurityGroups(); } catch (Exception e) { result.exceptionCatched = true; result.exceptionMessage = e.getMessage(); } return result; } @Override protected void onPostExecute(final ResultType result) { launchMenuItem.collapseActionView(); launchMenuItem.setActionView(null); if (result.exceptionCatched) { if (null == errorInfoDialog) errorInfoDialog = new AlertDialog.Builder(getActivity()) .setTitle("Error!") .setPositiveButton("OK", null) .setCancelable(false) .create(); errorInfoDialog.setMessage(result.exceptionMessage); errorInfoDialog.show(); } else { if (null == launchServerView) { launchServerView = getActivity() .getLayoutInflater() .inflate(R.layout.dialog_launch_server, (ViewGroup) getActivity().findViewById(R.id.launchServer)); ViewHolder viewHolder = new ViewHolder(); viewHolder.nameEdit = (EditText) launchServerView.findViewById(R.id.nameEdit); viewHolder.flavorSpinner = (Spinner) launchServerView.findViewById(R.id.flavorSpinner); viewHolder.imageSpinner = (Spinner) launchServerView.findViewById(R.id.imageSpinner); viewHolder.keyNameSpinner = (Spinner) launchServerView.findViewById(R.id.keyNameSpinner); viewHolder.securityGroupSpinner = (Spinner) launchServerView.findViewById(R.id.securityGroupSpinner); launchServerView.setTag(viewHolder); } final ViewHolder holder = (ViewHolder) launchServerView.getTag(); List<String> flavorList = new ArrayList<String>(); for (Flavor flavor: result.flavors) { flavorList.add(flavor.getName()); } ArrayAdapter<String> flavorAdapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, flavorList); flavorAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); holder.flavorSpinner.setAdapter(flavorAdapter); List<String> imageList = new ArrayList<String>(); for (Image image: result.images) { imageList.add(image.getName()); } ArrayAdapter<String> imageAdapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, imageList); imageAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); holder.imageSpinner.setAdapter(imageAdapter); List<String> keyNameList = new ArrayList<String>(); for (KeyPair keyPair: result.keyPairs) { keyNameList.add(keyPair.getName()); } ArrayAdapter<String> keyNameAdapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, keyNameList); keyNameAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); holder.keyNameSpinner.setAdapter(keyNameAdapter); List<String> securityGroupList = new ArrayList<String>(); for (SecurityGroup securityGroup: result.securityGroups) { securityGroupList.add(securityGroup.getName()); } ArrayAdapter<String> securityGroupAdapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, securityGroupList); securityGroupAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); holder.securityGroupSpinner.setAdapter(securityGroupAdapter); if (null == launchServerDialog) launchServerDialog = new AlertDialog.Builder(getActivity()) .setTitle("Launch Server") .setView(launchServerView) .setPositiveButton("OK", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface d, int which) { } }) .setNegativeButton("Cancel", null) .setCancelable(false) .create(); launchServerDialog.setOnShowListener(new DialogInterface.OnShowListener() { @Override public void onShow(DialogInterface dialog) { launchServerDialog.getButton(AlertDialog.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { String serverName = holder.nameEdit.getText().toString(); if (serverName.equals("")) { Toast.makeText(getActivity(), "Please enter a NAME!", Toast.LENGTH_SHORT).show(); } else { launchServerDialog.dismiss(); NovaBoot novaBoot = new NovaBoot(); NovaBoot.ParameterType params = novaBoot.new ParameterType(); params.name = serverName; params.flavorId = result.flavors.getList().get(holder.flavorSpinner.getSelectedItemPosition()).getId(); params.imageId = result.images.getList().get(holder.imageSpinner.getSelectedItemPosition()).getId(); params.keyName = result.keyPairs.getList().get(holder.keyNameSpinner.getSelectedItemPosition()).getName(); params.securityGroup = result.securityGroups.getList().get(holder.securityGroupSpinner.getSelectedItemPosition()).getName(); novaBoot.execute(params); } } }); } }); launchServerDialog.show(); } } class ResultType { public Flavors flavors; public Images images; public KeyPairs keyPairs; public SecurityGroups securityGroups; public boolean exceptionCatched; public String exceptionMessage; public ResultType() { exceptionCatched = false; } } class ViewHolder { public EditText nameEdit; public Spinner flavorSpinner; public Spinner imageSpinner; public Spinner keyNameSpinner; public Spinner securityGroupSpinner; } } class NovaList extends AsyncTask<Void, Void, NovaList.ResultType> { @Override protected void onPreExecute() { Toast.makeText(getActivity(), "nova list", Toast.LENGTH_SHORT).show(); refreshMenuItem.setActionView(R.layout.action_refresh_progress); refreshMenuItem.expandActionView(); }; @Override protected ResultType doInBackground(Void ... params) { ResultType result = new ResultType(); try { result.servers = OpenStackClientService.getInstance().compute().getServers(true); } catch (Exception e) { result.exceptionCatched = true; result.exceptionMessage = e.getMessage(); } return result; } @Override protected void onPostExecute(ResultType result) { refreshMenuItem.collapseActionView(); refreshMenuItem.setActionView(null); if (result.exceptionCatched) { if (null == errorInfoDialog) errorInfoDialog = new AlertDialog.Builder(getActivity()) .setTitle("Error!") .setPositiveButton("OK", null) .setCancelable(false) .create(); errorInfoDialog.setMessage(result.exceptionMessage); errorInfoDialog.show(); } else { instanceListAdapter.updateServers(result.servers); } } class ResultType { public Servers servers; public boolean exceptionCatched; public String exceptionMessage; public ResultType() { exceptionCatched = false; } } } class NovaBoot extends AsyncTask<NovaBoot.ParameterType, Void, NovaBoot.ResultType> { @Override protected void onPreExecute() { Toast.makeText(getActivity(), "nova boot ...", Toast.LENGTH_SHORT).show(); launchMenuItem.setActionView(R.layout.action_refresh_progress); launchMenuItem.expandActionView(); } @Override protected ResultType doInBackground(ParameterType... params) { ResultType result = new ResultType(); try { result.server = OpenStackClientService .getInstance() .compute() .simpleBootServer(params[0].name, params[0].flavorId, params[0].imageId, params[0].keyName, params[0].securityGroup); } catch (Exception e) { result.exceptionCatched = true; result.exceptionMessage = e.getMessage(); } return result; } @Override protected void onPostExecute(ResultType result) { launchMenuItem.collapseActionView(); launchMenuItem.setActionView(null); if (result.exceptionCatched) { if (null == errorInfoDialog) errorInfoDialog = new AlertDialog.Builder(getActivity()) .setTitle("Error!") .setPositiveButton("OK", null) .setCancelable(false) .create(); errorInfoDialog.setMessage(result.exceptionMessage); errorInfoDialog.show(); } else { //if (null == launchedServerDialog) launchedServerDialog = new AlertDialog.Builder(getActivity()) .setTitle("Server Launched") .setPositiveButton("OK", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface d, int which) { onOptionsItemSelected(refreshMenuItem); } }) .setCancelable(false) .create(); /*StringBuilder sb = new StringBuilder(); sb.append("ID: " + result.server.getId() + "\n"); for (Link link: result.server.getLinks()) if (link.getRel().equals("self")) sb.append("Link: " + link.getHref() + "\n"); sb.append("Security Group(s): "); List<SecurityGroup> serverSecurityGroups = result.server.getSecurityGroups(); for (int i = 0; i < serverSecurityGroups.size(); i++) { sb.append(serverSecurityGroups.get(i).getName()); if (i != (serverSecurityGroups.size()-1)) sb.append(", "); } sb.append("\n"); sb.append("OS-DCF:diskConfig: " + result.server.getDiskConfig() + "\n"); sb.append("adminPass: " + result.server.getAdminPass() + "\n"); launchedServerDialog.setMessage(sb.toString());*/ /* As an ugly demo of programmatically building a view. */ HorizontalScrollView sv = new HorizontalScrollView(getActivity()); TableLayout tl = new TableLayout(getActivity()); tl.setStretchAllColumns(true); tl.setPaddingRelative(16, 16, 16, 16); TableRow[] tr = new TableRow[] { (TableRow) getActivity().getLayoutInflater().inflate(R.layout.tablerow_common, tl, false), (TableRow) getActivity().getLayoutInflater().inflate(R.layout.tablerow_common, tl, false), (TableRow) getActivity().getLayoutInflater().inflate(R.layout.tablerow_common, tl, false), (TableRow) getActivity().getLayoutInflater().inflate(R.layout.tablerow_common, tl, false), (TableRow) getActivity().getLayoutInflater().inflate(R.layout.tablerow_common, tl, false) }; TextView[] key = new TextView[] { (TextView) tr[0].findViewById(R.id.rowKey), (TextView) tr[1].findViewById(R.id.rowKey), (TextView) tr[2].findViewById(R.id.rowKey), (TextView) tr[3].findViewById(R.id.rowKey), (TextView) tr[4].findViewById(R.id.rowKey) }; key[0].setText("ID"); key[1].setText("Link"); key[2].setText("Security Group(s)"); key[3].setText("OS-DCF:diskConfig"); key[4].setText("adminPass"); TextView[] value = new TextView[] { (TextView) tr[0].findViewById(R.id.rowValue), (TextView) tr[1].findViewById(R.id.rowValue), (TextView) tr[2].findViewById(R.id.rowValue), (TextView) tr[3].findViewById(R.id.rowValue), (TextView) tr[4].findViewById(R.id.rowValue) }; value[0].setText(result.server.getId()); for (Link link: result.server.getLinks()) if (link.getRel().equals("self")) value[1].setText(link.getHref()); StringBuilder sb = new StringBuilder(); List<SecurityGroup> serverSecurityGroups = result.server.getSecurityGroups(); for (int i = 0; i < serverSecurityGroups.size(); i++) { sb.append(serverSecurityGroups.get(i).getName()); if (i != (serverSecurityGroups.size()-1)) sb.append(", "); } value[2].setText(sb.toString()); value[3].setText(result.server.getDiskConfig()); value[4].setText(result.server.getAdminPass()); tl.addView(tr[0]); tl.addView(tr[1]); tl.addView(tr[2]); tl.addView(tr[3]); tl.addView(tr[4]); sv.addView(tl); launchedServerDialog.setView(sv); //launchedServerDialog.getWindow().setLayout(600, 400); launchedServerDialog.show(); } } public class ParameterType { public String name; public String flavorId; public String imageId; public String keyName; public String securityGroup; } class ResultType { public Server server; public boolean exceptionCatched; public String exceptionMessage; public ResultType() { exceptionCatched = false; } } } }
{'content_hash': '58b7da00c516148230a16c2c29c52d0a', 'timestamp': '', 'source': 'github', 'line_count': 743, 'max_line_length': 145, 'avg_line_length': 33.48452220726783, 'alnum_prop': 0.6566984203545159, 'repo_name': 'qzan9/openstack-android-client', 'id': 'd1fa4076e2f95b3c362752e8854e08df7f059353', 'size': '24914', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'src/ac/ncic/test/console3/InstancesFragment.java', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Java', 'bytes': '105927'}]}
<!DOCTYPE HTML> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (11.0.6) on Mon Mar 02 14:33:08 NOVT 2020 --> <title>ModelUtil</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="dc.created" content="2020-03-02"> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style"> <link rel="stylesheet" type="text/css" href="../../../../jquery/jquery-ui.css" title="Style"> <script type="text/javascript" src="../../../../script.js"></script> <script type="text/javascript" src="../../../../jquery/jszip/dist/jszip.min.js"></script> <script type="text/javascript" src="../../../../jquery/jszip-utils/dist/jszip-utils.min.js"></script> <!--[if IE]> <script type="text/javascript" src="../../../../jquery/jszip-utils/dist/jszip-utils-ie.min.js"></script> <![endif]--> <script type="text/javascript" src="../../../../jquery/jquery-3.3.1.js"></script> <script type="text/javascript" src="../../../../jquery/jquery-migrate-3.0.1.js"></script> <script type="text/javascript" src="../../../../jquery/jquery-ui.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="ModelUtil"; } } catch(err) { } //--> var data = {"i0":9,"i1":9,"i2":9,"i3":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; var activeTableTab = "activeTableTab"; var pathtoroot = "../../../../"; var useModuleDirectories = true; loadScripts(document, 'script');</script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <header role="banner"> <nav role="navigation"> <div class="fixedNav"> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a id="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a id="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../index.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="class-use/ModelUtil.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-files/index-1.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../allclasses.html">All&nbsp;Classes</a></li> </ul> <ul class="navListSearch"> <li><label for="search">SEARCH:</label> <input type="text" id="search" value="search" disabled="disabled"> <input type="reset" id="reset" value="reset" disabled="disabled"> </li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.detail">Method</a></li> </ul> </div> <a id="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> </div> <div class="navPadding">&nbsp;</div> <script type="text/javascript"><!-- $('.navPadding').css('padding-top', $('.fixedNav').css("height")); //--> </script> </nav> </header> <!-- ======== START OF CLASS DATA ======== --> <main role="main"> <div class="header"> <div class="subTitle"><span class="packageLabelInType">Package</span>&nbsp;<a href="package-summary.html">org.enterprisedomain.classmaker.util</a></div> <h2 title="Class ModelUtil" class="title">Class ModelUtil</h2> </div> <div class="contentContainer"> <ul class="inheritance"> <li>java.lang.Object</li> <li> <ul class="inheritance"> <li>org.enterprisedomain.classmaker.util.ModelUtil</li> </ul> </li> </ul> <div class="description"> <ul class="blockList"> <li class="blockList"> <hr> <pre>public class <span class="typeNameLabel">ModelUtil</span> extends java.lang.Object</pre> </li> </ul> </div> <div class="summary"> <ul class="blockList"> <li class="blockList"> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <section role="region"> <ul class="blockList"> <li class="blockList"><a id="constructor.summary"> <!-- --> </a> <h3>Constructor Summary</h3> <table class="memberSummary"> <caption><span>Constructors</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Constructor</th> <th class="colLast" scope="col">Description</th> </tr> <tr class="altColor"> <th class="colConstructorName" scope="row"><code><span class="memberNameLink"><a href="#%3Cinit%3E()">ModelUtil</a></span>()</code></th> <td class="colLast">&nbsp;</td> </tr> </table> </li> </ul> </section> <!-- ========== METHOD SUMMARY =========== --> <section role="region"> <ul class="blockList"> <li class="blockList"><a id="method.summary"> <!-- --> </a> <h3>Method Summary</h3> <table class="memberSummary"> <caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd">&nbsp;</span></span><span id="t1" class="tableTab"><span><a href="javascript:show(1);">Static Methods</a></span><span class="tabEnd">&nbsp;</span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd">&nbsp;</span></span></caption> <tr> <th class="colFirst" scope="col">Modifier and Type</th> <th class="colSecond" scope="col">Method</th> <th class="colLast" scope="col">Description</th> </tr> <tr id="i0" class="altColor"> <td class="colFirst"><code>static boolean</code></td> <th class="colSecond" scope="row"><code><span class="memberNameLink"><a href="#eObjectsAreEqual(org.eclipse.emf.common.util.EList,org.eclipse.emf.common.util.EList,boolean)">eObjectsAreEqual</a></span>&#8203;(org.eclipse.emf.common.util.EList&lt;org.eclipse.emf.ecore.EObject&gt;&nbsp;first, org.eclipse.emf.common.util.EList&lt;org.eclipse.emf.ecore.EObject&gt;&nbsp;second, boolean&nbsp;conjunction)</code></th> <td class="colLast">&nbsp;</td> </tr> <tr id="i1" class="rowColor"> <td class="colFirst"><code>static boolean</code></td> <th class="colSecond" scope="row"><code><span class="memberNameLink"><a href="#eObjectsAreEqual(org.eclipse.emf.ecore.EObject,org.eclipse.emf.common.util.EList,boolean)">eObjectsAreEqual</a></span>&#8203;(org.eclipse.emf.ecore.EObject&nbsp;first, org.eclipse.emf.common.util.EList&lt;org.eclipse.emf.ecore.EObject&gt;&nbsp;second, boolean&nbsp;conjunction)</code></th> <td class="colLast">&nbsp;</td> </tr> <tr id="i2" class="altColor"> <td class="colFirst"><code>static boolean</code></td> <th class="colSecond" scope="row"><code><span class="memberNameLink"><a href="#eObjectsAreEqual(org.eclipse.emf.ecore.EObject,org.eclipse.emf.ecore.EObject,boolean)">eObjectsAreEqual</a></span>&#8203;(org.eclipse.emf.ecore.EObject&nbsp;first, org.eclipse.emf.ecore.EObject&nbsp;second, boolean&nbsp;conjunction)</code></th> <td class="colLast">&nbsp;</td> </tr> <tr id="i3" class="rowColor"> <td class="colFirst"><code>static org.eclipse.emf.ecore.EPackage</code></td> <th class="colSecond" scope="row"><code><span class="memberNameLink"><a href="#getEPackage(org.eclipse.emf.ecore.EObject)">getEPackage</a></span>&#8203;(org.eclipse.emf.ecore.EObject&nbsp;eObject)</code></th> <td class="colLast">&nbsp;</td> </tr> </table> <ul class="blockList"> <li class="blockList"><a id="methods.inherited.from.class.java.lang.Object"> <!-- --> </a> <h3>Methods inherited from class&nbsp;java.lang.Object</h3> <code>equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li> </ul> </li> </ul> </section> </li> </ul> </div> <div class="details"> <ul class="blockList"> <li class="blockList"> <!-- ========= CONSTRUCTOR DETAIL ======== --> <section role="region"> <ul class="blockList"> <li class="blockList"><a id="constructor.detail"> <!-- --> </a> <h3>Constructor Detail</h3> <a id="&lt;init&gt;()"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>ModelUtil</h4> <pre>public&nbsp;ModelUtil()</pre> </li> </ul> </li> </ul> </section> <!-- ============ METHOD DETAIL ========== --> <section role="region"> <ul class="blockList"> <li class="blockList"><a id="method.detail"> <!-- --> </a> <h3>Method Detail</h3> <a id="eObjectsAreEqual(org.eclipse.emf.ecore.EObject,org.eclipse.emf.ecore.EObject,boolean)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>eObjectsAreEqual</h4> <pre class="methodSignature">public static&nbsp;boolean&nbsp;eObjectsAreEqual&#8203;(org.eclipse.emf.ecore.EObject&nbsp;first, org.eclipse.emf.ecore.EObject&nbsp;second, boolean&nbsp;conjunction)</pre> <dl> <dt><span class="paramLabel">Parameters:</span></dt> <dd><code>first</code> - the first EObject</dd> <dd><code>second</code> - the second EObject</dd> <dd><code>conjunction</code> - true if exact matching is required, false if at least one of features are equal</dd> <dt><span class="returnLabel">Returns:</span></dt> <dd>whether the first and second are the same EPackages</dd> </dl> </li> </ul> <a id="eObjectsAreEqual(org.eclipse.emf.ecore.EObject,org.eclipse.emf.common.util.EList,boolean)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>eObjectsAreEqual</h4> <pre class="methodSignature">public static&nbsp;boolean&nbsp;eObjectsAreEqual&#8203;(org.eclipse.emf.ecore.EObject&nbsp;first, org.eclipse.emf.common.util.EList&lt;org.eclipse.emf.ecore.EObject&gt;&nbsp;second, boolean&nbsp;conjunction)</pre> </li> </ul> <a id="eObjectsAreEqual(org.eclipse.emf.common.util.EList,org.eclipse.emf.common.util.EList,boolean)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>eObjectsAreEqual</h4> <pre class="methodSignature">public static&nbsp;boolean&nbsp;eObjectsAreEqual&#8203;(org.eclipse.emf.common.util.EList&lt;org.eclipse.emf.ecore.EObject&gt;&nbsp;first, org.eclipse.emf.common.util.EList&lt;org.eclipse.emf.ecore.EObject&gt;&nbsp;second, boolean&nbsp;conjunction)</pre> </li> </ul> <a id="getEPackage(org.eclipse.emf.ecore.EObject)"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>getEPackage</h4> <pre class="methodSignature">public static&nbsp;org.eclipse.emf.ecore.EPackage&nbsp;getEPackage&#8203;(org.eclipse.emf.ecore.EObject&nbsp;eObject)</pre> </li> </ul> </li> </ul> </section> </li> </ul> </div> </div> </main> <!-- ========= END OF CLASS DATA ========= --> <footer role="contentinfo"> <nav role="navigation"> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a id="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a id="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../index.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="class-use/ModelUtil.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-files/index-1.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../allclasses.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.detail">Method</a></li> </ul> </div> <a id="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </nav> </footer> </body> </html>
{'content_hash': '59128a698491ad6e9810ebf5a3e6e9d2', 'timestamp': '', 'source': 'github', 'line_count': 370, 'max_line_length': 389, 'avg_line_length': 35.61891891891892, 'alnum_prop': 0.6393504818271493, 'repo_name': 'EnterpriseDomain/site', 'id': '298323894cd198497e367674eb661393967a0f52', 'size': '13179', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'javadoc/org/enterprisedomain/classmaker/util/ModelUtil.html', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '9056'}, {'name': 'HTML', 'bytes': '3161'}, {'name': 'JavaScript', 'bytes': '536'}, {'name': 'Ruby', 'bytes': '73'}]}
package com.percolate.coffee.model; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import com.percolate.coffee.util.date.DateUtils; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; /** * Extension of {@link com.percolate.coffee.model.CoffeeType}, contains extra fields needed for the show (detailed coffee type) requests */ @JsonIgnoreProperties(ignoreUnknown = true) public class CoffeeTypeDetailed extends CoffeeType { private String mUpdatedAt; /** * Default constructor for CoffeeType.class. * - This constructor is necessary (even if blank) in order for Jackson JSON Parsing to work properly. */ @JsonCreator public CoffeeTypeDetailed() { } @JsonProperty("last_updated_at") public String getUpdatedAt() { return mUpdatedAt; } @JsonProperty("last_updated_at") public void setUpdatedAt(String updatedAt) { Date date = new Date(); SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd' 'HH:mm:ss"); try { date = format.parse(updatedAt.substring(0,19)); } catch (ParseException e) { e.printStackTrace(); } mUpdatedAt = "Updated " + DateUtils.getTimeAgo(date.getTime()); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof CoffeeTypeDetailed)) return false; if (!super.equals(o)) return false; CoffeeTypeDetailed that = (CoffeeTypeDetailed) o; if (!mUpdatedAt.equals(that.mUpdatedAt)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + mUpdatedAt.hashCode(); return result; } }
{'content_hash': '203f14292a7685f5f4f494a61a47433d', 'timestamp': '', 'source': 'github', 'line_count': 64, 'max_line_length': 136, 'avg_line_length': 27.015625, 'alnum_prop': 0.741469057258531, 'repo_name': 'aaemman/Coffee', 'id': '382b5fd766668d7d04a8f301b5a4ee440625ed3b', 'size': '1729', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'java/com/percolate/coffee/model/CoffeeTypeDetailed.java', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Groovy', 'bytes': '1361'}, {'name': 'Java', 'bytes': '42716'}]}
 #include <aws/medialive/model/AudioChannelMapping.h> #include <aws/core/utils/json/JsonSerializer.h> #include <utility> using namespace Aws::Utils::Json; using namespace Aws::Utils; namespace Aws { namespace MediaLive { namespace Model { AudioChannelMapping::AudioChannelMapping() : m_inputChannelLevelsHasBeenSet(false), m_outputChannel(0), m_outputChannelHasBeenSet(false) { } AudioChannelMapping::AudioChannelMapping(JsonView jsonValue) : m_inputChannelLevelsHasBeenSet(false), m_outputChannel(0), m_outputChannelHasBeenSet(false) { *this = jsonValue; } AudioChannelMapping& AudioChannelMapping::operator =(JsonView jsonValue) { if(jsonValue.ValueExists("inputChannelLevels")) { Aws::Utils::Array<JsonView> inputChannelLevelsJsonList = jsonValue.GetArray("inputChannelLevels"); for(unsigned inputChannelLevelsIndex = 0; inputChannelLevelsIndex < inputChannelLevelsJsonList.GetLength(); ++inputChannelLevelsIndex) { m_inputChannelLevels.push_back(inputChannelLevelsJsonList[inputChannelLevelsIndex].AsObject()); } m_inputChannelLevelsHasBeenSet = true; } if(jsonValue.ValueExists("outputChannel")) { m_outputChannel = jsonValue.GetInteger("outputChannel"); m_outputChannelHasBeenSet = true; } return *this; } JsonValue AudioChannelMapping::Jsonize() const { JsonValue payload; if(m_inputChannelLevelsHasBeenSet) { Aws::Utils::Array<JsonValue> inputChannelLevelsJsonList(m_inputChannelLevels.size()); for(unsigned inputChannelLevelsIndex = 0; inputChannelLevelsIndex < inputChannelLevelsJsonList.GetLength(); ++inputChannelLevelsIndex) { inputChannelLevelsJsonList[inputChannelLevelsIndex].AsObject(m_inputChannelLevels[inputChannelLevelsIndex].Jsonize()); } payload.WithArray("inputChannelLevels", std::move(inputChannelLevelsJsonList)); } if(m_outputChannelHasBeenSet) { payload.WithInteger("outputChannel", m_outputChannel); } return payload; } } // namespace Model } // namespace MediaLive } // namespace Aws
{'content_hash': 'c47a39dca906d425f7a78612afb1a24d', 'timestamp': '', 'source': 'github', 'line_count': 81, 'max_line_length': 138, 'avg_line_length': 25.28395061728395, 'alnum_prop': 0.7626953125, 'repo_name': 'aws/aws-sdk-cpp', 'id': '4eb1685748433896f41afcd68eab96a1bf81e87f', 'size': '2167', 'binary': False, 'copies': '1', 'ref': 'refs/heads/main', 'path': 'aws-cpp-sdk-medialive/source/model/AudioChannelMapping.cpp', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'C', 'bytes': '309797'}, {'name': 'C++', 'bytes': '476866144'}, {'name': 'CMake', 'bytes': '1245180'}, {'name': 'Dockerfile', 'bytes': '11688'}, {'name': 'HTML', 'bytes': '8056'}, {'name': 'Java', 'bytes': '413602'}, {'name': 'Python', 'bytes': '79245'}, {'name': 'Shell', 'bytes': '9246'}]}
layout: 3c highlighted: Blog posttype: blog --- {% include post.html %}
{'content_hash': '65739079549a79f7b7b649165532058f', 'timestamp': '', 'source': 'github', 'line_count': 5, 'max_line_length': 23, 'avg_line_length': 14.4, 'alnum_prop': 0.6944444444444444, 'repo_name': 'syouts/jekyll-theme-void', 'id': '6adc8bb590e67636be6c99dcd2290dcf64bb024e', 'size': '76', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': '_layouts/post.html', 'mode': '33188', 'license': 'bsd-2-clause', 'language': [{'name': 'CSS', 'bytes': '6275'}]}
package com.atanasg.fileprocessingapp.commands; import com.atanasg.fileprocessingapp.command.status.CommandExecStatus; import com.atanasg.fileprocessingapp.command.status.CommandFailed; import com.atanasg.fileprocessingapp.mvc.model.FileContentModel; import com.atanasg.fileprocessingapp.mvc.view.FileProcessingAppView; /** * Process an unknown user input. Only * inform the {@link FileProcessingAppView}. * * @author Atanas Gegov */ public class UnknownCommand extends AbstractCommandWithArgs { public UnknownCommand(FileContentModel fileContentModel, FileProcessingAppView userInterface, String[] commandArgs) { super(fileContentModel, userInterface, commandArgs); } @Override public void execute() { userInterface.showInfoForCommandInProgress(String.format("Executing '%s'...", commandArgs[0])); CommandExecStatus unknownCommandStatus = new CommandFailed(); unknownCommandStatus.appendDetailedInfo("Unknown command"); userInterface.showCommandExecutionStatus(unknownCommandStatus); } }
{'content_hash': 'a4ac7b6dcd89129c6bb083a63af36299', 'timestamp': '', 'source': 'github', 'line_count': 30, 'max_line_length': 97, 'avg_line_length': 33.93333333333333, 'alnum_prop': 0.8143418467583498, 'repo_name': 'atanasg/file-processing-application', 'id': 'f4c26630c044388dddcd8e202c0ddc74c841b18e', 'size': '1613', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'FileProcessingApp/src/main/java/com/atanasg/fileprocessingapp/commands/UnknownCommand.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Java', 'bytes': '99558'}, {'name': 'XSLT', 'bytes': '65696'}]}
import React, { Component, PropTypes } from 'react'; import classnames from 'classnames'; import CSSClassnames from '../utils/CSSClassnames'; const CLASS_ROOT = CSSClassnames.TABLE_ROW; export default class TableRow extends Component { render () { const { children, className, onClick, ...props } = this.props; const classes = classnames( CLASS_ROOT, { [`${CLASS_ROOT}--selectable`]: onClick }, className ); return ( <tr {...props} className={classes} onClick={onClick}> {children} </tr> ); } }; TableRow.propTypes = { onClick: PropTypes.func };
{'content_hash': '14bc21b2a38ebc7b76b515f835c652a6', 'timestamp': '', 'source': 'github', 'line_count': 29, 'max_line_length': 66, 'avg_line_length': 21.689655172413794, 'alnum_prop': 0.6216216216216216, 'repo_name': 'nickjvm/grommet', 'id': 'c4db9eefbd6c9cb7dc58f40add697b0d01309435', 'size': '699', 'binary': False, 'copies': '5', 'ref': 'refs/heads/master', 'path': 'src/js/components/TableRow.js', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '258719'}, {'name': 'HTML', 'bytes': '4207'}, {'name': 'JavaScript', 'bytes': '1489305'}]}
package org.apache.geode.internal.serialization.filter; /** * Creates an instance of {@code ObjectInputFilterApi}. */ @FunctionalInterface public interface ObjectInputFilterApiFactory { ObjectInputFilterApi createObjectInputFilterApi(); }
{'content_hash': 'becab79e6dd8055563174a8121026d84', 'timestamp': '', 'source': 'github', 'line_count': 11, 'max_line_length': 55, 'avg_line_length': 22.363636363636363, 'alnum_prop': 0.8048780487804879, 'repo_name': 'masaki-yamakawa/geode', 'id': '11957dd4b6058a446720689965c5ec35a672ddef', 'size': '1035', 'binary': False, 'copies': '4', 'ref': 'refs/heads/develop', 'path': 'geode-serialization/src/main/java/org/apache/geode/internal/serialization/filter/ObjectInputFilterApiFactory.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '104031'}, {'name': 'Dockerfile', 'bytes': '15935'}, {'name': 'Go', 'bytes': '40709'}, {'name': 'Groovy', 'bytes': '36499'}, {'name': 'HTML', 'bytes': '4053180'}, {'name': 'Java', 'bytes': '36217142'}, {'name': 'JavaScript', 'bytes': '1780821'}, {'name': 'Python', 'bytes': '29801'}, {'name': 'Ruby', 'bytes': '1801'}, {'name': 'SCSS', 'bytes': '2677'}, {'name': 'Shell', 'bytes': '286170'}]}
///<summary> /// /// Timer selectable - David Azouz 1/10/16 /// </summary> using UnityEngine; using UnityEngine.SceneManagement; using UnityEngine.UI; using System.Collections; using System.Collections.Generic; public class RoundTimer : MonoBehaviour { //private float m_TimePerRound = 180.0f; [SerializeField] private float m_TimeRemaining; public float m_RoundCountdownTime = 5.0f; private bool m_RoundStarted = false; private bool m_Spawned = false; [SerializeField] //private List<GameObject> m_PlayerSpawns; private int m_PlayerCount; [SerializeField] private PlayerManager m_PlayerManager; public GameObject m_ScoreBoardWindow; //public Text c_CountdownText; void Awake() { m_ScoreBoardWindow = FindObjectOfType<WindowManager>().gameObject; m_TimeRemaining = 180.0f;// m_TimePerRound; m_Spawned = false; m_RoundStarted = true; } void Start() { m_PlayerManager = PlayerManager.Instance; SetTimerSelection(GameManager.Instance.m_ActiveGameSettings.iRoundTimerChoice); } void Update() { if(Input.GetKeyDown(KeyCode.G) || Input.GetKeyDown(KeyCode.JoystickButton6)) { if (SceneManager.GetActiveScene().buildIndex != Scene.Menu) { if (SceneManager.GetActiveScene().buildIndex != 0) { m_RoundStarted = true; } } } if(m_RoundStarted) { m_TimeRemaining = Mathf.Max( m_TimeRemaining - Time.deltaTime, 0.0f ); // Time's up and Scoreboard if (m_TimeRemaining <= 0.0f) { m_RoundStarted = false; for (int i = 0; i < PlayerManager.MAX_PLAYERS; i++) { m_PlayerManager.GetPlayer(i).GetComponent<PlayerController>().enabled = false; } m_ScoreBoardWindow.GetComponent<WindowManager>().TimesUp(); Time.timeScale = 0.5f; } else if(!m_Spawned) { // Allow the game to run here m_Spawned = true; m_PlayerManager.CreatePlayers(); // Disable movement so Princess Anims can sync up. /*for (int i = 0; i < PlayerManager.MAX_PLAYERS; i++) { //m_PlayerManager.GetPlayer(i).GetComponent<PlayerController>().enabled = false; }*/ } } if (GetTimeRemaining() <= 0) { if (Input.GetButton("Pause")) { //m_Window.SetActive(false); m_Spawned = false; //SceneManager.LoadScene(SceneManager.GetActiveScene().buildIndex); //Time.timeScale = 1.0f; } } } public float GetTimeRemaining() { return m_TimeRemaining; } // Selects time duration for a round public void SetTimerSelection(int a_time) { switch (a_time) { // One minute case 0: { m_TimeRemaining = 60.0f; break; } // Three Minutes case 1: { m_TimeRemaining = 180.0f; break; } // Five Minutes case 2: { m_TimeRemaining = 300.0f; break; } default: { m_TimeRemaining = 180.0f; break; } } } }
{'content_hash': 'bf4311413367958e363f951542efedd7', 'timestamp': '', 'source': 'github', 'line_count': 137, 'max_line_length': 100, 'avg_line_length': 27.094890510948904, 'alnum_prop': 0.5, 'repo_name': 'DavidAzouz29/Food-Fight-2016-Prototype-Unity', 'id': '2285923b44263c1c9f75fe059683415793f57b97', 'size': '3714', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'Assets/Scripts/RoundTimer.cs', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C#', 'bytes': '612952'}, {'name': 'GLSL', 'bytes': '354902'}]}
package org.polyglotted.attributerepo.stash; import static org.polyglotted.attributerepo.stash.StashConstants.PARAM_AT; import static org.polyglotted.attributerepo.stash.StashConstants.SEGMENT_API; import static org.polyglotted.attributerepo.stash.StashConstants.SEGMENT_BROWSE; import static org.polyglotted.attributerepo.stash.StashConstants.SEGMENT_PROJECTS; import static org.polyglotted.attributerepo.stash.StashConstants.SEGMENT_REPOS; import org.polyglotted.attributerepo.core.GitClient; import org.polyglotted.attributerepo.core.Response; import org.polyglotted.attributerepo.git.common.FileRequest; import org.polyglotted.attributerepo.model.Artifact; import org.polyglotted.attributerepo.model.RepoId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Joiner; /** * Stash based Browse request that downloads contents of a file * * @author Shankar Vasudevan */ public class StashFileRequest extends FileRequest { private static final Joiner NEWJOINER = Joiner.on("\n"); private static final Logger logger = LoggerFactory.getLogger(StashFileRequest.class); /** * Create a new StashFileRequest * * @param repo * the repository definition * @param artifact * the deployed artifact that you would like to get the properties */ public StashFileRequest(RepoId repo, Artifact artifact) { super(repo, artifact); } public String execute(GitClient client) { try { Response response = client.execute(this); StashFile fileContent = response.getResult(StashFile.class); return collateLines(fileContent); } catch (Exception ex) { logger.error("error in content request execute", ex); throw new RuntimeException(ex); } } @Override protected String getRefParam() { return PARAM_AT; } @Override protected StringBuilder buildUri() { StringBuilder uri = new StringBuilder(); uri.append(SEGMENT_API); uri.append(SEGMENT_PROJECTS); uri.append(repo.getUser()); uri.append(SEGMENT_REPOS); uri.append(repo.getRepo()); uri.append(SEGMENT_BROWSE); uri.append(artifact.buildFilePath()); return uri; } protected String collateLines(StashFile fileContent) { return NEWJOINER.join(fileContent.getLines()); } }
{'content_hash': '3fc8f66e39f58af6db71f47593cef48c', 'timestamp': '', 'source': 'github', 'line_count': 74, 'max_line_length': 89, 'avg_line_length': 33.067567567567565, 'alnum_prop': 0.7029015120555783, 'repo_name': 'polyglotted/attribute-repo', 'id': 'c4d580b49c03448540fdb55f5644e22978913817', 'size': '2447', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'attribute-repo-core/src/main/java/org/polyglotted/attributerepo/stash/StashFileRequest.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Java', 'bytes': '99943'}]}
using System; namespace Prism.Regions { /// <summary> /// Provides additional methods to the <see cref="INavigateAsync"/> interface. /// </summary> public static class NavigationAsyncExtensions { /// <summary> /// Initiates navigation to the target specified by the <paramref name="target"/>. /// </summary> /// <param name="navigation">The navigation object.</param> /// <param name="target">The navigation target</param> public static void RequestNavigate(this INavigateAsync navigation, string target) { RequestNavigate(navigation, target, nr => { }); } /// <summary> /// Initiates navigation to the target specified by the <paramref name="target"/>. /// </summary> /// <param name="navigation">The navigation object.</param> /// <param name="target">The navigation target</param> /// <param name="navigationCallback">The callback executed when the navigation request is completed.</param> public static void RequestNavigate(this INavigateAsync navigation, string target, Action<NavigationResult> navigationCallback) { if (navigation == null) throw new ArgumentNullException("navigation"); if (target == null) throw new ArgumentNullException("target"); var targetUri = new Uri(target, UriKind.RelativeOrAbsolute); navigation.RequestNavigate(targetUri, navigationCallback); } /// <summary> /// Initiates navigation to the target specified by the <see cref="Uri"/>. /// </summary> /// <param name="navigation">The navigation object.</param> /// <param name="target">The navigation target</param> public static void RequestNavigate(this INavigateAsync navigation, Uri target) { if (navigation == null) throw new ArgumentNullException("navigation"); navigation.RequestNavigate(target, nr => { }); } /// <summary> /// Initiates navigation to the target specified by the <paramref name="target"/>. /// </summary> /// <param name="navigation">The navigation object.</param> /// <param name="target">The navigation target</param> /// <param name="navigationCallback">The callback executed when the navigation request is completed.</param> /// <param name="navigationParameters">An instance of NavigationParameters, which holds a collection of object parameters.</param> public static void RequestNavigate(this INavigateAsync navigation, string target, Action<NavigationResult> navigationCallback, NavigationParameters navigationParameters) { if (navigation == null) throw new ArgumentNullException("navigation"); if (target == null) throw new ArgumentNullException("target"); var targetUri = new Uri(target, UriKind.RelativeOrAbsolute); navigation.RequestNavigate(targetUri, navigationCallback, navigationParameters); } /// <summary> /// Initiates navigation to the target specified by the <paramref name="target"/>. /// </summary> /// <param name="navigation">The navigation object.</param> /// <param name="target">A Uri that represents the target where the region will navigate.</param> /// <param name="navigationParameters">An instance of NavigationParameters, which holds a collection of object parameters.</param> public static void RequestNavigate(this INavigateAsync navigation, Uri target, NavigationParameters navigationParameters) { if (navigation == null) throw new ArgumentNullException("navigation"); navigation.RequestNavigate(target, nr => { }, navigationParameters); } /// <summary> /// Initiates navigation to the target specified by the <paramref name="target"/>. /// </summary> /// <param name="navigation">The navigation object.</param> /// <param name="target">A string that represents the target where the region will navigate.</param> /// <param name="navigationParameters">An instance of NavigationParameters, which holds a collection of object parameters.</param> public static void RequestNavigate(this INavigateAsync navigation, string target, NavigationParameters navigationParameters) { if (navigation == null) throw new ArgumentNullException("navigation"); if (target == null) throw new ArgumentNullException("target"); navigation.RequestNavigate(new Uri(target, UriKind.RelativeOrAbsolute), nr => { }, navigationParameters); } } }
{'content_hash': '53b10df35bb16ff24476661567bce796', 'timestamp': '', 'source': 'github', 'line_count': 92, 'max_line_length': 177, 'avg_line_length': 51.21739130434783, 'alnum_prop': 0.6593803056027164, 'repo_name': 'KSSDevelopment/Prism', 'id': 'c771812d20bdeac5c7a71515ce9d9a7a86b1cc23', 'size': '4835', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'Source/Wpf/Prism.Wpf/Regions/NavigationAsyncExtensions.cs', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'C#', 'bytes': '1762844'}]}
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package estacionamento.view; /** * * @author flavio */ public interface IMainView { public void swapPanel(Object panel); public void addPanel(Object panel); }
{'content_hash': '4866dd8921c899a07bce24bd3e991234', 'timestamp': '', 'source': 'github', 'line_count': 18, 'max_line_length': 79, 'avg_line_length': 20.61111111111111, 'alnum_prop': 0.692722371967655, 'repo_name': 'gumadeiras/engsoft', 'id': '8ae23a3d4de70d265615462c82ba0a9464503878', 'size': '371', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'Estacionamento/src/estacionamento/view/IMainView.java', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Java', 'bytes': '59138'}]}
//*********************************************************// // Copyright (c) Microsoft. All rights reserved. // // Apache 2.0 License // // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // //*********************************************************// using System; using System.Collections.Generic; using System.IO; namespace Microsoft.NodejsTools.Analysis.Values { /// <summary> /// The object that is created for a Node.js module's /// exports variable. We create this so that we can show /// a different icon in intellisense for modules. /// </summary> [Serializable] class ExportsValue : ObjectValue { private readonly string _name; public ExportsValue(string name, ProjectEntry projectEntry) : base(projectEntry) { _name = name; } public override JsMemberType MemberType { get { return JsMemberType.Module; } } public override string ObjectDescription { get { return "exports from " + Path.GetFileName(_name); } } public override IEnumerable<LocationInfo> Locations { get { if (ProjectEntry.IsBuiltin) { return new LocationInfo[0]; } return new[] { new LocationInfo(ProjectEntry, 1, 1) }; } } } }
{'content_hash': 'cc2735681fe9677ff95c432472c78c13', 'timestamp': '', 'source': 'github', 'line_count': 58, 'max_line_length': 74, 'avg_line_length': 31.379310344827587, 'alnum_prop': 0.5538461538461539, 'repo_name': 'chanchaldabriya/nodejstools', 'id': '9499b38e19fa92e0168ef397528375639ac0338b', 'size': '1822', 'binary': False, 'copies': '15', 'ref': 'refs/heads/master', 'path': 'Nodejs/Product/Analysis/Analysis/Values/ExportsValue.cs', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'ASP', 'bytes': '112'}, {'name': 'Batchfile', 'bytes': '27850'}, {'name': 'C#', 'bytes': '9594527'}, {'name': 'CSS', 'bytes': '4635'}, {'name': 'HTML', 'bytes': '29428'}, {'name': 'JavaScript', 'bytes': '666028'}, {'name': 'PowerShell', 'bytes': '85116'}, {'name': 'Python', 'bytes': '4937'}, {'name': 'TypeScript', 'bytes': '11615'}]}
<?xml version="1.0" encoding="ascii"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title>lib.cuckoo.common.objects.Dictionary</title> <link rel="stylesheet" href="epydoc.css" type="text/css" /> <script type="text/javascript" src="epydoc.js"></script> </head> <body bgcolor="white" text="black" link="blue" vlink="#204080" alink="#204080"> <!-- ==================== NAVIGATION BAR ==================== --> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <!-- Project homepage --> <th class="navbar" align="right" width="100%"> <table border="0" cellpadding="0" cellspacing="0"> <tr><th class="navbar" align="center" ><a class="navbar" target="_top" href="http://cuckoosandbox.org">Cuckoo Sandbox</a></th> </tr></table></th> </tr> </table> <table width="100%" cellpadding="0" cellspacing="0"> <tr valign="top"> <td width="100%"> <span class="breadcrumbs"> <a href="lib-module.html">Package&nbsp;lib</a> :: <a href="lib.cuckoo-module.html">Package&nbsp;cuckoo</a> :: <a href="lib.cuckoo.common-module.html">Package&nbsp;common</a> :: <a href="lib.cuckoo.common.objects-module.html">Module&nbsp;objects</a> :: Class&nbsp;Dictionary </span> </td> <td> <table cellpadding="0" cellspacing="0"> <!-- hide/show private --> <tr><td align="right"><span class="options">[<a href="javascript:void(0);" class="privatelink" onclick="toggle_private();">hide&nbsp;private</a>]</span></td></tr> <tr><td align="right"><span class="options" >[<a href="frames.html" target="_top">frames</a >]&nbsp;|&nbsp;<a href="lib.cuckoo.common.objects.Dictionary-class.html" target="_top">no&nbsp;frames</a>]</span></td></tr> </table> </td> </tr> </table> <!-- ==================== CLASS DESCRIPTION ==================== --> <h1 class="epydoc">Class Dictionary</h1><p class="nomargin-top"><span class="codelink"><a href="lib.cuckoo.common.objects-pysrc.html#Dictionary">source&nbsp;code</a></span></p> <pre class="base-tree"> object --+ | dict --+ | <strong class="uidshort">Dictionary</strong> </pre> <hr /> <p>Cuckoo custom dict.</p> <!-- ==================== INSTANCE METHODS ==================== --> <a name="section-InstanceMethods"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Instance Methods</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-InstanceMethods" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__getattr__"></a><span class="summary-sig-name">__getattr__</span>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">key</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="lib.cuckoo.common.objects-pysrc.html#Dictionary.__getattr__">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="lib.cuckoo.common.objects.Dictionary-class.html#__setattr__" class="summary-sig-name">__setattr__</a>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">i</span>, <span class="summary-sig-arg">y</span>)</span><br /> x[i]=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="lib.cuckoo.common.objects.Dictionary-class.html#__delattr__" class="summary-sig-name">__delattr__</a>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> del x[y]</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="indent-wrapped-lines"><b>Inherited from <code>dict</code></b>: <code>__cmp__</code>, <code>__contains__</code>, <code>__delitem__</code>, <code>__eq__</code>, <code>__ge__</code>, <code>__getattribute__</code>, <code>__getitem__</code>, <code>__gt__</code>, <code>__init__</code>, <code>__iter__</code>, <code>__le__</code>, <code>__len__</code>, <code>__lt__</code>, <code>__ne__</code>, <code>__new__</code>, <code>__repr__</code>, <code>__setitem__</code>, <code>__sizeof__</code>, <code>clear</code>, <code>copy</code>, <code>fromkeys</code>, <code>get</code>, <code>has_key</code>, <code>items</code>, <code>iteritems</code>, <code>iterkeys</code>, <code>itervalues</code>, <code>keys</code>, <code>pop</code>, <code>popitem</code>, <code>setdefault</code>, <code>update</code>, <code>values</code>, <code>viewitems</code>, <code>viewkeys</code>, <code>viewvalues</code> </p> <p class="indent-wrapped-lines"><b>Inherited from <code>object</code></b>: <code>__format__</code>, <code>__reduce__</code>, <code>__reduce_ex__</code>, <code>__str__</code>, <code>__subclasshook__</code> </p> </td> </tr> </table> <!-- ==================== CLASS VARIABLES ==================== --> <a name="section-ClassVariables"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Class Variables</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-ClassVariables" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="indent-wrapped-lines"><b>Inherited from <code>dict</code></b>: <code>__hash__</code> </p> </td> </tr> </table> <!-- ==================== PROPERTIES ==================== --> <a name="section-Properties"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Properties</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-Properties" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="indent-wrapped-lines"><b>Inherited from <code>object</code></b>: <code>__class__</code> </p> </td> </tr> </table> <!-- ==================== METHOD DETAILS ==================== --> <a name="section-MethodDetails"></a> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Method Details</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-MethodDetails" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> </table> <a name="__setattr__"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">__setattr__</span>(<span class="sig-arg">x</span>, <span class="sig-arg">i</span>, <span class="sig-arg">y</span>)</span> </h3> </td><td align="right" valign="top" >&nbsp; </td> </tr></table> <p>x[i]=y</p> <dl class="fields"> <dt>Overrides: object.__setattr__ </dt> </dl> </td></tr></table> </div> <a name="__delattr__"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">__delattr__</span>(<span class="sig-arg">x</span>, <span class="sig-arg">y</span>)</span> </h3> </td><td align="right" valign="top" >&nbsp; </td> </tr></table> <p>del x[y]</p> <dl class="fields"> <dt>Overrides: object.__delattr__ </dt> </dl> </td></tr></table> </div> <br /> <!-- ==================== NAVIGATION BAR ==================== --> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <!-- Project homepage --> <th class="navbar" align="right" width="100%"> <table border="0" cellpadding="0" cellspacing="0"> <tr><th class="navbar" align="center" ><a class="navbar" target="_top" href="http://cuckoosandbox.org">Cuckoo Sandbox</a></th> </tr></table></th> </tr> </table> <table border="0" cellpadding="0" cellspacing="0" width="100%%"> <tr> <td align="left" class="footer"> Generated by Epydoc 3.0.1 on Fri Nov 4 23:21:58 2016 </td> <td align="right" class="footer"> <a target="mainFrame" href="http://epydoc.sourceforge.net" >http://epydoc.sourceforge.net</a> </td> </tr> </table> <script type="text/javascript"> <!-- // Private objects are initially displayed (because if // javascript is turned off then we want them to be // visible); but by default, we want to hide them. So hide // them unless we have a cookie that says to show them. checkCookie(); // --> </script> </body> </html>
{'content_hash': 'ae244471e468abb5dc4233bf60068682', 'timestamp': '', 'source': 'github', 'line_count': 359, 'max_line_length': 190, 'avg_line_length': 34.32033426183844, 'alnum_prop': 0.5495495495495496, 'repo_name': 'mburakergenc/Malware-Detection-using-Machine-Learning', 'id': 'bbb5bb680138b0aff190aecb6f150a6cf5064463', 'size': '12321', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'cuckoo/docs/epydoc/host/lib.cuckoo.common.objects.Dictionary-class.html', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C', 'bytes': '120655'}, {'name': 'CSS', 'bytes': '57002'}, {'name': 'HTML', 'bytes': '14690306'}, {'name': 'JavaScript', 'bytes': '134909'}, {'name': 'Jupyter Notebook', 'bytes': '167644'}, {'name': 'Makefile', 'bytes': '4676'}, {'name': 'Mako', 'bytes': '1078'}, {'name': 'Python', 'bytes': '1576528'}, {'name': 'Shell', 'bytes': '34027'}, {'name': 'Visual Basic', 'bytes': '1101'}]}
package org.vitanov.essentials.repositories; import org.springframework.data.jpa.repository.JpaRepository; import org.springframework.data.jpa.repository.Query; import org.springframework.data.repository.query.Param; import org.springframework.stereotype.Repository; import org.vitanov.essentials.entitites.Customer; import java.util.List; @Repository public interface CustomerRepository extends JpaRepository<Customer, Long> { @Query(value = "select * from customers as c " + "order by c.date_of_birth asc, c.is_young_driver desc", nativeQuery = true) List<Customer> getAllAscendingByDateAndExperience(); @Query(value = "select * from customers as c " + "order by c.date_of_birth desc, c.is_young_driver desc", nativeQuery = true) List<Customer> getAllDescendingByDateAndExperience(); @Query(value = "select " + " c.name," + " count(c.id) as bougth_cars," + " round(sum(p.price), 2) as spend_money " + " from customers as c " + " inner join sales as s" + " on s.customer_id = c.id" + " inner join cars as ca" + " on s.car_id = ca.id" + " inner join cars_parts as cp" + " on cp.car_id = ca.id" + " inner join parts as p" + " on cp.parts_id = p.id" + " where c.id = :id" + " group by c.id", nativeQuery = true) List<Object[]> getCustomerSaleDetails(@Param(value = "id") long id); }
{'content_hash': '3454690bf32388c16433cefac4f58ba0', 'timestamp': '', 'source': 'github', 'line_count': 38, 'max_line_length': 88, 'avg_line_length': 39.81578947368421, 'alnum_prop': 0.6159947124917383, 'repo_name': 'StoyanVitanov/SoftwareUniversity', 'id': '8ce18805a93909ab1e22325a5bac1cae68fc802f', 'size': '1513', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'Java Web/Spring MVC Framework/01.Essentials/src/main/java/org/vitanov/essentials/repositories/CustomerRepository.java', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C#', 'bytes': '520639'}, {'name': 'CSS', 'bytes': '638'}, {'name': 'HTML', 'bytes': '8027'}, {'name': 'Java', 'bytes': '705608'}, {'name': 'JavaScript', 'bytes': '9369'}, {'name': 'PHP', 'bytes': '10137'}, {'name': 'PLSQL', 'bytes': '449'}, {'name': 'PLpgSQL', 'bytes': '1478'}, {'name': 'SQLPL', 'bytes': '8271'}]}
package jcuda; /** * Base class for all classes that store a native pointer */ public abstract class NativePointerObject { /** * The native pointer, written by native methods */ private long nativePointer; /** * Creates a new NativePointerObject with a <code>null</code> pointer. */ protected NativePointerObject() { nativePointer = 0; } /** * Creates a new Pointer with the given native pointer value */ protected NativePointerObject(long nativePointer) { this.nativePointer = nativePointer; } /** * Creates a new Pointer with the samme native pointer as the * given one * * @param other The other NativePointerObject */ protected NativePointerObject(NativePointerObject other) { this.nativePointer = other.nativePointer; } /** * Obtain the native pointer value. * * @return The native pointer value */ protected long getNativePointer() { return nativePointer; } @Override public String toString() { return "NativePointerObject[nativePointer=" + nativePointer + "]"; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (int)(nativePointer ^ (nativePointer >>> 32)); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; NativePointerObject other = (NativePointerObject)obj; if (nativePointer != other.nativePointer) return false; return true; } }
{'content_hash': 'cbe3e69ca1b31afd273ec4032eae9ded', 'timestamp': '', 'source': 'github', 'line_count': 82, 'max_line_length': 80, 'avg_line_length': 23.01219512195122, 'alnum_prop': 0.5585585585585585, 'repo_name': 'SkymindIO/jcublas', 'id': '7b6a6b36883fc1bad0a20866fcc2f4487809567a', 'size': '3144', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'jcuda/src/main/java/jcuda/NativePointerObject.java', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Java', 'bytes': '2470447'}]}
The Go language comes with a complete set of command operation tools. You can execute the `go` command on the terminal to see them: ![](images/1.3.go.png?raw=true) Figure 1.3 Go command displays detailed information These are all useful for us. Let's see how to use some of them. ## go build This command is for compiling tests. It will compile packages and dependencies if it's necessary. - If the package is not the `main` package such as `mymath` in section 1.2, nothing will be generated after you execute `go build`. If you need the package file `.a` in `$GOPATH/pkg`, use `go install` instead. - If the package is the `main` package, it will generate an executable file in the same folder. If you want the file to be generated in `$GOPATH/bin`, use `go install` or `go build -o ${PATH_HERE}/a.exe.` - If there are many files in the folder, but you just want to compile one of them, you should append the file name after `go build`. For example, `go build a.go`. `go build` will compile all the files in the folder. - You can also assign the name of the file that will be generated. For instance, in the `mathapp` project (in section 1.2), using `go build -o astaxie.exe` will generate `astaxie.exe` instead of `mathapp.exe`. The default name is your folder name (non-main package) or the first source file name (main package). (According to [The Go Programming Language Specification](https://golang.org/ref/spec), package names should be the name after the word `package` in the first line of your source files. It doesn't have to be the same as the folder name, and the executable file name will be your folder name by default.) - `go build` ignores files whose names start with `_` or `.`. - If you want to have different source files for every operating system, you can name files with the system name as a suffix. Suppose there are some source files for loading arrays. They could be named as follows: array_linux.go | array_darwin.go | array_windows.go | array_freebsd.go `go build` chooses the one that's associated with your operating system. For example, it only compiles array_linux.go in Linux systems, and ignores all the others. ## go clean This command is for cleaning files that are generated by compilers, including the following files: _obj/ // old directory of object, left by Makefiles _test/ // old directory of test, left by Makefiles _testmain.go // old directory of gotest, left by Makefiles test.out // old directory of test, left by Makefiles build.out // old directory of test, left by Makefiles *.[568ao] // object files, left by Makefiles DIR(.exe) // generated by go build DIR.test(.exe) // generated by go test -c MAINFILE(.exe) // generated by go build MAINFILE.go I usually use this command to clean up my files before I upload my project to Github. These are useful for local tests, but useless for version control. ## go fmt and gofmt The people who are working with C/C++ should know that people are always arguing about which code style is better: K&R-style or ANSI-style. However in Go, there is only one code style which is enforced. For example, left braces must only be inserted at the end of lines, and they cannot be on their own lines, otherwise you will get compile errors! Fortunately, you don't have to remember these rules. `go fmt` does this job for you. Just execute the command `go fmt <File name>.go` in terminal. I don't use this command very much because IDEs usually execute this command automatically when you save source files. I will talk more about IDEs in the next section. `go fmt` is just an alias, which runs the command `gofmt -l -w` on the packages named by the import paths. We usually use `gofmt -w` instead of `go fmt`. The latter will not rewrite your source files after formatting code. `gofmt -w src` formats the whole project. ## go get This command is for getting remote packages. So far, it supports BitBucket, GitHub, Google Code and Launchpad. There are actually two things that happen after we execute this command. The first thing is that Go downloads the source code, then executes `go install`. Before you use this command, make sure you have installed all of the related tools. BitBucket (Mercurial Git) GitHub (git) Google Code (Git, Mercurial, Subversion) Launchpad (Bazaar) In order to use this command, you have to install these tools correctly. Don't forget to update the `$PATH` variable. By the way, it also supports customized domain names. Use `go help importpath` for more details about this. ## go install This command compiles all packages and generates files, then moves them to `$GOPATH/pkg` or `$GOPATH/bin`. ## go test This command loads all files whose name include `*_test.go` and generates test files, then prints information that looks like the following. ok archive/tar 0.011s FAIL archive/zip 0.022s ok compress/gzip 0.033s ... It tests all your test files by default. Use command `go help testflag` for more details. ## godoc Many people say that we don't need any third-party documentation for programming in Go (actually I've made a [CHM](https://github.com/astaxie/godoc) already). Go has a powerful tool to manage documentation natively. So how do we look up package information in documentation? For instance, if you want to get more details about the `builtin` package, use the `godoc builtin` command. Similarly, use the `godoc net/http` command to look up the `http` package documentation. If you want to see more details about specific functions, use the `godoc fmt Printf` and `godoc -src fmt Printf` commands to view the source code. Execute the `godoc -http=:8080` command, then open `127.0.0.1:8080` in your browser. You should see a localized golang.org. It can not only show the standard packages' information, but also packages in your `$GOPATH/pkg`. It's great for people who are suffering from the Great Firewall of China. ## Other commands Go provides more commands than those we've just talked about. go fix // upgrade code from an old version before go1 to a new version after go1 go version // get information about your version of Go go env // view environment variables about Go go list // list all installed packages go run // compile temporary files and run the application There are also more details about the commands that I've talked about. You can use `go help <command>` to look them up. ## Links - [Directory](preface.md) - Previous section: [$GOPATH and workspace](01.2.md) - Next section: [Go development tools](01.4.md)
{'content_hash': '41f23b7afd6cfb49876c3043e97940dd', 'timestamp': '', 'source': 'github', 'line_count': 102, 'max_line_length': 663, 'avg_line_length': 64.63725490196079, 'alnum_prop': 0.7500379190050053, 'repo_name': 'admpub/build-web-application-with-golang', 'id': 'f3eb165025730a4672cb6d58a581d258fc7f33c5', 'size': '6628', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'en/01.3.md', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'Go', 'bytes': '372338'}, {'name': 'Groovy', 'bytes': '37565'}, {'name': 'HTML', 'bytes': '23485'}, {'name': 'Python', 'bytes': '1238'}, {'name': 'Shell', 'bytes': '10842'}]}
require 'curses' module GitCrecord module UI module HelpWindow CONTENT = <<HELP q - quit s - stage selection and quit c - commit selection and quit j / ↓ - down k / ↑ - up h / ← - collapse fold l / → - expand fold f - toggle fold g - go to first line G - go to last line C-P - up to previous hunk / file C-N - down to next hunk / file SPACE - toggle selection A - toggle all selections ? - display help R - force redraw HELP def self.show win = Curses::Window.new(height, width, 0, 0) win.box('|', '-') CONTENT.split("\n").each_with_index do |line, index| win.setpos(index + 1, 1) win.addstr(line) end win.getch win.close end def self.width CONTENT.lines.map(&:size).max + 3 end def self.height CONTENT.lines.size + 2 end end end end
{'content_hash': '12c0fced69792ab8c389ff1989befbd6', 'timestamp': '', 'source': 'github', 'line_count': 45, 'max_line_length': 60, 'avg_line_length': 21.42222222222222, 'alnum_prop': 0.5280082987551867, 'repo_name': 'mbrendler/git-crecord', 'id': 'f7bf58db4593950d5d47c8fbc57e4e1a1d055c72', 'size': '1003', 'binary': False, 'copies': '1', 'ref': 'refs/heads/main', 'path': 'lib/git_crecord/ui/help_window.rb', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Ruby', 'bytes': '25799'}, {'name': 'Shell', 'bytes': '5363'}]}
from __future__ import absolute_import, division, print_function, unicode_literals import glob import logging import os import re import subprocess import sys import tempfile import threading import time import unittest import shutil sys.dont_write_bytecode = True # prevent creation of .pyc files from benchexec import container from benchexec import containerexecutor from benchexec import filehierarchylimit from benchexec.runexecutor import RunExecutor from benchexec import runexecutor from benchexec import util try: from subprocess import DEVNULL except ImportError: DEVNULL = open(os.devnull, 'wb') try: unichr(0) except NameError: unichr = chr here = os.path.dirname(__file__) base_dir = os.path.join(here, '..') bin_dir = os.path.join(base_dir, 'bin') runexec = os.path.join(bin_dir, 'runexec') python = 'python2' if sys.version_info[0] == 2 else 'python3' class TestRunExecutor(unittest.TestCase): @classmethod def setUpClass(cls): cls.longMessage = True cls.maxDiff = None logging.disable(logging.CRITICAL) if not hasattr(cls, 'assertRegex'): cls.assertRegex = cls.assertRegexpMatches if not hasattr(cls, 'assertRaisesRegex'): cls.assertRaisesRegex = cls.assertRaisesRegexp def setUp(self, *args, **kwargs): self.runexecutor = RunExecutor(*args, **kwargs) def execute_run(self, *args, **kwargs): (output_fd, output_filename) = tempfile.mkstemp('.log', 'output_', text=True) try: result = self.runexecutor.execute_run(list(args), output_filename, **kwargs) output_lines = os.read(output_fd, 4096).decode().splitlines() return (result, output_lines) finally: os.close(output_fd) os.remove(output_filename) def execute_run_extern(self, *args, **kwargs): (output_fd, output_filename) = tempfile.mkstemp('.log', 'output_', text=True) try: runexec_output = subprocess.check_output( args=[python, runexec] + list(args) + ['--output', output_filename], stderr=DEVNULL, **kwargs ).decode() output_lines = os.read(output_fd, 4096).decode().splitlines() except subprocess.CalledProcessError as e: print(e.output.decode()) raise e finally: os.close(output_fd) os.remove(output_filename) result={key.strip(): value.strip() for (key, _, value) in (line.partition('=') for line in runexec_output.splitlines())} return (result, output_lines) def check_command_in_output(self, output, cmd): self.assertEqual(output[0], cmd, 'run output misses executed command') def check_result_keys(self, result, *additional_keys): expected_keys = {'cputime', 'walltime', 'memory', 'exitcode', 'cpuenergy', 'blkio-read', 'blkio-write', } expected_keys.update(additional_keys) for key in result.keys(): if key.startswith('cputime-cpu'): self.assertRegex(key, '^cputime-cpu[0-9]+$', "unexpected result entry '{}={}'".format(key, result[key])) elif key.startswith('cpuenergy-'): self.assertRegex(key, '^cpuenergy-pkg[0-9]+(-(core|uncore|dram|psys))?$', "unexpected result entry '{}={}'".format(key, result[key])) else: self.assertIn(key, expected_keys, "unexpected result entry '{}={}'".format(key, result[key])) def check_exitcode(self, result, exitcode, msg=None): self.assertEqual(int(result['exitcode']), exitcode, msg) def test_command_output(self): if not os.path.exists('/bin/echo'): self.skipTest('missing /bin/echo') (_, output) = self.execute_run('/bin/echo', 'TEST_TOKEN') self.check_command_in_output(output, '/bin/echo TEST_TOKEN') self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output') for line in output[1:-1]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_command_error_output(self): if not os.path.exists('/bin/echo'): self.skipTest('missing /bin/echo') if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') def execute_Run_intern(*args, **kwargs): (error_fd, error_filename) = tempfile.mkstemp('.log', 'error_', text=True) try: (_, output_lines) = self.execute_run(*args, error_filename=error_filename, **kwargs) error_lines = os.read(error_fd, 4096).decode().splitlines() return (output_lines, error_lines) finally: os.close(error_fd) os.remove(error_filename) (output_lines, error_lines) = execute_Run_intern('/bin/sh', '-c', '/bin/echo ERROR_TOKEN >&2') self.assertEqual(error_lines[-1], 'ERROR_TOKEN', 'run error output misses command output') for line in output_lines[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') for line in error_lines[1:-1]: self.assertRegex(line, '^-*$', 'unexpected text in run error output') (output_lines, error_lines) = execute_Run_intern('/bin/echo', 'OUT_TOKEN') self.check_command_in_output(output_lines, '/bin/echo OUT_TOKEN') self.check_command_in_output(error_lines, '/bin/echo OUT_TOKEN') self.assertEqual(output_lines[-1], 'OUT_TOKEN', 'run output misses command output') for line in output_lines[1:-1]: self.assertRegex(line, '^-*$', 'unexpected text in run output') for line in error_lines[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run error output') def test_command_result(self): if not os.path.exists('/bin/echo'): self.skipTest('missing /bin/echo') (result, _) = self.execute_run('/bin/echo', 'TEST_TOKEN') self.check_exitcode(result, 0, 'exit code of /bin/echo is not zero') self.assertAlmostEqual(result['walltime'], 0.2, delta=0.2, msg='walltime of /bin/echo not as expected') self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of /bin/echo not as expected') self.check_result_keys(result) def test_cputime_hardlimit(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i', hardtimelimit=1) self.check_exitcode(result, 9, 'exit code of killed process is not 9') if 'terminationreason' in result: # not produced currently if killed by ulimit self.assertEqual(result['terminationreason'], 'cputime', 'termination reason is not "cputime"') self.assertAlmostEqual(result['walltime'], 1.4, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed') self.assertAlmostEqual(result['cputime'], 1.4, delta=0.5, msg='cputime is not approximately the time after which the process should have been killed') self.check_result_keys(result, 'terminationreason') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_cputime_softlimit(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') try: (result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i', softtimelimit=1) except SystemExit as e: self.assertEqual(str(e), 'Soft time limit cannot be specified without cpuacct cgroup.') self.skipTest(e) self.check_exitcode(result, 15, 'exit code of killed process is not 15') self.assertEqual(result['terminationreason'], 'cputime-soft', 'termination reason is not "cputime-soft"') self.assertAlmostEqual(result['walltime'], 4, delta=3, msg='walltime is not approximately the time after which the process should have been killed') self.assertAlmostEqual(result['cputime'], 4, delta=3, msg='cputime is not approximately the time after which the process should have been killed') self.check_result_keys(result, 'terminationreason') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_walltime_limit(self): if not os.path.exists('/bin/sleep'): self.skipTest('missing /bin/sleep') try: (result, output) = self.execute_run('/bin/sleep', '10', walltimelimit=1) except SystemExit as e: self.assertEqual(str(e), 'Wall time limit is not implemented for systems without cpuacct cgroup.') self.skipTest(e) self.check_exitcode(result, 9, 'exit code of killed process is not 9') self.assertEqual(result['terminationreason'], 'walltime', 'termination reason is not "walltime"') self.assertAlmostEqual(result['walltime'], 4, delta=3, msg='walltime is not approximately the time after which the process should have been killed') self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of /bin/sleep is not approximately zero') self.check_result_keys(result, 'terminationreason') self.check_command_in_output(output, '/bin/sleep 10') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_cputime_walltime_limit(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i', hardtimelimit=1, walltimelimit=5) self.check_exitcode(result, 9, 'exit code of killed process is not 9') if 'terminationreason' in result: # not produced currently if killed by ulimit self.assertEqual(result['terminationreason'], 'cputime', 'termination reason is not "cputime"') self.assertAlmostEqual(result['walltime'], 1.4, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed') self.assertAlmostEqual(result['cputime'], 1.4, delta=0.5, msg='cputime is not approximately the time after which the process should have been killed') self.check_result_keys(result, 'terminationreason') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_all_timelimits(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') try: (result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i', softtimelimit=1, hardtimelimit=2, walltimelimit=5) except SystemExit as e: self.assertEqual(str(e), 'Soft time limit cannot be specified without cpuacct cgroup.') self.skipTest(e) self.check_exitcode(result, 15, 'exit code of killed process is not 15') self.assertEqual(result['terminationreason'], 'cputime-soft', 'termination reason is not "cputime-soft"') self.assertAlmostEqual(result['walltime'], 1.4, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed') self.assertAlmostEqual(result['cputime'], 1.4, delta=0.5, msg='cputime is not approximately the time after which the process should have been killed') self.check_result_keys(result, 'terminationreason') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_input_is_redirected_from_devnull(self): if not os.path.exists('/bin/cat'): self.skipTest('missing /bin/cat') try: (result, output) = self.execute_run('/bin/cat', walltimelimit=1) except SystemExit as e: self.assertEqual(str(e), 'Wall time limit is not implemented for systems without cpuacct cgroup.') self.skipTest(e) self.check_exitcode(result, 0, 'exit code of process is not 0') self.assertAlmostEqual(result['walltime'], 0.2, delta=0.2, msg='walltime of "/bin/cat < /dev/null" is not approximately zero') self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of "/bin/cat < /dev/null" is not approximately zero') self.check_result_keys(result) self.check_command_in_output(output, '/bin/cat') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_input_is_redirected_from_file(self): if not os.path.exists('/bin/cat'): self.skipTest('missing /bin/cat') with tempfile.TemporaryFile() as tmp: tmp.write(b'TEST_TOKEN') tmp.flush() tmp.seek(0) try: (result, output) = self.execute_run('/bin/cat', stdin=tmp, walltimelimit=1) except SystemExit as e: self.assertEqual(str(e), 'Wall time limit is not implemented for systems without cpuacct cgroup.') self.skipTest(e) self.check_exitcode(result, 0, 'exit code of process is not 0') self.assertAlmostEqual(result['walltime'], 0.2, delta=0.2, msg='walltime of "/bin/cat < /dev/null" is not approximately zero') self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of "/bin/cat < /dev/null" is not approximately zero') self.check_result_keys(result) self.check_command_in_output(output, '/bin/cat') self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output') for line in output[1:-1]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_input_is_redirected_from_stdin(self): if not os.path.exists('/bin/cat'): self.skipTest('missing /bin/cat') (output_fd, output_filename) = tempfile.mkstemp('.log', 'output_', text=True) cmd = [runexec, '--input', '-', '--output', output_filename, '--walltime', '1', '/bin/cat'] try: process = subprocess.Popen(args=cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=DEVNULL) try: runexec_output, unused_err = process.communicate(b'TEST_TOKEN') except: process.kill() process.wait() raise retcode = process.poll() if retcode: print(runexec_output.decode()) raise subprocess.CalledProcessError(retcode, cmd, output=runexec_output) output = os.read(output_fd, 4096).decode().splitlines() finally: os.close(output_fd) os.remove(output_filename) result={key.strip(): value.strip() for (key, _, value) in (line.partition('=') for line in runexec_output.decode().splitlines())} self.check_exitcode(result, 0, 'exit code of process is not 0') self.assertAlmostEqual(float(result['walltime'].rstrip('s')), 0.2, delta=0.2, msg='walltime of "/bin/cat < /dev/null" is not approximately zero') self.assertAlmostEqual(float(result['cputime'].rstrip('s')), 0.2, delta=0.2, msg='cputime of "/bin/cat < /dev/null" is not approximately zero') self.check_result_keys(result, 'returnvalue') self.check_command_in_output(output, '/bin/cat') self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output') for line in output[1:-1]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_append_environment_variable(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (_, output) = self.execute_run('/bin/sh', '-c', 'echo $PATH') path = output[-1] (_, output) = self.execute_run('/bin/sh', '-c', 'echo $PATH', environments={'additionalEnv': {'PATH': ':TEST_TOKEN'}}) self.assertEqual(output[-1], path + ':TEST_TOKEN') def test_new_environment_variable(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (_, output) = self.execute_run('/bin/sh', '-c', 'echo $PATH', environments={'newEnv': {'PATH': '/usr/bin'}}) self.assertEqual(output[-1], '/usr/bin') def test_stop_run(self): if not os.path.exists('/bin/sleep'): self.skipTest('missing /bin/sleep') thread = _StopRunThread(1, self.runexecutor) thread.start() (result, output) = self.execute_run('/bin/sleep', '10') thread.join() self.check_exitcode(result, 9, 'exit code of killed process is not 9') self.assertEqual(result['terminationreason'], 'killed', 'termination reason is not "killed"') self.assertAlmostEqual(result['walltime'], 1, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed') self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of /bin/sleep is not approximately zero') self.check_result_keys(result, 'terminationreason') self.check_command_in_output(output, '/bin/sleep 10') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_reduce_file_size_empty_file(self): with tempfile.NamedTemporaryFile() as tmp: runexecutor._reduce_file_size_if_necessary(tmp.name, 0) self.assertEqual(os.path.getsize(tmp.name), 0) def test_reduce_file_size_empty_file2(self): with tempfile.NamedTemporaryFile() as tmp: runexecutor._reduce_file_size_if_necessary(tmp.name, 500) self.assertEqual(os.path.getsize(tmp.name), 0) def test_reduce_file_size_long_line_not_truncated(self): with tempfile.NamedTemporaryFile(mode='wt') as tmp: content = 'Long line ' * 500 tmp.write(content) tmp.flush() runexecutor._reduce_file_size_if_necessary(tmp.name, 500) with open(tmp.name, 'rt') as tmp2: self.assertMultiLineEqual(tmp2.read(), content) REDUCE_WARNING_MSG = "WARNING: YOUR LOGFILE WAS TOO LONG, SOME LINES IN THE MIDDLE WERE REMOVED." REDUCE_OVERHEAD = 100 def test_reduce_file_size(self): with tempfile.NamedTemporaryFile(mode='wt') as tmp: line = 'Some text\n' tmp.write(line * 500) tmp.flush() limit = 500 runexecutor._reduce_file_size_if_necessary(tmp.name, limit) self.assertLessEqual(os.path.getsize(tmp.name), limit + self.REDUCE_OVERHEAD) with open(tmp.name, 'rt') as tmp2: new_content = tmp2.read() self.assertIn(self.REDUCE_WARNING_MSG, new_content) self.assertTrue(new_content.startswith(line)) self.assertTrue(new_content.endswith(line)) def test_reduce_file_size_limit_zero(self): with tempfile.NamedTemporaryFile(mode='wt') as tmp: line = 'Some text\n' tmp.write(line * 500) tmp.flush() runexecutor._reduce_file_size_if_necessary(tmp.name, 0) self.assertLessEqual(os.path.getsize(tmp.name), self.REDUCE_OVERHEAD) with open(tmp.name, 'rt') as tmp2: new_content = tmp2.read() self.assertIn(self.REDUCE_WARNING_MSG, new_content) self.assertTrue(new_content.startswith(line)) def test_integration(self): if not os.path.exists('/bin/echo'): self.skipTest('missing /bin/echo') (result, output) = self.execute_run_extern('/bin/echo', 'TEST_TOKEN') self.check_exitcode(result, 0, 'exit code of /bin/echo is not zero') self.check_result_keys(result, 'returnvalue') self.check_command_in_output(output, '/bin/echo TEST_TOKEN') self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output') for line in output[1:-1]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_home_and_tmp_is_separate(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (result, output) = self.execute_run('/bin/sh', '-c', 'echo $HOME $TMPDIR') self.check_exitcode(result, 0, 'exit code of /bin/sh is not zero') self.assertRegex(output[-1], '/BenchExec_run_[^/]*/home .*/BenchExec_run_[^/]*/tmp', 'HOME or TMPDIR variable does not contain expected temporary directory') def test_temp_dirs_are_removed(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (result, output) = self.execute_run('/bin/sh', '-c', 'echo $HOME $TMPDIR') self.check_exitcode(result, 0, 'exit code of /bin/sh is not zero') home_dir = output[-1].split(' ')[0] temp_dir = output[-1].split(' ')[1] self.assertFalse(os.path.exists(home_dir), 'temporary home directory {} was not cleaned up'.format(home_dir)) self.assertFalse(os.path.exists(temp_dir), 'temporary temp directory {} was not cleaned up'.format(temp_dir)) def test_home_is_writable(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') (result, output) = self.execute_run('/bin/sh', '-c', 'touch $HOME/TEST_FILE') self.check_exitcode( result, 0, 'Failed to write to $HOME/TEST_FILE, output was\n{}'.format(output)) def test_no_cleanup_temp(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') self.setUp(cleanup_temp_dir=False) # create RunExecutor with desired parameter (result, output) = self.execute_run('/bin/sh', '-c', 'echo "$TMPDIR"; echo "" > "$TMPDIR/test"') self.check_exitcode(result, 0, 'exit code of /bin/sh is not zero') temp_dir = output[-1] test_file = os.path.join(temp_dir, 'test') subprocess.check_call(self.runexecutor._build_cmdline(['test', '-f', test_file])) self.assertEqual('tmp', os.path.basename(temp_dir), 'unexpected name of temp dir') self.assertNotEqual('/tmp', temp_dir, 'temp dir should not be the global temp dir') subprocess.check_call(self.runexecutor._build_cmdline(['rm', '-r', os.path.dirname(temp_dir)])) def test_require_cgroup_invalid(self): self.assertRaisesRegex(SystemExit, '.*invalid.*', lambda: RunExecutor(additional_cgroup_subsystems=['invalid'])) def test_require_cgroup_cpu(self): try: self.setUp(additional_cgroup_subsystems=['cpu']) except SystemExit as e: self.skipTest(e) if not os.path.exists('/bin/cat'): self.skipTest('missing /bin/cat') (result, output) = self.execute_run('/bin/cat', '/proc/self/cgroup') self.check_exitcode(result, 0, 'exit code of /bin/cat is not zero') for line in output: if re.match('^[0-9]*:([^:]*,)?cpu(,[^:]*)?:/(.*/)?benchmark_.*$',line): return # Success self.fail('Not in expected cgroup for subsystem cpu:\n' + '\n'.join(output)) def test_set_cgroup_cpu_shares(self): if not os.path.exists('/bin/echo'): self.skipTest('missing /bin/echo') try: self.setUp(additional_cgroup_subsystems=['cpu']) except SystemExit as e: self.skipTest(e) (result, _) = self.execute_run('/bin/echo', cgroupValues={('cpu', 'shares'): 42}) self.check_exitcode(result, 0, 'exit code of /bin/echo is not zero') # Just assert that execution was successful, # testing that the value was actually set is much more difficult. class TestRunExecutorWithSudo(TestRunExecutor): """ Run tests using the sudo mode of RunExecutor, if possible. sudo is typically set up to allow executing as our own user, so we try that. Note that this will not catch all problems, for example if we forget to use "sudo kill" to send a signal and instead send it directly, but requiring a second user for tests would not be good, either. """ # Use user name defined in environment variable if present, # or fall back to current user (sudo always allows this). # sudo allows refering to numerical uids with '#'. user = os.environ.get('BENCHEXEC_TEST_USER', '#' + str(os.getuid())) def setUp(self, *args, **kwargs): try: self.runexecutor = RunExecutor(user=self.user, *args, **kwargs) except SystemExit as e: # sudo seems not to be available self.skipTest(e) def check_exitcode(self, result, expected, msg=None): actual = int(result['exitcode']) if expected == 15 and actual == 0: # On Ubuntu 16.04, sudo returns 0 if process is killed with signal 15 return # Using sudo may affect the exit code: # what was the returnsignal is now the returnvalue. # The distinction between returnsignal and returnvalue of the actual # process is lost. # If the returnsignal (of the sudo process) is 0, # we replace the exit code with the mixed returnsignal/returnvalue of # the actual process (with bit for core dump cleared). returnsignal = actual & 0x7F returnvalue = (actual >> 8) & 0x7F if returnsignal == 0: actual = returnvalue self.assertEqual(actual, expected, msg) def check_command_in_output(self, output, cmd): self.assertTrue(output[0].endswith(cmd), 'run output misses executed command') def test_detect_new_files_in_home(self): if not os.path.exists('/usr/bin/mktemp'): self.skipTest('missing /usr/bin/mktemp') home_dir = runexecutor._get_user_account_info(self.user).pw_dir tmp_file_pattern = '.BenchExec_test_runexecutor_'+unichr(0xe4)+unichr(0xf6)+unichr(0xfc)+'_XXXXXXXXXX' (result, output) = self.execute_run( '/usr/bin/mktemp', '--tmpdir=' + home_dir, tmp_file_pattern) try: self.check_exitcode(result, 0, 'exit code of /usr/bin/mktemp is not zero') tmp_file = output[-1] self.assertIn(tmp_file, self.runexecutor.check_for_new_files_in_home(), 'runexecutor failed to detect new temporary file in home directory') finally: subprocess.check_call(self.runexecutor._build_cmdline(['rm', tmp_file])) def test_append_environment_variable(self): # sudo-mode has a suboptimal implementation for additionalEnv: # If an environment variable is not modified, it will be cleared completely and in case of # PATH sudo will set it. If PATH is specified in additionalEnv, we will copy the value # from the current process (which is different than what sudo would set) # and append the given string. pass class TestRunExecutorWithContainer(TestRunExecutor): def setUp(self, *args, **kwargs): try: container.execute_in_namespace(lambda: 0) except OSError as e: self.skipTest("Namespaces not supported: {}".format(os.strerror(e.errno))) self.runexecutor = RunExecutor( use_namespaces=True, dir_modes={"/": containerexecutor.DIR_READ_ONLY, "/tmp": containerexecutor.DIR_HIDDEN}, container_system_config=False, *args, **kwargs) def execute_run(self, *args, **kwargs): return super(TestRunExecutorWithContainer, self).execute_run(workingDir="/tmp", *args, **kwargs) def test_home_and_tmp_is_separate(self): self.skipTest("not relevant in container") def test_temp_dirs_are_removed(self): self.skipTest("not relevant in container") def test_home_is_writable(self): self.skipTest("needs container_system_config=True and thus overlay mode") def test_no_cleanup_temp(self): self.skipTest("not relevant in container") def check_result_files(self, shell_cmd, result_files_patterns, expected_result_files): output_dir = tempfile.mkdtemp("", "output_") try: result, output = self.execute_run("/bin/sh", "-c", shell_cmd, output_dir=output_dir, result_files_patterns=result_files_patterns) self.assertNotIn("terminationreason", result) self.assertEqual(result["exitcode"], 0, "exit code of {} is not zero,\nresult was {!r},\noutput was\n{}" .format(" ".join(shell_cmd), result, "\n".join(output))) result_files = [] for root, unused_dirs, files in os.walk(output_dir): for file in files: result_files.append(os.path.relpath(os.path.join(root, file), output_dir)) expected_result_files.sort() result_files.sort() self.assertListEqual(result_files, expected_result_files, "\nList of retrieved result files differs from expected list,\n" "result was {!r},\noutput was\n{}".format(result, "\n".join(output))) finally: shutil.rmtree(output_dir, ignore_errors=True) def test_result_file_simple(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["."], ["TEST_FILE"]) def test_result_file_recursive(self): self.check_result_files("mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE", ["."], ["TEST_DIR/TEST_FILE"]) def test_result_file_multiple(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE; echo TEST_TOKEN > TEST_FILE2", ["."], ["TEST_FILE", "TEST_FILE2"]) def test_result_file_symlink(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE; ln -s TEST_FILE TEST_LINK", ["."], ["TEST_FILE"]) def test_result_file_no_match(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["NO_MATCH"], []) def test_result_file_no_pattern(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE", [], []) def test_result_file_empty_pattern(self): self.assertRaises(ValueError, lambda: self.check_result_files("echo TEST_TOKEN > TEST_FILE", [""], [])) def test_result_file_partial_match(self): self.check_result_files( "echo TEST_TOKEN > TEST_FILE; mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE", ["TEST_DIR"], ["TEST_DIR/TEST_FILE"]) def test_result_file_multiple_patterns(self): self.check_result_files( "echo TEST_TOKEN > TEST_FILE; " "echo TEST_TOKEN > TEST_FILE2; " "mkdir TEST_DIR; " "echo TEST_TOKEN > TEST_DIR/TEST_FILE; ", ["TEST_FILE", "TEST_DIR/TEST_FILE"], ["TEST_FILE", "TEST_DIR/TEST_FILE"]) def test_result_file_wildcard(self): self.check_result_files( "echo TEST_TOKEN > TEST_FILE; " "echo TEST_TOKEN > TEST_FILE2; " "echo TEST_TOKEN > TEST_NOFILE; ", ["TEST_FILE*"], ["TEST_FILE", "TEST_FILE2"]) def test_result_file_absolute_pattern(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["/"], ["tmp/TEST_FILE"]) def test_result_file_absolute_and_pattern(self): self.check_result_files( "echo TEST_TOKEN > TEST_FILE; mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE", ["TEST_FILE", "/tmp/TEST_DIR", ], ["tmp/TEST_FILE", "tmp/TEST_DIR/TEST_FILE"]) def test_result_file_relative_traversal(self): self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["foo/../TEST_FILE"], ["TEST_FILE"]) def test_result_file_illegal_relative_traversal(self): self.assertRaises(ValueError, lambda: self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["foo/../../bar"], [])) def test_result_file_recursive_pattern(self): if not util.maybe_recursive_iglob == glob.iglob: self.skipTest("missing recursive glob.iglob") self.check_result_files( "mkdir -p TEST_DIR/TEST_DIR; " "echo TEST_TOKEN > TEST_FILE.txt; " "echo TEST_TOKEN > TEST_DIR/TEST_FILE.txt; " "echo TEST_TOKEN > TEST_DIR/TEST_DIR/TEST_FILE.txt; ", ["**/*.txt"], ["TEST_FILE.txt", "TEST_DIR/TEST_FILE.txt", "TEST_DIR/TEST_DIR/TEST_FILE.txt"]) def test_file_count_limit(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') filehierarchylimit._CHECK_INTERVAL_SECONDS = 0.1 (result, output) = self.execute_run('/bin/sh', '-c', 'for i in $(seq 1 10000); do touch $i; done', files_count_limit=100, result_files_patterns=None) self.check_exitcode(result, 9, 'exit code of killed process is not 15') self.assertEqual(result['terminationreason'], 'files-count', 'termination reason is not "files-count"') self.check_result_keys(result, 'terminationreason') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') def test_file_size_limit(self): if not os.path.exists('/bin/sh'): self.skipTest('missing /bin/sh') filehierarchylimit._CHECK_INTERVAL_SECONDS = 0.1 (result, output) = self.execute_run('/bin/sh', '-c', 'for i in $(seq 1 100000); do echo $i >> TEST_FILE; done', files_size_limit=100, result_files_patterns=None) self.check_exitcode(result, 9, 'exit code of killed process is not 15') self.assertEqual(result['terminationreason'], 'files-size', 'termination reason is not "files-size"') self.check_result_keys(result, 'terminationreason') for line in output[1:]: self.assertRegex(line, '^-*$', 'unexpected text in run output') class _StopRunThread(threading.Thread): def __init__(self, delay, runexecutor): super(_StopRunThread, self).__init__() self.daemon = True self.delay = delay self.runexecutor = runexecutor def run(self): time.sleep(self.delay) self.runexecutor.stop()
{'content_hash': '272822faf4bea42400273b8469a7309b', 'timestamp': '', 'source': 'github', 'line_count': 717, 'max_line_length': 160, 'avg_line_length': 48.91073919107392, 'alnum_prop': 0.606860760215575, 'repo_name': 'martin-neuhaeusser/benchexec', 'id': 'c0df5d84dde3b2444dabf517afe8bb24cc90a9a4', 'size': '35790', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'benchexec/test_runexecutor.py', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Gnuplot', 'bytes': '3882'}, {'name': 'HTML', 'bytes': '63337'}, {'name': 'PHP', 'bytes': '4704'}, {'name': 'Python', 'bytes': '797325'}, {'name': 'Shell', 'bytes': '5328'}, {'name': 'TeX', 'bytes': '6538'}]}
using Microsoft.PowerShell.Activities; using System.Management.Automation; using System.Activities; using System.Collections.Generic; using System.ComponentModel; namespace Microsoft.PowerShell.Management.Activities { /// <summary> /// Activity to invoke the Microsoft.PowerShell.Management\Test-Connection command in a Workflow. /// </summary> [System.CodeDom.Compiler.GeneratedCode("Microsoft.PowerShell.Activities.ActivityGenerator.GenerateFromName", "3.0")] public sealed class TestConnection : PSRemotingActivity { /// <summary> /// Gets the display name of the command invoked by this activity. /// </summary> public TestConnection() { this.DisplayName = "Test-Connection"; } /// <summary> /// Gets the fully qualified name of the command invoked by this activity. /// </summary> public override string PSCommandName { get { return "Microsoft.PowerShell.Management\\Test-Connection"; } } // Arguments /// <summary> /// Provides access to the AsJob parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Management.Automation.SwitchParameter> AsJob { get; set; } /// <summary> /// Provides access to the DcomAuthentication parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Management.AuthenticationLevel> DcomAuthentication { get; set; } /// <summary> /// Provides access to the WsmanAuthentication parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.String> WsmanAuthentication { get; set; } /// <summary> /// Provides access to the Protocol parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.String> Protocol { get; set; } /// <summary> /// Provides access to the BufferSize parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Int32> BufferSize { get; set; } /// <summary> /// Provides access to the ComputerName parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.String[]> ComputerName { get; set; } /// <summary> /// Provides access to the Count parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Int32> Count { get; set; } /// <summary> /// Provides access to the Credential parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Management.Automation.PSCredential> Credential { get; set; } /// <summary> /// Provides access to the Source parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.String[]> Source { get; set; } /// <summary> /// Provides access to the Impersonation parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Management.ImpersonationLevel> Impersonation { get; set; } /// <summary> /// Provides access to the ThrottleLimit parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Int32> ThrottleLimit { get; set; } /// <summary> /// Provides access to the TimeToLive parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Int32> TimeToLive { get; set; } /// <summary> /// Provides access to the Delay parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Int32> Delay { get; set; } /// <summary> /// Provides access to the Quiet parameter. /// </summary> [ParameterSpecificCategory] [DefaultValue(null)] public InArgument<System.Management.Automation.SwitchParameter> Quiet { get; set; } // Module defining this command // Optional custom code for this activity /// <summary> /// Returns a configured instance of System.Management.Automation.PowerShell, pre-populated with the command to run. /// </summary> /// <param name="context">The NativeActivityContext for the currently running activity.</param> /// <returns>A populated instance of Sytem.Management.Automation.PowerShell</returns> /// <remarks>The infrastructure takes responsibility for closing and disposing the PowerShell instance returned.</remarks> protected override ActivityImplementationContext GetPowerShell(NativeActivityContext context) { System.Management.Automation.PowerShell invoker = global::System.Management.Automation.PowerShell.Create(); System.Management.Automation.PowerShell targetCommand = invoker.AddCommand(PSCommandName); // Initialize the arguments if(AsJob.Expression != null) { targetCommand.AddParameter("AsJob", AsJob.Get(context)); } if(DcomAuthentication.Expression != null) { targetCommand.AddParameter("DcomAuthentication", DcomAuthentication.Get(context)); } if(WsmanAuthentication.Expression != null) { targetCommand.AddParameter("WsmanAuthentication", WsmanAuthentication.Get(context)); } if(Protocol.Expression != null) { targetCommand.AddParameter("Protocol", Protocol.Get(context)); } if(BufferSize.Expression != null) { targetCommand.AddParameter("BufferSize", BufferSize.Get(context)); } if(ComputerName.Expression != null) { targetCommand.AddParameter("ComputerName", ComputerName.Get(context)); } if(Count.Expression != null) { targetCommand.AddParameter("Count", Count.Get(context)); } if(Credential.Expression != null) { targetCommand.AddParameter("Credential", Credential.Get(context)); } if(Source.Expression != null) { targetCommand.AddParameter("Source", Source.Get(context)); } if(Impersonation.Expression != null) { targetCommand.AddParameter("Impersonation", Impersonation.Get(context)); } if(ThrottleLimit.Expression != null) { targetCommand.AddParameter("ThrottleLimit", ThrottleLimit.Get(context)); } if(TimeToLive.Expression != null) { targetCommand.AddParameter("TimeToLive", TimeToLive.Get(context)); } if(Delay.Expression != null) { targetCommand.AddParameter("Delay", Delay.Get(context)); } if(Quiet.Expression != null) { targetCommand.AddParameter("Quiet", Quiet.Get(context)); } return new ActivityImplementationContext() { PowerShellInstance = invoker }; } } }
{'content_hash': '2ee3e36a119afac27edbcfe7629b7b52', 'timestamp': '', 'source': 'github', 'line_count': 223, 'max_line_length': 130, 'avg_line_length': 34.87892376681614, 'alnum_prop': 0.5957829776292106, 'repo_name': 'kmosher/PowerShell', 'id': 'd65e1e32ea5d9e6b7436ba23a3906bc35e2079e4', 'size': '7837', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'src/Microsoft.PowerShell.Management.Activities/Generated/TestConnectionActivity.cs', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C', 'bytes': '5425'}, {'name': 'C#', 'bytes': '37140986'}, {'name': 'C++', 'bytes': '304887'}, {'name': 'CMake', 'bytes': '23659'}, {'name': 'PowerShell', 'bytes': '2223161'}, {'name': 'Python', 'bytes': '492'}, {'name': 'Shell', 'bytes': '293'}, {'name': 'XSLT', 'bytes': '14404'}]}
package org.wso2.identity.integration.common.clients.claim.metadata.mgt; import org.apache.axis2.AxisFault; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.wso2.carbon.identity.claim.metadata.mgt.stub.ClaimMetadataManagementServiceClaimMetadataException; import org.wso2.carbon.identity.claim.metadata.mgt.stub.ClaimMetadataManagementServiceStub; import org.wso2.carbon.identity.claim.metadata.mgt.stub.dto.ClaimDialectDTO; import org.wso2.carbon.identity.claim.metadata.mgt.stub.dto.ExternalClaimDTO; import org.wso2.carbon.identity.claim.metadata.mgt.stub.dto.LocalClaimDTO; import org.wso2.identity.integration.common.clients.AuthenticateStub; import org.wso2.identity.integration.common.clients.ClaimManagementServiceClient; import java.rmi.RemoteException; /** * This class invokes the operations of ClaimMetadataManagementService. */ public class ClaimMetadataManagementServiceClient { private static Log log = LogFactory.getLog(ClaimManagementServiceClient.class); private final String serviceName = "ClaimMetadataManagementService"; private ClaimMetadataManagementServiceStub claimMetadataManagementServiceStub; private String endPoint; public ClaimMetadataManagementServiceClient(String backEndUrl, String sessionCookie) throws AxisFault { this.endPoint = backEndUrl + serviceName; claimMetadataManagementServiceStub = new ClaimMetadataManagementServiceStub(endPoint); AuthenticateStub.authenticateStub(sessionCookie, claimMetadataManagementServiceStub); } public ClaimMetadataManagementServiceClient(String backEndUrl, String userName, String password) throws AxisFault { this.endPoint = backEndUrl + serviceName; claimMetadataManagementServiceStub = new ClaimMetadataManagementServiceStub(endPoint); AuthenticateStub.authenticateStub(userName, password, claimMetadataManagementServiceStub); } public ClaimDialectDTO[] getClaimDialects() throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { return claimMetadataManagementServiceStub.getClaimDialects(); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void addClaimDialect(ClaimDialectDTO externalClaimDialect) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.addClaimDialect(externalClaimDialect); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void removeClaimDialect(String externalClaimDialect) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { ClaimDialectDTO claimDialect = new ClaimDialectDTO(); claimDialect.setClaimDialectURI(externalClaimDialect); claimMetadataManagementServiceStub.removeClaimDialect(claimDialect); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public LocalClaimDTO[] getLocalClaims() throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { return claimMetadataManagementServiceStub.getLocalClaims(); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void addLocalClaim(LocalClaimDTO localCLaim) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.addLocalClaim(localCLaim); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void updateLocalClaim(LocalClaimDTO localClaim) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.updateLocalClaim(localClaim); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void removeLocalClaim(String localCLaimURI) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.removeLocalClaim(localCLaimURI); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public ExternalClaimDTO[] getExternalClaims(String externalClaimDialectURI) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { return claimMetadataManagementServiceStub.getExternalClaims(externalClaimDialectURI); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void addExternalClaim(ExternalClaimDTO externalClaim) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.addExternalClaim(externalClaim); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void updateExternalClaim(ExternalClaimDTO externalClaim) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.updateExternalClaim(externalClaim); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } public void removeExternalClaim(String externalClaimDialectURI, String externalClaimURI) throws RemoteException, ClaimMetadataManagementServiceClaimMetadataException { try { claimMetadataManagementServiceStub.removeExternalClaim(externalClaimDialectURI, externalClaimURI); } catch (RemoteException e) { throw e; } catch (ClaimMetadataManagementServiceClaimMetadataException e) { throw e; } } }
{'content_hash': 'f1cf1708a47c99334aa83200a1aaee31', 'timestamp': '', 'source': 'github', 'line_count': 174, 'max_line_length': 116, 'avg_line_length': 38.87931034482759, 'alnum_prop': 0.716629711751663, 'repo_name': 'ChamaraPhilipsuom/product-is', 'id': '9d4b3965e01a71a53691526fefaa4b27e5b1e267', 'size': '7401', 'binary': False, 'copies': '17', 'ref': 'refs/heads/master', 'path': 'modules/integration/tests-common/admin-clients/src/main/java/org/wso2/identity/integration/common/clients/claim/metadata/mgt/ClaimMetadataManagementServiceClient.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Batchfile', 'bytes': '2042'}, {'name': 'CSS', 'bytes': '1499408'}, {'name': 'HTML', 'bytes': '123995'}, {'name': 'Java', 'bytes': '1843682'}, {'name': 'JavaScript', 'bytes': '1667484'}, {'name': 'Objective-C', 'bytes': '37909'}, {'name': 'PLSQL', 'bytes': '33146'}, {'name': 'PLpgSQL', 'bytes': '14249'}, {'name': 'Perl', 'bytes': '5722'}, {'name': 'SQLPL', 'bytes': '471'}, {'name': 'Shell', 'bytes': '1841'}]}
require 'formula' class Udis86 < Formula homepage 'http://udis86.sourceforge.net' url 'https://downloads.sourceforge.net/udis86/udis86-1.7.2.tar.gz' sha1 'f55dec2d5319aac9d0a7ae2614ddcc7aa73d3744' option :universal def install ENV.universal_binary if build.universal? system "./configure", "--prefix=#{prefix}", "--enable-shared" system "make" system "make install" end test do IO.popen("#{bin}/udcli -x", "w+") do |pipe| pipe.write "cd 80" pipe.close_write assert pipe.read.include?("int 0x80") end end end
{'content_hash': '2543d56a7471e2f9ab6330f3ed317adf', 'timestamp': '', 'source': 'github', 'line_count': 25, 'max_line_length': 68, 'avg_line_length': 23.84, 'alnum_prop': 0.6375838926174496, 'repo_name': 'docwhat/homebrew', 'id': '18994752782bcb4e69e0b311bfe1262876801b30', 'size': '596', 'binary': False, 'copies': '29', 'ref': 'refs/heads/master', 'path': 'Library/Formula/udis86.rb', 'mode': '33188', 'license': 'bsd-2-clause', 'language': [{'name': 'JavaScript', 'bytes': '18'}, {'name': 'Python', 'bytes': '10600'}, {'name': 'Ruby', 'bytes': '3547564'}, {'name': 'Shell', 'bytes': '29544'}]}
/** * This module contains general functions that can be used for building * different kinds of domTree nodes in a consistent manner. */ var domTree = require("./domTree"); var fontMetrics = require("./fontMetrics"); var symbols = require("./symbols"); /** * Makes a symbolNode after translation via the list of symbols in symbols.js. * Correctly pulls out metrics for the character, and optionally takes a list of * classes to be attached to the node. */ var makeSymbol = function(value, style, mode, color, classes) { // Replace the value with its replaced value from symbol.js if (symbols[mode][value] && symbols[mode][value].replace) { value = symbols[mode][value].replace; } var metrics = fontMetrics.getCharacterMetrics(value, style); var symbolNode; if (metrics) { symbolNode = new domTree.symbolNode( value, metrics.height, metrics.depth, metrics.italic, metrics.skew, classes); } else { // TODO(emily): Figure out a good way to only print this in development typeof console !== "undefined" && console.warn( "No character metrics for '" + value + "' in style '" + style + "'"); symbolNode = new domTree.symbolNode(value, 0, 0, 0, 0, classes); } if (color) { symbolNode.style.color = color; } return symbolNode; }; /** * Makes a symbol in the italic math font. */ var mathit = function(value, mode, color, classes) { return makeSymbol( value, "Math-Italic", mode, color, classes.concat(["mathit"])); }; /** * Makes a symbol in the upright roman font. */ var mathrm = function(value, mode, color, classes) { // Decide what font to render the symbol in by its entry in the symbols // table. if (symbols[mode][value].font === "main") { return makeSymbol(value, "Main-Regular", mode, color, classes); } else { return makeSymbol( value, "AMS-Regular", mode, color, classes.concat(["amsrm"])); } }; /** * Calculate the height, depth, and maxFontSize of an element based on its * children. */ var sizeElementFromChildren = function(elem) { var height = 0; var depth = 0; var maxFontSize = 0; if (elem.children) { for (var i = 0; i < elem.children.length; i++) { if (elem.children[i].height > height) { height = elem.children[i].height; } if (elem.children[i].depth > depth) { depth = elem.children[i].depth; } if (elem.children[i].maxFontSize > maxFontSize) { maxFontSize = elem.children[i].maxFontSize; } } } elem.height = height; elem.depth = depth; elem.maxFontSize = maxFontSize; }; /** * Makes a span with the given list of classes, list of children, and color. */ var makeSpan = function(classes, children, color) { var span = new domTree.span(classes, children); sizeElementFromChildren(span); if (color) { span.style.color = color; } return span; }; /** * Makes a document fragment with the given list of children. */ var makeFragment = function(children) { var fragment = new domTree.documentFragment(children); sizeElementFromChildren(fragment); return fragment; }; /** * Makes an element placed in each of the vlist elements to ensure that each * element has the same max font size. To do this, we create a zero-width space * with the correct font size. */ var makeFontSizer = function(options, fontSize) { var fontSizeInner = makeSpan([], [new domTree.symbolNode("\u200b")]); fontSizeInner.style.fontSize = (fontSize / options.style.sizeMultiplier) + "em"; var fontSizer = makeSpan( ["fontsize-ensurer", "reset-" + options.size, "size5"], [fontSizeInner]); return fontSizer; }; /** * Makes a vertical list by stacking elements and kerns on top of each other. * Allows for many different ways of specifying the positioning method. * * Arguments: * - children: A list of child or kern nodes to be stacked on top of each other * (i.e. the first element will be at the bottom, and the last at * the top). Element nodes are specified as * {type: "elem", elem: node} * while kern nodes are specified as * {type: "kern", size: size} * - positionType: The method by which the vlist should be positioned. Valid * values are: * - "individualShift": The children list only contains elem * nodes, and each node contains an extra * "shift" value of how much it should be * shifted (note that shifting is always * moving downwards). positionData is * ignored. * - "top": The positionData specifies the topmost point of * the vlist (note this is expected to be a height, * so positive values move up) * - "bottom": The positionData specifies the bottommost point * of the vlist (note this is expected to be a * depth, so positive values move down * - "shift": The vlist will be positioned such that its * baseline is positionData away from the baseline * of the first child. Positive values move * downwards. * - "firstBaseline": The vlist will be positioned such that * its baseline is aligned with the * baseline of the first child. * positionData is ignored. (this is * equivalent to "shift" with * positionData=0) * - positionData: Data used in different ways depending on positionType * - options: An Options object * */ var makeVList = function(children, positionType, positionData, options) { var depth; var currPos; var i; if (positionType === "individualShift") { var oldChildren = children; children = [oldChildren[0]]; // Add in kerns to the list of children to get each element to be // shifted to the correct specified shift depth = -oldChildren[0].shift - oldChildren[0].elem.depth; currPos = depth; for (i = 1; i < oldChildren.length; i++) { var diff = -oldChildren[i].shift - currPos - oldChildren[i].elem.depth; var size = diff - (oldChildren[i - 1].elem.height + oldChildren[i - 1].elem.depth); currPos = currPos + diff; children.push({type: "kern", size: size}); children.push(oldChildren[i]); } } else if (positionType === "top") { // We always start at the bottom, so calculate the bottom by adding up // all the sizes var bottom = positionData; for (i = 0; i < children.length; i++) { if (children[i].type === "kern") { bottom -= children[i].size; } else { bottom -= children[i].elem.height + children[i].elem.depth; } } depth = bottom; } else if (positionType === "bottom") { depth = -positionData; } else if (positionType === "shift") { depth = -children[0].elem.depth - positionData; } else if (positionType === "firstBaseline") { depth = -children[0].elem.depth; } else { depth = 0; } // Make the fontSizer var maxFontSize = 0; for (i = 0; i < children.length; i++) { if (children[i].type === "elem") { maxFontSize = Math.max(maxFontSize, children[i].elem.maxFontSize); } } var fontSizer = makeFontSizer(options, maxFontSize); // Create a new list of actual children at the correct offsets var realChildren = []; currPos = depth; for (i = 0; i < children.length; i++) { if (children[i].type === "kern") { currPos += children[i].size; } else { var child = children[i].elem; var shift = -child.depth - currPos; currPos += child.height + child.depth; var childWrap = makeSpan([], [fontSizer, child]); childWrap.height -= shift; childWrap.depth += shift; childWrap.style.top = shift + "em"; realChildren.push(childWrap); } } // Add in an element at the end with no offset to fix the calculation of // baselines in some browsers (namely IE, sometimes safari) var baselineFix = makeSpan( ["baseline-fix"], [fontSizer, new domTree.symbolNode("\u200b")]); realChildren.push(baselineFix); var vlist = makeSpan(["vlist"], realChildren); // Fix the final height and depth, in case there were kerns at the ends // since the makeSpan calculation won't take that in to account. vlist.height = Math.max(currPos, vlist.height); vlist.depth = Math.max(-depth, vlist.depth); return vlist; }; // A table of size -> font size for the different sizing functions var sizingMultiplier = { size1: 0.5, size2: 0.7, size3: 0.8, size4: 0.9, size5: 1.0, size6: 1.2, size7: 1.44, size8: 1.73, size9: 2.07, size10: 2.49 }; // A map of spacing functions to their attributes, like size and corresponding // CSS class var spacingFunctions = { "\\qquad": { size: "2em", className: "qquad" }, "\\quad": { size: "1em", className: "quad" }, "\\enspace": { size: "0.5em", className: "enspace" }, "\\;": { size: "0.277778em", className: "thickspace" }, "\\:": { size: "0.22222em", className: "mediumspace" }, "\\,": { size: "0.16667em", className: "thinspace" }, "\\!": { size: "-0.16667em", className: "negativethinspace" } }; module.exports = { makeSymbol: makeSymbol, mathit: mathit, mathrm: mathrm, makeSpan: makeSpan, makeFragment: makeFragment, makeVList: makeVList, sizingMultiplier: sizingMultiplier, spacingFunctions: spacingFunctions };
{'content_hash': 'ddf7b8f20cf14b4c0aec42348ebfab34', 'timestamp': '', 'source': 'github', 'line_count': 320, 'max_line_length': 84, 'avg_line_length': 33.0625, 'alnum_prop': 0.5655954631379962, 'repo_name': 'timholy/Escher.jl', 'id': '21fd4f479a1877716982361cc132b695513782f8', 'size': '10580', 'binary': False, 'copies': '17', 'ref': 'refs/heads/master', 'path': 'assets/bower_components/katex/src/buildCommon.js', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'CSS', 'bytes': '6941'}, {'name': 'HTML', 'bytes': '24173'}, {'name': 'JavaScript', 'bytes': '815'}, {'name': 'Julia', 'bytes': '103644'}, {'name': 'Makefile', 'bytes': '508'}]}
<html> <head> <title>Test for bug 62243</title> <style type="text/css"> #ruler { -webkit-transform: translateZ(0); border-bottom: 10px solid black; width: 550px; height: 500px; } </style> <script src="../../resources/run-after-display.js"></script> <script> if (window.testRunner) { testRunner.dumpAsTextWithPixelResults(); testRunner.waitUntilDone(); } function repaintTest() { document.getElementById("ruler").style.height = "450px"; testRunner.notifyDone(); } </script> </head> <body onload="runAfterDisplay(repaintTest);"> <div id="ruler"> </body> </html>
{'content_hash': '2cfcfe30c1ca0a8f586cbf81f6546885', 'timestamp': '', 'source': 'github', 'line_count': 28, 'max_line_length': 68, 'avg_line_length': 26.428571428571427, 'alnum_prop': 0.5432432432432432, 'repo_name': 'ondra-novak/blink', 'id': 'e47c91ca728f8e9420acd02fac594b6b878e16a9', 'size': '740', 'binary': False, 'copies': '3', 'ref': 'refs/heads/nw', 'path': 'LayoutTests/compositing/repaint/shrink-layer.html', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'Assembly', 'bytes': '12983'}, {'name': 'Bison', 'bytes': '64327'}, {'name': 'C', 'bytes': '1487362'}, {'name': 'C++', 'bytes': '40237536'}, {'name': 'CSS', 'bytes': '537586'}, {'name': 'Java', 'bytes': '66510'}, {'name': 'JavaScript', 'bytes': '26502253'}, {'name': 'Makefile', 'bytes': '677'}, {'name': 'Objective-C', 'bytes': '23525'}, {'name': 'Objective-C++', 'bytes': '377730'}, {'name': 'PHP', 'bytes': '166434'}, {'name': 'Perl', 'bytes': '585757'}, {'name': 'Python', 'bytes': '3997910'}, {'name': 'Ruby', 'bytes': '141818'}, {'name': 'Shell', 'bytes': '8806'}, {'name': 'XSLT', 'bytes': '49099'}]}
<?php // // Cache banner roles in the MySQL portal database // require __DIR__ . '/init.php'; require_once 'autoload.php'; ini_set('memory_limit', '512M'); set_time_limit(-1); //PSU::db('portal')->debug = true; function purge_temp_table() { $sql = 'TRUNCATE TABLE banner_roles_temp'; PSU::db('portal')->execute($sql); } function swap_tables() { $sql = 'RENAME TABLE banner_roles_temp TO roless'; PSU::db('portal')->execute($sql); $sql = 'RENAME TABLE banner_roles TO banner_roles_temp'; PSU::db('portal')->execute($sql); $sql = 'RENAME TABLE roless TO banner_roles'; PSU::db('portal')->execute($sql); } function update_temp_table() { $sql = 'SELECT gorirol_pidm, LOWER(gorirol_role) gorirol_role FROM gorirol'; $results = PSU::db('psc1')->Execute($sql); $populate = 'INSERT INTO `banner_roles_temp` (`pidm`, `role`) VALUES '; $count = 0; $params = array(); foreach($results as $result) { $populate .= ($params ? "," : "") . "(?,?)"; $count++; $params[] = $result['gorirol_pidm']; $params[] = $result['gorirol_role']; if($count%1000==0) { PSU::db('portal')->execute($populate, $params); $populate = 'INSERT INTO `banner_roles_temp` (`pidm`, `role`) VALUES '; unset($params); $params = array(); } } if( $params ) { PSU::db('portal')->execute($populate, $params); } } purge_temp_table(); update_temp_table(); swap_tables();
{'content_hash': 'c9091f5143ddd95db460f21b64cf4a59', 'timestamp': '', 'source': 'github', 'line_count': 70, 'max_line_length': 77, 'avg_line_length': 19.942857142857143, 'alnum_prop': 0.6110315186246418, 'repo_name': 'maxxxieee/plymouth-webapp', 'id': 'd3b8691e5993e2a2ecb60d151949b9bfa536bdf7', 'size': '1396', 'binary': False, 'copies': '9', 'ref': 'refs/heads/master', 'path': 'webapp/my/util/bannerroles2portal.php', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'JavaScript', 'bytes': '2122915'}, {'name': 'PHP', 'bytes': '29877667'}, {'name': 'Shell', 'bytes': '14039'}]}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=ISO-8859-1" /> <title>InplaceFileConverter xref</title> <link type="text/css" rel="stylesheet" href="../../../stylesheet.css" /> </head> <body> <pre> <a name="1" href="#1">1</a> <strong class="jxr_keyword">package</strong> org.slf4j.migrator; <a name="2" href="#2">2</a> <a name="3" href="#3">3</a> <strong class="jxr_keyword">import</strong> java.io.BufferedReader; <a name="4" href="#4">4</a> <strong class="jxr_keyword">import</strong> java.io.ByteArrayInputStream; <a name="5" href="#5">5</a> <strong class="jxr_keyword">import</strong> java.io.ByteArrayOutputStream; <a name="6" href="#6">6</a> <strong class="jxr_keyword">import</strong> java.io.File; <a name="7" href="#7">7</a> <strong class="jxr_keyword">import</strong> java.io.FileInputStream; <a name="8" href="#8">8</a> <strong class="jxr_keyword">import</strong> java.io.FileOutputStream; <a name="9" href="#9">9</a> <strong class="jxr_keyword">import</strong> java.io.IOException; <a name="10" href="#10">10</a> <strong class="jxr_keyword">import</strong> java.io.InputStreamReader; <a name="11" href="#11">11</a> <strong class="jxr_keyword">import</strong> java.io.OutputStream; <a name="12" href="#12">12</a> <strong class="jxr_keyword">import</strong> java.io.Reader; <a name="13" href="#13">13</a> <a name="14" href="#14">14</a> <strong class="jxr_keyword">import</strong> org.slf4j.migrator.internal.ProgressListener; <a name="15" href="#15">15</a> <strong class="jxr_keyword">import</strong> org.slf4j.migrator.line.LineConverter; <a name="16" href="#16">16</a> <strong class="jxr_keyword">import</strong> org.slf4j.migrator.line.RuleSet; <a name="17" href="#17">17</a> <a name="18" href="#18">18</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">class</strong> <a href="../../../org/slf4j/migrator/InplaceFileConverter.html">InplaceFileConverter</a> { <a name="19" href="#19">19</a> <a name="20" href="#20">20</a> <strong class="jxr_keyword">final</strong> <strong class="jxr_keyword">static</strong> <strong class="jxr_keyword">int</strong> BUFFER_LEN = 8 * 1024; <a name="21" href="#21">21</a> <strong class="jxr_keyword">final</strong> <a href="../../../org/slf4j/migrator/line/LineConverter.html">LineConverter</a> lineConverter; <a name="22" href="#22">22</a> <strong class="jxr_keyword">final</strong> String lineTerminator; <a name="23" href="#23">23</a> <strong class="jxr_keyword">final</strong> <a href="../../../org/slf4j/migrator/internal/ProgressListener.html">ProgressListener</a> pl; <a name="24" href="#24">24</a> <a name="25" href="#25">25</a> <a href="../../../org/slf4j/migrator/InplaceFileConverter.html">InplaceFileConverter</a>(<a href="../../../org/slf4j/migrator/line/RuleSet.html">RuleSet</a> ruleSet, <a href="../../../org/slf4j/migrator/internal/ProgressListener.html">ProgressListener</a> pl) { <a name="26" href="#26">26</a> <strong class="jxr_keyword">this</strong>.lineConverter = <strong class="jxr_keyword">new</strong> <a href="../../../org/slf4j/migrator/line/LineConverter.html">LineConverter</a>(ruleSet); <a name="27" href="#27">27</a> lineTerminator = System.getProperty(<span class="jxr_string">"line.separator"</span>); <a name="28" href="#28">28</a> <strong class="jxr_keyword">this</strong>.pl = pl; <a name="29" href="#29">29</a> } <a name="30" href="#30">30</a> <a name="31" href="#31">31</a> <strong class="jxr_keyword">private</strong> byte[] readIntoByteArray(File file) <strong class="jxr_keyword">throws</strong> IOException { <a name="32" href="#32">32</a> FileInputStream fis = <strong class="jxr_keyword">new</strong> FileInputStream(file); <a name="33" href="#33">33</a> ByteArrayOutputStream baos = <strong class="jxr_keyword">new</strong> ByteArrayOutputStream(); <a name="34" href="#34">34</a> <strong class="jxr_keyword">int</strong> n = 0; <a name="35" href="#35">35</a> byte[] buffer = <strong class="jxr_keyword">new</strong> byte[BUFFER_LEN]; <a name="36" href="#36">36</a> <strong class="jxr_keyword">while</strong> ((n = fis.read(buffer)) != -1) { <a name="37" href="#37">37</a> <em class="jxr_comment">// System.out.println("ba="+new String(buffer, "UTF-8"));</em> <a name="38" href="#38">38</a> baos.write(buffer, 0, n); <a name="39" href="#39">39</a> } <a name="40" href="#40">40</a> fis.close(); <a name="41" href="#41">41</a> <strong class="jxr_keyword">return</strong> baos.toByteArray(); <a name="42" href="#42">42</a> } <a name="43" href="#43">43</a> <a name="44" href="#44">44</a> <strong class="jxr_keyword">void</strong> convert(File file) <strong class="jxr_keyword">throws</strong> IOException { <a name="45" href="#45">45</a> byte[] originalBytes = readIntoByteArray(file); <a name="46" href="#46">46</a> byte[] convertedBytes = convertIntoTempByteArray(originalBytes); <a name="47" href="#47">47</a> <strong class="jxr_keyword">if</strong> (lineConverter.atLeastOneMatchOccured()) { <a name="48" href="#48">48</a> <em class="jxr_comment">//System.out.println("Converting ["+file+"]");</em> <a name="49" href="#49">49</a> writeConvertedBytesIntoFile(file, convertedBytes); <a name="50" href="#50">50</a> pl.onInplaceConversion(file); <a name="51" href="#51">51</a> } <strong class="jxr_keyword">else</strong> { <a name="52" href="#52">52</a> <em class="jxr_comment">//System.out.println("Not touching ["+file+"]");</em> <a name="53" href="#53">53</a> } <a name="54" href="#54">54</a> } <a name="55" href="#55">55</a> <a name="56" href="#56">56</a> <strong class="jxr_keyword">private</strong> <strong class="jxr_keyword">void</strong> writeConvertedBytesIntoFile(File file, byte[] convertedBytes) <strong class="jxr_keyword">throws</strong> IOException { <a name="57" href="#57">57</a> FileOutputStream fos = <strong class="jxr_keyword">new</strong> FileOutputStream(file); <a name="58" href="#58">58</a> fos.write(convertedBytes); <a name="59" href="#59">59</a> fos.flush(); <a name="60" href="#60">60</a> fos.close(); <a name="61" href="#61">61</a> } <a name="62" href="#62">62</a> <a name="63" href="#63">63</a> <strong class="jxr_keyword">private</strong> byte[] convertIntoTempByteArray(byte[] input) <strong class="jxr_keyword">throws</strong> IOException { <a name="64" href="#64">64</a> ByteArrayInputStream bais = <strong class="jxr_keyword">new</strong> ByteArrayInputStream(input); <a name="65" href="#65">65</a> Reader reader = <strong class="jxr_keyword">new</strong> InputStreamReader(bais); <a name="66" href="#66">66</a> BufferedReader breader = <strong class="jxr_keyword">new</strong> BufferedReader(reader); <a name="67" href="#67">67</a> ByteArrayOutputStream baos = <strong class="jxr_keyword">new</strong> ByteArrayOutputStream(); <a name="68" href="#68">68</a> <strong class="jxr_keyword">while</strong> (<strong class="jxr_keyword">true</strong>) { <a name="69" href="#69">69</a> String line = breader.readLine(); <a name="70" href="#70">70</a> <strong class="jxr_keyword">if</strong> (line != <strong class="jxr_keyword">null</strong>) { <a name="71" href="#71">71</a> String[] replacement = lineConverter.getReplacement(line); <a name="72" href="#72">72</a> writeReplacement(baos, replacement); <a name="73" href="#73">73</a> } <strong class="jxr_keyword">else</strong> { <a name="74" href="#74">74</a> <strong class="jxr_keyword">break</strong>; <a name="75" href="#75">75</a> } <a name="76" href="#76">76</a> } <a name="77" href="#77">77</a> <strong class="jxr_keyword">return</strong> baos.toByteArray(); <a name="78" href="#78">78</a> } <a name="79" href="#79">79</a> <a name="80" href="#80">80</a> <strong class="jxr_keyword">private</strong> <strong class="jxr_keyword">void</strong> writeReplacement(OutputStream os, String[] replacement) <a name="81" href="#81">81</a> <strong class="jxr_keyword">throws</strong> IOException { <a name="82" href="#82">82</a> <strong class="jxr_keyword">for</strong> (<strong class="jxr_keyword">int</strong> i = 0; i &lt; replacement.length; i++) { <a name="83" href="#83">83</a> os.write(replacement[i].getBytes()); <a name="84" href="#84">84</a> os.write(lineTerminator.getBytes()); <a name="85" href="#85">85</a> } <a name="86" href="#86">86</a> } <a name="87" href="#87">87</a> } </pre> <hr/><div id="footer">This page was automatically generated by <a href="http://maven.apache.org/">Maven</a></div></body> </html>
{'content_hash': 'a0d683d19746b6ef8678ddc30cd28cb0', 'timestamp': '', 'source': 'github', 'line_count': 101, 'max_line_length': 295, 'avg_line_length': 88.81188118811882, 'alnum_prop': 0.6350055741360089, 'repo_name': 'virtix/mut4j', 'id': '511b23a6e2b065125889b2287d94e789495f1144', 'size': '8970', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'lib/slf4j-1.6.0/site/xref/org/slf4j/migrator/InplaceFileConverter.html', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C', 'bytes': '12860'}, {'name': 'C++', 'bytes': '10287'}, {'name': 'Java', 'bytes': '12763391'}, {'name': 'JavaScript', 'bytes': '56884'}, {'name': 'PHP', 'bytes': '2581'}, {'name': 'Perl', 'bytes': '7771'}, {'name': 'Python', 'bytes': '6682'}, {'name': 'Shell', 'bytes': '11842'}]}
<?php namespace Pagekit\Auth\Event; use Pagekit\Auth\UserInterface; use Pagekit\Event\Event as BaseEvent; class Event extends BaseEvent { /** * @var UserInterface */ protected $user; /** * Constructor. * * @param UserInterface $user */ public function __construct($name, UserInterface $user = null) { parent::__construct($name); $this->user = $user; } /** * Gets the user. * * @return UserInterface|null */ public function getUser() { return $this->user; } }
{'content_hash': '46524a5ac6196955cdcfc5abaec64006', 'timestamp': '', 'source': 'github', 'line_count': 36, 'max_line_length': 66, 'avg_line_length': 16.083333333333332, 'alnum_prop': 0.5561312607944733, 'repo_name': 'zqcloveping/pagekit', 'id': 'b85bc0257cb6cd6be0f7d60a1d59031dfd6608c3', 'size': '579', 'binary': False, 'copies': '17', 'ref': 'refs/heads/master', 'path': 'app/modules/auth/src/Event/Event.php', 'mode': '33261', 'license': 'mit', 'language': [{'name': 'ApacheConf', 'bytes': '1708'}, {'name': 'CSS', 'bytes': '1624545'}, {'name': 'HTML', 'bytes': '6449'}, {'name': 'JavaScript', 'bytes': '1476479'}, {'name': 'PHP', 'bytes': '1858406'}, {'name': 'Vue', 'bytes': '134238'}]}
package org.apache.sysml.runtime.io; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.stream.Collectors; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.sysml.hops.OptimizerUtils; import org.apache.sysml.runtime.matrix.data.DenseBlock; import org.apache.sysml.runtime.matrix.data.IJV; import org.apache.sysml.runtime.matrix.data.InputInfo; import org.apache.sysml.runtime.matrix.data.MatrixBlock; import org.apache.sysml.runtime.matrix.data.SparseBlock; import org.apache.sysml.runtime.util.CommonThreadPool; import org.apache.sysml.runtime.util.FastStringTokenizer; import org.apache.sysml.runtime.util.MapReduceTool; import org.apache.sysml.runtime.util.UtilFunctions; /** * Parallel version of ReaderTextCell.java. To summarize, we create read tasks per split * and use a fixed-size thread pool, to executed these tasks. If the target matrix is dense, * the inserts are done lock-free. If the matrix is sparse, we use a buffer to collect * unordered input cells, lock the the target sparse matrix once, and append all buffered values. * * Note MatrixMarket: * 1) For matrix market files each read task probes for comments until it finds data because * for very small tasks or large comments, any split might encounter % or %%. Hence, * the parallel reader does not do the validity check for. * 2) In extreme scenarios, the last comment might be in one split, and the following meta data * in the subsequent split. This would create incorrect results or errors. However, this * scenario is extremely unlikely (num threads &gt; num lines if 1 comment line) and hence ignored * similar to our parallel MR setting (but there we have a 128MB guarantee). * 3) However, we use MIN_FILESIZE_MM (8KB) to give guarantees for the common case of small headers * in order the issue described in (2). * */ public class ReaderTextCellParallel extends ReaderTextCell { private static final long MIN_FILESIZE_MM = 8L * 1024; //8KB private int _numThreads = 1; public ReaderTextCellParallel(InputInfo info) { super(info, false); _numThreads = OptimizerUtils.getParallelTextReadParallelism(); } @Override protected void readTextCellMatrixFromHDFS( Path path, JobConf job, MatrixBlock dest, long rlen, long clen, int brlen, int bclen ) throws IOException { int par = _numThreads; FileInputFormat.addInputPath(job, path); TextInputFormat informat = new TextInputFormat(); informat.configure(job); //check for min file size for matrix market (adjust num splits if necessary) if( _isMMFile ){ long len = MapReduceTool.getFilesizeOnHDFS(path); par = ( len < MIN_FILESIZE_MM ) ? 1: par; } try { ExecutorService pool = CommonThreadPool.get(par); InputSplit[] splits = informat.getSplits(job, par); //count nnz per row for sparse preallocation if( dest.isInSparseFormat() ) { int[] rNnz = new int[(int)rlen]; boolean isSymmetric = _isMMFile && _mmProps.isSymmetric(); List<CountNnzTask> tasks = Arrays.stream(splits) .map(s ->new CountNnzTask(s, informat, job, rNnz, isSymmetric)) .collect(Collectors.toList()); List<Future<Void>> rt1 = pool.invokeAll(tasks); for( Future<Void> task : rt1 ) task.get(); SparseBlock sblock = dest.allocateBlock().getSparseBlock(); for( int i=0; i<rlen; i++ ) if( rNnz[i] > 0 ) sblock.allocate(i, UtilFunctions.roundToNext(rNnz[i], 4)); } //create and execute read tasks for all splits List<ReadTask> tasks = Arrays.stream(splits) .map(s ->new ReadTask(s, informat, job, dest, rlen, clen, _isMMFile, _mmProps)) .collect(Collectors.toList()); List<Future<Long>> rt2 = pool.invokeAll(tasks); //check for exceptions and aggregate nnz long lnnz = 0; for( Future<Long> task : rt2 ) lnnz += task.get(); //post-processing dest.setNonZeros( lnnz ); if( dest.isInSparseFormat() ) sortSparseRowsParallel(dest, rlen, _numThreads, pool); pool.shutdown(); } catch (Exception e) { throw new IOException("Threadpool issue, while parallel read.", e); } } public static class ReadTask implements Callable<Long> { private final InputSplit _split; private final boolean _sparse; private final TextInputFormat _informat; private final JobConf _job; private final MatrixBlock _dest; private final long _rlen; private final long _clen; private final boolean _matrixMarket; private final FileFormatPropertiesMM _mmProps; public ReadTask( InputSplit split, TextInputFormat informat, JobConf job, MatrixBlock dest, long rlen, long clen, boolean mm, FileFormatPropertiesMM mmProps ) { _split = split; _sparse = dest.isInSparseFormat(); _informat = informat; _job = job; _dest = dest; _rlen = rlen; _clen = clen; _matrixMarket = mm; _mmProps = mmProps; } @Override public Long call() throws Exception { long lnnz = 0; //aggregate block nnz //writables for reuse during read LongWritable key = new LongWritable(); Text value = new Text(); IJV cell = new IJV(); FastStringTokenizer st = new FastStringTokenizer(' '); RecordReader<LongWritable,Text> reader = _informat.getRecordReader(_split, _job, Reporter.NULL); try { // Read the header lines, if reading from a matrixMarket file if ( _matrixMarket ) { // skip until end-of-comments (%% or %) boolean foundComment = false; while( reader.next(key, value) && value.toString().charAt(0) == '%' ) { //do nothing just skip comments foundComment = true; } //process current value (otherwise ignore following meta data) if( !foundComment ) { cell = parseCell(value.toString(), st, cell, _mmProps); synchronized( _dest ){ //sparse requires lock lnnz += appendCell(cell, _dest, _mmProps); } } } if( _sparse ) { //SPARSE<-value CellBuffer buff = new CellBuffer(); while( reader.next(key, value) ) { cell = parseCell(value.toString(), st, cell, _mmProps); buff.addCell(cell.getI(), cell.getJ(), cell.getV()); if( _mmProps != null && _mmProps.isSymmetric() && !cell.onDiag() ) buff.addCell(cell.getJ(), cell.getI(), cell.getV()); if( buff.size()>=CellBuffer.CAPACITY ) synchronized( _dest ){ //sparse requires lock lnnz += buff.size(); buff.flushCellBufferToMatrixBlock(_dest); } } //final buffer flush synchronized( _dest ){ //sparse requires lock lnnz += buff.size(); buff.flushCellBufferToMatrixBlock(_dest); } } else { //DENSE<-value DenseBlock a = _dest.getDenseBlock(); while( reader.next(key, value) ) { cell = parseCell(value.toString(), st, cell, _mmProps); lnnz += appendCell(cell, a, _mmProps); } } } catch(Exception ex) { //post-mortem error handling and bounds checking if( cell.getI() < 0 || cell.getI() + 1 > _rlen || cell.getJ() < 0 || cell.getJ() + 1 > _clen ) throw new RuntimeException("Matrix cell ["+(cell.getI()+1)+","+(cell.getJ()+1)+"] " + "out of overall matrix range [1:"+_rlen+",1:"+_clen+"]. ", ex); else throw new RuntimeException("Unable to read matrix in text cell format. ", ex); } finally { IOUtilFunctions.closeSilently(reader); } return lnnz; } } public static class CountNnzTask implements Callable<Void> { private final InputSplit _split; private final TextInputFormat _informat; private final JobConf _job; private final int[] _rNnz; private final boolean _isSymmetric; public CountNnzTask( InputSplit split, TextInputFormat informat, JobConf job, int[] rNnz, boolean isSymmetric ) { _split = split; _informat = informat; _job = job; _rNnz = rNnz; _isSymmetric = isSymmetric; } @Override public Void call() throws Exception { LongWritable key = new LongWritable(); Text value = new Text(); FastStringTokenizer st = new FastStringTokenizer(' '); RecordReader<LongWritable,Text> reader = _informat.getRecordReader(_split, _job, Reporter.NULL); try { //counting without locking as conflicts unlikely while( reader.next(key, value) ) { if( value.toString().charAt(0) == '%' ) continue; st.reset( value.toString() ); _rNnz[(int)st.nextLong()-1] ++; if( _isSymmetric ) _rNnz[(int)st.nextLong()-1] ++; } } finally { IOUtilFunctions.closeSilently(reader); } return null; } } /** * Useful class for buffering unordered cells before locking target onces and * appending all buffered cells. * */ public static class CellBuffer { public static final int CAPACITY = 100*1024; //100K elements private int[] _rlen; private int[] _clen; private double[] _vals; private int _pos; public CellBuffer( ) { _rlen = new int[CAPACITY]; _clen = new int[CAPACITY]; _vals = new double[CAPACITY]; _pos = -1; } public void addCell(int rlen, int clen, double val) { if( val==0 ) return; _pos++; _rlen[_pos] = rlen; _clen[_pos] = clen; _vals[_pos] = val; } public void flushCellBufferToMatrixBlock( MatrixBlock dest ) { for( int i=0; i<=_pos; i++ ) dest.appendValue(_rlen[i], _clen[i], _vals[i]); reset(); } public int size() { return _pos+1; } public void reset() { _pos = -1; } } }
{'content_hash': '24a5cd32b533aeac07eec831dce87a05', 'timestamp': '', 'source': 'github', 'line_count': 305, 'max_line_length': 162, 'avg_line_length': 32.4655737704918, 'alnum_prop': 0.6757220763482125, 'repo_name': 'niketanpansare/systemml', 'id': '864dfda7777ba4072326aac2c9e53daf33ff6d0f', 'size': '10711', 'binary': False, 'copies': '6', 'ref': 'refs/heads/master', 'path': 'src/main/java/org/apache/sysml/runtime/io/ReaderTextCellParallel.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'ANTLR', 'bytes': '31629'}, {'name': 'Batchfile', 'bytes': '23989'}, {'name': 'C', 'bytes': '9256'}, {'name': 'C++', 'bytes': '35736'}, {'name': 'CMake', 'bytes': '10373'}, {'name': 'Cuda', 'bytes': '93696'}, {'name': 'Java', 'bytes': '14397494'}, {'name': 'Jupyter Notebook', 'bytes': '107164'}, {'name': 'Makefile', 'bytes': '2470'}, {'name': 'Python', 'bytes': '402773'}, {'name': 'R', 'bytes': '828062'}, {'name': 'Scala', 'bytes': '337451'}, {'name': 'Shell', 'bytes': '165430'}]}
import { Component, Input, ChangeDetectionStrategy } from 'angular2/core'; @Component({ changeDetection: ChangeDetectionStrategy.OnPush, selector: 'CommentItemRenderer', template: '<li><a (click)="toggleComment()" href="#">{{comment.text}}</a></li>', directives : [] }) export class CommentItemRenderer { @Input() comment; toggleComment(){ this.comment.toggle(); } }
{'content_hash': 'dcb09c4881fb31d90f42b2a8f56d6df5', 'timestamp': '', 'source': 'github', 'line_count': 20, 'max_line_length': 82, 'avg_line_length': 19.3, 'alnum_prop': 0.694300518134715, 'repo_name': 'mattimatti/Angular2Exercises', 'id': '32fed49b36c6e9e07146f55a17edbacf33ad00bf', 'size': '386', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'src/app/components/comments/CommentItemRenderer.ts', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'CSS', 'bytes': '785'}, {'name': 'HTML', 'bytes': '5620'}, {'name': 'JavaScript', 'bytes': '27310'}, {'name': 'Smarty', 'bytes': '25'}, {'name': 'TypeScript', 'bytes': '27004'}]}
import pytest import midware.core as core @pytest.fixture def nested_dict(): return {'a': {'b': 0}, 'c': 1} def test_get_in(nested_dict): assert core.get_in(nested_dict, ('a')) == {'b': 0} assert core.get_in(nested_dict, ('a', 'b')) == 0 assert core.get_in(nested_dict, ('c')) == 1 assert core.get_in(nested_dict, ('a', 'd')) == None assert core.get_in(nested_dict, ('d')) == None def test_get_in_default(nested_dict): assert core.get_in(nested_dict, ('a', 'b')) == 0 assert core.get_in(nested_dict, ('c', 'd')) == None assert core.get_in(nested_dict, ('a', 'd')) == None assert core.get_in(nested_dict, ('a', 'd'), default=0) == 0 def test_assoc_in(): d = {} core.assoc_in(d, ('a'), 0) assert d['a'] == 0 with pytest.raises(TypeError): core.assoc_in(d, ('a', 'b'), 0) core.assoc_in(d, ('b', 'c'), 1) assert d['b']['c'] == 1 def test_identity(): assert core.identity(1) == 1 assert core.identity('a') == 'a' assert core.identity(None) == None def add_two(x): return x + 2 def add_three(x): return x + 3 def test_compose(): assert core.compose(add_two, add_three)(1) == 6 def test_compose_order(): assert core.compose(sum, add_two, add_three)([1, 2, 3]) == 11 @pytest.fixture def verbose(scope='function'): core._VERBOSE_MODE = True yield core._VERBOSE_MODE = False def test_print_inwards(verbose, capsys): core._print_inwards('abc') out, err = capsys.readouterr() assert out == 'abc--->\n' assert err == '' def test_print_outwards(verbose, capsys): core._print_outwards('xyz') out, err = capsys.readouterr() assert out == '<---xyz\n' assert err == '' import os.path as fs def test_mw_from_cm_enter(tmpdir): tmpfile = fs.join(tmpdir, 'tmp.txt') with open(tmpfile, 'w') as f: print('abc', file=f) ks = ('file', 'desc') mw = core.mw_from_cm('test_mw', open, ks, {}, file=tmpfile) lines = None def handler(ctx): with core.get_in(ctx, ks) as f: assert f.readlines() == ['abc\n'] return ctx ctx = mw(handler)({}) def test_mw_from_cm_exit(tmpdir): tmpfile = fs.join(tmpdir, 'tmp.txt') with open(tmpfile, 'w') as f: print('abc', file=f) ks = ('file', 'desc') mw = core.mw_from_cm('test_mw', open, ks, {}, file=tmpfile) ctx = mw(core.identity)({}) with pytest.raises(ValueError): with core.get_in(ctx, ks) as f: f.readlines() def test_mw_from_cm_exit(tmpdir, verbose, capsys): tmpfile = fs.join(tmpdir, 'tmp.txt') with open(tmpfile, 'w') as f: print('abc', file=f) ks = ('file', 'desc') mw = core.mw_from_cm('test_cm_mw', open, ks, {}, file=tmpfile) ctx = mw(core.identity)({}) out, err = capsys.readouterr() assert out == 'test_cm_mw--->\n<---test_cm_mw\n' assert err == '' @core.middleware('wrap_add') def wrap_add(ctx): amount = ctx['amount'] ctx['value'] += amount new_ctx = yield ctx new_ctx['post'] = True yield new_ctx def test_middleware(): def add_one(ctx): ctx['value'] += 1 return ctx ctx = wrap_add(add_one)({'value': 1, 'amount': 2}) assert ctx['value'] == 4 assert ctx['post'] def test_middleware_verbose(verbose, capsys): ctx = wrap_add(core.identity)({'value': 1, 'amount': 2}) assert ctx['value'] == 3 assert ctx['post'] out, err = capsys.readouterr() assert out == 'wrap_add--->\n<---wrap_add\n' assert err == '' @core.middleware('wrap_sub', 1) def wrap_sub(ctx, amount): ctx['value'] -= amount new_ctx = yield ctx yield new_ctx def test_middleware_args(): ctx = wrap_sub(core.identity)({'value': 1}) assert ctx['value'] == 0 @core.middleware('wrap_replace') def wrap_replace(ctx): _ = yield ctx yield {'replacement': True} def test_middleware_replacement(): ctx = wrap_add(wrap_replace(core.identity))({'value': 1, 'amount': 2}) assert ctx == {'replacement': True, 'post': True} def wrap_unnamed(handle): def new_handle(ctx): new_ctx = handle(ctx) return new_ctx return new_handle def test_unnamed_verbose(verbose, capsys): ctx = wrap_unnamed(core.identity)({}) out, err = capsys.readouterr() assert out == '' assert err == '' @core.named('wrap_named') def wrap_named(handle): def new_handle(ctx): new_ctx = handle(ctx) return new_ctx return new_handle def test_named_verbose(verbose, capsys): ctx = wrap_named(core.identity)({}) out, err = capsys.readouterr() assert out == 'wrap_named--->\n<---wrap_named\n' assert err == ''
{'content_hash': '6e2fd36934e710c2fa53bb01674ef653', 'timestamp': '', 'source': 'github', 'line_count': 228, 'max_line_length': 74, 'avg_line_length': 20.68421052631579, 'alnum_prop': 0.5750636132315522, 'repo_name': 'idmit/midware', 'id': '73b2a862d5b8b982c7c65e3b805413d56c4f713f', 'size': '4741', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'tests/test_core.py', 'mode': '33261', 'license': 'mit', 'language': [{'name': 'Python', 'bytes': '14356'}]}
<?xml version="1.0" encoding="utf-8"?> <!-- Copyright (C) 2014 The Android Open Source Project Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <android.support.v7.internal.widget.TintImageView xmlns:android="http://schemas.android.com/apk/res/android" android:id="@+id/action_mode_close_button" android:contentDescription="@string/abc_action_mode_done" android:focusable="true" android:clickable="true" android:src="?attr/actionModeCloseDrawable" style="?attr/actionModeCloseButtonStyle" android:layout_width="wrap_content" android:layout_height="match_parent" /><!-- From: file:/usr/local/google/buildbot/repo_clients/https___googleplex-android.googlesource.com_a_platform_manifest.git/mnc-sdk-release/frameworks/support/v7/appcompat/res/layout/abc_action_mode_close_item_material.xml --><!-- From: file:/Users/Oodrye/Downloads/clarifai-android-starter-master/app/build/intermediates/exploded-aar/com.android.support/appcompat-v7/23.0.1/res/layout/abc_action_mode_close_item_material.xml -->
{'content_hash': '86c52367400b020bcf6714a23bfe8cda', 'timestamp': '', 'source': 'github', 'line_count': 26, 'max_line_length': 481, 'avg_line_length': 62.96153846153846, 'alnum_prop': 0.7110568112400732, 'repo_name': 'aul004/hacktech', 'id': '39a708fdedb1aa012d4b60f5cf4a3449a3d366d1', 'size': '1637', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'app/build/intermediates/res/merged/debug/layout/abc_action_mode_close_item_material.xml', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Java', 'bytes': '1190378'}]}
var MapsVector = function () { var setMap = function (name) { var data = { map: 'world_en', backgroundColor: null, borderColor: '#333333', borderOpacity: 0.5, borderWidth: 1, color: '#c6c6c6', enableZoom: true, hoverColor: '#c9dfaf', hoverOpacity: null, values: sample_data, normalizeFunction: 'linear', scaleColors: ['#b6da93', '#427d1a'], selectedColor: '#c9dfaf', selectedRegion: null, showTooltip: true, onRegionOver: function (event, code) { //sample to interact with map if (code == 'ca') { event.preventDefault(); } }, onRegionClick: function (element, code, region) { //sample to interact with map var message = 'You clicked "' + region + '" which has the code: ' + code.toUpperCase(); alert(message); } }; data.map = name + '_en'; var map = jQuery('#vmap_' + name); if (!map) { return; } map.width(map.parent().width()); map.vectorMap(data); } return { //main function to initiate map samples init: function () { setMap("world"); setMap("usa"); setMap("europe"); setMap("russia"); setMap("germany"); // redraw maps on window or content resized App.addResponsiveHandler(function () { setMap("world"); setMap("usa"); setMap("europe"); setMap("russia"); setMap("germany"); }); } }; }();
{'content_hash': '898eb0ebaef7674d0b118bda2ff4fa85', 'timestamp': '', 'source': 'github', 'line_count': 64, 'max_line_length': 103, 'avg_line_length': 28.6875, 'alnum_prop': 0.4422657952069717, 'repo_name': 'ShahidulHasan/pms', 'id': '956171ecca2cfbad22d1839027e0ac5e9324d828', 'size': '1836', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'app/Resources/assets/scripts/maps-vector.js', 'mode': '33261', 'license': 'mit', 'language': [{'name': 'CSS', 'bytes': '390443'}, {'name': 'JavaScript', 'bytes': '2714454'}, {'name': 'PHP', 'bytes': '366124'}, {'name': 'Perl', 'bytes': '2687'}]}
UNIX_FibreProtocolServiceFixture::UNIX_FibreProtocolServiceFixture() { } UNIX_FibreProtocolServiceFixture::~UNIX_FibreProtocolServiceFixture() { } void UNIX_FibreProtocolServiceFixture::Run() { CIMName className("UNIX_FibreProtocolService"); CIMNamespaceName nameSpace("root/cimv2"); UNIX_FibreProtocolService _p; UNIX_FibreProtocolServiceProvider _provider; Uint32 propertyCount; CIMOMHandle omHandle; _provider.initialize(omHandle); _p.initialize(); for(int pIndex = 0; _p.load(pIndex); pIndex++) { CIMInstance instance = _provider.constructInstance(className, nameSpace, _p); CIMObjectPath path = instance.getPath(); cout << path.toString() << endl; propertyCount = instance.getPropertyCount(); for(Uint32 i = 0; i < propertyCount; i++) { CIMProperty propertyItem = instance.getProperty(i); cout << " Name: " << propertyItem.getName().getString() << " - Value: " << propertyItem.getValue().toString() << endl; } cout << "------------------------------------" << endl; cout << endl; } _p.finalize(); }
{'content_hash': 'a6abbe99172f2f6039b841fbe8aec5eb', 'timestamp': '', 'source': 'github', 'line_count': 41, 'max_line_length': 121, 'avg_line_length': 25.78048780487805, 'alnum_prop': 0.6859035004730369, 'repo_name': 'brunolauze/openpegasus-providers-old', 'id': '2c5585f1aa05296afb82e9610bfcda645c7cb4b8', 'size': '2862', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'src/Providers/UNIXProviders/tests/UNIXProviders.Tests/UNIX_FibreProtocolServiceFixture.cpp', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'C', 'bytes': '2631164'}, {'name': 'C++', 'bytes': '120884401'}, {'name': 'Objective-C', 'bytes': '64'}, {'name': 'Shell', 'bytes': '17094'}]}
namespace net { ProxyInfo::ProxyInfo() : config_id_(ProxyConfig::kInvalidConfigID), config_source_(PROXY_CONFIG_SOURCE_UNKNOWN), did_bypass_proxy_(false), did_use_pac_script_(false) { } ProxyInfo::ProxyInfo(const ProxyInfo& other) = default; ProxyInfo::~ProxyInfo() { } void ProxyInfo::Use(const ProxyInfo& other) { proxy_resolve_start_time_ = other.proxy_resolve_start_time_; proxy_resolve_end_time_ = other.proxy_resolve_end_time_; proxy_list_ = other.proxy_list_; proxy_retry_info_ = other.proxy_retry_info_; config_id_ = other.config_id_; config_source_ = other.config_source_; did_bypass_proxy_ = other.did_bypass_proxy_; did_use_pac_script_ = other.did_use_pac_script_; } void ProxyInfo::UseDirect() { Reset(); proxy_list_.SetSingleProxyServer(ProxyServer::Direct()); } void ProxyInfo::UseDirectWithBypassedProxy() { UseDirect(); did_bypass_proxy_ = true; } void ProxyInfo::UseNamedProxy(const std::string& proxy_uri_list) { Reset(); proxy_list_.Set(proxy_uri_list); } void ProxyInfo::UseProxyServer(const ProxyServer& proxy_server) { Reset(); proxy_list_.SetSingleProxyServer(proxy_server); } void ProxyInfo::UsePacString(const std::string& pac_string) { Reset(); proxy_list_.SetFromPacString(pac_string); } void ProxyInfo::UseProxyList(const ProxyList& proxy_list) { Reset(); proxy_list_ = proxy_list; } void ProxyInfo::OverrideProxyList(const ProxyList& proxy_list) { proxy_list_ = proxy_list; } std::string ProxyInfo::ToPacString() const { return proxy_list_.ToPacString(); } bool ProxyInfo::Fallback(int net_error, const BoundNetLog& net_log) { return proxy_list_.Fallback(&proxy_retry_info_, net_error, net_log); } void ProxyInfo::DeprioritizeBadProxies( const ProxyRetryInfoMap& proxy_retry_info) { proxy_list_.DeprioritizeBadProxies(proxy_retry_info); } void ProxyInfo::RemoveProxiesWithoutScheme(int scheme_bit_field) { proxy_list_.RemoveProxiesWithoutScheme(scheme_bit_field); } void ProxyInfo::Reset() { proxy_resolve_start_time_ = base::TimeTicks(); proxy_resolve_end_time_ = base::TimeTicks(); proxy_list_.Clear(); proxy_retry_info_.clear(); config_id_ = ProxyConfig::kInvalidConfigID; config_source_ = PROXY_CONFIG_SOURCE_UNKNOWN; did_bypass_proxy_ = false; did_use_pac_script_ = false; } } // namespace net
{'content_hash': '42465360eeb0fdfa7cc01d7e468964e8', 'timestamp': '', 'source': 'github', 'line_count': 88, 'max_line_length': 70, 'avg_line_length': 26.568181818181817, 'alnum_prop': 0.7159965782720273, 'repo_name': 'heke123/chromium-crosswalk', 'id': '75000791c52c1f3985ea2373f47a52a3a16f3d35', 'size': '2584', 'binary': False, 'copies': '11', 'ref': 'refs/heads/master', 'path': 'net/proxy/proxy_info.cc', 'mode': '33188', 'license': 'bsd-3-clause', 'language': []}
from OSEncryptionState import * class SelinuxState(OSEncryptionState): def __init__(self, context): super(SelinuxState, self).__init__('SelinuxState', context) def should_enter(self): self.context.logger.log("Verifying if machine should enter selinux state") if not super(SelinuxState, self).should_enter(): return False self.context.logger.log("Performing enter checks for selinux state") return True def enter(self): if not self.should_enter(): return self.context.logger.log("Entering selinux state") se_linux_status = self.context.encryption_environment.get_se_linux() if(se_linux_status.lower() == 'enforcing'): self.context.logger.log("SELinux is in enforcing mode, disabling") self.context.encryption_environment.disable_se_linux() def should_exit(self): self.context.logger.log("Verifying if machine should exit selinux state") return super(SelinuxState, self).should_exit()
{'content_hash': '1543c6608d769a371a1c305472360e4e', 'timestamp': '', 'source': 'github', 'line_count': 31, 'max_line_length': 82, 'avg_line_length': 33.96774193548387, 'alnum_prop': 0.6562203228869895, 'repo_name': 'varunkumta/azure-linux-extensions', 'id': '633d4391eeb6382ff460a311b62fae9624c094ee', 'size': '1714', 'binary': False, 'copies': '16', 'ref': 'refs/heads/master', 'path': 'VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/SelinuxState.py', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'C', 'bytes': '39379'}, {'name': 'JavaScript', 'bytes': '22883'}, {'name': 'Makefile', 'bytes': '4033'}, {'name': 'PowerShell', 'bytes': '24124'}, {'name': 'Python', 'bytes': '3893505'}, {'name': 'Shell', 'bytes': '21864'}]}
<?php /** * Merges the External Module language file (English.ini) into REDCap's language file (English.ini). * This should be executed as part of the deployment/packaging pipeline, after the external module * framework files have been copied to redcap_vX.Y.Z/ExternalModules. * * Execute: * php redcap/redcap_vX.Y.Z/ExternalModules/merge_language_file.php * * After this file has been executed and was successfull, it can be deleted. * */ error_reporting(E_ALL); $redcap_lang_file = dirname(__DIR__) . "/LanguageUpdater/English.ini"; $em_lang_file = __DIR__ . "/classes/English.ini"; if (file_exists($em_lang_file)) { // Read REDCap and External Module Framework language files. $redcap = parse_ini_file($redcap_lang_file); $em = parse_ini_file(EM); // Combine both. $combined = array_merge($redcap, $em); // Prepare a string containing the full merged content. $ini = ""; foreach ($combined as $key => $value) { $ini .= $key . " = \"" . $value . "\"\n"; } // Write the merged content back to REDCap's language file. file_put_contents($redcap_lang_file, $ini); // Remove the now redundant English.ini from the framework. unlink($em_lang_file); }
{'content_hash': '567fc6e70223d8489f35d41c3c896b4d', 'timestamp': '', 'source': 'github', 'line_count': 35, 'max_line_length': 100, 'avg_line_length': 34.97142857142857, 'alnum_prop': 0.6650326797385621, 'repo_name': 'vanderbilt/redcap-external-modules', 'id': 'a1c2b4c34c61b465edb8e5ede2b55c7231fa64db', 'size': '1224', 'binary': False, 'copies': '1', 'ref': 'refs/heads/testing', 'path': 'merge_language_file.php', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'Batchfile', 'bytes': '139'}, {'name': 'CSS', 'bytes': '3733'}, {'name': 'JavaScript', 'bytes': '71419'}, {'name': 'PHP', 'bytes': '517709'}, {'name': 'Shell', 'bytes': '775'}]}
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /** * Object representing an image item (a photo or a video). * * @param {string} url Image url. * @constructor */ Gallery.Item = function(url) { this.url_ = url; this.original_ = true; }; /** * @return {string} Image url. */ Gallery.Item.prototype.getUrl = function() { return this.url_ }; /** * @param {string} url New url. */ Gallery.Item.prototype.setUrl = function(url) { this.url_ = url }; /** * @return {string} File name. */ Gallery.Item.prototype.getFileName = function() { return ImageUtil.getFullNameFromUrl(this.url_); }; /** * @return {boolean} True if this image has not been created in this session. */ Gallery.Item.prototype.isOriginal = function() { return this.original_ }; // TODO: Localize? /** * @type {string} Suffix for a edited copy file name. */ Gallery.Item.COPY_SIGNATURE = ' - Edited'; /** * Regular exression to match '... - Edited'. * @type {RegExp} */ Gallery.Item.REGEXP_COPY_0 = new RegExp('^(.+)' + Gallery.Item.COPY_SIGNATURE + '$'); /** * Regular exression to match '... - Edited (N)'. * @type {RegExp} */ Gallery.Item.REGEXP_COPY_N = new RegExp('^(.+)' + Gallery.Item.COPY_SIGNATURE + ' \\((\\d+)\\)$'); /** * Create a name for an edited copy of the file. * * @param {Entry} dirEntry Entry. * @param {function} callback Callback. * @private */ Gallery.Item.prototype.createCopyName_ = function(dirEntry, callback) { var name = this.getFileName(); // If the item represents a file created during the current Gallery session // we reuse it for subsequent saves instead of creating multiple copies. if (!this.original_) { callback(name); return; } var ext = ''; var index = name.lastIndexOf('.'); if (index != -1) { ext = name.substr(index); name = name.substr(0, index); } if (!ext.match(/jpe?g/i)) { // Chrome can natively encode only two formats: JPEG and PNG. // All non-JPEG images are saved in PNG, hence forcing the file extension. ext = '.png'; } function tryNext(tries) { // All the names are used. Let's overwrite the last one. if (tries == 0) { setTimeout(callback, 0, name + ext); return; } // If the file name contains the copy signature add/advance the sequential // number. var matchN = Gallery.Item.REGEXP_COPY_N.exec(name); var match0 = Gallery.Item.REGEXP_COPY_0.exec(name); if (matchN && matchN[1] && matchN[2]) { var copyNumber = parseInt(matchN[2], 10) + 1; name = matchN[1] + Gallery.Item.COPY_SIGNATURE + ' (' + copyNumber + ')'; } else if (match0 && match0[1]) { name = match0[1] + Gallery.Item.COPY_SIGNATURE + ' (1)'; } else { name += Gallery.Item.COPY_SIGNATURE; } dirEntry.getFile(name + ext, {create: false, exclusive: false}, tryNext.bind(null, tries - 1), callback.bind(null, name + ext)); } tryNext(10); }; /** * Write the new item content to the file. * * @param {Entry} overrideDir Directory to save to. If null, save to the same * directory as the original. * @param {boolean} overwrite True if overwrite, false if copy. * @param {HTMLCanvasElement} canvas Source canvas. * @param {ImageEncoder.MetadataEncoder} metadataEncoder MetadataEncoder. * @param {function(boolean)} opt_callback Callback accepting true for success. */ Gallery.Item.prototype.saveToFile = function( overrideDir, overwrite, canvas, metadataEncoder, opt_callback) { ImageUtil.metrics.startInterval(ImageUtil.getMetricName('SaveTime')); var name = this.getFileName(); var onSuccess = function(url) { console.log('Saved from gallery', name); ImageUtil.metrics.recordEnum(ImageUtil.getMetricName('SaveResult'), 1, 2); ImageUtil.metrics.recordInterval(ImageUtil.getMetricName('SaveTime')); this.setUrl(url); if (opt_callback) opt_callback(true); }.bind(this); function onError(error) { console.log('Error saving from gallery', name, error); ImageUtil.metrics.recordEnum(ImageUtil.getMetricName('SaveResult'), 0, 2); if (opt_callback) opt_callback(false); } function doSave(newFile, fileEntry) { fileEntry.createWriter(function(fileWriter) { function writeContent() { fileWriter.onwriteend = onSuccess.bind(null, fileEntry.toURL()); fileWriter.write(ImageEncoder.getBlob(canvas, metadataEncoder)); } fileWriter.onerror = function(error) { onError(error); // Disable all callbacks on the first error. fileWriter.onerror = null; fileWriter.onwriteend = null; }; if (newFile) { writeContent(); } else { fileWriter.onwriteend = writeContent; fileWriter.truncate(0); } }, onError); } function getFile(dir, newFile) { dir.getFile(name, {create: newFile, exclusive: newFile}, doSave.bind(null, newFile), onError); } function checkExistence(dir) { dir.getFile(name, {create: false, exclusive: false}, getFile.bind(null, dir, false /* existing file */), getFile.bind(null, dir, true /* create new file */)); } var saveToDir = function(dir) { if (overwrite) { checkExistence(dir); } else { this.createCopyName_(dir, function(copyName) { this.original_ = false; name = copyName; checkExistence(dir); }.bind(this)); } }.bind(this); if (overrideDir) { saveToDir(overrideDir); } else { webkitResolveLocalFileSystemURL(this.getUrl(), function(entry) { entry.getParent(saveToDir, onError)}, onError); } }; /** * Rename the file. * * @param {string} name New file name. * @param {function} onSuccess Success callback. * @param {function} onExists Called if the file with the new name exists. */ Gallery.Item.prototype.rename = function(name, onSuccess, onExists) { var oldName = this.getFileName(); if (ImageUtil.getExtensionFromFullName(name) == ImageUtil.getExtensionFromFullName(oldName)) { name = ImageUtil.getFileNameFromFullName(name); } var newName = ImageUtil.replaceFileNameInFullName(oldName, name); if (oldName == newName) return; function onError() { console.log('Rename error: "' + oldName + '" to "' + newName + '"'); } var onRenamed = function(entry) { this.setUrl(entry.toURL()); onSuccess(); }.bind(this); function moveIfDoesNotExist(entry, parentDir) { parentDir.getFile(newName, {create: false, exclusive: false}, onExists, function() { entry.moveTo(parentDir, newName, onRenamed, onError) }); } webkitResolveLocalFileSystemURL(this.getUrl(), function(entry) { entry.getParent(moveIfDoesNotExist.bind(null, entry), onError); }, onError); };
{'content_hash': '0a3a0292e34f274c4c46489c9d14ad5c', 'timestamp': '', 'source': 'github', 'line_count': 234, 'max_line_length': 79, 'avg_line_length': 29.39316239316239, 'alnum_prop': 0.6512067461471358, 'repo_name': 'leiferikb/bitpop-private', 'id': '8a82d52389b2373ccbab93f632faa89bf3d625c1', 'size': '6878', 'binary': False, 'copies': '3', 'ref': 'refs/heads/master', 'path': 'chrome/browser/resources/file_manager/js/photo/gallery_item.js', 'mode': '33188', 'license': 'bsd-3-clause', 'language': [{'name': 'AppleScript', 'bytes': '6973'}, {'name': 'Arduino', 'bytes': '464'}, {'name': 'Assembly', 'bytes': '1871'}, {'name': 'C', 'bytes': '1800028'}, {'name': 'C++', 'bytes': '76499582'}, {'name': 'CSS', 'bytes': '803682'}, {'name': 'Java', 'bytes': '1234788'}, {'name': 'JavaScript', 'bytes': '21793252'}, {'name': 'Objective-C', 'bytes': '5358744'}, {'name': 'PHP', 'bytes': '97817'}, {'name': 'Perl', 'bytes': '64410'}, {'name': 'Python', 'bytes': '3017857'}, {'name': 'Ruby', 'bytes': '650'}, {'name': 'Shell', 'bytes': '322362'}, {'name': 'XSLT', 'bytes': '418'}, {'name': 'nesC', 'bytes': '12138'}]}
<!DOCTYPE html> <html lang="en-us"> <head> <meta charset="utf-8" /> <title>Alexander Wong</title> <meta name="author" content="Alexander Wong" /> <meta name="description" content="Personal site containing content about self journey (technology and otherwise)." /> <meta name="twitter:card" content="summary" /> <meta name="twitter:site" content="@FindingUdia" /> <meta name="twitter:title" content="Alexander Wong" /> <meta name="twitter:description" content="Personal site containing content about self journey (technology and otherwise)." /> <meta name="twitter:image" content="https://alexander-wong.com/img/avatar.jpg" /> <meta name="generator" content="Hugo 0.31.1" /> <link rel="canonical" href="https://alexander-wong.com/" /> <link rel="alternative" href="https://alexander-wong.com/index.xml" title="Alexander Wong" type="application/atom+xml" /> <meta name="renderer" content="webkit" /> <meta name="viewport" content="width=device-width,initial-scale=1" /> <meta name="format-detection" content="telephone=no,email=no,adress=no" /> <meta http-equiv="Cache-Control" content="no-transform" /> <meta name="robots" content="index,follow" /> <meta name="referrer" content="origin-when-cross-origin" /> <meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" /> <meta name="apple-mobile-web-app-title" content="Alexander Wong" /> <meta name="msapplication-tooltip" content="Alexander Wong" /> <meta name='msapplication-navbutton-color' content="#5fbf5e" /> <meta name="msapplication-TileColor" content="#5fbf5e" /> <meta name="msapplication-TileImage" content="/img/tile-image-windows.png" /> <link rel="icon" href="https://alexander-wong.com/img/favicon.ico" /> <link rel="icon" type="image/png" sizes="16x16" href="https://alexander-wong.com/img/favicon-16x16.png" /> <link rel="icon" type="image/png" sizes="32x32" href="https://alexander-wong.com/img/favicon-32x32.png" /> <link rel="icon" sizes="192x192" href="https://alexander-wong.com/img/touch-icon-android.png" /> <link rel="apple-touch-icon" href="https://alexander-wong.com/img/touch-icon-apple.png" /> <link rel="mask-icon" href="https://alexander-wong.com/img/safari-pinned-tab.svg" color="#5fbf5e" /> <link rel="stylesheet" href="https://alexander-wong.com/css/bundle.css" /> <!--[if lt IE 9]> <script src="//cdn.bootcss.com/html5shiv/3.7.3/html5shiv.min.js"></script> <script src="//cdn.bootcss.com/respond.js/1.4.2/respond.min.js"></script> <script src="//cdn.bootcss.com/video.js/6.2.8/ie8/videojs-ie8.min.js"></script> <![endif]--> <!--[if lte IE 11]> <script src="//cdn.bootcss.com/classlist/1.1.20170427/classList.min.js"></script> <![endif]--> <script src="//cdn.bootcss.com/object-fit-images/3.2.3/ofi.min.js"></script> <script src="//cdn.bootcss.com/smooth-scroll/12.1.4/js/smooth-scroll.polyfills.min.js"></script> </head> <body> <div class="suspension"> <a title="Go to top" class="to-top is-hide"><span class="icon icon-up"></span></a> </div> <header class="site-header"> <img class="avatar" src="https://alexander-wong.com/img/avatar.png" alt="Avatar"> <h1 class="title">Alexander Wong</h1> <p class="subtitle">Incremental iterations towards meaning in life.</p> <button class="menu-toggle" type="button"> <span class="icon icon-menu"></span> </button> <nav class="site-menu collapsed"> <h2 class="offscreen">Main Menu</h2> <ul class="menu-list"> <li class="menu-item is-active "> <a href="https://alexander-wong.com/">Blog</a> </li> <li class="menu-item "> <a href="https://alexander-wong.com/about/">About</a> </li> <li class="menu-item "> <a href="https://alexander-wong.com/projects/">Projects</a> </li> <li class="menu-item "> <a href="https://alexander-wong.com/chatbot/">Chatbot</a> </li> </ul> </nav> <nav class="social-menu collapsed"> <h2 class="offscreen">Social Networks</h2> <ul class="social-list"> <li class="social-item"> <a href="mailto:[email protected]" title="Email"><span class="icon icon-email"></span></a> </li> <li class="social-item"> <a href="//github.com/awwong1" title="GitHub"><span class="icon icon-github"></span></a> </li> <li class="social-item"> <a href="//twitter.com/FindingUdia" title="Twitter"><span class="icon icon-twitter"></span></a> </li> <li class="social-item"> <a href="//www.linkedin.com/in/awwong1" title="Linkedin"><span class="icon icon-linkedin"></span></a> </li> <li class="social-item"> <a href="https://alexander-wong.com/index.xml"><span class="icon icon-rss" title="RSS"></span></a> </li> </ul> </nav> </header> <section class="main post-list"> <header class="list-header offscreen"> <h2 class="list-label">All Posts</h2> </header> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/sequence-models-week-3/" class="post-link">Sequence Models, Week 3</a></h3> <p class="post-meta">@Alexander Wong · Mar 17, 2018 · 10 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Sequence Models course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Sequence models &amp; Attention mechanism Various sequence to sequence architectures Basic Models Picking the most likely sentence Beam search Refinements to Beam Search Error analysis in Beam Search Bleu Score Attention Model Intuition Attention Model Speech recognition - Audio data Speech Recognition Trigger Word Detection Conclusion Sequence models &amp; Attention mechanism Sequence models can have an attention mechanism.</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/sequence-models-week-3/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/sequence-models-week-2/" class="post-link">Sequence Models, Week 2</a></h3> <p class="post-meta">@Alexander Wong · Mar 10, 2018 · 8 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Sequence Models course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Natural Langauge Processing &amp; Word Embeddings Introduction to Word Embeddings Word Representation Using word embeddings Properties of word embeddings Embedding matrix Learning Word Embeddings: Word2vec &amp; GloVe Learning word embeddings Word2Vec Negative Sampling GloVe word vectors Applications using Word Embeddings Sentiment Classification Debiasing word embeddings Natural Langauge Processing &amp; Word Embeddings Learn about how to use deep learning for natraul language processing.</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/sequence-models-week-2/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/sequence-models-week-1/" class="post-link">Sequence Models, Week 1</a></h3> <p class="post-meta">@Alexander Wong · Mar 3, 2018 · 7 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Sequence Models course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Recurrent Neural Networks Recurrent Neural Networks Why sequence models Notation Recurrent Neural Network Model Backpropagation through time Different types of RNNs Language model and sequence generation Sampling novel sequences Vanishing gradients with RNNs Gated Recurrent Unit (GRU) Long Short Term Memory (LSTM) Bidirectional RNN Deep RNNs Recurrent Neural Networks Learn about recurrent neural networks.</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/sequence-models-week-1/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/convolutional-neural-networks-week-4/" class="post-link">Convolutional Neural Networks, Week 4</a></h3> <p class="post-meta">@Alexander Wong · Feb 26, 2018 · 3 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Convolutional Neural Networks course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Special Applications: Face Recognition &amp; Neural Style Transfer Face Recognition What is face recognition? One Shot Learning Siamese Network Triplet Loss Face Verification and Binary Classification Neural Style Transfer What is neural style transfer?</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/convolutional-neural-networks-week-4/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/convolutional-neural-networks-week-3/" class="post-link">Convolutional Neural Networks, Week 3</a></h3> <p class="post-meta">@Alexander Wong · Feb 18, 2018 · 2 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Convolutional Neural Networks course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Object Detection Learning Objectives Detection Algorithms Object Localization Landmark Detection Object Detection Convolutional Implementation of Sliding Windows Bounding Box Predictions Intersection Over Union Non-max Suppression Anchor Boxes YOLO Algorithm (Optional) Region Proposals Object Detection Learning Objectives Understand the challenges of Object Localization, Object Detection, Landmark Finding Understand and implement non-max suppression Understand and implement intersection over union Understand how to label a dataset for an object detection application Remember the vocabulary of object detection (landmark, anchor, bounding box, grid) Detection Algorithms Object Localization Image classification: One object (Is cat or no cat)</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/convolutional-neural-networks-week-3/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/convolutional-neural-networks-week-2/" class="post-link">Convolutional Neural Networks, Week 2</a></h3> <p class="post-meta">@Alexander Wong · Feb 11, 2018 · 4 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Convolutional Neural Networks course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Deep Convolutional Models: Case Studies Learning Objectives Case Studies Why look at case studies Classic Networks Residual Networks (ResNets) Why ResNets Work Networks in Networks and 1x1 Convolutions Inception Network Motivation Inception Network Practical Advices for using ConvNets Using Open-Source Implementation Transfer Learning Data Augmentation State of Computer Vision Deep Convolutional Models: Case Studies Learning Objectives Understand foundational papers of Convolutional Neural Networks (CNN) Analyze dymensionality reduction of a volume in a very deep network Understand and implement a residual network Build a deep neural network using Keras Implement skip-connection in your network Clone a repository from Github and use transfer learning Case Studies Why look at case studies Good way to gain intuition about convolutional neural networks is to read existing architectures that utilize CNNs</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/convolutional-neural-networks-week-2/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/convolutional-neural-networks-week-1/" class="post-link">Convolutional Neural Networks, Week 1</a></h3> <p class="post-meta">@Alexander Wong · Jan 20, 2018 · 3 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Convolutional Neural Networks course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents Foundations of Convolutional Neural Networks Convolutional Neural Networks Computer Vision Edge Detection Example More Edge Detection Padding Strided Convolutions Convolutions Over Volume One Layer of a Convolutional Network Simple Convolutional Network Example Pooling Layers CNN Example Why Convolutions?</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/convolutional-neural-networks-week-1/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/structuring-machine-learning-projects-week1/" class="post-link">Structuring Machine Learning Projects, Week 1</a></h3> <p class="post-meta">@Alexander Wong · Jan 1, 2018 · 6 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Structuring Machine Learning Projects course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Table of Contents ML Strategy Introduction to ML Strategy Why ML Strategy Orthogonalization Setting Up Your Goal Single Number Evaluation Metric Satisficing and Optimizing Metric Train/Dev/Test Distributions Size of the Dev and Test Sets When to Change Dev/Test Sets and Metrics Comparing to Human-Level Performance Why Human-level Performance?</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/structuring-machine-learning-projects-week1/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/improving-deep-neural-networks-week3/" class="post-link">Improving Deep Neural Networks, Week 3</a></h3> <p class="post-meta">@Alexander Wong · Dec 20, 2017 · 7 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Improving Deep Neural Networks: Hyperparameter tuning, Regularization and Optimization course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Assumes you have knowledge of Improving Deep Neural Networks, Week 2. Table of Contents Hyperparameter Tuning, Batch Normalization, and Programming Frameworks Hyperparameter Tuning Tuning Process Using an appropriate scale to pick hyperparameters Hyperparameters tuning in practice: Pandas vs Caviar Batch Normalization Normalizing activations in a network Fitting Batch Normalization into a neural network Why does Batch Normalization Work?</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/improving-deep-neural-networks-week3/">Read More →</a> </footer> </article> <article class="post-entry"> <header class="post-header"> <h3 class="post-title"><a href="https://alexander-wong.com/post/improving-deep-neural-networks-week2/" class="post-link">Improving Deep Neural Networks, Week 2</a></h3> <p class="post-meta">@Alexander Wong · Dec 17, 2017 · 4 min read</p> </header> <p class="post-summary">Taking the Coursera Deep Learning Specialization, Improving Deep Neural Networks: Hyperparameter tuning, Regularization and Optimization course. Will post condensed notes every week as part of the review process. All material originates from the free Coursera course, taught by Andrew Ng. See deeplearning.ai for more details. Assumes you have knowledge of Improving Deep Neural Networks, Week 1. Table of Contents Optimization Algorithms Mini-Batch Gradient Descent Understanding Mini-batch Gradient Descent Exponentially Weighted Averages Understanding Exponentially Weighted Averages Bias Correction in Exponentially Weighted Averages Gradient Descent with Momentum RMSprop Adam Optimization Algorithm Learning Rate Decay The Problem of Local Optima Optimization Algorithms Mini-Batch Gradient Descent Rather than training on your entire training set during each step of gradient descent, break out your examples into groups.</p> <footer class="post-footer"> <a class="read-more" href="https://alexander-wong.com/post/improving-deep-neural-networks-week2/">Read More →</a> </footer> </article> <footer class="list-footer"> <nav class="pagination"> <h3 class="offscreen">Pagination</h3> <a class="pagination-next" href="https://alexander-wong.com/page/2/">Older Posts →</a> </nav> </footer> </section> <footer class="site-footer"> <p>© 2017-2018 Alexander Wong</p> <p>Powered by <a href="https://gohugo.io/" target="_blank">Hugo</a> with theme <a href="https://github.com/laozhu/hugo-nuo" target="_blank">Nuo</a>.</p> </footer> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ tex2jax: { inlineMath: [['$','$'], ['\\(','\\)']], displayMath: [['$$','$$'], ['\[','\]']], processEscapes: true, processEnvironments: true, skipTags: ['script', 'noscript', 'style', 'textarea', 'pre'], TeX: { equationNumbers: { autoNumber: "AMS" }, extensions: ["AMSmath.js", "AMSsymbols.js"] } } }) </script> <script src="https://alexander-wong.com/js/bundle.js"></script> <script> window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date; ga('create', 'UA-37311284-1', 'auto'); ga('send', 'pageview'); </script> <script async src='//www.google-analytics.com/analytics.js'></script> </body> </html>
{'content_hash': '0461c05e6e1fefbda4d9d68ffa90cf2f', 'timestamp': '', 'source': 'github', 'line_count': 409, 'max_line_length': 946, 'avg_line_length': 48.88753056234719, 'alnum_prop': 0.7051262815703926, 'repo_name': 'awwong1/alexander-wong.com', 'id': 'bc44bc251a784fa69ab0c52aa8b0332550f71fd3', 'size': '20038', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'docs/index.html', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'HTML', 'bytes': '1692'}]}
<?xml version="1.0" encoding="utf-8"?> <!-- --> <config xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../../../../../vendor/magento/mtf/etc/variations.xsd"> <testCase name="Magento\Backend\Test\TestCase\NavigateMenuTest"> <variation name="NavigateMenuTest22"> <data name="menuItem" xsi:type="string">Customers > All Customers</data> <data name="pageTitle" xsi:type="string">Customers</data> <constraint name="Magento\Backend\Test\Constraint\AssertBackendPageIsAvailable"/> </variation> <variation name="NavigateMenuTest23"> <data name="menuItem" xsi:type="string">Customers > Now Online</data> <data name="pageTitle" xsi:type="string">Customers Now Online</data> <constraint name="Magento\Backend\Test\Constraint\AssertBackendPageIsAvailable"/> </variation> <variation name="NavigateMenuTest24"> <data name="menuItem" xsi:type="string">Stores > Customer Groups</data> <data name="pageTitle" xsi:type="string">Customer Groups</data> <constraint name="Magento\Backend\Test\Constraint\AssertBackendPageIsAvailable"/> </variation> </testCase> </config>
{'content_hash': '352e9e27fe41a73e19d730bbd276af12', 'timestamp': '', 'source': 'github', 'line_count': 23, 'max_line_length': 150, 'avg_line_length': 54.65217391304348, 'alnum_prop': 0.6603023070803501, 'repo_name': 'florentinaa/magento', 'id': '8be44cc04665b1d90a4389f7554f71bb36b6fde4', 'size': '1355', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'store/dev/tests/functional/tests/app/Magento/Customer/Test/TestCase/NavigateMenuTest.xml', 'mode': '33188', 'license': 'mit', 'language': [{'name': 'ApacheConf', 'bytes': '23874'}, {'name': 'CSS', 'bytes': '3779785'}, {'name': 'HTML', 'bytes': '6149486'}, {'name': 'JavaScript', 'bytes': '4396691'}, {'name': 'PHP', 'bytes': '22079463'}, {'name': 'Shell', 'bytes': '6072'}, {'name': 'XSLT', 'bytes': '19889'}]}
ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
{'content_hash': '713c9422118493044f6d057bf8ca0f47', 'timestamp': '', 'source': 'github', 'line_count': 13, 'max_line_length': 31, 'avg_line_length': 9.692307692307692, 'alnum_prop': 0.7063492063492064, 'repo_name': 'mdoering/backbone', 'id': '6122b44bd1850ed59c8b1304d3dcd85fc1756661', 'size': '179', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'life/Plantae/Pteridophyta/Polypodiopsida/Polypodiales/Dryopteridaceae/Nephrodium/Nephrodium dentatum/README.md', 'mode': '33188', 'license': 'apache-2.0', 'language': []}
ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
{'content_hash': 'ad3b5f5777ef433f3224e1b549edab8f', 'timestamp': '', 'source': 'github', 'line_count': 13, 'max_line_length': 39, 'avg_line_length': 10.307692307692308, 'alnum_prop': 0.6940298507462687, 'repo_name': 'mdoering/backbone', 'id': 'd3decae898976f05441ba896eeac983279650884', 'size': '179', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'life/Plantae/Magnoliophyta/Magnoliopsida/Myrtales/Myrtaceae/Eugenia/Eugenia rekoi/README.md', 'mode': '33188', 'license': 'apache-2.0', 'language': []}
from __future__ import print_function import argparse import collections import fnmatch import os import sys import tarfile try: # For Python 3.0 and later from urllib.request import urlretrieve except ImportError: # Fall back to Python 2's urllib2 from urllib import urlretrieve import elm_package import exact_dependencies def read_native_elm_package(package_file): """ Reads elm-native-package.json. """ with open(package_file) as f: return exact_dependencies.load(f) def format_tarball_url(package): """ Creates the url to fetch the tar from github. >>> format_tarball_url({'owner': 'elm-lang', 'project': 'navigation', 'version': '2.0.0'}) 'https://github.com/elm-lang/navigation/archive/2.0.0.tar.gz' """ return "https://github.com/{owner}/{project}/archive/{version}.tar.gz".format(**package) def packages_from_exact_deps(exact_dependencies): """ Parses the json and returns a list of {version, owner, project}. >>> packages_from_exact_deps({'elm-lang/navigation': '2.0.0'}) \ == [{'version': '2.0.0', 'owner': 'elm-lang', 'project': 'navigation'}] True """ result = [] for package, version in exact_dependencies.items(): owner, project = package.split('/') result.append({ 'owner': owner, 'project': project, 'version': version }) return result def ensure_vendor_owner_dir(base, owner): """ Creates the path in the vendor folder. >>> ensure_vendor_owner_dir('foo', 'bar') 'foo/bar' """ path = os.path.join(base, owner) try: os.makedirs(path) except Exception as e: pass return path def vendor_package_dir(vendor_dir, package): """ Returns the path to the elm package. Also creates the parent directory if misisng. >>> vendor_package_dir('vendor/assets/elm', {'version': '2.0.0', 'owner': 'elm-lang', 'project': 'navigation'}) 'vendor/assets/elm/elm-lang/navigation-2.0.0' """ vendor_owner_dir = ensure_vendor_owner_dir(vendor_dir, package['owner']) return "{vendor_owner_dir}/{project}-{version}".format( vendor_owner_dir=vendor_owner_dir, project=package['project'], version=package['version'] ) def fetch_packages(vendor_dir, packages): """ Fetches all packages from github. """ for package in packages: tar_filename = format_tar_path(vendor_dir, package) vendor_owner_dir = ensure_vendor_owner_dir(vendor_dir, package['owner']) url = format_tarball_url(package) print("Downloading {owner}/{project} {version}".format(**package)) urlretrieve(url, tar_filename) with tarfile.open(tar_filename) as tar: tar.extractall(vendor_owner_dir, members=tar.getmembers()) return packages def format_tar_path(vendor_dir, package): """ The path of the tar. >>> format_tar_path('vendor/assets/elm', {'owner': 'elm-lang', 'project': 'navigation', 'version': '2.0.0'}) 'vendor/assets/elm/elm-lang/navigation-2.0.0-tar.gz' """ ensure_vendor_owner_dir(vendor_dir, package['owner']) return vendor_package_dir(vendor_dir, package) + "-tar.gz" def format_native_name(owner, project): """ Formates the package to the owner used in elm native. >>> format_native_name('elm-lang', 'navigation') '_elm_lang$navigation' """ underscored_owner = owner.replace("-", "_") underscored_project = project.replace("-", "_") return "_{owner}${repo}".format(owner=underscored_owner, repo=underscored_project) def package_name_from_repo(repository): """ Owner and project from repository. >>> package_name_from_repo('https://github.com/NoRedInk/noredink.git') ('NoRedInk', 'noredink') """ repo_without_domain = repository.split('https://github.com/')[1].split('.git')[0] (owner, project) = repo_without_domain.split('/') return (owner, project) def get_source_dirs(vendor_dir, package): """ get the source-directories out of an elm-package file """ elm_package_filename = os.path.join(vendor_package_dir(vendor_dir, package), 'elm-package.json') with open(elm_package_filename) as f: data = elm_package.load(f) return data['source-directories'] def replace_in_file(filePath, src, target): """ find replace in a file """ output = "" with open(filePath) as infile: output = infile.read().replace(src, target) with open(filePath, 'w') as outfile: outfile.write(output) def find_all_native_files(path): """ recursivly find all js files in a package """ native_files = [] for root, dirnames, filenames in os.walk(path): if "Native" not in root: continue for filename in fnmatch.filter(filenames, '*.js'): native_files.append(os.path.join(root, filename)) return native_files def munge_names(vendor_dir, repository, packages): """ Replaces the namespaced function names in all native code by the namespace from the given elm-package.json. """ owner, project = package_name_from_repo(repository) for package in packages: native_files = find_all_native_files(vendor_package_dir(vendor_dir, package)) for native_file in native_files: replace_in_file( native_file, format_native_name(package['owner'], package['project']), format_native_name(owner, project) ) def update_source_directories(vendor_dir, elm_package_paths, native_packages): """ Updates the source-directories in the given elm-package.json files. Returns the repository of the last elm-package.json. """ repository = "" for elm_package_path in elm_package_paths: with open(elm_package_path) as f: data = elm_package.load(f) repository = data['repository'] source_directories = data['source-directories'] elm_package_dir = os.path.dirname(elm_package_path) needs_save = False for native_package in native_packages: source_dirs = get_source_dirs(vendor_dir, native_package) for source_dir in source_dirs: absolute_source_dir = os.path.join( vendor_package_dir(vendor_dir, native_package), source_dir) relative_path = os.path.relpath(absolute_source_dir, elm_package_dir) if relative_path not in data['source-directories']: data['source-directories'].append(relative_path) needs_save = True if needs_save: with open(elm_package_path, 'w') as f: elm_package.dump(data, f) return repository def exclude_existing_packages(vendor_dir, packages): return [x for x in packages if not package_exists(vendor_dir, x)] def package_exists(vendor_dir, package): return os.path.isdir(vendor_package_dir(vendor_dir, package)) def main(native_elm_package_path, elm_package_paths, vendor_dir): absolute_vendor_dir = os.path.abspath(vendor_dir) absolute_elm_package_paths = list(map(os.path.abspath, elm_package_paths)) raw_json = read_native_elm_package(native_elm_package_path) all_packages = packages_from_exact_deps(raw_json) required_packages = exclude_existing_packages(absolute_vendor_dir, all_packages) fetch_packages(absolute_vendor_dir, required_packages) repository = update_source_directories( absolute_vendor_dir, absolute_elm_package_paths, required_packages) munge_names(absolute_vendor_dir, repository, required_packages) def test(): import doctest doctest.testmod() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Fetch elm packages') parser.add_argument( 'native_elm_package', help='The elm-native-package.json file you want to use', default='elm-native-package.json' ) parser.add_argument('--elm-config', '-e', nargs='+') parser.add_argument('--vendor-dir', default='vendor/assets/elm') parser.add_argument('--test', '-t', action='store_true') args = parser.parse_args() if args.test: test() exit() main(args.native_elm_package, args.elm_config, args.vendor_dir)
{'content_hash': 'babdfb20d35af00b30121d4be18dfc2e', 'timestamp': '', 'source': 'github', 'line_count': 263, 'max_line_length': 115, 'avg_line_length': 31.627376425855513, 'alnum_prop': 0.6424621303197884, 'repo_name': 'NoRedInk/elm-ops-tooling', 'id': '032f20da969fdf7912bb6e551444528fcc149ae7', 'size': '8342', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'native_package_install.py', 'mode': '33261', 'license': 'bsd-3-clause', 'language': [{'name': 'Python', 'bytes': '45835'}, {'name': 'Ruby', 'bytes': '2395'}]}
package network import ( "crypto/md5" "encoding/hex" "io/ioutil" "os" "strings" "github.com/rancher/os/pkg/log" ) const ( cacheDirectory = "/var/lib/rancher/cache/" ) func locationHash(location string) string { sum := md5.Sum([]byte(location)) return hex.EncodeToString(sum[:]) } func cacheLookup(location string) []byte { cacheFile := cacheDirectory + locationHash(location) bytes, err := ioutil.ReadFile(cacheFile) if err == nil { log.Debugf("Using cached file: %s", cacheFile) return bytes } return nil } func cacheAdd(location string, data []byte) { tempFile, err := ioutil.TempFile(cacheDirectory, "") if err != nil { return } defer os.Remove(tempFile.Name()) _, err = tempFile.Write(data) if err != nil { return } cacheFile := cacheDirectory + locationHash(location) os.Rename(tempFile.Name(), cacheFile) } func cacheMove(location string) (string, error) { cacheFile := cacheDirectory + locationHash(location) tempFile := cacheFile + "_temp" if err := os.Rename(cacheFile, tempFile); err != nil { return "", err } return tempFile, nil } func cacheMoveBack(name string) error { return os.Rename(name, strings.TrimRight(name, "_temp")) }
{'content_hash': 'ab4b1ed1539583066f7a83e18479eb97', 'timestamp': '', 'source': 'github', 'line_count': 59, 'max_line_length': 57, 'avg_line_length': 20.1864406779661, 'alnum_prop': 0.6935348446683459, 'repo_name': 'rancher/os', 'id': 'e02e90b3c619b87d3602960e06f78b350cfcc605', 'size': '1191', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'pkg/util/network/cache.go', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Dockerfile', 'bytes': '3294'}, {'name': 'Go', 'bytes': '745909'}, {'name': 'Makefile', 'bytes': '1712'}, {'name': 'Shell', 'bytes': '113822'}, {'name': 'Smarty', 'bytes': '3064'}]}
<!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="description" content="Javadoc API documentation for ShinobiCharts For Android API." /> <link rel="shortcut icon" type="image/x-icon" href="../../../favicon.ico" /> <title> Axis.Orientation | ShinobiCharts For Android API </title> <link href="../../../../assets/doclava-developer-docs.css" rel="stylesheet" type="text/css" /> <link href="../../../../assets/customizations.css" rel="stylesheet" type="text/css" /> <script src="../../../../assets/search_autocomplete.js" type="text/javascript"></script> <script src="../../../../assets/jquery-resizable.min.js" type="text/javascript"></script> <script src="../../../../assets/doclava-developer-docs.js" type="text/javascript"></script> <script src="../../../../assets/prettify.js" type="text/javascript"></script> <script type="text/javascript"> setToRoot("../../../", "../../../../assets/"); </script> <script src="../../../../assets/doclava-developer-reference.js" type="text/javascript"></script> <script src="../../../../assets/navtree_data.js" type="text/javascript"></script> <script src="../../../../assets/customizations.js" type="text/javascript"></script> <noscript> <style type="text/css"> html,body{overflow:auto;} #body-content{position:relative; top:0;} #doc-content{overflow:visible;border-left:3px solid #666;} #side-nav{padding:0;} #side-nav .toggle-list ul {display:block;} #resize-packages-nav{border-bottom:3px solid #666;} </style> </noscript> </head> <body class=""> <div id="header"> <div id="headerLeft"> <span id="masthead-title">ShinobiCharts For Android API</span> </div> <div id="headerRight"> <div id="search" > <div id="searchForm"> <form accept-charset="utf-8" class="gsc-search-box" onsubmit="return submit_search()"> <table class="gsc-search-box" cellpadding="0" cellspacing="0"><tbody> <tr> <td class="gsc-input"> <input id="search_autocomplete" class="gsc-input" type="text" size="33" autocomplete="off" title="search developer docs" name="q" value="search developer docs" onFocus="search_focus_changed(this, true)" onBlur="search_focus_changed(this, false)" onkeydown="return search_changed(event, true, '../../../')" onkeyup="return search_changed(event, false, '../../../')" /> <div id="search_filtered_div" class="no-display"> <table id="search_filtered" cellspacing=0> </table> </div> </td> <td class="gsc-search-button"> <input type="submit" value="Search" title="search" id="search-button" class="gsc-search-button" /> </td> <td class="gsc-clear-button"> <div title="clear results" class="gsc-clear-button">&nbsp;</div> </td> </tr></tbody> </table> </form> </div><!-- searchForm --> </div><!-- search --> </div> </div><!-- header --> <div class="g-section g-tpl-240" id="body-content"> <div class="g-unit g-first side-nav-resizable" id="side-nav"> <div id="swapper"> <div id="nav-panels"> <div id="resize-packages-nav"> <div id="packages-nav"> <div id="index-links"><nobr> <a href="../../../packages.html" >Package Index</a> | <a href="../../../classes.html" >Class Index</a></nobr> </div> <ul> <li class="selected api apilevel-"> <a href="../../../com/shinobicontrols/charts/package-summary.html">com.shinobicontrols.charts</a></li> </ul><br/> </div> <!-- end packages --> </div> <!-- end resize-packages --> <div id="classes-nav"> <ul> <li><h2>Interfaces</h2> <ul> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Data.html">Data</a>&lt;Tx,&nbsp;Ty&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/MultiValueData.html">MultiValueData</a>&lt;T&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/SelectableData.html">SelectableData</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.html">ShinobiChart</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnAxisMotionStateChangeListener.html">ShinobiChart.OnAxisMotionStateChangeListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnAxisRangeChangeListener.html">ShinobiChart.OnAxisRangeChangeListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnCrosshairListener.html">ShinobiChart.OnCrosshairListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnGestureListener.html">ShinobiChart.OnGestureListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnInternalLayoutListener.html">ShinobiChart.OnInternalLayoutListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnPieDonutSliceLabelDrawListener.html">ShinobiChart.OnPieDonutSliceLabelDrawListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnPieDonutSliceUpdateListener.html">ShinobiChart.OnPieDonutSliceUpdateListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnSeriesSelectionListener.html">ShinobiChart.OnSeriesSelectionListener</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ShinobiChart.OnSnapshotDoneListener.html">ShinobiChart.OnSnapshotDoneListener</a></li> </ul> </li> <li><h2>Classes</h2> <ul> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Annotation.html">Annotation</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/AnnotationsManager.html">AnnotationsManager</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/AnnotationStyle.html">AnnotationStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Axis.html">Axis</a>&lt;T&nbsp;extends&nbsp;<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Comparable.html">Comparable</a>&lt;T&gt;,&nbsp;U&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/AxisStyle.html">AxisStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/AxisTitleStyle.html">AxisTitleStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/BandSeries.html">BandSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/BandSeriesStyle.html">BandSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/BarSeries.html">BarSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/BarSeriesStyle.html">BarSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/BuildConfig.html">BuildConfig</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/CandlestickSeries.html">CandlestickSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/CandlestickSeriesStyle.html">CandlestickSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/CartesianSeries.html">CartesianSeries</a>&lt;T&nbsp;extends&nbsp;<a href="../../../com/shinobicontrols/charts/SeriesStyle.html">SeriesStyle</a>&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/CategoryAxis.html">CategoryAxis</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ChartFragment.html">ChartFragment</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ChartStyle.html">ChartStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ChartUtils.html">ChartUtils</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ChartView.html">ChartView</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ColumnSeries.html">ColumnSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/ColumnSeriesStyle.html">ColumnSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Crosshair.html">Crosshair</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/CrosshairStyle.html">CrosshairStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DataAdapter.html">DataAdapter</a>&lt;Tx,&nbsp;Ty&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DataPoint.html">DataPoint</a>&lt;Tx,&nbsp;Ty&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DateFrequency.html">DateFrequency</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DateRange.html">DateRange</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DateTimeAxis.html">DateTimeAxis</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DonutSeries.html">DonutSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DonutSeriesStyle.html">DonutSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/GridlineStyle.html">GridlineStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/GridStripeStyle.html">GridStripeStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Legend.html">Legend</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/LegendStyle.html">LegendStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/LineSeries.html">LineSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/LineSeriesStyle.html">LineSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/MainTitleStyle.html">MainTitleStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/MultiValueDataPoint.html">MultiValueDataPoint</a>&lt;Tx,&nbsp;Tv&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/NumberAxis.html">NumberAxis</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/NumberRange.html">NumberRange</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/OHLCSeries.html">OHLCSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/OHLCSeriesStyle.html">OHLCSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PieDonutSeries.html">PieDonutSeries</a>&lt;T&nbsp;extends&nbsp;PieDonutSeriesStyle&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PieDonutSlice.html">PieDonutSlice</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PieSeries.html">PieSeries</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PieSeriesStyle.html">PieSeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PointStyle.html">PointStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.html">R</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.attr.html">R.attr</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.bool.html">R.bool</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.color.html">R.color</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.dimen.html">R.dimen</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.style.html">R.style</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/R.styleable.html">R.styleable</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/RadialSeries.html">RadialSeries</a>&lt;T&nbsp;extends&nbsp;<a href="../../../com/shinobicontrols/charts/SeriesStyle.html">SeriesStyle</a>&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Range.html">Range</a>&lt;T&nbsp;extends&nbsp;<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Comparable.html">Comparable</a>&lt;T&gt;&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Series.html">Series</a>&lt;T&nbsp;extends&nbsp;<a href="../../../com/shinobicontrols/charts/SeriesStyle.html">SeriesStyle</a>&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/SeriesStyle.html">SeriesStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/SimpleDataAdapter.html">SimpleDataAdapter</a>&lt;Tx,&nbsp;Ty&gt;</li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/SupportChartFragment.html">SupportChartFragment</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/TickMark.html">TickMark</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/TickStyle.html">TickStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Title.html">Title</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/TitleStyle.html">TitleStyle</a></li> </ul> </li> <li><h2>Enums</h2> <ul> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Annotation.Position.html">Annotation.Position</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Axis.MotionState.html">Axis.MotionState</a></li> <li class="selected api apilevel-"><a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Axis.Position.html">Axis.Position</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/DateFrequency.Denomination.html">DateFrequency.Denomination</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Legend.Placement.html">Legend.Placement</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Legend.Position.html">Legend.Position</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Legend.SymbolAlignment.html">Legend.SymbolAlignment</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PieDonutSeries.DrawDirection.html">PieDonutSeries.DrawDirection</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/PieDonutSeries.RadialEffect.html">PieDonutSeries.RadialEffect</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/RadialSeries.DrawDirection.html">RadialSeries.DrawDirection</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/RadialSeries.RadialEffect.html">RadialSeries.RadialEffect</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Series.Orientation.html">Series.Orientation</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Series.SelectionMode.html">Series.SelectionMode</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/SeriesStyle.FillStyle.html">SeriesStyle.FillStyle</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/TickMark.ClippingMode.html">TickMark.ClippingMode</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/TickMark.Orientation.html">TickMark.Orientation</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Title.CentersOn.html">Title.CentersOn</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Title.Orientation.html">Title.Orientation</a></li> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/Title.Position.html">Title.Position</a></li> </ul> </li> <li><h2>Exceptions</h2> <ul> <li class="api apilevel-"><a href="../../../com/shinobicontrols/charts/InvalidLicenseException.html">InvalidLicenseException</a></li> </ul> </li> </ul><br/> </div><!-- end classes --> </div><!-- end nav-panels --> <div id="nav-tree" style="display:none"> <div id="index-links"><nobr> <a href="../../../packages.html" >Package Index</a> | <a href="../../../classes.html" >Class Index</a></nobr> </div> </div><!-- end nav-tree --> </div><!-- end swapper --> </div> <!-- end side-nav --> <script> if (!isMobile) { $("<a href='#' id='nav-swap' onclick='swapNav();return false;' style='font-size:10px;line-height:9px;margin-left:1em;text-decoration:none;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>").appendTo("#side-nav"); chooseDefaultNav(); if ($("#nav-tree").is(':visible')) { init_default_navtree("../../../"); } else { addLoadEvent(function() { scrollIntoView("packages-nav"); scrollIntoView("classes-nav"); }); } $("#swapper").css({borderBottom:"2px solid #aaa"}); } else { swapNav(); // tree view should be used on mobile } </script> <div class="g-unit" id="doc-content"> <div id="api-info-block"> <div class="sum-details-links"> Summary: <a href="#enumconstants">Enums</a> &#124; <a href="#pubmethods">Methods</a> &#124; <a href="#inhmethods">Inherited Methods</a> &#124; <a href="#" onclick="return toggleAllClassInherited()" id="toggleAllClassInherited">[Expand All]</a> </div><!-- end sum-details-links --> <div class="api-level"> </div> </div><!-- end api-info-block --> <!-- ======== START OF CLASS DATA ======== --> <div id="jd-header"> public static final enum <h1>Axis.Orientation</h1> extends <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Enum.html">Enum</a>&lt;E&nbsp;extends&nbsp;<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Enum.html">Enum</a>&lt;E&gt;&gt;<br/> </div><!-- end header --> <div id="naMessage"></div> <div id="jd-content" class="api apilevel-"> <table class="jd-inheritance-table"> <tr> <td colspan="3" class="jd-inheritance-class-cell"><a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">java.lang.Object</a></td> </tr> <tr> <td class="jd-inheritance-space">&nbsp;&nbsp;&nbsp;&#x21b3;</td> <td colspan="2" class="jd-inheritance-class-cell"><a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Enum.html">java.lang.Enum</a>&lt;E&nbsp;extends&nbsp;<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Enum.html">java.lang.Enum</a>&lt;E&gt;&gt;</td> </tr> <tr> <td class="jd-inheritance-space">&nbsp;</td> <td class="jd-inheritance-space">&nbsp;&nbsp;&nbsp;&#x21b3;</td> <td colspan="1" class="jd-inheritance-class-cell">com.shinobicontrols.charts.Axis.Orientation</td> </tr> </table> <div class="jd-descr"> <h2>Class Overview</h2> <p>The orientation of the axis. </p> </div><!-- jd-descr --> <div class="jd-descr"> <h2>Summary</h2> <!-- =========== ENUM CONSTANT SUMMARY =========== --> <table id="enumconstants" class="jd-sumtable"><tr><th colspan="12">Enum Values</th></tr> <tr class="alt-color api apilevel-" > <td class="jd-descrcol"><a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a>&nbsp;</td> <td class="jd-linkcol">HORIZONTAL&nbsp;</td> <td class="jd-descrcol" width="100%">&nbsp;</td> </tr> <tr class=" api apilevel-" > <td class="jd-descrcol"><a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a>&nbsp;</td> <td class="jd-linkcol">VERTICAL&nbsp;</td> <td class="jd-descrcol" width="100%">&nbsp;</td> </tr> <!-- ========== METHOD SUMMARY =========== --> <table id="pubmethods" class="jd-sumtable"><tr><th colspan="12">Public Methods</th></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> static <a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad"><a href="../../../com/shinobicontrols/charts/Axis.Orientation.html#valueOf(java.lang.String)">valueOf</a></span>(<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/String.html">String</a> name)</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final static <a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Orientation[]</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad"><a href="../../../com/shinobicontrols/charts/Axis.Orientation.html#values()">values</a></span>()</nobr> </td></tr> </table> <!-- ========== METHOD SUMMARY =========== --> <table id="inhmethods" class="jd-sumtable"><tr><th> <a href="#" class="toggle-all" onclick="return toggleAllInherited(this, null)">[Expand]</a> <div style="clear:left;">Inherited Methods</div></th></tr> <tr class="api apilevel-" > <td colspan="12"> <a href="#" onclick="return toggleInherited(this, null)" id="inherited-methods-java.lang.Enum" class="jd-expando-trigger closed" ><img id="inherited-methods-java.lang.Enum-trigger" src="../../../../assets/images/triangle-closed.png" class="jd-expando-trigger-img" /></a> From class <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Enum.html">java.lang.Enum</a> <div id="inherited-methods-java.lang.Enum"> <div id="inherited-methods-java.lang.Enum-list" class="jd-inheritedlinks"> </div> <div id="inherited-methods-java.lang.Enum-summary" style="display: none;"> <table class="jd-sumtable-expando"> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">Object</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">clone</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> int</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">compareTo</span>(<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">Object</a> arg0)</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final int</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">compareTo</span>(E arg0)</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final boolean</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">equals</span>(<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">Object</a> arg0)</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">finalize</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Class.html">Class</a>&lt;E&gt;</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">getDeclaringClass</span>()</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final int</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">hashCode</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/String.html">String</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">name</span>()</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final int</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">ordinal</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/String.html">String</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">toString</span>()</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> static &lt;T&nbsp;extends&nbsp;<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Enum.html">Enum</a>&lt;T&gt;&gt; T</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">valueOf</span>(<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Class.html">Class</a>&lt;T&gt; arg0, <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/String.html">String</a> arg1)</nobr> </td></tr> </table> </div> </div> </td></tr> <tr class="api apilevel-" > <td colspan="12"> <a href="#" onclick="return toggleInherited(this, null)" id="inherited-methods-java.lang.Object" class="jd-expando-trigger closed" ><img id="inherited-methods-java.lang.Object-trigger" src="../../../../assets/images/triangle-closed.png" class="jd-expando-trigger-img" /></a> From class <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">java.lang.Object</a> <div id="inherited-methods-java.lang.Object"> <div id="inherited-methods-java.lang.Object-list" class="jd-inheritedlinks"> </div> <div id="inherited-methods-java.lang.Object-summary" style="display: none;"> <table class="jd-sumtable-expando"> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">Object</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">clone</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> boolean</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">equals</span>(<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Object.html">Object</a> arg0)</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">finalize</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Class.html">Class</a>&lt;?&gt;</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">getClass</span>()</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> int</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">hashCode</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">notify</span>()</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">notifyAll</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/String.html">String</a></nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">toString</span>()</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">wait</span>()</nobr> </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"><nobr> final void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">wait</span>(long arg0, int arg1)</nobr> </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> final void</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">wait</span>(long arg0)</nobr> </td></tr> </table> </div> </div> </td></tr> <tr class="api apilevel-" > <td colspan="12"> <a href="#" onclick="return toggleInherited(this, null)" id="inherited-methods-java.lang.Comparable" class="jd-expando-trigger closed" ><img id="inherited-methods-java.lang.Comparable-trigger" src="../../../../assets/images/triangle-closed.png" class="jd-expando-trigger-img" /></a> From interface <a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/Comparable.html">java.lang.Comparable</a> <div id="inherited-methods-java.lang.Comparable"> <div id="inherited-methods-java.lang.Comparable-list" class="jd-inheritedlinks"> </div> <div id="inherited-methods-java.lang.Comparable-summary" style="display: none;"> <table class="jd-sumtable-expando"> <tr class="alt-color api apilevel-" > <td class="jd-typecol"><nobr> abstract int</nobr> </td> <td class="jd-linkcol" width="100%"><nobr> <span class="sympad">compareTo</span>(T arg0)</nobr> </td></tr> </table> </div> </div> </td></tr> </table> </div><!-- jd-descr (summary) --> <!-- Details --> <!-- XML Attributes --> <!-- Enum Values --> <!-- ========= ENUM CONSTANTS DETAIL ======== --> <h2>Enum Values</h2> <A NAME="HORIZONTAL"></A> <div class="jd-details api apilevel-"> <h4 class="jd-details-title"> <span class="normal"> public static final <a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a> </span> HORIZONTAL </h4> <div class="api-level"> </div> <div class="jd-details-descr"> <div class="jd-tagdata jd-tagdescr"><p></p></div> </div> </div> <A NAME="VERTICAL"></A> <div class="jd-details api apilevel-"> <h4 class="jd-details-title"> <span class="normal"> public static final <a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a> </span> VERTICAL </h4> <div class="api-level"> </div> <div class="jd-details-descr"> <div class="jd-tagdata jd-tagdescr"><p></p></div> </div> </div> <!-- Constants --> <!-- Fields --> <!-- Public ctors --> <!-- ========= CONSTRUCTOR DETAIL ======== --> <!-- Protected ctors --> <!-- ========= METHOD DETAIL ======== --> <!-- Public methdos --> <h2>Public Methods</h2> <A NAME="valueOf(java.lang.String)"></A> <div class="jd-details api apilevel-"> <h4 class="jd-details-title"> <span class="normal"> public static <a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Axis.Orientation</a> </span> <span class="sympad">valueOf</span> <span class="normal">(<a href="http://download.oracle.com/javase/6/docs/api/index.html?java/lang/String.html">String</a> name)</span> </h4> <div class="api-level"> <div> </div> </div> <div class="jd-details-descr"> <div class="jd-tagdata jd-tagdescr"><p></p></div> </div> </div> <A NAME="values()"></A> <div class="jd-details api apilevel-"> <h4 class="jd-details-title"> <span class="normal"> public static final <a href="../../../com/shinobicontrols/charts/Axis.Orientation.html">Orientation[]</a> </span> <span class="sympad">values</span> <span class="normal">()</span> </h4> <div class="api-level"> <div> </div> </div> <div class="jd-details-descr"> <div class="jd-tagdata jd-tagdescr"><p></p></div> </div> </div> <!-- ========= METHOD DETAIL ======== --> <!-- ========= END OF CLASS DATA ========= --> <A NAME="navbar_top"></A> <div id="footer"> Generated by <a href="http://code.google.com/p/doclava/">Doclava</a>. </div> <!-- end footer --> </div> <!-- jd-content --> </div><!-- end doc-content --> </div> <!-- end body-content --> <script type="text/javascript"> init(); /* initialize doclava-developer-docs.js */ </script> </body> </html>
{'content_hash': '3ab3e97477078c5c2f7ac34686cb68ff', 'timestamp': '', 'source': 'github', 'line_count': 1218, 'max_line_length': 308, 'avg_line_length': 31.014778325123153, 'alnum_prop': 0.5598528166031342, 'repo_name': 'zoozooll/MyExercise', 'id': 'e8f91b52ee3e99adafc558c87bd9fbff68ee8924', 'size': '37776', 'binary': False, 'copies': '1', 'ref': 'refs/heads/master', 'path': 'bw121_android/bw121_android/shinobicharts-android-library/assets/docs/reference/com/shinobicontrols/charts/Axis.Orientation.html', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'C', 'bytes': '1495689'}, {'name': 'C#', 'bytes': '190108'}, {'name': 'C++', 'bytes': '8719269'}, {'name': 'CMake', 'bytes': '46692'}, {'name': 'CSS', 'bytes': '149067'}, {'name': 'GLSL', 'bytes': '1069'}, {'name': 'HTML', 'bytes': '5933291'}, {'name': 'Java', 'bytes': '20935928'}, {'name': 'JavaScript', 'bytes': '420263'}, {'name': 'Kotlin', 'bytes': '13567'}, {'name': 'Makefile', 'bytes': '40498'}, {'name': 'Objective-C', 'bytes': '1149532'}, {'name': 'Objective-C++', 'bytes': '248482'}, {'name': 'Python', 'bytes': '23625'}, {'name': 'RenderScript', 'bytes': '3899'}, {'name': 'Shell', 'bytes': '18962'}, {'name': 'TSQL', 'bytes': '184481'}]}
/** * JavaFX specific type formatters. * * @since 2.0.0 */ package griffon.javafx.formatters;
{'content_hash': '877d592d6eacb5baa7a496eebe7b125e', 'timestamp': '', 'source': 'github', 'line_count': 8, 'max_line_length': 35, 'avg_line_length': 12.5, 'alnum_prop': 0.66, 'repo_name': 'aalmiray/griffon2', 'id': '7cf903a2523b05c8f9cf2a717e575987c9a03dc2', 'size': '719', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'subprojects/griffon-javafx/src/main/java/griffon/javafx/formatters/package-info.java', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'CSS', 'bytes': '11139'}, {'name': 'Groovy', 'bytes': '668044'}, {'name': 'Java', 'bytes': '3166231'}, {'name': 'Shell', 'bytes': '38600'}]}
#include "postgres.h" #include "access/genam.h" #include "access/hash.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "access/table.h" #include "bootstrap/bootstrap.h" #include "catalog/namespace.h" #include "catalog/pg_am.h" #include "catalog/pg_amop.h" #include "catalog/pg_amproc.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" #include "catalog/pg_proc.h" #include "catalog/pg_range.h" #include "catalog/pg_statistic.h" #include "catalog/pg_transform.h" #include "catalog/pg_type.h" #include "commands/tablecmds.h" #include "commands/trigger.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "parser/parse_clause.h" /* for sort_op_can_sort() */ #include "parser/parse_coerce.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/datum.h" #include "utils/fmgroids.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" #include "utils/typcache.h" #include "utils/fmgroids.h" #include "funcapi.h" #include "cdb/cdbhash.h" #include "catalog/heap.h" /* SystemAttributeDefinition() */ #include "catalog/pg_aggregate.h" #include "catalog/pg_inherits.h" #include "catalog/pg_trigger.h" /* Hook for plugins to get control in get_attavgwidth() */ get_attavgwidth_hook_type get_attavgwidth_hook = NULL; /* ---------- AMOP CACHES ---------- */ /* * op_in_opfamily * * Return t iff operator 'opno' is in operator family 'opfamily'. * * This function only considers search operators, not ordering operators. */ bool op_in_opfamily(Oid opno, Oid opfamily) { return SearchSysCacheExists3(AMOPOPID, ObjectIdGetDatum(opno), CharGetDatum(AMOP_SEARCH), ObjectIdGetDatum(opfamily)); } /* * get_op_opfamily_strategy * * Get the operator's strategy number within the specified opfamily, * or 0 if it's not a member of the opfamily. * * This function only considers search operators, not ordering operators. */ int get_op_opfamily_strategy(Oid opno, Oid opfamily) { HeapTuple tp; Form_pg_amop amop_tup; int result; tp = SearchSysCache3(AMOPOPID, ObjectIdGetDatum(opno), CharGetDatum(AMOP_SEARCH), ObjectIdGetDatum(opfamily)); if (!HeapTupleIsValid(tp)) return 0; amop_tup = (Form_pg_amop) GETSTRUCT(tp); result = amop_tup->amopstrategy; ReleaseSysCache(tp); return result; } /* * get_op_opfamily_sortfamily * * If the operator is an ordering operator within the specified opfamily, * return its amopsortfamily OID; else return InvalidOid. */ Oid get_op_opfamily_sortfamily(Oid opno, Oid opfamily) { HeapTuple tp; Form_pg_amop amop_tup; Oid result; tp = SearchSysCache3(AMOPOPID, ObjectIdGetDatum(opno), CharGetDatum(AMOP_ORDER), ObjectIdGetDatum(opfamily)); if (!HeapTupleIsValid(tp)) return InvalidOid; amop_tup = (Form_pg_amop) GETSTRUCT(tp); result = amop_tup->amopsortfamily; ReleaseSysCache(tp); return result; } /* * get_op_opfamily_properties * * Get the operator's strategy number and declared input data types * within the specified opfamily. * * Caller should already have verified that opno is a member of opfamily, * therefore we raise an error if the tuple is not found. */ void get_op_opfamily_properties(Oid opno, Oid opfamily, bool ordering_op, int *strategy, Oid *lefttype, Oid *righttype) { HeapTuple tp; Form_pg_amop amop_tup; tp = SearchSysCache3(AMOPOPID, ObjectIdGetDatum(opno), CharGetDatum(ordering_op ? AMOP_ORDER : AMOP_SEARCH), ObjectIdGetDatum(opfamily)); if (!HeapTupleIsValid(tp)) elog(ERROR, "operator %u is not a member of opfamily %u", opno, opfamily); amop_tup = (Form_pg_amop) GETSTRUCT(tp); *strategy = amop_tup->amopstrategy; *lefttype = amop_tup->amoplefttype; *righttype = amop_tup->amoprighttype; ReleaseSysCache(tp); } /* * get_opfamily_member * Get the OID of the operator that implements the specified strategy * with the specified datatypes for the specified opfamily. * * Returns InvalidOid if there is no pg_amop entry for the given keys. */ Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy) { HeapTuple tp; Form_pg_amop amop_tup; Oid result; tp = SearchSysCache4(AMOPSTRATEGY, ObjectIdGetDatum(opfamily), ObjectIdGetDatum(lefttype), ObjectIdGetDatum(righttype), Int16GetDatum(strategy)); if (!HeapTupleIsValid(tp)) return InvalidOid; amop_tup = (Form_pg_amop) GETSTRUCT(tp); result = amop_tup->amopopr; ReleaseSysCache(tp); return result; } /* * get_ordering_op_properties * Given the OID of an ordering operator (a btree "<" or ">" operator), * determine its opfamily, its declared input datatype, and its * strategy number (BTLessStrategyNumber or BTGreaterStrategyNumber). * * Returns true if successful, false if no matching pg_amop entry exists. * (This indicates that the operator is not a valid ordering operator.) * * Note: the operator could be registered in multiple families, for example * if someone were to build a "reverse sort" opfamily. This would result in * uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST * or NULLS LAST, as well as inefficient planning due to failure to match up * pathkeys that should be the same. So we want a determinate result here. * Because of the way the syscache search works, we'll use the interpretation * associated with the opfamily with smallest OID, which is probably * determinate enough. Since there is no longer any particularly good reason * to build reverse-sort opfamilies, it doesn't seem worth expending any * additional effort on ensuring consistency. */ bool get_ordering_op_properties(Oid opno, Oid *opfamily, Oid *opcintype, int16 *strategy) { bool result = false; CatCList *catlist; int i; /* ensure outputs are initialized on failure */ *opfamily = InvalidOid; *opcintype = InvalidOid; *strategy = 0; /* * Search pg_amop to see if the target operator is registered as the "<" * or ">" operator of any btree opfamily. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); /* must be btree */ if (aform->amopmethod != BTREE_AM_OID) continue; if (aform->amopstrategy == BTLessStrategyNumber || aform->amopstrategy == BTGreaterStrategyNumber) { /* Found it ... should have consistent input types */ if (aform->amoplefttype == aform->amoprighttype) { /* Found a suitable opfamily, return info */ *opfamily = aform->amopfamily; *opcintype = aform->amoplefttype; *strategy = aform->amopstrategy; result = true; break; } } } ReleaseSysCacheList(catlist); return result; } /* * get_compare_function_for_ordering_op * Get the OID of the datatype-specific btree comparison function * associated with an ordering operator (a "<" or ">" operator). * * *cmpfunc receives the comparison function OID. * *reverse is set FALSE if the operator is "<", TRUE if it's ">" * (indicating the comparison result must be negated before use). * * Returns TRUE if successful, FALSE if no btree function can be found. * (This indicates that the operator is not a valid ordering operator.) */ bool get_compare_function_for_ordering_op(Oid opno, Oid *cmpfunc, bool *reverse) { Oid opfamily; Oid opcintype; int16 strategy; /* Find the operator in pg_amop */ if (get_ordering_op_properties(opno, &opfamily, &opcintype, &strategy)) { /* Found a suitable opfamily, get matching support function */ *cmpfunc = get_opfamily_proc(opfamily, opcintype, opcintype, BTORDER_PROC); if (!OidIsValid(*cmpfunc)) /* should not happen */ elog(ERROR, "missing support function %d(%u,%u) in opfamily %u", BTORDER_PROC, opcintype, opcintype, opfamily); *reverse = (strategy == BTGreaterStrategyNumber); return true; } /* ensure outputs are set on failure */ *cmpfunc = InvalidOid; *reverse = false; return false; } /* * get_equality_op_for_ordering_op * Get the OID of the datatype-specific btree equality operator * associated with an ordering operator (a "<" or ">" operator). * * If "reverse" isn't NULL, also set *reverse to false if the operator is "<", * true if it's ">" * * Returns InvalidOid if no matching equality operator can be found. * (This indicates that the operator is not a valid ordering operator.) */ Oid get_equality_op_for_ordering_op(Oid opno, bool *reverse) { Oid result = InvalidOid; Oid opfamily; Oid opcintype; int16 strategy; /* Find the operator in pg_amop */ if (get_ordering_op_properties(opno, &opfamily, &opcintype, &strategy)) { /* Found a suitable opfamily, get matching equality operator */ result = get_opfamily_member(opfamily, opcintype, opcintype, BTEqualStrategyNumber); if (reverse) *reverse = (strategy == BTGreaterStrategyNumber); } return result; } /* * get_ordering_op_for_equality_op * Get the OID of a datatype-specific btree ordering operator * associated with an equality operator. (If there are multiple * possibilities, assume any one will do.) * * This function is used when we have to sort data before unique-ifying, * and don't much care which sorting op is used as long as it's compatible * with the intended equality operator. Since we need a sorting operator, * it should be single-data-type even if the given operator is cross-type. * The caller specifies whether to find an op for the LHS or RHS data type. * * Returns InvalidOid if no matching ordering operator can be found. */ Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type) { Oid result = InvalidOid; CatCList *catlist; int i; /* * Search pg_amop to see if the target operator is registered as the "=" * operator of any btree opfamily. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); /* must be btree */ if (aform->amopmethod != BTREE_AM_OID) continue; if (aform->amopstrategy == BTEqualStrategyNumber) { /* Found a suitable opfamily, get matching ordering operator */ Oid typid; typid = use_lhs_type ? aform->amoplefttype : aform->amoprighttype; result = get_opfamily_member(aform->amopfamily, typid, typid, BTLessStrategyNumber); if (OidIsValid(result)) break; /* failure probably shouldn't happen, but keep looking if so */ } } ReleaseSysCacheList(catlist); return result; } /* * get_mergejoin_opfamilies * Given a putatively mergejoinable operator, return a list of the OIDs * of the btree opfamilies in which it represents equality. * * It is possible (though at present unusual) for an operator to be equality * in more than one opfamily, hence the result is a list. This also lets us * return NIL if the operator is not found in any opfamilies. * * The planner currently uses simple equal() tests to compare the lists * returned by this function, which makes the list order relevant, though * strictly speaking it should not be. Because of the way syscache list * searches are handled, in normal operation the result will be sorted by OID * so everything works fine. If running with system index usage disabled, * the result ordering is unspecified and hence the planner might fail to * recognize optimization opportunities ... but that's hardly a scenario in * which performance is good anyway, so there's no point in expending code * or cycles here to guarantee the ordering in that case. */ List * get_mergejoin_opfamilies(Oid opno) { List *result = NIL; CatCList *catlist; int i; /* * Search pg_amop to see if the target operator is registered as the "=" * operator of any btree opfamily. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); /* must be btree equality */ if (aform->amopmethod == BTREE_AM_OID && aform->amopstrategy == BTEqualStrategyNumber) result = lappend_oid(result, aform->amopfamily); } ReleaseSysCacheList(catlist); return result; } /* * get_compatible_hash_operators * Get the OID(s) of hash equality operator(s) compatible with the given * operator, but operating on its LHS and/or RHS datatype. * * An operator for the LHS type is sought and returned into *lhs_opno if * lhs_opno isn't NULL. Similarly, an operator for the RHS type is sought * and returned into *rhs_opno if rhs_opno isn't NULL. * * If the given operator is not cross-type, the results should be the same * operator, but in cross-type situations they will be different. * * Returns true if able to find the requested operator(s), false if not. * (This indicates that the operator should not have been marked oprcanhash.) */ bool get_compatible_hash_operators(Oid opno, Oid *lhs_opno, Oid *rhs_opno) { return get_compatible_hash_operators_and_family(opno, lhs_opno, rhs_opno, NULL); } /* * Extended version of get_compatible_hash_operators, which also returns * the operator family that the returned operators belong to. */ bool get_compatible_hash_operators_and_family(Oid opno, Oid *lhs_opno, Oid *rhs_opno, Oid *opfamily) { bool result = false; CatCList *catlist; int i; Oid this_opfamily = InvalidOid; /* Ensure output args are initialized on failure */ if (lhs_opno) *lhs_opno = InvalidOid; if (rhs_opno) *rhs_opno = InvalidOid; if (opfamily) *opfamily = InvalidOid; /* * Search pg_amop to see if the target operator is registered as the "=" * operator of any hash opfamily. If the operator is registered in * multiple opfamilies, assume we can use any one. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); this_opfamily = aform->amopfamily; if (aform->amopmethod == HASH_AM_OID && aform->amopstrategy == HTEqualStrategyNumber) { /* No extra lookup needed if given operator is single-type */ if (aform->amoplefttype == aform->amoprighttype) { if (lhs_opno) *lhs_opno = opno; if (rhs_opno) *rhs_opno = opno; result = true; break; } /* * Get the matching single-type operator(s). Failure probably * shouldn't happen --- it implies a bogus opfamily --- but * continue looking if so. */ if (lhs_opno) { *lhs_opno = get_opfamily_member(aform->amopfamily, aform->amoplefttype, aform->amoplefttype, HTEqualStrategyNumber); if (!OidIsValid(*lhs_opno)) continue; /* Matching LHS found, done if caller doesn't want RHS */ if (!rhs_opno) { result = true; break; } } if (rhs_opno) { *rhs_opno = get_opfamily_member(aform->amopfamily, aform->amoprighttype, aform->amoprighttype, HTEqualStrategyNumber); if (!OidIsValid(*rhs_opno)) { /* Forget any LHS operator from this opfamily */ if (lhs_opno) *lhs_opno = InvalidOid; continue; } /* Matching RHS found, so done */ result = true; break; } } } if (result && opfamily) *opfamily = this_opfamily; ReleaseSysCacheList(catlist); return result; } Oid get_compatible_hash_opfamily(Oid opno) { Oid result = InvalidOid; CatCList *catlist; int i; /* * Search pg_amop to see if the target operator is registered as the "=" * operator of any hash opfamily. If the operator is registered in * multiple opfamilies, assume we can use any one. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); if (aform->amopmethod == HASH_AM_OID && aform->amopstrategy == HTEqualStrategyNumber) { result = aform->amopfamily; break; } } ReleaseSysCacheList(catlist); return result; } Oid get_compatible_legacy_hash_opfamily(Oid opno) { Oid result = InvalidOid; CatCList *catlist; int i; /* * Search pg_amop to see if the target operator is registered as the "=" * operator of any hash opfamily. If the operator is registered in * multiple opfamilies, assume we can use any one. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); if (aform->amopmethod == HASH_AM_OID && aform->amopstrategy == HTEqualStrategyNumber) { Oid hashfunc = cdb_hashproc_in_opfamily(aform->amopfamily, aform->amoplefttype); if (isLegacyCdbHashFunction(hashfunc)) { result = aform->amopfamily; break; } } } ReleaseSysCacheList(catlist); return result; } /* * get_op_hash_functions * Get the OID(s) of the standard hash support function(s) compatible with * the given operator, operating on its LHS and/or RHS datatype as required. * * A function for the LHS type is sought and returned into *lhs_procno if * lhs_procno isn't NULL. Similarly, a function for the RHS type is sought * and returned into *rhs_procno if rhs_procno isn't NULL. * * If the given operator is not cross-type, the results should be the same * function, but in cross-type situations they will be different. * * Returns true if able to find the requested function(s), false if not. * (This indicates that the operator should not have been marked oprcanhash.) */ bool get_op_hash_functions(Oid opno, RegProcedure *lhs_procno, RegProcedure *rhs_procno) { bool result = false; CatCList *catlist; int i; /* Ensure output args are initialized on failure */ if (lhs_procno) *lhs_procno = InvalidOid; if (rhs_procno) *rhs_procno = InvalidOid; /* * Search pg_amop to see if the target operator is registered as the "=" * operator of any hash opfamily. If the operator is registered in * multiple opfamilies, assume we can use any one. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple tuple = &catlist->members[i]->tuple; Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple); if (aform->amopmethod == HASH_AM_OID && aform->amopstrategy == HTEqualStrategyNumber) { /* * Get the matching support function(s). Failure probably * shouldn't happen --- it implies a bogus opfamily --- but * continue looking if so. */ if (lhs_procno) { *lhs_procno = get_opfamily_proc(aform->amopfamily, aform->amoplefttype, aform->amoplefttype, HASHSTANDARD_PROC); if (!OidIsValid(*lhs_procno)) continue; /* Matching LHS found, done if caller doesn't want RHS */ if (!rhs_procno) { result = true; break; } /* Only one lookup needed if given operator is single-type */ if (aform->amoplefttype == aform->amoprighttype) { *rhs_procno = *lhs_procno; result = true; break; } } if (rhs_procno) { *rhs_procno = get_opfamily_proc(aform->amopfamily, aform->amoprighttype, aform->amoprighttype, HASHSTANDARD_PROC); if (!OidIsValid(*rhs_procno)) { /* Forget any LHS function from this opfamily */ if (lhs_procno) *lhs_procno = InvalidOid; continue; } /* Matching RHS found, so done */ result = true; break; } } } ReleaseSysCacheList(catlist); return result; } /* * get_op_btree_interpretation * Given an operator's OID, find out which btree opfamilies it belongs to, * and what properties it has within each one. The results are returned * as a palloc'd list of OpBtreeInterpretation structs. * * In addition to the normal btree operators, we consider a <> operator to be * a "member" of an opfamily if its negator is an equality operator of the * opfamily. ROWCOMPARE_NE is returned as the strategy number for this case. */ List * get_op_btree_interpretation(Oid opno) { List *result = NIL; OpBtreeInterpretation *thisresult; CatCList *catlist; int i; /* * Find all the pg_amop entries containing the operator. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple op_tuple = &catlist->members[i]->tuple; Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple); StrategyNumber op_strategy; /* must be btree */ if (op_form->amopmethod != BTREE_AM_OID) continue; /* Get the operator's btree strategy number */ op_strategy = (StrategyNumber) op_form->amopstrategy; Assert(op_strategy >= 1 && op_strategy <= 5); thisresult = (OpBtreeInterpretation *) palloc(sizeof(OpBtreeInterpretation)); thisresult->opfamily_id = op_form->amopfamily; thisresult->strategy = op_strategy; thisresult->oplefttype = op_form->amoplefttype; thisresult->oprighttype = op_form->amoprighttype; result = lappend(result, thisresult); } ReleaseSysCacheList(catlist); /* * If we didn't find any btree opfamily containing the operator, perhaps * it is a <> operator. See if it has a negator that is in an opfamily. */ if (result == NIL) { Oid op_negator = get_negator(opno); if (OidIsValid(op_negator)) { catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(op_negator)); for (i = 0; i < catlist->n_members; i++) { HeapTuple op_tuple = &catlist->members[i]->tuple; Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple); StrategyNumber op_strategy; /* must be btree */ if (op_form->amopmethod != BTREE_AM_OID) continue; /* Get the operator's btree strategy number */ op_strategy = (StrategyNumber) op_form->amopstrategy; Assert(op_strategy >= 1 && op_strategy <= 5); /* Only consider negators that are = */ if (op_strategy != BTEqualStrategyNumber) continue; /* OK, report it with "strategy" ROWCOMPARE_NE */ thisresult = (OpBtreeInterpretation *) palloc(sizeof(OpBtreeInterpretation)); thisresult->opfamily_id = op_form->amopfamily; thisresult->strategy = ROWCOMPARE_NE; thisresult->oplefttype = op_form->amoplefttype; thisresult->oprighttype = op_form->amoprighttype; result = lappend(result, thisresult); } ReleaseSysCacheList(catlist); } } return result; } /* * equality_ops_are_compatible * Return true if the two given equality operators have compatible * semantics. * * This is trivially true if they are the same operator. Otherwise, * we look to see if they can be found in the same btree or hash opfamily. * Either finding allows us to assume that they have compatible notions * of equality. (The reason we need to do these pushups is that one might * be a cross-type operator; for instance int24eq vs int4eq.) */ bool equality_ops_are_compatible(Oid opno1, Oid opno2) { bool result; CatCList *catlist; int i; /* Easy if they're the same operator */ if (opno1 == opno2) return true; /* * We search through all the pg_amop entries for opno1. */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno1)); result = false; for (i = 0; i < catlist->n_members; i++) { HeapTuple op_tuple = &catlist->members[i]->tuple; Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple); /* must be btree or hash */ if (op_form->amopmethod == BTREE_AM_OID || op_form->amopmethod == HASH_AM_OID) { if (op_in_opfamily(opno2, op_form->amopfamily)) { result = true; break; } } } ReleaseSysCacheList(catlist); return result; } /* ---------- AMPROC CACHES ---------- */ /* * get_opfamily_proc * Get the OID of the specified support function * for the specified opfamily and datatypes. * * Returns InvalidOid if there is no pg_amproc entry for the given keys. */ Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum) { HeapTuple tp; Form_pg_amproc amproc_tup; RegProcedure result; tp = SearchSysCache4(AMPROCNUM, ObjectIdGetDatum(opfamily), ObjectIdGetDatum(lefttype), ObjectIdGetDatum(righttype), Int16GetDatum(procnum)); if (!HeapTupleIsValid(tp)) return InvalidOid; amproc_tup = (Form_pg_amproc) GETSTRUCT(tp); result = amproc_tup->amproc; ReleaseSysCache(tp); return result; } /* ---------- ATTRIBUTE CACHES ---------- */ /* * get_attname * Given the relation id and the attribute number, return the "attname" * field from the attribute relation as a palloc'ed string. * * If no such attribute exists and missing_ok is true, NULL is returned; * otherwise a not-intended-for-user-consumption error is thrown. */ char * get_attname(Oid relid, AttrNumber attnum, bool missing_ok) { HeapTuple tp; tp = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attnum)); if (HeapTupleIsValid(tp)) { Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(att_tup->attname)); ReleaseSysCache(tp); return result; } if (!missing_ok) elog(ERROR, "cache lookup failed for attribute %d of relation %u", attnum, relid); return NULL; } /* * get_attnum * * Given the relation id and the attribute name, * return the "attnum" field from the attribute relation. * * Returns InvalidAttrNumber if the attr doesn't exist (or is dropped). */ AttrNumber get_attnum(Oid relid, const char *attname) { HeapTuple tp; tp = SearchSysCacheAttName(relid, attname); if (HeapTupleIsValid(tp)) { Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); AttrNumber result; result = att_tup->attnum; ReleaseSysCache(tp); return result; } else return InvalidAttrNumber; } /* * get_attgenerated * * Given the relation id and the attribute name, * return the "attgenerated" field from the attribute relation. * * Errors if not found. * * Since not generated is represented by '\0', this can also be used as a * Boolean test. */ char get_attgenerated(Oid relid, AttrNumber attnum) { HeapTuple tp; Form_pg_attribute att_tup; char result; tp = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attnum)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for attribute %d of relation %u", attnum, relid); att_tup = (Form_pg_attribute) GETSTRUCT(tp); result = att_tup->attgenerated; ReleaseSysCache(tp); return result; } /* * get_atttype * * Given the relation OID and the attribute number with the relation, * return the attribute type OID. */ Oid get_atttype(Oid relid, AttrNumber attnum) { HeapTuple tp; tp = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attnum)); if (HeapTupleIsValid(tp)) { Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); Oid result; result = att_tup->atttypid; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_atttypetypmodcoll * * A three-fer: given the relation id and the attribute number, * fetch atttypid, atttypmod, and attcollation in a single cache lookup. * * Unlike the otherwise-similar get_atttype, this routine * raises an error if it can't obtain the information. */ void get_atttypetypmodcoll(Oid relid, AttrNumber attnum, Oid *typid, int32 *typmod, Oid *collid) { HeapTuple tp; Form_pg_attribute att_tup; /* CDB: Get type for sysattr even if relid is no good (e.g. SubqueryScan) */ if (attnum < 0 && attnum > FirstLowInvalidHeapAttributeNumber) { const FormData_pg_attribute *sysatt_tup; sysatt_tup = SystemAttributeDefinition(attnum); *typid = sysatt_tup->atttypid; *typmod = sysatt_tup->atttypmod; *collid = sysatt_tup->attcollation; return; } tp = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attnum)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for attribute %d of relation %u", attnum, relid); att_tup = (Form_pg_attribute) GETSTRUCT(tp); *typid = att_tup->atttypid; *typmod = att_tup->atttypmod; *collid = att_tup->attcollation; ReleaseSysCache(tp); } /* ---------- COLLATION CACHE ---------- */ /* * get_collation_name * Returns the name of a given pg_collation entry. * * Returns a palloc'd copy of the string, or NULL if no such collation. * * NOTE: since collation name is not unique, be wary of code that uses this * for anything except preparing error messages. */ char * get_collation_name(Oid colloid) { HeapTuple tp; tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(colloid)); if (HeapTupleIsValid(tp)) { Form_pg_collation colltup = (Form_pg_collation) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(colltup->collname)); ReleaseSysCache(tp); return result; } else return NULL; } bool get_collation_isdeterministic(Oid colloid) { HeapTuple tp; Form_pg_collation colltup; bool result; tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(colloid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for collation %u", colloid); colltup = (Form_pg_collation) GETSTRUCT(tp); result = colltup->collisdeterministic; ReleaseSysCache(tp); return result; } /* ---------- CONSTRAINT CACHE ---------- */ /* * get_constraint_name * Returns the name of a given pg_constraint entry. * * Returns a palloc'd copy of the string, or NULL if no such constraint. * * NOTE: since constraint name is not unique, be wary of code that uses this * for anything except preparing error messages. */ char * get_constraint_name(Oid conoid) { HeapTuple tp; tp = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conoid)); if (HeapTupleIsValid(tp)) { Form_pg_constraint contup = (Form_pg_constraint) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(contup->conname)); ReleaseSysCache(tp); return result; } else return NULL; } /* ---------- LANGUAGE CACHE ---------- */ char * get_language_name(Oid langoid, bool missing_ok) { HeapTuple tp; tp = SearchSysCache1(LANGOID, ObjectIdGetDatum(langoid)); if (HeapTupleIsValid(tp)) { Form_pg_language lantup = (Form_pg_language) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(lantup->lanname)); ReleaseSysCache(tp); return result; } if (!missing_ok) elog(ERROR, "cache lookup failed for language %u", langoid); return NULL; } /* ---------- OPCLASS CACHE ---------- */ /* * get_opclass_family * * Returns the OID of the operator family the opclass belongs to. */ Oid get_opclass_family(Oid opclass) { HeapTuple tp; Form_pg_opclass cla_tup; Oid result; tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for opclass %u", opclass); cla_tup = (Form_pg_opclass) GETSTRUCT(tp); result = cla_tup->opcfamily; ReleaseSysCache(tp); return result; } /* * get_opclass_input_type * * Returns the OID of the datatype the opclass indexes. */ Oid get_opclass_input_type(Oid opclass) { HeapTuple tp; Form_pg_opclass cla_tup; Oid result; tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for opclass %u", opclass); cla_tup = (Form_pg_opclass) GETSTRUCT(tp); result = cla_tup->opcintype; ReleaseSysCache(tp); return result; } /* * get_opclass_opfamily_and_input_type * * Returns the OID of the operator family the opclass belongs to, * the OID of the datatype the opclass indexes */ bool get_opclass_opfamily_and_input_type(Oid opclass, Oid *opfamily, Oid *opcintype) { HeapTuple tp; Form_pg_opclass cla_tup; tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); if (!HeapTupleIsValid(tp)) return false; cla_tup = (Form_pg_opclass) GETSTRUCT(tp); *opfamily = cla_tup->opcfamily; *opcintype = cla_tup->opcintype; ReleaseSysCache(tp); return true; } /* ---------- OPERATOR CACHE ---------- */ /* * get_opcode * * Returns the regproc id of the routine used to implement an * operator given the operator oid. */ RegProcedure get_opcode(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); RegProcedure result; result = optup->oprcode; ReleaseSysCache(tp); return result; } else return (RegProcedure) InvalidOid; } /* * get_opname * returns the name of the operator with the given opno * * Note: returns a palloc'd copy of the string, or NULL if no such operator. */ char * get_opname(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(optup->oprname)); ReleaseSysCache(tp); return result; } else return NULL; } /* * get_op_rettype * Given operator oid, return the operator's result type. */ Oid get_op_rettype(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); Oid result; result = optup->oprresult; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * op_input_types * * Returns the left and right input datatypes for an operator * (InvalidOid if not relevant). */ void op_input_types(Oid opno, Oid *lefttype, Oid *righttype) { HeapTuple tp; Form_pg_operator optup; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (!HeapTupleIsValid(tp)) /* shouldn't happen */ elog(ERROR, "cache lookup failed for operator %u", opno); optup = (Form_pg_operator) GETSTRUCT(tp); *lefttype = optup->oprleft; *righttype = optup->oprright; ReleaseSysCache(tp); } /* * op_mergejoinable * * Returns true if the operator is potentially mergejoinable. (The planner * will fail to find any mergejoin plans unless there are suitable btree * opfamily entries for this operator and associated sortops. The pg_operator * flag is just a hint to tell the planner whether to bother looking.) * * In some cases (currently only array_eq and record_eq), mergejoinability * depends on the specific input data type the operator is invoked for, so * that must be passed as well. We currently assume that only one input's type * is needed to check this --- by convention, pass the left input's data type. */ bool op_mergejoinable(Oid opno, Oid inputtype) { bool result = false; HeapTuple tp; TypeCacheEntry *typentry; /* * For array_eq or record_eq, we can sort if the element or field types * are all sortable. We could implement all the checks for that here, but * the typcache already does that and caches the results too, so let's * rely on the typcache. */ if (opno == ARRAY_EQ_OP) { typentry = lookup_type_cache(inputtype, TYPECACHE_CMP_PROC); if (typentry->cmp_proc == F_BTARRAYCMP) result = true; } else if (opno == RECORD_EQ_OP) { typentry = lookup_type_cache(inputtype, TYPECACHE_CMP_PROC); if (typentry->cmp_proc == F_BTRECORDCMP) result = true; } else { /* For all other operators, rely on pg_operator.oprcanmerge */ tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); result = optup->oprcanmerge; ReleaseSysCache(tp); } } return result; } /* * op_hashjoinable * * Returns true if the operator is hashjoinable. (There must be a suitable * hash opfamily entry for this operator if it is so marked.) * * In some cases (currently only array_eq), hashjoinability depends on the * specific input data type the operator is invoked for, so that must be * passed as well. We currently assume that only one input's type is needed * to check this --- by convention, pass the left input's data type. */ bool op_hashjoinable(Oid opno, Oid inputtype) { bool result = false; HeapTuple tp; TypeCacheEntry *typentry; /* As in op_mergejoinable, let the typcache handle the hard cases */ /* Eventually we'll need a similar case for record_eq ... */ if (opno == ARRAY_EQ_OP) { typentry = lookup_type_cache(inputtype, TYPECACHE_HASH_PROC); if (typentry->hash_proc == F_HASH_ARRAY) result = true; } else { /* For all other operators, rely on pg_operator.oprcanhash */ tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); result = optup->oprcanhash; ReleaseSysCache(tp); } } return result; } /* * op_strict * * Get the proisstrict flag for the operator's underlying function. */ bool op_strict(Oid opno) { RegProcedure funcid = get_opcode(opno); if (funcid == (RegProcedure) InvalidOid) elog(ERROR, "operator %u does not exist", opno); return func_strict((Oid) funcid); } /* * op_volatile * * Get the provolatile flag for the operator's underlying function. */ char op_volatile(Oid opno) { RegProcedure funcid = get_opcode(opno); if (funcid == (RegProcedure) InvalidOid) elog(ERROR, "operator %u does not exist", opno); return func_volatile((Oid) funcid); } /* * get_commutator * * Returns the corresponding commutator of an operator. */ Oid get_commutator(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); Oid result; result = optup->oprcom; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_negator * * Returns the corresponding negator of an operator. */ Oid get_negator(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); Oid result; result = optup->oprnegate; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_oprrest * * Returns procedure id for computing selectivity of an operator. */ RegProcedure get_oprrest(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); RegProcedure result; result = optup->oprrest; ReleaseSysCache(tp); return result; } else return (RegProcedure) InvalidOid; } /* * get_oprjoin * * Returns procedure id for computing selectivity of a join. */ RegProcedure get_oprjoin(Oid opno) { HeapTuple tp; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (HeapTupleIsValid(tp)) { Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp); RegProcedure result; result = optup->oprjoin; ReleaseSysCache(tp); return result; } else return (RegProcedure) InvalidOid; } /* ---------- TRIGGER CACHE ---------- */ /* Does table have update triggers? */ bool has_update_triggers(Oid relid) { Relation relation; bool result = false; /* Assume the caller already holds a suitable lock. */ relation = table_open(relid, NoLock); if (relation->rd_rel->relhastriggers) { bool found = false; if (relation->trigdesc == NULL) RelationBuildTriggers(relation); if (relation->trigdesc) { for (int i = 0; i < relation->trigdesc->numtriggers && !found; i++) { Trigger trigger = relation->trigdesc->triggers[i]; found = trigger_enabled(trigger.tgoid) && (get_trigger_type(trigger.tgoid) & TRIGGER_TYPE_UPDATE) == TRIGGER_TYPE_UPDATE; if (found) break; } } /* GPDB_96_MERGE_FIXME: Why is this not allowed? */ if (found || child_triggers(relation->rd_id, TRIGGER_TYPE_UPDATE)) result = true; } table_close(relation, NoLock); return result; } /* * get_trigger_type * Given trigger id, return the trigger's type */ int32 get_trigger_type(Oid triggerid) { Relation rel; HeapTuple tp; int32 result = -1; ScanKeyData scankey; SysScanDesc sscan; ScanKeyInit(&scankey, Anum_pg_trigger_oid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(triggerid)); rel = table_open(TriggerRelationId, AccessShareLock); sscan = systable_beginscan(rel, TriggerOidIndexId, true, NULL, 1, &scankey); tp = systable_getnext(sscan); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for trigger %u", triggerid); result = ((Form_pg_trigger) GETSTRUCT(tp))->tgtype; systable_endscan(sscan); table_close(rel, AccessShareLock); return result; } /* * trigger_enabled * Given trigger id, return the trigger's enabled flag */ bool trigger_enabled(Oid triggerid) { Relation rel; HeapTuple tp; bool result; ScanKeyData scankey; SysScanDesc sscan; ScanKeyInit(&scankey, Anum_pg_trigger_oid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(triggerid)); rel = table_open(TriggerRelationId, AccessShareLock); sscan = systable_beginscan(rel, TriggerOidIndexId, true, NULL, 1, &scankey); tp = systable_getnext(sscan); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for trigger %u", triggerid); char tgenabled = ((Form_pg_trigger) GETSTRUCT(tp))->tgenabled; switch (tgenabled) { case TRIGGER_FIRES_ON_ORIGIN: /* fallthrough */ /* * FIXME: we should probably return false when * SessionReplicationRole isn't SESSION_REPLICATION_ROLE_ORIGIN, * but does that means we'll also have to flush ORCA's metadata * cache on every assignment of session_replication_role? */ case TRIGGER_FIRES_ALWAYS: result = true; break; case TRIGGER_FIRES_ON_REPLICA: case TRIGGER_DISABLED: result = false; break; default: elog(ERROR, "Unknown trigger type: %c", tgenabled); } systable_endscan(sscan); table_close(rel, AccessShareLock); return result; } /* ---------- FUNCTION CACHE ---------- */ /* * get_func_name * returns the name of the function with the given funcid * * Note: returns a palloc'd copy of the string, or NULL if no such function. */ char * get_func_name(Oid funcid) { HeapTuple tp; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (HeapTupleIsValid(tp)) { Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(functup->proname)); ReleaseSysCache(tp); return result; } else return NULL; } /* * get_type_name * returns the name of the type with the given oid * * Note: returns a palloc'd copy of the string, or NULL if no such type. */ char * get_type_name(Oid oid) { HeapTuple tp; tp = SearchSysCache(TYPEOID, ObjectIdGetDatum(oid), 0, 0, 0); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(typtup->typname)); ReleaseSysCache(tp); return result; } else return NULL; } /* * get_func_namespace * * Returns the pg_namespace OID associated with a given function. */ Oid get_func_namespace(Oid funcid) { HeapTuple tp; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (HeapTupleIsValid(tp)) { Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp); Oid result; result = functup->pronamespace; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* ---------- RELATION CACHE ---------- */ /* * get_func_rettype * Given procedure id, return the function's result type. */ Oid get_func_rettype(Oid funcid) { HeapTuple tp; Oid result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->prorettype; ReleaseSysCache(tp); return result; } /* * get_agg_transtype * Given aggregate id, return the aggregate transition function's result type. */ Oid get_agg_transtype(Oid aggid) { HeapTuple tp; Oid result; tp = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(aggid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for aggregate %u", aggid); result = ((Form_pg_aggregate) GETSTRUCT(tp))->aggtranstype; ReleaseSysCache(tp); return result; } /* * is_ordered_agg * Given aggregate id, check if it is an ordered aggregate */ bool is_agg_ordered(Oid aggid) { HeapTuple aggTuple; char aggkind; bool isnull = false; aggTuple = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(aggid)); if (!HeapTupleIsValid(aggTuple)) elog(ERROR, "cache lookup failed for aggregate %u", aggid); aggkind = DatumGetChar(SysCacheGetAttr(AGGFNOID, aggTuple, Anum_pg_aggregate_aggkind, &isnull)); Assert(!isnull); ReleaseSysCache(aggTuple); return AGGKIND_IS_ORDERED_SET(aggkind); } /* * is_agg_partial_capable * Given aggregate id, check if it can be used in 2-phase aggregation. * * It must have a combine function, and if the transition type is 'internal', * also serial/deserial functions. */ bool is_agg_partial_capable(Oid aggid) { HeapTuple aggTuple; Form_pg_aggregate aggform; bool result = true; aggTuple = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(aggid)); if (!HeapTupleIsValid(aggTuple)) elog(ERROR, "cache lookup failed for aggregate %u", aggid); aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple); if (aggform->aggcombinefn == InvalidOid) result = false; else if (aggform->aggtranstype == INTERNALOID) { if (aggform->aggserialfn == InvalidOid || aggform->aggdeserialfn == InvalidOid) { result = false; } } ReleaseSysCache(aggTuple); return result; } /* * get_rel_tablespace * * Returns the pg_tablespace OID associated with a given relation. * * Note: InvalidOid might mean either that we couldn't find the relation, * or that it is in the database's default tablespace. */ Oid get_rel_tablespace(Oid relid) { HeapTuple tp; tp = SearchSysCache(RELOID, ObjectIdGetDatum(relid), 0, 0, 0); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); Oid result; result = reltup->reltablespace; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_func_nargs * Given procedure id, return the number of arguments. */ int get_func_nargs(Oid funcid) { HeapTuple tp; int result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->pronargs; ReleaseSysCache(tp); return result; } /* * get_func_signature * Given procedure id, return the function's argument and result types. * (The return value is the result type.) * * The arguments are returned as a palloc'd array. */ Oid get_func_signature(Oid funcid, Oid **argtypes, int *nargs) { HeapTuple tp; Form_pg_proc procstruct; Oid result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); procstruct = (Form_pg_proc) GETSTRUCT(tp); result = procstruct->prorettype; *nargs = (int) procstruct->pronargs; Assert(*nargs == procstruct->proargtypes.dim1); *argtypes = (Oid *) palloc(*nargs * sizeof(Oid)); memcpy(*argtypes, procstruct->proargtypes.values, *nargs * sizeof(Oid)); ReleaseSysCache(tp); return result; } /* * pfree_ptr_array * Free an array of pointers, after freeing each individual element */ void pfree_ptr_array(char **ptrarray, int nelements) { int i; if (NULL == ptrarray) return; for (i = 0; i < nelements; i++) { if (NULL != ptrarray[i]) { pfree(ptrarray[i]); } } pfree(ptrarray); } /* * get_func_output_arg_types * Given procedure id, return the function's output argument types */ List * get_func_output_arg_types(Oid funcid) { HeapTuple tp; int numargs; Oid *argtypes = NULL; char **argnames = NULL; char *argmodes = NULL; List *l_argtypes = NIL; int i; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); numargs = get_func_arg_info(tp, &argtypes, &argnames, &argmodes); if (NULL == argmodes) { pfree_ptr_array(argnames, numargs); if (NULL != argtypes) { pfree(argtypes); } ReleaseSysCache(tp); return NULL; } for (i = 0; i < numargs; i++) { Oid argtype = argtypes[i]; char argmode = argmodes[i]; if (PROARGMODE_INOUT == argmode || PROARGMODE_OUT == argmode || PROARGMODE_TABLE == argmode) { l_argtypes = lappend_oid(l_argtypes, argtype); } } pfree_ptr_array(argnames, numargs); pfree(argtypes); pfree(argmodes); ReleaseSysCache(tp); return l_argtypes; } /* * get_func_arg_types * Given procedure id, return all the function's argument types */ List * get_func_arg_types(Oid funcid) { HeapTuple tp; Form_pg_proc procstruct; oidvector *args; List *result = NIL; tp = SearchSysCache(PROCOID, ObjectIdGetDatum(funcid), 0, 0, 0); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); procstruct = (Form_pg_proc) GETSTRUCT(tp); args = &procstruct->proargtypes; for (int i = 0; i < args->dim1; i++) { result = lappend_oid(result, args->values[i]); } ReleaseSysCache(tp); return result; } /* * get_func_variadictype * Given procedure id, return the function's provariadic field. */ Oid get_func_variadictype(Oid funcid) { HeapTuple tp; Oid result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->provariadic; ReleaseSysCache(tp); return result; } /* * get_func_retset * Given procedure id, return the function's proretset flag. */ bool get_func_retset(Oid funcid) { HeapTuple tp; bool result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->proretset; ReleaseSysCache(tp); return result; } /* * func_strict * Given procedure id, return the function's proisstrict flag. */ bool func_strict(Oid funcid) { HeapTuple tp; bool result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->proisstrict; ReleaseSysCache(tp); return result; } /* * func_volatile * Given procedure id, return the function's provolatile flag. */ char func_volatile(Oid funcid) { HeapTuple tp; char result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->provolatile; ReleaseSysCache(tp); return result; } /* * func_parallel * Given procedure id, return the function's proparallel flag. */ char func_parallel(Oid funcid) { HeapTuple tp; char result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->proparallel; ReleaseSysCache(tp); return result; } /* * get_func_prokind * Given procedure id, return the routine kind. */ char get_func_prokind(Oid funcid) { HeapTuple tp; char result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->prokind; ReleaseSysCache(tp); return result; } /* * get_func_leakproof * Given procedure id, return the function's leakproof field. */ bool get_func_leakproof(Oid funcid) { HeapTuple tp; bool result; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = ((Form_pg_proc) GETSTRUCT(tp))->proleakproof; ReleaseSysCache(tp); return result; } /* * get_func_support * * Returns the support function OID associated with a given function, * or InvalidOid if there is none. */ RegProcedure get_func_support(Oid funcid) { HeapTuple tp; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (HeapTupleIsValid(tp)) { Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp); RegProcedure result; result = functup->prosupport; ReleaseSysCache(tp); return result; } else return (RegProcedure) InvalidOid; } /* * func_data_access * Given procedure id, return the function's data access flag. */ char func_data_access(Oid funcid) { HeapTuple tp; char result; bool isnull; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = DatumGetChar( SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_prodataaccess, &isnull)); ReleaseSysCache(tp); Assert(!isnull); return result; } /* * func_exec_location * Given procedure id, return the function's proexeclocation field */ char func_exec_location(Oid funcid) { HeapTuple tp; char result; bool isnull; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); result = DatumGetChar( SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_proexeclocation, &isnull)); ReleaseSysCache(tp); Assert(!isnull); return result; } /* ---------- RELATION CACHE ---------- */ /* * get_relname_relid * Given name and namespace of a relation, look up the OID. * * Returns InvalidOid if there is no such relation. */ Oid get_relname_relid(const char *relname, Oid relnamespace) { return GetSysCacheOid2(RELNAMENSP, Anum_pg_class_oid, PointerGetDatum(relname), ObjectIdGetDatum(relnamespace)); } #ifdef NOT_USED /* * get_relnatts * * Returns the number of attributes for a given relation. */ int get_relnatts(Oid relid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); int result; result = reltup->relnatts; ReleaseSysCache(tp); return result; } else return InvalidAttrNumber; } #endif /* * get_rel_name * Returns the name of a given relation. * * Returns a palloc'd copy of the string, or NULL if no such relation. * * NOTE: since relation name is not unique, be wary of code that uses this * for anything except preparing error messages. */ char * get_rel_name(Oid relid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(reltup->relname)); ReleaseSysCache(tp); return result; } else return NULL; } /* * get_rel_namespace * * Returns the pg_namespace OID associated with a given relation. */ Oid get_rel_namespace(Oid relid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); Oid result; result = reltup->relnamespace; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_rel_type_id * * Returns the pg_type OID associated with a given relation. * * Note: not all pg_class entries have associated pg_type OIDs; so be * careful to check for InvalidOid result. */ Oid get_rel_type_id(Oid relid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); Oid result; result = reltup->reltype; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_rel_relkind * * Returns the relkind associated with a given relation. */ char get_rel_relkind(Oid relid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); char result; result = reltup->relkind; ReleaseSysCache(tp); return result; } else return '\0'; } /* * get_rel_relispartition * * Returns the relispartition flag associated with a given relation. */ bool get_rel_relispartition(Oid relid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); bool result; result = reltup->relispartition; ReleaseSysCache(tp); return result; } else return false; } /* * get_rel_persistence * * Returns the relpersistence associated with a given relation. */ char get_rel_persistence(Oid relid) { HeapTuple tp; Form_pg_class reltup; char result; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for relation %u", relid); reltup = (Form_pg_class) GETSTRUCT(tp); result = reltup->relpersistence; ReleaseSysCache(tp); return result; } /* ---------- TRANSFORM CACHE ---------- */ Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes) { HeapTuple tup; if (!list_member_oid(trftypes, typid)) return InvalidOid; tup = SearchSysCache2(TRFTYPELANG, typid, langid); if (HeapTupleIsValid(tup)) { Oid funcid; funcid = ((Form_pg_transform) GETSTRUCT(tup))->trffromsql; ReleaseSysCache(tup); return funcid; } else return InvalidOid; } Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes) { HeapTuple tup; if (!list_member_oid(trftypes, typid)) return InvalidOid; tup = SearchSysCache2(TRFTYPELANG, typid, langid); if (HeapTupleIsValid(tup)) { Oid funcid; funcid = ((Form_pg_transform) GETSTRUCT(tup))->trftosql; ReleaseSysCache(tup); return funcid; } else return InvalidOid; } /* ---------- TYPE CACHE ---------- */ /* * get_typisdefined * * Given the type OID, determine whether the type is defined * (if not, it's only a shell). */ bool get_typisdefined(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); bool result; result = typtup->typisdefined; ReleaseSysCache(tp); return result; } else return false; } /* * get_typlen * * Given the type OID, return the length of the type. */ int16 get_typlen(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); int16 result; result = typtup->typlen; ReleaseSysCache(tp); return result; } else return 0; } /* * get_typbyval * * Given the type OID, determine whether the type is returned by value or * not. Returns true if by value, false if by reference. */ bool get_typbyval(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); bool result; result = typtup->typbyval; ReleaseSysCache(tp); return result; } else return false; } /* * get_typlenbyval * * A two-fer: given the type OID, return both typlen and typbyval. * * Since both pieces of info are needed to know how to copy a Datum, * many places need both. Might as well get them with one cache lookup * instead of two. Also, this routine raises an error instead of * returning a bogus value when given a bad type OID. */ void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval) { HeapTuple tp; Form_pg_type typtup; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for type %u", typid); typtup = (Form_pg_type) GETSTRUCT(tp); *typlen = typtup->typlen; *typbyval = typtup->typbyval; ReleaseSysCache(tp); } /* * get_typlenbyvalalign * * A three-fer: given the type OID, return typlen, typbyval, typalign. */ void get_typlenbyvalalign(Oid typid, int16 *typlen, bool *typbyval, char *typalign) { HeapTuple tp; Form_pg_type typtup; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for type %u", typid); typtup = (Form_pg_type) GETSTRUCT(tp); *typlen = typtup->typlen; *typbyval = typtup->typbyval; *typalign = typtup->typalign; ReleaseSysCache(tp); } /* * getTypeIOParam * Given a pg_type row, select the type OID to pass to I/O functions * * Formerly, all I/O functions were passed pg_type.typelem as their second * parameter, but we now have a more complex rule about what to pass. * This knowledge is intended to be centralized here --- direct references * to typelem elsewhere in the code are wrong, if they are associated with * I/O calls and not with actual subscripting operations! (But see * bootstrap.c's boot_get_type_io_data() if you need to change this.) * * As of PostgreSQL 8.1, output functions receive only the value itself * and not any auxiliary parameters, so the name of this routine is now * a bit of a misnomer ... it should be getTypeInputParam. */ Oid getTypeIOParam(HeapTuple typeTuple) { Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTuple); /* * Array types get their typelem as parameter; everybody else gets their * own type OID as parameter. */ if (OidIsValid(typeStruct->typelem)) return typeStruct->typelem; else return typeStruct->oid; } /* * get_type_io_data * * A six-fer: given the type OID, return typlen, typbyval, typalign, * typdelim, typioparam, and IO function OID. The IO function * returned is controlled by IOFuncSelector */ void get_type_io_data(Oid typid, IOFuncSelector which_func, int16 *typlen, bool *typbyval, char *typalign, char *typdelim, Oid *typioparam, Oid *func) { HeapTuple typeTuple; Form_pg_type typeStruct; /* * In bootstrap mode, pass it off to bootstrap.c. This hack allows us to * use array_in and array_out during bootstrap. */ if (IsBootstrapProcessingMode()) { Oid typinput; Oid typoutput; boot_get_type_io_data(typid, typlen, typbyval, typalign, typdelim, typioparam, &typinput, &typoutput); switch (which_func) { case IOFunc_input: *func = typinput; break; case IOFunc_output: *func = typoutput; break; default: elog(ERROR, "binary I/O not supported during bootstrap"); break; } return; } typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", typid); typeStruct = (Form_pg_type) GETSTRUCT(typeTuple); *typlen = typeStruct->typlen; *typbyval = typeStruct->typbyval; *typalign = typeStruct->typalign; *typdelim = typeStruct->typdelim; *typioparam = getTypeIOParam(typeTuple); switch (which_func) { case IOFunc_input: *func = typeStruct->typinput; break; case IOFunc_output: *func = typeStruct->typoutput; break; case IOFunc_receive: *func = typeStruct->typreceive; break; case IOFunc_send: *func = typeStruct->typsend; break; } ReleaseSysCache(typeTuple); } #ifdef NOT_USED char get_typalign(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); char result; result = typtup->typalign; ReleaseSysCache(tp); return result; } else return 'i'; } #endif char get_typstorage(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); char result; result = typtup->typstorage; ReleaseSysCache(tp); return result; } else return 'p'; } /* * get_typdefault * Given a type OID, return the type's default value, if any. * * The result is a palloc'd expression node tree, or NULL if there * is no defined default for the datatype. * * NB: caller should be prepared to coerce result to correct datatype; * the returned expression tree might produce something of the wrong type. */ Node * get_typdefault(Oid typid) { HeapTuple typeTuple; Form_pg_type type; Datum datum; bool isNull; Node *expr; typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", typid); type = (Form_pg_type) GETSTRUCT(typeTuple); /* * typdefault and typdefaultbin are potentially null, so don't try to * access 'em as struct fields. Must do it the hard way with * SysCacheGetAttr. */ datum = SysCacheGetAttr(TYPEOID, typeTuple, Anum_pg_type_typdefaultbin, &isNull); if (!isNull) { /* We have an expression default */ expr = stringToNode(TextDatumGetCString(datum)); } else { /* Perhaps we have a plain literal default */ datum = SysCacheGetAttr(TYPEOID, typeTuple, Anum_pg_type_typdefault, &isNull); if (!isNull) { char *strDefaultVal; /* Convert text datum to C string */ strDefaultVal = TextDatumGetCString(datum); /* Convert C string to a value of the given type */ datum = OidInputFunctionCall(type->typinput, strDefaultVal, getTypeIOParam(typeTuple), -1); /* Build a Const node containing the value */ expr = (Node *) makeConst(typid, -1, type->typcollation, type->typlen, datum, false, type->typbyval); pfree(strDefaultVal); } else { /* No default */ expr = NULL; } } ReleaseSysCache(typeTuple); return expr; } /* * getBaseType * If the given type is a domain, return its base type; * otherwise return the type's own OID. */ Oid getBaseType(Oid typid) { int32 typmod = -1; return getBaseTypeAndTypmod(typid, &typmod); } /* * getBaseTypeAndTypmod * If the given type is a domain, return its base type and typmod; * otherwise return the type's own OID, and leave *typmod unchanged. * * Note that the "applied typmod" should be -1 for every domain level * above the bottommost; therefore, if the passed-in typid is indeed * a domain, *typmod should be -1. */ Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod) { /* * We loop to find the bottom base type in a stack of domains. */ for (;;) { HeapTuple tup; Form_pg_type typTup; tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for type %u", typid); typTup = (Form_pg_type) GETSTRUCT(tup); if (typTup->typtype != TYPTYPE_DOMAIN) { /* Not a domain, so done */ ReleaseSysCache(tup); break; } Assert(*typmod == -1); typid = typTup->typbasetype; *typmod = typTup->typtypmod; ReleaseSysCache(tup); } return typid; } /* * get_typavgwidth * * Given a type OID and a typmod value (pass -1 if typmod is unknown), * estimate the average width of values of the type. This is used by * the planner, which doesn't require absolutely correct results; * it's OK (and expected) to guess if we don't know for sure. */ int32 get_typavgwidth(Oid typid, int32 typmod) { int typlen = get_typlen(typid); int32 maxwidth; /* * Easy if it's a fixed-width type */ if (typlen > 0) return typlen; /* * type_maximum_size knows the encoding of typmod for some datatypes; * don't duplicate that knowledge here. */ maxwidth = type_maximum_size(typid, typmod); if (maxwidth > 0) { /* * For BPCHAR, the max width is also the only width. Otherwise we * need to guess about the typical data width given the max. A sliding * scale for percentage of max width seems reasonable. */ if (typid == BPCHAROID) return maxwidth; if (maxwidth <= 32) return maxwidth; /* assume full width */ if (maxwidth < 1000) return 32 + (maxwidth - 32) / 2; /* assume 50% */ /* * Beyond 1000, assume we're looking at something like * "varchar(10000)" where the limit isn't actually reached often, and * use a fixed estimate. */ return 32 + (1000 - 32) / 2; } /* * Oops, we have no idea ... wild guess time. */ return 32; } /* * get_typtype * * Given the type OID, find if it is a basic type, a complex type, etc. * It returns the null char if the cache lookup fails... */ char get_typtype(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); char result; result = typtup->typtype; ReleaseSysCache(tp); return result; } else return '\0'; } /* * type_is_rowtype * * Convenience function to determine whether a type OID represents * a "rowtype" type --- either RECORD or a named composite type * (including a domain over a named composite type). */ bool type_is_rowtype(Oid typid) { if (typid == RECORDOID) return true; /* easy case */ switch (get_typtype(typid)) { case TYPTYPE_COMPOSITE: return true; case TYPTYPE_DOMAIN: if (get_typtype(getBaseType(typid)) == TYPTYPE_COMPOSITE) return true; break; default: break; } return false; } /* * type_is_enum * Returns true if the given type is an enum type. */ bool type_is_enum(Oid typid) { return (get_typtype(typid) == TYPTYPE_ENUM); } /* * type_is_range * Returns true if the given type is a range type. */ bool type_is_range(Oid typid) { return (get_typtype(typid) == TYPTYPE_RANGE); } /* * get_type_category_preferred * * Given the type OID, fetch its category and preferred-type status. * Throws error on failure. */ void get_type_category_preferred(Oid typid, char *typcategory, bool *typispreferred) { HeapTuple tp; Form_pg_type typtup; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for type %u", typid); typtup = (Form_pg_type) GETSTRUCT(tp); *typcategory = typtup->typcategory; *typispreferred = typtup->typispreferred; ReleaseSysCache(tp); } /* * get_typ_typrelid * * Given the type OID, get the typrelid (InvalidOid if not a complex * type). */ Oid get_typ_typrelid(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); Oid result; result = typtup->typrelid; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_element_type * * Given the type OID, get the typelem (InvalidOid if not an array type). * * NB: this only considers varlena arrays to be true arrays; InvalidOid is * returned if the input is a fixed-length array type. */ Oid get_element_type(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); Oid result; if (typtup->typlen == -1) result = typtup->typelem; else result = InvalidOid; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_array_type * * Given the type OID, get the corresponding "true" array type. * Returns InvalidOid if no array type can be found. */ Oid get_array_type(Oid typid) { HeapTuple tp; Oid result = InvalidOid; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { result = ((Form_pg_type) GETSTRUCT(tp))->typarray; ReleaseSysCache(tp); } return result; } /* * get_promoted_array_type * * The "promoted" type is what you'd get from an ARRAY(SELECT ...) * construct, that is, either the corresponding "true" array type * if the input is a scalar type that has such an array type, * or the same type if the input is already a "true" array type. * Returns InvalidOid if neither rule is satisfied. */ Oid get_promoted_array_type(Oid typid) { Oid array_type = get_array_type(typid); if (OidIsValid(array_type)) return array_type; if (OidIsValid(get_element_type(typid))) return typid; return InvalidOid; } /* * get_base_element_type * Given the type OID, get the typelem, looking "through" any domain * to its underlying array type. * * This is equivalent to get_element_type(getBaseType(typid)), but avoids * an extra cache lookup. Note that it fails to provide any information * about the typmod of the array. */ Oid get_base_element_type(Oid typid) { /* * We loop to find the bottom base type in a stack of domains. */ for (;;) { HeapTuple tup; Form_pg_type typTup; tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(tup)) break; typTup = (Form_pg_type) GETSTRUCT(tup); if (typTup->typtype != TYPTYPE_DOMAIN) { /* Not a domain, so stop descending */ Oid result; /* This test must match get_element_type */ if (typTup->typlen == -1) result = typTup->typelem; else result = InvalidOid; ReleaseSysCache(tup); return result; } typid = typTup->typbasetype; ReleaseSysCache(tup); } /* Like get_element_type, silently return InvalidOid for bogus input */ return InvalidOid; } /* * getTypeInputInfo * * Get info needed for converting values of a type to internal form */ void getTypeInputInfo(Oid type, Oid *typInput, Oid *typIOParam) { HeapTuple typeTuple; Form_pg_type pt; typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", type); pt = (Form_pg_type) GETSTRUCT(typeTuple); if (!pt->typisdefined) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type %s is only a shell", format_type_be(type)))); if (!OidIsValid(pt->typinput)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("no input function available for type %s", format_type_be(type)))); *typInput = pt->typinput; *typIOParam = getTypeIOParam(typeTuple); ReleaseSysCache(typeTuple); } /* * getTypeOutputInfo * * Get info needed for printing values of a type */ void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena) { HeapTuple typeTuple; Form_pg_type pt; typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", type); pt = (Form_pg_type) GETSTRUCT(typeTuple); if (!pt->typisdefined) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type %s is only a shell", format_type_be(type)))); if (!OidIsValid(pt->typoutput)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("no output function available for type %s", format_type_be(type)))); *typOutput = pt->typoutput; *typIsVarlena = (!pt->typbyval) && (pt->typlen == -1); ReleaseSysCache(typeTuple); } /* * getTypeBinaryInputInfo * * Get info needed for binary input of values of a type */ void getTypeBinaryInputInfo(Oid type, Oid *typReceive, Oid *typIOParam) { HeapTuple typeTuple; Form_pg_type pt; typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", type); pt = (Form_pg_type) GETSTRUCT(typeTuple); if (!pt->typisdefined) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type %s is only a shell", format_type_be(type)))); if (!OidIsValid(pt->typreceive)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("no binary input function available for type %s", format_type_be(type)))); *typReceive = pt->typreceive; *typIOParam = getTypeIOParam(typeTuple); ReleaseSysCache(typeTuple); } /* * getTypeBinaryOutputInfo * * Get info needed for binary output of values of a type */ void getTypeBinaryOutputInfo(Oid type, Oid *typSend, bool *typIsVarlena) { HeapTuple typeTuple; Form_pg_type pt; typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", type); pt = (Form_pg_type) GETSTRUCT(typeTuple); if (!pt->typisdefined) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type %s is only a shell", format_type_be(type)))); if (!OidIsValid(pt->typsend)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("no binary output function available for type %s", format_type_be(type)))); *typSend = pt->typsend; *typIsVarlena = (!pt->typbyval) && (pt->typlen == -1); ReleaseSysCache(typeTuple); } /* * get_typmodin * * Given the type OID, return the type's typmodin procedure, if any. */ Oid get_typmodin(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); Oid result; result = typtup->typmodin; ReleaseSysCache(tp); return result; } else return InvalidOid; } #ifdef NOT_USED /* * get_typmodout * * Given the type OID, return the type's typmodout procedure, if any. */ Oid get_typmodout(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); Oid result; result = typtup->typmodout; ReleaseSysCache(tp); return result; } else return InvalidOid; } #endif /* NOT_USED */ /* * get_typcollation * * Given the type OID, return the type's typcollation attribute. */ Oid get_typcollation(Oid typid) { HeapTuple tp; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); Oid result; result = typtup->typcollation; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * type_is_collatable * * Return whether the type cares about collations */ bool type_is_collatable(Oid typid) { return OidIsValid(get_typcollation(typid)); } /* ---------- STATISTICS CACHE ---------- */ /* * get_attavgwidth * * Given the table and attribute number of a column, get the average * width of entries in the column. Return zero if no data available. * * Currently this is only consulted for individual tables, not for inheritance * trees, so we don't need an "inh" parameter. * * Calling a hook at this point looks somewhat strange, but is required * because the optimizer calls this function without any other way for * plug-ins to control the result. */ int32 get_attavgwidth(Oid relid, AttrNumber attnum) { HeapTuple tp; int32 stawidth; if (get_attavgwidth_hook) { stawidth = (*get_attavgwidth_hook) (relid, attnum); if (stawidth > 0) return stawidth; } tp = SearchSysCache3(STATRELATTINH, ObjectIdGetDatum(relid), Int16GetDatum(attnum), BoolGetDatum(false)); if (HeapTupleIsValid(tp)) { stawidth = ((Form_pg_statistic) GETSTRUCT(tp))->stawidth; ReleaseSysCache(tp); if (stawidth > 0) return stawidth; } return 0; } /* * get_attnullfrac * * Given the table and attribute number of a column, get the null * fraction of entries in the column. Return zero if no data. */ float4 get_attnullfrac(Oid relid, AttrNumber attnum) { HeapTuple tp; float4 stanullfrac; tp = SearchSysCache(STATRELATTINH, ObjectIdGetDatum(relid), Int16GetDatum(attnum), BoolGetDatum(false), 0); if (HeapTupleIsValid(tp)) { stanullfrac = ((Form_pg_statistic) GETSTRUCT(tp))->stanullfrac; ReleaseSysCache(tp); if (stanullfrac > 0.0) return stanullfrac; } return 0.0; } /* * get_attstatsslot * * Extract the contents of a "slot" of a pg_statistic tuple. * Returns true if requested slot type was found, else false. * * Unlike other routines in this file, this takes a pointer to an * already-looked-up tuple in the pg_statistic cache. We do this since * most callers will want to extract more than one value from the cache * entry, and we don't want to repeat the cache lookup unnecessarily. * Also, this API allows this routine to be used with statistics tuples * that have been provided by a stats hook and didn't really come from * pg_statistic. * * sslot: pointer to output area (typically, a local variable in the caller). * statstuple: pg_statistic tuple to be examined. * reqkind: STAKIND code for desired statistics slot kind. * reqop: STAOP value wanted, or InvalidOid if don't care. * flags: bitmask of ATTSTATSSLOT_VALUES and/or ATTSTATSSLOT_NUMBERS. * * If a matching slot is found, true is returned, and *sslot is filled thus: * staop: receives the actual STAOP value. * stacoll: receives the actual STACOLL value. * valuetype: receives actual datatype of the elements of stavalues. * values: receives pointer to an array of the slot's stavalues. * nvalues: receives number of stavalues. * numbers: receives pointer to an array of the slot's stanumbers (as float4). * nnumbers: receives number of stanumbers. * * valuetype/values/nvalues are InvalidOid/NULL/0 if ATTSTATSSLOT_VALUES * wasn't specified. Likewise, numbers/nnumbers are NULL/0 if * ATTSTATSSLOT_NUMBERS wasn't specified. * * If no matching slot is found, false is returned, and *sslot is zeroed. * * Note that the current API doesn't allow for searching for a slot with * a particular collation. If we ever actually support recording more than * one collation, we'll have to extend the API, but for now simple is good. * * The data referred to by the fields of sslot is locally palloc'd and * is independent of the original pg_statistic tuple. When the caller * is done with it, call free_attstatsslot to release the palloc'd data. * * If it's desirable to call free_attstatsslot when get_attstatsslot might * not have been called, memset'ing sslot to zeroes will allow that. */ bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags) { Form_pg_statistic stats = (Form_pg_statistic) GETSTRUCT(statstuple); int i; Datum val; bool isnull; ArrayType *statarray; Oid arrayelemtype; int narrayelem; HeapTuple typeTuple; Form_pg_type typeForm; /* initialize *sslot properly */ memset(sslot, 0, sizeof(AttStatsSlot)); for (i = 0; i < STATISTIC_NUM_SLOTS; i++) { if ((&stats->stakind1)[i] == reqkind && (reqop == InvalidOid || (&stats->staop1)[i] == reqop)) break; } if (i >= STATISTIC_NUM_SLOTS) return false; /* not there */ sslot->staop = (&stats->staop1)[i]; sslot->stacoll = (&stats->stacoll1)[i]; /* * XXX Hopefully-temporary hack: if stacoll isn't set, inject the default * collation. This won't matter for non-collation-aware datatypes. For * those that are, this covers cases where stacoll has not been set. In * the short term we need this because some code paths involving type NAME * do not pass any collation to prefix_selectivity and related functions. * Even when that's been fixed, it's likely that some add-on typanalyze * functions won't get the word right away about filling stacoll during * ANALYZE, so we'll probably need this for awhile. */ if (sslot->stacoll == InvalidOid) sslot->stacoll = DEFAULT_COLLATION_OID; if (flags & ATTSTATSSLOT_VALUES) { val = SysCacheGetAttr(STATRELATTINH, statstuple, Anum_pg_statistic_stavalues1 + i, &isnull); if (isnull) elog(ERROR, "stavalues is null"); /* * Detoast the array if needed, and in any case make a copy that's * under control of this AttStatsSlot. */ statarray = DatumGetArrayTypePCopy(val); /* * Extract the actual array element type, and pass it back in case the * caller needs it. */ sslot->valuetype = arrayelemtype = ARR_ELEMTYPE(statarray); /* Need info about element type */ typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(arrayelemtype)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", arrayelemtype); typeForm = (Form_pg_type) GETSTRUCT(typeTuple); /* Deconstruct array into Datum elements; NULLs not expected */ deconstruct_array(statarray, arrayelemtype, typeForm->typlen, typeForm->typbyval, typeForm->typalign, &sslot->values, NULL, &sslot->nvalues); /* * If the element type is pass-by-reference, we now have a bunch of * Datums that are pointers into the statarray, so we need to keep * that until free_attstatsslot. Otherwise, all the useful info is in * sslot->values[], so we can free the array object immediately. */ if (!typeForm->typbyval) sslot->values_arr = statarray; else pfree(statarray); ReleaseSysCache(typeTuple); } if (flags & ATTSTATSSLOT_NUMBERS) { val = SysCacheGetAttr(STATRELATTINH, statstuple, Anum_pg_statistic_stanumbers1 + i, &isnull); if (isnull) elog(ERROR, "stanumbers is null"); /* * Detoast the array if needed, and in any case make a copy that's * under control of this AttStatsSlot. */ statarray = DatumGetArrayTypePCopy(val); narrayelem = ARR_DIMS(statarray)[0]; if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 || ARR_HASNULL(statarray) || ARR_ELEMTYPE(statarray) != FLOAT4OID) elog(ERROR, "stanumbers is not a 1-D float4 array"); /* Give caller a pointer directly into the statarray */ sslot->numbers = (float4 *) ARR_DATA_PTR(statarray); sslot->nnumbers = narrayelem; /* We'll free the statarray in free_attstatsslot */ sslot->numbers_arr = statarray; } return true; } /* * free_attstatsslot * Free data allocated by get_attstatsslot */ void free_attstatsslot(AttStatsSlot *sslot) { /* The values[] array was separately palloc'd by deconstruct_array */ if (sslot->values) pfree(sslot->values); /* The numbers[] array points into numbers_arr, do not pfree it */ /* Free the detoasted array objects, if any */ if (sslot->values_arr) pfree(sslot->values_arr); if (sslot->numbers_arr) pfree(sslot->numbers_arr); } /* * get_att_stats * Get attribute statistics. Return a copy of the HeapTuple object, or NULL * if no stats found for attribute * */ HeapTuple get_att_stats(Oid relid, AttrNumber attrnum) { HeapTuple result; /* * This is used by ORCA, and ORCA doesn't know that there are two different kinds of stats, * the inherited stats and the non-inherited. Use the inherited stats, i.e. stats that * cover all the child tables, too, if available. */ result = SearchSysCacheCopy3(STATRELATTINH, ObjectIdGetDatum(relid), Int16GetDatum(attrnum), BoolGetDatum(true)); if (!result) result = SearchSysCacheCopy3(STATRELATTINH, ObjectIdGetDatum(relid), Int16GetDatum(attrnum), BoolGetDatum(false)); return result; } /* ---------- PG_NAMESPACE CACHE ---------- */ /* * get_namespace_name * Returns the name of a given namespace * * Returns a palloc'd copy of the string, or NULL if no such namespace. */ char * get_namespace_name(Oid nspid) { HeapTuple tp; tp = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(nspid)); if (HeapTupleIsValid(tp)) { Form_pg_namespace nsptup = (Form_pg_namespace) GETSTRUCT(tp); char *result; result = pstrdup(NameStr(nsptup->nspname)); ReleaseSysCache(tp); return result; } else return NULL; } /* * get_namespace_name_or_temp * As above, but if it is this backend's temporary namespace, return * "pg_temp" instead. */ char * get_namespace_name_or_temp(Oid nspid) { if (isTempNamespace(nspid)) return "pg_temp"; else return get_namespace_name(nspid); } /* ---------- PG_RANGE CACHE ---------- */ /* * get_range_subtype * Returns the subtype of a given range type * * Returns InvalidOid if the type is not a range type. */ Oid get_range_subtype(Oid rangeOid) { HeapTuple tp; tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid)); if (HeapTupleIsValid(tp)) { Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp); Oid result; result = rngtup->rngsubtype; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * relation_exists * Is there a relation with the given oid */ bool relation_exists(Oid oid) { return SearchSysCacheExists(RELOID, oid, 0, 0, 0); } /* * index_exists * Is there an index with the given oid */ bool index_exists(Oid oid) { return SearchSysCacheExists(INDEXRELID, oid, 0, 0, 0); } /* * type_exists * Is there a type with the given oid */ bool type_exists(Oid oid) { return SearchSysCacheExists(TYPEOID, oid, 0, 0, 0); } /* * operator_exists * Is there an operator with the given oid */ bool operator_exists(Oid oid) { return SearchSysCacheExists(OPEROID, oid, 0, 0, 0); } /* * function_exists * Is there a function with the given oid */ bool function_exists(Oid oid) { return SearchSysCacheExists(PROCOID, oid, 0, 0, 0); } /* * aggregate_exists * Is there an aggregate with the given oid */ bool aggregate_exists(Oid oid) { return SearchSysCacheExists(AGGFNOID, oid, 0, 0, 0); } // Get oid of aggregate with given name and argument type Oid get_aggregate(const char *aggname, Oid oidType) { CatCList *catlist; int i; Oid oidResult; // lookup pg_proc for functions with the given name and arg type catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum((char *) aggname)); oidResult = InvalidOid; for (i = 0; i < catlist->n_members; i++) { HeapTuple htup = &catlist->members[i]->tuple; Form_pg_proc proctuple = (Form_pg_proc) GETSTRUCT(htup); Oid oidProc = proctuple->oid; // skip functions with the wrong number of type of arguments if (1 != proctuple->pronargs || oidType != proctuple->proargtypes.values[0]) { continue; } if (SearchSysCacheExists(AGGFNOID, ObjectIdGetDatum(oidProc), 0, 0, 0)) { oidResult = oidProc; break; } } ReleaseSysCacheList(catlist); return oidResult; } /* * trigger_exists * Is there a trigger with the given oid */ bool trigger_exists(Oid oid) { ScanKeyData scankey; Relation rel; SysScanDesc sscan; bool result; ScanKeyInit(&scankey, Anum_pg_trigger_oid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oid)); rel = table_open(TriggerRelationId, AccessShareLock); sscan = systable_beginscan(rel, TriggerOidIndexId, true, NULL, 1, &scankey); result = (systable_getnext(sscan) != NULL); systable_endscan(sscan); table_close(rel, AccessShareLock); return result; } /* * get_relation_keys * Return a list of relation keys */ List * get_relation_keys(Oid relid) { List *keys = NIL; // lookup unique constraints for relation from the catalog table ScanKeyData skey[1]; Relation rel = table_open(ConstraintRelationId, AccessShareLock); SysScanDesc scan; HeapTuple htup; ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relid); scan = systable_beginscan(rel, ConstraintRelidTypidNameIndexId, true, NULL, 1, skey); while (HeapTupleIsValid(htup = systable_getnext(scan))) { Form_pg_constraint contuple = (Form_pg_constraint) GETSTRUCT(htup); // skip non-unique constraints if (CONSTRAINT_UNIQUE != contuple->contype && CONSTRAINT_PRIMARY != contuple->contype) { continue; } // store key set in an array List *key = NIL; bool null = false; Datum dat = heap_getattr(htup, Anum_pg_constraint_conkey, RelationGetDescr(rel), &null); Datum *dats = NULL; int numKeys = 0; // extract key elements deconstruct_array(DatumGetArrayTypeP(dat), INT2OID, 2, true, 's', &dats, NULL, &numKeys); for (int i = 0; i < numKeys; i++) { int16 key_elem = DatumGetInt16(dats[i]); key = lappend_int(key, key_elem); } keys = lappend(keys, key); } systable_endscan(scan); table_close(rel, AccessShareLock); return keys; } /* * check_constraint_exists * Is there a check constraint with the given oid */ bool check_constraint_exists(Oid oidCheckconstraint) { return SearchSysCacheExists1(CONSTROID, ObjectIdGetDatum(oidCheckconstraint)); } /* * get_check_constraint_relid * Given check constraint id, return the check constraint's relation oid */ Oid get_check_constraint_relid(Oid oidCheckconstraint) { HeapTuple tp; tp = SearchSysCache(CONSTROID, ObjectIdGetDatum(oidCheckconstraint), 0, 0, 0); if (HeapTupleIsValid(tp)) { Form_pg_constraint contup = (Form_pg_constraint) GETSTRUCT(tp); Oid result; result = contup->conrelid; ReleaseSysCache(tp); return result; } else return InvalidOid; } /* * get_check_constraint_oids * Extract all check constraint oid for a given relation. */ List * get_check_constraint_oids(Oid oidRel) { List *plConstraints = NIL; HeapTuple htup; Relation conrel; ScanKeyData scankey; SysScanDesc sscan; /* * lookup constraints for relation from the catalog table * * SELECT * FROM pg_constraint WHERE conrelid = :1 */ conrel = table_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scankey, Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oidRel)); sscan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, true, NULL, 1, &scankey); while (HeapTupleIsValid(htup = systable_getnext(sscan))) { Form_pg_constraint contuple = (Form_pg_constraint) GETSTRUCT(htup); // only consider check constraints if (CONSTRAINT_CHECK != contuple->contype || !contuple->convalidated) { continue; } plConstraints = lappend_oid(plConstraints, contuple->oid); } systable_endscan(sscan); table_close(conrel, AccessShareLock); return plConstraints; } /* * get_check_constraint_name * returns the name of the check constraint with the given oidConstraint. * * Note: returns a palloc'd copy of the string, or NULL if no such constraint. */ char * get_check_constraint_name(Oid oidCheckconstraint) { return get_constraint_name(oidCheckconstraint); } /* * get_check_constraint_expr_tree * returns the expression node tree representing the check constraint * with the given oidConstraint. * * Note: returns a palloc'd expression node tree, or NULL if no such constraint. */ Node * get_check_constraint_expr_tree(Oid oidCheckconstraint) { HeapTuple tp; Node *result = NULL; tp = SearchSysCache(CONSTROID, ObjectIdGetDatum(oidCheckconstraint), 0, 0, 0); if (HeapTupleIsValid(tp)) { Datum conbin; bool isnull; conbin = SysCacheGetAttr(CONSTROID, tp, Anum_pg_constraint_conbin, &isnull); if (!isnull) result = stringToNode(TextDatumGetCString(conbin)); ReleaseSysCache(tp); } return result; } /* * get_cast_func * finds the cast function between the given source and destination type, * and records its oid and properties in the output parameters. * Returns true if a cast exists, false otherwise. */ bool get_cast_func(Oid oidSrc, Oid oidDest, bool *is_binary_coercible, Oid *oidCastFunc, CoercionPathType *pathtype) { if (IsBinaryCoercible(oidSrc, oidDest)) { *is_binary_coercible = true; *oidCastFunc = 0; return true; } *is_binary_coercible = false; *pathtype = find_coercion_pathway(oidDest, oidSrc, COERCION_IMPLICIT, oidCastFunc); if (*pathtype == COERCION_PATH_RELABELTYPE) *is_binary_coercible = true; if (*pathtype != COERCION_PATH_NONE) return true; return false; } /* * get_comparison_type * Retrieve comparison type */ CmpType get_comparison_type(Oid oidOp) { OpBtreeInterpretation *opBti; List *opBtis; opBtis = get_op_btree_interpretation(oidOp); if (opBtis == NIL) { /* The operator does not belong to any B-tree operator family */ return CmptOther; } /* * XXX: Arbitrarily use the first found operator family. Usually * there is only one, but e.g. if someone has created a reverse ordering * family that sorts in descending order, it is ambiguous whether a * < operator stands for the less than operator of the ascending opfamily, * or the greater than operator for the descending opfamily. */ opBti = (OpBtreeInterpretation*)linitial(opBtis); switch(opBti->strategy) { case BTLessStrategyNumber: return CmptLT; case BTLessEqualStrategyNumber: return CmptLEq; case BTEqualStrategyNumber: return CmptEq; case BTGreaterEqualStrategyNumber: return CmptGEq; case BTGreaterStrategyNumber: return CmptGT; case ROWCOMPARE_NE: return CmptNEq; default: elog(ERROR, "unknown B-tree strategy: %d", opBti->strategy); return CmptOther; } } /* * get_comparison_operator * Retrieve comparison operator between given types */ Oid get_comparison_operator(Oid oidLeft, Oid oidRight, CmpType cmpt) { int16 opstrat; HeapTuple ht; Oid result = InvalidOid; Relation pg_amop; ScanKeyData scankey[4]; SysScanDesc sscan; switch(cmpt) { case CmptLT: opstrat = BTLessStrategyNumber; break; case CmptLEq: opstrat = BTLessEqualStrategyNumber; break; case CmptEq: opstrat = BTEqualStrategyNumber; break; case CmptGEq: opstrat = BTGreaterEqualStrategyNumber; break; case CmptGT: opstrat = BTGreaterStrategyNumber; break; default: return InvalidOid; } pg_amop = heap_open(AccessMethodOperatorRelationId, AccessShareLock); /* * SELECT * FROM pg_amop * WHERE amoplefttype = :1 and amoprighttype = :2 and amopmethod = :3 and amopstrategy = :4 */ ScanKeyInit(&scankey[0], Anum_pg_amop_amoplefttype, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oidLeft)); ScanKeyInit(&scankey[1], Anum_pg_amop_amoprighttype, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oidRight)); ScanKeyInit(&scankey[2], Anum_pg_amop_amopmethod, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(BTREE_AM_OID)); ScanKeyInit(&scankey[3], Anum_pg_amop_amopstrategy, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(opstrat)); /* XXX: There is no index for this, so this is slow! */ sscan = systable_beginscan(pg_amop, InvalidOid, false, NULL, 4, scankey); /* XXX: There can be multiple results. Arbitrarily use the first one */ while (HeapTupleIsValid(ht = systable_getnext(sscan))) { Form_pg_amop amoptup = (Form_pg_amop) GETSTRUCT(ht); result = amoptup->amopopr; break; } systable_endscan(sscan); heap_close(pg_amop, AccessShareLock); return result; } /* * has_subclass_slow * * Performs the exhaustive check whether a relation has a subclass. This is * different from has_subclass(), in that the latter can return true if a relation. * *might* have a subclass. See comments in has_subclass() for more details. */ bool has_subclass_slow(Oid relationId) { ScanKeyData scankey; Relation rel; SysScanDesc sscan; bool result; if (!has_subclass(relationId)) { return false; } rel = heap_open(InheritsRelationId, AccessShareLock); ScanKeyInit(&scankey, Anum_pg_inherits_inhparent, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); /* no index on inhparent */ sscan = systable_beginscan(rel, InvalidOid, false, NULL, 1, &scankey); result = (systable_getnext(sscan) != NULL); systable_endscan(sscan); heap_close(rel, AccessShareLock); return result; } /* * get_operator_opfamilies * Get the oid of operator families the given operator belongs to * * ORCA calls this. */ List * get_operator_opfamilies(Oid opno) { List *opfam_oids; CatCList *catlist; int i; opfam_oids = NIL; /* SELECT * FROM pg_amop WHERE amopopr = :1 */ catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno)); for (i = 0; i < catlist->n_members; i++) { HeapTuple htup = &catlist->members[i]->tuple; Form_pg_amop amop_tuple = (Form_pg_amop) GETSTRUCT(htup); opfam_oids = lappend_oid(opfam_oids, amop_tuple->amopfamily); } ReleaseSysCacheList(catlist); return opfam_oids; } /* * get_index_opfamilies * Get the oid of operator families for the index keys */ List * get_index_opfamilies(Oid oidIndex) { HeapTuple htup; List *opfam_oids; bool isnull = false; int indnkeyatts; Datum indclassDatum; oidvector *indclass; htup = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(oidIndex)); if (!HeapTupleIsValid(htup)) elog(ERROR, "Index %u not found", oidIndex); /* * use SysCacheGetAttr() to retrieve number of index attributes, and the oid * vector of indclass */ indnkeyatts = DatumGetInt16(SysCacheGetAttr(INDEXRELID, htup, Anum_pg_index_indnkeyatts, &isnull)); Assert(!isnull); indclassDatum = SysCacheGetAttr(INDEXRELID, htup, Anum_pg_index_indclass, &isnull); if (isnull) return NIL; indclass = (oidvector *) DatumGetPointer(indclassDatum); opfam_oids = NIL; for (int i = 0; i < indnkeyatts; i++) { Oid oidOpClass = indclass->values[i]; Oid opfam = get_opclass_family(oidOpClass); opfam_oids = lappend_oid(opfam_oids, opfam); } ReleaseSysCache(htup); return opfam_oids; } /* * relation_policy * Return the distribution policy of a table. */ GpPolicy * relation_policy(Relation rel) { Assert(NULL != rel); /* not a partitioned table */ return rel->rd_cdbpolicy; } /* * child_distribution_mismatch * Return true if the table is partitioned and one of its children has a * different distribution policy. The only allowed mismatch is for the parent * to be hash distributed, and its child part to be randomly distributed. */ bool child_distribution_mismatch(Relation rel) { Assert(NULL != rel); if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) { /* not a partitioned table */ return false; } GpPolicy *rootPolicy = rel->rd_cdbpolicy; Assert(NULL != rootPolicy && "Partitioned tables cannot be master-only"); /* replicated table can't have child */ Assert(!GpPolicyIsReplicated(rootPolicy)); if (GpPolicyIsRandomPartitioned(rootPolicy)) { /* root partition policy already marked as Random, no mismatch possible as * all children must be random as well */ return false; } List *child_oids = find_all_inheritors(rel->rd_id, NoLock, NULL); ListCell *lc; foreach (lc, child_oids) { Oid oidChild = lfirst_oid(lc); Relation relChild = RelationIdGetRelation(oidChild); Assert(NULL != relChild); GpPolicy *childPolicy = relChild->rd_cdbpolicy; Assert(!GpPolicyIsReplicated(childPolicy)); if (GpPolicyIsRandomPartitioned(childPolicy)) { /* child partition is Random, and parent is not */ RelationClose(relChild); return true; } RelationClose(relChild); } list_free(child_oids); /* all children match the root's distribution policy */ return false; } /* * child_triggers * Return true if the table is partitioned and any of the child partitions * have a trigger of the given type. */ bool child_triggers(Oid relationId, int32 triggerType) { /* GPDB_12_MERGE_FIXME */ return false; #if 0 Assert(InvalidOid != relationId); if (PART_STATUS_NONE == rel_part_status(relationId)) { /* not a partitioned table */ return false; } List *childOids = find_all_inheritors(relationId, NoLock, NULL); ListCell *lc; bool found = false; foreach (lc, childOids) { Oid oidChild = lfirst_oid(lc); Relation relChild = RelationIdGetRelation(oidChild); Assert(NULL != relChild); if (relChild->rd_rel->relhastriggers && NULL == relChild->trigdesc) { RelationBuildTriggers(relChild); if (NULL == relChild->trigdesc) { relChild->rd_rel->relhastriggers = false; } } if (relChild->rd_rel->relhastriggers) { for (int i = 0; i < relChild->trigdesc->numtriggers && !found; i++) { Trigger trigger = relChild->trigdesc->triggers[i]; found = trigger_enabled(trigger.tgoid) && (get_trigger_type(trigger.tgoid) & triggerType) == triggerType; } } RelationClose(relChild); if (found) { break; } } list_free(childOids); /* no child triggers matching the given type */ return found; #endif } /* ---------- PG_INDEX CACHE ---------- */ /* * get_index_column_opclass * * Given the index OID and column number, * return opclass of the index column * or InvalidOid if the index was not found. */ Oid get_index_column_opclass(Oid index_oid, int attno) { HeapTuple tuple; Form_pg_index rd_index PG_USED_FOR_ASSERTS_ONLY; Datum datum; bool isnull; oidvector *indclass; Oid opclass; /* First we need to know the column's opclass. */ tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid)); if (!HeapTupleIsValid(tuple)) return InvalidOid; rd_index = (Form_pg_index) GETSTRUCT(tuple); /* caller is supposed to guarantee this */ Assert(attno > 0 && attno <= rd_index->indnatts); datum = SysCacheGetAttr(INDEXRELID, tuple, Anum_pg_index_indclass, &isnull); Assert(!isnull); indclass = ((oidvector *) DatumGetPointer(datum)); opclass = indclass->values[attno - 1]; ReleaseSysCache(tuple); return opclass; } /* GPDB_12_MERGE_FIXME: only used by ORCA. Fix the callers to check * Relation->relkind == RELKIND_PARTITIONED_TABLE instead. They should * have the relcache entry at hand anyway. */ bool relation_is_partitioned(Oid relid) { HeapTuple tuple; tuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(tuple)) { ReleaseSysCache(tuple); return true; } else return false; } bool index_is_partitioned(Oid relid) { HeapTuple tuple; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", relid); Form_pg_class pg_class_tuple = (Form_pg_class) GETSTRUCT(tuple); ReleaseSysCache(tuple); return pg_class_tuple->relkind == RELKIND_PARTITIONED_INDEX; } List * relation_get_leaf_partitions(Oid oid) { List *descendants = find_all_inheritors(oid, AccessShareLock, NULL); List *leaves = NIL; ListCell *lc; foreach(lc, descendants) { const Oid descendant = lfirst_oid(lc); if (get_rel_relkind(descendant) != RELKIND_PARTITIONED_TABLE && get_rel_relkind(descendant) != RELKIND_PARTITIONED_INDEX) leaves = lappend_oid(leaves, descendant); } return leaves; }
{'content_hash': '0a3e1996bfc09aeb0fcc7c162d95ba41', 'timestamp': '', 'source': 'github', 'line_count': 4525, 'max_line_length': 111, 'avg_line_length': 23.63270718232044, 'alnum_prop': 0.6910546297854832, 'repo_name': 'lisakowen/gpdb', 'id': '047d4318352ff864f81816efdac124f8619753e7', 'size': '107597', 'binary': False, 'copies': '7', 'ref': 'refs/heads/master', 'path': 'src/backend/utils/cache/lsyscache.c', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Assembly', 'bytes': '3266'}, {'name': 'Awk', 'bytes': '836'}, {'name': 'Batchfile', 'bytes': '15613'}, {'name': 'C', 'bytes': '48331196'}, {'name': 'C++', 'bytes': '12756630'}, {'name': 'CMake', 'bytes': '41334'}, {'name': 'DTrace', 'bytes': '3833'}, {'name': 'Emacs Lisp', 'bytes': '4164'}, {'name': 'Fortran', 'bytes': '14873'}, {'name': 'GDB', 'bytes': '576'}, {'name': 'Gherkin', 'bytes': '497626'}, {'name': 'HTML', 'bytes': '215381'}, {'name': 'JavaScript', 'bytes': '23969'}, {'name': 'Lex', 'bytes': '254578'}, {'name': 'M4', 'bytes': '133878'}, {'name': 'Makefile', 'bytes': '510527'}, {'name': 'PLpgSQL', 'bytes': '9268532'}, {'name': 'Perl', 'bytes': '1161283'}, {'name': 'PowerShell', 'bytes': '422'}, {'name': 'Python', 'bytes': '3396518'}, {'name': 'Roff', 'bytes': '30385'}, {'name': 'Ruby', 'bytes': '299639'}, {'name': 'SCSS', 'bytes': '339'}, {'name': 'Shell', 'bytes': '404184'}, {'name': 'XS', 'bytes': '7098'}, {'name': 'XSLT', 'bytes': '448'}, {'name': 'Yacc', 'bytes': '747692'}, {'name': 'sed', 'bytes': '1231'}]}
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include <assert.h> #include <stdio.h> #include "nsCOMPtr.h" #include "nsISupports.h" #define NS_IFOO_IID \ { 0x6f7652e0, 0xee43, 0x11d1, \ { 0x9c, 0xc3, 0x00, 0x60, 0x08, 0x8c, 0xa6, 0xb3 } } class IFoo : public nsISupports { public: NS_DECLARE_STATIC_IID_ACCESSOR(NS_IFOO_IID) public: IFoo(); // virtual dtor because IBar uses our Release() virtual ~IFoo(); NS_IMETHOD_(nsrefcnt) AddRef(); NS_IMETHOD_(nsrefcnt) Release(); NS_IMETHOD QueryInterface( const nsIID&, void** ); static void print_totals(); private: unsigned int refcount_; static unsigned int total_constructions_; static unsigned int total_destructions_; }; NS_DEFINE_STATIC_IID_ACCESSOR(IFoo, NS_IFOO_IID) class IBar; // some types I'll need typedef unsigned long NS_RESULT; // some functions I'll need (and define below) nsresult CreateIFoo( void** ); nsresult CreateIBar( void** result ); void AnIFooPtrPtrContext( IFoo** ); void AnISupportsPtrPtrContext( nsISupports** ); void AVoidPtrPtrContext( void** ); void set_a_IFoo( nsCOMPtr<IFoo>* result ); nsCOMPtr<IFoo> return_a_IFoo(); unsigned int IFoo::total_constructions_; unsigned int IFoo::total_destructions_; class test_message { public: test_message() { printf("BEGIN unit tests for |nsCOMPtr|, compiled " __DATE__ "\n"); } ~test_message() { IFoo::print_totals(); printf("END unit tests for |nsCOMPtr|.\n"); } }; test_message gTestMessage; /* ... */ void IFoo::print_totals() { printf("total constructions/destructions --> %d/%d\n", total_constructions_, total_destructions_); } IFoo::IFoo() : refcount_(0) { ++total_constructions_; printf(" new IFoo@%p [#%d]\n", static_cast<void*>(this), total_constructions_); } IFoo::~IFoo() { ++total_destructions_; printf("IFoo@%p::~IFoo() [#%d]\n", static_cast<void*>(this), total_destructions_); } nsrefcnt IFoo::AddRef() { ++refcount_; printf("IFoo@%p::AddRef(), refcount --> %d\n", static_cast<void*>(this), refcount_); return refcount_; } nsrefcnt IFoo::Release() { int newcount = --refcount_; if ( newcount == 0 ) printf(">>"); printf("IFoo@%p::Release(), refcount --> %d\n", static_cast<void*>(this), refcount_); if ( newcount == 0 ) { printf(" delete IFoo@%p\n", static_cast<void*>(this)); printf("<<IFoo@%p::Release()\n", static_cast<void*>(this)); delete this; } return newcount; } nsresult IFoo::QueryInterface( const nsIID& aIID, void** aResult ) { printf("IFoo@%p::QueryInterface()\n", static_cast<void*>(this)); nsISupports* rawPtr = 0; nsresult status = NS_OK; if ( aIID.Equals(GetIID()) ) rawPtr = this; else { nsID iid_of_ISupports = NS_ISUPPORTS_IID; if ( aIID.Equals(iid_of_ISupports) ) rawPtr = static_cast<nsISupports*>(this); else status = NS_ERROR_NO_INTERFACE; } NS_IF_ADDREF(rawPtr); *aResult = rawPtr; return status; } nsresult CreateIFoo( void** result ) // a typical factory function (that calls AddRef) { printf(">>CreateIFoo() --> "); IFoo* foop = new IFoo; printf("IFoo@%p\n", static_cast<void*>(foop)); foop->AddRef(); *result = foop; printf("<<CreateIFoo()\n"); return NS_OK; } void set_a_IFoo( nsCOMPtr<IFoo>* result ) { printf(">>set_a_IFoo()\n"); assert(result); nsCOMPtr<IFoo> foop( do_QueryInterface(new IFoo) ); *result = foop; printf("<<set_a_IFoo()\n"); } nsCOMPtr<IFoo> return_a_IFoo() { printf(">>return_a_IFoo()\n"); nsCOMPtr<IFoo> foop( do_QueryInterface(new IFoo) ); printf("<<return_a_IFoo()\n"); return foop; } #define NS_IBAR_IID \ { 0x6f7652e1, 0xee43, 0x11d1, \ { 0x9c, 0xc3, 0x00, 0x60, 0x08, 0x8c, 0xa6, 0xb3 } } class IBar : public IFoo { public: NS_DECLARE_STATIC_IID_ACCESSOR(NS_IBAR_IID) public: IBar(); virtual ~IBar(); NS_IMETHOD QueryInterface( const nsIID&, void** ); }; NS_DEFINE_STATIC_IID_ACCESSOR(IBar, NS_IBAR_IID) IBar::IBar() { printf(" new IBar@%p\n", static_cast<void*>(this)); } IBar::~IBar() { printf("IBar@%p::~IBar()\n", static_cast<void*>(this)); } nsresult IBar::QueryInterface( const nsID& aIID, void** aResult ) { printf("IBar@%p::QueryInterface()\n", static_cast<void*>(this)); nsISupports* rawPtr = 0; nsresult status = NS_OK; if ( aIID.Equals(GetIID()) ) rawPtr = this; else if ( aIID.Equals(NS_GET_IID(IFoo)) ) rawPtr = static_cast<IFoo*>(this); else { nsID iid_of_ISupports = NS_ISUPPORTS_IID; if ( aIID.Equals(iid_of_ISupports) ) rawPtr = static_cast<nsISupports*>(this); else status = NS_ERROR_NO_INTERFACE; } NS_IF_ADDREF(rawPtr); *aResult = rawPtr; return status; } nsresult CreateIBar( void** result ) // a typical factory function (that calls AddRef) { printf(">>CreateIBar() --> "); IBar* barp = new IBar; printf("IBar@%p\n", static_cast<void*>(barp)); barp->AddRef(); *result = barp; printf("<<CreateIBar()\n"); return NS_OK; } void AnIFooPtrPtrContext( IFoo** ) { } void AVoidPtrPtrContext( void** ) { } void AnISupportsPtrPtrContext( nsISupports** ) { } static nsresult TestBloat_Raw_Unsafe() { IBar* barP = 0; nsresult result = CreateIBar(reinterpret_cast<void**>(&barP)); if ( barP ) { IFoo* fooP = 0; if ( NS_SUCCEEDED( result = barP->QueryInterface(NS_GET_IID(IFoo), reinterpret_cast<void**>(&fooP)) ) ) { fooP->print_totals(); NS_RELEASE(fooP); } NS_RELEASE(barP); } return result; } static nsresult TestBloat_Smart() { nsCOMPtr<IBar> barP; nsresult result = CreateIBar( getter_AddRefs(barP) ); nsCOMPtr<IFoo> fooP( do_QueryInterface(barP, &result) ); if ( fooP ) fooP->print_totals(); return result; } nsCOMPtr<IFoo> gFoop; int main() { printf(">>main()\n"); printf("sizeof(nsCOMPtr<IFoo>) --> %u\n", unsigned(sizeof(nsCOMPtr<IFoo>))); TestBloat_Raw_Unsafe(); TestBloat_Smart(); { printf("\n### Test 1: will a |nsCOMPtr| call |AddRef| on a pointer assigned into it?\n"); nsCOMPtr<IFoo> foop( do_QueryInterface(new IFoo) ); printf("\n### Test 2: will a |nsCOMPtr| |Release| its old pointer when a new one is assigned in?\n"); foop = do_QueryInterface(new IFoo); // [Shouldn't compile] Is it a compile time error to try to |AddRef| by hand? //foop->AddRef(); // [Shouldn't compile] Is it a compile time error to try to |Release| be hand? //foop->Release(); // [Shouldn't compile] Is it a compile time error to try to |delete| an |nsCOMPtr|? //delete foop; printf("\n### Test 3: can you |AddRef| if you must?\n"); static_cast<IFoo*>(foop)->AddRef(); printf("\n### Test 4: can you |Release| if you must?\n"); static_cast<IFoo*>(foop)->Release(); printf("\n### Test 5: will a |nsCOMPtr| |Release| when it goes out of scope?\n"); } { printf("\n### Test 6: will a |nsCOMPtr| call the correct destructor?\n"); nsCOMPtr<IFoo> foop( do_QueryInterface(new IBar) ); } { printf("\n### Test 7: can you compare one |nsCOMPtr| with another [!=]?\n"); nsCOMPtr<IFoo> foo1p( do_QueryInterface(new IFoo) ); // [Shouldn't compile] Is it a compile time error to omit |getter_[doesnt_]AddRef[s]|? //AnIFooPtrPtrContext(&foo1p); // [Shouldn't compile] Is it a compile time error to omit |getter_[doesnt_]AddRef[s]|? //AVoidPtrPtrContext(&foo1p); nsCOMPtr<IFoo> foo2p( do_QueryInterface(new IFoo) ); if ( foo1p != foo2p ) printf("foo1p != foo2p\n"); else printf("foo1p == foo2p\n"); printf("\n### Test 7.5: can you compare a |nsCOMPtr| with NULL, 0, nullptr [!=]?\n"); if ( foo1p != 0 ) printf("foo1p != 0\n"); if ( 0 != foo1p ) printf("0 != foo1p\n"); if ( foo1p == 0 ) printf("foo1p == 0\n"); if ( 0 == foo1p ) printf("0 == foo1p\n"); IFoo* raw_foo2p = foo2p.get(); printf("\n### Test 8: can you compare a |nsCOMPtr| with a raw interface pointer [!=]?\n"); if ( foo1p.get() != raw_foo2p ) printf("foo1p != raw_foo2p\n"); else printf("foo1p == raw_foo2p\n"); printf("\n### Test 9: can you assign one |nsCOMPtr| into another?\n"); foo1p = foo2p; printf("\n### Test 10: can you compare one |nsCOMPtr| with another [==]?\n"); if ( foo1p == foo2p ) printf("foo1p == foo2p\n"); else printf("foo1p != foo2p\n"); printf("\n### Test 11: can you compare a |nsCOMPtr| with a raw interface pointer [==]?\n"); if ( raw_foo2p == foo2p.get() ) printf("raw_foo2p == foo2p\n"); else printf("raw_foo2p != foo2p\n"); #if 1 printf("\n### Test 11.5: can you compare a |nsCOMPtr| with a raw interface pointer [==]?\n"); if ( nsCOMPtr<IFoo>( raw_foo2p ) == foo2p ) printf("raw_foo2p == foo2p\n"); else printf("raw_foo2p != foo2p\n"); #endif printf("\n### Test 12: bare pointer test?\n"); if ( foo1p ) printf("foo1p is not NULL\n"); else printf("foo1p is NULL\n"); printf("\n### Test 13: numeric pointer test?\n"); if ( foo1p == 0 ) printf("foo1p is NULL\n"); else printf("foo1p is not NULL\n"); #if 0 if ( foo1p == 1 ) printf("foo1p allowed compare with in\n"); #endif printf("\n### Test 14: how about when two |nsCOMPtr|s referring to the same object go out of scope?\n"); } { printf("\n### Test 15,16 ...setup...\n"); IFoo* raw_foo1p = new IFoo; raw_foo1p->AddRef(); IFoo* raw_foo2p = new IFoo; raw_foo2p->AddRef(); printf("\n### Test 15: what if I don't want to |AddRef| when I construct?\n"); nsCOMPtr<IFoo> foo1p( dont_AddRef(raw_foo1p) ); //nsCOMPtr<IFoo> foo1p = dont_AddRef(raw_foo1p); printf("\n### Test 16: what if I don't want to |AddRef| when I assign in?\n"); nsCOMPtr<IFoo> foo2p; foo2p = dont_AddRef(raw_foo2p); } { printf("\n### setup for Test 17\n"); nsCOMPtr<IFoo> foop; printf("### Test 17: basic parameter behavior?\n"); CreateIFoo( nsGetterAddRefs<IFoo>(foop) ); } printf("### End Test 17\n"); { printf("\n### setup for Test 18\n"); nsCOMPtr<IFoo> foop; printf("### Test 18: basic parameter behavior, using the short form?\n"); CreateIFoo( getter_AddRefs(foop) ); } printf("### End Test 18\n"); { printf("\n### setup for Test 19, 20\n"); nsCOMPtr<IFoo> foop; printf("### Test 19: reference parameter behavior?\n"); set_a_IFoo(address_of(foop)); printf("### Test 20: return value behavior?\n"); foop = return_a_IFoo(); } printf("### End Test 19, 20\n"); { printf("\n### setup for Test 21\n"); nsCOMPtr<IFoo> fooP; printf("### Test 21: is |QueryInterface| called on assigning in a raw pointer?\n"); fooP = do_QueryInterface(new IFoo); } printf("### End Test 21\n"); { printf("\n### setup for Test 22\n"); nsCOMPtr<IFoo> fooP; fooP = do_QueryInterface(new IFoo); nsCOMPtr<IFoo> foo2P; printf("### Test 22: is |QueryInterface| _not_ called when assigning in a smart-pointer of the same type?\n"); foo2P = fooP; } printf("### End Test 22\n"); { printf("\n### setup for Test 23\n"); nsCOMPtr<IBar> barP( do_QueryInterface(new IBar) ); printf("### Test 23: is |QueryInterface| called when assigning in a smart-pointer of a different type?\n"); nsCOMPtr<IFoo> fooP( do_QueryInterface(barP) ); if ( fooP ) printf("an IBar* is an IFoo*\n"); } printf("### End Test 23\n"); { printf("\n### setup for Test 24\n"); nsCOMPtr<IFoo> fooP( do_QueryInterface(new IFoo) ); printf("### Test 24: does |forget| avoid an AddRef/Release when assigning to another nsCOMPtr?\n"); nsCOMPtr<IFoo> fooP2( fooP.forget() ); } printf("### End Test 24\n"); { nsCOMPtr<IFoo> fooP; AnIFooPtrPtrContext( getter_AddRefs(fooP) ); AVoidPtrPtrContext( getter_AddRefs(fooP) ); AnISupportsPtrPtrContext( getter_AddRefs(fooP) ); } { nsCOMPtr<nsISupports> supportsP; AVoidPtrPtrContext( getter_AddRefs(supportsP) ); AnISupportsPtrPtrContext( getter_AddRefs(supportsP) ); } printf("\n### Test 25: will a static |nsCOMPtr| |Release| before program termination?\n"); gFoop = do_QueryInterface(new IFoo); printf("<<main()\n"); return 0; }
{'content_hash': '082f94cf3f2b30855115feb82d7068f8', 'timestamp': '', 'source': 'github', 'line_count': 564, 'max_line_length': 113, 'avg_line_length': 23.398936170212767, 'alnum_prop': 0.5836174888232174, 'repo_name': 'wilebeast/FireFox-OS', 'id': '1e5d00d9419d872b455b9aabbeed7db98a8c5c14', 'size': '13197', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'B2G/gecko/xpcom/tests/TestCOMPtr.cpp', 'mode': '33188', 'license': 'apache-2.0', 'language': []}
// +build !providerless package azure import ( "context" "strings" "sync" "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" azcache "k8s.io/legacy-cloud-providers/azure/cache" ) var ( vmssNameSeparator = "_" vmssKey = "k8svmssKey" vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey" availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" availabilitySetNodesCacheTTLDefaultInSeconds = 900 vmssCacheTTLDefaultInSeconds = 600 vmssVirtualMachinesCacheTTLDefaultInSeconds = 600 ) type vmssVirtualMachinesEntry struct { resourceGroup string vmssName string instanceID string virtualMachine *compute.VirtualMachineScaleSetVM lastUpdate time.Time } type vmssEntry struct { vmss *compute.VirtualMachineScaleSet lastUpdate time.Time } func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [vmssName]*vmssEntry allResourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err } for _, resourceGroup := range allResourceGroups.List() { allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(context.Background(), resourceGroup) if rerr != nil { klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr) return nil, rerr.Error() } for i := range allScaleSets { scaleSet := allScaleSets[i] if scaleSet.Name == nil || *scaleSet.Name == "" { klog.Warning("failed to get the name of VMSS") continue } localCache.Store(*scaleSet.Name, &vmssEntry{ vmss: &scaleSet, lastUpdate: time.Now().UTC(), }) } } return localCache, nil } if ss.Config.VmssCacheTTLInSeconds == 0 { ss.Config.VmssCacheTTLInSeconds = vmssCacheTTLDefaultInSeconds } return azcache.NewTimedcache(time.Duration(ss.Config.VmssCacheTTLInSeconds)*time.Second, getter) } func extractVmssVMName(name string) (string, string, error) { split := strings.SplitAfter(name, vmssNameSeparator) if len(split) < 2 { klog.V(3).Infof("Failed to extract vmssVMName %q", name) return "", "", ErrorNotVmssInstance } ssName := strings.Join(split[0:len(split)-1], "") // removing the trailing `vmssNameSeparator` since we used SplitAfter ssName = ssName[:len(ssName)-1] instanceID := split[len(split)-1] return ssName, instanceID, nil } func (ss *scaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry oldCache := make(map[string]vmssVirtualMachinesEntry) if ss.vmssVMCache != nil { // get old cache before refreshing the cache entry, exists, err := ss.vmssVMCache.Store.GetByKey(vmssVirtualMachinesKey) if err != nil { return nil, err } if exists { cached := entry.(*azcache.AzureCacheEntry).Data if cached != nil { virtualMachines := cached.(*sync.Map) virtualMachines.Range(func(key, value interface{}) bool { oldCache[key.(string)] = *value.(*vmssVirtualMachinesEntry) return true }) } } } allResourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err } for _, resourceGroup := range allResourceGroups.List() { scaleSetNames, err := ss.listScaleSets(resourceGroup) if err != nil { return nil, err } for _, ssName := range scaleSetNames { vms, err := ss.listScaleSetVMs(ssName, resourceGroup) if err != nil { return nil, err } for i := range vms { vm := vms[i] if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) continue } computerName := strings.ToLower(*vm.OsProfile.ComputerName) vmssVMCacheEntry := &vmssVirtualMachinesEntry{ resourceGroup: resourceGroup, vmssName: ssName, instanceID: to.String(vm.InstanceID), virtualMachine: &vm, lastUpdate: time.Now().UTC(), } // set cache entry to nil when the VM is under deleting. if vm.VirtualMachineScaleSetVMProperties != nil && strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) vmssVMCacheEntry.virtualMachine = nil } localCache.Store(computerName, vmssVMCacheEntry) if _, exists := oldCache[computerName]; exists { delete(oldCache, computerName) } } } // add old missing cache data with nil entries to prevent aggressive // ARM calls during cache invalidation for name, vmEntry := range oldCache { // if the nil cache entry has existed for 15 minutes in the cache // then it should not be added back to the cache if vmEntry.virtualMachine == nil || time.Since(vmEntry.lastUpdate) > 15*time.Minute { klog.V(5).Infof("ignoring expired entries from old cache for %s", name) continue } lastUpdate := time.Now().UTC() if vmEntry.virtualMachine == nil { // if this is already a nil entry then keep the time the nil // entry was first created, so we can cleanup unwanted entries lastUpdate = vmEntry.lastUpdate } klog.V(5).Infof("adding old entries to new cache for %s", name) localCache.Store(name, &vmssVirtualMachinesEntry{ resourceGroup: vmEntry.resourceGroup, vmssName: vmEntry.vmssName, instanceID: vmEntry.instanceID, virtualMachine: nil, lastUpdate: lastUpdate, }) } } return localCache, nil } if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 { ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds } return azcache.NewTimedcache(time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds)*time.Second, getter) } func (ss *scaleSet) deleteCacheForNode(nodeName string) error { cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) return err } virtualMachines := cached.(*sync.Map) virtualMachines.Delete(nodeName) return nil } func (ss *scaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error) { getter := func(key string) (interface{}, error) { localCache := sets.NewString() resourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err } for _, resourceGroup := range resourceGroups.List() { vmList, err := ss.Cloud.ListVirtualMachines(resourceGroup) if err != nil { return nil, err } for _, vm := range vmList { if vm.Name != nil { localCache.Insert(*vm.Name) } } } return localCache, nil } if ss.Config.AvailabilitySetNodesCacheTTLInSeconds == 0 { ss.Config.AvailabilitySetNodesCacheTTLInSeconds = availabilitySetNodesCacheTTLDefaultInSeconds } return azcache.NewTimedcache(time.Duration(ss.Config.AvailabilitySetNodesCacheTTLInSeconds)*time.Second, getter) } func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt azcache.AzureCacheReadType) (bool, error) { // Assume all nodes are managed by VMSS when DisableAvailabilitySetNodes is enabled. if ss.DisableAvailabilitySetNodes { klog.V(2).Infof("Assuming node %q is managed by VMSS since DisableAvailabilitySetNodes is set to true", nodeName) return false, nil } cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, crt) if err != nil { return false, err } availabilitySetNodes := cached.(sets.String) return availabilitySetNodes.Has(nodeName), nil }
{'content_hash': '842d558feffe5dc267528f4454ca8520', 'timestamp': '', 'source': 'github', 'line_count': 260, 'max_line_length': 136, 'avg_line_length': 30.315384615384616, 'alnum_prop': 0.7023598071555442, 'repo_name': 'jim-minter/origin', 'id': '5a820e4bf19d81c476b9e89e52df8bf1740e1b18', 'size': '8451', 'binary': False, 'copies': '4', 'ref': 'refs/heads/master', 'path': 'vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go', 'mode': '33188', 'license': 'apache-2.0', 'language': [{'name': 'Awk', 'bytes': '921'}, {'name': 'Dockerfile', 'bytes': '2240'}, {'name': 'Go', 'bytes': '2275354'}, {'name': 'Makefile', 'bytes': '6395'}, {'name': 'Python', 'bytes': '14593'}, {'name': 'Shell', 'bytes': '310275'}]}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using System.Collections.Generic; using System.Diagnostics; using Microsoft.CSharp.RuntimeBinder.Errors; using Microsoft.CSharp.RuntimeBinder.Syntax; namespace Microsoft.CSharp.RuntimeBinder.Semantics { internal sealed partial class ExpressionBinder { /* These are the predefined binary operator signatures (object, object) : == != (string, string) : == != (string, string) : + (string, object) : + (object, string) : + (int, int) : / % + - << >> == != < > <= >=&| ^ (uint, uint) : / % + - == != < > <= >=&| ^ (long, long) : / % + - == != < > <= >=&| ^ (ulong, ulong) : / % + - == != < > <= >=&| ^ (uint, int) : << >> (long, int) : << >> (ulong, int) : << >> (float, float) : / % + - == != < > <= >= (double, double) : / % + - == != < > <= >= (decimal, decimal) : / % + - == != < > <= >= (bool, bool) : == != &| ^ && || (Sys.Del, Sys.Del) : == != // Below here the types cannot be represented entirely by a PREDEFTYPE. (delegate, delegate) : + - == != (enum, enum) : - == != < > <= >=&| ^ (enum, under) : + - (under, enum) : + (ptr, ptr) : - (ptr, int) : + - (ptr, uint) : + - (ptr, long) : + - (ptr, ulong) : + - (int, ptr) : + (uint, ptr) : + (long, ptr) : + (ulong, ptr) : + (void, void) : == != < > <= >= There are the predefined unary operator signatures: int : + - ~ uint : + ~ long : + - ~ ulong : + ~ float : + - double : + - decimal : + - bool : ! // Below here the types cannot be represented entirely by a PREDEFTYPE. enum : ~ ptr : Note that pointer operators cannot be lifted over nullable. */ // BinOpBindMethod and UnaOpBindMethod are method pointer arrays to dispatch the appropriate operator binder. // Method pointers must be in the order of the corresponding enums. We check this when the full signature is set. // When the binding method is looked up in these arrays we ASSERT // if the array is out of bounds of the corresponding array. private readonly BinOpSig[] g_binopSignatures; // We want unary minus to bind to "operator -(ulong)" and then we // produce an error (since there is no pfn). We can't let - bind to a floating point type, // since they lose precision. See the language spec. // Increment and decrement operators are special. private readonly UnaOpSig[] g_rguos; private ExprBinOp BindUserDefinedBinOp(ExpressionKind ek, BinOpArgInfo info) { MethPropWithInst pmpwi = null; if (info.pt1 <= PredefinedType.PT_ULONG && info.pt2 <= PredefinedType.PT_ULONG) { return null; } Expr expr = null; switch (info.binopKind) { case BinOpKind.Logical: { // Logical operators cannot be overloaded, but use the bitwise overloads. ExprCall call = BindUDBinop((ExpressionKind)(ek - ExpressionKind.LogicalAnd + ExpressionKind.BitwiseAnd), info.arg1, info.arg2, true, out pmpwi); if (call != null) { if (call.IsOK) { expr = BindUserBoolOp(ek, call); } else { expr = call; } } break; } default: expr = BindUDBinop(ek, info.arg1, info.arg2, false, out pmpwi); break; } if (expr == null) { return null; } return GetExprFactory().CreateUserDefinedBinop(ek, expr.Type, info.arg1, info.arg2, expr, pmpwi); } // Adds special signatures to the candidate list. If we find an exact match // then it will be the last item on the list and we return true. private bool GetSpecialBinopSignatures(List<BinOpFullSig> prgbofs, BinOpArgInfo info) { Debug.Assert(prgbofs != null); if (info.pt1 <= PredefinedType.PT_ULONG && info.pt2 <= PredefinedType.PT_ULONG) { return false; } return GetDelBinOpSigs(prgbofs, info) || GetEnumBinOpSigs(prgbofs, info) || GetPtrBinOpSigs(prgbofs, info) || GetRefEqualSigs(prgbofs, info); } // Adds standard and lifted signatures to the candidate list. If we find an exact match // then it will be the last item on the list and we return true. private bool GetStandardAndLiftedBinopSignatures(List<BinOpFullSig> rgbofs, BinOpArgInfo info) { Debug.Assert(rgbofs != null); int ibosMinLift = 0; for (int ibos = 0; ibos < g_binopSignatures.Length; ibos++) { BinOpSig bos = g_binopSignatures[ibos]; if ((bos.mask & info.mask) == 0) { continue; } CType typeSig1 = GetPredefindType(bos.pt1); CType typeSig2 = GetPredefindType(bos.pt2); if (typeSig1 == null || typeSig2 == null) continue; ConvKind cv1 = GetConvKind(info.pt1, bos.pt1); ConvKind cv2 = GetConvKind(info.pt2, bos.pt2); LiftFlags grflt = LiftFlags.None; switch (cv1) { default: VSFAIL("Shouldn't happen!"); continue; case ConvKind.None: continue; case ConvKind.Explicit: if (!info.arg1.isCONSTANT_OK()) { continue; } // Need to try to convert. if (canConvert(info.arg1, typeSig1)) { break; } if (ibos < ibosMinLift || !bos.CanLift()) { continue; } Debug.Assert(typeSig1.IsValType()); typeSig1 = GetSymbolLoader().GetTypeManager().GetNullable(typeSig1); if (!canConvert(info.arg1, typeSig1)) { continue; } switch (GetConvKind(info.ptRaw1, bos.pt1)) { default: grflt = grflt | LiftFlags.Convert1; break; case ConvKind.Implicit: case ConvKind.Identity: grflt = grflt | LiftFlags.Lift1; break; } break; case ConvKind.Unknown: if (canConvert(info.arg1, typeSig1)) { break; } if (ibos < ibosMinLift || !bos.CanLift()) { continue; } Debug.Assert(typeSig1.IsValType()); typeSig1 = GetSymbolLoader().GetTypeManager().GetNullable(typeSig1); if (!canConvert(info.arg1, typeSig1)) { continue; } switch (GetConvKind(info.ptRaw1, bos.pt1)) { default: grflt = grflt | LiftFlags.Convert1; break; case ConvKind.Implicit: case ConvKind.Identity: grflt = grflt | LiftFlags.Lift1; break; } break; case ConvKind.Implicit: break; case ConvKind.Identity: if (cv2 == ConvKind.Identity) { BinOpFullSig newsig = new BinOpFullSig(this, bos); if (newsig.Type1() != null && newsig.Type2() != null) { // Exact match. rgbofs.Add(newsig); return true; } } break; } switch (cv2) { default: VSFAIL("Shouldn't happen!"); continue; case ConvKind.None: continue; case ConvKind.Explicit: if (!info.arg2.isCONSTANT_OK()) { continue; } // Need to try to convert. if (canConvert(info.arg2, typeSig2)) { break; } if (ibos < ibosMinLift || !bos.CanLift()) { continue; } Debug.Assert(typeSig2.IsValType()); typeSig2 = GetSymbolLoader().GetTypeManager().GetNullable(typeSig2); if (!canConvert(info.arg2, typeSig2)) { continue; } switch (GetConvKind(info.ptRaw2, bos.pt2)) { default: grflt = grflt | LiftFlags.Convert2; break; case ConvKind.Implicit: case ConvKind.Identity: grflt = grflt | LiftFlags.Lift2; break; } break; case ConvKind.Unknown: if (canConvert(info.arg2, typeSig2)) { break; } if (ibos < ibosMinLift || !bos.CanLift()) { continue; } Debug.Assert(typeSig2.IsValType()); typeSig2 = GetSymbolLoader().GetTypeManager().GetNullable(typeSig2); if (!canConvert(info.arg2, typeSig2)) { continue; } switch (GetConvKind(info.ptRaw2, bos.pt2)) { default: grflt = grflt | LiftFlags.Convert2; break; case ConvKind.Implicit: case ConvKind.Identity: grflt = grflt | LiftFlags.Lift2; break; } break; case ConvKind.Identity: case ConvKind.Implicit: break; } if (grflt != LiftFlags.None) { // We have a lifted signature. rgbofs.Add(new BinOpFullSig(typeSig1, typeSig2, bos.pfn, bos.grfos, grflt, bos.fnkind)); // NOTE: Can't skip any if we use a lifted signature because the // type might convert to int? and to long (but not to int) in which // case we should get an ambiguity. But we can skip the lifted ones.... ibosMinLift = ibos + bos.cbosSkip + 1; } else { // Record it as applicable and skip accordingly. rgbofs.Add(new BinOpFullSig(this, bos)); ibos += bos.cbosSkip; } } return false; } // Returns the index of the best match, or -1 if there is no best match. private int FindBestSignatureInList( List<BinOpFullSig> binopSignatures, BinOpArgInfo info) { Debug.Assert(binopSignatures != null); if (binopSignatures.Count == 1) { return 0; } int bestSignature = 0; int index; // Try to find a candidate for the best. for (index = 1; index < binopSignatures.Count; index++) { if (bestSignature < 0) { bestSignature = index; } else { int nT = WhichBofsIsBetter(binopSignatures[bestSignature], binopSignatures[index], info.type1, info.type2); if (nT == 0) { bestSignature = -1; } else if (nT > 0) { bestSignature = index; } } } if (bestSignature == -1) { return -1; } // Verify that the candidate really is not worse than all others. // Do we need to loop over the whole list here, or just // from 0 . bestSignature - 1? for (index = 0; index < binopSignatures.Count; index++) { if (index == bestSignature) { continue; } if (WhichBofsIsBetter(binopSignatures[bestSignature], binopSignatures[index], info.type1, info.type2) >= 0) { return -1; } } return bestSignature; } private ExprBinOp bindNullEqualityComparison(ExpressionKind ek, BinOpArgInfo info) { Expr arg1 = info.arg1; Expr arg2 = info.arg2; if (info.binopKind == BinOpKind.Equal) { CType typeBool = GetPredefindType(PredefinedType.PT_BOOL); ExprBinOp exprRes = null; if (info.type1 is NullableType && info.type2 is NullType) { arg2 = GetExprFactory().CreateZeroInit(info.type1); exprRes = GetExprFactory().CreateBinop(ek, typeBool, arg1, arg2); } if (info.type1 is NullType && info.type2 is NullableType) { arg1 = GetExprFactory().CreateZeroInit(info.type2); exprRes = GetExprFactory().CreateBinop(ek, typeBool, arg1, arg2); } if (exprRes != null) { exprRes.IsLifted = true; return exprRes; } } Expr pExpr = BadOperatorTypesError(ek, info.arg1, info.arg2, GetTypes().GetErrorSym()); pExpr.AssertIsBin(); return (ExprBinOp)pExpr; } /* This handles binding binary operators by first checking for user defined operators, then applying overload resolution to the predefined operators. It handles lifting over nullable. */ public Expr BindStandardBinop(ExpressionKind ek, Expr arg1, Expr arg2) { Debug.Assert(arg1 != null); Debug.Assert(arg2 != null); EXPRFLAG flags = 0; BinOpArgInfo info = new BinOpArgInfo(arg1, arg2); if (!GetBinopKindAndFlags(ek, out info.binopKind, out flags)) { // If we don't get the BinopKind and the flags, then we must have had some bad operator types. return BadOperatorTypesError(ek, arg1, arg2); } info.mask = (BinOpMask)(1 << (int)info.binopKind); List<BinOpFullSig> binopSignatures = new List<BinOpFullSig>(); int bestBinopSignature = -1; // First check if this is a user defined binop. If it is, return it. ExprBinOp exprUD = BindUserDefinedBinOp(ek, info); if (exprUD != null) { return exprUD; } // Get the special binop signatures. If successful, the special binop signature will be // the last item in the array of signatures that we give it. bool exactMatch = GetSpecialBinopSignatures(binopSignatures, info); if (!exactMatch) { // No match, try to get standard and lifted binop signatures. exactMatch = GetStandardAndLiftedBinopSignatures(binopSignatures, info); } // If we have an exact match in either the special binop signatures or the standard/lifted binop // signatures, then we set our best match. Otherwise, we check if we had any signatures at all. // If we didn't, then its possible where we have x == null, where x is nullable, so try to bind // the null equality comparison. Otherwise, we had some ambiguity - we have a match, but its not exact. if (exactMatch) { Debug.Assert(binopSignatures.Count > 0); bestBinopSignature = binopSignatures.Count - 1; } else if (binopSignatures.Count == 0) { // If we got no matches then it's possible that we're in the case // x == null, where x is nullable. return bindNullEqualityComparison(ek, info); } else { // We had some matches, try to find the best one. FindBestSignatureInList returns < 0 if // we don't have a best one, otherwise it returns the index of the best one in our list that // we give it. bestBinopSignature = FindBestSignatureInList(binopSignatures, info); if (bestBinopSignature < 0) { // Ambiguous. throw AmbiguousOperatorError(ek, arg1, arg2); } } // If we're here, we should have a binop signature that exactly matches. Debug.Assert(bestBinopSignature < binopSignatures.Count); // We've found the one to use, so lets go and bind it. return BindStandardBinopCore(info, binopSignatures[bestBinopSignature], ek, flags); } private Expr BindStandardBinopCore(BinOpArgInfo info, BinOpFullSig bofs, ExpressionKind ek, EXPRFLAG flags) { if (bofs.pfn == null) { return BadOperatorTypesError(ek, info.arg1, info.arg2); } if (!bofs.isLifted() || !bofs.AutoLift()) { Expr expr1 = info.arg1; Expr expr2 = info.arg2; if (bofs.ConvertOperandsBeforeBinding()) { expr1 = mustConvert(expr1, bofs.Type1()); expr2 = mustConvert(expr2, bofs.Type2()); } if (bofs.fnkind == BinOpFuncKind.BoolBitwiseOp) { return BindBoolBitwiseOp(ek, flags, expr1, expr2, bofs); } return bofs.pfn(ek, flags, expr1, expr2); } Debug.Assert(bofs.fnkind != BinOpFuncKind.BoolBitwiseOp); if (IsEnumArithmeticBinOp(ek, info)) { Expr expr1 = info.arg1; Expr expr2 = info.arg2; if (bofs.ConvertOperandsBeforeBinding()) { expr1 = mustConvert(expr1, bofs.Type1()); expr2 = mustConvert(expr2, bofs.Type2()); } return BindLiftedEnumArithmeticBinOp(ek, flags, expr1, expr2); } return BindLiftedStandardBinOp(info, bofs, ek, flags); } private ExprBinOp BindLiftedStandardBinOp(BinOpArgInfo info, BinOpFullSig bofs, ExpressionKind ek, EXPRFLAG flags) { Debug.Assert(bofs.Type1() is NullableType || bofs.Type2() is NullableType); Expr arg1 = info.arg1; Expr arg2 = info.arg2; // We want to get the base types of the arguments and attempt to bind the non-lifted form of the // method so that we error report (ie divide by zero etc), and then we store in the resulting // binop that we have a lifted operator. Expr pArgument1 = null; Expr pArgument2 = null; Expr nonLiftedArg1 = null; Expr nonLiftedArg2 = null; Expr nonLiftedResult = null; CType resultType = null; LiftArgument(arg1, bofs.Type1(), bofs.ConvertFirst(), out pArgument1, out nonLiftedArg1); LiftArgument(arg2, bofs.Type2(), bofs.ConvertSecond(), out pArgument2, out nonLiftedArg2); // Now call the non-lifted method to generate errors, and stash the result. if (!nonLiftedArg1.isNull() && !nonLiftedArg2.isNull()) { // Only compute the method if theres no nulls. If there are, we'll special case it // later, since operations with a null operand are null. nonLiftedResult = bofs.pfn(ek, flags, nonLiftedArg1, nonLiftedArg2); } // Check if we have a comparison. If so, set the result type to bool. if (info.binopKind == BinOpKind.Compare || info.binopKind == BinOpKind.Equal) { resultType = GetPredefindType(PredefinedType.PT_BOOL); } else { if (bofs.fnkind == BinOpFuncKind.EnumBinOp) { AggregateType enumType; resultType = GetEnumBinOpType(ek, nonLiftedArg1.Type, nonLiftedArg2.Type, out enumType); } else { resultType = pArgument1.Type; } resultType = resultType is NullableType ? resultType : GetSymbolLoader().GetTypeManager().GetNullable(resultType); } ExprBinOp exprRes = GetExprFactory().CreateBinop(ek, resultType, pArgument1, pArgument2); mustCast(nonLiftedResult, resultType, 0); exprRes.IsLifted = true; exprRes.Flags |= flags; Debug.Assert((exprRes.Flags & EXPRFLAG.EXF_LVALUE) == 0); return exprRes; } ///////////////////////////////////////////////////////////////////////////////// private void LiftArgument(Expr pArgument, CType pParameterType, bool bConvertBeforeLift, out Expr ppLiftedArgument, out Expr ppNonLiftedArgument) { Expr pLiftedArgument = mustConvert(pArgument, pParameterType); if (pLiftedArgument != pArgument) { MarkAsIntermediateConversion(pLiftedArgument); } Expr pNonLiftedArgument = pArgument; if (pParameterType is NullableType paramNub) { if (pNonLiftedArgument.isNull()) { pNonLiftedArgument = mustCast(pNonLiftedArgument, pParameterType); } pNonLiftedArgument = mustCast(pNonLiftedArgument, paramNub.GetUnderlyingType()); if (bConvertBeforeLift) { MarkAsIntermediateConversion(pNonLiftedArgument); } } else { pNonLiftedArgument = pLiftedArgument; } ppLiftedArgument = pLiftedArgument; ppNonLiftedArgument = pNonLiftedArgument; } /* Get the special signatures when at least one of the args is a delegate instance. Returns true iff an exact signature match is found. */ private bool GetDelBinOpSigs(List<BinOpFullSig> prgbofs, BinOpArgInfo info) { if (!info.ValidForDelegate()) { return false; } if (!info.type1.isDelegateType() && !info.type2.isDelegateType()) { return false; } // No conversions needed. Determine the lifting. This is the common case. if (info.type1 == info.type2) { prgbofs.Add(new BinOpFullSig(info.type1, info.type2, BindDelBinOp, OpSigFlags.Reference, LiftFlags.None, BinOpFuncKind.DelBinOp)); return true; } // Now, for each delegate type, if both arguments convert to that delegate type, that is a candidate // for this binary operator. It's possible that we add two candidates, in which case they will compete // in overload resolution. Or we could add no candidates. bool t1tot2 = info.type2.isDelegateType() && canConvert(info.arg1, info.type2); bool t2tot1 = info.type1.isDelegateType() && canConvert(info.arg2, info.type1); if (t1tot2) { prgbofs.Add(new BinOpFullSig(info.type2, info.type2, BindDelBinOp, OpSigFlags.Reference, LiftFlags.None, BinOpFuncKind.DelBinOp)); } if (t2tot1) { prgbofs.Add(new BinOpFullSig(info.type1, info.type1, BindDelBinOp, OpSigFlags.Reference, LiftFlags.None, BinOpFuncKind.DelBinOp)); } // Might be ambiguous so return false. return false; } /* Utility method to determine whether arg1 is convertible to typeDst, either in a regular scenario or lifted scenario. Sets pgrflt, ptypeSig1 and ptypeSig2 accordingly. */ private bool CanConvertArg1(BinOpArgInfo info, CType typeDst, out LiftFlags pgrflt, out CType ptypeSig1, out CType ptypeSig2) { ptypeSig1 = null; ptypeSig2 = null; Debug.Assert(!(typeDst is NullableType)); if (canConvert(info.arg1, typeDst)) pgrflt = LiftFlags.None; else { pgrflt = LiftFlags.None; typeDst = GetSymbolLoader().GetTypeManager().GetNullable(typeDst); if (!canConvert(info.arg1, typeDst)) return false; pgrflt = LiftFlags.Convert1; } ptypeSig1 = typeDst; if (info.type2 is NullableType) { pgrflt = pgrflt | LiftFlags.Lift2; ptypeSig2 = GetSymbolLoader().GetTypeManager().GetNullable(info.typeRaw2); } else ptypeSig2 = info.typeRaw2; return true; } /* Same as CanConvertArg1 but with the indices interchanged! */ private bool CanConvertArg2(BinOpArgInfo info, CType typeDst, out LiftFlags pgrflt, out CType ptypeSig1, out CType ptypeSig2) { Debug.Assert(!(typeDst is NullableType)); ptypeSig1 = null; ptypeSig2 = null; if (canConvert(info.arg2, typeDst)) pgrflt = LiftFlags.None; else { pgrflt = LiftFlags.None; typeDst = GetSymbolLoader().GetTypeManager().GetNullable(typeDst); if (!canConvert(info.arg2, typeDst)) return false; pgrflt = LiftFlags.Convert2; } ptypeSig2 = typeDst; if (info.type1 is NullableType) { pgrflt = pgrflt | LiftFlags.Lift1; ptypeSig1 = GetSymbolLoader().GetTypeManager().GetNullable(info.typeRaw1); } else ptypeSig1 = info.typeRaw1; return true; } /* Record the appropriate binary operator full signature from the given BinOpArgInfo. This assumes that any NullableType valued args should be lifted. */ private void RecordBinOpSigFromArgs(List<BinOpFullSig> prgbofs, BinOpArgInfo info) { LiftFlags grflt = LiftFlags.None; CType typeSig1; CType typeSig2; if (info.type1 != info.typeRaw1) { Debug.Assert(info.type1 is NullableType); grflt = grflt | LiftFlags.Lift1; typeSig1 = GetSymbolLoader().GetTypeManager().GetNullable(info.typeRaw1); } else typeSig1 = info.typeRaw1; if (info.type2 != info.typeRaw2) { Debug.Assert(info.type2 is NullableType); grflt = grflt | LiftFlags.Lift2; typeSig2 = GetSymbolLoader().GetTypeManager().GetNullable(info.typeRaw2); } else typeSig2 = info.typeRaw2; prgbofs.Add(new BinOpFullSig(typeSig1, typeSig2, BindEnumBinOp, OpSigFlags.Value, grflt, BinOpFuncKind.EnumBinOp)); } /* Get the special signatures when at least one of the args is an enum. Return true if we find an exact match. */ private bool GetEnumBinOpSigs(List<BinOpFullSig> prgbofs, BinOpArgInfo info) { if (!info.typeRaw1.isEnumType() && !info.typeRaw2.isEnumType()) { return false; } // (enum, enum) : - == != < > <= >=&| ^ // (enum, under) : + - // (under, enum) : + CType typeSig1 = null; CType typeSig2 = null; LiftFlags grflt = LiftFlags.None; // Look for the no conversions cases. Still need to determine the lifting. These are the common case. if (info.typeRaw1 == info.typeRaw2) { if (!info.ValidForEnum()) { return false; } RecordBinOpSigFromArgs(prgbofs, info); return true; } bool isValidForEnum; if (info.typeRaw1.isEnumType()) { isValidForEnum = (info.typeRaw2 == info.typeRaw1.underlyingEnumType() && info.ValidForEnumAndUnderlyingType()); } else { isValidForEnum = (info.typeRaw1 == info.typeRaw2.underlyingEnumType() && info.ValidForUnderlyingTypeAndEnum()); } if (isValidForEnum) { RecordBinOpSigFromArgs(prgbofs, info); return true; } // Now deal with the conversion cases. Since there are no conversions from enum types to other // enum types we never need to do both cases. if (info.typeRaw1.isEnumType()) { isValidForEnum = info.ValidForEnum() && CanConvertArg2(info, info.typeRaw1, out grflt, out typeSig1, out typeSig2) || info.ValidForEnumAndUnderlyingType() && CanConvertArg2(info, info.typeRaw1.underlyingEnumType(), out grflt, out typeSig1, out typeSig2); } else { isValidForEnum = info.ValidForEnum() && CanConvertArg1(info, info.typeRaw2, out grflt, out typeSig1, out typeSig2) || info.ValidForEnumAndUnderlyingType() && CanConvertArg1(info, info.typeRaw2.underlyingEnumType(), out grflt, out typeSig1, out typeSig2); } if (isValidForEnum) { prgbofs.Add(new BinOpFullSig(typeSig1, typeSig2, BindEnumBinOp, OpSigFlags.Value, grflt, BinOpFuncKind.EnumBinOp)); } return false; } private bool IsEnumArithmeticBinOp(ExpressionKind ek, BinOpArgInfo info) { switch (ek) { case ExpressionKind.Add: return info.typeRaw1.isEnumType() ^ info.typeRaw2.isEnumType(); case ExpressionKind.Subtract: return info.typeRaw1.isEnumType() | info.typeRaw2.isEnumType(); } return false; } /* Get the special signatures when at least one of the args is a pointer. Since pointers can't be type arguments, a nullable pointer is illegal, so no sense trying to lift any of these. NOTE: We don't filter out bad operators on void pointers since BindPtrBinOp gives better error messages than the operator overload resolution does. */ private bool GetPtrBinOpSigs(List<BinOpFullSig> prgbofs, BinOpArgInfo info) { if (!(info.type1 is PointerType) && !(info.type2 is PointerType)) { return false; } // (ptr, ptr) : - // (ptr, int) : + - // (ptr, uint) : + - // (ptr, long) : + - // (ptr, ulong) : + - // (int, ptr) : + // (uint, ptr) : + // (long, ptr) : + // (ulong, ptr) : + // (void, void) : == != < > <= >= // Check the common case first. if (info.type1 is PointerType && info.type2 is PointerType) { if (info.ValidForVoidPointer()) { prgbofs.Add(new BinOpFullSig(info.type1, info.type2, BindPtrCmpOp, OpSigFlags.None, LiftFlags.None, BinOpFuncKind.PtrCmpOp)); return true; } if (info.type1 == info.type2 && info.ValidForPointer()) { prgbofs.Add(new BinOpFullSig(info.type1, info.type2, BindPtrBinOp, OpSigFlags.None, LiftFlags.None, BinOpFuncKind.PtrBinOp)); return true; } return false; } CType typeT; if (info.type1 is PointerType) { if (info.type2 is NullType) { if (!info.ValidForVoidPointer()) { return false; } prgbofs.Add(new BinOpFullSig(info.type1, info.type1, BindPtrCmpOp, OpSigFlags.Convert, LiftFlags.None, BinOpFuncKind.PtrCmpOp)); return true; } if (!info.ValidForPointerAndNumber()) { return false; } for (uint i = 0; i < s_rgptIntOp.Length; i++) { if (canConvert(info.arg2, typeT = GetPredefindType(s_rgptIntOp[i]))) { prgbofs.Add(new BinOpFullSig(info.type1, typeT, BindPtrBinOp, OpSigFlags.Convert, LiftFlags.None, BinOpFuncKind.PtrBinOp)); return true; } } return false; } Debug.Assert(info.type2 is PointerType); if (info.type1 is NullType) { if (!info.ValidForVoidPointer()) { return false; } prgbofs.Add(new BinOpFullSig(info.type2, info.type2, BindPtrCmpOp, OpSigFlags.Convert, LiftFlags.None, BinOpFuncKind.PtrCmpOp)); return true; } if (!info.ValidForNumberAndPointer()) { return false; } for (uint i = 0; i < s_rgptIntOp.Length; i++) { if (canConvert(info.arg1, typeT = GetPredefindType(s_rgptIntOp[i]))) { prgbofs.Add(new BinOpFullSig(typeT, info.type2, BindPtrBinOp, OpSigFlags.Convert, LiftFlags.None, BinOpFuncKind.PtrBinOp)); return true; } } return false; } /* See if standard reference equality applies. Make sure not to return true if another == operator may be applicable and better (or ambiguous)! This also handles == on System.Delegate, since it has special rules as well. */ private bool GetRefEqualSigs(List<BinOpFullSig> prgbofs, BinOpArgInfo info) { if (info.mask != BinOpMask.Equal) { return false; } if (info.type1 != info.typeRaw1 || info.type2 != info.typeRaw2) { return false; } bool fRet = false; CType type1 = info.type1; CType type2 = info.type2; CType typeObj = GetPredefindType(PredefinedType.PT_OBJECT); CType typeCls = null; if (type1 is NullType && type2 is NullType) { typeCls = typeObj; fRet = true; goto LRecord; } // Check for: operator ==(System.Delegate, System.Delegate). CType typeDel = GetPredefindType(PredefinedType.PT_DELEGATE); if (canConvert(info.arg1, typeDel) && canConvert(info.arg2, typeDel) && !type1.isDelegateType() && !type2.isDelegateType()) { prgbofs.Add(new BinOpFullSig(typeDel, typeDel, BindDelBinOp, OpSigFlags.Convert, LiftFlags.None, BinOpFuncKind.DelBinOp)); } // The reference type equality operators only handle reference types. FUNDTYPE ft1 = type1.fundType(); FUNDTYPE ft2 = type2.fundType(); switch (ft1) { default: return false; case FUNDTYPE.FT_REF: break; case FUNDTYPE.FT_VAR: TypeParameterType parameterType1 = (TypeParameterType)type1; if (parameterType1.IsValueType() || (!parameterType1.IsReferenceType() && !(type2 is NullType))) return false; type1 = parameterType1.GetEffectiveBaseClass(); break; } if (type2 is NullType) { fRet = true; // We don't need to determine the actual best type since we're // returning true - indicating that we've found the best operator. typeCls = typeObj; goto LRecord; } switch (ft2) { default: return false; case FUNDTYPE.FT_REF: break; case FUNDTYPE.FT_VAR: TypeParameterType typeParam2 = (TypeParameterType)type2; if (typeParam2.IsValueType() || (!typeParam2.IsReferenceType() && !(type1 is NullType))) return false; type2 = typeParam2.GetEffectiveBaseClass(); break; } if (type1 is NullType) { fRet = true; // We don't need to determine the actual best type since we're // returning true - indicating that we've found the best operator. typeCls = typeObj; goto LRecord; } if (!canCast(type1, type2, CONVERTTYPE.NOUDC) && !canCast(type2, type1, CONVERTTYPE.NOUDC)) return false; if (type1.isInterfaceType() || type1.isPredefType(PredefinedType.PT_STRING) || GetSymbolLoader().HasBaseConversion(type1, typeDel)) type1 = typeObj; else if (type1 is ArrayType) type1 = GetPredefindType(PredefinedType.PT_ARRAY); else if (!type1.isClassType()) return false; if (type2.isInterfaceType() || type2.isPredefType(PredefinedType.PT_STRING) || GetSymbolLoader().HasBaseConversion(type2, typeDel)) type2 = typeObj; else if (type2 is ArrayType) type2 = GetPredefindType(PredefinedType.PT_ARRAY); else if (!type2.isClassType()) return false; Debug.Assert(type1.isClassType() && !type1.isPredefType(PredefinedType.PT_STRING) && !type1.isPredefType(PredefinedType.PT_DELEGATE)); Debug.Assert(type2.isClassType() && !type2.isPredefType(PredefinedType.PT_STRING) && !type2.isPredefType(PredefinedType.PT_DELEGATE)); if (GetSymbolLoader().HasBaseConversion(type2, type1)) typeCls = type1; else if (GetSymbolLoader().HasBaseConversion(type1, type2)) typeCls = type2; LRecord: prgbofs.Add(new BinOpFullSig(typeCls, typeCls, BindRefCmpOp, OpSigFlags.None, LiftFlags.None, BinOpFuncKind.RefCmpOp)); return fRet; } /* Determine which BinOpSig is better for overload resolution. Better means: at least as good in all Params, and better in at least one param. Better w/r to a param means: 1) same type as argument 2) implicit conversion from this one's param type to the other's param type Because of user defined conversion operators this relation is not transitive. Returns negative if ibos1 is better, positive if ibos2 is better, 0 if neither. */ private int WhichBofsIsBetter(BinOpFullSig bofs1, BinOpFullSig bofs2, CType type1, CType type2) { BetterType bt1; BetterType bt2; if (bofs1.FPreDef() && bofs2.FPreDef()) { // Faster to compare predefs. bt1 = WhichTypeIsBetter(bofs1.pt1, bofs2.pt1, type1); bt2 = WhichTypeIsBetter(bofs1.pt2, bofs2.pt2, type2); } else { bt1 = WhichTypeIsBetter(bofs1.Type1(), bofs2.Type1(), type1); bt2 = WhichTypeIsBetter(bofs1.Type2(), bofs2.Type2(), type2); } int res = 0; switch (bt1) { default: VSFAIL("Shouldn't happen"); break; case BetterType.Same: case BetterType.Neither: break; case BetterType.Left: res--; break; case BetterType.Right: res++; break; } switch (bt2) { default: VSFAIL("Shouldn't happen"); break; case BetterType.Same: case BetterType.Neither: break; case BetterType.Left: res--; break; case BetterType.Right: res++; break; } return res; } ///////////////////////////////////////////////////////////////////////////////// // Bind a standard unary operator. Takes care of user defined operators, predefined operators // and lifting over nullable. private static bool CalculateExprAndUnaryOpKinds( OperatorKind op, bool bChecked, out /*out*/ ExpressionKind ek, out /*out*/ UnaOpKind uok, out /*out*/ EXPRFLAG flags) { flags = 0; ek = 0; uok = 0; switch (op) { case OperatorKind.OP_UPLUS: uok = UnaOpKind.Plus; ek = ExpressionKind.UnaryPlus; break; case OperatorKind.OP_NEG: if (bChecked) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } uok = UnaOpKind.Minus; ek = ExpressionKind.Negate; break; case OperatorKind.OP_BITNOT: uok = UnaOpKind.Tilde; ek = ExpressionKind.BitwiseNot; break; case OperatorKind.OP_LOGNOT: uok = UnaOpKind.Bang; ek = ExpressionKind.LogicalNot; break; case OperatorKind.OP_POSTINC: flags |= EXPRFLAG.EXF_ISPOSTOP; if (bChecked) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } uok = UnaOpKind.IncDec; ek = ExpressionKind.Add; break; case OperatorKind.OP_PREINC: if (bChecked) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } uok = UnaOpKind.IncDec; ek = ExpressionKind.Add; break; case OperatorKind.OP_POSTDEC: flags |= EXPRFLAG.EXF_ISPOSTOP; if (bChecked) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } uok = UnaOpKind.IncDec; ek = ExpressionKind.Subtract; break; case OperatorKind.OP_PREDEC: if (bChecked) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } uok = UnaOpKind.IncDec; ek = ExpressionKind.Subtract; break; default: VSFAIL("Bad op"); return false; } return true; } public Expr BindStandardUnaryOperator(OperatorKind op, Expr pArgument) { Debug.Assert(pArgument != null); ExpressionKind ek; UnaOpKind unaryOpKind; EXPRFLAG flags; if (pArgument.Type == null || !CalculateExprAndUnaryOpKinds( op, Context.CheckedNormal, out ek/*out*/, out unaryOpKind/*out*/, out flags/*out*/)) { return BadOperatorTypesError(ExpressionKind.UnaryOp, pArgument, null); } UnaOpMask unaryOpMask = (UnaOpMask)(1 << (int)unaryOpKind); CType type = pArgument.Type; List<UnaOpFullSig> pSignatures = new List<UnaOpFullSig>(); Expr pResult = null; UnaryOperatorSignatureFindResult eResultOfSignatureFind = PopulateSignatureList(pArgument, unaryOpKind, unaryOpMask, ek, flags, pSignatures, out pResult); // nBestSignature is a 0-based index. int nBestSignature = pSignatures.Count - 1; if (eResultOfSignatureFind == UnaryOperatorSignatureFindResult.Return) { Debug.Assert(pResult != null); return pResult; } else if (eResultOfSignatureFind != UnaryOperatorSignatureFindResult.Match) { // If we didn't find a best match while populating, try to find while doing // applicability testing. if (!FindApplicableSignatures( pArgument, unaryOpMask, pSignatures)) { if (pSignatures.Count == 0) { return BadOperatorTypesError(ek, pArgument, null); } nBestSignature = 0; // If we couldn't find exactly one, then we need to do some betterness testing. if (pSignatures.Count != 1) { // Determine which is best. for (int iuofs = 1; iuofs < pSignatures.Count; iuofs++) { if (nBestSignature < 0) { nBestSignature = iuofs; } else { int nT = WhichUofsIsBetter(pSignatures[nBestSignature], pSignatures[iuofs], type); if (nT == 0) { nBestSignature = -1; } else if (nT > 0) { nBestSignature = iuofs; } } } if (nBestSignature < 0) { // Ambiguous. throw AmbiguousOperatorError(ek, pArgument, null); } // Verify that our answer works. for (int iuofs = 0; iuofs < pSignatures.Count; iuofs++) { if (iuofs == nBestSignature) { continue; } if (WhichUofsIsBetter(pSignatures[nBestSignature], pSignatures[iuofs], type) >= 0) { throw AmbiguousOperatorError(ek, pArgument, null); } } } } else { nBestSignature = pSignatures.Count - 1; } } Debug.Assert(nBestSignature < pSignatures.Count); UnaOpFullSig uofs = pSignatures[nBestSignature]; if (uofs.pfn == null) { if (unaryOpKind == UnaOpKind.IncDec) { return BindIncOp(ek, flags, pArgument, uofs); } return BadOperatorTypesError(ek, pArgument, null); } if (uofs.isLifted()) { return BindLiftedStandardUnop(ek, flags, pArgument, uofs); } // Try the conversion - if it fails, do a cast without user defined casts. Expr arg = tryConvert(pArgument, uofs.GetType()); if (arg == null) { arg = mustCast(pArgument, uofs.GetType(), CONVERTTYPE.NOUDC); } return uofs.pfn(ek, flags, arg); } ///////////////////////////////////////////////////////////////////////////////// private UnaryOperatorSignatureFindResult PopulateSignatureList(Expr pArgument, UnaOpKind unaryOpKind, UnaOpMask unaryOpMask, ExpressionKind exprKind, EXPRFLAG flags, List<UnaOpFullSig> pSignatures, out Expr ppResult) { // We should have already checked argument != null and argument.type != null. Debug.Assert(pArgument != null); Debug.Assert(pArgument.Type != null); ppResult = null; CType pArgumentType = pArgument.Type; CType pRawType = pArgumentType.StripNubs(); PredefinedType ptRaw = pRawType.isPredefined() ? pRawType.getPredefType() : PredefinedType.PT_COUNT; // Find all applicable operator signatures. // First check for special ones (enum, ptr) and check for user defined ops. if (ptRaw > PredefinedType.PT_ULONG) { // Enum types are special in that they carry a set of "predefined" operators (~ and inc/dec). if (pRawType.isEnumType()) { if ((unaryOpMask & (UnaOpMask.Tilde | UnaOpMask.IncDec)) != 0) { // We have an exact match. LiftFlags liftFlags = LiftFlags.None; CType typeSig = pArgumentType; if (typeSig is NullableType nubTypeSig) { if (nubTypeSig.GetUnderlyingType() != pRawType) { typeSig = GetSymbolLoader().GetTypeManager().GetNullable(pRawType); } liftFlags = LiftFlags.Lift1; } if (unaryOpKind == UnaOpKind.Tilde) { pSignatures.Add(new UnaOpFullSig( typeSig.getAggregate().GetUnderlyingType(), BindEnumUnaOp, liftFlags, UnaOpFuncKind.EnumUnaOp)); } else { // For enums, we want to add the signature as the underlying type so that we'll // perform the conversions to and from the enum type. pSignatures.Add(new UnaOpFullSig( typeSig.getAggregate().GetUnderlyingType(), null, liftFlags, UnaOpFuncKind.None)); } return UnaryOperatorSignatureFindResult.Match; } } else if (unaryOpKind == UnaOpKind.IncDec) { // Check for pointers if (pArgumentType is PointerType) { pSignatures.Add(new UnaOpFullSig( pArgumentType, null, LiftFlags.None, UnaOpFuncKind.None)); return UnaryOperatorSignatureFindResult.Match; } // Check for user defined inc/dec ExprMultiGet exprGet = GetExprFactory().CreateMultiGet(0, pArgumentType, null); Expr exprVal = bindUDUnop((ExpressionKind)(exprKind - ExpressionKind.Add + ExpressionKind.Inc), exprGet); if (exprVal != null) { if (exprVal.Type != null && !(exprVal.Type is ErrorType) && exprVal.Type != pArgumentType) { exprVal = mustConvert(exprVal, pArgumentType); } Debug.Assert(pArgument != null); ExprMulti exprMulti = GetExprFactory().CreateMulti(EXPRFLAG.EXF_ASSGOP | flags, pArgumentType, pArgument, exprVal); exprGet.OptionalMulti = exprMulti; // Check whether Lvalue can be assigned. checkLvalue may return true // despite reporting an error. if (!checkLvalue(pArgument, CheckLvalueKind.Increment)) { // This seems like it can never be reached - exprVal is only valid if // we have a UDUnop, and in order for checkLValue to return false, either the // arg has to not be OK, in which case we shouldn't get here, or we have an // AnonMeth, Lambda, or Constant, all of which cannot have UDUnops defined for them. exprMulti.SetError(); } ppResult = exprMulti; return UnaryOperatorSignatureFindResult.Return; } // Try for a predefined increment operator. } else { // Check for user defined. Expr expr = bindUDUnop(exprKind, pArgument); if (expr != null) { ppResult = expr; return UnaryOperatorSignatureFindResult.Return; } } } return UnaryOperatorSignatureFindResult.Continue; } ///////////////////////////////////////////////////////////////////////////////// private bool FindApplicableSignatures( Expr pArgument, UnaOpMask unaryOpMask, List<UnaOpFullSig> pSignatures) { // All callers should already assert this to be the case. Debug.Assert(pArgument != null); Debug.Assert(pArgument.Type != null); long iuosMinLift = 0; CType pArgumentType = pArgument.Type; CType pRawType = pArgumentType.StripNubs(); PredefinedType pt = pArgumentType.isPredefined() ? pArgumentType.getPredefType() : PredefinedType.PT_COUNT; PredefinedType ptRaw = pRawType.isPredefined() ? pRawType.getPredefType() : PredefinedType.PT_COUNT; for (int index = 0; index < g_rguos.Length; index++) { UnaOpSig uos = g_rguos[index]; if ((uos.grfuom & unaryOpMask) == 0) { continue; } ConvKind cv = GetConvKind(pt, g_rguos[index].pt); CType typeSig = null; switch (cv) { default: VSFAIL("Shouldn't happen!"); continue; case ConvKind.None: continue; case ConvKind.Explicit: if (!pArgument.isCONSTANT_OK()) { continue; } if (canConvert(pArgument, typeSig = GetPredefindType(uos.pt))) { break; } if (index < iuosMinLift) { continue; } typeSig = GetSymbolLoader().GetTypeManager().GetNullable(typeSig); if (!canConvert(pArgument, typeSig)) { continue; } break; case ConvKind.Unknown: if (canConvert(pArgument, typeSig = GetPredefindType(uos.pt))) { break; } if (index < iuosMinLift) { continue; } typeSig = GetSymbolLoader().GetTypeManager().GetNullable(typeSig); if (!canConvert(pArgument, typeSig)) { continue; } break; case ConvKind.Implicit: break; case ConvKind.Identity: { UnaOpFullSig result = new UnaOpFullSig(this, uos); if (result.GetType() != null) { pSignatures.Add(result); return true; } } break; } if (typeSig is NullableType) { // Need to use a lifted signature. LiftFlags grflt = LiftFlags.None; switch (GetConvKind(ptRaw, uos.pt)) { default: grflt = grflt | LiftFlags.Convert1; break; case ConvKind.Implicit: case ConvKind.Identity: grflt = grflt | LiftFlags.Lift1; break; } pSignatures.Add(new UnaOpFullSig(typeSig, uos.pfn, grflt, uos.fnkind)); // NOTE: Can't skip any if we use the lifted signature because the // type might convert to int? and to long (but not to int) in which // case we should get an ambiguity. But we can skip the lifted ones.... iuosMinLift = index + uos.cuosSkip + 1; } else { // Record it as applicable and skip accordingly. UnaOpFullSig newResult = new UnaOpFullSig(this, uos); if (newResult.GetType() != null) { pSignatures.Add(newResult); } index += uos.cuosSkip; } } return false; } private ExprOperator BindLiftedStandardUnop(ExpressionKind ek, EXPRFLAG flags, Expr arg, UnaOpFullSig uofs) { NullableType type = uofs.GetType() as NullableType; Debug.Assert(arg?.Type != null); if (arg.Type is NullType) { return BadOperatorTypesError(ek, arg, null, type); } Expr pArgument = null; Expr nonLiftedArg = null; LiftArgument(arg, uofs.GetType(), uofs.Convert(), out pArgument, out nonLiftedArg); // Now call the function with the non lifted arguments to report errors. Expr nonLiftedResult = uofs.pfn(ek, flags, nonLiftedArg); ExprUnaryOp exprRes = GetExprFactory().CreateUnaryOp(ek, type, pArgument); mustCast(nonLiftedResult, type, 0); exprRes.Flags |= flags; Debug.Assert((exprRes.Flags & EXPRFLAG.EXF_LVALUE) == 0); return exprRes; } /* Determine which UnaOpSig is better for overload resolution. Returns negative if iuos1 is better, positive if iuos2 is better, 0 if neither. */ private int WhichUofsIsBetter(UnaOpFullSig uofs1, UnaOpFullSig uofs2, CType typeArg) { BetterType bt; if (uofs1.FPreDef() && uofs2.FPreDef()) { // Faster to compare predefs. bt = WhichTypeIsBetter(uofs1.pt, uofs2.pt, typeArg); } else { bt = WhichTypeIsBetter(uofs1.GetType(), uofs2.GetType(), typeArg); } switch (bt) { default: VSFAIL("Shouldn't happen"); return 0; case BetterType.Same: case BetterType.Neither: return 0; case BetterType.Left: return -1; case BetterType.Right: return +1; } } /* Handles standard binary integer based operators. */ private ExprOperator BindIntBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(arg1.Type.isPredefined() && arg2.Type.isPredefined() && arg1.Type.getPredefType() == arg2.Type.getPredefType()); return BindIntOp(ek, flags, arg1, arg2, arg1.Type.getPredefType()); } /* Handles standard unary integer based operators. */ private ExprOperator BindIntUnaOp(ExpressionKind ek, EXPRFLAG flags, Expr arg) { Debug.Assert(arg.Type.isPredefined()); return BindIntOp(ek, flags, arg, null, arg.Type.getPredefType()); } /* Handles standard binary floating point (float, double) based operators. */ private ExprOperator BindRealBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(arg1.Type.isPredefined() && arg2.Type.isPredefined() && arg1.Type.getPredefType() == arg2.Type.getPredefType()); return bindFloatOp(ek, flags, arg1, arg2); } /* Handles standard unary floating point (float, double) based operators. */ private ExprOperator BindRealUnaOp(ExpressionKind ek, EXPRFLAG flags, Expr arg) { Debug.Assert(arg.Type.isPredefined()); return bindFloatOp(ek, flags, arg, null); } /* Handles standard increment and decrement operators. */ private Expr BindIncOp(ExpressionKind ek, EXPRFLAG flags, Expr arg, UnaOpFullSig uofs) { Debug.Assert(ek == ExpressionKind.Add || ek == ExpressionKind.Subtract); if (!checkLvalue(arg, CheckLvalueKind.Increment)) { Expr rval = GetExprFactory().CreateBinop(ek, arg.Type, arg, null); rval.SetError(); return rval; } CType typeRaw = uofs.GetType().StripNubs(); FUNDTYPE ft = typeRaw.fundType(); if (ft == FUNDTYPE.FT_R8 || ft == FUNDTYPE.FT_R4) { flags &= ~EXPRFLAG.EXF_CHECKOVERFLOW; } if (uofs.isLifted()) { return BindLiftedIncOp(ek, flags, arg, uofs); } else { return BindNonliftedIncOp(ek, flags, arg, uofs); } } private Expr BindIncOpCore(ExpressionKind ek, EXPRFLAG flags, Expr exprVal, CType type) { Debug.Assert(ek == ExpressionKind.Add || ek == ExpressionKind.Subtract); ConstVal cv; Expr pExprResult = null; if (type.isEnumType() && type.fundType() > FUNDTYPE.FT_LASTINTEGRAL) { // This is an error case when enum derives from an illegal type. Just treat it as an int. type = GetPredefindType(PredefinedType.PT_INT); } FUNDTYPE ft = type.fundType(); CType typeTmp = type; switch (ft) { default: { Debug.Assert(type.isPredefType(PredefinedType.PT_DECIMAL)); ek = ek == ExpressionKind.Add ? ExpressionKind.DecimalInc : ExpressionKind.DecimalDec; PREDEFMETH predefMeth = ek == ExpressionKind.DecimalInc ? PREDEFMETH.PM_DECIMAL_OPINCREMENT : PREDEFMETH.PM_DECIMAL_OPDECREMENT; pExprResult = CreateUnaryOpForPredefMethodCall(ek, predefMeth, type, exprVal); } break; case FUNDTYPE.FT_PTR: cv = ConstVal.Get(1); pExprResult = BindPtrBinOp(ek, flags, exprVal, GetExprFactory().CreateConstant(GetPredefindType(PredefinedType.PT_INT), cv)); break; case FUNDTYPE.FT_I1: case FUNDTYPE.FT_I2: case FUNDTYPE.FT_U1: case FUNDTYPE.FT_U2: typeTmp = GetPredefindType(PredefinedType.PT_INT); cv = ConstVal.Get(1); pExprResult = LScalar(ek, flags, exprVal, type, cv, pExprResult, typeTmp); break; case FUNDTYPE.FT_I4: case FUNDTYPE.FT_U4: cv = ConstVal.Get(1); pExprResult = LScalar(ek, flags, exprVal, type, cv, pExprResult, typeTmp); break; case FUNDTYPE.FT_I8: case FUNDTYPE.FT_U8: cv = ConstVal.Get((long)1); pExprResult = LScalar(ek, flags, exprVal, type, cv, pExprResult, typeTmp); break; case FUNDTYPE.FT_R4: case FUNDTYPE.FT_R8: cv = ConstVal.Get(1.0); pExprResult = LScalar(ek, flags, exprVal, type, cv, pExprResult, typeTmp); break; } Debug.Assert(pExprResult != null); Debug.Assert(!(pExprResult.Type is NullableType)); return pExprResult; } private Expr LScalar(ExpressionKind ek, EXPRFLAG flags, Expr exprVal, CType type, ConstVal cv, Expr pExprResult, CType typeTmp) { CType typeOne = type; if (typeOne.isEnumType()) { typeOne = typeOne.underlyingEnumType(); } pExprResult = GetExprFactory().CreateBinop(ek, typeTmp, exprVal, GetExprFactory().CreateConstant(typeOne, cv)); pExprResult.Flags |= flags; if (typeTmp != type) { pExprResult = mustCast(pExprResult, type, CONVERTTYPE.NOUDC); } return pExprResult; } private ExprMulti BindNonliftedIncOp(ExpressionKind ek, EXPRFLAG flags, Expr arg, UnaOpFullSig uofs) { Debug.Assert(ek == ExpressionKind.Add || ek == ExpressionKind.Subtract); Debug.Assert(!uofs.isLifted()); Debug.Assert(arg != null); ExprMultiGet exprGet = GetExprFactory().CreateMultiGet(EXPRFLAG.EXF_ASSGOP, arg.Type, null); Expr exprVal = exprGet; CType type = uofs.GetType(); Debug.Assert(!(type is NullableType)); // These used to be converts, but we're making them casts now - this is because // we need to remove the ability to call inc(sbyte) etc for all types smaller than int. // Note however, that this will give us different error messages on compile time versus runtime // for checked increments. // // Also, we changed it so that we now generate the cast to and from enum for enum increments. exprVal = mustCast(exprVal, type); exprVal = BindIncOpCore(ek, flags, exprVal, type); Expr op = mustCast(exprVal, arg.Type, CONVERTTYPE.NOUDC); ExprMulti exprMulti = GetExprFactory().CreateMulti(EXPRFLAG.EXF_ASSGOP | flags, arg.Type, arg, op); exprGet.OptionalMulti = exprMulti; return exprMulti; } private ExprMulti BindLiftedIncOp(ExpressionKind ek, EXPRFLAG flags, Expr arg, UnaOpFullSig uofs) { Debug.Assert(ek == ExpressionKind.Add || ek == ExpressionKind.Subtract); Debug.Assert(uofs.isLifted()); NullableType type = uofs.GetType() as NullableType; Debug.Assert(arg != null); ExprMultiGet exprGet = GetExprFactory().CreateMultiGet(EXPRFLAG.EXF_ASSGOP, arg.Type, null); Expr exprVal = exprGet; Expr nonLiftedArg = exprVal; // We want to give the lifted argument as the binop, but use the non-lifted argument as the // argument of the call. //Debug.Assert(uofs.LiftArg() || type.IsValType()); nonLiftedArg = mustCast(nonLiftedArg, type.GetUnderlyingType()); Expr nonLiftedResult = BindIncOpCore(ek, flags, nonLiftedArg, type.GetUnderlyingType()); exprVal = mustCast(exprVal, type); ExprUnaryOp exprRes = GetExprFactory().CreateUnaryOp((ek == ExpressionKind.Add) ? ExpressionKind.Inc : ExpressionKind.Dec, arg.Type/* type */, exprVal); mustCast(mustCast(nonLiftedResult, type), arg.Type); exprRes.Flags |= flags; ExprMulti exprMulti = GetExprFactory().CreateMulti(EXPRFLAG.EXF_ASSGOP | flags, arg.Type, arg, exprRes); exprGet.OptionalMulti = exprMulti; return exprMulti; } /* Handles standard binary decimal based operators. This function is called twice by the EE for every binary operator it evaluates Here is how it works. */ private ExprBinOp BindDecBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(arg1.Type.isPredefType(PredefinedType.PT_DECIMAL) && arg2.Type.isPredefType(PredefinedType.PT_DECIMAL)); CType typeDec = GetPredefindType(PredefinedType.PT_DECIMAL); Debug.Assert(typeDec != null); CType typeRet; switch (ek) { default: VSFAIL("Bad kind"); typeRet = null; break; case ExpressionKind.Add: case ExpressionKind.Subtract: case ExpressionKind.Multiply: case ExpressionKind.Divide: case ExpressionKind.Modulo: typeRet = typeDec; break; case ExpressionKind.LessThan: case ExpressionKind.LessThanOrEqual: case ExpressionKind.GreaterThan: case ExpressionKind.GreaterThanOrEqual: case ExpressionKind.Eq: case ExpressionKind.NotEq: typeRet = GetPredefindType(PredefinedType.PT_BOOL); break; } return GetExprFactory().CreateBinop(ek, typeRet, arg1, arg2); } /* Handles standard unary decimal based operators. */ private ExprUnaryOp BindDecUnaOp(ExpressionKind ek, EXPRFLAG flags, Expr arg) { Debug.Assert(arg.Type.isPredefType(PredefinedType.PT_DECIMAL)); Debug.Assert(ek == ExpressionKind.Negate || ek == ExpressionKind.UnaryPlus); CType typeDec = GetPredefindType(PredefinedType.PT_DECIMAL); Debug.Assert(typeDec != null); if (ek == ExpressionKind.Negate) { PREDEFMETH predefMeth = PREDEFMETH.PM_DECIMAL_OPUNARYMINUS; return CreateUnaryOpForPredefMethodCall(ExpressionKind.DecimalNegate, predefMeth, typeDec, arg); } return GetExprFactory().CreateUnaryOp(ExpressionKind.UnaryPlus, typeDec, arg); } /* Handles string concatenation. */ private Expr BindStrBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(ek == ExpressionKind.Add); Debug.Assert(arg1.Type.isPredefType(PredefinedType.PT_STRING) || arg2.Type.isPredefType(PredefinedType.PT_STRING)); return bindStringConcat(arg1, arg2); } /* Bind a shift operator: <<, >>. These can have integer or long first operands, and second operand must be int. */ private ExprBinOp BindShiftOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(ek == ExpressionKind.LeftShirt || ek == ExpressionKind.RightShift); Debug.Assert(arg1.Type.isPredefined()); Debug.Assert(arg2.Type.isPredefType(PredefinedType.PT_INT)); PredefinedType ptOp = arg1.Type.getPredefType(); Debug.Assert(ptOp == PredefinedType.PT_INT || ptOp == PredefinedType.PT_UINT || ptOp == PredefinedType.PT_LONG || ptOp == PredefinedType.PT_ULONG); return GetExprFactory().CreateBinop(ek, arg1.Type, arg1, arg2); } /* Bind a bool binary operator: ==, !=, &&, ||, , |, ^. If both operands are constant, the result will be a constant also. */ private ExprBinOp BindBoolBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(arg1 != null); Debug.Assert(arg2 != null); Debug.Assert(arg1.Type.isPredefType(PredefinedType.PT_BOOL) || (arg1.Type is NullableType argNubType1 && argNubType1.GetUnderlyingType().isPredefType(PredefinedType.PT_BOOL))); Debug.Assert(arg2.Type.isPredefType(PredefinedType.PT_BOOL) || (arg2.Type is NullableType argNubType2 && argNubType2.GetUnderlyingType().isPredefType(PredefinedType.PT_BOOL))); return GetExprFactory().CreateBinop(ek, GetPredefindType(PredefinedType.PT_BOOL), arg1, arg2); } private ExprOperator BindBoolBitwiseOp(ExpressionKind ek, EXPRFLAG flags, Expr expr1, Expr expr2, BinOpFullSig bofs) { Debug.Assert(ek == ExpressionKind.BitwiseAnd || ek == ExpressionKind.BitwiseOr); Debug.Assert(expr1.Type.isPredefType(PredefinedType.PT_BOOL) || expr1.Type is NullableType expNubType1 && expNubType1.GetUnderlyingType().isPredefType(PredefinedType.PT_BOOL)); Debug.Assert(expr2.Type.isPredefType(PredefinedType.PT_BOOL) || expr2.Type is NullableType expNubType2 && expNubType2.GetUnderlyingType().isPredefType(PredefinedType.PT_BOOL)); if (expr1.Type is NullableType || expr2.Type is NullableType) { CType typeBool = GetPredefindType(PredefinedType.PT_BOOL); CType typeRes = GetSymbolLoader().GetTypeManager().GetNullable(typeBool); // Get the non-lifted result. Expr nonLiftedArg1 = CNullable.StripNullableConstructor(expr1); Expr nonLiftedArg2 = CNullable.StripNullableConstructor(expr2); Expr nonLiftedResult = null; if (!(nonLiftedArg1.Type is NullableType) && !(nonLiftedArg2.Type is NullableType)) { nonLiftedResult = BindBoolBinOp(ek, flags, nonLiftedArg1, nonLiftedArg2); } // Make the binop and set that its lifted. ExprBinOp exprRes = GetExprFactory().CreateBinop(ek, typeRes, expr1, expr2); if (nonLiftedResult != null) { // Bitwise operators can have null non-lifted results if we have a nub sym somewhere. mustCast(nonLiftedResult, typeRes, 0); } exprRes.IsLifted = true; exprRes.Flags |= flags; Debug.Assert((exprRes.Flags & EXPRFLAG.EXF_LVALUE) == 0); return exprRes; } return BindBoolBinOp(ek, flags, expr1, expr2); } private Expr BindLiftedBoolBitwiseOp(ExpressionKind ek, EXPRFLAG flags, Expr expr1, Expr expr2) { return null; } /* Handles boolean unary operator (!). */ private Expr BindBoolUnaOp(ExpressionKind ek, EXPRFLAG flags, Expr arg) { Debug.Assert(arg.Type.isPredefType(PredefinedType.PT_BOOL)); Debug.Assert(ek == ExpressionKind.LogicalNot); // Get the result type and operand type. CType typeBool = GetPredefindType(PredefinedType.PT_BOOL); // Determine if arg has a constant value. // Strip off EXPRKIND.EK_SEQUENCE for constant checking. Expr argConst = arg.GetConst(); if (argConst == null) return GetExprFactory().CreateUnaryOp(ExpressionKind.LogicalNot, typeBool, arg); return GetExprFactory().CreateConstant(typeBool, ConstVal.Get(((ExprConstant)argConst).Val.Int32Val == 0)); } /* Handles string equality. */ private ExprBinOp BindStrCmpOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(ek == ExpressionKind.Eq || ek == ExpressionKind.NotEq); Debug.Assert(arg1.Type.isPredefType(PredefinedType.PT_STRING) && arg2.Type.isPredefType(PredefinedType.PT_STRING)); // Get the predefined method for string comparison, and then stash it in the Expr so we can // transform it later. PREDEFMETH predefMeth = ek == ExpressionKind.Eq ? PREDEFMETH.PM_STRING_OPEQUALITY : PREDEFMETH.PM_STRING_OPINEQUALITY; ek = ek == ExpressionKind.Eq ? ExpressionKind.StringEq : ExpressionKind.StringNotEq; return CreateBinopForPredefMethodCall(ek, predefMeth, GetPredefindType(PredefinedType.PT_BOOL), arg1, arg2); } /* Handles reference equality operators. Type variables come through here. */ private ExprBinOp BindRefCmpOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(ek == ExpressionKind.Eq || ek == ExpressionKind.NotEq); // Must box type variables for the verifier. arg1 = mustConvert(arg1, GetPredefindType(PredefinedType.PT_OBJECT), CONVERTTYPE.NOUDC); arg2 = mustConvert(arg2, GetPredefindType(PredefinedType.PT_OBJECT), CONVERTTYPE.NOUDC); return GetExprFactory().CreateBinop(ek, GetPredefindType(PredefinedType.PT_BOOL), arg1, arg2); } /* Handles delegate binary operators. */ private Expr BindDelBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(ek == ExpressionKind.Add || ek == ExpressionKind.Subtract || ek == ExpressionKind.Eq || ek == ExpressionKind.NotEq); Debug.Assert(arg1.Type == arg2.Type && (arg1.Type.isDelegateType() || arg1.Type.isPredefType(PredefinedType.PT_DELEGATE))); PREDEFMETH predefMeth = (PREDEFMETH)0; CType RetType = null; switch (ek) { case ExpressionKind.Add: predefMeth = PREDEFMETH.PM_DELEGATE_COMBINE; RetType = arg1.Type; ek = ExpressionKind.DelegateAdd; break; case ExpressionKind.Subtract: predefMeth = PREDEFMETH.PM_DELEGATE_REMOVE; RetType = arg1.Type; ek = ExpressionKind.DelegateSubtract; break; case ExpressionKind.Eq: predefMeth = PREDEFMETH.PM_DELEGATE_OPEQUALITY; RetType = GetPredefindType(PredefinedType.PT_BOOL); ek = ExpressionKind.DelegateEq; break; case ExpressionKind.NotEq: predefMeth = PREDEFMETH.PM_DELEGATE_OPINEQUALITY; RetType = GetPredefindType(PredefinedType.PT_BOOL); ek = ExpressionKind.DelegateNotEq; break; } return CreateBinopForPredefMethodCall(ek, predefMeth, RetType, arg1, arg2); } /* Handles enum binary operators. */ private Expr BindEnumBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { AggregateType typeEnum = null; AggregateType typeDst = GetEnumBinOpType(ek, arg1.Type, arg2.Type, out typeEnum); Debug.Assert(typeEnum != null); PredefinedType ptOp; switch (typeEnum.fundType()) { default: // Promote all smaller types to int. ptOp = PredefinedType.PT_INT; break; case FUNDTYPE.FT_U4: ptOp = PredefinedType.PT_UINT; break; case FUNDTYPE.FT_I8: ptOp = PredefinedType.PT_LONG; break; case FUNDTYPE.FT_U8: ptOp = PredefinedType.PT_ULONG; break; } CType typeOp = GetPredefindType(ptOp); arg1 = mustCast(arg1, typeOp, CONVERTTYPE.NOUDC); arg2 = mustCast(arg2, typeOp, CONVERTTYPE.NOUDC); Expr exprRes = BindIntOp(ek, flags, arg1, arg2, ptOp); if (!exprRes.IsOK) { return exprRes; } if (exprRes.Type != typeDst) { Debug.Assert(!typeDst.isPredefType(PredefinedType.PT_BOOL)); exprRes = mustCast(exprRes, typeDst, CONVERTTYPE.NOUDC); } return exprRes; } private Expr BindLiftedEnumArithmeticBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { Debug.Assert(ek == ExpressionKind.Add || ek == ExpressionKind.Subtract); CType nonNullableType1 = arg1.Type is NullableType arg1NubType ? arg1NubType.UnderlyingType : arg1.Type; CType nonNullableType2 = arg2.Type is NullableType arg2NubType ? arg2NubType.UnderlyingType : arg2.Type; if (nonNullableType1 is NullType) { nonNullableType1 = nonNullableType2.underlyingEnumType(); } else if (nonNullableType2 is NullType) { nonNullableType2 = nonNullableType1.underlyingEnumType(); } NullableType typeDst = GetTypes().GetNullable(GetEnumBinOpType(ek, nonNullableType1, nonNullableType2, out AggregateType typeEnum)); Debug.Assert(typeEnum != null); PredefinedType ptOp; switch (typeEnum.fundType()) { default: // Promote all smaller types to int. ptOp = PredefinedType.PT_INT; break; case FUNDTYPE.FT_U4: ptOp = PredefinedType.PT_UINT; break; case FUNDTYPE.FT_I8: ptOp = PredefinedType.PT_LONG; break; case FUNDTYPE.FT_U8: ptOp = PredefinedType.PT_ULONG; break; } NullableType typeOp = GetTypes().GetNullable(GetPredefindType(ptOp)); arg1 = mustCast(arg1, typeOp, CONVERTTYPE.NOUDC); arg2 = mustCast(arg2, typeOp, CONVERTTYPE.NOUDC); ExprBinOp exprRes = GetExprFactory().CreateBinop(ek, typeOp, arg1, arg2); exprRes.IsLifted = true; exprRes.Flags |= flags; Debug.Assert((exprRes.Flags & EXPRFLAG.EXF_LVALUE) == 0); if (!exprRes.IsOK) { return exprRes; } if (exprRes.Type != typeDst) { return mustCast(exprRes, typeDst, CONVERTTYPE.NOUDC); } return exprRes; } /* Handles enum unary operator (~). */ private Expr BindEnumUnaOp(ExpressionKind ek, EXPRFLAG flags, Expr arg) { Debug.Assert(ek == ExpressionKind.BitwiseNot); Debug.Assert((ExprCast)arg != null); Debug.Assert(((ExprCast)arg).Argument.Type.isEnumType()); PredefinedType ptOp; CType typeEnum = ((ExprCast)arg).Argument.Type; switch (typeEnum.fundType()) { default: // Promote all smaller types to int. ptOp = PredefinedType.PT_INT; break; case FUNDTYPE.FT_U4: ptOp = PredefinedType.PT_UINT; break; case FUNDTYPE.FT_I8: ptOp = PredefinedType.PT_LONG; break; case FUNDTYPE.FT_U8: ptOp = PredefinedType.PT_ULONG; break; } CType typeOp = GetPredefindType(ptOp); arg = mustCast(arg, typeOp, CONVERTTYPE.NOUDC); Expr exprRes = BindIntOp(ek, flags, arg, null, ptOp); if (!exprRes.IsOK) { return exprRes; } return mustCastInUncheckedContext(exprRes, typeEnum, CONVERTTYPE.NOUDC); } /* Handles pointer binary operators (+ and -). */ private Expr BindPtrBinOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { return null; } /* Handles pointer comparison operators. */ private Expr BindPtrCmpOp(ExpressionKind ek, EXPRFLAG flags, Expr arg1, Expr arg2) { return null; } /* Given a binary operator EXPRKIND, get the BinOpKind and flags. */ private bool GetBinopKindAndFlags(ExpressionKind ek, out BinOpKind pBinopKind, out EXPRFLAG flags) { flags = 0; switch (ek) { case ExpressionKind.Add: if (Context.CheckedNormal) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } pBinopKind = BinOpKind.Add; break; case ExpressionKind.Subtract: if (Context.CheckedNormal) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } pBinopKind = BinOpKind.Sub; break; case ExpressionKind.Divide: case ExpressionKind.Modulo: // EXPRKIND.EK_DIV and EXPRKIND.EK_MOD need to be treated special for hasSideEffects, // hence the EXPRFLAG.EXF_ASSGOP. Yes, this is a hack. flags |= EXPRFLAG.EXF_ASSGOP; if (Context.CheckedNormal) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } pBinopKind = BinOpKind.Mul; break; case ExpressionKind.Multiply: if (Context.CheckedNormal) { flags |= EXPRFLAG.EXF_CHECKOVERFLOW; } pBinopKind = BinOpKind.Mul; break; case ExpressionKind.BitwiseAnd: case ExpressionKind.BitwiseOr: pBinopKind = BinOpKind.Bitwise; break; case ExpressionKind.BitwiseExclusiveOr: pBinopKind = BinOpKind.BitXor; break; case ExpressionKind.LeftShirt: case ExpressionKind.RightShift: pBinopKind = BinOpKind.Shift; break; case ExpressionKind.LogicalOr: case ExpressionKind.LogicalAnd: pBinopKind = BinOpKind.Logical; break; case ExpressionKind.LessThan: case ExpressionKind.LessThanOrEqual: case ExpressionKind.GreaterThan: case ExpressionKind.GreaterThanOrEqual: pBinopKind = BinOpKind.Compare; break; case ExpressionKind.Eq: case ExpressionKind.NotEq: pBinopKind = BinOpKind.Equal; break; default: VSFAIL("Bad ek"); pBinopKind = BinOpKind.Add; return false; } return true; } /* Convert an expression involving I4, U4, I8 or U8 operands. The operands are assumed to be already converted to the correct types. */ private ExprOperator BindIntOp(ExpressionKind kind, EXPRFLAG flags, Expr op1, Expr op2, PredefinedType ptOp) { //Debug.Assert(kind.isRelational() || kind.isArithmetic() || kind.isBitwise()); Debug.Assert(ptOp == PredefinedType.PT_INT || ptOp == PredefinedType.PT_UINT || ptOp == PredefinedType.PT_LONG || ptOp == PredefinedType.PT_ULONG); CType typeOp = GetPredefindType(ptOp); Debug.Assert(typeOp != null); Debug.Assert(op1 != null && op1.Type == typeOp); Debug.Assert(op2 == null || op2.Type == typeOp); Debug.Assert((op2 == null) == (kind == ExpressionKind.Negate || kind == ExpressionKind.UnaryPlus || kind == ExpressionKind.BitwiseNot)); if (kind == ExpressionKind.Negate) { return BindIntegerNeg(flags, op1, ptOp); } CType typeDest = kind.IsRelational() ? GetPredefindType(PredefinedType.PT_BOOL) : typeOp; ExprOperator exprRes = GetExprFactory().CreateOperator(kind, typeDest, op1, op2); exprRes.Flags |= flags; Debug.Assert((exprRes.Flags & EXPRFLAG.EXF_LVALUE) == 0); return exprRes; } private ExprOperator BindIntegerNeg(EXPRFLAG flags, Expr op, PredefinedType ptOp) { // 14.6.2 Unary minus operator // For an operation of the form -x, unary operator overload resolution (14.2.3) is applied to select // a specific operator implementation. The operand is converted to the parameter type of the selected // operator, and the type of the result is the return type of the operator. The predefined negation // operators are: // // Integer negation: // // int operator -(int x); // long operator -(long x); // // The result is computed by subtracting x from zero. In a checked context, if the value of x is the // smallest int or long (-2^31 or -2^63, respectively), a System.OverflowException is thrown. In an // unchecked context, if the value of x is the smallest int or long, the result is that same value // and the overflow is not reported. // // If the operand of the negation operator is of type uint, it is converted to type long, and the // type of the result is long. An exception is the rule that permits the int value -2147483648 (-2^31) // to be written as a decimal integer literal (9.4.4.2). // // Negation of ulong is an error: // // void operator -(ulong x); // // Selection of this operator by unary operator overload resolution (14.2.3) always results in a // compile-time error. Consequently, if the operand of the negation operator is of type ulong, a // compile-time error occurs. An exception is the rule that permits the long value // -9223372036854775808 (-2^63) to be written as a decimal integer literal (9.4.4.2). Debug.Assert(ptOp == PredefinedType.PT_INT || ptOp == PredefinedType.PT_UINT || ptOp == PredefinedType.PT_LONG || ptOp == PredefinedType.PT_ULONG); CType typeOp = GetPredefindType(ptOp); Debug.Assert(typeOp != null); Debug.Assert(op != null && op.Type == typeOp); if (ptOp == PredefinedType.PT_ULONG) { return BadOperatorTypesError(ExpressionKind.Negate, op, null); } if (ptOp == PredefinedType.PT_UINT && op.Type.fundType() == FUNDTYPE.FT_U4) { ExprClass exprObj = GetExprFactory().CreateClass(GetPredefindType(PredefinedType.PT_LONG)); op = mustConvertCore(op, exprObj, CONVERTTYPE.NOUDC); } ExprOperator exprRes = GetExprFactory().CreateNeg(flags, op); Debug.Assert(0 == (exprRes.Flags & EXPRFLAG.EXF_LVALUE)); return exprRes; } /* Bind an float/double operator: +, -, , /, %, <, >, <=, >=, ==, !=. If both operations are constants, the result will be a constant also. op2 can be null for a unary operator. The operands are assumed to be already converted to the correct type. */ private ExprOperator bindFloatOp(ExpressionKind kind, EXPRFLAG flags, Expr op1, Expr op2) { //Debug.Assert(kind.isRelational() || kind.isArithmetic()); Debug.Assert(op2 == null || op1.Type == op2.Type); Debug.Assert(op1.Type.isPredefType(PredefinedType.PT_FLOAT) || op1.Type.isPredefType(PredefinedType.PT_DOUBLE)); // Allocate the result expression. CType typeDest = kind.IsRelational() ? GetPredefindType(PredefinedType.PT_BOOL) : op1.Type; ExprOperator exprRes = GetExprFactory().CreateOperator(kind, typeDest, op1, op2); flags = ~EXPRFLAG.EXF_CHECKOVERFLOW; exprRes.Flags &= flags; return exprRes; } private ExprConcat bindStringConcat(Expr op1, Expr op2) { // If the concatenation consists solely of two constants then we must // realize the concatenation into a single constant node at this time. // Why? Because we have to know whether // // string x = "c" + "d"; // // is legal or not. We also need to be able to determine during flow // checking that // // switch("a" + "b"){ case "a": ++foo; break; } // // contains unreachable code. // // However we can defer further merging of concatenation trees until // the optimization pass after flow checking. Debug.Assert(op1 != null); Debug.Assert(op2 != null); return GetExprFactory().CreateConcat(op1, op2); } /* Report an ambiguous operator types error. */ private RuntimeBinderException AmbiguousOperatorError(ExpressionKind ek, Expr op1, Expr op2) { Debug.Assert(op1 != null); // This is exactly the same "hack" that BadOperatorError uses. The first operand contains the // name of the operator in its errorString. string strOp = op1.ErrorString; // Bad arg types - report error to user. return op2 != null ? GetErrorContext().Error(ErrorCode.ERR_AmbigBinaryOps, strOp, op1.Type, op2.Type) : GetErrorContext().Error(ErrorCode.ERR_AmbigUnaryOp, strOp, op1.Type); } private Expr BindUserBoolOp(ExpressionKind kind, ExprCall pCall) { Debug.Assert(pCall != null); Debug.Assert(pCall.MethWithInst.Meth() != null); Debug.Assert(pCall.OptionalArguments != null); Debug.Assert(kind == ExpressionKind.LogicalAnd || kind == ExpressionKind.LogicalOr); CType typeRet = pCall.Type; Debug.Assert(pCall.MethWithInst.Meth().Params.Count == 2); if (!GetTypes().SubstEqualTypes(typeRet, pCall.MethWithInst.Meth().Params[0], typeRet) || !GetTypes().SubstEqualTypes(typeRet, pCall.MethWithInst.Meth().Params[1], typeRet)) { throw GetErrorContext().Error(ErrorCode.ERR_BadBoolOp, pCall.MethWithInst); } ExprList list = (ExprList)pCall.OptionalArguments; Debug.Assert(list != null); Expr pExpr = list.OptionalElement; ExprWrap pExprWrap = WrapShortLivedExpression(pExpr); list.OptionalElement = pExprWrap; // Reflection load the true and false methods. SymbolLoader.RuntimeBinderSymbolTable.PopulateSymbolTableWithName(SpecialNames.CLR_True, null, pExprWrap.Type.AssociatedSystemType); SymbolLoader.RuntimeBinderSymbolTable.PopulateSymbolTableWithName(SpecialNames.CLR_False, null, pExprWrap.Type.AssociatedSystemType); Expr pCallT = bindUDUnop(ExpressionKind.True, pExprWrap); Expr pCallF = bindUDUnop(ExpressionKind.False, pExprWrap); if (pCallT == null || pCallF == null) { throw GetErrorContext().Error(ErrorCode.ERR_MustHaveOpTF, typeRet); } pCallT = mustConvert(pCallT, GetPredefindType(PredefinedType.PT_BOOL)); pCallF = mustConvert(pCallF, GetPredefindType(PredefinedType.PT_BOOL)); return GetExprFactory().CreateUserLogOp(typeRet, kind == ExpressionKind.LogicalAnd ? pCallF : pCallT, pCall); } private AggregateType GetUserDefinedBinopArgumentType(CType type) { for (; ;) { switch (type.GetTypeKind()) { case TypeKind.TK_NullableType: type = type.StripNubs(); break; case TypeKind.TK_TypeParameterType: type = (type as TypeParameterType).GetEffectiveBaseClass(); break; case TypeKind.TK_AggregateType: AggregateType ats = (AggregateType)type; if ((ats.isClassType() || ats.isStructType()) && !ats.getAggregate().IsSkipUDOps()) { return ats; } return null; default: return null; } } } private int GetUserDefinedBinopArgumentTypes(CType type1, CType type2, AggregateType[] rgats) { int cats = 0; rgats[0] = GetUserDefinedBinopArgumentType(type1); if (rgats[0] != null) { ++cats; } rgats[cats] = GetUserDefinedBinopArgumentType(type2); if (rgats[cats] != null) { ++cats; } if (cats == 2 && rgats[0] == rgats[1]) { // Common case: they're the same. cats = 1; } return cats; } private bool UserDefinedBinaryOperatorCanBeLifted(ExpressionKind ek, MethodSymbol method, AggregateType ats, TypeArray Params) { if (!Params[0].IsNonNubValType()) { return false; } if (!Params[1].IsNonNubValType()) { return false; } CType typeRet = GetTypes().SubstType(method.RetType, ats); if (!typeRet.IsNonNubValType()) { return false; } switch (ek) { case ExpressionKind.Eq: case ExpressionKind.NotEq: if (!typeRet.isPredefType(PredefinedType.PT_BOOL)) { return false; } if (Params[0] != Params[1]) { return false; } return true; case ExpressionKind.GreaterThan: case ExpressionKind.GreaterThanOrEqual: case ExpressionKind.LessThan: case ExpressionKind.LessThanOrEqual: if (!typeRet.isPredefType(PredefinedType.PT_BOOL)) { return false; } return true; default: return true; } } // If the operator is applicable in either its regular or lifted forms, // add it to the candidate set and return true, otherwise return false. private bool UserDefinedBinaryOperatorIsApplicable(List<CandidateFunctionMember> candidateList, ExpressionKind ek, MethodSymbol method, AggregateType ats, Expr arg1, Expr arg2, bool fDontLift) { if (!method.isOperator || method.Params.Count != 2) { return false; } Debug.Assert(method.typeVars.Count == 0); TypeArray paramsCur = GetTypes().SubstTypeArray(method.Params, ats); if (canConvert(arg1, paramsCur[0]) && canConvert(arg2, paramsCur[1])) { candidateList.Add(new CandidateFunctionMember( new MethPropWithInst(method, ats, BSYMMGR.EmptyTypeArray()), paramsCur, 0, // No lifted arguments false)); return true; } if (fDontLift || !UserDefinedBinaryOperatorCanBeLifted(ek, method, ats, paramsCur)) { return false; } CType[] rgtype = new CType[2]; rgtype[0] = GetTypes().GetNullable(paramsCur[0]); rgtype[1] = GetTypes().GetNullable(paramsCur[1]); if (!canConvert(arg1, rgtype[0]) || !canConvert(arg2, rgtype[1])) { return false; } candidateList.Add(new CandidateFunctionMember( new MethPropWithInst(method, ats, BSYMMGR.EmptyTypeArray()), GetGlobalSymbols().AllocParams(2, rgtype), 2, // two lifted arguments false)); return true; } private bool GetApplicableUserDefinedBinaryOperatorCandidates( List<CandidateFunctionMember> candidateList, ExpressionKind ek, AggregateType type, Expr arg1, Expr arg2, bool fDontLift) { Name name = ekName(ek); Debug.Assert(name != null); bool foundSome = false; for (MethodSymbol methCur = GetSymbolLoader().LookupAggMember(name, type.getAggregate(), symbmask_t.MASK_MethodSymbol) as MethodSymbol; methCur != null; methCur = GetSymbolLoader().LookupNextSym(methCur, type.getAggregate(), symbmask_t.MASK_MethodSymbol) as MethodSymbol) { if (UserDefinedBinaryOperatorIsApplicable(candidateList, ek, methCur, type, arg1, arg2, fDontLift)) { foundSome = true; } } return foundSome; } private AggregateType GetApplicableUserDefinedBinaryOperatorCandidatesInBaseTypes( List<CandidateFunctionMember> candidateList, ExpressionKind ek, AggregateType type, Expr arg1, Expr arg2, bool fDontLift, AggregateType atsStop) { for (AggregateType atsCur = type; atsCur != null && atsCur != atsStop; atsCur = atsCur.GetBaseClass()) { if (GetApplicableUserDefinedBinaryOperatorCandidates(candidateList, ek, atsCur, arg1, arg2, fDontLift)) { return atsCur; } } return null; } private ExprCall BindUDBinop(ExpressionKind ek, Expr arg1, Expr arg2, bool fDontLift, out MethPropWithInst ppmpwi) { List<CandidateFunctionMember> methFirst = new List<CandidateFunctionMember>(); ppmpwi = null; AggregateType[] rgats = { null, null }; int cats = GetUserDefinedBinopArgumentTypes(arg1.Type, arg2.Type, rgats); if (cats == 0) { return null; } else if (cats == 1) { GetApplicableUserDefinedBinaryOperatorCandidatesInBaseTypes(methFirst, ek, rgats[0], arg1, arg2, fDontLift, null); } else { Debug.Assert(cats == 2); AggregateType atsStop = GetApplicableUserDefinedBinaryOperatorCandidatesInBaseTypes(methFirst, ek, rgats[0], arg1, arg2, fDontLift, null); GetApplicableUserDefinedBinaryOperatorCandidatesInBaseTypes(methFirst, ek, rgats[1], arg1, arg2, fDontLift, atsStop); } if (methFirst.IsEmpty()) { return null; } ExprList args = GetExprFactory().CreateList(arg1, arg2); ArgInfos info = new ArgInfos(); info.carg = 2; FillInArgInfoFromArgList(info, args); CandidateFunctionMember pmethAmbig1; CandidateFunctionMember pmethAmbig2; CandidateFunctionMember pmethBest = FindBestMethod(methFirst, null, info, out pmethAmbig1, out pmethAmbig2); if (pmethBest == null) { // No winner, so its an ambiguous call... throw GetErrorContext().Error(ErrorCode.ERR_AmbigCall, pmethAmbig1.mpwi, pmethAmbig2.mpwi); } ppmpwi = pmethBest.mpwi; if (pmethBest.ctypeLift != 0) { Debug.Assert(pmethBest.ctypeLift == 2); return BindLiftedUDBinop(ek, arg1, arg2, pmethBest.@params, pmethBest.mpwi); } CType typeRetRaw = GetTypes().SubstType(pmethBest.mpwi.Meth().RetType, pmethBest.mpwi.GetType()); return BindUDBinopCall(arg1, arg2, pmethBest.@params, typeRetRaw, pmethBest.mpwi); } private ExprCall BindUDBinopCall(Expr arg1, Expr arg2, TypeArray Params, CType typeRet, MethPropWithInst mpwi) { arg1 = mustConvert(arg1, Params[0]); arg2 = mustConvert(arg2, Params[1]); ExprList args = GetExprFactory().CreateList(arg1, arg2); checkUnsafe(arg1.Type); // added to the binder so we don't bind to pointer ops checkUnsafe(arg2.Type); // added to the binder so we don't bind to pointer ops checkUnsafe(typeRet); // added to the binder so we don't bind to pointer ops ExprMemberGroup pMemGroup = GetExprFactory().CreateMemGroup(null, mpwi); ExprCall call = GetExprFactory().CreateCall(0, typeRet, args, pMemGroup, null); call.MethWithInst = new MethWithInst(mpwi); verifyMethodArgs(call, mpwi.GetType()); return call; } private ExprCall BindLiftedUDBinop(ExpressionKind ek, Expr arg1, Expr arg2, TypeArray Params, MethPropWithInst mpwi) { Expr exprVal1 = arg1; Expr exprVal2 = arg2; CType typeRet; CType typeRetRaw = GetTypes().SubstType(mpwi.Meth().RetType, mpwi.GetType()); // This is a lifted user defined operator. We know that both arguments // go to the nullable formal parameter types, and that at least one // of the arguments does not go to the non-nullable formal parameter type. // (If both went to the non-nullable types then we would not be lifting.) // We also know that the non-nullable type of the argument goes to the // non-nullable type of formal parameter. However, if it does so only via // a user-defined conversion then we should bind the conversion from the // argument to the nullable formal parameter type first, before we then // do the cast for the non-nullable call. TypeArray paramsRaw = GetTypes().SubstTypeArray(mpwi.Meth().Params, mpwi.GetType()); Debug.Assert(Params != paramsRaw); Debug.Assert(paramsRaw[0] == Params[0].GetBaseOrParameterOrElementType()); Debug.Assert(paramsRaw[1] == Params[1].GetBaseOrParameterOrElementType()); if (!canConvert(arg1.Type.StripNubs(), paramsRaw[0], CONVERTTYPE.NOUDC)) { exprVal1 = mustConvert(arg1, Params[0]); } if (!canConvert(arg2.Type.StripNubs(), paramsRaw[1], CONVERTTYPE.NOUDC)) { exprVal2 = mustConvert(arg2, Params[1]); } Expr nonLiftedArg1 = mustCast(exprVal1, paramsRaw[0]); Expr nonLiftedArg2 = mustCast(exprVal2, paramsRaw[1]); switch (ek) { default: typeRet = GetTypes().GetNullable(typeRetRaw); break; case ExpressionKind.Eq: case ExpressionKind.NotEq: Debug.Assert(paramsRaw[0] == paramsRaw[1]); Debug.Assert(typeRetRaw.isPredefType(PredefinedType.PT_BOOL)); // These ones don't lift the return type. Instead, if either side is null, the result is false. typeRet = typeRetRaw; break; case ExpressionKind.GreaterThan: case ExpressionKind.GreaterThanOrEqual: case ExpressionKind.LessThan: case ExpressionKind.LessThanOrEqual: Debug.Assert(typeRetRaw.isPredefType(PredefinedType.PT_BOOL)); // These ones don't lift the return type. Instead, if either side is null, the result is false. typeRet = typeRetRaw; break; } // Now get the result for the pre-lifted call. Debug.Assert(!(ek == ExpressionKind.Eq || ek == ExpressionKind.NotEq) || nonLiftedArg1.Type == nonLiftedArg2.Type); ExprCall nonLiftedResult = BindUDBinopCall(nonLiftedArg1, nonLiftedArg2, paramsRaw, typeRetRaw, mpwi); ExprList args = GetExprFactory().CreateList(exprVal1, exprVal2); ExprMemberGroup pMemGroup = GetExprFactory().CreateMemGroup(null, mpwi); ExprCall call = GetExprFactory().CreateCall(0, typeRet, args, pMemGroup, null); call.MethWithInst = new MethWithInst(mpwi); switch (ek) { case ExpressionKind.Eq: call.NullableCallLiftKind = NullableCallLiftKind.EqualityOperator; break; case ExpressionKind.NotEq: call.NullableCallLiftKind = NullableCallLiftKind.InequalityOperator; break; default: call.NullableCallLiftKind = NullableCallLiftKind.Operator; break; } call.CastOfNonLiftedResultToLiftedType = mustCast(nonLiftedResult, typeRet, 0); return call; } private AggregateType GetEnumBinOpType(ExpressionKind ek, CType argType1, CType argType2, out AggregateType ppEnumType) { Debug.Assert(argType1.isEnumType() || argType2.isEnumType()); AggregateType type1 = argType1 as AggregateType; AggregateType type2 = argType2 as AggregateType; AggregateType typeEnum = type1.isEnumType() ? type1 : type2; Debug.Assert(type1 == typeEnum || type1 == typeEnum.underlyingEnumType()); Debug.Assert(type2 == typeEnum || type2 == typeEnum.underlyingEnumType()); AggregateType typeDst = typeEnum; switch (ek) { case ExpressionKind.BitwiseAnd: case ExpressionKind.BitwiseOr: case ExpressionKind.BitwiseExclusiveOr: Debug.Assert(type1 == type2); break; case ExpressionKind.Add: Debug.Assert(type1 != type2); break; case ExpressionKind.Subtract: if (type1 == type2) typeDst = typeEnum.underlyingEnumType(); break; default: Debug.Assert(ek.IsRelational()); typeDst = GetPredefindType(PredefinedType.PT_BOOL); break; } ppEnumType = typeEnum; return typeDst; } private ExprBinOp CreateBinopForPredefMethodCall(ExpressionKind ek, PREDEFMETH predefMeth, CType RetType, Expr arg1, Expr arg2) { MethodSymbol methSym = GetSymbolLoader().getPredefinedMembers().GetMethod(predefMeth); ExprBinOp binop = GetExprFactory().CreateBinop(ek, RetType, arg1, arg2); // Set the predefined method to call. if (methSym != null) { AggregateSymbol agg = methSym.getClass(); AggregateType callingType = GetTypes().GetAggregate(agg, BSYMMGR.EmptyTypeArray()); binop.PredefinedMethodToCall = new MethWithInst(methSym, callingType, null); binop.UserDefinedCallMethod = binop.PredefinedMethodToCall; } else { // Couldn't find it. binop.SetError(); } return binop; } private ExprUnaryOp CreateUnaryOpForPredefMethodCall(ExpressionKind ek, PREDEFMETH predefMeth, CType pRetType, Expr pArg) { MethodSymbol methSym = GetSymbolLoader().getPredefinedMembers().GetMethod(predefMeth); ExprUnaryOp pUnaryOp = GetExprFactory().CreateUnaryOp(ek, pRetType, pArg); // Set the predefined method to call. if (methSym != null) { AggregateSymbol pAgg = methSym.getClass(); AggregateType pCallingType = GetTypes().GetAggregate(pAgg, BSYMMGR.EmptyTypeArray()); pUnaryOp.PredefinedMethodToCall = new MethWithInst(methSym, pCallingType, null); pUnaryOp.UserDefinedCallMethod = pUnaryOp.PredefinedMethodToCall; } else { pUnaryOp.SetError(); } return pUnaryOp; } } }
{'content_hash': 'f10dc4f14923bc330f05a7c80d74b963', 'timestamp': '', 'source': 'github', 'line_count': 2920, 'max_line_length': 224, 'avg_line_length': 41.06335616438356, 'alnum_prop': 0.5004295066927985, 'repo_name': 'nchikanov/corefx', 'id': '5d250aab53f5d22871c954238a7b034eda44de96', 'size': '119905', 'binary': False, 'copies': '2', 'ref': 'refs/heads/master', 'path': 'src/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/Semantics/Operators.cs', 'mode': '33188', 'license': 'mit', 'language': [{'name': '1C Enterprise', 'bytes': '327222'}, {'name': 'ASP', 'bytes': '1687'}, {'name': 'Batchfile', 'bytes': '24745'}, {'name': 'C', 'bytes': '1113348'}, {'name': 'C#', 'bytes': '141101806'}, {'name': 'C++', 'bytes': '696240'}, {'name': 'CMake', 'bytes': '63626'}, {'name': 'DIGITAL Command Language', 'bytes': '26402'}, {'name': 'Groovy', 'bytes': '41755'}, {'name': 'HTML', 'bytes': '653'}, {'name': 'Makefile', 'bytes': '9085'}, {'name': 'Objective-C', 'bytes': '9948'}, {'name': 'OpenEdge ABL', 'bytes': '139178'}, {'name': 'Perl', 'bytes': '3895'}, {'name': 'PowerShell', 'bytes': '43073'}, {'name': 'Python', 'bytes': '1535'}, {'name': 'Roff', 'bytes': '4236'}, {'name': 'Shell', 'bytes': '73311'}, {'name': 'Visual Basic', 'bytes': '827108'}, {'name': 'XSLT', 'bytes': '462346'}]}