/*
 * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
 * (the "License"). You may not use this work except in compliance with the License, which is
 * available at www.apache.org/licenses/LICENSE-2.0
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied, as more fully set forth in the License.
 *
 * See the NOTICE file distributed with this work for information regarding copyright ownership.
 */

package alluxio.master.backup;

import alluxio.exception.AlluxioException;
import alluxio.grpc.BackupPRequest;
import alluxio.grpc.BackupStatusPRequest;
import alluxio.master.StateLockOptions;
import alluxio.wire.BackupStatus;

/**
 * Interface for backup operations.
 */
public interface BackupOps {
  /**
   * Takes a backup.
   *
   * Note: If the backup request asks for async execution, this method returns after initiating
   * the backup, with the status of the in-progress backup. {@link #getBackupStatus} should then
   * be called to query the status of the ongoing backup.
   *
   * Note: When the leader has no standby in an HA cluster, it rejects the backup. This can be
   * allowed by passing the "AllowLeader" option in the request.
   *
   * @param request the backup request
   * @param stateLockOptions the state lock options during the backup
   * @return the backup status response
   * @throws AlluxioException if the backup fails
   */
  BackupStatus backup(BackupPRequest request, StateLockOptions stateLockOptions)
      throws AlluxioException;

  /**
   * Queries the status of a backup.
   *
   * @param statusPRequest the status request
   * @return the status of the latest backup
   * @throws AlluxioException if the request fails
   */
  BackupStatus getBackupStatus(BackupStatusPRequest statusPRequest) throws AlluxioException;
}
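A minimal sketch of how a caller might drive this interface for an asynchronous backup. The `backupOps` instance, the request objects, and the `isCompleted()`/`isFailed()` accessors on `BackupStatus` are assumptions for illustration; only `backup` and `getBackupStatus` come from the interface above:

// Illustrative only: assumes backupOps implements BackupOps and that
// BackupStatus exposes isCompleted()/isFailed()-style accessors.
BackupStatus status = backupOps.backup(request, stateLockOptions);
while (!status.isCompleted() && !status.isFailed()) {
    Thread.sleep(1000); // arbitrary polling interval
    status = backupOps.getBackupStatus(statusRequest); // statusRequest carries the backup id
}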
// common/dao-api/src/main/java/org/thingsboard/server/dao/user/UserService.java
package org.thingsboard.server.dao.user;

import com.google.common.util.concurrent.ListenableFuture;
import org.thingsboard.server.common.data.User;
import org.thingsboard.server.common.data.id.CustomerId;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.id.UserCredentialsId;
import org.thingsboard.server.common.data.id.UserId;
import org.thingsboard.server.common.data.page.PageData;
import org.thingsboard.server.common.data.page.PageLink;
import org.thingsboard.server.common.data.security.UserCredentials;

public interface UserService {

    User findUserById(TenantId tenantId, UserId userId);

    ListenableFuture<User> findUserByIdAsync(TenantId tenantId, UserId userId);

    User findUserByEmail(TenantId tenantId, String email);

    User saveUser(User user);

    UserCredentials findUserCredentialsByUserId(TenantId tenantId, UserId userId);

    UserCredentials findUserCredentialsByActivateToken(TenantId tenantId, String activateToken);

    UserCredentials findUserCredentialsByResetToken(TenantId tenantId, String resetToken);

    UserCredentials saveUserCredentials(TenantId tenantId, UserCredentials userCredentials);

    UserCredentials activateUserCredentials(TenantId tenantId, String activateToken, String password);

    UserCredentials requestPasswordReset(TenantId tenantId, String email);

    UserCredentials requestExpiredPasswordReset(TenantId tenantId, UserCredentialsId userCredentialsId);

    UserCredentials replaceUserCredentials(TenantId tenantId, UserCredentials userCredentials);

    void deleteUser(TenantId tenantId, UserId userId);

    PageData<User> findUsersByTenantId(TenantId tenantId, PageLink pageLink);

    PageData<User> findTenantAdmins(TenantId tenantId, PageLink pageLink);

    void deleteTenantAdmins(TenantId tenantId);

    PageData<User> findCustomerUsers(TenantId tenantId, CustomerId customerId, PageLink pageLink);

    void deleteCustomerUsers(TenantId tenantId, CustomerId customerId);

    void setUserCredentialsEnabled(TenantId tenantId, UserId userId, boolean enabled);

    void onUserLoginSuccessful(TenantId tenantId, UserId userId);

    int onUserLoginIncorrectCredentials(TenantId tenantId, UserId userId);
}
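As a usage sketch, paging through all tenant users might look like the following; the `PageLink`/`PageData` helpers (`nextPageLink()`, `hasNext()`) and `User.getEmail()` are assumed from ThingsBoard's paging conventions, not shown in this interface:

// Illustrative paging loop; assumes a userService instance and a tenantId in scope.
PageLink pageLink = new PageLink(100); // assumed page size of 100
PageData<User> page;
do {
    page = userService.findUsersByTenantId(tenantId, pageLink);
    for (User user : page.getData()) {
        System.out.println(user.getEmail()); // assumes User exposes getEmail()
    }
    pageLink = pageLink.nextPageLink();
} while (page.hasNext());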
// Imports restored for context; package names assumed from JUnit 4, RxJava 2,
// and the MicroProfile Reactive Streams Operators API.
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionStage;

import org.eclipse.microprofile.reactive.streams.operators.ReactiveStreams;
import org.junit.Test;

import io.reactivex.Flowable;
import io.reactivex.schedulers.Schedulers;

/**
 * Checks the behavior of {@link FailedPublisherStageFactory} when running on the Vert.x Context.
 *
 * @author <a href="http://escoffier.me">Clement Escoffier</a>
 */
public class FailedPublisherStageFactoryTest extends StageTestBase {

    @Test
    public void createWithErrorFromVertxContext() {
        Exception failure = new Exception("Boom");
        Callable<CompletionStage<Optional<Integer>>> callable = () ->
            ReactiveStreams.fromPublisher(Flowable.just(1)
                .observeOn(Schedulers.newThread()))
                .<Integer>flatMap(x -> ReactiveStreams.failed(failure))
                .to(ReactiveStreams.<Integer>builder().findFirst()).run();

        executeOnEventLoop(callable).assertFailure("Boom");
    }
}
// ProcessAddress is the entrypoint for the StructProcessor
func (gp StructProcessor) ProcessAddress(algodData, indexerData []byte) (Result, error) {
    var indexerResponse generated.AccountResponse
    err := json.Unmarshal(indexerData, &indexerResponse)
    if err != nil {
        return Result{}, fmt.Errorf("unable to parse indexer data: %v", err)
    }
    indexerAcct := indexerResponse.Account

    var algodAcct generated.Account
    err = json.Unmarshal(algodData, &algodAcct)
    if err != nil {
        return Result{}, fmt.Errorf("unable to parse algod data: %v", err)
    }

    differences := equals(indexerAcct, algodAcct)
    if len(differences) > 0 {
        return Result{
            Equal:   false,
            Retries: 0,
            Details: &ErrorDetails{
                algod:   mustEncode(algodAcct),
                indexer: mustEncode(indexerAcct),
                diff:    differences,
            },
        }, nil
    }
    return Result{Equal: true}, nil
}
import { Fragment } from 'react';

import { Member } from '../../models';

interface IMembershipProfileProps {
    member: Member;
}

const MembershipProfile: React.FC<IMembershipProfileProps> = ({ member }) => {
    const details = [
        { key: '<NAME>', value: member['<NAME>'] },
        { key: 'Gender', value: member['Gender'] },
        { key: 'Email Address', value: member['Email'] },
        { key: 'Contact Number', value: member['Contact Number'] },
        { key: 'Home Number', value: member['Home Number'] },
        { key: 'Current School', value: member['Current School'] },
        { key: 'Graduating Class', value: member['Graduating Class'] },
        { key: 'Graduating Year', value: member['Graduating Year'] },
    ];

    return (
        <Fragment>
            {details.map((data) => (
                <div key={data['key']} className='row'>
                    <div className='col-sm-12 col-md-6 col-lg-6 mb-2 mb-md-0 mb-lg-0'>
                        <h6>{data['key']}</h6>
                    </div>
                    <div className='col-sm-12 col-md-6 col-lg-6 mb-2 mb-md-0 mb-lg-0'>
                        <h5 className='w-100'>{data['value'] || '-'}</h5>
                    </div>
                </div>
            ))}
        </Fragment>
    );
};

export default MembershipProfile;
// Unzip unzips the input byte slice.
func Unzip(in []byte) ([]byte, error) {
    inReader := bytes.NewReader(in)
    gzipReader, err := gzip.NewReader(inReader)
    if err != nil {
        return nil, err
    }
    defer gzipReader.Close()

    var result bytes.Buffer
    _, err = result.ReadFrom(gzipReader)
    if err != nil {
        return nil, err
    }
    err = gzipReader.Close()
    if err != nil {
        return nil, err
    }
    return result.Bytes(), nil
}
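For symmetry, a minimal sketch of the inverse operation; the name `Zip` is hypothetical and not part of the original file, but `Unzip(Zip(data))` should round-trip:

// Zip gzip-compresses the input byte slice. Hypothetical companion to Unzip.
func Zip(in []byte) ([]byte, error) {
    var buf bytes.Buffer
    w := gzip.NewWriter(&buf)
    if _, err := w.Write(in); err != nil {
        return nil, err
    }
    // Close flushes any pending compressed data; its error must be checked.
    if err := w.Close(); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}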
# repo: ebegen/Dunner
import numpy as np
import pandas as pd
import swifter


class DataCleaner():

    def _getDuplicateColumns(self, df, verbose=False):
        groups = df.columns.to_series().groupby(df.dtypes).groups
        duplicated_columns = []

        for dtype, col_names in groups.items():
            column_values = df[col_names]
            num_columns = len(col_names)

            for i in range(num_columns):
                column_i = column_values.iloc[:, i].values
                for j in range(i + 1, num_columns):
                    column_j = column_values.iloc[:, j].values
                    if np.array_equal(column_i, column_j):
                        if verbose:
                            print("column {} is a duplicate of column {}".format(col_names[i], col_names[j]))
                        duplicated_columns.append(col_names[i])
                        break
        return duplicated_columns

    def remove_columns(self, df, columns=None):
        '''
        Removes the selected columns from the given dataframe.

        :param df: dataframe from which the columns are removed
        :param columns: the columns to remove from the dataframe
        :return: the dataframe without the removed columns
        '''
        if columns is not None:
            if isinstance(df, pd.DataFrame):
                df.drop(columns, axis=1, inplace=True)
                return df
            else:
                raise TypeError('df parameter must be a pandas DataFrame')
        else:
            return "Column names are empty!!"

    def remove_rows_by_condition(self, df, conditions):
        '''
        Filters the given dataframe by a dictionary mapping column names to
        condition expressions (evaluated with the column value bound to x).

        :param df: pandas dataframe to filter
        :param conditions: dictionary of column name -> condition expression
        :return: a new, filtered dataframe
        '''
        if conditions is not None:
            if isinstance(df, pd.DataFrame):
                new_df = df.copy()
                for column, condition in conditions.items():
                    f = lambda x: eval(condition)
                    new_df = new_df.loc[new_df[column].swifter.apply(f)]
                return new_df

    def remove_outliers(self, df, low, high):
        # Keep only rows whose numeric values fall between the low and high quantiles.
        quant_df = df.quantile([low, high])
        for name in list(df.columns):
            if pd.api.types.is_numeric_dtype(df[name]):
                df = df[(df[name] > quant_df.loc[low, name])
                        & (df[name] < quant_df.loc[high, name])]
        return df

    def remove_unique_columns(self, df):
        # Drop columns that contain only a single unique value.
        for i in df.columns:
            if len(df[i].unique()) == 1:
                df = df.drop(i, axis=1)
        return df

    def remove_duplicates(self, df):
        # Drop duplicate rows, then duplicate columns.
        df = df.drop_duplicates()
        df = df.drop(columns=self._getDuplicateColumns(df))
        return df
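A quick usage sketch of the cleaner, with illustrative column names and data (not from the original repo):

cleaner = DataCleaner()
df = pd.DataFrame({
    "a": [1, 2, 3, 4],
    "b": [1, 2, 3, 4],          # duplicate of "a"
    "c": ["x", "x", "x", "x"],  # constant column
})
df = cleaner.remove_rows_by_condition(df, {"a": "x > 1"})  # keep rows where a > 1
df = cleaner.remove_unique_columns(df)                     # drops "c"
df = cleaner.remove_duplicates(df)                         # drops one of "a"/"b"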
# Imports restored for context (assumed from the usage below).
import logging
import os
import shlex
import signal
import socket
import subprocess
from contextlib import contextmanager

import MalmoPython  # assumed: Malmo's Python bindings

logger = logging.getLogger(__name__)


class InstanceManager:
    """
    The static (singleton) hero instance manager. We avoid using the default
    Malmo instance management because it only allows one host.
    """
    MINECRAFT_DIR = os.path.join("/minerl.herobraine", "scripts")
    MC_COMMAND = os.path.join(MINECRAFT_DIR, 'launchHero.sh')
    MAXINSTANCES = 10
    DEFAULT_IP = "127.0.0.1"
    _instance_pool = []
    X11_DIR = '/tmp/.X11-unix'
    headless = False
    managed = False

    @classmethod
    @contextmanager
    def get_instance(cls):
        """
        Gets an instance.

        :return: The available instance's port and IP.
        """
        # Find an available instance.
        for inst in cls._instance_pool:
            if not inst.locked:
                inst._acquire_lock()
                yield inst
                inst.release_lock()
                return
        # Otherwise make a new instance if possible
        if cls.managed:
            if len(cls._instance_pool) < cls.MAXINSTANCES:
                inst = cls._Instance(cls._get_valid_port())
                cls._instance_pool.append(inst)
                inst._acquire_lock()
                yield inst
                inst.release_lock()
                return
            else:
                raise RuntimeError("No available instances and max instances reached! :O :O")
        else:
            raise RuntimeError("No available instances and managed flag is off")

    @classmethod
    def shutdown(cls):
        # Iterate over a copy of instance_pool because _stop removes from list.
        # This is more time/memory intensive, but allows us to have a modular
        # stop function.
        for inst in cls._instance_pool[:]:
            inst.release_lock()
            inst._stop()

    @classmethod
    @contextmanager
    def allocate_pool(cls, num):
        for _ in range(num):
            inst = cls._Instance(cls._get_valid_port())
            cls._instance_pool.append(inst)
        yield None
        cls.shutdown()

    @classmethod
    def add_existing_instance(cls, port):
        assert cls._is_port_taken(port), "No Malmo mod utilizing the port specified."
        cls._instance_pool.append(InstanceManager._Instance(port=port, existing=True))

    class _Instance:
        def __init__(self, port=None, existing=False):
            self.existing = existing
            if not existing:
                if not port:
                    port = InstanceManager._get_valid_port()
                cmd = InstanceManager.MC_COMMAND
                if InstanceManager.headless:
                    cmd += " -headless "
                cmd += " -port " + str(port)
                logger.info("Starting Minecraft process: " + cmd)
                args = shlex.split(cmd)
                proc = subprocess.Popen(args, cwd=InstanceManager.MINECRAFT_DIR,
                                        # pipe entire output
                                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                        # use process group, see http://stackoverflow.com/a/4791612/18576
                                        preexec_fn=os.setsid)
                # wait until the Minecraft process has output "CLIENT enter state: DORMANT"
                while True:
                    line = proc.stdout.readline()
                    logger.debug(line)
                    if not line:
                        raise EOFError("Minecraft process finished unexpectedly")
                    if b"CLIENT enter state: DORMANT" in line:
                        break
                logger.info("Minecraft process ready")
                # suppress entire output, otherwise the subprocess will block
                # NB! there will still be logs under Malmo/Minecraft/run/logs
                # FNULL = open(os.devnull, 'w')
                FMINE = open('./minecraft.log', 'w')
                proc.stdout = FMINE
                self.proc = proc
            else:
                assert port is not None, "No existing port specified."

            self.ip = InstanceManager.DEFAULT_IP
            self.port = port
            self.existing = existing
            self.locked = False

            # Creating client pool.
            logger.info("Creating client pool for {}".format(self))
            self.client_pool = MalmoPython.ClientPool()
            self.client_pool.add(MalmoPython.ClientInfo(self.ip, self.port))
            # Set the lock.

        def _stop(self):
            if not self.existing:
                # Kill the VNC server if we started it
                cmd = "/opt/TurboVNC/bin/vncserver "
                cmd += "-kill :" + str(self.port - 10000)
                args = shlex.split(cmd)
                subprocess.Popen(args)
                # send SIGTERM to entire process group, see http://stackoverflow.com/a/4791612/18576
                os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)
                logger.info("Minecraft process terminated")
            if self in InstanceManager._instance_pool:
                InstanceManager._instance_pool.remove(self)
                self.release_lock()
            self.terminated = True

        def _acquire_lock(self):
            self.locked = True

        def release_lock(self):
            self.locked = False

        def __repr__(self):
            return ("Malmo[proc={}, addr={}:{}, locked={}]".format(
                self.proc.pid if not self.existing else "EXISTING",
                self.ip,
                self.port,
                self.locked
            ))

    @staticmethod
    def _is_port_taken(port, address='0.0.0.0'):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((address, port))
            taken = False
        except socket.error as e:
            if e.errno in [98, 10048]:
                taken = True
            else:
                raise e
        s.close()
        return taken

    @staticmethod
    def _is_display_port_taken(port, x11_path):
        # Returns whether an X display with this number already exists under x11_path.
        xs = os.listdir(x11_path)
        return ('X' + str(port)) in xs

    @classmethod
    def _port_in_instance_pool(cls, port):
        # Ideally, this should be covered by other cases, but there may be delay
        # in when the ports get "used"
        return port in [instance.port for instance in cls._instance_pool]

    @classmethod
    def _get_valid_port(cls):
        port = 10000
        while cls._is_port_taken(port) or \
                cls._is_display_port_taken(port - 10000, cls.X11_DIR) or \
                cls._port_in_instance_pool(port):
            port += 1
        return port
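A minimal usage sketch of the context-manager API above (illustrative only; assumes instances are managed so one can be launched on demand):

InstanceManager.managed = True
with InstanceManager.get_instance() as inst:
    # inst is locked for the duration of this block
    print(inst.ip, inst.port)
# the lock is released when the block exits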
from core.constants import VISIBILITY_CHOICES, Visibility
from core.models import TimestampModel
from django.db import models


class MStreamManager(models.Manager):
    def visible(self, user):
        if user.is_authenticated:
            return super().get_queryset()
        else:
            return super().get_queryset().filter(visibility=Visibility.PUBLIC)


class MStream(TimestampModel):
    icon = models.CharField(max_length=2, help_text="Select an emoji")
    name = models.CharField(max_length=32)
    slug = models.SlugField(unique=True)
    visibility = models.SmallIntegerField(choices=VISIBILITY_CHOICES, default=Visibility.PUBLIC)
    posts = models.ManyToManyField("post.TPost", through="TStreamPost", through_fields=("m_stream", "t_post"))

    objects = MStreamManager()

    class Meta:
        db_table = "m_stream"
        verbose_name = "Stream"
        verbose_name_plural = "Streams"

    def __str__(self):
        return self.name


class TStreamPost(TimestampModel):
    m_stream = models.ForeignKey(MStream, on_delete=models.CASCADE)
    t_post = models.ForeignKey("post.TPost", on_delete=models.CASCADE)

    class Meta:
        db_table = "t_stream_post"
        unique_together = ("m_stream", "t_post")
        verbose_name = "Stream-Post"
        verbose_name_plural = "Stream-Posts"
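A sketch of how the custom manager might back a view; the view function and template name are hypothetical, only `MStream.objects.visible()` comes from the models above:

from django.shortcuts import render

def stream_list(request):
    # Anonymous visitors only see public streams; authenticated users see everything.
    streams = MStream.objects.visible(request.user)
    return render(request, "streams/list.html", {"streams": streams})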
import static org.junit.jupiter.api.Assertions.*;

import org.junit.jupiter.api.Test;

class ByteInOutExample {

    @Test
    void testInt() throws Exception {
        final long value1 = 1234567890987654321L;

        final byte[] bs = new byte[8];
        bs[0] = (byte) ((value1 >>> 56) & 0xff);
        bs[1] = (byte) ((value1 >>> 48) & 0xff);
        bs[2] = (byte) ((value1 >>> 40) & 0xff);
        bs[3] = (byte) ((value1 >>> 32) & 0xff);
        bs[4] = (byte) ((value1 >>> 24) & 0xff);
        bs[5] = (byte) ((value1 >>> 16) & 0xff);
        bs[6] = (byte) ((value1 >>> 8) & 0xff);
        bs[7] = (byte) ((value1 >>> 0) & 0xff);

        final long value2 = ((long) (bs[0] & 0xff) << 56L)
                + ((long) (bs[1] & 0xff) << 48L)
                + ((long) (bs[2] & 0xff) << 40L)
                + ((long) (bs[3] & 0xff) << 32L)
                + ((long) (bs[4] & 0xff) << 24L)
                + ((long) (bs[5] & 0xff) << 16L)
                + ((long) (bs[6] & 0xff) << 8L)
                + ((long) (bs[7] & 0xff) << 0L);

        assertEquals(value1, value2);
    }
}
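The same big-endian round trip can be written with java.nio.ByteBuffer, which performs exactly the byte packing shown above; a sketch of an equivalent test:

@Test
void testIntWithByteBuffer() {
    final long value1 = 1234567890987654321L;
    // ByteBuffer is big-endian by default, matching the manual shifts above.
    final byte[] bs = java.nio.ByteBuffer.allocate(8).putLong(value1).array();
    final long value2 = java.nio.ByteBuffer.wrap(bs).getLong();
    assertEquals(value1, value2);
}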
YOOO LET’S TALK ABOUT CARPET DIEM FOR A MINUTE, SHALL WE? Not only is it a great episode with body switching antics and all that Stanley foreshadowing, but what I love about this episode is how the twins react to getting their bodies switched. With most TV shows, when characters switch bodies they only freak out a tiny bit if at all, mostly reacting like “haha I’m in your body now” or “teehee boobs” (N/A here but you know what I mean). No, the Pines twins literally FREAK THE FUCK OUT. One of them starts VOMITING, the other’s rocking in the corner. I’ve never seen a more realistic reaction to being gender-bent; suddenly your anatomy has changed and everything feels different, you’re not yourself, you’re literally TRAPPED in someone else’s body. I think with all the “fun and playful” body switching TV shows and movies we’ve seen, we forget how disturbing and ultimately horrific it can actually be–especially being gender-bent.
ctvbc.ca

The two boys who have admitted to the brutal murder of Kimberly Proctor were avid players in an online role-playing game, and experts say it's likely the line between fantasy and reality became blurred.

On Wednesday, a B.C. court heard how two teens, aged 16 and 18, planned the murder of the Colwood, B.C., girl online. They came up with code words to initiate the attack, maps of where to dispose of her body and what kind of fuel to buy to burn her body.

They were also fans of World of Warcraft, a sometimes violent online fantasy game with 12 million subscribers around the world. Experts believe the game perpetuated the violence against the 18-year-old girl and provided key information to police.

Bonnie Leadbeater, a psychology professor at the University of Victoria, says some kids have trouble knowing that what's acceptable in a game may not be in real life.

"You don't know which aggressive kid is going to take the fantasies of video games and try them out in reality. You just can't predict those very rare occurrences," she said. "There would have been signs early. I don't know these two boys at all, but generally, kids who go on to kill other kids or to act out in this sort of extreme manner are having problems early."

After Proctor's murder, the boys told a friend on World of Warcraft what they had done. One of the teens admitted that the murder didn't feel like he thought it would. Those messages were collected by police and used by prosecutors.

Now the teenage killers are undergoing psychiatric exams, and a judge will decide in March if they should be sentenced as adults.

With a report from CTV British Columbia's Lisa Rossington
package handlers

import (
    "encoding/json"
    "log"
    "strings"

    "automata/devices"
)

func SmokerHandler(message []byte) bool {
    decoder := json.NewDecoder(strings.NewReader(string(message)))

    // Initialize the struct
    var smokerUpdate devices.SmokerRead
    err := decoder.Decode(&smokerUpdate)
    if err != nil {
        // Decode errors are currently swallowed rather than failing the handler;
        // the original error paths are kept below, commented out.
        //panic(err)
        //return false
    }
    log.Println(smokerUpdate.Reading)
    return true
}
// RGB Core Library: a reference implementation of RGB smart contract standards.
// Written in 2019-2022 by
//     Dr. <NAME> <<EMAIL>>
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
//
// You should have received a copy of the MIT License along with this software.
// If not, see <https://opensource.org/licenses/MIT>.

use std::collections::{BTreeMap, BTreeSet};
use std::io::Write;

use bitcoin::hashes::{sha256, sha256t, Hash};
use commit_verify::{
    lnpbp4, CommitEncode, CommitVerify, ConsensusCommit, PrehashedProtocol, TaggedHash,
};
use strict_encoding::StrictEncode;

use crate::{Node, NodeId, Transition};

// "rgb:bundle"
static MIDSTATE_BUNDLE_ID: [u8; 32] = [
    219, 42, 125, 118, 252, 62, 163, 226, 43, 104, 76, 97, 218, 62, 92, 108, 200, 133, 207, 235,
    35, 72, 210, 0, 122, 143, 80, 88, 238, 145, 95, 89,
];

/// Tag used for [`BundleId`] hash type
pub struct BundleIdTag;

impl sha256t::Tag for BundleIdTag {
    #[inline]
    fn engine() -> sha256::HashEngine {
        let midstate = sha256::Midstate::from_inner(MIDSTATE_BUNDLE_ID);
        sha256::HashEngine::from_midstate(midstate, 64)
    }
}

/// Unique state transition bundle identifier equivalent to the bundle commitment hash
#[cfg_attr(
    feature = "serde",
    derive(Serialize, Deserialize),
    serde(crate = "serde_crate", transparent)
)]
#[derive(Wrapper, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, From)]
#[derive(StrictEncode, StrictDecode)]
#[wrapper(Debug, Display)]
pub struct BundleId(sha256t::Hash<BundleIdTag>);

impl<Msg> CommitVerify<Msg, PrehashedProtocol> for BundleId
where
    Msg: AsRef<[u8]>,
{
    #[inline]
    fn commit(msg: &Msg) -> BundleId { BundleId::hash(msg) }
}

pub trait ConcealTransitions {
    fn conceal_transitions(&mut self) -> usize { self.conceal_transitions_except(&vec![]) }

    fn conceal_transitions_except(&mut self, node_ids: &[NodeId]) -> usize;
}

impl From<BundleId> for lnpbp4::Message {
    fn from(id: BundleId) -> Self { lnpbp4::Message::from_inner(id.into_inner()) }
}

impl From<lnpbp4::Message> for BundleId {
    fn from(id: lnpbp4::Message) -> Self { BundleId(sha256t::Hash::from_inner(id.into_inner())) }
}

#[derive(Clone, PartialEq, Eq, Debug, Default, AsAny)]
#[derive(StrictEncode, StrictDecode)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate = "serde_crate"))]
pub struct TransitionBundle {
    revealed: BTreeMap<Transition, BTreeSet<u16>>,
    concealed: BTreeMap<NodeId, BTreeSet<u16>>,
}

impl CommitEncode for TransitionBundle {
    fn commit_encode<E: Write>(&self, mut e: E) -> usize {
        let mut concealed = self.clone();
        concealed.conceal_transitions();

        let mut count = 0usize;
        for (node_id, inputs) in concealed.concealed {
            count += node_id
                .strict_encode(&mut e)
                .expect("memory encoders do not fail");
            count += inputs
                .strict_encode(&mut e)
                .expect("memory encoders do not fail");
        }
        count
    }
}

impl ConsensusCommit for TransitionBundle {
    type Commitment = BundleId;
}

impl ConcealTransitions for TransitionBundle {
    fn conceal_transitions_except(&mut self, node_ids: &[NodeId]) -> usize {
        let mut concealed = bmap! {};
        self.revealed = self
            .revealed
            .iter()
            .filter_map(|(transition, inputs)| {
                let node_id = transition.node_id();
                if !node_ids.contains(&node_id) {
                    concealed.insert(node_id, inputs.clone());
                    None
                } else {
                    Some((transition.clone(), inputs.clone()))
                }
            })
            .collect();
        let count = concealed.len();
        self.concealed.extend(concealed);
        count
    }
}

impl From<BTreeMap<Transition, BTreeSet<u16>>> for TransitionBundle {
    fn from(revealed: BTreeMap<Transition, BTreeSet<u16>>) -> Self {
        TransitionBundle {
            revealed,
            concealed: empty!(),
        }
    }
}

impl<'me> IntoIterator for &'me TransitionBundle {
    type Item = (&'me Transition, &'me BTreeSet<u16>);
    type IntoIter = std::collections::btree_map::Iter<'me, Transition, BTreeSet<u16>>;

    fn into_iter(self) -> Self::IntoIter { self.revealed.iter() }
}

impl TransitionBundle {
    pub fn bundle_id(&self) -> BundleId { self.consensus_commit() }

    pub fn known_transitions(
        &self,
    ) -> std::collections::btree_map::Keys<Transition, BTreeSet<u16>> {
        self.revealed.keys()
    }

    pub fn known_node_ids(&self) -> BTreeSet<NodeId> {
        self.known_transitions().map(Transition::node_id).collect()
    }
}

impl TransitionBundle {
    /// Checks that no input index is referenced by more than one transition,
    /// across both the revealed and concealed sets.
    pub fn validate(&self) -> bool {
        let mut used_inputs = bset! {};
        for set in self.revealed.values() {
            if used_inputs.intersection(set).count() > 0 {
                return false;
            }
            used_inputs.extend(set);
        }
        for set in self.concealed.values() {
            if used_inputs.intersection(set).count() > 0 {
                return false;
            }
            used_inputs.extend(set);
        }
        true
    }
}

#[cfg(test)]
mod test {
    use amplify::Wrapper;
    use commit_verify::tagged_hash;

    use super::*;

    #[test]
    fn test_bundle_id_midstate() {
        let midstate = tagged_hash::Midstate::with(b"rgb:bundle");
        assert_eq!(midstate.into_inner().into_inner(), MIDSTATE_BUNDLE_ID);
    }
}
def file_version_exists_in_file_system(self, product_version_id: int):
    file_path = self.file_version_path_for_id(product_version_id=product_version_id, full_path=True)
    return os.path.isfile(path=file_path)
import React from 'react'
import { renderRoutes } from 'react-router-config'
import ReactConfig from './routers'
import { history } from '@/assets/js/history'
import { ConnectedRouter } from 'connected-react-router'

const Routers: React.FC = () => {
    return (
        <ConnectedRouter history={history}>
            {renderRoutes(ReactConfig)}
        </ConnectedRouter>
    )
}

export default Routers;
/**
 * Create an intermediate model of the external resource (JSON string).
 *
 * @param externalId
 * @param externalResource
 * @return
 */
@Override
public ResourceModel makeResourceModel(String externalId, String externalResource) {
    CrossrefResolverAPI resolverAPI = new CrossrefResolverAPI();
    ResourceModel resourceModel = resolverAPI.makeResourceModel(externalResource);
    if (resourceModel == null) {
        // Fall back to the native Crossref API when the resolver cannot parse the resource.
        CrossrefNativeAPI nativeAPI = new CrossrefNativeAPI();
        resourceModel = nativeAPI.makeResourceModel(externalResource);
    }
    return resourceModel;
}
import { Field, Int, ObjectType } from '@nestjs/graphql'
import { Channel } from './channel'
import { Emojis } from './emojis'
import { GuildMember } from './guildMember'
import { Roles } from './roles'
import { Sticker } from './sticker'

@ObjectType()
export class Guild {
    @Field({ nullable: true })
    owner?: boolean

    @Field()
    owner_id: string

    @Field(() => Int)
    afk_timeout: number

    @Field(() => [Roles])
    roles: Roles[]

    @Field(() => [Emojis])
    emojis: Emojis[]

    // Array type and nullability aligned with the TS property (the original
    // declared a single GuildMember for an array-valued field).
    @Field(() => [GuildMember], { nullable: true })
    members?: GuildMember[]

    @Field(() => [Channel], { nullable: true })
    channels?: Channel[]

    @Field(() => [Channel], { nullable: true })
    threads?: Channel[]

    @Field({ nullable: true })
    description?: string

    @Field()
    premium_tier: GuildPremiumTier

    // Marked nullable to match the optional TS property (the original used a bare @Field()).
    @Field({ nullable: true })
    premium_subscription_count?: number

    @Field()
    preferred_locale: string

    @Field()
    nsfw_level: GuildNSFWLevel

    @Field(() => [Sticker])
    stickers: Sticker[]

    @Field()
    name: string

    @Field({ nullable: true })
    icon?: string

    @Field()
    splash: string

    @Field({ nullable: true })
    unavailable?: boolean

    @Field(() => String)
    id: string

    @Field(() => [String])
    features: string[]

    @Field({ nullable: true })
    icon_hash?: string

    @Field()
    discovery_splash: string

    @Field({ nullable: true })
    permissions?: string

    // @Field()
    // region: string

    // @Field()
    // afk_channel_id: string

    // @Field({ nullable: true })
    // widget_enabled?: boolean

    // @Field({ nullable: true })
    // widget_channel_id?: string

    // @Field()
    // verification_level: GuildVerificationLevel

    // @Field()
    // default_message_notifications: GuildDefaultMessageNotifications

    // @Field()
    // explicit_content_filter: GuildExplicitContentFilter

    // @Field()
    // mfa_level: GuildMFALevel

    // @Field()
    // application_id: string

    // @Field()
    // system_channel_id: string

    // @Field()
    // system_channel_flags: GuildSystemChannelFlags

    // @Field()
    // rules_channel_id: string

    // @Field()
    // joined_at?: string

    // @Field()
    // large?: boolean

    // Marked nullable to match the optional TS property.
    @Field({ nullable: true })
    member_count?: number

    // @Field()
    // voice_states?: Omit<GatewayVoiceState, 'guild_id'>[]

    // @Field()
    // presences?: GatewayPresenceUpdate[]

    // @Field()
    // max_presences?: number

    // @Field()
    // max_members?: number

    // @Field()
    // vanity_url_code: string

    // @Field()
    // banner: string

    // @Field()
    // public_updates_channel_id: string

    // @Field()
    // max_video_channel_users?: number

    // @Field()
    // approximate_member_count?: number

    // @Field()
    // approximate_presence_count?: number

    // @Field()
    // welcome_screen?: APIGuildWelcomeScreen

    // @Field(() => [APIStageInstance])
    // stage_instances?: APIStageInstance[]
}

enum GuildPremiumTier {
    None = 0,
    Tier1 = 1,
    Tier2 = 2,
    Tier3 = 3,
}

enum GuildNSFWLevel {
    Default = 0,
    Explicit = 1,
    Safe = 2,
    AgeRestricted = 3,
}
/**
 * Invokes a previously open invocation stream. The stream must not be
 * closed.
 *
 * @param rid Previous invocation request ID
 * @param params Parameters of the invocation, can be {@code null}
 * @see #invoke
 */
public void continuousInvoke(int rid, JsonObject params) {
    Request req = new ContinuousInvokeRequest(params);
    RequestWrapper wrapper = new RequestWrapper(req);
    sendRequest(wrapper, rid, false);
}
// RUN: %clang_cc1 -fsyntax-only -verify %s

struct A {
  A() : value(), cvalue() { } // expected-error {{reference to type 'int' requires an initializer}}
  int &value;
  const int cvalue;
};

struct B {
  int field;
};

struct X {
  X() { } // expected-error {{constructor for 'X' must explicitly initialize the reference member 'value'}} \
          // expected-error {{constructor for 'X' must explicitly initialize the const member 'cvalue'}} \
          // expected-error {{constructor for 'X' must explicitly initialize the reference member 'b'}} \
          // expected-error {{constructor for 'X' must explicitly initialize the const member 'cb'}}
  int &value; // expected-note{{declared here}}
  const int cvalue; // expected-note{{declared here}}
  B& b; // expected-note{{declared here}}
  const B cb; // expected-note{{declared here}}
};

// PR5924
struct bar {};
bar xxx();

struct foo {
  foo_t a; // expected-error {{unknown type name 'foo_t'}}
  foo() : a(xxx()) {} // no error here.
};
FDG PET/CT and MR imaging of CD34-negative soft-tissue solitary fibrous tumor with NAB2-STAT6 fusion gene. Extrapleural solitary fibrous tumor (SFT) is an uncommon mesenchymal neoplasm of intermediate biological potential. Herein, we describe the radiological, histological, immunohistochemical and molecular genetic features of an SFT arising in the left thigh of a 55-year-old woman. Magnetic resonance imaging exhibited a well-defined mass with intermediate signal intensity on T1-weighted sequences and heterogeneous high signal intensity on T2-weighted sequences. Contrast-enhanced T1-weighted sequences showed strong homogeneous enhancement of the mass. A prominent vascular pedicle was visible. Integrated positron-emission tomography (PET)/computed tomographic (CT) scan demonstrated a moderate 18F-fluorodeoxyglucose (FDG) uptake (maximum standardized uptake value, 4.45) in the mass. Following an open biopsy, wide excision of the tumor was performed. Histologically, the tumor was composed of a proliferation of spindle cells in a fibrous stroma with focal hyalinization. Thin-walled branching hemangiopericytoma-like vessels were observed. Immunohistochemically, the tumor cells were diffusely positive for signal transducer and activator of transcription 6 (STAT6) but negative for CD34. The MIB-1 labeling index was less than 5%. Subsequent reverse transcriptase-polymerase chain reaction analysis identified a nerve growth factor inducible-A binding protein 2-STAT6 gene fusion. Our case supports the utility of STAT6 immunohistochemistry as an adjunct in the diagnosis of soft-tissue SFT with loss of CD34 positivity. To the best of our knowledge, this is the first report showing the FDG PET/CT findings of soft-tissue SFT.
/**
 * @author <a href="mailto:[email protected]">Alex Loubyansky</a>
 */
public class FKStackOverflowUnitTestCase extends JBossTestCase {

    // Constructor

    public FKStackOverflowUnitTestCase(String name) {
        super(name);
    }

    // Suite

    public static Test suite() throws Exception {
        return getDeploySetup(FKStackOverflowUnitTestCase.class, "cmp2-fkstackoverflow.jar");
    }

    // Tests

    public void testSimpleScenario() throws Exception {
        FacadeUtil.getHome().create().testSimple();
    }

    public void testComplexScenario() throws Exception {
        FacadeUtil.getHome().create().testComplex();
    }
}
def update_access_url_service(
        self, project_id, service_id, access_url_changes):
    try:
        service_old = self.storage_controller.get_service(
            project_id,
            service_id
        )
    except ValueError as e:
        LOG.warning('Get service {0} failed. '
                    'Error message: {1}'.format(service_id, e))
        raise errors.ServiceNotFound(e)

    updated_details = False
    provider_details = service_old.provider_details
    domain_name = access_url_changes.get('domain_name')

    for provider in provider_details:
        for access_url in provider_details[provider].access_urls:
            if access_url.get('domain') == domain_name:
                if (
                    'operator_url' in access_url and
                    'provider_url' in access_url
                ):
                    new_access_url = access_url_changes['operator_url']
                    new_provider_url = access_url_changes['provider_url']

                    if access_url.get('shared_ssl_flag', False) is True:
                        raise errors.InvalidOperation(
                            'Changing access urls for shared ssl domains '
                            'is not supported.')

                    if not new_access_url.startswith(domain_name):
                        LOG.info('Invalid access_url/domain_name.')
                        raise errors.InvalidResourceName(
                            'Invalid access_url/domain_name.')

                    if new_access_url == access_url['operator_url']:
                        LOG.info(
                            "No changes made, both old and new access "
                            "urls are the same. "
                            "Domain '{0}'.".format(domain_name))
                        return False

                    if new_provider_url != access_url['provider_url']:
                        raise errors.InvalidOperation(
                            'Please use the migrate domain functionality '
                            'to migrate the domain to a new cert.'
                        )

                    certificate = (
                        "shared"
                        if access_url.get('shared_ssl_flag', False) is True
                        else None
                    )
                    self.dns_controller._create_preferred_cname_record(
                        domain_name,
                        certificate,
                        new_access_url,
                        new_provider_url
                    )
                    self.dns_controller._delete_cname_record(
                        access_url['operator_url'],
                        access_url.get('shared_ssl_flag', False)
                    )

                    access_url['provider_url'] = new_provider_url
                    access_url['operator_url'] = new_access_url
                    updated_details = True
                    break

    if updated_details is True:
        self.storage_controller.update_provider_details(
            project_id,
            service_id,
            provider_details
        )
    else:
        err_msg = 'Domain {0} could not be found on service {1}.'.format(
            domain_name, service_id)
        LOG.error(err_msg)
        raise ValueError(err_msg)

    return updated_details
// NewAgent will connect to nats in main cluster and proxy connections locally
// to an http.Handler.
// TODO: add reasonable defaults for keepAliveInterval
func NewAgent(nc *nats.Conn, id uuid.UUID, handler http.Handler, subject string, keepAliveInterval time.Duration) *Agent {
    return &Agent{
        nc:          nc,
        id:          id,
        handler:     handler,
        natsSubject: subject,
        transports: transportManager{
            nc:                nc,
            mux:               sync.Mutex{},
            transports:        map[string]*responseTransport{},
            keepAliveInterval: keepAliveInterval,
        },
    }
}
import request from 'supertest'

import app from '../app'
import { genApiData } from '../util'
import { genErr, genErrRes } from '../util/err'

describe('General app endpoints', () => {
    it('should respond to GET request with 200 and API data', () =>
        request(app)
            .get('/')
            .then(res => {
                expect(res.statusCode).toBe(200)
                expect(res.text).toBe(JSON.stringify(genApiData()))
            }))

    it('should handle not found endpoints with 404 and error data', () =>
        request(app)
            .get('/anakin')
            .then(res => {
                expect(res.statusCode).toBe(404)
                expect(res.text).toBe(JSON.stringify(genErrRes(genErr(404))))
            }))
})
// lock/helpers_test.go
package lock

import (
    "log"
    "net"

    etcd "go.etcd.io/etcd/client/v3"
)

func init() {
    s, err := net.Dial("tcp", "127.0.0.1:2379")
    if err != nil {
        log.Fatalln("etcd is not running on localhost", err)
    }
    s.Close()
}

func client() *etcd.Client {
    client, err := etcd.New(etcd.Config{Endpoints: []string{"http://localhost:2379"}})
    if err != nil {
        panic(err)
    }
    return client
}
// repo: grj1046/go-cnblogs
package ing

import (
    "encoding/json"
    "errors"
    "log"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/PuerkitoBio/goquery"
)

//Client ing.cnblogs.com
type Client struct {
    //IngID int
    authCookie string
    urlStr     string
    httpClient *http.Client
}

//Content ing Content struct
type Content struct {
    IngID          int
    AuthorID       string
    AuthorUserName string
    AuthorNickName string
    Time           string
    Status         int
    Lucky          bool
    IsPrivate      bool
    IsNewbie       bool
    AcquiredAt     time.Time
    Body           string
    Comments       []Comment
}

//Comment Ing.Content's Comment
type Comment struct {
    IngID          int
    CommentID      string
    AuthorID       string
    AuthorUserName string
    AuthorNickName string
    Body           string
    Time           string
    IsDelete       bool
}

//OriginContent store the origin ing html
type OriginContent struct {
    IngID      int
    Status     int //200 404
    AcquiredAt time.Time
    Exception  string
    HTML       string
}

//Init Initialize httpClient with authCookie
func (client *Client) Init(authCookie string) {
    client.httpClient = &http.Client{}
    client.authCookie = authCookie
}

//GetIngByID Get Ing Html Document by ingID
func (client *Client) GetIngByID(ingID int) (*Content, *OriginContent, error) {
    //urlStr := "https://ing.cnblogs.com/u/grj1046/status/" + strconv.Itoa(ingID) + "/"
    //http://home.cnblogs.com/ing/1115171/
    urlStr := "https://ing.cnblogs.com/redirect/" + strconv.Itoa(ingID) + "/"
    req, err := http.NewRequest("GET", urlStr, nil)
    if err != nil {
        return nil, nil, err
    }
    req.Header.Add("Cookie", client.authCookie)
    resp, err := client.httpClient.Do(req)
    if err != nil {
        return nil, nil, err
    }
    defer resp.Body.Close()

    nowTime := time.Now()
    originContent := &OriginContent{}
    originContent.IngID = ingID
    originContent.HTML = ""
    originContent.Status = 200
    originContent.AcquiredAt = nowTime

    content := &Content{}
    content.IngID = ingID
    content.AcquiredAt = nowTime
    content.Status = 200

    if resp.StatusCode != 200 {
        content.Status = resp.StatusCode
        originContent.Status = resp.StatusCode
        return content, originContent, nil
    }
    doc, err := goquery.NewDocumentFromResponse(resp)
    if err != nil {
        return nil, nil, err
    }
    feedBlock, err := doc.Find(".feed_block").Html()
    if err != nil {
        originContent.Exception += " Get feed_block error: " + err.Error()
    } else {
        originContent.HTML = feedBlock
    }
    errBody := doc.Find(".error_body")
    //if return 404
    if errBody.Text() != "" {
        content.Status = 404
        originContent.Status = 404
        return content, originContent, nil
    }
    if doc.Find("#Main form #Heading").Text() == "登录博客园 - 代码改变世界" {
        //login page returned; needs re-acquisition
        content.Status = 403
        originContent.Status = 403
        return content, originContent, nil
    }
    //AuthorID
    authorID, exists := doc.Find(".ing_item_face").Attr("src")
    if exists {
        //https://pic.cnblogs.com/face/sample_face.gif test case: ingid=26
        //https://pic.cnblogs.com/face/289132/20130423092122.png
        if strings.Index(authorID, "sample_face.gif") != -1 {
            //replyToSpaceUserId=9931;isIngItem=true
            ret, err := doc.Html()
            if err != nil {
                originContent.Exception += " Get sample_face.gif error: " + err.Error()
            } else {
                start := strings.Index(ret, "replyToSpaceUserId=") + len("replyToSpaceUserId=")
                end := strings.Index(ret, ";isIngItem=")
                if start != -1 && end != -1 {
                    authorID = ret[start:end]
                } else {
                    originContent.Exception += " get AuthorID failed: sample_face.gif"
                }
            }
        } else if strings.Index(authorID, "https://pic.cnblogs.com/face/u") != -1 {
            tmplen := len("https://pic.cnblogs.com/face/u")
            if strings.Index(authorID, ".jpg") != -1 {
                authorID = authorID[tmplen:strings.Index(authorID, ".jpg")]
            } else if strings.Index(authorID, ".gif") != -1 {
                authorID = authorID[tmplen:strings.Index(authorID, ".gif")]
            } else if strings.Index(authorID, ".jpeg") != -1 {
                authorID = authorID[tmplen:strings.Index(authorID, ".jpeg")]
            } else if strings.Index(authorID, ".png") != -1 {
                authorID = authorID[tmplen:strings.Index(authorID, ".png")]
            } else if strings.Index(authorID, ".bmp") != -1 {
                authorID = authorID[tmplen:strings.Index(authorID, ".bmp")]
            } else {
                originContent.Exception += " get AuthorID failed: (face/u)" + authorID
            }
        } else {
            tmplen := len("https://pic.cnblogs.com/face/")
            if strings.LastIndex(authorID, "/") > tmplen {
                authorID = authorID[tmplen:strings.LastIndex(authorID, "/")]
            } else {
                originContent.Exception += " get AuthorID failed: (face)" + authorID
            }
        }
        content.AuthorID = authorID
    } else {
        originContent.Exception += " get ing_item_face failed"
    }
    //AuthorUserName
    authorUserName, exists := doc.Find(".ing_item_author").Attr("href")
    if exists {
        tmplen := len("//home.cnblogs.com/u/")
        if strings.Index(authorUserName, "//home.cnblogs.com/u/") != -1 {
            authorUserName = authorUserName[tmplen : len(authorUserName)-1]
            content.AuthorUserName = authorUserName
        } else {
            originContent.Exception += " get AuthorUserName failed"
        }
    } else {
        // The original appended err.Error() here, but err can be nil at this
        // point and would panic; record a static message instead.
        originContent.Exception += " get ing_item_author href failed"
    }
    //AuthorNickName
    authorNickName := doc.Find(".ing_item_author").Text()
    content.AuthorNickName = authorNickName

    publishTime := doc.Find(".ing_detail_title").Text()
    publishTime = publishTime[strings.LastIndex(publishTime, ":")+3:]
    publishTime = strings.TrimSpace(publishTime)
    content.Time = publishTime

    //Lucky
    ingDetailBody := doc.Find("#ing_detail_body")
    luckyNode := ingDetailBody.Find("img[title='这是幸运闪']")
    _, exists = luckyNode.Attr("title")
    if exists {
        content.Lucky = true
        luckyNode.Remove()
    } else {
        content.Lucky = false
    }
    //Private
    privateNode := ingDetailBody.Find("img[title='私有闪存']")
    _, exists = privateNode.Attr("title")
    if exists {
        content.IsPrivate = true
        privateNode.Remove()
    } else {
        content.IsPrivate = false
    }
    //newbie
    newbieNode := ingDetailBody.Find("img[title='欢迎新人']")
    _, exists = newbieNode.Attr("title")
    if exists {
        content.IsNewbie = true
        newbieNode.Remove()
    } else {
        content.IsNewbie = false
    }
    //ingBody
    ingBody, err := ingDetailBody.Html()
    if err != nil {
        originContent.Exception += " Get ing_detail_body error: " + err.Error()
    } else {
        content.Body = ingBody
    }

    commentCount := doc.Find("#comment_block_" + strconv.Itoa(ingID) + " li").Length()
    content.Comments = make([]Comment, commentCount)
    doc.Find("#comment_block_" + strconv.Itoa(ingID) + " li").Each(func(index int, selection *goquery.Selection) {
        //IngID, CommentID, Comment, CommentTime, AuthorID AuthorUserName, AuthorNickName
        comment := &Comment{}
        //IngID
        comment.IngID = ingID
        //CommentID id="comment_1400623"
        commentID, exists := selection.Attr("id")
        if !exists {
            // strconv.Itoa replaces the original string(index), which converts
            // the index to a rune instead of its decimal representation.
            originContent.Exception += " commentID not found by id='comment_1400623', index: " + strconv.Itoa(index)
        } else {
            tmplen := len("comment_")
            comment.CommentID = commentID[tmplen:]
        }
        //CommentTime class="text_green"
        time, exists := selection.Find(".text_green").Attr("title")
        if !exists {
            originContent.Exception += " comment time not found by .text_green, index: " + strconv.Itoa(index)
        } else {
            comment.Time = time
        }
        //AuthorID
        //commentReply(1129969,1415060,9487);return false
        authorID, exists := selection.Find(".gray3").Attr("onclick")
        //https://pic.cnblogs.com/face/sample_face.gif
        //https://pic.cnblogs.com/face/289132/20130423092122.png
        if !exists {
            //search with class="ing_comment_face".src
            selfAuthorID, selfExists := selection.Find(".ing_comment_face").Attr("src")
            if selfExists {
                if strings.Index(selfAuthorID, "https://pic.cnblogs.com/face/u") != -1 {
                    tmplen := len("https://pic.cnblogs.com/face/u")
                    if strings.Index(selfAuthorID, ".jpg") != -1 {
                        selfAuthorID = selfAuthorID[tmplen:strings.Index(selfAuthorID, ".jpg")]
                    } else if strings.Index(selfAuthorID, ".gif") != -1 {
                        selfAuthorID = selfAuthorID[tmplen:strings.Index(selfAuthorID, ".gif")]
                    } else if strings.Index(selfAuthorID, ".jpeg") != -1 {
                        selfAuthorID = selfAuthorID[tmplen:strings.Index(selfAuthorID, ".jpeg")]
                    } else if strings.Index(selfAuthorID, ".png") != -1 {
                        selfAuthorID = selfAuthorID[tmplen:strings.Index(selfAuthorID, ".png")]
                    } else if strings.Index(selfAuthorID, ".bmp") != -1 {
                        selfAuthorID = selfAuthorID[tmplen:strings.Index(selfAuthorID, ".bmp")]
                    } else {
                        originContent.Exception += " get selfAuthorID failed: (face/u)" + selfAuthorID
                    }
                } else {
                    tmplen := len("https://pic.cnblogs.com/face/")
                    if strings.LastIndex(selfAuthorID, "/") > tmplen {
                        selfAuthorID = selfAuthorID[tmplen:strings.LastIndex(selfAuthorID, "/")]
                    } else {
                        originContent.Exception += " get selfAuthorID failed: (face)" + selfAuthorID
                    }
                }
                if selfAuthorID != "" {
                    comment.AuthorID = selfAuthorID
                }
            } else {
                originContent.Exception += " AuthorID not found by .gray3, ing_comment_face, index: " + strconv.Itoa(index)
            }
        } else {
            start := strings.LastIndex(authorID, ",")
            end := strings.Index(authorID, ");")
            if start != -1 && end != -1 {
                authorID = authorID[start+1 : end]
                comment.AuthorID = authorID
            } else {
                originContent.Exception += "get comment AuthorID error"
            }
        }
        authorNode := selection.Find("#comment_author_" + comment.CommentID)
        //AuthorName //home.cnblogs.com/u/grj1046/
        authorUserName, exists := authorNode.Attr("href")
        if !exists {
            originContent.Exception += " AuthorName not found by #comment_author_.href, index: " + strconv.Itoa(index)
        } else {
            tmplen := len("//home.cnblogs.com/u/")
            authorUserName = authorUserName[tmplen : len(authorUserName)-1]
            comment.AuthorUserName = authorUserName
        }
        //AuthorNickName comment_author_1400623
        comment.AuthorNickName = authorNode.Text()
        //Comment
        tmpBody := selection.Find("div")
        /*<a target="_blank" href="//home.cnblogs.com/u/grj1046/">
          <img src="https://pic.cnblogs.com/face/289132/20130423092122.png" class="ing_comment_face" alt="">
          </a> */
        tmpBody.Find(".ing_comment_face").Parent().Remove()
        //<a target="_blank" id="comment_author_1400623" title="nil的闪存" href="//home.cnblogs.com/u/grj1046/">nil</a>
        tmpBody.Find("#comment_author_" + comment.CommentID).Remove()
        tmpBody = tmpBody.First().Remove()
        //if delete button exists, remove
        textGreenNode := tmpBody.Find(".text_green")
        textGreenNode.NextAll().Remove()
        textGreenNode.Remove()
        body, err := tmpBody.Html()
        if err != nil {
            originContent.Exception += " Get comment detail exception, index: " + err.Error()
        } else {
            body = body[strings.Index(body, ": ")+1:]
            body = strings.TrimSpace(body)
            comment.Body = body
        }
        content.Comments[index] = *comment
        //printToConsole("comment => ", comment)
    })
    return content, originContent, nil
}

//GetMaxIngID get the latest ing's ID
//https://ing.cnblogs.com/ajax/ing/GetIngList?IngListType=all&PageIndex=1&PageSize=30&Tag=&_=1495616106104
func (client *Client) GetMaxIngID() (int, error) {
    urlStr := "https://ing.cnblogs.com/ajax/ing/GetIngList?IngListType=all&PageIndex=1&PageSize=1&Tag=&_=" +
        strconv.FormatInt(time.Now().Unix(), 10)
    req, err := http.NewRequest("GET", urlStr, nil)
    if err != nil {
        return 0, err
    }
    //req.Header.Add("Accept", "text/plain, */*; q=0.01")
    req.Header.Add("Cookie", client.authCookie)
    //req.Header.Add("Referer", "https://ing.cnblogs.com/")
    //req.Header.Add("Host", "ing.cnblogs.com")
    //req.Header.Add("Content-Type", "application/json; charset=utf-8")
    req.Header.Add("X-Requested-With", "XMLHttpRequest")
    resp, err := client.httpClient.Do(req)
    if err != nil {
        return 0, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != 200 {
        return 0, errors.New("Response StatusCode " + strconv.Itoa(resp.StatusCode))
    }
    doc, err := goquery.NewDocumentFromResponse(resp)
    if err != nil {
        return 0, err
    }
    maxIngID := doc.Find("#max_ing_id").Text()
    intMaxIngID, err := strconv.Atoi(maxIngID)
    if err != nil {
        return 0, err
    }
    return intMaxIngID, nil
}

//GetLatestIngFromComment get the latest ing's comment list ingID
//https://ing.cnblogs.com/ajax/ing/GetIngList?IngListType=recentcomment&PageIndex=1&PageSize=30&Tag=&_=1495616250086
//it seems pageSize forever equals 30
func (client *Client) GetLatestIngFromComment(pageIndex int, pageSize int) ([]int, error) {
    if pageIndex <= 0 {
        pageIndex = 1
    }
    if pageSize <= 0 {
        pageSize = 30
    }
    urlStr := "https://ing.cnblogs.com/ajax/ing/GetIngList?IngListType=recentcomment&PageIndex=" +
        strconv.Itoa(pageIndex) + "&PageSize=" + strconv.Itoa(pageSize) + "&Tag=&_=" +
        strconv.FormatInt(time.Now().Unix(), 10)
    req, err := http.NewRequest("GET", urlStr, nil)
    if err != nil {
        return nil, err
    }
    req.Header.Add("Cookie", client.authCookie)
    req.Header.Add("X-Requested-With", "XMLHttpRequest")
    resp, err := client.httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != 200 {
        return nil, errors.New("Response StatusCode " + strconv.Itoa(resp.StatusCode))
    }
    doc, err := goquery.NewDocumentFromResponse(resp)
    if err != nil {
        return nil, err
    }
    ingList := make([]int, pageSize)
    //fmt.Println("length", doc.Find("#feed_list ul li .ing-item .ing_body").Length())
    doc.Find("#feed_list ul li .ing-item .ing_body").Each(func(index int, selection *goquery.Selection) {
        attrID, exists := selection.Attr("id")
        if exists {
            tmpLen := len("ing_body_")
            intVal, err := strconv.Atoi(attrID[tmpLen:])
            if err == nil {
                ingList[index] = intVal
            }
        }
    })
    return ingList, nil
}

func printToConsole(str string, v interface{}) {
    strr, _ := json.Marshal(v)
    log.Println(str, string(strr))
}
Johnny 3 Tears recently took some time to sit down with French-Canadian music blog Metal Universe. Johnny takes time to speak on the creative process behind V, tour-mates, and the first official word on the fate of former member Da Kurlzz. You can read the original French interview on the Metal Universe site. We have taken some time to clean up a rough English version (via Google Translate) of the article and fix some grammar/flow issues created by the automated translation process. The verbiage may not be exactly identical to the words Johnny and interviewer Jonathan Gamache exchanged with each other, but we hope the original idea is still present after being translated from English to French and back. Read on below for more: A few weeks before the release of Hollywood Undead’s 5th album, Jonathan Gamache was lucky enough to have an interview with Johnny 3 Tears. MU: Since the release of your first album, Swan Songs , in 2008, you have released a new album about every two years. How do you find the time to create so much music while continuing the touring life? Johnny 3 Tears: Actually, I’d like to publish more albums if we were allowed. We have composed several songs that have never been on albums because we are writing all the time. To be honest, by the time we’ve completed an album, we start writing the next one. We have a studio on the bus that allows us to compose when we are on tour. Also, even if we are not in the studio or on tour, I still write at home. So we always have a lot of results. MU: Is there a song that you thought would be a success when you composed it, but was not as welcomed by the audience as you would have hoped? Johnny 3 Tears: (Laughter) Yes! Many! (Laughs) I like your question, no one has asked me before! We wrote songs saying “it’s going to be huge, it’s going to be the best song of the summer,” but then no one else had that feeling. Our manager has already told me that if there was an exact formula for writing a hit, everyone would use it. The musical tastes of people change constantly. Rock music was number 1 in the past, but now Pop music and Country music are in front, I think. So it’s almost impossible to write a hit because musical tastes change drastically over time. All that is possible to do is to stay true to yourself and write songs on subjects that interest you and that interest your fans. If it is a hit, cool, otherwise we continue to write music. In summary, yes, we wrote several songs thinking that they would be successful, but they weren’t. MU: Can you tell us a bit about Da Kurlzz’s departure earlier this year? Johnny 3 Tears: There’s not much to say. He wanted to do something else. We are always friends, but sometimes the lives of people change. That is what happened in this case. Sometimes I tell myself that I do not want to go on tour and the next day I tell myself that it is not true. It’s important to stay true to yourself and do not do what we do not want. There was no anger or whatever, and we have stayed on good terms. MU: Your new album, Five, will be released on October 27th and will be your longest album to date. What can we expect out of this album? Johnny 3 Tears: If it were up to us, the albums would last two hours, longer than a movie, since in general we publish only about half of the songs we write for an album. This isn’t what the record companies want. They don’t want very long albums, because it’s not profitable for them. 
It’s unfortunate, because I would give it out for free because people will take it like that anyway, but we cannot. That being said, I know that all artists have the same conversation when they release a new album saying that “it’s their best album so far,” but I can say that it’s the best we’ve done, since the group has evolved considerably over the past ten years. I think we did a great job. We were honest with ourselves and wrote what we wanted with enough experimentation, but not to the point of wondering what we were doing. There are more rock songs and a ballad. It’s a very complete album. Personally, I leave a lot of time after completing the album before I can listen to it objectively. I’ve listened to it several times lately and I’m very satisfied. I hope fans will appreciate it as much as we do. MU: You have recently released two video clips, California Dreaming and Whatever It Takes, and both have been seen more than four million (4,000,000) times in less than a month. What is your reaction to that? Johnny 3 Tears: You know, I’m not very familiar with YouTube. So I do not know whether it’s good or bad. If it’s good, then I’m very happy. Otherwise, I’m not (laughs)! MU: Could you tell us about the creative process surrounding Five? Johnny 3 Tears: I will speak personally, because I don’t want to speak for others. Each time it’s a little different, because each album is different. We need to ensure two things: 1) that we work together and 2) that life inspires us enough to write more songs that make sense to us. When the creation of Five began, we sat down and discussed the direction we wanted. We don’t make concept albums, but the key is to have enough experience in your own life to be able to write songs that other people will find meaningful. I think we really took the time to explore some situations we hadn’t explored before. Another key is honesty. Five is an honest album, and we have tackled subjects that you normally would not want to discuss. Five’s writing process allowed us to go to places we had never been in the past, and it is important for a group to evolve and make different music. I am very proud of the result! MU: Since all members of the band sing, how do you decide which member sings which part? Johnny 3 Tears: Everyone has his preferences. Not all members of the group are involved in the writing process. First we write the songs like any band with an acoustic guitar and an acoustic bass and we plan everything before entering the studio. It’s a fairly natural process when we start to get an idea for the sound of the song. Some members are better at heavier verses and others are better at hip-hop and industrial sounds. This is done quite naturally with the tone of the voices and what is said. It is not like “you, you sing this part!”. None of that. I start songs by myself when I write them and I sing until everything is naturally connected. MU: Two part question: Which band did you most enjoy touring with? Which band would you like to go on tour with? Johnny 3 Tears: Atreyu is a band that I really enjoyed being on tour with. I would also say Ten Years, and, of course, there is Avenged Sevenfold, which I loved. I loved to tour with these three groups, they are awesome guys. It’s cool when you’re on tour and you feel like you’ve known each other for a very long time. There are also other guys who are assholes, but I don’t want to talk about them. With whom I would like to go on tour? … Nine Inch Nails! 
It will never happen, I understand, but I would like to watch their performance every night. I love those guys. There’s a few other bands I’d like, like the Beastie Boys, but, you know, it’s impossible. The same goes for the Beatles, but for current bands, I would say Nine Inch Nails. MU: You’re going to be touring a few Canadian cities in the West soon with Butcher Babies. Can we hope for a tour of cities in Eastern Canada? Johnny 3 Tears: Yes, of course! When we return, next spring, I know we will be doing the whole of Eastern Canada. We will definitely be playing some shows! MU: Do you want to say a few words to your fans in Quebec City? Johnny 3 Tears: I love Quebec City. It is one of my favorite cities in the world. It’s like being in Europe without actually going there. It’s “old school”, the fans are great. We have always been supported by the City of Quebec. This is one of the first cities in which we performed in front of a packed house. It will always be engraved in my memory as it was one of the first times I was told “hey, the show’s full”. I didn’t even know what it meant. I was 20 years old and I thought it was the coolest thing in the world. Thanks for the support, and we can’t wait to come back. MU: On behalf of metaluniverse.net, thank you for your time. Johnny 3 Tears: Until next time!
def closed(self): return not self.__parser or self.__parser.closed
package glm

import "fmt"

type Sphere struct {
    Center Vec3
    Radius float32
}

func NewSphere(v *Vec3, r float32) *Sphere {
    return &Sphere{*v, r}
}

func (s *Sphere) CylinderCoord(n *Vec3) *Vec2 {
    a := Atan2(n.X, n.Y)
    return &Vec2{a / Pi2, (1 - n.X) / 2}
}

func (s *Sphere) Midpoint(a, b *Vec3) (mid *Vec3) {
    mid = Vec3Add(a, b)
    mid.Normalize()
    mid.Scale(s.Radius)
    return
}

func (s *Sphere) DistanceToVec3(k *Vec3) float32 {
    return s.Center.DistanceTo(k) - s.Radius
}

func (s *Sphere) ContainsVec3(k *Vec3) bool {
    return s.Center.DistanceToSquared(k) <= s.Radius*s.Radius
}

func (s *Sphere) IntersectSphere(o *Sphere) bool {
    // Two spheres intersect when the distance between centers is less than the
    // sum of the radii; compare squared values to avoid a square root.
    // (The original compared against r1*r1 + r2*r2, which drops the 2*r1*r2 cross term.)
    ls := LengthSquared(s.Center.X-o.Center.X, s.Center.Y-o.Center.Y, s.Center.Z-o.Center.Z)
    sum := s.Radius + o.Radius
    return ls < sum*sum
}

func (s *Sphere) String() string {
    return fmt.Sprintf("%v %v", s.Center, s.Radius)
}
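A quick check of the corrected intersection predicate; assumes Vec3 is a plain struct with X, Y, Z fields, as the usage in this file suggests:

a := NewSphere(&Vec3{X: 0, Y: 0, Z: 0}, 1)
b := NewSphere(&Vec3{X: 1.5, Y: 0, Z: 0}, 1)
// Centers are 1.5 apart and the radii sum to 2, so the spheres overlap.
fmt.Println(a.IntersectSphere(b)) // true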
use frunk_core::{hlist, HList};
use serde::{Deserialize, Serialize};
use serde_json::json;

use crate::{HLabelledMap, Labelled};

#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
struct A {
    a: usize,
}

impl Labelled for A {
    const KEY: &'static str = "a";
}

#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
struct B {
    b1: String,
    b2: bool,
}

impl Labelled for B {
    const KEY: &'static str = "b";
}

#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
struct C {
    c: usize,
}

impl Labelled for C {
    const KEY: &'static str = "c";
}

#[test]
fn must_serialize_to_json() {
    let l = hlist![
        Some(A { a: 1 }),
        B {
            b1: String::from("test"),
            b2: false
        },
        C { c: 2 }
    ];
    let m = HLabelledMap(l);

    let s = serde_json::to_string(&m).expect("unable to serialize to json");

    assert_eq!(
        s,
        "{\"a\":{\"a\":1},\"b\":{\"b1\":\"test\",\"b2\":false},\"c\":{\"c\":2}}"
    );
}

#[test]
fn must_deserialize_owned_label() {
    type List = HList![A];
    let l: List = hlist![A { a: 1 }];

    // let mut map = Map::new();
    // map.insert(String::from("a"), json!({"a": 1}));
    // let v = Value::Object(map);
    let v = json!({"a": {"a": 1}});

    let de: HLabelledMap<List> =
        serde_json::from_value(v).expect("unable to deserialize owned label");

    assert_eq!(HLabelledMap(l), de);
}

// TODO fuzzer
#[test]
fn must_ser_de_refl_without_option() {
    type List = HList![B, A];
    let l: List = hlist![
        B {
            b1: String::from("test"),
            b2: false
        },
        A { a: 1 }
    ];
    let m = HLabelledMap(l);

    let ser = serde_json::to_string(&m).expect("unable to serialize to json");
    let de: HLabelledMap<List> =
        serde_json::from_str(ser.as_str()).expect("unable to deserialize from json");

    assert_eq!(m, de, "ser_de_refl doesn't hold");
}

// TODO fuzzer
#[test]
fn must_ser_de_refl_with_option() {
    type List = HList![Option<B>, A];
    let l: List = hlist![
        Some(B {
            b1: String::from("test"),
            b2: false
        }),
        A { a: 1 }
    ];
    let m = HLabelledMap(l);

    let ser = serde_json::to_string(&m).expect("unable to serialize to json");
    let de: HLabelledMap<List> =
        serde_json::from_str(ser.as_str()).expect("unable to deserialize from json");

    assert_eq!(m, de, "ser_de_refl doesn't hold");
}

#[test]
fn must_serialize_skip_none() {
    let l = hlist![None::<A>, C { c: 2 }];
    let m = HLabelledMap(l);

    let s = serde_json::to_string(&m).expect("unable to serialize to json");

    assert_eq!(s, "{\"c\":{\"c\":2}}");
}

#[test]
fn must_deserialize_skip_none() {
    let l = hlist![A { a: 1 }, C { c: 2 }];
    let m = HLabelledMap(l);

    let ser = serde_json::to_string(&m).expect("unable to serialize to json");
    let de: HLabelledMap<HList![Option<B>, C]> =
        serde_json::from_str(ser.as_str()).expect("unable to deserialize from json");

    assert_eq!(de, HLabelledMap(hlist![None, C { c: 2 }]));
}

#[test]
fn must_deserialize_unordered() {
    let l = hlist![A { a: 1 }, C { c: 2 }];
    let m = HLabelledMap(l);

    let ser = serde_json::to_string(&m).expect("unable to serialize to json");
    let de: HLabelledMap<HList![C, A]> =
        serde_json::from_str(ser.as_str()).expect("unable to deserialize from json");

    assert_eq!(de, HLabelledMap(hlist![C { c: 2 }, A { a: 1 }]));
}
// UPDATE an entry in the map
public void update(Note note) throws InvalidKeyException {
    if (!reg.containsKey(note.getTitle()))
        throw new InvalidKeyException("Note not present");
    reg.put(note.getTitle(), note);
}
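For context, a minimal sketch of the kind of registry this method appears to belong to. Only the `reg` map and `Note.getTitle()` are implied by the method above; the class name, the `add` method, and the choice of java.security.InvalidKeyException are hypothetical (the original may use its own exception type):

import java.security.InvalidKeyException;
import java.util.HashMap;
import java.util.Map;

// Hypothetical enclosing class; only 'reg' and Note.getTitle() are implied above.
public class NoteRegistry {
    private final Map<String, Note> reg = new HashMap<>();

    // ADD: reject duplicates, so update() can distinguish a missing entry.
    public void add(Note note) throws InvalidKeyException {
        if (reg.containsKey(note.getTitle()))
            throw new InvalidKeyException("Note already present");
        reg.put(note.getTitle(), note);
    }
}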
// NOTE: these import paths are assumed from BoofCV / DDogleg / GeoRegression
// conventions; the LookUpSimilarImages interface is assumed to be on the classpath.
import boofcv.misc.BoofLambdas;
import boofcv.struct.feature.AssociatedIndex;
import georegression.struct.point.Point2D_F64;
import org.ddogleg.struct.DogArray;
import org.jetbrains.annotations.Nullable;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * Storage for the raw results of finding similar images. Everything is stored in memory
 * in an uncompressed format.
 *
 * @author Peter Abeles
 */
public class SimilarImagesData implements LookUpSimilarImages {
    public final List<String> listImages = new ArrayList<>();
    public final Map<String, Info> imageMap = new HashMap<>();

    // Which view was referenced when findSimilar() was last called.
    @Nullable Info targetInfo;

    /**
     * Clears all references to other objects
     */
    public void reset() {
        listImages.clear();
        imageMap.clear();
        targetInfo = null;
    }

    /**
     * Adds a new view.
     *
     * @param id The unique ID for the view
     * @param features List of image features. Pixel coordinates.
     */
    public void add( String id, List<Point2D_F64> features ) {
        Info info = new Info();
        info.index = listImages.size();
        info.features.copyAll(features, ( src, dst ) -> dst.setTo(src));
        listImages.add(id);
        imageMap.put(id, info);
    }

    /**
     * Used to specify the relationship between two similar views by providing which features match up
     *
     * @param viewA The 'src' view of the matches
     * @param viewB The 'dst' view of the matches
     * @param matches List of matching image features
     */
    public void setRelationship( String viewA, String viewB, List<AssociatedIndex> matches ) {
        Info infoA = imageMap.get(viewA);
        Info infoB = imageMap.get(viewB);
        infoA.similarViews.add(viewB);
        infoB.similarViews.add(viewA);

        // Similar data is only stored in the low index view because it's symmetric
        boolean swapped = false;
        if (infoA.index > infoB.index) {
            Info tmp = infoA;
            infoA = infoB;
            infoB = tmp;
            swapped = true;
        }

        // Copy over the matches, but make sure infoA is the src
        Relationship related = new Relationship(listImages.get(infoB.index));
        infoA.relationships.add(related);
        related.pairs.resize(matches.size());
        for (int i = 0; i < matches.size(); i++) {
            AssociatedIndex a = related.pairs.get(i);
            AssociatedIndex b = matches.get(i);
            if (swapped) {
                a.setTo(b.dst, b.src);
            } else {
                a.setTo(b);
            }
        }
    }

    @Override
    public List<String> getImageIDs() {
        return listImages;
    }

    @Override
    public void findSimilar( String target, @Nullable BoofLambdas.Filter<String> filter, List<String> similarImages ) {
        similarImages.clear();
        Info info = imageMap.get(target);
        for (int i = 0; i < info.similarViews.size(); i++) {
            if (filter == null || filter.keep(info.similarViews.get(i))) {
                similarImages.add(info.similarViews.get(i));
            }
        }
        this.targetInfo = info;
    }

    @Override
    public void lookupPixelFeats( String target, DogArray<Point2D_F64> features ) {
        features.reset();
        features.copyAll(imageMap.get(target).features.toList(), ( src, dst ) -> dst.setTo(src));
    }

    @Override
    public boolean lookupAssociated( String similarD, DogArray<AssociatedIndex> pairs ) {
        Objects.requireNonNull(targetInfo, "Must call findSimilar first");

        Info similarInfo = imageMap.get(similarD);
        boolean swapped = targetInfo.index > similarInfo.index;
        if (swapped) {
            String targetID = listImages.get(targetInfo.index);
            Relationship related = Objects.requireNonNull(similarInfo.findRelated(targetID));
            pairs.resetResize(related.pairs.size);
            for (int i = 0; i < pairs.size; i++) {
                AssociatedIndex b = related.pairs.get(i);
                pairs.get(i).setTo(b.dst, b.src);
            }
        } else {
            Relationship related = Objects.requireNonNull(targetInfo.findRelated(similarD));
            pairs.resetResize(related.pairs.size);
            for (int i = 0; i < pairs.size; i++) {
                pairs.get(i).setTo(related.pairs.get(i));
            }
        }
        return true;
    }

    /**
     * All the information for a single view. Images being similar is symmetric, however matches are only
     * saved in the lower indexed view to reduce memory.
     */
    public static class Info {
        public int index;
        public final List<String> similarViews = new ArrayList<>();
        public final List<Relationship> relationships = new ArrayList<>();
        public final DogArray<Point2D_F64> features = new DogArray<>(Point2D_F64::new);

        public @Nullable SimilarImagesData.Relationship findRelated( String id ) {
            for (int i = 0; i < relationships.size(); i++) {
                if (relationships.get(i).id.equals(id))
                    return relationships.get(i);
            }
            return null;
        }
    }

    /**
     * Specifies how two views are related by saying which image features are matched with which other image features.
     */
    public static class Relationship {
        public String id;
        DogArray<AssociatedIndex> pairs = new DogArray<>(AssociatedIndex::new);

        public Relationship( String id ) {
            this.id = id;
        }
    }
}
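A short usage sketch of the class above. The view names, feature coordinates, and the single association are made up for illustration, and it assumes AssociatedIndex has an (src, dst) constructor; the calling pattern (add, setRelationship, then findSimilar and lookupAssociated) follows directly from the methods shown:

import boofcv.struct.feature.AssociatedIndex;
import georegression.struct.point.Point2D_F64;
import org.ddogleg.struct.DogArray;

import java.util.ArrayList;
import java.util.List;

public class SimilarImagesDataExample {
    public static void main(String[] args) {
        SimilarImagesData db = new SimilarImagesData();

        // Two views with a single feature each (coordinates are arbitrary).
        db.add("view0", List.of(new Point2D_F64(10, 20)));
        db.add("view1", List.of(new Point2D_F64(12, 21)));

        // Declare that feature 0 in view0 matches feature 0 in view1.
        db.setRelationship("view0", "view1", List.of(new AssociatedIndex(0, 0)));

        List<String> similar = new ArrayList<>();
        db.findSimilar("view0", null, similar);   // -> [view1]

        DogArray<AssociatedIndex> pairs = new DogArray<>(AssociatedIndex::new);
        db.lookupAssociated("view1", pairs);      // pairs.get(0) relates 0 <-> 0
    }
}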
from typing import List, Tuple

import serlib.cparser  # assumed available in the surrounding project


# RGoal and parse_goals are assumed to be defined elsewhere in this module.
def parse_stack(par, post_fix, ann, root, output) -> List[Tuple[List[RGoal], List[RGoal]]]:
    items = serlib.cparser.children(post_fix, ann, root).tolist()
    res = []
    for item in items:
        _first, _second = serlib.cparser.children(post_fix, ann, item).tolist()
        first = parse_goals(par, post_fix, ann, _first, output)
        second = parse_goals(par, post_fix, ann, _second, output)
        res.append((first, second))
    return res
Adam West
Guest Star Information
- Born: September 19, 1928, Walla Walla, Washington, USA
- Died: June 9, 2017 (aged 88), Los Angeles, California, USA
- Nationality: American
- Claim to fame: Actor
- Characters: Himself; Batman; Mayor Adam West
- First appearance: "Mr. Plow"

This article is about Adam West the actor and guest star. For the character, please see Adam West (character).

William West Anderson, better known as Adam West (September 19, 1928 - June 9, 2017), was an actor most famous for his portrayal of Bruce Wayne/Batman on the popular 1960s TV series Batman. On The Simpsons, he guest starred as himself in the episode "Mr. Plow". In that episode, Homer clears the snow from Adam's house entrance with his Mr. Plow truck. When Adam calls Homer again later, Barney clears the entrance instead and tells Adam that he won't reveal his identity. Adam then gets into his Batman car, which is all wrecked up.[1] West later guest starred again in "Large Marge" as his character Batman, with co-star Burt Ward as Robin.[2]

West also voiced a character of the same name on Family Guy, the Mayor of Quahog, Rhode Island. That character also appeared in "The Simpsons Guy", the Simpsons/Family Guy crossover. Since West's death, it is unknown who will voice Mayor Adam West going forward.

West died on June 9, 2017, after a brief battle with leukemia. News of his death saddened many Adam West and Batman fans.

Appearances
import { Routes, RouterModule } from '@angular/router';

import { AuthComponent } from './auth.component';
import { MetaGuard } from '@ngx-meta/core';

const authRoutes: Routes = [
  { path: '', redirectTo: 'login', pathMatch: 'full' },
  {
    path: '',
    component: AuthComponent, // Wrapper
    canActivateChild: [MetaGuard],
    children: [
      { path: 'login', loadChildren: './components/login/login.module#LoginModule' },
      {
        path: 'registration',
        loadChildren: './components/registration/registration.module#RegistrationModule',
      },
    ],
  },
];

export const AuthRoutes = RouterModule.forChild(authRoutes);
package Controller;

public interface IUsuario {
    boolean validarLogin(String login, String senha);
}
package test_persistence

import (
	"testing"

	cconf "github.com/pip-services3-go/pip-services3-commons-go/config"
	persist "github.com/pip-templates/pip-templates-microservice-go/persistence"
)

func TestBeaconsFilePersistence(t *testing.T) {
	var persistence *persist.BeaconsFilePersistence
	var fixture *BeaconsPersistenceFixture

	persistence = persist.NewBeaconsFilePersistence("../../temp/beacons.test.json")
	persistence.Configure(cconf.NewEmptyConfigParams())

	fixture = NewBeaconsPersistenceFixture(persistence)

	opnErr := persistence.Open("")
	if opnErr == nil {
		persistence.Clear("")
	}
	defer persistence.Close("")

	t.Run("BeaconsFilePersistence:CRUD Operations", fixture.TestCrudOperations)
	persistence.Clear("")

	t.Run("BeaconsFilePersistence:Get with Filters", fixture.TestGetWithFilters)
}
def _mul_matrix(self, other):
    from .non_lazy_tensor import NonLazyTensor
    from .mul_lazy_tensor import MulLazyTensor

    self = self.evaluate_kernel()
    other = other.evaluate_kernel()
    if isinstance(self, NonLazyTensor) or isinstance(other, NonLazyTensor):
        return NonLazyTensor(self.evaluate() * other.evaluate())
    else:
        # Put the tensor with the smaller root decomposition on the left.
        left_lazy_tensor = (
            self if self._root_decomposition_size() < other._root_decomposition_size() else other
        )
        right_lazy_tensor = other if left_lazy_tensor is self else self
        return MulLazyTensor(
            left_lazy_tensor.root_decomposition(), right_lazy_tensor.root_decomposition()
        )
/**
 * Returns the next random number in the sequence.
 *
 * @return The next random number.
 */
public final synchronized double nextDouble() {
    int k;

    k = m[i] - m[j];
    if (k < 0) {
        k += m1;
    }
    m[j] = k;

    if (i == 0) {
        i = 16;
    } else {
        i--;
    }
    if (j == 0) {
        j = 16;
    } else {
        j--;
    }

    if (haveRange) {
        return left + dm1 * (double) k * width;
    } else {
        return dm1 * (double) k;
    }
}
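The update above is the classic subtractive lagged-Fibonacci step over a 17-element table. Below is a self-contained, illustrative version of the same scheme. The lag offsets (i = 16, j = 4), the modulus, and the crude seeding are assumptions for the sketch; the original class's fields (m1, dm1, haveRange) and constructor are not part of the snippet:

// Illustrative sketch of the subtractive lagged-Fibonacci scheme used above:
// x_n = (x_{n-17} - x_{n-5}) mod M1, then scaled by 1/M1 into [0, 1).
public class SubtractiveDemo {
    private static final int M1 = Integer.MAX_VALUE; // modulus, 2^31 - 1 (assumed)
    private static final double DM1 = 1.0 / M1;      // scale factor into [0, 1)

    private final int[] m = new int[17];
    private int i = 16, j = 4; // assumed starting offsets for lags 17 and 5

    public SubtractiveDemo(long seed) {
        // Crude linear-congruential fill; real implementations seed more carefully.
        long s = seed;
        for (int n = 0; n < 17; n++) {
            s = s * 2862933555777941757L + 3037000493L;
            m[n] = (int) ((s >>> 33) % M1);
        }
    }

    public double nextDouble() {
        int k = m[i] - m[j];
        if (k < 0) k += M1;
        m[j] = k;
        i = (i == 0) ? 16 : i - 1; // walk both indices backwards through the table
        j = (j == 0) ? 16 : j - 1;
        return DM1 * (double) k;
    }

    public static void main(String[] args) {
        SubtractiveDemo rng = new SubtractiveDemo(42);
        for (int n = 0; n < 3; n++) System.out.println(rng.nextDouble());
    }
}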
/**
 * This class exists solely so GSON can correctly
 * parse the JSON returned from the database representing
 * the team stats
 */
public class Score {
    private String score;
    private String win;
    private String lose;

    public String getScore() {
        return score;
    }

    public void setScore(String score) {
        this.score = score;
    }

    public String getWin() {
        return win;
    }

    public void setWin(String win) {
        this.win = win;
    }

    public String getLose() {
        return lose;
    }

    public void setLose(String lose) {
        this.lose = lose;
    }
}
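Since the class exists only for GSON, here is a quick sketch of how it would typically be used. The JSON payload is made up, and it assumes the JSON keys match the Java field names exactly (no @SerializedName mapping appears above):

import com.google.gson.Gson;

public class ScoreExample {
    public static void main(String[] args) {
        String json = "{\"score\":\"3-1\",\"win\":\"12\",\"lose\":\"4\"}"; // made-up payload

        // GSON maps JSON keys onto the private fields via reflection,
        // which is why Score needs no constructor or annotations.
        Score score = new Gson().fromJson(json, Score.class);
        System.out.println(score.getWin()); // 12
    }
}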
from math import gcd, floor, ceil

readints = lambda: map(int, input().strip('\n').split())

la, ra, ta = readints()
lb, rb, tb = readints()

k = (la - lb) / gcd(ta, tb)
klo = floor(k)
khi = ceil(k)


def intersection(ax, ay, bx, by):
    # Length of the overlap of the integer segments [ax, ay] and [bx, by].
    l, r = max(ax, bx), min(ay, by)
    if l > r:
        return 0
    return r - l + 1


ans = 0
for dk in [klo, khi]:
    df = lb - la + dk * gcd(ta, tb)
    bx = la + df
    by = bx + (rb - lb)
    ans = max(ans, intersection(la, ra, bx, by))
print(ans)
The effect of the lipoxygenase derivatives 13-hydroperoxylinoleic acid (13-HPODE) and 13-hydroxylinoleic acid (13-HODE) on zymosan-induced chemiluminescence of rat neutrophils in vitro was evaluated. Both derivatives were found to inhibit the functional activity of neutrophils. The extent of inhibition was changed by preincubation of the neutrophils with arachidonic or linoleic acid. On the other hand, in experiments with dogs, it was shown that the extent of this inhibition increases considerably after ischemia and reperfusion of the myocardium. We therefore assume that the ratio of lipoxygenase derivatives of arachidonic and linoleic acid plays a regulatory role in the functional activity of neutrophils. It was concluded that lipoxygenase derivatives of linoleic acid inhibit neutrophil functional activity.
// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	b.levels[levelKey] = value
	return nil
}
<filename>research_pyutils/path_related.py # Copyright (C) 2015 <NAME> # available under the terms of the Apache License, Version 2.0 import os from os import system, listdir, walk from os.path import isdir, isfile, sep, join, getmtime import shutil import errno from glob import glob from subprocess import check_output from pathlib import Path # TO-DO: put progress bars in all the packages with time-consuming loops. def mkdir_p(path, mode=500): """ 'mkdir -p' in Python. """ try: # http://stackoverflow.com/a/11860637/1716869 os.makedirs(path, mode=mode) return path except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and isdir(path): return path else: raise def rm_if_exists(path): """ :param path: Path that will be removed (if it exists). """ try: shutil.rmtree(path) except OSError: pass def is_path(path, msg=None, stop_execution=False): """ Checks if a path exists. :param path: Path to be checked. :param msg: (optional) Message that will be printed in case it does not exist. :param stop_execution: (optional) Boolean to declare if we want to stop execution if the path does not exist. If True, then a RuntimeError will be raised, the function will return False. :return: Boolean value. True if the path exists, False otherwise. """ if not (isdir(path)): if msg is None: msg = 'Not valid path ({})'.format(path) if stop_execution: raise RuntimeError(msg) print(msg) return False return True def remove_empty_paths(path, removeRoot=False, verbose=True): """ Removes empty folders recursively. It searches for empty sub-folders, deletes them and then searches the initial path. :param path: Initial path to remove empty (sub-)folders. :param removeRoot: (optional) Boolean, if True, removes the initial path if empty. :param verbose: (optional) Boolean, if True, prints info during execution. :return: """ # code inspired from: # http://www.jacobtomlinson.co.uk/2014/02/16/python-script-recursively-remove-empty-folders-directories/ if not isdir(path): if verbose: print('The path {} does not exist.'.format(path)) return # recursively check for empty sub-folders and delete them if they are empty. file_list = os.listdir(path) if len(file_list): for f in file_list: new_path = join(path, f) if isdir(new_path): remove_empty_paths(new_path, removeRoot=True, verbose=verbose) # if the (initial) folder is empty, delete it. file_list = os.listdir(path) if len(file_list) == 0 and removeRoot: if verbose: print('Removing the empty path: {}.'.format(path)) os.rmdir(path) def copy_contents_of_folder(src, dest, suffix=''): """ Performs the unix command of 'cp -r [path_0]/*[suffix] [path_1]'. :param src: (str) Path to copy from. :param dest: (str) Path to copy to. :param suffix: (Optional, str) The suffix/extension of the files. :return: """ assert (isdir(src)) system('cp -r {}{}*{} {}'.format(src, sep, suffix, dest)) def _format_string_name_number(pad, name): return '{nam1:0{pad}d}'.format(pad=pad, nam1=name) def copy_the_previous_if_missing(p, expected_list=None, suffix=None, verbose=False): """ Copies the previous file if it is missing. If the expected_list is provided, it ensures that all the filenames in the expected list exist in the p path as well. Use case: Fill the missing files, e.g. 1st order markov assumption. ASSUMPTIONS: a) The naming should be only numbers, e.g. '000034.[suffix]', b) The [suffix] of the first file (listdir) will be copied in case no suffix is provided. :param p: (string) Base path for performing the copying/checking. 
:param expected_list: (list, optional) The list of filenames to ensure that exist. If it is not provided, then a sequential structure from the first till the last element of the p dir is assumed. :param suffix: (string, optional) The suffix of the files to glob. If None is provided, then the extension of the first file is used. :param verbose: (bool, optional) Whether to print info for the copying. :return: """ from shutil import copy2 if suffix is None: init_l = sorted(listdir(p)) suffix = Path(init_l[0]).suffix if verbose: m1 = ('The suffix {} chosen, only the files with this' 'suffix will be affected.') print(m1.format(suffix)) # update the init_l list with a glob init_l = sorted(glob(p + '*' + suffix)) assert len(init_l) >= 1 init_l = [Path(el).stem for el in init_l] if expected_list is None: # as a workaround, we accept the first and the last element # we find in the original list and then we just form the # expected list with those. end_el = int(init_l[-1]) start_el = int(init_l[0]) # get the number of digits from the length of the start_el. pad = len(init_l[0]) # format the expected list now. expected_list = [_format_string_name_number(pad, el) for el in range(start_el, end_el)] else: # ensure that there is no extension in the list provided. try: int(expected_list[0]) except ValueError: # in this case, get rid of the extension. expected_list = [Path(el).stem for el in expected_list] expected_list = sorted(expected_list) # iterator counting the parsed elements of the init list. cnt_init = 0 # ready to iterate over the expected list and if the respective # element of the init_l is missing, copy the previous (or the next # if the initia are missing). for el_exp in expected_list: if cnt_init >= len(init_l): # we reached the end of the list, but there are more # elements that should be copied. # Trick, append a new element to the end, which is greater # than al lin the expected list, ensure the rest are copied. el1 = int(expected_list[-1]) + 5 init_l.append(_format_string_name_number(len(init_l[0]), el1)) if el_exp == init_l[cnt_init]: # we have a match, no need to copy anything. cnt_init += 1 continue diff = int(el_exp) - int(init_l[cnt_init]) if diff > 0 and cnt_init < len(init_l) - 1: # we need to fast forward the parsing of the second list while diff > 0 and cnt_init < len(init_l) - 1: cnt_init += 1 diff = int(el_exp) - int(init_l[cnt_init]) else: # We actually need to copy the previous. target = el_exp if cnt_init == 0: # corner case where the first is missing. source = init_l[cnt_init] else: source = init_l[cnt_init - 1] # format the filenames paths for the new (i.e. to be copied) and # old (i.e. to copy)files. p_src = p + source + suffix p_trg = p + target + suffix assert (isfile(p_src) and not isfile(p_trg)) if verbose: print('Copying the file {} to the {}.'.format(p_src, p_trg)) copy2(p_src, p_trg) def unzip_all_dir(p, extension='zip'): """ Unzips all the zip folders in the directory. :param p: (string) Path with all the zips. :param extension: (string, optional) The extension/compressed format of the files. :return: None """ from zipfile import ZipFile import tarfile m = 'There is no such path with zips (p = {}).' assert isdir(p), m.format(p) all_zips = glob(join(p, '*.{}'.format(extension))) for zi in all_zips: if extension == 'zip': compr_ref = ZipFile(zi, 'r') else: # right now only these two formats supported. 
compr_ref = tarfile.open(zi, 'r') compr_ref.extractall(p) compr_ref.close() def count_files(path='.', ending='', directory=False, subdirs=False): """ It counts the files in the current directory and the subfolders. There is the options to count only in the current directory (and not in the subfolders), or to count only files with certain extensions. :param path: (string, optional) The base path to count the files in. :param ending: (string, optional) The extension/suffix of the files to search for. Only makes sense if directory=False. :param directory: (string, optional) If True, then it *only* counts the number of directories (folders). The called one is also counted. If False, then it just counts the files. :param subdirs: (string, optional) If False, it counts only in this directory and not the subfolders. If True, it recursively counts the files in the subfolders as well. :return: (int) The number of files. """ assert isdir(path), 'The initial path does not exist.' # subtr is for avoiding counting the '.' and '..' paths. subtr = 0 if not subdirs: # in this case, we just care for the first # level, apply the fastest method. if ending == '': cmd = 'ls -f {} | wc -l'.format(path) subtr = 2 else: # This is a workaround because ls -f [path]/* returns also # the results of subfolders. cmd = 'ls -f {}*{} | wc -l'.format(path, ending) else: subtr = 1 if len(ending) == 0 else 0 add_arg = '' if directory: add_arg = ' -type d ' cmd = 'find {}{} -name "*{}" -print | wc -l'.format(path, add_arg, ending) nr_files = check_output(cmd, shell=True) # get rid of the \n in the end and return. return int(nr_files[:-1]) - subtr def folders_last_modification(path, return_vars=True, verbose=True, only_dir=False): """ Iteratively computes the last modification of the files/subfolders in the path. :param path: (string) The path to check. :param return_vars: (bool, optional) If True, return the last modification. This is returned in a datetime format. :param verbose: (bool, optional) If True, print the last modification time. :param only_dir: (bool, optional) If True, search also the filenames in the leaf folders ONLY. :return: """ if only_dir: last_mod = max(getmtime(root) for root,_,_ in walk(path)) else: last_mod = getmtime(path) for root, dirname, filename in walk(path): if len(dirname) == 0 and len(filename) > 0: # # we are in a leaf folder, consider the files. last_mod1 = max(getmtime(join(root, fn)) for fn in filename) last_mod = max(last_mod, last_mod1) # # get the modification of the root (folder); same as only_dir case. last_mod = max(last_mod, getmtime(root)) if verbose: import time print(time.ctime(last_mod)) if return_vars: from datetime import datetime return datetime.fromtimestamp(last_mod) def apply_fn_all_subfolders(pb, func): """ Applies a function in each folder and its subfolders (recursive calls). :param pb: (str) Base path. :param func: (function) The function to apply to each folder. :return: - """ func(pb) for fn in sorted(listdir(pb)): if isdir(pb + fn): # # then call the function recursively for this dir. print(fn) apply_fn_all_subfolders(join(pb, fn, ''), func)
How many of the NYC rats survived hurricane Sandy?

This question has been asked in the wake of Sandy's flooding of lower and east Manhattan. See, for example, articles in Huffington Post Green, Forbes, National Geographic, Business Insider, Mother Nature Network and NYMag.

The short answer is: some rats drowned, some survived. The complicated question, how many drowned and how many survived, is probably impossible to answer. But we can speculate using the information and knowledge in our possession. The things we really need to know, though, we don't - that information is just not available (and some of it never will be).

How many rats are in NYC?

Nobody knows. Nobody seems to even be attempting an estimate. Beware of the myth that there is one rat per person. That is a very old myth. It started in 1909, when W.R. Boelter published a study of rats in England. He asked farmers (but never bothered to look in the cities) to estimate how many rats they had in their fields. From that informal survey, Boelter came up with an average of one rat per acre (yes, of agricultural land). At the time, there were 40 million cultivated acres in England. From that, he estimated the total population of rats on agricultural land to be about 40 million. Completely coincidentally, England in 1909 also had a human population of 40 million. So the 1:1 ratio stuck. And it has been repeated for more than a century, by media, by scientists, by the United Nations, by pest control companies, by health departments, and apparently everyone else.

In 1949, Dave Davis did a systematic study of rats, trapping and capturing them, and estimated that the rat population in New York City was only about 250,000. Not even close to 8 million.

An aside - I have an indirect personal connection to Davis. For a while he was a professor in the Department of Zoology at NCSU, that is, in my own department. Around the time he was ready to retire, in the 1970s, he was actively working on daily and seasonal rhythms in various animals. He had worked with Curt Richter before, at Johns Hopkins, and Curt is one of the pioneers of chronobiology. Davis sent some woodchucks on a ship from Philadelphia to Australia. While on the ship, the woodchucks kept EST time, but quickly re-entrained to the local Australian time once they arrived and were exposed to ambient light. Although the field was still very young, Davis' work made the rest of the department aware of it (they did not dismiss it as Biorrhythms silliness, as many did at the time), so they were interested in hiring a replacement who was doing something similar. So they hired a bright, young lad from Texas for the spot, with two Science papers already published and only 3.5 years needed to get both his MS and PhD. The new faculty member's name was Herbert Underwood. Two decades later I joined the Underwood lab. The rest is history.

Anyway, back to the rat population. Estimates vary wildly, to as high as 32 million. Nobody really knows. New York City is old. It was built and rebuilt. New buildings were built on top of the old ones. There are old, buried tunnels, rooms and chambers, now inaccessible to humans but perfectly accessible to rats. Gradually, the city dug out more and more sewers, more and more pipes of various kinds, more subways and other tunnels. Thus more places for rats to nest. We gradually built comfortable homes for more and more rats.

The rat population is not evenly distributed, either. Rats tend to be where poor people live, and where the restaurants are. That's where the food is.

And not all rats go to the surface. Rats are pretty loyal to their place of birth, and rarely venture more than about 60 feet from it throughout their lives. If displaced, they can find their way home from as far as 4 miles away, which for a foot-long animal is an extremely long distance. If they can get food down under, e.g., from subway passengers throwing uneaten food onto the tracks (which they do), rats never need to go up to the surface. They never get captured and counted in surface surveys.

Can rats swim?

Yes, rats are strong swimmers. They can even dive for a little while: if a domesticated rat can be trained to dive (and enjoy it), I assume that a wild rat can do it when its life is threatened.

The thing is, swimming in a water maze in the lab, or on the surface of a body of water, is one thing. Swimming upward, against a powerful stream of water rushing down, is a completely different thing. Rats may be strong swimmers, but they are not Johnny Weissmullers. There are many ways up to the surface, but they all go up. And if the water was mainly gushing into the tunnels from above, from the streets, as Sandy was flooding, the rats would have had to swim or dive up narrow pipes, essentially vertically, against the water. No way. Those guys drowned.

To get up to the surface, rats need to know the way there. Rats know their own territory very well. But rats that never go to the surface do not know how to get there. They may still instinctually want to go up, but they don't know the way, so they would have to get lucky to find the stairs and then fight their way up against the gushing water.

Rats already on the surface would probably be fine. The water and wind from the Battery would carry them north until they reached dry ground. They can certainly stay on the surface. Salt water is denser than fresh water, so they would find it even easier to stay afloat, though their eyes might not like all the salt.

What was flooded, when and how?

Right now, we do not know exactly where, when and how the water entered the subway tunnels, sewers, etc. The MTA site does not provide much information. The New York Times does not either; they are concerned with information useful to people, e.g., when the subway will open again, not where, when and how the subway initially flooded.

Most likely the water came from above, from the flooded streets, after the sea rose high at the Battery and on the East side. This is important. It is easier for rats to float on the surface of water rising from below than to fight against water falling from above. Also, most of Manhattan (and the rest of NYC) did not flood at all. Most of the rats probably survived just fine where they were.

Who lived, who died?

So, from the above, we can speculate that many rats survived. Some were never affected by the flooding. Some were on the surface already and managed to run or swim to higher ground. Some knew their way to the surface and made it there. Rats are smart and crafty: if they can find a way to hide or get out, they will. But some rats certainly drowned. Those are the rats that live deep inside holes we never know about, let alone visit. Rats that never go up to the surface. Rats that had the misfortune of having to escape essentially vertically, up against strong gushing water.

There is a rule of thumb: if you see a rat on the surface during the daytime, the underground population is enormous. And I see them every month I go up to New York.

When rats are crowded, dominant rats take the best spots. If the population forages on the surface, dominant rats forage during the night. Subdominant (or submissive) rats are temporally displaced to the daytime shift. This is important. If Sandy started to flood the tunnels during the day (and nobody knows, or has made public, this information, as the subway was already closed to people by then), it would be the non-dominant rats on the surface, and thus the ones more likely to survive. If the flooding started at night, it would be the dominant rats on the surface, floating away to safety. Dominant rats are more likely to be able to relocate and survive in other places where they have to compete with locals. Non-dominant rats would have a much harder time finding a new home.

So, my guess is that most of the rats survived. But quite a large number of rats drowned, depending on their exact location, depth, whether they knew how to get to the surface at all, their exact route to the surface, and their status in the social hierarchy.

You can learn much more about New York City rats from Rats: Observations on the History and Habitat of the City's Most Unwanted Inhabitants by Robert Sullivan, one of the most wonderful popular science books I have read over the past decade.

I will also be doing a HuffPoLive segment about this at 1pm EDT, and will post the link in the comments once I have it.

Update: More from The Urban Scientist, Jezebel, The Daily Beast, Live Science, Forbes.
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted


class FunctionRegressor(BaseEstimator, RegressorMixin):
    """
    This class allows you to pass a function to make the predictions you're interested in.

    Arguments:
        func: the function that can make predictions
        kwargs: extra keyword arguments will be passed to the function, can be grid-search-able

    The functions that are passed need to be pickle-able. That means no lambda functions!
    """

    def __init__(self, func, **kwargs):
        self.func = func
        self.kwargs = kwargs

    def fit(self, X, y):
        """Fit the regressor. No-op."""
        # Run it to confirm no error happens.
        _ = self.func(X, **self.kwargs)
        self.fitted_ = True
        return self

    def partial_fit(self, X, y=None):
        """Fit the regressor partially. No-op."""
        # Run it to confirm no error happens.
        _ = self.func(X, **self.kwargs)
        self.fitted_ = True
        return self

    def predict(self, X):
        """Make predictions using the passed function."""
        check_is_fitted(self, ["fitted_"])
        return self.func(X, **self.kwargs)

    def get_params(self, deep=True):
        return {**self.kwargs, "func": self.func}

    def set_params(self, **params):
        for k, v in params.items():
            if k == "func":
                self.func = v
            else:
                self.kwargs[k] = v
        return self
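A usage sketch (the prediction function and grid values are made up): because the extra keyword arguments surface through get_params/set_params, they can be tuned like ordinary hyperparameters:

import numpy as np
from sklearn.model_selection import GridSearchCV


def shifted_row_mean(X, shift=0.0):
    # Module-level function (not a lambda), so it stays pickle-able.
    return X.mean(axis=1) + shift


X = np.random.uniform(size=(100, 3))
y = X.mean(axis=1) + 0.5

model = FunctionRegressor(shifted_row_mean, shift=0.0)
model.fit(X, y).predict(X)

# `shift` is exposed via get_params, so GridSearchCV can tune it.
grid = GridSearchCV(model, param_grid={"shift": [0.0, 0.25, 0.5]}, cv=3)
grid.fit(X, y)
print(grid.best_params_)  # {'shift': 0.5}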
#include <bits/stdc++.h>
using namespace std;

#define ll long long
#define DEBUG(x) cout << '>' << #x << ':' << x << endl;
#define REP(i, n) for (ll i = 0; i < (n); i++)
#define FOR(i, a, b) for (ll i = (a); i < (b); i++)
#define FORC(i, a, b, c) for (ll i = (a); i < (b); i += (c))
#define pb(x) push_back(x)
#define ff first
#define ss second
#define dd double
#define mod 1000000007

inline ll md(ll x) { return x < mod ? x : x % mod; }

ll power(ll x, ll y) {
    if (y == 0) {
        return 1;
    }
    ll x_ = power(x, y / 2);
    if (y & 1) {
        return md(md(x_ * x_) * md(x));
    } else {
        return md(x_ * x_);
    }
}

ll mod_inverse(ll x) { return power(x, mod - 2); }

// Segment tree over prefix sums with lazy range additions; tree stores range minimums.
const ll N = 2e5 + 100;
ll tree[4 * N];
ll lazy[4 * N];

void constructUtil(ll *arr, ll ss, ll se, ll si) {
    if (ss > se) {
        return;
    }
    if (ss == se) {
        tree[si] = arr[ss];
        return;
    }
    ll mid = (ss + se) / 2;
    constructUtil(arr, ss, mid, 2 * si + 1);
    constructUtil(arr, mid + 1, se, 2 * si + 2);
    tree[si] = min(tree[2 * si + 1], tree[2 * si + 2]);
}

void construct(ll *arr, ll n) { constructUtil(arr, 0, n - 1, 0); }

ll getMinUtil(ll ss, ll se, ll qs, ll qe, ll si) {
    if (lazy[si] != 0) {
        tree[si] += lazy[si];
        if (se != ss) {
            lazy[2 * si + 1] += lazy[si];
            lazy[2 * si + 2] += lazy[si];
        }
        lazy[si] = 0;
    }
    if (ss > se || qs > se || ss > qe) {
        return 1e18;
    }
    if (ss >= qs && se <= qe) {
        return tree[si];
    }
    ll mid = (ss + se) / 2;
    return min(getMinUtil(ss, mid, qs, qe, 2 * si + 1),
               getMinUtil(mid + 1, se, qs, qe, 2 * si + 2));
}

ll getMin(ll qs, ll qe, ll n) { return getMinUtil(0, n - 1, qs, qe, 0); }

void updateUtil(ll si, ll ss, ll se, ll us, ll ue, ll diff) {
    if (lazy[si] != 0) {
        tree[si] += lazy[si];
        if (ss != se) {
            lazy[2 * si + 1] += lazy[si];
            lazy[2 * si + 2] += lazy[si];
        }
        lazy[si] = 0;
    }
    if (ss > se || ss > ue || se < us) {
        return;
    }
    if (ss >= us && se <= ue) {
        tree[si] += diff;
        if (se != ss) {
            lazy[2 * si + 1] += diff;
            lazy[2 * si + 2] += diff;
        }
        return;
    }
    ll mid = (ss + se) / 2;
    updateUtil(2 * si + 1, ss, mid, us, ue, diff);
    updateUtil(2 * si + 2, mid + 1, se, us, ue, diff);
    tree[si] = min(tree[2 * si + 1], tree[2 * si + 2]);
}

void update(ll n, ll us, ll ue, ll diff) { updateUtil(0, 0, n - 1, us, ue, diff); }

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);

    ll n;
    cin >> n;
    ll p[n];
    ll a[n];
    ll idx[n + 1]; // p holds values 1..n, so the index array needs n+1 slots
    REP(i, n) {
        cin >> p[i];
        idx[p[i]] = i;
    }
    ll c[n];
    REP(i, n) {
        cin >> a[i];
        c[i] = a[i];
    }
    FOR(i, 1, n) { c[i] += c[i - 1]; }
    construct(c, n);

    ll ans = getMin(0, n - 2, n);
    FOR(i, 1, n + 1) {
        ll index = idx[i];
        ll cost = a[index];
        update(n, 0, index - 1, cost);
        update(n, index, n - 1, -cost);
        ans = min(ans, getMin(0, n - 2, n));
    }
    cout << ans << endl;
}
{-# LANGUAGE DataKinds #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -fno-warn-orphans #-} module Cardano.Wallet.Network.BlockHeadersSpec ( spec ) where import Prelude import Cardano.Wallet.Network.BlockHeaders ( BlockHeaders (..) , dropStartingFromSlotNo , greatestCommonBlockHeader , updateUnstableBlocks ) import Cardano.Wallet.Primitive.Types ( BlockHeader (..), SlotNo (..) ) import Cardano.Wallet.Primitive.Types.Hash ( Hash (..) ) import Control.Monad.Trans.Class ( lift ) import Control.Monad.Trans.Writer ( execWriterT, tell ) import Data.Coerce ( coerce ) import Data.Either ( isLeft ) import Data.List ( find, findIndex, foldl', isPrefixOf ) import Data.Maybe ( fromMaybe ) import Data.Quantity ( Quantity (..) ) import Data.Word ( Word32 ) import Safe ( headMay, lastMay ) import Test.Hspec ( Spec, describe, it, parallel ) import Test.QuickCheck ( Arbitrary (..) , Gen , Property , choose , counterexample , frequency , label , property , vectorOf , withMaxSuccess , (.&&.) , (.||.) , (=/=) , (===) ) import qualified Data.ByteString.Char8 as B8 import qualified Data.Foldable as F import qualified Data.Sequence as Seq import qualified Data.Set as Set {------------------------------------------------------------------------------- Spec -------------------------------------------------------------------------------} spec :: Spec spec = do parallel $ describe "Test Chain" $ do it "Always generate valid test chains" $ property prop_generator parallel $ describe "Unstable block headers" $ do it "Are updated by fetching blocks" $ withMaxSuccess 10000 $ property prop_unstableBlockHeaders it "Does not fetch block headers which we already have" $ withMaxSuccess 1000 $ property prop_updateUnstableBlocksIsEfficient it "Handles failure of node" $ withMaxSuccess 10000 $ property prop_updateUnstableBlocksFailure parallel $ describe "Chain intersection" $ do it "Calculates GCBH" $ withMaxSuccess 1000 $ property prop_greatestCommonBlockHeader {------------------------------------------------------------------------------- Unstable Block Headers Property -------------------------------------------------------------------------------} data TestCase = TestCase { k :: Quantity "block" Word32 , nodeChain :: [BlockHeader] , localChain :: [BlockHeader] } deriving (Show, Eq) prop_unstableBlockHeaders :: TestCase -> Property prop_unstableBlockHeaders TestCase{k, nodeChain, localChain} = counterexample ce prop where prop | hasTip nodeChain = label lbl (bs' === Just expected) | otherwise = label "node has no chain" (bs' === Nothing) expected = mkBlockHeaders $ modelIntersection k localChain nodeChain isect = intersectionPoint localChain nodeChain lbl = case isect of Just i | chainTip localChain == chainTip nodeChain -> "synced" | i == chainEnd nodeChain -> "local chain ahead" | i == chainEnd localChain -> "node chain ahead" | otherwise -> "rollback" Nothing | chainLength localChain == 0 -> "local chain empty" | otherwise -> "no common chain" ce = unlines [ "k = " ++ show k , "Local chain: " ++ showChain localChain , "Node chain: " ++ showChain nodeChain , "Intersects: " ++ maybe "-" show isect , "Expected: " ++ showBlockHeaders expected , "Actual: " ++ maybe "-" showBlockHeaders bs' ] bs = mkBlockHeaders localChain bs' = updateUnstableBlocks (coerce k) getTip getBlockHeader bs where getTip = tipId nodeChain getBlockHeader hh = find ((== hh) . 
headerHash) nodeChain -- | 'updateUnstableBlocks' should not fetch blocks that it already has headers -- for. prop_updateUnstableBlocksIsEfficient :: TestCase -> Property prop_updateUnstableBlocksIsEfficient TestCase{k, nodeChain, localChain} = counterexample ce prop where prop = Set.size (Set.intersection localHashes fetchedHashes) <= 1 localHashes = Set.fromList (map parentHeaderHash (drop 1 localChain)) fetchedHashes = maybe mempty Set.fromList (execWriterT bs') ce = unlines [ "k = " ++ show k , "Local chain: " ++ showChain localChain , "Node chain: " ++ showChain nodeChain , "Fetched: " ++ unwords (map showHash (Set.toList fetchedHashes)) ] bs = mkBlockHeaders localChain -- Run updateUnstableBlocks and record which blocks were fetched. bs' = updateUnstableBlocks (coerce k) getTip getBlockHeader bs where getTip = lift (tipId nodeChain) getBlockHeader h = tell [h] *> lift (find ((== h) . headerHash) nodeChain) prop_updateUnstableBlocksFailure :: TestCase -> Property prop_updateUnstableBlocksFailure TestCase{k, nodeChain, localChain} = label lbl prop where prop | hasTip nodeChain = either ("injected" `isPrefixOf`) (const True) res | otherwise = isLeft res lbl = case res of Right _ -> "success" Left e | not (hasTip nodeChain) -> "node has no chain" | otherwise -> e res = updateUnstableBlocks (coerce k) getTip getBlockHeader bs bs = mkBlockHeaders localChain getTip | chainLength nodeChain `mod` 5 == 0 = Left "injected getTip failed" | otherwise = maybe (Left "no tip") Right $ tipId nodeChain getBlockHeader h = case findIndex ((== h) . headerHash) nodeChain of Just ix | ix `mod` 3 == 0 -> Left "injected getBlock failed" | otherwise -> Right (nodeChain !! ix) Nothing -> Left "block not found" {------------------------------------------------------------------------------- TestCase helpers -------------------------------------------------------------------------------} -- | Convert a test chain to 'BlockHeaders' so that it can be compared for -- equality. mkBlockHeaders :: [BlockHeader] -> BlockHeaders mkBlockHeaders bs = BlockHeaders (Seq.fromList bs) {------------------------------------------------------------------------------- Test chain functions -------------------------------------------------------------------------------} -- | Tip of a test chain tipId :: [BlockHeader] -> Maybe (Hash "BlockHeader") tipId = fmap headerHash . chainTip -- | A test chain needs a headers to have a tip. hasTip :: [BlockHeader] -> Bool hasTip = not . null -- | Length of a test chain, not including the block header after the tip. chainLength :: [BlockHeader] -> Int chainLength = length chainTip :: [BlockHeader] -> Maybe BlockHeader chainTip = lastMay -- | Slot index of the tip of a chain. chainEnd :: [BlockHeader] -> SlotNo chainEnd = maybe (SlotNo 0) slotNo . chainTip -- | Limit the sequence to a certain size by removing items from the beginning. limitChain :: Quantity "block" Word32 -> [BlockHeader] -> [BlockHeader] limitChain (Quantity k) bs = drop (max 0 (length bs - fromIntegral k)) bs showChain :: [BlockHeader] -> String showChain [] = "<empty chain>" showChain bs = unwords (map (showHash . headerHash) bs) {------------------------------------------------------------------------------- Test chain pure model of intersection -------------------------------------------------------------------------------} -- | Merge node chain with local unstable blocks. modelIntersection :: Quantity "block" Word32 -- ^ Maximum number of unstable blocks. -> [BlockHeader] -- ^ Local test chain. 
-> [BlockHeader] -- ^ Remote test chain. -> [BlockHeader] -- ^ New local test chain. modelIntersection k localChain nodeChain = -- We have at most k block headers ... limitChain k $ -- ... added to the end of current unstable block headers ... maybe id dropToSlot (min p (p >>= fixup)) -- ... fetched from the node. nodeChain where p = intersectionPoint localChain nodeChain -- The real code under test will fetch the full k if the local chain is -- ahead of the node chain. It's not incorrect behaviour, and the situation -- should never happen anyway. But we need to add this to the model so that -- the tests pass. fixup q | chainEnd nodeChain > q = Just q | otherwise = Nothing -- | Find the last slot index of the node chain which is the same as the local -- chain. intersectionPoint :: [BlockHeader] -> [BlockHeader] -> Maybe SlotNo intersectionPoint localChain nodeChain = res >>= checkAfter where res = slotNo . fst <$> find (uncurry (==)) pairs pairs = zip localChain' nodeChain localChain' = spliceChains localChain nodeChain -- The block header *after* a block contains the hash of that block. -- So compare that, if it exists. checkAfter sl | after localChain' == after nodeChain = Just sl | otherwise = Nothing where after xs = fmap snd $ find ((== sl) . slotNo . fst) $ zip xs (drop 1 xs) -- | Prepend a source chain before a local chain. spliceChains :: [BlockHeader] -> [BlockHeader] -> [BlockHeader] spliceChains localChain nodeChain = takeToSlot start chaff ++ localChain where start = fromMaybe (SlotNo 0) $ firstSlot localChain -- chaff is the same shape as the node chain, but with different hashes chaff = [bh { parentHeaderHash = Hash "x" } | bh <- nodeChain] -- | The slot index at which the local chain starts. firstSlot :: [BlockHeader] -> Maybe SlotNo firstSlot bs | hasTip bs = slotNo <$> headMay bs | otherwise = Nothing dropToSlot :: SlotNo -> [BlockHeader] -> [BlockHeader] dropToSlot sl = dropWhile ((< sl) . slotNo) takeToSlot :: SlotNo -> [BlockHeader] -> [BlockHeader] takeToSlot sl = takeWhile ((< sl) . slotNo) {------------------------------------------------------------------------------- Intersection of BlockHeaders -------------------------------------------------------------------------------} prop_greatestCommonBlockHeader :: TestCase -> Property prop_greatestCommonBlockHeader TestCase{nodeChain, localChain} = counterexample ce prop where prop = case gcbh of Just bh -> -- The block after gcbh is different (nextBlock bh ubs =/= nextBlock bh lbs .||. nextBlock bh ubs === Nothing) .&&. -- All blocks up to and including the gcbh are the same. (dropStartingFromSlotNo (slotNo bh) ubs === dropStartingFromSlotNo (slotNo bh) lbs) Nothing -> -- No common block means that the first blocks are different, or one -- chain is empty. firstUbs ubs =/= firstUbs lbs .||. isEmpty ubs ce = unlines [ "Local chain: " ++ showChain localChain , "Node chain: " ++ showChain nodeChain , "GCBH: " ++ show gcbh ] gcbh = greatestCommonBlockHeader ubs lbs ubs = mkBlockHeaders nodeChain lbs = mkBlockHeaders localChain -- Utils for poking around BlockHeaders. nextBlock bh (BlockHeaders bs) = seqHead $ Seq.drop 1 $ Seq.dropWhileL (/= bh) bs firstUbs (BlockHeaders bs) = seqHead bs seqHead = Seq.lookup 0 . 
Seq.take 1 isEmpty (BlockHeaders bs) = Seq.null bs {------------------------------------------------------------------------------- Test data generation -------------------------------------------------------------------------------} prop_generator :: TestCase -> Property prop_generator TestCase{nodeChain, localChain} = valid nodeChain .&&. valid localChain where valid c = continuous c .&&. slotsIncreasing c continuous c = counterexample ("Chain not continuous: " <> showChain c) $ and (zipWith (==) (map headerHash c) (map parentHeaderHash (drop 1 c))) slotsIncreasing c = counterexample ("Slots not increasing: " ++ showChain c) $ let sls = map slotNo c in and (zipWith (<) sls (drop 1 sls)) -- | Generate an infinite test chain. Take a slice of the list and use 'tipId' -- and 'headerIds' to access the tip and block headers. The tip of a test chain -- is the penultimate block. chain :: String -> Hash "BlockHeader" -> [BlockHeader] chain p hash0 = [ BlockHeader (SlotNo (fromIntegral n)) (mockBlockHeight n) (hash n) (hash (n - 1)) | n <- [1..] ] where mockBlockHeight = Quantity . fromIntegral hash :: Int -> Hash "BlockHeader" hash n = if n == 0 then hash0 else Hash . B8.pack $ p ++ show n -- | Filter out test chain blocks that correspond to False values, and update -- parent hashes so that the chain is still continuous. removeBlocks :: [Bool] -> [BlockHeader] -> [BlockHeader] removeBlocks holes bs = reverse $ snd $ foldl' maybeMkHole (parentHeaderHash (head bs), []) $ zip holes (zip (map parentHeaderHash $ tail bs) bs) where maybeMkHole (prev, ac) (True, (h, BlockHeader sl _ hh _)) = (h, ((BlockHeader sl bh hh prev):ac)) maybeMkHole pbs _ = pbs bh = Quantity 0 genChain :: Quantity "block" Word32 -> String -> Hash "BlockHeader" -> Gen [BlockHeader] genChain (Quantity k) prefix hash0 = do len <- choose (0, fromIntegral k) holes <- genHoles len return $ take len $ removeBlocks holes $ chain prefix hash0 where genHoles :: Int -> Gen [Bool] genHoles n = let genOne = frequency [(1, pure False), (4, pure True)] in (True:) <$> vectorOf (n - 1) genOne instance Arbitrary TestCase where arbitrary = do k <- arbitrary let genesis = BlockHeader (SlotNo 0) (Quantity 0) (Hash "genesis") (Hash "void") base <- genChain k "base" (headerHash genesis) let nextHash = (headerHash . fromMaybe genesis) (chainTip base) local <- genChain k "local" nextHash node <- genChain k "node" nextHash let baseTip = chainEnd base return TestCase { k = k , nodeChain = [genesis] <> base <> startFrom baseTip node , localChain = [genesis] <> base <> startFrom baseTip local } where startFrom (SlotNo n) xs = [ BlockHeader (SlotNo $ sl+fromIntegral n) bh' hh prev | BlockHeader (SlotNo sl) (Quantity bh) hh prev <- xs , let bh' = Quantity (bh+fromIntegral n+1) ] shrink TestCase{k, nodeChain, localChain} = [ TestCase k' (take n nodeChain) (take l localChain) | (k', n, l) <- shrink (k, length nodeChain, length localChain) ] instance Arbitrary (Quantity "block" Word32) where -- k doesn't need to be large for testing this arbitrary = Quantity . fromIntegral <$> choose (2 :: Int, 30) shrink (Quantity k) = [ Quantity (fromIntegral k') | k' <- shrink (fromIntegral k :: Int) , k' >= 2 ] {------------------------------------------------------------------------------- Extra unstable blocks functions for properties -------------------------------------------------------------------------------} -- | Shows just the headers of the unstable blocks. showBlockHeaders :: BlockHeaders -> String showBlockHeaders = unwords . map showBlockHeader . F.toList . 
getBlockHeaders where showBlockHeader = showHash . headerHash showHash :: Hash a -> String showHash (Hash h) = B8.unpack h
#include <stdio.h>
#include <string.h>
#include <unistd.h>

// Prints the current working directory of each PID given as an argument.
// /proc/<pid>/cwd is a symlink, so it must be read with readlink(); opening
// it with open() would follow the link and yield a directory fd, and read()
// on a directory fails, which is why the original open/read loop never worked.
int main(int argc, const char* const* argv) {
    if (argc <= 1) {
        return 1;
    }
    for (int i = 1; i < argc; i++) {
        char location[64] = "/proc/";
        strcat(location, argv[i]);
        strcat(location, "/cwd");

        char buffer[4096];
        ssize_t count = readlink(location, buffer, sizeof(buffer) - 1);
        if (count == -1) {
            perror(argv[i]);
            return 2;
        }
        buffer[count] = '\0';
        printf("%s\n", buffer);
    }
    return 0;
}
n, m = map(int, input().strip().split())

d = {}
for i in range(n):
    arr = input().strip().split(' ')
    if arr[1] in d:
        d[arr[1]].append(arr[0])
    else:
        d[arr[1]] = [arr[0]]

for i in range(m):
    ss = input()
    arr = ss.split(' ')
    s = arr[1][:-1]  # drop the trailing character of the second token
    print(ss, end=' ')
    for j in d[s]:
        print("#" + j, end=' ')
    print()
/**
 * https://wiki.vg/Protocol#Craft_Recipe_Request<br>
 * <br>
 * This packet is sent when a player clicks a recipe in the crafting book that
 * is craftable (white border).<br>
 * <br>
 * Packet ID: 0x19<br>
 * State: Play<br>
 * Bound To: Server
 *
 * @author Martin
 *
 */
public class PacketPlayInAutoRecipeEvent extends PacketPlayInInventoryEvent {

	private NamespacedKey recipeId;

	/**
	 * Affects the amount of items processed; true if shift is down when clicked.
	 */
	private boolean makeAll;

	public PacketPlayInAutoRecipeEvent(Player injectedPlayer, PacketPlayInAutoRecipe packet) {
		super(injectedPlayer, packet.b());
		Validate.notNull(packet);
		recipeId = PacketUtils.toNamespacedKey(packet.c());
		makeAll = packet.d();
	}

	public PacketPlayInAutoRecipeEvent(Player injectedPlayer, int windowID, NamespacedKey recipeId, boolean makeAll) {
		super(injectedPlayer, windowID);
		Validate.notNull(recipeId);
		this.recipeId = recipeId;
		this.makeAll = makeAll;
	}

	public NamespacedKey getRecipeID() {
		return recipeId;
	}

	public boolean isMakeAll() {
		return makeAll;
	}

	@Override
	public Packet<PacketListenerPlayIn> getNMS() {
		final PacketPlayInAutoRecipe packet = new PacketPlayInAutoRecipe();
		Field.set(packet, "a", getInventoryId());
		Field.set(packet, "b", PacketUtils.toMinecraftKey(recipeId));
		Field.set(packet, "c", makeAll);
		return packet;
	}

	@Override
	public int getPacketID() {
		return 0x19;
	}

	@Override
	public String getProtocolURLString() {
		return "https://wiki.vg/Protocol#Craft_Recipe_Request";
	}
}
N = int(input())

al = "abcdefghijklmnopqrstuvwxyz"
atoi = {s: i for i, s in enumerate(al)}


def func(s, i):
    # Print every string of length N in "normal form": each appended character
    # may be at most one past the largest character used so far.
    if i == N:
        print(s)
        return
    maxi = max(atoi[c] for c in s)
    for c in al[:maxi + 2]:
        func(s + c, i + 1)


func("a", 1)
Finite-size scaling analysis of the S=1 Ising model on the triangular lattice. We study the S=1 Ising model, equivalent to the three-state lattice-gas model, with nearest-neighbor, pairwise interactions on a two-dimensional triangular lattice. We pay particular attention to the antiferromagnetic phase diagrams. We show its relation to other well-studied models (S=1/2 Ising, Blume-Capel, Blume-Emery-Griffiths), classify the ground states, and calculate finite-temperature phase diagrams using transfer matrices and finite-size scaling for infinite strips three and six sites wide. The phase diagrams are quite complicated, with surfaces of first- and second-order transitions that intersect along lines of multicritical points of various kinds, providing a rich laboratory for studying a number of first-order phase transitions, critical and multicritical phenomena within the framework of a single model.
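For reference, the models named above are all special cases of one spin-1 Hamiltonian. The following is the standard Blume-Emery-Griffiths form; the sign conventions and coupling names are the textbook ones, not necessarily those used in the paper:

% Standard BEG Hamiltonian; conventions are assumptions, not the paper's own.
H = -J \sum_{\langle i,j \rangle} s_i s_j
    - K \sum_{\langle i,j \rangle} s_i^2 s_j^2
    + \Delta \sum_i s_i^2,
\qquad s_i \in \{-1, 0, +1\}

Setting K = 0 gives the Blume-Capel model, and taking \Delta \to -\infty suppresses the s_i = 0 state and recovers the S=1/2 Ising model.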
#include <boost/lexical_cast.hpp>
#include <assert.h>
#include "Iop_Spu2_Core.h"
#include "../Log.h"

#define LOG_NAME_PREFIX ("iop_spu2_core_")
#define SPU_BASE_SAMPLING_RATE (48000)

using namespace Iop;
using namespace Iop::Spu2;

#define MAX_ADDRESS_REGISTER (22)
#define MAX_COEFFICIENT_REGISTER (10)

static unsigned int g_addressRegisterMapping[MAX_ADDRESS_REGISTER] =
{
    CSpuBase::FB_SRC_A,
    CSpuBase::FB_SRC_B,
    CSpuBase::IIR_DEST_A0,
    CSpuBase::IIR_DEST_A1,
    CSpuBase::ACC_SRC_A0,
    CSpuBase::ACC_SRC_A1,
    CSpuBase::ACC_SRC_B0,
    CSpuBase::ACC_SRC_B1,
    CSpuBase::IIR_SRC_A0,
    CSpuBase::IIR_SRC_A1,
    CSpuBase::IIR_DEST_B0,
    CSpuBase::IIR_DEST_B1,
    CSpuBase::ACC_SRC_C0,
    CSpuBase::ACC_SRC_C1,
    CSpuBase::ACC_SRC_D0,
    CSpuBase::ACC_SRC_D1,
    CSpuBase::IIR_SRC_B1,
    CSpuBase::IIR_SRC_B0,
    CSpuBase::MIX_DEST_A0,
    CSpuBase::MIX_DEST_A1,
    CSpuBase::MIX_DEST_B0,
    CSpuBase::MIX_DEST_B1
};

static unsigned int g_coefficientRegisterMapping[MAX_COEFFICIENT_REGISTER] =
{
    CSpuBase::IIR_ALPHA,
    CSpuBase::ACC_COEF_A,
    CSpuBase::ACC_COEF_B,
    CSpuBase::ACC_COEF_C,
    CSpuBase::ACC_COEF_D,
    CSpuBase::IIR_COEF,
    CSpuBase::FB_ALPHA,
    CSpuBase::FB_X,
    CSpuBase::IN_COEF_L,
    CSpuBase::IN_COEF_R
};

CCore::CCore(unsigned int coreId, CSpuBase& spuBase)
    : m_coreId(coreId)
    , m_spuBase(spuBase)
{
    m_logName = LOG_NAME_PREFIX + boost::lexical_cast<std::string>(m_coreId);

    m_readDispatch.core = &CCore::ReadRegisterCore;
    m_readDispatch.channel = &CCore::ReadRegisterChannel;

    m_writeDispatch.core = &CCore::WriteRegisterCore;
    m_writeDispatch.channel = &CCore::WriteRegisterChannel;

    Reset();
}

CCore::~CCore()
{
}

void CCore::Reset()
{
}

CSpuBase& CCore::GetSpuBase() const
{
    return m_spuBase;
}

uint16 CCore::GetAddressLo(uint32 address)
{
    return static_cast<uint16>((address >> 1) & 0xFFFF);
}

uint16 CCore::GetAddressHi(uint32 address)
{
    return static_cast<uint16>((address >> (16 + 1)) & 0xFFFF);
}

uint32 CCore::SetAddressLo(uint32 address, uint16 value)
{
    address &= 0xFFFF << (1 + 16);
    address |= value << 1;
    return address;
}

uint32 CCore::SetAddressHi(uint32 address, uint16 value)
{
    address &= 0xFFFF << 1;
    address |= value << (1 + 16);
    return address;
}

uint32 CCore::ReadRegister(uint32 address, uint32 value)
{
    return ProcessRegisterAccess(m_readDispatch, address, value);
}

uint32 CCore::WriteRegister(uint32 address, uint32 value)
{
    return ProcessRegisterAccess(m_writeDispatch, address, value);
}

uint32 CCore::ProcessRegisterAccess(const REGISTER_DISPATCH_INFO& dispatchInfo, uint32 address, uint32 value)
{
    if(address < S_REG_BASE)
    {
        //Channel access
        unsigned int channelId = (address >> 4) & 0x3F;
        address &= ~(0x3F << 4);
        return ((this)->*(dispatchInfo.channel))(channelId, address, value);
    }
    else if(address >= VA_REG_BASE && address < R_REG_BASE)
    {
        //Channel access
        unsigned int channelId = (address - VA_REG_BASE) / 12;
        address -= channelId * 12;
        return ((this)->*(dispatchInfo.channel))(channelId, address, value);
    }
    else
    {
        //Core write
        return ((this)->*(dispatchInfo.core))(0, address, value);
    }
}

uint32 CCore::ReadRegisterCore(unsigned int channelId, uint32 address, uint32 value)
{
    uint32 result = 0;
    switch(address)
    {
    case STATX:
        result = 0x0000;
        if(m_spuBase.GetControl() & CSpuBase::CONTROL_DMA)
        {
            result |= 0x80;
        }
        break;
    case S_ENDX_HI:
        result = m_spuBase.GetEndFlags().h0;
        break;
    case S_ENDX_LO:
        result = m_spuBase.GetEndFlags().h1;
        break;
    case CORE_ATTR:
        result = m_spuBase.GetControl();
        break;
    case A_TS_MODE:
        result = m_spuBase.GetTransferMode();
        break;
    case A_TSA_HI:
        result = GetAddressHi(m_spuBase.GetTransferAddress());
        break;
    case A_ESA_LO:
        result = GetAddressLo(m_spuBase.GetReverbWorkAddressStart());
        break;
    case A_EEA_HI:
        result = GetAddressHi(m_spuBase.GetReverbWorkAddressEnd());
        break;
    }
    LogRead(address, result);
    return result;
}

uint32 CCore::WriteRegisterCore(unsigned int channelId, uint32 address, uint32 value)
{
    if(address >= RVB_A_REG_BASE && address < RVB_A_REG_END)
    {
        //Address reverb register
        unsigned int regIndex = (address - RVB_A_REG_BASE) / 4;
        assert(regIndex < MAX_ADDRESS_REGISTER);
        unsigned int reverbParamId = g_addressRegisterMapping[regIndex];
        uint32 previousValue = m_spuBase.GetReverbParam(reverbParamId);
        if(address & 0x02)
        {
            value = SetAddressLo(previousValue, static_cast<uint16>(value));
        }
        else
        {
            value = SetAddressHi(previousValue, static_cast<uint16>(value));
        }
        m_spuBase.SetReverbParam(reverbParamId, value);
    }
    else if(address >= RVB_C_REG_BASE && address < RVB_C_REG_END)
    {
        //Coefficient reverb register
        unsigned int regIndex = (address - RVB_C_REG_BASE) / 2;
        assert(regIndex < MAX_COEFFICIENT_REGISTER);
        m_spuBase.SetReverbParam(g_coefficientRegisterMapping[regIndex], value);
    }
    else
    {
        switch(address)
        {
        case CORE_ATTR:
            m_spuBase.SetBaseSamplingRate(SPU_BASE_SAMPLING_RATE);
            m_spuBase.SetControl(static_cast<uint16>(value));
            break;
        case A_STD:
            m_spuBase.WriteWord(static_cast<uint16>(value));
            break;
        case A_TS_MODE:
            m_spuBase.SetTransferMode(static_cast<uint16>(value));
            break;
        case S_VMIXER_HI:
            m_spuBase.SetChannelReverbLo(static_cast<uint16>(value));
            break;
        case S_VMIXER_LO:
            m_spuBase.SetChannelReverbHi(static_cast<uint16>(value));
            break;
        case A_KON_HI:
            m_spuBase.SendKeyOn(value);
            break;
        case A_KON_LO:
            m_spuBase.SendKeyOn(value << 16);
            break;
        case A_KOFF_HI:
            m_spuBase.SendKeyOff(value);
            break;
        case A_KOFF_LO:
            m_spuBase.SendKeyOff(value << 16);
            break;
        case S_ENDX_LO:
        case S_ENDX_HI:
            if(value)
            {
                m_spuBase.ClearEndFlags();
            }
            break;
        case A_IRQA_HI:
            m_spuBase.SetIrqAddress(SetAddressHi(m_spuBase.GetIrqAddress(), static_cast<uint16>(value)));
            break;
        case A_IRQA_LO:
            m_spuBase.SetIrqAddress(SetAddressLo(m_spuBase.GetIrqAddress(), static_cast<uint16>(value)));
            break;
        case A_TSA_HI:
            m_spuBase.SetTransferAddress(SetAddressHi(m_spuBase.GetTransferAddress(), static_cast<uint16>(value)));
            break;
        case A_TSA_LO:
            m_spuBase.SetTransferAddress(SetAddressLo(m_spuBase.GetTransferAddress(), static_cast<uint16>(value)));
            break;
        case A_ESA_HI:
            m_spuBase.SetReverbWorkAddressStart(SetAddressHi(m_spuBase.GetReverbWorkAddressStart(), static_cast<uint16>(value)));
            break;
        case A_ESA_LO:
            m_spuBase.SetReverbWorkAddressStart(SetAddressLo(m_spuBase.GetReverbWorkAddressStart(), static_cast<uint16>(value)));
            break;
        case A_EEA_HI:
            m_spuBase.SetReverbWorkAddressEnd(((value & 0x0F) << 17) | 0x1FFFF);
            break;
        }
    }
    LogWrite(address, value);
    return 0;
}

uint32 CCore::ReadRegisterChannel(unsigned int channelId, uint32 address, uint32 value)
{
    assert(channelId < MAX_CHANNEL);
    if(channelId >= MAX_CHANNEL)
    {
        return 0;
    }
    uint32 result = 0;
    CSpuBase::CHANNEL& channel(m_spuBase.GetChannel(channelId));
    switch(address)
    {
    case VP_VOLL:
        result = channel.volumeLeft;
        break;
    case VP_VOLR:
        result = channel.volumeRight;
        break;
    case VP_PITCH:
        result = channel.pitch;
        break;
    case VP_ADSR1:
        result = channel.adsrLevel;
        break;
    case VP_ADSR2:
        result = channel.adsrRate;
        break;
    case VP_ENVX:
        result = (channel.adsrVolume >> 16);
        break;
    case VP_VOLXL:
        result = (channel.volumeLeftAbs >> 16);
        break;
    case VP_VOLXR:
        result = (channel.volumeRightAbs >> 16);
        break;
    case VA_SSA_HI:
        result = GetAddressHi(channel.address);
        break;
    case VA_SSA_LO:
        result = GetAddressLo(channel.address);
        break;
    case VA_LSAX_HI:
        result = GetAddressHi(channel.repeat);
        break;
    case VA_LSAX_LO:
        result = GetAddressLo(channel.repeat);
        break;
    case VA_NAX_HI:
        result = GetAddressHi(channel.current);
        break;
    case VA_NAX_LO:
        result = GetAddressLo(channel.current);
        break;
    }
    LogChannelRead(channelId, address, result);
    return result;
}

uint32 CCore::WriteRegisterChannel(unsigned int channelId, uint32 address, uint32 value)
{
    assert(channelId < MAX_CHANNEL);
    if(channelId >= MAX_CHANNEL)
    {
        return 0;
    }
    LogChannelWrite(channelId, address, value);
    CSpuBase::CHANNEL& channel(m_spuBase.GetChannel(channelId));
    switch(address)
    {
    case VP_VOLL:
        channel.volumeLeft <<= static_cast<uint16>(value);
        if(channel.volumeLeft.mode.mode == 0)
        {
            channel.volumeLeftAbs = channel.volumeLeft.volume.volume << 17;
        }
        break;
    case VP_VOLR:
        channel.volumeRight <<= static_cast<uint16>(value);
        if(channel.volumeRight.mode.mode == 0)
        {
            channel.volumeRightAbs = channel.volumeRight.volume.volume << 17;
        }
        break;
    case VP_PITCH:
        channel.pitch = static_cast<uint16>(value);
        break;
    case VP_ADSR1:
        channel.adsrLevel <<= static_cast<uint16>(value);
        break;
    case VP_ADSR2:
        channel.adsrRate <<= static_cast<uint16>(value);
        break;
    case VP_ENVX:
        channel.adsrVolume = static_cast<uint16>(value);
        break;
    case VA_SSA_HI:
        channel.address = SetAddressHi(channel.address, static_cast<uint16>(value));
        break;
    case VA_SSA_LO:
        channel.address = SetAddressLo(channel.address, static_cast<uint16>(value));
        break;
    case VA_LSAX_HI:
        channel.repeat = SetAddressHi(channel.repeat, static_cast<uint16>(value));
        break;
    case VA_LSAX_LO:
        channel.repeat = SetAddressLo(channel.repeat, static_cast<uint16>(value));
        break;
    }
    return 0;
}

void CCore::LogRead(uint32 address, uint32 value)
{
    auto logName = m_logName.c_str();
#define LOG_GET(registerId) case registerId: CLog::GetInstance().Print(logName, "= " #registerId " = 0x%04X\r\n", value); break;
    switch(address)
    {
        LOG_GET(CORE_ATTR)
        LOG_GET(STATX)
        LOG_GET(S_PMON_HI)
        LOG_GET(S_PMON_LO)
        LOG_GET(S_NON_HI)
        LOG_GET(S_NON_LO)
        LOG_GET(S_VMIXL_HI)
        LOG_GET(S_VMIXL_LO)
        LOG_GET(S_VMIXEL_HI)
        LOG_GET(S_VMIXEL_LO)
        LOG_GET(S_VMIXR_HI)
        LOG_GET(S_VMIXR_LO)
        LOG_GET(S_VMIXER_HI)
        LOG_GET(S_VMIXER_LO)
        LOG_GET(S_ENDX_HI)
        LOG_GET(S_ENDX_LO)
        LOG_GET(A_TSA_HI)
        LOG_GET(A_TSA_LO)
        LOG_GET(A_TS_MODE)
        LOG_GET(A_ESA_HI)
        LOG_GET(A_ESA_LO)
        LOG_GET(A_EEA_HI)
        LOG_GET(A_EEA_LO)
    default:
        CLog::GetInstance().Print(logName, "Read an unknown register 0x%04X.\r\n", address);
        break;
    }
#undef LOG_GET
}

void CCore::LogWrite(uint32 address, uint32 value)
{
    auto logName = m_logName.c_str();
#define LOG_SET(registerId) case registerId: CLog::GetInstance().Print(logName, #registerId " = 0x%04X\r\n", value); break;
    switch(address)
    {
        LOG_SET(S_PMON_HI)
        LOG_SET(S_PMON_LO)
        LOG_SET(S_NON_HI)
        LOG_SET(S_NON_LO)
        LOG_SET(S_VMIXL_HI)
        LOG_SET(S_VMIXL_LO)
        LOG_SET(S_VMIXEL_HI)
        LOG_SET(S_VMIXEL_LO)
        LOG_SET(S_VMIXR_HI)
        LOG_SET(S_VMIXR_LO)
        LOG_SET(S_VMIXER_HI)
        LOG_SET(S_VMIXER_LO)
        LOG_SET(P_MMIX)
        LOG_SET(CORE_ATTR)
        LOG_SET(A_KON_HI)
        LOG_SET(A_KON_LO)
        LOG_SET(A_KOFF_HI)
        LOG_SET(A_KOFF_LO)
        LOG_SET(S_ENDX_HI)
        LOG_SET(S_ENDX_LO)
        LOG_SET(A_IRQA_HI)
        LOG_SET(A_IRQA_LO)
        LOG_SET(A_TSA_HI)
        LOG_SET(A_TSA_LO)
        LOG_SET(A_STD)
        LOG_SET(A_TS_MODE)
        LOG_SET(A_ESA_HI)
        LOG_SET(A_ESA_LO)
        LOG_SET(A_EEA_HI)
        LOG_SET(A_EEA_LO)
        LOG_SET(P_MVOLL)
        LOG_SET(P_MVOLR)
        LOG_SET(P_EVOLL)
        LOG_SET(P_EVOLR)
        LOG_SET(P_BVOLL)
        LOG_SET(P_BVOLR)
    default:
        CLog::GetInstance().Print(logName, "Write 0x%04X to an unknown register 0x%04X.\r\n", value, address);
        break;
    }
#undef LOG_SET
}

void CCore::LogChannelRead(unsigned int channelId, uint32 address, uint32 value)
{
    auto logName = m_logName.c_str();
#define LOG_GET(registerId) case registerId: CLog::GetInstance().Print(logName, "ch%02d: = " #registerId " = 0x%04X\r\n", channelId, value); break;
    switch(address)
    {
        LOG_GET(VP_VOLL)
        LOG_GET(VP_VOLR)
        LOG_GET(VP_PITCH)
        LOG_GET(VP_ADSR1)
        LOG_GET(VP_ADSR2)
        LOG_GET(VP_ENVX)
        LOG_GET(VP_VOLXL)
        LOG_GET(VP_VOLXR)
        LOG_GET(VA_SSA_HI)
        LOG_GET(VA_SSA_LO)
        LOG_GET(VA_LSAX_HI)
        LOG_GET(VA_LSAX_LO)
        LOG_GET(VA_NAX_HI)
        LOG_GET(VA_NAX_LO)
    default:
        CLog::GetInstance().Print(logName, "ch%02d: Read an unknown register 0x%04X.\r\n", channelId, address);
        break;
    }
#undef LOG_GET
}

void CCore::LogChannelWrite(unsigned int channelId, uint32 address, uint32 value)
{
    auto logName = m_logName.c_str();
#define LOG_SET(registerId) case registerId: CLog::GetInstance().Print(logName, "ch%02d: " #registerId " = 0x%04X\r\n", channelId, value); break;
    switch(address)
    {
        LOG_SET(VP_VOLL)
        LOG_SET(VP_VOLR)
        LOG_SET(VP_PITCH)
        LOG_SET(VP_ADSR1)
        LOG_SET(VP_ADSR2)
        LOG_SET(VP_ENVX)
        LOG_SET(VP_VOLXL)
        LOG_SET(VP_VOLXR)
        LOG_SET(VA_SSA_HI)
        LOG_SET(VA_SSA_LO)
        LOG_SET(VA_LSAX_HI)
        LOG_SET(VA_LSAX_LO)
    default:
        CLog::GetInstance().Print(logName, "ch%02d: Wrote %04X to an unknown register 0x%04X.\r\n", channelId, value, address);
        break;
    }
#undef LOG_SET
}
// Copyright (c) 2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use bldr_core::logger::Logger;
use hab_core::package::{Identifiable, PackageIdent, PackageTarget};
use hab_net::socket::DEFAULT_CONTEXT;
use http_gateway::conn::RouteBroker;
use http_gateway::http::helpers::all_visibilities;
use iron::typemap::Key;
use protobuf::{parse_from_bytes, Message};
use protocol::originsrv::*;

use std::collections::{HashSet, VecDeque};
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
use zmq;

use config::Config;
use error::{Error, Result};
use protocol::originsrv::{OriginPackageIdent, UpstreamRequest};
use depot_client::Client as DepotClient;
use server::download_package_from_upstream_depot;

const UPSTREAM_MGR_ADDR: &'static str = "inproc://upstream";
const DEFAULT_POLL_TIMEOUT_MS: u64 = 60_000; // 60 secs

pub struct UpstreamClient;
pub struct UpstreamCli;

impl Key for UpstreamCli {
    type Value = UpstreamClient;
}

impl UpstreamClient {
    pub fn refresh(&self, ident: &OriginPackageIdent, target: &PackageTarget) -> Result<()> {
        let mut req = UpstreamRequest::new();
        req.set_ident(ident.clone());
        req.set_target(target.to_string());
        // TODO: Use a per-thread socket when we move to a post-Iron framework
        let socket = (**DEFAULT_CONTEXT).as_mut().socket(zmq::DEALER).unwrap();
        socket.connect(UPSTREAM_MGR_ADDR).map_err(Error::Zmq)?;
        socket
            .send(&req.write_to_bytes().unwrap(), 0)
            .map_err(Error::Zmq)?;
        Ok(())
    }
}

impl Default for UpstreamClient {
    fn default() -> UpstreamClient {
        UpstreamClient {}
    }
}

pub struct UpstreamMgr {
    config: Config,
    depot_client: Option<DepotClient>,
    upstream_mgr_sock: zmq::Socket,
    want_origins: HashSet<String>,
    logger: Logger,
    msg: zmq::Message,
}

impl UpstreamMgr {
    pub fn new(cfg: &Config) -> Result<Self> {
        let upstream_mgr_sock = (**DEFAULT_CONTEXT)
            .as_mut()
            .socket(zmq::DEALER)
            .map_err(Error::Zmq)?;
        let depot_client = if let Some(ref upstream_depot) = cfg.upstream_depot {
            Some(DepotClient::new(upstream_depot, "builder-upstream", "0.0.0", None).unwrap())
        } else {
            None
        };
        let log_path = cfg.log_dir.clone();
        let mut logger = Logger::init(PathBuf::from(log_path), "builder-upstream.log");
        let want_origins: HashSet<String> =
            cfg.upstream_origins.iter().map(|s| s.to_owned()).collect();
        let msg = format!(
            "UPSTREAM {:?} (origins: {:?})",
            cfg.upstream_depot, cfg.upstream_origins
        );
        logger.log_ident(&msg);
        Ok(UpstreamMgr {
            config: cfg.clone(),
            depot_client: depot_client,
            upstream_mgr_sock: upstream_mgr_sock,
            want_origins: want_origins,
            logger: logger,
            msg: zmq::Message::new().map_err(Error::Zmq)?,
        })
    }

    pub fn start(cfg: &Config) -> Result<JoinHandle<()>> {
        let mut manager = Self::new(cfg)?;
        let (tx, rx) = mpsc::sync_channel(1);
        let handle = thread::Builder::new()
            .name("upstream-manager".to_string())
            .spawn(move || {
                manager.run(tx).unwrap();
            })
            .unwrap();
        match rx.recv() {
            Ok(()) => Ok(handle),
            Err(e) => panic!("upstream-manager thread startup error, err={}", e),
        }
    }

    fn run(&mut self, rz: mpsc::SyncSender<()>) -> Result<()> {
        self.upstream_mgr_sock
            .bind(UPSTREAM_MGR_ADDR)
            .map_err(Error::Zmq)?;
        let mut upstream_mgr_sock = false;
        let mut last_processed = Instant::now();
        let mut requests = VecDeque::new();
        rz.send(()).unwrap();
        info!(
            "upstream-manager is ready to go (upstream_depot: {:?}).",
            self.config.upstream_depot
        );
        loop {
            {
                let mut items = [self.upstream_mgr_sock.as_poll_item(1)];
                if let Err(err) = zmq::poll(&mut items, DEFAULT_POLL_TIMEOUT_MS as i64) {
                    warn!(
                        "Upstream-manager unable to complete ZMQ poll: err {:?}",
                        err
                    );
                };
                if (items[0].get_revents() & zmq::POLLIN) > 0 {
                    upstream_mgr_sock = true;
                }
            }
            if upstream_mgr_sock {
                upstream_mgr_sock = false;
                if let Err(err) = self.upstream_mgr_sock.recv(&mut self.msg, 0) {
                    warn!(
                        "Upstream-manager unable to complete socket receive: err {:?}",
                        err
                    );
                    continue;
                }
                let mut upstream_request: UpstreamRequest =
                    parse_from_bytes(&self.msg).map_err(Error::Protobuf)?;
                // we have to assume ownership of these values here to appease the borrow checker
                // - otherwise it complains about immutable vs mutable borrows
                let msg_ident = upstream_request.take_ident();
                let target = upstream_request.take_target();
                debug!("Upstream received message: {:?}", &upstream_request);
                // We only care about the base ident
                let ident =
                    PackageIdent::new(msg_ident.get_origin(), msg_ident.get_name(), None, None);
                upstream_request.set_ident(OriginPackageIdent::from(ident.clone()));
                upstream_request.set_target(target.clone());
                if self.config.upstream_depot.is_some()
                    && self.want_origins.contains(ident.origin())
                    && !requests.contains(&upstream_request)
                {
                    debug!("Adding {}-{} to work queue", &ident, &target);
                    requests.push_back(upstream_request.clone());
                }
            }
            // Handle potential work in requests queue
            let now = Instant::now();
            if &now > &(last_processed + Duration::from_millis(DEFAULT_POLL_TIMEOUT_MS)) {
                while let Some(upstream_request) = requests.pop_front() {
                    match self.check_request(&upstream_request) {
                        Ok(None) => (),
                        Ok(Some(ref ident)) => {
                            let msg = format!("UPDATED: {}", ident);
                            self.logger.log(&msg);
                        }
                        Err(err) => {
                            let msg = format!(
                                "FAILURE: {}-{} ({:?})",
                                upstream_request.get_ident(),
                                upstream_request.get_target(),
                                err
                            );
                            self.logger.log(&msg);
                        }
                    }
                }
                last_processed = now;
            }
        }
    }

    fn latest_ident(&mut self, ident: &OriginPackageIdent, target: &str) -> Result<PackageIdent> {
        let mut conn = RouteBroker::connect().unwrap();
        let mut request = OriginChannelPackageLatestGet::new();
        request.set_name("stable".to_owned());
        request.set_target(target.to_owned());
        request.set_visibilities(all_visibilities());
        request.set_ident(ident.clone());
        match conn.route::<OriginChannelPackageLatestGet, OriginPackageIdent>(&request) {
            Ok(id) => Ok(id.into()),
            Err(err) => Err(Error::NetError(err)),
        }
    }

    fn check_request(
        &mut self,
        upstream_request: &UpstreamRequest,
    ) -> Result<Option<PackageIdent>> {
        let ident = upstream_request.get_ident();
        let target = upstream_request.get_target();
        debug!("Checking upstream package: {}-{}", ident, target);
        assert!(!ident.fully_qualified());
        let local_ident = match self.latest_ident(ident, target) {
            Ok(i) => Some(i),
            Err(_) => None,
        };
        debug!("Latest local ident: {:?}", local_ident);
        match self.depot_client {
            // We only sync down stable packages from the upstream for now
            Some(ref depot_cli) => {
                match depot_cli.show_package(ident, Some("stable"), None, Some(target)) {
                    Ok(mut package) => {
                        let remote_pkg_ident: PackageIdent = package.take_ident().into();
                        debug!("Got remote ident: {}", remote_pkg_ident);
                        if local_ident.is_none() || remote_pkg_ident > local_ident.unwrap() {
                            let opi: OriginPackageIdent =
                                OriginPackageIdent::from(remote_pkg_ident.clone());
                            debug!("Downloading package {:?} from upstream", opi);
                            if let Err(err) = download_package_from_upstream_depot(
                                &self.config,
                                depot_cli,
                                opi,
                                Some("stable".to_string()),
                                Some(target.to_string()),
                            ) {
                                warn!("Failed to download package from upstream, err {:?}", err);
                                return Err(err);
                            }
                            return Ok(Some(remote_pkg_ident));
                        }
                        Ok(None)
                    }
                    Err(err) => {
                        warn!(
                            "Failed to get package metadata for {} from {:?}, err {:?}",
                            ident, self.config.upstream_depot, err
                        );
                        Err(Error::DepotClientError(err))
                    }
                }
            }
            _ => Ok(None),
        }
    }
}
These are R.E.G.R.E.T.'s website, Facebook page (Update: the page's content seems now to be hidden) and Twitter. This is the US Food & Drug Administration page that gives Gardasil information, which references 772 serious adverse events following administration of Gardasil, out of 23,000,000 doses administered. There is no requirement for proof that the adverse event be connected to Gardasil for it to be included.

Dr Brenda Corcoran, MB, MPH, FFPHMI, holds a diploma in leadership and quality in healthcare, and is a consultant in public health medicine responsible for the coordination of all national immunisation programmes. She is a member of the National Immunisation Advisory Committee.

Anna Cannon has claimed at various times that one in 30 or one in 40 girls who receive Gardasil experience 'very serious side effects' including 'long-term health impairments, hospitalisation long term, wheelchair for life, death, birth defects'. The US Food and Drug Administration's website says that 772 serious adverse events were reported out of 23m doses administered, a rate of 1 in 29,792. The FDA have confirmed to me that these are reported adverse events, and there is no certainty that they were caused by the vaccination. In addition the FDA pointed out that there is both under- and over-reporting. Nobody may bother to report some adverse events, and sometimes a patient or their family might report an adverse event to the FDA, and also to their doctor, who then reports it to both the FDA and the drug company; the drug company then also reports it to the FDA. They have had instances where a single event accounted for three separate reports.

Anna Cannon has claimed that the vaccine causes birth defects. I searched the scientific literature for any reference to this, and found this study, which concluded "Rates of ... major birth defects were not greater than the unexposed [to the vaccine] population rates". I have asked Anna Cannon for her source on this. She has not responded.

Anna Cannon has claimed that the Gardasil product information leaflet is being hidden. In fact the US version is on the FDA website and the Irish version is on the Health Products Regulatory Authority website. A post on the Regret Facebook page claims that this 'hidden' leaflet shows that Merck and the US Centres for Disease Control 'determined' that Gardasil 'kills' one in 912 girls who receive it. In fact, page seven of the US version of the document makes it clear that the deaths were attributable to the normal death rate in the population. Whoever compiled that figure would have had to read through the paragraph stating that the deaths were the result of car accidents, suicide and other causes unrelated to vaccination. An excellent analysis of this is posted here.

Another post was made on Regret's Facebook page, linking to a hoax story that a girl called Meredith Prohaska died as a result of getting the vaccine. The claim is debunked on Snopes here. When I posted the Snopes link without comment, I was subjected to hostile comments and banned from the page. The story is a transparent hoax, falsely implying that the girl died this year; in fact she died in 2014.

Update: I have talked to the FDA and asked them specifically about R.E.G.R.E.T.'s claims. In relation to Anna Cannon saying that one in 30 or one in 40 suffers severe side effects, Lyndsay Meyer of the FDA said "the agency is not aware of any data to support these claims".
In addition, Meyer said, "The FDA takes its responsibilities regarding vaccine safety very seriously ... Both the FDA and CDC are transparent with respect to VAERS reports. VAERS data is freely available to the public through CDC Wonder: https://wonder.cdc.gov/vaers.html."
Men have been wearing wrist watches for over 100 years. It was during World War I that soldiers in the trenches realized the traditional pocket watch of the time was not going to cut it on the battlefield. With limited communications during that time, precision timing was everything, and soldiers couldn't be fishing around for their pocket watches. A rugged wristwatch was soon invented, with a tough exterior and glass to protect it from the rigors of battle.

Many people will propose that the wristwatch is slowly becoming obsolete, mostly due to Millennials who can't seem to focus on anything in the real world. They will go into anaphylactic shock if they are without their iPhone for thirty seconds, which of course also keeps the time very well.

We got together with one of our favorite watch designers – Chronodos – to give you Five Reasons Why Men Should Wear Wristwatches. Chronodos offers men a stylish, yet rugged alternative when it comes to wristwatches. No sissy bands or metro looking faces; see the Chronodos watch collection here.

1. Watches Are Polite – Checking the time via a watch is much more polite than checking on your phone or asking someone. When in the company of others, it's polite to give them your full attention. Looking away to fish around for your iPhone is just rude. A subtle glance at your watch is much more polite. Be polite.

2. Watches Are Becoming Retro – There is truth that fewer people are wearing watches. There is also truth that most things considered retro, vintage or just old school really are cool! You know what's not cool? Wearing a Google, Nike Shock-Jock Fit-Bit metro looking watch that records how many steps you have taken. Really, we're counting our steps now? Retro, not metro, is always in style. Be stylish. Wear a cool watch with a rugged (wide) leather band.

3. Watches Support True Craftsmanship – A man's watch is more than just a timepiece. It embodies tradition, history & everything that is man-made. Wearing a quality watch is supporting and acknowledging true craftsmanship. You are wearing a highly detailed piece of art on your wrist. There are many generic, sheep-looking watches out there, but the true watchmakers are creating impressive and genuine art that also keeps the time. Why wouldn't you want to support a trade like this?

4. Watches Are Possibly The Best Heirloom – Not only is a timepiece a thoughtful gift – it is the quintessential heirloom. Giving a watch you wore for a long time to your son or daughter is a great way to say "I will always be with you". It also sends the message that if you take care of things, they will last. My father gave me his father's watch when I was in my teens. I still have it. Leaving your watch to someone you care about passes on the sentiment that they were important to you.

5. Watches Keep You On Time – Being on time is cool. Arriving late with no excuse has become the norm in today's text-driven society. Want to stand out from the sheep? Be on time without texting. Just show up where you are supposed to be, at the time you are supposed to be there. It will be noticed and appreciated. Using a rugged, handmade, beautifully designed men's watch from Chronodos to get you there on time is even better!

Thank You For Reading and Supporting Our Sponsors!
package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	csvFile, err := os.Open("file-to-read.csv")
	if err != nil {
		log.Fatal(err)
	}
	// Close the file only after we know Open succeeded
	// (deferring before the error check would call Close on a nil handle).
	defer csvFile.Close()

	reader := csv.NewReader(csvFile)
	var aSliceOfMaps []map[string]string
	for {
		values, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Map each column of the row to its field name.
		element := make(map[string]string)
		element["policyID"] = values[0]
		element["statecode"] = values[1]
		element["county"] = values[2]
		element["eq_site_limit"] = values[3]
		element["hu_site_limit"] = values[4]
		element["fl_site_limit"] = values[5]
		element["fr_site_limit"] = values[6]
		element["tiv_2011"] = values[7]
		element["tiv_2012"] = values[8]
		element["eq_site_deductible"] = values[9]
		element["hu_site_deductible"] = values[10]
		element["fl_site_deductible"] = values[11]
		element["fr_site_deductible"] = values[12]
		element["point_latitude"] = values[13]
		aSliceOfMaps = append(aSliceOfMaps, element)
	}

	for _, value := range aSliceOfMaps {
		fmt.Println(value["policyID"], "-", value["county"])
	}
}
import numpy as np

from rllab.misc.instrument import VariantGenerator
from sac.misc.utils import flatten, get_git_rev, deep_update

M = 256
REPARAMETERIZE = True

LSP_POLICY_PARAMS_BASE = {
    'type': 'lsp',
    'coupling_layers': 2,
    's_t_layers': 1,
    'action_prior': 'uniform',
    # 'preprocessing_hidden_sizes': None,
    'preprocessing_output_nonlinearity': 'relu',
    'reparameterize': REPARAMETERIZE,
    'squash': True
}

LSP_POLICY_PARAMS = {
    'swimmer-gym': {  # 2 DoF
        'preprocessing_hidden_sizes': (M, M, 4),
        's_t_units': 2,
    },
    'swimmer-rllab': {  # 2 DoF
        'preprocessing_hidden_sizes': (M, M, 4),
        's_t_units': 2,
    },
    'hopper': {  # 3 DoF
        'preprocessing_hidden_sizes': (M, M, 6),
        's_t_units': 3,
    },
    'half-cheetah': {  # 6 DoF
        'preprocessing_hidden_sizes': (M, M, 12),
        's_t_units': 6,
    },
    'walker': {  # 6 DoF
        'preprocessing_hidden_sizes': (M, M, 12),
        's_t_units': 6,
    },
    'ant': {  # 8 DoF
        'preprocessing_hidden_sizes': (M, M, 16),
        's_t_units': 8,
    },
    'humanoid-gym': {  # 17 DoF
        'preprocessing_hidden_sizes': (M, M, 34),
        's_t_units': 17,
    },
    'humanoid-rllab': {  # 21 DoF
        'preprocessing_hidden_sizes': (M, M, 42),
        's_t_units': 21,
    }
}

GMM_POLICY_PARAMS_BASE = {
    'type': 'gmm',
    'K': 1,
    'reg': 1e-3,
    'action_prior': 'uniform',
    'reparameterize': REPARAMETERIZE
}

GMM_POLICY_PARAMS = {
    'swimmer-rllab': {},  # 2 DoF
    'hopper': {},  # 3 DoF
    'half-cheetah': {},  # 6 DoF
    'walker': {},  # 6 DoF
    'ant': {},  # 8 DoF
    'humanoid-gym': {},  # 17 DoF
    'humanoid-rllab': {},  # 21 DoF
    'humanoid-standup-gym': {},  # 17 DoF
}

GAUSSIAN_POLICY_PARAMS_BASE = {
    'type': 'gaussian',
    'reg': 1e-3,
    'action_prior': 'uniform',
    'reparameterize': REPARAMETERIZE
}

GAUSSIAN_POLICY_PARAMS = {
    'swimmer-rllab': {},  # 2 DoF
    'hopper': {},  # 3 DoF
    'half-cheetah': {},  # 6 DoF
    'walker': {},  # 6 DoF
    'ant': {},  # 8 DoF
    'humanoid-gym': {},  # 17 DoF
    'humanoid-rllab': {},  # 21 DoF
    'humanoid-standup-gym': {},  # 17 DoF
}

POLICY_PARAMS = {
    'lsp': {
        k: dict(LSP_POLICY_PARAMS_BASE, **v)
        for k, v in LSP_POLICY_PARAMS.items()
    },
    'gmm': {
        k: dict(GMM_POLICY_PARAMS_BASE, **v)
        for k, v in GMM_POLICY_PARAMS.items()
    },
    'gaussian': {
        k: dict(GAUSSIAN_POLICY_PARAMS_BASE, **v)
        for k, v in GAUSSIAN_POLICY_PARAMS.items()
    },
}

VALUE_FUNCTION_PARAMS = {
    'layer_size': M,
}

ENV_DOMAIN_PARAMS = {
    'swimmer-rllab': {},  # 2 DoF
    'hopper': {},  # 3 DoF
    'half-cheetah': {},  # 6 DoF
    'walker': {},  # 6 DoF
    'ant': {},  # 8 DoF
    'humanoid-gym': {},  # 17 DoF
    'humanoid-rllab': {},  # 21 DoF
    'humanoid-standup-gym': {},  # 17 DoF
}

ENV_PARAMS = {
    'swimmer-rllab': {},  # 2 DoF
    'hopper': {},  # 3 DoF
    'half-cheetah': {},  # 6 DoF
    'walker': {},  # 6 DoF
    'ant': {  # 8 DoF
        'resume-training': {
            'low_level_policy_path': [
                # 'ant-low-level-policy-00-00/itr_4000.pkl',
            ]
        },
        'cross-maze': {
            'terminate_at_goal': True,
            'goal_reward_weight': [1000],
            'goal_radius': 2,
            'velocity_reward_weight': 0,
            'ctrl_cost_coeff': 0,  # 1e-2,
            'contact_cost_coeff': 0,  # 1e-3,
            'survive_reward': 0,  # 5e-2,
            'goal_distance': 12,
            'goal_angle_range': (0, 2 * np.pi),
            'env_fixed_goal_position': [[6, -6], [6, 6], [12, 0]],
            'pre_trained_policy_path': []
        },
    },
    'humanoid-gym': {  # 17 DoF
        'resume-training': {
            'low_level_policy_path': [
                # 'humanoid-low-level-policy-00-00/itr_4000.pkl',
            ]
        }
    },
    'humanoid-rllab': {},  # 21 DoF
    'humanoid-standup-gym': {},  # 17 DoF
}

ALGORITHM_PARAMS_BASE = {
    'lr': 3e-4,
    'discount': 0.99,
    'target_update_interval': 1,
    'tau': 0.005,
    'reparameterize': REPARAMETERIZE,

    'base_kwargs': {
        'epoch_length': 1000,
        'n_train_repeat': 1,
        'n_initial_exploration_steps': 1000,
        'eval_render': False,
        'eval_n_episodes': 1,
        'eval_deterministic': True,
    }
}

ALGORITHM_PARAMS = {
    'swimmer-rllab': {  # 2 DoF
        'scale_reward': 25,
        'base_kwargs': {
            'n_epochs': 1e3,
        }
    },
    'hopper': {  # 3 DoF
        'scale_reward': 5,
        'base_kwargs': {
            'n_epochs': 1e3,
        }
    },
    'half-cheetah': {  # 6 DoF
        'scale_reward': 5,
        'base_kwargs': {
            'n_epochs': 3e3,
            'n_initial_exploration_steps': 10000,
        }
    },
    'walker': {  # 6 DoF
        'scale_reward': 5,
        'base_kwargs': {
            'n_epochs': 1e3,
        }
    },
    'ant': {  # 8 DoF
        'scale_reward': 5,
        'base_kwargs': {
            'n_epochs': 3e3,
            'n_initial_exploration_steps': 10000,
        }
    },
    'humanoid-gym': {  # 17 DoF
        'scale_reward': 20,
        'base_kwargs': {
            'n_epochs': 1e4,
        }
    },
    'humanoid-rllab': {  # 21 DoF
        'scale_reward': 10,
        'base_kwargs': {
            'n_epochs': 1e4,
        }
    },
    'humanoid-standup-gym': {  # 17 DoF
        'scale_reward': 100,
        'base_kwargs': {
            'n_epochs': 1e4,
        }
    },
}

REPLAY_BUFFER_PARAMS = {
    'max_replay_buffer_size': 1e6,
}

SAMPLER_PARAMS = {
    'max_path_length': 1000,
    'min_pool_size': 1000,
    'batch_size': 256,
}

RUN_PARAMS_BASE = {
    'seed': [1, 2, 3, 4, 5],
    'snapshot_mode': 'gap',
    'snapshot_gap': 1000,
    'sync_pkl': True,
}

RUN_PARAMS = {
    'swimmer-rllab': {  # 2 DoF
        'snapshot_gap': 200
    },
    'hopper': {  # 3 DoF
        'snapshot_gap': 600
    },
    'half-cheetah': {  # 6 DoF
        'snapshot_gap': 2000
    },
    'walker': {  # 6 DoF
        'snapshot_gap': 1000
    },
    'ant': {  # 8 DoF
        'snapshot_gap': 2000
    },
    'humanoid-gym': {  # 17 DoF
        'snapshot_gap': 4000
    },
    'humanoid-rllab': {  # 21 DoF
        'snapshot_gap': 4000
    },
}

DOMAINS = [
    'swimmer-rllab',  # 2 DoF
    'hopper',  # 3 DoF
    'half-cheetah',  # 6 DoF
    'walker',  # 6 DoF
    'ant',  # 8 DoF
    'humanoid-gym',  # 17 DoF  # gym_humanoid
    'humanoid-rllab',  # 21 DoF
    'humanoid-standup-gym',  # 17 DoF  # gym_humanoid
]

TASKS = {
    'swimmer-rllab': [
        'default',
        'multi-direction',
    ],
    'hopper': [
        'default',
    ],
    'half-cheetah': [
        'default',
    ],
    'walker': [
        'default',
    ],
    'ant': [
        'default',
        'multi-direction',
        'cross-maze'
    ],
    'humanoid-gym': [
        'default',
    ],
    'humanoid-rllab': [
        'default',
        'multi-direction'
    ],
    'humanoid-standup-gym': [
        'default',
    ],
}


def parse_domain_and_task(env_name):
    domain = next(domain for domain in DOMAINS if domain in env_name)
    domain_tasks = TASKS[domain]
    task = next((task for task in domain_tasks if task in env_name), 'default')
    return domain, task


def get_variants(domain, task, policy):
    params = {
        'prefix': '{}/{}'.format(domain, task),
        'domain': domain,
        'task': task,
        'git_sha': get_git_rev(),

        'env_params': ENV_PARAMS[domain].get(task, {}),
        'policy_params': POLICY_PARAMS[policy][domain],
        'value_fn_params': VALUE_FUNCTION_PARAMS,
        'algorithm_params': deep_update(
            ALGORITHM_PARAMS_BASE,
            ALGORITHM_PARAMS[domain]
        ),
        'replay_buffer_params': REPLAY_BUFFER_PARAMS,
        'sampler_params': SAMPLER_PARAMS,
        'run_params': deep_update(RUN_PARAMS_BASE, RUN_PARAMS[domain]),
    }

    # TODO: Remove flatten. Our variant generator should support nested params
    params = flatten(params, separator='.')

    vg = VariantGenerator()
    for key, val in params.items():
        if isinstance(val, list) or callable(val):
            vg.add(key, val)
        else:
            vg.add(key, [val])

    return vg
/**
 * The PredicateSplitUpRule might have rewritten disjunctions into complex predicate/union chains. If no rule between
 * the PredicateSplitUpRule and this rule has benefited from these chains, executing them as predicates and unions might
 * be more expensive than having the ExpressionEvaluator run on the original monolithic expression. Controlled by
 * minimum_union_count, this rule reverts the PredicateSplitUpRule's changes by merging multiple PredicateNodes and
 * UnionNodes into a single PredicateNode.
 *
 * EXAMPLE: TPC-DS query 41 benefits from this rule because the PredicateSplitUpRule creates a huge LQP.
 */
class PredicateMergeRule : public AbstractRule {
 public:
  void apply_to(const std::shared_ptr<AbstractLQPNode>& root) const override;

  // Minimum number of UnionNodes a chain must contain before it is merged back into one PredicateNode.
  size_t minimum_union_count{4};

 private:
  void _merge_disjunction(const std::shared_ptr<UnionNode>& union_node) const;
  void _merge_conjunction(const std::shared_ptr<PredicateNode>& predicate_node) const;
};
/* Enable or disable irq according to the 'disable' flag. */
static inline void maskInterrupt(bool_t disable, interrupt_t irq)
{
    if (disable) {
        avic->intdisnum = irq;
    } else {
        avic->intennum = irq;
    }
}
A shipwreck in the Namib desert on the Skeleton Coast of Namibia. Photo: Wolfgang Steiner/Getty Images/iStockphoto

This week, to accompany our cover story on worst-case climate scenarios, we're publishing a series of extended interviews with climatologists on the subject — most of them from the "godfather generation" of scientists who first raised the alarm about global warming several decades ago. Peter Ward is one of the paleontologists responsible for overturning our understanding of most of the Earth's mass extinctions, which, long thought to be caused by asteroid impacts, turned out to have been the result of climate change produced by greenhouse gases (all but the one that killed the dinosaurs, anyway). We spoke the day after Donald Trump announced the United States' withdrawal from the Paris climate accords.

Peter Ward: I wrote an op-ed for the Post, I don't know if they'll publish it. Probably calling Trump a war criminal was a bit much. Anyway how can I help you?

Honestly, what I'd love from you, and what would be most helpful, is if you could first walk me through the analogy you made in that op-ed — exactly how our current situation could lead to the end-Permian Extinction.

My own sense in the short run — strangely enough, the most dangerous thing facing us isn't the extinction scenario, since that's centuries in the future. What scares me more is the economic effect of simple sea-level rise. CNN did a very interesting article some years ago about how much it would cost to retrofit ports around the world for a six-foot sea-level rise. It's literally trillions of dollars. Not only docks, but every airport facing the ocean — Sydney, China, San Francisco especially. Secondly there's the effect on human food. If there's a six-foot sea-level rise, it's astonishing how much food will be wiped out — most rice is in low-lying areas, for instance.

That's in the short term — what about the long term?

The long term is these greenhouse extinctions are devastating. The most recent one was the Paleocene-Eocene Thermal Maximum, and that was caused almost entirely by methane. So the scariest thing we're seeing today is the liberation of methane from higher latitudes, and it's happening far faster than anybody ever predicted.

Did you see the news about the Larsen ice shelf? It's starting to break off. I spent four expeditions down there, just to the north of that, and the amount of retreat is huge. The reason this is important is Antarctica has always been used by the naysayers to say, At least in Antarctica we're not seeing retreating glaciers. Well, now we are — we really are.

And it seems as though the IPCC predictions have been relatively on-target on emissions and warming, but that the predictions on ice loss have been far too conservative, and things are happening much faster than anybody expected.

Absolutely. One thing about IPCC is that the modelers have yet to figure out how to deal with cloud cover. It's very difficult to predict. As you're going to get more water vapor in the atmosphere, you're obviously going to get more cloud cover. That's where the models are still breaking down. It will take an enormous amount of mathematical work. And if anything the sea-level estimates have been underestimates. More than anything else, the ice sheets really control our fates — and Greenland, of course. If we melt all the ice, we get at least 140 meters and probably more — Greenland is 15 meters by itself. But one of the scariest places on Earth is right off Namibia.
When I give a slideshow on global warming and past extinctions, the killer we're seeing is hydrogen sulfide. And right now there is something called the Skeleton Coast off Namibia, and the reason is that we're seeing hydrogen sulfide coming right out of the ocean. The final part of a greenhouse extinction is when that happens worldwide. But the worst place on Earth has to be Bangladesh. They are doomed.

And it's such a densely populated place.

Absolutely. As you know, as the sea level rises, it's like a diving board for storm surge. You're causing storm surge to jump ever farther inland, and that in itself means huge inundation from storm surge — it doesn't have to be the rise to destroy the crops. It's just a bad, bad situation.

Well, walk me through some of the other things you're worrying about. Food seems like one part of the doomsday picture, but what else are you concerned about?

Well, heat. I believe that there are going to be some places that become uninhabitable for humans.

How big a portion of the world's surface do you think that kind of effect will hit?

Certainly Australia. Australia will be deemed uninhabitable. Already Australia — the outback produces not much in the way of crops. But there are kangaroos. I lived in Adelaide, and I've lived through some heat before, but we had 40 degrees centigrade, and 42 and 43 for weeks on end. It really has an effect. You get depressed, you don't want to go procreate because it's too damn hot. You just can't escape it. Everywhere you go on the equator, there is some sort of drug — for the human population to try to get through the day. How do you get through living on the equator? It's so damn miserable. So I think the equator will become uninhabitable. We don't do well in heat.

In your mind, what is our likely warming ceiling — where do you think we're heading, and where do you think the range of uncertainty around that is?

I'm not a modeler, so I'm not the most prescient to do this, but we certainly could hit 5 degrees centigrade in the next century if we consider the status quo path. Coal is still amazingly cheap. Australia still exports coal. Coal continues to be a major problem. The simple fact is, you have 9 billion people, and the standard of living is increasing, so they're all going to want automobiles, they're all going to want steel and iron. And in order to produce that, you need to have sources of energy — huge sources of energy. I don't see wind turbines powering great steel mills. And as India and China become ever greater consumers of consumer goods, this is what's going to drive it, I think.

To me, it's been striking how much the green energy sources are growing and how much the prices are falling — much faster than most prognosticators were predicting a few years ago. But it's also the case that there are all these warming feedback loops that are already in motion. And if some of them get sped up quickly then there's almost nothing we can do to counteract those effects.

You're absolutely right. I've seen something, maybe as much as 10 percent, maybe as much as 15 percent of carbon may be coming from sources we don't even know about. These methane clathrates may be having a huge significance. They are not being modeled. And we really are going to have unintended consequences and much more rapid heating than even the models say — for the simple reason that the models are highly conservative, too conservative.
We've talked about food, we've talked about flooding and sea-level rise, we've talked about direct heat effects. Are there other sorts of broad categories of things you're worried about?

Disease — one of the great killers that people are not recognizing is dengue fever. Malaria gets all the press, HIV of course. But dengue is increasing. The mosquitoes themselves are obviously speciating and becoming ever more immune to pesticides, because we've had these jumping genes.

Jumping genes?

What people are not really understanding is that genes can jump from species to species very easily. My 19th book, actually, is called Lamarck's Revenge, the story of epigenetics. My unique new take is epigenetics in the history of life. The reason we get these outpourings of new body types in the aftermath of mass extinctions is not Darwinian — it's too slow. Epigenetics occur when there is environmental change. It works for microbes all the way up. And so we can expect much more rapid changes in microbial genetics. I think the mode of evolution is going to switch from this random slow Darwinian to a much more rapid form.

And you mentioned a few minutes ago the runaway effects. Can you walk me through exactly what we should be looking out for, most worried about, on that possible track? How does a runaway greenhouse effect get started on this planet?

Let's say we have a deep ocean basin off California, and it's not getting its cold, oxygenated water because the surface water is too warm — warm water holds less oxygen than cold water. So any ocean that is warmer is going to be less oxygenated. And once you switch over to zero oxygen, the microbes that were down there die, and the anaerobic microbes start taking over. And as these things take over, you get this black sea effect, and it begins spreading out, and more and more of these microbes start producing hydrogen sulfide. So as it starts spreading, it grows — like cancer. Cancer of the deep ocean. They start spilling over the deep basin and start moving up — that's called a chemocline. And we're seeing the first part of this in the Pacific.

Are these effects confined to the ocean?

Well, hydrogen sulfide does come out of the ocean. This is why Namibia is so scary.

What else?

One of my Ph.D.s just finished his study on sea-grass die-out. We're losing sea grass globally, and it's a huge economic blow. And it's definitely caused by global warming. Why is it important? Because most seafood spends some time, as juveniles, dining on sea grass. An article on sea-grass die-out itself would be huge.

Tell me about your background and how you came to study mass extinctions generally, but also when you came to think of them in terms of our present day.

I was really just a classically trained paleontologist. To me, the mass extinctions were really interesting in terms of what happens after them — we have this dead period, and the recovery fauna is totally different. And that leads to the idea of, Gee, how much longer will the recovery be if we have an extinction now? Impact was key, and king, for the 1980s and 1990s — every one of the big extinctions was attributed to impact. But it became clear that, in fact, no, these were not impact extinctions. We had to invent a new term. I don't know who came up with it first, but I was in there pretty early calling them greenhouse extinctions. And this new paradigm started coming into play. We're even starting to see that KT also has a greenhouse component — because there was warming right at the impact.
Tell me more about the Permian Extinction, because that's the most dramatic. I wonder what makes it so exceptional and in what ways we can watch out for our heading down that same path.

People always think the intensity of a mass extinction should be related to the extinction — what percentage of creatures were extinct. Increasingly, we're thinking that's a metric, but a more important metric that tells you something about the nature of the devastation is how different is the fauna that comes afterward.

I think people really don't appreciate how much, over the coming decades, nature will be at war with the way that we live.

Absolutely. Absolutely. Look at the storms that are taking place now. You talk about habitability. I've been talking about heat. At what point do hurricanes in the tropics make living there just not worth it? You're being mowed down by these huge numbers of tornadoes. Sooner or later people are going to get the hell out of Dodge. But this is the sort of storm ferocity that's coming.

And we're sort of used to the idea that parts of the world are more prone to things like hurricanes, as part of the cost of living in the Caribbean or whatever. But it seems like those events are going to become much more common still in those areas, but there are also events that are going to become much more common in all the areas where one might flee to from there. So there's a risk of our running out of safe spaces — nothing is going to be protected from extreme weather.

The best case to look at is in the Philippines. That last couple of typhoons they've had — the ferocity and the increase in those things that's been happening. That's the model for what's coming. It isn't anything [like that] in the Caribbean; as brutal as those hurricanes can be, they have nothing on these typhoons.

Looking at recent weather history, are there things that stand out as harbingers?

Well, with the warming you get less and less snowfall in the winter. And one of the areas that's really being hit hardest right now are the low countries of Europe. Because the Alps used to get all this snow. People think the Dutch worry most about the dikes and the floods. But no longer. The Alps are having ever lower snowfall, and you get these enormous storms, so we're getting an increasing rainfall, and that in itself is a gigantic human problem. Obviously floods — the increased flooding caused by ever more water in the atmosphere is going to be really as bad as storms. We'll get these floods all over the planet. And the problem is twofold: They kill people, but they also wash away the soil.

One thing we haven't really talked about is fresh water and the coming threat there — the water scarcity threat.

Absolutely. I really do think we're going to see … The flash points appear, to me, to be China, India, and Pakistan fighting over the water coming from the Himalayas. Water will be the great fight. Water and food will be the two things that the 21st century will fight over.

How do you see those fights playing out?

It's going to be the haves versus the have-nots, as is always the case. But the places that have the highest rates of human population growth are those where water might be most crucial. Nigeria has a huge growth rate, but it's Tunisia and Egypt and Algeria that give Africa its enormous population growth. These are countries in which waters are being reduced.
This is where we have this ever-increasing jihad that is going to be driven not so much by being mad at religion but just trying to get along, and cranky angry people in huge numbers are filling up … Tunisia used to be the granary of Rome — Carthage kept Rome going! I've been to Tunisia, and boy you don't see much wheat there anymore. You see the Sahara moving ever farther north, and reducing crop yields, just as human population is increasing there.

It's all pretty bleak.

Yeah, it is. We need to slow human population growth. But our White House is doing everything it can to make sure climate change happens. It's strange I have a longing for the Bush years — I thought nothing could be worse, but now those are the good old days! The places that are going to be hit hardest by climate change are the places where his voters are — the Midwest, the Dust Bowl states. Which means the anger that elected him is going to continue.

Big-picture question: A while ago, Stephen Hawking made some headlines by saying that in order for humanity to survive we had to figure out a way to colonize at least one planet within 100 years. How reasonable do you find that kind of warning, or how insanely alarmist?

Well, he may be the smartest guy on the planet, but boy, I just think this is inane. My sense of it is, with our technology, we're just too good, we can engineer and keep some part of us alive on Earth. The only way out of it would be a wholesale nuclear exchange. But barring that, a greenhouse world won't kill us all off. If worse comes to worst, we'll have gas masks. But what I would advocate is — just like that seed vault, in Norway, that there should be hundreds of thousands of frozen eggs, human eggs, that are taken off-planet. This could just be an orbiting facility carrying seed stock itself. But colonizing Mars? Why? There's lots of areas that would be easier to put a dome over on Earth than it would be on Mars, because at least you can breathe the air. That alone! Humans will never be able to send off a colony, a breeding colony, to another star system. The only way you could do this is to send fertilized eggs. But one possibility is we are stuck here. And if you look at the Fermi paradox, it could be that lots of organisms are stuck on their planets because the galaxy has not been colonized. I've been doing these web talks with NASA people. One of the really interesting concepts around the Fermi paradox is the Great Filter — that civilizations rise, but there's an environmental filter that causes them to die off again, and disappear fairly quickly. If you look at planet Earth, the filtering we've had has been in these mass extinctions.

I think that's about it from me. Thank you.

It's been a fun talk. Go get 'em, man. We need people out there like you. I mean it. Though you're not going to get thanked for it, you know.
/** * This class represents a color expressed in the indexed XTerm 256 color extension, where each color is defined in a * lookup-table. All in all, there are 256 codes, but in order to know which one to know you either need to have the * table at hand, or you can use the two static helper methods which can help you convert from three 8-bit * RGB values to the closest approximate indexed color number. If you are interested, the 256 index values are * actually divided like this:<br> * 0 .. 15 - System colors, same as ANSI, but the actual rendered color depends on the terminal emulators color scheme<br> * 16 .. 231 - Forms a 6x6x6 RGB color cube<br> * 232 .. 255 - A gray scale ramp (without black and white endpoints)<br> * <p> * Support for indexed colors is somewhat widely adopted, not as much as the ANSI colors (TextColor.ANSI) but more * than the RGB (TextColor.RGB). * <p> * For more details on this, please see <a * href="https://github.com/robertknight/konsole/blob/master/user-doc/README.moreColors"> * this</a> commit message to Konsole. */ class Indexed implements TextColor { private static final byte[][] COLOR_TABLE = new byte[][]{ //These are the standard 16-color VGA palette entries {(byte) 0, (byte) 0, (byte) 0}, {(byte) 170, (byte) 0, (byte) 0}, {(byte) 0, (byte) 170, (byte) 0}, {(byte) 170, (byte) 85, (byte) 0}, {(byte) 0, (byte) 0, (byte) 170}, {(byte) 170, (byte) 0, (byte) 170}, {(byte) 0, (byte) 170, (byte) 170}, {(byte) 170, (byte) 170, (byte) 170}, {(byte) 85, (byte) 85, (byte) 85}, {(byte) 255, (byte) 85, (byte) 85}, {(byte) 85, (byte) 255, (byte) 85}, {(byte) 255, (byte) 255, (byte) 85}, {(byte) 85, (byte) 85, (byte) 255}, {(byte) 255, (byte) 85, (byte) 255}, {(byte) 85, (byte) 255, (byte) 255}, {(byte) 255, (byte) 255, (byte) 255}, //Starting 6x6x6 RGB color cube from 16 {(byte) 0x00, (byte) 0x00, (byte) 0x00}, {(byte) 0x00, (byte) 0x00, (byte) 0x5f}, {(byte) 0x00, (byte) 0x00, (byte) 0x87}, {(byte) 0x00, (byte) 0x00, (byte) 0xaf}, {(byte) 0x00, (byte) 0x00, (byte) 0xd7}, {(byte) 0x00, (byte) 0x00, (byte) 0xff}, {(byte) 0x00, (byte) 0x5f, (byte) 0x00}, {(byte) 0x00, (byte) 0x5f, (byte) 0x5f}, {(byte) 0x00, (byte) 0x5f, (byte) 0x87}, {(byte) 0x00, (byte) 0x5f, (byte) 0xaf}, {(byte) 0x00, (byte) 0x5f, (byte) 0xd7}, {(byte) 0x00, (byte) 0x5f, (byte) 0xff}, {(byte) 0x00, (byte) 0x87, (byte) 0x00}, {(byte) 0x00, (byte) 0x87, (byte) 0x5f}, {(byte) 0x00, (byte) 0x87, (byte) 0x87}, {(byte) 0x00, (byte) 0x87, (byte) 0xaf}, {(byte) 0x00, (byte) 0x87, (byte) 0xd7}, {(byte) 0x00, (byte) 0x87, (byte) 0xff}, {(byte) 0x00, (byte) 0xaf, (byte) 0x00}, {(byte) 0x00, (byte) 0xaf, (byte) 0x5f}, {(byte) 0x00, (byte) 0xaf, (byte) 0x87}, {(byte) 0x00, (byte) 0xaf, (byte) 0xaf}, {(byte) 0x00, (byte) 0xaf, (byte) 0xd7}, {(byte) 0x00, (byte) 0xaf, (byte) 0xff}, {(byte) 0x00, (byte) 0xd7, (byte) 0x00}, {(byte) 0x00, (byte) 0xd7, (byte) 0x5f}, {(byte) 0x00, (byte) 0xd7, (byte) 0x87}, {(byte) 0x00, (byte) 0xd7, (byte) 0xaf}, {(byte) 0x00, (byte) 0xd7, (byte) 0xd7}, {(byte) 0x00, (byte) 0xd7, (byte) 0xff}, {(byte) 0x00, (byte) 0xff, (byte) 0x00}, {(byte) 0x00, (byte) 0xff, (byte) 0x5f}, {(byte) 0x00, (byte) 0xff, (byte) 0x87}, {(byte) 0x00, (byte) 0xff, (byte) 0xaf}, {(byte) 0x00, (byte) 0xff, (byte) 0xd7}, {(byte) 0x00, (byte) 0xff, (byte) 0xff}, {(byte) 0x5f, (byte) 0x00, (byte) 0x00}, {(byte) 0x5f, (byte) 0x00, (byte) 0x5f}, {(byte) 0x5f, (byte) 0x00, (byte) 0x87}, {(byte) 0x5f, (byte) 0x00, (byte) 0xaf}, {(byte) 0x5f, (byte) 0x00, (byte) 0xd7}, {(byte) 0x5f, (byte) 0x00, (byte) 0xff}, {(byte) 0x5f, 
(byte) 0x5f, (byte) 0x00}, {(byte) 0x5f, (byte) 0x5f, (byte) 0x5f}, {(byte) 0x5f, (byte) 0x5f, (byte) 0x87}, {(byte) 0x5f, (byte) 0x5f, (byte) 0xaf}, {(byte) 0x5f, (byte) 0x5f, (byte) 0xd7}, {(byte) 0x5f, (byte) 0x5f, (byte) 0xff}, {(byte) 0x5f, (byte) 0x87, (byte) 0x00}, {(byte) 0x5f, (byte) 0x87, (byte) 0x5f}, {(byte) 0x5f, (byte) 0x87, (byte) 0x87}, {(byte) 0x5f, (byte) 0x87, (byte) 0xaf}, {(byte) 0x5f, (byte) 0x87, (byte) 0xd7}, {(byte) 0x5f, (byte) 0x87, (byte) 0xff}, {(byte) 0x5f, (byte) 0xaf, (byte) 0x00}, {(byte) 0x5f, (byte) 0xaf, (byte) 0x5f}, {(byte) 0x5f, (byte) 0xaf, (byte) 0x87}, {(byte) 0x5f, (byte) 0xaf, (byte) 0xaf}, {(byte) 0x5f, (byte) 0xaf, (byte) 0xd7}, {(byte) 0x5f, (byte) 0xaf, (byte) 0xff}, {(byte) 0x5f, (byte) 0xd7, (byte) 0x00}, {(byte) 0x5f, (byte) 0xd7, (byte) 0x5f}, {(byte) 0x5f, (byte) 0xd7, (byte) 0x87}, {(byte) 0x5f, (byte) 0xd7, (byte) 0xaf}, {(byte) 0x5f, (byte) 0xd7, (byte) 0xd7}, {(byte) 0x5f, (byte) 0xd7, (byte) 0xff}, {(byte) 0x5f, (byte) 0xff, (byte) 0x00}, {(byte) 0x5f, (byte) 0xff, (byte) 0x5f}, {(byte) 0x5f, (byte) 0xff, (byte) 0x87}, {(byte) 0x5f, (byte) 0xff, (byte) 0xaf}, {(byte) 0x5f, (byte) 0xff, (byte) 0xd7}, {(byte) 0x5f, (byte) 0xff, (byte) 0xff}, {(byte) 0x87, (byte) 0x00, (byte) 0x00}, {(byte) 0x87, (byte) 0x00, (byte) 0x5f}, {(byte) 0x87, (byte) 0x00, (byte) 0x87}, {(byte) 0x87, (byte) 0x00, (byte) 0xaf}, {(byte) 0x87, (byte) 0x00, (byte) 0xd7}, {(byte) 0x87, (byte) 0x00, (byte) 0xff}, {(byte) 0x87, (byte) 0x5f, (byte) 0x00}, {(byte) 0x87, (byte) 0x5f, (byte) 0x5f}, {(byte) 0x87, (byte) 0x5f, (byte) 0x87}, {(byte) 0x87, (byte) 0x5f, (byte) 0xaf}, {(byte) 0x87, (byte) 0x5f, (byte) 0xd7}, {(byte) 0x87, (byte) 0x5f, (byte) 0xff}, {(byte) 0x87, (byte) 0x87, (byte) 0x00}, {(byte) 0x87, (byte) 0x87, (byte) 0x5f}, {(byte) 0x87, (byte) 0x87, (byte) 0x87}, {(byte) 0x87, (byte) 0x87, (byte) 0xaf}, {(byte) 0x87, (byte) 0x87, (byte) 0xd7}, {(byte) 0x87, (byte) 0x87, (byte) 0xff}, {(byte) 0x87, (byte) 0xaf, (byte) 0x00}, {(byte) 0x87, (byte) 0xaf, (byte) 0x5f}, {(byte) 0x87, (byte) 0xaf, (byte) 0x87}, {(byte) 0x87, (byte) 0xaf, (byte) 0xaf}, {(byte) 0x87, (byte) 0xaf, (byte) 0xd7}, {(byte) 0x87, (byte) 0xaf, (byte) 0xff}, {(byte) 0x87, (byte) 0xd7, (byte) 0x00}, {(byte) 0x87, (byte) 0xd7, (byte) 0x5f}, {(byte) 0x87, (byte) 0xd7, (byte) 0x87}, {(byte) 0x87, (byte) 0xd7, (byte) 0xaf}, {(byte) 0x87, (byte) 0xd7, (byte) 0xd7}, {(byte) 0x87, (byte) 0xd7, (byte) 0xff}, {(byte) 0x87, (byte) 0xff, (byte) 0x00}, {(byte) 0x87, (byte) 0xff, (byte) 0x5f}, {(byte) 0x87, (byte) 0xff, (byte) 0x87}, {(byte) 0x87, (byte) 0xff, (byte) 0xaf}, {(byte) 0x87, (byte) 0xff, (byte) 0xd7}, {(byte) 0x87, (byte) 0xff, (byte) 0xff}, {(byte) 0xaf, (byte) 0x00, (byte) 0x00}, {(byte) 0xaf, (byte) 0x00, (byte) 0x5f}, {(byte) 0xaf, (byte) 0x00, (byte) 0x87}, {(byte) 0xaf, (byte) 0x00, (byte) 0xaf}, {(byte) 0xaf, (byte) 0x00, (byte) 0xd7}, {(byte) 0xaf, (byte) 0x00, (byte) 0xff}, {(byte) 0xaf, (byte) 0x5f, (byte) 0x00}, {(byte) 0xaf, (byte) 0x5f, (byte) 0x5f}, {(byte) 0xaf, (byte) 0x5f, (byte) 0x87}, {(byte) 0xaf, (byte) 0x5f, (byte) 0xaf}, {(byte) 0xaf, (byte) 0x5f, (byte) 0xd7}, {(byte) 0xaf, (byte) 0x5f, (byte) 0xff}, {(byte) 0xaf, (byte) 0x87, (byte) 0x00}, {(byte) 0xaf, (byte) 0x87, (byte) 0x5f}, {(byte) 0xaf, (byte) 0x87, (byte) 0x87}, {(byte) 0xaf, (byte) 0x87, (byte) 0xaf}, {(byte) 0xaf, (byte) 0x87, (byte) 0xd7}, {(byte) 0xaf, (byte) 0x87, (byte) 0xff}, {(byte) 0xaf, (byte) 0xaf, (byte) 0x00}, {(byte) 0xaf, (byte) 0xaf, (byte) 0x5f}, {(byte) 0xaf, (byte) 0xaf, (byte) 0x87}, 
{(byte) 0xaf, (byte) 0xaf, (byte) 0xaf}, {(byte) 0xaf, (byte) 0xaf, (byte) 0xd7}, {(byte) 0xaf, (byte) 0xaf, (byte) 0xff}, {(byte) 0xaf, (byte) 0xd7, (byte) 0x00}, {(byte) 0xaf, (byte) 0xd7, (byte) 0x5f}, {(byte) 0xaf, (byte) 0xd7, (byte) 0x87}, {(byte) 0xaf, (byte) 0xd7, (byte) 0xaf}, {(byte) 0xaf, (byte) 0xd7, (byte) 0xd7}, {(byte) 0xaf, (byte) 0xd7, (byte) 0xff}, {(byte) 0xaf, (byte) 0xff, (byte) 0x00}, {(byte) 0xaf, (byte) 0xff, (byte) 0x5f}, {(byte) 0xaf, (byte) 0xff, (byte) 0x87}, {(byte) 0xaf, (byte) 0xff, (byte) 0xaf}, {(byte) 0xaf, (byte) 0xff, (byte) 0xd7}, {(byte) 0xaf, (byte) 0xff, (byte) 0xff}, {(byte) 0xd7, (byte) 0x00, (byte) 0x00}, {(byte) 0xd7, (byte) 0x00, (byte) 0x5f}, {(byte) 0xd7, (byte) 0x00, (byte) 0x87}, {(byte) 0xd7, (byte) 0x00, (byte) 0xaf}, {(byte) 0xd7, (byte) 0x00, (byte) 0xd7}, {(byte) 0xd7, (byte) 0x00, (byte) 0xff}, {(byte) 0xd7, (byte) 0x5f, (byte) 0x00}, {(byte) 0xd7, (byte) 0x5f, (byte) 0x5f}, {(byte) 0xd7, (byte) 0x5f, (byte) 0x87}, {(byte) 0xd7, (byte) 0x5f, (byte) 0xaf}, {(byte) 0xd7, (byte) 0x5f, (byte) 0xd7}, {(byte) 0xd7, (byte) 0x5f, (byte) 0xff}, {(byte) 0xd7, (byte) 0x87, (byte) 0x00}, {(byte) 0xd7, (byte) 0x87, (byte) 0x5f}, {(byte) 0xd7, (byte) 0x87, (byte) 0x87}, {(byte) 0xd7, (byte) 0x87, (byte) 0xaf}, {(byte) 0xd7, (byte) 0x87, (byte) 0xd7}, {(byte) 0xd7, (byte) 0x87, (byte) 0xff}, {(byte) 0xd7, (byte) 0xaf, (byte) 0x00}, {(byte) 0xd7, (byte) 0xaf, (byte) 0x5f}, {(byte) 0xd7, (byte) 0xaf, (byte) 0x87}, {(byte) 0xd7, (byte) 0xaf, (byte) 0xaf}, {(byte) 0xd7, (byte) 0xaf, (byte) 0xd7}, {(byte) 0xd7, (byte) 0xaf, (byte) 0xff}, {(byte) 0xd7, (byte) 0xd7, (byte) 0x00}, {(byte) 0xd7, (byte) 0xd7, (byte) 0x5f}, {(byte) 0xd7, (byte) 0xd7, (byte) 0x87}, {(byte) 0xd7, (byte) 0xd7, (byte) 0xaf}, {(byte) 0xd7, (byte) 0xd7, (byte) 0xd7}, {(byte) 0xd7, (byte) 0xd7, (byte) 0xff}, {(byte) 0xd7, (byte) 0xff, (byte) 0x00}, {(byte) 0xd7, (byte) 0xff, (byte) 0x5f}, {(byte) 0xd7, (byte) 0xff, (byte) 0x87}, {(byte) 0xd7, (byte) 0xff, (byte) 0xaf}, {(byte) 0xd7, (byte) 0xff, (byte) 0xd7}, {(byte) 0xd7, (byte) 0xff, (byte) 0xff}, {(byte) 0xff, (byte) 0x00, (byte) 0x00}, {(byte) 0xff, (byte) 0x00, (byte) 0x5f}, {(byte) 0xff, (byte) 0x00, (byte) 0x87}, {(byte) 0xff, (byte) 0x00, (byte) 0xaf}, {(byte) 0xff, (byte) 0x00, (byte) 0xd7}, {(byte) 0xff, (byte) 0x00, (byte) 0xff}, {(byte) 0xff, (byte) 0x5f, (byte) 0x00}, {(byte) 0xff, (byte) 0x5f, (byte) 0x5f}, {(byte) 0xff, (byte) 0x5f, (byte) 0x87}, {(byte) 0xff, (byte) 0x5f, (byte) 0xaf}, {(byte) 0xff, (byte) 0x5f, (byte) 0xd7}, {(byte) 0xff, (byte) 0x5f, (byte) 0xff}, {(byte) 0xff, (byte) 0x87, (byte) 0x00}, {(byte) 0xff, (byte) 0x87, (byte) 0x5f}, {(byte) 0xff, (byte) 0x87, (byte) 0x87}, {(byte) 0xff, (byte) 0x87, (byte) 0xaf}, {(byte) 0xff, (byte) 0x87, (byte) 0xd7}, {(byte) 0xff, (byte) 0x87, (byte) 0xff}, {(byte) 0xff, (byte) 0xaf, (byte) 0x00}, {(byte) 0xff, (byte) 0xaf, (byte) 0x5f}, {(byte) 0xff, (byte) 0xaf, (byte) 0x87}, {(byte) 0xff, (byte) 0xaf, (byte) 0xaf}, {(byte) 0xff, (byte) 0xaf, (byte) 0xd7}, {(byte) 0xff, (byte) 0xaf, (byte) 0xff}, {(byte) 0xff, (byte) 0xd7, (byte) 0x00}, {(byte) 0xff, (byte) 0xd7, (byte) 0x5f}, {(byte) 0xff, (byte) 0xd7, (byte) 0x87}, {(byte) 0xff, (byte) 0xd7, (byte) 0xaf}, {(byte) 0xff, (byte) 0xd7, (byte) 0xd7}, {(byte) 0xff, (byte) 0xd7, (byte) 0xff}, {(byte) 0xff, (byte) 0xff, (byte) 0x00}, {(byte) 0xff, (byte) 0xff, (byte) 0x5f}, {(byte) 0xff, (byte) 0xff, (byte) 0x87}, {(byte) 0xff, (byte) 0xff, (byte) 0xaf}, {(byte) 0xff, (byte) 0xff, (byte) 0xd7}, {(byte) 0xff, (byte) 0xff, 
(byte) 0xff},
        //Grey-scale ramp from 232
        {(byte) 0x08, (byte) 0x08, (byte) 0x08}, {(byte) 0x12, (byte) 0x12, (byte) 0x12},
        {(byte) 0x1c, (byte) 0x1c, (byte) 0x1c}, {(byte) 0x26, (byte) 0x26, (byte) 0x26},
        {(byte) 0x30, (byte) 0x30, (byte) 0x30}, {(byte) 0x3a, (byte) 0x3a, (byte) 0x3a},
        {(byte) 0x44, (byte) 0x44, (byte) 0x44}, {(byte) 0x4e, (byte) 0x4e, (byte) 0x4e},
        {(byte) 0x58, (byte) 0x58, (byte) 0x58}, {(byte) 0x62, (byte) 0x62, (byte) 0x62},
        {(byte) 0x6c, (byte) 0x6c, (byte) 0x6c}, {(byte) 0x76, (byte) 0x76, (byte) 0x76},
        {(byte) 0x80, (byte) 0x80, (byte) 0x80}, {(byte) 0x8a, (byte) 0x8a, (byte) 0x8a},
        {(byte) 0x94, (byte) 0x94, (byte) 0x94}, {(byte) 0x9e, (byte) 0x9e, (byte) 0x9e},
        {(byte) 0xa8, (byte) 0xa8, (byte) 0xa8}, {(byte) 0xb2, (byte) 0xb2, (byte) 0xb2},
        {(byte) 0xbc, (byte) 0xbc, (byte) 0xbc}, {(byte) 0xc6, (byte) 0xc6, (byte) 0xc6},
        {(byte) 0xd0, (byte) 0xd0, (byte) 0xd0}, {(byte) 0xda, (byte) 0xda, (byte) 0xda},
        {(byte) 0xe4, (byte) 0xe4, (byte) 0xe4}, {(byte) 0xee, (byte) 0xee, (byte) 0xee}
    };

    private final int colorIndex;

    /**
     * Creates a new TextColor using the XTerm 256 color indexed mode, with the specified index value. You must
     * choose a value between 0 and 255.
     *
     * @param colorIndex Index value to use for this color.
     */
    public Indexed(int colorIndex) {
        if (colorIndex > 255 || colorIndex < 0) {
            throw new IllegalArgumentException("Cannot create a Color.Indexed with a color index of " + colorIndex +
                    ", must be in the range of 0-255");
        }
        this.colorIndex = colorIndex;
    }

    /**
     * Picks out a color approximated from the supplied RGB components
     *
     * @param red Red intensity, from 0 to 255
     * @param green Green intensity, from 0 to 255
     * @param blue Blue intensity, from 0 to 255
     * @return Nearest color from the 6x6x6 RGB color cube or from the 24 entries grey-scale ramp (whichever is closest)
     */
    public static Indexed fromRGB(int red, int green, int blue) {
        if (red < 0 || red > 255) {
            throw new IllegalArgumentException("fromRGB: red is outside of valid range (0-255)");
        }
        if (green < 0 || green > 255) {
            throw new IllegalArgumentException("fromRGB: green is outside of valid range (0-255)");
        }
        if (blue < 0 || blue > 255) {
            throw new IllegalArgumentException("fromRGB: blue is outside of valid range (0-255)");
        }

        int rescaledRed = (int) (((double) red / 255.0) * 5.0);
        int rescaledGreen = (int) (((double) green / 255.0) * 5.0);
        int rescaledBlue = (int) (((double) blue / 255.0) * 5.0);
        int index = rescaledBlue + (6 * rescaledGreen) + (36 * rescaledRed) + 16;
        Indexed fromColorCube = new Indexed(index);
        Indexed fromGreyRamp = fromGreyRamp((red + green + blue) / 3);

        //Now figure out which one is closest
        int coloredDistance = ((red - fromColorCube.getRed()) * (red - fromColorCube.getRed())) +
                ((green - fromColorCube.getGreen()) * (green - fromColorCube.getGreen())) +
                ((blue - fromColorCube.getBlue()) * (blue - fromColorCube.getBlue()));
        int greyDistance = ((red - fromGreyRamp.getRed()) * (red - fromGreyRamp.getRed())) +
                ((green - fromGreyRamp.getGreen()) * (green - fromGreyRamp.getGreen())) +
                ((blue - fromGreyRamp.getBlue()) * (blue - fromGreyRamp.getBlue()));
        if (coloredDistance < greyDistance) {
            return fromColorCube;
        }
        else {
            return fromGreyRamp;
        }
    }

    /**
     * Picks out a color from the grey-scale ramp area of the color index.
     *
     * @param intensity Intensity, 0 - 255
     * @return Indexed color from the grey-scale ramp which is the best match for the supplied intensity
     */
    private static Indexed fromGreyRamp(int intensity) {
        int rescaled = (int) (((double) intensity / 255.0) * 23.0) + 232;
        return new Indexed(rescaled);
    }

    @Override
    public byte[] getForegroundSGRSequence() {
        return ("38;5;" + colorIndex).getBytes();
    }

    @Override
    public byte[] getBackgroundSGRSequence() {
        return ("48;5;" + colorIndex).getBytes();
    }

    @Override
    public int getRed() {
        return COLOR_TABLE[colorIndex][0] & 0x000000ff;
    }

    @Override
    public int getGreen() {
        return COLOR_TABLE[colorIndex][1] & 0x000000ff;
    }

    @Override
    public int getBlue() {
        return COLOR_TABLE[colorIndex][2] & 0x000000ff;
    }

    @Override
    public Color toColor() {
        return new Color(getRed(), getGreen(), getBlue());
    }

    @Override
    public String toString() {
        return "{IndexedColor:" + colorIndex + "}";
    }

    @Override
    public int hashCode() {
        int hash = 3;
        hash = 43 * hash + this.colorIndex;
        return hash;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        final Indexed other = (Indexed) obj;
        return this.colorIndex == other.colorIndex;
    }
}
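As a companion to the fromRGB()/fromGreyRamp() pair above, here is a minimal Python sketch of the same nearest-color selection. The 6x6x6 color cube occupies xterm-256 palette indexes 16-231 (index = 16 + 36r + 6g + b with each channel rescaled to 0-5) and the grey-scale ramp occupies 232-255, matching the COLOR_TABLE values above; the function name is illustrative only, not part of the Java class.

def xterm256_from_rgb(red: int, green: int, blue: int) -> int:
    """Approximate an 8-bit RGB color with an xterm-256 palette index."""
    # Candidate from the 6x6x6 color cube (indexes 16-231).
    r, g, b = (int(c / 255 * 5) for c in (red, green, blue))
    cube_index = 16 + 36 * r + 6 * g + b
    # Candidate from the 24-entry grey ramp (indexes 232-255, levels 0x08..0xee).
    intensity = (red + green + blue) // 3
    grey_index = 232 + int(intensity / 255 * 23)
    grey_level = 8 + (grey_index - 232) * 10
    # Compare squared RGB distances, mirroring the Java implementation
    # (cube channel values are 0 for level 0, else 55 + 40 * level).
    cube_rgb = tuple(0 if v == 0 else 55 + v * 40 for v in (r, g, b))
    cube_dist = sum((a - c) ** 2 for a, c in zip((red, green, blue), cube_rgb))
    grey_dist = sum((a - grey_level) ** 2 for a in (red, green, blue))
    return cube_index if cube_dist < grey_dist else grey_index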
/**
 * @file ruuvi_device_id.h
 * @author TheSomeMan
 * @date 2021-07-08
 * @copyright Ruuvi Innovations Ltd, license BSD-3-Clause.
 */

#ifndef RUUVI_GATEWAY_ESP_DEVICE_ID_H
#define RUUVI_GATEWAY_ESP_DEVICE_ID_H

#include <stdint.h>
#include <stdbool.h>
#include "mac_addr.h"

#ifdef __cplusplus
extern "C" {
#endif

#define NRF52_DEVICE_ID_SIZE (8U)

typedef struct nrf52_device_id_t
{
    uint8_t id[NRF52_DEVICE_ID_SIZE];
} nrf52_device_id_t;

typedef struct nrf52_device_id_str_t
{
    char str_buf[(NRF52_DEVICE_ID_SIZE * 2) + (NRF52_DEVICE_ID_SIZE - 1) + 1]; // format: XX:XX:XX:XX:XX:XX:XX:XX
} nrf52_device_id_str_t;

void ruuvi_device_id_init(void);

void ruuvi_device_id_deinit(void);

mac_address_bin_t ruuvi_device_id_get_nrf52_mac_address(void);

mac_address_str_t ruuvi_device_id_get_nrf52_mac_address_str(void);

nrf52_device_id_t ruuvi_device_id_get(void);

nrf52_device_id_str_t ruuvi_device_id_get_str(void);

void ruuvi_device_id_set(const nrf52_device_id_t *const p_nrf52_device_id, const mac_address_bin_t *const p_nrf52_mac_addr);

bool ruuvi_device_id_is_set(void);

#ifdef __cplusplus
}
#endif

#endif // RUUVI_GATEWAY_ESP_DEVICE_ID_H
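A quick way to see why str_buf above is sized (8*2) + (8-1) + 1 = 24 bytes: sixteen hex digits plus seven ':' separators plus the terminating NUL. A hypothetical Python equivalent of the formatting (not part of this header):

def format_nrf52_device_id(device_id: bytes) -> str:
    """Render 8 ID bytes in the XX:XX:XX:XX:XX:XX:XX:XX format noted above."""
    assert len(device_id) == 8  # NRF52_DEVICE_ID_SIZE
    return ":".join(f"{b:02X}" for b in device_id)

# len(format_nrf52_device_id(bytes(8))) == 23 characters, plus the NUL
# terminator gives the 24-byte buffer declared in nrf52_device_id_str_t.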
def manage_actors(self, monitor, stop=False):
    """Visit every managed actor and return how many are reported alive.

    Iterates over a snapshot of ``managed_actors`` so that entries can be
    removed during iteration, forwarding the ``stop`` flag to ``manage_actor``.
    """
    alive = 0
    if self.managed_actors:
        for aid, actor in list(self.managed_actors.items()):
            alive += self.manage_actor(monitor, actor, stop)
    return alive
package replication

import (
	"context"
	"fmt"
	"testing"
	"time"

	apis_composition "github.com/atlassian/voyager/pkg/apis/composition"
	comp_v1 "github.com/atlassian/voyager/pkg/apis/composition/v1"
	"github.com/atlassian/voyager/pkg/k8s"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	authn_v1 "k8s.io/api/authentication/v1"
	authz_v1 "k8s.io/api/authorization/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	coreclient_fake "k8s.io/client-go/kubernetes/fake"
	kube_testing "k8s.io/client-go/testing"
)

func admitAuthzWithContextAndLogger(t *testing.T, admissionRequest *admissionv1beta1.AdmissionRequest, resultStatus authz_v1.SubjectAccessReviewStatus) (*admissionv1beta1.AdmissionResponse, []kube_testing.Action, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()

	nsClient := coreclient_fake.NewSimpleClientset()
	// Return stub SubjectAccessReview result
	nsClient.PrependReactor("create", "subjectaccessreviews", func(action kube_testing.Action) (bool, runtime.Object, error) {
		createAction := action.(kube_testing.CreateAction)
		obj := createAction.GetObject()
		sar := obj.(*authz_v1.SubjectAccessReview).DeepCopy()
		sar.Status = resultStatus
		return true, sar, nil
	})

	ac := AdmissionContext{
		AuthzClient: nsClient.AuthorizationV1().SubjectAccessReviews(),
	}

	response, err := ac.servicedescriptorAuthzAdmitFunc(ctx, zaptest.NewLogger(t), admissionv1beta1.AdmissionReview{Request: admissionRequest})
	return response, nsClient.Actions(), err
}

func assertSubjectAccessReview(t *testing.T, allowed bool, reason string) {
	ar, actions, err := admitAuthzWithContextAndLogger(t, &admissionv1beta1.AdmissionRequest{
		UserInfo: authn_v1.UserInfo{
			Username: "user",
			Groups:   []string{"group1"},
			Extra:    map[string]authn_v1.ExtraValue{"foo": {"bar"}},
		},
		Operation: admissionv1beta1.Create,
		Resource:  sdResource,
		Object: objectToRawExtension(t, &comp_v1.ServiceDescriptor{
			ObjectMeta: meta_v1.ObjectMeta{
				Name: "my-service",
			},
			TypeMeta: meta_v1.TypeMeta{
				Kind: "servicedescriptor",
			},
			Spec: comp_v1.ServiceDescriptorSpec{
				Locations: []comp_v1.ServiceDescriptorLocation{},
			},
		}),
	}, authz_v1.SubjectAccessReviewStatus{
		Allowed: allowed,
		Reason:  reason,
	})
	require.NoError(t, err)
	assert.Equal(t, allowed, ar.Allowed)
	assert.Equal(t, reason, ar.Result.Message)

	require.Len(t, actions, 1)
	reviewRequest := actions[0].(kube_testing.CreateAction).GetObject().(*authz_v1.SubjectAccessReview)
	assert.Equal(t, "user", reviewRequest.Spec.User)
	assert.Equal(t, []string{"group1"}, reviewRequest.Spec.Groups)
	assert.Equal(t, map[string]authz_v1.ExtraValue{"foo": {"bar"}}, reviewRequest.Spec.Extra)
	assert.Equal(t, &authz_v1.ResourceAttributes{
		Group:     apis_composition.GroupName,
		Resource:  comp_v1.ServiceDescriptorResourcePlural,
		Namespace: meta_v1.NamespaceNone, // cluster-scoped
		Name:      "my-service",
		Verb:      k8s.ServiceDescriptorClaimVerb,
	}, reviewRequest.Spec.ResourceAttributes)
}

func TestAuthzCreateAllow(t *testing.T) {
	t.Parallel()
	assertSubjectAccessReview(t, true, `RBAC: allowed by ClusterRoleBinding "paas:composition:servicedescriptor:foo:crud" of ClusterRole "paas:composition:servicedescriptor:foo:crud"" to Group "paas-foo-dl-dev"`)
}

func TestAuthzCreateForbid(t *testing.T) {
	t.Parallel()
	assertSubjectAccessReview(t, false, `RBAC: user not allowed to create ServiceDescriptors with name "my-service"`)
}

func assertOperationRejected(t *testing.T, operation admissionv1beta1.Operation) {
	ar, actions, err := admitAuthzWithContextAndLogger(t, &admissionv1beta1.AdmissionRequest{
		Operation: operation,
		Resource:  sdResource,
		Object: objectToRawExtension(t, &comp_v1.ServiceDescriptor{
			Spec: comp_v1.ServiceDescriptorSpec{
				Locations: []comp_v1.ServiceDescriptorLocation{},
			},
		}),
	}, authz_v1.SubjectAccessReviewStatus{})
	require.NoError(t, err)
	require.Equal(t, int32(422), ar.Result.Code)
	require.Equal(t, fmt.Sprintf("unsupported operation %q", string(operation)), ar.Result.Message)
	require.Empty(t, actions)
}

func TestAuthzRejectsUpdate(t *testing.T) {
	t.Parallel()
	assertOperationRejected(t, admissionv1beta1.Update)
}

func TestAuthzRejectsDelete(t *testing.T) {
	t.Parallel()
	assertOperationRejected(t, admissionv1beta1.Delete)
}
def _check_directory(host_path: str, output_name: str) -> None:
    """Reject an output directory containing symlinks that are dangling,
    absolute, or that point outside of host_path."""

    def raiser(exc: OSError):
        raise exc

    for root, subdirs, files in os.walk(host_path, onerror=raiser, followlinks=False):
        for fn in files:
            fn = os.path.join(root, fn)
            if os.path.islink(fn) and (
                not os.path.exists(fn)
                or os.path.isabs(os.readlink(fn))
                or not path_really_within(fn, host_path)
            ):
                raise OutputError(f"Directory in output {output_name} contains unusable symlink")
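path_really_within() and OutputError come from the surrounding codebase and are not shown here. For illustration only, the containment test that path_really_within is assumed to perform could be sketched as follows (an assumption, not the project's actual implementation):

import os

def path_really_within(path: str, parent: str) -> bool:
    # Assumed behavior: resolve symlinks and '..' components, then check
    # that the resolved path is the parent or lies underneath it.
    real_path = os.path.realpath(path)
    real_parent = os.path.realpath(parent)
    return os.path.commonpath([real_path, real_parent]) == real_parent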
Belsnickel (also Belschnickel, Belznickle, Belznickel, Pelznikel, Pelznickel; from pelzen (or belzen, German for to wallop or to drub)[1] and Nickel, a hypocorism of the given name Nikolaus) is a crotchety, fur-clad Christmas gift-bringer figure in the folklore of the Palatinate region of southwestern Germany along the Rhine, the Saarland, and the Odenwald area of Baden-Württemberg. The figure is also preserved in Pennsylvania Dutch communities.[2]

Belsnickel is related to other companions of Saint Nicholas in the folklore of German-speaking Europe. He may have been based on another, older German myth: Knecht Ruprecht, a servant of Saint Nicholas and a character from northern Germany.[3] Unlike those figures, Belsnickel does not accompany Saint Nicholas but instead visits alone,[3] and he combines both the threatening and the benign aspects which in other traditions are divided between Saint Nicholas and the companion figure.

Belsnickel is a man wearing furs and sometimes a mask with a long tongue. He is typically very ragged and disheveled. He wears torn, tattered, and dirty clothes, and he carries a switch in his hand with which to beat naughty children, but also pocketsful of cakes, candies, and nuts for good children.

A first-hand 19th-century account of the "Beltznickle" tradition in Allegany County, Maryland, can be found in Brown's Miscellaneous Writings, a collection of essays by Jacob Brown (born 1824). Writing of a period around 1830, Brown says, "we did not hear of" Santa Claus. Instead, the tradition called for a visit by a different character altogether:

He was known as Kriskinkle, Beltznickle and sometimes as the Christmas woman. Children then not only saw the mysterious person, but felt him or rather his stripes upon their backs with his switch. The annual visitor would make his appearance some hours after dark, thoroughly disguised, especially the face, which would sometimes be covered with a hideously ugly phiz - generally wore a female garb - hence the name Christmas woman - sometimes it would be a veritable woman but with masculine force and action. He or she would be equipped with an ample sack about the shoulders filled with cakes, nuts, and fruits, and a long hazel switch which was supposed to have some kind of a charm in it as well as a sting. One would scatter the goodies upon the floor, and then the scramble would begin by the delighted children, and the other hand would ply the switch upon the backs of the excited youngsters - who would not show a wince, but had it been parental discipline there would have been screams to reach a long distance.[4]

Outside Europe

The Belsnickel character originated in the Palatinate. When people immigrated to Pennsylvania, they brought their German traditions with them.[5] Belsnickel was known in Pennsylvania in the early 1800s.[3] Amongst the Pennsylvania Germans, Belsnickel is the character who visits homes prior to Christmas to check up on the behavior of the children. The traditional Belsnickel showed up at houses 1–2 weeks before Christmas and often created fright, because he always knew exactly which of the children had misbehaved.[6] He would rap on the door or window with his stick, and often the children would have to answer a question for him or sing some type of song. In exchange he would toss candies onto the floor. If the children jumped too quickly for the treats, they might end up getting struck with Belsnickel's switch.

An 1853 article in a British magazine describing Pennsylvanian customs refers to "Pelsnichol, or Nicholas with the fur, alluding to the dress of skins in which he is said to be clad. Some make Pelsnichol identical with Krishkinkle, but the more general opinion is that they are two personages, one the rewarder of the good, the other the punisher of the bad." According to this article, Pelsnichol merely leaves a birch rod in the stockings of naughty children.[7]

There are two versions of Belsnickel, the rural and the urban characters. Both are described in the book Christmas in Pennsylvania: a folk cultural study, by Alfred L. Shoemaker and Don Yoder. The tradition fell into decline toward the end of the nineteenth century, but has seen a revival in recent years.[3]

The tradition of Belsnickel was brought to Indiana by immigrants from the Palatinate. His garb could vary from one locality to another. He might wear a long, black or brown coat or robe, held together at the waist with a rope, and a fur cap or bear skin hat, decorated with bells. In this branch of the tradition, the father or other older male relative was often "busy working outside" or had to see to some matter elsewhere in the house when Pelznickel (or Belsnickel) arrived.

"Belsnickling" or "Klausentreiben" was the "running" of groups of young men or youths dressed in false faces and fantastic costumes on "Belsnickle Night", the eve of the Feast of St. Nicholas (St. Nikolaustag), and was the occasion of good-natured boisterousness. Young men, dressed in skins and furs, would move through the streets of town or village, rattling chains and bells.[8]

The tradition also exists in parts of Newfoundland (see mummering), Nova Scotia,[9] the prairie provinces of Canada, and some communities in the Brazilian state of Santa Catarina.[10]
/* eslint-disable no-console */
import * as core from '@actions/core';
import * as github from '@actions/github';

import { CrawlerApiClient } from './crawler-api-client';
import type { ConfigJson } from './types/configJson';
import type { GetCrawlersResponseBody } from './types/publicApiJsonResponses';

// CREDENTIALS
const CRAWLER_USER_ID = core.getInput('crawler-user-id');
const CRAWLER_API_KEY = core.getInput('crawler-api-key');
const CRAWLER_API_BASE_URL = core.getInput('crawler-api-base-url');
const GITHUB_TOKEN = core.getInput('github-token');

// CRAWLER CONFIGURATION
const CRAWLER_NAME = core.getInput('crawler-name');
// Derive an index name from the crawler name: spaces and slashes become
// dashes, then characters Algolia does not accept are stripped.
const INDEX_NAME = CRAWLER_NAME.replace(/[ /]/g, '-').replace(
  /[/~,[\]`&|;$*\\]/g,
  ''
);
const ALGOLIA_APP_ID = core.getInput('algolia-app-id');
const ALGOLIA_API_KEY = core.getInput('algolia-api-key');
const SITE_URL = core.getInput('site-url');
const OVERRIDE_CONFIG = core.getInput('override-config') === 'true';

interface Comment {
  id: number;
  body?: string;
  user: {
    login: string;
  } | null;
}

const client = new CrawlerApiClient({
  crawlerApiBaseUrl: CRAWLER_API_BASE_URL,
  crawlerUserId: CRAWLER_USER_ID,
  crawlerApiKey: CRAWLER_API_KEY,
});
const octokit = github.getOctokit(GITHUB_TOKEN);

function getConfig(): ConfigJson {
  return {
    appId: ALGOLIA_APP_ID,
    apiKey: ALGOLIA_API_KEY,
    indexPrefix: 'crawler_',
    rateLimit: 8,
    startUrls: [SITE_URL],
    ignoreQueryParams: ['source', 'utm_*'],
    ignoreNoIndex: false,
    ignoreNoFollowTo: false,
    ignoreRobotsTxtRules: false,
    actions: [
      {
        indexName: `${INDEX_NAME}_index`,
        pathsToMatch: [`${SITE_URL}**`],
        recordExtractor: {
          __type: 'function',
          source: getRecordExtractorSource(),
        },
      },
    ],
  };
}

function getRecordExtractorSource(): string {
  return `({ helpers }) => {
  return helpers.netlifyExtractor({ template: 'default' });
}`;
}

function findCommentPredicate(crawlerId: string, comment: Comment): boolean {
  return (
    (comment.user ? comment.user.login === 'github-actions[bot]' : false) &&
    (comment.body ? comment.body.includes(crawlerId) : false)
  );
}

async function findComment(
  prNumber: number,
  crawlerId: string
): Promise<Comment | undefined> {
  const parameters = {
    owner: github.context.repo.owner,
    repo: github.context.repo.repo,
    issue_number: prNumber,
  };

  for await (const { data: comments } of octokit.paginate.iterator(
    octokit.rest.issues.listComments,
    parameters
  )) {
    // Search each page for the comment
    const gaComment = comments.find((comment) =>
      findCommentPredicate(crawlerId, comment)
    );
    if (gaComment) return gaComment;
  }
  return undefined;
}

async function addComment(crawlerId: string): Promise<void> {
  try {
    const context = github.context;
    if (context.payload.pull_request === undefined) {
      core.info('No pull request found.');
      return;
    }
    const prNumber = context.payload.pull_request.number;
    // First check if the comment doesn't already exist
    const comment = await findComment(prNumber, crawlerId);

    const pathArray = CRAWLER_API_BASE_URL.split('/');
    const protocol = pathArray[0];
    const host = pathArray[2];
    const baseUrl = `${protocol}//${host}`;
    const message = `<p>Check your created <a href="${baseUrl}/admin/crawlers/${crawlerId}/overview" target="_blank">Crawler</a></p>
<p>Check your created index on your <a href="https://www.algolia.com/apps/${ALGOLIA_APP_ID}/explorer/browse/${CRAWLER_NAME}" target="_blank">Algolia Application</a></p>`;

    // If the comment exists, we update it
    if (comment !== undefined) {
      core.info('Existing comment found.');
      await octokit.rest.issues.updateComment({
        ...context.repo,
        comment_id: comment.id,
        body: message,
      });
      core.info(`Updated comment id '${comment.id}'.`);
      return;
    }
    // Otherwise create it; awaited so a failure is caught by this try/catch
    // instead of escaping as an unhandled rejection.
    await octokit.rest.issues.createComment({
      ...context.repo,
      issue_number: prNumber,
      body: message,
    });
  } catch (error) {
    let errorMessage = 'An unexpected error happened.';
    if (error instanceof Error) {
      errorMessage = error.message;
    } else {
      console.log(error);
    }
    core.setFailed(errorMessage);
  }
}

async function crawlerReindex(): Promise<void> {
  let crawlerId = '';

  // Searching for the crawler, based on the name and application ID
  const crawlers: GetCrawlersResponseBody | undefined =
    await client.getCrawlers({ name: CRAWLER_NAME, appId: ALGOLIA_APP_ID });

  if (typeof crawlers === 'undefined') {
    return;
  }

  if (crawlers.items.length !== 0) {
    // If the crawler exists: update it
    crawlerId = crawlers.items[0].id;
    if (OVERRIDE_CONFIG) {
      const config = getConfig();
      await client.updateConfig(crawlerId, config);
    }
  } else {
    // If it doesn't exist yet: create it
    const crawler = await client.createCrawler(CRAWLER_NAME, getConfig());
    crawlerId = crawler.id;
  }

  console.log(`---------- Reindexing crawler ${crawlerId} ----------`);
  await client.reindex(crawlerId);
  // Awaited so the action does not exit with the comment still pending.
  await addComment(crawlerId);
}

console.log('---------CRAWLER CONFIG---------');
console.log(`CRAWLER_NAME : ${CRAWLER_NAME}`);

crawlerReindex().catch((error) => {
  core.setFailed(error);
});
import sys

try:
    sys.stdin = open('inputf.in', 'r')
    sys.stdout = open('outputf.in', 'w')
except:
    pass

# sys.setrecursionlimit(2000)
# ------------------------------------------------
# importing libraries
import math
from math import sqrt
from collections import defaultdict
from collections import Counter
import string
from collections import deque
from itertools import permutations, combinations

# ------------------------------------------------
# input snippets
def ii(): return int(sys.stdin.readline().strip())
def si(): return sys.stdin.readline().strip()
def li(): return list(map(int, sys.stdin.readline().strip().split()))
def mi(): return map(int, sys.stdin.readline().strip().split())
def dparr(m, n): return [[0] * n for i in range(m)]

# ------------------------------------------------
def check(n, u, r, d, l, p):
    # Try the four corners in the order given by permutation p; each corner
    # consumes one cell from each of its two adjacent borders.
    for i in p:
        if i == 'ul' and u > 0 and l > 0:
            u -= 1
            l -= 1
        if i == 'ru' and r > 0 and u > 0:
            r -= 1
            u -= 1
        if i == 'dl' and d > 0 and l > 0:
            d -= 1
            l -= 1
        if i == 'dr' and d > 0 and r > 0:
            d -= 1
            r -= 1
    # With the corners spoken for, each border can hold at most n - 2 more.
    return u <= n - 2 and r <= n - 2 and d <= n - 2 and l <= n - 2

t = ii()
x = list(permutations(["ul", "ru", "dl", "dr"]))
for _ in range(t):
    n, u, r, d, l = mi()
    ans = False
    for p in x:
        if check(n, u, r, d, l, p):
            ans = True
            break
    print("YES" if ans else "NO")
#include <cstring>
#include <iostream>  // required for the std::cout statements below

#include "aergo.hpp"

unsigned char privkey[32] = {
  0xDB, 0x85, 0xDD, 0x0C, 0xBA, 0x47, 0x32, 0xA1, 0x1A, 0xEB, 0x3C, 0x7C, 0x48, 0x91, 0xFB, 0xD2,
  0xFE, 0xC4, 0x5F, 0xC7, 0x2D, 0xB3, 0x3F, 0xB6, 0x1F, 0x31, 0xEB, 0x57, 0xE7, 0x24, 0x61, 0x76
};

int main() {
  Aergo aergo("testnet-api.aergo.io", 7845);
  aergo_account account = {0};

  /* load the private key in the account */
  std::memcpy(account.privkey, privkey, 32);

  /* get the account state (public key, address, balance, nonce...) */
  if (aergo.get_account_state(&account) == true) {
    std::cout << "------------------------------------\n";
    std::cout << "Account address: " << account.address << "\n";
    std::cout << "Account balance: " << account.balance << "\n";
    std::cout << "Account nonce: " << account.nonce << "\n";
    //std::cout << "Account state_root: " << account.state_root << "\n";
  } else {
    std::cout << "FAILED to get the account state\n";
    return 1;
  }

  std::cout << "\nsending transaction...\n";

  struct transaction_receipt receipt;
  bool ret = aergo.transfer(
      &receipt, &account,
      "AmQFpC4idVstgqhnn7ihyueadTBVBq55LzDLbK8XbzHuhMxKAQ72", 1.5);

  if (ret == true) {
    std::cout << "done.\n";
    std::cout << "\nTransaction Receipt:\n";
    std::cout << "status: " << receipt.status << "\n";
    std::cout << "ret: " << receipt.ret << "\n";
    std::cout << "blockNo: " << receipt.blockNo << "\n";
    std::cout << "txIndex: " << receipt.txIndex << "\n";
    std::cout << "gasUsed: " << receipt.gasUsed << "\n";
    std::cout << "feeUsed: " << receipt.feeUsed << "\n";
  } else {
    std::cout << "transfer FAILED\n";
  }

  std::cout << "Disconnected\n";
  return 0;
}
#!/usr/bin/env python
import sys

import torch


# Thanks jerry!
def main():
    f = sys.argv[1]
    model_data = torch.load(f)
    state_dict = model_data['state_dict']
    # Strip the '.num_batches_tracked' buffers that newer PyTorch BatchNorm
    # layers add, so the checkpoint loads into models without them.
    keys_to_delete = []
    for key in state_dict.keys():
        if key.endswith('.num_batches_tracked'):
            keys_to_delete.append(key)
    for key in keys_to_delete:
        del state_dict[key]
    torch.save(model_data, sys.stdout.buffer)
    sys.stdout.buffer.close()


if __name__ == '__main__':
    main()
from multiprocessing import Process, Pipe

import numpy as np


def writeToConnection(conn):
    conn.send(np.ones(3))
    conn.close()


if __name__ == '__main__':
    recv_conn, send_conn = Pipe(duplex=False)
    p = Process(target=writeToConnection, args=(send_conn,))
    p.start()
    print(recv_conn.recv())
    p.join()

    recv_conn, send_conn = Pipe(duplex=False)
    send_conn.send('hello')
    res = recv_conn.recv()
    print(res)
Relationship between changes in antigen expression and protein synthesis in human melanoma cells after hyperthermia and photodynamic treatment.

Hyperthermia and photoactivated hematoporphyrin derivative induce a dose-dependent reduction in the expression of the p250 surface melanoma-associated antigen on the human FME cell line. Expression of this glycoprotein antigen was quantitated by immunofluorescence flow cytometry based on the monoclonal antibody 9.2.27. Decrease in antigen expression was followed by a transient increase above the level for untreated cells, before normalization occurred about one week after treatment. These treatment-induced changes in antigen expression could partly be explained by changes in protein synthesis. This conclusion was based on the following observations: Hyperthermia and photoactivated hematoporphyrin derivative both inhibited protein synthesis. The latter increased again rapidly to rates above normal until antigen expression reached normal level, whereupon the protein synthesis rate decreased to normal. Inhibition of protein synthesis by cycloheximide 1 day after heating prevented the recovery of antigen expression, demonstrating that protein synthesis is necessary for resumption of normal antigen expression. The changes in both antigen expression and protein synthesis were dose-dependent, and the magnitude and duration of the changes increased with increasing dose. The time courses of the changes in protein synthesis after two different treatments which both inactivated two logs of cells were almost identical, as were the time courses after two lower heat doses inactivating one log of cells. These similarities were reflected in the changes in antigen expression. At the same time as protein synthesis reached its maximum and antigen expression resumed normal level, an increase in the Golgi apparatus was observed ultrastructurally, indicating an increased synthesis rate and transportation of glycoproteins to the cell surface.

Photodynamic therapy is another modality of cancer treatment where damage to the plasma membrane may lead to cell inactivation (Kessel, 1977; Bellnier & Dougherty, 1982), and membrane proteins are reported to be sensitive targets (Girotti & Deziel, 1983; van Steveninck et al., 1983; Moan & Vistnes, 1986). The mechanisms for cell inactivation seem to some extent to depend on the photosensitizer concentration and on the incubation time with the photosensitizer (Fritsch et al., 1976; Kessel, 1981; Christensen et al., 1983). Photoactivation of porphyrin after incubation for a short time (1 h) has been reported to damage the membrane to a relatively greater degree than photoactivation of porphyrin after a long (22 h) incubation, using light doses inactivating the same number of cells (Christensen et al., 1983). A number of intracellular effects have been reported after photoactivation of porphyrins over short and long incubations. One of the most pronounced effects is inhibition of DNA synthesis. Inhibition of protein synthesis has also been observed, but this process is less sensitive (Lin et al., 1986). Since plasma membrane proteins may be important targets in both hyperthermia and photodynamic therapy, we have studied changes in the expression of surface melanoma-associated antigens after these treatments (Davies et al., 1985, 1986).

Correspondence: C. de L. Davies. Received 23 February 1988; and in revised form, 19 May 1988.
Chemotherapy-induced changes in the expression of surface antigens on tumour cells have also been reported (Leibson et al., 1978; Shapiro et al., 1982; Chakrabarty et al., 1984). A lot of work is going on to develop and characterize new monoclonal antibodies to tumour-associated antigens, and monoclonal antibodies, including the one used in the present work, are being evaluated in clinical trials. In a clinical situation, therapy or disease monitoring based on monoclonal antibodies might be used in combination with various forms of cancer therapy. Therefore, it is of importance to know how the expression of tumour-associated antigens is affected by various treatments, e.g., for optimal time scheduling of antibody administration in relation to other treatments. The objective of the present work was to clarify the mechanisms behind treatment-induced changes in antigen expression, and to investigate to what extent changes in protein synthesis could explain changes in antigen expression.

Cells

The human melanoma cell line FME was established at our institute from a xenograft growing in athymic mice (Tveit et al., 1980). Monolayer cultures of FME cells were grown at 37°C and 5% CO2 in RPMI 1640 medium supplemented with 10% foetal calf serum (FCS), 1 mM L-glutamine, and penicillin-streptomycin solution to a final concentration of 100 units ml-1, all from Gibco (Paisley, UK). Grown under these conditions, the doubling time was 18 h and the plating efficiency 0.60±0.15. The medium was changed every second day, and always the day before treatment, since medium starvation was found to cause a reduction in the antigen expression of these cells (Lindmo et al., 1984b). The melanoma-associated antigen p250 (Bumol & Reisfeld, 1982; Bumol et al., 1984) is sensitive to trypsin treatment (Lindmo et al., 1984a); therefore trypsin was avoided and the cells were detached from the culture flasks with 10 mM EDTA (Merck, Darmstadt, FRG) in Dulbecco's PBS supplemented with 0.05% KCl.

Heat treatment

Exponentially growing cells were harvested, and suspensions of 5 x 10^6 cells in 5 ml complete medium were flushed with 5% CO2 and heated in a thermostatically regulated water bath. The cells were kept in suspension by shaking the tubes every 10 minutes. The pH during the heating was 7.1±0.1. Immediately after heating, the cells were seeded in culture flasks for measurement of protein synthesis and antigen expression at different times after treatment.

Photodynamic treatment

Exponentially growing cells in monolayer were incubated with hematoporphyrin derivative (HpD) in the dark at 37°C for either 1 or 18 h, followed by light exposure in the presence of HpD at room temperature. Cells in monolayer were preferred to suspensions in order to obtain the most exact light exposure of the cells. A nontoxic HpD concentration, 12.5 µg ml-1 in medium containing 15% FCS, was used. HpD was prepared from hematoporphyrin dihydrochloride (Koch Light Laboratories Ltd., Berkshire, UK) as described by Lipson et al. (1961). The cells were exposed to light from a bank of four fluorescent tubes (Philips TL 20W/09) with maximum emission at 360 nm. The fluence rate at the position of the cells was 12 W m-2, as measured with a calibrated thermopile (65A, YSI, Yellow Spring, OH) (Moan, 1986). After exposure to photoactivated HpD, the cells were rinsed once with PBS and allowed to continue as monolayer cultures in complete medium. At different times after treatment, protein synthesis and antigen expression were measured.
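(For orientation, and not stated explicitly in the paper: at a fixed fluence rate the light dose delivered scales linearly with exposure time, so for the 170 s exposure used later in the Results,

\[ \text{light dose} = E \cdot t = 12\ \mathrm{W\,m^{-2}} \times 170\ \mathrm{s} \approx 2.0\ \mathrm{kJ\,m^{-2}}, \]

and the 30 s exposure after the 18 h incubation corresponds to roughly 0.36 kJ m-2.)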
Protein synthesis measurements

Protein synthesis was measured by incorporation of 3H-valine (30 Ci mmol-1, Amersham, Buckinghamshire, UK). At selected times after treatment, the cells were rinsed once with PBS and harvested with EDTA, and the cell number determined by Coulter counter. Thus, only cells attached to the culture flasks were used, and floating, damaged cells were excluded from the measurements. In the measurements immediately and 3 h after hyperthermia, however, the total cell population was used, since 4-5 h were needed for the cells to attach to the culture flasks. Protein synthesis was measured in 3 parallel samples, each prepared as follows: About 2 x 10^6 cells were suspended in 2 ml complete medium supplemented with 1.5 mM cold valine (Sigma) and 3H-valine at a final specific radioactivity of 3 Ci mol-1, and incubated at 37°C for 30 min. The cold valine was added to keep the specific radioactivity in the medium at a constant level (see Figure 1). After the incubation, the cells were centrifuged at 0°C and resuspended in 1 ml water, before 1 ml 20% trichloroacetic acid was added. The samples were then heated at 90°C for 20 min. After cooling, the precipitates were collected on Gelman A/E filters. The filters were washed with 5% trichloroacetic acid, dried, and the radioactivity measured by liquid scintillation counting. The protein synthesis rate was calculated as the average DPM-value per cell of the three parallel samples.

Measurements of antigen expression after treatment

The expression of the melanoma-associated antigen p250 was measured by flow cytometry. The monoclonal antibody 9.2.27 against the glycoprotein p250 (Morgan et al., 1981) was a gift from A.C. Morgan. IgG antibody was purified from ascites fluid by ammonium-sulphate precipitation followed by separation on a DEAE-52 agarose ion exchange column. Fluorescein isothiocyanate (FITC) was conjugated to the antibody such that a direct immunofluorescence staining reaction was used in the measurements of antigen expression after hyperthermia. In the case of photodynamic treatment, an indirect immunofluorescence staining reaction was used, and antigen expression was measured as described by Davies et al. (1986). In the direct immunofluorescence staining reaction, cells were incubated at 4°C for 45 min with the FITC-conjugated monoclonal antibody at a concentration of 10 µg ml-1. The cells were subsequently washed twice with PBS with 1% bovine serum albumin and 0.1% azide (NaN3), and resuspended in PBS with 0.1% azide. Quantitative measurements of antigen expression were determined with a Coulter EPICS V flow cytometer as described by Davies et al. (1985, 1986). Briefly, the 488 nm argon laser line was used to excite fluorescein fluorescence, which was detected in the spectral interval from 515 to 550 nm. The fluorescence histograms were gated on the forward angle light scatter signal to eliminate signals from damaged cells and debris, as well as from aggregates of cells. Antigen expression was quantified by determining the median immunofluorescence intensity from the logarithmic fluorescence histogram, and antigen expression of treated cells was calculated relative to that of untreated control.

Preparation for electron microscopy

Cultures were fixed in 2.5% glutaraldehyde in phosphate buffer at pH 7.2. After fixation the cells were collected, postfixed in osmium tetroxide, embedded in Epon 812, thin sectioned by a LKB Ultratome III, and stained with uranyl acetate and lead citrate.
Samples were examined by a Jeol 1200EX electron microscope.

Results

It has been found that the amount of radioactivity incorporated into protein is proportional to the specific radioactivity in the precursor pool (Seglen et al., 1978). In order to keep constant specific radioactivity in the medium during the incorporation period, a valine concentration which is sufficiently high to minimize the effects of isotope consumption and dilution of isotope by proteolytically released valine is required. The rate of incorporation of 3H-valine into protein as a function of the valine concentration in the medium is shown in Figure 1, and is in agreement with the methodological work of Rønning et al. (1979). The cells were incubated for 30 min with various concentrations of cold valine and 3H-valine at a constant final specific radioactivity of 3.0 Ci mol-1. The radioactivity bound to protein in the cells reached a maximum level for valine concentrations above 1 mM. The intracellular acid-soluble 3H-valine increased further as a function of the valine concentration in the medium. This indicates that limitation in amino acid transport across the cell membrane was not responsible for flattening the incorporation curve. Based on these observations, 1.5 mM valine was always added to the medium during 3H-valine incorporation. The inhibition of protein synthesis immediately (i.e., within 10 min) after heating at 43.5°C and 45.0°C is shown as a function of heating time in Figure 2. Figure 3A shows the recovery of protein synthesis after heating with doses inhibiting ~90% (43.5°C for 90 min and 45.0°C for 20 min) and 50% of the protein synthesis (43.5°C for 30 min and 45.0°C for 5 min). Cell survival data after these treatments are shown in Table I. The time courses of recovery of protein synthesis after the two treatments resulting in 90% initial inhibition were almost identical, as were the two curves after the two lower doses. Protein synthesis started to increase immediately after end of the heating, reached the normal rate, and increased temporarily above the normal synthesis rate. The duration and degree of the heat-induced changes in the protein synthesis increased with increasing heat dose. Figure 3B shows the heat-induced changes in the expression of the melanoma-associated antigen p250. The same heat doses were used as in the measurements of protein synthesis. The similarity in the time courses after exposure to the two smaller, and to the two higher heat doses, was also seen in the changes of antigen expression. Immediately after heating and the following day, a dose-dependent reduction in antigen expression was observed as earlier reported (Davies et al., 1985). Antigen expression started to increase, and reached the normal level 2-3 days after heating. The two most severe heat doses induced a small temporary enhancement in antigen expression above the normal level. In order to see if protein synthesis was necessary for the recovery of antigen expression, heated cells were incubated with 1 µM cycloheximide from 24 to 48 h after the heat treatment. During this period antigen expression of heated cells normally started to recover (Davies et al., 1985, 1986). However, incubation with cycloheximide resulted in a further decrease in antigen expression, as indicated in Figure 3B. Incubation for 24 h with 1 µM cycloheximide alone reduced the antigen expression ~40%.
In order to quantitatively relate the protein synthesis data in Figure 3A to the antigen expression shown in Figure 3B, the amount of antigen predicted mathematically from the synthesis data was determined for the larger dose of hyperthermia. (The mathematical analysis is shown in the Appendix.) The time course for the reduction in antigen expression was well represented by the theoretical curve, but the mathematical model predicted a much larger and somewhat earlier transient increase above normal expression than what was actually observed. Recovery of protein synthesis after incubation with HpD (12.5 µg ml-1) for 1 and 18 h followed by light exposure is shown in Figures 4A and C (Table I). Incubation with HpD in the dark had no significant effect on protein synthesis. Exposure to photoactivated HpD induced a dose-dependent reduction in protein synthesis. Immediately after the treatment the protein synthesis started to increase, and increased temporarily above the normal synthesis rate. Photoactivation of HpD incubated for 18 h inhibited protein synthesis somewhat more than short-time HpD incubation, and the normal synthesis rate was reached later. Figures 4B and D show the changes in the expression of the melanoma-associated antigen p250 after photoactivation of HpD incubated for 1 or 18 h, respectively. The same HpD concentration and light doses as in the measurements of protein synthesis were used. HpD in the dark did not affect antigen expression significantly. Immediately after the treatment (within 20 min) there was no significant change in antigen expression. Three to 6 h later a dose-dependent reduction in antigen expression was observed, as earlier reported (Davies et al., 1986). The duration of the reduction was dependent on the time of HpD incubation and the light dose. The reduced antigen expression was followed by a small transient increase in the antigen expression above the normal level. Ultrastructural studies of cells exposed to hyperthermia and photoactivated HpD were done to examine whether these treatments affected intracellular organelles involved in the synthesis and transportation of membrane glycoproteins. Figure 5A shows an untreated control cell containing an undamaged nucleus, preserved mitochondria and cristae, many Golgi fields, some rough endoplasmic reticulum and microtubules. Panel B shows a cell immediately after incubation with HpD for 1 h followed by 170 s light exposure. The mitochondria were condensed with large spaces between the cristae, and the cisternae of the Golgi apparatus were swollen. The cytoplasm contained a lot of recycling vesicles, probably originating from the outer membrane. The nucleus was unchanged, and no condensation or pyknosis was observed. Panel C shows a cell immediately after incubation with HpD for 18 h followed by 30 s light. The mitochondria were swollen, and many vesicles of recycling membranes were seen in the cytoplasm. In addition, secondary lysosomes with degraded membranes appeared in the cytoplasm. As in panel B the nucleus was unchanged. Panels D and E show cells 2 days after incubation with HpD for 1 and 18 h followed by 170 and 30 s light, respectively. The mitochondria had recovered and the cells contained more Golgi fields than the control cells; in addition remnants of secondary lysosomes were observed.

Figure 5 Transmission electron micrograph of cells exposed to photoactivated HpD (12.5 µg ml-1). Untreated control cells show normal mitochondria, Golgi apparatus, rough endoplasmic reticulum, microtubules, and nucleus (a). Cells immediately after incubation with HpD for 1 h followed by 170 s light show condensed mitochondria, swollen Golgi apparatus, vesicles of recycling membranes, and undamaged nucleus (b). Cells immediately after incubation with HpD for 18 h followed by 30 s light show swollen mitochondria, vesicles of recycling membranes, secondary lysosomes, and undamaged nucleus (c). Cells two days after incubation with HpD for 1 h followed by 170 s light (d) and HpD for 18 h followed by 30 s light (e) show recovered mitochondria, an extended Golgi apparatus, remnants of secondary lysosomes, and undamaged nucleus. Scale bar = 0.5 µm.

Ultrastructural studies of heat-treated cells demonstrated the same type of cellular damage as after exposure to photoactivated HpD. Immediately after heating the nucleus was unchanged, but the mitochondria and Golgi apparatus were damaged. Two days later the mitochondria had recovered and an increase in the number of Golgi apparatus was seen (data not shown).

Discussion

Hyperthermia and photodynamic treatment both inhibit protein synthesis in a dose-dependent manner. The human melanoma FME cell line used in the present work showed somewhat less inhibition of protein synthesis than the more heat-sensitive Chinese hamster ovary (CHO) cells (Henle & Leeper, 1979; Hahn & Shiu, 1985) and Novikoff hepatoma cells (Mondovi et al., 1969). The recovery of protein synthesis started immediately after heating, while CHO cells showed a delay in the recovery of 2-4 h. Photoactivated HpD inhibited protein synthesis in the FME cells more than in CHO cells (Lin et al., 1986) and NHIK 3025 cells, although recovery was more rapid than in CHO cells. The objective of the present work was to study whether changes in the expression of surface melanoma-associated antigens observed after hyperthermia or photodynamic therapy could be related to changes in protein synthesis. Heat induced a reduction in antigen expression immediately after treatment. This initial reduction was probably caused by some kind of direct damage of the antigenic determinants and not by inhibition of protein synthesis, since the half-life of the p250 antigen on melanoma cells is reported to be 15.6 h (Bumol et al., 1984). Heating for 5-20 min is therefore too short a treatment to induce a significant reduction in the expression of this antigen only by inhibition of protein synthesis. In contrast, photoactivated HpD did not induce any reduction in the expression of the p250 antigen immediately after treatment. The subsequent time courses of the changes in antigen expression after both hyperthermia and photodynamic therapy could partly be explained by treatment-induced changes in protein synthesis. The changes of both antigen expression and protein synthesis were dose-dependent, and the magnitude and duration of the changes increased with increasing dose. In the case of hyperthermia, two doses inactivating one log and two doses inactivating two logs of cells were used. The recovery kinetics of protein synthesis after heating with the two lower doses were similar, as were the recovery kinetics after heating with the two higher doses.
These similarities were also reflected in the time courses of antigen expression, i.e., the two lower doses induced about the same changes in antigen expression, and the time courses after the two higher doses were almost identical. These results demonstrate that different heat treatments inactivating the same number of cells induced the same changes in protein synthesis, and these changes were reflected in the heat-induced changes in antigen expression. Protein synthesis was found to be necessary for recovery of antigen expression. Protein synthesis started to increase immediately after both hyperthermia and photodynamic treatment, while antigen expression did not start to increase until protein synthesis had reached 50-100% of the normal rate. Inhibition of protein synthesis by cycloheximide 1 day after heating prevented the recovery of antigen expression, demonstrating that protein synthesis is required for the resumption of normal antigen expression. Both exposure to hyperthermia and photoactivated HpD induced a transient enhancement in the protein synthesis rate above the normal level. At the same time as antigen expression reached the normal level, the protein synthesis decreased from its hypernormal rate, suggesting a negative feedback control mechanism driven by the discrepancy in plasma membrane proteins. The theoretical curve shown in Figure 3B predicted a much higher and somewhat earlier overexpression of antigen than that actually observed. The theoretical curve represents the total cellular amount of the p250 antigen; therefore reduced ability to transport antigen to the membrane and increased degradation or shedding of the antigen may explain the difference. A corresponding analysis of the data in Figures 4A and B for the photodynamic effects after short-term incubation with HpD would be qualitatively similar to that in Figure 3. After a long incubation with HpD followed by the most severe light dose, however, antigen expression remained at a reduced level for three days after the protein synthesis had reached the normal rate. In this case a mathematical prediction of antigen expression from protein synthesis data would differ widely from the observed data. This suggests a more severe disturbance of the relationship between overall protein synthesis and antigen expression, possibly due to severe damage of the Golgi apparatus and other organelles involved in the transportation of newly synthesized plasma membrane proteins. It should be noted that the cell populations for measurements of protein synthesis and antigen expression were comparable but not identical. Measurements of antigen expression were representative for surviving cells, since membrane-damaged cells which were not removed by changing the medium were gated out during the flow cytometric analysis. However, there was no significant difference between the immunofluorescence histograms including and excluding these membrane-damaged cells (Davies et al., 1985, 1986). In the protein synthesis measurements, membrane-damaged cells were removed only by changing the medium. In the first hours after hyperthermia it is not possible to distinguish between those cells that eventually survive and those that are inactivated. Bleiberg & Sohar (1975) found that protein synthesis might resume to some extent in cells unable to make clones.
Hahn & Shiu (1985) compared the recovery of protein synthesis in cells heated with doses resulting in cell survival of 75% and 0.01%, and found that cells heated with the lower dose resumed normal protein synthesis within 4 h followed by an overshoot in this parameter, while the most severely heated cells were unable to resume the normal protein synthesis rate. Our results showed that protein synthesis resumed the normal rate 1-2 days after heating, suggesting that these and the subsequent measurements were representative of surviving cells. Ultrastructural studies showed that the Golgi apparatus was damaged immediately after both exposure to hyperthermia and photoactivated HpD. Mitranic et al. (1976) have also reported that hyperthermia affects the Golgi apparatus. They isolated Golgi fractions and observed ultrastructural changes in the membrane surface of the Golgi at 43°C. Our ultrastructural studies showed that two days after exposure to hyperthermia and photoactivated HpD, an increase in the Golgi apparatus was observed. This increase indicated that new glycoproteins were synthesized and transported to the cell surface in vacuoles delivered by the Golgi cisternae. The increase in Golgi apparatus appeared at the same time as the protein synthesis rate reached its maximum, and antigen expression was almost recovered. These observations are consistent with the hypothesis that recovery kinetics after treatment-induced changes in antigen expression are related to changes in the rate of protein synthesis. It is interesting that two therapeutic treatments based on quite different mechanisms induced similar recovery kinetics of protein synthesis, and that the treatment-induced changes in protein synthesis were reflected in changes in the expression of a surface antigen. This may indicate that such changes will not only be found for these two treatments and for the p250 surface antigen, but also probably indicate a more general effect on several membrane proteins.

Appendix

The total cellular amount of p250 antigen can be found by using the protein synthesis data as the input function in a one-compartment first-order kinetic analysis, as shown in the following. If A(t) is the time-dependent total cellular amount of antigen,

dA(t)/dt = -kA(t) + g(t)

expresses the differential changes in A(t) with time. There is assumed to be a constant relative decrease, k, in A per time unit, reflected in the biological half-life of this antigen; and g(t) is the synthesis rate of the antigen. Under stationary conditions with constant generation rate g, there is no change in A(t), resulting in the stationary value A(t) = A_s = g/k. For the dose of 43.5°C for 90 min, the hyperthermia-induced changes in protein synthesis may be approximated by the following function: (i) t < 0: g(t) = g
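For completeness, a standard solution of this one-compartment model on any interval where the synthesis rate g is constant is (a textbook result supplied here as a sketch, not reproduced from the original):

\[ A(t) = \frac{g}{k} + \Bigl(A(0) - \frac{g}{k}\Bigr)e^{-kt}, \qquad k = \frac{\ln 2}{t_{1/2}}, \]

so with the half-life t_{1/2} = 15.6 h reported for p250 in the Discussion, A(t) relaxes toward the stationary level g/k with rate constant k ≈ 0.044 h^{-1}; a piecewise-constant g(t), as in the approximation above, yields a piecewise-exponential A(t).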
// UnmarshalResponse reads the response from the given request and unmarshals
// the value into the given result.
func UnmarshalResponse(req *http.Request, result interface{}) (*http.Response, error) {
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return resp, err
	}
	if resp.StatusCode == http.StatusNoContent {
		return resp, ErrNoContent
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return resp, err
	}
	return resp, json.Unmarshal(b, result)
}
import muster, { action, applyTransforms, array, call, catchError, computed, createCaller, createSetter, defer, DoneNodeType, entries, error, fields, filter, first, fromPromise, get, getInvalidTypeError, includes, isPending, isQueryNodeDefinition, key, last, length, match, Muster, MusterError, nil, NodeDefinition, NOT_FOUND, nth, OkNodeType, ref, relative, root, set, startsWith, thenable, toNode, types, value, ValueNodeType, valueOf, variable, withErrorPath, } from '../..';
import { operation, runScenario } from '../../test';
import { query } from './query';

describe('query', () => { describe('WHEN checking if a node is a query node', () => { describe('AND the node is query node', () => { it('SHOULD return true', () => { const queryNode = query(root(), fields({})); expect(isQueryNodeDefinition(queryNode)).toBe(true); }); }); describe('AND the node is not query node', () => { it('SHOULD return false', () => { expect(isQueryNodeDefinition({} as any)).toBe(false); }); }); });

runScenario({ description: 'GIVEN a graph with a value root node', graph: () => muster(value('foo')), operations: [ operation({ description: 'AND a single-level query', input: query( root(), fields({ item1: key(value('foo')), item2: key(value('bar')), item3: key(value('baz')), }), ), expected: withErrorPath( error( getInvalidTypeError('Node does not support getChild operation', { expected: 'Node supporting getChild operation', received: value('foo'), }), ), { path: [] }, ), }), operation({ description: 'AND a single-level items query', input: query( root(), entries( fields({ item1: key(value('foo')), item2: key(value('bar')), item3: key(value('baz')), }), ), ), expected: withErrorPath( error( getInvalidTypeError('Node does not support getItems operation', { expected: 'Node supporting getItems operation', received: value('foo'), }), ), { path: [] }, ), }), ], });

runScenario({ description: 'GIVEN a graph with a branch root node', graph: () => muster({ foo: value('value:foo'), bar: value('value:bar'), baz: value('value:baz'), qux: value('value:qux'), }), operations: [ operation({ description: 'AND an empty query', input: query(root(), fields({})), expected: value({}), }), operation({ description: 'AND a single-level query with shorthand fields', input: query(root(), { foo: true, bar: true, baz: true, }), expected: value({ foo: 'value:foo', bar: 'value:bar', baz: 'value:baz', }), }), operation({ description: 'AND a single-level query', input: query( root(), fields({ first: key(value('foo')), second: key(value('bar')), third: key(value('baz')), }), ), expected: value({ first: 'value:foo', second: 'value:bar', third: 'value:baz', }), }), operation({ description: 'AND a single-level query containing invalid paths', input: query( root(), fields({ first: key(value('foo')), second: key(value('bar')), third: key(value('asdf')), }), ), expected: withErrorPath(error('Invalid child key: "asdf"', { code: NOT_FOUND }), { path: [], }), }), operation({ description: 'AND a single-level query containing invalid fragment names', input: query( root(), fields({ first: key(value('foo')), second: key(value('bar')), third: key(error('error:baz')), }), ), expected: withErrorPath( error( getInvalidTypeError('Query does not support non-value keys', { expected: [ValueNodeType], received: error('error:baz'), }), ), { path: [] }, ), }), ], });

runScenario({ description: 'GIVEN a graph with a nested branch root node', graph: () => muster({ foo: value('value:foo'), bar: value('value:bar'), baz: value('value:baz'), qux:
value('value:qux'), nested: { items: { a: value('value:a'), b: value('value:b'), c: value('value:c'), d: value('value:d'), }, }, }), operations: [ operation({ description: 'AND a nested query', input: query( root(), fields({ req1: key(value('foo')), req2: key(value('bar')), req3: key(value('baz')), req4: key(value('nested'), { req5: key(value('items'), { req6: key(value('a')), req7: key(value('b')), req8: key(value('c')), }), }), }), ), expected: value({ req1: 'value:foo', req2: 'value:bar', req3: 'value:baz', req4: { req5: { req6: 'value:a', req7: 'value:b', req8: 'value:c', }, }, }), }), operation({ description: 'AND a nested query containing invalid paths', input: query( root(), fields({ req1: key(value('foo')), req2: key(value('bar')), req3: key(value('nested'), { req4: key(value('items'), { req5: key(value('a')), req6: key(value('b')), req7: key(value('c')), }), }), req8: key(value('missing'), { req9: key(value('deep'), { req10: key(value('path1')), req11: key(value('path2')), req12: key(value('path3')), }), }), }), ), expected: withErrorPath(error('Invalid child key: "missing"', { code: NOT_FOUND }), { path: [], }), }), operation({ description: 'AND a nested query containing error paths', input: query( root(), fields({ req1: key(value('foo')), req2: key(value('bar')), req3: key(value('nested'), { req4: key(value('items'), { req5: key(value('a')), req6: key(value('b')), req7: key(value('c')), }), req8: key(error('error:baz')), }), }), ), expected: withErrorPath( error( getInvalidTypeError('Query does not support non-value keys', { expected: [ValueNodeType], received: error('error:baz'), }), ), { path: [] }, ), }), operation({ description: 'AND a nested query containing invalid fragment names', input: query( root(), fields({ req1: key(value('foo')), req2: key(value('bar')), req3: key(error('error:baz')), }), ), expected: withErrorPath( error( getInvalidTypeError('Query does not support non-value keys', { expected: [ValueNodeType], received: error('error:baz'), }), ), { path: [] }, ), }), operation({ description: 'AND a query containing a non-nested request for a tree node', input: query( root(), fields({ req1: key(value('foo')), req2: key(value('bar')), req3: key(value('nested')), }), ), expected: withErrorPath(error('Invalid query: missing child fields'), { path: [] }), }), ], }); runScenario({ description: 'GIVEN a graph with a branch that returns an error', graph: () => muster({ foo: value('value:foo'), bar: value('value:bar'), baz: value('value:baz'), qux: value('value:qux'), nested: error('invalid'), }), operations: [ operation({ description: 'AND a nested query containing children of the error branch', input: query( root(), fields({ test1: key(value('foo')), test2: key(value('bar')), test3: key(value('baz')), test4: key( value('nested'), fields({ test5: key( value('items'), fields({ test6: key(value('a')), test7: key(value('b')), test8: key(value('c')), }), ), }), ), }), ), expected: withErrorPath(error('invalid'), { path: ['nested'] }), }), ], }); runScenario({ description: 'GIVEN a graph with a ref to an error', graph: () => muster({ error: error('Some error'), refToError: ref('error'), }), operations: [ operation({ description: 'WHEN making a query to a `refToError`', input: query(root(), { refToError: key('refToError'), }), expected: withErrorPath(error('Some error'), { path: ['error'] }), }), ], }); runScenario({ description: 'GIVEN a graph with a branch that returns an error wrapped in a value node', graph: () => muster({ foo: value('value:foo'), bar: value('value:bar'), 
baz: value('value:baz'), qux: value('value:qux'), invalid: value(error('invalid')), }), operations: [ operation({ description: 'AND a nested query containing the error branch', input: query( root(), fields({ a: key(value('foo')), b: key(value('bar')), c: key(value('baz')), d: key(value('invalid')), }), ), expected: value({ a: 'value:foo', b: 'value:bar', c: 'value:baz', d: error('invalid'), }), }), ], }); function createItemBranch(num: number) { return { name: value(`Item ${num}`), someNumber: value(num + 1), }; } runScenario({ description: 'GIVEN a graph with a branch containing a nested collection', graph: () => muster({ item1: createItemBranch(1), item2: createItemBranch(2), item3: createItemBranch(3), items: array([ref('item1'), ref('item2'), ref('item3')]), nested: { collection: ref('items'), }, }), operations: [ operation({ description: 'WHEN requesting a collection as a primitive value', input: query( root(), fields({ r1: key(value('items')), }), ), expected: withErrorPath(error('Invalid query: missing list item fields'), { path: [], }), }), operation({ description: 'WHEN requesting a collection item branches as primitive values', input: query( root(), fields({ r1: key(value('items'), entries()), }), ), expected: withErrorPath(error('Invalid query: missing child fields'), { path: ['item1'] }), }), operation({ description: 'WHEN requesting collection', input: query( root(), fields({ r1: key( value('nested'), fields({ r2: key( value('collection'), entries({ r3: key(value('name')), r4: key(value('someNumber')), }), ), }), ), }), ), expected: value({ r1: { r2: [ { r3: 'Item 1', r4: 2, }, { r3: 'Item 2', r4: 3, }, { r3: 'Item 3', r4: 4, }, ], }, }), }), ], }); runScenario({ description: 'GIVEN a graph with deeply nested collections', graph: () => muster({ deeply: [ { nested: [ { items: [{ id: '1.1.1' }, { id: '1.1.2' }, { id: '1.1.3' }] }, { items: [{ id: '1.2.1' }, { id: '1.2.2' }, { id: '1.2.3' }] }, { items: [{ id: '1.3.1' }, { id: '1.3.2' }, { id: '1.3.3' }] }, ], }, { nested: [ { items: [{ id: '2.1.1' }, { id: '2.1.2' }, { id: '2.1.3' }] }, { items: [{ id: '2.2.1' }, { id: '2.2.2' }, { id: '2.2.3' }] }, { items: [{ id: '2.3.1' }, { id: '2.3.2' }, { id: '2.3.3' }] }, ], }, ], }), operations: [ operation({ description: 'WHEN requesting nested collection items with intermediate nodes', input: query( root(), fields({ deeply: key( 'deeply', entries( fields({ nested: key( 'nested', entries( fields({ items: key( 'items', entries( fields({ id: key('id'), }), ), ), }), ), ), }), ), ), }), ), expected: value({ deeply: [ { nested: [ { items: [{ id: '1.1.1' }, { id: '1.1.2' }, { id: '1.1.3' }] }, { items: [{ id: '1.2.1' }, { id: '1.2.2' }, { id: '1.2.3' }] }, { items: [{ id: '1.3.1' }, { id: '1.3.2' }, { id: '1.3.3' }] }, ], }, { nested: [ { items: [{ id: '2.1.1' }, { id: '2.1.2' }, { id: '2.1.3' }] }, { items: [{ id: '2.2.1' }, { id: '2.2.2' }, { id: '2.2.3' }] }, { items: [{ id: '2.3.1' }, { id: '2.3.2' }, { id: '2.3.3' }] }, ], }, ], }), }), ], }); runScenario({ description: 'GIVEN a graph with deeply nested collections with no intermediate nodes', graph: () => muster({ items: [ [ [{ id: '1.1.1' }, { id: '1.1.2' }, { id: '1.1.3' }], [{ id: '1.2.1' }, { id: '1.2.2' }, { id: '1.2.3' }], [{ id: '1.3.1' }, { id: '1.3.2' }, { id: '1.3.3' }], ], [ [{ id: '2.1.1' }, { id: '2.1.2' }, { id: '2.1.3' }], [{ id: '2.2.1' }, { id: '2.2.2' }, { id: '2.2.3' }], [{ id: '2.3.1' }, { id: '2.3.2' }, { id: '2.3.3' }], ], ], }), operations: [ operation({ description: 'WHEN requesting nested collection items', 
input: query( root(), fields({ items: key( 'items', entries( entries( entries( fields({ id: key('id'), }), ), ), ), ), }), ), expected: value({ items: [ [ [{ id: '1.1.1' }, { id: '1.1.2' }, { id: '1.1.3' }], [{ id: '1.2.1' }, { id: '1.2.2' }, { id: '1.2.3' }], [{ id: '1.3.1' }, { id: '1.3.2' }, { id: '1.3.3' }], ], [ [{ id: '2.1.1' }, { id: '2.1.2' }, { id: '2.1.3' }], [{ id: '2.2.1' }, { id: '2.2.2' }, { id: '2.2.3' }], [{ id: '2.3.1' }, { id: '2.3.2' }, { id: '2.3.3' }], ], ], }), }), ], }); runScenario({ description: 'GIVEN a collection and various transforms', graph: () => muster({ items: ['foo', 'bar', 'baz'], filteredItems: applyTransforms(ref('items'), [ filter((item: NodeDefinition) => startsWith('b', item)), ]), firstItem: get(ref('items'), first()), secondItem: get(ref('items'), nth(1)), lastItem: get(ref('items'), last()), numItems: get(ref('items'), length()), numFilteredItems: get(ref('filteredItems'), length()), }), operations: [ operation({ description: 'AND a query requests multiple transformed versions of the collection', input: query(root(), { items: entries(), filteredItems: entries(), firstItem: true, secondItem: true, lastItem: true, numItems: true, numFilteredItems: true, }), expected: value({ items: ['foo', 'bar', 'baz'], filteredItems: ['bar', 'baz'], firstItem: 'foo', secondItem: 'bar', lastItem: 'baz', numItems: 3, numFilteredItems: 2, }), }), ], }); describe('GIVEN a graph with values, variables and actions', () => { let app: Muster; let testError: Error; beforeEach(() => { app = muster({ readOnlyValue: value('Static value'), variable: variable('Dynamic value'), testAction: action((arg1: number, arg2: number) => arg1 + arg2), errorAction: action(() => { throw (testError = new Error('error:foo')); }), }); }); describe('WHEN requesting the getters, setters and callers from the graph', () => { let queryResult: any; beforeEach(async () => { queryResult = valueOf( await thenable( app.resolve( query( root(), fields({ value: key('readOnlyValue'), variable: key('variable'), setVariable: createSetter('variable'), callTestAction: createCaller('testAction'), }), ), { raw: true }, ), ), ); }); it('SHOULD return correct value', () => { expect(queryResult).toEqual({ value: 'Static value', variable: 'Dynamic value', setVariable: expect.any(Function), callTestAction: expect.any(Function), }); }); describe('AND then calling the returned createSetter', () => { let setResult: any; beforeEach(async () => { const setVariable = queryResult.setVariable; setResult = await setVariable('Modified dynamic value'); }); it('SHOULD return correct value', () => { expect(setResult).toEqual('Modified dynamic value'); }); describe('AND then requesting the updated variable out of the graph', () => { let updatedVariableResult: NodeDefinition; beforeEach(async () => { updatedVariableResult = await thenable(app.resolve(ref('variable'), { raw: true })); }); it('SHOULD return correct updated value', () => { expect(updatedVariableResult).toEqual(value('Modified dynamic value')); }); }); }); describe('AND then calling the returned createCaller', () => { let callResult: any; beforeEach(async () => { const callTestAction = queryResult.callTestAction; callResult = await callTestAction(5, 2); }); it('SHOULD return correct value', () => { expect(callResult).toEqual(7); }); }); }); describe('WHEN requesting a caller that throws an error', () => { let queryResult: any; beforeEach(async () => { queryResult = valueOf( await thenable( app.resolve( query( root(), fields({ callErrorAction: 
createCaller('errorAction'), }), ), { raw: true }, ), ), ); }); it('SHOULD return correct value', () => { expect(queryResult).toEqual({ callErrorAction: expect.any(Function), }); }); describe('AND the caller is called', () => { let error: any; let result: any; beforeEach(() => { return queryResult .callErrorAction() .then((val: any) => { result = val; }) .catch((e: any) => { error = e; }); }); it('SHOULD throw an error', () => { expect(result).toBe(undefined); expect(error).toEqual( new MusterError(testError, { code: undefined, data: undefined, path: undefined, remotePath: undefined, }), ); expect(error.error).toBe(testError); expect(error.code).toBe(undefined); expect(error.data).toBe(undefined); expect(error.path).toBe(undefined); expect(error.remotePath).toBe(undefined); }); }); }); describe('WHEN requesting a setter that throws an error', () => { let queryResult: any; beforeEach(async () => { queryResult = valueOf( await thenable( app.resolve( query( root(), fields({ setErrorSetter: createSetter('nonexistent'), }), ), { raw: true }, ), ), ); }); it('SHOULD return correct value', () => { expect(queryResult).toEqual({ setErrorSetter: expect.any(Function), }); }); describe('AND the setter is called', () => { let error: any; let result: any; beforeEach(() => { return queryResult .setErrorSetter('value:foo') .then((val: any) => { result = val; }) .catch((e: any) => { error = e; }); }); it('SHOULD throw an error', () => { expect(result).toBe(undefined); expect(error).toEqual( new MusterError(new Error('Invalid child key: "nonexistent"'), { code: 'NOT_FOUND', data: undefined, path: undefined, remotePath: undefined, }), ); expect(error.error).toEqual(new Error('Invalid child key: "nonexistent"')); expect(error.code).toBe('NOT_FOUND'); expect(error.data).toBe(undefined); expect(error.path).toBe(undefined); expect(error.remotePath).toBe(undefined); }); }); }); }); describe('GIVEN a graph with an asynchronous action', () => { let app: Muster; beforeEach(() => { app = muster({ testAction: action(function*() { const result = yield fromPromise(() => Promise.resolve(value('foo'))); return `Result: ${result}`; }), }); }); describe('WHEN requesting the getters, setters and callers from the graph', () => { let queryResult: any; beforeEach(async () => { queryResult = valueOf( await thenable( app.resolve( query( root(), fields({ testAction: createCaller('testAction'), }), ), { raw: true }, ), ), ); }); it('SHOULD return correct value', () => { expect(queryResult).toEqual({ testAction: expect.any(Function), }); }); describe('AND then calling the returned action', () => { it('SHOULD return correct value', async () => { expect(await queryResult.testAction()).toEqual('Result: foo'); }); }); }); }); runScenario({ description: 'GIVEN a graph with a branch containing a nil node', graph: () => muster({ myCollection: nil(), }), operations: [ operation({ description: 'WHEN making a query to the nil collection for items', input: query( root(), fields({ myCollection: key('myCollection', entries()), }), ), expected: value({ myCollection: [], }), }), ], }); runScenario({ description: 'GIVEN a muster graph containing a variable and an action in a branch', graph: () => muster({ nested: { foo: variable('foo'), getFoo: action(() => ref(relative('foo'))), setFoo: action(() => set(relative('foo'), 'bar')), }, }), operations: [ operation({ description: 'WHEN the variable gets set', input: set(['nested', 'foo'], 'bar'), expected: value('bar'), operations: [ operation({ description: 'AND getting a createCaller for `getFoo`', 
input: query(root(), { nested: key('nested', { getFoo: createCaller('getFoo'), }), }), operations: (subscriber, results) => [ operation({ description: 'AND the returned createCaller gets called', input: fromPromise(() => valueOf(results()[0]).nested.getFoo()), expected: value('bar'), }), ], }), operation({ description: 'AND the `getFoo` gets called', input: call(['nested', 'getFoo']), expected: value('bar'), }), ], }), operation({ description: 'WHEN getting a createCaller for `setFoo`', input: query(root(), { nested: key('nested', { setFoo: createCaller('setFoo'), }), }), operations: (subscriber, results) => [ operation({ description: 'AND the `setFoo` gets called', input: fromPromise(() => valueOf(results()[0]).nested.setFoo()), expected: value('bar'), operations: [ operation({ description: 'WHEN calling `getFoo`', input: ref('nested', 'foo'), expected: value('bar'), }), ], }), ], }), ], }); runScenario(() => { let promisesToResolve: Array<() => void>; function resolvePromises() { promisesToResolve.forEach((resolve) => resolve()); promisesToResolve = []; } return { description: 'GIVEN a muster graph containing async nodes', before() { promisesToResolve = []; }, graph: () => muster({ name: variable('initial'), asyncName: computed([ref('name')], (name) => ref(name)), [match(types.string, 'name')]: fromPromise(({ name }) => new Promise((res) => promisesToResolve.push(res)).then(() => name), ), }), operations: [ operation({ description: 'WHEN making a query to muster for deferred async node', input: query(root(), { asyncName: defer('asyncName'), isLoadingAsyncName: isPending('asyncName'), }), expected: value({ asyncName: undefined, isLoadingAsyncName: true, }), operations: (subscriber) => [ operation({ description: 'AND the initial promise resolves', before() { jest.clearAllMocks(); resolvePromises(); }, assert() { expect(subscriber().next).toHaveBeenCalledTimes(1); expect(subscriber().next).toHaveBeenCalledWith( value({ asyncName: 'initial', isLoadingAsyncName: false, }), ); }, operations: [ operation({ description: 'AND the name changes', before() { jest.clearAllMocks(); }, input: set('name', 'updated'), assert() { expect(subscriber().next).toHaveBeenCalledTimes(1); expect(subscriber().next).toHaveBeenCalledWith( value({ asyncName: 'initial', isLoadingAsyncName: true, }), ); }, operations: [ operation({ description: 'AND the promise resolves', before() { jest.clearAllMocks(); resolvePromises(); }, assert() { expect(subscriber().next).toHaveBeenCalledTimes(1); expect(subscriber().next).toHaveBeenCalledWith( value({ asyncName: 'updated', isLoadingAsyncName: false, }), ); }, }), ], }), ], }), ], }), ], }; }); runScenario({ description: 'GIVEN a muster graph containing a value node and a nil node', graph: () => muster({ name: 'Bob', other: nil(), }), operations: [ operation({ description: 'WHEN making a query for the value node and the nil node', input: query(root(), { name: key('name'), other: key('other'), }), expected: value({ name: 'Bob', other: undefined, }), }), operation({ description: 'WHEN making a query for the value node and deeply nested children of the nil', input: query(root(), { name: key('name'), other: key( 'other', fields({ deeply: key( 'deeply', fields({ nested: key('nested'), }), ), }), ), }), expected: value({ name: 'Bob', other: { deeply: { nested: undefined, }, }, }), }), ], }); runScenario(() => { let promisesToResolve: Array<() => void>; function resolvePromises() { promisesToResolve.forEach((resolve) => resolve()); } return { description: 'GIVEN an asynchronous 
branch', before() { promisesToResolve = []; }, graph: () => muster({ user: fromPromise(() => new Promise((resolve) => promisesToResolve.push(resolve)).then(() => toNode({ firstName: 'Bob', lastName: 'Smith', }), ), ), }), operations: [ operation({ description: 'WHEN making a deferred query with a fallback', input: query(root(), { user: defer( value('Loading...'), key('user', { firstName: key('firstName'), lastName: key('lastName'), }), ), }), expected: value({ user: 'Loading...', }), operations: (subscriber) => [ operation({ description: 'AND the promise resolves', before() { jest.clearAllMocks(); resolvePromises(); }, assert() { expect(subscriber().next).toHaveBeenCalledTimes(1); expect(subscriber().next).toHaveBeenCalledWith( value({ user: { firstName: 'Bob', lastName: 'Smith', }, }), ); }, }), ], }), ], }; }); runScenario({ description: 'GIVEN a leaf that resolves to an error', graph: () => muster({ name: error('Test error'), }), operations: [ operation({ description: 'WHEN making a query to get the leaf with fallback', input: query(root(), { name: catchError(value('some fallback value'), 'name'), }), expected: value({ name: 'some fallback value', }), }), ], }); runScenario({ description: 'GIVEN a branch that resolves to an error', graph: () => muster({ user: error('test error'), }), operations: [ operation({ description: 'WHEN making a query to get the branch with fallback', input: query(root(), { user: catchError(value('some fallback user'), { firstName: key('firstName'), lastName: key('lastName'), }), }), expected: value({ user: 'some fallback user', }), }), ], }); runScenario({ description: 'GIVEN a graph containing a nested tree', graph: () => muster({ user: { age: 25, firstName: 'Bob', lastName: 'Smith', fullName: computed( [ref(relative('firstName')), ref(relative('lastName'))], (firstName, lastName) => `${firstName} ${lastName}`, ), }, }), operations: [ operation({ description: 'WHEN the query is made using a short-hand syntax', input: query(root(), { user: { age: true, fullName: true, }, }), expected: value({ user: { age: 25, fullName: 'Bob Smith', }, }), }), ], }); runScenario({ description: 'GIVEN a graph containing a nested tree', graph: () => muster({ user: { name: 'Bob', }, }), operations: [ operation({ description: 'WHEN making a query to get the tree as a leaf', input: query(root(), { user: true, }), expected: withErrorPath(error('Invalid query: missing child fields'), { path: [] }), }), ], }); runScenario({ description: 'GIVEN a graph containing a collection', graph: () => muster({ numbers: [1, 2, 3], }), operations: [ operation({ description: 'WHEN making a short-hand query to get these items', input: query(root(), { numbers: entries(), }), expected: value({ numbers: [1, 2, 3], }), }), ], }); runScenario({ description: 'GIVEN a graph containing a collection of people filtered by first name', graph: () => muster({ filteredPeople: applyTransforms( [ { firstName: 'Bob', lastName: 'Smith' }, { firstName: 'Kate', lastName: 'Doe' }, { firstName: 'Jane', lastName: 'Jonson' }, ], [filter((person: any) => includes('a', get(person, 'firstName')))], ), }), operations: [ operation({ description: 'WHEN a query to get filtered people is made', input: query( ref('filteredPeople'), entries({ firstName: true, }), ), expected: value([{ firstName: 'Kate' }, { firstName: 'Jane' }]), }), ], }); runScenario({ description: 'GIVEN a computed node that depends on a tree() node', graph: () => muster({ user: { firstName: 'Bob', lastName: 'Smith', }, fullName: computed([ref('user')], (user) =>
`${user.firstName} ${user.lastName}`), }), operations: [ operation({ description: 'WHEN a query is made to get the full name', input: query(root(), { fullName: true, }), expected: withErrorPath( error( getInvalidTypeError('Invalid computed node dependencies', { expected: [ValueNodeType, OkNodeType, DoneNodeType], received: toNode({ firstName: 'Bob', lastName: 'Smith', }), }), ), { path: ['user'] }, ), }), ], }); });
/* * called in either blk_queue_cleanup or elevator_switch, tagset * is required for freeing requests */ void blk_mq_sched_free_requests(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); } }
/** * @return the indication of whether objects will be validated before being returned to the pool * @see ConnectionProperties#setTestOnReturn(boolean) */ public final boolean isTestOnReturn() { return testOnReturn; }
#include <iostream> #include <iomanip> #include <sstream> #include <chrono> #include <ctime> #include <Log.h> std::ostream &Log::stream() { return std::cout; } std::string Log::header(const LogLevel &level) { auto now = std::chrono::system_clock::now(); auto time = std::chrono::system_clock::to_time_t(now); std::stringstream ss; ss << "[" << std::put_time(std::localtime(&time), "%Y-%m-%d %X") << "]" << " "; ss << "["; switch (level) { case LogLevel::Error: ss << "ERROR"; break; case LogLevel::Info: ss << "INFO"; break; case LogLevel::Debug: ss << "DEBUG"; break; case LogLevel::Warning: ss << "WARNING"; break; } ss << "] "; return ss.str(); }
/* * Copyright WSO2 Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.wso2.carbon.apimgt.impl; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.wso2.carbon.apimgt.api.APIManagementException; import org.wso2.carbon.apimgt.api.FaultGatewaysException; import org.wso2.carbon.apimgt.api.dto.CertificateInformationDTO; import org.wso2.carbon.apimgt.api.dto.CertificateMetadataDTO; import org.wso2.carbon.apimgt.api.dto.ClientCertificateDTO; import org.wso2.carbon.apimgt.api.model.API; import org.wso2.carbon.apimgt.api.model.APIIdentifier; import org.wso2.carbon.apimgt.api.model.APIStateChangeResponse; import org.wso2.carbon.apimgt.api.model.APIStore; import org.wso2.carbon.apimgt.api.model.Documentation; import org.wso2.carbon.apimgt.api.model.Identifier; import org.wso2.carbon.apimgt.api.model.LifeCycleEvent; import org.wso2.carbon.apimgt.api.model.Mediation; import org.wso2.carbon.apimgt.api.model.ResourceFile; import org.wso2.carbon.apimgt.api.model.SubscribedAPI; import org.wso2.carbon.apimgt.api.model.Subscriber; import org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder; import org.wso2.carbon.apimgt.impl.utils.APIUtil; import org.wso2.carbon.governance.api.generic.dataobjects.GenericArtifact; import org.wso2.carbon.registry.core.Registry; import org.wso2.carbon.registry.core.Resource; import org.wso2.carbon.utils.multitenancy.MultitenantUtils; import java.io.ByteArrayInputStream; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; /** * User aware APIProvider implementation which ensures that the invoking user has the * necessary privileges to execute the operations. Users can use this class as an * entry point to accessing the core API provider functionality. In order to ensure * proper initialization and cleanup of these objects, the constructors of the class * has been hidden. Users should use the APIManagerFactory class to obtain an instance * of this class. This implementation also allows anonymous access to some of the * available operations. However if the user attempts to execute a privileged operation * when the object had been created in the anonymous mode, an exception will be thrown. */ @MethodStats public class UserAwareAPIProvider extends APIProviderImpl { protected String username; private static final Log log = LogFactory.getLog(UserAwareAPIProvider.class); UserAwareAPIProvider(String username) throws APIManagementException { super(username); this.username = username; this.tenantDomain = MultitenantUtils.getTenantDomain(username); APIManagerConfiguration config = ServiceReferenceHolder.getInstance(). 
getAPIManagerConfigurationService().getAPIManagerConfiguration(); isAccessControlRestrictionEnabled = Boolean .parseBoolean(config.getFirstProperty(APIConstants.API_PUBLISHER_ENABLE_ACCESS_CONTROL_LEVELS)); } @Override public API addAPI(API api) throws APIManagementException { return super.addAPI(api); } @Override public List<String> getCustomInSequences(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getCustomInSequences(apiIdentifier); } @Override public void updateAPI(API api) throws APIManagementException, FaultGatewaysException { checkAccessControlPermission(api.getId()); super.updateAPI(api); } @Override public ResourceFile getWSDL(APIIdentifier apiId) throws APIManagementException { checkAccessControlPermission(apiId); return super.getWSDL(apiId); } @Override public boolean updateAPIsInExternalAPIStores(API api, Set<APIStore> apiStoreSet, boolean apiOlderVersionExist) throws APIManagementException { checkAccessControlPermission(api.getId()); return super.updateAPIsInExternalAPIStores(api, apiStoreSet, apiOlderVersionExist); } @Override public List<String> getCustomOutSequences(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getCustomOutSequences(apiIdentifier); } @Override public List<String> getCustomApiFaultSequences(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getCustomApiFaultSequences(apiIdentifier); } @Override public List<String> getCustomFaultSequences(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getCustomFaultSequences(apiIdentifier); } @Override public API getLightweightAPIByUUID(String uuid, String organization) throws APIManagementException { API api = super.getLightweightAPIByUUID(uuid, organization); if (api != null) { checkAccessControlPermission(api.getId()); } return api; } @Override public API getLightweightAPI(APIIdentifier identifier) throws APIManagementException { checkAccessControlPermission(identifier); return super.getLightweightAPI(identifier); } @Override public String getOpenAPIDefinition(Identifier apiId, String organization) throws APIManagementException { checkAccessControlPermission(apiId); return super.getOpenAPIDefinition(apiId, organization); } @Override public void removeDocumentation(APIIdentifier apiId, String docName, String docType, String orgId) throws APIManagementException { checkAccessControlPermission(apiId); super.removeDocumentation(apiId, docName, docType, orgId); } @Override public void removeDocumentation(Identifier id, String docId, String orgId) throws APIManagementException { checkAccessControlPermission(id); super.removeDocumentation(id, docId, orgId); } @Override public boolean checkIfAPIExists(APIIdentifier apiId) throws APIManagementException { return super.checkIfAPIExists(apiId); } @Override public Documentation updateDocumentation(String apiId, Documentation documentation, String organization) throws APIManagementException { //checkAccessControlPermission(apiId); return super.updateDocumentation(apiId, documentation, organization); } @Override public void addDocumentationContent(API api, String documentationName, String text) throws APIManagementException { checkAccessControlPermission(api.getId()); super.addDocumentationContent(api, documentationName, text); } @Override public void copyAllDocumentation(APIIdentifier apiId, String 
toVersion) throws APIManagementException { checkAccessControlPermission(apiId); super.copyAllDocumentation(apiId, toVersion); } @Override public List<LifeCycleEvent> getLifeCycleEvents(APIIdentifier apiId, String organization) throws APIManagementException { checkAccessControlPermission(apiId); return super.getLifeCycleEvents(apiId, organization); } @Override public void updateSubscription(APIIdentifier apiId, String subStatus, int appId, String organization) throws APIManagementException { apiMgtDAO.updateSubscription(apiId, subStatus, appId, organization); } @Override public void updateSubscription(SubscribedAPI subscribedAPI) throws APIManagementException { super.updateSubscription(subscribedAPI); } @Override public SubscribedAPI getSubscriptionByUUID(String uuid) throws APIManagementException { return super.getSubscriptionByUUID(uuid); } public APIStateChangeResponse changeLifeCycleStatus(APIIdentifier apiIdentifier, String targetStatus, String organization) throws APIManagementException, FaultGatewaysException { checkAccessControlPermission(apiIdentifier); return super.changeLifeCycleStatus(apiIdentifier, targetStatus, organization); } @Override public boolean checkAndChangeAPILCCheckListItem(APIIdentifier apiIdentifier, String checkItemName, boolean checkItemValue) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.checkAndChangeAPILCCheckListItem(apiIdentifier, checkItemName, checkItemValue); } public boolean changeAPILCCheckListItems(APIIdentifier apiIdentifier, int checkItem, boolean checkItemValue) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.changeAPILCCheckListItems(apiIdentifier, checkItem, checkItemValue); } @Override public Map<String, Object> getAPILifeCycleData(APIIdentifier apiId) throws APIManagementException { checkAccessControlPermission(apiId); return super.getAPILifeCycleData(apiId); } @Override public int addClientCertificate(String userName, APIIdentifier apiIdentifier, String certificate, String alias, String tierName, String organization) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.addClientCertificate(userName, apiIdentifier, certificate, alias, tierName, organization); } @Override public int deleteClientCertificate(String userName, APIIdentifier apiIdentifier, String alias) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.deleteClientCertificate(userName, apiIdentifier, alias); } @Override public Set<Subscriber> getSubscribersOfAPI(APIIdentifier identifier) throws APIManagementException { checkAccessControlPermission(identifier); return super.getSubscribersOfAPI(identifier); } @Override public String getAPILifeCycleStatus(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getAPILifeCycleStatus(apiIdentifier); } @Override public long getAPISubscriptionCountByAPI(APIIdentifier identifier) throws APIManagementException { checkAccessControlPermission(identifier); return super.getAPISubscriptionCountByAPI(identifier); } @Override public String getDefaultVersion(APIIdentifier apiid) throws APIManagementException { checkAccessControlPermission(apiid); return super.getDefaultVersion(apiid); } @Override public void saveSwagger20Definition(APIIdentifier apiId, String jsonText, String orgId) throws APIManagementException { checkAccessControlPermission(apiId); super.saveSwagger20Definition(apiId, jsonText, orgId); } @Override public 
List<Documentation> getAllDocumentation(Identifier id) throws APIManagementException { checkAccessControlPermission(id); return super.getAllDocumentation(id); } @Override public String getDocumentationContent(Identifier identifier, String documentationName) throws APIManagementException { checkAccessControlPermission(identifier); return super.getDocumentationContent(identifier, documentationName); } @Override public List<Mediation> getAllApiSpecificMediationPolicies(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getAllApiSpecificMediationPolicies(apiIdentifier); } @Override public boolean isAPIUpdateValid(API api) throws APIManagementException { checkAccessControlPermission(api.getId()); return super.isAPIUpdateValid(api); } @Override public String addResourceFile(Identifier identifier, String resourcePath, ResourceFile resourceFile) throws APIManagementException { checkAccessControlPermission(identifier); return super.addResourceFile(identifier, resourcePath, resourceFile); } @Override protected API getAPI(GenericArtifact apiArtifact) throws APIManagementException { API api = APIUtil.getAPI(apiArtifact, registry); if (api != null) { APIUtil.updateAPIProductDependencies(api, registry); checkAccessControlPermission(api.getId()); } return api; } @Override public ResourceFile getIcon(APIIdentifier identifier) throws APIManagementException { checkAccessControlPermission(identifier); return super.getIcon(identifier); } @Override protected GenericArtifact getAPIArtifact(APIIdentifier apiIdentifier) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getAPIArtifact(apiIdentifier); } @Override public Map<Documentation, API> searchAPIDoc(Registry registry, int tenantID, String username, String searchTerm) throws APIManagementException { Map<Documentation, API> apiByDocumentation = APIUtil .searchAPIsByDoc(registry, tenantId, username, searchTerm, APIConstants.PUBLISHER_CLIENT); Map<Documentation, API> filteredAPIDocumentation = new HashMap<>(); for (Map.Entry<Documentation, API> entry : apiByDocumentation.entrySet()) { API api = entry.getValue(); if (api != null) { checkAccessControlPermission(api.getId()); filteredAPIDocumentation.put(entry.getKey(), api); } } return filteredAPIDocumentation; } @Override public Boolean deleteApiSpecificMediationPolicy(Identifier identifier, String apiResourcePath, String mediationPolicyId) throws APIManagementException { checkAccessControlPermission(identifier); return super.deleteApiSpecificMediationPolicy(identifier, apiResourcePath, mediationPolicyId); } @Override public Mediation getApiSpecificMediationPolicy(Identifier identifier, String apiResourcePath, String mediationPolicyId) throws APIManagementException { checkAccessControlPermission(identifier); return super.getApiSpecificMediationPolicy(identifier, apiResourcePath, mediationPolicyId); } @Override public Resource getApiSpecificMediationResourceFromUuid(Identifier identifier, String uuid, String resourcePath) throws APIManagementException { checkAccessControlPermission(identifier); return super.getApiSpecificMediationResourceFromUuid(identifier, uuid, resourcePath); } @Override public String getSequenceFileContent(APIIdentifier apiIdentifier, String sequenceType, String sequenceName) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.getSequenceFileContent(apiIdentifier, sequenceType, sequenceName); } @Override public int addCertificate(String userName, 
String certificate, String alias, String endpoint) throws APIManagementException { return super.addCertificate(userName, certificate, alias, endpoint); } @Override public int deleteCertificate(String userName, String alias, String endpoint) throws APIManagementException { return super.deleteCertificate(userName, alias, endpoint); } @Override public List<CertificateMetadataDTO> getCertificates(String userName) throws APIManagementException { return super.getCertificates(userName); } @Override public List<CertificateMetadataDTO> searchCertificates(int tenantId, String alias, String endpoint) throws APIManagementException { return super.searchCertificates(tenantId, alias, endpoint); } @Override public boolean isCertificatePresent(int tenantId, String alias) throws APIManagementException { return super.isCertificatePresent(tenantId, alias); } @Override public ClientCertificateDTO getClientCertificate(int tenantId, String alias, String organization) throws APIManagementException { ClientCertificateDTO clientCertificateDTO = super.getClientCertificate(tenantId, alias, organization); if (clientCertificateDTO != null) { checkAccessControlPermission(clientCertificateDTO.getApiIdentifier()); } return clientCertificateDTO; } @Override public ClientCertificateDTO getClientCertificate(int tenantId, String alias, APIIdentifier apiIdentifier, String organization) throws APIManagementException { ClientCertificateDTO clientCertificateDTO = super.getClientCertificate(tenantId, alias, organization); if (clientCertificateDTO != null) { checkAccessControlPermission(clientCertificateDTO.getApiIdentifier()); } return clientCertificateDTO; } @Override public CertificateInformationDTO getCertificateStatus(String alias) throws APIManagementException { return super.getCertificateStatus(alias); } @Override public int updateCertificate(String certificateString, String alias) throws APIManagementException { return super.updateCertificate(certificateString, alias); } @Override public int updateClientCertificate(String certificate, String alias, APIIdentifier apiIdentifier, String tier, int tenantId, String organization) throws APIManagementException { checkAccessControlPermission(apiIdentifier); return super.updateClientCertificate(certificate, alias, apiIdentifier, tier, tenantId, organization); } @Override public ByteArrayInputStream getCertificateContent(String alias) throws APIManagementException { return super.getCertificateContent(alias); } @Override public void deleteWorkflowTask(String uuid) throws APIManagementException { super.deleteWorkflowTask(uuid); } }
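The class above hides its constructors and, per its javadoc, should be obtained through APIManagerFactory. The following is a minimal, hedged sketch of that entry point, not taken from the source: the getAPIProvider factory method name and the APIIdentifier constructor arguments follow common carbon-apimgt usage and should be treated as assumptions.

public class UserAwareProviderSketch {
    public static void main(String[] args) throws APIManagementException {
        // Assumed factory call: returns a provider bound to the given user, with
        // tenant resolution handled internally (see the constructor above).
        APIProvider provider = APIManagerFactory.getInstance().getAPIProvider("admin@carbon.super");
        // Reads like this one run an access-control check before delegating to the superclass.
        String status = provider.getAPILifeCycleStatus(new APIIdentifier("admin", "PizzaShackAPI", "1.0.0"));
        System.out.println(status);
    }
}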
Influence of egg storage time and preincubation warming profile on embryonic development, hatchability, and chick quality. When eggs are stored beyond 7 d, hatchability and chick quality decrease. The cause of the negative effects of prolonged egg storage is not clear. The negative effects may be caused by a decrease in embryo viability due to an increase in cell death. The optimal time and curve of preincubation warming (the preincubation warming profile) may be different for eggs stored over short and long periods of time because embryo viability is dependent on egg storage time. The aim of this study was to investigate whether preincubation warming profiles affect embryonic development, hatchability, and chick quality when eggs are stored for a short or prolonged time. Two experiments were conducted. In both experiments, a 2 × 2 completely randomized design was used with 2 storage times (4 and 14 d at 17°C in experiment I and 4 and 13 d at 19°C in experiment II) and 2 preincubation warming profiles (within 4 or 24 h from storage temperature to 37.8°C). In experiment I, results suggested that the effect of preincubation warming profile on hatchability was dependent on storage time. However, because a low number of eggs were used in this experiment, these differences were not significant. In experiment II, the interaction between storage time and preincubation warming profile was observed for embryonic mortality during the first 2 d of incubation and hatchability (P=0.006 and P=0.01, respectively). When storage time was 13 d, embryonic mortality during the first 2 d of incubation decreased by 4.4% and hatchability increased by 5.7% when the 24-h preincubation warming profile was used instead of the 4-h preincubation warming profile. However, no effect of preincubation warming profile was observed when storage time was 4 d. In both experiments, chick quality decreased when storage time increased but was not affected by preincubation warming profile. We concluded that a slow preincubation warming profile is beneficial for hatchability when storage time is prolonged but does not affect chick quality.
// ChangePassword changes user's password func (s *Service) ChangePassword(c *gin.Context, oldPass, newPass string, id int) error { if !s.rbac.EnforceUser(c, id) { return apperr.Forbidden } u, err := s.udb.View(c, id) if err != nil { return err } if !auth.HashMatchesPassword(u.Password, oldPass) { return apperr.New(http.StatusBadRequest, "old password is not correct") } u.Password = auth.HashPassword(newPass) return s.adb.ChangePassword(c, u) }
import { AbstractExpressRoutes } from '../expressRoutesManager'; import { ExpressControllerCommunicationDelegate } from '../../controllers/communicationDelegates/communicationDelegates'; import { BuildingsController } from '../../controllers/buildings/buildingsController'; import { DAOBundle } from '../../models/dataAccessObjects/DAOBundle'; const express = require('express'); export class BuildingsExpressRoutes extends AbstractExpressRoutes { private m_buildingsController: BuildingsController; constructor(baseEndpoint: string, daoBundle: DAOBundle) { super(baseEndpoint, express.Router()); this.m_buildingsController = new BuildingsController(daoBundle.cityDAO, daoBundle.buildingDAO, daoBundle.unitDAO, daoBundle.contractDAO); // Obviously we will need to put in the proper routes here. This is just a placeholder this.router.get('/', (req: any, res: any) => { this.m_buildingsController.buildingList(new ExpressControllerCommunicationDelegate(req, res)); }); } }
// checkWorkerMCPStatus is for reconciling update status of all machines in profiling MCP func (r *MachineConfigReconciler) checkWorkerMCPStatus(ctx context.Context) (ctrl.Result, error) { mcp := &mcv1.MachineConfigPool{} if err := r.ClientGet(ctx, types.NamespacedName{Name: WorkerNodeMCPName}, mcp); err != nil { return ctrl.Result{}, err } r.Lock() if mcv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcv1.MachineConfigPoolUpdating) && mcp.Status.DegradedMachineCount == 0 { var msg string if !r.CtrlConfig.Status.IsDebuggingEnabled() { msg = "Machine config update to disable debugging in progress" r.CtrlConfig.Status.SetCondition(v1alpha1.DebugReady, metav1.ConditionFalse, v1alpha1.ReasonInProgress, msg) } if r.CtrlConfig.Status.IsDebuggingFailed() { msg = "Reverting machine config changes due to failure on all machines" r.CtrlConfig.Status.SetCondition(v1alpha1.DebugReady, metav1.ConditionFalse, v1alpha1.ReasonInProgress, msg) } r.Log.V(1).Info(msg) r.Unlock() return ctrl.Result{}, nil } if mcv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcv1.MachineConfigPoolUpdated) { r.Unlock() if err := r.ensureReqMCNotExists(ctx); err != nil { return ctrl.Result{RequeueAfter: defaultRequeueTime}, err } if err := r.ensureReqMCPNotExists(ctx); err != nil { return ctrl.Result{RequeueAfter: defaultRequeueTime}, err } var msg string r.Lock() if !r.CtrlConfig.Status.IsDebuggingEnabled() { r.EventRecorder.Eventf(r.CtrlConfig, corev1.EventTypeNormal, "ConfigUpdate", "debug config disabled on all machines") msg = "Machine config update to disable debugging completed on all machines" r.CtrlConfig.Status.SetCondition(v1alpha1.DebugReady, metav1.ConditionFalse, v1alpha1.ReasonDisabled, msg) } if r.CtrlConfig.Status.IsDebuggingFailed() { r.EventRecorder.Eventf(r.CtrlConfig, corev1.EventTypeNormal, "ConfigUpdate", "debug config reverted on all machines") msg = "Reverted machine config changes due to failure on all machines" r.CtrlConfig.Status.SetCondition(v1alpha1.DebugReady, metav1.ConditionFalse, v1alpha1.ReasonFailed, msg) } r.Unlock() r.Log.V(1).Info(msg) return ctrl.Result{}, nil } if mcp.Status.DegradedMachineCount != 0 { msg := fmt.Sprintf("%s MCP has %d machines in degraded state", mcp.Name, mcp.Status.DegradedMachineCount) r.EventRecorder.Eventf(r.CtrlConfig, corev1.EventTypeWarning, "ConfigUpdate", msg) if !r.CtrlConfig.Status.IsDebuggingEnabled() { msg = fmt.Sprintf("%s, failed to disable debugging, reconcile again", msg) r.CtrlConfig.Status.SetCondition(v1alpha1.DebugReady, metav1.ConditionFalse, v1alpha1.ReasonInProgress, msg) } if r.CtrlConfig.Status.IsDebuggingFailed() { msg = fmt.Sprintf("%s, failed to revert changes, reconcile again", msg) r.CtrlConfig.Status.SetCondition(v1alpha1.DebugReady, metav1.ConditionFalse, v1alpha1.ReasonInProgress, msg) } r.Unlock() return ctrl.Result{RequeueAfter: defaultRequeueTime}, nil } r.Unlock() if !r.CtrlConfig.Status.IsDebuggingEnabled() { r.Log.V(1).Info("Waiting for disabling debugging to complete on all machines", "MCP", mcp.Name) } if r.CtrlConfig.Status.IsDebuggingFailed() { r.Log.V(1).Info("Waiting for reverting to complete on all machines", "MCP", mcp.Name) } return ctrl.Result{}, nil }
package main import ( "bytes" "fmt" ) //go:generate stringer -type=Element // Floors represents the number of floors const Floors = 4 var numComponents int // Element is the element of the generator or chip type Element uint8 // Element types const ( Promethium Element = iota Cobalt Curium Ruthenium Plutonium Hydrogen Lithium ) // Component represents a given component (generator or chip) type Component struct { elem Element generator bool } func (c Component) String() string { if c.generator { return fmt.Sprintf("%v [G]", c.elem) } return fmt.Sprintf("%v [C]", c.elem) } // Safe implements a test for each object, given the other objects on its floor // Generators are always Safe // Chips must have a matching generator with them func (c Component) Safe(floor []Component) bool { if c.generator { return true } for _, comp := range floor { if comp.generator && comp.elem == c.elem { return true } } return false } // GameState represents a snapshot of game state type GameState struct { elevator int floors [][]Component } func (gs GameState) String() string { var buffer bytes.Buffer for i := Floors - 1; i >= 0; i-- { buffer.WriteString(fmt.Sprintf("%d ", i)) if gs.elevator == i { buffer.WriteString("X | ") } else { buffer.WriteString(" | ") } for _, comp := range gs.floors[i] { buffer.WriteString(comp.String()) buffer.WriteString(" ") } buffer.WriteString("\n") } return buffer.String() } // fingerprint returns a unique int that represents the game state func (gs *GameState) fingerprint() int { return 0 } // NewGameState builds a new gamestate object func NewGameState() *GameState { gs := &GameState{} gs.elevator = 0 gs.floors = make([][]Component, 4) return gs } // InitialGame creates a Part 1 Game object func InitialGame() *GameState { gs := NewGameState() gs.floors[0] = []Component{Component{Hydrogen, false}, Component{Lithium, false}} gs.floors[1] = []Component{Component{Hydrogen, true}} gs.floors[2] = []Component{Component{Lithium, true}} gs.floors[3] = []Component{} numComponents = 4 return gs } func main() { gameState := InitialGame() fmt.Printf("%v\n", gameState) }
/** * Defines the parameters for the origin group override action. */ public class OriginGroupOverrideActionParameters { /** * The odatatype property. */ @JsonProperty(value = "@odata\\.type", required = true) private String odatatype; /** * defines the OriginGroup that would override the DefaultOriginGroup. */ @JsonProperty(value = "originGroup", required = true) private ResourceReference originGroup; /** * Creates an instance of OriginGroupOverrideActionParameters class. */ public OriginGroupOverrideActionParameters() { odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleOriginGroupOverrideActionParameters"; } /** * Get the odatatype value. * * @return the odatatype value */ public String odatatype() { return this.odatatype; } /** * Set the odatatype value. * * @param odatatype the odatatype value to set * @return the OriginGroupOverrideActionParameters object itself. */ public OriginGroupOverrideActionParameters withOdatatype(String odatatype) { this.odatatype = odatatype; return this; } /** * Get defines the OriginGroup that would override the DefaultOriginGroup. * * @return the originGroup value */ public ResourceReference originGroup() { return this.originGroup; } /** * Set defines the OriginGroup that would override the DefaultOriginGroup. * * @param originGroup the originGroup value to set * @return the OriginGroupOverrideActionParameters object itself. */ public OriginGroupOverrideActionParameters withOriginGroup(ResourceReference originGroup) { this.originGroup = originGroup; return this; } }
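A minimal usage sketch, not part of the original file: it exercises the fluent configuration this class supports. The one assumption is that ResourceReference has a public no-argument constructor and is populated elsewhere; everything else uses only the methods defined above.

public class OriginGroupOverrideSketch {
    public static void main(String[] args) {
        // Assumed: ResourceReference can be instantiated directly.
        ResourceReference originGroup = new ResourceReference();
        // The constructor presets the @odata.type discriminator, so only the
        // overriding origin group needs to be supplied.
        OriginGroupOverrideActionParameters parameters =
                new OriginGroupOverrideActionParameters().withOriginGroup(originGroup);
        System.out.println(parameters.odatatype());
    }
}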
Billionaire Michael Dell’s investment firm MSD Capital, L.P. has purchased the entire New York print archive of renowned photo agency Magnum Photos, totaling nearly 200,000 images. The collection includes some of the most iconic images throughout history, including photos of world leaders, celebrities, and major events such as World War II. Though the price was not disclosed, the collection was previously insured for over $100 million.

Under the agreement, the prints will be preserved, catalogued, and made accessible by the Harry Ransom Center at The University of Texas at Austin. While MSD Capital purchased all of the physical prints, Magnum’s member photographers will still retain the copyright and licensing rights to all of the photographs.

Thomas F. Staley, director of the Ransom Center, states: “This is a singularly valuable collection in the history of photography, […] It brings together some of the finest photojournalists of the profession and spans more than a half century of contributions to the medium.”

The collection was relocated to Texas from New York City in December 2009 on two trailer trucks. (via Bloomberg)

Update: Jonathan from Magnum Photos informs us that the acquisition encompasses the entire press print archive, not the entire archive of the agency. We’ve changed “print” to “press” in the title to reflect this.

Update: We’ve fixed a couple typos that ajehals pointed out. Thanks!
/** Internal method to add rule. */ @SuppressWarnings({ "unchecked", "rawtypes" }) static void addRule( TypeAnnotationToRule toRule, Annotation a, Class<?> clazz, List<SerializationRule> newRules, List<SerializationRule> rules ) { SerializationRule rule; try { rule = toRule.createRule(a, clazz); } catch (Exception t) { LCCore.getApplication().getDefaultLogger().error( "Error creating rule from annotation " + a.annotationType().getName() + " using " + toRule.getClass().getName(), t); return; } SerializationRule.addRuleIfNoEquivalent(rule, newRules, rules); }
Cutting Edge: Persistence of Transferred Lymphocyte Clonotypes Correlates with Cancer Regression in Patients Receiving Cell Transfer Therapy

The lack of persistence of transferred autologous mature lymphocytes in humans has been a major limitation to the application of effective cell transfer therapies. The results of a pilot clinical trial in 13 patients with metastatic melanoma suggested that conditioning with nonmyeloablative chemotherapy before adoptive transfer of activated tumor-reactive T cells enhances tumor regression and increases the overall rates of objective clinical responses. The present report examines the relationship between T cell persistence and tumor regression through analysis of the TCR β-chain V region gene products expressed in samples obtained from 25 patients treated with this protocol. Sequence analysis demonstrated that there was a significant correlation between tumor regression and the degree of persistence in peripheral blood of adoptively transferred T cell clones, suggesting that inadequate T cell persistence may represent a major factor limiting responses to adoptive immunotherapy. The Journal of Immunology, 2004, 173: 7125–7130.

CA) to amplify the TRBV region sequences expressed by polyclonal TIL samples. The germline genes that encoded the expressed TRBV products were identified by aligning the cloned sequences with the known TRBV gene sequences using the Vector NTI AlignX protocol (Invitrogen Life Technologies, Carlsbad, CA) and the highly variable sequences that resulted from joining the TRBV genes to the Dβ and Jβ regions were then compared to identify T cell clonotypes.
// openFile opens fn, a file within the testdata dir, and returns an FD and the file's size. func openFile(fn string) (*os.File, int64, error) { f, err := os.Open(filepath.Join("testdata", fn)) if err != nil { return nil, 0, err } s, err := f.Stat() if err != nil { f.Close() return nil, 0, err } return f, s.Size(), nil }
/** * Handles parsing of the user's input when an autocomplete is requested. */ public class PartialInputParser { /** * Searches for autocomplete results based on the user's input and the provided application model. */ public static PartialInput parse(String partialInputString, Model model) throws ParseException { Finder finder = new Finder(model); List<String> completions; if (partialInputString.length() == 0 // suggest every valid command || ArgumentTokenizer.isSingleWordWithoutTrailingWhitespace(partialInputString)) { // try to autocomplete command word return new PartialInput(partialInputString, partialInputString, finder.autocompleteCommandWord(partialInputString)); } ArgumentSingleValue lastPrefixValue = ArgumentTokenizer.tokenizeLastArgument(partialInputString, PREFIX_EMAIL, PREFIX_NAME, PREFIX_NUSID, PREFIX_MATNO, PREFIX_MODULE, PREFIX_TUTORIAL_NAME, PREFIX_TUTORIAL_DAY); Prefix prefix = lastPrefixValue.getPrefix(); String value = lastPrefixValue.getValue(); if (prefix.equals(PREFIX_EMAIL)) { completions = finder.autocompleteEmail(value); } else if (prefix.equals(PREFIX_NAME)) { completions = finder.autocompleteName(value); } else if (prefix.equals(PREFIX_NUSID)) { completions = finder.autocompleteNusId(value); } else if (prefix.equals(PREFIX_MATNO)) { completions = finder.autocompleteMatNo(value); } else if (prefix.equals(PREFIX_MODULE)) { completions = finder.autocompleteModCode(value); } else if (prefix.equals(PREFIX_TUTORIAL_NAME)) { completions = finder.autocompleteTutName(value); } else if (prefix.equals(PREFIX_TUTORIAL_DAY)) { completions = finder.autocompleteDay(value); } else { return null; } return new PartialInput(partialInputString, value, completions); } }
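A hedged call-site sketch, not from the source: 'model' stands for whatever Model implementation the surrounding application supplies, and PartialInput's accessors are not shown above, so only the parse call itself is illustrated.

// Hypothetical helper; the Model parameter and example input string are assumptions.
private static PartialInput suggestFor(String userText, Model model) {
    try {
        // e.g. "find n/Al" ends in a name-prefix value, so name completions come back.
        return PartialInputParser.parse(userText, model);
    } catch (ParseException e) {
        return null; // tokenizing the trailing prefix/value pair failed; no suggestions
    }
}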
def remove_random_edge(self): u, v, k = self.get_random_edge() logger.log(5, 'removing %s, %s (%s)', u, v, k) self.graph.remove_edge(u, v, k)
/** * Highchart by default puts a credits label in the lower right corner of the * chart. This can be changed using these options. */ @Generated(value = "This class is generated and shouldn't be modified", comments = "Incorrect and missing API should be reported to https://github.com/vaadin/vaadin-charts-flow/issues/new") public class Credits extends AbstractConfigurationObject { private Boolean enabled; private String href; private Position position; private Style style; private String text; public Credits() { } public Credits(Boolean enabled) { this.enabled = enabled; } /** * @see #setEnabled(Boolean) */ public Boolean getEnabled() { return enabled; } /** * Whether to show the credits text. * <p> * Defaults to: true */ public void setEnabled(Boolean enabled) { this.enabled = enabled; } /** * @see #setHref(String) */ public String getHref() { return href; } /** * The URL for the credits label. * <p> * Defaults to: http://www.highcharts.com */ public void setHref(String href) { this.href = href; } /** * @see #setPosition(Position) */ public Position getPosition() { if (position == null) { position = new Position(); } return position; } /** * Position configuration for the credits label. */ public void setPosition(Position position) { this.position = position; } /** * @see #setStyle(Style) */ public Style getStyle() { if (style == null) { style = new Style(); } return style; } /** * CSS styles for the credits label. * <p> * Defaults to: { "cursor": "pointer", "color": "#999999", "fontSize": * "10px" } */ public void setStyle(Style style) { this.style = style; } public Credits(String text) { this.text = text; } /** * @see #setText(String) */ public String getText() { return text; } /** * The text for the credits label. * <p> * Defaults to: Highcharts.com */ public void setText(String text) { this.text = text; } }
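A short usage sketch, not part of the original file: it relies only on the accessors defined above. The text and URL are placeholder values, and attaching the object to a chart configuration is left out because that API is not shown here.

public class CreditsSketch {
    public static void main(String[] args) {
        // Text constructor plus setters, exactly as declared above.
        Credits credits = new Credits("Data source: Example Corp");
        credits.setEnabled(true);
        credits.setHref("https://example.com");
        // getPosition() lazily creates a Position, so it can be configured in place.
        Position position = credits.getPosition();
        System.out.println(credits.getText() + " / position created: " + (position != null));
    }
}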
We spend too much attention and energy on the Union Budget. It focusses on doing more things. It’s about building more roads, more low income housing, allowing investment, changing tax structures and of course, about telling us how badly the government has been doing this. We pay too little attention to building things better, so that they last, and they cost less to maintain. So that they stay built, and free us up to focus on building other newer things in the future. We often don’t address the “better” piece in a budget and, in order to protect the incumbent, established and inefficient industries, actually disincentivize newer technology. Here are a few things that we can change. Making better roads and bridges with high-density polystyrene The very material that we call “thermocol” is very rigid and tough, at a higher density. It has been used since 1972 in Norway to construct roads, as a filler that can replace soil or gravel, while we widen or build new roads. It resists compression, but the most important part is that it is very light and easily transportable (and can be cut on site). Also called Expanded PolyStyrene (EPS) or Geofoam, it has been used to build fills for bridges, for stadium seating and even for constructing houses. EPS is light and so doesn’t load the underlying soil, and its rigidity allows for roads that don’t sink easy. And because it’s easy to cut and place on site, it reduces construction time and labour cost by not involving earth moving machinery, doing area-constrained expansion of existing roads and also for building in any weather. The cost savings might be substantial in terms of reduced labour requirements (more road per person employed)—a highway in the US saw a cost reduction from $1 million to $160,000. And houses too! EPS can also be used to make houses quickly, efficiently and with far lower ecological damage. Pre-fabricated EPS blocks can be caged in steel, cut into appropriate sizes and used to build the superstructure of a house very fast. Structural stability is added through holes in the slab, into which steel rods are inserted and concrete poured. Watch how the Karnataka State Police Housing Corporation built houses in just 17 days. Apart from the obvious reduction of cost in speed, there’s lesser debris, it’s easier to shape around (cutting conduits for cabling can be done with a pocket knife!) and it has a much smoother finish. Building schools and (since people have woken up to them now), toilets, can be a quick and easy task with EPS. And cheap, considering there’s a lot less cement involved. Additionally, because of the Styrofoam, the houses get two natural advantages. First, they’re more naturally noise insulated since Styrofoam absorbs sound. And secondly, there’s heat and cold insulation for houses as well—which means your air conditioning or heating is that much more efficient (the walls will resist the heat transfer from the outside to the inside). Which brings us to: insulation with multi-layer glass and air circulation When you see large buildings that are entirely glass on the outside, it makes you wonder how much they pay in cooling costs. Glass brings in heat and retains it, making buildings awfully hot. What if we used double layered glass instead? The air between the two layers of glass traps heat, and if that air circulates it’ll ensure that the inner layer never gets as hot as the outer layer. 
This makes your air cooling more efficient; and while the second layer of glass adds to upfront costs, the savings show up in cooling bills over time.

Going local with Solar Energy

Much of the India that lies south of Mumbai is hot throughout the year. While India wants to build solar farms, these pose a problem: there are tremendous transmission losses in moving power from the production source (the farm) to the point of use (the house or office). If we instead used solar techniques to harvest energy locally, we might save ourselves those losses. This is already in practice in India: many street lights and traffic signals use a local solar panel and battery. But we could go further, with solar panels on the roofs of vehicles, buildings and utilities. The Delhi Metro has started to do this at its stations and parking lots. We have large government buildings that could harness solar power, and in much of India this should be enough to run them through the day, reducing what they draw from the regular grid. If you use the power close to where it is generated, transmission losses shrink and you get more efficiency. India needs better battery technology to store and dispense power, for which we need to encourage research. But with dirt-cheap solar panels today and better battery storage becoming available, it would be a shame if we couldn't use our extreme heat to generate energy without hitting the grid.

Desalination and nuclear power

Our cities have little water, but the oceans have enough (and more). Desalinating water, however, costs a lot of money. Building desalination plants next to nuclear power plants can cut costs tremendously: at India's Kalpakkam nuclear power plant, a desalination facility using reverse osmosis and multi-stage flash produces enough water for the plant, the worker housing and the entire town, at a cost of 6 to 10 paise per litre. A four-member family needs about 350 litres a day; at roughly 8 paise per litre (the midpoint of that range), that works out to 350 × 0.08 ≈ Rs28 per day for clean water, with no further "water filter" needed. We may feel this needs subsidizing, but remember that where water isn't charged for, the poor today often get none at all; in general they have paid for what is good quality, affordable and reliably available (look at how they adopted mobile phones).

The internet of low-cost things

Today it's easy enough in India to get a SIM card and use it purely for data, which in many cases removes the need for voice or SMS entirely. Data alone can power good, low-cost solutions. FM radio is privatized, but private operators aren't allowed to broadcast news. This is utter nonsense: news can be carried over live channel feeds on the internet; all you need now is a car radio (or one built into a two-wheeler helmet) with a SIM card in it. The same technology lets, for instance, a Manipuri officer in Mumbai get news about her state, in her language, with little effort. Imagine a security camera with a SIM card and a battery: it can upload clips directly to a private network, allowing remote monitoring; even switching off the power won't take the camera offline.
While it's considered fashionable for your fridge to tell you when you've run out of milk, the real use of a connected device is on our notoriously delayed city buses and trains: a SIM card on board can relay real-time location over the internet. For those who travel by bus or train, it would be enormously helpful to know how long they have to wait (or where the last bus or train of the day has got to). These aren't innovations so much as the sensible use of technology that already exists and has become dirt cheap. As long as regulators insist that the telcos cannot charge extra for specific services (radio, video or camera data) and "net neutrality" remains our mantra, we can see massive increases in the efficiency of our lives.

There's a lot more, of course. But the point is to achieve a broader goal by using technology better, even if that means hurting current players. Sure, the Styrofoam-based house will hurt the cement business and, in the short term, will mean lower earnings for labourers. But should we care about that, or about the productivity gains of building a house within a month? Technology has always made someone redundant. One day, we might have enough machines to make journalists redundant; I look forward to that day too. This article is part of Quartz Ideas, our home for bold arguments and big thinkers.
def hash(self, key):
    """Polynomial rolling hash: map a string key to a table slot index."""
    value = 0
    for char in key:
        # Horner's rule: scale the running value by the base, add the next
        # character code, and reduce modulo the table size to stay in range.
        value = (value * self.hash_base + ord(char)) % self.table_capacity
    return value
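A minimal usage sketch for the method above, assuming a hypothetical HashTable wrapper that supplies the hash_base and table_capacity attributes the method depends on (the wrapper class, its default values and the sample keys are illustrative assumptions, not part of the original snippet):

class HashTable:
    """Toy container that exposes the polynomial rolling hash above."""

    def __init__(self, table_capacity=101, hash_base=31):
        # A prime capacity and a small prime base help spread keys evenly
        # across the available slots (both values are assumed defaults).
        self.table_capacity = table_capacity
        self.hash_base = hash_base

    def hash(self, key):
        value = 0
        for char in key:
            value = (value * self.hash_base + ord(char)) % self.table_capacity
        return value


table = HashTable()
print(table.hash("apple"))   # deterministic slot index in [0, 101)
print(table.hash("grape"))   # a different key usually maps to a different slot

Taking the modulus inside the loop keeps every intermediate value bounded, so hashing runs in O(len(key)) time without ever building an arbitrarily large integer.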
Combustion of olive husks in a small-scale BFB facility for heat and steam generation

The paper reports work in progress on an R&D project aimed at developing an advanced boiler for the combustion of virgin and exhausted olive husks at small scale. Fluidized-bed technology was preferred because of its well-known advantages, which include large thermal inertia, high combustion efficiency, low NOx emissions and a considerable heat-transfer rate. A 100 kW atmospheric bubbling fluidized-bed combustor was designed and built. The facility is equipped with devices for continuous fuel feeding as well as for heat extraction from the bed region. Temperature and gas concentrations are measured with thermocouples and a gas analyzer. Steady-state combustion tests were carried out at typical fluidized-bed combustion temperatures. The tests proved that not only exhausted olive husks but also virgin ones, with a water content above 50% by mass, can be fed reliably onto the fluidized bed, and they succeeded in achieving high combustion efficiency. Current results indicate that the reduction of pollutant emissions, namely NOx, is still to be pursued. Further work is required to understand how pollutant emissions relate to the behaviour of virgin olive husks and to the operating conditions of the combustor.
The Discovery of the Great Wall of Jordan, Southern Levant

The Great Wall of Jordan, also known as Khatt Shebib, is a unique ancient wall in southern Jordan near the city of Ma'an. The remains of the wall, which include towers, barracks and rooms, run some 150 km from south to north, making it the longest linear archaeological site in Jordan and the southern Levant. The remains were first identified by British experts, and the discovery was made public in 1948; the wall was then documented by aerial photography in 1982 and explored by the Department of Antiquities in 1992, with survey, excavation and documentation continuing to the present day. Spanning approximately 150 kilometres, it is the world's second-longest wall after the Great Wall of China. Known locally as Hableh or Khatt Shebib, the wall stretches northwards from Ras an-Naqab in Ma'an Governorate to the Wadi al-Hasa area of Tafileh Governorate. A Jordanian team of archaeologists and experts initiated a field project in 1992-1996, and again in 2020, to document the remains adjacent to the wall, where comprehensive survey and excavation are urgently needed at several significant sites along its course. The field study revealed significant architectural structures built directly against the wall, and also underlined the wall's potential as an attraction for tourism in southern Jordan. The date of the wall's construction points clearly to the Nabataean period.

Introduction

Known locally as Khatt Shebib and located in the south of the Kingdom, the wall is the longest structure of its kind not only in Jordan but in the whole of the southern Levant: Jordan's own "great wall". Its remains run approximately 140-150 km, making it the longest linear archaeological site in Jordan and the second-longest structure built by humans, after the Great Wall of China. According to the results of the field studies and investigations, the wall represents a major step in the development of civilization in the ancient world. It stretches northwards from Ras an-Naqab in Ma'an Governorate, through Shobak, to the Wadi al-Hasa area of Tafileh Governorate. Field studies [1] are currently being conducted to ascertain the area's role in ancient times, focusing especially on the wall's ancient sites.

[1] Archaeologists, anthropologists and tourism experts are still authenticating the area's role in ancient times, with particular attention to the sites along the wall.

The Wall (Khatt Shebib)

During fieldwork in 1992, the local community of the Ras an-Naqab villages informed the team that the wall is named after Shabeeb and is well known as Khatt Shabeeb (khatt is Arabic for "line"); the name comes from Arabic, "Khatt Shebib" translating directly as "Shebib's line". The resemblance between the wall and a drawn line led the villagers and their ancestors to adopt this name centuries ago, and it is still in continuous use today. Another name for the wall, Hableh, used near Shobak, also stems from Arabic: habl means "rope", the shape of the wall recalling a stretched rope. Who, then, was the Shabeeb for whom the wall is named?
Legend has it that Amir (Prince) Shabeeb conquered the area from south of Ma'an northwards, seeking to bring the whole of Jordan under his rule as a powerful leader of the Middle Ages. During his campaign to the north his horse fell, and Shabeeb died suddenly between Ma'an and Zarqa, near Amman. Three places in Jordan could be connected with this Shabeeb:

First: Qasr Shabeeb (Shabeeb's Palace) in the Zarqa district north of Amman, the capital of Jordan. The current remains of the palace consist of two storeys fitted with arrow slits, a courtyard and a large water reservoir. The palace has been ascribed to Prince Shabeeb by several travellers who visited the site over the centuries, and its date has been placed anywhere from the Roman to the Ottoman period. Scholars have concluded that the structure may be a fort rather than a palace, while the identity of the Amir Shabeeb (al-Tubaai of Himyar, 'Uqayli, Miqdadi or Mahdawi) still requires further historical investigation. This leaves the dating of the palace uncertain, possibly anywhere between the 10th and 18th centuries AD.

Second: Khatt Shabeeb near Amman, a stretch of land on the western edge of the plain, without a wall or any architecture visible on the surface, extending from west Amman (Wadi as-Seer) and the village of Iraq al-Amir to the village of Na'ur and ending at Hesban. The ancestors of the local inhabitants held that Shabeeb owned these villages and the whole territory east of this unseen line (khatt), while his seat was the famous Qasr Shabeeb in Zarqa mentioned above.

Third: Khatt Shebib in southern Jordan, identified in 1948 and still undergoing field assessment by the author and his team (Figure 1).

Geology

The wall (Khatt Shebib) runs mainly through the southern Jordanian Badia rather than true desert, a landscape consisting primarily of sand dunes and rocky mountains. "The Cambrian-Silurian sedimentary succession exposed in east-central Sinai is described and subdivided into three formations: Sarabit al-Khadim at the base, Abu Hamata in the middle, and the Adedia on top. The newly described Ras al-Naqab Member yielded Ordovician fossils of sublittoral facies. These rock units are identical to their counterparts in west-central Sinai and can be correlated with those exposed in southern Palestine, but are far less thick and poorer in fossils than equivalent strata in southern Jordan and north-western Saudi Arabia" (Kora 1991). Khatt Shebib itself reflects the geology of its setting: the rocks used in its construction vary with the geological framework of the area. See Figure 2.

Field Work

Fieldwork in the Ras an-Naqab area was initiated in 1992-93 by the Department of Antiquities of Jordan and continued during 1994-96 and again in 2019-2020; it included survey, excavation, documentation and assessments for Environmental Impact Assessment reports (EIAR). Documentation of the area's tangible and intangible heritage has continued over the past years. The work has resulted in the discovery of several major, medium and minor sites, among them prehistoric sites, agricultural villages, watchtowers, cairns, water installations and ancient roads.
One of these major discoveries is the longest wall in the Middle East, known to the local community as Khatt Shabeeb. The wall was subjected to field assessment and evaluation during the seasons of 1992 to 1996, from its starting point in Ras an-Naqab, slightly north of the existing lower ground of Dabbat Hanut and Qa' an-Naqab, between the old Ras an-Naqab road (the "English Road") to the west and the high mountains to the east. Its line was carefully chosen to make the best use of the topography, and it runs in a north-south direction.

Objectives

The main objective of this work is to shed light on the importance of the recovered architectural remains of the wall and its surrounding zone, and to link the wall with the other newly discovered remains along the route from Ma'an to Hasa, where major sites were found during survey and excavation from 1992 to 1996 and in the seasons continuing today. A further aim is to match the results of the earlier systematic survey and excavations in the Ras an-Naqab area with the newly gained data for analysis and comparative study. The fieldwork was designed to trace several branches connected to the main wall that had been noticed in previous seasons, and to expose major structures, consisting of isolated or connected rooms and circular buildings, together with other associated facilities on both sides of the route. The discovery of the wall's traces, foundations, associated buildings and branches is a step toward identifying lost parts of this great wall and will yield new archaeological evidence for the function and date of the remains.

Previous Studies

The area was subjected to preliminary assessment before our field project of the 1992-1996 and 2019-2020 seasons. Owing to its magnitude, the site attracted attention as early as 1948, when the Briton Alec Kirkbride wrote preliminary observations on the wall; he first identified it and argued that Khatt Shebib was used for military and defensive purposes. Another organization involved in researching Khatt Shebib is the Aerial Photographic Archive for Archaeology in the Middle East: "these projects photograph and survey the ancient wall from above and on the ground. Using this they study the wall through methods such as comparing the modern remains to historical imagery and maps, mainly from 1948 and 1982." These aerial archaeological projects, headed by Kennedy, have produced substantial documentation of Khatt Shebib, contributing to the understanding of the wall's geographical landscape, structure and features; despite these achievements, however, Kennedy plays down expectations of the wall as a significant discovery in southern Jordan.

The Aqaba-Ma'an Archaeological and Epigraphic Survey (AMAES), directed by the late William (Bill) Jobling of the University of Sydney from 1980 to 1990, "was the first official comprehensive survey of the ancient remains found in the Hisma/Wadi Ramm desert of southern Jordan". While the Hisma's most prominent archaeological and epigraphic sites had been documented by earlier scholars, most notably George Horsfield, R. Savignac, G. Lankester Harding and Diana Kirkbride, the AMAES aimed to explore fully the region's vast network of sweeping valleys, towering rock faces and sprawling boulder fields. In all, the survey explored an area of more than 2,500 sq.
km, extending from the Red Sea port city of Aqaba in the west to the desert outpost of Mudawwara in the east, and from the well-watered Ma'an plateau and the Ras an-Naqab escarpment in the north to Jordan's desert border with Saudi Arabia in the south. Several sites were registered during our field survey, among them Kh. Daouk, Umm Quseir and Kh. Shdeid, all situated very close to the wall on both sides.

The spring at 'Ayn Jammam in Ras an-Naqab is easily recognized as one drives north on the highway from Aqaba to Ma'an: its vegetation stands out in stark contrast to the steep, arid hillside around it. It is not surprising, then, that archaeologists have been aware of the site for some time, but it was not until H. Gebel's work in 1993 that the significance of the settlement was determined. The site consists of two components: one is a large structure with artifacts indicating a Nabataean to Late Roman-Early Byzantine occupation; the second is Neolithic, including LPPNB and Pottery Neolithic occupations. Claims of a PPNC occupation have not been verified by lithic analysis. Two radiocarbon samples yielded dates of 8,520 ± 190 uncal BP (9,551 ± 253 cal BP) and 8,030 ± 120 uncal BP (8,899 ± 186 cal BP). Highway construction plans threatened to damage the site severely, so the Cultural Resource Management department of the Department of Antiquities undertook two seasons of rescue excavation in 1995 and 1996; brief reports on the excavation have appeared, as well as an unpublished MA thesis.

Another study, entitled "A first radiometric chronology for the Khatt Shebib megalithic structure in Jordan using the luminescence dating of rock surfaces", concluded that the wall dates to the Persian period. A researcher from King Hussein University in Ma'an, Fawzi Abu Daneh, records the wall as site no. 62 among other sites found west of Ma'an-Udhruh. The leading method of studying the wall is aerial archaeology, and multiple international archaeological organizations have established projects in order to understand and discover more about it.

Function

Khatt Shebib has gained increasing attention among archaeologists. Kirkbride, who first identified the wall, argued that it was used for military and defensive purposes. However, as more has been discovered about the building and structure of Khatt Shebib, some contemporary archaeologists have dismissed this theory, suggesting that the wall, at about two metres high, is too low to have served as an effective defence. Instead it is believed that Khatt Shebib served as a border, marking separate areas and divisions of land. Unlike its East Asian counterpart, however, it does not seem to have been used for fortification, leaving its purpose an item of speculation for archaeologists and historians. "This border acted as a means of restricting the access of nomadic populations to settled and farmed regions." Some evidence suggests that the wall served agricultural purposes: it was no more than about two metres high, and the inhabitants of the area, especially to the west, were known to be farmers. Historians believe the original plan was to build a wall of stone, a strong material for defence against external threats, featuring guarded gates for the western areas. The purpose was possibly to control movement across the frontier and to counter low-intensity threats.
There was no intention of fighting from the wall top; the units based on the wall were trained and equipped to meet the enemy in the open. Do the ruins of this wall support the interpretation the scholars have offered? On the other hand, there has been no wavering over the fact that the wall was used for protection and for somewhat limited defensive purposes, and so far no signs of military use in recent times have been discovered. In spite of this, it is fair to conclude that there was an important and distinctive wall in southern Jordan. Whether Khatt Shebib was used for military purposes or served instead as a border remains a matter of debate among scholars. The extent of damage to the wall's upper courses, and the shortage of excavation both of the wall itself and of the buildings found on either side, force us to limit our expectations, since there is as yet no support from inscriptions or from architectural analysis and comparative study. Most of the inscriptions found during fieldwork date to Safaitic or Thamudic settlement in the area (Figure 4). We must either take a position of our own or follow previous assumptions, which leave the function of the wall uncertain.

During the Nabataean period there were many kingdoms in the region, so Khatt Shebib may have marked a boundary between different kingdoms. It is also one of the earliest recorded border walls, and it may have influenced modern border structures. The wall began as a series of independently constructed walls as long ago as the Early Bronze Age, around 3000 BC. Fujii directed excavations by a Japanese mission at Jafr, Burma and the surrounding area; he was the second, after our own operations, to initiate fieldwork on the wall and nearby structures, and the excavated stretch of wall was dated to the first half of the third millennium BC (EB IA) and designated "K-lines" (HBKL). This term, coined by Fujii, derives from Aharoni's designation for a unique type of structure first confirmed in the Negev Highlands in the late 1950s. Over time, it is logical to assume, these separate parts became connected into a single vast wall in later periods, such as the Islamic period.

The wall's major function was not only military, defending the Nabataeans against the multitude of invaders who plagued the borderlands, primarily nomadic peoples. The wall also provided economic and social benefits, allowing the Nabataeans to enforce economic duties along the Incense Route and to reduce immigration from the Arabian desert. As a means of maintaining control over the Nabataean settlers of Ras an-Naqab, on the western side of the wall, and over their trade, the wall was quite successful. It did not, however, keep enemies and invaders out entirely: the Roman army under the direct control of the emperor Trajan invaded and took territory across the north, near Wadi al-Hasa, where the wall ends. Though the wall has never been destroyed altogether, standing from the Bronze Age through the Iron, Hellenistic, Roman, Byzantine and Islamic ages, its maintenance was, and is, such a colossal undertaking that large sections have fallen into disrepair over time through natural and human factors.
Methods of Construction

"As local archaeologists, we began to turn our focus to full implementation in 1992 by bringing together the artifacts and features we collected during excavation work." The wall is predominantly constructed from rocks available on the surface of the ground, of various kinds such as limestone, flint and other sedimentary stones, laid in two rows with the space between them filled with smaller stones and chunks. The stones are not dressed; chunks were set on both external faces to seat the stones regularly, large boulders were used as foundations, and medium-sized stones were used in the upper courses. The use of organic or inorganic bonding materials cannot yet be ruled out; test soundings are now designed to obtain systematic soil samples for analysis as soon as possible. Given the wall's great length across mountains, valleys, slopes and plains, the building materials differ along its span as the availability of resources varies from place to place (Figure 5).

The original plan was to construct a stone wall 1-1.5 m wide, or more in some places, and at least 1.5-2 m high, perhaps exceeding 2 m where the ground required it in the Ras an-Naqab highland sector, where several structures were found attached to the wall. At the time of its construction in the Nabataean period the wall stood somewhat taller than two metres, with associated structures; after continuous collapse it was perhaps reduced to about a metre and a half. Owing to various direct and indirect threats to its integrity, including climate change and growing population pressure, the wall is now significantly smaller, with a surviving height of less than one metre. These measurements apply only to the Ras an-Naqab area.

Given the wall's length, construction seems to have proceeded in phases, which implies that various cultures left proof of their existence in its architecture; the archaeological evidence from the wall suggests that it was mainly built during the Nabataean period (312 BC-106 AD). At somewhat regular intervals there was a structure (a tower, barrack block or buttress) of square or circular shape, and at roughly every mile, though not regularly, a village site containing a gate through the wall, presumably supported by a tower (Figure 6). Most of these towers have been affected by human and natural factors and are now in ruins; archaeologists and historians can only hypothesize about the ways in which they were used. It has been suggested that these structures, the wall and its surrounding buildings, could have been designed as watchtowers, as shelters from desert storms, or as stores for food. Many archaeologists and experts agree that the towers can hardly have served a narrowly military purpose, since they are relatively small, measuring about two to four metres in diameter. Through examining these towers, archaeologists, historians, experts in early architecture and anthropologists have been able to document the construction methods, livelihoods and lifestyle of the Ras an-Naqab builders, the people of the wall (Figure 7). Extensive archaeological research close to the starting point of the wall in Ras an-Naqab has revealed strong evidence for dating its construction.
Earlier surface investigations, including systematic survey, test trenches and limited as well as comprehensive excavation, concluded that the wall was built across the Bronze Age (3200-1200 BC), the Iron Age (1200-850 BC), the Persian period (539-332 BC), the Nabataean period (332 BC-106 AD) and the medieval era from the 10th century to the Ottoman period. The major scientific archaeological project in the Ras an-Naqab area, which included field survey and comprehensive excavation along the western side of the wall, has shed more light on the chronology of the wall's construction. The stratified layers at the sites of al-Hiyyed (Shami, 1997), 'Ayn Jammam 2, Abu Nusur and Dabbat Hanut 1/2/3, together with test trenches at Tasan, Kasmiyeh and elsewhere, show that the excavated and explored sites range from the Palaeolithic to the Late Ottoman, roughly 500,000 BC to AD 1918. Most of the buildings and recovered datable materials belong to the Nabataean-Roman (Classical) and Byzantine periods (see Table 1).

This scientific survey and excavation programme has provided the first dependable dates for the Ras an-Naqab buildings and has shown the value of systematic fieldwork in interpreting the many sites found very close to the wall. Alongside this research and documentation, the team discovered significant Nabataean sites such as the Abu Nsur village and the Dabbat Hanut structures, Nabataean water systems, watchtowers of different types and shapes, and ceramic artifacts such as pottery sherds at sites along and very near the wall. These findings are numerous enough to indicate that the construction or use of the wall was contemporary with the establishment of these villages, mainly from the 3rd century BC to the 7th century AD; archaeologists and experts take this clear archaeological evidence to run from the Early to Late Nabataean, Early to Late Roman and Early to Late Byzantine periods (Figure 8).

Although systematic excavation now provides scientific archaeological evidence dating the building of Abu Nsur and Dabbat Hanut to the early Nabataean age, the preliminary assessment of the timeframe of the wall's construction ranges from the third century BC to the seventh century AD. Most of the experts and archaeologists who have worked on this wall agree that the great wall, Khatt Shebib, was not built in a short or limited period of time; rather, construction took place over a prolonged period as sections (or phases) were added and joined together. The results of the Japanese mission support this theory, dating parts of the wall back to the Early Bronze Age, around 3000 BC. This presents a challenge to the experts and scholars engaged in assessment and evaluation who aim to determine the exact date of the wall's construction. The date of Khatt Shebib's construction is converging on the period of the Nabataean kingdom, but it remains widely debated by archaeologists and non-archaeologists alike: some experts insist that the wall and its associated structures were built during the Nabataean period, while others argue for a Hellenistic or Persian date.

Discussion

Jordan's wall appears to have continued in this form into the late 2nd century.
A major war took place shortly after AD 106, when the Romans crossed the wall, destroying the whole defensive system of the Nabataeans. We know no details of the subsequent fighting in the area of the wall, but it probably led to changes, including the abandonment of many sections and village sites, possibly the redeployment of the army and the villagers, and the building of the major Roman road through the Ras an-Naqab area. In the late 2nd or early 3rd century a new road, the Via Nova Traiana, was constructed to the west of the wall, so that the wall could continue to be used by local farmers as a boundary or as protection against wild animals attacking their farms and houses, while minor rather than major repairs were made to the wall itself.

Like other walls and archaeological structures in the Middle East, the Great Wall of Jordan (Khatt Shebib) faces various threats to its integrity and sustainability. These include the re-purposing of the site and nearby structures for agriculture, and the destruction of the ancient ruins during conflict, mining or construction as populations grow and villages and cities expand. Natural factors such as erosion, and human activity in the form of robbery and looting, are also major direct threats to the wall and its associated structures; the damage is already evident and clearly visible along various parts of the wall, where only a few original stones remain standing. In the centuries after the Nabataeans, the wall became a quarry for stone to build villages, farms and houses along its line, until the conservation movement that followed the establishment of the Department of Antiquities in 1921-23 put a stop to that. It was only from the mid-20th century onwards that archaeologists and historians such as Kirkbride, Lankester Harding, David Kennedy, Fujii and other Jordanian scholars began to study Jordan's great wall and sought to protect its still magnificent remains.

Over the centuries many sections of the wall have suffered damage from roads crossing it and from the plundering of its stones to build nearby Roman-Byzantine villages, the new Roman road and other structures. The remaining foundations and forts, however, could attract tourists from throughout the world. It is urgent to prevent local farmers and project owners from removing stones of the original wall for private projects or for dirt and paved roads; buying up the surrounding land and establishing a protective corridor of at least 30 metres is a must at this stage, as is funding restoration work on the wall. Jordan's wall should be placed on the UNESCO World Heritage tentative or nomination list. It remains unguarded, meaning that visitors have unfettered access despite concerns over damage. More recently, at two workshops held in Amman and Ma'an in the winter of 2020, Jordan's wall was adopted by NGOs as a tourist route and presented as an exciting structure not only in Jordan but in the southern Levant (Figure 9). Walking the wall's remains may yet become a popular tourist activity; the wall was included in The Guardian's "Where to go in 2017" list, and a visitor's centre explaining the historic significance of the site is reportedly in the works.
Tourism Development

An expedition to the wall's most significant features was organized during the past six months to gather information on the current condition of the wall and to evaluate future needs for preservation and consolidation. In 1996 the team of archaeologists formed a partnership with the Military Culture Department, which resulted in the creation of a museum at the Prince Hashem Bin Al Hussein School in Ras an-Naqab, displaying a number of artifacts and items discovered in the area of the wall over the years. What really attracts tourism to the area are the museum, the remains of the "great wall of Jordan", the discovered antiquities and sites, the Sharah mountain range and the remains of the ancient trade routes. The NGOs of the Ras an-Naqab area and Jordan's tourism sector held several workshops together during 2015-2020 to discuss the significance of the wall and the surrounding major sites, aiming to define its role in developing tourism. Efforts are being made to study the area with a view to transforming it into a tourist destination once proper maintenance has been carried out.

Conclusions

The results of the field studies described above may help explain how Nabataean civilization flourished in southern Jordan: how the Nabataeans were able to move from the Jordanian highlands into the semi-desert areas, practise agriculture based on water harvesting, and protect and defend their territory. They may also help explain, at least in part, the proliferation of Nabataean, Roman and Byzantine settlements in areas that are today virtually devoid of human habitation. It must also be pointed out that influences other than the Nabataeans may have been responsible for the establishment of settlements during the pre-Nabataean, Roman and Byzantine periods. Nevertheless, the major discovered structures and artifacts noted above must be seriously considered in any attempt to understand settlement patterns in the Ras an-Naqab area of southern Jordan for the periods of interest here.

Finally, Jordan's great wall is longer than Hadrian's Wall, the continuous Roman defensive wall that guarded the north-western frontier of the Roman province of Britain against external threats and barbarian invaders. That wall ran coast to coast across the width of northern Britain, around 73 miles (118 km), from Wallsend (Segedunum) on the River Tyne in the east to Bowness on the Solway Firth in the west. Apart from the Great Wall of China, Jordan's wall is also longer than other walls of the world, among them the great wall of India: after Europe, India comes next in line for building great walls, and the surprisingly little-known Kumbhalgarh Fort has a 36 km wall surrounding it, a massive continuous wall built around the 15th century that stands in open sight and is still rarely talked about.

The unique remains of Jordan's wall deserve not only preservation and conservation but also some degree of aesthetically and archaeologically appropriate development, so that the wall can become, like the walls of China and Europe, a focal point in the southern Levant. The proposed work requires not only comprehensive conservation and management plans for the discovered sections, but also detailed interpretation schemes that present the distinctiveness of the wall and its features within the current landscape.
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

import { i18n } from '@kbn/i18n';
import { lt } from '../../functions/common/lt';
import { FunctionHelp } from '.';
import { FunctionFactory } from '../../functions/types';

export const help: FunctionHelp<FunctionFactory<typeof lt>> = {
  help: i18n.translate('xpack.canvas.functions.ltHelpText', {
    defaultMessage: 'Return whether the context is less than the argument',
  }),
  args: {
    value: i18n.translate('xpack.canvas.functions.lt.args.valueHelpText', {
      defaultMessage: 'The value to compare the context to',
    }),
  },
};
declare module 'html-to-docx';