package com.nacrt.demo.xml;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import java.util.Map;
import java.util.Properties;
public class XmlMainTest {

    public static void main(String[] args) throws Exception {
        ApplicationContext ac = new ClassPathXmlApplicationContext("applicationContext.xml");
        UserService userService = ac.getBean(UserService.class);
        userService.say();
        System.out.println(userService);
        System.out.println("==================");
        Map<String, String> getenv = System.getenv();
        System.out.println(getenv);
        Properties properties = System.getProperties();
        System.out.println(properties);
        System.out.println("==================");
        // Requesting the factory's own type returns the FactoryBean instance itself.
        MyFactoryBean myFactoryBean = ac.getBean(MyFactoryBean.class);
        System.out.println(myFactoryBean);
        // Direct getObject() calls go through the factory method each time.
        System.out.println(myFactoryBean.getObject());
        System.out.println(myFactoryBean.getObject());
        System.out.println("-----");
        // Requesting the product type, or the factory's bean name, yields the product A.
        System.out.println(ac.getBean(A.class));
        System.out.println(ac.getBean("myFactoryBean"));
        System.out.println(ac.getBean("myFactoryBean"));
    }
}
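The UserService, A, and MyFactoryBean classes are not shown in this snippet. As a rough sketch of what the factory might look like (a hedged illustration, not the project's actual code), a Spring FactoryBean typically has this shape:

package com.nacrt.demo.xml;

import org.springframework.beans.factory.FactoryBean;

// Hypothetical sketch: the body of getObject() and the singleton setting are assumptions.
public class MyFactoryBean implements FactoryBean<A> {

    @Override
    public A getObject() throws Exception {
        // Invoked by the container when the bean *name* is requested;
        // calling it directly (as in the demo above) bypasses any container caching.
        return new A();
    }

    @Override
    public Class<?> getObjectType() {
        return A.class;
    }

    @Override
    public boolean isSingleton() {
        // If true, the container caches the product, so repeated
        // ac.getBean("myFactoryBean") calls return the same A instance.
        return true;
    }
}

With such a definition, ac.getBean(MyFactoryBean.class) prints the factory itself, while ac.getBean("myFactoryBean") prints the product A; requesting the factory by name would require the "&myFactoryBean" prefix.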
President of Uganda Yoweri Museveni (Carl Court/WPA Pool/Getty Images)
Ugandan President Yoweri Museveni is refusing to approve a controversial bill that would sharpen his country's already severe laws against same-sex relationships, imposing prison time for even discussing homosexuality in the abstract. But don't confuse this with protecting gay rights. Museveni, in power since 1986, has been a fierce opponent of gay rights, and appears to be making a political decision here to balance between anti-gay political forces at home and the risk of losing foreign aid.
Part of Museveni's balancing act reportedly included sending his parliament a letter explaining his decision. The Ugandan newspaper The Daily Monitor reported the text of the letter (both the BBC and Voice of America, which have reporters there, considered it credible enough to repeat). Museveni reportedly argued that the government should focus on helping gay men and women be "rescued" from homosexuality by improving the economy (the link is not clear), further explaining, "Even with legislation, they will simply go underground and continue practicing homosexuality or lesbianism for mercenary reasons."
And then the president of Uganda went on to offer his theory for what causes homosexuality.
"You cannot call an abnormality an alternative orientation," Museveni wrote of homosexuality, according to the reports. "It could be that the Western societies, on account of random breeding, have generated many abnormal people."
Museveni's letter also reportedly claimed that some people have adopted homosexuality due to "financial inducements," or in the case of some women out of "sexual starvation."
These views would certainly be kooky, but they would sadly not be unique. The idea that homosexuality is a "Western perversion" imposed from outside is unfortunately common in some African countries, in large part a product of leaders stirring up homophobia and anti-Western sentiment to generate support or distract from other issues.
Gay rights have been rapidly eroding in sub-Saharan Africa in recent years. Since a new anti-gay law was passed in Nigeria, Africa's most populous country, dozens of men and women accused of homosexuality have been arrested.
import { Button, Col, List, message, Row } from 'antd';
import React from 'react';
import BookPreviewView from '../../../components/book-preview-view';
import ContentSubmitter from '../../../components/content-submitter';
import ContentWriter, { ArticleProps } from '../../../components/content-writer';
import { ReferenceDialog } from '../../../components/dialogs/reference-dialog';
import InitializerView from '../../../components/ui/initializer-view';
import { API } from '../../../configs/api-config';
import { EntityJSON } from '../../../types/api';
import { Book } from '../../../types/book';
import { Topic } from '../../../types/topic';
import { fetchDataByGet, fetchDataByPost, fetchDataByPut } from '../../../util/network-util';
export interface TopicWriterProps {
topicId: string;
};
export interface TopicWriterState {
saved: boolean;
saving: boolean;
topic: Topic;
originalTopic: Topic;
referenceDialogVisible: boolean;
references: Array<Book>;
};
export default class TopicWriter extends React.Component<TopicWriterProps, TopicWriterState> {
constructor(props: TopicWriterProps) {
super(props);
this.state = {
originalTopic: null,
topic: null,
saving: false,
saved: true,
references: [],
referenceDialogVisible: false,
}
this.onContentChange = this.onContentChange.bind(this);
this.onContentSave = this.onContentSave.bind(this);
}
onReference(books: Array<Book>) {
this.setState({ references: books, saved: false });
}
onContentChange(article: ArticleProps) {
this.setState((state) => {
// Copy the topic rather than mutating the object held in state.
let topic: any = { ...(state.topic || {}) };
topic.title = article.title;
topic.words = article.words;
topic.content = article.content;
return {
topic: topic,
saved: false,
}
})
}
onContentSave() {
const { topic } = this.state;
if (!(topic && topic.title && topic.content && topic.content.source)) {
message.error('Sorry, a title and content are required.');
return;
}
let api = API.UserCommunityTopicCreate;
let requester = fetchDataByPost;
if (topic.contentId) {
api = API.UserCommunityTopicUpdate;
requester = fetchDataByPut;
}
this.setState({ saving: true });
requester<EntityJSON<Topic>>(api, {
topic_id: topic.contentId,
title: this.state.topic.title,
content: this.state.topic.content,
references: this.state.references.map((b) => b.id),
status: this.state.topic.status,
words: this.state.topic.words,
}).then((data) => {
message.success('Saved successfully!');
this.setState({ topic: data.entity, originalTopic: { ...data.entity }, saved: true });
}).catch((err) => {
message.error(`Save failed: ${err}`);
}).finally(() => {
this.setState({ saving: false });
});
}
async getClientSideState() {
if (!this.props.topicId) {
return {};
}
let topicData = await fetchDataByGet<EntityJSON<Topic>>(API.UserCommunityTopicEntity, {
topic_id: this.props.topicId
})
return {
topic: topicData.entity,
originalTopic: { ...topicData.entity }
};
}
render() {
let topic = this.state.topic;
return (
<InitializerView
initializer={() => this.getClientSideState()}
onInitialized={(data) => this.setState(data, () => {
this.setState((state) => {
return { references: (state.topic && state.topic.references) ? state.topic.references.map((reference) => reference.ref) : [] }
})
})}
>
<Row>
<Col span={16}>
<ContentWriter
value={topic as any}
onChange={this.onContentChange}
/>
</Col>
<Col span={8}>
<ContentSubmitter
originalStatus={this.state.originalTopic && this.state.originalTopic.status}
onStatusChange={(status) => {
let topic: any = { ...(this.state.topic || {}) }; // copy rather than mutating state
topic.status = status;
this.setState({ topic: topic as Topic, saved: false });
}}
extra={
<div>
{
topic && topic.examination &&
<div>
<h3>Review Information</h3>
<p className="huidu-large-description">{topic.examination.reason}</p>
</div>
}
<h3>Topic Options</h3>
<Button onClick={() => this.setState({ referenceDialogVisible: true })}>Cite Books</Button>
<List
renderItem={(item) => (
<List.Item>
<BookPreviewView book={item} />
</List.Item>
)}
dataSource={this.state.references}
/>
</div>
}
loading={this.state.saving}
content={this.state.topic}
saved={this.state.saved}
onSubmit={this.onContentSave}
/>
</Col>
<ReferenceDialog
visible={this.state.referenceDialogVisible}
onSelected={(books) => this.onReference(books)}
onCancel={() => this.setState({ referenceDialogVisible: false })}
/>
</Row>
</InitializerView>
)
}
}
# -*- coding:utf-8 -*-
"""
Wait
^^^^
.. moduleauthor:: <NAME> <<EMAIL>>
Just waits for something (e.g. for preconditions to be fulfilled)
"""
import rospy
from bitbots_body_behaviour.actions.go_to import Stand
from bitbots_stackmachine.abstract_action_element import AbstractActionElement
from humanoid_league_msgs.msg import HeadMode
class Wait(AbstractActionElement):
    def __init__(self, connector, args=10):
        super(Wait, self).__init__(connector)
        # Wait for `args` seconds (defaults to 10).
        self.time = rospy.get_time() + args

    def perform(self, connector, reevaluate=False):
        if connector.world_model.ball_seen():
            connector.blackboard.set_head_duty(HeadMode.BALL_MODE)
        # TODO: this was an old push which is not correct, since it is an action
        # self.push(Stand)
        if self.time < rospy.get_time():
            self.pop()
The runup in gold prices in recent years – from $800 per ounce in early 2009 to above $1,900 in the autumn of 2011 – had all the features of a bubble. Now, like all asset-price surges that are divorced from the fundamentals of supply and demand, the gold bubble is deflating.
At the peak, gold bugs – a combination of paranoid investors and others with a fear-based political agenda – were happily predicting gold prices going to $2,000, $3,000 and even to $5,000 in a matter of years. But prices have moved mostly downward since then. In April, gold was selling for close to $1,300 per ounce and the price is still hovering below $1,400, an almost 30% drop from the 2011 high.
There are many reasons why the bubble has burst, and why gold prices are likely to move much lower, toward $1,000 by 2015.
First, gold prices tend to spike when there are serious economic, financial and geopolitical risks in the global economy. During the global financial crisis, even the safety of bank deposits and government bonds was in doubt for some investors. If you worry about financial Armageddon, it is indeed metaphorically the time to stock your bunker with guns, ammunition, canned food and gold bars.
But even in that dire scenario, gold might be a poor investment. Indeed, at the peak of the global financial crisis in 2008 and 2009, gold prices fell sharply a few times. In an extreme credit crunch, leveraged purchases of gold cause forced sales, because any price correction triggers margin calls. As a result, gold can be very volatile – upward and downward – at the peak of a crisis.
Second, gold performs best when there is a risk of high inflation, as its popularity as a store of value increases. But despite very aggressive monetary policy by many central banks – successive rounds of "quantitative easing" have doubled or even tripled the money supply in most advanced economies – global inflation is actually low and falling further.
The reason is simple: while base money is soaring, the velocity of money has collapsed, with banks hoarding the liquidity in the form of excess reserves. Ongoing private and public debt deleveraging has kept global demand growth below that of supply.
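The mechanism can be written with the quantity-theory identity (a textbook relation, not a formula from the article):

\[ M V = P Y \]

where \( M \) is the money stock, \( V \) the velocity of money, \( P \) the price level, and \( Y \) real output. If \( M \) doubles while \( V \) halves because banks hold the new base money as excess reserves, nominal spending \( PY \) is unchanged, so the surge in base money produces no inflationary impulse.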
Thus, firms have little pricing power owing to excess capacity, while workers' bargaining power is low owing to high unemployment. Moreover, trade unions continue to weaken, while globalisation has led to cheap production of labor-intensive goods in China and other emerging markets, depressing the wages and job prospects of unskilled workers in advanced economies.
With little wage inflation, high goods inflation is unlikely. If anything, inflation is now falling further globally as commodity prices adjust downward in response to weak global growth. And gold is following the fall in actual and expected inflation.
Third, unlike other assets, gold does not provide any income. Whereas equities have dividends, bonds have coupons, and homes provide rents, gold is solely a play on capital appreciation. Now that the global economy is recovering, other assets – equities or even revived real estate – provide higher returns. Indeed, US and global equities have vastly outperformed gold since the sharp rise in gold prices in early 2009.
Fourth, gold prices rose sharply when real (inflation-adjusted) interest rates became increasingly negative after successive rounds of quantitative easing. The time to buy gold is when the real returns on cash and bonds are negative and falling. But the more positive outlook about the US and the global economy implies that over time the Federal Reserve and other central banks will exit from quantitative easing and zero policy rates, which means that real rates will rise, rather than fall.
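The real-rate logic here is the Fisher approximation (the illustrative numbers are mine, not the author's):

\[ r \approx i - \pi^{e} \]

where \( i \) is the nominal interest rate and \( \pi^{e} \) expected inflation. With \( i = 0.25\% \) and \( \pi^{e} = 2\% \), the real return on cash is about \( -1.75\% \), the environment in which non-yielding gold looks attractive; once central banks exit zero rates, \( r \) turns positive and the opportunity cost of holding gold rises.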
Fifth, some argued that highly indebted sovereigns would push investors into gold as government bonds became more risky. But the opposite is happening now. Many of these highly indebted governments have large stocks of gold, which they may decide to dump to reduce their debts. Indeed, a report that Cyprus might sell a small fraction – some €400m – of its gold reserves triggered a 13% fall in gold prices in April. Countries like Italy, which has massive gold reserves (above $130bn), could be similarly tempted, driving down prices further.
Sixth, some extreme political conservatives, especially in the United States, hyped gold in ways that ended up being counterproductive. For this far-right fringe, gold is the only hedge against the risk posed by the government's conspiracy to expropriate private wealth. These fanatics also believe that a return to the gold standard is inevitable as hyperinflation ensues from central banks' "debasement" of paper money. But, given the absence of any conspiracy, falling inflation and the inability to use gold as a currency, such arguments cannot be sustained.
A currency serves three functions, providing a means of payment, a unit of account and a store of value. Gold may be a store of value for wealth, but it is not a means of payment. You cannot pay for your groceries with it. Nor is it a unit of account. Prices of goods and services, and of financial assets, are not denominated in gold terms.
So gold remains John Maynard Keynes's "barbarous relic," with no intrinsic value and used mainly as a hedge against irrational fear and panic. Yes, all investors should have a very modest share of gold in their portfolios as a hedge against extreme tail risks. But other real assets can provide a similar hedge, and those tail risks – while not eliminated – are certainly lower today than at the peak of the global financial crisis.
While gold prices may temporarily move higher in the next few years, they will be very volatile and will trend lower over time as the global economy mends itself. The gold rush is over.
Copyright: Project Syndicate, 2013.
// Copyright 2019 Samaritan Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
// FIXME: mockgen can't handle cycle imports in reflect mode when outside of GOPATH currently,
// so add self_package parameter temporarily. Refer to: https://github.com/golang/mock/issues/310
//go:generate mockgen -package $GOPACKAGE -self_package $REPO_URI/$GOPACKAGE --destination ./mock_dynamic_test.go $REPO_URI/$GOPACKAGE DynamicSource
import (
"context"
"sync"
"time"
"github.com/samaritan-proxy/samaritan/pb/api"
"github.com/samaritan-proxy/samaritan/pb/config/bootstrap"
"github.com/samaritan-proxy/samaritan/pb/config/service"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
var _ DynamicSource = new(dynamicSource)
type dependencyHook func(added, removed []*service.Service)
type svcConfigHook func(svcName string, newCfg *service.Config)
type svcEndpointHook func(svcName string, added, removed []*service.Endpoint)
// DynamicSource represents the dynamic config source.
type DynamicSource interface {
// SetDependencyHook sets a hook which will be called
// when a dependency is added or removed. It must be
// called before Serve.
SetDependencyHook(hook dependencyHook)
// SetSvcConfigHook sets a hook which will be called
// when the proxy config of a subscribed service updates.
// It must be called before Serve.
SetSvcConfigHook(hook svcConfigHook)
// SetSvcEndpointHook sets a hook which will be called
// when the endpoints of a subscribed service are updated.
// It must be called before Serve.
SetSvcEndpointHook(hook svcEndpointHook)
// Serve starts observing the config update from remote.
// It will not return until Stop is called.
Serve()
// Stop stops observing the config update from remote.
Stop()
}
type dynamicSource struct {
b *bootstrap.Bootstrap
conn *grpc.ClientConn
c *discoveryClient
dependHook dependencyHook
svcCfgHook svcConfigHook
svcEtHook svcEndpointHook
quit chan struct{}
done chan struct{}
}
func newDynamicSource(b *bootstrap.Bootstrap) (*dynamicSource, error) {
d := &dynamicSource{
b: b,
quit: make(chan struct{}),
done: make(chan struct{}),
}
err := d.initDiscoveryClient()
return d, err
}
func (d *dynamicSource) initDiscoveryClient() error {
target := d.b.DynamicSourceConfig.Endpoint
// TODO: support Authentication
options := []grpc.DialOption{
grpc.WithInsecure(),
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: time.Second * 30,
Timeout: time.Second * 10,
}),
}
conn, err := grpc.Dial(target, options...)
if err != nil {
return err
}
d.conn = conn
stub := api.NewDiscoveryServiceClient(conn)
d.c = newDiscoveryClient(stub)
return nil
}
func (d *dynamicSource) SetDependencyHook(hook dependencyHook) {
d.dependHook = hook
}
func (d *dynamicSource) SetSvcConfigHook(hook svcConfigHook) {
d.svcCfgHook = hook
}
func (d *dynamicSource) SetSvcEndpointHook(hook svcEndpointHook) {
d.svcEtHook = hook
}
func (d *dynamicSource) Serve() {
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
wg.Add(1)
go func() {
defer wg.Done()
d.c.StreamDependencies(ctx, d.b.Instance, d.dependHook)
}()
wg.Add(1)
go func() {
defer wg.Done()
d.c.StreamSvcConfigs(ctx, d.svcCfgHook)
}()
wg.Add(1)
go func() {
defer wg.Done()
d.c.StreamSvcEndpoints(ctx, d.svcEtHook)
}()
<-d.quit
cancel()
// close grpc client conn
d.conn.Close()
wg.Wait()
close(d.done)
}
func (d *dynamicSource) Stop() {
close(d.quit)
<-d.done
}
import React from 'react'
import { Svg, Path, SvgProps } from '../wrappers'
const Delete: React.FC<SvgProps> = ({ fill, ...props }) => (
<Svg {...props}>
<Path
fill={fill}
d="M6 19c0 1.1.9 2 2 2h8c1.1 0 2-.9 2-2V7H6v12zM19 4h-3.5l-1-1h-5l-1 1H5v2h14V4z"
/>
</Svg>
)
export default React.memo(Delete)
package main
import (
"fmt"
"github.com/Soreil/eulersolutions/utils"
)
func main() {
	// Counts the values of nCr exceeding one million for n <= 100
	// (cf. Project Euler problem 53).
	var mills int
	for n := float64(0); n <= 100; n++ {
		for r := float64(0); r < n; r++ {
			if ncr(n, r) > 1000000 {
				mills++
			}
		}
	}
	fmt.Println(mills)
}

// ncr computes the binomial coefficient n choose r via factorials.
// float64 holds 100! (~9.3e157) without overflow, though with some
// rounding error; that is acceptable here since we only compare
// against one million.
func ncr(n, r float64) float64 {
	if r > n {
		return -1
	}
	return utils.Factorial(n) / (utils.Factorial(r) * utils.Factorial(n-r))
}
import tensorflow as tf  # TF 1.x API: tf.contrib is used below


def dataset_to_tensors(dataset, capacity, map_fn=None, parallelism=None):
    """Reads up to `capacity` elements from `dataset` as a single batch of tensors."""
    with tf.name_scope(None, 'dataset_to_tensors',
                       [dataset, capacity, map_fn, parallelism]):
        if map_fn is not None:
            dataset = dataset.map(map_fn, num_parallel_calls=parallelism)
        return tf.contrib.data.get_single_element(dataset.batch(capacity))
Episodic LH secretion in the immature male and female rat as assessed by sequential blood sampling.
Using a repeated sampling method, the precise and detailed secretory pattern of luteinizing hormone (LH) was studied in the immature female and male rat. Animals which had been kept under controlled environmental conditions were implanted with an intracardiac cannula 1 day beforehand, and sequential 50-microliter blood samples were collected every 10 min over a 4-hour period at 24-27 days of age. The concentrations of LH in the whole blood were determined by radioimmunoassay. When an LH pulse was defined as a rapid increase in LH concentration to levels higher than 0.28 ng/ml (mean of minimally detectable amounts plus 2 SD), LH was secreted in a pulsatile fashion in both the female and male rats. Most animals showed 1 or 2 LH pulses over the 4-hour sampling period, and the mean amplitudes were slightly but significantly higher in the female than in the male.
// Imports restored for this snippet (the package declaration is not shown in the original):
import java.util.AbstractQueue;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.jctools.queues.MpmcUnboundedXaddArrayQueue;
import org.jctools.queues.MpscUnboundedArrayQueue;
import org.jctools.queues.SpscUnboundedArrayQueue;

// Assumption: getLogger() below refers to the project's internal logging helper.

/**
* <p>
* Helper class that creates <a href="https://github.com/JCTools/JCTools">JCTools</a> queues.
* </p>
* <p>
* JCTools queues will be created by default if they're found in the Thread context classloader and creation is not disabled.
* </p>
* <p>
* Creation of JCTools queues can be disabled per queue name. Once disabled, methods will fall back to {@code java.util.concurrent.ConcurrentLinkedQueue}.
* E.g. setting {@code -Dappenders.GenericItemSourcePool.jctools.enabled=false} (matching the {@code appenders.<name>.jctools.enabled} format used by the constructor) will get this class to create a fallback queue
* </p>
* NOTE: Consider this class <i>private</i>. API is highly experimental and can change without notice
*/
public class QueueFactory {
private static final Map<String, QueueFactory> CACHED_INSTANCES = new HashMap<>();
private final String name;
private final Features features;
// visible for testing
QueueFactory(final String name) {
this.name = name;
this.features = Features.builder()
.configure(Features.Feature.JCTOOLS_QUEUES,
Boolean.parseBoolean(System.getProperty(String.format("appenders.%s.%s.enabled", name, "jctools"), Boolean.toString(Features.Feature.JCTOOLS_QUEUES.isEnabled()))))
.build();
CACHED_INSTANCES.put(name, this);
}
public static QueueFactory getQueueFactoryInstance(final String name) {
return CACHED_INSTANCES.computeIfAbsent(name, QueueFactory::new);
}
public final <T> Queue<T> tryCreateMpscQueue(final int initialSize) {
return tryCreate(name, "org.jctools.queues.MpscUnboundedArrayQueue", initialSize);
}
public final <T> Queue<T> tryCreateSpscQueue(final int initialSize) {
return tryCreate(name, "org.jctools.queues.SpscUnboundedArrayQueue", initialSize);
}
public final <T> Queue<T> tryCreateMpmcQueue(final int initialSize) {
return tryCreate(name, "org.jctools.queues.MpmcUnboundedXaddArrayQueue", initialSize);
}
final <T> Queue<T> tryCreate(final String name, final String queueClassName, final int initialSize) {
if (features.isEnabled(Features.Feature.JCTOOLS_QUEUES) && hasClass(name, queueClassName)) {
getLogger().debug("{}: Using {}", name, queueClassName);
switch (queueClassName) {
case "org.jctools.queues.MpmcUnboundedXaddArrayQueue":
return new MpmcUnboundedXaddArrayQueue<>(initialSize);
case "org.jctools.queues.SpscUnboundedArrayQueue":
return new SpscUnboundedArrayQueue<>(initialSize);
case "org.jctools.queues.MpscUnboundedArrayQueue":
return new MpscUnboundedArrayQueue<>(initialSize);
default:
throw new UnsupportedOperationException(queueClassName + " is not supported");
}
}
final ConcurrentLinkedQueue<T> fallback = new ConcurrentLinkedQueue<>();
getLogger().debug("{}: Falling back to {}",
name,
fallback.getClass().getName());
return fallback;
}
/* visible for testing */
boolean hasClass(final String name, final String className) {
try {
Thread.currentThread().getContextClassLoader().loadClass(className);
return true;
} catch (ClassNotFoundException e) {
getLogger().debug("{}: {} not available",
name,
className);
return false;
}
}
/**
* Helper method that converts non-iterable queues to {@code ArrayList} if needed.
*
* @param items Collections to convert
* @param <T> item type
* @return new iterable collection or {@code items}
*/
public <T> Collection<T> toIterable(final Collection<T> items) {
// CLQ supports iterator(), so nothing to do
if (items instanceof ConcurrentLinkedQueue) {
return items;
}
if (items instanceof AbstractQueue) {
AbstractQueue<T> arrayQueue = (AbstractQueue<T>)items;
int size = items.size();
// Unlike CLQ, ArrayList doesn't allocate on add()
List<T> result = new ArrayList<>(size);
while (size-- > 0) {
result.add(arrayQueue.poll());
}
return result;
}
return items;
}
private static class Features {
private final BitSet state;
public Features(BitSet state) {
this.state = state;
}
public final boolean isEnabled(final Feature feature) {
return this.state.get(feature.ordinal());
}
public static Builder builder() {
return new Builder();
}
private static class Builder {
private final BitSet state = new BitSet();
Builder configure(final Feature feature, final boolean enabled) {
state.set(feature.ordinal(), enabled);
return this;
}
public Features build() {
return new Features(state);
}
}
private enum Feature {
JCTOOLS_QUEUES(true);
private final boolean enabled;
Feature(final boolean enabled) {
this.enabled = enabled;
}
public final boolean isEnabled() {
return enabled;
}
}
}
}
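A short usage sketch (the component name and queue size are made up for illustration; the system property follows the appenders.<name>.jctools.enabled format visible in the constructor above):

import java.util.Collection;
import java.util.Queue;

public class QueueFactoryDemo {
    public static void main(String[] args) {
        // "itemSourcePool" is an arbitrary component name chosen for this sketch.
        QueueFactory factory = QueueFactory.getQueueFactoryInstance("itemSourcePool");

        // Returns org.jctools.queues.MpscUnboundedArrayQueue when JCTools is on the
        // classpath and -Dappenders.itemSourcePool.jctools.enabled is unset or "true";
        // otherwise falls back to java.util.concurrent.ConcurrentLinkedQueue.
        Queue<String> queue = factory.tryCreateMpscQueue(1024);
        queue.offer("item");

        // Drains non-iterable JCTools queues into an ArrayList; CLQ passes through as-is.
        Collection<String> snapshot = factory.toIterable(queue);
        System.out.println(snapshot);
    }
}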
/**
* Changes the favourite status of an existing person in the address book.
*/
public class FavouriteCommand extends Command {
public static final String COMMAND_WORD = "fav";
public static final String MESSAGE_USAGE = COMMAND_WORD
+ ": Changes the favourite status of a person identified by\n"
+ " the index number used in the displayed person list.\n"
+ "Parameters: INDEX (must be a positive integer greater than\n"
+ " 0 and less than 2147483648)\n"
+ "Example: " + COMMAND_WORD + " 1";
public static final String MESSAGE_FAVOURITE_PERSON_SUCCESS = "Added to Favourites Person: %1$s";
public static final String MESSAGE_DUPLICATE_PERSON = "This person is already saved to favourites.";
private final Index targetIndex;
private ModelMemento modelMemento;
private Person personToFavourite;
public FavouriteCommand(Index targetIndex) {
this.targetIndex = targetIndex;
}
@Override
public CommandResult execute(Model model) throws CommandException {
requireNonNull(model);
List<Person> lastShownList = model.getFilteredPersonList();
if (targetIndex.getZeroBased() >= lastShownList.size()) {
throw new CommandException(Messages.MESSAGE_INVALID_PERSON_DISPLAYED_INDEX);
}
this.personToFavourite = lastShownList.get(targetIndex.getZeroBased());
this.modelMemento = new ModelMemento();
modelMemento.setModel(new ModelManager(model.makeCopy()));
Person favouritedPerson = createFavouritedPerson(personToFavourite);
if (personToFavourite.isSameFavouritePerson(favouritedPerson) && model.hasFavouritePerson(favouritedPerson)) {
throw new CommandException(MESSAGE_DUPLICATE_PERSON);
}
model.setPerson(personToFavourite, favouritedPerson);
return new CommandResult(String.format(MESSAGE_FAVOURITE_PERSON_SUCCESS, favouritedPerson));
}
@Override
public CommandResult unExecute(Model model) throws CommandException {
model.setAddressBook(this.modelMemento.getModel().getAddressBook());
return new CommandResult("Favourite contact.");
}
@Override
public boolean equals(Object other) {
return other == this // short circuit if same object
|| (other instanceof FavouriteCommand // instanceof handles nulls
&& targetIndex.equals(((FavouriteCommand) other).targetIndex)); // state check
}
private static Person createFavouritedPerson(Person personToFavourite) {
assert personToFavourite != null;
Name updatedName = personToFavourite.getName();
Phone updatedPhone = personToFavourite.getPhone();
Email updatedEmail = personToFavourite.getEmail();
Faculty updatedFaculty = personToFavourite.getFaculty();
Role updatedRole = personToFavourite.getRole();
Telegram updatedTelegram = personToFavourite.getTelegram();
Favourite updatedFavourite = new Favourite(true); // edit command does not allow editing favourite status
Set<Tag> updatedTags = personToFavourite.getTags();
return new Person(updatedName, updatedPhone, updatedEmail, updatedFaculty, updatedRole,
updatedTelegram, updatedFavourite, updatedTags);
}
}
An economically viable tokamak fusion reactor based on the ITER experience
This is my personal vision of and outlook towards a fusion reactor, based on my extensive experience of being part of the ITER design, and now construction, as well as leading the largest fusion technology program worldwide (KIT, Karlsruhe Institute of Technology) for 7 years. In particular, I want to discuss how a fusion reactor can be economically viable without employing too advanced physics and technology. It will certainly be a pulsed machine (approx. 20 000 s pulses) with thermal energy storage (the turbine runs steady state). I also want to discuss the optimum machine size and toroidal field for such a machine, and why I think that high field and smaller plasmas may not necessarily make a fusion reactor more competitive. When one extrapolates from today's knowledge of ITER construction, even allowing that ITER could be built much more cheaply, it is clear that a fusion power plant will cost more than 10, or more likely more than 15, billion Euros/Dollars (the first of a kind even approx. 30 billion). Therefore, in order to be economically attractive, a fusion reactor needs to produce a large amount of power (on the order of 2.5 GW electric). The possible size (R ∼ 10 m) and reasonably conservative physics basis of such a machine will be briefly described in the presentation. If we are successful in achieving advanced physics in a burning plasma, e.g. in ITER, then we can make the machine slightly smaller, but the principal arguments for a large machine will not change significantly. Key technologies and their status will be discussed, with particular emphasis on a realistic blanket and divertor design, the size and issues of a tritium plant (T-plant) for such a machine, and the challenges which have to be overcome beyond what is needed for ITER. Finally, a simple economic consideration will be discussed to show that a large machine could be economically viable, even in today's environment, in particular in competition with renewables. This article is part of a discussion meeting issue 'Fusion energy using tokamaks: can development be accelerated?'.
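To make the "simple economic consideration" concrete, a back-of-envelope estimate (the capacity factor and electricity price below are my assumptions, not figures from the abstract):

\[ E_{\text{yr}} = P_e \cdot 8760\,\text{h} \cdot f \approx 2.5\,\text{GW} \times 8760\,\text{h} \times 0.75 \approx 16\,\text{TWh} \]
\[ R_{\text{yr}} \approx 16 \times 10^{6}\,\text{MWh} \times 60\,€/\text{MWh} \approx 1.0\,\text{bn}\,€ \]

Against a capital cost of 15 bn €, gross revenue alone implies a payback on the order of 15 years before operating costs, which is why the argument leans on large unit power (2.5 GW electric) rather than smaller machines.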
package querytest
import (
"bytes"
"context"
"strings"
"testing"
"time"
"github.com/andreyvit/diff"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/flux"
"github.com/influxdata/flux/dependencies/dependenciestest"
"github.com/influxdata/flux/dependency"
"github.com/influxdata/flux/internal/spec"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/semantic/semantictest"
"github.com/influxdata/flux/stdlib/universe"
"github.com/influxdata/flux/values/valuestest"
)
type NewQueryTestCase struct {
Name string
Raw string
Want *flux.Spec
WantErr bool
}
var opts = append(
semantictest.CmpOptions,
cmp.AllowUnexported(flux.Spec{}),
cmp.AllowUnexported(universe.JoinOpSpec{}),
cmpopts.IgnoreUnexported(flux.Spec{}),
cmpopts.IgnoreUnexported(universe.JoinOpSpec{}),
cmpopts.IgnoreFields(flux.Operation{}, "Source"),
valuestest.ScopeTransformer,
)
func NewQueryTestHelper(t *testing.T, tc NewQueryTestCase) {
t.Helper()
now := time.Now().UTC()
ctx, deps := dependency.Inject(context.Background(), dependenciestest.Default())
defer deps.Finish()
got, err := spec.FromScript(ctx, runtime.Default, now, tc.Raw)
if (err != nil) != tc.WantErr {
t.Errorf("error compiling spec error = %v, wantErr %v", err, tc.WantErr)
return
}
if tc.WantErr {
return
}
if tc.Want != nil {
tc.Want.Now = now
if !cmp.Equal(tc.Want, got, opts...) {
t.Errorf("unexpected specs -want/+got %s", cmp.Diff(tc.Want, got, opts...))
}
}
}
func RunAndCheckResult(t testing.TB, querier *Querier, c flux.Compiler, d flux.Dialect, want string) {
t.Helper()
var buf bytes.Buffer
_, err := querier.Query(context.Background(), &buf, c, d)
if err != nil {
t.Errorf("failed to run query: %v", err)
return
}
got := buf.String()
if g, w := strings.TrimSpace(got), strings.TrimSpace(want); g != w {
t.Errorf("result not as expected want(-) got (+):\n%v", diff.LineDiff(w, g))
}
}
Farmers’ Perception of Selling Chili to the New Auction Market: A Case at Sleman Regency of Indonesia
Purpose: The aim of this study is to determine the effect of economic and social factors on farmers' marketing decisions. Research Method: This research was conducted from January to March 2020. The respondents were 60 (sixty) farmers chosen by purposive sampling. The analysis models used in this research are Likert-scale and correlation analysis. Findings: The economic factor has a medium effect on farmers' marketing decisions. Ease of farmers' access to the auction market and to information is the main economic variable in the farmers' marketing decision. Price has a very high effect on the farmers' marketing decisions. The social factor also affects the farmers' marketing decisions, but at a weak level. Research Limitation: The respondents were only 60 farmers. Originality/Value: Provides information for the new marketing institution on staying competitive and attractive to customers.
INTRODUCTION
The farmer's position in the markets can be that of a producer or a consumer. He is a producer when he produces an agricultural product, and a consumer when he uses a marketing service. Farmers weigh many considerations when using marketing services, especially in selecting the marketing channel, and they pay attention to the benefits of their choice. Farmers may prefer direct marketing where they can access it, because it gives them many benefits, such as increased revenue and reduced costs (Adanacioglu, 2016). Where that is not possible, farmers use the existing marketing channels, even if that means selling their products at a low price.
Farmers' behavior in choosing a marketing channel has been widely studied. Engel et al. (2001) designed a model of how a consumer chooses a service under the influence of several factors: farmers' behavior as users of a marketing service is shaped by the environment, the individual, and psychological processes. This model is reinforced by Kotler & Keller (2016) with the black box model, which describes consumers' characteristics and decision processes. It explains consumers' decisions as influenced by marketing stimuli (the marketing mix), other stimuli (economics, politics, technology, and culture), and consumer characteristics (individual, cultural, social, and psychological).
The environment effect is usually more dominant for the farmers' decision as a user of marketing service (Ghazaryan et al., 2018). This decision is influenced by economic and social factors (Hudeckova & Lostak, 2003;Artukoglu & Olgun, 2008). The economic factor consists of some variables comprising of price, distance, and standards grades of the product (Asebe et al., 2007;Tadesse & Bahiigwa, 2015;Hendrarini et al., 2020). The social effect has proven to make the agricultural sector more competitive (Hudeckova & Lostak, 2003). Many social factors, like advice from reference groups (pioneer farmers and family members), involvement in associations, culture, religion, and information technology can influence farmers' decisions (Noel, 2009;Kaewwongwattana et al., 2015;Lawal et al., 2017;Ataei et al., 2019).
Chili farmers in Sleman Regency (Indonesia) are also careful in choosing a marketing channel, because the chili price fluctuates and can cause them losses. One of the marketing channels selected by farmers is the Sleman auction market. This auction market was only established in 2018, yet many farmers already sell their fresh red chili there. Research indicates that agricultural auction markets can link farmers with consumer demand, create a national minimum price scheme, change the price discovery process, and drive logistical efficiency by reducing transaction costs (Meulenberg, 1989; Meulenberg & Viaene, 1993; Heezen & Baets, 1996; Tourte & Gaskell, 2004). It is therefore interesting to learn why farmers trust the new auction market with their products, all the more so because the Sleman auction market currently has to compete with the traders who usually buy chili from farmers. This paper thus matters for a new marketing organization seeking to stay competitive and attractive to customers. The aim of this study is to know the effect of economic and social factors on the farmers' marketing decisions.
Study Area and Sampling
This study was conducted for 3 (three) months from January to March 2020 in Sleman Regency.
This area was chosen because it has a new auction market for chili in Yogyakarta Province. The other reason is that chili farms in Sleman Regency contribute almost 28 percent of total chili production in Yogyakarta Province (Central Bureau of Statistics of Yogyakarta Province, 2020).
Data Collected
The study used both primary and secondary data. Primary data were obtained from interviews with 60 (sixty) farmers who sold chili to the auction market, chosen by purposive sampling. The farmers in this research live in the Pakem, Seyegan, and Ngemplak sub-districts. Farmers in the Pakem sub-district were chosen because they are located near the auction market, while farmers in the Seyegan and Ngemplak sub-districts were chosen because they are far from the auction market and face different conditions from the farmers in Pakem. The secondary data were obtained from the local agricultural office and the Central Bureau of Statistics' documents.
Data Analysis
1) The Likert analysis was used to determine the implementation of farmers' economic and social factors. The economic factor consists of three variables: product, price, and access. Meanwhile, the social factor consists of two variables: social interaction and extension. The analysis also measured the farmers' marketing decision to the auction market. Each of these variables has question indicators that were tested for validity and reliability.
The Likert score in this study runs from 1 to 5, in which 1 stands for strongly disagree, 2 for disagree, 3 for neutral, 4 for agree, and 5 for strongly agree. The average Likert score of each question across all respondents was converted to a percentage by the following formula:

\( \text{Score}(\%) = \dfrac{\text{obtained score}}{\text{maximum score}} \times 100\% \)

The results were categorized into five groups ranging from very low to very high.
2) The Spearman correlation test was used to examine the relationship between the economic and social factors and the farmers' marketing decision to the auction market of chili. The first step of this analysis is a normality test using the one-sample Kolmogorov-Smirnov test. Once the distribution of the data is known, the correlation test can proceed. The formula of the rank Spearman correlation test follows the study of Rees (2000):

\( r_s = 1 - \dfrac{6 \sum d_i^2}{n(n^2 - 1)} \)

where \( r_s \) is the Spearman correlation coefficient, \( \sum d_i^2 \) is the total square of the differences between ranks, and \( n \) is the total number of respondents. The correlation criteria used in this research are shown in Table 02.
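As a worked illustration of the Spearman formula (with invented numbers, not data from this study): for \( n = 5 \) respondents whose two rankings differ by \( d_i = 1, 0, 2, 1, 0 \), \( \sum d_i^2 = 6 \), so

\[ r_s = 1 - \frac{6 \times 6}{5(5^2 - 1)} = 1 - \frac{36}{120} = 0.7, \]

a fairly strong positive correlation under the criteria typically used for such tests.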
Economic Factor
The initial step in analyzing the data was the validity and reliability test, to ensure the question items are correct and usable for research. Based on the results, all questions were valid and reliable and could therefore be used in this study.
The economic factor consists of 3 (three) variables: product, price, and access. These characteristics were categorized as high because their score reached 78.45%. Access is the highest-scoring variable in the economic factor. The indicator with the highest score on the access variable is the auction market location: the auction market sits in the middle of Sleman Regency, so farmers can easily reach it. Furthermore, the auction market has gathering points spread across several sub-districts, which make it easy for farmers who live far from the auction market to sell chili there.
The chili price at the gathering points follows the price formed by the auction process in the auction market of Sleman Regency. The next indicator is the ease of payment for farmers. The auction market pays farmers in cash after the price from the auction process is formed. The price usually appears at night, around 7.30 p.m., and farmers are informed the same day. The situation differs for farmers who sell chili through a gathering point: they are paid 3-4 hours after the auction process closes, because the manager needs time to bring funds from the auction market to the gathering point. Consequently, a farmer at the gathering point collects his money the next day, or when he delivers chili to the gathering point again. This makes some farmers feel that payment for chili is not timely. Other indicators in the access variable show that it is very easy for farmers to contact the auction market manager using the WhatsApp application, and easy to become a member of the auction market, since there are no administrative costs for registering as a member.
Furthermore, the second-ranked variable in the economic factor is price, which falls in the very high category. The majority of farmers choose to sell chili to the auction market because the prices are higher than when selling to traders. Farmers compare prices in the auction market with traders' prices through information from other farmers, auction market managers, and traders. The price formed in the auction market is 0.67 to 1.07 US$ per kg higher than the price offered by traders. This is a major consideration for farmers in selling chili to the auction market.
However, some farmers feel that prices in the auction market do not match the chili quality.
There are 4 (four) grades of chili on the auction market: SP, SP1, SPK, and SX. Each grade has a different price; in February-March 2020, for example, the price of SP was 0.87 US$ per kg, while the price of SP1 was 0.73 US$ per kg. Differences in grade perception generally arose between farmers and the auction market managers: grading by the auction market manager often lowered the quality grade of farmers' chili, or did not match the grading done at the farm level. Even so, farmers accept the manager's grading results, because their accuracy is higher and they follow the SOP (Standard Operating Procedure) of the auction market.
The lowest variable in the economic factor is the product. The indicator scores on this variable point to differences in perceptions of chili quality between farmers and auction market managers, especially for farmers at the gathering points. The chili supplied by farmers to the gathering point is still fresh and not deformed; however, its quality does not follow the grade types set by the auction market. The first grade of chili is SP, the best quality, with a length of 10-15 cm and a 100% perfect red color. Next, the SP1 grade is chili with a length of 8-10 cm and a ripeness of 90-100%. Grade SPK is characterized by a ripeness of 60-90%, a curved fruit shape, and a rather short length of 6.5-8 cm. Finally, the SX grade is the lowest quality, smaller than 6.5 cm, so its selling price is low. This difference in perception arises because farmers do not know the grades, which makes the auction market managers work hard to grade the farmers' chili; they are sometimes forced to return poor-quality produce to farmers. The difference shows that farmers' knowledge of the chili quality grades in the auction market is still poor.
Another indicator in the product variable is the quantity and continuity of the chili supply. This indicator falls in the high category because farmers only sell chili from the 1st to the 14th picking of the harvest. During the harvest period the quantity of chili is very high, so the continuity of the chili supply to the auction market is assured. When the harvest season nears its end, farmers instead sell chili to traders, because the quantity and quality of chili start to fall compared with the peak harvest. However, since farmers' harvest periods differ, the overall chili supply to the auction market is not disrupted. This result is in line with conditions in other countries, such as Sri Lanka, where agribusiness companies rely on farmers to secure an adequate, uninterrupted supply of fruit and vegetables; companies are forced to absorb more than 30-40 percent wastage when the supply of products from farmers is discontinued (Esham & Usami, 2006).
Social Factor
The social factor can influence farmers to make marketing decisions. In this study, the social factor consists of 2 (two) variables: social interaction and extension. The social factor score in this study is 66.67 and is included in the high category.
Advice from family and other farmers encourages farmers to sell chili to the auction market. The family pushes the farmer to sell there, and calls from other farmers and farmer groups reinforce the decision. Currently, much of this interaction is conducted by the auction market manager, who is also the head of the farmers' group; it takes place during farmer group meetings, where the head of the group and its members discuss selling chili to the auction market.
The lowest score within the social interaction variable is promotion by the auction market. Coverage of the auction market in the mass media consists only of news items and does not actively promote it. Promotion is carried out only by agricultural extension officers, through leaflets distributed as part of extension on the benefits of auction marketing.
The extension from the farmer group is the highest indicator in the next variable. The farmer group meeting is an activity that encourages farmers to sell chili through the auction market, and trust in the head of the farmer group convinces farmers to do so. The next indicator is the extension from agricultural extension officers, which encourages farmers to sell chili to the auction market. Agricultural extension officers attend farmer group meetings intensively to provide solutions for various agricultural problems, such as encouraging farmers to sell chili to the auction market in order to overcome the low price of chili at the farm level. Furthermore, some farmers sell to the auction market because of an agreement with the local government officer. Moreover, farmers receive production inputs from the farmers' group if they sell their chili through the auction market. The lowest indicator for this variable is the extension from the village heads, who never conduct extension activities urging farmers to sell chili to the auction market. Farmers state that they will sell chili to the auction market for as long as this institution exists in Sleman Regency, owing to the many conveniences and benefits gained from partnering with it; the services provided by the auction market are considered quite good. Farmers also recommend that other farmers, especially those in Sleman Regency, sell their chili in the auction market. If all farmers in Sleman were willing to sell chili to the auction market, this would help equalize the chili price in Sleman Regency and raise the regency's standing as one of the chili producer regions in Indonesia.
The product variable has a significant relationship with the farmers' marketing decision to the auction market. If the production quantity is high, farmers choose a market that can absorb large quantities (Kyaw, 2018). This is in line with the finding that farmers often sell to the auction market during the peak harvest period. Moreover, the farmers' marketing decision is strengthened by the auction market's practice of accepting farmers' entire chili produce, even though the subsequent grading by the auction market manager can lead to some chili being returned to farmers as unsellable; this happens only in small quantities.
The price variable also has a significant relationship with the farmers' marketing decision to the auction market, at a moderate correlation. Olwande & Mathenge (2012) stated that farmers sell large quantities of products to marketing institutions that can offer high prices. High prices are considered an incentive to sell and positively influence farmers' decisions and participation (Nyaga et al., 2016). The auction market has proven able to provide a higher price than the traders in the vicinity of the farmers' residences, and this is the main basis for farmers' decisions to sell chili to the auction market.
The access variable has a significant relationship with the farmers' marketing decision to the auction market. Easy access to information improves farmers' knowledge and skills, especially in marketing their products (Melesse, 2016). The auction market has recently been very transparent in providing information on the quantity, quality, and price of products, which makes farmers confident about selling chili through it. Furthermore, the locations of the auction market and the gathering points bring this marketing institution closer to farmers. Mbitsemunda & Karangwa (2017) revealed that long distances between the market and farmers leave farmers uninterested in selling their product to marketing institutions; moreover, long distances increase transportation costs (Abayneh & Tefera, 2013). Another indicator is the quick cash payment process, which makes farmers more interested in selling chili to the auction market; farmers consider its ability to provide fast payment greater than that of other marketing institutions. The social interaction variable also has a significant relationship with the farmers' marketing decision to the auction market. Jari & Fraser (2009) explained that social interaction helps determine the choice of market. In this study, however, social interaction has only a weak relationship with the farmers' marketing decision, because farmers' decisions are dominated by their own initiative, with little influence from the outside environment; social interaction variables, such as family and other farmers, have only a small effect.
The extension variable has a significant and direct relationship with the farmers' marketing decision to the auction market. Dlamini-Mazibuko et al. (2019) argued that extension increases farmers' knowledge of the various types of markets they can access.
In this study, the relationship between extension and farmers' marketing decisions is still weak, because extension efforts are not yet conducted extensively by all stakeholders. The main extension is performed by farmer groups, while the village heads show no commitment to encouraging farmers to sell chili to the auction market.
CONCLUSIONS
Farmers are very careful in choosing marketing institutions to sell their chili. However, farmers in Sleman Regency have trusted the chili auction market as their main selling institution. This situation is significantly influenced by economic factors (product, price, and access) and social factors (social interaction and extension).
Farmers choose to sell chili to the auction market because this institution is willing to accept the entire supply of chili from farmers, especially during the harvest season. The auction market even accepts chili that has not been through grading by farmers. These conveniences on the product variable reassure farmers about selling chili to the auction market. The price variable is the main factor in farmers' marketing decisions: the price offered by the auction market is higher than the price offered by traders, which is certainly an incentive for farmers to sell chili there.
The access variable also attracts farmers to sell in the auction market. The strategic location of the auction market makes it easy for farmers to reach, and in some sub-districts there are gathering points that ease marketing access for farmers who are far from the auction market. This is a lesson for new marketing institutions: ensure physical access for producers. In addition, the auction market guarantees economic access for farmers by providing fast cash payments, which suits farmers who want to enjoy the proceeds of their harvest as soon as possible.
The social factor is also shown to encourage farmers' marketing decisions, though with a weak correlation. Families, farmer groups, and agricultural extension officers recommend that farmers sell chili to the auction market, and farmers take this into account before deciding which marketing institution to choose. These recommendations are not major considerations, however, because farmers ultimately look to the economic factor in choosing a marketing institution.
Development efforts need to be conducted continuously by the auction market, including familiarizing farmers with grading and promoting the auction market to the public. Farmers need to be engaged more thoroughly in the process of reducing post-harvest losses of chili. The auction market manager needs to communicate the market's grading rules or produce technical guidelines for chili with which farmers must comply. The auction market also needs to increase promotion in order to raise the number of farmers participating in the auction process.
package YodasMod.potions;
import com.badlogic.gdx.graphics.Color;
import com.megacrit.cardcrawl.characters.AbstractPlayer;
import com.megacrit.cardcrawl.core.AbstractCreature;
import com.megacrit.cardcrawl.core.CardCrawlGame;
import com.megacrit.cardcrawl.dungeons.AbstractDungeon;
import com.megacrit.cardcrawl.events.shrines.WeMeetAgain;
import com.megacrit.cardcrawl.helpers.CardHelper;
import com.megacrit.cardcrawl.helpers.PowerTip;
import com.megacrit.cardcrawl.localization.PotionStrings;
import com.megacrit.cardcrawl.rooms.AbstractRoom;
import com.megacrit.cardcrawl.vfx.GainPennyEffect;
import static YodasMod.YodasMod.makeID;
public class LiquidGold extends AbstractEasyPotion {
public static final String POTION_ID = makeID("LiquidGold");
private static final PotionStrings potionStrings = CardCrawlGame.languagePack.getPotionString(POTION_ID);
public static final String NAME = potionStrings.NAME;
public static final String[] DESCRIPTIONS = potionStrings.DESCRIPTIONS;
public static final Color liquidColor = CardHelper.getColor(233,199,76);
public static final Color hybridColor = null;
public static final Color spotsColor = null;
public static final AbstractPlayer.PlayerClass playerClass = null;
public LiquidGold() {
super(NAME, POTION_ID, PotionRarity.COMMON, PotionSize.S, PotionColor.NONE, playerClass);
}
@Override
public void initializeData() {
this.potency = this.getPotency();
this.description = DESCRIPTIONS[0] + this.potency + DESCRIPTIONS[1];
this.tips.clear();
this.tips.add(new PowerTip(this.name, this.description));
}
@Override
public void use(AbstractCreature target) {
AbstractPlayer p = AbstractDungeon.player;
p.gainGold(this.potency);
for(int i = 0; i < this.potency; ++i) {
AbstractDungeon.effectList.add(new GainPennyEffect(p, p.hb.cX, p.hb.cY, p.hb.cX, p.hb.cY, true));
}
}
@Override
public boolean canUse() {
if (AbstractDungeon.actionManager.turnHasEnded && AbstractDungeon.getCurrRoom().phase == AbstractRoom.RoomPhase.COMBAT) {
return false;
} else {
return AbstractDungeon.getCurrRoom().event == null || !(AbstractDungeon.getCurrRoom().event instanceof WeMeetAgain);
}
}
@Override
public int getPotency(final int potency) {
return 50;
}
}
/* */
/* Ensures size of command buffer of the given task. */
/* Existing buffer will be copied to new buffer. */
/* */
/* This buffer is guaranteed to be physically contiguous. */
/* */
/* returns -ENOMEM if cannot allocate new buffer */
static int32_t cmdq_core_task_realloc_buffer_size(struct TaskStruct *pTask, uint32_t size)
{
void *pNewBuffer = NULL;
dma_addr_t newMVABase = 0;
int32_t commandSize = 0;
uint32_t *pCMDEnd = NULL;
if (pTask->pVABase && pTask->bufferSize >= size) {
return 0;
}
do {
pNewBuffer = cmdq_core_alloc_hw_buffer(cmdq_dev_get(),
size, &newMVABase,
GFP_KERNEL | __GFP_NO_KSWAPD);
if (pNewBuffer) {
pTask->useEmergencyBuf = false;
break;
}
if (size <= CMDQ_EMERGENCY_BLOCK_SIZE)
cmdq_core_alloc_emergency_buffer(&pNewBuffer, &newMVABase);
if (pNewBuffer) {
CMDQ_LOG("emergency buffer %p allocated\n", pNewBuffer);
pTask->useEmergencyBuf = true;
break;
}
pNewBuffer =
cmdq_core_alloc_hw_buffer(cmdq_dev_get(), size, &newMVABase, GFP_KERNEL);
if (pNewBuffer) {
CMDQ_LOG("buffer %p allocated after reclaim\n", pNewBuffer);
pTask->useEmergencyBuf = false;
break;
}
} while (0);
if (NULL == pNewBuffer) {
CMDQ_ERR("realloc cmd buffer of size %d failed\n", size);
return -ENOMEM;
}
memset(pNewBuffer, 0, size);
if (pTask->pVABase)
memcpy(pNewBuffer, pTask->pVABase, pTask->bufferSize);
pCMDEnd = pTask->pCMDEnd;
commandSize = pTask->commandSize;
cmdq_task_free_task_command_buffer(pTask);
pTask->pVABase = (uint32_t *) pNewBuffer;
pTask->MVABase = newMVABase;
pTask->bufferSize = size;
pTask->pCMDEnd = pCMDEnd;
pTask->commandSize = commandSize;
CMDQ_MSG("Task Buffer:0x%p, VA:%p PA:%pa\n", pTask, pTask->pVABase, &pTask->MVABase);
return 0;
}
/*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.integration.dsl.flows;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import org.aopalliance.aop.Advice;
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.junit.After;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.BeanCreationException;
import org.springframework.beans.factory.ListableBeanFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.context.Lifecycle;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Scope;
import org.springframework.core.task.TaskExecutor;
import org.springframework.integration.MessageDispatchingException;
import org.springframework.integration.annotation.MessageEndpoint;
import org.springframework.integration.annotation.MessagingGateway;
import org.springframework.integration.annotation.ServiceActivator;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.FixedSubscriberChannel;
import org.springframework.integration.channel.NullChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.config.EnableIntegration;
import org.springframework.integration.context.IntegrationContextUtils;
import org.springframework.integration.dsl.IntegrationFlow;
import org.springframework.integration.dsl.IntegrationFlows;
import org.springframework.integration.dsl.MessageChannels;
import org.springframework.integration.dsl.Pollers;
import org.springframework.integration.dsl.Transformers;
import org.springframework.integration.endpoint.EventDrivenConsumer;
import org.springframework.integration.gateway.GatewayProxyFactoryBean;
import org.springframework.integration.handler.AbstractReplyProducingMessageHandler;
import org.springframework.integration.handler.LoggingHandler;
import org.springframework.integration.handler.ReactiveMessageHandlerAdapter;
import org.springframework.integration.handler.advice.ErrorMessageSendingRecoverer;
import org.springframework.integration.handler.advice.ExpressionEvaluatingRequestHandlerAdvice;
import org.springframework.integration.handler.advice.RequestHandlerRetryAdvice;
import org.springframework.integration.scheduling.PollerMetadata;
import org.springframework.integration.store.MessageStore;
import org.springframework.integration.store.SimpleMessageStore;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.integration.support.MutableMessageBuilder;
import org.springframework.integration.transformer.GenericTransformer;
import org.springframework.integration.transformer.PayloadSerializingTransformer;
import org.springframework.integration.util.NoBeansOverrideAnnotationConfigContextLoader;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageDeliveryException;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.PollableChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.ChannelInterceptor;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import reactor.core.publisher.Mono;
/**
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @author <NAME>
*
* @since 5.0
*/
@ContextConfiguration(loader = NoBeansOverrideAnnotationConfigContextLoader.class)
@SpringJUnitConfig
@DirtiesContext
public class IntegrationFlowTests {
@Autowired
private ListableBeanFactory beanFactory;
@Autowired
private ControlBusGateway controlBus;
@Autowired
@Qualifier("inputChannel")
private MessageChannel inputChannel;
@Autowired
@Qualifier("discardChannel")
private PollableChannel discardChannel;
@Autowired
@Qualifier("foo")
private SubscribableChannel foo;
@Autowired
@Qualifier("successChannel")
private PollableChannel successChannel;
@Autowired
@Qualifier("suppliedChannel")
private PollableChannel suppliedChannel;
@Autowired
@Qualifier("suppliedChannel2")
private PollableChannel suppliedChannel2;
@Autowired
@Qualifier("bridgeFlowInput")
private PollableChannel bridgeFlowInput;
@Autowired
@Qualifier("bridgeFlowOutput")
private PollableChannel bridgeFlowOutput;
@Autowired
@Qualifier("bridgeFlow2Input")
private MessageChannel bridgeFlow2Input;
@Autowired
@Qualifier("bridgeFlow2Output")
private PollableChannel bridgeFlow2Output;
@Autowired
@Qualifier("methodInvokingInput")
private MessageChannel methodInvokingInput;
@Autowired
@Qualifier("delayedAdvice")
private DelayedAdvice delayedAdvice;
@Autowired
private MessageStore messageStore;
@Autowired
@Qualifier("claimCheckInput")
private MessageChannel claimCheckInput;
@Autowired
@Qualifier("lambdasInput")
private MessageChannel lambdasInput;
@Test
public void testWithSupplierMessageSourceImpliedPoller() {
assertThat(this.suppliedChannel.receive(10000).getPayload()).isEqualTo("FOO");
}
@Test
public void testWithSupplierMessageSourceProvidedPoller() {
assertThat(this.suppliedChannel2.receive(10000).getPayload()).isEqualTo("FOO");
}
@Test
public void testDirectFlow() {
assertThat(this.beanFactory.containsBean("filter")).isTrue();
assertThat(this.beanFactory.containsBean("filter.handler")).isTrue();
assertThat(this.beanFactory.containsBean("expressionFilter")).isTrue();
assertThat(this.beanFactory.containsBean("expressionFilter.handler")).isTrue();
QueueChannel replyChannel = new QueueChannel();
Message<String> message = MessageBuilder.withPayload("100").setReplyChannel(replyChannel).build();
assertThatExceptionOfType(MessageDeliveryException.class)
.isThrownBy(() -> this.inputChannel.send(message))
.withCauseInstanceOf(MessageDispatchingException.class)
.withMessageContaining("Dispatcher has no subscribers");
this.controlBus.send("@payloadSerializingTransformer.start()");
final AtomicBoolean used = new AtomicBoolean();
this.foo.subscribe(m -> used.set(true));
this.inputChannel.send(message);
Message<?> reply = replyChannel.receive(10000);
assertThat(reply).isNotNull();
assertThat(reply.getPayload()).isEqualTo(200);
Message<?> successMessage = this.successChannel.receive(10000);
assertThat(successMessage).isNotNull();
assertThat(successMessage.getPayload()).isEqualTo(100);
assertThat(used.get()).isTrue();
this.inputChannel.send(new GenericMessage<Object>(1000));
Message<?> discarded = this.discardChannel.receive(10000);
assertThat(discarded).isNotNull();
assertThat(discarded.getPayload()).isEqualTo("Discarded: 1000");
}
@Test
public void testBridge() {
GenericMessage<String> message = new GenericMessage<>("test");
this.bridgeFlowInput.send(message);
Message<?> reply = this.bridgeFlowOutput.receive(10000);
assertThat(reply).isNotNull();
assertThat(reply.getPayload()).isEqualTo("test");
assertThat(this.beanFactory.containsBean("bridgeFlow2.channel#0")).isTrue();
assertThat(this.beanFactory.getBean("bridgeFlow2.channel#0")).isInstanceOf(FixedSubscriberChannel.class);
assertThatExceptionOfType(MessageDeliveryException.class)
.isThrownBy(() -> this.bridgeFlow2Input.send(message))
.withCauseInstanceOf(MessageDispatchingException.class)
.withMessageContaining("Dispatcher has no subscribers");
this.controlBus.send("@bridge.start()");
this.bridgeFlow2Input.send(message);
reply = this.bridgeFlow2Output.receive(10000);
assertThat(reply).isNotNull();
assertThat(reply.getPayload()).isEqualTo("test");
assertThat(this.delayedAdvice.getInvoked()).isTrue();
}
@Test
public void testWrongLastMessageChannel() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> new AnnotationConfigApplicationContext(InvalidLastMessageChannelFlowContext.class))
.withMessageContaining("'.fixedSubscriberChannel()' " +
"can't be the last EIP-method in the 'IntegrationFlow' definition");
}
@Test
public void testMethodInvokingMessageHandler() {
QueueChannel replyChannel = new QueueChannel();
Message<?> message = MessageBuilder.withPayload("world")
.setHeader(MessageHeaders.REPLY_CHANNEL, replyChannel)
.build();
this.methodInvokingInput.send(message);
Message<?> receive = replyChannel.receive(10000);
assertThat(receive).isNotNull();
assertThat(receive.getPayload()).isEqualTo("Hello World and world");
}
@Test
public void testLambdas() {
assertThat(this.beanFactory.containsBean("lambdasFlow.filter#0")).isTrue();
assertThat(this.beanFactory.containsBean("lambdasFlow.transformer#0")).isTrue();
QueueChannel replyChannel = new QueueChannel();
Message<?> message = MessageBuilder.withPayload("World".getBytes())
.setHeader(MessageHeaders.REPLY_CHANNEL, replyChannel)
.build();
this.lambdasInput.send(message);
Message<?> receive = replyChannel.receive(10000);
assertThat(receive).isNotNull();
assertThat(receive.getPayload()).isEqualTo("Hello World");
message = MessageBuilder.withPayload("Spring")
.setHeader(MessageHeaders.REPLY_CHANNEL, replyChannel)
.build();
this.lambdasInput.send(message);
assertThat(replyChannel.receive(10)).isNull();
}
@Test
public void testClaimCheck() {
QueueChannel replyChannel = new QueueChannel();
Message<String> message = MutableMessageBuilder.withPayload("foo").setReplyChannel(replyChannel).build();
this.claimCheckInput.send(message);
Message<?> receive = replyChannel.receive(10000);
assertThat(receive).isNotNull();
assertThat(receive).isSameAs(message);
assertThat(this.messageStore.getMessageCount()).isEqualTo(1);
assertThat(this.messageStore.getMessage(message.getHeaders().getId())).isSameAs(message);
}
@Autowired
private SubscribableChannel tappedChannel1;
@Autowired
@Qualifier("wireTapFlow2.input")
private SubscribableChannel tappedChannel2;
@Autowired
@Qualifier("wireTapFlow3.input")
private SubscribableChannel tappedChannel3;
@Autowired
private SubscribableChannel tappedChannel4;
@Autowired
@Qualifier("tapChannel")
private QueueChannel tapChannel;
@Autowired
@Qualifier("wireTapFlow5.input")
private SubscribableChannel tappedChannel5;
@Autowired
private PollableChannel wireTapSubflowResult;
@Test
public void testWireTap() {
this.tappedChannel1.send(new GenericMessage<>("foo"));
this.tappedChannel1.send(new GenericMessage<>("bar"));
Message<?> out = this.tapChannel.receive(10000);
assertThat(out).isNotNull();
assertThat(out.getPayload()).isEqualTo("foo");
assertThat(this.tapChannel.receive(0)).isNull();
this.tappedChannel2.send(new GenericMessage<>("foo"));
this.tappedChannel2.send(new GenericMessage<>("bar"));
out = this.tapChannel.receive(10000);
assertThat(out).isNotNull();
assertThat(out.getPayload()).isEqualTo("foo");
assertThat(this.tapChannel.receive(0)).isNull();
this.tappedChannel3.send(new GenericMessage<>("foo"));
this.tappedChannel3.send(new GenericMessage<>("bar"));
out = this.tapChannel.receive(10000);
assertThat(out).isNotNull();
assertThat(out.getPayload()).isEqualTo("foo");
assertThat(this.tapChannel.receive(0)).isNull();
this.tappedChannel4.send(new GenericMessage<>("foo"));
this.tappedChannel4.send(new GenericMessage<>("bar"));
out = this.tapChannel.receive(10000);
assertThat(out).isNotNull();
assertThat(out.getPayload()).isEqualTo("foo");
out = this.tapChannel.receive(10000);
assertThat(out).isNotNull();
assertThat(out.getPayload()).isEqualTo("bar");
this.tappedChannel5.send(new GenericMessage<>(""));
out = this.wireTapSubflowResult.receive(10000);
assertThat(out).isNotNull();
assertThat(out.getPayload()).isEqualTo("");
}
@Autowired
@Qualifier("subscribersFlow.input")
private MessageChannel subscribersFlowInput;
@Autowired
@Qualifier("subscriber1Results")
private PollableChannel subscriber1Results;
@Autowired
@Qualifier("subscriber2Results")
private PollableChannel subscriber2Results;
@Autowired
@Qualifier("subscriber3Results")
private PollableChannel subscriber3Results;
@Test
public void testSubscribersSubFlows() {
this.subscribersFlowInput.send(new GenericMessage<>(2));
Message<?> receive1 = this.subscriber1Results.receive(10000);
assertThat(receive1).isNotNull();
assertThat(receive1.getPayload()).isEqualTo(1);
Message<?> receive2 = this.subscriber2Results.receive(10000);
assertThat(receive2).isNotNull();
assertThat(receive2.getPayload()).isEqualTo(4);
Message<?> receive3 = this.subscriber3Results.receive(10000);
assertThat(receive3).isNotNull();
assertThat(receive3.getPayload()).isEqualTo(6);
}
@Autowired
@Qualifier("errorRecovererFunction")
private Function<String, String> errorRecovererFlowGateway;
@Test
public void testReplyChannelFromReplyMessage() {
assertThat(this.errorRecovererFlowGateway.apply("foo")).isEqualTo("foo");
}
@Autowired
private MessageChannel dedicatedQueueChannel;
@Autowired
private SubscribableChannel dedicatedResults;
@Test
public void testDedicatedPollingThreadFlow() throws InterruptedException {
AtomicReference<String> threadNameReference = new AtomicReference<>();
CountDownLatch resultLatch = new CountDownLatch(1);
this.dedicatedResults.subscribe(m -> {
threadNameReference.set(Thread.currentThread().getName());
resultLatch.countDown();
});
this.dedicatedQueueChannel.send(new GenericMessage<>("foo"));
assertThat(resultLatch.await(10, TimeUnit.SECONDS)).isTrue();
assertThat(threadNameReference.get()).isEqualTo("dedicatedTaskScheduler-1");
}
@Autowired
private MessageChannel flowWithNullChannelInput;
@Autowired
private NullChannel nullChannel;
@Test
public void testNullChannelInTheEndOfFlow() {
this.flowWithNullChannelInput.send(new GenericMessage<>("foo"));
}
@Autowired
@Qualifier("flowWithLocalNullChannel.input")
private MessageChannel flowWithLocalNullChannelInput;
@Autowired
@Qualifier("flowWithLocalNullChannel.channel#0")
private NullChannel localNullChannel;
@Test
public void testLocalNullChannel() {
this.flowWithLocalNullChannelInput.send(new GenericMessage<>("foo"));
}
@Autowired
private EventDrivenConsumer flow1WithPrototypeHandlerConsumer;
@Autowired
private EventDrivenConsumer flow2WithPrototypeHandlerConsumer;
@Test
public void testPrototypeIsNotOverridden() {
assertThat(this.flow2WithPrototypeHandlerConsumer.getHandler())
.isNotSameAs(this.flow1WithPrototypeHandlerConsumer.getHandler());
}
@Autowired
@Qualifier("globalErrorChannelResolutionFunction")
private Consumer<String> globalErrorChannelResolutionGateway;
@Autowired
SubscribableChannel errorChannel;
@Test
public void testGlobalErrorChannelResolutionFlow() throws InterruptedException {
CountDownLatch errorMessageLatch = new CountDownLatch(1);
MessageHandler errorMessageHandler = m -> errorMessageLatch.countDown();
this.errorChannel.subscribe(errorMessageHandler);
this.globalErrorChannelResolutionGateway.accept("foo");
assertThat(errorMessageLatch.await(10, TimeUnit.SECONDS)).isTrue();
this.errorChannel.unsubscribe(errorMessageHandler);
}
@Autowired
@Qualifier("interceptorChannelIn")
private MessageChannel interceptorChannelIn;
@Autowired
private List<String> outputStringList;
@Test
public void testInterceptorFlow() {
this.interceptorChannelIn.send(MessageBuilder.withPayload("foo").build());
assertThat(outputStringList).containsExactly(
"Pre send transform: foo",
"Pre send handle: FOO",
"Handle: FOO",
"Post send handle: FOO",
"Post send transform: foo"
);
}
@Autowired
@Qualifier("controlBusFlow")
Lifecycle controlBusFlow;
@Test
public void testStandardIntegrationFlowLifecycle() {
this.controlBusFlow.stop();
GatewayProxyFactoryBean controlBusGateway =
this.beanFactory.getBean("&controlBusGateway", GatewayProxyFactoryBean.class);
assertThat(controlBusGateway.isRunning()).isFalse();
Lifecycle controlBus = this.beanFactory.getBean("controlBus", Lifecycle.class);
assertThat(controlBus.isRunning()).isFalse();
this.controlBusFlow.start();
assertThat(controlBusGateway.isRunning()).isTrue();
assertThat(controlBus.isRunning()).isTrue();
}
@After
public void cleanUpList() {
outputStringList.clear();
}
@MessagingGateway
public interface ControlBusGateway {
void send(String command);
}
@Configuration
@EnableIntegration
public static class SupplierContextConfiguration1 {
@Bean
public Function<String, String> toUpperCaseFunction() {
return String::toUpperCase;
}
@Bean
public Supplier<String> stringSupplier() {
return () -> "foo";
}
@Bean
public IntegrationFlow supplierFlow() {
return IntegrationFlows.fromSupplier(stringSupplier())
.transform(toUpperCaseFunction())
.channel("suppliedChannel")
.get();
}
@Bean(name = PollerMetadata.DEFAULT_POLLER)
public PollerMetadata poller() {
return Pollers.fixedRate(100).get();
}
@Bean(name = IntegrationContextUtils.TASK_SCHEDULER_BEAN_NAME)
public TaskScheduler taskScheduler() {
ThreadPoolTaskScheduler threadPoolTaskScheduler = new ThreadPoolTaskScheduler();
threadPoolTaskScheduler.setPoolSize(100);
return threadPoolTaskScheduler;
}
@Bean
public MessageChannel suppliedChannel() {
return MessageChannels.queue(10).get();
}
}
@Configuration
@EnableIntegration
public static class SupplierContextConfiguration2 {
@Bean
public IntegrationFlow supplierFlow2() {
return IntegrationFlows.fromSupplier(() -> "foo",
c -> c.poller(Pollers.fixedDelay(100).maxMessagesPerPoll(1)))
.<String, String>transform(String::toUpperCase)
.channel("suppliedChannel2")
.get();
}
@Bean
public MessageChannel suppliedChannel2() {
return MessageChannels.queue(10).get();
}
}
@Configuration
@EnableIntegration
public static class ContextConfiguration {
@Bean
public IntegrationFlow controlBusFlow() {
return IntegrationFlows.from(ControlBusGateway.class, (gateway) -> gateway.beanName("controlBusGateway"))
.controlBus((endpoint) -> endpoint.id("controlBus"))
.get();
}
@Bean
public MessageChannel inputChannel() {
return MessageChannels.direct().get();
}
@Bean
public MessageChannel foo() {
return MessageChannels.publishSubscribe().get();
}
}
@Configuration
@ComponentScan
public static class ContextConfiguration2 {
@Autowired
@Qualifier("inputChannel")
private MessageChannel inputChannel;
@Autowired
@Qualifier("successChannel")
private PollableChannel successChannel;
@Bean
public Advice expressionAdvice() {
ExpressionEvaluatingRequestHandlerAdvice advice = new ExpressionEvaluatingRequestHandlerAdvice();
advice.setSuccessChannel(this.successChannel);
return advice;
}
@Bean
public IntegrationFlow flow2() {
return IntegrationFlows.from(this.inputChannel)
.filter(p -> p instanceof String, e -> e
.id("filter")
.discardFlow(df -> df
.transform(String.class, "Discarded: "::concat)
.channel(MessageChannels.queue("discardChannel"))))
.channel("foo")
.fixedSubscriberChannel()
.<String, Integer>transform(Integer::parseInt)
.transform(Foo::new)
.transform(new PayloadSerializingTransformer(),
c -> c.autoStartup(false).id("payloadSerializingTransformer"))
.channel(MessageChannels.queue(new SimpleMessageStore(), "fooQueue"))
.transform(Transformers.deserializer(Foo.class.getName()))
.<Foo, Integer>transform(f -> f.value)
.filter("true", e -> e.id("expressionFilter"))
.channel(publishSubscribeChannel())
.transform((Integer p) -> p * 2, c -> c.advice(this.expressionAdvice()))
.get();
}
@Bean
public MessageChannel publishSubscribeChannel() {
return MessageChannels.publishSubscribe().get();
}
@Bean
public IntegrationFlow subscribersFlow() {
return flow -> flow
.publishSubscribeChannel(executor(), s -> s
.subscribe(f -> f
.<Integer>handle((p, h) -> p / 2)
.channel(MessageChannels.queue("subscriber1Results")))
.subscribe(f -> f
.<Integer>handle((p, h) -> p * 2)
.channel(MessageChannels.queue("subscriber2Results"))))
.<Integer>handle((p, h) -> p * 3)
.channel(MessageChannels.queue("subscriber3Results"));
}
@Bean
public Executor executor() {
ThreadPoolTaskExecutor tpte = new ThreadPoolTaskExecutor();
tpte.setCorePoolSize(50);
return tpte;
}
@Bean
public MessageHandler loggingMessageHandler() {
return new LoggingHandler(LoggingHandler.Level.DEBUG);
}
@Bean
public IntegrationFlow wireTapFlow1() {
return IntegrationFlows.from("tappedChannel1")
.wireTap("tapChannel", wt -> wt.selector(m -> m.getPayload().equals("foo")))
.handle(new ReactiveMessageHandlerAdapter((message) -> Mono.just(message).log().then()))
.get();
}
@Bean
public IntegrationFlow wireTapFlow2() {
return f -> f
.wireTap("tapChannel", wt -> wt.selector(m -> m.getPayload().equals("foo")))
.handle(loggingMessageHandler());
}
@Bean
public IntegrationFlow wireTapFlow3() {
return f -> f
.transform("payload")
.wireTap("tapChannel", wt -> wt.selector("payload == 'foo'"))
.handle(loggingMessageHandler());
}
@Bean
public IntegrationFlow wireTapFlow4() {
return IntegrationFlows.from("tappedChannel4")
.wireTap(tapChannel())
.channel("nullChannel")
.get();
}
@Bean
public IntegrationFlow wireTapFlow5() {
return f -> f
.wireTap(sf -> sf
.transform(// Must not be lambda for SpEL fallback behavior on empty payload
new GenericTransformer<String, String>() {
@Override
public String transform(String source) {
return source.toUpperCase();
}
})
.channel(MessageChannels.queue("wireTapSubflowResult")))
.channel("nullChannel");
}
@Bean
public QueueChannel tapChannel() {
return new QueueChannel();
}
}
@MessageEndpoint
public static class AnnotationTestService {
@ServiceActivator(inputChannel = "publishSubscribeChannel")
public void handle(Object payload) {
assertThat(payload).isEqualTo(100);
}
}
@Configuration
public static class ContextConfiguration3 {
@Autowired
@Qualifier("delayedAdvice")
private MethodInterceptor delayedAdvice;
@Bean
public QueueChannel successChannel() {
return MessageChannels.queue().get();
}
@Bean
public IntegrationFlow bridgeFlow() {
return IntegrationFlows.from(MessageChannels.queue("bridgeFlowInput"))
.channel(MessageChannels.queue("bridgeFlowOutput"))
.get();
}
@Bean
public IntegrationFlow bridgeFlow2() {
return IntegrationFlows.from("bridgeFlow2Input")
.bridge(c -> c.autoStartup(false).id("bridge"))
.fixedSubscriberChannel()
.delay("delayer", d -> d
.delayExpression("200")
.advice(this.delayedAdvice)
.messageStore(this.messageStore()))
.channel(MessageChannels.queue("bridgeFlow2Output"))
.get();
}
@Bean
public SimpleMessageStore messageStore() {
return new SimpleMessageStore();
}
@Bean
public IntegrationFlow claimCheckFlow() {
return IntegrationFlows.from("claimCheckInput")
.claimCheckIn(this.messageStore())
.claimCheckOut(this.messageStore())
.get();
}
}
@Component("delayedAdvice")
public static class DelayedAdvice implements MethodInterceptor {
private final AtomicBoolean invoked = new AtomicBoolean();
@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
this.invoked.set(true);
return invocation.proceed();
}
public Boolean getInvoked() {
return invoked.get();
}
}
@Configuration
public static class ContextConfiguration4 {
@Autowired
@Qualifier("integrationFlowTests.GreetingService")
private MessageHandler greetingService;
@Bean
public IntegrationFlow methodInvokingFlow() {
return IntegrationFlows.from("methodInvokingInput")
.handle(this.greetingService)
.get();
}
@Bean
public IntegrationFlow lambdasFlow() {
return IntegrationFlows.from("lambdasInput")
.filter(String.class, "World"::equals)
.transform(String.class, "Hello "::concat)
.get();
}
@Bean
public IntegrationFlow errorRecovererFlow() {
return IntegrationFlows.from(Function.class, (gateway) -> gateway.beanName("errorRecovererFunction"))
.<Object>handle((p, h) -> {
throw new RuntimeException("intentional");
},
e -> e.advice(retryAdvice()))
.get();
}
@Bean
public RequestHandlerRetryAdvice retryAdvice() {
RequestHandlerRetryAdvice requestHandlerRetryAdvice = new RequestHandlerRetryAdvice();
requestHandlerRetryAdvice.setRecoveryCallback(new ErrorMessageSendingRecoverer(recoveryChannel()));
return requestHandlerRetryAdvice;
}
@Bean
public MessageChannel recoveryChannel() {
return new DirectChannel();
}
@Bean
public IntegrationFlow recoveryFlow() {
return IntegrationFlows.from(recoveryChannel())
.<MessagingException, Message<?>>transform(MessagingException::getFailedMessage)
.get();
}
@Bean
public IntegrationFlow dedicatedPollingThreadFlow() {
return IntegrationFlows.from(MessageChannels.queue("dedicatedQueueChannel"))
.bridge(e -> e
.poller(Pollers.fixedDelay(0).receiveTimeout(-1))
.taskScheduler(dedicatedTaskScheduler()))
.channel("dedicatedResults")
.get();
}
@Bean
public TaskScheduler dedicatedTaskScheduler() {
return new ThreadPoolTaskScheduler();
}
@Bean
public IntegrationFlow flowWithNullChannel() {
return IntegrationFlows.from("flowWithNullChannelInput")
.nullChannel();
}
@Bean
public IntegrationFlow flowWithLocalNullChannel() {
return f -> f.channel(new NullChannel());
}
@Bean
@Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
public AbstractReplyProducingMessageHandler myHandler() {
return new AbstractReplyProducingMessageHandler() {
@Override
protected Object handleRequestMessage(Message<?> requestMessage) {
return requestMessage;
}
};
}
@Bean
public IntegrationFlow flow1WithPrototypeHandler(
@Qualifier("myHandler") AbstractReplyProducingMessageHandler handler) {
return f -> f.handle(handler, e -> e.id("flow1WithPrototypeHandlerConsumer"));
}
@Bean
public IntegrationFlow flow2WithPrototypeHandler(
@Qualifier("myHandler") AbstractReplyProducingMessageHandler handler) {
return f -> f.handle(handler, e -> e.id("flow2WithPrototypeHandlerConsumer"));
}
@Bean
public IntegrationFlow globalErrorChannelResolutionFlow(@Qualifier("taskScheduler") TaskExecutor taskExecutor) {
return IntegrationFlows.from(Consumer.class,
(gateway) -> gateway.beanName("globalErrorChannelResolutionFunction"))
.channel(c -> c.executor(taskExecutor))
.handle((p, h) -> {
throw new RuntimeException("intentional");
})
.get();
}
}
@Configuration
public static class InterceptorContextConfiguration {
@Bean
public List<String> outputStringList() {
return new ArrayList<>();
}
@Bean
public IntegrationFlow interceptorFlow(List<String> outputStringList) {
return IntegrationFlows.from("interceptorChannelIn")
.intercept(new ChannelInterceptor() {
@Override
public Message<?> preSend(Message<?> message, MessageChannel channel) {
outputStringList.add("Pre send transform: " + message.getPayload());
return message;
}
@Override
public void postSend(Message<?> message, MessageChannel channel, boolean sent) {
outputStringList.add("Post send transform: " + message.getPayload());
}
})
.transform((String s) -> s.toUpperCase())
.intercept(new ChannelInterceptor() {
@Override
public Message<?> preSend(Message<?> message, MessageChannel channel) {
outputStringList.add("Pre send handle: " + message.getPayload());
return message;
}
@Override
public void postSend(Message<?> message, MessageChannel channel, boolean sent) {
outputStringList.add("Post send handle: " + message.getPayload());
}
})
.handle(m -> outputStringList.add("Handle: " + m.getPayload())).get();
}
}
@Service
public static class GreetingService extends AbstractReplyProducingMessageHandler {
@Autowired
private WorldService worldService;
@Override
protected Object handleRequestMessage(Message<?> requestMessage) {
return "Hello " + this.worldService.world() + " and " + requestMessage.getPayload();
}
}
@Service
public static class WorldService {
public String world() {
return "World";
}
}
private static class InvalidLastMessageChannelFlowContext {
@Bean
public IntegrationFlow wrongLastComponent() {
return IntegrationFlows.from(MessageChannels.direct())
.fixedSubscriberChannel()
.get();
}
}
@SuppressWarnings("serial")
public static class Foo implements Serializable {
private final Integer value;
public Foo(Integer value) {
this.value = value;
}
}
}
// robozonky-integration-stonky/src/main/java/com/github/robozonky/integrations/stonky/Export.java
/*
* Copyright 2019 The RoboZonky Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.robozonky.integrations.stonky;
import java.io.File;
import java.net.URL;
import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;
import java.util.function.Function;
import javax.ws.rs.core.Response;
import com.github.robozonky.api.remote.enums.OAuthScope;
import com.github.robozonky.common.async.Backoff;
import com.github.robozonky.common.remote.Zonky;
import com.github.robozonky.common.tenant.Tenant;
import io.vavr.control.Try;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
enum Export {
WALLET(Zonky::requestWalletExport, Zonky::downloadWalletExport),
INVESTMENTS(Zonky::requestInvestmentsExport, Zonky::downloadInvestmentsExport);
private static final Logger LOGGER = LogManager.getLogger(Export.class);
private final Consumer<Zonky> trigger;
private final Function<Zonky, URL> download;
Export(final Consumer<Zonky> trigger, final Function<Zonky, Response> delegate) {
this.trigger = trigger;
this.download = api -> download(api, delegate);
}
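    // Polls the export endpoint; Zonky answers HTTP 302 with a Location header once the export is ready.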
private URL download(final Zonky zonky, final Function<Zonky, Response> delegate) {
return Try.withResources(() -> delegate.apply(zonky))
.of(response -> {
final int status = response.getStatus();
LOGGER.debug("Download endpoint returned HTTP {}.", status);
if (status != 302) {
throw new IllegalStateException("Download not yet ready: " + this);
}
final String s = response.getHeaderString("Location");
return new URL(s);
}).get();
}
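    // Triggers the export, backs off exponentially until the download URL becomes available, then fetches the file.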
public CompletableFuture<Optional<File>> download(final Tenant tenant, final Duration backoffTime) {
final Backoff<URL> waitWhileExportRunning = Backoff.exponential(() -> tenant.call(download, OAuthScope.SCOPE_FILE_DOWNLOAD),
Duration.ofSeconds(1), backoffTime);
return CompletableFuture.runAsync(() -> tenant.run(trigger, OAuthScope.SCOPE_APP_WEB))
.thenApplyAsync(v -> waitWhileExportRunning.get())
.thenApplyAsync(urlOrError -> urlOrError.fold(r -> Optional.empty(), Util::download));
}
public CompletableFuture<Optional<File>> download(final Tenant tenant) {
return download(tenant, Duration.ofHours(1));
}
}
# fgmohammad/inpainting
# Credits: https://github.com/MADF-inpainting/Pytorch-MADF
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from normalization import PN
from torchvision import models
upsampling = "bilinear"
class VGG16FeatureExtractor(nn.Module):
def __init__(self):
super().__init__()
vgg16 = models.vgg16(pretrained=True)
self.enc_1 = nn.Sequential(*vgg16.features[:5])
self.enc_2 = nn.Sequential(*vgg16.features[5:10])
self.enc_3 = nn.Sequential(*vgg16.features[10:17])
# fix the encoder
for i in range(3):
for param in getattr(self, 'enc_{:d}'.format(i + 1)).parameters():
param.requires_grad = False
def forward(self, image):
results = [image]
for i in range(3):
func = getattr(self, 'enc_{:d}'.format(i + 1))
results.append(func(results[-1]))
return results[1:]
class FilterGen(nn.Module):
def __init__(self, in_ch, conv_in_ch, conv_out_ch, kernel_size, stride, padding):
super().__init__()
self.kernel_size = kernel_size
self.conv_in_ch = conv_in_ch
self.conv_out_ch = conv_out_ch
nhidden = 16
self.conv = nn.Sequential(
nn.Conv2d(in_ch, nhidden, kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU()
)
self.conv1 = nn.Sequential(
nn.Conv2d(nhidden, conv_in_ch * kernel_size * kernel_size * conv_out_ch, kernel_size=1, stride=1, padding=0)
)
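    # Predict a per-spatial-location filter bank from the mask features: (N, H*W, conv_in_ch*k*k, conv_out_ch)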
def forward(self, input):
mask = self.conv(input)
filters = self.conv1(mask)
N, C, H, W = filters.shape
filters = filters.view((N, C, H*W))
filters = filters.transpose(1,2)
filters = filters.view((N, H*W, self.conv_in_ch * self.kernel_size * self.kernel_size, self.conv_out_ch))
return filters, mask
class ConvWithFilter(nn.Module):
    def __init__(self, out_ch, kernel_size, stride=1, padding=0, dilation=1, label_ch=0, bn='batchnorm', activ='relu'):
super().__init__()
self.kernel_size = kernel_size
self.padding = padding
self.dilation = dilation
self.stride = stride
        self.bn_type = bn  # remember the requested normalization type ("batchnorm" or "PN"); self.bn holds the module
self.activ=activ
if bn=="batchnorm":
self.bn = nn.BatchNorm2d(out_ch)
elif bn=="PN":
self.bn = PN(out_ch, label_ch, upsampling)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
    def forward(self, features, filters, edge_map=None):
N, C, in_H, in_W = features.shape
out_H = (in_H - self.kernel_size + 2 * self.padding) / self.stride + 1
out_W = (in_W - self.kernel_size + 2 * self.padding) / self.stride + 1
out_H = int(out_H)
out_W = int(out_W)
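        # e.g. in_H = 256, kernel_size = 3, stride = 2, padding = 1  ->  out_H = 128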
features_unf = torch.nn.functional.unfold(features, (self.kernel_size, self.kernel_size), dilation=self.dilation, padding=self.padding, stride=self.stride)
features_ = features_unf.transpose(1, 2).unsqueeze(2)
result = features_.matmul(filters)
result = result.squeeze(2)
result = result.transpose(1, 2)
N, C, _ = result.shape
result = result.view(N, C, out_H, out_W)
        out = result
        if hasattr(self, 'bn') and self.bn_type == "batchnorm":
            out = self.bn(out)
        elif hasattr(self, 'bn') and self.bn_type == "PN":
            # PN normalization is conditioned on the edge map
            out = self.bn(out, edge_map)
        if hasattr(self, 'activation'):
            out = self.activation(out)
        return out
class AttConv(nn.Module):
def __init__(self, query_dim, key_dim, value_dim, activation='relu'):
super(AttConv,self).__init__()
if activation is False:
self.conv = nn.Sequential(
nn.Conv2d(query_dim+key_dim, query_dim, kernel_size=3, stride=1, padding=1)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(query_dim+key_dim, query_dim, kernel_size=3, stride=1, padding=1),
nn.ReLU()
)
def forward(self, query, key):
out = key
return self.conv(torch.cat([out, query], dim = 1))
#return out
class DecActiv(nn.Module):
def __init__(self, in_ch, out_ch, label_ch=0, bn=False, activ='relu'):
super().__init__()
self.deconv = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4, stride=2, padding=1)
self.bn_type=bn
if bn=="PN":
self.bn = PN(out_ch, label_ch, upsampling)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, edge_map=None):
h = self.deconv(input)
if hasattr(self, 'bn') and self.bn_type=="PN":
h = self.bn(h, edge_map)
if hasattr(self, 'activation'):
h = self.activation(h)
return h
class MADFNet(nn.Module):
def __init__(self, args, layer_size=7, input_channels=3, upsampling_mode=upsampling):
super().__init__()
self.upsampling_mode = upsampling_mode
self.layer_size = layer_size
self.n_refinement_D = args.n_refinement_D
###Encoder
self.filter_gen_1 = FilterGen(in_ch = 3, conv_in_ch=input_channels*2, conv_out_ch=16, kernel_size=7, stride=2, padding=3)
self.filter_gen_2 = FilterGen(16, 16, 32, 5, 2, 2)
self.filter_gen_3 = FilterGen(16, 32, 64, 3, 2, 1)
self.filter_gen_4 = FilterGen(16, 64, 128, 3, 2, 1)
for i in range(5, layer_size + 1):
setattr(self, "filter_gen_{:d}".format(i), FilterGen(16, 128, 128, 3, 2, 1))
self.enc_conv_1 = ConvWithFilter(out_ch=16, kernel_size=7, stride=2, padding=3)
self.enc_conv_2 = ConvWithFilter(32, 5, 2, 2)
self.enc_conv_3 = ConvWithFilter(64, 3, 2, 1)
for i in range(4, layer_size + 1):
setattr(self, "enc_conv_{:d}".format(i), ConvWithFilter(128, 3, 2, 1))
self.enc_up_1 = nn.Sequential(
nn.Conv2d(16, 64, kernel_size=1, stride=1, padding=0),
nn.ReLU())
self.enc_up_2 = nn.Sequential(
nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0),
nn.ReLU())
self.enc_up_3 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=1, stride=1, padding=0),
nn.ReLU())
for i in range(4, layer_size + 1):
enc_up = nn.Sequential(nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0),
nn.ReLU())
setattr(self, "enc_up_{:d}".format(i), enc_up)
#######Encoder end
#######Recovery Decoder
for i in range(self.layer_size, 4, -1):
name = 'deconv_{:d}'.format(i)
dconv = []
dconv.append(nn.ConvTranspose2d(512, 512, kernel_size=4, stride=2, padding=1))
dconv.append(nn.BatchNorm2d(512))
dconv.append(nn.LeakyReLU(negative_slope=0.2))
setattr(self, name, nn.Sequential(*dconv))
self.deconv_4 = nn.Sequential(nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256), nn.LeakyReLU(negative_slope=0.2))
self.deconv_3 = nn.Sequential(nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(128), nn.LeakyReLU(negative_slope=0.2))
self.deconv_2 = nn.Sequential(nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(64), nn.LeakyReLU(negative_slope=0.2))
self.deconv_1 = nn.Sequential(nn.ConvTranspose2d(64, input_channels, kernel_size=4, stride=2, padding=1))
for i in range(self.layer_size, 4, -1):
setattr(self, 'att_conv_{:d}'.format(i), AttConv(query_dim=512, key_dim=512, value_dim=512))
self.att_conv_4 = AttConv(query_dim=256, key_dim=256, value_dim=256)
self.att_conv_3 = AttConv(query_dim=128, key_dim=128, value_dim=128)
self.att_conv_2 = AttConv(query_dim=64, key_dim=64, value_dim=64)
self.att_conv_1 = AttConv(query_dim=3, key_dim=6, value_dim=6, activation=False)
if self.n_refinement_D > 0:
for i in range(self.layer_size, 4, -1):
name = 'dec_ref0_{:d}'.format(i)
setattr(self, name, DecActiv(512, 512, label_ch=512, bn="PN", activ='leaky'))
self.dec_ref0_4 = DecActiv(512, 256, label_ch=256, bn="PN", activ='leaky')
self.dec_ref0_3 = DecActiv(256, 128, label_ch=128, bn="PN", activ='leaky')
self.dec_ref0_2 = DecActiv(128, 64, label_ch=64, bn="PN", activ='leaky')
self.dec_ref0_1 = nn.Conv2d(64 + input_channels*3, input_channels, kernel_size=1, stride=1, padding=0)
if self.n_refinement_D > 1:
for i in range(self.layer_size, 4, -1):
name = 'dec_ref1_{:d}'.format(i)
setattr(self, name, DecActiv(512, 512, label_ch=512, bn="PN", activ='leaky'))
self.dec_ref1_4 = DecActiv(512, 256, label_ch=256, bn="PN", activ='leaky')
self.dec_ref1_3 = DecActiv(256, 128, label_ch=128, bn="PN", activ='leaky')
self.dec_ref1_2 = DecActiv(128, 64, label_ch=64, bn="PN", activ='leaky')
self.dec_ref1_1 = nn.Conv2d(64 + input_channels*4, input_channels, kernel_size=1, stride=1, padding=0)
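    # The encoder predicts mask-conditioned filters at each level; the decoders form a recovery path plus up to two refinement paths.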
def forward(self, input, input_mask):
h_dict = {} # for the output of enc_N
h_dict['h_0'] = torch.cat([input, input_mask], dim=1)
mask_pre = input_mask
pre_conv = h_dict['h_0']
h_key_prev = 'h_0'
for i in range(1, self.layer_size + 1):
h_key = 'h_{:d}'.format(i)
filters, mask_res = getattr(self, 'filter_gen_{:d}'.format(i))(mask_pre)
mask_pre = mask_res
conv_res = getattr(self, "enc_conv_{:d}".format(i))(pre_conv, filters)
h_dict[h_key] = getattr(self, "enc_up_{:d}".format(i))(conv_res)
pre_conv = conv_res
h_key_prev = h_key
h_key = 'h_{:d}'.format(self.layer_size)
h = h_dict[h_key]
h_att = h
h_second = h
outputs = []
for i in range(self.layer_size, 0, -1):
enc_h_key = 'h_{:d}'.format(i - 1)
dconv = getattr(self, 'deconv_{:d}'.format(i))(h_att)
h_att = getattr(self, 'att_conv_{:d}'.format(i))(dconv, h_dict[enc_h_key])
if i != 1:
if self.n_refinement_D > 0:
h_second = getattr(self, 'dec_ref0_{:d}'.format(i))(h_second, h_att)
if self.n_refinement_D > 1:
h = getattr(self, 'dec_ref1_{:d}'.format(i))(h, edge_map=h_second)
else:
outputs.append(h_att)
if self.n_refinement_D > 0:
h_second = F.interpolate(h_second, scale_factor=2, mode=upsampling)
h_second = torch.cat([h_second, h_att, h_dict[enc_h_key]], dim=1)
h_second = getattr(self, 'dec_ref0_{:d}'.format(i))(h_second)
outputs.append(h_second)
if self.n_refinement_D > 1:
h = F.interpolate(h, scale_factor=2, mode=upsampling)
h = torch.cat([h, h_att, h_second, h_dict[enc_h_key]], dim=1)
h = getattr(self, 'dec_ref1_{:d}'.format(i))(h)
outputs.append(h)
return outputs
    def train(self, mode=True):
        """
        Override of the default train(); intended as the hook for freezing the
        BN parameters, it currently just delegates to the default behaviour.
        """
        super().train(mode)
if __name__ == '__main__':
size = (1, 3, 5, 5)
input = torch.ones(size)
input_mask = torch.ones(size)
input_mask[:, :, 2:, :][:, :, :, 2:] = 0
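    # NOTE: PartialConv is not defined in this file; this smoke test assumes the layer from the original Pytorch-MADF code is importable.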
conv = PartialConv(3, 3, 3, 1, 1)
l1 = nn.L1Loss()
input.requires_grad = True
output, output_mask = conv(input, input_mask)
loss = l1(output, torch.randn(1, 3, 5, 5))
loss.backward()
assert (torch.sum(input.grad != input.grad).item() == 0)
assert (torch.sum(torch.isnan(conv.input_conv.weight.grad)).item() == 0)
assert (torch.sum(torch.isnan(conv.input_conv.bias.grad)).item() == 0)
LAST week a federal appeals court panel ruled that the NSA's indiscriminate hoovering of phone-call metadata, first revealed by the leaks of Edward Snowden, is not authorised by the Patriot Act. The pertinent section of the anti-terror bill, Section 215, is set to expire on June 1st, so the 2nd Circuit's ruling comes at an opportune time for congressional opponents of the NSA's bulk data-collection programme. “How can you reauthorise something that’s illegal?” asked Harry Reid, the Senate minority leader. "You can’t. You shouldn’t". On May 13th the House overwhelmingly approved legislation to end the government’s bulk collection of phone records. The Senate will soon debate the matter.
This development also clarifies the stances of several GOP presidential hopefuls, and the stakes of the primaries. Marco Rubio, a Florida senator, and Chris Christie, governor of New Jersey, hawkishly favour just the sort of reauthorisation of the Patriot Act's current provisions that Mr Reid opposes. Jeb Bush, formerly the governor of Florida, is on the record as an enthusiast of the NSA's phone metadata sweeps, and presumably wants them to continue. Meanwhile Rand Paul, a Kentucky senator, seeks to burnish his civil-libertarian credentials with a promise to filibuster any attempt at clean reauthorisation. Ted Cruz, the junior senator from Texas, has staked out a middle ground by co-sponsoring the "USA Freedom Act", which would end the bulk collection of phone records while extending other parts of the bill cherished by the security establishment.
More important than campaign-trail jockeying, however, is the substance of the decision. It would seem to vindicate Edward Snowden, a former NSA contractor whose controversial leaks brought the formerly secret programme to light. According to the court, not only does the "staggering" quantity of information harvested by the NSA exceed the Patriot Act's statutory ambit, but the government's argument in defence of bulk collection "defies any limiting principle". Moreover, the court says, the programme could not have been "legislatively ratified", given the fact that only a few members of Congress, and none of the public, were even aware of its existence prior to Mr Snowden's leaks, much less the details concerning its "staggering" scope.
The ruling has important implications for the way we understand the role of whistle-blowers such as Mr Snowden. "Telling the public about the phone dragnet didn’t expose a legitimate state secret", writes Conor Friedersdorf of the Atlantic. "It exposed a violation of the constitutional order... Tens of millions of innocent U.S. citizens were ... subject to invasions of privacy that no law authorized". Without Mr Snowden's leak, which led the ACLU to challenge the NSA's programme in court, "the NSA’s unlawful behavior would’ve continued, unknown to the public and unreviewed by Article III courts."
This of course does not vindicate all of Mr Snowden's leaks, which go well beyond the revelation of the NSA's phone-records sweep. But it helps to illustrate the indispensable role whistle-blowers play in defending the rights of citizens when the state insists on acting behind a veil of official secrecy, outside the normal scope of democratic and judicial oversight. When the executive insists on shielding the state's intelligence programmes not only from public scrutiny, but also from meaningful review by the duly-elected representatives of the people, there is no safeguard in place to protect the rights of the citizenry beyond the conscience of the spies and their executive-branch masters.
The American system was designed with the assumption that power is dangerous and will be abused when possible. A system of checks and balances attempts to limit these potential abuses, as each branch of government polices the power of the other branches. Shielding portions of the executive from the view of the legislative and judicial branches effectively breaks this system. The only way for an illegal programme like the NSA's phone metadata surveillance to come into view is, rather perversely, for a public-spirited citizen in the employ of the spy agency to break the law and leak classified information to the public. It is sometimes argued that surely there must be a way for public-spirited folks with security clearances to report abuses of the system from within the system, keeping classified material under wraps. But it is the nature of abusive systems to deny that there are any abuses, and to move swiftly and decisively to silence anyone who claims otherwise. People of conscience, like Mr Snowden, therefore play an essential role in policing executive power and protecting the rights of citizens.
It is understandable for Barack Obama's administration to seek to punish Mr Snowden for exposing this illegal programme. This paper has argued that it would have been better for America had Mr Snowden sought to defend himself in court. Yet the government's disproportionately harsh treatment of other whistle-blowers, such as Chelsea Manning, who earned 35 years in prison for leaking classified documents to WikiLeaks, a website, has understandably sent Mr Snowden into hiding. The government's crusade against whistle-blowers sends the message that the state's strategic interests in acting under the cover of unaccountable secrecy not only outweigh the rights of its citizens, which legitimate governments are instituted to protect, but also the principles of democratic government, from which liberal states are thought to derive the authority to act at all. The administration seems keen to communicate that anyone privy to state secrets must refrain from revealing them, even (especially?) when the content of the secrets expose the state as acting criminally and in conflict with the conditions of its own legitimacy.
Perhaps Mr Snowden went too far, and leaked too much, too indiscriminately. He deserves to be tried and punished for his excesses. But he also deserves some admiration and gratitude for risking his own freedom to protect the freedom of his fellow citizens. Any punishments meted out must weigh this service in the balance.
When settling on a champion in the coming race, Republican primary voters, many of whom profess to care a great deal about liberty and the perils of overweening government, are lucky to face a real choice. If they want a more transparent government that depends less on secrecy, Mr Paul would seem to be their man. If they want to maintain the sort of shadow government that needs the likes of Mr Snowden as a check against secret executive-branch abuses, Mr Cruz seems to be staking out this ground. And if they want a government, like Mr Obama's, dedicated to squashing Snowdens in defence of the principle that it ought to be able to violate its citizens' rights with impunity, Messrs Rubio, Christie and Bush would seem to fit that bill. At this point, the positions of the rest of the field are rather less clear, but they all should pipe up. The issue works as an unusually useful litmus test of a candidate's commitment to limited government and the rule of law, and voters deserve to know.
// Imports reconstructed for this DFA Reporting API sample; DfaReportingFactory and
// CreativeAssetUtils are assumed to be sample-local helpers in the same package.
import com.google.api.services.dfareporting.Dfareporting;
import com.google.api.services.dfareporting.model.Creative;
import com.google.api.services.dfareporting.model.CreativeAsset;
import com.google.api.services.dfareporting.model.CreativeAssetId;
import com.google.api.services.dfareporting.model.Size;
import com.google.api.services.dfareporting.model.TargetWindow;
import com.google.common.collect.ImmutableList;
/**
* This example uploads creative assets and creates a flash in-page creative associated with a given
* advertiser. To get a size ID, run GetSize.java.
*/
public class CreateFlashInpageCreative {
private static final String USER_PROFILE_ID = "INSERT_USER_PROFILE_ID_HERE";
// Creative values.
private static final String ADVERTISER_ID = "INSERT_ADVERTISER_ID_HERE";
private static final String SIZE_ID = "INSERT_SIZE_ID_HERE";
// Flash asset values.
private static final String FLASH_ASSET_NAME = "INSERT_FLASH_ASSET_NAME_HERE";
private static final String PATH_TO_FLASH_ASSET_FILE = "INSERT_PATH_TO_FLASH_ASSET_FILE_HERE";
// Backup image asset values.
private static final String IMAGE_ASSET_NAME = "INSERT_IMAGE_ASSET_NAME_HERE";
private static final String PATH_TO_IMAGE_ASSET_FILE = "INSERT_PATH_TO_IMAGE_ASSET_FILE_HERE";
public static void runExample(Dfareporting reporting, long profileId, long advertiserId,
long sizeId) throws Exception {
Creative creative = new Creative();
creative.setAdvertiserId(advertiserId);
creative.setName("Test flash in-page creative");
creative.setSize(new Size().setId(sizeId));
creative.setType("FLASH_INPAGE");
// Upload the flash asset.
CreativeAssetId flashAssetId = CreativeAssetUtils.uploadAsset(reporting, profileId,
advertiserId, FLASH_ASSET_NAME, PATH_TO_FLASH_ASSET_FILE, "FLASH");
CreativeAsset flashAsset = new CreativeAsset().setAssetIdentifier(flashAssetId)
.setRole("PRIMARY").setWindowMode("TRANSPARENT");
// Upload the backup image asset (note: asset type must be set to HTML_IMAGE).
CreativeAssetId imageAssetId = CreativeAssetUtils.uploadAsset(reporting, profileId,
advertiserId, IMAGE_ASSET_NAME, PATH_TO_IMAGE_ASSET_FILE, "HTML_IMAGE");
CreativeAsset backupImageAsset =
new CreativeAsset().setAssetIdentifier(imageAssetId).setRole("BACKUP_IMAGE");
// Add the creative assets.
creative.setCreativeAssets(ImmutableList.of(flashAsset, backupImageAsset));
// Set the backup image target window option.
creative.setBackupImageTargetWindow(new TargetWindow().setTargetWindowOption("NEW_WINDOW"));
Creative result = reporting.creatives().insert(profileId, creative).execute();
// Display the new creative ID.
System.out.printf("Flash in-page creative with ID %d was created.%n", result.getId());
}
public static void main(String[] args) throws Exception {
Dfareporting reporting = DfaReportingFactory.getInstance();
long advertiserId = Long.parseLong(ADVERTISER_ID);
long sizeId = Long.parseLong(SIZE_ID);
long profileId = Long.parseLong(USER_PROFILE_ID);
runExample(reporting, profileId, advertiserId, sizeId);
}
}
# ConfusionMatrix.py
# -*- coding: utf-8 -*-
"""ami.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Z6niq4l-zAz0wF7ClpvlzRK_o7ziWwst
"""
!pip install wandb -qqq
import wandb
wandb.init(project="Back_Propagation", entity="cs20m040")
!wandb login fb3bb8a505ba908b667b747ed68e4b154b2f6fc5
sweep_config = {
'method': 'random',
'metric': {
'name': 'accuracy',
'goal': 'maximize'
},
'parameters': {
'epochs': {'values': [10]},
'batch_size': {'values': [128]},
'learning_rate': {'values': [0.001]},
'hidden_layers': {'values': [5]},
'sizes': {'values': [64]},
'weight_decay': {'values': [0.0005]},
'opt_algo': {'values': ['rmsprop']},
'init_method': {'values': ['random']},
'activation_function': {'values': ['sigmoid']}
}
}
sweep_id = wandb.sweep(sweep_config, entity="cs20m040", project="Back_Propagation")
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
import pandas as pd
import cmath
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss
from tqdm import tqdm_notebook
from tqdm.notebook import tqdm
from sklearn.preprocessing import OneHotEncoder
from keras.datasets import fashion_mnist
from sklearn.metrics import confusion_matrix
def dataload():
(X_train, Y_train), (X_val, Y_val) = fashion_mnist.load_data()
# reshape dataset to have a single channel
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1] * X_train.shape[2])
X_val = X_val.reshape(X_val.shape[0], 1, X_val.shape[1] * X_val.shape[2])
# Convert from integers to floats
X_train = X_train.astype('float32')
X_val = X_val.astype('float32')
# scale the values between 0 and 1 for both training and testing set
X_train = X_train / 255.0
X_val = X_val / 255.0
enc = OneHotEncoder()
# 0 -> (1, 0, 0, 0), 1 -> (0, 1, 0, 0), 2 -> (0, 0, 1, 0), 3 -> (0, 0, 0, 1)
y_OH_train = enc.fit_transform(np.expand_dims(Y_train,1)).toarray()
y_OH_val = enc.fit_transform(np.expand_dims(Y_val,1)).toarray()
return X_train, Y_train, X_val, Y_val, y_OH_train, y_OH_val
labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
class BackPropagation:
#constructor of backpropagation class
def __init__(self, input_size, output_size=1, init_method = 'random', activation_function = 'sigmoid', leaky_slope = 0.1, hidden_layers=[2], sizes=[32]):
self.x=input_size
self.y=output_size
self.h=len(hidden_layers)
self.sizes=[self.x] + hidden_layers + [self.y]
self.init_method = init_method
self.activation_function = activation_function
self.leaky_slope = leaky_slope
self.W={}
self.B={}
np.random.seed(0)
        # Random initialization
        if init_method == "random":
            for i in range(self.h+1):
                self.W[i+1] = np.random.randn(self.sizes[i], self.sizes[i+1])
                self.B[i+1] = np.random.randn(self.sizes[i+1])
        # Xavier initialization: scale each layer's weights by its fan-in
        elif init_method == "xavier":
            for i in range(self.h+1):
                self.W[i+1] = np.random.randn(self.sizes[i], self.sizes[i+1]) * np.sqrt(1 / self.sizes[i])
                self.B[i+1] = np.random.randn(self.sizes[i+1])
        # Default initialization: random weights, zero biases
        else:
            for i in range(self.h+1):
                self.W[i+1] = np.random.randn(self.sizes[i], self.sizes[i+1])
                self.B[i+1] = np.zeros((1, self.sizes[i+1]))
#forward activation function
def forward_activation(self, X):
if self.activation_function == "sigmoid":
return 1.0/(1.0 + np.exp(-X))
elif self.activation_function == "tanh":
return np.tanh(X)
elif self.activation_function == "relu":
return np.maximum(0, X)
    # Compute the gradient of the activation function; X is the post-activation output H
    def grad_activation(self, X):
        if self.activation_function == "sigmoid":
            return X * (1 - X)
        elif self.activation_function == "tanh":
            # H = tanh(A), so dH/dA = 1 - H^2
            return 1 - np.square(X)
        elif self.activation_function == "relu":
            return 1.0 * (X > 0)
#compute perceptron
def perceptron(self, x, w, b):
return np.dot(x, w)+ b
#compute sigmoid
def sigmoid(self, x):
return (1.0/(1.0 + np.exp(-x)))
#compute gradient of the sigmoid
def grad_sigmoid(self, x):
return x * (1-x)
    # Compute a numerically stable softmax (shift by the max before exponentiating)
    def softmax(self, y):
        exps = np.exp(y - np.max(y))
        return exps / np.sum(exps)
#run forward pass in the neural network
def forward_pass(self, x):
self.A={}
self.H={}
self.H[0]= x
for i in range(self.h+1):
self.A[i+1]=np.matmul(self.H[i], self.W[i+1]) + self.B[i+1]
self.H[i+1]=self.forward_activation(self.A[i+1])
self.H[self.h+1]=self.softmax(self.A[self.h+1])
return self.H[self.h+1]
#compute gradients
def grad(self, x, y):
L=self.h+1
self.forward_pass(x)
self.dW={}
self.dB={}
self.dA={}
self.dH={}
self.dA[L]=(self.H[L]-y)
for k in range(L, 0, -1):
self.dW[k]=np.matmul(self.H[k-1].T, self.dA[k])
self.dB[k]=self.dA[k]
self.dH[k-1]=np.matmul(self.dA[k], self.W[k].T)
self.dA[k-1]=np.multiply(self.dH[k-1], self.grad_activation(self.H[k-1]))
#predict output on the basis of data
def predict(self, X):
preds=[]
for x in X:
preds.append(self.forward_pass(x))
return np.array(preds).squeeze()
#compute cross entropy
def cross_entropy(self,label,pred):
yl=np.multiply(pred,label)
yl=yl[yl!=0]
yl=-np.log(yl)
yl=np.mean(yl)
return yl
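    # Worked example: label [0, 1, 0] with pred [0.2, 0.7, 0.1] keeps only the
    # 0.7 term, so the loss is -log(0.7) ~= 0.357.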
#trains the backpropagation model
def fit(self, X, Y, epochs=1, lr=0.001, initialize=True, display_loss=False, opt_algo='adam', l2_norm=False, weight_decay=0.8, batch_size=128, gamma=0.9, beta=0.9, beta1=0.9, beta2=0.999, epsi=1e-8):
X, X1, Y, Y1 = train_test_split(X, Y, test_size=0.1, random_state=1)
accuracy, val_accuracy, loss, val_loss= {}, {}, {}, {}
if initialize:
for i in range(self.h+1):
self.W[i+1]=np.random.randn(self.sizes[i], self.sizes[i+1])
self.B[i+1]=np.zeros((1, self.sizes[i+1]))
        vW, vB = {}, {}
        mW, mB = {}, {}
        for i in range(self.h+1):
            vW[i+1] = np.zeros((self.sizes[i], self.sizes[i+1]))
            vB[i+1] = np.zeros((1, self.sizes[i+1]))
            mW[i+1] = np.zeros((self.sizes[i], self.sizes[i+1]))
            mB[i+1] = np.zeros((1, self.sizes[i+1]))
for e in tqdm(range(epochs), total=epochs, unit="epoch"):
m=X.shape[0]
dW, dB= {}, {}
for i in range(self.h+1):
dW[i+1]=np.zeros((self.sizes[i], self.sizes[i+1]))
dB[i+1]=np.zeros((1, self.sizes[i+1]))
# Gradient Descent
if opt_algo=='gd':
for x, y in zip(X, Y):
self.grad(x, y)
for i in range(self.h+1):
#L2 Regularization
if l2_norm:
dW[i+1]+=self.dW[i+1] + weight_decay * self.W[i+1]
else:
dW[i+1]+=self.dW[i+1]
dB[i+1]+=self.dB[i+1]
for i in range(self.h+1):
self.W[i+1]-= lr*dW[i+1]/m
self.B[i+1]-= lr*dB[i+1]/m
# Stochastic Gradient Descent
elif opt_algo=='sgd':
sample_count= 0
for x, y in zip(X, Y):
self.grad(x, y)
sample_count+=1
for i in range(self.h+1):
#L2 Regularization
if l2_norm:
dW[i+1]+= self.dW[i+1] + weight_decay * self.W[i+1]
else:
dW[i+1]+=self.dW[i+1]
dB[i+1]+= self.dB[i+1]
if sample_count % batch_size == 0:
for i in range(self.h+1):
self.W[i+1]-= lr*dW[i+1]/batch_size
self.B[i+1]-= lr*dB[i+1]/batch_size
# Momentum Based Gradient Descent
elif opt_algo=='mgd':
sample_count=0
for x, y in zip(X, Y):
self.grad(x, y)
for i in range(self.h+1):
#L2 Regularization
if l2_norm:
dW[i+1]+= self.dW[i+1] + weight_decay * self.W[i+1]
else:
dW[i+1]+=self.dW[i+1]
dB[i+1]+= self.dB[i+1]
sample_count+=1
if sample_count % batch_size == 0:
for i in range(self.h+1):
vW[i+1]= gamma * vW[i+1] + lr*dW[i+1]
vB[i+1]= gamma * vB[i+1] + lr*dB[i+1]
self.W[i+1]-= vW[i+1]
self.B[i+1]-= vB[i+1]
# Nestrov Accelerated Gradient Descent
elif opt_algo=='nag':
sample_count=0
for x, y in zip(X, Y):
self.grad(x, y)
sample_count+=1
for i in range(self.h+1):
#L2 Regularization
if l2_norm:
dW[i+1]+= self.dW[i+1] + weight_decay * self.W[i+1]
else:
dW[i+1]+=self.dW[i+1]
dB[i+1]+= self.dB[i+1]
tW, tB= {}, {}
for i in range(self.h+1):
tW[i+1]= self.W[i+1] - gamma * vW[i+1]
tB[i+1]= self.B[i+1] - gamma * vB[i+1]
self.W[i+1]= tW[i+1]
self.B[i+1]= tB[i+1]
self.grad(x, y)
for i in range(self.h+1):
self.W[i+1]= (tW[i+1] - lr * self.dW[i+1])
self.B[i+1]= (tB[i+1] - lr * self.dB[i+1])
for i in range(self.h+1):
vW[i+1]= (gamma * vW[i+1] + lr * self.dW[i+1])
vB[i+1]= (gamma * vB[i+1] + lr * self.dB[i+1])
self.W[i+1]= tW[i+1] - vW[i+1]
self.B[i+1]= tB[i+1] - vB[i+1]
# RMSProp Gradient Descent
elif opt_algo=='rmsprop':
sample_count=0
for x, y in zip(X, Y):
self.grad(x, y)
sample_count+=1
for i in range(self.h+1):
dW[i+1]+=self.dW[i+1]
dB[i+1]+=self.dB[i+1]
if sample_count % batch_size == 0:
for i in range(self.h+1):
vW[i+1]= beta * vW[i+1] + (1-beta) * np.power(dW[i+1], 2)
vB[i+1]= beta * vB[i+1] + (1-beta) * np.power(dB[i+1], 2)
for i in range(self.h+1):
self.W[i+1]-= (lr/np.sqrt(vW[i+1] + epsi)) * dW[i+1]
self.B[i+1]-= (lr/np.sqrt(vB[i+1] + epsi)) * dB[i+1]
# Adam Gradient Descent
elif opt_algo=='adam':
sample_count=0
for x, y in zip(X, Y):
self.grad(x, y)
sample_count+=1
for i in range(self.h+1):
dW[i+1]+=self.dW[i+1]
dB[i+1]+=self.dB[i+1]
if sample_count % batch_size == 0:
                        # mW/mB and vW/vB persist across updates (initialised
                        # once before the epoch loop) so the moments accumulate
                        for i in range(self.h+1):
mW[i+1]= beta1 * mW[i+1] + (1-beta1) * dW[i+1]
mB[i+1]= beta1 * mB[i+1] + (1-beta1) * dB[i+1]
vW[i+1]= beta2 * vW[i+1] + (1-beta2) * np.power(dW[i+1], 2.0)
vB[i+1]= beta2 * vB[i+1] + (1-beta2) * np.power(dB[i+1], 2.0)
                            mW_hat = mW[i+1] / (1.0 - np.power(beta1, sample_count))
                            mB_hat = mB[i+1] / (1.0 - np.power(beta1, sample_count))
                            vW_hat = vW[i+1] / (1.0 - np.power(beta2, sample_count))
                            vB_hat = vB[i+1] / (1.0 - np.power(beta2, sample_count))
                            self.W[i+1] -= (lr / (np.sqrt(vW_hat + epsi))) * mW_hat
                            self.B[i+1] -= (lr / (np.sqrt(vB_hat + epsi))) * mB_hat
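                            # Adam recap: mW/mB track the gradient mean and
                            # vW/vB its uncentered variance; after bias
                            # correction by (1 - beta^t) the step is
                            # lr * m_hat / sqrt(v_hat + eps). This code folds
                            # eps inside the sqrt, a minor but common variant.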
# Nadam Gradient Descent
elif opt_algo=='nadam':
sample_count=0
for x, y in zip(X, Y):
self.grad(x, y)
sample_count+=1
for i in range(self.h+1):
#L2 Regularization
if l2_norm:
dW[i+1]+= self.dW[i+1] + weight_decay * self.W[i+1]
else:
dW[i+1]+=self.dW[i+1]
dB[i+1]+= self.dB[i+1]
if sample_count % batch_size == 0:
                        # reuse the persistent moment estimates rather than
                        # re-zeroing them on every update
                        for i in range(self.h+1):
mW[i+1]= beta1 * mW[i+1] + (1-beta1) * dW[i+1]
mB[i+1]= beta1 * mB[i+1] + (1-beta1) * dB[i+1]
vW[i+1]= beta2 * vW[i+1] + (1-beta2) * np.power(dW[i+1], 2)
vB[i+1]= beta2 * vB[i+1] + (1-beta2) * np.power(dB[i+1], 2)
                            mW_hat = mW[i+1] / (1.0 - np.power(beta1, sample_count))
                            mB_hat = mB[i+1] / (1.0 - np.power(beta1, sample_count))
                            vW_hat = vW[i+1] / (1.0 - np.power(beta2, sample_count))
                            vB_hat = vB[i+1] / (1.0 - np.power(beta2, sample_count))
                            # Nesterov-style lookahead on the first moment
                            xW = beta1 * mW_hat + (1 - beta1) * dW[i+1] / (1.0 - np.power(beta1, sample_count))
                            xB = beta1 * mB_hat + (1 - beta1) * dB[i+1] / (1.0 - np.power(beta1, sample_count))
                            self.W[i+1] -= (lr / (np.sqrt(vW_hat + epsi))) * xW
                            self.B[i+1] -= (lr / (np.sqrt(vB_hat + epsi))) * xB
# Calculating Loss and Accuracy
y_preds=self.predict(X)
y_val_preds=self.predict(X1)
            loss[e] = self.cross_entropy(Y, y_preds)
            val_loss[e] = self.cross_entropy(Y1, y_val_preds)
accuracy[e]= accuracy_score(np.argmax(y_preds, axis=1), np.argmax(Y, axis=1))
val_accuracy[e]= accuracy_score(np.argmax(y_val_preds, axis=1), np.argmax(Y1, axis=1))
wandb.log({ 'Epoch': e, 'loss': loss[e], 'Val_loss': val_loss[e], 'Accuracy': accuracy[e], 'Val_accuracy': val_accuracy[e]})
# Plotting Loss
if display_loss:
plt.plot(np.array(list(loss.values())).astype(float))
plt.xlabel("Epoch")
plt.ylabel("Cross Entropy Loss")
plt.show()
def train():
config_defaults = {
'epochs' : 15,
'batch_size' : 128,
'learning_rate' : 0.01,
'activation_function' : 'relu',
'dropout' : 0.5,
'momentum' : 0.9,
'seed' : 42,
'hidden_layers' : 4,
'opt_algo' : 'rmsprop',
'sizes' : 64,
'init_method' : 'random',
'weight_decay' : 0.5
}
wandb.init(config=config_defaults)
config = wandb.config
hl= [config.sizes for h in range(config.hidden_layers)]
X_train, Y_train, X_val, Y_val, y_OH_train, y_OH_val = dataload()
back_pro = BackPropagation(X_train.shape[2], y_OH_train.shape[1], init_method=config.init_method, activation_function = config.activation_function, hidden_layers=hl )
back_pro.fit(X_train, y_OH_train, initialize=False, display_loss=True, opt_algo = config.opt_algo , l2_norm=False, epochs = config.epochs, lr=config.learning_rate, weight_decay=config.weight_decay, batch_size=config.batch_size)
preds=back_pro.predict(X_val)
preds1=np.argmax(preds, 1)
test=np.argmax(y_OH_val, 1)
accuracy = accuracy_score(test, preds1)
print(accuracy)
wandb.log({ "accuracy" : accuracy})
wandb.log({"Confusion_matrix": wandb.plot.confusion_matrix(probs=None, preds=preds1, y_true=test, class_names=labels)})
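
# A minimal sketch of how the sweep_id consumed below could be created; the
# sweep method, metric, and value ranges here are illustrative, not a record
# of the original experiment.
sweep_config = {
    'method': 'random',
    'metric': {'name': 'Val_accuracy', 'goal': 'maximize'},
    'parameters': {
        'learning_rate': {'values': [0.01, 0.001]},
        'opt_algo': {'values': ['adam', 'rmsprop', 'nag']},
        'hidden_layers': {'values': [2, 3, 4]},
    },
}
sweep_id = wandb.sweep(sweep_config, project='fashion-mnist-backprop')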
wandb.agent(sweep_id,function=train) |
package gs
import (
"bytes"
"encoding/json"
"github.com/pkg/errors"
"github.com/viant/afs/option"
"github.com/viant/afs/storage"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"io/ioutil"
"os"
)
//JWTProvider represents a JWT-based auth provider
type JWTProvider interface {
JWTConfig(scopes ...string) (config *jwt.Config, projectID string, err error)
}
//JwtConfig represents google service account secrets
type JwtConfig struct {
//google cloud credential
ClientEmail string `json:"client_email,omitempty"`
TokenURL string `json:"token_uri,omitempty"`
PrivateKey string `json:"private_key,omitempty"`
PrivateKeyID string `json:"private_key_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
Type string `json:"type,omitempty"`
ClientX509CertURL string `json:"client_x509_cert_url,omitempty"`
AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url,omitempty"`
jwtClientConfig *jwt.Config
}
//JWTConfig returns new JWT config for supplied scopes
func (c *JwtConfig) JWTConfig(scopes ...string) (config *jwt.Config, projectID string, err error) {
if c.jwtClientConfig != nil {
return c.jwtClientConfig, c.ProjectID, nil
}
var result = &jwt.Config{
Email: c.ClientEmail,
Subject: c.ClientEmail,
PrivateKey: []byte(c.PrivateKey),
PrivateKeyID: c.PrivateKeyID,
Scopes: scopes,
TokenURL: c.TokenURL,
}
if result.TokenURL == "" {
result.TokenURL = google.JWTTokenURL
}
c.jwtClientConfig = result
return result, c.ProjectID, nil
}
//NewJwtConfig returns new secrets from location
func NewJwtConfig(options ...storage.Option) (*JwtConfig, error) {
location := &option.Location{}
var JSONPayload = make([]byte, 0)
option.Assign(options, &location, &JSONPayload)
if location.Path == "" && len(JSONPayload) == 0 {
return nil, errors.New("auth location was empty")
}
if location.Path != "" {
file, err := os.Open(location.Path)
if err != nil {
return nil, errors.Wrap(err, "failed to open auth config")
}
defer func() { _ = file.Close() }()
if JSONPayload, err = ioutil.ReadAll(file); err != nil {
return nil, err
}
}
config := &JwtConfig{}
err := json.NewDecoder(bytes.NewReader(JSONPayload)).Decode(config)
return config, err
}
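
// Usage sketch (file name and scope illustrative):
//
//	cfg, err := NewJwtConfig(option.NewLocation("service-account.json"))
//	if err != nil { /* handle error */ }
//	jwtCfg, projectID, err := cfg.JWTConfig("https://www.googleapis.com/auth/devstorage.read_only")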
|
// Source: 2.5.2/CN-Biz-Common-2000/src/main/java/org/zmsoft/service/player/ISPLayerRegistService.java
package org.zmsoft.service.player;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.zmsoft.framework.beans.LoginerBean;
import org.zmsoft.framework.beans.common.RESTResultBean;
/**
 * Member registration.
*
*/
public interface ISPLayerRegistService {
RESTResultBean<String> doRegist(HttpServletRequest request, HttpServletResponse response, LoginerBean loginer) throws Exception;
}
|
def _build_response(result, count, offset):
videos = []
if result:
for video_details in result:
entry = {
shared_constants.INDEX_KEY: video_details.meta.index,
shared_constants.VIDEO_ID_KEY: video_details.meta.id,
}
video_info = video_details.to_dict()
for field, value in video_info.items():
entry[field] = value
videos.append(entry)
pagination = Paginate(offset=offset, count_per_page=len(videos), total_count=count)
return common_utils.multiple_args_to_single_dict(
details=videos,
pagination=pagination.dict(),
) |
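
# Return sketch: {'details': [{<index/id keys plus the video fields>}, ...],
#                 'pagination': {...}}; Paginate, shared_constants and
# common_utils come from the surrounding module.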
import { MetaResponse } from './meta-response';
import { ResponseCode } from './response-code';
import { HttpStatus } from '@nestjs/common';
export class MetaResponseGenerator {
  /**
   * Wraps a value and a response code in a new MetaResponse and returns it.
   *
   * @static
   * @template resultType
   * @param {resultType} value
   * @param {ResponseCode} responseCode
   * @returns {MetaResponse<resultType>}
   * @memberof MetaResponseGenerator
   */
  static generateFull<resultType>(
    value: resultType,
    responseCode: ResponseCode): MetaResponse<resultType> {
    const instance = new MetaResponse<resultType>();
    instance.value = value;
    instance.addResponseCode(responseCode);
    return instance;
  }
/**
   * Wraps a value and a response code in a new MetaResponse.
   *
* @static
* @template resultType
* @param {resultType} value
* @param {ResponseCode} responseCode
* @returns {MetaResponse<resultType>}
* @memberof MetaResponseGenerator
*/
static generateByResponseCode<resultType>(
value: resultType,
responseCode: ResponseCode,
): MetaResponse<resultType> {
const instance = new MetaResponse<resultType>();
instance.value = value;
instance.addResponseCode(responseCode);
return instance;
}
/**
   * Builds an error MetaResponse (null value) from a response code.
   *
* @static
* @template resultType
* @param {ResponseCode} responseCode
* @returns {MetaResponse<resultType>}
* @memberof MetaResponseGenerator
*/
static generateAnErrorResponse<resultType>(
responseCode: ResponseCode,
): MetaResponse<resultType> {
return this.generateByResponseCode(null, responseCode);
}
/**
   * Builds an error MetaResponse from an HTTP status and an error payload.
   *
* @static
* @template resultType
* @param {HttpStatus} httpStatus
* @param {*} error
* @returns
* @memberof MetaResponseGenerator
*/
static generateErrorByStatus<resultType>(httpStatus: HttpStatus, error: any) {
return this.generateByResponseCode(null, new ResponseCode(httpStatus, undefined, undefined, undefined, undefined, error));
}
}
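
// Usage sketch (response codes illustrative):
//   const ok = MetaResponseGenerator.generateByResponseCode({ id: 1 }, okCode);
//   const fail = MetaResponseGenerator.generateErrorByStatus(HttpStatus.NOT_FOUND, err);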
|
import cv2
import numpy as np


def save_16_bit_flow(flow, destination):
    # cast to uint16 before writing; PNG (among others) supports 16-bit depth
    cv2.imwrite(destination, flow.astype(np.uint16))
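
# Usage sketch: callers are expected to encode float flow into the uint16
# range first (e.g. the KITTI-style flow * 64 + 2**15) before calling
# save_16_bit_flow(encoded_flow, "flow_0001.png").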
// RemoveWithPrefix removes the keys with given prefix.
func (kv *EtcdKV) RemoveWithPrefix(prefix string) error {
start := time.Now()
key := path.Join(kv.rootPath, prefix)
ctx, cancel := context.WithTimeout(context.TODO(), RequestTimeout)
defer cancel()
_, err := kv.client.Delete(ctx, key, clientv3.WithPrefix())
CheckElapseAndWarn(start, "Slow etcd operation remove with prefix")
return err
} |
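
// Usage sketch: kv.RemoveWithPrefix("segments/") issues a single ranged
// delete covering every key under path.Join(kv.rootPath, "segments/").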
// loadSubdomain loads the private domain corresponding to the subdomain of the request.
func loadSubdomain(r *http.Request, db *database.DB, u *core.CurrentUser) (*core.Domain, error) {
subdomain := strings.ToLower(mux.Vars(r)["subdomain"])
if subdomain == "" {
return core.NewDomain(), nil
}
domain, err := core.LoadDomainByAlias(db, subdomain)
if err != nil {
return nil, fmt.Errorf("Couldn't convert subdomain to id: %v", err)
} else if domain == nil {
return nil, fmt.Errorf("Couldn't find private domain %s", subdomain)
}
return domain, nil
} |
package cn.programingmonkey.Table;
import org.springframework.stereotype.Component;
import javax.persistence.*;
import java.util.Date;
/**
* Created by cai on 2017/4/21.
*/
@Entity
@Table(name = "tg_like")
@Component
public class like {
private int id;
private String userId;
private String postId;
private Date addDate;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getUserId() {
return userId;
}
public void setUserId(String userId) {
this.userId = userId;
}
public String getPostId() {
return postId;
}
public void setPostId(String postId) {
this.postId = postId;
}
public Date getAddDate() {
return addDate;
}
public void setAddDate(Date addDate) {
this.addDate = addDate;
}
}
|
/*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc32 by David S. Miller [email protected]
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/prom.h>
#include <asm/oplib.h>
static struct device_node *allnodes;
/* use when traversing tree through the allnext, child, sibling,
* or parent members of struct device_node.
*/
static DEFINE_RWLOCK(devtree_lock);
int of_device_is_compatible(const struct device_node *device,
const char *compat)
{
const char* cp;
int cplen, l;
cp = of_get_property(device, "compatible", &cplen);
if (cp == NULL)
return 0;
while (cplen > 0) {
if (strncmp(cp, compat, strlen(compat)) == 0)
return 1;
l = strlen(cp) + 1;
cp += l;
cplen -= l;
}
return 0;
}
EXPORT_SYMBOL(of_device_is_compatible);
struct device_node *of_get_parent(const struct device_node *node)
{
struct device_node *np;
if (!node)
return NULL;
np = node->parent;
return np;
}
EXPORT_SYMBOL(of_get_parent);
struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
next = prev ? prev->sibling : node->child;
for (; next != 0; next = next->sibling) {
break;
}
return next;
}
EXPORT_SYMBOL(of_get_next_child);
struct device_node *of_find_node_by_path(const char *path)
{
struct device_node *np = allnodes;
for (; np != 0; np = np->allnext) {
if (np->full_name != 0 && strcmp(np->full_name, path) == 0)
break;
}
return np;
}
EXPORT_SYMBOL(of_find_node_by_path);
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
for (np = allnodes; np != 0; np = np->allnext)
if (np->node == handle)
break;
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
struct device_node *of_find_node_by_name(struct device_node *from,
const char *name)
{
struct device_node *np;
np = from ? from->allnext : allnodes;
for (; np != NULL; np = np->allnext)
if (np->name != NULL && strcmp(np->name, name) == 0)
break;
return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
struct device_node *of_find_node_by_type(struct device_node *from,
const char *type)
{
struct device_node *np;
np = from ? from->allnext : allnodes;
for (; np != 0; np = np->allnext)
if (np->type != 0 && strcmp(np->type, type) == 0)
break;
return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compatible)
{
struct device_node *np;
np = from ? from->allnext : allnodes;
for (; np != 0; np = np->allnext) {
if (type != NULL
&& !(np->type != 0 && strcmp(np->type, type) == 0))
continue;
if (of_device_is_compatible(np, compatible))
break;
}
return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp)
{
struct property *pp;
for (pp = np->properties; pp != 0; pp = pp->next) {
if (strcasecmp(pp->name, name) == 0) {
if (lenp != 0)
*lenp = pp->length;
break;
}
}
return pp;
}
EXPORT_SYMBOL(of_find_property);
/*
* Find a property with a given name for a given node
* and return the value.
*/
const void *of_get_property(const struct device_node *np, const char *name,
int *lenp)
{
struct property *pp = of_find_property(np,name,lenp);
return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
struct property *prop;
int len;
prop = of_find_property(np, name, &len);
if (!prop || len != 4)
return def;
return *(int *) prop->value;
}
EXPORT_SYMBOL(of_getintprop_default);
int of_n_addr_cells(struct device_node *np)
{
const int* ip;
do {
if (np->parent)
np = np->parent;
ip = of_get_property(np, "#address-cells", NULL);
if (ip != NULL)
return *ip;
} while (np->parent);
/* No #address-cells property for the root node, default to 2 */
return 2;
}
EXPORT_SYMBOL(of_n_addr_cells);
int of_n_size_cells(struct device_node *np)
{
const int* ip;
do {
if (np->parent)
np = np->parent;
ip = of_get_property(np, "#size-cells", NULL);
if (ip != NULL)
return *ip;
} while (np->parent);
/* No #size-cells property for the root node, default to 1 */
return 1;
}
EXPORT_SYMBOL(of_n_size_cells);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
void *new_val;
int err;
new_val = kmalloc(len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
memcpy(new_val, val, len);
err = -ENODEV;
write_lock(&devtree_lock);
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
if (!strcasecmp(prop->name, name)) {
void *old_val = prop->value;
int ret;
ret = prom_setprop(dp->node, (char *) name, val, len);
err = -EINVAL;
if (ret >= 0) {
prop->value = new_val;
prop->length = len;
if (OF_IS_DYNAMIC(prop))
kfree(old_val);
OF_MARK_DYNAMIC(prop);
err = 0;
}
break;
}
prevp = &(*prevp)->next;
}
write_unlock(&devtree_lock);
	/* XXX Update procfs if necessary... */
return err;
}
EXPORT_SYMBOL(of_set_property);
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
{
void *ret;
ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
if (ret != NULL)
memset(ret, 0, size);
prom_early_allocated += size;
return ret;
}
static int is_root_node(const struct device_node *dp)
{
if (!dp)
return 0;
return (dp->parent == NULL);
}
/* The following routines deal with the black magic of fully naming a
* node.
*
* Certain well known named nodes are just the simple name string.
*
* Actual devices have an address specifier appended to the base name
* string, like this "foo@addr". The "addr" can be in any number of
* formats, and the platform plus the type of the node determine the
* format and how it is constructed.
*
* For children of the ROOT node, the naming convention is fixed and
* determined by whether this is a sun4u or sun4v system.
*
* For children of other nodes, it is bus type specific. So
* we walk up the tree until we discover a "device_type" property
* we recognize and we go from there.
*/
static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *rprop;
rprop = of_find_property(dp, "reg", NULL);
if (!rprop)
return;
regs = rprop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io, regs->phys_addr);
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io,
regs->phys_addr);
}
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
dp->name,
devfn >> 3);
}
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io, regs->phys_addr);
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
struct device_node *parent = dp->parent;
if (parent != NULL) {
if (!strcmp(parent->type, "pci") ||
!strcmp(parent->type, "pciex"))
return pci_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "sbus"))
return sbus_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "ebus"))
return ebus_path_component(dp, tmp_buf);
/* "isa" is handled with platform naming */
}
/* Use platform naming convention. */
return sparc32_path_component(dp, tmp_buf);
}
static char * __init build_path_component(struct device_node *dp)
{
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
strcpy(tmp_buf, dp->name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
return n;
}
static char * __init build_full_name(struct device_node *dp)
{
int len, ourlen, plen;
char *n;
plen = strlen(dp->parent->full_name);
ourlen = strlen(dp->path_component_name);
len = ourlen + plen + 2;
n = prom_early_alloc(len);
strcpy(n, dp->parent->full_name);
if (!is_root_node(dp->parent)) {
strcpy(n + plen, "/");
plen++;
}
strcpy(n + plen, dp->path_component_name);
return n;
}
static unsigned int unique_id;
static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
{
static struct property *tmp = NULL;
struct property *p;
int len;
const char *name;
if (tmp) {
p = tmp;
memset(p, 0, sizeof(*p) + 32);
tmp = NULL;
} else {
p = prom_early_alloc(sizeof(struct property) + 32);
p->unique_id = unique_id++;
}
p->name = (char *) (p + 1);
if (special_name) {
strcpy(p->name, special_name);
p->length = special_len;
p->value = prom_early_alloc(special_len);
memcpy(p->value, special_val, special_len);
} else {
if (prev == NULL) {
name = prom_firstprop(node, NULL);
} else {
name = prom_nextprop(node, prev, NULL);
}
if (strlen(name) == 0) {
tmp = p;
return NULL;
}
strcpy(p->name, name);
p->length = prom_getproplen(node, p->name);
if (p->length <= 0) {
p->length = 0;
} else {
p->value = prom_early_alloc(p->length + 1);
len = prom_getproperty(node, p->name, p->value,
p->length);
if (len <= 0)
p->length = 0;
((unsigned char *)p->value)[p->length] = '\0';
}
}
return p;
}
static struct property * __init build_prop_list(phandle node)
{
struct property *head, *tail;
head = tail = build_one_prop(node, NULL,
".node", &node, sizeof(node));
tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
tail = tail->next;
while(tail) {
tail->next = build_one_prop(node, tail->name,
NULL, NULL, 0);
tail = tail->next;
}
return head;
}
static char * __init get_one_property(phandle node, char *name)
{
char *buf = "<NULL>";
int len;
len = prom_getproplen(node, name);
if (len > 0) {
buf = prom_early_alloc(len);
len = prom_getproperty(node, name, buf, len);
}
return buf;
}
static struct device_node * __init create_node(phandle node)
{
struct device_node *dp;
if (!node)
return NULL;
dp = prom_early_alloc(sizeof(*dp));
dp->unique_id = unique_id++;
kref_init(&dp->kref);
dp->name = get_one_property(node, "name");
dp->type = get_one_property(node, "device_type");
dp->node = node;
/* Build interrupts later... */
dp->properties = build_prop_list(node);
return dp;
}
static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
{
struct device_node *dp;
dp = create_node(node);
if (dp) {
*(*nextp) = dp;
*nextp = &dp->allnext;
dp->parent = parent;
dp->path_component_name = build_path_component(dp);
dp->full_name = build_full_name(dp);
dp->child = build_tree(dp, prom_getchild(node), nextp);
dp->sibling = build_tree(parent, prom_getsibling(node), nextp);
}
return dp;
}
void __init prom_build_devicetree(void)
{
struct device_node **nextp;
allnodes = create_node(prom_root_node);
allnodes->path_component_name = "";
allnodes->full_name = "/";
nextp = &allnodes->allnext;
allnodes->child = build_tree(allnodes,
prom_getchild(allnodes->node),
&nextp);
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
}
|
from contextlib import closing

from sqlalchemy import create_engine


# _load_names is defined in the surrounding module.
def main(uri: str, refs_table: str, refs_path: str, alts_table: str, alts_path: str, test: bool):
    engine = create_engine(uri)
_load_names(
engine=engine,
table=alts_table,
path=alts_path,
test=test,
target_col='alt',
target_col_size=64,
add_unique_constraints=False,
)
with closing(engine.raw_connection()) as connection:
with connection.cursor() as cursor:
cursor.execute(f'CREATE INDEX ON {alts_table} (prefix, alt);')
_load_names(
engine=engine,
table=refs_table,
path=refs_path,
test=test,
target_col='name',
target_col_size=4096,
) |
def clean_name(name, allowed_chars):
ok = identifier_chars + allowed_chars
newname = "".join(c if c in ok else "-" for c in name)
newname = newname.lstrip("-")
if not newname:
raise RuntimeError(f"No valid chars in name '{name}'.")
return newname |
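
# Worked example (assuming identifier_chars covers [A-Za-z0-9_]):
#   clean_name("my file(1).txt", ".") -> "my-file-1-.txt"
# A name with no valid characters raises RuntimeError instead.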
//#####################################################################
// Copyright 2011.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
// Class DIFFUSION_UNIFORM
//#####################################################################
#ifndef __DIFFUSION_UNIFORM__
#define __DIFFUSION_UNIFORM__
#include <PhysBAM_Tools/Grids_Uniform_Boundaries/BOUNDARY_UNIFORM.h>
#include <PhysBAM_Tools/Parallel_Computation/MPI_UNIFORM_GRID.h>
#include <PhysBAM_Geometry/Grids_Uniform_Level_Sets/LEVELSET_POLICY_UNIFORM.h>
#include <PhysBAM_Dynamics/Level_Sets/PARTICLE_LEVELSET_UNIFORM.h>
#include <PhysBAM_Dynamics/Advection_Equations/ADVECTION_CONSERVATIVE_UNIFORM_FORWARD.h>
namespace PhysBAM{
template<class T_GRID,class T2>
class DIFFUSION_UNIFORM
{
typedef typename T_GRID::VECTOR_T TV;typedef typename TV::SCALAR T;typedef typename T_GRID::VECTOR_INT TV_INT;typedef typename GRID_ARRAYS_POLICY<T_GRID>::ARRAYS_SCALAR T_ARRAYS_SCALAR;
typedef typename GRID_ARRAYS_POLICY<T_GRID>::FACE_ARRAYS T_FACE_ARRAYS_SCALAR;typedef typename T_ARRAYS_SCALAR::template REBIND<T2>::TYPE T_ARRAYS_T2;
typedef typename T_FACE_ARRAYS_SCALAR::template REBIND<bool>::TYPE T_FACE_ARRAYS_BOOL;
typedef typename T_ARRAYS_SCALAR::template REBIND<TV>::TYPE T_ARRAYS_VECTOR;typedef typename T_GRID::NODE_ITERATOR NODE_ITERATOR;typedef typename T_GRID::CELL_ITERATOR CELL_ITERATOR;
typedef typename T_GRID::FACE_ITERATOR FACE_ITERATOR;
typedef typename BOUNDARY_POLICY<T_GRID>::BOUNDARY_SCALAR T_BOUNDARY;typedef typename REBIND<T_BOUNDARY,T2>::TYPE T_BOUNDARY_T2;
typedef typename LEVELSET_POLICY<T_GRID>::LEVELSET T_LEVELSET;typedef typename MPI_GRID_POLICY<T_GRID>::MPI_GRID T_MPI_GRID;
public:
bool diffuse_weights,diffuse_errors;
T_MPI_GRID* mpi_grid;
int num_diffusion_iterations;
int evenodd,evenodd_cell;
int max_value,min_value,hard_max,hard_min;
VECTOR<VECTOR<bool,2>,TV::dimension> solid_walls;
VECTOR<VECTOR<bool,2>,TV::dimension> mpi_boundary;
//#####################################################################
DIFFUSION_UNIFORM(T_MPI_GRID* mpi_grid_input);
~DIFFUSION_UNIFORM();
bool Is_MPI_Boundary(const RANGE<TV_INT>& inside_domain,const FACE_INDEX<TV::dimension>& face);
bool Is_MPI_Boundary(const RANGE<TV_INT>& inside_domain,const TV_INT& index);
void Cell_Diffusion_Value_Helper(FACE_ITERATOR& iterator,T_ARRAYS_T2& Z,ARRAY<bool,TV_INT>* inside);
void Cell_Diffusion_Sum_Helper(FACE_ITERATOR& iterator,ARRAY<T,TV_INT>& sum_jc_cell,T_ARRAYS_T2& Z,ARRAY<bool,TV_INT>* inside);
void Cell_Diffusion_Error_Helper(FACE_ITERATOR& iterator,ARRAY<T,TV_INT>& sum_jc_cell,T_ARRAYS_T2& Z,ARRAY<bool,TV_INT>* inside);
void Cell_Diffusion_Helper(FACE_ITERATOR& iterator,ARRAY<T,TV_INT>* sum_jc_cell,T_ARRAYS_T2& Z,ARRAY<bool,TV_INT>* inside);
void Face_Diffusion_Sum_Helper(const GRID<TV>& grid,FACE_INDEX<TV::dimension>& first_face_index,FACE_INDEX<TV::dimension>& second_face_index,ARRAY<T,FACE_INDEX<TV::dimension> >& sum_jc,T_FACE_ARRAYS_SCALAR& Z,ARRAY<bool,FACE_INDEX<TV::dimension> >* inside);
void Face_Diffusion_Helper(FACE_ITERATOR& iterator,int axis,ARRAY<T,FACE_INDEX<TV::dimension> >* sum_jc,T_FACE_ARRAYS_SCALAR& Z,ARRAY<bool,FACE_INDEX<TV::dimension> >* inside);
void Face_Diffusion_Helper(CELL_ITERATOR& iterator,int axis,ARRAY<T,FACE_INDEX<TV::dimension> >* sum_jc,T_FACE_ARRAYS_SCALAR& Z,ARRAY<bool,FACE_INDEX<TV::dimension> >* inside);
void Face_Diffusion(const T_GRID& grid,ARRAY<T,FACE_INDEX<TV::dimension> >* sum_jc,T_FACE_ARRAYS_SCALAR& Z,T_BOUNDARY& boundary,BOUNDARY_UNIFORM<T_GRID,T>* boundary_sum,ARRAY<bool,FACE_INDEX<TV::dimension> >* inside=0);
void Cell_Diffusion(const T_GRID& grid,T_ARRAYS_T2& Z,T_BOUNDARY_T2& boundary,ARRAY<T,TV_INT>* sum_jc_cell=0,BOUNDARY_UNIFORM<T_GRID,T>* boundary_sum=0,ARRAY<bool,TV_INT>* inside=0);
//#####################################################################
};
}
#endif
|
// fake database record for dbstatus
public class FakeDBStatus extends FakeRecord {
public FakeDBStatus(final String aiid, final String devid, final TrainingStatus trainingStatus) {
addString("aiid", aiid);
addString("dev_id", devid);
addString("training_status",
(trainingStatus == null) ? null : trainingStatus.value());
addString("server_endpoint", "endpoint");
addString("training_progress", "0.0");
addString("training_error", "0.0");
//addString("queue_time", "");
}
} |
// Repository: sauvikatinnofied/ExploringFuse
// This file was generated based on '/usr/local/share/uno/Packages/Fuse.Reactive/0.24.6/$.uno'.
// WARNING: Changes might be lost if you edit this file directly.
#pragma once
#include <Fuse.Reactive.IObserver.h>
#include <Uno.IDisposable.h>
#include <Uno.Object.h>
namespace g{namespace Fuse{namespace Reactive{struct AsyncObject;}}}
namespace g{namespace Fuse{namespace Reactive{struct Binding;}}}
namespace g{namespace Fuse{namespace Reactive{struct InternalPathSubscription;}}}
namespace g{
namespace Fuse{
namespace Reactive{
// internal sealed class InternalPathSubscription :2235
// {
struct InternalPathSubscription_type : uType
{
::g::Fuse::Reactive::IObserver interface0;
::g::Uno::IDisposable interface1;
};
InternalPathSubscription_type* InternalPathSubscription_typeof();
void InternalPathSubscription__ctor__fn(InternalPathSubscription* __this, ::g::Fuse::Reactive::Binding* b, uString* path, uDelegate* failCallback);
void InternalPathSubscription__ctor_1_fn(InternalPathSubscription* __this, ::g::Fuse::Reactive::Binding* b, uDelegate* failCallback);
void InternalPathSubscription__CombineKeyAndPath_fn(uString* key, uString* path, uString** __retval);
void InternalPathSubscription__Dispose_fn(InternalPathSubscription* __this);
void InternalPathSubscription__HandleNewDataContext_fn(InternalPathSubscription* __this, uObject* val);
void InternalPathSubscription__HandleObject_fn(InternalPathSubscription* __this, ::g::Fuse::Reactive::AsyncObject* obj);
void InternalPathSubscription__HandleObjectCallback_fn(InternalPathSubscription* __this, uObject* val);
void InternalPathSubscription__HandlePath_fn(InternalPathSubscription* __this, uObject* dc, uString* path);
void InternalPathSubscription__Init_fn(InternalPathSubscription* __this, uObject* dc, uString* path);
void InternalPathSubscription__New1_fn(::g::Fuse::Reactive::Binding* b, uString* path, uDelegate* failCallback, InternalPathSubscription** __retval);
void InternalPathSubscription__New2_fn(::g::Fuse::Reactive::Binding* b, uDelegate* failCallback, InternalPathSubscription** __retval);
void InternalPathSubscription__OnAdd_fn(InternalPathSubscription* __this, uObject* addedValue);
void InternalPathSubscription__OnFailed_fn(InternalPathSubscription* __this, uString* message);
void InternalPathSubscription__OnInsertAt_fn(InternalPathSubscription* __this, int* index, uObject* value);
void InternalPathSubscription__OnNewAll_fn(InternalPathSubscription* __this, int* length);
void InternalPathSubscription__OnNewAt_fn(InternalPathSubscription* __this, int* index, uObject* newValue);
void InternalPathSubscription__OnRemove_fn(InternalPathSubscription* __this, uObject* value, int* index);
void InternalPathSubscription__OnSet_fn(InternalPathSubscription* __this, uObject* value);
void InternalPathSubscription__TakeKeyFromPath_fn(uString** path, uString** __retval);
struct InternalPathSubscription : uObject
{
uStrong< ::g::Fuse::Reactive::Binding*> _b;
uStrong<uDelegate*> _failCallback;
bool _isDisposed;
bool _isInnerLink;
uStrong<uString*> _key;
uStrong<InternalPathSubscription*> _next;
uStrong<uObject*> _subscription;
uStrong<uString*> _unhandledPath;
void ctor_(::g::Fuse::Reactive::Binding* b, uString* path, uDelegate* failCallback);
void ctor_1(::g::Fuse::Reactive::Binding* b, uDelegate* failCallback);
void Dispose();
void HandleNewDataContext(uObject* val);
void HandleObject(::g::Fuse::Reactive::AsyncObject* obj);
void HandleObjectCallback(uObject* val);
void HandlePath(uObject* dc, uString* path);
void Init(uObject* dc, uString* path);
void OnAdd(uObject* addedValue);
void OnFailed(uString* message);
void OnInsertAt(int index, uObject* value);
void OnNewAll(int length);
void OnNewAt(int index, uObject* newValue);
void OnRemove(uObject* value, int index);
void OnSet(uObject* value);
static uString* CombineKeyAndPath(uString* key, uString* path);
static InternalPathSubscription* New1(::g::Fuse::Reactive::Binding* b, uString* path, uDelegate* failCallback);
static InternalPathSubscription* New2(::g::Fuse::Reactive::Binding* b, uDelegate* failCallback);
static uString* TakeKeyFromPath(uString** path);
};
// }
}}} // ::g::Fuse::Reactive
|
/*
* Delete any allocated memory and reset address_ to nullptr
*/
void resetAddress() {
if (isAllocated()) {
delete address_;
}
address_ = nullptr;
} |
def welless_check_decorator(func):
def inner(self, population):
for indiv in population.population:
if indiv.dead:
indiv.set_worst_score()
            else:
                # an individual with any dead block is itself dead
                for block in indiv.blocks:
                    if block.dead:
                        indiv.dead = True
                        indiv.set_worst_score()
                        break
        func(self, population)
return inner |
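
# Usage sketch (names illustrative): wrap an evaluator so dead individuals
# are given the worst score before the wrapped evaluation runs.
#
# class Evaluator:
#     @welless_check_decorator
#     def evaluate(self, population):
#         ...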
def add_timeseries(self, name, transcription, subset='all'):
self._timeseries[name] = {'transcription': transcription,
'subset': subset,
'outputs': {}} |
The ongoing release of emails from Huma Abedin demonstrates the cozy relationship the US State Department had with the Clinton Foundation/Clinton Global Initiative during the maladministration of Hillary Clinton. Another shoe dropped yesterday when Judicial Watch released an email indicating that officials of the Clinton Foundation/Clinton Global Initiative requested diplomatic passports.
Diplomatic passports, by law, are only issued to members of the US government or to private citizens serving on federal commissions like the American Battlefield Monuments Commission, and can only be used when traveling for official purposes AND representing the US government.
The diplomatic passport is issued to a Foreign Service officer or to a person having diplomatic status because he or she is traveling abroad to carry out diplomatic duties on behalf of the United States Government. As with an official passport, the diplomatic passport cannot be used for leisure travel. For this reason, diplomats are encouraged to obtain a regular passport before departing the United States for official duties.
There is another class of passport, the "official," which is issued to government employees who are traveling on official business but are not representing the diplomatic interests of the US government, and to family members of those traveling on official business. Diplomatic passports bring a lot of benefits — your baggage isn't inspected at customs, and you may go through a different security system before boarding an airplane — and perks. For instance, many airlines give free upgrades to diplomats; you can get consular plates for your car if you are staying abroad for a longer period; and you and your residence have diplomatic immunity.
Long story short, there is no conceivable set of circumstances under which Band and his merry men could qualify for diplomatic or even official US passports. But, as we know, in Clinton-world, rules are for the little people. Real players don't have to worry about chickensh** like rules.
State is saying that this request was tied to Bill Clinton's trip to North Korea to try to obtain the release of two American journalists and that the passports were never issued. One thing we have learned is that the State Department will tell any lie it feels necessary to protect the Clintons. As Bill Clinton's trip wasn't until August, it is hard to credit this as passing the laugh test.
While we don’t know if they received them, we do know that Abedin said she’d make it happen and the subject was not brought up again. Draw your own conclusions from that. |
package bg.softuni.pethotel.service.impl;
import bg.softuni.pethotel.model.entity.RoleEntity;
import bg.softuni.pethotel.model.entity.UserEntity;
import bg.softuni.pethotel.model.enums.RoleNameEnum;
import bg.softuni.pethotel.repository.UserRepository;
import bg.softuni.pethotel.service.CloudinaryService;
import bg.softuni.pethotel.service.RoleService;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import org.modelmapper.ModelMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.security.crypto.password.PasswordEncoder;
import java.util.List;
@SpringBootTest
class UserServiceImplTest {
@Autowired
private UserRepository userRepository;
PasswordEncoder passwordEncoder;
RoleService roleService;
ApplicationUserService applicationUserService;
CloudinaryService cloudinaryService;
UserServiceImpl service;
@BeforeEach
public void setUp() {
passwordEncoder = Mockito.mock(PasswordEncoder.class);
roleService = Mockito.mock(RoleServiceImpl.class);
applicationUserService = Mockito.mock(ApplicationUserService.class);
cloudinaryService = Mockito.mock(CloudinaryServiceImpl.class);
ModelMapper modelMapper = new ModelMapper();
service = new UserServiceImpl(
userRepository,
modelMapper,
passwordEncoder,
roleService,
applicationUserService,
cloudinaryService);
UserEntity user = new UserEntity()
.setEmail("<EMAIL>")
.setFirstName("Ivan")
.setLastName("Ivanov")
.setPassword("<PASSWORD>")
.addRole(new RoleEntity().setName(RoleNameEnum.USER))
.addRole(new RoleEntity().setName(RoleNameEnum.MODERATOR));
userRepository.save(user);
}
@AfterEach
public void tearDown() {
userRepository.deleteAll();
}
@Test
void emailExists_whenUserExists() {
Assertions.assertTrue(service.emailExists("<EMAIL>"));
}
@Test
void emailExists_whenUserDoNotExists() {
Assertions.assertFalse(service.emailExists("<EMAIL>"));
}
} |
/********************************************************************/
/* Set up T_mat */
/* 1) First set it up as a DMSR matrix. This is a bit dumb because */
/* a rectangular matrix doesn't have a diagonal. Anyway, DMSR */
/* This stores the diagonal of row j in Ke_val[j] and stores a */
/* pointer to row j's off-diagonal nonzeros in Ke_bindx[j] with */
/* column/nonzero entries in Ke_bindx[Ke_bindx[j]:Ke_bindx[j+1]-1] */
/* and Ke_val[Ke_bindx[j]:Ke_bindx[j+1]-1]. */
/* 2) Convert the matrix to CSR format. */
/* 3) call a modified Aztec routine to convert global columns to */
/* local indices and to make an ML matrix out of it. */
/* 4) Since the above routine does not compute a communication data*/
/* structure, we clone Knmat's communication structure (i.e. we */
/* assume that Tmat and Knmat have the same communication). */
/*------------------------------------------------------------------*/
ML_Operator *user_T_build(struct user_partition *Edge_Partition,
struct user_partition *Node_Partition,
ML_Operator *Kn_mat, ML_Comm *comm)
{
int nx, i, ii, jj, horv, Ncols, Nexterns;
int *Tmat_bindx;
double *Tmat_val;
ML_Operator *Tmat;
struct ML_CSR_MSRdata *csr_data;
struct aztec_context *aztec_context;
int global_id;
int Nlocal_nodes, Nlocal_edges;
int nz_ptr;
Nlocal_nodes = Node_Partition->Nlocal;
Nlocal_edges = Edge_Partition->Nlocal;
nx = (int) sqrt( ((double) Node_Partition->Nglobal) + .00001);
Tmat_bindx = (int *) malloc((3*Nlocal_edges+1)*sizeof(int));
Tmat_val = (double *) malloc((3*Nlocal_edges+1)*sizeof(double));
Tmat_bindx[0] = Nlocal_edges + 1;
for (i = 0; i < Nlocal_edges; i++) {
global_id = (Edge_Partition->my_global_ids)[i];
Tmat_val[i] = 0.0;
invindex(global_id, &ii, &jj, nx, &horv);
nz_ptr = Tmat_bindx[i];
ii--;
if (horv == HORIZONTAL) {
if(ii != -1) {
Tmat_bindx[nz_ptr] = southwest(ii,jj,nx); Tmat_val[nz_ptr++] = -1.;
}
Tmat_bindx[nz_ptr] = southeast(ii,jj,nx); Tmat_val[nz_ptr++] = 1.;
}
else {
if (ii == -1) ii = nx-1;
Tmat_bindx[nz_ptr] = northwest(ii,jj,nx); Tmat_val[nz_ptr++] = -1.;
if (jj != 0) {
Tmat_bindx[nz_ptr] = southwest(ii,jj,nx); Tmat_val[nz_ptr++] = 1.;}
}
Tmat_bindx[i+1] = nz_ptr;
}
csr_data = (struct ML_CSR_MSRdata *) ML_allocate(sizeof(struct ML_CSR_MSRdata));
csr_data->columns = Tmat_bindx;
csr_data->values = Tmat_val;
ML_MSR2CSR(csr_data, Nlocal_edges, &Ncols);
aztec_context = (struct aztec_context *) Kn_mat->data;
Nexterns = (aztec_context->Amat->data_org)[AZ_N_external];
AZ_Tmat_transform2ml(Nexterns, Node_Partition->needed_external_ids,
reordered_node_externs,
Tmat_bindx, Tmat_val, csr_data->rowptr, Nlocal_nodes,
Node_Partition->my_global_ids,
comm, Nlocal_edges, &Tmat);
ML_free(csr_data);
Tmat->data_destroy = ML_CSR_MSRdata_Destroy;
ML_CommInfoOP_Clone(&(Tmat->getrow->pre_comm), Kn_mat->getrow->pre_comm);
return(Tmat);
} |
# Intro to Python for Data Science/Functions and Packages/string-methods.py
'''
String Methods
100xp
Strings come with a bunch of methods. Follow the instructions closely to discover some of them. If
you want to discover them in more detail, you can always type help(str) in the IPython Shell.
A string room has already been created for you to experiment with.
Instructions
-Use the upper() method on room and store the result in room_up. Use the dot notation.
-Print out room and room_up. Did both change?
-Print out the number of o's on the variable room by calling count() on room and passing the
letter "o" as an input to the method. We're talking about the variable room, not the word "room"!
'''
# string to experiment with: room
room = "poolhouse"
# Use upper() on room: room_up
room_up = room.upper()
# Print out room and room_up
print(room)
print(room_up)
# Print out the number of o's in room
print(room.count('o')) |
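# Note: "poolhouse" contains three o's, and upper() returns a new string,
# so room itself is unchanged by the call above.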
// src/containers/Home/TweetsPage.tsx
import React, { useMemo } from 'react';
import styled from 'styled-components';
import Box from '@material-ui/core/Box';
import Button from 'react-bootstrap/Button'
import { MetaInfo } from '../../components';
import { RoutesConfig } from '../../config/routes.config';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { FeatureInfoConfig, FeatureInfo } from '../../config/features.config';
import axios from 'axios'
import { CommunityStats, Individual, IndividualStats, MyContributions, CustomLegend, Categories} from './components';
import * as am4core from "@amcharts/amcharts4/core";
import * as am4charts from "@amcharts/amcharts4/charts";
import am4themes_animated from "@amcharts/amcharts4/themes/animated";
import { Tweet} from 'react-twitter-widgets'
import Card from 'react-bootstrap/Card';
import SideBar from '../../components/SideBar';
const Grid = styled.div`
&&& {
display: grid;
grid-column-gap: 0px;
grid-template-columns: auto 1px;
}`
const Space = styled.p`
&&& {
padding: 0em;
margin-bottom: 1.4em;
}`
const Sticky = styled.div`
&&& {
position: -webkit-sticky; /* Safari */
position: sticky;
top: 50%;
left: 75%;
max-height: 300px;
max-width: 250px;
}`
const Test = styled.div`
&&& {
position: -webkit-sticky; /* Safari */
position: sticky;
top: 2%;
left: 95%;
max-height: 300px;
max-width: 250px;
background-color: lightblue;
}`
const Title = styled.p`
&&& {
font-size: 30px;
font-weight:bold;
}`
const Subtitle = styled.p`
&&& {
padding: .05em;
font-size: 24px;
}`
const Home: React.FC = () => {
const featureList = useMemo<FeatureInfo[]>(() => {
return Object.keys(FeatureInfoConfig).map((key) => FeatureInfoConfig[key]);
}, []);
  function CSVToArray(strData: string, strDelimiter?: string): string[][] {
// Check to see if the delimiter is defined. If not,
// then default to comma.
strDelimiter = (strDelimiter || ",");
// Create a regular expression to parse the CSV values.
var objPattern = new RegExp(
(
// Delimiters.
"(\\" + strDelimiter + "|\\r?\\n|\\r|^)" +
// Quoted fields.
"(?:\"([^\"]*(?:\"\"[^\"]*)*)\"|" +
// Standard fields.
"([^\"\\" + strDelimiter + "\\r\\n]*))"
),
"gi"
);
// Create an array to hold our data. Give the array
// a default empty first row.
    var arrData: string[][] = [[]];
// Create an array to hold our individual pattern
// matching groups.
    var arrMatches: RegExpExecArray | null = null;
// Keep looping over the regular expression matches
// until we can no longer find a match.
    while ((arrMatches = objPattern.exec(strData)) !== null) {
// Get the delimiter that was found.
var strMatchedDelimiter = arrMatches[ 1 ];
// Check to see if the given delimiter has a length
// (is not the start of string) and if it matches
// field delimiter. If id does not, then we know
// that this delimiter is a row delimiter.
if (
strMatchedDelimiter.length &&
strMatchedDelimiter !== strDelimiter
){
// Since we have reached a new row of data,
// add an empty row to our data array.
arrData.push( [] );
}
      var strMatchedValue: string;
// Now that we have our delimiter out of the way,
// let's check to see which kind of value we
// captured (quoted or unquoted).
if (arrMatches[ 2 ]){
// We found a quoted value. When we capture
// this value, unescape any double quotes.
strMatchedValue = arrMatches[ 2 ].replace(
new RegExp( "\"\"", "g" ),
"\""
);
} else {
// We found a non-quoted value.
strMatchedValue = arrMatches[ 3 ];
}
// Now that we have our value string, let's add
// it to the data array.
arrData[ arrData.length - 1 ].push( strMatchedValue );
}
// Return the parsed data.
return( arrData );
}
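  // Example: CSVToArray('a,b\n"c,d",e') yields [['a', 'b'], ['c,d', 'e']].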
return (
<div className='view-wrapper'>
<MetaInfo {...RoutesConfig.Home.metaInfo} />
<section className='container dashboard-content'>
<h1>Test</h1>
</section>
</div>
);
};
export default Home;
|
import pickle
from functools import partial
from typing import Tuple

# ParameterSpace, ContinuousParameter, UserFunctionWrapper,
# get_default_architecture and objective_function come from the surrounding
# benchmark module.
def meta_fcnet(fname_objective: str, fname_cost: str, noise: bool = True) -> Tuple[UserFunctionWrapper, ParameterSpace]:
parameter_space = ParameterSpace(
[
ContinuousParameter("lr", 0, 1),
ContinuousParameter("batch_size", 0, 1),
ContinuousParameter("n_units_1", 0, 1),
ContinuousParameter("n_units_2", 0, 1),
ContinuousParameter("dropout_1", 0, 1),
ContinuousParameter("dropout_2", 0, 1),
]
)
data = pickle.load(open(fname_objective, "rb"))
x_mean_objective = data["x_mean"]
x_std_objective = data["x_std"]
task_feature_objective = data["task_feature"]
objective = get_default_architecture(x_mean_objective.shape[0], classification=True).float()
objective.load_state_dict(data["state_dict"])
data = pickle.load(open(fname_cost, "rb"))
x_mean_cost = data["x_mean"]
x_std_cost = data["x_std"]
y_mean_cost = data["y_mean"]
y_std_cost = data["y_std"]
task_feature_cost = data["task_feature"]
cost = get_default_architecture(x_mean_cost.shape[0]).float()
cost.load_state_dict(data["state_dict"])
f = partial(
objective_function,
model_objective=objective,
model_cost=cost,
task_feature_objective=task_feature_objective,
task_feature_cost=task_feature_cost,
x_mean_objective=x_mean_objective,
x_std_objective=x_std_objective,
x_mean_cost=x_mean_cost,
x_std_cost=x_std_cost,
y_mean_objective=None,
y_std_objective=None,
y_mean_cost=y_mean_cost,
y_std_cost=y_std_cost,
log_objective=False,
with_noise=noise,
)
return f, parameter_space |
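
# Usage sketch (paths illustrative):
#   f, space = meta_fcnet("fcnet_objective.pkl", "fcnet_cost.pkl", noise=False)
#   y = f(space.sample_uniform(1))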
from base64 import b64decode


def deliver_image_gif():
    return b64decode("""
R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==
""") |
/*
* V4L2 buffer helper functions.
*
* Copyright (C) 2017 Alexis Ballier <[email protected]>
* Copyright (C) 2017 Jorge Ramirez <[email protected]>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_context.h"
#include "v4l2_buffers.h"
#include "v4l2_m2m.h"
#define USEC_PER_SEC 1000000
static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
{
return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
container_of(buf->context, V4L2m2mContext, output) :
container_of(buf->context, V4L2m2mContext, capture);
}
static inline AVCodecContext *logger(V4L2Buffer *buf)
{
return buf_to_m2mctx(buf)->avctx;
}
static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
{
V4L2m2mContext *s = buf_to_m2mctx(out);
AVRational v4l2_timebase = { 1, USEC_PER_SEC };
int64_t v4l2_pts;
if (pts == AV_NOPTS_VALUE)
pts = 0;
/* convert pts to v4l2 timebase */
v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
}
static inline uint64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
V4L2m2mContext *s = buf_to_m2mctx(avbuf);
AVRational v4l2_timebase = { 1, USEC_PER_SEC };
int64_t v4l2_pts;
/* convert pts back to encoder timebase */
v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
avbuf->buf.timestamp.tv_usec;
return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
}
static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
enum v4l2_ycbcr_encoding ycbcr;
enum v4l2_colorspace cs;
cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.colorspace :
buf->context->format.fmt.pix.colorspace;
ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.ycbcr_enc:
buf->context->format.fmt.pix.ycbcr_enc;
switch(ycbcr) {
case V4L2_YCBCR_ENC_XV709:
case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
case V4L2_YCBCR_ENC_XV601:
case V4L2_YCBCR_ENC_601:return AVCOL_PRI_BT470M;
default:
break;
}
switch(cs) {
case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
default:
break;
}
return AVCOL_PRI_UNSPECIFIED;
}
static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
enum v4l2_quantization qt;
qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.quantization :
buf->context->format.fmt.pix.quantization;
switch (qt) {
case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
default:
break;
}
return AVCOL_RANGE_UNSPECIFIED;
}
static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
enum v4l2_ycbcr_encoding ycbcr;
enum v4l2_colorspace cs;
cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.colorspace :
buf->context->format.fmt.pix.colorspace;
ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.ycbcr_enc:
buf->context->format.fmt.pix.ycbcr_enc;
switch(cs) {
case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
case V4L2_COLORSPACE_BT2020:
if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
return AVCOL_SPC_BT2020_CL;
else
return AVCOL_SPC_BT2020_NCL;
default:
break;
}
return AVCOL_SPC_UNSPECIFIED;
}
static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
enum v4l2_ycbcr_encoding ycbcr;
enum v4l2_xfer_func xfer;
enum v4l2_colorspace cs;
cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.colorspace :
buf->context->format.fmt.pix.colorspace;
ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.ycbcr_enc:
buf->context->format.fmt.pix.ycbcr_enc;
xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
buf->context->format.fmt.pix_mp.xfer_func:
buf->context->format.fmt.pix.xfer_func;
switch (xfer) {
case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
default:
break;
}
switch (cs) {
case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
default:
break;
}
switch (ycbcr) {
case V4L2_YCBCR_ENC_XV709:
case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
default:
break;
}
return AVCOL_TRC_UNSPECIFIED;
}
static void v4l2_free_buffer(void *opaque, uint8_t *unused)
{
V4L2Buffer* avbuf = opaque;
V4L2m2mContext *s = buf_to_m2mctx(avbuf);
if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);
if (s->reinit) {
if (!atomic_load(&s->refcount))
sem_post(&s->refsync);
} else {
if (s->draining) {
/* no need to queue more buffers to the driver */
avbuf->status = V4L2BUF_AVAILABLE;
}
else if (avbuf->context->streamon)
ff_v4l2_buffer_enqueue(avbuf);
}
av_buffer_unref(&avbuf->context_ref);
}
}
static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
{
V4L2m2mContext *s = buf_to_m2mctx(in);
if (plane >= in->num_planes)
return AVERROR(EINVAL);
    /* even though most encoders return 0 in data_offset, encoding vp8 does require this value */
*buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
in->plane_info[plane].length, v4l2_free_buffer, in, 0);
if (!*buf)
return AVERROR(ENOMEM);
if (in->context_ref)
atomic_fetch_add(&in->context_refcount, 1);
else {
in->context_ref = av_buffer_ref(s->self_ref);
if (!in->context_ref) {
av_buffer_unref(buf);
return AVERROR(ENOMEM);
}
in->context_refcount = 1;
}
in->status = V4L2BUF_RET_USER;
atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);
return 0;
}
static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, AVBufferRef* bref)
{
unsigned int bytesused, length;
if (plane >= out->num_planes)
return AVERROR(EINVAL);
bytesused = FFMIN(size, out->plane_info[plane].length);
length = out->plane_info[plane].length;
memcpy(out->plane_info[plane].mm_addr, data, bytesused);
if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
out->planes[plane].bytesused = bytesused;
out->planes[plane].length = length;
} else {
out->buf.bytesused = bytesused;
out->buf.length = length;
}
return 0;
}
/******************************************************************************
*
 * V4L2Buffer interface
*
******************************************************************************/
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer* out)
{
int i, ret;
for(i = 0; i < out->num_planes; i++) {
ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
if (ret)
return ret;
}
v4l2_set_pts(out, frame->pts);
return 0;
}
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
V4L2m2mContext *s = buf_to_m2mctx(avbuf);
int i, ret;
av_frame_unref(frame);
/* 1. get references to the actual data */
for (i = 0; i < avbuf->num_planes; i++) {
ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
if (ret)
return ret;
frame->linesize[i] = avbuf->plane_info[i].bytesperline;
frame->data[i] = frame->buf[i]->data;
}
/* 1.1 fixup special cases */
switch (avbuf->context->av_pix_fmt) {
case AV_PIX_FMT_NV12:
if (avbuf->num_planes > 1)
break;
frame->linesize[1] = avbuf->plane_info[0].bytesperline;
frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
break;
default:
break;
}
/* 2. get frame information */
frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
frame->format = avbuf->context->av_pix_fmt;
frame->color_primaries = v4l2_get_color_primaries(avbuf);
frame->colorspace = v4l2_get_color_space(avbuf);
frame->color_range = v4l2_get_color_range(avbuf);
frame->color_trc = v4l2_get_color_trc(avbuf);
frame->pts = v4l2_get_pts(avbuf);
/* these two values are updated also during re-init in v4l2_process_driver_event */
frame->height = s->output.height;
frame->width = s->output.width;
/* 3. report errors upstream */
if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
}
return 0;
}
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
int ret;
av_packet_unref(pkt);
ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
if (ret)
return ret;
pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
pkt->data = pkt->buf->data;
if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
pkt->flags |= AV_PKT_FLAG_KEY;
if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
av_log(logger(avbuf), AV_LOG_ERROR, "%s driver encode error\n", avbuf->context->name);
pkt->flags |= AV_PKT_FLAG_CORRUPT;
}
pkt->dts = pkt->pts = v4l2_get_pts(avbuf);
return 0;
}
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
int ret;
ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, pkt->buf);
if (ret)
return ret;
v4l2_set_pts(out, pkt->pts);
if (pkt->flags & AV_PKT_FLAG_KEY)
out->flags = V4L2_BUF_FLAG_KEYFRAME;
return 0;
}
int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
{
V4L2Context *ctx = avbuf->context;
int ret, i;
avbuf->buf.memory = V4L2_MEMORY_MMAP;
avbuf->buf.type = ctx->type;
avbuf->buf.index = index;
if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
avbuf->buf.length = VIDEO_MAX_PLANES;
avbuf->buf.m.planes = avbuf->planes;
}
ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
if (ret < 0)
return AVERROR(errno);
if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        /* in MP, the V4L2 API states that buf.length means num_planes;
         * stop at the first zero-length plane (the original open-ended loop
         * would spin forever on one) */
        while (avbuf->num_planes < avbuf->buf.length &&
               avbuf->buf.m.planes[avbuf->num_planes].length)
            avbuf->num_planes++;
} else
avbuf->num_planes = 1;
for (i = 0; i < avbuf->num_planes; i++) {
avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
ctx->format.fmt.pix.bytesperline;
if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
PROT_READ | PROT_WRITE, MAP_SHARED,
buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
} else {
avbuf->plane_info[i].length = avbuf->buf.length;
avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
}
if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
return AVERROR(ENOMEM);
}
avbuf->status = V4L2BUF_AVAILABLE;
if (V4L2_TYPE_IS_OUTPUT(ctx->type))
return 0;
if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
avbuf->buf.m.planes = avbuf->planes;
avbuf->buf.length = avbuf->num_planes;
} else {
avbuf->buf.bytesused = avbuf->planes[0].bytesused;
avbuf->buf.length = avbuf->planes[0].length;
}
return ff_v4l2_buffer_enqueue(avbuf);
}
int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf)
{
int ret;
avbuf->buf.flags = avbuf->flags;
ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
if (ret < 0)
return AVERROR(errno);
avbuf->status = V4L2BUF_IN_DRIVER;
return 0;
}
|
/*
==================================================================================
Copyright (c) 2019 AT&T Intellectual Property.
Copyright (c) 2019 Nokia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================================
*/
package control
/*
#include <stdlib.h>
#include <e2ap/wrapper.h>
#cgo LDFLAGS: -le2apwrapper
#cgo CFLAGS: -I/usr/local/include/e2ap
*/
import "C"
import (
"errors"
"unsafe"
)
type E2ap struct {
}
/* RICsubscriptionRequest */
func (c *E2ap) GetSubscriptionRequestSequenceNumber(payload []byte) (subId uint16, err error) {
cptr := unsafe.Pointer(&payload[0])
cret := C.e2ap_get_ric_subscription_request_sequence_number(cptr, C.size_t(len(payload)))
if cret < 0 {
return 0, errors.New("e2ap wrapper is unable to get Subscirption Request Sequence Number due to wrong or invalid payload")
}
subId = uint16(cret)
return
}
func (c *E2ap) SetSubscriptionRequestSequenceNumber(payload []byte, newSubscriptionid uint16) (newPayload []byte, err error) {
cptr := unsafe.Pointer(&payload[0])
size := C.e2ap_set_ric_subscription_request_sequence_number(cptr, C.size_t(len(payload)), C.long(newSubscriptionid))
if size < 0 {
return make([]byte, 0), errors.New("e2ap wrapper is unable to set Subscription Request Sequence Number due to wrong or invalid payload")
}
newPayload = C.GoBytes(cptr, (C.int(size)+7)/8)
return
}
func (c *E2ap) SetSubscriptionRequestPayload(payload []byte, ricRequestorID uint16, ricRequestSequenceNumber uint16, ranFunctionID uint16, eventTriggerDefinition []byte, eventTriggerDefinitionSize int, actionCount int, actionIds []int64, actionTypes []int64, actionDefinitions []ActionDefinition, subsequentActions []SubsequentAction) (newPayload []byte, err error) {
cptr := unsafe.Pointer(&payload[0])
eventTrigger := unsafe.Pointer(&eventTriggerDefinition[0])
actIds := unsafe.Pointer(&actionIds[0])
actTypes := unsafe.Pointer(&actionTypes[0])
count := len(actionDefinitions)
actDefs := (*C.RICactionDefinition)(C.calloc(C.size_t(len(actionDefinitions)), C.sizeof_RICactionDefinition))
for index := 0; index < count; index++ {
// Take a pointer into the C array; copying the element by value would
// discard the assignments below.
ptr := (*C.RICactionDefinition)(unsafe.Pointer((uintptr)(unsafe.Pointer(actDefs)) + (uintptr)(C.sizeof_RICactionDefinition*C.int(index))))
ptr.size = C.int(actionDefinitions[index].Size)
if ptr.size != 0 {
ptr.actionDefinition = (*C.uint8_t)(C.CBytes(actionDefinitions[index].Buf))
}
}
defer C.free(unsafe.Pointer(actDefs))
count = len(subsequentActions)
subActs := (*C.RICSubsequentAction)(C.calloc(C.size_t(len(subsequentActions)), C.sizeof_RICSubsequentAction))
for index := 0; index < count; index++ {
// As above, keep a pointer into the C array rather than a value copy.
ptr := (*C.RICSubsequentAction)(unsafe.Pointer((uintptr)(unsafe.Pointer(subActs)) + (uintptr)(C.sizeof_RICSubsequentAction*C.int(index))))
ptr.isValid = C.int(subsequentActions[index].IsValid)
ptr.subsequentActionType = C.long(subsequentActions[index].SubsequentActionType)
ptr.timeToWait = C.long(subsequentActions[index].TimeToWait)
}
defer C.free(unsafe.Pointer(subActs))
size := C.e2ap_encode_ric_subscription_request_message(cptr, C.size_t(len(payload)), C.long(ricRequestorID), C.long(ricRequestSequenceNumber), C.long(ranFunctionID), eventTrigger, C.size_t(eventTriggerDefinitionSize), C.int(actionCount), (*C.long)(actIds), (*C.long)(actTypes), actDefs, subActs)
if size < 0 {
return make([]byte, 0), errors.New("e2ap wrapper is unable to set Subscription Request Payload due to wrong or invalid payload")
}
newPayload = C.GoBytes(cptr, (C.int(size)+7)/8)
return
}
/* RICsubscriptionResponse */
func (c *E2ap) GetSubscriptionResponseSequenceNumber(payload []byte) (subId uint16, err error) {
cptr := unsafe.Pointer(&payload[0])
cret := C.e2ap_get_ric_subscription_response_sequence_number(cptr, C.size_t(len(payload)))
if cret < 0 {
return 0, errors.New("e2ap wrapper is unable to get Subscirption Response Sequence Number due to wrong or invalid payload")
}
subId = uint16(cret)
return
}
func (c *E2ap) SetSubscriptionResponseSequenceNumber(payload []byte, newSubscriptionid uint16) (newPayload []byte, err error) {
cptr := unsafe.Pointer(&payload[0])
size := C.e2ap_set_ric_subscription_response_sequence_number(cptr, C.size_t(len(payload)), C.long(newSubscriptionid))
if size < 0 {
return make([]byte, 0), errors.New("e2ap wrapper is unable to set Subscription Response Sequence Number due to wrong or invalid payload")
}
newPayload = C.GoBytes(cptr, (C.int(size)+7)/8)
return
}
func (c *E2ap) GetSubscriptionResponseMessage(payload []byte) (decodedMsg *DecodedSubscriptionResponseMessage, err error) {
cptr := unsafe.Pointer(&payload[0])
decodedMsg = &DecodedSubscriptionResponseMessage{}
decodedCMsg := C.e2ap_decode_ric_subscription_response_message(cptr, C.size_t(len(payload)))
defer C.free(unsafe.Pointer(decodedCMsg))
if decodedCMsg == nil {
return decodedMsg, errors.New("e2ap wrapper is unable to decode subscription response message due to wrong or invalid payload")
}
decodedMsg.RequestID = int32(decodedCMsg.requestorID)
decodedMsg.RequestSequenceNumber = int32(decodedCMsg.requestSequenceNumber)
decodedMsg.FuncID = int32(decodedCMsg.ranfunctionID)
admittedCount := int(decodedCMsg.ricActionAdmittedList.count)
for index := 0; index < admittedCount; index++ {
decodedMsg.ActionAdmittedList.ActionID[index] = int32(decodedCMsg.ricActionAdmittedList.ricActionID[index])
}
decodedMsg.ActionAdmittedList.Count = admittedCount
notAdmittedCount := int(decodedCMsg.ricActionNotAdmittedList.count)
for index := 0; index < notAdmittedCount; index++ {
decodedMsg.ActionNotAdmittedList.ActionID[index] = int32(decodedCMsg.ricActionNotAdmittedList.ricActionID[index])
decodedMsg.ActionNotAdmittedList.Cause[index].CauseType = int32(decodedCMsg.ricActionNotAdmittedList.ricCause[index].ricCauseType)
decodedMsg.ActionNotAdmittedList.Cause[index].CauseID = int32(decodedCMsg.ricActionNotAdmittedList.ricCause[index].ricCauseID)
}
decodedMsg.ActionNotAdmittedList.Count = notAdmittedCount
return
}
/* RICsubscriptionFailure */
func (c *E2ap) GetSubscriptionFailureSequenceNumber(payload []byte) (subId uint16, err error) {
cptr := unsafe.Pointer(&payload[0])
cret := C.e2ap_get_ric_subscription_failure_sequence_number(cptr, C.size_t(len(payload)))
if cret < 0 {
return 0, errors.New("e2ap wrapper is unable to get Subscirption Failure Sequence Number due to wrong or invalid payload")
}
subId = uint16(cret)
return
}
/* RICsubscriptionDeleteRequest */
func (c *E2ap) GetSubscriptionDeleteRequestSequenceNumber(payload []byte) (subId uint16, err error) {
cptr := unsafe.Pointer(&payload[0])
cret := C.e2ap_get_ric_subscription_delete_request_sequence_number(cptr, C.size_t(len(payload)))
if cret < 0 {
return 0, errors.New("e2ap wrapper is unable to get Subscirption Delete Request Sequence Number due to wrong or invalid payload")
}
subId = uint16(cret)
return
}
func (c *E2ap) SetSubscriptionDeleteRequestSequenceNumber(payload []byte, newSubscriptionid uint16) (newPayload []byte, err error) {
cptr := unsafe.Pointer(&payload[0])
size := C.e2ap_set_ric_subscription_delete_request_sequence_number(cptr, C.size_t(len(payload)), C.long(newSubscriptionid))
if size < 0 {
return make([]byte, 0), errors.New("e2ap wrapper is unable to set Subscription Delete Request Sequence Number due to wrong or invalid payload")
}
newPayload = C.GoBytes(cptr, (C.int(size)+7)/8)
return
}
func (c *E2ap) SetSubscriptionDeleteRequestPayload(payload []byte, ricRequestorID uint16, ricRequestSequenceNumber uint16, ranFunctionID uint16) (newPayload []byte, err error) {
cptr := unsafe.Pointer(&payload[0])
size := C.e2ap_encode_ric_subscription_delete_request_message(cptr, C.size_t(len(payload)), C.long(ricRequestorID), C.long(ricRequestSequenceNumber), C.long(ranFunctionID))
if size < 0 {
return make([]byte, 0), errors.New("e2ap wrapper is unable to set Subscription Delete Request Payload due to wrong or invalid payload")
}
newPayload = C.GoBytes(cptr, (C.int(size)+7)/8)
return
}
/* RICsubscriptionDeleteResponse */
func (c *E2ap) GetSubscriptionDeleteResponseSequenceNumber(payload []byte) (subId uint16, err error) {
cptr := unsafe.Pointer(&payload[0])
cret := C.e2ap_get_ric_subscription_delete_response_sequence_number(cptr, C.size_t(len(payload)))
if cret < 0 {
return 0, errors.New("e2ap wrapper is unable to get Subscirption Delete Response Sequence Number due to wrong or invalid payload")
}
subId = uint16(cret)
return
}
func (c *E2ap) SetSubscriptionDeleteResponseSequenceNumber(payload []byte, newSubscriptionid uint16) (newPayload []byte, err error) {
cptr := unsafe.Pointer(&payload[0])
size := C.e2ap_set_ric_subscription_delete_response_sequence_number(cptr, C.size_t(len(payload)), C.long(newSubscriptionid))
if size < 0 {
return make([]byte, 0), errors.New("e2ap wrapper is unable to set Subscription Delete Response Sequence Number due to wrong or invalid payload")
}
newPayload = C.GoBytes(cptr, (C.int(size)+7)/8)
return
}
/* RICsubscriptionDeleteFailure */
func (c *E2ap) GetSubscriptionDeleteFailureSequenceNumber(payload []byte) (subId uint16, err error) {
cptr := unsafe.Pointer(&payload[0])
cret := C.e2ap_get_ric_subscription_delete_failure_sequence_number(cptr, C.size_t(len(payload)))
if cret < 0 {
return 0, errors.New("e2ap wrapper is unable to get Subscirption Failure Sequence Number due to wrong or invalid payload")
}
subId = uint16(cret)
return
}
/* RICindication */
func (c *E2ap) GetIndicationMessage(payload []byte) (decodedMsg *DecodedIndicationMessage, err error) {
cptr := unsafe.Pointer(&payload[0])
decodedMsg = &DecodedIndicationMessage{}
decodedCMsg := C.e2ap_decode_ric_indication_message(cptr, C.size_t(len(payload)))
if decodedCMsg == nil {
return decodedMsg, errors.New("e2ap wrapper is unable to decode indication message due to wrong or invalid payload")
}
defer C.e2ap_free_decoded_ric_indication_message(decodedCMsg)
decodedMsg.RequestID = int32(decodedCMsg.requestorID)
decodedMsg.RequestSequenceNumber = int32(decodedCMsg.requestSequenceNumber)
decodedMsg.FuncID = int32(decodedCMsg.ranfunctionID)
decodedMsg.ActionID = int32(decodedCMsg.actionID)
decodedMsg.IndSN = int32(decodedCMsg.indicationSN)
decodedMsg.IndType = int32(decodedCMsg.indicationType)
indhdr := unsafe.Pointer(decodedCMsg.indicationHeader)
decodedMsg.IndHeader = C.GoBytes(indhdr, C.int(decodedCMsg.indicationHeaderSize))
decodedMsg.IndHeaderLength = int32(decodedCMsg.indicationHeaderSize)
indmsg := unsafe.Pointer(decodedCMsg.indicationMessage)
decodedMsg.IndMessage = C.GoBytes(indmsg, C.int(decodedCMsg.indicationMessageSize))
decodedMsg.IndMessageLength = int32(decodedCMsg.indicationMessageSize)
callproc := unsafe.Pointer(decodedCMsg.callProcessID)
decodedMsg.CallProcessID = C.GoBytes(callproc, C.int(decodedCMsg.callProcessIDSize))
decodedMsg.CallProcessIDLength = int32(decodedCMsg.callProcessIDSize)
return
}
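// Usage sketch (not part of the original file): rewrite the sequence number
// of an already ASN.1-encoded RICsubscriptionRequest. The surrounding
// plumbing and the "+1" allocation policy are assumptions for illustration.
func exampleRewriteSubscriptionID(e2ap *E2ap, payload []byte) ([]byte, error) {
	oldID, err := e2ap.GetSubscriptionRequestSequenceNumber(payload)
	if err != nil {
		return nil, err
	}
	// Re-encode the message with a new, locally chosen subscription ID.
	return e2ap.SetSubscriptionRequestSequenceNumber(payload, oldID+1)
}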
|
/**
* Instance information in Get Application Instance response.
*/
@JsonDeserialize
@Value.Immutable
abstract class _ApplicationInstanceInfo {
/**
* The console IP
*/
@JsonProperty("console_ip")
@Nullable
abstract String getConsoleIp();
/**
* The console port
*/
@JsonProperty("console_port")
@Nullable
abstract Integer getConsolePort();
/**
* The debug IP
*/
@JsonProperty("debug_ip")
@Nullable
abstract String getDebugIp();
/**
* The debug port
*/
@JsonProperty("debug_port")
@Nullable
abstract Integer getDebugPort();
/**
* The details
*/
@JsonProperty("details")
@Nullable
abstract String getDetails();
/**
* The since
*/
@JsonProperty("since")
@Nullable
abstract Double getSince();
/**
* The state
*/
@JsonProperty("state")
@Nullable
abstract String getState();
/**
 * The uptime
*/
@JsonProperty("uptime")
@Nullable
abstract Long getUptime();
} |
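// Illustrative payload of the shape this class deserializes (all values
// are made up; field names come from the @JsonProperty annotations above):
//
//   {
//     "console_ip": "192.0.2.10",
//     "console_port": 9443,
//     "debug_ip": "192.0.2.10",
//     "debug_port": 9444,
//     "details": "",
//     "since": 1585741622.0,
//     "state": "RUNNING",
//     "uptime": 9042
//   }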
/**
* Create a new message, which will be the visible one.
*/
public void add(final String message) {
BroadcastMessage broadcastMessage = new BroadcastMessageImpl(message);
super.create(broadcastMessage);
} |
/**
* Checks if this reportable produces a report.
*
* @return boolean : true if report will be produced, false otherwise
*/
public boolean reportIsOn() {
return _reportMode;
} |
/**
* Starts the associated services.
*
* @throws Exception
*/
protected synchronized void startServices() throws Exception {
if (getLogService() != null) {
getLogService().start();
}
if (getStatusService() != null) {
getStatusService().start();
}
} |
<reponame>dmgerman/camel
## ------------------------------------------------------------------------
## Licensed to the Apache Software Foundation (ASF) under one or more
## contributor license agreements. See the NOTICE file distributed with
## this work for additional information regarding copyright ownership.
## The ASF licenses this file to You under the Apache License, Version 2.0
## (the "License"); you may not use this file except in compliance with
## the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## ------------------------------------------------------------------------
package ${package};

import java.util.Map;

import org.apache.camel.CamelContext;
import org.apache.camel.Endpoint;
import org.apache.camel.support.DefaultComponent;

/**
 * Represents the component that manages {@link ${name}Endpoint}.
 */
public class ${name}Component extends DefaultComponent {

    protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
        Endpoint endpoint = new ${name}Endpoint(uri, this);
        setProperties(endpoint, parameters);
        return endpoint;
    }
}
|
// This is the base router, which provides a few generic utility endpoints
import { Request, Response, Router } from "express";
import { ImATeapot } from "http-errors";
import { join } from "path";
import { IPackage } from "../interfaces";
export class HomeRouter {
public router: Router = Router();
constructor() {
/**
* Responds with project metadata
* This endpoint is useful to interrogate the status and version of the running service
*/
this.router.all("/", (req: Request, res: Response): void => {
const metadata: IPackage = require(join("..", "..", "package.json"));
res.jsonp({
data: {
author: metadata.author,
description: metadata.description,
name: metadata.name,
version: metadata.version,
},
});
});
/**
* Responds with HTTP I'm A Teapot Error
*/
this.router.all("/teapot", (req: Request, res: Response): void => {
throw new ImATeapot();
});
/**
 * Throws a native Error
 * Should log the native Error and return a Server Error
*/
this.router.all("/error", (req: Request, res: Response): void => {
throw new Error("Danger <NAME>");
});
}
}
|
/**
 * Reads an INT value from the receive buffer.
 * The result is an INT32 value. A leading '+' or '-' selects decimal
 * parsing; without a sign character the digits are read as hexadecimal.
 */
INT32 decodeINT32(const std::string& rxData)
{
INT32 value = 0;
INT32 tempVal;
INT32 factor = 1;
INT32 baseFactor = 10;
INT32 sign = 1;
UINT16 digits;
UINT16 offset = 0;
const std::string number = rxData;
if (number.at(0) == '+')
{
offset = 1;
}
else if (number.at(0) == '-')
{
sign = -1;
offset = 1;
}
else
{
baseFactor = 16;
}
digits = (UINT16)number.length();
for (INT16 d = digits - 1; d >= offset; d -= 1)
{
tempVal = colaa::getValueOfChar(number.at(d));
value += tempVal * factor;
factor *= baseFactor;
}
value *= sign;
return value;
} |
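// Illustration only (compile with -DDECODE_INT32_DEMO to try it; assumes the
// colaa helpers and INT32 typedefs above are available): a leading '+' or '-'
// selects base-10 parsing, while an unsigned number is parsed as base-16.
#ifdef DECODE_INT32_DEMO
#include <cassert>
int main()
{
    assert(decodeINT32("+123") == 123);
    assert(decodeINT32("-42") == -42);
    assert(decodeINT32("1A") == 0x1A); // no sign character => hexadecimal
    return 0;
}
#endif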
Variations of the Plasma Environment Revealed by the Evolution of the Supra-arcade Fan in the 2017 September 10 Flare
Based on observations from the Interface Region Imaging Spectrograph and Hinode, we analyze the thermodynamic evolution of the supra-arcade fan (SAF) in the 2017 September 10 flare. The SAF exhibits discontinuous characteristics during the rising process, indicating a nonuniform process of magnetic reconnection in the solar eruption. The intensity peaks of the high-temperature spectral lines (Fe xxi 1354.08 Å, Fe xxiii 263.76 Å, and Fe xxiv 255.10 Å) basically correspond to the valleys of the Doppler velocity and Doppler width. The temperature and density increase spatially at the upper boundary of the SAF. These results indicate that a compressed interface may exist in the SAF, where the plasma environment shows remarkable changes in density, temperature, and turbulence. In view of the fact that the height of the SAF is close to the hard X-ray source, we conclude that the interface could be related to termination shocks (TSs), taking into account the synthetic spectral profiles obtained from numerical experiments. In turn, the variations of the spectral profiles might be useful tools for identifying TSs from EUV spectral observations.
<gh_stars>10-100
from subprocess import call
import sys
import datasets
for dataset_name in ['enron']:
rho_squared = datasets.DATASET_NORM_SQ_CONSTRAINTS[dataset_name]
quantile = 0.70
for epsilon in datasets.DATASET_EPSILONS[dataset_name]:
if epsilon == 0: continue
eta = (0.008 / epsilon) * (epsilon / 0.05) ** (0.5)
log_file = '%s_integer_eps%s_rhosq%s_IQP_v2.log' % (dataset_name, epsilon, rho_squared)
call(
'./matlab/bin/matlab -r "feasibilityAttack(\'%s\',%s,%s,%s,%s,\'gurobi\');exit" > %s &' % (
dataset_name,
epsilon,
eta,
rho_squared,
quantile,
log_file),
shell=True)
|
/**
* Try to load the service impl given by the className.
*/
private IBatchServiceBase _loadServiceHelper(Name serviceType) {
IBatchServiceBase service = null;
String className = serviceImplClassNames.get(serviceType);
try {
if (className != null) {
service = _loadService(className);
}
} catch (Throwable t) {
logger.log(Level.SEVERE, "Could not instantiate service: " + className + " due to exception:" + t);
throw new RuntimeException("Could not instantiate service " + className, t);
}
if (service == null) {
throw new RuntimeException("Instantiate of service=: " + className + " for serviceType: " + serviceType + " returned null. Aborting...");
}
return service;
} |
def forward(self, key, value, query, mask=None):
batch, k_len, d = key.size()
batch_, k_len_, d_ = value.size()
aeq(batch, batch_)
aeq(k_len, k_len_)
aeq(d, d_)
batch_, q_len, d_ = query.size()
aeq(batch, batch_)
aeq(d, d_)
aeq(self.model_dim % 8, 0)
if mask is not None:
batch_, q_len_, k_len_ = mask.size()
aeq(batch_, batch)
aeq(k_len_, k_len)
aeq(q_len_, q_len)
def shape_projection(x):
b, l, d = x.size()
return x.view(b, l, self.head_count, self.dim_per_head) \
.transpose(1, 2).contiguous() \
.view(b * self.head_count, l, self.dim_per_head)
def unshape_projection(x, q):
b, l, d = q.size()
return x.view(b, self.head_count, l, self.dim_per_head) \
.transpose(1, 2).contiguous() \
.view(b, l, self.head_count * self.dim_per_head)
residual = query
key_up = shape_projection(self.linear_keys(key))
value_up = shape_projection(self.linear_values(value))
query_up = shape_projection(self.linear_query(query))
scaled = torch.bmm(query_up, key_up.transpose(1, 2))
scaled = scaled / math.sqrt(self.dim_per_head)
# scaled has shape (batch * head_count, query_len, key_len); the original
# names shadowed self.dim_per_head with what is actually the key length.
bh, scaled_q_len, scaled_k_len = scaled.size()
b = bh // self.head_count
if mask is not None:
    scaled = scaled.view(b, self.head_count, scaled_q_len, scaled_k_len)
    mask = mask.unsqueeze(1).expand_as(scaled)
    scaled = scaled.masked_fill(Variable(mask), -1e18) \
        .view(bh, scaled_q_len, scaled_k_len)
attn = self.sm(scaled)
top_attn = attn \
    .view(b, self.head_count, scaled_q_len, scaled_k_len)[:, 0, :, :] \
    .contiguous()
drop_attn = self.dropout(attn)
out = unshape_projection(torch.bmm(drop_attn, value_up), residual)
ret = self.res_dropout(out)
batch_, q_len_, d_ = ret.size()
aeq(q_len, q_len_)
aeq(batch, batch_)
aeq(d, d_)
return ret, top_attn |
<gh_stars>1-10
#pragma once
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_log.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_icmp.h>
#include <string>
#include <vector>
#include <fstream>
#include <istream>
#include <sstream>
#include <unordered_map>
#include <vector>
#include <atomic>
#define MBUF_PER_POOL 65535
#define MBUF_POOL_CACHE_SIZE 250
#define RX_RING_SIZE 512
#define TX_RING_SIZE 512
namespace QDPDK {
enum RET{
OK = 0,
FAIL,
};
template<typename T>
class DeqInterface{
protected:
T queue;
public:
DeqInterface(T q):queue(q){}
uint16_t Dequeue(rte_mbuf** bufs, size_t size) = delete;
};
template<>
uint16_t DeqInterface<int>::Dequeue(rte_mbuf** bufs, size_t size){
return rte_eth_rx_burst(queue, 0, bufs, size);
}
template<>
uint16_t DeqInterface<rte_ring*>::Dequeue(rte_mbuf** bufs, size_t size){
return rte_ring_sc_dequeue_burst(queue, (void**)bufs, size, NULL);
};
template<typename T>
class EnqInterface{
protected:
T queue;
public:
EnqInterface(T q):queue(q){}
uint16_t Enqueue(rte_mbuf** bufs, size_t n) = delete;
};
template<>
uint16_t EnqInterface<int>::Enqueue(rte_mbuf** bufs, size_t n){
uint16_t nb_tx = rte_eth_tx_burst(queue, 0, bufs, n);
if (unlikely(nb_tx < n)) {
for (auto buf = nb_tx; buf < n; buf++)
rte_pktmbuf_free(bufs[buf]);
}
return nb_tx;
};
template<>
uint16_t EnqInterface<rte_ring*>::Enqueue(rte_mbuf** bufs, size_t n){
uint16_t nb_tx = rte_ring_sp_enqueue_burst(queue, (void**)bufs, n, NULL);
if (unlikely(nb_tx < n)) {
for (auto buf = nb_tx; buf < n; buf++)
rte_pktmbuf_free(bufs[buf]);
}
return nb_tx;
};
class App{
public:
static std::atomic<bool> quit;
static std::atomic<bool> start;
static struct rte_eth_conf port_conf;
protected:
rte_mempool *mbuf_pool;
int mempool_size;
int num_ports;
int core_cnt;
public:
App(int num_ports, int argc, char *argv[]) : num_ports(num_ports){ AppInit(argc, argv);};
RET Run();
template<class C>
RET SetCore(C*);
rte_ring* CreateRing(const char *name, unsigned count, int socket_id, unsigned flags);
static void Signal_handler(int signum);
private:
RET PortInit(int);
RET AppInit(int, char**);
template<class C>
static RET CoreLoop(C*);
unsigned int get_available_lcore_id();
};
std::atomic<bool> App::quit = false;
std::atomic<bool> App::start = false;
struct rte_eth_conf App::port_conf;
void App::Signal_handler(int signum){
if (signum == SIGINT || signum == SIGTERM) {
quit = true;
}
}
RET App::PortInit(int port)
{
const uint16_t rx_rings = 1, tx_rings = 1;
uint16_t nb_rxd = RX_RING_SIZE;
uint16_t nb_txd = TX_RING_SIZE;
int retval;
uint16_t q;
port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
if (port >= rte_eth_dev_count())
return FAIL;
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
return FAIL;
retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
if (retval != 0)
return FAIL;
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return FAIL;
}
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return FAIL;
}
retval = rte_eth_dev_start(port);
if (retval < 0)
return FAIL;
struct ether_addr addr;
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
rte_eth_promiscuous_enable(port);
return OK;
}
RET App::AppInit(int argc, char *argv[]) {
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
argc -= ret;
argv += ret;
int nb_ports = rte_eth_dev_count();
if (nb_ports < num_ports)
rte_exit(EXIT_FAILURE, "Error: fewer ports than the bind ports.\n");
mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
for (int portid = 0; portid < nb_ports; portid++)
if (PortInit(portid) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %" PRIu16 "\n", portid);
core_cnt = rte_lcore_count();
signal(SIGINT, Signal_handler);
signal(SIGTERM, Signal_handler);
return OK;
}
template<class C>
RET App::SetCore(C *core) {
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER3, "SETTING CORE.\n");
if (--core_cnt < 0)
rte_exit(EXIT_FAILURE, "Error: fewer cores than the set cores.\n");
rte_eal_remote_launch((lcore_function_t *) CoreLoop<C>, core, get_available_lcore_id());
return OK;
}
rte_ring* App::CreateRing(const char *name, unsigned count, int socket_id, unsigned flags=RING_F_SP_ENQ|RING_F_SC_DEQ) {
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER3, "CREATING RING.\n");
return rte_ring_create(name, count, socket_id, flags);
}
template<class C>
RET App::CoreLoop(C *core){
while(not start) rte_delay_ms(10);
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER3, "START lcore:%d\n", rte_lcore_id());
core->FirstCycle();
while(not quit){
core->Cycle();
}
core->LastCycle();
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER3, "FINISH lcore:%d\n", rte_lcore_id());
return OK;
}
RET App::Run() {
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER3, "START master\n");
start = true;
rte_eal_mp_wait_lcore();
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER3, "FINISH master\n");
return OK;
}
unsigned int App::get_available_lcore_id() {
static bool assigned[RTE_MAX_LCORE] = {false,};
for (int i = RTE_MAX_LCORE - 1; i >= 0; i--){ /* signed: an unsigned counter would wrap and never terminate */
if (!rte_lcore_is_enabled(i)){
continue;
}
if (i == rte_get_master_lcore()){
continue;
}
if (assigned[i]){
continue;
}
assigned[i] = true;
return i;
}
return 0;
}
}
|
def Calculate_all(generateXmlSolverInput: bool = False):
clientModel.service.calculate_all(generateXmlSolverInput) |
def cipd_spec(self, version):
cipd_pieces = [self._package_prefix, self._cipd_pkg_name_with_override]
if not self._spec_pb.upload.universal:
cipd_pieces.append(self.platform)
full_cipd_pkg_name = get_cipd_pkg_name(cipd_pieces)
patch_ver = self.create_pb.source.patch_version
if self.create_pb.package.alter_version_re:
version = re.sub(
self.create_pb.package.alter_version_re,
self.create_pb.package.alter_version_replace,
version)
symver = '%s@%s%s' % (PACKAGE_EPOCH, version,
'.' + patch_ver if patch_ver else '')
return self._cipd_spec_pool.get(full_cipd_pkg_name, symver) |
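# Toy re-derivation of just the version-tag construction above (a sketch:
# the real PACKAGE_EPOCH constant and the alter_version regexes live
# elsewhere in this module, so the values here are assumptions).
import re

def make_symver(version, patch_ver='', alter_re=None, alter_replace='', epoch='2'):
    if alter_re:
        version = re.sub(alter_re, alter_replace, version)
    return '%s@%s%s' % (epoch, version, '.' + patch_ver if patch_ver else '')

# make_symver('1.2.3', patch_ver='chromium.2') == '2@1.2.3.chromium.2'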
<gh_stars>1-10
#ifndef __LUCENE_ANALYSIS_TOKENIZER__
#define __LUCENE_ANALYSIS_TOKENIZER__
#import <Foundation/Foundation.h>
#import "LCTokenStream.h"
#import "LCReader.h"
/* A token stream whose input is an LCReader */
@interface LCTokenizer: LCTokenStream
{
/** The text source for this Tokenizer. */
id <LCReader> input;
}
- (id) initWithReader: (id <LCReader>) input;
@end
#endif /* __LUCENE_ANALYSIS_TOKENIZER__ */
|
#include "s_except.hpp"
s_except::s_except(std::string err_msg) {
this->err_msg = err_msg;
}
s_except::~s_except() {
//
}
const char * s_except::what() const throw() {
return this->err_msg.c_str();
}
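// Minimal usage sketch (compile with -DS_EXCEPT_DEMO to try it): the message
// is stored by value, so it remains valid at the catch site.
#ifdef S_EXCEPT_DEMO
#include <iostream>
int main()
{
    try {
        throw s_except("sensor not connected");
    } catch (const s_except &e) {
        std::cerr << e.what() << std::endl;
    }
    return 0;
}
#endif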
|
package br.senac.tads.pi3.model.estoque;
public class Estoque {
public int id;
public String nome;
public int idEmpresa;
public Estoque(int i, String e, int emp){
this.id = i;
this.nome = e;
this.idEmpresa = emp;
}
public Estoque(){
}
public void setId(int id){
this.id = id;
}
public int getId(){
return this.id;
}
public void setNome(String n){
this.nome = n;
}
public String getNome(){
return this.nome;
}
public int getFkEmpresa(){
return this.idEmpresa;
}
public void setidEmpresa(int fk){
this.idEmpresa = fk;
}
}
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef TINK_INTEGRATION_AWSKMS_AWS_KMS_AEAD_H_
#define TINK_INTEGRATION_AWSKMS_AWS_KMS_AEAD_H_
#include "aws/kms/KMSClient.h"
#include "absl/strings/string_view.h"
#include "tink/aead.h"
#include "tink/util/statusor.h"
namespace crypto {
namespace tink {
namespace integration {
namespace awskms {
// AwsKmsAead is an implementation of AEAD that forwards encryption/decryption
// requests to a key managed by the AWS KMS (https://aws.amazon.com/kms).
class AwsKmsAead : public Aead {
public:
// Move only.
AwsKmsAead(AwsKmsAead&& other) = default;
AwsKmsAead& operator=(AwsKmsAead&& other) = default;
AwsKmsAead(const AwsKmsAead&) = delete;
AwsKmsAead& operator=(const AwsKmsAead&) = delete;
// Creates a new AwsKmsAead that is bound to the key specified in `key_arn`,
// and that uses the given client when communicating with the KMS.
static crypto::tink::util::StatusOr<std::unique_ptr<Aead>> New(
absl::string_view key_arn,
std::shared_ptr<Aws::KMS::KMSClient> aws_client);
crypto::tink::util::StatusOr<std::string> Encrypt(
absl::string_view plaintext,
absl::string_view associated_data) const override;
crypto::tink::util::StatusOr<std::string> Decrypt(
absl::string_view ciphertext,
absl::string_view associated_data) const override;
private:
AwsKmsAead(absl::string_view key_arn,
std::shared_ptr<Aws::KMS::KMSClient> aws_client)
: key_arn_(key_arn), aws_client_(aws_client) {}
std::string key_arn_; // The location of a crypto key in AWS KMS.
std::shared_ptr<Aws::KMS::KMSClient> aws_client_;
};
} // namespace awskms
} // namespace integration
} // namespace tink
} // namespace crypto
#endif // TINK_INTEGRATION_AWSKMS_AWS_KMS_AEAD_H_
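// Usage sketch (not part of the original header; the StatusOr accessor below
// is the 2018-era ValueOrDie() -- newer Tink versions expose operator* or
// value() instead -- and the client/key wiring is assumed to exist elsewhere):
//
//   crypto::tink::util::StatusOr<std::string> EncryptWithAwsKms(
//       absl::string_view key_arn,
//       std::shared_ptr<Aws::KMS::KMSClient> client,
//       absl::string_view plaintext,
//       absl::string_view associated_data) {
//     auto aead_result =
//         crypto::tink::integration::awskms::AwsKmsAead::New(key_arn, client);
//     if (!aead_result.ok()) return aead_result.status();
//     std::unique_ptr<crypto::tink::Aead> aead =
//         std::move(aead_result.ValueOrDie());
//     return aead->Encrypt(plaintext, associated_data);
//   }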
|
Apparently the US Army is interested in a zealous interpretation of copyright protection, too.
According to the Electronic Frontier Foundation, a Chelsea Manning supporter recently attempted to mail Manning a series of printed EFF articles about prisoner rights. Those materials were withheld and not delivered to her because, according to the EFF, the correspondence contained “printed Internet materials, including email, of a volume exceeding five pages per day or the distribution of which may violate U.S. copyright laws.”
Other materials, including lengthy Bureau of Prisons documents, were allowed through, and so the EFF concludes that "it was potentially copyright concerns that resulted in Manning’s mail being censored."
Manning, who is serving a 35-year military prison term for leaking classified military documents to WikiLeaks, has previously had run-ins with military prison authorities over alleged “reading contraband.”
On February 11, EFF Executive Director Cindy Cohn wrote to the commandant of the US Disciplinary Barracks (USDB) at Fort Leavenworth, explaining that not only did EFF grant permission for Manning to receive the materials, but that all EFF content is published under a Creative Commons license.
On Tuesday, EFF wrote:
As of Monday morning, EFF has received no response from the Army explaining the matter or clarifying why the material was withheld. Manning has also not received the documents. We have since put the documents in the mail ourselves. … It is tremendously important to EFF that people who are incarcerated have access to our materials. For example, our Creative Commons license allows Prison Legal News to regularly republish our work in its periodical, which is widely circulated in corrections facilities nationwide. We also believe that there are many pages on the Internet, freely available to anyone with a Web browser, which would prove edifying to prisoners. We would be deeply concerned by a prison policy that blocked any copyrighted works from the Web being printed and distributed to prisoners, as this would block the overwhelming majority of news articles and academic publications. In the case of the materials denied to Manning, we hope that the Army made a mistake and does not have a policy of misusing copyright to deny prisoners access to important materials that the general public can freely access.
Fort Leavenworth did not immediately respond to Ars’ request for comment. |
n = int(input())
s = input()
substrings = []

def collect_substrings(text, length):
    # Collect every substring (the original shadowed the built-in `len`
    # as a loop variable; renamed here for clarity).
    for i in range(length):
        for end in range(i + 1, length + 1):
            substrings.append(text[i:end])

collect_substrings(s, len(s))
bigrams = [sub for sub in substrings if len(sub) == 2]

best_count = 0
res = bigrams[0]
for bigram in bigrams:
    freq = bigrams.count(bigram)
    if freq > best_count:
        best_count = freq
        res = bigram
print(res)
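# Equivalent, more idiomatic sketch: count the length-2 substrings directly
# with collections.Counter instead of materializing every substring. Ties
# resolve to the first-seen bigram, matching the strict `>` test above.
from collections import Counter

def most_common_bigram(text):
    counts = Counter(text[i:i + 2] for i in range(len(text) - 1))
    return max(counts, key=counts.get)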
|
// Copyright 2014 The Azul3D Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "_cgo_export.h"
void pre_go_chipmunk_body_velocity_func(cpBody *body, cpVect gravity, cpFloat damping, cpFloat dt) {
go_chipmunk_body_velocity_func((void*)body, gravity, damping, dt);
}
void pre_go_chipmunk_body_position_func(cpBody *body, cpFloat dt) {
go_chipmunk_body_position_func((void*)body, dt);
}
void pre_go_chipmunk_body_each_shape(cpBody *body, cpShape *shape, void *data) {
go_chipmunk_body_each_shape((void*)body, (void*)shape, data);
}
void pre_go_chipmunk_body_each_constraint(cpBody *body, cpConstraint *constraint, void *data) {
go_chipmunk_body_each_constraint((void*)body, (void*)constraint, data);
}
void pre_go_chipmunk_body_each_arbiter(cpBody *body, cpArbiter *arbiter, void *data) {
go_chipmunk_body_each_arbiter((void*)body, (void*)arbiter, data);
}
|
#include "lpg_unicode_view.h"
#include "lpg_allocate.h"
#include "lpg_assert.h"
#include <string.h>
bool unicode_view_equals(unicode_view left, unicode_view right)
{
if (left.length != right.length)
{
return false;
}
if (left.length == 0)
{
// memcmp must not be called with NULL
return true;
}
return !memcmp(left.begin, right.begin, left.length * sizeof(*left.begin));
}
bool unicode_view_less(unicode_view const left, unicode_view const right)
{
if (left.length == right.length)
{
for (size_t i = 0; i < left.length; ++i)
{
if (left.begin[i] < right.begin[i])
{
return true;
}
if (left.begin[i] > right.begin[i])
{
return false;
}
}
return false;
}
return left.length < right.length;
}
unicode_string unicode_view_copy(unicode_view value)
{
return unicode_string_from_range(value.begin, value.length);
}
unicode_view unicode_view_cut(unicode_view const whole, size_t const begin, size_t const end)
{
ASSUME(begin <= end);
ASSUME(end <= whole.length);
return unicode_view_create(whole.begin + begin, (end - begin));
}
optional_size unicode_view_find(unicode_view haystack, const char needle)
{
for (size_t i = 0; i < haystack.length; ++i)
{
if (haystack.begin[i] == needle)
{
return make_optional_size(i);
}
}
return optional_size_empty;
}
unicode_string unicode_view_zero_terminate(unicode_view original)
{
unicode_string const result = {allocate_array(original.length + 1, sizeof(*result.data)), original.length};
memcpy(result.data, original.begin, result.length);
result.data[result.length] = '\0';
return result;
}
unicode_string unicode_view_concat(unicode_view first, unicode_view second)
{
size_t const result_size = first.length + second.length;
unicode_string const result = {allocate(result_size), result_size};
if (first.length)
{
memcpy(result.data, first.begin, first.length);
}
if (second.length)
{
memcpy(result.data + first.length, second.begin, second.length);
}
return result;
}
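/* Usage sketch (not part of the original file): views do not own memory, so
 * comparisons cost nothing beyond reading the bytes. Assumes the
 * unicode_view_create(begin, length) constructor used above. */
static void unicode_view_demo(void)
{
    unicode_view abc = unicode_view_create("abc", 3);
    unicode_view abd = unicode_view_create("abd", 3);
    ASSUME(!unicode_view_equals(abc, abd));
    ASSUME(unicode_view_less(abc, abd)); /* equal length, 'c' < 'd' */
    ASSUME(unicode_view_equals(unicode_view_cut(abc, 0, 2),
                               unicode_view_cut(abd, 0, 2))); /* "ab" == "ab" */
}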
|
import { User } from "./User";
export interface Request {
id : number,
itemName : string,
ingredients : string,
image : string,
description : string,
preparationTime : number,
price : number,
user: User
} |
Members of the exclusive club would be expected to have minimum liquid assets of £100 million.
They would pay £2million per up front to join, with annual dues of £500,000. A stay at the luxury hotel would start at £6,000 per person per night, rising to £14,000 for the best rooms.
Developers have lodged plans for the £1.3billion resort near Kinloch Rannoch, Perthshire, which will target the world's "highest net worth individuals".
Documents submitted to Perth and Kinross Council, which will rule on the planning application, state the aim is to create "a world-class private members resort in one of Scotland's most stunning settings.
"The philosophy behind the vision for the development is to create the optimum in luxury which caters exclusively for guests who will enjoy the very finest experience."
The plans, lodged by the Dall Estate and Malcolm James Developments, will see the redevelopment of a former private school.
Rannoch School was closed in 2002, before being bought by the latter firm and being used as a family home.
The resort would feature a luxury hotel, a body enhancement clinic staffed by leading surgeons, a health spa, two 18-hole golf courses and a clubhouse.
There would also be leisure facilities, shops, a loch-side restaurant, concert hall and up to 98 houses.
The documents state that the resort's construction, which would take three years, would pump £241.6million into the Scottish economy and create 2,200 jobs.
Around 500 staff would be employed in the resort's first year, rising to 850 after two years. It would create £28.8million for the economy in its first three years after opening.
A report by a firm of tourism consultants, submitted with the plans, states: "The development is predicated upon targeting the world's highest net worth individuals in an exclusive private club environment that will maintain exclusivity and ensure privacy and security.
"The minimum liquid asset net worth of individuals will be set at £100million. The development has been benchmarked against luxury iconic hotel operations throughout the world."
It said the resort would be five-star and "above", the level of luxury offered by firms such as Ritz Carlton and Four Seasons Hotels.
"This is a unique concept design and planned execution that truly has little comparably precedent anywhere in the world," it said.
The proposals for outline planning permission are expected to go before Perth and Kinross councillors in mid-September. |
Cop with taser (Flickr)
A Pennsylvania man said police shocked him with a Taser stun gun while he was being treated in an ambulance for an apparent seizure.
Darren Scott, of Upland, was jailed on $100,000 bond on charges including aggravated assault of a law enforcement officer and resisting arrest following the March 12 incident, reported The Philadelphia Inquirer.
Police said the 48-year-old Scott was “extremely combative,” kicked a paramedic, and grabbed and pushed a police officer, who said he believed the patient was under the influence of narcotics.
But Scott’s attorney said his client had smoked only a small amount of marijuana the night before.
An employee of Two Js Sandwich Shop told police she was waiting on Scott when she noticed his eyes roll back into his head, so she grabbed his clothes to steady him and keep him from falling.
A friend who was with Scott laid him down on the floor, where he drew up his arms and hands toward his chest and began shaking uncontrollably for a couple of minutes.
Emergency crews were called, and Scott was placed on a stretcher and taken to an ambulance over his protests.
Scott’s wife said she tried to speak to her husband – who had no history of seizures – by phone, but he was disoriented.
His friend, 30-year-old Dawan Cox, said he heard Scott scream at least three times from inside the ambulance, presumably when he was shocked with the Taser.
The paramedic, Ramona Buocolo, told Officer William Casey that Scott had not suffered a seizure, according to investigators.
Attorney Enrique Latoison said Scott, who had suffered a punctured lung and compression fractures during the seizure, felt like he was being electrocuted and arched his back.
Medical experts said a seizure would prevent someone from responding to or following orders, even from an authority.
A spokeswoman from the Epilepsy Foundation said more than 400 people each year are charged with assault or resisting arrest during or after suffering a seizure.
Both police and prosecutors are investigating the incident.
“We look at it from the perspective of what a reasonable police officer would do,” said District Attorney Jack Whelan, adding that the officer did not use the Taser until Scott kicked the paramedic in the chest.
But Scott’s attorney said his client should never have been jailed.
“You have a seizure and end up with a criminal case. How does that happen?” Latoison said.
[Image: Cop with taser via Flickr Commons] |
def pack(tensors):
buffer = torch.cat([t.view(-1) for t in tensors])
shapes = [tensor.shape for tensor in tensors]
return buffer, shapes |
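# Companion sketch (not in the original snippet): invert pack() by slicing
# the flat buffer back into views with the recorded shapes. Note the results
# are views of the packed buffer, not the original tensors.
def unpack(buffer, shapes):
    outputs, offset = [], 0
    for shape in shapes:
        numel = 1
        for dim in shape:
            numel *= dim
        outputs.append(buffer[offset:offset + numel].view(shape))
        offset += numel
    return outputs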
def process_tasks(self, input_path, output_path):
with open(input_path, "r") as fp:
tasks = json.load(fp)
with open(output_path, "a") as output_file:
for task in tasks:
self._process_task(task, output_file) |
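# Usage sketch (file names are hypothetical): the input file holds a JSON
# array with one entry per task; results are appended to the output file
# by _process_task.
#
#   worker.process_tasks("tasks.json", "results.out")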
Efficient utilization of renewable feedstocks: the role of catalysis and process design
Renewable carbon feedstocks such as biomass and CO2 present an important element of future circular economy. Especially biomass as highly functionalized feedstock provides manifold opportunities for the transformation into attractive platform chemicals. However, this change of the resources requires a paradigm shift in refinery design. Fossil feedstocks are processed in gas phase at elevated temperature. In contrast, biorefineries are based on processes in polar solvents at moderate conditions to selectively deoxygenate the polar, often thermally instable and high-boiling molecules. Here, challenges of catalytic deoxygenation, novel strategies for separation and opportunities provided at the interface to biotechnology are discussed in form of showcases. This article is part of a discussion meeting issue ‘Providing sustainable catalytic solutions for a rapidly changing world’. |
/**
* HTTP 204 No Content
*
* @return this for chaining
*/
@Nonnull
public PhotonUnifiedResponse createNoContent ()
{
setStatus (HttpServletResponse.SC_NO_CONTENT);
return this;
} |
P-713 Prescription determinants of intrauterine devices in different European countries - Results from the real-world safety study EURAS-LCS12
What are the reasons for intrauterine devices (IUD) use beyond contraception for various IUD types in routine clinical practice across Europe?
Beyond contraception, high- and low-dose levonorgestrel (LNG)-releasing IUDs are frequently used for heavy menstrual bleeding, especially in Mediterranean countries.
All IUDs are approved for the use of contraception, but additional indications differ by IUD type (i.e., LNG- and copper-containing). The higher, 52 mg LNG, dose IUD (e.g., Mirena) is also approved for the treatment of menorrhagia and endometrium protection during hormone replacement therapy. There is a documented off-label indication to use copper IUD as emergency contraception within 5 days of unprotected intercourse. The only approved indication for LNG-intrauterine devices containing less than 52 mg LNG (e.g., LCS12, Kyleena) is contraception.
EURAS-LCS12 is an ongoing prospective, non-interventional cohort study with active surveillance in ten European countries (Austria, Czech Republic, Spain, Sweden, Finland, Poland, Germany, United Kingdom, Italy, and France).
Women (N = 77,088) with a newly inserted hormonal or copper IUD (i.e., LCS12, Kyleena, Mirena, copper IUD) were enrolled between 2014 and 2021 (planned interim stage). Information on reasons for IUD use was collected from the inserting health care professional, and multiple answers were possible. In 2016 the inclusion criteria were restricted to women below the age of 40 years.
The most common reason for IUD use was birth control in all four cohorts (98.3% of LCS12, 98.4% of Kyleena, 93.4% of Mirena and 98.9% of copper IUD users). For LCS12, Kyleena and Mirena, heavy menstrual bleeding was reported as the most frequent additional reason (9.0%, 11.6%, and 24.9%, respectively). Of all Mirena users, 5.5% received the prescription solely for the treatment of heavy menstrual bleeding without a need for contraception. For copper IUDs, emergency contraception was the most frequently reported additional reason in 2.2% of the women. There were differences with respect to country: In Italy, Austria, Spain, and France, LCS12 and Kyleena were also prescribed for heavy menstrual bleeding with frequencies between 20% and 33%. The overall proportions for the menorrhagia indication in Mirena users ranged from 10% (Germany) to 58% (Italy). Copper IUDs were primarily used for contraception indications (98% to 100% across all countries). The rates for heavy menstrual bleeding as a reason for copper IUD use remained below 1% across all countries. The United Kingdom revealed the highest rate for emergency contraception usage in copper IUD users among all countries (i.e., 7%), followed by Germany with 2%.
The cohort is mainly limited to women below the age of 40 years. Frequencies of reasons for IUD use beyond contraception, especially with respect to heavy menstrual bleeding might differ in women of higher age.
Mirena is frequently used for its additional approved indication (i.e., menorrhagia). LCS12 and Kyleena are used for menorrhagia in addition to contraception, especially in Italy, Spain and Austria. It remains to be evaluated whether also low-dose LNG-IUDs are effective in treating idiopathic menorrhagia.
NCT02146950
|
word, text = raw_input(), []
while True:
tmp = raw_input()
if tmp == "END_OF_TEXT":
break
if tmp[-1] == '.':
tmp = tmp[:-1]
text += [s.lower() for s in tmp.split(" ")]
print text.count(word) |
// WithWindowSize will set the maximum allowed back-reference distance.
// The value must be a power of two between MinWindowSize and MaxWindowSize.
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
// The default value is determined by the compression level.
func WithWindowSize(n int) EOption {
return func(o *encoderOptions) error {
switch {
case n < MinWindowSize:
return fmt.Errorf("window size must be at least %d", MinWindowSize)
case n > MaxWindowSize:
return fmt.Errorf("window size must be at most %d", MaxWindowSize)
case (n & (n - 1)) != 0:
return errors.New("window size must be a power of 2")
}
o.windowSize = n
o.customWindow = true
if o.blockSize > o.windowSize {
o.blockSize = o.windowSize
o.customBlockSize = true
}
return nil
}
} |
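// Usage sketch from a consumer package (assuming this option belongs to the
// zstd encoder in github.com/klauspost/compress/zstd):
//
//	enc, err := zstd.NewWriter(dst, zstd.WithWindowSize(1<<20)) // 1 MiB window
//	if err != nil {
//		return err // non-power-of-two or out-of-range sizes fail here
//	}
//	if _, err := enc.Write(data); err != nil {
//		enc.Close()
//		return err
//	}
//	return enc.Close()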
Sylvester J. Pussycat Sr., usually called Sylvester, is a fictional character, a three-time Academy Award-winning anthropomorphic Tuxedo cat in the Looney Tunes and Merrie Melodies series of cartoons.[1] Most of his appearances have him often chasing Tweety Bird, Speedy Gonzales, or Hippety Hopper. Sylvester appeared in 103 cartoons in the golden age of American animation, only behind Bugs Bunny, Porky Pig, and Daffy Duck. Three of his cartoons won Academy Awards, the most for any starring Looney Tunes character: they are Tweetie Pie, Speedy Gonzales, and Birds Anonymous.
Animation history [ edit ]
Development [ edit ]
Sylvester predecessors appeared from 1939-1944. Naughty but Mice was the first. Notes to You was remade in color in one of Sylvester's cartoons, Back Alley Oproar. The Hep Cat features another version, as well as Birdy and the Beast, which features Tweety Bird. Before Sylvester's appearance in the cartoons, Blanc voiced a character named Sylvester on The Judy Canova Show using the voice that would eventually become associated with the cat.[2]
Personality and catchphrases [ edit ]
Sylvester shows a lot of pride in himself and never gives up. Despite (or perhaps because of) his pride and persistence, Sylvester is, with rare exceptions, placed squarely on the "loser" side of the Looney Tunes winner/loser hierarchy.
In many cartoons, Sylvester is shown sticking out his tongue while speaking, emphasizing that the lisp is deliberate. Sylvester is also known for spraying the people he is talking to with saliva as he lisps, a trait rarely shared by Daffy. A common gag used for both Sylvester and Daffy is a tendency to go on a long rant, complaining about a subject and then ending it by saying "sakes."
Sylvester's trademark exclamation is "Sufferin' succotash!", which is said to be a minced oath of "Suffering Savior".
He shows a different character when paired with Porky Pig in explorations of spooky places, in which he does not speak, behaves as a scaredy cat, and always seems to see the scary things Porky doesn't see and gets scolded by him for it every time.
Sylvester, who for the most part always played the antagonist role, is featured playing the protagonist role in a couple of cartoons while having to deal with the canine duo of Spike the Bulldog and Chester the Terrier after being chased around. In 1952's Tree for Two by Friz Freleng, Sylvester is cornered in the back alley and this would result in Spike getting mauled by a black panther that had escaped from a zoo. In the 1954 film Dr. Jerkyl's Hide, Sylvester pummels Spike (here called "Alfie") thanks to a potion that transforms him into a feline monster. After Spike's ordeal, Sylvester would have the courage and confidence to confront Chester, only to be beaten and tossed away by the little dog.
Perhaps Sylvester's most developed role is in a series of Robert McKimson-directed shorts, in which the character is a hapless mouse-catching instructor to his dubious son, Sylvester Junior, with the "mouse" being a powerful baby kangaroo which he constantly mistakes for a "king-size mouse". His alternately confident and bewildered episodes bring his son to shame, while Sylvester himself is reduced to nervous breakdowns.
Sylvester also had atypical roles in a few cartoons.
In the television series Tiny Toon Adventures, Sylvester appeared as the mentor of Furrball. The character also starred in The Sylvester and Tweety Mysteries. In the series, he plays the narrator in the beginning of episodes.
Filmography [ edit ]
The character debuted in Friz Freleng's Life With Feathers (1945). Freleng's 1947 cartoon Tweetie Pie was the first pairing of Tweety with Sylvester, and the Bob Clampett-directed Kitty Kornered (1946) was Sylvester's first pairing with Porky Pig.
He also appears in a handful of cartoons with Elmer Fudd, such as a series of cartoons underwritten by the Alfred P. Sloan Foundation extolling the American economic system.
In the 1970s and 1980s, Sylvester appeared in various Warner Bros. television specials, and in the 1980s, he appeared in the feature-film compilations.
He has died more times than any other Looney Tunes character, having died in Peck Up Your Troubles, I Taw a Putty Tat, Back Alley Oproar, Mouse Mazurka, Bad Ol' Putty Tat, Ain't She Tweet, Satan's Waitin', Muzzle Tough, Sandy Claws, Tweety's Circus, Too Hop To Handle, Tree Cornered Tweety, Tweet and Lovely, Trick or Tweet, The Wild Chase, and Museum Scream. He was also cast in the role of the Jacob Marley-like ghost in Bah, Humduck! A Looney Tunes Christmas.
A baby version of Sylvester is part of the title cast of characters in Baby Looney Tunes.
Sylvester is featured in The Looney Tunes Show (2011–14) voiced by Jeff Bergman. He is shown living with Granny alongside Tweety. In "Point, Laser Point," it is revealed that Sylvester was attracted by a glowing red dot that was on his mother's necklace when he was young as experienced through hypnotic therapy done by Witch Lezah. It was also revealed that his mother (voiced by Estelle Harris) has retired to Florida (with Sylvester's mother being disappointed that Sylvester never kept wearing his retainer, never remembered where she lives in Florida, and has not caught Tweety yet). This episode also introduced Sylvester's brother Alan (voiced by Jeff Bennett).
Sylvester also makes recurring appearances in New Looney Tunes.
Cameo appearances [ edit ]
Sylvester appears in the Robot Chicken episode "Werewolf vs. Unicorn" voiced by Patrick Pinney. During Arnold Schwarzenegger’s announcement of illegal aliens from Mexico, Sylvester demonstrates a wired fence that will keep the aliens out, only for it to be penetrated by Speedy Gonzales.
Sylvester makes a cameo appearance in Who Framed Roger Rabbit, where he provides the punchline for a double-entendre joke regarding Judge Doom's (Christopher Lloyd) identity.
Sylvester appears as part of the TuneSquad team in Space Jam, bearing the number 9 on his jersey.
He also has two cameo appearances in Looney Tunes: Back in Action, but the second time, "Sylvester" is really Mr. Smith in disguise.
In a Garfield cartoon, he made a cameo by sending Rosalina a love letter.
In the VeggieTales episode "Madame Blueberry", he appears as a barbershop quartet singer (Tenor) with Barnyard Dog (Lead), Tiger (Baritone), and Foghorn Leghorn (Bass) on a cardboard box.
Other appearances [ edit ]
From 1979 to 1983, Sylvester was the "spokescat" for 9 Lives' line of dry cat food. His face appeared on the product's boxes and Sylvester was also featured in a series of television commercials. These ads usually consisted of Sylvester trying to get to his box of 9 Lives while avoiding Hector the Bulldog. Sylvester would always succeed in luring the dog away so he could get to his food, but would always find himself a target again by the end of the commercial, which generally ended with Sylvester calling 9 Lives dry food "worth riskin' your life for."
In the Family Guy episode Padre de Familia, Peter makes up his own American version of Speedy Gonzales, called Rapid Dave, after deciding that immigrants shouldn't be allowed into America. Sylvester (with Jeff Bergman reprising his role) appears in Peter's cartoon and tries to catch Dave.
In The Simpsons episode Lisa's First Word, toddler Bart exclaims "Sufferin' succotash!" like Sylvester when Baby Lisa says her first word "Bart."
In 1985, Sylvester could be heard in an episode of the game show Press Your Luck. Host Peter Tomarken had earlier incorrectly credited his catchphrase "Suffering Succotash!" to Daffy Duck. Even though all three contestants had correctly answered "Sylvester," they were ruled incorrect. In a segment produced later and edited into the broadcast, Sylvester phoned Tomarken and told him, "Daffy Duck steals from me all the time." All three participants returned to compete in future episodes.
In comic books [ edit ]
Sylvester in Tweety and Sylvester comics, No.9, published in 1955
Tweety and Sylvester cover, No.100. Published in 1979
Western Publications produced a comic book about Tweety and Sylvester entitled Tweety and Sylvester first in Dell Comics Four Color series #406, 489, and 524, then in their own title from Dell Comics (#4-37, 1954–62), and later from Gold Key Comics (#1-102, 1963–72). In most of the comic books, Sylvester has white fur surrounding his eyes, similar to Pepé Le Pew. The white fur disappeared in later comics.
Sylvester and Tweety appear in a DC Comics and Looney Tunes crossover comic called Catwoman/Tweety and Sylvester. In the issue, witches from the DC and Looney Tunes universe place a wager where the existence of all birds and cats (as well as bird and cat-themed heroes and villains) depends on if Sylvester can eat Tweety. Sylvester (designed more realistically for the DC Universe) teams up with Catwoman while Tweety teams up with Black Canary.[3]
In video games [ edit ]
Sylvester has appeared in the video games, Sylvester and Tweety in Cagey Capers, The Bugs Bunny Crazy Castle, The Bugs Bunny Birthday Blowout, Bugs Bunny Rabbit Rampage, Looney Tunes: Acme Arsenal, The Bugs Bunny Crazy Castle 2, Looney Tunes: Back in Action, Looney Tunes: Space Race, and Bugs Bunny: Crazy Castle 3.
Naming [ edit ]
The name "Sylvester" is a play on Felis silvestris, the scientific name for the wild cat species (domestic cats like Sylvester, though, are actually Felis catus). Sylvester was not named until Chuck Jones gave him the name Sylvester, which was first used in Scaredy Cat.[citation needed] Although the character was named Sylvester in later cartoon shorts (beginning with 1948's Scaredy Cat), he was called "Thomas" in his first appearance with Tweety Bird in Tweetie Pie, most likely as a reference to a male cat being called a tom.[citation needed] Mel Blanc had also voiced a human character named Sylvester on Judy Canova's radio show earlier in the 1940s. Sylvester was officially given his name in the 1948 Chuck Jones short, Scaredy Cat.
Voice [ edit ]
Origin [ edit ]
Sylvester's trademark is his sloppy yet stridulating lisp. In his autobiography, That's Not All Folks!, Mel Blanc notes that Sylvester's voice is essentially Daffy Duck's, only not sped up in post-production and with an even more exaggerated slobbery lisp. Conventional wisdom holds that Daffy's lisp, and hence also Sylvester's, were based on the lisp of producer Leon Schlesinger. However, Blanc made no such claim: he said that Daffy's lisp came from the character having a long beak and that he borrowed the voice for Sylvester.[4] He also said that Sylvester's voice was very much like his own, minus the lisp (his son Noel Blanc has confirmed this). In addition, director Bob Clampett, in a 1970 Funnyworld interview, agreed with Blanc's account concerning Schlesinger.[5]
Voice actors [ edit ]
Reception and legacy [ edit ]
Sylvester was #33 on TV Guide's list of top 50 best cartoon characters, together with Tweety.[7]
See also [ edit ] |
// This example shows how you can use the Toggl API to print
// all your workspace names, client names, project names and the time entries
// of the last month.
func Example() {
baseURL := "https://www.toggl.com/api/v8"
apiToken := "Toggl-API-Token"
api := NewAPI(baseURL, apiToken)
fmt.Println("Workspaces:")
workspaces, workspacesError := api.GetWorkspaces()
if workspacesError != nil {
fmt.Fprintf(os.Stderr, "Failed to get workspaces: %s", workspacesError)
return
}
for _, workspace := range workspaces {
fmt.Println(workspace.Name)
}
fmt.Println("")
fmt.Println("Clients:")
clients, clientsError := api.GetClients()
if clientsError != nil {
fmt.Fprintf(os.Stderr, "Failed to get clients: %s", clientsError)
return
}
for _, client := range clients {
fmt.Println(client.Name)
}
fmt.Println("")
fmt.Println("Projects:")
for _, workspace := range workspaces {
projects, projectsError := api.GetProjects(workspace.ID)
if projectsError != nil {
fmt.Fprintf(os.Stderr, "Failed to get projects: %s", projectsError)
return
}
for _, project := range projects {
fmt.Println(project.Name)
}
}
fmt.Println("")
fmt.Println("Time Entries:")
stop := time.Now()
start := stop.AddDate(0, -1, 0)
timeEntries, timeEntriesError := api.GetTimeEntries(start, stop)
if timeEntriesError != nil {
fmt.Fprintf(os.Stderr, "Failed to get timeEntries: %s", timeEntriesError)
return
}
for _, timeEntry := range timeEntries {
fmt.Printf("%s - %s: %s\n", timeEntry.Start, timeEntry.Stop, timeEntry.Description)
}
fmt.Println("")
} |
def parse(self, force = False):
if self._parsed and not force:
return
self.resetBody()
self.initialize()
self._trustPacketVersion = self.readBin(1)[0]
if self._trustPacketVersion != 1:
raise PGPError("Unknown trust packet version %s" % self._trustPacketVersion)
self._keyId = self.readExact(8)
self._refreshTimestamp = int4FromBytes(*self.readBin(4))
self._parsed = True |
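# Layout of the version-1 trust packet parsed above (inferred from the reads
# in parse(), not quoted from a spec):
#
#   offset 0: 1 byte  - trust packet version (must be 1)
#   offset 1: 8 bytes - key id
#   offset 9: 4 bytes - refresh timestamp (byte order as defined by int4FromBytes)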
import { MessageType } from "../types/message";
var weather = require("openweather-apis");
export = {
name: "weather",
description: "display weather im given city",
execute(msg: MessageType, args: string) {
weather.setLang("en");
weather.setCity(args);
weather.setUnits("metric");
weather.setAPPID(process.env.OPENWEATHERAPI_KEY);
weather.getAllWeather(function (err: any, JSONObj: any) {
console.log(JSONObj);
if (err) {
msg.channel.send("Unknown city");
} else {
msg.channel.send(
"The weather in " +
args +
" is " +
JSONObj.main.temp +
" with " +
JSONObj.weather[0].description
);
}
});
},
};
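// Hypothetical dispatch sketch (the command wiring is an assumption, not part
// of this file): a message handler would invoke the module like
//
//   import weather = require("./weather");
//   weather.execute(msg, "Berlin"); // replies with temperature and conditions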
|
    def _allocate_new_rows(self, min_increment=None):
        # Grow the backing array geometrically by `increment` (e.g. 0.5 adds 50%),
        # always allocating at least one new row.
        new_rows = max(1, int(np.ceil(len(self.data) * self.increment)))
        if min_increment:
            # Guarantee at least `min_increment` freshly allocated rows.
            new_rows = max(new_rows, min_increment)
        append_data = np.empty([new_rows] + list(self.data.shape[1:]), dtype=self.dtype)
        self.data = np.concatenate([self.data, append_data])
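# Minimal usage sketch (the owner class and its attributes are assumptions):
# given an object with .data (ndarray), .dtype and .increment (e.g. 0.5 for
# 50% geometric growth), each call appends uninitialized rows:
#
#   buf._allocate_new_rows()                  # grow by ceil(len * increment)
#   buf._allocate_new_rows(min_increment=64)  # guarantee at least 64 new rows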
// pkg/interceptors/multi.go (from rebuy-de/kubernetes-deployment)
package interceptors
import (
"github.com/pkg/errors"
"github.com/rebuy-de/kubernetes-deployment/pkg/gh"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/runtime"
)
type Multi struct {
Interceptors []interface{}
}
func New(interceptors ...interface{}) *Multi {
return &Multi{
Interceptors: interceptors,
}
}
func (m *Multi) Add(interceptors ...interface{}) {
m.Interceptors = append(m.Interceptors, interceptors...)
}
func (m *Multi) PostFetch(branch *gh.Branch) error {
for _, i := range m.Interceptors {
c, ok := i.(PostFetcher)
if !ok {
continue
}
err := c.PostFetch(branch)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (m *Multi) PreApply(objs []runtime.Object) error {
for _, i := range m.Interceptors {
c, ok := i.(PreApplier)
if !ok {
continue
}
err := c.PreApply(objs)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (m *Multi) PreManifestApply(obj runtime.Object) (runtime.Object, error) {
var err error
for _, i := range m.Interceptors {
c, ok := i.(PreManifestApplier)
if !ok {
continue
}
obj, err = c.PreManifestApply(obj)
if err != nil {
return obj, errors.WithStack(err)
}
}
return obj, nil
}
func (m *Multi) PostManifestApply(obj runtime.Object) error {
for _, i := range m.Interceptors {
c, ok := i.(PostManifestApplier)
if !ok {
continue
}
err := c.PostManifestApply(obj)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (m *Multi) PostApply(objs []runtime.Object) error {
for _, i := range m.Interceptors {
c, ok := i.(PostApplier)
if !ok {
continue
}
err := c.PostApply(objs)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (m *Multi) PostManifestRender(obj runtime.Object) (runtime.Object, error) {
var err error
for _, i := range m.Interceptors {
c, ok := i.(PostManifestRenderer)
if !ok {
continue
}
obj, err = c.PostManifestRender(obj)
if err != nil {
return nil, errors.WithStack(err)
}
}
return obj, nil
}
func (m *Multi) Close() error {
var err error
if m == nil {
return nil
}
for _, i := range m.Interceptors {
c, ok := i.(Closer)
if !ok {
continue
}
err = c.Close()
if err != nil {
log.Warn(err)
}
}
return err
}
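// Hypothetical usage sketch (the concrete interceptor types are placeholders,
// not from this package): Multi fans each lifecycle hook out to every
// registered interceptor that implements the matching interface.
//
//	m := New(&myLogger{}, &myWaiter{})
//	m.Add(&myPreStopSleep{})
//	if err := m.PostFetch(branch); err != nil {
//		log.Fatal(err)
//	}
//	defer m.Close()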
|
fn main() {
    let x: i32 = 5;
    let y: Option<i32> = Some(3);
    // `x + y` does not compile: i32 and Option<i32> are different types.
    // Unwrap the Option explicitly, handling the None case.
    let result = match y {
        Some(value) => value,
        None => panic!("Trouble adding numbers: y is None"),
    };
    //println!("{}", x + y); // Cannot add i32 to Option<i32>
    println!("{}", result);
    let z = result; // an Option<i32> cannot be converted with `as`; use the matched value
    println!("{}", z + x);
}
|
def apt_update():
print('>> apt update')
with hide('output'):
r = sudo('apt update')
if r.find('packages can be upgraded') == -1:
raise FabricCommandError(f'Result = {r}')
print('>>> Success apt update') |
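# Minimal usage sketch (Fabric 1.x host wiring assumed elsewhere):
#
#   from fabric.api import env
#   env.hosts = ['deploy@example.com']
#   apt_update()  # raises FabricCommandError if no upgradable packages are reported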
// All variables and functions in this file are carbon copy-paste from the standard library crypto/rsa
package rsablind
import (
"crypto/rand"
"crypto/rsa"
"errors"
"io"
"math/big"
)
var bigZero = big.NewInt(0)
var bigOne = big.NewInt(1)
// Carbon copy of crypto/rsa encrypt()
func encrypt(c *big.Int, pub *rsa.PublicKey, m *big.Int) *big.Int {
e := big.NewInt(int64(pub.E))
c.Exp(m, e, pub.N)
return c
}
// Carbon copy of crypto/rsa decrypt()
// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
// random source is given, RSA blinding is used.
func decrypt(random io.Reader, priv *rsa.PrivateKey, c *big.Int) (m *big.Int, err error) {
// TODO(agl): can we get away with reusing blinds?
if c.Cmp(priv.N) > 0 {
err = rsa.ErrDecryption
return
}
var ir *big.Int
if random != nil {
// Blinding enabled. Blinding involves multiplying c by r^e.
// Then the decryption operation performs (m^e * r^e)^d mod n
// which equals mr mod n. The factor of r can then be removed
// by multiplying by the multiplicative inverse of r.
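		// Concretely: c' = c * r^e (mod n), so (c')^d = m^(ed) * r^(ed)
		// = m * r (mod n); multiplying by ir = r^(-1) afterwards recovers m.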
var r *big.Int
for {
r, err = rand.Int(random, priv.N)
if err != nil {
return
}
if r.Cmp(bigZero) == 0 {
r = bigOne
}
var ok bool
ir, ok = modInverse(r, priv.N)
if ok {
break
}
}
bigE := big.NewInt(int64(priv.E))
rpowe := new(big.Int).Exp(r, bigE, priv.N)
cCopy := new(big.Int).Set(c)
cCopy.Mul(cCopy, rpowe)
cCopy.Mod(cCopy, priv.N)
c = cCopy
}
if priv.Precomputed.Dp == nil {
m = new(big.Int).Exp(c, priv.D, priv.N)
} else {
// We have the precalculated values needed for the CRT.
m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
m.Sub(m, m2)
if m.Sign() < 0 {
m.Add(m, priv.Primes[0])
}
m.Mul(m, priv.Precomputed.Qinv)
m.Mod(m, priv.Primes[0])
m.Mul(m, priv.Primes[1])
m.Add(m, m2)
for i, values := range priv.Precomputed.CRTValues {
prime := priv.Primes[2+i]
m2.Exp(c, values.Exp, prime)
m2.Sub(m2, m)
m2.Mul(m2, values.Coeff)
m2.Mod(m2, prime)
if m2.Sign() < 0 {
m2.Add(m2, prime)
}
m2.Mul(m2, values.R)
m.Add(m, m2)
}
}
if ir != nil {
// Unblind.
m.Mul(m, ir)
m.Mod(m, priv.N)
}
return
}
// Carbon-copy of crypto/rsa decryptAndCheck()
func decryptAndCheck(random io.Reader, priv *rsa.PrivateKey, c *big.Int) (m *big.Int, err error) {
m, err = decrypt(random, priv, c)
if err != nil {
return nil, err
}
// In order to defend against errors in the CRT computation, m^e is
// calculated, which should match the original ciphertext.
check := encrypt(new(big.Int), &priv.PublicKey, m)
if c.Cmp(check) != 0 {
return nil, errors.New("rsa: internal error")
}
return m, nil
}
// Carbon-copy of crypto/rsa modInverse()
// modInverse returns ia, the inverse of a in the multiplicative group of prime
// order n. It requires that a be a member of the group (i.e. less than n).
func modInverse(a, n *big.Int) (ia *big.Int, ok bool) {
g := new(big.Int)
x := new(big.Int)
y := new(big.Int)
g.GCD(x, y, a, n)
if g.Cmp(bigOne) != 0 {
// In this case, a and n aren't coprime and we cannot calculate
// the inverse. This happens because the values of n are nearly
// prime (being the product of two primes) rather than truly
// prime.
return
}
if x.Cmp(bigOne) < 0 {
// 0 is not the multiplicative inverse of any element so, if x
// < 1, then x is negative.
x.Add(x, n)
}
return x, true
}
|
import java.awt.image.*;
/**
* Provides an interface to a picture as an array of Pixels
*
* @author <NAME>, modified by <NAME>
* @version March 1, 2002
*/
public class PixelImage
{
private BufferedImage myImage;
private int width;
private int height;
/**
* Map this PixelImage to a real image
* @param bi The image
*/
public PixelImage(BufferedImage bi)
{
// initialise instance variables
this.myImage = bi;
this.width = bi.getWidth();
this.height = bi.getHeight();
}
/**
* Return the width of the image
*/
public int getWidth() {
return this.width;
}
/**
* Return the height of the image
*/
public int getHeight() {
return this.height;
}
/**
* IGNORE THIS METHOD
*/
public BufferedImage getImage() {
return this.myImage;
}
/**
 * Return the image's pixel data as an array of Pixels. The
 * first coordinate is the row (y-coordinate), so the size of the
 * array is [height][width], where width and height are the
 * dimensions of the image
* @return The array of pixels
*/
public Pixel[][] getData() {
Raster r = this.myImage.getRaster();
Pixel[][] data = new Pixel[r.getHeight()][r.getWidth()];
int[] samples = new int[3];
for (int row = 0; row < r.getHeight(); row++) {
for (int col = 0; col < r.getWidth(); col++) {
samples = r.getPixel(col, row, samples);
Pixel newPixel = new Pixel(samples[0], samples[1], samples[2]);
data[row][col] = newPixel;
}
}
return data;
}
/**
* Set the image's pixel data from an array. This array matches
* that returned by getData(). It is an error to pass in an
* array that does not match the image's dimensions or that
* has pixels with invalid values (not 0-255)
* @param data The array to pull from
*/
public void setData(Pixel[][] data) throws IllegalArgumentException {
int[] pixelValues = new int[3]; // a temporary array to hold r,g,b values
WritableRaster wr = this.myImage.getRaster();
if (data.length != wr.getHeight()) {
throw new IllegalArgumentException("Array size does not match");
} else if (data[0].length != wr.getWidth()) {
throw new IllegalArgumentException("Array size does not match");
}
for (int row = 0; row < wr.getHeight(); row++) {
for (int col = 0; col < wr.getWidth(); col++) {
pixelValues[0] = data[row][col].getRed();
pixelValues[1] = data[row][col].getGreen();
pixelValues[2] = data[row][col].getBlue();
wr.setPixel(col, row, pixelValues);
}
}
}
}
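// Hypothetical usage sketch (the file name is an assumption):
//
//   BufferedImage bi = javax.imageio.ImageIO.read(new java.io.File("in.png"));
//   PixelImage img = new PixelImage(bi);
//   Pixel[][] data = img.getData();      // indexed [row][col]
//   data[0][0] = new Pixel(255, 0, 0);   // paint the top-left pixel red
//   img.setData(data);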
|
Since its launch a year ago, the xenophiles at Paradox have been busy expanding Stellaris with a slew of patches, meatier updates and two significantly different chunks of DLC. Problems, both endemic to 4X games and specific to Stellaris, have been grappled with, and it’s in better shape than ever, but the team still has big plans for its future.
A lot of the studio’s focus, post-launch, has been on Stellaris’ mid-game. It’s a tricky phase to get right because the galaxy starts to become familiar, the thrills of exploration and meeting new races cease, and everything just slows down a little. It is even more jarring in Stellaris, where the exploration-centric early-game is incredibly busy.
Utopia, the most recent and largest expansion, declares war on the mid-game by entirely reworking species design and politics, considerably extending the development of space empires. A lot of Utopia’s concepts were originally developed for an earlier expansion, and the story-based DLC, Leviathans, was made instead.
“We ended up going back to the drawing board a bit because we realised we needed to spend more time addressing customer feedback,” game director Martin Anward recalls. “We took a lot of those concepts, various ideas we had and feedback about politics, and we decided to make an expansion that’s all about internal politics.”
The impact can be felt throughout the game, not just in the middle, and some of the largest changes first become clear right at species creation. “There are a lot of features that I like, but the one I like the most, because it has the biggest impact on the player, is Traditions and Ascension Perks. They add all these strategic choices as the game goes on. And it makes sure things keeps happening like, ‘I’m going to go down this tree, I’m going to turn my people into robots.’ It gives you these long-term plans to work towards.”
For the uninitiated, Traditions and Ascension Perks help you define your species right at the start and then continue to do so as you play. You can create a species of genetically augmented psychics, tech-obsessed robots or even a hivemind, each with their own mechanics and paths to progress down. Eventually, perks unlock that let you create megastructures, like gargantuan ringworlds or energy-generating Dyson spheres.
“I feel that it’s good now,” Anward says, after giving it some thought. “Earlier I would say it was OK. There was definitely a point where not enough things were happening. Now I feel… I will never be completely happy with it, but the mid-game is in good shape.”
If he seems ambivalent, it’s only because he’s looking ahead, to what Stellaris could be. Utopia was all about politics and populations, now he wants to give diplomacy a similar treatment. He envisions a galactic UN or space Holy Roman Empire, an institution where membership means better trade deals and other benefits, at the cost of being forced to abide by certain rules. And then there’s war...
“Obviously I want planet destroyers,” he confesses. “But it’s going to be an interesting challenge making that for a game with a warscore system. I also want to, overall, give more of a strategic layer to war—a better way for the player to understand how a war unfolds instead of ships just moving around in pretty battles.”
With the mid-game improved, the team's attention now turns to the end-game. The massive galaxy-spanning crises could be more balanced and more interesting, Anward thinks, but the main goal is to simply extend the end and add more things to strive for once you’ve finished building megastructures and developing your species.
But Stellaris is more than wars and politics—it’s a vehicle for intriguing sci-fi yarns and emergent stories. The Leviathans story DLC largely handled the latter, throwing new enemies, AI groups and a new end-game threat into the mix. And several updates have expanded the former, not least the Kennedy patch, which saw Alexis Kennedy, founder of Failbetter Games and now writing for BioWare, pen an appropriately spooky tale.
“I definitely do want to bring in new writers,” Anward says. “And I’d love to have Alexis Kennedy back, he did a terrific job and he was a real pleasure to work with.” It’s through these stories that he hopes the Stellaris universe will start to feel more tangible. “As we’re doing this sort of thing, we’re adding little bits and pieces and adapting them for the Stellaris universe, so we’re actually building a real setting out of all these pieces. In a year or two, we’ll have a much more defined universe than we have now.”
Along with more tales of space adventure and extradimensional horrors, characters are another area that he’d like to see enhanced. “Not at all Crusader Kings 2 level, we’re not doing that, but I feel like it’s a bit of a waste to have all these characters when there’s not much to them. I’d like to integrate them more into the stories. So you could have a story going on and it’s about this character and they play a pivotal part in it. Or you could have a politics story—an admiral from the fleet gets elected to office, rises up to become ruler and then proclaims a galactic empire.”
With the multitude of ideas that Anward and the Stellaris team have, it’s likely a good fit for Paradox’s prolific DLC model. But it has one advantage over the likes of Europa Universalis 4: it’s not shackled to history. The universe is a very big place, and unrestrained, Stellaris could go anywhere. And with Iain M Banks’ Culture novels as one of Anward’s biggest inspirations, it might go to somewhere very weird. |
//! userlevel=Normal
//: 2D Triangular Mesh
class TriMesh2dC
{
public:
TriMesh2dC()
{}
TriMesh2dC(const SArray1dC<Point2dC> &nVertices,const SArray1dC<Index3dC> &nFaces)
: vertices(nVertices),
faces(nFaces)
{}
SArray1dC<Point2dC> &Vertices()
{ return vertices; }
SArray1dC<Index3dC> &Faces()
{ return faces; }
const SArray1dC<Point2dC> &Vertices() const
{ return vertices; }
const SArray1dC<Index3dC> &Faces() const
{ return faces; }
SizeT NoFaces() const
{ return faces.Size(); }
SizeT NoVertices() const
{ return vertices.Size(); }
IntT FindFace(const Point2dC &pnt);
RealRange2dC BoundingRectangle() const;
bool IsFlat() const;
protected:
SArray1dC<Point2dC> vertices;
SArray1dC<Index3dC> faces;
};
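// Hypothetical usage sketch (Ravl container construction details assumed):
//
//   SArray1dC<Point2dC> pnts(3);
//   pnts[0] = Point2dC(0,0); pnts[1] = Point2dC(1,0); pnts[2] = Point2dC(0,1);
//   SArray1dC<Index3dC> tris(1);
//   tris[0] = Index3dC(0,1,2);
//   TriMesh2dC mesh(pnts, tris);
//   IntT face = mesh.FindFace(Point2dC(0.2, 0.2)); // index of containing triangle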
/**
* Called when the fragment is attached to the activity
* @param context the context for the activity
*/
@Override
public void onAttach(@NonNull Context context) {
super.onAttach(context);
try{
postButtonClickListener = (OnPostButtonClickListener) context;
}catch(ClassCastException e){
throw new ClassCastException(context.toString() + " must implement OnPostButtonClickListener");
}
try{
entryAdapterMethods = (EntryAdapterMethods) context;
}catch(ClassCastException e) {
throw new ClassCastException(context.toString() + " must implement EntryAdapterMethods");
}
} |
package ganyi.hadoop.replayer.simulator.rm;
import ganyi.hadoop.replayer.GlobalConfigure;
import ganyi.hadoop.replayer.message.Message;
import ganyi.hadoop.replayer.message.MessageQueue;
import ganyi.hadoop.replayer.network.netAddress;
import ganyi.hadoop.replayer.rpc.RpcPosition;
import ganyi.hadoop.replayer.rpc.param.AllocateRequestParam;
import ganyi.hadoop.replayer.rpc.param.FinishApplicationMasterRequestParam;
import ganyi.hadoop.replayer.rpc.param.RegisterApplicationMasterRequestParam;
import ganyi.hadoop.replayer.simulator.Simulator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.*;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.json.JSONException;
import org.json.JSONObject;
import javax.sound.midi.SysexMessage;
import javax.xml.bind.DatatypeConverter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;
import java.util.concurrent.Executors;
public class ResourceManagerAppMasterSimulator extends Simulator {
ApplicationMasterProtocol appMasterRM;
    volatile ALLOCATE_TYPE allocateState;
    // Dedicated lock object: synchronizing on allocateState itself is unsafe
    // because the field is reassigned in the setter and may initially be null.
    private final Object allocateLock = new Object();
    boolean startAllocateCounter = false;
int totalRequest;
int alreadyRequest;
int finished;
UserGroupInformation ugi;
long responseID;
double progress;
long allocTS;
int count = 0;
boolean isRegistered = false;
//List<String> nmSimID;
public ResourceManagerAppMasterSimulator(String[] args,
GlobalConfigure configuration,
MessageQueue<Message> inboundQueue,
MessageQueue<Message> outBoundQueue) {
super(args, configuration, inboundQueue, outBoundQueue);
}
public ResourceManagerAppMasterSimulator(String[] args) {
super(new String[]{"rmc.1"});
configure = new GlobalConfigure(args[0], "rmc.1");
}
public int getTotalRequest() {
return totalRequest;
}
public int getAlreadyRequest() {
return alreadyRequest;
}
public void setAlreadyRequest(int alreadyRequest) {
this.alreadyRequest = alreadyRequest;
}
public long getResponseID() {
return responseID;
}
public void setResponseID(long responseID) {
this.responseID = responseID;
}
/*public void updateProgress(){
//not sure what does progress mean in RM_APP.
progress = alreadyRequest / totalRequest;
}*/
public double getProgress() {
return progress;
}
    public ALLOCATE_TYPE getAllocateState() {
        synchronized (allocateLock) {
            return allocateState;
        }
    }
    public void setAllocateState(ALLOCATE_TYPE allocateState) {
        synchronized (allocateLock) {
            this.allocateState = allocateState;
        }
    }
@Override
public void startRPCService() {
netAddress rmAddr = configure.getResourceManagerAddr();
InetSocketAddress socketAddress =
new InetSocketAddress(rmAddr.getIp(), rmAddr.getPort());
org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
conf.set("yarn.resourcemanager.hostname", socketAddress.getAddress().getHostName());
conf.set("yarn.resourcemanager.address", socketAddress.getAddress().getHostAddress() + ":" + rmAddr.getPort());
conf.set("yarn.resourcemanager.scheduler.address", socketAddress.getAddress().getHostAddress() + ":" + (rmAddr.getPort() + 1));
appMasterRM = ugi.doAs(
(PrivilegedAction<ApplicationMasterProtocol>) () -> {
try {
return ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
);
if (appMasterRM == null) {
LOG.error("Failed to create rmMaster rpc proxy.");
System.exit(5);
}
/*rmMaster = ClientRMProxy.createRMProxy(conf,ApplicationMasterProtocol.class);*/
}
/*@Override
void SetupRpcManager() {
rpcMgr = new RPCManager(this.rmMaster, this);
}*/
@Override
public void stopSimulatorRPCConnection() {
RPC.stopProxy(appMasterRM);
}
@Override
public void periodicalJob(TimerTaskType type, String[] cmd) {
LOG.info(getIdentifier() + " allocate state: " + this.allocateState.name() + " progress:" + progress);
if (type == TimerTaskType.AppMaster_allocate) {
/*if (progress < 1) {*/
try {
int requested = alreadyRequest;
if (getAllocateState() == ALLOCATE_TYPE.normal) {
playOnce("0,0,2");
} else if (getAllocateState() == ALLOCATE_TYPE.resource_request) {
LOG.info("process resource request.");
playOnce("0,0,1");
setAllocateState(ALLOCATE_TYPE.normal);
}
if(requested < alreadyRequest){
if(alreadyRequest < totalRequest){
setAllocateState(ALLOCATE_TYPE.resource_request);
}
else if(alreadyRequest == totalRequest /* && count ==1 */){
setAllocateState(ALLOCATE_TYPE.resource_request);
}
}
/*if (requested != alreadyRequest || alreadyRequest == totalRequest) {
//request resource next time.
LOG.info("Request resource next time.");
setAllocateState(ALLOCATE_TYPE.resource_request);
}*/
} catch (IOException e) {
e.printStackTrace();
}
/*} else {
LOG.info(getIdentifier()+ " finishAM.");
try {
playOnce("0,0,3");
} catch (IOException e) {
e.printStackTrace();
}
//rpcMgr.respondWithFinish("RM_AM|" + getJobID());
}*/
responseID += 1;
}
}
@Override
public void init() {
String id = envs[0];
totalRequest = Integer.valueOf(envs[1]);
LOG = LogFactory.getLog(ResourceManagerAppMasterSimulator.class);
LOG.info("Total number of request NM: " + totalRequest + ".");
setIdentifier(id);
alreadyRequest = 0;
finished = 0;
responseID = 0;
        progress = 1.0 * alreadyRequest / totalRequest; // avoid integer division
String[] ss = envs[2].split(":");
Text kind = new Text(ss[0]);
Text service = new Text(ss[1]);
byte[] identifier = DatatypeConverter.parseHexBinary(ss[2]);
byte[] password = DatatypeConverter.parseHexBinary(ss[3]);
ugi = UserGroupInformation.createRemoteUser("SPARK_USER");
Token<AMRMTokenIdentifier> token = new Token<>(identifier, password, kind, service);
ugi.addToken(token);
setStartHBCounter(true);
}
@Override
public void run() {
init();
String jobFile = "";
while (true) {
Message message = inboundQueue.get();
if (message.getMsgType() == Message.MSG_TYPE.command) {
String[] ss = message.getCmd().split("#");
if (ss[0].equalsIgnoreCase("start")) {
LOG.info("Start TS:"+getJobID()+"#"+System.currentTimeMillis()+"#");
jobFile = ss[1];
validateEmulator(jobFile);
try {
play("");
//rpcMgr.respondWithFinish(jobFile);
} catch (IOException e) {
e.printStackTrace();
}
}
} else if (message.getMsgType() == Message.MSG_TYPE.release_simulator) {
LOG.info("Release simulator thread: " + getIdentifier());
setStartHBCounter(false);
try {
metricsReport();
} catch (RuntimeException e) {
LOG.error("Runtime exception happens when collecting metrics.");
}
stopRPCService();
break;
} else if (message.getMsgType() == Message.MSG_TYPE.start_HB_count) {
startAllocateCounter = true;
} else if (message.getMsgType() == Message.MSG_TYPE.ChangeHBState) {
LOG.info(getIdentifier()+":Receive ChangeHBState in am.");
if (message.getCmd().equalsIgnoreCase(ResourceManagerAppMasterSimulator
.ALLOCATE_TYPE.resource_request.name())) {
setAllocateState(ALLOCATE_TYPE.resource_request);
LOG.info("Start allocate:"+getJobID()+":"+System.currentTimeMillis()+":");
/*LOG.info("stop AM before allocate.");
System.exit(5);*/
allocTS = System.currentTimeMillis();
String[] amcmd = new String[]{""};
scheduleAggresiveTimer(200,3000,
TimerTaskType.AppMaster_allocate,amcmd);
/*scheduleTimerTask(1000,
Simulator.TimerTaskType.AppMaster_allocate, amcmd);*/
}
} else if (message.getMsgType() == Message.MSG_TYPE.RMAppMasterStatus) {
if (message.getCmd().equalsIgnoreCase("progress")) {
finished += 1;
progress = 1.0 * finished / totalRequest;
LOG.info("progress: " + progress);
//when progress goes to 100%, stop allocate and call finishApplication.
if (progress == 1.0) {
LOG.info("Progress reach 100%, stop allocate and send finishAppMaster in "+getIdentifier());
stopTimers();
//shutdown only once.
try {
playOnce("0,0,3");
} catch (IOException e) {
e.printStackTrace();
}
/*while(true) {
try {
playOnce("0,0,3");
} catch (IOException e) {
e.printStackTrace();
}
if(!isRegistered){
break;
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}*/
//Notify CC to change state of all NMs.
Message msg = new Message(Message.MSG_TYPE.RMAppMasterStatus,
"finish", getIdentifier(), "CC.1", getJobID());
msg.setMisc(envs[3]);
sendMessage(msg);
}
}
else if (message.getCmd().equalsIgnoreCase("toReleasePhase")) {
Message msg = new Message(Message.MSG_TYPE.finish_response,
jobFile,getIdentifier(),"CC.1",getJobID());
sendMessage(msg);
}
}
}
//RMAppMasterRPCTest();
}
@Override
public void playbook() throws IOException {
String[] cmdset;
LOG.info(getIdentifier()+":Haha, start run RM APP RPC.");
script.setTerminatingPoint(1);
long ts = System.currentTimeMillis();
while ((cmdset = script.getNext()) != null) {
String[] param = interpreter.interpret(cmdset, recorder);
ActionStation(param[0], param[1], param[2], param[3]);
}
LOG.info("register latency:"+getJobID()+"#"+String.valueOf(System.currentTimeMillis() - ts));
setAllocateState(ALLOCATE_TYPE.normal);
//notify CC that am finishes register.
Message msg = new Message(Message.MSG_TYPE.RMAppMasterStatus,
"register", getIdentifier(), "CC.1", getJobID());
sendMessage(msg);
}
@Override
public String ExecuteRPC(String pos, String cmd, JSONObject object) throws IOException, YarnException {
String jsonResponse;
RpcPosition position = new RpcPosition(pos);
if (cmd.equalsIgnoreCase("registerApplicationMaster")) {
RegisterApplicationMasterRequest param =
RegisterApplicationMasterRequestParam.parseParam(object);
long t1 = System.currentTimeMillis();
RegisterApplicationMasterResponse response = appMasterRM.registerApplicationMaster(param);
long t2 = System.currentTimeMillis();
updateLatency(t1, t2, pos, cmd);
//recorder.addReocord(position, param, response);
//jsonResponse = gson.toJson(response);
isRegistered = true;
//LOG.info(getIdentifier()+ " "+ cmd +" response: "+jsonResponse);
} else if (cmd.equalsIgnoreCase("allocate")) {
AllocateRequest param = AllocateRequestParam.parseParam(object);
long t1 = System.currentTimeMillis();
AllocateResponse response = appMasterRM.allocate(param);
long t2 = System.currentTimeMillis();
updateLatency(t1, t2, pos, cmd);
if(getTotalRequest() == getAlreadyRequest()){
setAlreadyRequest(getTotalRequest() + 1);
}
List<Container> containers = response.getAllocatedContainers();
if (containers.size() != 0) {
//int vdrSize = containers.size();
count ++;
String s= getIdentifier()+" get "+ containers.size()+
" containers from allocate, already get "+getAlreadyRequest();
if(getAlreadyRequest() < getTotalRequest()) {
Message message = new Message(Message.MSG_TYPE.RMAppMasterStatus,
"start_nm_container", getIdentifier(), "CC.1",
getJobID());
StringJoiner sj = new StringJoiner("|");
for (Container container : containers) {
StringBuilder sb = new StringBuilder();
sb.append(container.getNodeId())
.append("#").append(container.getId().toString());
sj.add(sb.toString());
}
message.setMisc(sj.toString());
sendMessage(message);
int requested = getAlreadyRequest();
requested += containers.size();
setAlreadyRequest(requested);
s += " <<launch containers!>>";
}
else{
s += " <<discard all over-subscribes.>>";
}
LOG.info(s);
/*System.out.println("vdr should equal requested=" + requested + ", already=" + getAlreadyRequest() +
", vdrSize=" + vdrSize + ", csz=" + containers.size());*/
}
if (getTotalRequest() - getAlreadyRequest() == 0) {
LOG.info("Allocate latency:"+getJobID()+"#"+String.valueOf(System.currentTimeMillis() - allocTS)+"#");
LOG.info("End TS:"+getJobID()+"#"+System.currentTimeMillis()+"#");
}
} else if (cmd.equalsIgnoreCase("finishApplicationMaster")) {
LOG.info(getIdentifier() +" invokes finishApplicationMaster.");
FinishApplicationMasterRequest param =
FinishApplicationMasterRequestParam.parseParam(object);
long t1 = System.currentTimeMillis();
FinishApplicationMasterResponse response = appMasterRM.finishApplicationMaster(param);
long t2 = System.currentTimeMillis();
updateLatency(t1, t2, pos, cmd);
if (response.getIsUnregistered()) {
isRegistered = false;
}
} else {
            throw new RuntimeException(String.format("Cannot find command %s " +
                    "in ApplicationMasterProtocolPB\n", cmd));
}
return "";
}
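    // State-machine sketch (inferred from periodicalJob and run() above, not
    // documented in the original): allocate heartbeats tick in `normal` mode;
    // when CC requests resources or newly granted containers leave more to
    // request, the state flips to `resource_request` so the next tick replays
    // the resource-request RPC, after which it drops back to `normal`.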
public enum ALLOCATE_TYPE {
normal,
resource_request,
}
}
|
import { URL } from '../platform/URL';
import { Record } from '../Record';
import { LoaderPlugin, pluginFactory, PluginSpec } from '../Plugin';
import { Loader } from '../Loader';
const enum ParserState {
OUT,
IN_ELEMENT,
IN_ATTRIBUTE,
IN_COMMENT,
IN_SCRIPT_CONTENT
}
class HTMLPlugin implements LoaderPlugin {
constructor(private loader: Loader) { }
analyze(record: Record) {
let code = '';
const html = record.sourceCode || '';
const re = new RegExp(
(
'<!--|' +
'-->|' +
'/>|' +
'[">]|' +
'</?((script|style)([ \t\n]*(/?>)?))?|' +
'type[ \t\n]*=[ \t\n]*"x-req[^-]*-([^"]*)"|' +
'src[ \t\n]*=[ \t\n]*"([^"]*)"'
),
'gi'
);
let match: RegExpExecArray | null;
let state: ParserState = ParserState.OUT;
let syntaxDepth = 0;
let inScript = false;
let mime = '';
let src = '';
let nameLen = 0;
let scriptStart = 0;
while((match = re.exec(html))) {
let token = match[0];
switch(state) {
case ParserState.OUT:
if(token == '<!--') {
state = ParserState.IN_COMMENT;
++syntaxDepth;
break;
} else if(token.charAt(0) == '<') {
state = ParserState.IN_ELEMENT;
++syntaxDepth;
if(match[2]) {
nameLen = match[2].length;
inScript = token.charAt(1) != '/' && !!match[3];
mime = '';
src = '';
token = match[4];
} else {
inScript = false;
break;
}
} else {
break;
}
// Fallthru
case ParserState.IN_ELEMENT:
if(token == '"') {
state = ParserState.IN_ATTRIBUTE;
} else if(token == '/>' || token == '>') {
state = ParserState.OUT;
--syntaxDepth;
if(inScript) {
if(token == '>') {
state = ParserState.IN_SCRIPT_CONTENT;
scriptStart = re.lastIndex;
}
if(mime && src) {
// Ensure import path is either explicitly relative or an absolute URL.
if(!/^(\.?\.?\/|[a-z]+:)/.test(src)) src = './' + src;
code += 'require("' + src + '");\n';
}
inScript = false;
}
} else if(inScript) {
mime = match[5] || mime;
src = match[6] || src;
}
break;
case ParserState.IN_ATTRIBUTE:
if(token == '"') {
state = ParserState.IN_ELEMENT;
}
break;
case ParserState.IN_COMMENT:
if(token == '-->') {
state = ParserState.OUT;
--syntaxDepth;
}
break;
case ParserState.IN_SCRIPT_CONTENT:
if(match[2] && match[2].length == nameLen && token.charAt(1) == '/' && match[4]) {
if(mime && !src) {
code += html.substr(scriptStart, re.lastIndex - match[0].length - scriptStart);
}
state = ParserState.OUT;
}
break;
}
}
// console.log(state, syntaxDepth, 'SHOULD BE', 0, 0);
// console.log(code);
// const importation = this.loader.newImportation('./#.js', record.resolvedKey);
// importation.sourceCode = code;
// record.addImport('./#.js', importation);
record.sourceCode = code;
record.addPlugin(this.loader.getDefaultPlugin(), true);
}
instantiate(record: Record) {
// console.log(record);
// return record.compiled || (record.sourceCode && JSON.parse(record.sourceCode));
}
/* wrap(record: Record) {
return record.sourceCode || 'null';
} */
extensions: { [name: string]: LoaderPlugin | undefined } = {
html: this,
htm: this,
};
id?: string;
}
export const HTML = pluginFactory('html', HTMLPlugin);
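// Hypothetical input sketch (the markup below is an assumption): given
//
//   <script type="x-req-js" src="./main.js"></script>
//
// analyze() rewrites record.sourceCode to `require("./main.js");` and hands the
// record to the loader's default plugin; inline scripts with a matching type
// attribute and no src have their bodies copied through verbatim.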
|
NEW YORK—A series of horrific, devastatingly injurious events failed to befall 33-year-old Flavorpill.com digital media developer Jake Reston as he confidently skated to work on his longboard, shocked witnesses reported today.
Hundreds of aghast Manhattan commuters who watched Reston not hit a pothole with his idiotic oversized skateboard or fly uncontrollably through the air and smash his stupid fucking face right into the pavement said they were compelled to look away when Reston's foppish, military-inspired canvas shoulder bag didn't become snagged on the side-view mirror of a passing taxi cab and cause him to be dragged screaming down the street.
Advertisement
"My God, it was one of the most awful things I've ever seen," said retail associate Laura Forester, 29, who was exiting the subway when Reston passed right in front of her and managed not to, at the very least, lose control of his longboard and fall in a big pile of dog shit. "He made it all the way to the door of his obnoxiously ultramodern office building without a scratch on him."
"All I could think about was his poor parents," Forester added.
Doctors on the scene further dashed bystanders' hopes, saying there was nothing they could do and that Reston was likely to live out a long, comfortable life and continue to date a series of incongruously beautiful women.
Advertisement
Many New Yorkers said they felt helpless and frozen in place while witnessing the terrible sequence of events that did not happen to Reston, especially when the floppy-haired fuckface violated a red traffic signal without being immediately broadsided by a garbage truck and sent flying through the display window of one of the annoying, overpriced stores where he buys his slim-fitting oxford button-downs and preciously clever graphic tees.
Others admitted they felt guilty for not having done more, like stepping out into the street and punching Reston square in the goddamn mouth.
"Seeing something like this, it makes you question the existence of God," said passerby David Erickson, still dumbfounded by the fact that Reston had remained upright during his entire longboard commute rather than ending up a crumpled, bloody pile on the sidewalk, with the iPad he no doubt had somewhere on his person smashed to pieces beside him. "I mean, I don't expect something crazy to happen, like a big chunk of building coming loose and flattening him from above, but nothing? Nothing at all?"
Advertisement
"There's just no way to make sense of this," continued Erickson, sadly shaking his head.
Erickson added that he lost all faith when Reston glided by the mirrored glass facade of an apartment building, and, despite getting a good look at himself, did not become overwhelmed by what a smug little turd he was and immediately throw himself into traffic.
Some, however, remained stoic about the succession of physically crippling incidents that did not befall the digital media professional, even after Reston also failed to collide at high speed with a passing candy-apple red Vespa scooter driven by equally insufferable UrbanDaddy.com senior culture editor James Leary, 36.
Advertisement
"I think it's important to remember that tomorrow's another day, and we just have to keep looking ahead," 78-year-old local retiree Arnold Stephens said before walking into a nearby hardware store and purchasing hundreds of ball bearings. |
// from dycz0fx/Vitis_Libraries
/*
* Copyright 2020 Xilinx, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xf_database/gqe_filter.hpp"
#include "xf_database/gqe_bloomfilter.hpp"
#include "prepare.hpp"
#include "x_utils.hpp"
#include <unordered_map>
#include "xf_utils_sw/logger.hpp"
#define VEC_LEN 8
// 1 / BUILD_FACTOR of L table rows will be built into bloom-filter
#define BUILD_FACTOR 10
// load one col data into 1 buffer
template <typename T>
int load_dat(void* data, const std::string& name, const std::string& dir, const int sf, const size_t n) {
if (!data) {
return -1;
}
std::string fn = dir + "/dat" + std::to_string(sf) + "/" + name + ".dat";
FILE* f = fopen(fn.c_str(), "rb");
if (!f) {
std::cerr << "ERROR: " << fn << " cannot be opened for binary read." << std::endl;
}
size_t cnt = fread(data, sizeof(T), n, f);
fclose(f);
if (cnt != n) {
std::cerr << "ERROR: " << cnt << " entries read from " << fn << ", " << n << " entries required." << std::endl;
return -1;
}
return 0;
}
int main(int argc, const char* argv[]) {
std::cout << "--------------- Query 5 simplified, filter --------------- " << std::endl;
using namespace xf::common::utils_sw;
Logger logger(std::cout, std::cerr);
// cmd arg parser.
x_utils::ArgParser parser(argc, argv);
std::string xclbin_path;
if (!parser.getCmdOption("-xclbin", xclbin_path)) {
std::cout << "ERROR: xclbin path is not set!\n";
return 1;
}
std::string in_dir;
if (!parser.getCmdOption("-in", in_dir)) {
std::cout << "Please provide the path to the input tables\n";
return 1;
}
std::string scale;
int factor_o = 1;
int factor_l = 1;
if (parser.getCmdOption("-O", scale)) {
try {
factor_o = std::stoi(scale);
} catch (...) {
factor_o = 1;
}
}
if (parser.getCmdOption("-L", scale)) {
try {
factor_l = std::stoi(scale);
} catch (...) {
factor_l = 1;
}
}
std::string section;
int sec_l = 1;
if (parser.getCmdOption("-sec", section)) {
try {
sec_l = std::stoi(section);
} catch (...) {
sec_l = 1;
}
}
std::vector<std::string> cols_rt;
cols_rt.push_back("l_orderkey");
cols_rt.push_back("l_extendedprice");
cols_rt.push_back("l_discount");
std::string in_dir_datr = prepare(in_dir, factor_l, cols_rt);
std::cout << "Read right table form " << in_dir_datr << std::endl;
int64_t table_o_nrow = 1500000 * factor_o;
int64_t table_l_nrow = 6001215;
switch (factor_l) {
case 1:
table_l_nrow = 6001215;
break;
case 2:
table_l_nrow = 11997941;
break;
case 4:
table_l_nrow = 23996604;
break;
case 8:
table_l_nrow = 47989007;
break;
case 10:
table_l_nrow = 59986052;
break;
case 12:
table_l_nrow = 71985077;
break;
case 20:
table_l_nrow = 119994608;
break;
case 30:
table_l_nrow = 179998372;
break;
case 32:
table_l_nrow = 192000000;
break;
case 33:
table_l_nrow = 198000000;
break;
case 34:
table_l_nrow = 204000000;
break;
case 35:
table_l_nrow = 210000000;
break;
case 36:
table_l_nrow = 216000000;
break;
case 37:
table_l_nrow = 222000000;
break;
case 38:
table_l_nrow = 228000000;
break;
case 39:
table_l_nrow = 234000000;
break;
case 40:
table_l_nrow = 240012290;
break;
case 60:
table_l_nrow = 360011594;
break;
case 80:
table_l_nrow = 480025129;
break;
case 100:
table_l_nrow = 600037902;
break;
case 150:
table_l_nrow = 900035147;
break;
case 200:
table_l_nrow = 1200018434;
break;
case 250:
table_l_nrow = 1500000714;
break;
default:
table_l_nrow = 6001215;
std::cerr << "L factor not supported, using SF1" << std::endl;
}
if (factor_l > 30 && factor_l < 40) {
factor_l = 40;
}
int sim_scale = 10000;
if (parser.getCmdOption("-scale", scale)) {
try {
sim_scale = std::stoi(scale);
} catch (...) {
sim_scale = 10000;
}
}
table_o_nrow /= sim_scale;
table_l_nrow /= sim_scale;
std::cout << "Orders SF(" << factor_o << ")\t" << table_o_nrow << " rows\n"
<< "Lineitem SF(" << factor_l << ")\t" << table_l_nrow << " rows\n";
using namespace xf::database;
gqe::utils::MM mm;
// 32-bit data load from tpch data
int32_t* table_l_in_0 = mm.aligned_alloc<int32_t>(table_l_nrow);
int32_t* table_l_in_1 = mm.aligned_alloc<int32_t>(table_l_nrow);
// 64-bit data actually used in gqe-int64 kernel
int64_t* tab_l_col0 = mm.aligned_alloc<int64_t>(table_l_nrow);
int64_t* tab_l_col1 = mm.aligned_alloc<int64_t>(table_l_nrow);
int64_t tab_l_val_len = (table_l_nrow + 7) / 8;
char* tab_l_valid = mm.aligned_alloc<char>(tab_l_val_len);
int64_t* tab_o1_col0 = mm.aligned_alloc<int64_t>(table_l_nrow / BUILD_FACTOR);
int64_t* tab_o1_col1 = mm.aligned_alloc<int64_t>(table_l_nrow / BUILD_FACTOR);
int64_t* tab_o2_col0 = mm.aligned_alloc<int64_t>(table_l_nrow / BUILD_FACTOR);
int64_t* tab_o2_col1 = mm.aligned_alloc<int64_t>(table_l_nrow / BUILD_FACTOR);
int64_t table_c_nrow = table_l_nrow;
int64_t table_c_nrow_depth = (table_c_nrow + VEC_LEN - 1) / VEC_LEN;
ap_uint<512>* tab_c_col0 = mm.aligned_alloc<ap_uint<512> >(table_c_nrow_depth);
memset(tab_c_col0, 0, table_c_nrow_depth * sizeof(ap_uint<512>));
ap_uint<512>* tab_c_col1 = mm.aligned_alloc<ap_uint<512> >(table_c_nrow_depth);
memset(tab_c_col1, 0, table_c_nrow_depth * sizeof(ap_uint<512>));
int err = 0;
err += load_dat<int32_t>(table_l_in_0, "l_orderkey", in_dir, factor_l, table_l_nrow);
err += load_dat<int32_t>(table_l_in_1, "l_extendedprice", in_dir, factor_l, table_l_nrow);
if (err) return err;
std::cout << "LineItem table has been read from disk" << std::endl;
    // mark only the even rows of table L as valid (0x55 = 0b01010101)
for (int i = 0; i < tab_l_val_len; i++) {
tab_l_valid[i] = 0x55;
}
// convert data from 32-bit to 64-bit, for testing only
for (int i = 0; i < table_l_nrow; ++i) {
tab_l_col0[i] = table_l_in_0[i];
tab_l_col1[i] = table_l_in_1[i];
}
// build 0 - 1/BUILD_FACTOR of table L into bf1
for (int i = 0; i < table_l_nrow / BUILD_FACTOR; i++) {
tab_o1_col0[i] = table_l_in_0[i];
tab_o1_col1[i] = table_l_in_1[i];
}
// build 1/BUILD_FACTOR - 2/BUILD_FACTOR of table L into bf2
for (int i = table_l_nrow / BUILD_FACTOR; i < (table_l_nrow / BUILD_FACTOR) * 2; i++) {
tab_o2_col0[i - table_l_nrow / BUILD_FACTOR] = table_l_in_0[i];
tab_o2_col1[i - table_l_nrow / BUILD_FACTOR] = table_l_in_1[i];
}
gqe::Table tab_l("Table L");
tab_l.addCol("l_orderkey", gqe::TypeEnum::TypeInt64, tab_l_col0, table_l_nrow);
tab_l.genRowIDWithValidation("l_rowid", "l_valid", 1, 1, tab_l_valid, table_l_nrow);
gqe::Table tab_o1("Table O1");
tab_o1.addCol("l_orderkey", gqe::TypeEnum::TypeInt64, tab_o1_col0, table_l_nrow / BUILD_FACTOR);
tab_o1.addCol("l_extendedprice", gqe::TypeEnum::TypeInt64, tab_o1_col1, table_l_nrow / BUILD_FACTOR);
gqe::BloomFilter bf1((uint64_t)table_l_nrow / BUILD_FACTOR * 2);
bf1.build(tab_o1, "l_orderkey");
gqe::Table tab_o2("Table O2");
tab_o2.addCol("l_orderkey", gqe::TypeEnum::TypeInt64, tab_o2_col0, table_l_nrow / BUILD_FACTOR);
tab_o2.addCol("l_extendedprice", gqe::TypeEnum::TypeInt64, tab_o2_col1, table_l_nrow / BUILD_FACTOR);
gqe::BloomFilter bf2((uint64_t)table_l_nrow / BUILD_FACTOR * 2);
bf2.build(tab_o2, "l_orderkey");
bf1.merge(bf2);
gqe::Table tab_c("Table C");
tab_c.addCol("c1", gqe::TypeEnum::TypeBool, tab_c_col0, table_c_nrow);
tab_c.addCol("c2", gqe::TypeEnum::TypeBool, tab_c_col1, table_c_nrow);
tab_l.info();
tab_o1.info();
tab_o2.info();
gqe::FpgaInit init_ocl(xclbin_path);
init_ocl.createHostBufs();
init_ocl.createDevBufs();
// constructor
gqe::Filter bigfilter(init_ocl);
bigfilter.SetBufAllocMaxNum(100);
gqe::StrategySet params;
params.sec_l = sec_l;
gqe::ErrCode err_code;
err_code = bigfilter.run(tab_l, "l_orderkey", bf1, "l_rowid > 0", tab_c, "c2", params);
if (err_code) {
return err_code;
}
std::cout << "Check results on CPU" << std::endl;
// get total number of sections from output table
size_t p_nsec = tab_c.getSecNum();
std::cout << "------------Checking result-------------" << std::endl;
// save filtered key/payload to std::unordered_map for checking
std::unordered_map<int64_t, int64_t> filtered_pairs;
int64_t index = 0;
for (size_t n = 0; n < p_nsec; n++) {
// get number of rows for each section
size_t nrow_per_sec = tab_c.getSecRowNum(n);
// validation flag is stored to tab_c::c2, see command line bigfilter.run()
ap_uint<8>* p_ptr = (ap_uint<8>*)tab_c.getValColPointer(1, p_nsec, n);
// insert the valid key rows
for (size_t i = 0; i < nrow_per_sec; i++) {
if (p_ptr[i / (sizeof(ap_uint<8>) * 8)][i % (sizeof(ap_uint<8>) * 8)]) {
filtered_pairs.insert(
std::make_pair<int64_t, int64_t>((int64_t)tab_l_col0[index], (int64_t)tab_l_col1[index]));
}
index++;
}
}
// test each added key in the filtered key list
int nerror = 0;
for (int i = 0; i < (table_l_nrow / BUILD_FACTOR) * 2; i++) {
bool valid = (tab_l_valid[i / 8] >> (i % 8)) & 0x1;
if (valid) {
std::unordered_map<int64_t, int64_t>::const_iterator got = filtered_pairs.find((int64_t)tab_l_col0[i]);
if (got == filtered_pairs.end()) {
nerror++;
std::cout << "Missing key = " << tab_l_col0[i] << " in bloom-filter." << std::endl;
}
}
}
if (nerror) std::cout << nerror << " errors found in " << table_l_nrow << " inputs.\n";
nerror ? logger.error(Logger::Message::TEST_FAIL) : logger.info(Logger::Message::TEST_PASS);
return nerror;
}
|
// based on the work of Robert Sedgewick and Kevin Wayne
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

public class Graph<T> implements Iterable<T>, Serializable {
    private Map<T, Set<T>> verticesMap;
    private Map<T, String> vertexColor;
private int edgesCount;
public Graph() {
verticesMap = new HashMap<>();
        vertexColor = new HashMap<>();
}
public int getNumVertices() {
return verticesMap.size();
}
public int getNumEdges() {
return edgesCount;
}
private void validateVertex(T v) {
if (!hasVertex(v)) throw new IllegalArgumentException(v.toString() + " is not a vertex");
}
public int degree(T v) {
validateVertex(v);
return verticesMap.get(v).size();
}
public int getMaxDegree() {
int maxDegree = 0;
for (T v : verticesMap.keySet()) {
int degree = verticesMap.get(v).size();
if (maxDegree < degree) {
maxDegree = degree;
}
}
return maxDegree;
}
public Set<T> getVertexNeighbour(T v) {
return verticesMap.get(v);
}
public void addEdge(T v, T w) {
if (!hasVertex(v)) addVertex(v);
if (!hasVertex(w)) addVertex(w);
if (!hasEdge(v, w)) edgesCount++;
verticesMap.get(v).add(w);
verticesMap.get(w).add(v);
}
    public void addVertexColor(T v, String color) {
        vertexColor.put(v, color);
    }
    public String getVertexColor(T v) {
        return vertexColor.get(v);
    }
private void addVertex(T v) {
if (!hasVertex(v)) verticesMap.put(v, new HashSet<T>());
}
public boolean hasEdge(T v, T w) {
validateVertex(v);
validateVertex(w);
return verticesMap.get(v).contains(w);
}
    public void fixObjectInMap() {
        // Canonicalize neighbour references: make every entry in a neighbour
        // set point at the exact instance stored as a key in verticesMap.
        HashMap<T, T> nodes = new HashMap<>();
        for (T v : verticesMap.keySet()) {
            nodes.put(v, v);
        }
        for (T v : verticesMap.keySet()) {
            Set<T> canonical = new HashSet<>();
            for (T w : verticesMap.get(v)) {
                canonical.add(nodes.get(w));
            }
            verticesMap.put(v, canonical);
        }
    }
public boolean hasVertex(T v) {
return verticesMap.containsKey(v);
}
@Override
public Iterator<T> iterator() {
return verticesMap.keySet().iterator();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
for (T v : verticesMap.keySet()) {
builder.append(v.toString() + ": ");
for (T w : verticesMap.get(v)) {
builder.append(w.toString() + " ");
}
builder.append("\n");
}
return builder.toString();
}
public static void main(String[] args) {
Graph<String> graph = new Graph<>();
graph.addEdge("A", "B");
graph.addEdge("A", "C");
graph.addEdge("C", "D");
graph.addEdge("D", "E");
graph.addEdge("D", "G");
graph.addEdge("E", "G");
graph.addVertex("H");
System.out.println(graph);
System.out.println("Vertices: " + graph.getNumVertices());
System.out.println("Edges: " + graph.getNumEdges());
}
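// deep copy via in-memory serialization; assumes the vertex type T is itself Serializable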
public Object copy() {
Object orig = this;
Object obj = null;
try {
// Write the object out to a byte array
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream out = new ObjectOutputStream(bos);
out.writeObject(orig);
out.flush();
out.close();
// Make an input stream from the byte array and read
// a copy of the object back in.
ObjectInputStream in = new ObjectInputStream(
new ByteArrayInputStream(bos.toByteArray()));
obj = in.readObject();
} catch (IOException e) {
e.printStackTrace();
} catch (ClassNotFoundException cnfe) {
cnfe.printStackTrace();
}
return obj;
}
} |
<filename>src/background/index.ts
chrome.runtime.onStartup.addListener(() => {
chrome.storage.sync.set({ isClicked: false }, () => {})
chrome.storage.sync.set({ lastImage: '' }, () => {})
})
let clickCounter: number = 0;
let recordId: string = ''
// decode a base64 payload into a Blob, slicing the input to keep peak memory low
function b64toBlob(b64Data: string, contentType: string = ''): Blob {
const sliceSize = 512
const byteCharacters = atob(b64Data)
const byteArrays: Uint8Array[] = []
for (let offset = 0; offset < byteCharacters.length; offset += sliceSize) {
const slice = byteCharacters.slice(offset, offset + sliceSize)
const byteNumbers = new Array(slice.length)
for (let i = 0; i < slice.length; i++) {
byteNumbers[i] = slice.charCodeAt(i)
}
byteArrays.push(new Uint8Array(byteNumbers))
}
return new Blob(byteArrays, { type: contentType })
}
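// usage sketch (hypothetical inputs): const blob = b64toBlob('aGVsbG8=', 'text/plain')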
chrome.runtime.onMessage.addListener(function(request, sender, sendResponse) {
if (request.name == 'screenshot') {
chrome.tabs.captureVisibleTab(function (dataUrl) {
sendResponse({ screenshotUrl: dataUrl});
});
}
if(request.name === "Record Click") {
clickCounter++
chrome.browserAction.setBadgeText({text: `${clickCounter}`})
chrome.tabs.captureVisibleTab(async function(dataUrl) {
var block = dataUrl.split(';')
var contentType = block[0].split(':')[1]
var realData = block[1].split(',')[1]
// Convert to blob
var blob = b64toBlob(realData, contentType)
// Create a FormData and append the file
var fd = new FormData()
fd.append('screenshot', blob)
fd.append('isPosition', 'true')
fd.append('positionX', request.Xcoordinate)
fd.append('positionY', request.Ycoordinate)
fd.append('recordName', request.title)
if(clickCounter === 1) {
recordId = `nmcHJs${Date.now()}`
fd.append('recordId', recordId)
handleNewRecord(fd)
}
else {
fd.append('recordId', recordId)
handleExistingRecord(fd)
}
})
}
if(request.name === "Start Recording") {
screenRecording()
}
if(request.name === "Stop Clicks") {
clickCounter = 0
chrome.browserAction.setBadgeText({ text: '' });
sendResponse({recordId: recordId})
}
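// returning true keeps the message channel open so the async
// captureVisibleTab callbacks above can still call sendResponse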
return true;
});
// take screen recording
function screenRecording() {
chrome.desktopCapture.chooseDesktopMedia(['screen', 'audio'],
function onAccessApproved(id) {
let recordedChunks: any = [];
const constraints = {
"video": {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: id,
minWidth: 1280,
minHeight: 720,
maxWidth: 1280,
maxHeight: 720
}
},
"audio": false
};
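// chromeMediaSource / chromeMediaSourceId are Chrome-only constraint keys,
// which is why the getUserMedia call below needs the @ts-ignore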
// @ts-ignore
navigator.mediaDevices.getUserMedia(constraints).then(gotMedia).catch(e => {
console.error('getUserMedia() failed: ' + e);
});
function gotMedia(stream: MediaStream) {
let recorder: MediaRecorder;
try {
recorder = new MediaRecorder(stream, { mimeType: "video/webm" });
} catch (e) {
console.error('Exception while creating MediaRecorder: ' + e);
return;
}
// push a data chunk into recordedChunks every 100 ms while recording
recorder.ondataavailable = (event) => { recordedChunks.push(event.data); };
recorder.start(100);
// when the user stops sharing the screen, finalize and upload the recording
stream.getVideoTracks()[0].onended = function () {
handleRecordedStream(stream, recorder);
};
}
async function handleRecordedStream(theStream: MediaStream, theRecorder: MediaRecorder) {
theRecorder.stop();
theStream.getTracks().forEach((track: any) => { track.stop(); });
var blob = new Blob(recordedChunks, { type: "video/webm" });
const file = new File([blob], 'recording')
const formData = new FormData();
formData.append('screenshot', file);
const requestOptions = {
method: 'POST',
body: formData,
}
const response = await fetch(
'http://localhost:2000/save-video',
requestOptions,
)
return response.json()
}
})
}
const handleNewRecord = async (formData: any) => {
const requestOptions = {
method: 'POST',
body: formData,
}
const response = await fetch(
'http://localhost:2000/handle-new-record',
requestOptions,
)
return response.json()
}
const handleExistingRecord = async (formData: any) => {
const requestOptions = {
method: 'POST',
body: formData,
}
const response = await fetch(
'http://localhost:2000/handle-existing-record',
requestOptions,
)
return response.json()
}
|