/**
* Calculate Variance sample given feature.
*
* http://www.wikihow.com/Calculate-Variance
 * variance (s^2) = (sum over all X of (X - mean)^2) / (n - 1), where n = count of Xs
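 * Example: X = {2, 4, 6}, mean = 4 -> s^2 = ((2-4)^2 + (4-4)^2 + (6-4)^2) / (3 - 1) = 4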
*
* @param featuresIndex the feature key
* @param labelIndex the class name index
* @param mean the mean
* @return the Variance
*/
private double calculateVarianceSample(int featuresIndex, int labelIndex, float mean) {
double temp = 0;
float value = 0f;
int classCount = 0;
SortedMap<Float, int[]> featureValues = this.featuresList.get(featuresIndex);
log.logln(Logger.lD, "FeatKey: " + featuresIndex +" feat size: " + featureValues.size());
for (Map.Entry<Float, int[]> entry : featureValues.entrySet()) {
int[] labelCount = entry.getValue();
int count = labelCount[labelIndex];
value = entry.getKey();
if (count > 0) {
for (int index = 0; index < count; index++) {
temp += ((value - mean) * (value - mean));
classCount++;
}
}
}
double result = 0;
if (classCount <= 1) {
    // Fewer than two samples: (n - 1) would be zero, so return the raw sum
    result = temp;
} else {
    result = temp / (classCount - 1);
}
log.logln("Result: " + result);
return result;
} |
# python/test_golden_master.py
import unittest
from gilded_rose import Item, GildedRose
class GoldenMasterTest(unittest.TestCase):
    def test_golden_master(self):
        with open("output.txt", 'r') as output_file:
            golden_master_lines = output_file.readlines()
        lines = golden_master_test_run()
        for i in range(len(golden_master_lines) - 1):
            self.assertEqual(golden_master_lines[i].rstrip("\n"), lines[i])
def golden_master_test_run():
lines = ["OMGHAI!"]
items = [
Item(name="+5 Dexterity Vest", sell_in=10, quality=20),
Item(name="Aged Brie", sell_in=2, quality=0),
Item(name="Elixir of the Mongoose", sell_in=5, quality=7),
Item(name="Sulfuras, Hand of Ragnaros", sell_in=0, quality=80),
Item(name="Sulfuras, Hand of Ragnaros", sell_in=-1, quality=80),
Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=15, quality=20),
Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=10, quality=49),
Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=5, quality=49),
Item(name="Conjured Mana Cake", sell_in=3, quality=6), # <-- :O
]
days = 2
import sys
if len(sys.argv) > 1:
days = int(sys.argv[1]) + 1
for day in range(days):
lines.append("-------- day %s --------" % day)
lines.append("name, sellIn, quality")
for item in items:
lines.append(str(item))
lines.append("")
GildedRose(items).update_quality()
return lines
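# Regenerates output.txt from the current implementation. A sketch of the
# intended workflow (inferred from the code): call this once, e.g. from a REPL,
# before refactoring, then run the test to guard against behavior changes.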
def persist_golden_master_testrun():
    with open("output.txt", mode="w+") as output_file:
        for line in golden_master_test_run():
            output_file.write(line)
            output_file.write("\n")
if __name__ == '__main__':
unittest.main()
|
/**
* @file TestCurveball.cpp
 * @date 28 September 2017
*
* @author Hung Tran
*/
#include <gtest/gtest.h>
#include <Utils/MonotonicPowerlawRandomStream.h>
#include <HavelHakimi/HavelHakimiIMGenerator.h>
#include <Utils/StreamPusher.h>
#include "EdgeStream.h"
#include <Curveball/EMCurveball.h>
#include <DistributionCount.h>
#include <Utils/StreamPusherRedirectStream.h>
#include <Utils/Hashfuncs.h>
#include <Utils/NodeHash.h>
class TestCurveball : public ::testing::Test { };
TEST_F(TestCurveball, pld_instance_without_paramest) {
// Config
const node_t num_nodes = 4000;
const degree_t min_deg = 5;
const degree_t max_deg = 100;
const uint32_t num_rounds = 10;
const Curveball::chunkid_t num_macrochunks = 8;
const Curveball::chunkid_t num_batches = 8;
const Curveball::chunkid_t num_fanout = 2;
const Curveball::msgid_t num_max_msgs = std::numeric_limits<Curveball::msgid_t>::max();
const int num_threads = 4;
const size_t insertion_buffer_size = 128;
// Build edge list
EdgeStream edge_stream;
EdgeStream out_edge_stream;
HavelHakimiIMGeneratorWithDegrees hh_gen(HavelHakimiIMGeneratorWithDegrees::PushDirection::DecreasingDegree);
MonotonicPowerlawRandomStream<false> degree_sequence(min_deg, max_deg, -2.0, num_nodes, 1.0, stxxl::get_next_seed());
StreamPusher<decltype(degree_sequence), decltype(hh_gen)>(degree_sequence, hh_gen);
hh_gen.generate();
StreamPusher<decltype(hh_gen), EdgeStream>(hh_gen, edge_stream);
hh_gen.finalize();
auto & degree_stream = hh_gen.get_degree_stream();
// Run algorithm
edge_stream.rewind();
degree_stream.rewind();
Curveball::EMCurveball<Curveball::ModHash, decltype(degree_stream)> algo(edge_stream,
degree_stream,
num_nodes,
num_rounds,
out_edge_stream,
num_macrochunks,
num_batches,
num_fanout,
2 * Curveball::UIntScale::Gi,
2 * Curveball::UIntScale::Gi,
num_max_msgs,
num_threads,
insertion_buffer_size);
algo.run();
// Check edge count
ASSERT_EQ(out_edge_stream.size(), edge_stream.size());
// Check degrees
stxxl::sorter<node_t, Curveball::NodeComparator> node_tokens(Curveball::NodeComparator{}, 2 * Curveball::UIntScale::Gi);
out_edge_stream.rewind();
for (; !out_edge_stream.empty(); ++out_edge_stream) {
const auto edge = *out_edge_stream;
node_tokens.push(edge.first);
node_tokens.push(edge.second);
}
node_tokens.sort();
DistributionCount<decltype(node_tokens), size_t> token_count(node_tokens);
degree_stream.rewind();
for (; !token_count.empty(); ++token_count, ++degree_stream) {
ASSERT_EQ(*degree_stream, static_cast<degree_t>((*token_count).count));
}
}
TEST_F(TestCurveball, pld_instance_with_paramest) {
// Config
const node_t num_nodes = 16000;
const degree_t min_deg = 5;
const degree_t max_deg = 100;
const uint32_t num_rounds = 10;
// Build edge list
EdgeStream edge_stream;
EdgeStream out_edge_stream;
HavelHakimiIMGeneratorWithDegrees hh_gen(HavelHakimiIMGeneratorWithDegrees::PushDirection::DecreasingDegree);
MonotonicPowerlawRandomStream<false> degree_sequence(min_deg, max_deg, -2.0, num_nodes, 1.0, stxxl::get_next_seed());
StreamPusher<decltype(degree_sequence), decltype(hh_gen)>(degree_sequence, hh_gen);
hh_gen.generate();
StreamPusher<decltype(hh_gen), EdgeStream>(hh_gen, edge_stream);
hh_gen.finalize();
auto & degree_stream = hh_gen.get_degree_stream();
// Run algorithm
edge_stream.rewind();
degree_stream.rewind();
Curveball::EMCurveball<Curveball::ModHash, decltype(degree_stream)> algo(edge_stream,
degree_stream,
num_nodes,
num_rounds,
out_edge_stream,
omp_get_max_threads(),
8 * Curveball::UIntScale::Gi,
true);
algo.run();
// Check edge count
ASSERT_EQ(out_edge_stream.size(), edge_stream.size());
// Check degrees
stxxl::sorter<node_t, Curveball::NodeComparator> node_tokens(Curveball::NodeComparator{}, 2 * Curveball::UIntScale::Gi);
out_edge_stream.rewind();
for (; !out_edge_stream.empty(); ++out_edge_stream) {
const auto edge = *out_edge_stream;
node_tokens.push(edge.first);
node_tokens.push(edge.second);
}
node_tokens.sort();
DistributionCount<decltype(node_tokens), size_t> token_count(node_tokens);
degree_stream.rewind();
for (; !token_count.empty(); ++token_count, ++degree_stream) {
ASSERT_EQ(*degree_stream, static_cast<degree_t>((*token_count).count));
}
}
|
// NextArg retrieves the next argument from the commandline.
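// Example (hypothetical): with args = []string{"-o", "out.txt"} and *i at the
// index of "-o", NextArg advances *i and returns "out.txt"; if nothing
// follows, it prints an error and exits.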
func NextArg(i *int, args []string) string {
(*i)++
if (*i) >= len(args) {
fmt.Fprintln(os.Stderr, "Expected another commandline argument.")
os.Exit(1)
}
return args[*i]
} |
If you live in the United States, you live in a high tech surveillance grid that is becoming more oppressive with each passing day. In America today, the control freaks that run things are completely obsessed with watching, tracking, monitoring and recording virtually everything that we do. If we continue on the path that we are currently on, we will be heading into a future where there will be absolutely no privacy of any kind. In fact, many would argue that we are essentially there already.

Many people speak of this as being the “Information Age”, but most Americans don’t really stop and think about what that really means. Most of the information that is considered to be so “valuable” is actually about all of us. Businesses want to know as much about all of us as possible so that they can sell us stuff. Government officials want to know as much about all of us as possible so that they can make sure that we are not doing anything that they don’t like.

There is a constant hunger for even more information, and so the surveillance technologies just continue to become even more advanced and the Big Brother control grid being constructed all around us just continues to become even more pervasive. Even though you may not be consciously aware of it, the truth is that it is surrounding you right now even as you read this. We live in a society where liberty and freedom are literally being strangled to death, but most Americans don’t seem to care.
Do you know who else gets watched, tracked and monitored 24 hours a day?
Prisoners do.
Surveillance is a form of control, and at this point we are little more than inmates inside a gigantic Big Brother surveillance grid.
Posted below is a list of 32 privacy destroying technologies that are systematically transforming America into a giant prison. Following each item, there is a short excerpt from a news report about that particular technology. If you want to read the entire article where the excerpt came from, just click the link to find the source. Individually, each of these technologies is deeply troubling. But when you step back and take a look at them all collectively, it is absolutely horrifying…
#1 Spying On Us Through Our Televisions: Put simply, our TVs have started spying on us.
Last week, there was a high-profile case in point. An IT consultant called Jason Huntley, who lives in a village near Hull, uncovered evidence that a flat-screen television, which had been sitting in his living room since the summer, was secretly invading his family’s privacy.
He began investigating the £400 LG device after noticing that its home screen appeared to be showing him ‘targeted’ adverts — for cars, and Knorr stock cubes — based on programmes he’d just been watching.
Huntley decided to monitor information that the so-called smart TV — which connects to the internet — was sending and receiving. He did this by using his laptop effectively as a bridge between his television and the internet receiver, so the laptop was able to show all the data being sucked out of his set.
He soon discovered that details of not just every show he watched but every button he pressed on his remote control were being sent back to LG’s corporate headquarters in South Korea.
#2 Next Generation Facial Recognition Technology: In a single second, law enforcement agents can match a suspect against millions upon millions of profiles in vast detailed databases stored on the cloud. It’s all done using facial recognition, and in Southern California it’s already occurring.
Imagine the police taking a picture: any picture of a person, anywhere, and matching it on the spot in less than a second to a personalized profile, scanning millions upon millions of entries from within vast, intricate databases stored on the cloud.
#3 Your Next Password Might Be Your Eye: You can use your phone to figure out your heart rate, track how much you walk, and even measure your sex life. But the powerful sensors inside smartphones can do more than keep you updated on your health: They can also turn your body into a password.
EyeVerify is a small Kansas City–based security company. Its core product is biometric eyescan software for smartphones. Every person has a unique pattern of blood vessels in their eyes. These blood vessels contrast with the whites of the eyes so clearly that they can always be read, even when there’s a lack of light. The best part? Those blood-vessel patterns can be photographed by phones and turned into unique data signatures which can be used to replace or supplement traditional passwords. “We turn a picture of your eye into a key that protects your digital identity,” says EyeVerify CEO Toby Rush.
#4 “Pre-Crime” Surveillance Cameras: Hundreds of pre-crime surveillance cameras are to be installed in San Francisco’s subway system that will analyze “suspicious behavior” and alert guards to potential criminal or terrorist activity – before any crime has been committed.
“Manufacturers BRS Labs said it has installed the cameras at tourist attractions, government buildings and military bases in the U.S. In its latest project BRS Labs is to install its devices on the transport system in San Francisco, which includes buses, trams and subways,” reports the Daily Mail.
The cameras are programmed with a list of behaviors considered “normal”. Anything that deviates from usual activity is classified as suspicious and guards are immediately alerted via text message or a phone call.
Equipped with the ability to track up to 150 suspects at a time, the cameras build up a “memory” of suspicious behavior to determine what constitutes potential criminal activity.
A total of 288 cameras will be installed across 12 transport hubs.
#5 New Software That Will Store And Analyze Millions Of Our Voices: ‘Voice Grid Nation’ is a system that uses advanced algorithms to match identities to voices. Brought to the US by Russia’s Speech Technology Center, it claims to be capable of allowing police, federal agencies and other law enforcement personnel to build up a huge database containing up to several million voices.
When authorities intercept a call they’ve deemed ‘hinky’, the recording is entered into the VoiceGrid program, which (probably) buzzes and whirrs and spits out a match. In five seconds, the program can scan through 10,000 voices, and it only needs 3 seconds for speech analysis. All that, combined with 100 simultaneous searches and the storage capacity of 2 million samples, gives SpeechPro, as the company is known in the US, the right to claim a 90% success rate.
#6 A Device That Captures Your Fingerprints From 20 Feet Away: Gaining access to your gym or office building could soon be as simple as waving a hand at the front door. A Huntsville, Ala.-based company called IDair is developing a system that can scan and identify a fingerprint from nearly 20 feet away. Coupled with other biometrics, it could soon allow security systems to grant or deny access from a distance, without requiring users to stop and scan a fingerprint, swipe an ID card, or otherwise lose a moment dealing with technology.
Currently IDair’s primary customer is the military, but the startup wants to open up commercially to any business or enterprise that wants to put a layer of security between its facilities and the larger world. A gym chain is already beta testing the system (no more using your roommate’s gym ID to get in a free workout), and IDair’s founder says that at some point his technology could enable purchases to be made biometrically, using fingerprints and irises as unique identifiers rather than credit card numbers and data embedded in magnetic strips or RFID chips.
#7 Molecular Scanners That Can Secretly Scan You From 164 Feet Away: Within the next year or two, the U.S. Department of Homeland Security will instantly know everything about your body, clothes, and luggage with a new laser-based molecular scanner fired from 164 feet (50 meters) away. From traces of drugs or gun powder on your clothes to what you had for breakfast to the adrenaline level in your body—agents will be able to get any information they want without even touching you.
And without you knowing it.
The technology is so incredibly effective that, in November 2011, its inventors were subcontracted by In-Q-Tel to work with the US Department of Homeland Security. In-Q-Tel is a company founded “in February 1999 by a group of private citizens at the request of the Director of the CIA and with the support of the U.S. Congress.” According to In-Q-Tel, they are the bridge between the Agency and new technology companies.
Their plan is to install this molecular-level scanning in airports and border crossings all across the United States.
#8 Mobile Backscatter Vans: American cops are set to join the US military in deploying American Science & Engineering’s Z Backscatter Vans, or mobile backscatter radiation x-rays. These are what TSA officials call “the amazing radioactive genital viewer,” now seen in airports around America, ionizing the private parts of children, the elderly, and you (yes you).
These pornoscannerwagons will look like regular anonymous vans, and will cruise America’s streets, indiscriminately peering through the cars (and clothes) of anyone in range of its mighty isotope-cannon. But don’t worry, it’s not a violation of privacy. As AS&E’s vice president of marketing Joe Reiss sez, “From a privacy standpoint, I’m hard-pressed to see what the concern or objection could be.”
#9 RFID Microchips In Our Schools: Upon arriving in the morning, according to the Associated Press, each student at the CCC-George Miller preschool will don a jersey with a stitched in RFID chip. As the kids go about the business of learning, sensors in the school will record their movements, collecting attendance for both classes and meals. Officials from the school have claimed they’re only recording information they’re required to provide while receiving federal funds for their Headstart program.
#10 Palm Scanning Devices In Our Schools: Puyallup School District says by the end of the year, every lunchroom will have palm scanning devices that will allow students to pay for their lunch with a wave of a hand.
“Efficiency is another reason for implementing this. The accuracy of the scanner reduces human error, reduces fraud, the ability for students to share numbers allows parents to know the money that they’re spending is being spent on their child’s lunch,” said Brian Fox, spokesperson for Puyallup School District.
The district says the devices will be in all 32 schools by the end of the school year.
#11 Iris Scanning Devices In Our Schools: Kids lose their school IDs but they don’t often lose their eyeballs.
That’s one of the reasons why a growing number of schools are replacing traditional identification cards with iris scanners. By the fall, several schools — ranging from elementary schools to colleges — will be rolling out various iris scanning security methods.
#12 Implantable Medical Laboratory-On-A-Chip: French researchers are zeroing in on a tiny, chip-based medical laboratory test device designed to be implanted under the skin. This miniature blood laboratory may revolutionize healthcare by continuously monitoring high-risk, chronically ill patients.
This ground-breaking work is being done by developers at the École Polytechnique Fédérale de Lausanne (EPFL), or Swiss Institute of Technology, in Lausanne, Switzerland. The implantable lab-testing device is linked to the user’s cell phone and can send alerts to doctors before symptoms are evident.
#13 Smart Phone Eye Scanners: A patent application filed by Samsung seems to indicate that next-generation Galaxy smartphones might feature biometric authentication as an alternative to PINs or passwords.
Unlike arch-rival Apple’s Touch ID, however, the South Korean technology giant won’t be scanning users’ fingerprints. Instead, the patent – spotted by blog Patent Bolt – describes a novel iris scanning technique.
According to Samsung, the non-contact nature of eye scanning means handset owners “do not feel uncomfortable” with the technology, while at the same time the iris offers more unique patterns than the fingerprint does.
#14 Cell Phone Tower “Stingrays”: You make a call on your cellphone thinking the only thing standing between you and the recipient of your call is your carrier’s cellphone tower. In fact, that tower your phone is connecting to just might be a boobytrap set up by law enforcement to ensnare your phone signals and maybe even the content of your calls.
So-called stingrays are one of the new high-tech tools that authorities are using to track and identify you. The devices, about the size of a suitcase, spoof a legitimate cellphone tower in order to trick nearby cellphones and other wireless communication devices into connecting to the tower, as they would to a real cellphone tower.
The government maintains that the stingrays don’t violate Fourth Amendment rights, since Americans don’t have a legitimate expectation of privacy for data sent from their mobile phones and other wireless devices to a cell tower.
#15 Using Your Cell Phone Microphone As A “Roving Bug”: The FBI appears to have begun using a novel form of electronic surveillance in criminal investigations: remotely activating a mobile phone’s microphone and using it to eavesdrop on nearby conversations.
The technique is called a “roving bug,” and was approved by top U.S. Department of Justice officials for use against members of a New York organized crime family who were wary of conventional surveillance techniques such as tailing a suspect or wiretapping him.
#16 The Government Is Using Our Cell Phones To Track Our Movements: One of the biggest changes is the ability to track your physical location. I’m sorry I came in at the end of the previous talk. I heard them talk about surveying cell phones with a drone, in a wide area — this is something that is done routinely now. I can tell you that everybody that attended an Occupy Wall Street protest, and didn’t turn their cell phone off, or put it — and sometimes even if they did — the identity of that cell phone has been logged, and everybody who was at that demonstration, whether they were arrested, not arrested, whether their photos were ID’d, whether an informant pointed them out, it’s known they were there anyway. This is routine.
#17 Police Using “Extraction Devices” To Take Our Cell Phone Data: The Michigan State Police have a handful of portable machines called “extraction devices” that have the potential to download personal information from motorists they pull over, and the ACLU would like to know more about them.
The devices, sold by a company called Cellebrite, can download text messages, photos, video, and even GPS data from most brands of cell phones. The handheld machines have various interfaces to work with different models and can even bypass security passwords and access some information.
#18 Automated License Plate Readers: More than 250 cameras in the District and its suburbs scan license plates in real time, helping police pinpoint stolen cars and fleeing killers. But the program quietly has expanded beyond what anyone had imagined even a few years ago.
With virtually no public debate, police agencies have begun storing the information from the cameras, building databases that document the travels of millions of vehicles.
Nowhere is that more prevalent than in the District, which has more than one plate-reader per square mile, the highest concentration in the nation. Police in the Washington suburbs have dozens of them as well, and local agencies plan to add many more in coming months, creating a comprehensive dragnet that will include all the approaches into the District.
#19 Street Lights That Can Record Private Conversations: Federally-funded high-tech street lights now being installed in American cities are not only set to aid the DHS in making “security announcements” and acting as talking surveillance cameras, they are also capable of “recording conversations,” bringing the potential privacy threat posed by ‘Intellistreets’ to a whole new level.
#20 Spying On Us Through Our Video Game Systems: Users of the new Xbox One are complaining that Kinect is monitoring their Skype conversations for swearing and then punishing them with account bans. Microsoft has admitted it is punishing gamers for bad language but denied that it is snooping on private Skype chats.
#21 Data Mining: The company fits into a category called database marketing. It started in 1969 as an outfit called Demographics Inc., using phone books and other notably low-tech tools, as well as one computer, to amass information on voters and consumers for direct marketing. Almost 40 years later, Acxiom has detailed entries for more than 190 million people and 126 million households in the U.S., and about 500 million active consumers worldwide. More than 23,000 servers in Conway, just north of Little Rock, collect and analyze more than 50 trillion data ‘transactions’ a year.
#22 A New Technology Called “Coin” Is Being Called “The Future Of Money”: The future of money has arrived, and it’s called Coin.
It looks like a credit card. It’s the size of a credit card. It swipes in credit card machines. But it holds the information of up to eight of your debit, credit, rewards, or gift cards. And you can switch between cards by simply pressing a button.
The new product, launched recently, promises to change the way consumers spend money in a secure and efficient way.
#23 A National Database Of All Financial Transactions: The Consumer Financial Protection Bureau (CFPB) is looking to create a “Google Earth” of every financial transaction of every American, Sen. Mike Enzi (R-WY) warned today in a Senate speech opposing confirmation of Richard Cordray as CFPB director.
“This bill (creating the CFPB) was supposed to be about regulating Wall Street. Instead, it’s creating a Google Earth on every financial transaction. That’s right: the government will be able to see every detail of your finances. Your permission – not needed,” Sen. Enzi said.
#24 The Coming National DNA Database: A national DNA database is coming. Barack Obama has already said that he wants one. A major Supreme Court decision last month paved the way for one. The DNA of those that commit “serious crimes” is already being routinely collected all over the nation. Some states (such as New Jersey) are now passing laws that will require DNA collection from those charged with committing “low level crimes”. And a law that was passed under George W. Bush allows the federal government to screen the DNA of all newborn babies in the United States. So how long will it be before we are all required to give DNA samples to the authorities?
#25 The Systematic Recording Of Talk Radio Programs: Next time you call a talk radio station, beware: The FBI may be listening.
According to WMAL.com, “The FBI has awarded a $524,927 contract to a Virginia company to record as much radio news and talk programming as it can find on the Internet. … The FBI says it is not playing Big Brother by policing the airwaves, but rather seeking access to what airs as potential evidence.”
#26 The FBI’s Next Generation Identification System: The US Federal Bureau of Investigation has begun rolling out its new $1 billion biometric Next Generation Identification (NGI) system. In essence, NGI is a nationwide database of mugshots, iris scans, DNA records, voice samples, and other biometrics, that will help the FBI identify and catch criminals — but it is how this biometric data is captured, through a nationwide network of cameras and photo databases, that is raising the eyebrows of privacy advocates.
Until now, the FBI relied on IAFIS, a national fingerprint database that has long been due an overhaul. Over the last few months, the FBI has been pilot testing a facial recognition system — and soon, detectives will also be able to search the system for other biometrics such as DNA records and iris scans.
#27 Trapwire: “You are being watched. The government has a secret system – a machine – that spies on you every hour of every day.” That is how each episode of “Person of Interest” on CBS begins. Most Americans that have watched the show just assume that such a surveillance network is completely fictional and that the government would never watch us like that. Sadly, most Americans are wrong. Shocking new details have emerged this week which prove that a creepy nationwide network of spy cameras is being rolled out across the United States. Reportedly, these new spy cameras are “more accurate than modern facial recognition technology”, and every few seconds they send back data from cities and major landmarks all over the United States to a centralized processing center where it is analyzed. The authorities believe that the world has become such a dangerous place that the only way to keep us all safe is to watch what everyone does all the time. But the truth is that instead of “saving America”, all of these repressive surveillance technologies are slowly killing our liberties and our freedoms. America is being transformed into an Orwellian prison camp right in front of our eyes, and very few people are even objecting to it.
#28 Spyware That Monitors The Behavior Of Government Workers: When the Food and Drug Administration started spying on a group of agency scientists, it installed monitoring software on their laptop computers to capture their communications.
The software, sold by SpectorSoft of Vero Beach, Fla., could do more than vacuum up the scientists’ e-mails as they complained to lawmakers and others about medical devices they thought were dangerous. It could be programmed to intercept a tweet or Facebook post. It could snap screen shots of their computers. It could even track an employee’s keystrokes, retrieve files from hard drives or search for keywords.
#29 Political Campaign Databases: If you voted this election season, President Obama almost certainly has a file on you. His vast campaign database includes information on voters’ magazine subscriptions, car registrations, housing values and hunting licenses, along with scores estimating how likely they were to cast ballots for his reelection.
#30 Spying On Us Through Our Appliances: Spies will no longer have to plant bugs in your home – the rise of ‘connected’ gadgets controlled by apps will mean that people ‘bug’ their own homes, says CIA director David Petraeus.
The CIA claims it will be able to ‘read’ these devices via the internet – and perhaps even via radio waves from outside the home.
Everything from remote controls to clock radios can now be controlled via apps – and chip company ARM recently unveiled low-powered, cheaper chips which will be used in everything from fridges and ovens to doorbells.
The resultant chorus of ‘connected’ gadgets will be able to be read like a book – and even remote-controlled, CIA Director David Petraeus said, according to a recent report by Wired’s ‘Danger Room’ blog.
#31 Unmanned Aerial Drones: Obama’s Environmental Protection Agency is using aerial drones to spy on farmers in Nebraska and Iowa. The surveillance came under scrutiny last week when Nebraska’s congressional delegation sent a joint letter to EPA Administrator Lisa Jackson.
On Friday, EPA officialdom in “Region 7” responded to the letter.
“Courts, including the Supreme Court, have found similar types of flights to be legal (for example to take aerial photographs of a chemical manufacturing facility) and EPA would use such flights in appropriate instances to protect people and the environment from violations of the Clean Water Act,” the agency said in response to the letter.
#32 NSA Snooping: Speaking to a raucous audience via Skype on Friday, Greenwald said the NSA’s “brand-new technology” gives it the power to “redirect into its own repositories one billion cell phone calls every single day.”
“But what we’re really talking about here is a globalized system that prevents any form of electronic communication from taking place without its being stored and monitored by the National Security Agency,” Greenwald said. “It doesn’t mean that they’re listening to every call; it means they’re storing every call and have the capability to listen to them at any time, and it does mean that they’re collecting millions upon millions upon millions of our phone and email records.”
Greenwald added that the NSA technology is “designed to destroy all privacy. And what’s incredibly menacing about it is that it’s all taking place in the dark with no accountability and virtually no safeguards.”
—–
Every single day, the NSA intercepts and permanently stores close to 2 billion emails and phone calls in addition to a whole host of other data.
So where does all of that data go?
Well, the NSA recently completed construction of the largest data center in the history of the world out in Utah. It will reportedly have the capability of storing 5 zettabytes of data. That is an amount of data that is almost incomprehensible.
This data center has approximately a million square feet of storage space, it cost nearly 2 billion dollars to build, and it is going to take about 40 million dollars a year just to pay for the energy needed to run it.
Without a doubt, we have become a surveillance society.
And if the American people don’t object now, this will just be the tip of the iceberg.
If we continue down this same path, what is coming will be far more horrifying than anything that George Orwell ever dreamed of. |
/**
* To update existing project
* @param authenticatedUserId
* the authenticated user id.
* @param projectId
* the project id to be updated
* @param project
 * the project details to be updated; only name, version, and description may be changed.
* @return ResponseEntity<Project>
 * returns the updated Project object with status details.
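 *
 * Example request (hypothetical values):
 *   PUT /users/jdoe/projects/proj-123
 *   Body: { "name": "demo", "version": "0.2", "description": "updated" }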
*/
@ApiOperation(value = "To update existing project")
@RequestMapping(value = "/users/{authenticatedUserId}/projects/{projectId}", method = RequestMethod.PUT)
public ResponseEntity<?> updateProject(
        @ApiParam(value = "The Acumos Login Id", required = true) @PathVariable("authenticatedUserId") String authenticatedUserId,
        @ApiParam(value = "The ProjectId to be updated", required = true) @PathVariable("projectId") String projectId,
        @ApiParam(value = "Project Details") @RequestBody Project project) {
logger.debug("updateProject() Begin");
Project result = null;
projectValidationService.validateInput(authenticatedUserId, project);
inputValidationService.isValuePresent("ProjectId", projectId);
projectService.projectExists(projectId);
projectService.isOwnerOfProject(authenticatedUserId, projectId);
projectService.isProjectArchived(projectId);
result = projectService.updateProject(authenticatedUserId, projectId, project);
logger.debug("updateProject() End");
return new ResponseEntity<Project>(result, HttpStatus.OK);
} |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE RankNTypes #-}
module Expr where
import Data.Int
class CustomAddition a where
(.+) :: a -> a -> a
instance CustomAddition Int8 where
(.+) = (+)
instance CustomAddition Int16 where
(.+) = (+)
instance CustomAddition Int32 where
(.+) = (+)
instance CustomAddition Int64 where
(.+) = (+)
instance CustomAddition Float where
(.+) = (+)
instance CustomAddition [a] where
(.+) = (++)
data PExpression a where
PType :: PType a -> PExpression a
PFunc :: PFunc a -> PExpression a
data PType a where
PInt8 :: Int8 -> PType Int8
PInt16 :: Int16 -> PType Int16
PInt32 :: Int32 -> PType Int32
PInt64 :: Int64 -> PType Int64
PFloat :: Float -> PType Float
PBoolean :: Bool -> PType Bool
PChar :: Char -> PType Char
PList :: [PExpression a] -> PType [a]
PString :: [Char] -> PType [Char]
data PFunc a where
PAddition :: forall a. CustomAddition a => PExpression a -> PExpression a -> PFunc a
PMultiplication :: forall a. Num a => PExpression a -> PExpression a -> PFunc a
PEquals :: forall a. Eq a => PExpression a -> PExpression a -> PFunc Bool
evalPType :: PType a -> a
evalPType (PInt8 n) = n
evalPType (PInt16 n) = n
evalPType (PInt32 n) = n
evalPType (PInt64 n) = n
evalPType (PBoolean b) = b
evalPType (PFloat f) = f
evalPType (PChar c) = c
evalPType (PString l) = l
evalPType (PList l) = map eval l
eval :: PExpression a -> a
eval (PFunc (PAddition a b)) = eval a .+ eval b
eval (PFunc (PMultiplication a b)) = eval a * eval b
eval (PFunc (PEquals a b)) = eval a == eval b
eval (PType a) = evalPType a
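-- A small usage sketch: evaluating (1 + 2) :: Int8 through the expression tree.
-- >>> eval (PFunc (PAddition (PType (PInt8 1)) (PType (PInt8 2))))
-- 3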
|
#include <stdio.h>
#include <string>
#include <vector>
typedef std::vector< std::string > CSol;
CSol best;
std::string w[6];
bool mask[6];
int p[6];
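// Strategy (as reconstructed from the code): try all 6! assignments of the six
// input words to the six crossword slots; print() writes one word into the grid
// and fails on a letter clash; gr() keeps the lexicographically smallest grid.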
bool print( CSol & sol, int i, int j, std::string const & w, bool vert )
{
for( int k = 0; k < (int)w.size(); ++ k )
{
if( sol[i][j] != '.' && sol[i][j] != w[k] )
return false;
sol[i][j] = w[k];
if( vert) ++i; else ++ j;
}
return true;
}
bool gr( CSol const & a, CSol const & b )
{
for( int i = 0; i < (int)a.size(); ++ i )
{
if( a[i] != b[i] )
return a[i] > b[i];
}
return false;
}
void Process()
{
CSol sol;
int width = w[p[0]].size() + w[p[5]].size() - 1;
int height = w[p[1]].size() + w[p[4]].size() - 1;
if( w[p[2]].size() != height || w[p[3]].size() != width )
return;
for( int i = 0; i < height; ++ i )
sol.push_back( std::string( width, '.' ) );
bool res =
print( sol, 0, 0, w[p[0]], false )
&& print( sol, 0, 0, w[p[1]], true)
&& print( sol, 0, w[p[0]].size()-1, w[p[2]], true)
&& print( sol, w[p[1]].size()-1, 0, w[p[3]], false )
&& print( sol, w[p[1]].size()-1, width-1, w[p[4]], true )
&& print( sol, height-1, w[p[0]].size()-1, w[p[5]], false);
if( res && ( best.empty() || gr( best, sol ) ) )
best = sol;
}
void Find( int i )
{
if( i == 6 )
{
Process();
return;
}
for( int j = 0; j < 6; ++ j )
{
if( mask[j] )
{
mask[j] = false;
p[i] = j;
Find(i+1);
mask[j] = true;
}
}
}
int main()
{
//freopen( "in.txt", "rt", stdin );
for( int i = 0; i < 6; ++ i )
{
char buf[100];
scanf( "%s", buf );
w[i] = buf;
}
for( int i = 0; i < 6; ++ i )
mask[i] = true;
Find(0 );
if( best.empty() )
{
printf( "Impossible" );
return 0;
}
for( int i = 0; i < (int)best.size(); ++ i )
printf( "%s\n", best[i].c_str() );
return 0;
} |
def load(self, dicom_file, force=True):
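        """Load a DICOM file (or an already-parsed pydicom Dataset) and record
        its absolute path and basename on the instance; `force` is passed
        through to pydicom's read_file."""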
self.seen = []
if isinstance(dicom_file, Dataset):
self.dicom = dicom_file
else:
if not os.path.exists(dicom_file):
bot.exit("%s does not exist." % dicom_file)
self.dicom = read_file(dicom_file, force=force)
self.dicom_file = os.path.abspath(self.dicom.filename)
self.dicom_name = os.path.basename(self.dicom_file) |
import { Component, Input, EventEmitter, Output } from '@angular/core';
import { CategoryModel } from '../../../models/category/category.model';
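// Usage sketch (selector and bindings are taken from this file; the handler
// names are assumptions):
// <admin-category [adminCategoryProp]="category"
//                 (editCategory)="onEdit($event)"
//                 (deleteCategory)="onDelete($event)"></admin-category>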
@Component({
templateUrl: './category.component.html',
selector: 'admin-category'
})
export class AdminCategoryComponent {
@Input('adminCategoryProp') category: CategoryModel;
@Output() editCategory = new EventEmitter<string>();
@Output() deleteCategory = new EventEmitter<string>();
edit() {
this.editCategory.emit(this.category._id);
}
delete() {
this.deleteCategory.emit(this.category._id);
}
} |
import argparse
import time
import numpy as np
from scipy.stats import rankdata
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
# from alexnet import alexnet
from resnet.resnetcifar import *
from densenet.densenetcifar import *
# from wideresnet.wideresnet import *
from datasets.cifarperturbed import CIFAR10_P
from datasets.cifarperturbed import CIFAR100_P
from datasets.noisycifar import NCIFAR10
from datasets.noisycifar import NCIFAR100
parser = argparse.ArgumentParser(description='Test on CIFAR-10-P')
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset (cifar10 [default] or cifar100)')
parser.add_argument('--perturbed-datadir', default='', type=str, help='root path of the CIFAR-P dataset')
parser.add_argument('-b', '--batch-size', default=4, type=int, help='mini-batch size (default: 4)')
parser.add_argument('--print-freq', '-p', default=50, type=int, help='print frequency (default: 50)')
parser.add_argument('--layers', default=20, type=int, help='total number of layers (default: 20)')
parser.add_argument('--widen-factor', default=10, type=int, help='widen factor (default: 10)')
parser.add_argument('--growth', default=12, type=int, help='number of new channels per layer (default: 12)')
parser.add_argument('--droprate', default=0, type=float, help='dropout probability (default: 0.0)')
parser.add_argument('--reduce', default=0.5, type=float, help='compression rate in transition stage (default: 0.5)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false', help='To not use bottleneck block')
parser.add_argument('--no-efficient', dest='efficient', action='store_false', help='to not use efficient impl.')
parser.add_argument('--pushpull', action='store_true', help='use Push-Pull as 1st layer (default: False)')
parser.add_argument('--pp-block1', action='store_true', help='use 1st PushPull residual block')
parser.add_argument('--pp-block1-reduced', action='store_true', help='use 1st PushPull residual block reduced')
parser.add_argument('--modelfile', default='checkpoint', type=str, help='name of the file of the model')
parser.add_argument('--pp-all', action='store_true', help='use all PushPull residual block')
parser.add_argument('--pp-all-reduced', action='store_true', help='use all PushPull residual block reduced')
parser.add_argument('--alpha-pp', default=1, type=float, help='inhibition factor (default: 1.0)')
parser.add_argument('--scale-pp', default=2, type=float, help='upsampling factor for PP kernels (default: 2)')
parser.add_argument('--train-alpha', action='store_true', help='train alpha of push-pull kernels (Default: False)')
parser.add_argument('--lpf-size', default=None, type=int, help='Size of the LPF for anti-aliasing (default: None)')
parser.add_argument('--arch', default='resnet', type=str, help='architecture (resnet, densenet, ...)')
parser.add_argument('--name', default='01-20', type=str, help='name of experiment-model')
args = parser.parse_args()
best_prec1 = 0
use_cuda = False
perturbations = ['gaussian_noise', 'shot_noise',
'motion_blur', 'zoom_blur',
'spatter', 'brightness',
'translate', 'rotate', 'tilt', 'scale',
'speckle_noise', 'gaussian_blur', 'snow', 'shear']
# perturbations = ['translate']
# Root folder of the CIFAR-P data set.
# Please change it to the path of the folder where you un-tar the CIFAR-P data
# set, or pass the path via --perturbed-datadir.
pert_dataset_root = '/default/path/to/CIFAR-P/root/folder/'
NCLASSES = 10
identity = np.asarray(range(1, NCLASSES + 1))
cum_sum_top5 = np.cumsum(np.asarray([0] + [1] * 5 + [0] * (NCLASSES-1 - 5)))
recip = 1./identity
def dist(sigma, mode='top5'):
if mode == 'top5':
return np.sum(np.abs(cum_sum_top5[:5] - cum_sum_top5[sigma-1][:5]))
elif mode == 'zipf':
return np.sum(np.abs(recip - recip[sigma-1])*recip)
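# ranking_dist (CIFAR-P metric): average distance between the class rankings of
# consecutive frames in a perturbation sequence; 'top5' compares top-5 overlap,
# 'zipf' weights rank changes by the reciprocal of the rank.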
def ranking_dist(ranks, noise_perturbation=False, mode='top5'):
result = 0
# step_size = 1 if noise_perturbation else args.difficulty
step_size = 1
for vid_ranks in ranks:
result_for_vid = []
for i in range(step_size):
perm1 = vid_ranks[i]
perm1_inv = np.argsort(perm1)
for rank in vid_ranks[i::step_size][1:]:
perm2 = rank
result_for_vid.append(dist(perm2[perm1_inv], mode))
if not noise_perturbation:
perm1 = perm2
perm1_inv = np.argsort(perm1)
result += np.mean(result_for_vid) / len(ranks)
return result
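# flip_prob (CIFAR-P metric): the fraction of consecutive frames on which the
# model's top-1 prediction changes ("flips"); lower means more stable.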
def flip_prob(predictions, noise_perturbation=False):
result = 0
# step_size = 1 if noise_perturbation else args.difficulty
step_size = 1
for vid_preds in predictions:
result_for_vid = []
for i in range(step_size):
prev_pred = vid_preds[i]
for pred in vid_preds[i::step_size][1:]:
result_for_vid.append(int(prev_pred != pred))
if not noise_perturbation: prev_pred = pred
result += np.mean(result_for_vid) / len(predictions)
return result
# ------------------------------ MAIN -------------------------------------
def main():
global args, best_prec1, use_cuda, pert_dataset_root
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
if args.perturbed_datadir != '':
pert_dataset_root = args.perturbed_datadir
# Clean Data loading code
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
transform_test = transforms.Compose([
transforms.ToTensor()
])
kwargs = {'num_workers': 0, 'pin_memory': True}
assert (args.dataset == 'cifar10' or args.dataset == 'cifar100')
if args.dataset == 'cifar10':
clean_data = NCIFAR10('./data', train=False, transform=transform_test, normalize_transform=normalize)
nclasses = 10
elif args.dataset == 'cifar100':
clean_data = NCIFAR100('./data', train=False, transform=transform_test, normalize_transform=normalize)
nclasses = 100
clean_loader = torch.utils.data.DataLoader(clean_data, batch_size=args.batch_size, shuffle=False, **kwargs)
# --------------------------------------------------------------------------------
# create model
expdir = ''
if args.arch == 'resnet':
expdir = 'models/resnet-cifar/'
rnargs = {'use_pp1': args.pushpull,
'pp_block1': args.pp_block1,
# 'pp_all': args.pp_all,
'train_alpha': args.train_alpha,
'size_lpf': args.lpf_size}
if args.layers == 20:
model = resnet20(**rnargs)
elif args.layers == 32:
model = resnet32(**rnargs)
elif args.layers == 44:
model = resnet44(**rnargs)
elif args.layers == 56:
model = resnet56(**rnargs)
elif args.arch == 'densenet':
expdir = 'models/densenet-cifar/'
rnargs = {'use_pp1': args.pushpull,
'pp_block1': args.pp_block1,
'num_classes': nclasses,
'small_inputs': True,
'efficient': args.efficient,
'compression': args.reduce,
'drop_rate': args.droprate,
'scale_pp': args.scale_pp,
'alpha_pp': args.alpha_pp
}
if args.layers == 40:
model = densenet40_12(**rnargs)
elif args.layers == 100:
if args.growth == 12:
model = densenet100_12(**rnargs)
elif args.growth == 24:
model = densenet100_24(**rnargs)
    elif args.arch == 'alexnet':
        expdir = 'models/alexnet-cifar/'
        # model = alexnet.AlexNet(num_classes=nclasses)
        # Guard (added): without a constructed model the later load would raise
        # a confusing NameError, so fail explicitly here.
        raise NotImplementedError('alexnet support is commented out in this script')
# load trained parameters in the model
if use_cuda:
trained_model = torch.load(expdir + '%s/' % args.name + args.modelfile + '.pth.tar')
else:
trained_model = torch.load(expdir + '%s/' % args.name + args.modelfile + '.pth.tar',
map_location=lambda storage, loc: storage)
# ------------------ Start loading model ---------------
model_dict = model.state_dict()
# 1. filter out unnecessary keys
trained_model['state_dict'] = {k: v for k, v in trained_model['state_dict'].items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(trained_model['state_dict'])
model.load_state_dict(trained_model['state_dict'])
model.eval()
# ------------------ Finish loading model --------------
# get the number of model parameters
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
# for training on multiple GPUs.
# Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
# model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
if use_cuda:
model = model.cuda()
criterion = criterion.cuda() # define loss function (criterion) and optimizer
# evaluate on validation set
'''
fileout = open(expdir + args.name + '/test_clean.txt', "a+")
prec1 = validate(clean_loader, model, criterion, file=fileout)
print('Test accuracy clean: \n{}'.format(prec1))
fileout.write('Test accuracy clean: \n{}'.format(prec1))
fileout.close()
'''
# ------------------------------------------------------------------
# VALIDATE ON CIFAR-10-P
# ------------------------------------------------------------------
flip_rates = []
top5_rates = []
zipf_rates = []
f1 = open(expdir + args.name + '/P_flipprob.txt', "w+")
f2 = open(expdir + args.name + '/P_top5dist.txt', "w+")
f3 = open(expdir + args.name + '/P_zipfdist.txt', "w+")
for perturbation_name in perturbations:
predictions, ranks = validate_perturbed(perturbation_name, model)
flipprob = flip_prob(predictions)
        flip_rates.append(flipprob)
top5dist = ranking_dist(ranks, mode='top5')
top5_rates.append(top5dist)
zipfdist = ranking_dist(ranks, mode='zipf')
zipf_rates.append(zipfdist)
f1.write('Perturbation: {:15s} | Flip Prob: {:.5f}\n'.format(perturbation_name, flipprob))
f2.write('Perturbation: {:15s} | Top5 Distance: {:.5f}\n'.format(perturbation_name, top5dist))
f3.write('Perturbation: {:15s} | Zipf Distance: {:.5f}\n'.format(perturbation_name, zipfdist))
print('Perturbation: {:15s}'.format(perturbation_name))
print('Flipping Prob\t{:.5f}'.format(flipprob))
print('Top5 Distance\t{:.5f}'.format(top5dist))
print('Zipf Distance\t{:.5f}'.format(zipfdist))
f1.close()
f2.close()
f3.close()
print('\nmFR (unnormalized by AlexNet): {:.5f}'.format(np.mean(flip_rates)))
print('mT5 (unnormalized by AlexNet): {:.5f}'.format(np.mean(top5_rates)))
print('mZD (unnormalized by AlexNet): {:.5f}'.format(np.mean(zipf_rates)))
def validate_perturbed(perturbation_name, model):
model.eval()
global pert_dataset_root, use_cuda
# Data loading code
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]]),
])
if args.dataset == 'cifar10':
dataset = CIFAR10_P(pert_dataset_root, transform=transform_test, pert_category=perturbation_name)
elif args.dataset == 'cifar100':
dataset = CIFAR100_P(pert_dataset_root, transform=transform_test, pert_category=perturbation_name)
kwargs = {'num_workers': 0, 'pin_memory': True}
perturbed_dataset_loader = torch.utils.data.DataLoader(
dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
predictions, ranks = [], []
for batch_idx, (data, target) in enumerate(perturbed_dataset_loader):
num_vids = data.size(0)
data = data.view(-1, 3, 32, 32)
if use_cuda:
data = data.cuda()
output = model(data)
output = output.detach()
for vid in output.view(num_vids, -1, NCLASSES):
predictions.append(vid.argmax(1).to('cpu').numpy())
ranks.append([np.uint16(rankdata(-frame, method='ordinal')) for frame in vid.to('cpu').numpy()])
return predictions, ranks
def validate(val_loader, model, criterion, adversarial_eps=0, file=None):
    """Perform validation on the validation set"""
    global use_cuda
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
if adversarial_eps == 0:
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
if use_cuda:
            target = target.cuda(non_blocking=True)
input = input.cuda()
input_var = torch.autograd.Variable(input, requires_grad=True)
target_var = torch.autograd.Variable(target)
# compute output
if adversarial_eps == 0:
with torch.no_grad():
output = model(input_var)
loss = criterion(output, target_var)
else: # Add Adversarial perturbation
output = model(input_var)
loss = criterion(output, target_var)
loss.backward()
x_grad = torch.sign(input_var.grad.data)
x_adversarial = torch.clamp(input_var.data + adversarial_eps * x_grad, 0, 1)
# Classification after optimization
output = model(torch.autograd.Variable(x_adversarial))
loss = criterion(output, target_var)
loss = loss.detach()
output = output.detach()
# measure accuracy and record loss
prec1 = accuracy(output, target, topk=(1,))[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(i, len(val_loader),
batch_time=batch_time, loss=losses,
top1=top1))
if file is not None:
file.write('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f}) \n'.format(i, len(val_loader),
batch_time=batch_time, loss=losses,
top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    if file is not None:
        file.write(' * Prec@1 {top1.avg:.3f} \n'.format(top1=top1))
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
LOCALE_DIR = "locale.json"
|
import React from "react";
import { ConversationProvider } from "./hooks/use-conversation";
import { SubApp } from "./subapp";
import "c3/c3.css";
import "./styles/c3-overrides.css";
import "./styles/c3-extensions.css";
function App() {
return (
<ConversationProvider>
<SubApp />
</ConversationProvider>
);
}
export default App;
|
# jodhanijanki/django-dashboard-light
from django.apps import AppConfig
class AnalyseStatementConfig(AppConfig):
name = 'analyse_statement'
|
/**
 * Class which handles {@link Float} arithmetic.
*
* @since 1.0.0
*/
public class FloatArithmetic extends AbstractArithmetic<Float> {
// region singleton
private static FloatArithmetic instance;
/**
* @return default instance
* @since 1.0.0
*/
@NotNull
public static FloatArithmetic getInstance() {
if (instance == null) instance = new FloatArithmetic();
return instance;
}
// endregion
// region fromInt, fromDouble and signum
@Override
@NotNull
public Float fromInt(int a) {
return (float) a;
}
@Override
@NotNull
public Float fromDouble(double a) {
return (float) a;
}
@Override
public double signum(@NotNull Float a) {
return Math.signum(a);
}
// endregion
// region sum, difference, product, quotient and modulo
@Override
@NotNull
public Float sum(@NotNull Float a, @NotNull Float b) {
return a + b;
}
@Override
@NotNull
public Float difference(@NotNull Float a, @NotNull Float b) {
return a - b;
}
@Override
@NotNull
public Float product(@NotNull Float a, @NotNull Float b) {
return a * b;
}
@Override
@NotNull
public Float quotient(@NotNull Float a, @NotNull Float b) {
return a / b;
}
@Override
@NotNull
public Float modulo(@NotNull Float a, @NotNull Float b) {
return a % b;
}
// endregion
// region power and root
@Override
@NotNull
public Float power(@NotNull Float a, int b) {
return (float) Math.pow(a, b);
}
@Override
@NotNull
public Float root(@NotNull Float a, int b) {
return (float) MathX.root(a, b);
}
// endregion
// region gcd
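/**
 * Not supported: gcd is not meaningfully defined for floating-point values.
 *
 * @throws NotSupportedException always
 * @since 1.0.0
 */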
@Override
@NotNull
public Float gcd(@NotNull Float a, @NotNull Float b) {
throw new NotSupportedException();
}
// endregion
} |
// yosys/addFi.cc
#include "kernel/yosys.h"
#include "kernel/sigtools.h"
#include <cstddef>
#include <sys/types.h>
USING_YOSYS_NAMESPACE
PRIVATE_NAMESPACE_BEGIN
struct AddFi : public Pass {
AddFi() : Pass("addFi", "add fault injection signals") { }
void help() override
{
// |---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|
log("\n");
log(" addFi [-no-ff] [-no-comb] [-no-add-input] [-type <cell>]");
log("\n");
log("Add a fault injection signal to every selected cell and wire the control signal\n");
log("to the top-level.\n");
log("\n");
log(" -no-ff");
log(" Do not insert fault cells for flip-flops.\n");
log("\n");
log(" -no-comb");
log(" Do not insert fault cells for combinational cells.\n");
log("\n");
log(" -no-add-input");
log(" Do not add the fault signal bus to the top-level input port.\n");
log("\n");
log(" -type <cell>");
log(" Specify the type of the inserted fault control cell.\n");
log(" Possible values are 'or', 'and' and 'xor' (default).\n");
log("\n");
}
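// A hypothetical invocation, mirroring the help text above:
//   yosys> addFi -no-comb -type xor
// inserts XOR fault cells on flip-flops only and exposes the combined fault
// bus as a top-level input.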
typedef std::vector<std::pair<RTLIL::Module*, RTLIL::Wire*>> connectionStorage;
void add_toplevel_fi_module(RTLIL::Design* design, connectionStorage *addedInputs, connectionStorage *toplevelSigs, bool add_input_signal)
{
log_debug("Connection clean-up: Initial number of added inputs to forward: %zu\n", addedInputs->size());
connectionStorage work_queue_inputs;
int j = 0;
while (!addedInputs->empty())
{
work_queue_inputs = *addedInputs;
log_debug("Connection clean-up: Current number of inputs to forward: %zu\n", work_queue_inputs.size());
addedInputs->clear();
for (auto m : work_queue_inputs)
{
int i = 0;
log_debug("Connection clean-up: Searching for instances of module: `%s' with signal `%s'\n", m.first->name.c_str(), log_signal(m.second));
// Search in all modules, not search for a specific module in the design, but cells of this type.
for (auto module : design->modules())
{
RTLIL::SigSpec fi_cells;
// And check for all cells as those can be the instances of modules.
for (auto c : module->cells())
{
// Did we find a cell of the correct type?
if (c->type == m.first->name)
{
// New wire for cell to combine the available signals
int cell_width = m.second->width;
// TODO The following wire is not really needed later as it is appended to the SigSpec which is then
// connected as a wire to the input.
// - The unused wires could be deleted later.
// - Find another way to connect the cells to the input.
// - Do not create wires here, but just iterate and store the info, then create the SigSpec and connect it
// on the one end to the input and iterate on the other end for connecting the cells.
// - Just forward all wires separately (not really nice).
Wire *s = module->addWire(stringf("\\fi_%s_%d_%s", log_id(c), i++, log_id(m.second->name)), cell_width);
// Collect all signals from all cells to create a single input later
fi_cells.append(s);
log_debug("Connection clean-up: Instance `%s' in `%s' with width %u, connecting wire `%s' to port `%s'\n",
log_id(c), log_id(c->module), cell_width, log_signal(s), log_signal(m.second));
c->setPort(m.second->name, s);
}
}
if (fi_cells.size())
{
// Create a single signal to all cells
RTLIL::Wire *mod_in = module->addWire(stringf("\\fi_forward_%s_%d", log_id(module->name), j++), fi_cells.size());
if (!module->get_bool_attribute(ID::top))
{
// Forward wires to top
mod_in->port_input = true;
module->fixup_ports();
}
module->connect(fi_cells, mod_in);
if (!module->get_bool_attribute(ID::top))
{
log_debug("Connection clean-up: Adding `%s' to signal forward list\n", log_id(mod_in->name));
addedInputs->push_back(std::make_pair(module, mod_in));
}
else
{
log_debug("Connection clean-up: Adding signal `%s' to top-level signal list\n", log_signal(mod_in));
toplevelSigs->push_back(std::make_pair(module, mod_in));
}
}
}
}
}
// Stop if there are no signals to connect
if (toplevelSigs->empty()) {
log_debug("Connection clean-up: No signals at top-level. Sopping.\n");
return;
}
// Connect all signals at the top to a FI module
RTLIL::Module *top_module = nullptr;
for (auto mod : design->modules())
{
if (mod->get_bool_attribute(ID::top)) {
top_module = mod;
}
}
// Stop if no top module can be found
// TODO Allow to proceed here
// Either find a way to follow the hierarchy or just update the modules without
// reconnecting all cells.
if (top_module == nullptr) {
return;
}
// TODO Make it possible to update the figenerator module
// This would allow to run the pass more than once for different parts of the design.
// This could be useful to run it with different configurations for different parts.
// In a successive pass the module should be altered to incorporate the new wires.
log_debug("Connection clean-up: Number of signals for top-level module `%s': %lu\n", top_module->name.c_str(), toplevelSigs->size());
auto figen = design->addModule("\\figenerator");
log_debug("Connection clean-up: Create module `%s'\n", figen->name.c_str());
// Connect a single input to all outputs
RTLIL::SigSpec passing_signal;
size_t single_signal_num = 0;
size_t total_width = 0;
// Remember the new output port for the fault signal
std::vector<std::pair<RTLIL::Wire*, RTLIL::Wire*>> fi_port_list;
// Create output ports
for (auto &t : *toplevelSigs)
{
total_width += t.second->width;
// Continuous signal number naming
auto fi_o = figen->addWire(stringf("\\fi_%lu", single_signal_num++), t.second);
fi_o->port_output = 1;
passing_signal.append(fi_o);
fi_port_list.push_back(std::make_pair(fi_o, t.second));
}
log_debug("Connection clean-up: Adding combined input port to `%s'\n", figen->name.c_str());
RTLIL::Wire *fi_combined_in = nullptr;
if (add_input_signal) {
fi_combined_in = figen->addWire("\\fi_combined", total_width);
fi_combined_in->port_input = true;
RTLIL::SigSpec input_port(fi_combined_in);
figen->connect(passing_signal, input_port);
}
figen->fixup_ports();
std::string figen_instance_name = stringf("\\u_%s", log_id(figen->name));
auto u_figen = top_module->addCell(figen_instance_name.c_str(), figen->name);
// Connect output ports
log_debug("Connection clean-up: Connecting signals to `%s'\n", figen->name.c_str());
for (auto &l : fi_port_list)
{
log_debug("Connection clean-up: Connecting signal `%s' to port `%s'\n", log_id(l.second), log_id(l.first));
u_figen->setPort(l.first->name, l.second);
}
if (add_input_signal) {
auto top_fi_input = top_module->addWire("\\fi_combined", total_width);
top_fi_input->port_input = true;
u_figen->setPort(fi_combined_in->name, top_fi_input);
top_module->fixup_ports();
log_debug("Connection clean-up: Added input signal `%s'\n", top_fi_input->name.c_str());
}
}
Wire *storeFaultSignal(RTLIL::Module *module, RTLIL::Cell *cell, IdString output, int faultNum, RTLIL::SigSpec *fi_signal_module)
{
std::string fault_sig_name, sig_type;
if (output == ID::Q) {
sig_type = "ff";
} else if (output == ID::Y) {
sig_type = "comb";
}
if (module->get_bool_attribute(ID::top))
{
fault_sig_name = stringf("\\fi_%s_%d", sig_type.c_str(), faultNum);
}
else
{
fault_sig_name = stringf("\\fi_%s_%s_%d", sig_type.c_str(), log_id(module), faultNum);
}
log_debug("Module `%s': Adding wire `%s'\n", module->name.c_str(), fault_sig_name.c_str());
Wire *s = module->addWire(fault_sig_name, cell->getPort(output).size());
fi_signal_module->append(s);
return s;
}
void addModuleFiInput(RTLIL::Module *module, RTLIL::SigSpec fi_signal_module, std::string fault_input_name, connectionStorage *moduleInputs, connectionStorage *toplevelSigs)
{
if (!fi_signal_module.size()) {
return;
}
Wire *input = module->addWire(fault_input_name, fi_signal_module.size());
module->connect(fi_signal_module, input);
if (!module->get_bool_attribute(ID::top)) {
log_debug("Module `%s': Adding input `%s' (size: %d)\n", module->name.c_str(), input->name.c_str(), input->width);
input->port_input = true;
module->fixup_ports();
log_debug("Module `%s': Adding wire `%s' to signal forward list\n", module->name.c_str(), log_id(input->name));
moduleInputs->push_back(std::make_pair(module, input));
}
else
{
log_debug("Module `%s': Adding wire `%s' to top-level signal list\n", module->name.c_str(), log_id(input->name));
toplevelSigs->push_back(std::make_pair(module, input));
}
}
void appendFiCell(std::string fi_type, RTLIL::Module *module, RTLIL::Cell *cell, RTLIL::IdString output, SigSpec outputSig, Wire *s)
{
Wire *xor_input = module->addWire(NEW_ID, cell->getPort(output).size());
SigMap sigmap(module);
RTLIL::SigSpec outMapped = sigmap(outputSig);
outMapped.replace(outputSig, xor_input);
cell->setPort(output, outMapped);
// Output of FF, input to XOR
Wire *newOutput = module->addWire(NEW_ID, cell->getPort(output).size());
module->connect(outputSig, newOutput);
// Output of XOR
// TODO store module with 's' wire input and replace this with the new big wire 'fi_xor' at the end?
if (fi_type.compare("xor") == 0) {
module->addXor(NEW_ID, s, xor_input, newOutput);
} else if (fi_type.compare("and") == 0) {
module->addAnd(NEW_ID, s, xor_input, newOutput);
} else if (fi_type.compare("or") == 0) {
module->addOr(NEW_ID, s, xor_input, newOutput);
}
}
void insertFi(std::string fi_type, RTLIL::Module *module, RTLIL::Cell *cell, int faultNum, RTLIL::SigSpec *fi_signal_module)
{
RTLIL::IdString output;
if (cell->hasPort(ID::Q)) {
output = ID::Q;
} else if (cell->hasPort(ID::Y)) {
output = ID::Y;
} else {
return;
}
SigSpec sigOutput = cell->getPort(output);
log_debug("Module `%s': Inserting fault injection '%s' to cell type `%s' (size: %u)\n",
module->name.c_str(), fi_type.c_str(), log_id(cell->type), sigOutput.size());
// Wire for FI signal
Wire *s = storeFaultSignal(module, cell, output, faultNum, fi_signal_module);
// Cell for FI control
appendFiCell(fi_type, module, cell, output, sigOutput, s);
}
void execute(vector<string> args, RTLIL::Design* design) override
{
bool flag_add_fi_input = true;
bool flag_inject_ff = true;
bool flag_inject_combinational = true;
std::string option_fi_type;
// parse options
size_t argidx;
for (argidx = 1; argidx < args.size(); argidx++)
{
std::string arg = args[argidx];
if (arg == "-no-ff") {
flag_inject_ff = false;
continue;
}
if (arg == "-no-comb") {
flag_inject_combinational = false;
continue;
}
if (arg == "-no-add-input") {
flag_add_fi_input = false;
continue;
}
if (arg == "-type") {
if (++argidx >= args.size())
log_cmd_error("Option -type requires an additional argument!\n");
option_fi_type = args[argidx];
continue;
}
// TODO do not create the figenerator module
// Add a argument to prevent the creation of the module.
// Two possible ways to handle the signals:
// - Keep them unconnected at the top-level (-no-add-input)
// - Create the forwarding bus and add it as an input port
// The reason to having this module is an easy access to the bus
// for the simulation. One could apply a patch to the HDL part for
// some DPI functions which can then be used by the simulation.
break;
}
extra_args(args, argidx, design);
connectionStorage addedInputs, toplevelSigs;
if (option_fi_type.empty())
{
option_fi_type = "xor";
}
for (auto module : design->selected_modules())
{
log("Updating module `%s'\n", module->name.c_str());
int i = 0;
RTLIL::SigSpec fi_ff, fi_comb;
// Add a FI cell for each cell in the module
log_debug("Module `%s': Searching for cells to append with fault injection\n", module->name.c_str());
for (auto cell : module->selected_cells())
{
// Only operate on standard cells (do not change modules)
if (!cell->type.isPublic()) {
if ((flag_inject_ff && cell->type.in(RTLIL::builtin_ff_cell_types()))) {
insertFi(option_fi_type, module, cell, i++, &fi_ff);
}
if (flag_inject_combinational && !cell->type.in(RTLIL::builtin_ff_cell_types())) {
insertFi(option_fi_type, module, cell, i++, &fi_comb);
}
}
}
// Update the module with a port to control all new XOR cells
log_debug("Module `%s': Updating modules inputs\n", module->name.c_str());
addModuleFiInut(module, fi_ff, "\\fi_ff", &addedInputs, &toplevelSigs);
addModuleFiInut(module, fi_comb, "\\fi_comb", &addedInputs, &toplevelSigs);
}
// Update all modified modules in the design and add wiring to the top
add_toplevel_fi_module(design, &addedInputs, &toplevelSigs, flag_add_fi_input);
}
} AddFi;
PRIVATE_NAMESPACE_END
|
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: itkOrientedImageProfileTest3.cxx
Language: C++
Date: $Date$
Version: $Revision$
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
#if defined(_MSC_VER)
#pragma warning ( disable : 4786 )
#endif
#include "itkVectorImage.h"
#include "itkImageRegionIteratorWithIndex.h"
#include "itkTimeProbesCollectorBase.h"
int itkOrientedImageProfileTest3( int , char *[] )
{
const unsigned int Dimension = 3;
typedef unsigned char PixelType;
//
// Yes, on purpose we are using here the itk::VectorImage, so we can compare it
// against the itk::OrientedImage used in itkOrientedImageProfileTest1.
//
typedef itk::VectorImage<PixelType, Dimension> ImageType;
typedef ImageType::IndexType IndexType;
typedef ImageType::SizeType SizeType;
typedef ImageType::PointType PointType;
typedef ImageType::RegionType RegionType;
typedef ImageType::SpacingType SpacingType;
IndexType start;
SizeType size;
start.Fill( 0 );
size.Fill( 300 );
RegionType region;
region.SetIndex( start );
region.SetSize( size );
ImageType::Pointer image = ImageType::New();
image->SetVectorLength( 2 );
image->SetRegions( region );
image->Allocate();
SpacingType spacing;
spacing.Fill( 1.5 );
image->SetSpacing( spacing );
PointType origin;
origin.Fill( 1.3 );
image->SetOrigin( origin );
typedef itk::ImageRegionConstIteratorWithIndex< ImageType > IteratorType;
IteratorType itr( image, region );
itr.GoToBegin();
itk::TimeProbesCollectorBase chronometer;
chronometer.Start("Transform");
IndexType index;
PointType point;
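// Round-trip every pixel index through physical space so the profiler
// exercises TransformIndexToPhysicalPoint / TransformPhysicalPointToIndex.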
while( !itr.IsAtEnd() )
{
image->TransformIndexToPhysicalPoint( itr.GetIndex(), point );
image->TransformPhysicalPointToIndex( point, index );
++itr;
}
chronometer.Stop("Transform");
chronometer.Report( std::cout );
return EXIT_SUCCESS;
}
|
# This example demonstrates the usage of whetlab (a service built on
# spearmint) within the context of sklearn.
# Here we train a random forest classifier on the MNIST dataset.
import whetlab
import numpy as np
# Define parameters to optimize
parameters = { 'n_estimators':{'type':'integer', 'min':2, 'max':100, 'size':1},
'max_depth':{'type':'integer', 'min':1, 'max':20, 'size':1}}
outcome = {'name':'Classification accuracy'}
name = 'Random Forest'
description = 'Training a random forest on the MNIST dataset using the sklearn library'
access_token = None # PUT VALID ACCESS TOKEN HERE OR IN YOUR ~/.whetlab FILE
scientist = whetlab.Experiment(name=name, description=description,
access_token=access_token, parameters=parameters, outcome=outcome)
# Setup scikit-learn experiment
from sklearn.datasets import fetch_mldata
from sklearn.ensemble import RandomForestClassifier
# Download the mnist dataset to the current working directory
mnist = fetch_mldata('MNIST original', data_home='.')
order = np.random.permutation(60000)
train_set = [mnist.data[order[:50000],:], mnist.target[order[:50000]]]
valid_set = [mnist.data[order[50000:60000],:], mnist.target[order[50000:60000]]]
for i in range(20):
# Get suggested new experiment
job = scientist.suggest()
# Perform experiment
learner = RandomForestClassifier(**job)
learner.fit(*train_set)
accuracy = learner.score(*valid_set)
# Inform scientist about the outcome
scientist.update(job,accuracy)
scientist.report()
|
package br.com.dafiti.zoom;
import br.com.dafiti.mitt.Mitt;
import br.com.dafiti.mitt.cli.CommandLineInterface;
import br.com.dafiti.mitt.exception.DuplicateEntityException;
import br.com.dafiti.mitt.transformation.embedded.Concat;
import br.com.dafiti.mitt.transformation.embedded.Now;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.commons.io.FileUtils;
import org.apache.http.client.utils.URIBuilder;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import org.jsoup.Connection;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
/**
*
* @author <NAME>
*/
public class Zoom {
private static final Logger LOG = Logger.getLogger(Zoom.class.getName());
private static final String ZOOM_ENDPOINT = "http://anunciante.zoom.com.br/";
public static void main(String[] args) throws IOException {
LOG.info("Glove - Zoom Extractor started");
//Defines a MITT instance.
Mitt mitt = new Mitt();
try {
//Defines parameters.
mitt.getConfiguration()
.addParameter("c", "credentials", "Credentials file", "", true, false)
.addParameter("o", "output", "Output file", "", true, false)
.addParameter("s", "start_date", "Start date", "", true, false)
.addParameter("e", "end_date", "End date", "", true, false)
.addParameter("f", "field", "Fields to be extracted from input file", "", true, false)
.addParameter("p", "partition", "(Optional) Partition, divided by + if has more than one field")
.addParameter("k", "key", "(Optional) Unique key, divided by + if has more than one field", "");
//Reads the command line interface.
CommandLineInterface cli = mitt.getCommandLineInterface(args);
//Defines output file.
mitt.setOutputFile(cli.getParameter("output"));
//Defines fields.
mitt.getConfiguration()
.addCustomField("partition_field", new Concat((List) cli.getParameterAsList("partition", "\\+")))
.addCustomField("custom_primary_key", new Concat((List) cli.getParameterAsList("key", "\\+")))
.addCustomField("etl_load_date", new Now())
.addField(cli.getParameterAsList("field", "\\+"));
//Reads the credentials file.
JSONParser parser = new JSONParser();
JSONObject credentials = (JSONObject) parser.parse(new FileReader(cli.getParameter("credentials")));
//Runs the crawler to export accountstatement report.
Response loginForm = Jsoup.connect(ZOOM_ENDPOINT)
.method(Connection.Method.GET)
.execute();
Response mainPage = Jsoup.connect(ZOOM_ENDPOINT + "zoomout/j_security_check")
.data("j_username", credentials.get("username").toString())
.data("j_password", <PASSWORD>.get("password").toString())
.cookies(loginForm.cookies())
.execute();
Response report = Jsoup.connect(
new URIBuilder(ZOOM_ENDPOINT + "accountstatement")
.addParameter("merchantId", credentials.get("account").toString())
.addParameter("dateStart",
new SimpleDateFormat("dd/MM/yyyy")
.format(
new SimpleDateFormat("yyyy-MM-dd")
.parse((String) cli.getParameter("start_date"))))
.addParameter("dateEnd",
new SimpleDateFormat("dd/MM/yyyy").format(
new SimpleDateFormat("yyyy-MM-dd")
.parse((String) cli.getParameter("end_date"))))
.addParameter("action", "export").build().toString())
.cookies(mainPage.cookies())
.execute();
//Saves the report data.
Path outputPath = Files.createTempDirectory("zoom_");
FileUtils.writeByteArrayToFile(
new File(outputPath.toString() + "/" + "zoom.csv"),
report.bodyAsBytes());
//Defines the reader delimiter as comma.
mitt.getReaderSettings().setDelimiter(',');
//Write the data to output file.
mitt.write(outputPath.toFile(), "*.csv");
FileUtils.deleteDirectory(outputPath.toFile());
} catch (DuplicateEntityException
| IOException
| URISyntaxException
| java.text.ParseException
| ParseException ex) {
LOG.log(Level.SEVERE, "Zoom Extractor failure: ", ex);
System.exit(1);
} finally {
mitt.close();
LOG.info("Glove - Zoom Extractor finalized.");
System.exit(0);
}
}
}
|
package org.springframework.debug.bean;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.context.EnvironmentAware;
import org.springframework.core.env.Environment;
public class Teacher implements BeanNameAware, EnvironmentAware {
private String name;
public String getName() {
    return name;
}
public void setName(String name) {
    this.name = name;
}
private String beanName;
private Environment environment;
@Override
public void setBeanName(String name) {
this.beanName = name;
}
@Override
public void setEnvironment(Environment environment) {
this.environment = environment;
}
public String getBeanName(){
return beanName;
}
public Environment getEnvironment(){
return environment;
}
}
|
/* reverse the operation above for one entry.
* b points to the offset into the weave array of the power we are
* calculating */
mp_err weave_to_mpi(mp_int *a, const unsigned char *b,
mp_size b_size, mp_size count)
{
mp_digit *pb = MP_DIGITS(a);
mp_digit *end = &pb[b_size];
MP_SIGN(a) = MP_ZPOS;
MP_USED(a) = b_size;
for (; pb < end; pb++) {
register mp_digit digit;
digit = *b << 8; b += count;
#define MPI_UNWEAVE_ONE_STEP digit |= *b; b += count; digit = digit << 8;
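/* The switch below falls through on purpose: starting at the case that
 * matches sizeof(mp_digit), each step consumes one interleaved byte and
 * shifts it into the digit being rebuilt. */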
switch (sizeof(mp_digit)) {
case 32:
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
case 16:
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
case 8:
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
case 4:
MPI_UNWEAVE_ONE_STEP
MPI_UNWEAVE_ONE_STEP
case 2:
break;
}
digit |= *b; b += count;
*pb = digit;
}
s_mp_clamp(a);
return MP_OKAY;
} |
<reponame>jvanderaa/nautobot-plugin-version-control<gh_stars>0
"""Filters.py defines a set of Filters needed for each model defined in models.py."""
import django_filters
from django.db.models import Q
from nautobot.utilities.filters import BaseFilterSet
from dolt.models import Branch, Commit, PullRequest, PullRequestReview
class BranchFilterSet(BaseFilterSet):
"""BranchFilterSet returns a filter for the Branch model."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = Branch
fields = (
"name",
"hash",
"latest_committer",
"latest_committer_email",
"latest_commit_date",
"latest_commit_message",
)
def search(self, queryset, name, value): # pylint: disable=unused-argument,no-self-use
"""
search performs an ORM filter on the Branch model
:param queryset: The Branch queryset
:param name: The filter field name
:param value: The value to be searched for
:return: A filtered queryset
"""
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(name__icontains=value)
| Q(hash__icontains=value)
| Q(latest_committer__icontains=value)
| Q(latest_committer_email__icontains=value)
| Q(latest_commit_date__icontains=value)
| Q(latest_commit_message__icontains=value)
)
class CommitFilterSet(BaseFilterSet):
"""CommitFilterSet returns a filter for the CommitFilterSet."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = Commit
fields = (
"commit_hash",
"committer",
"email",
"date",
"message",
)
def search(self, queryset, name, value): # pylint: disable=unused-argument,no-self-use
"""
search performs an ORM filter on the Commit model
:param queryset: The Commit queryset
:param name: The filter field name
:param value: The value to be searched for
:return: A filtered queryset
"""
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(commit_hash__icontains=value)
| Q(committer__icontains=value)
| Q(email__icontains=value)
| Q(date__icontains=value)
| Q(message__icontains=value)
)
class PullRequestFilterSet(BaseFilterSet):
"""PullRequestFilterSet returns a filter for the PullRequest model."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = PullRequest
fields = (
"title",
"state",
"source_branch",
"destination_branch",
"description",
"creator",
)
def search(self, queryset, name, value): # pylint: disable=unused-argument,no-self-use
"""
search performs an ORM filter on the PullRequest model
:param queryset: The PullRequest queryset
:param name: The filter field name
:param value: The value to be searched for
:return: A filtered queryset
"""
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(title__icontains=value)
| Q(state__icontains=value)
| Q(source_branch__icontains=value)
| Q(destination_branch__icontains=value)
| Q(description__icontains=value)
| Q(creator__icontains=value)
)
class PullRequestDefaultOpenFilterSet(PullRequestFilterSet):
"""PullRequestDefaultOpenFilterSet returns a filter for the PullRequest model where the default search is state=OPEN."""
state = django_filters.MultipleChoiceFilter(choices=PullRequest.PR_STATE_CHOICES)
def __init__(self, data, *args, **kwargs):
if not data.get("state"):
data = data.copy()
data["state"] = PullRequest.OPEN
super().__init__(data, *args, **kwargs)
class PullRequestReviewFilterSet(BaseFilterSet):
"""PullRequestReviewFilterSet returns a filter for the PullRequestReview model."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
state = django_filters.MultipleChoiceFilter(choices=PullRequest.PR_STATE_CHOICES)
class Meta:
model = PullRequestReview
fields = (
"pull_request",
"reviewer",
"state",
"reviewed_at",
"summary",
)
def search(self, queryset, name, value): # pylint: disable=unused-argument,no-self-use
"""
search performs an ORM filter on the PullRequestReview model
:param queryset: The PullRequestReview queryset
:param name: The filter field name
:param value: The value to be searched for
:return: A filtered queryset
"""
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(reviewer__icontains=value)
| Q(reviewed_at__icontains=value)
| Q(state__icontains=value)
| Q(summary__icontains=value)
)
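# Example (hypothetical) usage, filtering branches by a free-text query:
#
#     BranchFilterSet({"q": "main"}, queryset=Branch.objects.all()).qs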
|
#include <bits/stdc++.h>
using namespace std;
long long a[100005],p[100005];
pair <long long,long long> b[100005];
pair <pair<long long,long long>,int > par[100005];
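// Sketch of the idea (as it appears from the code): sort indices by (p, a),
// give the highest pair the value r, then walk downwards keeping every value
// as large as possible while b[i] - a[i] stays strictly increasing in that
// order and every value stays inside [l, r]; print -1 when this forces a
// value below l.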
int main()
{
int n;
long long l,r;
cin>>n>>l>>r;
for (int i=0;i<n;i++)
cin>>a[i];
for (int i=0;i<n;i++)
cin>>p[i];
for (int i=0;i<n;i++)
par[i]=make_pair(make_pair(p[i],a[i]),i);
sort(par,par+n);
b[n-1].second=r;
b[n-1].first=par[n-1].second;
for (int i=n-2;i>=0;i--)
if (b[i+1].second-par[i+1].first.second+par[i].first.second<=l)
{
cout<<-1;
return 0;
}
else
{
b[i].second=min(b[i+1].second-par[i+1].first.second+par[i].first.second-1,r);
b[i].first=par[i].second;
}
sort(b,b+n);
for (int i=0;i<n;i++)
cout<<b[i].second<<' ';
return 0;
}
|
// extern crate audrey;
extern crate deepspeech;
extern crate serde;
extern crate serde_json;
use std::env::args;
use std::fs;
use std::path::Path;
use std::{thread, time};
use byteorder::{ByteOrder, LittleEndian};
use deepspeech::Model;
use std::sync::mpsc;
use std::sync::mpsc::SyncSender;
use std::env;
use std::collections::HashMap;
use lapin::{
message::Delivery, options::*, types::FieldTable, BasicProperties, Channel, Connection,
ConnectionProperties, ConsumerSubscriber,
};
use lapin_async as lapin;
use std::fmt;
// These constants are taken from the C++ sources of the client.
const N_CEP: u16 = 26;
const N_CONTEXT: u16 = 9;
const BEAM_WIDTH: u16 = 500;
const SAMPLE_RATE: u32 = 16_000;
const LM_WEIGHT: f32 = 0.75;
const VALID_WORD_COUNT_WEIGHT: f32 = 1.85;
const HALF_SECOND: time::Duration = time::Duration::from_millis(500);
struct Subscriber {
channel: Channel,
sync_sender: SyncSender<Vec<u8>>,
}
impl fmt::Debug for Subscriber {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.channel)
}
}
impl ConsumerSubscriber for Subscriber {
fn new_delivery(&self, delivery: Delivery) {
self.channel
.basic_ack(delivery.delivery_tag, BasicAckOptions::default())
.as_error()
.expect("basic_ack");
if let Ok(_) = self.sync_sender.send(delivery.data) {
println!("Data sent");
}
}
fn drop_prefetched_messages(&self) {}
fn cancel(&self) {}
}
pub fn send_message(publish_channel: &Channel, queue_name: &str, payload: &[u8]) {
publish_channel
.queue_declare(
queue_name,
QueueDeclareOptions::default(),
FieldTable::default(),
)
.wait()
.expect("queue_declare");
publish_channel
.basic_publish(
"",
queue_name,
BasicPublishOptions::default(),
payload.to_vec(),
BasicProperties::default(),
)
.wait()
.expect("basic_publish");
println!("Payload sent to {}", queue_name);
}
pub fn attach_consumer(
queue_name: &str,
consumer_name: &'static str,
conn: Connection,
subcribe_channels: &mut HashMap<&'static str, Channel>,
sync_sender: &SyncSender<Vec<u8>>,
) {
let channel: &Channel = subcribe_channels
.entry(consumer_name)
.or_insert(conn.create_channel().wait().expect("create_channel"));
let queue = channel
.queue_declare(
queue_name,
QueueDeclareOptions::default(),
FieldTable::default(),
)
.wait()
.expect("queue_declare");
channel
.basic_consume(
&queue,
consumer_name,
BasicConsumeOptions::default(),
FieldTable::default(),
Box::new(Subscriber {
channel: channel.clone(),
sync_sender: sync_sender.clone(),
}),
)
.wait()
.expect("basic_consume");
println!("Consumer attached to {}", queue_name);
}
/*
TODO list:
* better resampling (right now it seems that recognition is impaired compared to manual resampling)...
maybe use sinc?
* channel cropping
* use clap or something to parse the command line arguments
*/
fn main() {
let contents = fs::read_to_string(&args().nth(1).unwrap()).unwrap();
let config: serde_json::Value =
serde_json::from_str(&contents).expect("JSON was not well-formatted");
let broker_host: &str = &env::var("BROKER_HOST").unwrap();
let broker_port: &str = &env::var("BROKER_PORT").unwrap();
let model_file: &Path = Path::new(config["model"].as_str().unwrap());
let alphabet_file: &Path = &Path::new(config["alphabet"].as_str().unwrap());
let binary_file: &Path = &Path::new(config["binary"].as_str().unwrap());
let trie_file: &Path = &Path::new(config["trie"].as_str().unwrap());
let model_result =
Model::load_from_files(model_file, N_CEP, N_CONTEXT, alphabet_file, BEAM_WIDTH);
match model_result {
Ok(_) => println!("Model loaded"),
Err(err) => panic!("{:?}", err),
};
let mut model: Model = model_result.unwrap();
model.enable_decoder_with_lm(
alphabet_file,
binary_file,
trie_file,
LM_WEIGHT,
VALID_WORD_COUNT_WEIGHT,
);
let conn: Connection = Connection::connect(
&format!("amqp://{}:{}/%2f", broker_host, broker_port),
ConnectionProperties::default(),
)
.wait()
.expect("connection error");
let publish_channel: Channel = conn.create_channel().wait().expect("create_channel");
let mut subcribe_channels: HashMap<&str, Channel> = HashMap::new();
let (sync_sender, receiver) = mpsc::sync_channel(1);
const QUEUE_NAME: &str = "audio";
const CONSUMER_NAME: &str = "interpret";
attach_consumer(
QUEUE_NAME,
CONSUMER_NAME,
conn,
&mut subcribe_channels,
&sync_sender,
);
loop {
if let Ok(audio_buf) = receiver.recv() {
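// The payload is raw little-endian 16-bit PCM audio; reinterpret the bytes as i16 samples.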
let mut converted: Vec<i16> = vec![0; audio_buf.len() / 2];
LittleEndian::read_i16_into(&audio_buf, &mut converted);
let result: String = model.speech_to_text(&converted, SAMPLE_RATE).unwrap();
send_message(&publish_channel, "interpreted", result.as_bytes());
println!("{}", result);
}
thread::sleep(HALF_SECOND);
}
}
|
<filename>src/components/Signal.tsx
import React from 'react'
import styles from './Button.module.scss'
import * as Icons from './Icons'
import {components} from './ButtonIconSet';
interface Props {
signal: RemoAPI.Signal,
onClick?: (event: any) => void,
}
const Signal: React.FC<Props> = React.memo((props) => {
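// Look up a matching icon component; fall back to a plain text icon showing the signal name.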
const Icon: JSX.Element = components.hasOwnProperty(props.signal.image!)
    ? components[props.signal.image as string]
    : React.createElement(Icons.Text, {value: props.signal.name!})
return (
<div className={styles.button} >
<button onClick={props.onClick}>{Icon}</button>
<div className={styles.label}>{props.signal.name}</div>
</div>
);
});
export default Signal;
|
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_UTILS_HASHING_H_
#define MINDSPORE_CORE_UTILS_HASHING_H_
#include <initializer_list>
#include <memory>
namespace mindspore {
inline std::size_t hash_combine(std::size_t hash_sum, std::size_t hash_val) {
// Reference from http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0814r0.pdf
return ((hash_sum << 6) + (hash_sum >> 2) + 0x9e3779b9 + hash_val) ^ hash_sum;
}
inline std::size_t hash_combine(const std::initializer_list<std::size_t> &hash_vals) {
std::size_t hash_sum = 0;
for (auto hash_val : hash_vals) {
hash_sum = hash_combine(hash_sum, hash_val);
}
return hash_sum;
}
template <typename T>
struct PointerHash {
constexpr std::size_t operator()(const T *ptr) const noexcept { return reinterpret_cast<std::size_t>(ptr); }
};
template <typename T>
struct PointerHash<std::shared_ptr<T>> {
constexpr std::size_t operator()(const std::shared_ptr<T> &ptr) const noexcept {
return reinterpret_cast<std::size_t>(ptr.get());
}
};
// Generate hash code for a string literal at compile time.
// We use the Java string hash algorithm here.
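// Illustrative use: constexpr uint32_t kConvHash = ConstStringHash("Conv2D");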
constexpr uint32_t ConstStringHash(const char *str) {
uint32_t hash = 0;
while (*str) {
hash = hash * 31 + static_cast<uint32_t>(*str++);
}
return hash;
}
} // namespace mindspore
#endif // MINDSPORE_CORE_UTILS_HASHING_H_
|
// WithValidityExpiringInHours requests certificates with validity expiring in the order of hours.
// This option is suitable for issuing init bundles which cannot be revoked.
func WithValidityExpiringInHours() IssueCertOption {
return func(o *issueOptions) {
o.signerProfile = ephemeralProfileWithExpirationInHours
}
} |
//provides constants for the climber
public static class Climber
{
//PID Constants
public static final double kElevatorP = 1;
public static final double kElevatorI = 0.04;
public static final double kElevatorD = 0.4;
public static final double kElevatorClimbOutput = 0.257;
//Location Ticks
public static final double kClimbHeight = 420000;
public static final double kElevatorZero = 15000;
//Servo angles
public static final double kServoRatchet = 1;
public static final double kServoUnRatchet = 0;
//Other useful constants
public static final double TICKS_PER_ROTATION = 4096;
public static final double ACCEPTABLE_AMOUNT = 0.2;
//Simulating a Climber
public static final double CARRIAGE_MASS = convertPoundsToKg(15);
public static final double GEAR_REDUCTION = 10.0;
public static final double PULLEY_RADIUS = Units.inchesToMeters(2);
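//Meters of carriage travel per encoder tick: pulley circumference divided by gearing and encoder resolution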
public static final double DISTANCE_PER_PULSE = 2.0 * Math.PI * PULLEY_RADIUS / GEAR_REDUCTION / TICKS_PER_ROTATION;
public static final double MIN_HEIGHT = 0.0;
public static final double MAX_HEIGHT = kClimbHeight * DISTANCE_PER_PULSE;
public static final int CHANNEL_A = 0, CHANNEL_B = 1;
/**
* @param pounds
* @return kilograms
*/
private static double convertPoundsToKg(double pounds)
{
return pounds * 0.45359237;
}
} |
/**
* Relocation info for binary addresses in an RPM's code image.
*/
public class RPMRelocation {
public RPMRelTargetType targetType;
public RPMRelSourceType sourceType;
public RPMRelocationTarget target;
public RPMRelocationSource source;
public RPMRelocation() {
}
public RPMRelocation(RPM rpm, RPMRelocation rel, Map<RPMSymbol, RPMSymbol> symbolTransferMap) {
sourceType = rel.sourceType;
targetType = rel.targetType;
target = new RPMRelocationTarget(rel.target);
switch (sourceType) {
case SYMBOL_EXTERNAL:
RPMRelocationSource.RPMRelSrcExternalSymbol es = (RPMRelocationSource.RPMRelSrcExternalSymbol) rel.source;
source = new RPMRelocationSource.RPMRelSrcExternalSymbol(rpm, es.ns, es.symbolName);
break;
case SYMBOL_INTERNAL:
RPMRelocationSource.RPMRelSrcInternalSymbol is = (RPMRelocationSource.RPMRelSrcInternalSymbol) rel.source;
System.out.println("Transferring internal relocation from symbol " + is.symb + " to " + symbolTransferMap.get(is.symb));
source = new RPMRelocationSource.RPMRelSrcInternalSymbol(rpm, symbolTransferMap.get(is.symb));
break;
}
}
RPMRelocation(RPMReader in, RPM rpm) throws IOException {
int cfg = in.readUnsignedByte();
sourceType = RPMRelSourceType.values()[cfg & 0b11]; //reserved 4 values
targetType = RPMRelTargetType.values()[(cfg >> 2) & 0b111]; //reserved 8 values
target = new RPMRelocationTarget(in);
switch (sourceType) {
case SYMBOL_EXTERNAL:
source = new RPMRelocationSource.RPMRelSrcExternalSymbol(rpm, in);
break;
case SYMBOL_INTERNAL:
source = new RPMRelocationSource.RPMRelSrcInternalSymbol(rpm, in);
break;
}
}
void write(DataIOStream out, StringTable strtbl) throws IOException {
out.write((targetType.ordinal() << 2) | sourceType.ordinal());
target.write(out, strtbl);
source.write(out, strtbl);
}
/**
* Gets the serialized size of the relocation info.
*
* @return
*/
public int getSize() {
return 1 + target.getSize() + source.getDataSize();
}
/**
* Type of the relocated field at the target address,
*/
public static enum RPMRelTargetType {
/**
* A 32-bit absolute offset.
*/
OFFSET,
/**
* A thumb BL instruction.
*/
THUMB_BRANCH_LINK,
/**
* An ARM BL instruction.
*/
ARM_BRANCH_LINK,
/**
* A Thumb branch. For technical reasons, it is relocated as a BL with
* return.
*/
THUMB_BRANCH,
/**
* An ARM B instruction.
*/
ARM_BRANCH,
/**
* Full copy of the source data.
*/
FULL_COPY;
public static RPMRelTargetType fromName(String name) {
for (RPMRelTargetType t : values()) {
if (t.name().equals(name)) {
return t;
}
}
return null;
}
}
/**
* Type of the provider of the address that the relocated field points to.
*/
public static enum RPMRelSourceType {
/**
* A symbol inside the RPM.
*/
SYMBOL_INTERNAL,
/**
* A symbol outside of the RPM, handled by an RPMExternalSymbolResolver.
*/
SYMBOL_EXTERNAL
}
} |
/**
* @author Yves Boyadjian
*
*/
public class SoNodeSensor extends SoDataSensor implements Destroyable {
private SoNode node;
////////////////////////////////////////////////////////////////////////
//
// Description:
// Constructor
//
// Use: public
public SoNodeSensor() {
    super();
    node = null;
}
////////////////////////////////////////////////////////////////////////
//
// Description:
// Constructor with function and data.
//
// Use: public
public SoNodeSensor(SoSensorCB func, Object data) {
    super(func, data);
    node = null;
}
////////////////////////////////////////////////////////////////////////
//
// Description:
// Destructor
//
// Use: protected
public void destructor() {
    detach();
    super.destructor();
}
// Makes this sensor detect changes to the given node.
public void attach(SoNode nodeToAttachTo) {
if (node != null)
detach();
node = nodeToAttachTo;
node.addAuditor(this, SoNotRec.Type.SENSOR);
}
/**
* Unschedules this sensor (if it is scheduled) and makes it ignore changes to the scene graph.
*/
//
// Description:
// Detaches the sensor if it is attached to a node.
//
// Use: public
public void detach() {
if (node != null) {
node.removeAuditor(this, SoNotRec.Type.SENSOR);
node = null;
// If we are scheduled, there's no point leaving it scheduled,
// since it's not attached any more to whatever caused it to
// become scheduled.
unschedule();
}
}
//
// Description:
// This is called by the attached node when it (the node) is about
// to be deleted.
//
// Use: private
public void dyingReference() {
// We want to detach the sensor if it's still attached to the
// dying node after we invoke the callback. If the callback
// attaches to something else, we don't want to detach it. So
// we'll compare the nodes before and after the callback is
// invoked and detach only if it's the same one.
SoNode dyingNode = getAttachedNode();
invokeDeleteCallback();
if (getAttachedNode() == dyingNode)
detach();
}
/**
* Returns the node that this sensor is sensing, or NULL if it is not attached to any node.
*
* @return
*/
public SoNode getAttachedNode() {
return node;
}
} |
/**
* This class performs all necessary processing steps to ongoing responses.
*
* @author Danilo Reinert
*/
public class ResponseProcessor {
private final SerializationEngine serializationEngine;
private ResponseDeserializer responseDeserializer;
private final FilterManagerImpl filterManager;
private final InterceptorManagerImpl interceptorManager;
public ResponseProcessor(SerializationEngine serializationEngine, ResponseDeserializer responseDeserializer,
FilterManagerImpl filterManager, InterceptorManagerImpl interceptorManager) {
this.serializationEngine = serializationEngine;
this.responseDeserializer = responseDeserializer;
this.filterManager = filterManager;
this.interceptorManager = interceptorManager;
}
public void process(ProcessableResponse response) {
// TODO: create a bypassResponse[Filter|Intercept] option
// To bypass deserialization, just ask for Payload.class
// 3: FILTER
response = applyFilters(response);
// 2: DESERIALIZE
response = applyDeserializer(response);
// 1: INTERCEPT
response = applyInterceptors(response);
response.process();
}
public ResponseDeserializer getResponseDeserializer() {
return responseDeserializer;
}
public void setResponseDeserializer(ResponseDeserializer responseDeserializer) {
this.responseDeserializer = responseDeserializer;
}
private ProcessableResponse applyFilters(ProcessableResponse response) {
// Apply filters in reverse order, so they are executed in the order they were registered
final ListIterator<ResponseFilter.Provider> it = filterManager.reverseResponseFiltersIterator();
while (it.hasPrevious()) {
response = new ResponseInFilterProcess(response, it.previous().getInstance());
}
return response;
}
private ProcessableResponse applyInterceptors(ProcessableResponse response) {
// Apply interceptors in reverse order, so they are executed in the order they were registered
final ListIterator<ResponseInterceptor.Provider> it = interceptorManager.reverseResponseInterceptorsIterator();
while (it.hasPrevious()) {
response = new ResponseInInterceptProcess(response, it.previous().getInstance());
}
return response;
}
private ProcessableResponse applyDeserializer(ProcessableResponse response) {
return new ResponseInDeserializeProcess(response, serializationEngine, responseDeserializer);
}
} |
/**
* ClassName: ConsumerUtils
* Description:
* date: 2020/5/24 23:01
*
* @author ThierrySquirrel
* @since JDK 1.8
*/
public class ConsumerUtils {
private ConsumerUtils() {
}
public static boolean isCache(Message message) {
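// The expired-cache identity bytes are appended to the message body;
// compare the tail of the body against them.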
byte[] cacheIdentity = RedisConsumerConstant.REDIS_EXPIRED_CACHE_IDENTITY.getValue ();
int cacheIdentityLength = cacheIdentity.length;
byte[] body = message.getBody ();
int readLength = body.length - cacheIdentityLength;
byte[] cache = new byte[cacheIdentityLength];
System.arraycopy (body, readLength, cache, 0, cacheIdentityLength);
return Arrays.equals (cacheIdentity, cache);
}
} |
def _scope(self):
    with self._original_graph_item.as_default():
        if ENV.AUTODIST_PATCH_TF.val:
            PatchTensorFlow.patch_var_reading()
            PatchTensorFlow.patch_keras()
        try:
            yield
        finally:
            PatchTensorFlow.unpatch_keras()
            PatchTensorFlow.unpatch_var_reading()
Rare earth spin ensemble magnetically coupled to a superconducting resonator
Interfacing superconducting quantum processors, working in the GHz frequency range, with optical quantum networks and atomic qubits is a challenging task for the implementation of distributed quantum information processing as well as for quantum communication. Using spin ensembles of rare earth ions provides an excellent opportunity to bridge the microwave and optical domains at the quantum level. In this letter, we demonstrate magnetic coupling of Er$^{3+}$ spins doped in a Y$_{2}$SiO$_{5}$ crystal to a high-Q coplanar superconducting resonator.
Quantum communication is a rapidly developing field of science and technology which allows the transmission of information in an intrinsically secure way. Like its classical counterpart, a quantum communication network can combine various types of systems which transmit, receive, and process information using quantum algorithms. For example, the nodes of such a network can be implemented by superconducting (SC) quantum circuits operated in the GHz frequency range, whereas fiber optics operated at near-infrared can be used to link them over long distances. For reversible transfer of quantum states between systems operating at GHz and optical frequency ranges one must use a hybrid system. Spin ensembles coupled to a microwave resonator represent one of the possible implementations of such a system. The collective coupling strength of a spin ensemble is increased with respect to a single spin by the square root of the number of spins. Transparent crystals doped with paramagnetic ions often possess long coherence times, and the collective coupling has been recently demonstrated with NV-centers in diamond and Cr$^{3+}$ ions in ruby.
In this letter, we report on the measurement of a spin ensemble of Erbium ions in a crystal, magnetically coupled to a high-Q coplanar SC resonator. The Er$^{3+}$ ions are distinct from other spin ensembles due to their optical transition at the telecom C-band, i.e. inside the so-called "Erbium window" at 1.54 µm wavelength, and a long measured optical coherence time.
The energy level diagram of Erbium ions embedded inside a crystal is shown in Fig. 1(a). The electronic configuration of a free Er$^{3+}$ ion is $4f^{11}$ with a $^{4}I$ term.
The spin-orbit coupling splits it into several fine structure levels. An optical transition at telecom wavelength occurs between the ground state $^{2S+1}L_J = {}^{4}I_{15/2}$ and the first excited state $^{4}I_{13/2}$, where $S$, $L$ and $J$ are the spin, orbital and total magnetic momenta of the ion. The weak crystal field splits the ground state into eight ($J+1/2$) Kramers doublets. At cryogenic temperature, only the lowest doublet $Z_1$ is populated, therefore the system can be described as an effective electronic spin with $S = 1/2$. However, Erbium has five even isotopes $^{162}$Er, $^{164}$Er, $^{166}$Er, $^{168}$Er and $^{170}$Er, and one odd isotope $^{167}$Er (natural abundance 22.9%) with a nuclear spin $I = 7/2$. Therefore, the electronic states of $^{167}$Er with effective spin projection $m_S = \pm 1/2$ are additionally split into 8 hyperfine levels. The magnetic properties of Erbium ions are associated with an unquenched total orbital moment $J$ in a crystal field, which results in the appearance of a large magnetic moment of nearly $7\mu_B$, where $\mu_B$ is the Bohr magneton, at particular orientations of the applied magnetic field. The large spin tuning rate of $\sim 200$ GHz/T makes Erbium doped crystals favorable for their integration with SC qubits, which can be operated only at relatively low magnetic field. Such an integrated spin-SC device can be very attractive for applications particularly in quantum repeaters, where one can store quantum information and perform local operations. A quantum state of the optical field can be mapped into spin waves encoded in Zeeman or hyperfine levels of Erbium ions and transferred later to the quantum state of the microwave field. The strong coupling between SC qubits and a microwave resonator allows fast quantum gate operations on the state of the microwave field at nanosecond timescales. This sequence can also be launched in reverse order, thus establishing a coherent quantum transfer between GHz and optical frequency ranges.
In this experiment, we use a single Y$_2$SiO$_5$ crystal doped with 0.02% of Er$^{3+}$ (Er:YSO), supplied by Scientific Materials Inc. The crystal has dimensions of $1 \times 1.5 \times 3$ mm$^3$ and it is glued on top of the silicon chip with a $\lambda/2$-coplanar Niobium SC-resonator, see Fig. 1(b). The resonance frequency of the rare-earth ion chip is $\omega_0 = 2\pi \times 8.9$ GHz and its quality factor is $Q \approx 32000$ at the magnetic field of the Erbium transitions. The crystal orientation is shown in Fig. 1(c) with its optical extinction axes $b$ and $D_1$ and is specified by the angles $\theta$ and $\phi$ between these axes and the direction of the applied magnetic field. The $b$-axis of the crystal is directed along its 1.5 mm side and is perpendicular to the bias magnetic field $B$ ($\theta = 90°$) applied parallel to the chip surface. The angle between the magnetic field and $D_1$ is $\phi = -60°$. This particular orientation is chosen to maximize the g-factor for crystallographic site 1, which falls into our field scanning range between 0 and 70 mT and is relevant for our experiment. The gap between the bottom of the crystal and the chip surface is controlled by observing Newton's interference fringes; with proper placement 1-2 fringes are visible, yielding a gap of about 0.5 µm. The experiment was performed in a $^3$He cryostat, at a base temperature of 280 mK.
In quantum optics, the resonator-spin ensemble interaction is usually described by the Tavis-Cummings Hamiltonian. Provided that the number of photons in the resonator is much smaller than the number of spins $N$, a spin ensemble behaves as a harmonic oscillator coupled to a cavity. That results in an avoided level crossing when the spins are tuned into resonance with the cavity, which is also in agreement with a phenomenological treatment of the observed effect. The rotating component of the magnetization of the spins produces an oscillatory magnetic field, which perturbs the inductance $L_0$ of the resonator. The new inductance is $L = L_0(1 + \chi\xi)$, where $\chi = \chi' - i\chi''$ is the dynamic magnetic susceptibility of the ensemble, with $\chi'$ and $\chi''$ being its dispersive and absorptive parts respectively, and $\xi$ is a geometric factor describing the spin distribution across the mode and its coupling to the oscillating field. Thus, the resonance frequency of the rare-earth chip is $\tilde{\omega}_0 = \omega_0/\sqrt{1 + \chi\xi}$. Provided that $\chi\xi \ll 1$, we obtain the following equations for the resonator frequency $\tilde{\omega}_0$ and its decay rate $\tilde{\kappa}_0$: $\tilde{\omega}_0 = \omega_0 + v^2(\omega_0 - \omega_s)/[(\omega_0 - \omega_s)^2 + \Gamma_2^{*2}]$ (1) and $\tilde{\kappa}_0 = \kappa_0 + v^2\Gamma_2^{*}/[(\omega_0 - \omega_s)^2 + \Gamma_2^{*2}]$ (2), where $v$ is the coupling strength, $\omega_s$ is the Larmor frequency of the spin ensemble, and $\Gamma_2^{*}$ is its total linewidth. The coupling strength $v$ can be expressed via the static susceptibility of the spin ensemble $\chi_0$ and reads as $v = \omega_0\sqrt{\chi_0\xi}/2$. The substitution of the actual value of $\chi_0$ yields the final expression for the collective coupling strength $v = \bar{g}\mu_B\sqrt{\mu_0\omega_0 n\xi/4\hbar}$, where $\bar{g}$ is an effective g-factor due to the magnetic anisotropy of the crystal and $n$ is the spin concentration. For a concentration of Erbium electronic spins of $n_S \sim 10^{18}$ cm$^{-3}$, an effective g-factor in the plane perpendicular to the magnetic field of $\bar{g} \sim 7$ and a filling factor $\xi \sim 0.25$ we expect a coupling strength $v/2\pi \sim 60$ MHz.
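A back-of-the-envelope evaluation of this expression with the quoted numbers (approximate, for orientation only):
$$ v = \bar{g}\mu_B\sqrt{\frac{\mu_0\,\omega_0\, n\,\xi}{4\hbar}} \approx (7)(9.27\times10^{-24}\,\mathrm{J/T})\sqrt{\frac{(4\pi\times10^{-7})(2\pi\times8.9\times10^{9})(10^{24}\,\mathrm{m^{-3}})(0.25)}{4\times(1.05\times10^{-34}\,\mathrm{J\,s})}} \approx 2\pi\times65\,\mathrm{MHz}, $$
of the same order as the quoted $v/2\pi \sim 60$ MHz.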
The microwave transmission spectroscopy of the rare-earth ion chip as a function of the bias magnetic field is presented in Fig. 2. By using a vector network analyzer we measured the $S_{21}$ parameter, which contains both the amplitude and the phase of the signal. The probing power at the input of the resonator is about 1 fW, corresponding to an excitation level of $\sim 100$ microwave photons in the SC-resonator. The amplitude of the transmitted signal $|S_{21}|$ as a function of magnetic field and probing frequency is presented in Fig. 2(a). The spectrum consists of the large avoided level crossing at a bias field of $B = 55$ mT, associated with the magnetic dipole transition between electronic states with $m_S = \pm 1/2$. A regular pattern of six dark interruptions is associated with the odd Erbium isotope and is due to allowed hyperfine transitions between states with equal nuclear spin projection $m_I = 7/2, 5/2, \ldots, -1/2, -3/2$. The states with $m_I = -5/2$ and $-7/2$ do not enter our scanning range.
To understand the measured spectrum we numerically diagonalize the spin Hamiltonian for a paramagnetic ion in the crystal: $H = \mu_B\,\mathbf{B}\cdot\mathbf{g}\cdot\mathbf{S} + \mathbf{S}\cdot\mathbf{A}\cdot\mathbf{I} + \mathbf{I}\cdot\mathbf{Q}\cdot\mathbf{I} - g_n\mu_n\,\mathbf{B}\cdot\mathbf{I}$ (3), where $\mathbf{g}$ is the g-factor tensor, $\mathbf{A}$ is the hyperfine tensor, $\mathbf{Q}$ is the nuclear quadrupole tensor, $\mu_n$ is the nuclear magneton and $g_n$ is the nuclear g-factor. The first term in the Hamiltonian presents the electronic Zeeman splitting, the second one describes hyperfine interactions, the third one is the quadrupole term and the last one is the Zeeman splitting due to the nuclear spin. The values for the tensors have been taken from a previous ESR study of the Er:YSO crystal. The resulting eigenspectrum is presented in Fig. 2(c). The position of each energy level is drawn as a function of the applied magnetic field, and the experimentally observed magnetic transitions are shown by arrows. For the even Erbium isotopes only the first term in the Hamiltonian (3) survives, which results in the strong magnetic transition between electronic spin states $m_S = \pm 1/2$ shown with the red arrow and marked by the letter "S". The odd $^{167}$Er ion has 8 allowed hyperfine transitions with $\Delta m_I = 0$, which are shown by blue arrows and marked with $m_I$ numbers.
To extract the coupling strengths and linewidths for the different transitions, each spectral line $|S_{21}|$ is fitted to a Lorentzian at every value of the magnetic field. The data corresponding to the shift of the resonator frequency $\omega_0$ for transitions "7/2" and "S" is shown in Fig. 3(a),(b). The dispersive behavior of the frequency shift in the vicinity of the Erbium spin transitions is well fit with Eq. (1). For the transition "7/2", the coupling strength extracted from that fit is $v_{7/2}/2\pi = 2.1 \pm 0.3$ MHz, which exceeds the decay rate of the resonator $\kappa/2\pi = 0.3$ MHz. However, the linewidth of the spin ensemble exceeds the coupling strength, and the same fit yields $\Gamma^{*}_{7/2}/2\pi = 70 \pm 1$ MHz. The frequency shift of the resonator due to the coupling to the electronic spin ensemble "S" (Fig. 3(b)) has been studied for two excitation levels of $10^2$ (light gray dots) and $10^5$ (dark gray dots) microwave photons. The dashed line described by Eq. (1) fits the low excitation spectrum well, yielding $v_S/2\pi = 11.6 \pm 0.2$ MHz and $\Gamma^{*}_S/2\pi = 145 \pm 5$ MHz. At high excitation level, the presented data corresponds to a Dysonian line shape associated with spin diffusion in bulk metals. We believe that the observed mixture of the dispersive and absorptive line shapes might also be associated with spin diffusion in and out of the part interacting with the resonator mode. The measured spectrum in Fig. 2(a) also reveals other interesting features: between the hyperfine transitions one can recognize an additional regular structure. To study this, we plot the quality factor of the resonator in Fig. 3(c) as a function of magnetic field. The curve consists of a series of regular absorption dips originating from the magnetic coupling to the electronic and hyperfine spin ensembles. It also contains an additional weak pattern appearing in between the hyperfine transitions. We interpret these weak absorption lines as corresponding to the forbidden quadrupole transitions between the hyperfine states satisfying $\Delta m_I = \pm 1$. These transitions are marked by the letters "Q1", "Q2" and "Q3".
The experimental data in Fig. 3 can also be fit with $Q = \omega_0/\kappa_0$ by using Eq. (2), where each magnetic transition contributes independently to the cavity decay rate $\kappa_0$. The data points between transitions "Q1" and "5/2", and after transition "-1/2", are not fit well due to the presence of additional magnetic transitions related to the other crystallographic site. The fit of the quality factor behavior yields the following values: $v_{7/2}/2\pi = 2.5 \pm 0.3$ MHz, $\Gamma^{*}_{7/2}/2\pi = 65 \pm 3$ MHz and $v_S/2\pi = 13.8 \pm 0.1$ MHz, $\Gamma^{*}_S/2\pi = 141 \pm 3$ MHz. The slight differences of the fitted parameters obtained by using $\omega_0$ or $Q$ are probably due to an admixture of two magnetic classes into one anticrossing.
The dimensionless parameter identifying the coupling regime is the cooperativity $C = v^2/\kappa\Gamma^{*}$, which corresponds to the number of coherent oscillations between the coupled systems. The cooperativity parameter for the hyperfine transition "7/2" is measured to be $C_{7/2} = 0.4$. High cooperativity coupling is reached for the electronic transition "S", where $C_S = 5.2$. In that regime microwave photons inside the SC-cavity coherently interact with the spin ensemble. However, to observe a normal-mode splitting the condition $2v > (\kappa, \Gamma^{*})$ has to be fulfilled.
The coupling strength $v_S$ of the electronic spin ensemble is found to vary as a function of its temperature. The transmission spectrum of the rare-earth chip has been taken at temperatures of 0.3, 0.5, 1 and 1.5 Kelvin. The coupling strength $v_S$ is extracted from the change of $Q$ and plotted as a function of the temperature in the inset of Fig. 2(b). The temperature dependence is fit to $v_S = v_S(0)\tanh^{1/2}(\hbar\omega_0/2k_BT)$ and plotted with a solid line. The coupling strength of the ensemble at zero temperature, $v_S(0)/2\pi = 17.3 \pm 0.2$ MHz, is the only fitted parameter.
We also found that the linewidths of different spin ensembles are not the same and grow with magnetic field. We explain this by a small misalignment of the crystal with respect to the magnetic field. When the angle $\theta$ deviates from 90°, the additional degeneracy due to the $C_2$ symmetry of the crystal is lifted and each Erbium transition splits further into two different magnetic subclasses. Using the "Easyspin" package we simulated ESR spectra of the Er:YSO crystal and bounded the maximum misalignment of the crystal to be $\Delta\theta < 0.4°$. Assuming that the observed effect is dominated by the magnetic class splitting, the additional contribution to the spin linewidth is estimated to be about 30 MHz. Such a large linewidth cannot be completely attributed to the dephasing of spins. We measured the pure dephasing time using a Hahn echo sequence in a pulsed ESR spectrometer at a temperature of 7 K, and found that for the "7/2" transition $T_2^{(7/2)} \approx 540$ ns and for the "S" transition $T_2^{(S)} \approx 200$ ns. Moreover, at the temperature of 0.3 K we would expect $T_2$ to be longer by at least an order of magnitude. Therefore, the additional contribution to the spin linewidths may be associated with field inhomogeneity or surface magnetism of the SC-resonator.
In conclusion, we have presented measurements on an Er$^{3+}$ spin ensemble in a Y$_2$SiO$_5$ crystal magnetically coupled to a SC coplanar resonator. This hybrid system is a promising candidate for an interface between SC quantum circuits and optical quantum networks. The measured coupling strengths of the different spin ensembles encoded in the Er:YSO crystal exceed the decay rate of the cavity. The demonstrated on-chip ESR spectroscopy allows us to resolve all spin state transitions of the Er$^{3+}$ ions. The presented experiment realizes a first step towards the implementation of a rare-earth based quantum memory with telecom wavelength conversion that could constitute an essential building block of future quantum networks.
We thank M. Azarkh and M. Drescher for the ESR study of Er:YSO crystals, A. Abdumalikov and O. Astafiev (NEC) for the fabrication of the SC-resonator and C. Müller for his useful comments and critical reading of the manuscript. This work was supported by the CFN of DFG, the EU projects MIDAS, SCOPE, SOLID and the BMBF program "Quantum communications". IP acknowledges support of the Alexander von Humboldt Foundation. PB acknowledges the financial support of the RiSC grant of KIT and Baden-Württemberg.
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "arrow/filesystem/filesystem.h"
#include "arrow/python/common.h"
#include "arrow/python/visibility.h"
#include "arrow/util/macros.h"
namespace arrow {
namespace py {
namespace fs {
class ARROW_PYTHON_EXPORT PyFileSystemVtable {
public:
std::function<void(PyObject*, std::string* out)> get_type_name;
std::function<bool(PyObject*, const arrow::fs::FileSystem& other)> equals;
std::function<void(PyObject*, const std::string& path, arrow::fs::FileInfo* out)>
get_file_info;
std::function<void(PyObject*, const std::vector<std::string>& paths,
std::vector<arrow::fs::FileInfo>* out)>
get_file_info_vector;
std::function<void(PyObject*, const arrow::fs::FileSelector&,
std::vector<arrow::fs::FileInfo>* out)>
get_file_info_selector;
std::function<void(PyObject*, const std::string& path, bool)> create_dir;
std::function<void(PyObject*, const std::string& path)> delete_dir;
std::function<void(PyObject*, const std::string& path, bool)> delete_dir_contents;
std::function<void(PyObject*)> delete_root_dir_contents;
std::function<void(PyObject*, const std::string& path)> delete_file;
std::function<void(PyObject*, const std::string& src, const std::string& dest)> move;
std::function<void(PyObject*, const std::string& src, const std::string& dest)>
copy_file;
std::function<void(PyObject*, const std::string& path,
std::shared_ptr<io::InputStream>* out)>
open_input_stream;
std::function<void(PyObject*, const std::string& path,
std::shared_ptr<io::RandomAccessFile>* out)>
open_input_file;
std::function<void(PyObject*, const std::string& path,
const std::shared_ptr<const KeyValueMetadata>&,
std::shared_ptr<io::OutputStream>* out)>
open_output_stream;
std::function<void(PyObject*, const std::string& path,
const std::shared_ptr<const KeyValueMetadata>&,
std::shared_ptr<io::OutputStream>* out)>
open_append_stream;
std::function<void(PyObject*, const std::string& path, std::string* out)>
normalize_path;
};
class ARROW_PYTHON_EXPORT PyFileSystem : public arrow::fs::FileSystem {
public:
PyFileSystem(PyObject* handler, PyFileSystemVtable vtable);
~PyFileSystem() override;
static std::shared_ptr<PyFileSystem> Make(PyObject* handler, PyFileSystemVtable vtable);
std::string type_name() const override;
bool Equals(const FileSystem& other) const override;
Result<arrow::fs::FileInfo> GetFileInfo(const std::string& path) override;
Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
const std::vector<std::string>& paths) override;
Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
const arrow::fs::FileSelector& select) override;
Status CreateDir(const std::string& path, bool recursive = true) override;
Status DeleteDir(const std::string& path) override;
Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
Status DeleteRootDirContents() override;
Status DeleteFile(const std::string& path) override;
Status Move(const std::string& src, const std::string& dest) override;
Status CopyFile(const std::string& src, const std::string& dest) override;
Result<std::shared_ptr<io::InputStream>> OpenInputStream(
const std::string& path) override;
Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
const std::string& path) override;
Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
const std::string& path,
const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
const std::string& path,
const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
Result<std::string> NormalizePath(std::string path) override;
PyObject* handler() const { return handler_.obj(); }
private:
OwnedRefNoGIL handler_;
PyFileSystemVtable vtable_;
};
} // namespace fs
} // namespace py
} // namespace arrow
/*---------------------------------------------------------------------------------------------
 * Copyright (c) <NAME>. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
import * as fs from 'fs';
import { tmpdir } from 'os';
import { promisify } from 'util';
import { ResourceQueue } from 'vs/base/common/async';
import { isEqualOrParent, isRootOrDriveLetter } from 'vs/base/common/extpath';
import { normalizeNFC } from 'vs/base/common/normalization';
import { join } from 'vs/base/common/path';
import { isLinux, isMacintosh, isWindows } from 'vs/base/common/platform';
import { extUriBiasedIgnorePathCase } from 'vs/base/common/resources';
import { URI } from 'vs/base/common/uri';
import { generateUuid } from 'vs/base/common/uuid';
//#region rimraf
export enum RimRafMode {
    /**
     * Slow version that unlinks each file and folder.
     */
    UNLINK,
    /**
     * Fast version that first moves the file/folder
     * into a temp directory and then deletes that
     * without waiting for it.
     */
    MOVE
}
/**
 * Allows to delete the provided path (either file or folder) recursively
 * with the options:
 * - `UNLINK`: direct removal from disk
 * - `MOVE`: faster variant that first moves the target to temp dir and then
 * deletes it in the background without waiting for that to finish.
 */
async function rimraf(path: string, mode = RimRafMode.UNLINK): Promise<void> {
    if (isRootOrDriveLetter(path)) {
        throw new Error('rimraf - will refuse to recursively delete root');
    }
    // delete: via rmDir
    if (mode === RimRafMode.UNLINK) {
        return rimrafUnlink(path);
    }
    // delete: via move
    return rimrafMove(path);
}
async function rimrafMove(path: string): Promise<void> {
    try {
        const pathInTemp = join(tmpdir(), generateUuid());
        try {
            await Promises.rename(path, pathInTemp);
        } catch (error) {
            return rimrafUnlink(path); // if rename fails, delete without tmp dir
        }
        // Delete but do not return as promise
        rimrafUnlink(pathInTemp).catch(error => {/* ignore */ });
    } catch (error) {
        if (error.code !== 'ENOENT') {
            throw error;
        }
    }
}
async function rimrafUnlink(path: string): Promise<void> {
    return Promises.rmdir(path, { recursive: true, maxRetries: 3 });
}
export function rimrafSync(path: string): void {
    if (isRootOrDriveLetter(path)) {
        throw new Error('rimraf - will refuse to recursively delete root');
    }
    fs.rmdirSync(path, { recursive: true });
}
//#endregion
//#region readdir with NFC support (macos)
export interface IDirent {
    name: string;
    isFile(): boolean;
    isDirectory(): boolean;
    isSymbolicLink(): boolean;
}
/**
 * Drop-in replacement of `fs.readdir` with support
 * for converting from macOS NFD unicode form to NFC
 * (https://github.com/nodejs/node/issues/2165)
 */
async function readdir(path: string): Promise<string[]>;
async function readdir(path: string, options: { withFileTypes: true }): Promise<IDirent[]>;
async function readdir(path: string, options?: { withFileTypes: true }): Promise<(string | IDirent)[]> {
    return handleDirectoryChildren(await (options ? safeReaddirWithFileTypes(path) : promisify(fs.readdir)(path)));
}
async function safeReaddirWithFileTypes(path: string): Promise<IDirent[]> {
    try {
        return await promisify(fs.readdir)(path, { withFileTypes: true });
    } catch (error) {
        console.warn('[node.js fs] readdir with filetypes failed with error: ', error);
    }
    // Fallback to manually reading and resolving each
    // children of the folder in case we hit an error
    // previously.
    // This can only really happen on exotic file systems
    // such as explained in #115645 where we get entries
    // from `readdir` that we can later not `lstat`.
    const result: IDirent[] = [];
    const children = await readdir(path);
    for (const child of children) {
        let isFile = false;
        let isDirectory = false;
        let isSymbolicLink = false;
        try {
            const lstat = await Promises.lstat(join(path, child));
            isFile = lstat.isFile();
            isDirectory = lstat.isDirectory();
            isSymbolicLink = lstat.isSymbolicLink();
        } catch (error) {
            console.warn('[node.js fs] unexpected error from lstat after readdir: ', error);
        }
        result.push({
            name: child,
            isFile: () => isFile,
            isDirectory: () => isDirectory,
            isSymbolicLink: () => isSymbolicLink
        });
    }
    return result;
}
/**
 * Drop-in replacement of `fs.readdirSync` with support
 * for converting from macOS NFD unicode form to NFC
 * (https://github.com/nodejs/node/issues/2165)
 */
export function readdirSync(path: string): string[] {
    return handleDirectoryChildren(fs.readdirSync(path));
}
function handleDirectoryChildren(children: string[]): string[];
function handleDirectoryChildren(children: IDirent[]): IDirent[];
function handleDirectoryChildren(children: (string | IDirent)[]): (string | IDirent)[];
function handleDirectoryChildren(children: (string | IDirent)[]): (string | IDirent)[] {
    return children.map(child => {
        // Mac: uses NFD unicode form on disk, but we want NFC
        // See also https://github.com/nodejs/node/issues/2165
        if (typeof child === 'string') {
            return isMacintosh ? normalizeNFC(child) : child;
        }
        child.name = isMacintosh ? normalizeNFC(child.name) : child.name;
        return child;
    });
}
/**
 * A convenience method to read all children of a path that
 * are directories.
 */
async function readDirsInDir(dirPath: string): Promise<string[]> {
    const children = await readdir(dirPath);
    const directories: string[] = [];
    for (const child of children) {
        if (await SymlinkSupport.existsDirectory(join(dirPath, child))) {
            directories.push(child);
        }
    }
    return directories;
}
//#endregion
//#region whenDeleted()
/**
 * A `Promise` that resolves when the provided `path`
 * is deleted from disk.
 */
export function whenDeleted(path: string, intervalMs = 1000): Promise<void> {
    return new Promise<void>(resolve => {
        let running = false;
        const interval = setInterval(() => {
            if (!running) {
                running = true;
                fs.access(path, err => {
                    running = false;
                    if (err) {
                        clearInterval(interval);
                        resolve(undefined);
                    }
                });
            }
        }, intervalMs);
    });
}
//#endregion
//#region Methods with symbolic links support
export namespace SymlinkSupport {
    export interface IStats {
        // The stats of the file. If the file is a symbolic
        // link, the stats will be of that target file and
        // not the link itself.
        // If the file is a symbolic link pointing to a non
        // existing file, the stat will be of the link and
        // the `dangling` flag will indicate this.
        stat: fs.Stats;
        // Will be provided if the resource is a symbolic link
        // on disk. Use the `dangling` flag to find out if it
        // points to a resource that does not exist on disk.
        symbolicLink?: { dangling: boolean };
    }
    /**
     * Resolves the `fs.Stats` of the provided path. If the path is a
     * symbolic link, the `fs.Stats` will be from the target it points
     * to. If the target does not exist, `dangling: true` will be returned
     * as `symbolicLink` value.
     */
    export async function stat(path: string): Promise<IStats> {
        // First stat the link
        let lstats: fs.Stats | undefined;
        try {
            lstats = await Promises.lstat(path);
            // Return early if the stat is not a symbolic link at all
            if (!lstats.isSymbolicLink()) {
                return { stat: lstats };
            }
        } catch (error) {
            /* ignore - use stat() instead */
        }
        // If the stat is a symbolic link or failed to stat, use fs.stat()
        // which for symbolic links will stat the target they point to
        try {
            const stats = await Promises.stat(path);
            return { stat: stats, symbolicLink: lstats?.isSymbolicLink() ? { dangling: false } : undefined };
        } catch (error) {
            // If the link points to a nonexistent file we still want
            // to return it as result while setting dangling: true flag
            if (error.code === 'ENOENT' && lstats) {
                return { stat: lstats, symbolicLink: { dangling: true } };
            }
            // Windows: workaround a node.js bug where reparse points
            // are not supported (https://github.com/nodejs/node/issues/36790)
            if (isWindows && error.code === 'EACCES') {
                try {
                    const stats = await Promises.stat(await Promises.readlink(path));
                    return { stat: stats, symbolicLink: { dangling: false } };
                } catch (error) {
                    // If the link points to a nonexistent file we still want
                    // to return it as result while setting dangling: true flag
                    if (error.code === 'ENOENT' && lstats) {
                        return { stat: lstats, symbolicLink: { dangling: true } };
                    }
                    throw error;
                }
            }
            throw error;
        }
    }
    /**
     * Figures out if the `path` exists and is a file with support
     * for symlinks.
     *
     * Note: this will return `false` for a symlink that exists on
     * disk but is dangling (pointing to a nonexistent path).
     *
     * Use `exists` if you only care about the path existing on disk
     * or not without support for symbolic links.
     */
    export async function existsFile(path: string): Promise<boolean> {
        try {
            const { stat, symbolicLink } = await SymlinkSupport.stat(path);
            return stat.isFile() && symbolicLink?.dangling !== true;
        } catch (error) {
            // Ignore, path might not exist
        }
        return false;
    }
    /**
     * Figures out if the `path` exists and is a directory with support for
     * symlinks.
     *
     * Note: this will return `false` for a symlink that exists on
     * disk but is dangling (pointing to a nonexistent path).
     *
     * Use `exists` if you only care about the path existing on disk
     * or not without support for symbolic links.
     */
    export async function existsDirectory(path: string): Promise<boolean> {
        try {
            const { stat, symbolicLink } = await SymlinkSupport.stat(path);
            return stat.isDirectory() && symbolicLink?.dangling !== true;
        } catch (error) {
            // Ignore, path might not exist
        }
        return false;
    }
}
//#endregion
//#region Write File
// According to node.js docs (https://nodejs.org/docs/v6.5.0/api/fs.html#fs_fs_writefile_file_data_options_callback)
// it is not safe to call writeFile() on the same path multiple times without waiting for the callback to return.
// Therefore we use a Queue on the path that is given to us to sequentialize calls to the same path properly.
const writeQueues = new ResourceQueue();
/**
 * Same as `fs.writeFile` but with an additional call to
 * `fs.fdatasync` after writing to ensure changes are
 * flushed to disk.
 *
 * In addition, multiple writes to the same path are queued.
 */
function writeFile(path: string, data: string, options?: IWriteFileOptions): Promise<void>;
function writeFile(path: string, data: Buffer, options?: IWriteFileOptions): Promise<void>;
function writeFile(path: string, data: Uint8Array, options?: IWriteFileOptions): Promise<void>;
function writeFile(path: string, data: string | Buffer | Uint8Array, options?: IWriteFileOptions): Promise<void>;
function writeFile(path: string, data: string | Buffer | Uint8Array, options?: IWriteFileOptions): Promise<void> {
    return writeQueues.queueFor(URI.file(path), extUriBiasedIgnorePathCase).queue(() => {
        const ensuredOptions = ensureWriteOptions(options);
        return new Promise((resolve, reject) => doWriteFileAndFlush(path, data, ensuredOptions, error => error ? reject(error) : resolve()));
    });
}
interface IWriteFileOptions {
    mode?: number;
    flag?: string;
}
interface IEnsuredWriteFileOptions extends IWriteFileOptions {
    mode: number;
    flag: string;
}
let canFlush = true;
// Calls fs.writeFile() followed by a fs.sync() call to flush the changes to disk
// We do this in cases where we want to make sure the data is really on disk and
// not in some cache.
//
// See https://github.com/nodejs/node/blob/v5.10.0/lib/fs.js#L1194
function doWriteFileAndFlush(path: string, data: string | Buffer | Uint8Array, options: IEnsuredWriteFileOptions, callback: (error: Error | null) => void): void {
    if (!canFlush) {
        return fs.writeFile(path, data, { mode: options.mode, flag: options.flag }, callback);
    }
    // Open the file with same flags and mode as fs.writeFile()
    fs.open(path, options.flag, options.mode, (openError, fd) => {
        if (openError) {
            return callback(openError);
        }
        // It is valid to pass a fd handle to fs.writeFile() and this will keep the handle open!
        fs.writeFile(fd, data, writeError => {
            if (writeError) {
                return fs.close(fd, () => callback(writeError)); // still need to close the handle on error!
            }
            // Flush contents (not metadata) of the file to disk
            // https://github.com/microsoft/vscode/issues/9589
            fs.fdatasync(fd, (syncError: Error | null) => {
                // In some exotic setups it is well possible that node fails to sync
                // In that case we disable flushing and warn to the console
                if (syncError) {
                    console.warn('[node.js fs] fdatasync is now disabled for this session because it failed: ', syncError);
                    canFlush = false;
                }
                return fs.close(fd, closeError => callback(closeError));
            });
        });
    });
}
/**
 * Same as `fs.writeFileSync` but with an additional call to
 * `fs.fdatasyncSync` after writing to ensure changes are
 * flushed to disk.
 */
export function writeFileSync(path: string, data: string | Buffer, options?: IWriteFileOptions): void {
    const ensuredOptions = ensureWriteOptions(options);
    if (!canFlush) {
        return fs.writeFileSync(path, data, { mode: ensuredOptions.mode, flag: ensuredOptions.flag });
    }
    // Open the file with same flags and mode as fs.writeFile()
    const fd = fs.openSync(path, ensuredOptions.flag, ensuredOptions.mode);
    try {
        // It is valid to pass a fd handle to fs.writeFile() and this will keep the handle open!
        fs.writeFileSync(fd, data);
        // Flush contents (not metadata) of the file to disk
        try {
            fs.fdatasyncSync(fd); // https://github.com/microsoft/vscode/issues/9589
        } catch (syncError) {
            console.warn('[node.js fs] fdatasyncSync is now disabled for this session because it failed: ', syncError);
            canFlush = false;
        }
    } finally {
        fs.closeSync(fd);
    }
}
function ensureWriteOptions(options?: IWriteFileOptions): IEnsuredWriteFileOptions {
    if (!options) {
        return { mode: 0o666 /* default node.js mode for files */, flag: 'w' };
    }
    return {
        mode: typeof options.mode === 'number' ? options.mode : 0o666 /* default node.js mode for files */,
        flag: typeof options.flag === 'string' ? options.flag : 'w'
    };
}
//#endregion
//#region Move / Copy
/**
 * A drop-in replacement for `fs.rename` that:
 * - updates the `mtime` of the `source` after the operation
 * - allows to move across multiple disks
 */
async function move(source: string, target: string): Promise<void> {
    if (source === target) {
        return; // simulate node.js behaviour here and do a no-op if paths match
    }
    // We have been updating `mtime` for move operations for files since the
    // beginning for reasons that are no longer quite clear, but changing
    // this could be risky as well. As such, trying to reason about it:
    // It is very common as developer to have file watchers enabled that watch
    // the current workspace for changes. Updating the `mtime` might make it
    // easier for these watchers to recognize an actual change. Since changing
    // a source code file also updates the `mtime`, moving a file should do so
    // as well because conceptually it is a change of a similar category.
    async function updateMtime(path: string): Promise<void> {
        try {
            const stat = await Promises.lstat(path);
            if (stat.isDirectory() || stat.isSymbolicLink()) {
                return; // only for files
            }
            await Promises.utimes(path, stat.atime, new Date());
        } catch (error) {
            // Ignore any error
        }
    }
    try {
        await Promises.rename(source, target);
        await updateMtime(target);
    } catch (error) {
        // In two cases we fallback to classic copy and delete:
        //
        // 1.) The EXDEV error indicates that source and target are on different devices
        // In this case, fallback to using a copy() operation as there is no way to
        // rename() between different devices.
        //
        // 2.) The user tries to rename a file/folder that ends with a dot. This is not
        // really possible to move then, at least on UNC devices.
        if (source.toLowerCase() !== target.toLowerCase() && error.code === 'EXDEV' || source.endsWith('.')) {
            await copy(source, target, { preserveSymlinks: false /* copying to another device */ });
            await rimraf(source, RimRafMode.MOVE);
            await updateMtime(target);
        } else {
            throw error;
        }
    }
}
interface ICopyPayload {
    readonly root: { source: string, target: string };
    readonly options: { preserveSymlinks: boolean };
    readonly handledSourcePaths: Set<string>;
}
/**
 * Recursively copies all of `source` to `target`.
 *
 * The options `preserveSymlinks` configures how symbolic
 * links should be handled when encountered. Set to
 * `false` to not preserve them and `true` otherwise.
 */
async function copy(source: string, target: string, options: { preserveSymlinks: boolean }): Promise<void> {
    return doCopy(source, target, { root: { source, target }, options, handledSourcePaths: new Set<string>() });
}
// When copying a file or folder, we want to preserve the mode
// it had and as such provide it when creating. However, modes
// can go beyond what we expect (see link below), so we mask it.
// (https://github.com/nodejs/node-v0.x-archive/issues/3045#issuecomment-4862588)
const COPY_MODE_MASK = 0o777;
async function doCopy(source: string, target: string, payload: ICopyPayload): Promise<void> {
    // Keep track of paths already copied to prevent
    // cycles from symbolic links to cause issues
    if (payload.handledSourcePaths.has(source)) {
        return;
    } else {
        payload.handledSourcePaths.add(source);
    }
    const { stat, symbolicLink } = await SymlinkSupport.stat(source);
    // Symlink
    if (symbolicLink) {
        // Try to re-create the symlink unless `preserveSymlinks: false`
        if (payload.options.preserveSymlinks) {
            try {
                return await doCopySymlink(source, target, payload);
            } catch (error) {
                // in any case of an error fallback to normal copy via dereferencing
                console.warn('[node.js fs] copy of symlink failed: ', error);
            }
        }
        if (symbolicLink.dangling) {
            return; // skip dangling symbolic links from here on (https://github.com/microsoft/vscode/issues/111621)
        }
    }
    // Folder
    if (stat.isDirectory()) {
        return doCopyDirectory(source, target, stat.mode & COPY_MODE_MASK, payload);
    }
    // File or file-like
    else {
        return doCopyFile(source, target, stat.mode & COPY_MODE_MASK);
    }
}
async function doCopyDirectory(source: string, target: string, mode: number, payload: ICopyPayload): Promise<void> {
    // Create folder
    await Promises.mkdir(target, { recursive: true, mode });
    // Copy each file recursively
    const files = await readdir(source);
    for (const file of files) {
        await doCopy(join(source, file), join(target, file), payload);
    }
}
async function doCopyFile(source: string, target: string, mode: number): Promise<void> {
    // Copy file
    await Promises.copyFile(source, target);
    // restore mode (https://github.com/nodejs/node/issues/1104)
    await Promises.chmod(target, mode);
}
async function doCopySymlink(source: string, target: string, payload: ICopyPayload): Promise<void> {
    // Figure out link target
    let linkTarget = await Promises.readlink(source);
    // Special case: the symlink points to a target that is
    // actually within the path that is being copied. In that
    // case we want the symlink to point to the target and
    // not the source
    if (isEqualOrParent(linkTarget, payload.root.source, !isLinux)) {
        linkTarget = join(payload.root.target, linkTarget.substr(payload.root.source.length + 1));
    }
    // Create symlink
    await Promises.symlink(linkTarget, target);
}
//#endregion
//#region Promise based fs methods
/**
 * Prefer this helper class over the `fs.promises` API to
 * enable `graceful-fs` to function properly. Given issue
 * https://github.com/isaacs/node-graceful-fs/issues/160 it
 * is evident that the module only takes care of the non-promise
 * based fs methods.
 *
 * Another reason is `realpath` being entirely different in
 * the promise based implementation compared to the other
 * one (https://github.com/microsoft/vscode/issues/118562)
 *
 * Note: using getters for a reason, since `graceful-fs`
 * patching might kick in later after modules have been
 * loaded we need to defer access to fs methods.
 * (https://github.com/microsoft/vscode/issues/124176)
 */
export const Promises = new class {
    //#region Implemented by node.js
    get access() { return promisify(fs.access); }
    get stat() { return promisify(fs.stat); }
    get lstat() { return promisify(fs.lstat); }
    get utimes() { return promisify(fs.utimes); }
    get read() { return promisify(fs.read); }
    get readFile() { return promisify(fs.readFile); }
    get write() { return promisify(fs.write); }
    get appendFile() { return promisify(fs.appendFile); }
    get fdatasync() { return promisify(fs.fdatasync); }
    get truncate() { return promisify(fs.truncate); }
    get rename() { return promisify(fs.rename); }
    get copyFile() { return promisify(fs.copyFile); }
    get open() { return promisify(fs.open); }
    get close() { return promisify(fs.close); }
    get symlink() { return promisify(fs.symlink); }
    get readlink() { return promisify(fs.readlink); }
    get chmod() { return promisify(fs.chmod); }
    get mkdir() { return promisify(fs.mkdir); }
    get unlink() { return promisify(fs.unlink); }
    get rmdir() { return promisify(fs.rmdir); }
    get realpath() { return promisify(fs.realpath); }
    //#endregion
    //#region Implemented by us
    async exists(path: string): Promise<boolean> {
        try {
            await Promises.access(path);
            return true;
        } catch {
            return false;
        }
    }
    get readdir() { return readdir; }
    get readDirsInDir() { return readDirsInDir; }
    get writeFile() { return writeFile; }
    get rm() { return rimraf; }
    get move() { return move; }
    get copy() { return copy; }
    //#endregion
};
//#endregion
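A minimal usage sketch of the helpers above (not part of the original module; the import specifier and temp-directory layout are assumptions for illustration):

import { join } from 'path';
import { tmpdir } from 'os';
// Assumed module path for the file above.
import { Promises, rimrafSync } from 'vs/base/node/pfs';

async function pfsDemo(): Promise<void> {
    const dir = join(tmpdir(), 'pfs-demo');
    await Promises.mkdir(dir, { recursive: true });
    // Queued write that fdatasyncs the file before resolving
    await Promises.writeFile(join(dir, 'a.txt'), 'hello');
    console.log(await Promises.exists(join(dir, 'a.txt'))); // true
    // Recursive delete; refuses to delete a filesystem root
    rimrafSync(dir);
}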
def fetchWorkflow(self, offline: bool = False):
parsedRepoURL = parse.urlparse(self.id)
i_workflow : Optional[IdentifiedWorkflow] = None
engineDesc : Optional[WorkflowType] = None
guessedRepo : Optional[RemoteRepo] = None
if parsedRepoURL.scheme == '':
if (self.trs_endpoint is not None) and len(self.trs_endpoint) > 0:
i_workflow = self.getWorkflowRepoFromTRS(offline=offline)
else:
raise WFException('trs_endpoint was not provided')
else:
engineDesc = None
guessedRepo = guess_repo_params(parsedRepoURL, logger=self.logger, fail_ok=True)
if guessedRepo is not None:
if guessedRepo.tag is None:
guessedRepo = RemoteRepo(
repo_url=guessedRepo.repo_url,
tag=cast(RepoTag, self.version_id),
rel_path=guessedRepo.rel_path
)
else:
i_workflow = self.getWorkflowRepoFromROCrateURL(cast(URIType, self.id), offline=offline)
if i_workflow is not None:
guessedRepo = i_workflow.remote_repo
engineDesc = i_workflow.workflow_type
if guessedRepo is None:
guessedRepo = RemoteRepo(
repo_url=cast(RepoURL, self.id),
tag=cast(RepoTag, self.version_id)
)
repoURL = guessedRepo.repo_url
repoTag = guessedRepo.tag
repoRelPath = guessedRepo.rel_path
repoDir : Optional[AbsPath] = None
repoEffectiveCheckout : Optional[RepoTag] = None
if ':' in repoURL:
parsedRepoURL = parse.urlparse(repoURL)
if len(parsedRepoURL.scheme) > 0:
self.repoURL = repoURL
self.repoTag = repoTag
if repoRelPath == '':
repoRelPath = None
self.repoRelPath = repoRelPath
repoDir, repoEffectiveCheckout = self.wfexs.doMaterializeRepo(repoURL, repoTag)
if repoDir is None:
repoDir = cast(AbsPath, repoURL)
assert self.workflowDir is not None, "The workflow directory should be defined"
if os.path.isdir(self.workflowDir):
shutil.rmtree(self.workflowDir)
link_or_copy(repoDir, self.workflowDir)
localWorkflow = LocalWorkflow(dir=self.workflowDir, relPath=repoRelPath, effectiveCheckout=repoEffectiveCheckout)
self.logger.info("materialized workflow repository (checkout {}): {}".format(repoEffectiveCheckout, self.workflowDir))
if repoRelPath is not None:
if not os.path.exists(os.path.join(self.workflowDir, repoRelPath)):
raise WFException(
"Relative path {} cannot be found in materialized workflow repository {}".format(repoRelPath,
self.workflowDir))
if engineDesc is None:
for engineDesc in self.WORKFLOW_ENGINES:
self.logger.debug("Testing engine " + engineDesc.trs_descriptor)
engine = self.wfexs.instantiateEngine(engineDesc, self.stagedSetup)
try:
engineVer, candidateLocalWorkflow = engine.identifyWorkflow(localWorkflow)
self.logger.debug("Tested engine {} {}".format(engineDesc.trs_descriptor, engineVer))
if engineVer is not None:
break
except WorkflowEngineException:
pass
else:
raise WFException('No engine recognized a workflow at {}'.format(repoURL))
else:
self.logger.debug("Fixed engine " + engineDesc.trs_descriptor)
engine = self.wfexs.instantiateEngine(engineDesc, self.stagedSetup)
engineVer, candidateLocalWorkflow = engine.identifyWorkflow(localWorkflow)
if engineVer is None:
raise WFException(
'Engine {} did not recognize a workflow at {}'.format(engine.workflowType.engineName, repoURL))
self.repoDir = repoDir
self.repoEffectiveCheckout = repoEffectiveCheckout
self.engineDesc = engineDesc
self.engine = engine
self.engineVer = engineVer
self.localWorkflow = candidateLocalWorkflow
def _operation_speak_as_digits(self, content, index, children):
data_property_value = 'digits'
if index != 0:
children.append(self._create_content_element(
content[0:index],
data_property_value
))
children.append(self._create_aural_content_element(
' ',
data_property_value
))
children.append(self._create_content_element(
content[index:(index + 1)],
data_property_value
))
return children
/**
* Render method that draws the mesh then restores the
* state when finished.
*/
public void render() {
initRenderer();
glDrawElements(GL_TRIANGLES, getVertexCount(), GL_UNSIGNED_INT, 0);
endRenderer();
}
import { AnyAction } from "typescript-fsa";
import { Payment, setPaymentDetails, setShippingDetails, Shipping } from "../actions";
export interface CheckoutState {
shipping: Shipping;
payment: Payment;
}
const initState: CheckoutState = {
shipping: {
fullname: "",
email: "",
address: "",
mobile: undefined,
},
payment: {
type: "paypal",
name: "",
cardnumber: "",
expiry: "",
ccv: "",
},
};
export function checkoutReducer(state: CheckoutState | undefined, action: AnyAction): CheckoutState {
if (state === undefined) {
return initState;
}
if (setShippingDetails.match(action)) {
return {
...state,
shipping: action.payload,
};
}
if (setPaymentDetails.match(action)) {
return {
...state,
payment: action.payload,
};
}
return state;
}
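A hypothetical sketch exercising the reducer above directly, without store wiring (the import paths and field values are illustrative only):

// Assumed module paths for the reducer and its action creators.
import { checkoutReducer } from './checkout';
import { setShippingDetails } from '../actions';

const next = checkoutReducer(undefined, setShippingDetails({
    fullname: 'Jane Doe',
    email: 'jane@example.com',
    address: '1 Main St',
    mobile: undefined,
}));
// next.shipping.fullname === 'Jane Doe'; next.payment keeps its initial values.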
/*
* Cloud9: A MapReduce Library for Hadoop Licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
* or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.WritableComparable;
/**
* Writable extension of the {@code ArrayListOfDoubles} class. This class
* provides an efficient data structure to store a list of doubles for MapReduce
* jobs.
*
* @author <NAME>
*/
public class ArrayListOfDoublesWritable extends ArrayListOfDoubles implements
WritableComparable<ArrayListOfDoublesWritable>
{
/**
* Constructs an {@code ArrayListOfDoublesWritable} object.
*/
public ArrayListOfDoublesWritable()
{
super();
}
/**
* Constructs an empty list with the specified initial capacity.
*
* @param initialCapacity
* the initial capacity of the list
*/
public ArrayListOfDoublesWritable(int initialCapacity)
{
super(initialCapacity);
}
/**
* Constructs a deep copy of the {@code ArrayListOfDoublesWritable} object
* given as parameter.
*
* @param other
* object to be copied
*/
public ArrayListOfDoublesWritable(ArrayListOfDoublesWritable other)
{
super();
size = other.size();
array = Arrays.copyOf(other.getArray(), size);
}
/**
* Constructs a list from an array. Defensively makes a copy of the array.
*
* @param arr
* source array
*/
public ArrayListOfDoublesWritable(double[] arr)
{
super(arr);
}
/**
* Deserializes this object.
*
* @param in
* source for raw byte representation
*/
public void readFields(DataInput in) throws IOException
{
this.clear();
int size = in.readInt();
for (int i = 0; i < size; i++)
{
add(i, in.readDouble());
}
}
/**
* Serializes this object.
*
* @param out
* where to write the raw byte representation
*/
public void write(DataOutput out) throws IOException
{
int size = size();
out.writeInt(size);
for (int i = 0; i < size; i++)
{
out.writeDouble(get(i));
}
}
@Override
public String toString()
{
return toString(size());
}
/**
* Creates an instance of this object from {@code ArrayListOfDoubles}. Note
* that backing array is cloned.
*
* @param arr
* source {@code ArrayListOfDoubles}
*/
public static ArrayListOfDoublesWritable fromArrayListOfDoubles(
ArrayListOfDoubles arr)
{
ArrayListOfDoublesWritable list = new ArrayListOfDoublesWritable();
list.array = Arrays.copyOf(arr.getArray(), arr.size());
list.size = arr.size();
return list;
}
/**
* Elementwise comparison. Shorter always comes before if it is a sublist of
* longer. No preference if both are empty.
*
* @param obj
* other object this is compared against
*/
@Override
public int compareTo(ArrayListOfDoublesWritable obj)
{
ArrayListOfDoublesWritable other = (ArrayListOfDoublesWritable) obj;
if (isEmpty())
{
if (other.isEmpty())
{
return 0;
}
else
{
return -1;
}
}
for (int i = 0; i < size(); i++)
{
if (other.size() <= i)
{
return 1;
}
if (get(i) < other.get(i))
{
return -1;
}
else if (get(i) > other.get(i))
{
return 1;
}
}
if (other.size() > size())
{
return -1;
}
else
{
return 0;
}
}
}
package org.opencb.opencga.core.models.study.configuration;
import java.util.List;
public class ClinicalConsentAnnotation {
private List<ClinicalConsentParam> consents;
private String date;
public ClinicalConsentAnnotation() {
}
public ClinicalConsentAnnotation(List<ClinicalConsentParam> consents, String date) {
this.consents = consents;
this.date = date;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("ClinicalConsentAnnotation{");
sb.append("consents=").append(consents);
sb.append(", date='").append(date).append('\'');
sb.append('}');
return sb.toString();
}
public List<ClinicalConsentParam> getConsents() {
return consents;
}
public ClinicalConsentAnnotation setConsents(List<ClinicalConsentParam> consents) {
this.consents = consents;
return this;
}
public String getDate() {
return date;
}
public ClinicalConsentAnnotation setDate(String date) {
this.date = date;
return this;
}
}
Android 4.0 (aka Ice Cream Sandwich) has been available for a little while now, but many carriers and manufacturers are still working hard to roll out official updates. Today, Samsung began rolling out an Android 4.0 update to its unlocked Galaxy S II devices in America. While users of carrier-branded Galaxy S II devices will continue to wait for an update, there are plenty of carriers and manufacturers that are working on updates of their own.
According to details shared with Droid Life, Verizon Wireless is getting ready to release an Android 4.0 update for the Motorola Xoom (3G/4G) tablet as well as the HTC Rezound smartphone. Both of these devices are expected to get Ice Cream Sandwich in the next two to three weeks.
The Motorola Xoom tablet is expected to receive the update first. Rumored dates currently suggest the update will begin rolling out on April 23. This update will be for the 3G/4G version of the Xoom. The Wi-Fi only version of the Xoom tablet began receiving updates to Ice Cream Sandwich in January.
Verizon Wireless is expected to begin rolling out an Ice Cream Sandwich update for the HTC Rezound on May 9. Various sites claimed the Droid Razr and Droid Razr Maxx were scheduled to receive an update to Ice Cream Sandwich on April 4, but that didn't pan out. It appears Motorola and Verizon are having some issues with the Android 4.0 update. The companies have not said when the official Ice Cream Sandwich update for Droid Razr and Droid Razr Maxx will be made available.
# Given two distinct values A and B from {1, 2, 3}, print the remaining one.
A, B = int(input()), int(input())
print([n for n in [1, 2, 3] if n not in [A, B]][0])
// WriteTo writes bytes in b as pretty hex output to writer wr.
func WriteTo(wr io.Writer, b []byte) (n int, err error) {
w := bufio.NewWriter(wr)
if IncludeHeader {
nn, err := w.Write([]byte(head))
n += nn
if err != nil {
return n, err
}
}
var line []byte
for i := 0; i < len(b); i += 16 {
p := b[i:]
line = append(line[:0], "0000000000"...)
line = strconv.AppendInt(line, int64(i), 16)
mark := len(line)
line = append(line, ' ', ' ')
for i := 0; i < 16; i++ {
if i < len(p) {
line = append(line, hch[p[i]>>4], hch[p[i]&15], ' ')
} else {
line = append(line, ' ', ' ', ' ')
}
}
line = append(line, ' ')
for i := 0; i < 16; i++ {
if i < len(p) {
if p[i] < ' ' {
line = append(line, '.')
} else if p[i] > 127 {
line = append(line, 0, 0, 0, 0, 0, 0)
n := utf8.EncodeRune(line[len(line)-6:], rune(p[i]))
line = line[:len(line)-6+n]
} else {
line = append(line, p[i])
}
} else {
line = append(line, ' ')
}
}
line = append(line, '\n')
nn, err := w.Write(line[mark-10:])
n += nn
if err != nil {
return n, err
}
}
return n, w.Flush()
}
package benchmarkdp.datagenerator.generator;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.eclipse.emf.common.util.BasicEList;
import org.eclipse.emf.common.util.EList;
import org.eclipse.emf.common.util.URI;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.emf.ecore.resource.Resource;
import org.eclipse.emf.ecore.resource.ResourceSet;
import org.eclipse.emf.ecore.resource.impl.ResourceSetImpl;
import org.eclipse.emf.ecore.xmi.XMIResource;
import org.eclipse.m2m.qvt.oml.BasicModelExtent;
import org.eclipse.m2m.qvt.oml.ModelExtent;
import benchmarkdp.datagenerator.core.ModelType;
public class DocumentHolder {
private String documentName;
private URI documentURI;
private Resource documentResource;
private Map<ModelType,EList<EObject>> documentObjects;
private Map<ModelType,ModelExtent> documentModels;
private Map<String, String> groundTruth;
private Map<ModelType , String> generatedCode;
public DocumentHolder(String name, String initDocument) {
documentName = name;
documentURI = URI.createURI(initDocument);
documentObjects = new HashMap<ModelType, EList<EObject>>();
documentModels = new HashMap<ModelType, ModelExtent>();
generatedCode = new HashMap<ModelType, String>();
ResourceSet resourceSet = new ResourceSetImpl();
documentResource = resourceSet.getResource(documentURI, true);
EList<EObject> tmpObjects = documentResource.getContents();
documentObjects.put(ModelType.PIM, tmpObjects);
documentModels.put(ModelType.PIM, new BasicModelExtent(tmpObjects));
groundTruth = new HashMap<String, String>();
generatedCode = new HashMap<ModelType, String>();
}
public String getName() {
return documentName;
}
public ModelExtent getModelExtent(ModelType model) {
return documentModels.get(model);
}
public EList<EObject> getDocumentObjects(ModelType model) {
return documentObjects.get(model);
}
public void setModelExtent(ModelType model, ModelExtent me) {
List<EObject> meCont = me.getContents();
documentModels.put(model, me);
documentObjects.put(model, new BasicEList<EObject>(meCont));
}
public void addGroundTruth(String key, String value) {
groundTruth.put(key, value);
}
public void setGeneratedCode(ModelType model, String code) {
generatedCode.put(model, code);
}
public void saveToFile(ModelType model, String path) {
Map<String, Object> opts = new HashMap<String, Object>();
opts.put(XMIResource.OPTION_SCHEMA_LOCATION, true);
ResourceSet resourceSetOut = new ResourceSetImpl();
Resource outResource = resourceSetOut.createResource(URI.createURI(path + documentName + "_" + model + ".xmi"));
outResource.getContents().addAll(documentObjects.get(model));
try {
outResource.save(opts);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public void exportGroundTruth(String path) {
try {
File f = new File(path + documentName + "-groundtruth.txt");
BufferedWriter bw = new BufferedWriter(new FileWriter(f));
for (Map.Entry<String, String> e : groundTruth.entrySet()) {
if (e.getKey().compareTo("words") == 0) {
exportWords(path, e.getValue());
} else if (e.getKey().compareTo("words-textbox") == 0) {
exportTextBoxWords(path, e.getValue());
} else {
bw.write(e.getKey() + " = " + e.getValue() + "\n");
}
}
bw.close();
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
}
public void exportWords(String path, String value) {
try {
File f = new File(path + documentName + "-groundtruthWords.txt");
BufferedWriter bw = new BufferedWriter(new FileWriter(f));
value = value.replace(",", "");
value = value.replace("[", "");
value = value.replace("]", "");
bw.write(value + "\n");
bw.close();
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
}
public void exportTextBoxWords(String path, String value) {
try {
File f = new File(path + documentName + "-groundtruthTextBoxWords.txt");
BufferedWriter bw = new BufferedWriter(new FileWriter(f));
value = value.replace(",", "");
value = value.replace("[", "");
value = value.replace("]", "");
bw.write(value + "\n");
bw.close();
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
}
public void saveGeneratedCode(ModelType model, String path) {
try {
File f = new File(path + documentName + "_" + model + ".vbs");
BufferedWriter bw = new BufferedWriter(new FileWriter(f));
bw.write(generatedCode.get(model) + "\n");
bw.close();
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
}
}
import { useCallback, useLayoutEffect, useRef } from 'react'
import { NoValue } from '..'
import { Facet, NO_VALUE, Option } from '../types'
export function useFacetCallback<M, V, K extends unknown[]>(
callback: (v: V) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, K extends unknown[]>(
callback: (v: V, v1: V1) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3, v4: V4) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>, Facet<V4>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, V5, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3, v4: V4, v5: V5) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>, Facet<V4>, Facet<V5>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, V5, V6, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3, v4: V4, v5: V5, v6: V6) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>, Facet<V4>, Facet<V5>, Facet<V6>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, V5, V6, V7, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3, v4: V4, v5: V5, v6: V6, v7: V7) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>, Facet<V4>, Facet<V5>, Facet<V6>, Facet<V7>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, V5, V6, V7, V8, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3, v4: V4, v5: V5, v6: V6, v7: V7, v8: V8) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>, Facet<V4>, Facet<V5>, Facet<V6>, Facet<V7>, Facet<V8>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, V5, V6, V7, V8, V9, K extends unknown[]>(
callback: (v: V, v1: V1, v2: V2, v3: V3, v4: V4, v5: V5, v6: V6, v7: V7, v8: V8, v9: V9) => (...args: K) => M,
dependencies: unknown[],
facet: [Facet<V>, Facet<V1>, Facet<V2>, Facet<V3>, Facet<V4>, Facet<V5>, Facet<V6>, Facet<V7>, Facet<V8>, Facet<V9>],
): (...args: K) => M | NoValue
export function useFacetCallback<M, V, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, K extends unknown[]>(
callback: (
v: V,
v1: V1,
v2: V2,
v3: V3,
v4: V4,
v5: V5,
v6: V6,
v7: V7,
v8: V8,
v9: V9,
v10: V10,
) => (...args: K) => M,
dependencies: unknown[],
facet: [
Facet<V>,
Facet<V1>,
Facet<V2>,
Facet<V3>,
Facet<V4>,
Facet<V5>,
Facet<V6>,
Facet<V7>,
Facet<V8>,
Facet<V9>,
Facet<V10>,
],
): (...args: K) => M | NoValue
/**
* Creates a callback that depends on the value of a facet.
* Very similar to `useCallback` from `React`
*
* @param callback function callback that receives the current facet values and the arguments passed to the callback
* @param dependencies variable used by the callback that are available in scope (similar as dependencies of useCallback)
* @param facets facets that the callback listens to
*
* We pass the dependencies of the callback as the second argument so we can leverage the eslint-plugin-react-hooks option for additionalHooks.
* Having this as the second argument allows the linter to work.
*/
export function useFacetCallback<M>(
callback: (...args: unknown[]) => (...args: unknown[]) => M,
dependencies: unknown[],
facets: Facet<unknown>[],
): (...args: unknown[]) => M | NoValue {
const facetsRef = useRef<Option<unknown>[]>(facets.map(() => NO_VALUE))
useLayoutEffect(() => {
const unsubscribes = facets.map((facet, index) => {
return facet.observe((value) => {
facetsRef.current[index] = value
})
})
return () => {
unsubscribes.forEach((unsubscribe) => unsubscribe())
}
// We care about each individual facet and if any is a different reference
// eslint-disable-next-line react-hooks/exhaustive-deps
}, facets)
// We care about each individual dependency and if any is a different reference
// eslint-disable-next-line react-hooks/exhaustive-deps
const callbackMemoized = useCallback(callback, dependencies)
// eslint-disable-next-line react-hooks/exhaustive-deps
return useCallback(
(...args: unknown[]) => {
const values = facetsRef.current
for (const value of values) {
if (value === NO_VALUE) return NO_VALUE
}
return callbackMemoized(...values)(...args)
},
[callbackMemoized, facetsRef],
)
}
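A hypothetical usage sketch of the hook above (not from the library itself; the import specifiers are assumptions): the returned callback reads the latest facet value at call time and yields NO_VALUE until the facet has emitted.

// Assumed relative imports for illustration.
import { Facet } from '../types'
import { useFacetCallback } from '.'

export function useFormatMessage(messageFacet: Facet<string>) {
    // (message) is the current facet value; (channel) is the call-time argument.
    return useFacetCallback(
        (message) => (channel: string) => `${channel}: ${message}`,
        [],
        [messageFacet],
    )
}
// A component calling useFormatMessage(...)('general') gets NO_VALUE
// before messageFacet has a value, and the formatted string afterwards.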
/**
* Tests the basic use cases for PR persistence.
*/
@RunWith(GeodeParamsRunner.class)
@SuppressWarnings("serial,unused")
public class PersistentPartitionedRegionWithRedundancyDUnitTest implements Serializable {
private static final int NUM_BUCKETS = 113;
private String partitionedRegionName;
private String parentRegion1Name;
private String parentRegion2Name;
private VM vm0;
private VM vm1;
private VM vm2;
private VM vm3;
@Rule
public DistributedRule distributedRule = new DistributedRule();
@Rule
public CacheRule cacheRule =
CacheRule.builder().addConfig(getDistributedSystemProperties()).build();
@Rule
public SerializableTestName testName = new SerializableTestName();
@Rule
public DistributedDiskDirRule diskDirRule = new DistributedDiskDirRule();
@Before
public void setUp() {
vm0 = getVM(0);
vm1 = getVM(1);
vm2 = getVM(2);
vm3 = getVM(3);
String uniqueName = getClass().getSimpleName() + "-" + testName.getMethodName();
partitionedRegionName = uniqueName + "-partitionedRegion";
parentRegion1Name = "parent1";
parentRegion2Name = "parent2";
}
@After
public void tearDown() {
invokeInEveryVM(() -> {
InternalResourceManager.setResourceObserver(null);
DistributionMessageObserver.setInstance(null);
});
}
private Properties getDistributedSystemProperties() {
Properties config = new Properties();
config.setProperty(SERIALIZABLE_OBJECT_FILTER, TestFunction.class.getName());
return config;
}
/**
* A simple test case that we are actually persisting with a PR.
*/
@Test
public void recoversFromDisk() throws Exception {
createPartitionedRegion(0, -1, 113, true);
createData(0, 1);
Set<Integer> vm0Buckets = getBucketList();
getCache().close();
createPartitionedRegion(0, -1, 113, true);
assertThat(getBucketList()).isEqualTo(vm0Buckets);
checkData(0, 1);
}
/**
* Test to make sure that gets are served without retries while a restarted
* member is still recovering its buckets from disk
*/
@Test
public void testGetDataDelayDueToRecoveryAfterServerShutdown() throws Exception {
int numEntries = 10000;
vm0.invoke(() -> createPartitionedRegion(1, -1, 113, true));
vm1.invoke(() -> createPartitionedRegion(1, -1, 113, true));
vm2.invoke(() -> createPartitionedRegion(1, -1, 113, true));
vm3.invoke(() -> createPartitionedRegion(1, -1, 113, true));
vm0.invoke(() -> createData(0, numEntries));
Set<Integer> bucketsOnVM0 = vm0.invoke(this::getBucketList);
Set<Integer> bucketsOnVM1 = vm1.invoke(this::getBucketList);
Set<Integer> bucketsOnVM2 = vm2.invoke(this::getBucketList);
Set<Integer> bucketsOnVM3 = vm3.invoke(this::getBucketList);
vm1.invoke(() -> getCache().close());
AsyncInvocation<Void> createPartitionedRegionOnVM1 =
vm1.invokeAsync(() -> createPartitionedRegion(1, -1, 113, true));
vm0.invoke(() -> {
long timeElapsed;
int key;
Region<?, ?> region = getCache().getRegion(partitionedRegionName);
for (int i = 0; i < numEntries; i++) {
key = getRandomNumberInRange(0, numEntries - 1);
assertThat(region.get(key)).isEqualTo(key);
}
});
createPartitionedRegionOnVM1.await(2, MINUTES);
assertThat(vm1.invoke(this::getBucketList)).isEqualTo(bucketsOnVM1);
assertThat(vm0.invoke(() -> getRegionStats().getGetRetries())).isEqualTo(0);
}
private void createPartitionedRegion(final int redundancy, final int recoveryDelay,
final int numBuckets, final boolean synchronous) throws InterruptedException {
CountDownLatch recoveryDone = new CountDownLatch(1);
if (redundancy > 0) {
ResourceObserver observer = new ResourceObserverAdapter() {
@Override
public void recoveryFinished(Region region) {
recoveryDone.countDown();
}
};
InternalResourceManager.setResourceObserver(observer);
} else {
recoveryDone.countDown();
}
PartitionAttributesFactory<?, ?> partitionAttributesFactory = new PartitionAttributesFactory();
partitionAttributesFactory.setRedundantCopies(redundancy);
partitionAttributesFactory.setRecoveryDelay(recoveryDelay);
partitionAttributesFactory.setTotalNumBuckets(numBuckets);
partitionAttributesFactory.setLocalMaxMemory(500);
RegionFactory<?, ?> regionFactory =
getCache().createRegionFactory(PARTITION_PERSISTENT);
regionFactory.setDiskSynchronous(synchronous);
regionFactory.setPartitionAttributes(partitionAttributesFactory.create());
regionFactory.create(partitionedRegionName);
recoveryDone.await(2, MINUTES);
}
private void createData(final int startKey, final int endKey) {
createDataFor(startKey, endKey, partitionedRegionName);
}
private void createDataFor(final int startKey, final int endKey,
final String regionName) {
Region<Integer, Integer> region = getCache().getRegion(regionName);
for (int i = startKey; i < endKey; i++) {
region.put(i, i);
}
}
private Set<Integer> getBucketList() {
return getBucketListFor(partitionedRegionName);
}
private Set<Integer> getBucketListFor(final String regionName) {
PartitionedRegion region = (PartitionedRegion) getCache().getRegion(regionName);
return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
}
private void checkData(final int startKey, final int endKey) {
checkDataFor(startKey, endKey, partitionedRegionName);
}
private void checkDataFor(final int startKey, final int endKey,
final String regionName) {
Region<?, ?> region = getCache().getRegion(regionName);
for (int i = startKey; i < endKey; i++) {
assertThat(region.get(i)).isEqualTo(i);
}
}
private InternalCache getCache() {
return cacheRule.getOrCreateCache();
}
private PartitionedRegionStats getRegionStats() {
PartitionedRegion region = (PartitionedRegion) getCache().getRegion(partitionedRegionName);
return region.getPrStats();
}
int getRandomNumberInRange(int min, int max) {
Random r = new Random();
return r.nextInt((max - min) + 1) + min;
}
private static class RecoveryObserver extends ResourceObserverAdapter {
private final String partitionedRegionName;
private final CountDownLatch recoveryDone = new CountDownLatch(1);
RecoveryObserver(final String partitionedRegionName) {
this.partitionedRegionName = partitionedRegionName;
}
@Override
public void rebalancingOrRecoveryFinished(final Region region) {
if (region.getName().equals(partitionedRegionName)) {
recoveryDone.countDown();
}
}
void await(final long timeout, final TimeUnit unit) throws InterruptedException {
recoveryDone.await(timeout, unit);
}
}
private static class TestFunction implements Function, Serializable {
@Override
public void execute(final FunctionContext context) {
context.getResultSender().lastResult(null);
}
@Override
public String getId() {
return TestFunction.class.getSimpleName();
}
@Override
public boolean hasResult() {
return true;
}
@Override
public boolean optimizeForWrite() {
return false;
}
@Override
public boolean isHA() {
return false;
}
}
}
/**
* Go through and parse all materials for a part. Add materials to the material list
* @param materials The array of materials from the server
* @param part_id The server's ID for the part being parsed
*/
private void parseMaterials(JSONArray materials, int part_id){
JSONObject material;
int id;
String en, fr, es;
for (int i = 0; i < materials.length(); i++){
try{
material = materials.getJSONObject(i);
id = material.getInt(ID);
en = material.getString(MATERIAL_EN);
fr = material.getString(MATERIAL_FR);
es = material.getString(MATERIAL_ES);
mMaterials.add(new Material(id, part_id, en, fr, es));
}
catch(JSONException e){
e.printStackTrace();
}
}
}
import { Component, ViewEncapsulation } from '@angular/core';
import { NavigationService } from '../services';
@Component({
selector: 'footer',
templateUrl: './footer.component.html',
styleUrls: [ './footer.component.scss' ],
encapsulation: ViewEncapsulation.None
})
export class FooterComponent {
constructor(
private navService: NavigationService
) {
//
}
getVersion(data?: boolean): string {
    // VERSION and DATA_VERSION are assumed to be ambient build-time globals
    // (e.g. injected by the build, such as via webpack DefinePlugin); they
    // are intentionally not imported in this file.
    return ((data) ? DATA_VERSION : VERSION) || '0.0.0';
}
getNavService() {
return this.navService;
}
}
package ru.nik66;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertThat;
import static org.hamcrest.core.Is.is;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
public class SortUserTest {
private User user1;
private User user2;
@Before
public void init() {
this.user1 = new User("Kolya", 22);
this.user2 = new User("Kat", 11);
}
@Test
public void whenSortUsersByAgeThenReturnSortedSet() {
List<User> users = Arrays.asList(user1, user2);
SortUser sort = new SortUser();
Set<User> actual = sort.sort(users);
Set<User> expected = new TreeSet<>();
expected.add(this.user2);
expected.add(this.user1);
assertThat(actual, is(expected));
}
@Test
public void whenSortUsersByAgeThenMinus() {
assertThat(user1.compareTo(user2), is(11));
}
@Test
public void whenSortByNameLength() {
List<User> users = Arrays.asList(this.user1, this.user2);
SortUser sort = new SortUser();
List<User> actual = sort.sortNameLength(users);
List<User> expected = Arrays.asList(this.user2, this.user1);
assertThat(actual, is(expected));
}
@Test
public void whenSortByAllFields() {
User user3 = new User("Kolya", 15);
User user4 = new User("Kat", 55);
List<User> users = Arrays.asList(this.user1, this.user2, user3, user4);
SortUser sort = new SortUser();
List<User> actual = sort.sortByAllFields(users);
List<User> expected = Arrays.asList(this.user2, user4, user3, this.user1);
assertThat(actual, is(expected));
}
}
// listenOnEvent implements RekeyFSM interface for rekeyFSM.
func (m *rekeyFSM) listenOnEvent(
event rekeyEventType, callback func(RekeyEvent), repeatedly bool) {
m.muListeners.Lock()
defer m.muListeners.Unlock()
m.listeners[event] = append(m.listeners[event], rekeyFSMListener{
onEvent: callback,
repeatedly: repeatedly,
})
}
# -*- coding: utf-8 -*-
"""Wrapper to run synergia from the command line.
:copyright: Copyright (c) 2018 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern.pkdebug import pkdp, pkdc
from sirepo import mpi
from sirepo import simulation_db
from sirepo.template import template_common
import py.path
import re
import sirepo.template.synergia as template
_SCHEMA = simulation_db.get_schema(template.SIM_TYPE)
def run(cfg_dir):
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
report = data['report']
if 'bunchReport' in report or report == 'twissReport' or report == 'twissReport2':
template_common.exec_parameters()
template.save_sequential_report_data(data, py.path.local(cfg_dir))
else:
raise AssertionError('unknown report: {}'.format(report))
def run_background(cfg_dir):
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
distribution = data['models']['bunch']['distribution']
run_with_mpi = distribution == 'lattice' or distribution == 'file'
if run_with_mpi:
template_common.exec_parameters_with_mpi()
else:
#TODO(pjm): MPI doesn't work with rsbeams distributions yet
template_common.exec_parameters()
|
<reponame>devianllert/ts-react-boilerplate
import api from './api';
export interface Tokens {
accessToken: string;
refreshToken: string;
expiresIn: string;
}
export interface UserLoginDTO {
emailOrUsername: string;
password: string;
}
export interface UserSignUpDTO {
email: string;
username: string;
password: string;
}
export const setTokens = (data: Tokens): void => {
localStorage.setItem('accessToken', data.accessToken);
};
export const clearTokens = (): void => {
localStorage.removeItem('accessToken');
};
export const signUp = (payload: UserSignUpDTO): Promise<void> => api.post('auth/register', payload);
export const login = async (payload: UserLoginDTO): Promise<void> => {
const { data } = await api.post<Tokens>('auth/login', payload, { withCredentials: true });
setTokens(data);
};
export const logout = async (): Promise<void> => {
await api.post('auth/logout', undefined, { withCredentials: true })
.finally(clearTokens);
};
export const verifyEmail = (token?: string): Promise<void> => api.post(`auth/verify/${token}`);
export const isAuthenticated = (): boolean => Boolean(localStorage.getItem('accessToken'));
|
def clear_folders(self):
self.folders.clear() |
<gh_stars>1-10
package atomeps62
import (
"context"
"fmt"
"math"
)
//Volumes .
func (vs *AtlonaVideoSwitcher6x2) Volumes(ctx context.Context, blocks []string) (map[string]int, error) {
body := `{ "getConfig": { "audio": { "audOut": {}}}}`
config, err := vs.getConfig(ctx, body)
if err != nil {
return nil, fmt.Errorf("unable to get config: %w", err)
}
// always return all of the blocks, regardless of `blocks`
// (since we don't have to do any extra work)
vols := make(map[string]int)
// Get the digital audio out on zoneOut1 volume
if config.Audio.AudOut.ZoneOut1.AudioVol < -50 {
vols["zoneOut1-Digital"] = 0
} else {
vols["zoneOut1-Digital"] = 2 * (config.Audio.AudOut.ZoneOut1.AudioVol + 50)
}
// Get the analog audio out on zoneOut1 volume
if config.Audio.AudOut.ZoneOut1.AnalogOut.AudioVol < -50 {
vols["zoneOut1-Analog"] = 0
} else {
vols["zoneOut1-Analog"] = 2 * (config.Audio.AudOut.ZoneOut1.AnalogOut.AudioVol + 50)
}
// Get the digital audio out on zoneOut2 volume
if config.Audio.AudOut.ZoneOut2.AudioVol < -50 {
vols["zoneOut2-Digital"] = 0
} else {
vols["zoneOut2-Digital"] = 2 * (config.Audio.AudOut.ZoneOut2.AudioVol + 50)
}
// Get the analog audio out on zoneOut2 volume
	if config.Audio.AudOut.ZoneOut2.AnalogOut.AudioVol < -50 {
		vols["zoneOut2-Analog"] = 0
	} else {
		vols["zoneOut2-Analog"] = 2 * (config.Audio.AudOut.ZoneOut2.AnalogOut.AudioVol + 50)
	}
}
return vols, nil
}
//SetVolume .
func (vs *AtlonaVideoSwitcher6x2) SetVolume(ctx context.Context, block string, level int) error {
zblock := ""
if block == "zoneOut1" || block == "zoneOut2" {
// Atlona volume levels are from -90 to 10 and the number we receive is 0-100
// If volume level is supposed to be zero set it -90 on atlona
if level == 0 {
level = -90
} else {
			convertedVolume := -50 + math.Round(float64(level)/2)
level = int(convertedVolume)
}
// Set digital and analog audio together for the audio block
body := fmt.Sprintf(`{ "setConfig": { "audio": { "audOut": { "%s": { "audioVol": %d, "analogOut":{"audioVol": %d }}}}}}`, block, level, level)
if err := vs.setConfig(ctx, body); err != nil {
return fmt.Errorf("unable to set config: %w", err)
}
return nil
} else if block == "zoneOut1Analog" || block == "zoneOut2Analog" {
if block == "zoneOut1Analog" {
zblock = "zoneOut1"
} else {
zblock = "zoneOut2"
}
// Atlona volume levels are from -90 to 10 and the number we receive is 0-100
// If volume level is supposed to be zero set it -90 on atlona
if level == 0 {
level = -90
} else {
			convertedVolume := -50 + math.Round(float64(level)/2)
level = int(convertedVolume)
}
// Set analog audio for the audio block
body := fmt.Sprintf(`{ "setConfig": { "audio": { "audOut": { "%s": { "analogOut":{"audioVol": %d }}}}}}`, zblock, level)
if err := vs.setConfig(ctx, body); err != nil {
return fmt.Errorf("unable to set config: %w", err)
}
return nil
} else if block == "zoneOut1Digital" || block == "zoneOut2Digital" {
if block == "zoneOut1Digital" {
zblock = "zoneOut1"
} else {
zblock = "zoneOut2"
}
// Atlona volume levels are from -90 to 10 and the number we receive is 0-100
// If volume level is supposed to be zero set it -90 on atlona
if level == 0 {
level = -90
} else {
			convertedVolume := -50 + math.Round(float64(level)/2)
level = int(convertedVolume)
}
// Set digital audio for the audio block
body := fmt.Sprintf(`{ "setConfig": { "audio": { "audOut": { "%s": { "audioVol": %d }}}}}`, zblock, level)
if err := vs.setConfig(ctx, body); err != nil {
return fmt.Errorf("unable to set config: %w", err)
}
return nil
} else {
return fmt.Errorf("Unable to set config: Block %v is not a valid block", block)
}
}
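// A small helper sketch isolating the 0-100 → device-volume conversion that
// the three branches above repeat inline. The helper name is an illustrative
// assumption and is not part of the original driver.
func toAtlonaVolume(level int) int {
	// 0 maps to the device minimum (-90) for a full mute; other levels are
	// scaled from the 0-100 API range into the -50..0 range the driver uses.
	if level == 0 {
		return -90
	}
	return int(-50 + math.Round(float64(level)/2))
}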
//Mutes .
func (vs *AtlonaVideoSwitcher6x2) Mutes(ctx context.Context, blocks []string) (map[string]bool, error) {
body := `{ "getConfig": { "audio": { "audOut": {}}}}`
config, err := vs.getConfig(ctx, body)
if err != nil {
return nil, fmt.Errorf("unable to get config: %w", err)
}
// always return all of the blocks, regardless of `blocks`
// (since we don't have to do any extra work)
mutes := make(map[string]bool)
mutes["zoneOut1-Analog"] = config.Audio.AudOut.ZoneOut1.AnalogOut.AudioMute
mutes["zoneOut1-Digital"] = config.Audio.AudOut.ZoneOut1.VideoOut.AudioMute
mutes["zoneOut2-Analog"] = config.Audio.AudOut.ZoneOut2.AnalogOut.AudioMute
mutes["zoneOut2-Digital"] = config.Audio.AudOut.ZoneOut2.VideoOut.AudioMute
return mutes, nil
}
//SetMute for all of the audio objects within the block
func (vs *AtlonaVideoSwitcher6x2) SetMute(ctx context.Context, block string, muted bool) error {
zblock := ""
// Set Mutes for both analog and digital when configured with a combined block
if block == "zoneOut1" || block == "zoneOut2" {
body := fmt.Sprintf(`{ "setConfig": { "audio": { "audOut": { "%s": { "videoOut": { "audioMute": %t }, "analogOut": { "audioMute": %t }}}}}}`, block, muted, muted)
if err := vs.setConfig(ctx, body); err != nil {
return fmt.Errorf("unable to set config: %w", err)
}
return nil
// Set mute for analog audio out for a given block
} else if block == "zoneOut1Analog" || block == "zoneOut2Analog" {
if block == "zoneOut1Analog" {
zblock = "zoneOut1"
} else {
zblock = "zoneOut2"
}
body := fmt.Sprintf(`{ "setConfig": { "audio": { "audOut": { "%s": { "analogOut": { "audioMute": %t }}}}}}`, zblock, muted)
if err := vs.setConfig(ctx, body); err != nil {
return fmt.Errorf("unable to set config: %w", err)
}
return nil
// Set mute for digital audio out for a given block
} else if block == "zoneOut1Digital" || block == "zoneOut2Digital" {
if block == "zoneOut1Digital" {
zblock = "zoneOut1"
} else {
zblock = "zoneOut2"
}
body := fmt.Sprintf(`{ "setConfig": { "audio": { "audOut": { "%s": { "videoOut": { "audioMute": %t }}}}}}`, zblock, muted)
if err := vs.setConfig(ctx, body); err != nil {
return fmt.Errorf("unable to set config: %w", err)
}
return nil
} else {
return fmt.Errorf("Unable to set config: Block %v is not a valid block", block)
}
}
|
Acute kidney injury in primary care: where are we now and where are we going?
Acute kidney injury (AKI) is defined as ‘a clinical and biochemical diagnosis reflecting abrupt kidney dysfunction’ .1 AKI is graded on a scale of 1–3 based on the size of the creatinine increase from baseline. Higher AKI scores are associated with higher mortality, longer length of stay, and less renal recovery.2
AKI complicates almost one in five hospital admissions and is associated with a 20–33% mortality rate, increased length of hospital stay, and an estimated annual cost to the NHS in England of £1.02 billion.3 Two-thirds of AKI cases identified in hospital start in the community.2 NHS England and the UK Renal Association Renal Registry’s Think Kidneys programme have supported changes and improvement in AKI identification, measurement, risk assessment, and education across UK health care including the implementation of a national electronic system that alerts clinicians to potential cases of AKI.1
Around 60% of all patients with AKI identified in hospital already have it on arrival.2 The mortality of these patients with community-acquired AKI detected in hospital (CAH-AKI) is 19.6% during hospitalisation, rising to an alarming 45% by 14 months post-discharge.4 Although CAH-AKI has a lower mortality rate than hospital-acquired AKI, it remains a noteworthy risk factor for death.
The incidence of community-acquired AKI detected in primary care (CAP-AKI) varies according to the use of different AKI definitions and different methodologies for acquiring a baseline creatinine. Sawhney et al 5 used the official NHS AKI algorithm and reported that 1.4% of 50 835 patients in a Scottish registry who also had a known creatinine … |
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import fastmri
from fastmri.data import transforms
class HammingWindowNetwork(nn.Module):
def __init__(self, shape):
super().__init__()
self.hamming_window_layer = HammingWindowLayer(shape)
def forward(self, data):
data = data.squeeze()
output = []
data = self.hamming_window_layer(data)
for offset in range(data.shape[1]):
image = fastmri.ifft3c(data[:, offset])
image = fastmri.complex_abs(image)
image = fastmri.rss(image, dim=0).squeeze()
# image = transforms.complex_center_crop_3d(image, (image.shape[0], 128, 128))
output.append(image)
output = torch.stack(output, 0)
return output
class HammingWindowLayer(nn.Module):
def __init__(self, shape):
super().__init__()
_weight_init = hamming_window_init(torch.ones(shape))
self.weight = nn.Parameter(_weight_init, requires_grad=True)
def forward(self, data):
data = data * self.weight.reshape((1, 1, 20, 128, 256, 1))
return data
def hamming_window_init(data=None):
    # avoid a shared mutable default tensor; build a fresh one for each call
    if data is None:
        data = torch.ones((20, 128, 128))
for axis, axis_size in enumerate(data.shape):
filter_shape = [1, ] * data.ndim
filter_shape[axis] = axis_size
window = torch.hamming_window(axis_size).reshape(filter_shape)
window = window ** (1.0 / data.ndim)
data *= window
return data
class HammingWindowParametrized(torch.nn.Module):
def __init__(self, device="cuda"):
super().__init__()
self.alpha = torch.nn.Parameter(torch.tensor(0.54))
self.beta = torch.nn.Parameter(torch.tensor(0.46))
self.device = device
def hamming_function(self, data):
window_0 = self.alpha - self.beta * torch.cos(torch.pi * 2 * torch.linspace(0, data.shape[2], data.shape[2], device=self.device) / data.shape[2])
data = data * window_0.reshape((1, 1, -1, 1, 1, 1))
window_1 = self.alpha - self.beta * torch.cos(torch.pi * 2 * torch.linspace(0, data.shape[3], data.shape[3], device=self.device) / data.shape[3])
data = data * window_1.reshape((1, 1, 1, -1, 1, 1))
window_2 = self.alpha - self.beta * torch.cos(torch.pi * 2 * torch.linspace(0, data.shape[4], data.shape[4], device=self.device) / data.shape[4])
data = data * window_2.reshape((1, 1, 1, 1, -1, 1))
return data
def forward(self, data):
shape = data.shape
data = data.squeeze()
data = self.hamming_function(data)
data = data.reshape(shape)
return data
def main():
# hw = hamming_window()
# for k in range(10):
# plt.subplot(2, 5, k + 1)
# plt.imshow(hw[k * 2], vmin=0, vmax=1)
# plt.show()
# hwn = HammingWindowNetwork((20, 128, 256))
# pred = hwn(torch.rand((28, 8, 20, 128, 256, 2)))
# print(pred.shape)
data = torch.ones((1, 28, 8, 20, 100, 100, 2))
hwp = HammingWindowParametrized()
res = hwp(data)
plt.imshow(res[0, 0, 4, 10, ..., 0].detach())#, vmin=-1, vmax=1)
plt.show()
if __name__ == "__main__":
main()
|
// NewClient creates a new postmark client
func NewClient(token string, fromaddr string) *Client {
bu, _ := url.Parse("https://api.postmarkapp.com")
return &Client{
Token: token,
FromAddress: fromaddr,
client: http.DefaultClient,
BaseURL: bu,
}
} |
/**
* Transforms a string into an array of single string characters.
*
* @param s
* The string to turn into a string array
*
* @return The input string transformed into an array of string characters
*/
private static String[] StringToArray(String s)
{
String[] as = new String[s.length()];
int i = 0;
for (char c : s.toCharArray())
{
as[i++] = "" + c;
}
return as;
} |
/**
* A Quartz implementation of {@link IScheduler}
*
* @author aphillips
*/
public class QuartzScheduler implements IScheduler {
public static final String RESERVEDMAPKEY_ACTIONCLASS = "ActionAdapterQuartzJob-ActionClass"; //$NON-NLS-1$
public static final String RESERVEDMAPKEY_ACTIONUSER = "ActionAdapterQuartzJob-ActionUser"; //$NON-NLS-1$
public static final String RESERVEDMAPKEY_ACTIONID = "ActionAdapterQuartzJob-ActionId"; //$NON-NLS-1$
public static final String RESERVEDMAPKEY_STREAMPROVIDER = "ActionAdapterQuartzJob-StreamProvider"; //$NON-NLS-1$
public static final String RESERVEDMAPKEY_UIPASSPARAM = "uiPassParam";
public static final String RESERVEDMAPKEY_LINEAGE_ID = "lineage-id";
public static final String RESERVEDMAPKEY_RESTART_FLAG = "ActionAdapterQuartzJob-Restart";
private static final Log logger = LogFactory.getLog(QuartzScheduler.class);
private SchedulerFactory quartzSchedulerFactory;
private Scheduler quartzScheduler;
private ArrayList<ISchedulerListener> listeners = new ArrayList<ISchedulerListener>();
private static final Pattern listPattern = Pattern.compile("\\d+"); //$NON-NLS-1$
private static final Pattern dayOfWeekRangePattern = Pattern.compile(".*\\-.*"); //$NON-NLS-1$
private static final Pattern sequencePattern = Pattern.compile("\\d+\\-\\d+"); //$NON-NLS-1$
private static final Pattern intervalPattern = Pattern.compile("\\d+/\\d+"); //$NON-NLS-1$
private static final Pattern qualifiedDayPattern = Pattern.compile("\\d+#\\d+"); //$NON-NLS-1$
private static final Pattern lastDayPattern = Pattern.compile("\\d+L"); //$NON-NLS-1$
public QuartzScheduler(SchedulerFactory schedulerFactory) {
this.quartzSchedulerFactory = schedulerFactory;
}
public QuartzScheduler() {
this.quartzSchedulerFactory = new StdSchedulerFactory();
}
/**
* Overrides the default Quartz {@link SchedulerFactory}. Note: depending on the type of scheduler you are setting
* here, there may be initializing required prior to this setter being called. Only the
* {@link SchedulerFactory#getScheduler()} will be called later, so the factory set here must already be in a state
* where that invocation will be successful.
*
* @param quartzSchedulerFactory the quartz factory to use for generating scheduler instances
*/
public void setQuartzSchedulerFactory(SchedulerFactory quartzSchedulerFactory) throws SchedulerException {
this.quartzSchedulerFactory = quartzSchedulerFactory;
if (quartzScheduler != null) {
this.shutdown();
quartzScheduler = null;
}
}
public Scheduler getQuartzScheduler() throws org.quartz.SchedulerException {
if (quartzScheduler == null) {
/*
* Currently, quartz will always give you the same scheduler object when any factory instance is asked for a
* scheduler. In other words there is no such thing as scheduler-level isolation. If we really need multiple
* isolated scheduler instances, we should investigate named schedulers, but this API getScheduler() will not help
* us in that regard.
*/
quartzScheduler = quartzSchedulerFactory.getScheduler();
QuartzSchedulerHelper.init(quartzScheduler);
}
logger.debug("Using quartz scheduler " + quartzScheduler); //$NON-NLS-1$
return quartzScheduler;
}
private void setQuartzScheduler(Scheduler quartzScheduler) {
this.quartzScheduler = quartzScheduler;
}
/**
* {@inheritDoc}
*/
public Job createJob(String jobName, String actionId, Map<String, Serializable> jobParams, IJobTrigger trigger)
throws SchedulerException {
return createJob(jobName, actionId, jobParams, trigger, null);
}
/**
* {@inheritDoc}
*/
public Job createJob(String jobName, Class<? extends IAction> action, Map<String, Serializable> jobParams,
IJobTrigger trigger) throws SchedulerException {
return createJob(jobName, action, jobParams, trigger, null);
}
/**
* {@inheritDoc}
*/
public Job createJob(String jobName, Class<? extends IAction> action, Map<String, Serializable> jobParams,
IJobTrigger trigger, IBackgroundExecutionStreamProvider outputStreamProvider) throws SchedulerException {
if (action == null) {
throw new SchedulerException(Messages.getInstance().getString("QuartzScheduler.ERROR_0003_ACTION_IS_NULL")); //$NON-NLS-1$
}
if (jobParams == null) {
jobParams = new HashMap<String, Serializable>();
}
jobParams.put(RESERVEDMAPKEY_ACTIONCLASS, action.getName());
Job ret = createJob(jobName, jobParams, trigger, outputStreamProvider);
ret.setSchedulableClass(action.getName());
return ret;
}
/**
* {@inheritDoc}
*/
public Job createJob(String jobName, String actionId, Map<String, Serializable> jobParams, IJobTrigger trigger,
IBackgroundExecutionStreamProvider outputStreamProvider) throws SchedulerException {
if (StringUtils.isEmpty(actionId)) {
throw new SchedulerException(Messages.getInstance().getString("QuartzScheduler.ERROR_0003_ACTION_IS_NULL")); //$NON-NLS-1$
}
if (jobParams == null) {
jobParams = new HashMap<String, Serializable>();
}
jobParams.put(RESERVEDMAPKEY_ACTIONID, actionId);
Job ret = createJob(jobName, jobParams, trigger, outputStreamProvider);
ret.setSchedulableClass(""); //$NON-NLS-1$
return ret;
}
public static Trigger createQuartzTrigger(IJobTrigger jobTrigger, QuartzJobKey jobId) throws SchedulerException {
Trigger quartzTrigger = null;
if (jobTrigger instanceof ComplexJobTrigger) {
try {
quartzTrigger =
new CronTrigger(jobId.toString(), jobId.getUserName(), jobTrigger.getCronString() != null ? jobTrigger
.getCronString() : QuartzCronStringFactory.createCronString((ComplexJobTrigger) jobTrigger));
} catch (ParseException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobId.getJobName()), e); //$NON-NLS-1$
}
} else if (jobTrigger instanceof SimpleJobTrigger) {
SimpleJobTrigger simpleTrigger = (SimpleJobTrigger) jobTrigger;
long interval = simpleTrigger.getRepeatInterval();
if (interval > 0) {
interval *= 1000;
}
int repeatCount =
simpleTrigger.getRepeatCount() < 0 ? SimpleTrigger.REPEAT_INDEFINITELY : simpleTrigger.getRepeatCount();
quartzTrigger =
new SimpleTrigger(jobId.toString(), jobId.getUserName(), simpleTrigger.getStartTime(), simpleTrigger
.getEndTime(), repeatCount, interval);
} else {
throw new SchedulerException(Messages.getInstance().getString("QuartzScheduler.ERROR_0002_TRIGGER_WRONG_TYPE")); //$NON-NLS-1$
}
if (quartzTrigger instanceof SimpleTrigger) {
quartzTrigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT);
} else {
quartzTrigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW);
}
return quartzTrigger;
}
private JobDetail createJobDetails(QuartzJobKey jobId, Map<String, Serializable> jobParams) {
JobDetail jobDetail = new JobDetail(jobId.toString(), jobId.getUserName(), BlockingQuartzJob.class);
jobParams.put(RESERVEDMAPKEY_ACTIONUSER, jobId.getUserName());
JobDataMap jobDataMap = new JobDataMap(jobParams);
jobDetail.setJobDataMap(jobDataMap);
return jobDetail;
}
private Calendar createQuartzCalendar(ComplexJobTrigger complexJobTrigger) {
Calendar triggerCalendar = null;
if ((complexJobTrigger.getStartTime() != null) || (complexJobTrigger.getEndTime() != null)) {
triggerCalendar =
new QuartzSchedulerAvailability(complexJobTrigger.getStartTime(), complexJobTrigger.getEndTime());
}
return triggerCalendar;
}
/**
* {@inheritDoc}
*/
protected Job createJob(String jobName, Map<String, Serializable> jobParams, IJobTrigger trigger,
IBackgroundExecutionStreamProvider outputStreamProvider) throws SchedulerException {
String curUser = getCurrentUser();
// determine if the job params tell us who owns the job
Serializable jobOwner = jobParams.get(RESERVEDMAPKEY_ACTIONUSER);
if (jobOwner != null && jobOwner.toString().length() > 0) {
curUser = jobOwner.toString();
}
QuartzJobKey jobId = new QuartzJobKey(jobName, curUser);
Trigger quartzTrigger = createQuartzTrigger(trigger, jobId);
if (trigger.getEndTime() != null) {
quartzTrigger.setEndTime(trigger.getEndTime());
}
Calendar triggerCalendar =
quartzTrigger instanceof CronTrigger ? createQuartzCalendar((ComplexJobTrigger) trigger) : null;
if (outputStreamProvider != null) {
jobParams.put(RESERVEDMAPKEY_STREAMPROVIDER, outputStreamProvider);
}
if (trigger.getUiPassParam() != null) {
jobParams.put(RESERVEDMAPKEY_UIPASSPARAM, trigger.getUiPassParam());
}
if (!jobParams.containsKey(RESERVEDMAPKEY_LINEAGE_ID)) {
String uuid = UUID.randomUUID().toString();
jobParams.put(RESERVEDMAPKEY_LINEAGE_ID, uuid);
}
JobDetail jobDetail = createJobDetails(jobId, jobParams);
try {
Scheduler scheduler = getQuartzScheduler();
QuartzSchedulerHelper.applyJobExecutionRules(
QuartzSchedulerHelper.Phase.CREATION, scheduler, jobDetail, null);
if (triggerCalendar != null) {
scheduler.addCalendar(jobId.toString(), triggerCalendar, false, false);
quartzTrigger.setCalendarName(jobId.toString());
}
logger
.debug(MessageFormat
.format(
"Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap(jobParams))); //$NON-NLS-1$
scheduler.scheduleJob(jobDetail, quartzTrigger);
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobName), e); //$NON-NLS-1$
}
Job job = new Job();
job.setJobParams(jobParams);
job.setJobTrigger((JobTrigger) trigger);
job.setNextRun(quartzTrigger.getNextFireTime());
job.setLastRun(quartzTrigger.getPreviousFireTime());
job.setJobId(jobId.toString());
job.setJobName(jobName);
job.setUserName(curUser);
job.setState(JobState.NORMAL);
return job;
}
@Override
public void updateJob(String jobId, Map<String, Serializable> jobParams, IJobTrigger trigger)
throws SchedulerException {
QuartzJobKey jobKey = QuartzJobKey.parse(jobId);
Trigger quartzTrigger = createQuartzTrigger(trigger, jobKey);
quartzTrigger.setJobName(jobId);
quartzTrigger.setJobGroup(jobKey.getUserName());
Calendar triggerCalendar =
quartzTrigger instanceof CronTrigger ? createQuartzCalendar((ComplexJobTrigger) trigger) : null;
try {
Scheduler scheduler = getQuartzScheduler();
// int triggerState = scheduler.getTriggerState(jobId, jobKey.getUserName());
// if (triggerState != Trigger.STATE_PAUSED) {
// scheduler.pauseTrigger(jobId, jobKey.getUserName());
// }
JobDetail origJobDetail = scheduler.getJobDetail(jobId, jobKey.getUserName());
if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_ACTIONCLASS)) {
jobParams.put(RESERVEDMAPKEY_ACTIONCLASS, origJobDetail.getJobDataMap().get(RESERVEDMAPKEY_ACTIONCLASS)
.toString());
} else if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_ACTIONID)) {
jobParams
.put(RESERVEDMAPKEY_ACTIONID, origJobDetail.getJobDataMap().get(RESERVEDMAPKEY_ACTIONID).toString());
}
if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_STREAMPROVIDER)) {
jobParams.put(RESERVEDMAPKEY_STREAMPROVIDER, (Serializable) origJobDetail.getJobDataMap().get(
RESERVEDMAPKEY_STREAMPROVIDER));
}
if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_UIPASSPARAM)) {
jobParams.put(RESERVEDMAPKEY_UIPASSPARAM, (Serializable) origJobDetail.getJobDataMap().get(
RESERVEDMAPKEY_UIPASSPARAM));
}
JobDetail jobDetail = createJobDetails(jobKey, jobParams);
QuartzSchedulerHelper.applyJobExecutionRules(
QuartzSchedulerHelper.Phase.UPDATING, scheduler, jobDetail, null);
scheduler.addJob(jobDetail, true);
if (triggerCalendar != null) {
scheduler.addCalendar(jobId.toString(), triggerCalendar, true, true);
quartzTrigger.setCalendarName(jobId.toString());
}
scheduler.rescheduleJob(jobId, jobKey.getUserName(), quartzTrigger);
// if (triggerState != Trigger.STATE_PAUSED) {
// scheduler.resumeTrigger(jobId, jobKey.getUserName());
// }
logger
.debug(MessageFormat
.format(
"Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap(jobParams))); //$NON-NLS-1$
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobKey.getJobName()), e); //$NON-NLS-1$
}
}
/**
* {@inheritDoc}
*/
public Map<IScheduleSubject, ComplexJobTrigger> getAvailabilityWindows() {
// TODO Auto-generated method stub
return null;
}
/**
* {@inheritDoc}
*/
public List<IJobResult> getJobHistory(String jobId) {
// TODO Auto-generated method stub
return null;
}
/**
* {@inheritDoc}
*/
public void triggerNow(String jobId) throws SchedulerException {
try {
QuartzJobKey jobKey = QuartzJobKey.parse(jobId);
Scheduler scheduler = getQuartzScheduler();
String groupName = jobKey.getUserName();
for (Trigger trigger : scheduler.getTriggersOfJob(jobId, groupName)) {
if ("MANUAL_TRIGGER".equals(trigger.getGroup())) {
continue;
}
if (trigger instanceof SimpleTrigger) {
((SimpleTrigger) trigger).setPreviousFireTime(new Date());
} else if (trigger instanceof CronTrigger) {
((CronTrigger) trigger).setPreviousFireTime(new Date());
}
if (trigger.getStartTime() != null && trigger.getStartTime().before(new Date())) {
Date newStartTime = trigger.getFireTimeAfter(new Date());
if (newStartTime != null) {
trigger.setStartTime(newStartTime);
}
}
// force the trigger to be updated with the previous fire time
scheduler.rescheduleJob(jobId, jobKey.getUserName(), trigger);
}
scheduler.triggerJob(jobId, jobKey.getUserName());
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId), e); //$NON-NLS-1$
}
}
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked")
public Job getJob(String jobId) throws SchedulerException {
try {
Scheduler scheduler = getQuartzScheduler();
QuartzJobKey jobKey = QuartzJobKey.parse(jobId);
String groupName = jobKey.getUserName();
for (Trigger trigger : scheduler.getTriggersOfJob(jobId, groupName)) {
Job job = new Job();
JobDetail jobDetail = scheduler.getJobDetail(jobId, groupName);
        if (jobDetail != null) {
          // read the owning group inside the null check; jobDetail may be null
          job.setUserName(jobDetail.getGroup());
          JobDataMap jobDataMap = jobDetail.getJobDataMap();
          if (jobDataMap != null) {
            Map<String, Serializable> wrappedMap = jobDataMap.getWrappedMap();
            job.setJobParams(wrappedMap);
          }
        }
        job.setJobId(jobId);
        setJobTrigger(scheduler, job, trigger);
return job;
}
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId), e); //$NON-NLS-1$
}
return null;
}
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked")
public List<Job> getJobs(IJobFilter filter) throws SchedulerException {
ArrayList<Job> jobs = new ArrayList<Job>();
try {
Scheduler scheduler = getQuartzScheduler();
for (String groupName : scheduler.getJobGroupNames()) {
for (String jobId : scheduler.getJobNames(groupName)) {
for (Trigger trigger : scheduler.getTriggersOfJob(jobId, groupName)) {
if ("MANUAL_TRIGGER".equals(trigger.getGroup())) {
continue;
}
Job job = new Job();
job.setGroupName(groupName);
JobDetail jobDetail = scheduler.getJobDetail(jobId, groupName);
if (jobDetail != null) {
job.setUserName(jobDetail.getGroup());
JobDataMap jobDataMap = jobDetail.getJobDataMap();
if (jobDataMap != null) {
Map<String, Serializable> wrappedMap = jobDataMap.getWrappedMap();
job.setJobParams(wrappedMap);
}
}
job.setJobId(jobId);
setJobTrigger(scheduler, job, trigger);
job.setJobName(QuartzJobKey.parse(jobId).getJobName());
job.setNextRun(trigger.getFireTimeAfter(new Date()));
job.setLastRun(trigger.getPreviousFireTime());
if ((filter == null) || filter.accept(job)) {
jobs.add(job);
}
}
}
}
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(
Messages.getInstance().getString("QuartzScheduler.ERROR_0004_FAILED_TO_LIST_JOBS"), e); //$NON-NLS-1$
}
return jobs;
}
private void setJobTrigger(Scheduler scheduler, Job job, Trigger trigger) throws SchedulerException,
org.quartz.SchedulerException {
QuartzJobKey jobKey = QuartzJobKey.parse(job.getJobId());
String groupName = jobKey.getUserName();
if (trigger instanceof SimpleTrigger) {
SimpleTrigger simpleTrigger = (SimpleTrigger) trigger;
SimpleJobTrigger simpleJobTrigger = new SimpleJobTrigger();
simpleJobTrigger.setStartTime(simpleTrigger.getStartTime());
simpleJobTrigger.setEndTime(simpleTrigger.getEndTime());
simpleJobTrigger.setUiPassParam((String) job.getJobParams().get(RESERVEDMAPKEY_UIPASSPARAM));
long interval = simpleTrigger.getRepeatInterval();
if (interval > 0) {
interval /= 1000;
}
simpleJobTrigger.setRepeatInterval(interval);
simpleJobTrigger.setRepeatCount(simpleTrigger.getRepeatCount());
job.setJobTrigger(simpleJobTrigger);
} else if (trigger instanceof CronTrigger) {
CronTrigger cronTrigger = (CronTrigger) trigger;
ComplexJobTrigger complexJobTrigger = createComplexTrigger(cronTrigger.getCronExpression());
complexJobTrigger.setUiPassParam((String) job.getJobParams().get(RESERVEDMAPKEY_UIPASSPARAM));
complexJobTrigger.setCronString(((CronTrigger) trigger).getCronExpression());
job.setJobTrigger(complexJobTrigger);
if (trigger.getCalendarName() != null) {
Calendar calendar = scheduler.getCalendar(trigger.getCalendarName());
if (calendar instanceof QuartzSchedulerAvailability) {
QuartzSchedulerAvailability quartzSchedulerAvailability = (QuartzSchedulerAvailability) calendar;
complexJobTrigger.setStartTime(quartzSchedulerAvailability.getStartTime());
complexJobTrigger.setEndTime(quartzSchedulerAvailability.getEndTime());
}
}
complexJobTrigger.setCronString(((CronTrigger) trigger).getCronExpression());
}
int triggerState = scheduler.getTriggerState(job.getJobId(), groupName);
switch (triggerState) {
case Trigger.STATE_NORMAL:
job.setState(JobState.NORMAL);
break;
case Trigger.STATE_BLOCKED:
job.setState(JobState.BLOCKED);
break;
case Trigger.STATE_COMPLETE:
job.setState(JobState.COMPLETE);
break;
case Trigger.STATE_ERROR:
job.setState(JobState.ERROR);
break;
case Trigger.STATE_PAUSED:
job.setState(JobState.PAUSED);
break;
default:
job.setState(JobState.UNKNOWN);
break;
}
job.setJobName(QuartzJobKey.parse(job.getJobId()).getJobName());
job.setNextRun(trigger.getNextFireTime());
job.setLastRun(trigger.getPreviousFireTime());
}
/**
* {@inheritDoc}
*/
public Integer getMinScheduleInterval(IScheduleSubject subject) {
// TODO Auto-generated method stub
return 0;
}
/**
* {@inheritDoc}
*/
public ComplexJobTrigger getSubjectAvailabilityWindow(IScheduleSubject subject) {
// TODO Auto-generated method stub
return null;
}
/**
* {@inheritDoc}
*/
public void pause() throws SchedulerException {
try {
getQuartzScheduler().standby();
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(e);
}
}
/**
* {@inheritDoc}
*/
public void pauseJob(String jobId) throws SchedulerException {
try {
Scheduler scheduler = getQuartzScheduler();
scheduler.pauseJob(jobId, QuartzJobKey.parse(jobId).getUserName());
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance()
.getString("QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS"), e); //$NON-NLS-1$
}
}
/**
* {@inheritDoc}
*/
public void removeJob(String jobId) throws SchedulerException {
try {
Scheduler scheduler = getQuartzScheduler();
scheduler.deleteJob(jobId, QuartzJobKey.parse(jobId).getUserName());
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance()
.getString("QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS"), e); //$NON-NLS-1$
}
}
/**
* {@inheritDoc}
*/
public void start() throws SchedulerException {
try {
getQuartzScheduler().start();
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(e);
}
}
/**
* {@inheritDoc}
*/
public void resumeJob(String jobId) throws SchedulerException {
try {
Scheduler scheduler = getQuartzScheduler();
scheduler.resumeJob(jobId, QuartzJobKey.parse(jobId).getUserName());
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0005_FAILED_TO_RESUME_JOBS"), e); //$NON-NLS-1$
}
}
/**
* {@inheritDoc}
*/
public void setAvailabilityWindows(Map<IScheduleSubject, ComplexJobTrigger> availability) {
// TODO Auto-generated method stub
}
/**
* {@inheritDoc}
*/
public void setMinScheduleInterval(IScheduleSubject subject, int intervalInSeconds) {
// TODO Auto-generated method stub
}
/**
* {@inheritDoc}
*/
public void setSubjectAvailabilityWindow(IScheduleSubject subject, ComplexJobTrigger availability) {
// TODO Auto-generated method stub
}
/**
   * @return the user name bound to the current Pentaho session, or {@code null} if there is no session or authentication
*/
protected String getCurrentUser() {
IPentahoSession session = PentahoSessionHolder.getSession();
if (session == null) {
return null;
}
Principal p = SecurityHelper.getInstance().getAuthentication();
return (p == null) ? null : p.getName();
}
public static ComplexJobTrigger createComplexTrigger(String cronExpression) {
ComplexJobTrigger complexJobTrigger = new ComplexJobTrigger();
complexJobTrigger.setHourlyRecurrence((ITimeRecurrence) null);
complexJobTrigger.setMinuteRecurrence((ITimeRecurrence) null);
complexJobTrigger.setSecondRecurrence((ITimeRecurrence) null);
for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 6)) {
complexJobTrigger.addYearlyRecurrence(recurrence);
}
for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 4)) {
complexJobTrigger.addMonthlyRecurrence(recurrence);
}
List<ITimeRecurrence> dayOfWeekRecurrences = parseDayOfWeekRecurrences(cronExpression);
List<ITimeRecurrence> dayOfMonthRecurrences = parseRecurrence(cronExpression, 3);
if ((dayOfWeekRecurrences.size() > 0) && (dayOfMonthRecurrences.size() == 0)) {
for (ITimeRecurrence recurrence : dayOfWeekRecurrences) {
complexJobTrigger.addDayOfWeekRecurrence(recurrence);
}
} else if ((dayOfWeekRecurrences.size() == 0) && (dayOfMonthRecurrences.size() > 0)) {
for (ITimeRecurrence recurrence : dayOfMonthRecurrences) {
complexJobTrigger.addDayOfMonthRecurrence(recurrence);
}
}
for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 2)) {
complexJobTrigger.addHourlyRecurrence(recurrence);
}
for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 1)) {
complexJobTrigger.addMinuteRecurrence(recurrence);
}
for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 0)) {
complexJobTrigger.addSecondRecurrence(recurrence);
}
return complexJobTrigger;
}
private static List<ITimeRecurrence> parseDayOfWeekRecurrences(String cronExpression) {
List<ITimeRecurrence> dayOfWeekRecurrence = new ArrayList<ITimeRecurrence>();
String delims = "[ ]+"; //$NON-NLS-1$
String[] tokens = cronExpression.split(delims);
if (tokens.length >= 6) {
String dayOfWeekTokens = tokens[5];
tokens = dayOfWeekTokens.split(","); //$NON-NLS-1$
if ((tokens.length > 1) || !(tokens[0].equals("*") || tokens[0].equals("?"))) { //$NON-NLS-1$ //$NON-NLS-2$
RecurrenceList dayOfWeekList = null;
for (String token : tokens) {
if (listPattern.matcher(token).matches()) {
if (dayOfWeekList == null) {
dayOfWeekList = new RecurrenceList();
}
dayOfWeekList.getValues().add(Integer.parseInt(token));
} else {
if (dayOfWeekList != null) {
dayOfWeekRecurrence.add(dayOfWeekList);
dayOfWeekList = null;
}
if (sequencePattern.matcher(token).matches()) {
String[] days = token.split("-"); //$NON-NLS-1$
dayOfWeekRecurrence.add(new SequentialRecurrence(Integer.parseInt(days[0]), Integer
.parseInt(days[1])));
} else if (intervalPattern.matcher(token).matches()) {
String[] days = token.split("/"); //$NON-NLS-1$
dayOfWeekRecurrence.add(new IncrementalRecurrence(Integer.parseInt(days[0]), Integer
.parseInt(days[1])));
} else if (qualifiedDayPattern.matcher(token).matches()) {
String[] days = token.split("#"); //$NON-NLS-1$
dayOfWeekRecurrence
.add(new QualifiedDayOfWeek(Integer.parseInt(days[1]), Integer.parseInt(days[0])));
} else if (lastDayPattern.matcher(token).matches()) {
DayOfWeek dayOfWeek =
DayOfWeek.values()[(Integer.parseInt(token.substring(0, token.length() - 1)) - 1) % 7];
dayOfWeekRecurrence.add(new QualifiedDayOfWeek(DayOfWeekQualifier.LAST, dayOfWeek));
} else if (dayOfWeekRangePattern.matcher(token).matches()) {
String[] days = token.split("-"); //$NON-NLS-1$
int start = DayOfWeek.valueOf(days[0]).ordinal();
int finish = DayOfWeek.valueOf(days[1]).ordinal();
dayOfWeekRecurrence.add(new SequentialRecurrence(start, finish));
} else {
dayOfWeekList = new RecurrenceList();
dayOfWeekList.getValues().add(DayOfWeek.valueOf(token).ordinal());
dayOfWeekRecurrence.add(dayOfWeekList);
dayOfWeekList = null;
// } else {
// throw new IllegalArgumentException(Messages.getInstance().getErrorString(
// "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$
}
}
}
if (dayOfWeekList != null) {
dayOfWeekRecurrence.add(dayOfWeekList);
}
}
} else {
throw new IllegalArgumentException(Messages.getInstance().getErrorString(
"ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$
}
return dayOfWeekRecurrence;
}
private static List<ITimeRecurrence> parseRecurrence(String cronExpression, int tokenIndex) {
List<ITimeRecurrence> timeRecurrence = new ArrayList<ITimeRecurrence>();
String delims = "[ ]+"; //$NON-NLS-1$
String[] tokens = cronExpression.split(delims);
if (tokens.length > tokenIndex) {
String timeTokens = tokens[tokenIndex];
tokens = timeTokens.split(","); //$NON-NLS-1$
if ((tokens.length > 1) || !(tokens[0].equals("*") || tokens[0].equals("?"))) { //$NON-NLS-1$ //$NON-NLS-2$
RecurrenceList timeList = null;
for (String token : tokens) {
if (listPattern.matcher(token).matches()) {
if (timeList == null) {
timeList = new RecurrenceList();
}
timeList.getValues().add(Integer.parseInt(token));
} else {
if (timeList != null) {
timeRecurrence.add(timeList);
timeList = null;
}
if (sequencePattern.matcher(token).matches()) {
String[] days = token.split("-"); //$NON-NLS-1$
timeRecurrence.add(new SequentialRecurrence(Integer.parseInt(days[0]),
Integer.parseInt(days[1])));
} else if (intervalPattern.matcher(token).matches()) {
String[] days = token.split("/"); //$NON-NLS-1$
timeRecurrence
.add(new IncrementalRecurrence(Integer.parseInt(days[0]), Integer.parseInt(days[1])));
} else if ("L".equalsIgnoreCase(token)) {
timeRecurrence.add(new QualifiedDayOfMonth());
} else {
throw new IllegalArgumentException(Messages.getInstance().getErrorString(
"ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$
}
}
}
if (timeList != null) {
timeRecurrence.add(timeList);
}
}
} else {
throw new IllegalArgumentException(Messages.getInstance().getErrorString(
"ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$
}
return timeRecurrence;
}
/**
* {@inheritDoc}
*/
public SchedulerStatus getStatus() throws SchedulerException {
SchedulerStatus schedulerStatus = SchedulerStatus.STOPPED;
try {
if (getQuartzScheduler().isInStandbyMode()) {
schedulerStatus = SchedulerStatus.PAUSED;
} else if (getQuartzScheduler().isStarted()) {
schedulerStatus = SchedulerStatus.RUNNING;
}
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0006_FAILED_TO_GET_SCHEDULER_STATUS"), e); //$NON-NLS-1$
}
return schedulerStatus;
}
/**
* {@inheritDoc}
*/
public void shutdown() throws SchedulerException {
try {
boolean waitForJobsToComplete = true;
getQuartzScheduler().shutdown(waitForJobsToComplete);
setQuartzScheduler(null);
} catch (org.quartz.SchedulerException e) {
throw new SchedulerException(e);
}
}
public static String prettyPrintMap(Map<String, Serializable> map) {
StringBuilder b = new StringBuilder();
for (Map.Entry<String, Serializable> entry : map.entrySet()) {
b.append(entry.getKey());
b.append("="); //$NON-NLS-1$
b.append(entry.getValue());
b.append("; "); //$NON-NLS-1$
}
return b.toString();
}
public void addListener(ISchedulerListener listener) {
listeners.add(listener);
}
public void setListeners(Collection<ISchedulerListener> listeners) {
this.listeners.addAll(listeners);
}
public void fireJobCompleted(IAction actionBean, String actionUser, Map<String, Serializable> params,
IBackgroundExecutionStreamProvider streamProvider) {
for (ISchedulerListener listener : listeners) {
listener.jobCompleted(actionBean, actionUser, params, streamProvider);
}
}
/**
* Checks if the text configuration for the input/output files is present.
* If not - silently returns. If present checks if the input file is allowed to be scheduled.
*
* @param jobParams scheduling job parameters
* @throws SchedulerException the configuration is recognized but the file can't be scheduled, is a folder or doesn't exist.
*/
@Override
public void validateJobParams(Map<String, Serializable> jobParams) throws SchedulerException {
final Object streamProviderObj = jobParams.get(RESERVEDMAPKEY_STREAMPROVIDER);
if (streamProviderObj instanceof String) {
String inputFilePath = null;
final String inputOutputString = (String) streamProviderObj;
final String[] tokens = inputOutputString.split(":");
if (!ArrayUtils.isEmpty(tokens) && tokens.length == 2) {
inputFilePath = tokens[0].split("=")[1].trim();
if (StringUtils.isNotBlank(inputFilePath)) {
final IUnifiedRepository repository = PentahoSystem.get(IUnifiedRepository.class);
final RepositoryFile repositoryFile = repository.getFile(inputFilePath);
if ((repositoryFile == null) || repositoryFile.isFolder() || !repositoryFile.isSchedulable()) {
throw new SchedulerException(Messages.getInstance().getString(
"QuartzScheduler.ERROR_0008_SCHEDULING_IS_NOT_ALLOWED"));
}
}
}
}
}
} |
declare var ResizeSensor;
module LionSoftAngular {
/**
     * Automatically stretches every immediate child element marked with the nv-fill-height attribute to fill the container height.
     *
     * For this to work the following conditions must be met:
     * 1. The container must have a fixed height.
     * 2. The container's immediate children must be in the normal flow and must not be floated.
     * 3. The relative stretching of several auto-sized elements can be controlled via the value of the nv-fill-height attribute (relative height in percent).
     *
     * Examples:
* <body nv-fill-container>
* <div>Header</div>
* <div nv-fill-height='10'>Content - 10%</div>
* <div nv-fill-height>Content - 25%</div>
* <div nv-fill-height>Content - 25%</div>
* <div nv-fill-height='40'>Content - 40%</div>
* <div>Footer</div>
* </body>
*/
class NvFillContainer extends Directive {
restrict = 'A';
doResize(scope, element) {
var fillHeight = element.height();
$.each(scope.$$nvFillContainer_fixedElements, (index, child) => {
fillHeight -= child.outerHeight(true);
});
var restHeight = fillHeight;
$.each(scope.$$nvFillContainer_fillElements, (index, child) => {
if (index < scope.$$nvFillContainer_fillElements.length - 1) {
var currentSize = (child[0].$$nvHeight * fillHeight) / 100;
child.outerHeight(currentSize);
restHeight -= currentSize;
}
else {
child.outerHeight(restHeight);
}
});
}
Link(scope, element: ng.IAugmentedJQuery, attrs, controller, transclude) {
scope.$$nvFillContainer_fillElements = [];
scope.$$nvFillContainer_fixedElements = [];
var totalFillPercents = 0;
var zeroFillPercentCount = 0;
$.each(element.children(), (index, element) => {
var child = $(element);
if (child.css("display") !== 'none'
&& element.nodeName !== "STYLE"
&& element.nodeName !== "SCRIPT"
&& child.css("position") !== 'absolute'
&& child.css("position") !== 'fixed')
{
var fillHeight = <any>child.attr("nv-fill-height");
if (fillHeight !== undefined && fillHeight !== false) {
var res = fillHeight === "" ? 0 : parseFloat(fillHeight);
element.$$nvHeight = isFinite(res) ? res : 0;
                        if (element.$$nvHeight)
                            totalFillPercents += element.$$nvHeight;
                        else
                            zeroFillPercentCount++;
scope.$$nvFillContainer_fillElements.push(child);
}
else if (element.offsetHeight > 0) {
scope.$$nvFillContainer_fixedElements.push(child);
}
}
});
if (scope.$$nvFillContainer_fillElements.length > 0) {
var resPercents = 100 - totalFillPercents;
if (zeroFillPercentCount > 0 && resPercents > 0) {
for (var n = 0; n < scope.$$nvFillContainer_fillElements.length; n++) {
                        // for fill-height elements with no explicit value, split the remainder evenly between them
var el = scope.$$nvFillContainer_fillElements[n][0];
if (el.$$nvHeight === 0)
el.$$nvHeight = resPercents / zeroFillPercentCount;
}
}
for (var i = 0; i < scope.$$nvFillContainer_fixedElements.length; i++) {
// ReSharper disable once WrongExpressionStatement
new ResizeSensor(scope.$$nvFillContainer_fixedElements[i], () => this.doResize(scope, element));
}
// ReSharper disable once WrongExpressionStatement
new ResizeSensor(element, () => this.doResize(scope, element));
this.doResize(scope, element);
}
}
}
LionSoftAngular.Module
.directive("nvFillContainer", NvFillContainer.Factory())
;
} |
// decode attempts to decode the bytes in buf using one of the Codec instances
// in codex. The buf must start with the single-object encoding prefix,
// followed by the unsigned 64-bit Rabin fingerprint of the canonical schema
// used to encode the datum, finally followed by the encoded bytes. This is a
// simplified example of fetching the fingerprint from the SOE buffer, using
// that fingerprint to select a Codec from a dictionary of Codec instances,
// called codex in this case, and finally sends the buf to be decoded by that
// Codec.
func decode(codex map[uint64]*goavro.Codec, buf []byte) error {
fingerprint, newBuf, err := goavro.FingerprintFromSOE(buf)
	if err != nil {
		return err
	}
codec, ok := codex[fingerprint]
if !ok {
return fmt.Errorf("unknown codec: %#x", fingerprint)
}
	var datum interface{}
	// Both branches decode the same datum: NativeFromBinary consumes the
	// payload FingerprintFromSOE already stripped, while NativeFromSingle
	// works directly on the original single-object-encoded buffer.
	if true {
		datum, _, err = codec.NativeFromBinary(newBuf)
	} else {
		datum, _, err = codec.NativeFromSingle(buf)
	}
	if err != nil {
		return err
	}
_, err = fmt.Println(datum)
return err
} |
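// A minimal usage sketch for the decode helper above, assuming goavro v2's
// documented API (NewCodec, the Codec.Rabin fingerprint field, and
// SingleFromNative); the `"string"` schema and datum are illustrative only.
func decodeExample() error {
	codec, err := goavro.NewCodec(`"string"`)
	if err != nil {
		return err
	}
	// Key each codec by the Rabin fingerprint of its canonical schema, the
	// same value FingerprintFromSOE extracts from a single-object buffer.
	codex := map[uint64]*goavro.Codec{codec.Rabin: codec}
	// Encode a datum with single-object encoding, then round-trip it.
	buf, err := codec.SingleFromNative(nil, "hello")
	if err != nil {
		return err
	}
	return decode(codex, buf)
}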
package Chapter07.exercise34;
import java.util.Scanner;
public class SortCharactersInAString {
public static void main(String[] args) {
Scanner scan = new Scanner(System.in);
System.out.println("enter a string to order");
String str = scan.next();
scan.close();
System.out.println(sort(str));
}
public static String sort(String s) {
char[] toCharArray = s.toCharArray();
String toString = "";
for (int j = 0; j < toCharArray.length; j++) {
for (int i = 0; i < toCharArray.length - 1; i++) {
if (toCharArray[i] > toCharArray[i + 1]) {
char temp = toCharArray[i + 1];
toCharArray[i + 1] = toCharArray[i];
toCharArray[i] = temp;
}
}
}
for (int i = 0; i < toCharArray.length; i++) {
toString += toCharArray[i];
}
return toString;
}
}
|
<reponame>XIAO-LI-UPF/Natural-Language-ParagraphGeneration
import sys
from data import *
from mosestokenizer import MosesDetokenizer
def main(test_file, output_file, lang):
lang = lang.split('_')[0]
# detokenizer = MosesDetokenizer(lang)
detokenize = MosesDetokenizer(lang)
sent_id = 1
with open(output_file, 'w+') as out:
for line in open(test_file):
if line.startswith('# text ='):
line = line.split('=', 1)[1].strip()
words = line.split()
out_text = detokenize(words)
out.write(f'# sent_id = {sent_id}\n# text = {out_text}\n\n')
sent_id += 1
if __name__ == '__main__':
main(*sys.argv[1:]) |
Effects of Self-Monitoring Training Logs on Behaviors and Beliefs of Swimmers
Investigators examined whether use of personal self-monitoring tools representing traditional “athletic training logs” improved training-related measures. Competitive Canadian intercollegiate swimmers (N = 26; M age = 20.4 years; 10 men, 21 women) were given training-related goals and then randomly assigned to either a log group that self-monitored targeted training behaviors (SM-TB) or a self-monitoring comparison group (SMC) for 26 days. Swimmers’ adherence to training prescriptions was analyzed using repeated measures tests. Results showed no group effects or interactions; however, both groups improved adherence during the first 17 days of the intervention (p < .001). Self-report measures were analyzed using pre- and post-tests. The SM-TB group increased their reported intent to assess attendance during the intervention more than the SMC group (p = .07). At post-test, both the SM-TB and SMC groups reported greater intent to assess adherence (p < .01) and punctuality (p = .06), and higher self-regulatory confidence for punctuality (p = .06).
/**
* Handle JSON requests for "dimension" functions
*
* @author Adam Andrews
*
*/
public class JLJsonDimensionHandler extends JLJsonCommandHandler implements JLDimensionRequestParams, JLDimensionResponseParams {
private IJLDimension dimHandler = null;
/**
* @param dimHandler
*/
public JLJsonDimensionHandler(IJLDimension dimHandler) {
this.dimHandler = dimHandler;
}
/* (non-Javadoc)
* @see com.simplifiedlogic.nitro.jshell.json.handler.JLJsonCommandHandler#handleFunction(java.lang.String, java.lang.String, java.util.Hashtable)
*/
public Hashtable<String, Object> handleFunction(String sessionId, String function, Hashtable<String, Object> input) throws JLIException {
if (function==null)
return null;
if (function.equals(FUNC_SET)) return actionSet(sessionId, input);
else if (function.equals(FUNC_COPY)) return actionCopy(sessionId, input);
else if (function.equals(FUNC_LIST)) return actionList(sessionId, input);
else if (function.equals(FUNC_LIST_DETAIL)) return actionListDetail(sessionId, input);
else if (function.equals(FUNC_SHOW)) return actionShow(sessionId, input);
else if (function.equals(FUNC_USER_SELECT)) return actionUserSelect(sessionId, input);
else if (function.equals(FUNC_SET_TEXT)) return actionSetText(sessionId, input);
else {
throw new JLIException("Unknown function name: " + function);
}
}
private Hashtable<String, Object> actionSet(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
String dimname = checkStringParameter(input, PARAM_NAME, true);
Object value = checkParameter(input, PARAM_VALUE, false);
boolean encoded = checkFlagParameter(input, PARAM_ENCODED, false, false);
dimHandler.set(modelname, dimname, value, encoded, sessionId);
return null;
}
private Hashtable<String, Object> actionCopy(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
String dimname = checkStringParameter(input, PARAM_NAME, true);
String to_name = checkStringParameter(input, PARAM_TONAME, true);
String to_model = checkStringParameter(input, PARAM_TOMODEL, false);
dimHandler.copy(modelname, dimname, to_name, to_model, sessionId);
return null;
}
private Hashtable<String, Object> actionList(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
String namePattern = checkStringParameter(input, PARAM_NAME, false);
List<String> dimNames = null;
Object namesObj = checkParameter(input, PARAM_NAMES, false);
if (namesObj!=null) {
dimNames = getStringListValue(namesObj);
}
String dimType = checkStringParameter(input, PARAM_DIM_TYPE, false);
boolean encoded = checkFlagParameter(input, PARAM_ENCODED, false, false);
boolean select = checkFlagParameter(input, PARAM_SELECT, false, false);
List<DimData> dims = dimHandler.list(modelname, namePattern, dimNames, dimType, encoded, select, sessionId);
if (dims!=null && dims.size()>0) {
Hashtable<String, Object> out = new Hashtable<String, Object>();
Vector<Map<String, Object>> outDims = new Vector<Map<String, Object>>();
out.put(OUTPUT_DIMLIST, outDims);
Map<String, Object> outDim = null;
for (DimData dim : dims) {
outDim = new Hashtable<String, Object>();
if (dim.getName()!=null)
outDim.put(OUTPUT_NAME, dim.getName());
if (dim.getValue()!=null)
outDim.put(OUTPUT_VALUE, dim.getValue());
outDim.put(OUTPUT_ENCODED, Boolean.valueOf(dim.isEncoded()));
outDim.put(OUTPUT_DWG_DIM, Boolean.valueOf(dim.isDrawingDimension()));
outDims.add(outDim);
}
return out;
}
return null;
}
private Hashtable<String, Object> actionListDetail(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
String namePattern = checkStringParameter(input, PARAM_NAME, false);
List<String> dimNames = null;
Object namesObj = checkParameter(input, PARAM_NAMES, false);
if (namesObj!=null) {
dimNames = getStringListValue(namesObj);
}
String dimType = checkStringParameter(input, PARAM_DIM_TYPE, false);
boolean encoded = checkFlagParameter(input, PARAM_ENCODED, false, false);
boolean select = checkFlagParameter(input, PARAM_SELECT, false, false);
List<DimDetailData> dims = dimHandler.listDetail(modelname, namePattern, dimNames, dimType, encoded, select, sessionId);
if (dims!=null && dims.size()>0) {
Hashtable<String, Object> out = new Hashtable<String, Object>();
Vector<Map<String, Object>> outDims = new Vector<Map<String, Object>>();
out.put(OUTPUT_DIMLIST, outDims);
Map<String, Object> outDim = null;
for (DimDetailData dim : dims) {
outDim = new Hashtable<String, Object>();
if (dim.getName()!=null)
outDim.put(OUTPUT_NAME, dim.getName());
if (dim.getValue()!=null)
outDim.put(OUTPUT_VALUE, dim.getValue());
outDim.put(OUTPUT_ENCODED, Boolean.valueOf(dim.isEncoded()));
outDim.put(OUTPUT_DWG_DIM, Boolean.valueOf(dim.isDrawingDimension()));
DimToleranceData tol = dim.getTolerance();
if (tol!=null) {
if (tol.getToleranceType()!=null)
outDim.put(OUTPUT_TOLERANCE_TYPE, tol.getToleranceType());
if (DimToleranceData.TYPE_LIMITS.equals(tol.getToleranceType())) {
outDim.put(OUTPUT_TOL_LOWER_LIMIT, tol.getLowerLimit());
outDim.put(OUTPUT_TOL_UPPER_LIMIT, tol.getUpperLimit());
}
else if (DimToleranceData.TYPE_PLUS_MINUS.equals(tol.getToleranceType())) {
outDim.put(OUTPUT_TOL_PLUS, tol.getPlus());
outDim.put(OUTPUT_TOL_MINUS, tol.getMinus());
}
else if (DimToleranceData.TYPE_SYMMETRIC.equals(tol.getToleranceType())) {
outDim.put(OUTPUT_TOL_SYMMETRIC_VALUE, tol.getSymmetricValue());
}
else if (DimToleranceData.TYPE_SYM_SUPERSCRIPT.equals(tol.getToleranceType())) {
outDim.put(OUTPUT_TOL_SYMMETRIC_VALUE, tol.getSymmetricValue());
}
else if (DimToleranceData.TYPE_ISODIN.equals(tol.getToleranceType())) {
if (tol.getTableName()!=null)
outDim.put(OUTPUT_TOL_TABLE_NAME, tol.getTableName());
outDim.put(OUTPUT_TOL_TABLE_COLUMN, tol.getTableColumn());
if (tol.getTableType()!=null)
outDim.put(OUTPUT_TOL_TABLE_TYPE, tol.getTableType());
}
}
outDim.put(OUTPUT_SHEET, dim.getSheetNo());
if (dim.getViewName()!=null)
outDim.put(OUTPUT_VIEW_NAME, dim.getViewName());
if (dim.getDimType()!=null)
outDim.put(OUTPUT_DIM_TYPE, dim.getDimType());
if (dim.getText()!=null && dim.getText().length>0)
outDim.put(OUTPUT_TEXT, dim.getText());
Map<String, Object> recPt = writePoint(dim.getLocation());
if (recPt!=null)
outDim.put(OUTPUT_LOCATION, recPt);
outDims.add(outDim);
}
return out;
}
return null;
}
private Hashtable<String, Object> actionShow(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
String asmname = checkStringParameter(input, PARAM_ASSEMBLY, false);
String dimname = checkStringParameter(input, PARAM_NAME, true);
List<Integer> pathList = getIntArray(PARAM_PATH, checkParameter(input, PARAM_PATH, false));
int[] path = null;
if (pathList!=null && pathList.size()>0) {
int len = pathList.size();
path = new int[len];
for (int i=0; i<len; i++)
path[i] = pathList.get(i);
}
boolean show = checkFlagParameter(input, PARAM_SHOW, false, true);
dimHandler.show(modelname, asmname, dimname, path, show, sessionId);
return null;
}
private Hashtable<String, Object> actionUserSelect(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
int max = checkIntParameter(input, PARAM_MAX, false, Integer.valueOf(1)).intValue();
List<DimSelectData> dims = dimHandler.userSelect(modelname, max, sessionId);
if (dims!=null && dims.size()>0) {
Hashtable<String, Object> out = new Hashtable<String, Object>();
Vector<Map<String, Object>> outDims = new Vector<Map<String, Object>>();
out.put(OUTPUT_DIMLIST, outDims);
Map<String, Object> outDim = null;
for (DimSelectData dim : dims) {
outDim = new Hashtable<String, Object>();
if (dim.getName()!=null)
outDim.put(OUTPUT_NAME, dim.getName());
if (dim.getValue()!=null)
outDim.put(OUTPUT_VALUE, dim.getValue());
outDim.put(OUTPUT_ENCODED, Boolean.valueOf(dim.isEncoded()));
if (dim.getModelname()!=null)
outDim.put(PARAM_MODEL, dim.getModelname());
if (dim.getRelationId()!=null)
outDim.put(PARAM_RELATION_ID, dim.getRelationId());
outDims.add(outDim);
}
return out;
}
return null;
}
private Hashtable<String, Object> actionSetText(String sessionId, Hashtable<String, Object> input) throws JLIException {
String modelname = checkStringParameter(input, PARAM_MODEL, false);
String dimname = checkStringParameter(input, PARAM_NAME, true);
Object valueObj = checkParameter(input, PARAM_TEXT, false);
// Object prefixObj = checkParameter(input, PARAM_PREFIX, false);
// Object suffixObj = checkParameter(input, PARAM_SUFFIX, false);
boolean encoded = checkFlagParameter(input, PARAM_ENCODED, false, false);
// dimHandler.setText(modelname, dimname, valueObj, prefixObj, suffixObj, encoded, sessionId);
dimHandler.setText(modelname, dimname, valueObj, encoded, sessionId);
return null;
}
} |
/**
* Create {@link Intent} that will be consumed by ShortcutManager, which later generates a
* launcher widget using this intent.
*/
@VisibleForTesting
Intent createResultIntent(Intent shortcutIntent, ResolveInfo resolveInfo,
CharSequence label) {
ShortcutInfo info = createShortcutInfo(mContext, shortcutIntent, resolveInfo, label);
Intent intent = mShortcutManager.createShortcutResultIntent(info);
if (intent == null) {
intent = new Intent();
}
intent.putExtra(Intent.EXTRA_SHORTCUT_ICON_RESOURCE,
Intent.ShortcutIconResource.fromContext(mContext, R.mipmap.ic_launcher_settings))
.putExtra(Intent.EXTRA_SHORTCUT_INTENT, shortcutIntent)
.putExtra(Intent.EXTRA_SHORTCUT_NAME, label);
final ActivityInfo activityInfo = resolveInfo.activityInfo;
if (activityInfo.icon != 0) {
intent.putExtra(Intent.EXTRA_SHORTCUT_ICON, createIcon(
mContext,
activityInfo.applicationInfo,
activityInfo.icon,
R.layout.shortcut_badge,
mContext.getResources().getDimensionPixelSize(R.dimen.shortcut_size)));
}
return intent;
} |
The politician husband of deputy Labour leader Harriet Harman was accused of being racist after he referred to a postman as a "pikey".
Jack Dromey, 65, a shadow cabinet minister, drew criticism after he tweeted a picture of himself and a Royal Mail employee captioned with the apparent slur.
Hundreds of social network users pulled the shadow police minister up on the gaffe, with some even calling for officers to investigate the 'racist' tweet.
But he defended his use of the word, an offensive term for Irish travellers, and said it was a nickname used by the postie's colleagues who had named him after a Dad's Army character.
The photo was taken as Mr Dromey met staff at a delivery office in his Erdington, West Mids, constituency.
He posted several pictures of himself and workers at the Royal Mail depot as he toured the facility.
One picture showed the pro-trade union MP posed up with his arm round a young employee as both men grinned for the camera.
The caption read: “With Gareth Martin, the Pikey from the Erdington Royal Mail Sorting Office. A great guy!”
Twitter users were quick to pick up on the tweet.
HammyWheel branded Dromey 'disgraceful' and warned him to expect a call from the police.
He wrote: “Disgraceful incitement to racial hatred there? ‘Pikey’ is a horrid thing to call anyone. Expect a knock on your door from Plod.”
Guy Birchall wrote: “Worse than calling a policeman a pleb. Disgustingly insulting behaviour!”
Melindi Scott simply added: “Raaaaaaaaaaaacist”
Dromey insisted he had used the racially-loaded word because it was a nickname used by Mr Martin's fellow colleagues.
In a later tweet he wrote: “Don't panic, Mr Mainwaring. This morning's meeting was with Gareth, a Postie nicknamed after Corporal Pike from Dad's Army”.
The gaffe is bound to embarrass right-on wife Harriet Harman, 63, who has previously lent her backing to politically-correct campaigns like calls to ban topless models from The Sun's page 3.
Some web users even suggested the Labour MP should have a word with her husband, who made another Twitter blunder last month when he unwittingly favourited a link to a gay porn website.
CaptainEBlackadder accused the couple of double-standards.
He wrote: "@HarrietHarman get @JackDromeyMP to sort his language out, please.You'd burst a blood vessel if this was a Tory MP."
Gbruce16 added: "Think you might be getting an earful from your partner!"
A Royal Mail spokesperson said the term 'Pikey' was not commonly used in its sorting offices.
She said: “I can confirm there is no such slang term for a role within Royal Mail delivery offices.” |
Dielectric relaxation studies in water mixtures of dipropylene glycol using a time domain reflectometry technique
P A Chalikwar, A W Pathan, A R Deshmukh, M P Lokhande & A C Kumbharkhane*

Department of Physics, K K M College Manwath, Parbhani 431 505, India
Department of Physics, A S C College Badnapur, Jalna 431 202, India
School of Physical Sciences, Swami Ramanand Teerth Marathwada University, Nanded 431 606, India
Department of Physics, Government Vidarbha Institute of Science and Humanities, Amravati 444 605, India
def homogenize_input_dtypes(prog):
    # For each function in the program, make the input dtypes consistent,
    # then re-run type inference so downstream ops see the updated types.
    for f_name, f in prog.functions.items():
        _homogenize_input_dtypes_block(f)
        for op in f.operations:
            op.type_value_inference(overwrite_output=True)
// StoreMegolmOutSession stores an olm.OutboundGroupSession at
// /crypto_me/<userID>/<deviceID>/megolm_out/<megolmOutSession.ID>
func (cdb *CryptoDB) StoreMegolmOutSession(userID mat.UserID, deviceID mat.DeviceID,
megolmOutSession *olm.OutboundGroupSession) error {
	err := cdb.db.Update(func(tx *bolt.Tx) error {
		megolmOutSessionsBucket :=
			getBuckets(tx, "crypto_me", userID, deviceID, "megolm_out")
		// propagate the Put error instead of silently discarding it
		return megolmOutSessionsBucket.Put([]byte(megolmOutSession.ID()),
			[]byte(megolmOutSession.Pickle([]byte(""))))
	})
	return err
} |
// Generate a random hexadecimal string of length 16
pub fn generate_uid() -> Vec<u8> {
    let mut rng = rand::thread_rng();
    let mut uid: Vec<u8> = Vec::with_capacity(16);
    for _ in 0..16 {
        // pick one of the 16 characters in CHARSET at random
        uid.push(CHARSET[rng.gen_range(0..16)]);
    }
    uid
}
// The multiboot specification defines the module str as valid utf-8 (zero terminated string),
// therefore this function produces defined behavior
/// Get the cmdline of the module. If the GRUB configuration contains
/// `module2 /foobar/some_boot_module --test cmdline-option`, then this method
/// will return `--test cmdline-option`.
pub fn cmdline(&self) -> &str {
use core::{mem, slice, str};
let strlen = self.size as usize - mem::size_of::<ModuleTag>();
unsafe {
str::from_utf8_unchecked(slice::from_raw_parts(
&self.cmdline_str as *const u8,
strlen,
))
}
} |
// iam-dashboard/src/app/utils/custom-block-ui/custom-block-ui.component.ts
import { Component, Input } from '@angular/core';
@Component({
selector: 'app-custom-block-ui',
template: `
<div class="block-ui-template">
<mat-spinner style="margin: 0 auto;"></mat-spinner>
<p>{{message}}</p>
</div>
`,
styles: [
]
})
export class CustomBlockUIComponent {
@Input() message: any;
}
|
/**
* This class represents a key pair for the Paillier crypto system. It can contain either a complete key pair consisting
* of a private key and the corresponding public key or only the public key. This class can generate key pairs, perform
* encryption and decryption, and apply homomorphic operations to the cipher texts.
*/
public class PaillierKeyPair {
@Stringable BigInteger n;
@Stringable BigInteger g;
@Stringable @Nullable
BigInteger lambda; //PRIVATE KEY
@Stringable @Nullable
BigInteger my; //PRIVATE KEY
public PaillierKeyPair() {}
public PaillierKeyPair(int securityParameter) {
SecureRandom sr = new SecureRandom();
BigInteger p = BigInteger.probablePrime(securityParameter / 2 + 1, sr);
BigInteger q = BigInteger.probablePrime(securityParameter / 2 + 1, sr);
n = p.multiply(q);
lambda = lcm(p.subtract(BigInteger.ONE), q.subtract(BigInteger.ONE));
BigInteger nsquare = n.multiply(n);
//better choice for g by Damgard & Jurik (2001)
g = n.add(BigInteger.ONE);
my = L(g.modPow(lambda, nsquare)).modInverse(n);
}
public BigInteger encrypt(BigInteger message){
if(message.compareTo(n) >= 0)
			throw new IllegalArgumentException("The message is too large for the group.");
BigInteger r = generateRandomizer();
return encrypt(message, r);
}
public BigInteger generateRandomizer(){
return IntegerFunctions.randomize(n.subtract(BigInteger.ONE)).add(BigInteger.ONE);
}
public BigInteger encrypt(BigInteger message, BigInteger r){
		if(message.compareTo(n) >= 0)
			throw new IllegalArgumentException("The message is too large for the group.");
		if(r.compareTo(n) >= 0)
			throw new IllegalArgumentException("The randomizer r is too large for the group.");
BigInteger nsquare = n.multiply(n);
BigInteger c = g.modPow(message, nsquare).multiply(r.modPow(n, nsquare)).mod(nsquare);
return c;
}
public BigInteger decrypt(BigInteger ciphertext){
BigInteger nsquare = n.multiply(n);
if(ciphertext.compareTo(nsquare) >= 0)
			throw new IllegalArgumentException("The cipher text is too large.");
BigInteger m = L(ciphertext.modPow(lambda, nsquare)).multiply(my).mod(n);
return m;
}
public BigInteger multiplyWithScalar(BigInteger c, BigInteger scalar){
return c.modPow(scalar, n.multiply(n));
}
public BigInteger add(BigInteger a, BigInteger b){
return a.multiply(b).mod(n.multiply(n));
}
public static BigInteger lcm(BigInteger a, BigInteger b){
return a.divide(a.gcd(b)).multiply(b);
}
public BigInteger L(BigInteger u){
return u.subtract(BigInteger.ONE).divide(n);
}
/**
* Returns a copy of this key pair which only contains the public key. All private information has been removed.
*/
public PaillierKeyPair clearPrivateKey(){
PaillierKeyPair result = new PaillierKeyPair();
result.n = this.n;
result.g = this.g;
//private key part to null
result.lambda = null;
result.my = null;
return result;
}
public boolean containsPrivateKey(){
return lambda != null || my != null;
}
public BigInteger getN() {
return n;
}
public BigInteger getG() {
return g;
}
public static PaillierKeyPair generatePaillierKeyPair(){
//The protocol requires a minimal bitlength of |n|*9 (in ZKProof section), using |n|*10
int minBitLength = ECKey.CURVE.getN().bitLength() * 10;
		//This is the RSA modulus -> we want at least 2048 bits for security
int secParam = 2048 > minBitLength ? 2048 : minBitLength;
return new PaillierKeyPair(secParam);
}
} |
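
A minimal usage sketch (hypothetical, not part of the original source): it exercises encryption, decryption, and the additive homomorphism D(E(a) * E(b) mod n^2) = a + b mod n using only the methods defined above. The 512-bit parameter is for illustration only; generatePaillierKeyPair() shows the sizes the protocol actually requires.

// Hypothetical usage sketch, assuming the PaillierKeyPair class above is on the classpath.
import java.math.BigInteger;

public class PaillierExample {
    public static void main(String[] args) {
        // 512-bit security parameter for illustration only
        PaillierKeyPair keyPair = new PaillierKeyPair(512);

        BigInteger a = BigInteger.valueOf(17);
        BigInteger b = BigInteger.valueOf(25);

        // homomorphic addition of ciphertexts, and scalar multiplication
        BigInteger cSum = keyPair.add(keyPair.encrypt(a), keyPair.encrypt(b));
        BigInteger cScaled = keyPair.multiplyWithScalar(keyPair.encrypt(a), BigInteger.valueOf(3));

        System.out.println(keyPair.decrypt(cSum));    // 42
        System.out.println(keyPair.decrypt(cScaled)); // 51

        // A public-only copy can still encrypt, but cannot decrypt.
        PaillierKeyPair publicOnly = keyPair.clearPrivateKey();
        System.out.println(publicOnly.containsPrivateKey()); // false
    }
}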
package remotestore
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"strings"
"time"
"google.golang.org/grpc"
"github.com/inklabs/rangedb"
"github.com/inklabs/rangedb/pkg/broadcast"
"github.com/inklabs/rangedb/pkg/grpc/rangedbpb"
"github.com/inklabs/rangedb/pkg/rangedberror"
"github.com/inklabs/rangedb/pkg/recordsubscriber"
"github.com/inklabs/rangedb/provider/jsonrecordserializer"
)
const (
broadcastRecordBuffSize = 100
rpcErrContextCanceled = "Canceled desc = context canceled"
rpcErrUnexpectedSequenceNumber = "unexpected sequence number"
rpcErrStreamNotFound = "Unknown desc = stream not found"
)
// JsonSerializer defines the interface to bind events and identify event types.
type JsonSerializer interface {
rangedb.EventBinder
rangedb.EventTypeIdentifier
}
// PbRecordReceiver defines the interface to receive a protobuf record.
type PbRecordReceiver interface {
Recv() (*rangedbpb.Record, error)
}
type remoteStore struct {
serializer JsonSerializer
client rangedbpb.RangeDBClient
broadcaster broadcast.Broadcaster
}
// New constructs a new rangedb.Store client that communicates with a remote gRPC backend.
func New(conn *grpc.ClientConn) (*remoteStore, error) {
client := rangedbpb.NewRangeDBClient(conn)
s := &remoteStore{
serializer: jsonrecordserializer.New(),
broadcaster: broadcast.New(broadcastRecordBuffSize, broadcast.DefaultTimeout),
client: client,
}
err := s.listenForEvents()
if err != nil {
return nil, err
}
return s, nil
}
func (s *remoteStore) Bind(events ...rangedb.Event) {
s.serializer.Bind(events...)
}
func (s *remoteStore) Events(ctx context.Context, globalSequenceNumber uint64) rangedb.RecordIterator {
request := &rangedbpb.EventsRequest{
GlobalSequenceNumber: globalSequenceNumber,
}
events, err := s.client.Events(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return rangedb.NewRecordIteratorWithError(context.Canceled)
}
return rangedb.NewRecordIteratorWithError(err)
}
return s.readRecords(ctx, events)
}
func (s *remoteStore) EventsByAggregateTypes(ctx context.Context, globalSequenceNumber uint64, aggregateTypes ...string) rangedb.RecordIterator {
request := &rangedbpb.EventsByAggregateTypeRequest{
AggregateTypes: aggregateTypes,
GlobalSequenceNumber: globalSequenceNumber,
}
events, err := s.client.EventsByAggregateType(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return rangedb.NewRecordIteratorWithError(context.Canceled)
}
return rangedb.NewRecordIteratorWithError(err)
}
return s.readRecords(ctx, events)
}
func (s *remoteStore) EventsByStream(ctx context.Context, streamSequenceNumber uint64, streamName string) rangedb.RecordIterator {
request := &rangedbpb.EventsByStreamRequest{
StreamName: streamName,
StreamSequenceNumber: streamSequenceNumber,
}
events, err := s.client.EventsByStream(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return rangedb.NewRecordIteratorWithError(context.Canceled)
}
return rangedb.NewRecordIteratorWithError(err)
}
return s.readRecords(ctx, events)
}
func (s *remoteStore) OptimisticDeleteStream(ctx context.Context, expectedStreamSequenceNumber uint64, streamName string) error {
request := &rangedbpb.OptimisticDeleteStreamRequest{
ExpectedStreamSequenceNumber: expectedStreamSequenceNumber,
StreamName: streamName,
}
_, err := s.client.OptimisticDeleteStream(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return context.Canceled
}
if strings.Contains(err.Error(), rpcErrStreamNotFound) {
return rangedb.ErrStreamNotFound
}
if strings.Contains(err.Error(), rpcErrUnexpectedSequenceNumber) {
return rangedberror.NewUnexpectedSequenceNumberFromString(err.Error())
}
return err
}
return nil
}
func (s *remoteStore) OptimisticSave(ctx context.Context, expectedStreamSequenceNumber uint64, streamName string, eventRecords ...*rangedb.EventRecord) (uint64, error) {
if len(eventRecords) < 1 {
return 0, fmt.Errorf("missing events")
}
var aggregateType, aggregateID string
var events []*rangedbpb.Event
for _, eventRecord := range eventRecords {
// TODO: Allow mixed aggregate types?
if aggregateType != "" && aggregateType != eventRecord.Event.AggregateType() {
return 0, fmt.Errorf("unmatched aggregate type")
}
// TODO: Allow mixed aggregate IDs?
if aggregateID != "" && aggregateID != eventRecord.Event.AggregateID() {
return 0, fmt.Errorf("unmatched aggregate ID")
}
aggregateType = eventRecord.Event.AggregateType()
aggregateID = eventRecord.Event.AggregateID()
jsonData, err := json.Marshal(eventRecord.Event)
if err != nil {
return 0, err
}
jsonMetadata, err := json.Marshal(eventRecord.Metadata)
if err != nil {
return 0, err
}
events = append(events, &rangedbpb.Event{
AggregateType: eventRecord.Event.AggregateType(),
AggregateID: eventRecord.Event.AggregateID(),
EventType: eventRecord.Event.EventType(),
Data: string(jsonData),
Metadata: string(jsonMetadata),
})
}
request := &rangedbpb.OptimisticSaveRequest{
StreamName: streamName,
Events: events,
ExpectedStreamSequenceNumber: expectedStreamSequenceNumber,
}
response, err := s.client.OptimisticSave(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return 0, context.Canceled
}
if strings.Contains(err.Error(), rpcErrUnexpectedSequenceNumber) {
return 0, rangedberror.NewUnexpectedSequenceNumberFromString(err.Error())
}
return 0, err
}
return response.LastStreamSequenceNumber, nil
}
func (s *remoteStore) Save(ctx context.Context, streamName string, eventRecords ...*rangedb.EventRecord) (uint64, error) {
if len(eventRecords) < 1 {
return 0, fmt.Errorf("missing events")
}
var aggregateType, aggregateID string
var events []*rangedbpb.Event
for _, eventRecord := range eventRecords {
// TODO: Allow mixed aggregate types?
if aggregateType != "" && aggregateType != eventRecord.Event.AggregateType() {
return 0, fmt.Errorf("unmatched aggregate type")
}
// TODO: Allow mixed aggregate IDs?
if aggregateID != "" && aggregateID != eventRecord.Event.AggregateID() {
return 0, fmt.Errorf("unmatched aggregate ID")
}
aggregateType = eventRecord.Event.AggregateType()
aggregateID = eventRecord.Event.AggregateID()
jsonData, err := json.Marshal(eventRecord.Event)
if err != nil {
return 0, err
}
jsonMetadata, err := json.Marshal(eventRecord.Metadata)
if err != nil {
return 0, err
}
events = append(events, &rangedbpb.Event{
AggregateType: eventRecord.Event.AggregateType(),
AggregateID: eventRecord.Event.AggregateID(),
EventType: eventRecord.Event.EventType(),
Data: string(jsonData),
Metadata: string(jsonMetadata),
})
}
request := &rangedbpb.SaveRequest{
StreamName: streamName,
Events: events,
}
response, err := s.client.Save(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return 0, context.Canceled
}
return 0, err
}
return response.LastStreamSequenceNumber, nil
}
func (s *remoteStore) AllEventsSubscription(ctx context.Context, bufferSize int, subscriber rangedb.RecordSubscriber) rangedb.RecordSubscription {
return recordsubscriber.New(
recordsubscriber.AllEventsConfig(ctx, s, s.broadcaster, bufferSize,
func(record *rangedb.Record) error {
subscriber.Accept(record)
return nil
},
))
}
func (s *remoteStore) AggregateTypesSubscription(ctx context.Context, bufferSize int, subscriber rangedb.RecordSubscriber, aggregateTypes ...string) rangedb.RecordSubscription {
return recordsubscriber.New(
recordsubscriber.AggregateTypesConfig(ctx, s, s.broadcaster, bufferSize,
aggregateTypes,
func(record *rangedb.Record) error {
subscriber.Accept(record)
return nil
},
))
}
func (s *remoteStore) TotalEventsInStream(ctx context.Context, streamName string) (uint64, error) {
request := &rangedbpb.TotalEventsInStreamRequest{
StreamName: streamName,
}
response, err := s.client.TotalEventsInStream(ctx, request)
if err != nil {
if strings.Contains(err.Error(), rpcErrContextCanceled) {
return 0, context.Canceled
}
return 0, err
}
return response.TotalEvents, nil
}
func (s *remoteStore) readRecords(ctx context.Context, events PbRecordReceiver) rangedb.RecordIterator {
resultRecords := make(chan rangedb.ResultRecord)
go func() {
defer close(resultRecords)
for {
pbRecord, err := events.Recv()
if err != nil {
if err == io.EOF {
return
}
if strings.Contains(err.Error(), rpcErrContextCanceled) {
resultRecords <- rangedb.ResultRecord{Err: context.Canceled}
return
}
if strings.Contains(err.Error(), rpcErrStreamNotFound) {
resultRecords <- rangedb.ResultRecord{Err: rangedb.ErrStreamNotFound}
return
}
log.Printf("failed to get record: %v", err)
resultRecords <- rangedb.ResultRecord{Err: err}
return
}
record, err := rangedbpb.ToRecord(pbRecord, s.serializer)
if err != nil {
log.Printf("failed converting to record: %v", err)
resultRecords <- rangedb.ResultRecord{Err: err}
return
}
if !rangedb.PublishRecordOrCancel(ctx, resultRecords, record, time.Second) {
return
}
}
}()
return rangedb.NewRecordIterator(resultRecords)
}
func (s *remoteStore) listenForEvents() error {
request := &rangedbpb.SubscribeToLiveEventsRequest{}
ctx := context.Background()
events, err := s.client.SubscribeToLiveEvents(ctx, request)
if err != nil {
err = fmt.Errorf("failed to subscribe: %v", err)
log.Print(err)
return err
}
go func() {
recordIterator := s.readRecords(ctx, events)
for recordIterator.Next() {
if recordIterator.Err() != nil {
continue
}
s.broadcaster.Accept(recordIterator.Record())
}
}()
return nil
}
|
def de_Boor(extended_knots_partition, control_net, tabs):
    """Evaluate a B-spline curve at the parameter values `tabs`, given an
    extended knot partition and a control net, using de Boor's algorithm."""
    if type(tabs) is not np.ndarray: tabs = np.array(tabs)
    control_net = control_net.transpose()
    d, n = np.shape(control_net)
    order = len(extended_knots_partition) - n
    C = np.zeros((d, len(tabs)))

    def foreach_dimension(f):
        for i in range(d): f(i)

    ind = 0
    for r in range(order-1, n):
        sup_extrema = extended_knots_partition[r+1]
        tloc = tabs[np.where(extended_knots_partition[r] <= tabs)]
        # include the right endpoint only on the last knot span
        if r == n-1:
            tloc = tloc[np.where(tloc <= sup_extrema)]
        else:
            tloc = tloc[np.where(tloc < sup_extrema)]
        nloc = len(tloc)
        if not nloc: continue

        # local control points, replicated once per parameter value
        Qloc = np.zeros((order, nloc, d))
        def build_temp_matrix(i):
            spline_support = np.matrix(control_net[i,r-order+1:r+1]).transpose()
            Qloc[:,:,i] = spline_support.dot(np.ones((1, nloc)))
        foreach_dimension(build_temp_matrix)

        for j in range(1, order):
            alfa = np.zeros((order-j, nloc))
            for i in range(0, order-j):
                inf_extrema = extended_knots_partition[i+1+r-order+j]
                sup_extrema = extended_knots_partition[i+1+r]
                knots_distance = sup_extrema - inf_extrema
                alfa[i,:] = (tloc - inf_extrema) / knots_distance if knots_distance > 0 else 0
                def barycentric_combine(s):
                    Qloc[i,:,s] = (1-alfa[i,:])*Qloc[i,:,s] + alfa[i,:]*Qloc[i+1,:,s]
                foreach_dimension(barycentric_combine)

        def fill_in_curve_points(i): C[i, ind:ind+nloc] = Qloc[0, :, i]
        foreach_dimension(fill_in_curve_points)
        ind += nloc

    assert np.shape(C) == np.shape(C[:, :ind])
    return C.transpose()
    def reward(self, state):
        # Map each colour code in the grid to the index of its reward weight.
        color_to_weight_index = {'R': 0, 'V': 1, 'N': 2, 'P': 3, 'G': 4}
        color = self.color_grid[state[0], state[1]]
        if color in color_to_weight_index:
            return self.reward_weights[color_to_weight_index[color]]
package cs451;
import java.util.ArrayList;
import java.util.List;
public class FIFOBroadcast extends URBBroadcast {
private long[] vc;
private List<Broadcaster.Message> pending;
	// the semantics: keep an array of longs tracking the NEXT message we
	// expect from each host (a la TCP)
public FIFOBroadcast(List<Host> hosts, byte id, long nb_msg) {
super(hosts, id, nb_msg);
this.vc = new long[hosts.size() + 1];
for (int i = 0; i < vc.length; i++) {
vc[i] = 1;
}
this.pending = new ArrayList<>();
}
protected void broadcast(byte[] msg) {
super.broadcast(msg);
}
protected ArrayList<Broadcaster.Message> handleMsg(byte[] msg, byte from) {
ArrayList<Broadcaster.Message> message = super.handleMsg(msg, from);
ArrayList<Broadcaster.Message> deliveredMessage = new ArrayList<>();
if (message != null) {
boolean newMsg = false;
// check for delivered message
for (Broadcaster.Message m : message) {
				// if it's the id we expect, increment it, deliver the message
				// and set the newMsg flag
if (vc[m.getId()] == m.getMsgId()) {
deliveredMessage.add(m);
newMsg = true;
vc[m.getId()]++;
} else {
// if not, store in pending
pending.add(m);
}
}
			// check in pending:
			// if no fresh message was delivered, nothing changed, so no need to check;
			// if a message from pending is delivered, maybe a previous one can be too,
			// hence redo the loop
while (newMsg) {
newMsg = false;
for (Broadcaster.Message m : new ArrayList<>(pending)) {
if (vc[m.getId()] == m.getMsgId()) {
deliveredMessage.add(m);
newMsg = true;
pending.remove(m);
vc[m.getId()]++;
}
}
}
if (deliveredMessage.size() == 0) {
return null;
}
return deliveredMessage;
} else {
return null;
}
}
}
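
The delivery rule above can be illustrated in isolation. The following is a hypothetical, self-contained sketch of the same next-expected-sequence bookkeeping; Msg and FifoDeliverySketch are invented names for illustration, not part of this codebase.

// Hypothetical stand-alone sketch of the FIFO delivery rule used above:
// deliver a message only when it carries the next sequence number we
// expect from its sender; park it in `pending` otherwise.
import java.util.ArrayList;
import java.util.List;

class FifoDeliverySketch {
    record Msg(int sender, long seq) {}

    private final long[] next;                      // next expected seq per sender
    private final List<Msg> pending = new ArrayList<>();

    FifoDeliverySketch(int numSenders) {
        next = new long[numSenders];
        java.util.Arrays.fill(next, 1);             // sequence numbers start at 1
    }

    List<Msg> onReceive(Msg m) {
        List<Msg> delivered = new ArrayList<>();
        if (m.seq() == next[m.sender()]) {
            delivered.add(m);
            next[m.sender()]++;
            // a delivery may unblock parked messages; loop until stable
            boolean progress = true;
            while (progress) {
                progress = false;
                for (Msg p : new ArrayList<>(pending)) {
                    if (p.seq() == next[p.sender()]) {
                        delivered.add(p);
                        next[p.sender()]++;
                        pending.remove(p);
                        progress = true;
                    }
                }
            }
        } else {
            pending.add(m);
        }
        return delivered;
    }
}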
|
Vanessa Sahinovic, 15, was flown home to Austria on a private jet belonging to Azerbaijan president Ilham Aliyev
European Games 2015 Location: Baku, Azerbaijan Dates: 12-28 June Coverage: Reports & video highlights of the main GB action on the BBC Sport website
A 15-year-old Austrian synchronised swimmer suffered "severe injuries" when she and two other teenagers were in a collision with a bus in the athletes' village at the European Games in Baku.
Vanessa Sahinovic sustained multiple fractures and a head injury - but is not in a life-threatening condition.
Luna Pajer, 15, sustained injuries to her arms and has also returned to Vienna for specialist treatment.
The Austrian Olympic Committee (AOC) said the team would still compete.
"It's a very, very sad day," said Dr Karl Stoss, AOC president. "It's a tough day for the team. Sporting motives and goals are not important at this moment."
Event organisers said the matter was now being investigated by police.
Team doctor Dr Robert Kandelhard accompanied Sahinovic and Pajer back to Vienna. The private jet belonging to Azerbaijan president Ilham Aliyev was used to fly the injured athletes back to their homeland.
"They have already landed and can get the best care there," added Stoss. "I'm not a medical expert but the treatment has been really good so far.
"It's a real shock for us and tough to motivate the team to focus on sport."
The athletes' village at European Games in Baku, Azerbaijan
The AOC said the collision happened at 08:30 local time on Thursday, when the athletes were walking on the pavement in the Olympic village.
Verena Breit, 15, was also injured in the incident and she spent an hour in hospital in Baku with a thigh injury, but has now returned to the athletes' village.
A Baku 2015 spokesperson said: "This is a terrible accident, and at this time our thoughts and concerns are for the athletes involved, their families and the remainder of the Austrian delegation.
The Austrian synchro team in training prior to the accident
"The incident is now a matter of a police investigation and until that process is complete we will be making no further statement."
The inaugural European Games feature 20 sports, 16 of which will be included in next year's Olympics, and begin on 12 June.
Meanwhile, British newspaper the Guardian claims it has been banned from entering Azerbaijan to cover the event.
It says the decision "appears to be linked" to its report on the country's preparations for the event, which contained criticism of the government's "clampdown on freedom of speech and any political opposition". |
/**
* The tag with the value was found.
*
* @param tagValue the value
*/
private void foundTag(final String tagValue) {
if (!tagValue.isEmpty() && tagValue.charAt(0) == '/') {
current = current.getParent();
} else {
current = current.addTag(tagValue);
fireFoundTag(current);
}
} |
/**
* @author Stanislav Muhametsin
*/
public class TableReferenceByExpressionImpl extends TableReferencePrimaryImpl<TableReferenceByExpression>
implements TableReferenceByExpression {
private final QueryExpression _expression;
public TableReferenceByExpressionImpl(final SQLProcessorAggregator processor, final QueryExpression expression,
final TableAlias alias) {
this(processor, TableReferenceByExpression.class, expression, alias);
}
protected TableReferenceByExpressionImpl(final SQLProcessorAggregator processor,
final Class<? extends TableReferenceByExpression> implClass, final QueryExpression expression, final TableAlias alias) {
super(processor, implClass, alias);
NullArgumentException.validateNotNull("collection expression", expression);
this._expression = expression;
}
@Override
public QueryExpression getQuery() {
return this._expression;
}
} |
package org.sunger.net.presenter.impl;
import com.squareup.okhttp.Request;
import org.sunger.net.entity.MediaEntity;
import org.sunger.net.model.UserMediasModel;
import org.sunger.net.presenter.UserMediasPresenter;
import org.sunger.net.support.okhttp.callback.ResultCallback;
import org.sunger.net.view.MediasView;
import java.util.List;
/**
* Created by sunger on 2015/10/27.
*/
public class UserMediasPresenterImpl implements UserMediasPresenter {
private MediasView view;
private UserMediasModel model;
public UserMediasPresenterImpl(MediasView view) {
this.view = view;
this.model = new UserMediasModel();
}
@Override
public void getMedias(int uid, int page) {
model.getMedias(uid, page, new ResultCallback<List<MediaEntity>>() {
@Override
public void onError(Request request, Exception e) {
view.showError();
}
@Override
public void onResponse(List<MediaEntity> response) {
view.showVideo(response);
}
});
}
}
|
Bovine salmonellosis: experimental production and characterization of the disease in calves, using oral challenge with Salmonella typhimurium.
A highly virulent strain of Salmonella typhimurium was given orally to produce disease experimentally in 21 normal colostrum-fed calves 3 to 9 weeks old. The challenge inoculum varied from 10^4 to 10^11 organisms. The disease was characterized by fever, depressed attitude, and decreased appetite. Many calves given larger challenge dose levels also had diarrheic feces containing mucus, fibrin, and blood. Fecal cultures were positive for salmonella. Septicemia occurred in some calves (9 of 15 calves cultured were positive). Eleven calves died and 10 calves survived challenge exposure. Survival was inversely related to the size of the challenge inoculum and directly related (although to a lesser degree) to the age of the calf. White blood cell total and differential counts were variable. Both neutropenia and neutrophilia were observed. Plasma proteins decreased markedly in calves with diarrhea, probably indicating fecal protein loss. Fibrinogen increased during the acute stages of diarrhea.
PAMA Courseware: Learning the Psychological Analysis of Human Task Performance
The PAMA courseware provides an introduction to the theory and practice of work and organizational psychology. The courseware will be made available in a digital learning environment that can be accessed via the Internet and an intranet. First, PAMA provides an introduction to models of work and physical and psychological health. It also provides methods for the psychological analysis of human task performance, as well as practice of professional skills. The introduction to theory and research is contained in electronic books that additionally include educational material. In addition, PAMA is designed to support the development of the professional skills required of a work psychologist. Using a case-based learning application (via the intranet), the course gives students the opportunity to act as a work analyst and to learn job analysis.
// backend/src/test/java/com/sri/save/backend/ActionModelGeneratorTest.java
/*
* Copyright 2016 SRI International
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sri.save.backend;
import java.io.File;
import java.nio.file.Files;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Unmarshaller;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.sri.floralib.ast.FloraDocument;
import com.sri.floralib.model.doc.FileFloraDocumentService;
import com.sri.floralib.model.ont.FileOntologyService;
import com.sri.floralib.model.ont.IOntologyModel;
import com.sri.pal.ActionModel;
import com.sri.pal.jaxb.ActionModelType;
import com.sri.save.backend.flora2deft.ActionModelGenerator;
public class ActionModelGeneratorTest {
private static String contentDir;
private static File testDir;
private static FileFloraDocumentService docService;
private static FileOntologyService ontService;
@BeforeClass
public static void setUp() throws Exception {
contentDir = "../repos/knowledge/weapons/M4";
testDir = Files.createTempDirectory("SAVE").toFile();
// Setup the document and ontology services
docService = new FileFloraDocumentService(new File(contentDir));
ontService = new FileOntologyService(docService); // we'll use this later, below
}
@AfterClass
public static void cleanup() throws Exception{
SystemTest.deleteAll(testDir);
}
@Test
public void testActionModelGenerator() throws Exception {
String FLORA_KB = contentDir + "/m4.flr";
// Set the active file
File floraKBFile = new File (FLORA_KB);
docService.setActiveFile(floraKBFile);
FloraDocument doc = docService.getActiveDocument();
Assert.assertNotNull(doc);
// Get the ontology model for the active file
IOntologyModel<File> ont = ontService.getActiveOntologyModel();
Assert.assertNotNull(ont);
File top_file = new File(testDir + "/m4_gen.xml");
File types_file = new File(testDir + "/m4_types_gen.xml");
File addin_file = new File(contentDir + "/m4_create.xml");
ActionModelGenerator.generateActionModel(ont, top_file, types_file, addin_file);
JAXBContext jc = JAXBContext.newInstance(ActionModelType.class
.getPackage().getName());
Unmarshaller unmarsh = jc.createUnmarshaller();
ActionModelType am = Backend.jaxbReader(top_file.toURI().toURL(),
ActionModel.class.getResource("ActionModel.xsd"), unmarsh,
ActionModelType.class);
Assert.assertTrue(am.getAction().size() > 10);
}
}
|
/**
* JDBC 2.0 Same as prepareStatement() above, but allows the default result
* set type and result set concurrency type to be overridden.
*
* @param sql the SQL query containing place holders
* @param resultSetType a result set type, see ResultSet.TYPE_XXX
* @param resultSetConcurrency a concurrency type, see ResultSet.CONCUR_XXX
*
* @return a new PreparedStatement object containing the pre-compiled SQL
* statement
*
* @exception SQLException if a database-access error occurs.
*/
public synchronized java.sql.PreparedStatement prepareStatement(
String sql, int resultSetType, int resultSetConcurrency)
throws SQLException {
checkClosed();
PreparedStatement pStmt = null;
if (this.cachePreparedStatements) {
PreparedStatement.ParseInfo pStmtInfo = (PreparedStatement.ParseInfo) cachedPreparedStatementParams
.get(sql);
if (pStmtInfo == null) {
pStmt = new com.mysql.jdbc.PreparedStatement(this, sql,
this.database);
PreparedStatement.ParseInfo parseInfo = pStmt.getParseInfo();
if (parseInfo.statementLength < this.preparedStatementCacheMaxSqlSize) {
if (this.cachedPreparedStatementParams.size() >= 25) {
Iterator oldestIter = this.cachedPreparedStatementParams.keySet()
.iterator();
long lruTime = Long.MAX_VALUE;
String oldestSql = null;
while (oldestIter.hasNext()) {
String sqlKey = (String) oldestIter.next();
PreparedStatement.ParseInfo lruInfo = (PreparedStatement.ParseInfo) this.cachedPreparedStatementParams
.get(sqlKey);
if (lruInfo.lastUsed < lruTime) {
lruTime = lruInfo.lastUsed;
oldestSql = sqlKey;
}
}
if (oldestSql != null) {
this.cachedPreparedStatementParams.remove(oldestSql);
}
}
cachedPreparedStatementParams.put(sql, pStmt.getParseInfo());
}
} else {
pStmtInfo.lastUsed = System.currentTimeMillis();
pStmt = new com.mysql.jdbc.PreparedStatement(this, sql,
this.database, pStmtInfo);
}
} else {
pStmt = new com.mysql.jdbc.PreparedStatement(this, sql,
this.database);
}
        // FIXME: Create warnings if we can't create results of the given
        // type or concurrency
pStmt.setResultSetType(resultSetType);
pStmt.setResultSetConcurrency(resultSetConcurrency);
return pStmt;
} |
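
For context, a minimal caller-side sketch of this overload, using only standard java.sql API; the connection URL, credentials, table and query are illustrative assumptions, not taken from the source.

// Hypothetical caller-side sketch of the overload above (standard JDBC usage).
import java.sql.*;

public class ScrollableQueryExample {
    public static void main(String[] args) throws SQLException {
        Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost/test", "user", "password"); // illustrative URL
        PreparedStatement ps = conn.prepareStatement(
                "SELECT id, name FROM users WHERE age > ?",
                ResultSet.TYPE_SCROLL_INSENSITIVE,  // scrollable cursor
                ResultSet.CONCUR_READ_ONLY);        // no in-place updates
        ps.setInt(1, 21);
        ResultSet rs = ps.executeQuery();
        rs.last();                                  // legal because the result set is scrollable
        System.out.println("rows: " + rs.getRow());
        conn.close();
    }
}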
/**
* Created by Rene Argento on 09/04/17.
*/
public class Exercise8 {
private class StringFrequency implements Comparable<StringFrequency>{
String string;
int frequency;
StringFrequency(String string, int frequency) {
this.string = string;
this.frequency = frequency;
}
@Override
public int compareTo(StringFrequency that) {
if (this.frequency > that.frequency) {
return -1;
} else if (this.frequency < that.frequency) {
return 1;
} else {
return 0;
}
}
}
public static void main(String[] args) {
Scanner scanner = new Scanner(System.in);
List<String> stringList = new ArrayList<>();
/**
* Testcase
*
test
test
begin
begin
test
begin
abc
abc
test
end
*/
while(scanner.hasNext()) {
stringList.add(scanner.next());
}
Exercise8 exercise8 = new Exercise8();
StringFrequency[] frequencies = exercise8.frequency2(stringList);
for(StringFrequency frequency : frequencies) {
StdOut.println(frequency.string + " " + frequency.frequency + " occurrence(s)");
}
StdOut.println();
StdOut.println("Expected: \n" +
"test 4 occurrence(s)\n" +
"begin 3 occurrence(s)\n" +
"abc 2 occurrence(s)\n" +
"end 1 occurrence(s)");
}
    private StringFrequency[] frequency(List<String> strings) {
        Map<String, Integer> occurrencesMap = new HashMap<>();
        for(String string : strings) {
            int frequency = 0;
            if (occurrencesMap.containsKey(string)) {
                frequency = occurrencesMap.get(string);
            }
            frequency++;
            occurrencesMap.put(string, frequency);
        }
        StringFrequency[] stringFrequencies = new StringFrequency[occurrencesMap.size()];
        int stringFrequenciesIndex = 0;
        for(String key : occurrencesMap.keySet()) {
            int frequency = occurrencesMap.get(key);
            StringFrequency stringFrequency = new StringFrequency(key, frequency);
            stringFrequencies[stringFrequenciesIndex++] = stringFrequency;
        }
        Arrays.sort(stringFrequencies);
        return stringFrequencies;
    }
//Optimized for space - no need to use a HashMap
//Based on http://algs4.cs.princeton.edu/25applications/Frequency.java.html
private StringFrequency[] frequency2(List<String> strings) {
StringFrequency[] stringFrequencies = new StringFrequency[strings.size()];
int stringFrequenciesIndex = 0;
Collections.sort(strings);
String currentString = strings.get(0);
int frequency = 1;
for(int i = 1; i < strings.size(); i++) {
if (!currentString.equals(strings.get(i))) {
stringFrequencies[stringFrequenciesIndex++] = new StringFrequency(currentString, frequency);
currentString = strings.get(i);
frequency = 1;
} else {
frequency++;
}
}
stringFrequencies[stringFrequenciesIndex++] = new StringFrequency(currentString, frequency);
Arrays.sort(stringFrequencies, 0, stringFrequenciesIndex);
StringFrequency[] stringFrequenciesOutput = new StringFrequency[stringFrequenciesIndex];
System.arraycopy(stringFrequencies, 0, stringFrequenciesOutput, 0, stringFrequenciesIndex);
return stringFrequenciesOutput;
}
} |
// sirius2k/springboot-gentelella
package kr.co.redbrush.webapp.enums;
public enum CalendarType {
SOLAR,
LUNAR
}
|
package data
import (
"context"
"fmt"
"github.com/hashicorp/go-hclog"
protos "github.com/shizhongwang/myswagger/currency/protos/currency"
)
// ErrProductNotFound is an error raised when a product can not be found in the database
var ErrProductNotFound = fmt.Errorf("Product not found")
// Product defines the structure for an API product
// swagger:model
type Product struct {
// the id for the product
//
// required: false
// min: 1
ID int `json:"id"` // Unique identifier for the product
	// the name for this product
//
// required: true
// max length: 255
Name string `json:"name" validate:"required"`
	// the description for this product
//
// required: false
// max length: 10000
Description string `json:"description"`
// the price for the product
//
// required: true
// min: 0.01
Price float64 `json:"price" validate:"required,gt=0"`
// the SKU for the product
//
// required: true
// pattern: [a-z]+-[a-z]+-[a-z]+
SKU string `json:"sku" validate:"sku"`
}
// Products defines a slice of Product
type Products []*Product
type ProductsDB struct {
currency protos.CurrencyClient
log hclog.Logger
rates map[string]float64
client protos.Currency_SubscribeRatesClient
}
func NewProductsDB(c protos.CurrencyClient, l hclog.Logger) *ProductsDB {
pb := &ProductsDB{c, l, make(map[string]float64), nil}
go pb.handleUpdates()
return pb
}
func (p *ProductsDB) handleUpdates() {
sub, err := p.currency.SubscribeRates(context.Background())
if err != nil {
p.log.Error("Unable to subscribe for rates", "error", err)
}
p.client = sub
for {
		rr, err := sub.Recv()
		if err != nil {
			p.log.Error("Error receiving message", "error", err)
			return
		}
		// log only after the error check so we never touch a nil record
		p.log.Info("Received updated rate from server", "dest", rr.GetDestination().String())
		p.rates[rr.Destination.String()] = rr.Rate
}
}
// GetProducts returns all products from the database
func (p *ProductsDB) GetProducts(currency string) (Products, error) {
if currency == "" {
return productList, nil
}
rate, err := p.getRate(currency)
if err != nil {
p.log.Error("Unable to get rate", "currency", currency, "error", err)
return nil, err
}
pr := Products{}
for _, p := range productList {
np := *p
np.Price = np.Price * rate
pr = append(pr, &np)
}
return pr, nil
}
// GetProductByID returns a single product which matches the id from the
// database.
// If a product is not found this function returns a ProductNotFound error
func (p *ProductsDB) GetProductByID(id int, currency string) (*Product, error) {
	i := findIndexByProductID(id)
	if i == -1 {
return nil, ErrProductNotFound
}
if currency == "" {
return productList[i], nil
}
rate, err := p.getRate(currency)
if err != nil {
p.log.Error("Unable to get rate", "currency", currency, "error", err)
return nil, err
}
np := *productList[i]
np.Price = np.Price * rate
return &np, nil
}
// UpdateProduct replaces a product in the database with the given
// item.
// If a product with the given id does not exist in the database
// this function returns a ProductNotFound error
func (p *ProductsDB) UpdateProduct(pr Product) error {
i := findIndexByProductID(pr.ID)
if i == -1 {
return ErrProductNotFound
}
// update the product in the DB
productList[i] = &pr
return nil
}
// AddProduct adds a new product to the database
func (p *ProductsDB) AddProduct(pr Product) {
// get the next id in sequence
maxID := productList[len(productList)-1].ID
pr.ID = maxID + 1
productList = append(productList, &pr)
}
// DeleteProduct deletes a product from the database
func (p *ProductsDB) DeleteProduct(id int) error {
i := findIndexByProductID(id)
if i == -1 {
return ErrProductNotFound
}
	productList = append(productList[:i], productList[i+1:]...)
return nil
}
// findIndex finds the index of a product in the database
// returns -1 when no product can be found
func findIndexByProductID(id int) int {
for i, p := range productList {
if p.ID == id {
return i
}
}
return -1
}
func (p *ProductsDB) getRate(destination string) (float64, error) {
// if cached return
if r, ok := p.rates[destination]; ok {
return r, nil
}
rr := &protos.RateRequest{
Base: protos.Currencies(protos.Currencies_value["EUR"]),
Destination: protos.Currencies(protos.Currencies_value[destination]),
}
	// get the initial rate, checking the error before touching the response
	resp, err := p.currency.GetRate(context.Background(), rr)
	if err != nil {
		return 0, err
	}
	p.rates[destination] = resp.Rate // update cache

	// subscribe for updates
	p.client.Send(rr)

	return resp.Rate, nil
}
var productList = []*Product{
&Product{
ID: 1,
Name: "Latte",
Description: "Frothy milky coffee",
Price: 2.45,
SKU: "abc323",
},
&Product{
ID: 2,
Name: "Esspresso",
Description: "Short and strong coffee without milk",
Price: 1.99,
SKU: "fjd34",
},
}
|
How To Solder
Soldering is defined as "the joining of metals by a fusion of alloys which have relatively low melting points". In other words, you use a metal that has a low melting point to adhere the surfaces to be soldered together. Consider that soldering is more like gluing with molten metal, unlike welding where the base metals are actually melted and combined. Soldering is also a must-have skill for all sorts of electrical and electronics work. It is also a skill that must be taught correctly and developed with practice.
This tutorial will cover the most common types of soldering required for electronics work. This includes soldering components to printed circuit boards and soldering a spliced wire joint.
Soldering Equipment
The Soldering Iron/Gun

The first thing you will need is a soldering iron, which is the heat source used to melt solder. Irons in the 15W to 30W range are good for most electronics/printed circuit board work. Anything higher in wattage and you risk damaging either the component or the board. If you intend to solder heavy components and thick wire, then you will want to invest in an iron of higher wattage (40W and above) or one of the large soldering guns. The main difference between an iron and a gun is that an iron is pencil shaped and designed with a pinpoint heat source for precise work, while a gun has a familiar gun shape with a large high-wattage tip heated by flowing electrical current directly through it.
A 30 Watt Soldering Iron
A 300W Soldering Gun

For hobbyist electronics use, a soldering iron is generally the tool of choice, as its small tip and low heat capacity are suited to printed circuit board work (such as assembling kits). A soldering gun is generally used in heavy duty soldering such as joining heavy gauge wires, soldering brackets to a chassis or stained glass work. You should choose a soldering iron with a 3-pronged grounding plug. The ground will help prevent stray voltage from collecting at the soldering tip and potentially damaging sensitive (such as CMOS) components. By their nature, soldering guns are quite "dirty" in this respect, as the heat is generated by shorting a current (often AC) through the tip made of formed wire. Guns have much less use in hobbyist electronics, so if you have only one tool choice, an iron is what you want. For a beginner, the 15W to 30W range is best, but be aware that at the 15W end of that range, you may not have enough power to join wires or larger components. As your skill increases, a 40W iron is an excellent choice as it has the capacity for slightly larger jobs and makes joints very quickly. Be aware that it is often best to use a more powerful iron so that you don't need to spend a lot of time heating the joint, which can damage components. A variation of the basic gun or iron is the soldering station, where the soldering instrument is attached to a variable power supply. A soldering station can precisely control the temperature of the soldering tip, unlike a standard gun or iron where the tip temperature will increase when idle and decrease when applying heat to a joint. However, the price of a soldering station is often ten to one hundred times the cost of a basic iron, and thus really isn't an option for the hobby market. But if you plan to do very precise work, such as surface mount, or to spend 8 hours a day behind a soldering iron, then you should consider a soldering station. The rest of this document will assume that you are using a soldering iron, as that is what the majority of electronics work requires. The techniques for using a soldering gun are basically the same, with the only difference being that heat is only generated when the trigger is pressed.

Solder

The choice of solder is also important. There are several kinds of solder available, but only a few are suitable for electronics work. Most importantly, you will only use rosin core solder. Acid core solder is common in hardware stores and home improvement stores, but it is meant for soldering copper plumbing pipes, not electronic circuits. If acid core solder is used on electronics, the acid will destroy the traces on the printed circuit board and erode the component leads. It can also form a conductive layer leading to shorts. For most printed circuit board work, a solder with a diameter of 0.75mm to 1.0mm is desirable. Thicker solder may be used and will allow you to solder larger joints more quickly, but it will make soldering small joints difficult and increase the likelihood of creating solder bridges between closely spaced PCB pads. An alloy of 60/40 (60% tin, 40% lead) is used for most electronics work. These days, several lead-free solders are available as well. Kester "44" Rosin Core solder has been a staple of electronics for many years and continues to be available. It is available in several diameters and has a non-corrosive flux.
Large joints, such as soldering a bracket to a chassis using a high wattage soldering gun, will require a separate application of brush-on flux and a thick solder of several millimeters in diameter. Remember that when soldering, the flux in the solder will release fumes as it is heated. These fumes are harmful to your eyes and lungs. Therefore, always work in a well ventilated area and avoid breathing the smoke created. Hot solder is also dangerous. It is surprisingly easy to splash hot solder onto yourself, which is a thoroughly unpleasant experience. Eye protection is also advised.
Preparing To Solder

Tinning The Soldering Tip

Before use, a new soldering tip, or one that is very dirty, must be tinned. "Tinning" is the process of coating a soldering tip with a thin coat of solder. This aids heat transfer between the tip and the component you are soldering, and also gives the solder a base from which to flow.

Step 1: Warm Up The Iron
Warm up the soldering iron or gun thoroughly. Make sure that it has fully come to temperature, because you are about to melt a lot of solder on it. This is especially important if the iron is new, because it may have been packed with some kind of coating to prevent corrosion.

Step 2: Prepare A Little Space
While the soldering iron is warming up, prepare a little space to work. Moisten a little sponge and place it in the base of your soldering iron stand or in a dish close by. Lay down a piece of cardboard in case you drip solder (you probably will) and make sure you have room to work comfortably.

Step 3: Thoroughly Coat The Tip In Solder
Thoroughly coat the soldering tip in solder. It is very important to cover the entire tip. You will use a considerable amount of solder during this process and it will drip, so be ready. If you leave any part of the tip uncovered, it will tend to collect flux residue and will not conduct heat very well, so run the solder up and down the tip and completely around it to totally cover it in molten solder.

Step 4: Clean The Soldering Tip
After you are certain that the tip is totally coated in solder, wipe the tip off on the wet sponge to remove all the flux residue. Do this immediately so there is no time for the flux to dry out and solidify.

Step 5: You're Done!
You have just tinned your soldering tip. This must be done any time you replace the tip or clean it, so that the iron maintains good heat transfer.
Soldering A Printed Circuit Board (PCB)
Soldering a PCB is probably the most common soldering task an electronics hobbyist will perform. The basic techniques are fairly easy to grasp but it is a skill that will take a little practice to master. The best way to practice is to buy a simple electronics kit or assemble a simple circuit (such as an LED chaser) on a perf-board. Don't buy that expensive kit or dive into a huge project after only soldering a few joints.
Soldering components onto a PCB involves preparing the surface, placing the components, and then soldering the joint.
Step 1: Surface Preparation
A clean surface is very important if you want a strong, low resistance solder joint. All surfaces to be soldered should be cleaned well. 3M Scotch-Brite pads purchased from a home improvement store, industrial supply store or automotive body shop are a good choice, as they will quickly remove surface tarnish but will not abrade the PCB material. Note that you will want industrial pads, not the kitchen cleaning pads impregnated with cleaner/soap. If you have particularly tough deposits on your board, then a fine grade of steel wool is acceptable, but be very cautious on boards with tight tolerances, as the fine steel shavings can lodge between pads and in holes. Once you have cleaned the board down to shiny copper, you can use a solvent such as acetone to clean off any bits of the cleaning pad that may remain and to remove chemical contamination from the surface of the board. Methyl hydrate is another good solvent, and a bit less stinky than acetone. Be aware that both these solvents can remove ink, so if your board is silk screened, test the chemicals first before hosing down the entire board. A few blasts with compressed air will dry out the board and remove any junk that may have built up in the holes. It also never hurts to give the component leads a quick wipe down as well, to remove glue or tarnish that may have built up over time.

Step 2: Component Placement
After the component and board have been cleaned, you are ready to place the components onto the board. Unless your circuit is simple and only contains a few components, you will probably not be placing all the components onto the board and soldering them at once. Most likely you will be soldering a few components at a time before turning the board over and placing more. In general it is best to start with the smallest and flattest components (resistors, ICs, signal diodes, etc.) and then work up to the larger components (capacitors, power transistors, transformers) after the small parts are done. This keeps the board relatively flat, making it more stable during soldering. It is also best to save sensitive components (MOSFETs, non-socketed ICs) until the end, to lessen the chance of damaging them during assembly of the rest of the circuit. Bend the leads as necessary and insert the component through the proper holes on the board. To hold the part in place while you are soldering, you may want to bend the leads on the bottom of the board at a 45 degree angle. This works well for parts with long leads, such as resistors. Components with short leads, such as IC sockets, can be held in place with a little masking tape, or you can bend the leads down to clamp onto the PC board pads. In the image below, a resistor is ready to solder and is held in place by slightly bent leads.

Step 3: Apply Heat
Apply a very small amount of solder to the tip of the iron. This helps conduct the heat to the component and board, but it is not the solder that will make up the joint. To heat the joint, lay the tip of the iron so that it rests against both the component lead and the board. It is critical that you heat the lead and the board, otherwise the solder will simply pool and refuse to stick to the unheated item. The small amount of solder you applied to the tip before heating the joint will help make contact between the board and the lead. It normally takes a second or two to get the joint hot enough to solder, but larger components and thicker pads/traces will absorb more heat and can increase this time.
If you see the area under the pad starting to bubble, stop heating and remove the soldering iron because you are overheating the pad and it is in danger of lifting. Let it cool, then carefully heat it again for much less time.

Step 4: Apply Solder To The Joint
Once the component lead and solder pad have heated up, you are ready to apply solder. Touch the tip of the strand of solder to the component lead and solder pad, but not the tip of the iron. If everything is hot enough, the solder should flow freely around the lead and pad. You will see the flux liquefy as well, bubble around the joint (this is part of its cleaning action), flow out and release smoke. Continue to add solder to the joint until the pad is completely coated and the solder forms a small mound with slightly concave sides. If it starts to ball up, you have used too much solder or the pad on the board is not hot enough. Once the surface of the pad is completely coated, you can stop adding solder and remove the soldering iron (in that order). Don't move the joint for a few seconds, as the solder needs time to cool and resolidify. If you do move the joint, you will get what's called a "cold joint". This is recognized by its characteristic dull and grainy appearance. Many cold joints can be fixed by reheating, applying a small amount of solder, and then letting the joint cool without being disturbed.

Step 5: Inspect The Joint and Cleanup
Once the joint is made, you should inspect it. Check for cold joints (described briefly above and at length below), shorts with adjacent pads or poor flow. If the joint checks out, move on to the next. To trim the lead, use a small set of side cutters and cut at the top of the solder joint. After you have made all the solder joints, it is good practice to clean all the excess flux residue from the board. Some fluxes are hygroscopic (they absorb water) and can slowly absorb enough water to become slightly conductive. This can be a significant issue in a hostile environment such as an automotive application. Most fluxes will clean up easily using methyl hydrate and a rag, but some will require a stronger solvent. Use the appropriate solvent to remove the flux, then blow the board dry with compressed air.

Conformal Coatings
If the printed circuit board you just soldered is going to be used in a hostile environment where it is subjected to moisture, dirt or chemicals, it may be a good idea to apply a conformal coating such as those made by MG Chemicals. These coatings are sprayed onto a PC board to seal it against the hazards of the environment. Coatings are usually lacquer, silicone or urethane based and are applied to both sides of the board once it is fully assembled and tested.
Cold Solder Joints
A "cold solder joint" can occur when not enough heat is applied to the component, board, or both. Another common cause is a component moving before the solder has completely cooled and solidified. A cold joint is brittle and prone to physical failure. It is also generally a very high resistance connection which can effect the operation of the circuit or cause it to fail completely.
Cold joints can often be recognized by a characteristic grainy, dull gray colour, but this is not always the case. A cold joint can also appear as a ball of solder sitting on the pad and surrounding the component lead. Additionally, you may notice cracks in the solder, and the joint may even move. Below is a shocking image showing every example of a bad solder joint you will ever see. It appears that this FM transmitter kit was assembled using the technique of "apply solder to iron, then drip onto joint". If your joints look like this, stop and practice after rereading this page. Note that not a single one of these joints is acceptable, but amazingly, the circuit worked.
Most cold solder joints can be easily fixed. Generally all that is required is to reheat the joint and apply a little more solder. If there is already too much solder on the joint, then the joint will have to be desoldered and then soldered again. This is done by first removing the old solder with a desoldering tool or simply by heating it up and flicking it off with the iron. Once the old solder is off, you can resolder the joint, making sure to heat it thoroughly and keep it still as it cools.
Soldering A Wire Joint or Splice
Another very common task is soldering a joint between two or more wires. Unlike soldering a PCB where the component is generally held only by the solder joint itself, a splice between wires must be physically strong before it is soldered. This usually means twisting the wires together properly and then soldering. Areas where you will see soldered wire joints are cable repairs and automotive wiring. In these cases, the joint must be insulated after soldering as well.
Step 1: Strip The Wires To Be Joined, Slip On Insulation

Heat shrink tubing is generally the preferred method of insulating a wire splice. There are two main types of heat shrink available: adhesive-lined and non-adhesive-lined. Non-adhesive tubing forms an insulating barrier only and is therefore suitable only when the joint will not be subjected to moisture, chemicals, or other harsh environments. Adhesive-lined heat shrink tubing is lined with a heat-sensitive adhesive that melts to seal the joint as the tubing is heated, so it forms a totally sealed joint and is used when a splice will be subjected to moisture or other elements that can affect the joint. As an example, you would use non-adhesive shrink tube when repairing a lamp cord, but adhesive-lined tubing when installing a car stereo. Use heat shrink tube with a diameter of approximately 1.5 to 2 times the diameter of the wires to be joined. Cut the tube to a length that extends past each side of the joint by at least 0.5 inches, then slip it over one of the wire ends. Now strip about an inch of insulation from each wire end. If you are joining rather thick wire (thicker than 12 gauge), you may want to strip a little more insulation to make twisting the wire easier.

Step 2: Twist The Wires Together

A strong mechanical connection is necessary before the wires are soldered, so you must twist them together. The wires will be twisted in what is referred to as a "Lineman's joint", where the wires are joined in a straight line as opposed to being twisted together in a "V" shape. Hold the stripped ends of the wires together in an "X" shape so their middles cross one another, then twist one of the wires along the other wire's length. Then twist the other side to match. What you end up with is a strong wire joint that is generally not much thicker than the wire itself.

Step 3: Apply Heat

Apply heat to the bottom of the wire joint, using the thicker section of the soldering tip. If you heat the top of the wire, you will lose a lot of heat, since heat rises. The thicker area of the soldering iron tip conducts more heat into the wire joint. It also helps to slightly wet the tip of the soldering iron to further aid heat transfer. The thicker the wire joint, the more heat will be required. Be careful: on thin wires with cheap insulation, you can melt quite a bit of it off if you overheat the joint. Once the joint is hot enough (a good clue is when the solder you used to wet the tip of the iron flows into the joint), you can move on to applying solder. Once you have soldered a number of these joints, you will be able to judge how much heat is needed based on the thickness of the wire.

Step 4: Apply Solder To The Joint

With the joint fully heated, apply your solder to the joint just above the soldering tip. If it doesn't begin to melt immediately, you need more heat. Once the solder begins to melt, it will flow into the joint around the soldering iron. As the solder flows, move the tip along the wire joint while applying solder. The joint should start to suck in the solder as it is applied. If you find that the solder is pooling where it touches the joint but not flowing inside, you need more heat. Continue adding solder until the joint is fully covered. You should still be able to see the outlines of the individual wire strands, but no copper of the wire should be visible.
If you add so much solder that the joint becomes a blob, you will end up with a brittle joint, and the excess solder will need to be removed.

Step 5: Clean The Flux

If the wire joint is to be sealed or used in an area where it will be exposed to moisture, the flux must be removed. Some fluxes absorb moisture or other chemicals and become corrosive to the joint. While there are flux removal chemicals available, most fluxes can be cleaned up using methyl hydrate, available at any hardware store. Some are even water soluble.

Step 6: Insulate The Joint

Slide the heat shrink tubing so that it evenly covers the joint and apply heat to shrink it. Ideally you will want a heat gun for this, but a simple lighter is acceptable as long as you keep the flame moving to avoid burning the tubing or the wire. If you used adhesive-lined heat shrink, heat the tube until it has shrunk fully around the wire and a little of the adhesive has oozed out the ends. Non-lined heat shrink can be heated until it tightly covers the joint. Heat shrink tubing can be overheated: if too much heat is used, the insulation underneath will begin to break down and may form a bubble. A bubble can also be caused by heating adhesive-lined tubing to the point where the adhesive starts to boil.

That's it! Your wire joint is now complete.
Tips and Tricks
Soldering is something that needs to be practiced. These tips should help you become successful so you can stop practicing and get down to some serious building.
Use heatsinks. Heatsinks are a must for the leads of sensitive components such as ICs and transistors. If you don't have a clip-on heatsink, a pair of pliers is a good substitute.

Keep the iron tip clean. A clean iron tip means better heat conduction and a better joint. Use a wet sponge to clean the tip between joints, and keep the tip well tinned.

Double check joints. When assembling complicated circuits, it is good practice to check joints after soldering them. Use a magnifying glass to visually inspect the joint and a meter to check resistance.

Solder small parts first. Solder resistors, jumper leads, diodes, and any other small parts before you solder larger parts like capacitors and transistors. This makes assembly much easier.

Install sensitive components last. Install CMOS ICs, MOSFETs, and other static-sensitive components last to avoid damaging them during assembly of other parts.

Use adequate ventilation. Most soldering fluxes should not be breathed in. Avoid breathing the smoke created, and make sure that the area you are working in has adequate airflow to prevent a buildup of noxious fumes.
Soldering Safety
While soldering is not generally a hazardous activity, there are a few things to keep in mind. The first and most obvious is that it involves high temperatures. Soldering irons run at 350°F or higher and will cause burns very quickly. Make sure to use a stand to support the iron, and keep the cord away from high-traffic areas. Solder itself can drip, so it makes sense to avoid soldering over exposed body parts. Always work in a well-lit area where you have space to lay parts out and move around. Avoid soldering with your face directly above the joint, because fumes from the flux and other coatings will irritate your respiratory tract and eyes. Most solders contain lead, so you should avoid touching your face while working with solder and always wash your hands before eating.
# chap2_the_field/chap2cheat.py
# -*- coding: utf-8 -*-
from myutil import consolePrintWithLineNumber as c
'''
Created on 2015. 8. 12.
@author: Administrator
'''
'''
ch02 The Field
'''
# 46p
from math import e
from math import pi
from GF2 import one
from image import color2gray
from image import file2image
from plotting import plot
c(1 + 3j)
c(1j)
c((1 + 3j) + (2 + 2j))
x = 1 + 3j
c(x ** 2)
c(x.real)
c(x.imag)
c(type(1 + 2j)) # class 'complex'...
# 47p
# ax + b = c
def solve1(a, b, c): return (c - b) / a
# 10x + 5 = 30
c(solve1(10, 5, 30))
# (10+5i)x+ 5 = 20
c(solve1(10 + 5j, 5, 20))
# 48p
# The module six must be installed:
# > pip install six
# > pip install python-dateutil
# > pip install pyparsing
# Fixes for various other problems > https://wikidocs.net/1019
# How to use matplotlib
# import matplotlib.pyplot as plt
# x = range(100)
# y = [ i * i for i in x]
# plt.plot(x, y)
# plt.show()
# Download the plotting.py file from the author's site (it is not matplotlib) > http://resources.codingthematrix.com/
# After the browser opens, the script does not take effect???
# If it does not work, comment out the remove_at_exit(path) call in the create_temp function inside plotting.py.
# > That code deletes the temporary file at exit, but the file is removed too soon, so the browser launches and then cannot find the file.
S = {2 + 2j, 3 + 2j, 1.75 + 1j, 2 + 1j, 2.25 + 1j, 2.5 + 1j, 2.75 + 1j, 3 + 1j, 3.25 + 1j}
# plot(S, 4)  # renders in a web browser; plot launches the browser on every full run, so it is commented out after testing.
# 49p
c(abs(3 + 4j))
c(abs(1 + 1j))
c((3 + 4j).conjugate())  # complex conjugate (z.real - z.imag*1j)
# 50p
S1 = {1 + 2j + z for z in S}
c(S1)
# plot(S1, 4)
S2 = {-2 - 2j + z for z in S}
# plot(S2, 4)
# 51p
# 52p
S3 = {z / 2 for z in S}
# plot(S3, 4)
S4 = {z * 2 for z in S}
# plot(S4, 4)
'''
<Mistranslation in the Korean edition?>
"Likewise, doubling each complex number brings the points closer to the origin and to each other."
> Should read: "Likewise, doubling each complex number moves the points farther from the origin and from each other."
'''
# 53p
S1 = {z * -1 for z in S}
# plot(S1, 4)
# The image rotated by 90 degrees differs from the one in the book;;;
S1 = {z * 1j for z in S}
# plot(S1, 4)
# 54p
# Task2.4.8
S1 = {z * 1j / 2 for z in S}
# plot(S1, 4)
# Task2.4.9
S1 = {(z * 1j) / 2 + 2 - 1j for z in S}
# plot(S1, 4)
# Task2.4.10
# It is a colour, not a brightness: RGB (183,183,183).
# Keep only the pixels whose color2gray return value (brightness) is less than 120.
data = file2image('img01.png')
data = color2gray(data)
pts = [[x + y * 1j for x, pixel in enumerate(row) if pixel < 120] for y, row in enumerate(data)]
pts1 = sum(pts, [])
# plot(pts1, 200, 1)  # 200 is the scale, 1 is the point size
# > The image comes out reversed... it seems x and y are flipped.
# The output of file2image is already reversed.
# When doing the list comprehension, read from the last row first, using reversed.
pts = [[x + y * 1j for x, pixel in enumerate(row) if pixel < 120] for y, row in enumerate(reversed(data))]
pts1 = sum(pts, [])
# plot(pts1, 200, 1)
# 55p
# Task2.4.11: find the centre of the complex points, then translate it to the origin
def f(zlist):
x_min = 0
x_max = 0
y_min = 0
y_max = 0
for z in zlist:
x = z.real
y = z.imag
if(x_min >= x):
x_min = x
if(x >= x_max):
x_max = x
if(y_min >= y):
y_min = y
if(y > y_max):
y_max = y
x_center = x_max - x_min
y_center = y_max - y_min
# Add -z1/2, moving opposite to z1, the complex number at the centre of the picture.
# Adding just -z1 would instead reflect the image about the origin to the other side.
return [z - x_center / 2 - y_center / 2 * 1j for z in zlist]
pts2 = f(pts1)
# plot(pts2, 200, 1)
# plot([z * 1j / 2 for z in pts1], 200, 1)
# 56p typo?? The angle between z1 and z2 in the figure below is pi/8, not pi/4.
# 57p
# Task2.4.17
n = 6
w = e ** (2 * pi * 1j / n)
roots = [w ** x for x in range(0, n)]  # the n-th roots of unity
c(roots)
# plot(roots, 3, 3)
# 58p
# Task2.4.18
S1 = [z * e ** ((pi / 4) * 1j) for z in S]
# plot(S1, 4)
# Task2.4.19
pts2 = [z * e ** ((pi / 4) * 1j) for z in pts1]
# plot(pts2, 200, 1)
# 61p
# Task2.4.20 (centre translation, rotation, scaling)
pts2 = [z * e ** ((pi / 4) * 1j) * 0.5 for z in f(pts1)]
# plot(pts2, 200, 1)
# 60p
# GF(2), the Galois field .... what does that mean??
c(one * one)
c(one * 0)
c(one + 0)
c(one + one)
c(-one)
# 62p
'''
P2.5.1 How are you supposed to figure this out???
> There are 2**5 = 32 possible cipher KEYs, so just try them all.
The decryption algorithm works the method from the previous page in reverse (there is no other way..-_-;;)
ciphertext1 = KEY1 + plaintext1
plaintext1 = ciphertext1 - KEY1
decrypt (apply all 32 KEYs)
↓
ciphertext Q1 ----> plaintext Q2 ---> split into groups of 5 -> decimal -> letters
'''
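# Worked example (values checked by hand): the repeating 5-bit KEY turns out to
# be '10001'. The first cipher group '10101' XOR '10001' = '00100' = 4 -> 'e',
# and the second group '00100' XOR '10001' = '10101' = 21 -> 'v', giving "eve...".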
Q1 = '1010100100101010101111001000110101110101001001100111010'
# Convert a 5-digit binary string such as '01010' to a decimal number
def bin2dec(bin_str):
    base = 2
    digits = 5
    return sum([int(bin_str[i]) * (base ** (digits - i - 1)) for i in range(digits)])
# Convert a decimal number to a 5-digit binary string (used when building KEYs)
def dec2bin(num):
digits = 5
bStr = ""
while num > 0 :
bStr = str(num % 2) + bStr
num = num >> 1
# pad to the required number of digits
while(len(bStr) < digits):
bStr = '0' + bStr
return bStr
# Convert a decimal number to a letter of the alphabet
def dec2AZ(num):
if num <= 25:
return chr(num + 97)  # chr <-> ord, per the ASCII table
elif num == 26:
return ' '
elif num > 26:  # With a wrong KEY the result can fall outside the expected range; there is no matching character, so mark it with *.
return '*'
# Split the string s into chunks of n characters and return them as a list
def chunks(s, n):
    result = []
    for start in range(0, len(s), n):
        result.append(s[start:start + n])
    return result
# Decryption algorithm
def decodeLogic(cipher_bit, key_bit):
    # plaintext1 = ciphertext1 - KEY1
    return abs(cipher_bit - key_bit)
def sol1():
# Build the 32 candidate 55-digit (11*5) keys.
keylist = []
for i in range(32):
s0 = dec2bin(i) * 11
keylist.append(s0)
# Decrypt
plainlist = []
for key in keylist:
s = ''
for index, k in enumerate(key):
s = s + str(decodeLogic(int(Q1[index]), int(k)))
plainlist.append(s)
# Split the results into groups of 5 digits
chunkslist = []
for plain in plainlist:
    chunkslist.append(chunks(plain, 5))
# Convert to letters
resultlist = []
for clist in chunkslist:
    s2 = ''
    for bin_str in clist:
        s2 = s2 + dec2AZ(bin2dec(bin_str))
    resultlist.append(s2)
return resultlist
c(sol1())  # Look through the 32 results for one that reads as an answer. It is 'eve is evil'. Yikes! Chilling...
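# A quick sanity check of the helpers above (illustrative; values worked out by hand):
assert dec2bin(5) == '00101'
assert bin2dec('00101') == 5
assert chunks('1010101010', 5) == ['10101', '01010']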
/*!
******************************************************************************
*
* \file
*
* \brief RAJA header file containing constructs used to run kernel
* traversals on GPU with SYCL.
*
******************************************************************************
*/
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2016-23, Lawrence Livermore National Security, LLC
// and RAJA project contributors. See the RAJA/LICENSE file for details.
//
// SPDX-License-Identifier: (BSD-3-Clause)
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
#ifndef RAJA_policy_sycl_kernel_internal_HPP
#define RAJA_policy_sycl_kernel_internal_HPP
#include "RAJA/config.hpp"
#if defined(RAJA_ENABLE_SYCL)
#include <cassert>
#include <climits>
#include "camp/camp.hpp"
#include "RAJA/pattern/kernel.hpp"
#include "RAJA/util/macros.hpp"
#include "RAJA/util/types.hpp"
#include "RAJA/policy/sycl/MemUtils_SYCL.hpp"
#include "RAJA/policy/sycl/policy.hpp"
namespace RAJA
{
namespace internal
{
// LaunchDims and Helper functions
struct LaunchDims {
sycl_dim_3_t group;
sycl_dim_3_t local;
sycl_dim_3_t global;
sycl_dim_3_t min_groups;
sycl_dim_3_t min_locals;
RAJA_INLINE
RAJA_HOST_DEVICE
LaunchDims() : group{0,0,0},
local{1,1,1},
global{1,1,1},
min_groups{0,0,0},
min_locals{0,0,0} {}
RAJA_INLINE
RAJA_HOST_DEVICE
LaunchDims(LaunchDims const &c) : group(c.group),
                                  local(c.local),
                                  global(c.global),
                                  min_groups(c.min_groups),
                                  min_locals(c.min_locals)
{
}
RAJA_INLINE
LaunchDims max(LaunchDims const &c) const
{
LaunchDims result;
result.group.x = std::max(c.group.x, group.x);
result.group.y = std::max(c.group.y, group.y);
result.group.z = std::max(c.group.z, group.z);
result.local.x = std::max(c.local.x, local.x);
result.local.y = std::max(c.local.y, local.y);
result.local.z = std::max(c.local.z, local.z);
result.global.x = std::max(c.global.x, global.x);
result.global.y = std::max(c.global.y, global.y);
result.global.z = std::max(c.global.z, global.z);
return result;
}
cl::sycl::nd_range<3> fit_nd_range() {
sycl_dim_3_t launch_global;
sycl_dim_3_t launch_local {1,1,1};
launch_local.x = std::max(launch_local.x, local.x);
launch_local.y = std::max(launch_local.y, local.y);
launch_local.z = std::max(launch_local.z, local.z);
cl::sycl::queue* q = ::RAJA::sycl::detail::getQueue();
// Global resource was not set, use the resource that was passed to forall
// Determine if the default SYCL res is being used
if (!q) {
camp::resources::Resource sycl_res = camp::resources::Sycl();
q = sycl_res.get<camp::resources::Sycl>().get_queue();
}
cl::sycl::device dev = q->get_device();
auto max_work_group_size = dev.get_info< ::cl::sycl::info::device::max_work_group_size>();
if(launch_local.x > max_work_group_size) {
launch_local.x = max_work_group_size;
}
if(launch_local.y > max_work_group_size) {
launch_local.y = max_work_group_size;
}
if(launch_local.z > max_work_group_size) {
launch_local.z = max_work_group_size;
}
// Make sure the multiple of locals fits
// Prefer larger z -> y -> x
if(launch_local.x * launch_local.y * launch_local.z > max_work_group_size) {
int remaining = 1;
// local z cannot exceed max_work_group_size after the clamping above;
// if it is equal, remaining stays 1, so only the < case needs handling
if(max_work_group_size > launch_local.z) {
// keep local z
remaining = max_work_group_size / launch_local.z;
}
if(remaining >= launch_local.y) {
// keep local y
remaining = remaining / launch_local.y;
} else {
launch_local.y = remaining;
remaining = remaining / launch_local.y;
}
if(remaining < launch_local.x) {
launch_local.x = remaining;
}
}
// User gave group policy, use to calculate global space
if (group.x != 0 || group.y != 0 || group.z != 0) {
sycl_dim_3_t launch_group {1,1,1};
launch_group.x = std::max(launch_group.x, group.x);
launch_group.y = std::max(launch_group.y, group.y);
launch_group.z = std::max(launch_group.z, group.z);
launch_global.x = launch_local.x * launch_group.x;
launch_global.y = launch_local.y * launch_group.y;
launch_global.z = launch_local.z * launch_group.z;
} else {
launch_global.x = launch_local.x * ((global.x + (launch_local.x - 1)) / launch_local.x);
launch_global.y = launch_local.y * ((global.y + (launch_local.y - 1)) / launch_local.y);
launch_global.z = launch_local.z * ((global.z + (launch_local.z - 1)) / launch_local.z);
}
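// Round each global dimension up to the nearest multiple of the
// corresponding local dimension, since a SYCL nd_range requires the
// global range to be divisible by the local range.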
if(launch_global.x % launch_local.x != 0) {
launch_global.x = ((launch_global.x / launch_local.x) + 1) * launch_local.x;
}
if(launch_global.y % launch_local.y != 0) {
launch_global.y = ((launch_global.y / launch_local.y) + 1) * launch_local.y;
}
if(launch_global.z % launch_local.z != 0) {
launch_global.z = ((launch_global.z / launch_local.z) + 1) * launch_local.z;
}
cl::sycl::range<3> ret_th = {launch_local.x, launch_local.y, launch_local.z};
cl::sycl::range<3> ret_gl = {launch_global.x, launch_global.y, launch_global.z};
return cl::sycl::nd_range<3>(ret_gl, ret_th);
}
};
template <camp::idx_t cur_stmt, camp::idx_t num_stmts, typename StmtList>
struct SyclStatementListExecutorHelper {
using next_helper_t =
SyclStatementListExecutorHelper<cur_stmt + 1, num_stmts, StmtList>;
using cur_stmt_t = camp::at_v<StmtList, cur_stmt>;
template <typename Data>
inline static RAJA_DEVICE void exec(Data &data, cl::sycl::nd_item<3> item, bool thread_active)
{
// Execute stmt
cur_stmt_t::exec(data, item, thread_active);
// Execute next stmt
next_helper_t::exec(data, item, thread_active);
}
template <typename Data>
inline static LaunchDims calculateDimensions(Data &data)
{
// Compute this statement's launch dimensions
LaunchDims statement_dims = cur_stmt_t::calculateDimensions(data);
// call the next statement in the list
LaunchDims next_dims = next_helper_t::calculateDimensions(data);
// Return the maximum of the two
return statement_dims.max(next_dims);
}
};
template <camp::idx_t num_stmts, typename StmtList>
struct SyclStatementListExecutorHelper<num_stmts, num_stmts, StmtList> {
template <typename Data>
inline static RAJA_DEVICE void exec(Data &, cl::sycl::nd_item<3> item, bool)
{
// nop terminator
}
template <typename Data>
inline static LaunchDims calculateDimensions(Data &)
{
return LaunchDims();
}
};
template <typename Data, typename Policy, typename Types>
struct SyclStatementExecutor;
template <typename Data, typename StmtList, typename Types>
struct SyclStatementListExecutor;
template <typename Data, typename... Stmts, typename Types>
struct SyclStatementListExecutor<Data, StatementList<Stmts...>, Types> {
using enclosed_stmts_t =
camp::list<SyclStatementExecutor<Data, Stmts, Types>...>;
static constexpr size_t num_stmts = sizeof...(Stmts);
static
inline
RAJA_DEVICE
void exec(Data &data, cl::sycl::nd_item<3> item, bool thread_active)
{
// Execute statements in order with helper class
SyclStatementListExecutorHelper<0, num_stmts, enclosed_stmts_t>::exec(data, item, thread_active);
}
static
inline
LaunchDims calculateDimensions(Data const &data)
{
// Compute this statement list's launch dimensions
return SyclStatementListExecutorHelper<0, num_stmts, enclosed_stmts_t>::
calculateDimensions(data);
}
};
template <typename StmtList, typename Data, typename Types>
using sycl_statement_list_executor_t = SyclStatementListExecutor<
Data,
StmtList,
Types>;
} // namespace internal
} // namespace RAJA
#endif // closing endif for RAJA_ENABLE_SYCL guard
#endif // closing endif for header file include guard
|
/*
* Send a message to init to change the runlevel. This function is
* asynchronous-signal-safe (thus safe to use after fork of a
* multithreaded parent) - which is good, because it should only be
* used after forking and entering correct namespace.
*
* Returns 1 on success, 0 if initctl does not exist, -1 on error
*/
int
virInitctlSetRunLevel(virInitctlRunLevel level)
{
struct virInitctlRequest req;
int fd = -1;
int ret = -1;
const char *initctl_fifo = NULL;
size_t i = 0;
const char *initctl_fifos[] = {
"/run/initctl",
"/dev/initctl",
"/etc/.initctl",
};
memset(&req, 0, sizeof(req));
req.magic = VIR_INITCTL_MAGIC;
req.sleeptime = 0;
req.cmd = VIR_INITCTL_CMD_RUNLVL;
req.runlevel = '0' + level;
for (i = 0; i < ARRAY_CARDINALITY(initctl_fifos); i++) {
initctl_fifo = initctl_fifos[i];
if ((fd = open(initctl_fifo,
O_WRONLY|O_NONBLOCK|O_CLOEXEC|O_NOCTTY)) >= 0)
break;
if (errno != ENOENT) {
virReportSystemError(errno,
_("Cannot open init control %s"),
initctl_fifo);
goto cleanup;
}
}
if (fd < 0) {
ret = 0;
goto cleanup;
}
if (safewrite(fd, &req, sizeof(req)) != sizeof(req)) {
virReportSystemError(errno,
_("Failed to send request to init control %s"),
initctl_fifo);
goto cleanup;
}
ret = 1;
cleanup:
VIR_FORCE_CLOSE(fd);
return ret;
} |
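/*
 * Example (illustrative; assumes the virInitctlRunLevel enum exposes a
 * POWEROFF value, as in libvirt's virinitctl.h):
 *
 *   if (virInitctlSetRunLevel(VIR_INITCTL_RUNLEVEL_POWEROFF) <= 0) {
 *       // initctl FIFO missing or write failed: fall back to
 *       // another shutdown mechanism
 *   }
 */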
package com.powerblock.timesheets.fragments;
import com.powerblock.timesheets.ExcelHandler;
import com.powerblock.timesheets.MainActivity;
import com.powerblock.timesheets.R;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
public class SectionTemplate extends Fragment {
private ExcelHandler mExcelHandler;
private View mView;
private String sSection;
private static final String ARG_SECTION = "section";

// Fragments need a public no-argument constructor so the framework can
// recreate them (e.g. after a configuration change); the section name is
// therefore passed through the arguments Bundle rather than a constructor
// parameter.
public SectionTemplate() {
}

public static SectionTemplate newInstance(String section) {
	SectionTemplate fragment = new SectionTemplate();
	Bundle args = new Bundle();
	args.putString(ARG_SECTION, section);
	fragment.setArguments(args);
	return fragment;
}

@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState){
	setHasOptionsMenu(true);
	mExcelHandler = MainActivity.getExcelHandler();
	sSection = getArguments().getString(ARG_SECTION);
	int layoutId = 0;
switch(sSection){
case ExcelHandler.EXCEL_SECTION_EQUIPMENT:
layoutId = R.layout.equipment_fragment;
break;
case ExcelHandler.EXCEL_SECTION_JOB_SETUP:
layoutId = R.layout.job_setup_fragment;
break;
case ExcelHandler.EXCEL_SECTION_SAFETY_LOCK_OUT:
layoutId = R.layout.lock_out;
break;
case ExcelHandler.EXCEL_SECTION_SAFETY_MANUAL_HANDLING:
layoutId = R.layout.safety_manual_handling;
break;
case ExcelHandler.EXCEL_SECTION_MATERIALS_CABLE:
layoutId = R.layout.materials_cable;
break;
case ExcelHandler.EXCEL_SECTION_MATERIALS_CONTAINMENT:
layoutId = R.layout.materials_containment;
break;
case ExcelHandler.EXCEL_SECTION_MATERIALS_DATA:
layoutId = R.layout.materials_data;
break;
case ExcelHandler.EXCEL_SECTION_MATERIALS_LIGHTING:
layoutId = R.layout.materials_lighting;
break;
case ExcelHandler.EXCEL_SECTION_MATERIALS_POWER:
layoutId = R.layout.materials_power;
break;
case ExcelHandler.EXCEL_SECTION_MATERIALS_MCB:
layoutId = R.layout.materials_mcb;
break;
case ExcelHandler.EXCEL_SECTION_SAFETY_PPE:
layoutId = R.layout.ppe;
break;
case ExcelHandler.EXCEL_SECTION_SAFETY_SITE_CONDITIONS:
layoutId = R.layout.safety_site_conditions;
break;
case ExcelHandler.EXCEL_SECTION_TESTING:
layoutId = R.layout.testing_fragment;
break;
case ExcelHandler.EXCEL_SECTION_TIME:
layoutId = R.layout.time_layout;
break;
case ExcelHandler.EXCEL_SECTION_SAFETY_WORKING_AT_HEIGHT:
layoutId = R.layout.safety_height;
break;
case ExcelHandler.EXCEL_SECTION_SAFETY_WELFARE:
layoutId = R.layout.safety_welfare;
break;
case ExcelHandler.EXCEL_SECTION_TESTING_TYPE:
layoutId = R.layout.testing_test_type;
break;
case ExcelHandler.EXCEL_SECTION_TESTING_DBDETAILS:
layoutId = R.layout.testing_db_details;
break;
case ExcelHandler.EXCEL_SECTION_JOB_SETUP_DESCRIPTION:
layoutId = R.layout.job_setup_description;
break;
case ExcelHandler.EXCEL_SECTION_JOB_SETUP_QUOTE_NO:
layoutId = R.layout.job_setup_quote_no;
break;
case ExcelHandler.EXCEL_SECTION_TESTING_PRECONNECTION:
layoutId = R.layout.testing_preconnection;
break;
case ExcelHandler.EXCEL_SECTION_TESTING_POSTCONNECTION:
layoutId = R.layout.testing_postconnection;
break;
}
mView = mExcelHandler.read(inflater, container, layoutId, sSection);
if(mView == null)
mView = inflater.inflate(layoutId, container,false);
return mView;
}
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater){
inflater.inflate(R.menu.save_menu, menu);
super.onCreateOptionsMenu(menu, inflater);
}
@Override
public boolean onOptionsItemSelected(MenuItem item){
if(item.getItemId() == R.id.action_save){
mExcelHandler.write(sSection,mView);
getActivity().getSupportFragmentManager().popBackStack();
return true;
}
return false;
}
}
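// Typical usage from a host activity (illustrative; R.id.container is a
// hypothetical placeholder for the real fragment container id):
//
// getSupportFragmentManager().beginTransaction()
//         .replace(R.id.container, SectionTemplate.newInstance(ExcelHandler.EXCEL_SECTION_TIME))
//         .addToBackStack(null)
//         .commit();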
|
from cement.utils.version import get_version as cement_get_version
import re
__version__ = "0.0.1-alpha.0"
def parse_version(version):
parsed_version = re.compile(r"\.|\-").split(version)
if len(parsed_version) == 3:
parsed_version.append("final")
return tuple(parsed_version)
VERSION = parse_version(__version__)
def get_normalize_version(version=VERSION):
return cement_get_version(version)
def get_version(version=__version__):
return version
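# Illustrative examples of the helpers above (worked out from the code):
# a plain x.y.z version is padded with "final", while a pre-release
# version keeps its extra components.
assert parse_version("1.2.3") == ("1", "2", "3", "final")
assert parse_version("0.0.1-alpha.0") == ("0", "0", "1", "alpha", "0")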
|
// GoPath returns the current GOPATH env var
// or if it's missing, the default.
func GoPath() string {
	goPath := strings.Split(os.Getenv("GOPATH"), string(os.PathListSeparator))
	if len(goPath) == 0 || goPath[0] == "" {
		return build.Default.GOPATH
	}
	return goPath[0]
} |
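// Example (illustrative): with GOPATH="/home/user/go:/opt/go",
// GoPath returns "/home/user/go"; with GOPATH unset or empty it
// falls back to build.Default.GOPATH.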
Evidence of Androgen Receptor Expression in Lichen Sclerosus: An Immunohistochemical Study
Objective: While topical androgen administration is widely used in the treatment of lichen sclerosus of the vulva, localization and level of expression of androgen receptor (AR) have not been described previously. Methods: Thirty-nine paraffin-embedded punch biopsies of patients with lichen sclerosus of the vulva were examined. Androgen receptor, estrogen receptor (ER), and progesterone receptor (PR) expression in lichen sclerosus and in normal vulvar skin were investigated by immunohistochemistry. Results: Five tissue specimens (12.8%) of lichen sclerosus showed nuclear staining with anti-AR in the parabasal cell layers of the epidermis. Median age of patients with positive nuclear staining for AR versus women without AR expression was 71 (range, 63-78) and 66.5 (range, 38-91) years, respectively. Estrogen receptor expression was present in only one patient. Nuclear staining reaction for PR expression was absent in all cases. Four of the five AR-positive women reported no complaints and therefore received no topical testosterone therapy. Conclusion: Our results suggest a lack of complaints in AR-positive lichen sclerosus patients. Our findings could justify a larger study comparing symptoms of patients with and without AR expression. |
/**
* Represents a moduleInfo's department
* Guarantees: immutable; is valid as declared in {@link #isValidModuleInfoDepartment(String)}
*/
public class ModuleInfoDepartment {
public static final String MESSAGE_CONSTRAINTS =
"Must consist of only characters and white spaces and cannot begin with whitespace.";
public static final String VALIDATION_REGEX = "[^\\s1-9][\\p{Graph} ]{0,}";
public final String department;
public ModuleInfoDepartment(String moduleDepartment) {
requireNonNull(moduleDepartment);
checkArgument(isValidModuleInfoDepartment(moduleDepartment), MESSAGE_CONSTRAINTS);
department = moduleDepartment;
}
public static boolean isValidModuleInfoDepartment(String test) {
return test.matches(VALIDATION_REGEX);
}
@Override
public String toString() {
return department;
}
@Override
public boolean equals(Object other) {
return other == this // short circuit if same object
|| (other instanceof ModuleInfoDepartment// instanceof handles nulls
&& department.equals(((ModuleInfoDepartment) other).department)); // state check
}
@Override
public int hashCode() {
return department.hashCode();
}
} |