// controller/src/main/java/org/jboss/as/controller/services/path/RelativePathService.java
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2010, Red Hat, Inc., and individual contributors
 * as indicated by the @author tags. See the copyright.txt file in the
 * distribution for a full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.jboss.as.controller.services.path;

import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.PATH;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.RELATIVE_TO;

import java.io.File;
import java.util.function.Consumer;
import java.util.function.Supplier;

import org.jboss.as.controller.logging.ControllerLogger;
import org.jboss.dmr.ModelNode;
import org.jboss.msc.service.ServiceBuilder;
import org.jboss.msc.service.ServiceController;
import org.jboss.msc.service.ServiceName;
import org.jboss.msc.service.ServiceTarget;
import org.wildfly.common.Assert;

/**
 * {@link AbstractPathService} implementation for paths that are relative
 * to other paths.
 *
 * @author <NAME>
 * @author <a href="mailto:<EMAIL>"><NAME></a>
 */
public class RelativePathService extends AbstractPathService {

    private final String relativePath;
    private final Supplier<String> pathSupplier;

    private RelativePathService(final String relativePath, final Consumer<String> pathConsumer, final Supplier<String> pathSupplier) {
        super(pathConsumer);
        this.relativePath = convertPath(relativePath);
        this.pathSupplier = pathSupplier;
    }

    public static ServiceController<?> addService(final String name, final String relativePath, final String relativeTo, final ServiceTarget serviceTarget) {
        return addService(pathNameOf(name), relativePath, false, relativeTo, serviceTarget);
    }

    public static ServiceController<?> addService(final ServiceName name, final String relativePath, final String relativeTo, final ServiceTarget serviceTarget) {
        return addService(name, relativePath, false, relativeTo, serviceTarget);
    }

    /**
     * Installs a path service.
     *
     * @param name the name to use for the service
     * @param path the relative portion of the path
     * @param possiblyAbsolute {@code true} if {@code path} may be an {@link #isAbsoluteUnixOrWindowsPath(String) absolute path}
     *                         and should be {@link AbsolutePathService installed as such} if it is, with any
     *                         {@code relativeTo} parameter ignored
     * @param relativeTo the name of the path that {@code path} may be relative to
     * @param serviceTarget the {@link ServiceTarget} to use to install the service
     * @return the ServiceController for the path service
     */
    public static ServiceController<?> addService(final ServiceName name, final String path, boolean possiblyAbsolute, final String relativeTo, final ServiceTarget serviceTarget) {
        if (possiblyAbsolute && isAbsoluteUnixOrWindowsPath(path)) {
            return AbsolutePathService.addService(name, path, serviceTarget);
        }
        final ServiceBuilder<?> builder = serviceTarget.addService(name);
        final Consumer<String> pathConsumer = builder.provides(name);
        final Supplier<String> injectedPath = builder.requires(pathNameOf(relativeTo));
        builder.setInstance(new RelativePathService(path, pathConsumer, injectedPath));
        return builder.install();
    }

    public static void addService(final ServiceName name, final ModelNode element, final ServiceTarget serviceTarget) {
        final String relativePath = element.require(PATH).asString();
        final String relativeTo = element.require(RELATIVE_TO).asString();
        addService(name, relativePath, false, relativeTo, serviceTarget);
    }

    static String convertPath(String relativePath) {
        Assert.checkNotNullParam("relativePath", relativePath);
        Assert.checkNotEmptyParam("relativePath", relativePath);
        if (relativePath.charAt(0) == '/') {
            if (relativePath.length() == 1) {
                throw ControllerLogger.ROOT_LOGGER.invalidRelativePathValue(relativePath);
            }
            return relativePath.substring(1);
        } else if (relativePath.indexOf(":\\") == 1) {
            throw ControllerLogger.ROOT_LOGGER.pathIsAWindowsAbsolutePath(relativePath);
        } else {
            if (isWindows()) {
                return relativePath.replace("/", File.separator);
            } else {
                return relativePath.replace("\\", File.separator);
            }
        }
    }

    static String doResolve(String base, String relativePath) {
        base = base.endsWith(File.separator) ? base.substring(0, base.length() - 1) : base;
        return base + File.separatorChar + relativePath;
    }

    @Override
    protected String resolvePath() {
        return doResolve(pathSupplier.get(), relativePath);
    }

    private static boolean isWindows() {
        return File.separatorChar == '\\';
    }
}
Serum polychlorinated biphenyls (PCBs) in Anniston, AL, residents have been associated with hypertension and diabetes. There have been no systematic interventions to reduce PCB body burdens in Anniston or other populations. Our objective was to determine the efficacy of 15 g/day of dietary olestra to reduce PCBs in Anniston residents. Blood PCBs and 1,1-bis-(4-chlorophenyl)-2,2-dichloroethene were measured at baseline and 4-month intervals in a double-blind, placebo-controlled, 1-year trial. Participants with elevated serum PCBs were randomized into two groups of 14 and received potato crisps made with olestra or vegetable oil (VO). Elimination rates during the study period were compared with 5-year prestudy rates. Eleven participants in the olestra group and 12 in the VO group completed the study. Except for one participant in the VO group, reasons for dropout were unrelated to treatments. The elimination rate of 37 non-coplanar PCB congeners during the 1-year trial was faster during olestra consumption compared to the pretrial period (-0.0829 ± 0.0357 and -0.00864 ± 0.0116 year⁻¹, respectively; P = .04), but not during VO consumption (-0.0413 ± 0.0408 and -0.0283 ± 0.0096 year⁻¹, respectively; P = .27). The concentration of PCBs in two olestra group participants decreased by 27% and 25% during the trial. There was no significant time-by-group interaction in change from baseline. However, group main effects for total PCBs and PCB 153 were of borderline significance. This pilot study has demonstrated that olestra can safely reduce body burdens of PCBs and supports a larger intervention trial that may also determine whether reduction in PCBs will reduce the risk of hypertension and diabetes.
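The first-order elimination rates above are easier to compare as half-lives, since t½ = ln 2 / k. A minimal sketch of that conversion, assuming only the rate constants quoted in the abstract (the helper function and its name are ours, for illustration):

import math

def half_life_years(k_per_year):
    # Half-life for a first-order elimination rate constant k (in year^-1).
    return math.log(2) / abs(k_per_year)

# Rates reported above for the sum of 37 non-coplanar PCB congeners:
print(half_life_years(0.0829))    # olestra group during the trial: ~8.4 years
print(half_life_years(0.00864))   # olestra group, 5-year pretrial rate: ~80 years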
Judge Jeanine Pirro applauded President Trump's decision to launch 59 missiles targeting a Syrian air base in response to the recent chemical weapons attack on civilians. She praised the president for acting decisively, saying that it makes Americans proud when the United States takes action against an "evil" dictator like Bashar al-Assad. Pirro said it's a refreshing shift from the "dithering" President Obama, who "couldn't make a decision, who drew a line in the sand and did nothing about it." She noted that the Obama administration worked with Russia in 2013 on a now-failed agreement to make sure Assad gave up his chemical weapons and they were removed from the country. Pirro scoffed at those who argue that the warning to Russia before the missile strikes shows collusion between Trump and Vladimir Putin. She argued that the warning shows Trump does not want a "world war," only to put a stop to Assad's war crimes. "For all those people who said there was this 'bromance' between Donald Trump and Putin? Well, the 'bromance' is over," said Pirro.
#include <iostream>
#include <cstdio>
using namespace std;

int main()
{
    char c = 0, cp;    // c initialized: the original read it in the loop test before any assignment
    int ans;
    int count = 0;
    int len[100000];
    int sum = 0, psum = 0;
    int ind = 0;
    int temp;

    // Skip input until the first '0' or '1' character.
    while (c != '0' && c != '1') {
        scanf("%c", &c);
    }
    cp = c;
    count = 1;
    sum++;

    // Count total bits and record the length of each run of equal bits.
    while (cin >> c) {
        if (c == '0' || c == '1') {
            sum++;
            if (c != cp) {
                cp = c;
                len[ind] = count;
                count = 1;
                ind++;
            } else {
                count++;
            }
        }
    }
    len[ind] = count;

    // At each run boundary, take the larger of the two sides;
    // the answer is the minimum of these over all boundaries.
    ans = sum;
    for (int i = 0; i < ind; i++) {
        psum += len[i];
        temp = (sum - psum < psum) ? psum : sum - psum;
        ans = (ans < temp) ? ans : temp;
    }
    printf("%d\n", ans);
    return 0;
}
// Complete the countingValleys function below.
func countingValleys(n int32, s string) int32 {
	var valleys, altitude int32
	for _, step := range s {
		if step == 'U' {
			altitude++
			if altitude == 0 { // climbed back up to sea level: a valley ends here
				valleys++
			}
		} else {
			altitude--
		}
	}
	return valleys
}
/*******************************************************************************
 * Caleydo - Visualization for Molecular Biology - http://caleydo.org
 * Copyright (c) The Caleydo Team. All rights reserved.
 * Licensed under the new BSD license, available at http://caleydo.org/license
 ******************************************************************************/
package org.caleydo.core.serialize;

import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;

import org.caleydo.core.data.datadomain.ATableBasedDataDomain;
import org.caleydo.core.view.ISingleTablePerspectiveBasedView;

/**
 * Abstract class for all serialized view representations that handle a single
 * {@link ATableBasedDataDomain} (In contrast to container views that hold
 * multiple of those views).
 *
 * @author <NAME>
 * @author <NAME>
 */
@XmlRootElement
@XmlType
public abstract class ASerializedSingleTablePerspectiveBasedView extends ASerializedView {

    /** The ID string of the data domain */
    protected String dataDomainID;

    /** The key of the tablePerspective */
    protected String tablePerspectiveKey;

    /**
     * DO NOT CALL THIS CONSTRUCTOR! ONLY USED FOR DESERIALIZATION.
     */
    public ASerializedSingleTablePerspectiveBasedView() {
    }

    /**
     * Constructor using a reference to {@link ISingleTablePerspectiveBasedView}
     * from which the view ID and the data are automatically initialized
     */
    public ASerializedSingleTablePerspectiveBasedView(
            ISingleTablePerspectiveBasedView singleTablePerspectiveBasedView) {
        super(singleTablePerspectiveBasedView);
        if (singleTablePerspectiveBasedView.getDataDomain() != null) {
            this.dataDomainID = singleTablePerspectiveBasedView.getDataDomain()
                    .getDataDomainID();
            if (singleTablePerspectiveBasedView.getTablePerspective() != null) {
                this.tablePerspectiveKey = singleTablePerspectiveBasedView
                        .getTablePerspective().getTablePerspectiveKey();
            }
        }
    }

    /**
     * Sets the data domain associated with a view
     *
     * @param dataDomainID
     */
    public void setDataDomainID(String dataDomainID) {
        this.dataDomainID = dataDomainID;
    }

    /**
     * Returns the data domain a view is associated with
     *
     * @return
     */
    public String getDataDomainID() {
        return dataDomainID;
    }

    /**
     * @return the tablePerspectiveKey, see {@link #tablePerspectiveKey}
     */
    public String getTablePerspectiveKey() {
        return tablePerspectiveKey;
    }

    /**
     * @param tablePerspectiveKey
     *            setter, see {@link #tablePerspectiveKey}
     */
    public void setTablePerspectiveKey(String tablePerspectiveKey) {
        this.tablePerspectiveKey = tablePerspectiveKey;
    }
}
#include "dxvk_buffer.h"
#include "dxvk_device.h"

namespace dxvk {

  DxvkBuffer::DxvkBuffer(
          DxvkDevice*           device,
    const DxvkBufferCreateInfo& createInfo,
          VkMemoryPropertyFlags memoryType)
  : m_device   (device),
    m_info     (createInfo),
    m_memFlags (memoryType) {
    // Align physical buffer slices to 256 bytes, which guarantees
    // that we don't violate any Vulkan alignment requirements
    m_physSliceLength = createInfo.size;
    m_physSliceStride = align(createInfo.size, 256);

    // Allocate a single buffer slice
    m_physSlice = this->allocPhysicalBuffer(1)
      ->slice(0, m_physSliceStride);
  }

  DxvkPhysicalBufferSlice DxvkBuffer::allocPhysicalSlice() {
    std::unique_lock<sync::Spinlock> freeLock(m_freeMutex);

    // If no slices are available, swap the two free lists.
    if (m_freeSlices.size() == 0) {
      std::unique_lock<sync::Spinlock> swapLock(m_swapMutex);
      std::swap(m_freeSlices, m_nextSlices);
    }

    // If there are still no slices available, create a new
    // physical buffer and add all slices to the free list.
    if (m_freeSlices.size() == 0) {
      std::unique_lock<sync::Spinlock> swapLock(m_swapMutex);
      m_physBuffer = this->allocPhysicalBuffer(m_physSliceCount);

      for (uint32_t i = 0; i < m_physSliceCount; i++) {
        m_freeSlices.push_back(m_physBuffer->slice(
          m_physSliceStride * i,
          m_physSliceLength));
      }

      m_physSliceCount *= 2;
    }

    // Take a slice from the end of the free list
    DxvkPhysicalBufferSlice result = std::move(m_freeSlices.back());
    m_freeSlices.pop_back();
    return result;
  }

  void DxvkBuffer::freePhysicalSlice(const DxvkPhysicalBufferSlice& slice) {
    // Add slice to a separate free list to reduce lock contention.
    std::unique_lock<sync::Spinlock> swapLock(m_swapMutex);

    // Discard slices allocated from other physical buffers.
    // This may make descriptor set binding more efficient.
    if (m_physBuffer->handle() == slice.handle())
      m_nextSlices.push_back(slice);
  }

  Rc<DxvkPhysicalBuffer> DxvkBuffer::allocPhysicalBuffer(VkDeviceSize sliceCount) const {
    DxvkBufferCreateInfo createInfo = m_info;
    createInfo.size = sliceCount * m_physSliceStride;

    return m_device->allocPhysicalBuffer(createInfo, m_memFlags);
  }

  DxvkBufferView::DxvkBufferView(
    const Rc<vk::DeviceFn>&         vkd,
    const Rc<DxvkBuffer>&           buffer,
    const DxvkBufferViewCreateInfo& info)
  : m_vkd(vkd), m_info(info), m_buffer(buffer),
    m_physView(this->createView()),
    m_revision(m_buffer->m_revision) {
  }

  DxvkBufferView::~DxvkBufferView() {
  }

  void DxvkBufferView::updateView() {
    if (m_revision != m_buffer->m_revision) {
      m_physView = this->createView();
      m_revision = m_buffer->m_revision;
    }
  }

  Rc<DxvkPhysicalBufferView> DxvkBufferView::createView() {
    return new DxvkPhysicalBufferView(
      m_vkd, m_buffer->slice(), m_info);
  }

  DxvkBufferTracker::DxvkBufferTracker() {
  }

  DxvkBufferTracker::~DxvkBufferTracker() {
  }

  void DxvkBufferTracker::freeBufferSlice(
    const Rc<DxvkBuffer>&          buffer,
    const DxvkPhysicalBufferSlice& slice) {
    m_entries.push_back({ buffer, slice });
  }

  void DxvkBufferTracker::reset() {
    for (const auto& e : m_entries)
      e.buffer->freePhysicalSlice(e.slice);

    m_entries.clear();
  }

}
/**
 * Adds the tablespace.
 *
 * @param query the query
 */
private void addTablespace(StringBuffer query) {
    if (null != this.tablespaceName) {
        query.append(MPPDBIDEConstants.LINE_SEPARATOR);
        query.append("TABLESPACE ");
        query.append(ServerObject.getQualifiedObjectName(this.tablespaceName));
    }
}
def pack_result_summary_key(result_summary_key):
  """Packs a TaskResultSummary key into a task id string.

  The trailing '0' distinguishes the result summary from individual run
  attempts, which use a non-zero suffix.
  """
  assert result_summary_key.kind() == 'TaskResultSummary'
  request_key = result_summary_key_to_request_key(result_summary_key)
  return pack_request_key(request_key) + '0'
Story highlights
- Errol Louis: The conflict over Sandy relief reflects deep division in Republican Party
- He says the House majority is split between pragmatic pols and radical budget cutters
- John Boehner is caught in the middle, trying to keep a lid on the battle, Louis says
- Louis: Complaints, threats by Christie, King show they fear the aid request will be chopped

The battle over relief funding for areas devastated by Superstorm Sandy should leave no doubt about whether there is a war within the Republican Party over the fundamentals of taxation and spending.

On one side are old-school pols who are committed to reducing government deficits but willing to engage in traditional horse-trading with their big-spending liberal colleagues -- and to support items such as relief for disasters, which can strike any region of the country at any time.

On the other side are dyed-in-the-wool budget radicals, who believe government spending must be curtailed, deeply and immediately. They are perfectly comfortable slicing, delaying or crippling normally sacrosanct programs, including disaster relief.

The two sides are engaged in an old-fashioned power struggle, with Speaker of the House John Boehner as the man in the middle, trying to keep a lid on the battle. The factional fighting delayed and nearly destroyed the fiscal cliff negotiations, with Boehner unable to persuade most of his Republican members to vote for a compromise bill that kept taxes from increasing for nearly all Americans.

Trying to get a vote on hurricane relief the same night proved to be a bridge too far. Boehner, struggling to keep his divided caucus in line -- and facing a critical vote to renew his speakership -- decided to kill the aid bill. It might have kept the budget hawks happy for a moment, but Republicans in New York and New Jersey were furious. And the mood turned ugly.

Rarely do public accusations of political betrayal sound as personal -- at times, nearly shrill -- as the howls that came from New York and New Jersey Republicans over Boehner's last-minute refusal to allow a promised vote on $60 billion worth of relief for areas hard hit by Sandy.

"The speaker just decided to pull the vote. He gave no explanation," said Rep. Michael Grimm, a tea party member who is the sole remaining Republican member of Congress from New York City. "I feel I was misled from the very beginning," he said in a radio interview.

Rep. Peter King, a senior New York Republican, was even blunter. "This has been a betrayal of trust," he said. "We were told at every stage that [a vote] was definitely going on. It is inexcusable. It is wrong."

[Photo gallery: Long, slow recovery from Superstorm Sandy]

New Jersey Gov. Chris Christie, a rising star in the Republican Party, held a news conference to attack Boehner. "It was disgusting to watch," he said.
"One set of Republicans was trying to prove something to another set." Christie put his finger on the dynamic that is likely to drive Republican politics for the next few years. A fair number of GOP members of Congress no doubt supported Boehner's move. Rep. Darrell Issa of California, who chairs the House Oversight and Government Reform Committee, took to the airwaves to denounce the $60 billion bill, specifically blaming Sens. Chuck Schumer and Kirsten Gillibrand of New York. "They had the opportunity to have a $27 [billion] to $30 billion dollar legit relief package, packed it with pork, then dared us not to vote on it," said Issa. "The speaker has the support of the majority of Republicans that if we're going to provide relief, we can't allow it to be doubled with unrelated pork no matter where the relief is." Translation: Republican budget hawks plan to give the areas devastated by Sandy half -- or less -- of what New York and New Jersey requested. And even powerful, nationally popular Republicans like Christie may not be able to budge them. One sign of where the power lies -- for the moment, at least -- is the threats made by Grimm and King. "We were betrayed. We were let down. There have to be consequences," said Grimm, who at first suggested he might not vote for Boehner to continue as speaker. King suggested that New York's wealthy donors close their wallets to Republican leaders. "Anyone from New York or New Jersey who contributes one penny to congressional Republicans is out of their minds," he said. Such statements betray the sort of pure frustration often voiced by men who lack the power to make good on their threats. King later pronounced himself satisfied after meeting with Boehner. The speaker said the House will vote Friday on part of the Sandy relief package, and that the rest of the legislation will be taken up by the next Congress in about two weeks -- long enough for the different sides to catch their breath before resuming the civil war in the Republican Party.
package net.londonunderground;

import mtr.RegistryObject;
import mtr.mappings.BlockEntityMapper;
import net.fabricmc.api.ModInitializer;
import net.londonunderground.mappings.FabricRegistryUtilities;
import net.minecraft.core.Registry;
import net.minecraft.resources.ResourceLocation;
import net.minecraft.sounds.SoundEvent;
import net.minecraft.world.item.BlockItem;
import net.minecraft.world.item.CreativeModeTab;
import net.minecraft.world.item.Item;
import net.minecraft.world.level.block.Block;
import net.minecraft.world.level.block.entity.BlockEntityType;

public class MainFabric implements ModInitializer {

    @Override
    public void onInitialize() {
        Main.init(MainFabric::registerBlock, MainFabric::registerBlockEntityType, MainFabric::registerSoundEvent);
        FabricRegistryUtilities.registerCommand(PanelCommand::register);
    }

    private static void registerBlock(String path, RegistryObject<Block> block, CreativeModeTab itemGroup) {
        Registry.register(Registry.BLOCK, new ResourceLocation(Main.MOD_ID, path), block.get());
        Registry.register(Registry.ITEM, new ResourceLocation(Main.MOD_ID, path), new BlockItem(block.get(), new Item.Properties().tab(itemGroup)));
    }

    private static <T extends BlockEntityMapper> void registerBlockEntityType(String path, RegistryObject<? extends BlockEntityType<? extends BlockEntityMapper>> blockEntityType) {
        Registry.register(Registry.BLOCK_ENTITY_TYPE, new ResourceLocation(Main.MOD_ID, path), blockEntityType.get());
    }

    private static void registerSoundEvent(String path, SoundEvent soundEvent) {
        Registry.register(Registry.SOUND_EVENT, new ResourceLocation(Main.MOD_ID, path), soundEvent);
    }
}
#include <iostream>   // added: cout/cerr/getline are used below
#include <cstdio>     // added: printf/sscanf are used below
#include <cstdlib>    // added: exit is used below
#include <fstream>
#include <sstream>
#include <algorithm>
#include "bitmatrix.h"
using namespace std;

void bye_bye()
{
  printf("ERROR: The original bitmatrix was modified.\n");
  exit(1);
}

void check_equal(Bitmatrix *b1, Bitmatrix *b2)
{
  int i, j;

  if (b1 == NULL || b2 == NULL) {
    if (b1 == b2) return;
    bye_bye();
  }
  if (b1->Rows() != b2->Rows()) bye_bye();
  if (b1->Cols() != b2->Cols()) bye_bye();
  for (i = 0; i < b1->Rows(); i++) {
    for (j = 0; j < b1->Cols(); j++) {
      if (b1->Val(i, j) != b2->Val(i, j)) bye_bye();  // was `bye_bye;` -- a no-op that never fired
    }
  }
  return;
}

vector <string> StoSVec(string &s)
{
  istringstream ss;
  string t;
  vector <string> rv;

  ss.str(s);
  while (ss >> t) rv.push_back(t);
  return rv;
}

int main(int argc, char **argv)   // was `main(...)` with an implicit return type
{
  Bitmatrix *bm, *bm2, *bm3, *bm4, *bm5;
  BM_Hash *ht;
  int i, r, c, w, v, p, b;
  string s;
  vector <string> sv;
  istringstream ss;
  string prompt = "";
  HTVec all;
  vector <int> ind;

  if (argc > 2) {
    cerr << "usage: matrix_editor [prompt]\n";
    exit(1);
  }
  if (argc == 2) prompt = argv[1];
  if (prompt.size() > 0) prompt += " ";

  bm = NULL;
  ht = new BM_Hash(10000);

  while (1) {
    cout << prompt;
    cout.flush();
    if (!getline(cin, s)) exit(0);
    sv = StoSVec(s);
    if (sv.size() > 0 && sv[0][0] != '#') {
      if (sv[0] == "EMPTY") {
        if (sv.size() != 3 ||
            sscanf(sv[1].c_str(), "%d", &r) != 1 || r <= 0 ||
            sscanf(sv[2].c_str(), "%d", &c) != 1 || c <= 0) {
          printf("Should be: EMPTY rows cols\n");
        } else {
          if (bm != NULL) delete bm;
          bm = new Bitmatrix(r, c);
        }

      } else if (sv[0] == "SET") {
        if (sv.size() != 4 ||
            sscanf(sv[1].c_str(), "%d", &r) != 1 || r < 0 ||
            sscanf(sv[2].c_str(), "%d", &c) != 1 || c < 0 ||
            (sv[3] != "0" && sv[3] != "1")) {
          printf("Should be: SET r c 0|1\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");
        } else if (r >= bm->Rows()) {
          printf("r must be less than %d\n", bm->Rows());
        } else if (c >= bm->Cols()) {
          printf("c must be less than %d\n", bm->Cols());
        } else {
          bm->Set(r, c, sv[3][0]);
        }

      } else if (sv[0] == "VAL") {
        if (sv.size() != 3 ||
            sscanf(sv[1].c_str(), "%d", &r) != 1 || r < 0 ||
            sscanf(sv[2].c_str(), "%d", &c) != 1 || c < 0) {
          printf("Should be: VAL r c\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");
        } else if (r >= bm->Rows()) {
          printf("r must be less than %d\n", bm->Rows());
        } else if (c >= bm->Cols()) {
          printf("c must be less than %d\n", bm->Cols());
        } else {
          printf("%d\n", bm->Val(r, c));
        }

      } else if (sv[0] == "SWAP") {
        if (sv.size() != 3 ||
            sscanf(sv[1].c_str(), "%d", &r) != 1 || r < 0 ||
            sscanf(sv[2].c_str(), "%d", &c) != 1 || c < 0) {
          printf("Should be: SWAP r1 r2\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");
        } else if (r >= bm->Rows()) {
          printf("r must be less than %d\n", bm->Rows());
        } else if (c >= bm->Rows()) {
          printf("c must be less than %d\n", bm->Rows());
        } else {
          bm->Swap_Rows(r, c);
        }

      } else if (sv[0] == "+=") {
        if (sv.size() != 3 ||
            sscanf(sv[1].c_str(), "%d", &r) != 1 || r < 0 ||
            sscanf(sv[2].c_str(), "%d", &c) != 1 || c < 0) {
          printf("Should be: R1+=R2 r1 r2\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");
        } else if (r >= bm->Rows()) {
          printf("r must be less than %d\n", bm->Rows());
        } else if (c >= bm->Rows()) {
          printf("c must be less than %d\n", bm->Rows());
        } else {
          bm->R1_Plus_Equals_R2(r, c);
        }

      } else if (sv[0] == "PRINT") {
        w = -1;
        if (sv.size() == 1) {
          w = 0;
        } else if (sv.size() != 2 || sscanf(sv[1].c_str(), "%d", &w) != 1 || w < 0) {
          printf("Should be: PRINT [w]\n");
          w = -1;
        }
        if (w >= 0) {
          if (bm == NULL) {
            printf("No current matrix.\n");
          } else {
            bm->Print(w);
          }
        }

      } else if (sv[0] == "WRITE") {
        if (sv.size() != 2) {
          printf("Should be: WRITE filename\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");
        } else {
          bm->Write(sv[1]);
        }

      } else if (sv[0] == "PGM") {
        if (sv.size() != 4) {
          printf("Should be: PGM filename pixels border\n");
        } else if (sscanf(sv[2].c_str(), "%d", &p) == 0 || p <= 0) {
          printf("Should be: PGM filename pixels border -- pixels > 0\n");
        } else if (sscanf(sv[3].c_str(), "%d", &b) == 0 || b < 0) {
          printf("Should be: PGM filename pixels border -- border >= 0\n");
        } else if (bm == NULL) {   // guard added: the original dereferenced a NULL matrix here
          printf("No current matrix.\n");
        } else {
          bm->PGM(sv[1], p, b);
        }

      } else if (sv[0] == "READ") {
        if (sv.size() != 2) {
          printf("Should be: READ filename\n");   // message mistakenly said WRITE
        } else {
          if (bm != NULL) delete bm;              // was `== NULL`, which leaked the old matrix
          bm = new Bitmatrix(sv[1]);
        }

      } else if (sv[0] == "STORE") {
        if (sv.size() != 2) {
          printf("Should be: STORE key\n");
        } else if (bm == NULL) {   // guard added: Copy() would dereference a NULL matrix
          printf("No current matrix.\n");
        } else {
          bm2 = ht->Recall(sv[1]);
          if (bm2 != NULL) delete bm2;
          ht->Store(sv[1], bm->Copy());
        }

      } else if (sv[0] == "RECALL") {
        if (sv.size() != 2) {
          printf("Should be: RECALL key\n");
        } else {
          bm2 = ht->Recall(sv[1]);
          if (bm2 == NULL) {
            printf("No matrix with key %s\n", sv[1].c_str());
          } else {
            if (bm != NULL) delete bm;
            bm = bm2->Copy();
          }
        }

      } else if (sv[0] == "ALL") {
        if (sv.size() != 1) {
          printf("Should be: ALL\n");
        } else {
          all = ht->All();
          for (i = 0; i < all.size(); i++) {
            printf("%-30s %3d X %3d\n", all[i]->key.c_str(),
                   all[i]->bm->Rows(), all[i]->bm->Cols());
          }
        }

      } else if (sv[0] == "SUM") {
        if (sv.size() != 3) {
          printf("Should be: SUM key1 key2\n");
        } else {
          bm2 = ht->Recall(sv[1]);
          bm3 = ht->Recall(sv[2]);
          if (bm2 == NULL) {
            printf("No matrix %s\n", sv[1].c_str());
          } else if (bm3 == NULL) {
            printf("No matrix %s\n", sv[2].c_str());
          } else if (bm2->Rows() != bm3->Rows()) {
            printf("Rows don't match\n");
          } else if (bm2->Cols() != bm3->Cols()) {
            printf("Cols don't match\n");
          } else {
            if (bm != NULL) delete bm;
            bm4 = bm2->Copy();
            bm5 = bm3->Copy();
            bm = Sum(bm2, bm3);
            check_equal(bm2, bm4);
            check_equal(bm3, bm5);
            delete bm4;
            delete bm5;
          }
        }

      } else if (sv[0] == "PRODUCT") {
        if (sv.size() != 3) {
          printf("Should be: PRODUCT key1 key2\n");
        } else {
          bm2 = ht->Recall(sv[1]);
          bm3 = ht->Recall(sv[2]);
          if (bm2 == NULL) {
            printf("No matrix %s\n", sv[1].c_str());
          } else if (bm3 == NULL) {
            printf("No matrix %s\n", sv[2].c_str());
          } else if (bm2->Cols() != bm3->Rows()) {
            printf("Dimensions don't match\n");
          } else {
            bm4 = bm2->Copy();
            bm5 = bm3->Copy();
            if (bm != NULL) delete bm;
            bm = Product(bm2, bm3);
            check_equal(bm2, bm4);
            check_equal(bm3, bm5);
            delete bm4;
            delete bm5;
          }
        }

      } else if (sv[0] == "SUBMATRIX") {
        if (sv.size() < 2) {
          printf("Should be: SUBMATRIX rows...\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");   // was printing a row argument as a matrix key
        } else {
          ind.clear();
          for (i = 1; i < sv.size(); i++) {
            if (sscanf(sv[i].c_str(), "%d", &r) != 1 || r < 0 || r >= bm->Rows()) {
              printf("Bad row %s. Should be a number between 0 and %d\n",
                     sv[i].c_str(), bm->Rows() - 1);
              i = sv.size() + 10;
            } else {
              ind.push_back(r);
            }
          }
          if (i == sv.size()) {
            bm3 = bm->Copy();
            bm2 = Sub_Matrix(bm, ind);
            check_equal(bm, bm3);
            delete bm3;
            delete bm;
            bm = bm2;
          }
        }

      } else if (sv[0] == "INVERT") {
        if (sv.size() != 1) {
          printf("Should be: INVERT\n");
        } else if (bm == NULL) {
          printf("No current matrix.\n");   // was reading sv[1], which doesn't exist here
        } else if (bm->Rows() != bm->Cols()) {
          printf("Can't invert -- not a square matrix\n");
        } else {
          bm3 = bm->Copy();
          bm2 = Inverse(bm);
          check_equal(bm, bm3);
          delete bm3;
          if (bm2 == NULL) {
            printf("Matrix not invertible.\n");
          } else {
            delete bm;
            bm = bm2;
          }
        }

      } else if (sv[0] == "QUIT") {
        exit(0);

      } else {
        printf("Unknown command %s\n", sv[0].c_str());
      }
    }
  }
}
//! Template cards for Welcome Screen.
//!
//! Template cards allow user to select a predefined template by clicking on the corresponding
//! card.

use ensogl::prelude::*;
use ensogl::system::web::traits::*;

use crate::ClickableElement;

use enso_frp as frp;
use ensogl::system::web;
use web::Element;
use web::HtmlDivElement;
use web::JsCast;



// ======================
// === Template Cards ===
// ======================

// === CardDefinition struct. ===

struct CardDefinition {
    class:                &'static str,
    background_image_url: Option<&'static str>,
    header:               &'static str,
    content:              &'static str,
    template:             &'static str,
}

// === Predefined cards. ===

const CARD_SPREADSHEETS: CardDefinition = CardDefinition {
    class:                crate::css_class::CARD_SPREADSHEETS,
    background_image_url: Some("/assets/spreadsheets.png"),
    header:               "Combine spreadsheets",
    content:              "Glue multiple spreadsheets together to analyse all your data at once.",
    template:             "orders",
};
const CARD_GEO: CardDefinition = CardDefinition {
    class:                crate::css_class::CARD_GEO,
    background_image_url: None,
    header:               "Geospatial analysis",
    content:              "Learn where to open a coffee shop to maximize your income.",
    template:             "restaurants",
};
const CARD_VISUALIZE: CardDefinition = CardDefinition {
    class:                crate::css_class::CARD_VISUALIZE,
    background_image_url: None,
    header:               "Analyze GitHub stars",
    content:              "Find out which of Enso's repositories are most popular over time.",
    template:             "stargazers",
};

// === Card struct. ===

#[derive(Debug, Clone)]
struct Card {
    pub clickable_element: ClickableElement,
    pub template_name:     &'static str,
}

impl Deref for Card {
    type Target = ClickableElement;
    fn deref(&self) -> &Self::Target {
        &self.clickable_element
    }
}



// =============
// === Model ===
// =============

#[derive(Debug, Clone, CloneRef)]
pub struct Model {
    logger:       Logger,
    pub root_dom: Element,
    cards:        Rc<Vec<Card>>,
}

impl Model {
    /// Constructor.
    pub fn new(logger: Logger, open_template: &frp::Any<String>) -> Self {
        let root_dom = web::document.create_element_or_panic("main");
        root_dom.set_class_name(crate::css_class::CONTENT);
        let templates = web::document.create_div_or_panic();
        let header = Self::create_header("Templates");
        templates.append_or_warn(&header);
        let (cards_dom, cards) = Self::create_cards();
        templates.append_or_warn(&cards_dom);
        root_dom.append_or_warn(&templates);

        let model = Self { logger, root_dom, cards: Rc::new(cards) };
        model.setup_event_listeners(open_template);
        model
    }

    /// Attach click event for every card to `open_template` FRP endpoint.
    fn setup_event_listeners(&self, open_template: &frp::Any<String>) {
        for card in self.cards.iter() {
            let network = &card.network;
            let template_name = card.template_name.to_owned();
            frp::extend! { network
                open_template <+ card.click.constant(template_name);
            }
        }
    }

    fn create_header(content: &str) -> Element {
        let header = web::document.create_element_or_panic("h2");
        header.set_text_content(Some(content));
        header
    }

    /// Create main content, a set of cards.
    fn create_cards() -> (HtmlDivElement, Vec<Card>) {
        let mut cards = Vec::new();
        let dom = web::document.create_div_or_panic();
        dom.set_class_name(crate::css_class::CARDS);

        let row1 = Self::create_row(&[CARD_SPREADSHEETS, CARD_GEO], &mut cards);
        dom.append_or_warn(&row1);

        let row2 = Self::create_row(&[CARD_VISUALIZE], &mut cards);
        dom.append_or_warn(&row2);

        (dom, cards)
    }

    fn create_row(definitions: &[CardDefinition], cards: &mut Vec<Card>) -> HtmlDivElement {
        let row = web::document.create_div_or_panic();
        row.set_class_name(crate::css_class::ROW);
        for definition in definitions {
            let card = Self::create_card(definition);
            row.append_or_warn(&card.element);
            cards.push(card.clone());
        }
        row
    }

    /// Helper to create a single card DOM from provided definition.
    fn create_card(definition: &CardDefinition) -> Card {
        let card = web::document.create_div_or_panic();
        card.set_class_name(&format!("{} {}", crate::css_class::CARD, definition.class));
        if let Some(src) = definition.background_image_url {
            let img = web::document.create_element_or_panic("img");
            img.set_attribute_or_warn("src", src);
            card.append_or_warn(&img);
        }
        let card_header = web::document.create_element_or_panic("h3");
        card_header.set_text_content(Some(definition.header));
        card.append_or_warn(&card_header);
        let text_content = web::document.create_element_or_panic("p");
        text_content.set_text_content(Some(definition.content));
        card.append_or_warn(&text_content);

        let clickable_element = ClickableElement::new(card.unchecked_into());
        Card { clickable_element, template_name: definition.template }
    }
}



// ===========
// === FRP ===
// ===========

ensogl::define_endpoints! {
    Input {}
    Output {
        // Create a new project from template `name`.
        open_template(String),
    }
}



// =====================
// === TemplateCards ===
// =====================

/// Template Cards for Welcome View. It contains a few predefined template cards. Clicking on
/// a template card creates a new project with some prepared code.
#[derive(Debug, Clone, CloneRef)]
pub struct TemplateCards {
    pub model: Model,
    pub frp:   Frp,
}

impl Deref for TemplateCards {
    type Target = Frp;
    fn deref(&self) -> &Self::Target {
        &self.frp
    }
}

impl TemplateCards {
    pub fn new(logger: &Logger) -> Self {
        let logger = Logger::new_sub(logger, "TemplateCards");
        let frp = Frp::new();
        let model = Model::new(logger, &frp.output.source.open_template);

        Self { model, frp }
    }
}
// Imports reconstructed from usage; the package declaration was not present in the original.
// DoubleFormat is assumed to be org.apache.commons.text.numbers.DoubleFormat.
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.text.DecimalFormat;
import java.util.concurrent.TimeUnit;
import java.util.function.DoubleFunction;

import org.apache.commons.rng.UniformRandomProvider;
import org.apache.commons.rng.simple.RandomSource;
import org.apache.commons.text.numbers.DoubleFormat;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;

/** Benchmarks for the {@link DoubleFormat} class. */
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, jvmArgs = {"-server", "-Xms512M", "-Xmx512M"})
public class DoubleFormatPerformance {

    /** Decimal format pattern for plain output. */
    private static final String PLAIN_PATTERN = "0.0##";

    /** Decimal format pattern for plain output with thousands grouping. */
    private static final String PLAIN_GROUPED_PATTERN = "#,##0.0##";

    /** Decimal format pattern for scientific output. */
    private static final String SCI_PATTERN = "0.0##E0";

    /** Decimal format pattern for engineering output. */
    private static final String ENG_PATTERN = "##0.0##E0";

    /** Benchmark input providing a source of random double values. */
    @State(Scope.Thread)
    public static class DoubleInput {

        /** The number of doubles in the input array. */
        @Param({"10000"})
        private int size;

        /** Minimum base 2 exponent for random input doubles. */
        @Param("-100")
        private int minExp;

        /** Maximum base 2 exponent for random input doubles. */
        @Param("100")
        private int maxExp;

        /** Double input array. */
        private double[] input;

        /** Get the input doubles.
         * @return the input doubles
         */
        public double[] getInput() {
            return input;
        }

        /** Set up the instance for the benchmark. */
        @Setup(Level.Iteration)
        public void setup() {
            input = randomDoubleArray(size, minExp, maxExp,
                    RandomSource.create(RandomSource.XO_RO_SHI_RO_128_PP));
        }
    }

    /** Create a random double value with exponent in the range {@code [minExp, maxExp]}.
     * @param minExp minimum exponent; must be less than {@code maxExp}
     * @param maxExp maximum exponent; must be greater than {@code minExp}
     * @param rng random number generator
     * @return random double
     */
    private static double randomDouble(final int minExp, final int maxExp, final UniformRandomProvider rng) {
        // Create random doubles using random bits in the sign bit and the mantissa.
        final long mask = ((1L << 52) - 1) | 1L << 63;
        final long bits = rng.nextLong() & mask;
        // The exponent must be unsigned so + 1023 to the signed exponent
        final long exp = rng.nextInt(maxExp - minExp + 1) + minExp + 1023;
        return Double.longBitsToDouble(bits | (exp << 52));
    }

    /** Create an array with the given length containing random doubles with exponents in the range
     * {@code [minExp, maxExp]}.
     * @param len array length
     * @param minExp minimum exponent; must be less than {@code maxExp}
     * @param maxExp maximum exponent; must be greater than {@code minExp}
     * @param rng random number generator
     * @return array of random doubles
     */
    private static double[] randomDoubleArray(final int len, final int minExp, final int maxExp,
            final UniformRandomProvider rng) {
        final double[] arr = new double[len];
        for (int i = 0; i < arr.length; ++i) {
            arr[i] = randomDouble(minExp, maxExp, rng);
        }
        return arr;
    }

    /** Run a benchmark test on a function accepting a double argument.
     * @param <T> function output type
     * @param input double array
     * @param bh jmh blackhole for consuming output
     * @param fn function to call
     */
    private static <T> void runDoubleFunction(final DoubleInput input, final Blackhole bh,
            final DoubleFunction<T> fn) {
        for (final double d : input.getInput()) {
            bh.consume(fn.apply(d));
        }
    }

    /** Benchmark testing just the overhead of the benchmark harness.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void baseline(final DoubleInput input, final Blackhole bh) {
        runDoubleFunction(input, bh, d -> "0.0");
    }

    /** Benchmark testing the {@link Double#toString()} method.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void doubleToString(final DoubleInput input, final Blackhole bh) {
        runDoubleFunction(input, bh, Double::toString);
    }

    /** Benchmark testing the {@link String#format(String, Object...)} method.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void stringFormat(final DoubleInput input, final Blackhole bh) {
        runDoubleFunction(input, bh, d -> String.format("%f", d));
    }

    /** Benchmark testing the BigDecimal formatting performance.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void bigDecimal(final DoubleInput input, final Blackhole bh) {
        final DoubleFunction<String> fn = d -> BigDecimal.valueOf(d)
                .setScale(3, RoundingMode.HALF_EVEN)
                .stripTrailingZeros()
                .toString();
        runDoubleFunction(input, bh, fn);
    }

    /** Benchmark testing the {@link DecimalFormat} class.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void decimalFormatPlain(final DoubleInput input, final Blackhole bh) {
        final DecimalFormat fmt = new DecimalFormat(PLAIN_PATTERN);
        runDoubleFunction(input, bh, fmt::format);
    }

    /** Benchmark testing the {@link DecimalFormat} class with thousands grouping.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void decimalFormatPlainGrouped(final DoubleInput input, final Blackhole bh) {
        final DecimalFormat fmt = new DecimalFormat(PLAIN_GROUPED_PATTERN);
        runDoubleFunction(input, bh, fmt::format);
    }

    /** Benchmark testing the {@link DecimalFormat} class with scientific format.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void decimalFormatScientific(final DoubleInput input, final Blackhole bh) {
        final DecimalFormat fmt = new DecimalFormat(SCI_PATTERN);
        runDoubleFunction(input, bh, fmt::format);
    }

    /** Benchmark testing the {@link DecimalFormat} class with engineering format.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void decimalFormatEngineering(final DoubleInput input, final Blackhole bh) {
        final DecimalFormat fmt = new DecimalFormat(ENG_PATTERN);
        runDoubleFunction(input, bh, fmt::format);
    }

    /** Benchmark testing the {@link DoubleFormat#PLAIN} format.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void doubleFormatPlain(final DoubleInput input, final Blackhole bh) {
        final DoubleFunction<String> fmt = DoubleFormat.PLAIN.builder()
                .minDecimalExponent(-3)
                .build();
        runDoubleFunction(input, bh, fmt);
    }

    /** Benchmark testing the {@link DoubleFormat#PLAIN} format with
     * thousands grouping.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void doubleFormatPlainGrouped(final DoubleInput input, final Blackhole bh) {
        final DoubleFunction<String> fmt = DoubleFormat.PLAIN.builder()
                .minDecimalExponent(-3)
                .groupThousands(true)
                .build();
        runDoubleFunction(input, bh, fmt);
    }

    /** Benchmark testing the {@link DoubleFormat#SCIENTIFIC} format.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void doubleFormatScientific(final DoubleInput input, final Blackhole bh) {
        final DoubleFunction<String> fmt = DoubleFormat.SCIENTIFIC.builder()
                .maxPrecision(4)
                .alwaysIncludeExponent(true)
                .build();
        runDoubleFunction(input, bh, fmt);
    }

    /** Benchmark testing the {@link DoubleFormat#ENGINEERING} format.
     * @param input benchmark state input
     * @param bh jmh blackhole for consuming output
     */
    @Benchmark
    public void doubleFormatEngineering(final DoubleInput input, final Blackhole bh) {
        final DoubleFunction<String> fmt = DoubleFormat.ENGINEERING.builder()
                .maxPrecision(6)
                .alwaysIncludeExponent(true)
                .build();
        runDoubleFunction(input, bh, fmt);
    }
}
package com.ervin.ez_websocket.controller;

import org.jasypt.encryption.StringEncryptor;
import org.jasypt.encryption.pbe.PooledPBEStringEncryptor;
import org.jasypt.encryption.pbe.config.SimpleStringPBEConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.util.HashMap;
import java.util.Map;

@RestController
@RequestMapping("/encrypt")
public class TestController {

    @Autowired
    StringEncryptor stringEncryptor;

    @GetMapping("/default")
    public Map<String, String> encrypt(@RequestParam(value = "text") String text) {
        Map<String, String> res = new HashMap<>();
        res.put("plain", text);
        System.out.println("PlainText:" + text);
        String encryptTxt = stringEncryptor.encrypt(text);
        res.put("encrypt", encryptTxt);
        System.out.println("EncryptText:" + encryptTxt);
        String decryptTxt = stringEncryptor.decrypt(encryptTxt);
        res.put("decrypt", decryptTxt);
        System.out.println("DecryptText:" + decryptTxt);
        return res;
    }

    // PBEWITHHMACSHA512ANDAES_256
    @GetMapping("/sha512aes256")
    public Map<String, String> encrypt2(@RequestParam(value = "text") String text) {
        Map<String, String> res = new HashMap<>();
        res.put("plain", text);
        PooledPBEStringEncryptor encryptor = new PooledPBEStringEncryptor();
        SimpleStringPBEConfig config = new SimpleStringPBEConfig();
        config.setPassword("<PASSWORD>");
        config.setAlgorithm("PBEWITHHMACSHA512ANDAES_256");
        config.setKeyObtentionIterations("1000");
        config.setPoolSize("1");
        config.setProviderName(null);
        config.setProviderClassName(null);
        config.setSaltGeneratorClassName("org.jasypt.salt.RandomSaltGenerator");
        config.setIvGeneratorClassName("org.jasypt.salt.NoOpIVGenerator");
        config.setStringOutputType("base64");
        encryptor.setConfig(config);
        System.out.println("PlainText:" + text);
        String encryptTxt = encryptor.encrypt(text);
        res.put("encrypt", encryptTxt);
        System.out.println("EncryptText:" + encryptTxt);
        String decryptTxt = encryptor.decrypt(encryptTxt);
        res.put("decrypt", decryptTxt);
        System.out.println("DecryptText:" + decryptTxt);
        return res;
    }

    // PBEWithMD5AndDES
    @GetMapping("/md5aes")
    public Map<String, String> encrypt3(@RequestParam(value = "text") String text) {
        Map<String, String> res = new HashMap<>();
        res.put("plain", text);
        PooledPBEStringEncryptor encryptor = new PooledPBEStringEncryptor();
        SimpleStringPBEConfig config = new SimpleStringPBEConfig();
        config.setPassword("<PASSWORD>");
        config.setAlgorithm("PBEWithMD5AndDES");
        config.setKeyObtentionIterations("1000");
        config.setPoolSize("1");
        config.setProviderName(null);
        config.setProviderClassName(null);
        config.setSaltGeneratorClassName("org.jasypt.salt.RandomSaltGenerator");
        config.setIvGeneratorClassName("org.jasypt.salt.NoOpIVGenerator");
        config.setStringOutputType("base64");
        encryptor.setConfig(config);
        System.out.println("PlainText:" + text);
        String encryptTxt = encryptor.encrypt(text);
        res.put("encrypt", encryptTxt);
        System.out.println("EncryptText:" + encryptTxt);
        String decryptTxt = encryptor.decrypt(encryptTxt);
        res.put("decrypt", decryptTxt);
        System.out.println("DecryptText:" + decryptTxt);
        return res;
    }
}
class Song {
  public Id: any;
  public Name: string;
  public Url: string;
  public Duration: string;
  public Image: string;
}

export default Song;
import React from "react"
import assert from "assert"
import reactNodeToString from "./index"

type Test = [input: React.ReactNode, expectedOutput: string]

describe("react-node-to-string", () => {
  it("extracts correct string", () => {
    const tests: Test[] = [
      ["lorem ipsum", "lorem ipsum"],
      [123, "123"],
      [true, ""],
      [false, ""],
      [null, ""],
      [void 0, ""],
      [["lorem", "ipsum"], "loremipsum"],
      [["lorem", <>ipsum</>], "loremipsum"],
      [<></>, ""],
      [<>lorem ipsum</>, "lorem ipsum"],
      [
        <p>
          <strong>lorem</strong>ipsum
        </p>,
        "loremipsum",
      ],
    ]
    tests.forEach((test, index) => {
      assert.strictEqual(
        reactNodeToString(test[0]),
        test[1],
        `Test ${index} failed`
      )
    })
  })
})
Supplemental Material: Holocene to latest Pleistocene incremental slip rates from the east-central Hope fault (Conway segment) at Hossack Station, Marlborough fault system, South Island, New Zealand: Towards a dated path of earthquake slip along a plate boundary fault

Includes IRSL detailed procedures with figures and a table, line log of trench 3, and an alternative restoration of offset B.

■ INTRODUCTION

Understanding the rate at which faults store and release elastic strain energy is of fundamental importance for a wide range of issues, from seismic hazard assessment to informed interpretation of geodetic data to the strength and evolution of faults. Previous analyses of incremental fault slip rates indicate a wide range of behaviors, from rates that are seemingly constant over a wide range of time scales (e.g., Weldon and Sieh, 1985; Noriega et al., 2006; Kozaci et al., 2007; Gold and Cowgill, 2011; Van Der Woerd et al., 2002; Salisbury et al., 2018) to examples in which strain release is markedly nonconstant (e.g., Friedrich et al., 2003; Weldon et al., 2004; Mason et al., 2006; Gold and Cowgill, 2011; Onderdonk et al., 2015; Dolan et al., 2016; Zinke et al., 2017, 2019). Despite a growing number of studies, the overall dearth of these slip-rate data from major faults globally hampers our ability to understand the causes of such behavior. In this study, we document incremental slip rates over five different Holocene to latest Pleistocene time intervals on the Hope fault, one of the fastest-slipping strike-slip faults in the Australian-Pacific plate boundary in northern South Island, New Zealand (e.g., Litchfield et al., 2014). We discuss these results in light of their implications for plate-boundary strain accommodation, fault mechanics, and potential use in probabilistic seismic hazard analysis.

The Marlborough Fault System and Hope Fault

The Pacific-Australian plate boundary cuts across the South Island of New Zealand, which spans the onshore gap between subduction zones of opposing polarity, with the Hikurangi megathrust dipping northwest off the east coast of North Island and the Puysegur megathrust dipping southeast off the southwestern coast of southern South Island (Fig. 1A). In northern South Island, most relative plate motion is accommodated by the Marlborough fault system, a system of subparallel right-lateral strike-slip faults that splay northeastward from the Alpine fault, the main plate-boundary fault to the southwest (Fig. 1B). From north to south, the four main faults of the Marlborough fault system, which collectively accommodate ~80%-90% of the total plate motion of ~39 mm/yr (DeMets et al., 2010; Wallace et al., 2012; Litchfield et al., 2014), are the Wairau, Awatere, Clarence, and Hope faults. Within the Marlborough fault system, the southernmost Hope fault is thought to have the fastest slip rate, estimated by previous workers to exceed 10 mm/yr along the majority of its on-land length (Cowan, 1990; Cowan and McGlone, 1991; Langridge and Berryman, 2005; Langridge et al., 2016; Khajavi et al., 2018). Along the single-stranded, east-central part of the fault, the slip rate has been estimated to be as fast as ~20-25 mm/yr over mid-Holocene time scales (McMorran, 1991; Van Dissen and Yeats, 1991; Langridge et al., 2003).
Previous work shows that the Holocene-late Pleistocene slip rate of the Hope fault may have varied through time. Utilizing weathering-rind age estimates, Knuepfer (1992) suggested that slip rate along the Hope fault may have varied by as much as an order of magnitude over millennial time scales. More recently, to the west of our study area, where slip is partitioned between the Hope fault and southern Kakapo strand, Khajavi et al. (2018) documented similar variations in slip rate during Holocene time on the northern Hope fault strand.

The focus of this study is the Conway segment of the Hope fault. The Conway segment is a structural segment on the east-central part of the Hope fault bounded at its southwestern end by the ~7-km-wide transtensional Hanmer Basin (Wood et al., 1994) and at its northeastern end by a structurally complex transition in which most slip is thought to be transferred northeastward from the Hope fault onto the fast-slipping oblique-reverse faults of the Jordan-Kekerengu fault system (Van Dissen and Yeats, 1991; Van Dissen et al., 2016; Kearse et al., 2017) (Fig. 1B). The Conway segment has not generated a surface-rupturing earthquake since the arrival of European settlers in this part of New Zealand ca. 1840 CE. Although minor ground deformation was observed at an isolated location ~40 km east of the Hossack Station study site following the 2016 Mw 7.8 Kaikōura earthquake, the majority of the Conway segment did not exhibit any signs of surface rupture (Litchfield et al., 2018; Hatem et al., 2019). In contrast to the Conway segment, the Hurunui and Hope River segments of the Hope fault immediately west of the Hanmer Basin ruptured together in the 1888 CE Mw ~7.1-7.3 Amuri earthquake (McKay, 1890; Cowan, 1990, 1991; Cowan and McGlone, 1991; Khajavi et al., 2016).

[Figure 1 map labels: Subduction zone; Conway seg.; Porter's Pass-to-Amberley Fault Zone; Alpine Fault; Kelly Fault; Hurunui seg.; Hope R. seg.; Seaward seg.; Clarence Fault; Awatere Fault; Wairau Fault; Hikurangi Trench; Kekerengu Fault; Wairarapa Fault.]

Hossack Station Study Site

The study site is located on Hossack Station along the western part of the Conway segment, ~3 km east of the eastern end of the Hanmer Basin (Figs. 1B, 2). At the study site, the Hope fault is single stranded and strikes ~075°, extending across a broad valley filled by an aggradational fluvial deposit, which is capped by a planar, gently west-dipping terrace tread that we refer to as surface S1. Surface S1 is similar to other valley-filling fluvial gravel terraces that characterize many large river systems in northern South Island that have been dated at ca. 12-15 ka (Khajavi et al., 2016; Zinke et al., 2017, 2019). These ages support earlier depositional models suggesting that extensive fill terraces were deposited at the end of the Last Glacial Maximum, when large sediment loads exceeded stream capacity, leading to widespread aggradation (Lensen, 1968; Bull and Knuepfer, 1987; Bull, 1991, 2008). At the Hossack Station study site, surface S1 has been incised by channels flowing westward to the Hanmer River (Figs. 2A, 2B, 2D). One prominent stream that extends along the southern margin of the valley has looped back and forth across the Hope fault, yielding the progressive stream offsets that are the focus of this study.
The site was previously identified by Freund (1971) and studied by McMorran (1991), who both noted progressive offsets of this channel. Based on previous observations, we refer to this stream as the "Loops Stream." McMorran (1991) used observations in trenches, pits, and auger borehole transects, together with radiocarbon dating, to document a late Holocene (ca. 3.6 ka) slip rate of 18 ± 8 mm/yr.

We identify four progressive fault offsets of the Loops Stream at the Hossack Station site, as well as an older offset of the headwaters of Loops Stream and adjacent topography <1 km east of the main study site (Figs. 2B, 2C; 172.973001°). These progressive Hope fault offsets are documented here using detailed geomorphic mapping both in the field and through analysis of aerial photos and the high-resolution (>12 points/m²) lidar data that we collected in 2014 (https://doi.org/10.5069/G9G44N75, available at www.opentopography.org). We refer to these progressive offsets as offset A (youngest) to offset E (oldest).

To the south of the fault, the active Loops Stream course flows through a channel that has been incised by ~5 m into the S1 surface, with steep to near-vertical channel walls. The active stream (which we refer to as channel C1) locally flows northward nearly perpendicular to the fault and makes an abrupt, near-90° turn to the west where it meets, and flows westward along, the Hope fault (Figs. 2D, 2E). The sharply defined right-lateral offset of the incised channel wall on the outside of this 90° bend provides the youngest offset we identify (offset A; Fig. 3). An older alignment of the deeply incised channel C1 defines offset B (Fig. 3). Two still-older, now-abandoned, less-incised stream channel courses (channel C2 and channel C3) provide longer-term estimates for fault offset of the Loops Stream (offsets C and D, respectively; Fig. 3). We describe each of these four Loops Stream offsets in detail below, from oldest to youngest. We follow these descriptions with documentation of the oldest offset we observe at the Hossack Station site, which is defined by restoration of the Loops Stream headwaters and the displaced contact between the surface S1 fill-terrace fluvial gravels and the bedrock canyon walls against which they form a buttress unconformity. We refer to this oldest restoration as offset E (Fig. 2B).

To better define the geometric relationships between paleochannel morphology and the local Hope fault orientation, and to collect samples for radiocarbon and luminescence dating to constrain the ages of the offset channels C1-C3 as well as surface S1, we excavated three fault-parallel trenches and seven sample pits. We dated a total of 62 radiocarbon samples and eight post-infrared (post-IR) infrared stimulated luminescence (IRSL; see Supplemental Material) samples from these excavations. Radiocarbon samples, including detrital charcoal, wood, seeds, and other plant matter, were prepared using standard acid-base-acid pretreatment and analyzed at the University of California, Irvine, W.M. Keck accelerator mass spectrometer facility. Radiocarbon age results were then calibrated to calendric years using the most up-to-date Southern Hemisphere calibration curve, SHCal13 (Hogg et al., 2013), using the program OxCal (Bronk Ramsey, 2009). Luminescence samples were prepared and analyzed at the University of California, Los Angeles, using the newly developed post-IR-IRSL225 single-grain method (Rhodes, 2015; Lewis et al., 2017; Zinke et al., 2017; see Supplemental Material for explanation of method).
All age data were modeled using Bayesian statistics in OxCal. All radiocarbon and luminescence ages mentioned in the text and shown on the figures are in units of years before the year 2019 CE, with the exception of the oldest luminescence ages, which are listed in units of thousands of years ago (ka). In the following, our documentation of each of these five Loops Stream progressive offsets includes sections describing (1) the geomorphology and geometry of past streamflow, (2) age control on these different configurations of the Loops Stream, and (3) constraints on measurements of the fault offset. The reported uncertainties in sample ages are 2σ, as calculated using the program OxCal (Bronk Ramsey, 2009), and the reported uncertainties in offset determination are based on sedimentological and structural limits on possible streamflow geometries. We follow these descriptions with a detailed summary of all of our observations, synthesizing the data from all five offsets.

■ GEOMORPHIC, STRATIGRAPHIC, AND CHRONOLOGIC OBSERVATIONS OF OFFSET LOOPS STREAM CHANNELS

We begin by describing the four closely spaced stream offsets at the main (western) study site and follow with a description of the older offset of the Loops Stream headwaters east of the main site.

[Figure 2 caption (partial): Contour interval is 50 cm. Trenches and pits discussed in this manuscript are shown in blue; trenches not discussed herein are shown in gray. The yellow outline shows the area of panel E, and the shutter ridge, shown in blue shading, is located between the downstream extents of C2 and C3. (D) Oblique view, looking northwest, of the topography of the area outlined in white in panel A; a prominent valley-filling surface is shown in purple (S1), and a similar, relatively flat surface S1* is shown in pink at a slightly lower elevation (<2 m) than S1. (E) Oblique view, looking northwest, of the topography in the area outlined in yellow in panel C; channels C1-C3 are shown in decreasing saturations of gray overlay, offset restoration areas are labeled, and trench and pit locations are gray polygons. Backgrounds are shaded-relief digital elevation models from the lidar dataset referenced in the text. Graticule at map edges is in reference frame NZGD2000; center of field of view in each panel is ~-42.538057°, 172.974219°.]

Channel C3 Location, Geometry, and Stratigraphy

The largest of the four Loops Stream channel offsets we identify (offset D) is constrained by a combination of: (1) the preserved geomorphic expression of the now-partly buried channel course incised into the S1 surface along the downstream reach north of the fault; and (2) three-dimensional (3-D) exposures of the now-buried channel C3 deposits, including the erosional base of the channel and basal bedload gravels, in two fault-parallel trenches we excavated just north of (i.e., downstream from) the fault (Fig. 3, offset D panel). Although the incised course of channel C3 north of the fault has been partially filled in by post-high-energy streamflow deposits (see trench results in Fig. 4),
the ~20-m-wide downstream channel course is well preserved in the landscape north of the fault, where it is marked by discontinuous, ~1-m-tall remnants of the paleochannel walls where the channel incised into surface S1. The geomorphically defined channel walls indicate that the channel flowed northward across the fault before turning sharply to the west around the eastern nose of a 3-m-tall, 20-m-wide bedrock shutter ridge that extends for ~70 m along the fault (Fig. 2D). Farther west, the location of the geomorphically defined channel C3 course indicates that the downstream end of the channel flowed westward along a stream course that was subsequently reoccupied and more deeply incised by channel C2, a younger channel course that will be discussed below as part of offset C. The partially buried downstream course of channel C3 is beheaded at the fault.

To better define the stratigraphy of the C3 channel and the underlying and overlying deposits, as well as the geometry and location of C3 where it crossed the fault, we excavated two fault-parallel trenches (T-7 and T-10) just north of the fault crossing near the eastern end of the bedrock shutter ridge around which the channel flowed (Fig. 4). Both trenches exposed the erosional base of the channel, as well as the C3 channel high-energy bedload gravel deposits (dark gray units in Fig. 4) and overlying, predominantly fine-grained, post-channel-abandonment deposits that have partially filled in the incised channel course (pale gray units in Fig. 4). The C3 channel basal bedload deposits consist of brown, organic-rich, clast-supported pebble gravels. The gravel clasts are typically subangular and are infilled by a silty sandy matrix. Channel C3 bedload gravels onlap the eroded channel base where the channel incised into the eastern end of the bedrock shutter ridge, as seen on the western ends of the trench T-10 logs (Figs. 4B and 4C). Comparison of the elevations of the S1 fill-terrace surface and the erosional base of the C3 channel in trench T-7 indicates that the channel incised ~3 m down into the S1 surface. Channel C3 eroded down into an older unit consisting of angular to subangular, closely packed, clast-supported pebble gravel. This older, matrix-poor gravel does not exhibit any characteristics of significant fluvial transport (e.g., channelization, bedding, clast imbrication), suggesting that this gravel deposit may represent colluvium shed from the steep slope of the shutter ridge to the west of T-7 and T-10.

Both trenches document a fundamental shift in deposition at the top of the channel C3 basal bedload gravels. The C3 bedload gravels in both trenches T-7 and T-10 are overlain by gravelly sand deposits ~20 cm thick. Overlying these deposits in both trenches is a sequence of much finer-grained deposits, consisting mostly of silts and clays, with minor sparse gravel distributed through some beds. These fine-grained units are organic rich in places and contain 1- to 50-cm-long pieces of wood, including intact tree trunks and branches. Finally, the uppermost ~50 cm of trenches T-7 and T-10 contains thinly bedded peats that have undulatory contacts, perhaps suggesting that these layers were deformed by liquefaction processes during local earthquakes. In addition to defining the stratigraphy, trenches T-7 and T-10 constrain the fault-proximal depositional geometry of channel C3.
Trench T-10 exposed both banks of channel C3, whereas trench T-7 revealed only the western incised bank of channel C3; the eastern bank of the stream lies somewhere to the east of the eastern end of the trench (Fig. 4). The 345°-350° trend of the incised channel walls and thalweg, as measured in 3-D in the trenches, is in good agreement with the C3 trend measured using the high-resolution lidar topography data, and is plotted as navy-blue lines in Figure 3 (offset D panel). The channel width of C3 is narrow in trench T-10 just north of the fault (see meter marker ~2-6 m in Figs. 4B and 4C), and increased to at least 8 m as the stream flowed around the eastern end of the shutter ridge, as shown by the minimum extent of the channel C3 bedload gravels exposed in trench T-7 (see distance ~3-12 m in Fig. 4A).

South of the fault, the only possible upstream source channel for the downstream reach of channel C3 described above is the currently active Loops Stream channel (Fig. 2). In the area immediately to the south of the fault, post-channel C3 streamflow associated with subsequent Loops Stream downcutting during progressive fault offset (discussed in following sections) has eroded the channel area south of the fault and east of the interpreted initial incision of C3, which is expressed in the geomorphology as a highly linear, north-northwest-trending upstream incised drainage.

Age Control for Channel C3 and Associated Deposits

To constrain the ages of channel C3 initiation and abandonment, we collected radiocarbon and luminescence samples from (1) the basal, pre-channel C3 gravels that C3 incised into, (2) the C3 channel bedload gravels, and (3) the post-channel-abandonment fine-grained deposits, in trenches T-7 and T-10 (Table 1). Samples that yielded ages older than those recovered from underlying units were considered to reflect reworked material and were not included in our final age model. Our final OxCal age model for the stratigraphy exposed in trenches T-7 and T-10 included 19 radiocarbon samples and two post-IR-IRSL samples, which we describe from oldest to youngest (Fig. 5A).

[Table 1 notes: Keck AMS is the name of the facility at UC Irvine that ran the samples; AMS stands for accelerator mass spectrometer. OxCal is the community-standard model, and the method is discussed in the text (Bronk Ramsey, 2009). R_Date refers to the specific calculation (command) completed in OxCal; ± refers to 2σ. Negative ages represent B.C.E. ages (as opposed to positive ages representing C.E.) in the final column, or represent the incorporation of extremely young radiocarbon in the yr B.P. columns; dashes indicate that the model could not confidently converge on these values. Only radiocarbon data are included in the table; IRSL data are in the Supplemental Material. See discussion in the text and log figures regarding which samples were included or excluded.]

Two luminescence samples from the pre-channel C3 gravels into which the C3 channel incised yielded late Pleistocene ages of ca. 24 ka (samples HS15-L-16 from trench T-7 and HS17-L-01 from trench T-10) (see Section S1 in the Supplemental Material for details on luminescence age determinations). Radiocarbon sample HS17-35, also collected from the pre-channel C3 gravel (trench T-10), yielded a slightly younger age of ca. 19 ka (19.082-19.589 ka).
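The screening rule described above, in which any dated sample that is older than samples from underlying units is treated as reworked and excluded, can be summarized in code. The sketch below is illustrative only: it assumes one representative age per sample and a simple depth ordering, the sample IDs and ages are hypothetical, and it is not part of the OxCal modeling itself.

```typescript
interface DatedSample {
  id: string
  depthM: number // larger = stratigraphically lower (older position)
  ageKa: number  // representative calibrated age, in ka
}

// Keep only samples no older than every sample beneath them; anything
// older than underlying material is treated as reworked and dropped.
const screenReworked = (samples: DatedSample[]): DatedSample[] => {
  const kept: DatedSample[] = []
  let youngestBelow = Infinity
  for (const s of [...samples].sort((a, b) => b.depthM - a.depthM)) {
    if (s.ageKa <= youngestBelow) {
      kept.push(s)
      youngestBelow = s.ageKa
    }
  }
  return kept // ordered deepest (oldest) first
}

// Hypothetical ages (ka): S3 is older than the sample beneath it, so it
// is dropped, mirroring the treatment of sample HS15-P7-11 below.
console.log(screenReworked([
  { id: "S1", depthM: 3.0, ageKa: 19.3 },
  { id: "S2", depthM: 2.5, ageKa: 9.0 },
  { id: "S3", depthM: 2.0, ageKa: 11.6 },
  { id: "S4", depthM: 1.5, ageKa: 8.6 },
]))
// -> S1, S2, S4 retained; S3 excluded
```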
We dated 16 radiocarbon samples from the overlying channel C3 bedload gravel deposit from both walls of trench T-10 as well as the north wall of trench T-7. These samples from the C3 channel bedload gravels are significantly younger than the underlying gravels into which channel C3 was incised. Specifically, the oldest sample from near the base of the channel C3 bedload gravels (sample HS15-P7-11, collected from trench T-7) yielded an age of 11.473-11.829 ka, and the youngest sample collected from the C3 channel bedload gravels (sample HS15-P7-12, collected from trench T-7, ~10 cm stratigraphically above the level of sample HS15-P7-11) yielded an age of 8.527-8.695 ka. We exclude sample HS15-P7-11 from the trench T-7 and T-10 age model because this sample is ~2-3 k.y. older than underlying samples from the same unit, indicating that this sample is older than the depositional age of the unit. Most samples collected from the channel C3 bedload gravel unit stratigraphically above sample HS15-P7-11 (i.e., samples HS15-13, HS15-14, HS17-26, and HS17-8) were collected from similar stratigraphic depths in the two trenches and yielded similar ages. The similarity of these ages suggests that there had been minimal reworking and that the samples had incorporated little pre-depositional material. To minimize the effect of skewing the incision age in the younger direction based on radiocarbon material that was included in the channel following initial incision, we select the oldest of these sample ages, that of sample HS15-13, as the representative sample dating the initial incision of channel C3 at 9.337-9.515 ka.

Restoration of Channel C3 and Offset D

As noted above, the downstream reach of channel C3 is beheaded at the fault, indicating that the upstream part of the original C3 channel has been offset right-laterally by Hope fault slip. The only possible upstream (i.e., south of the fault) source drainage for the downstream reach of channel C3 is the currently active, deeply incised Loops Stream channel C1 (Fig. 3, offset D panel).

[Figure 5 caption: OxCal age models of trenches T-7 and T-10 (A) and trench T-4 (B), and a site-wide preferred age model combining samples collected from all three trenches as well as from pits 5 and 9 (C). Models were run in OxCal v4.4.1 (Bronk Ramsey, 2020) using the SHCal13 atmospheric curve (Hogg et al., 2013). The lighter gray probability density function (PDF) for each sample denotes the calibrated age, and the darker gray PDF in front shows the modeled 2σ age range. Trench number and wall direction, if applicable, follow sample numbers.]

Restoration of the two once-continuous offset channel segments allows us to determine the total displacement of the C3 channel since initial incision of the channel into the S1 surface ca. 9.4 ka. Key to this restoration is the use of the geomorphically defined limits of the location and geometry of initial incision of the upstream reach of the C3 channel into the S1 surface south of the fault. This part of the Loops Stream channel has been modified and eroded during subsequent channel-cutting episodes. Because of this erosion, we use the geometry of the topographically highest edges of the preserved western and eastern stream banks along the incised upstream reach of the Loops Stream, which record the widest-possible limits of initial incision into the S1 surface during channel C3 time, when the stream was flowing through the downstream part of the channel exposed in trenches T-10 and T-7, to measure the maximum- and minimum-possible offsets of the C3 channel. Specifically, we use these widest-possible limits on the location of the C3 channel upstream of the fault, together with the 3-D geometry of the C3 channel exposed in trench T-10 just downstream of the fault, to constrain the fault offset.

The largest-possible offset is defined by restoring the western bank of the channel, as exposed in 3-D in both trenches T-7 and T-10 (blue line south of the orange triangle north of the fault on Fig. 3, offset D panel), to the current top of the western bank of channel C3 marking initial incision into the S1 valley-fill deposit south of the fault (note the nearly north-south topographic contour north of the orange triangle south of the fault on Fig. 3, offset D panel). The resulting maximum-possible offset of the western bank of channel C3 is 152 m. Similarly, to measure the smallest-possible offset of channel C3, we restore the trend of the incised eastern bank exposed in 3-D in trench T-10 (blue line south of the orange triangle north of the fault on Fig. 3, offset D panel) relative to the north-northwest-trending, linear trend of the initial channel C3 incision into the S1 surface south of the fault (highlighted in pale blue shading north of the orange triangle south of the fault on Fig. 3, offset D panel). The resulting minimum-possible offset of the eastern bank of channel C3 is 146 m. Our "preferred" offset for the C3 channel of 149 m is simply the average of the minimum and maximum estimates of offset, given that subsequent incision and erosion by the Loops Stream has destroyed any further evidence of channel C3 in this area. The resulting offset is expressed as 149 ± 3 m.
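Because the preferred value is simply the midpoint of the minimum and maximum restorations, the offset and its symmetric uncertainty follow directly from the two bounds quoted above; a quick check of the arithmetic:

```typescript
// Offset D bounds from the channel C3 restoration, in meters.
const dMin = 146
const dMax = 152
const preferred = (dMin + dMax) / 2  // midpoint: 149 m
const halfRange = (dMax - dMin) / 2  // symmetric uncertainty: 3 m
console.log(`offset D = ${preferred} ± ${halfRange} m`) // "offset D = 149 ± 3 m"
```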
Channel C2 Location, Geometry, and Stratigraphy

As with the older channel C3, the downstream segment of the C2 channel is beheaded at the fault. Downstream from the fault, the C2 paleochannel course is well preserved in the geomorphology, manifested by prominent, ~1-m-tall paleo-stream banks that are incised into the S1 surface (Fig. 3, offset C panel). These banks define an ~20-m-wide stream course extending west-northwestward (285°-295°) from the fault near the western end of the fault-parallel bedrock shutter ridge (Figs. 2C-2E; Fig. 3, offset C panel). South of the fault, the topographically highest incision into the S1 surface to the west of the currently active C1 channel is marked by a linear edge trending 285°-290°, which is traceable for >30 m to where it merges upstream with the uppermost part of the deeply incised, southwestern bank of the active C1 channel (Figs. 2C-2E; Fig. 3, offset C panel). The linear paleo-stream banks preserved in the geomorphology north of the fault (west of the shutter ridge) and south of the fault (east of the shutter ridge, in present-day topography) are the only two stream-related geomorphic features at the Hossack Station site to exhibit an ~285° trend. All other stream-related features exhibit much more northerly trends near the fault. We therefore interpret the incised stream bank trending 285°-290° south of the fault as the upstream reach of the C2 channel, which is now offset from the downstream reach of C2 north of the fault.

We excavated trench T-4 approximately perpendicular to the geomorphically well-defined C2 channel course ~15 m north of the fault in order to expose the 3-D orientation of the C2 channel downstream of the fault, document the stratigraphy associated with C2 channel streamflow, and collect samples for dating (Fig. 6).
Trench T-4 exposed a well-defined erosional channel cut, with the southwestern edge merging upward into the geomorphically defined C2 channel margin; we did not expose the northeastern margin of the C2 stream bank in trench T-4. The channel C2 clast-supported bedload gravel deposit filling in the base of this incised channel consists of subangular pebbles with a silt to sand matrix (dark gray units on Fig. 6). These gravels were incised into a flat-lying, well-bedded sequence of gravels and gravelly silts interspersed with silt and clay beds (medium gray units on Fig. 6). Overlying the C2 channel bedload gravels is a sequence of organic-rich clays and peats with local gravelly silt lenses (pale gray units on Fig. 6). As with our interpretation of the older C3 channel sequence exposed in trenches T-7 and T-10, we interpret this upward transition in trench T-4 from deposition of C2 channel bedload gravels to much finer-grained, organic-rich deposits as recording abandonment of high-energy streamflow through the C2 channel and subsequent post-channel-abandonment deposition and partial infilling of the incised C2 channel by much lower-energy slack-water deposits.

Using exposures of the channel thalweg and southwestern edge of the incised C2 channel observed in both walls of trench T-4, we measured the trend of the C2 channel as 285°-290°. This is similar to the geomorphically defined trend of the southwestern channel margin between T-4 and the fault (pale blue line north of the fault in Fig. 3, offset C panel), as well as the trend of the interpreted southwestern margin of the C2 channel south (i.e., upstream) of the fault (pale blue line in Fig. 3, offset C panel). In addition, this northwestern trend of both the southwestern channel edge and thalweg of C2 is in good agreement with the trend of the same features documented by McMorran (1991) in a channel-perpendicular auger transect and trench located ~15 m east-southeast of trench T-4 (purple polygons in Fig. 3, offset C panel). We projected the trends measured in the geomorphology and in our trench T-4 through the observation areas of McMorran (1991), and together these four observation areas confirm the orientation of the downstream reach of C2 north of the fault, which closely matches the northwestern trend of the topographic feature incised into the S1 surface south of the fault that we interpret as the southwestern channel bank of C2 upstream of the fault.

To determine the age of channel C2 incision, as marked by the erosional base of the channel, we used OxCal to calculate the boundary age between sample HS15-50, collected from below the C2 channel bedload gravels, and overlying bedload gravel sample HS15-54. The resulting OxCal boundary age of 4.615-5.274 ka records initial incision of the C2 channel into the underlying deposits. This age of initial C2 channel incision is nearly contemporaneous with, but slightly younger than, the age of channel C3 abandonment recorded in trench T-7 (sample HS15-3; 5.391-5.646 ka). Similarly, we can constrain the abandonment age of the C2 channel by using samples from higher in the bedload gravels in comparison with samples from the overlying fine-grained section. Younger samples collected from stratigraphically high in the C2 channel bedload gravels include samples HS15-33 (2.932-3.104 ka) and HS15-8 (1.557-1.654 ka).
Several other samples collected from the channel C2 bedload gravels (e.g., sample HS15-5; 6.061-6.254 ka) yielded ages older than or contemporaneous with the samples collected from the underlying channel-cut facies. The age of sample HS15-8, which was collected from near the top contact of the channel C2 bedload gravels, is very similar to the 1.680-1.804 ka age of sample HS15-24, which was collected from near the base of the overlying organic-rich, post-channel-abandonment fine-grained section. We calculate a boundary age of channel C2 abandonment between samples HS15-8 and HS15-24 of 1.693-1.756 ka (Table 1).

[Figure 6 caption (partial): Samples utilized in the composite age model discussed in this manuscript are boxed. Digits along contacts are the uncertainty (in cm) of mapping the given contact.]

Restoration of Channel C2 and Offset C

We use the 285°-295° trend of both the beheaded, downstream segment of the C2 channel and the upstream incised southwestern channel edge to reconstruct the offset of paleochannel C2. Specifically, restoration of the southwestern stream bank of channel C2, as preserved in the present-day geomorphology and observed in our trenches and McMorran's (1991) excavations, requires 101 m of back-slip to align the upstream and downstream segments of the channel trending 285°-295° (Fig. 3, offset C panel). The thalweg of channel C2 provides a secondary constraint on this offset (dashed darker blue line north of the fault on Fig. 3, offset C panel); the thalweg is poorly expressed in the geomorphology of the upstream channel segment. The ±3 m uncertainty of our channel C2 restoration encompasses the full map width of the northeast-facing southwestern bank of C2 preserved south of the fault (Fig. 3, offset C panel). This error range takes into account measurement error when the southwestern bank was projected in the field, both within trench T-4 and from trench T-4 to the projected location of McMorran's (1991) long-since backfilled trench. The relatively small error range (±3 m) reflects the near-identical trends derived from the projection of multiple measurement points along the southwestern edge of channel C2 north and south of the fault, both in the field and in the high-resolution lidar topographic data that captured this same trend in the landscape.

Channel C1 Location, Geometry, and Stratigraphy

Unlike older channels C2 and C3, the modern channel, channel C1, is not completely beheaded at the fault. Rather, the deeply incised C1 channel still accommodates active streamflow, albeit only along the westernmost part of the incised channel course at the fault crossing, where the channel makes a near-90° bend to the west (Figs. 2C, 2E). Channel C1 has incised ~5 m into the regional S1 surface, much more deeply than the older C2 and C3 channels. We interpret that this deep incision event, which occurred after abandonment of channel C2, was likely linked to local downstream relative base-level changes associated with the Hanmer River, the major, west-flowing river into which the Loops Stream flows ~1 km west of the study site (Figs. 2A, 2D). We recognize two offset channel configurations for the deeply incised C1 channel at the study site. We refer to these older channel geometries as C1A (younger) and C1B (older).

Channel C1B and C1A Location, Geometry, and Stratigraphy

South of the fault, the western edge of the deeply incised upstream reach of channel C1 is notably linear and extends north-northwest for >20 m to the fault crossing (Figs. 2C, 2E; Fig. 3, offset A and offset B panels).
The eastern stream bank of channel C1 is similarly linear and parallel to the western bank from 15 to 30 m south of the fault. We refer to this highly linear upstream channel segment, which flowed northward almost perpendicular to the fault, as channel C1B. As discussed in a following section, restoration of this channel geometry defines offset B (Fig. 3, offset B panel). Within ~15 m of the fault, the area to the east of where the linear upstream reach of the C1 channel would project has been eroded out down to the local (modern) C1A channel base level, indicating that this more recent erosion occurred after the initial deep channel C1 incision event associated with the linear channel C1B stream course (Fig. 3, offset B panel). North of the preserved linear upstream eastern stream bank, the edges of this eroded area are defined by prominent, ~4-5-m-tall incised stream banks. The incised stream bank curves sharply to the east-northeast as it approaches the fault, and at the fault it turns sharply westward downstream, extending parallel to, and ~5-10 m north of, the Hope fault. This resulted in the development of a sharply curved, concave-to-the-southeast, incised stream bank at the fault crossing (Figs. 2C, 2E; Fig. 3, offset A panel). Streamflow along the east-northeast-trending upstream reach and through this sharp corner defines the geometry of channel C1A. As discussed in a following section, restoration of the offset of this stream geometry defines our youngest offset, offset A (Fig. 3, offset A panel).

We excavated a 1.5-m-deep pit (pit 9) into the protected, concave northeastern corner of the incised channel C1B course to document the stratigraphy and collect samples for dating the abandonment of bedload gravel deposition in this now low-flow to no-flow portion of channel C1 (Fig. 7). In his study, McMorran (1991) excavated a north-trending trench (his trench 2) in this same area; our pit 9 was hand dug ~3 m east of that earlier trench. The pit 9 exposure revealed densely packed, coarse-pebble gravel below ~1 m depth, overlain along a sharply defined, planar contact by ~50 cm of massive, dark gray clayey sandy silt, which extended all the way upward to the base of the weakly developed A/C soil profile. As we discuss in the descriptions of the other Loops Stream offsets, we interpret this abrupt upward transition from high-energy fluvial gravel deposition to deposition of fine-grained muds indicative of very low-energy conditions as marking the abandonment of active, erosive streamflow with sufficient stream power to move gravel bedload, and subsequent passive infilling of the channel with fine-grained, suspended-load sediments.

Age Control for Channel C1B and Associated Deposits

The age of initial incision of channel C1 can be estimated from the timing of abandonment of flow through channel C2. This is implied by the lack of any other possible new channels incised either northwest or north-northwest spatially between channels C2 and C1. Additionally, we observed evidence of channel C2 streamflow along the fault during progressive right-lateral deflection of the channel during Hope fault offset. Specifically, there is a slight fault-parallel depression along the fault to the west of the geomorphic expression of the incised channel C2 (including trench T-4), indicating along-fault flow of channel C2. We exposed this channel in cross-section by excavating trench T-3 (Fig. S2).
This trench exposed a fault-parallel (~075°) channel. Flow of channel C2 through trench T-3 supports a channel geometry in which channel C2 was flowing along the fault during its progressive right-lateral deflection by the Hope fault up until channel C2 abandonment and simultaneous initiation and incision of channel C1. The age of abandonment of C2, the stream course that immediately predates the C1 channel course, is derived from sample HS15-24, collected from trench T-4, which yielded an age of ca. 1.5-1.6 ka. Samples were also collected from the C1 gravels exposed in pit 9, but we were unable to sample any deposit depositionally older than the C1 gravels. Therefore, any sample collected from the pit 9 C1 gravels would represent a minimum age of C1 incision, whereas the age of C2 abandonment likely provides a more accurate estimate of C1 incision. Pit 9 C1 sample ages will be discussed in the section titled "Age Control for Channel C1A and Associated Deposits."

Restoration of Channel C1B and Offset B

Initial incision of channel C1 was oriented north-northwest and utilized the deeply incised upstream thalweg south of the fault. This upstream reach was first incised during channel C3 activity and was later reactivated during C1 incision. The eastern bank of this north-northwest incision is restored by back-slip of 29 m. This restoration aligns the eastern bank of C1 cutting across the fault as C1 begins to flow southwest parallel to the fault. Our ±1.5 m lateral uncertainty in the offset measurement encompasses ±1 m of potential change in the eastern bank elevation across the fault (the lateral uncertainty encompasses two contour lines on the 50-cm-contour-interval maps presented in Fig. 3, offset B panel). These error bounds provide the maximum and minimum values of sedimentologically plausible fault offset. Any offset restoration value smaller than this 29 ± 1.5 m range would result in initial channel incision having occurred during avulsion out of an east-northeast-flowing, fault-parallel "S" bend in the channel, a geometry that we consider to be sedimentologically implausible. We illustrate this unlikely geometry in Figure S3, which shows the stream geometry that would be required if initial avulsion out of the C1 channel across the fault occurred at an offset restoration of 14 m. There is no preserved evidence, either in the geomorphology or in the trench logs, to support the idea that initial incision of channel C1 occurred in such an "S" shape. As such, we do not consider stream geometries that require <27.5 m of offset restoration, which we consider to be the lower end of sedimentologically plausible stream geometry at the time of initial avulsion and incision of the C1B channel across the fault.

Age Control for Channel C1A and Associated Deposits

We collected two radiocarbon and three IRSL samples from pit 9 to constrain the age of the most recent high-energy channel C1A streamflow through the site. The two charcoal samples were collected from 46 cm depth within the silty clay unit, <5 cm above the contact with the underlying 50-cm-thick gravelly clay unit, which in turn overlies the basal, high-energy bedload gravels. Collectively, samples HS15-18A, -18B, and -20 yielded an age range of 0.374-0.569 ka. Because these ages come from within the post-channel-abandonment silty clays >50 cm stratigraphically above the channel bedload gravels exposed in the base of pit 9, they provide a conservative minimum age constraint of ca.
375-570 yr on the most recent high-energy streamflow across the site of pit 9. One of the three IRSL samples (HS15-L-19) was collected from the gravelly silty clay at 98 cm depth, just above the sharp contact with the underlying channel C1B bedload gravels, whereas the other two were collected from the uppermost bedload gravels (sample HS15-L-18 at 109 cm depth) and from deeper within the gravel deposit (sample HS15-L-20 at 138 cm depth), respectively. The post-IR-IRSL225 ages calculated using the methodology of Rhodes (2015) are 1.270 ± 0.190 ka (sample HS15-L-19), 3.500 ± 0.310 ka (sample HS15-L-18), and 4.940 ± 0.440 ka (sample HS15-L-20). The ages from the two lower samples are apparently out of stratigraphic order, given that these samples are older than the abandonment of channel C2 yet represent flow through the younger channel C1. Additionally, the single-grain age plots reveal a complicated distribution of ages suggestive of a continuum of partially bleached signals (Section S1). We suspect that samples HS15-L-18 and HS15-L-20 reflect a process known as "shadowing," in which numerous apparently older grains were only partially or incompletely bleached during the youngest depositional event, which is the event of interest (i.e., flow through channel C1). Because these grains are only partially bleached, they do not record the younger age of interest, and instead make the calculated age apparently older, because these partially bleached grains cannot statistically be differentiated from the youngest grains, which likely date the depositional event of interest.

In order to overcome the limitations presented by incomplete bleaching and shadowing, the IR50 signals (see Supplemental Material), which are bleached in sunlight more rapidly, were examined for these two samples. For the uppermost sample (HS15-L-19), an IR50 age of 1.090 ± 0.080 ka was determined. The equivalent dose estimates for the IR50 and post-IR-IRSL225 signals are well within 1σ uncertainty of each other, indicating there is no significant difference between the two estimates. We have reason to be confident in the thermal stability of the post-IR-IRSL225 age, demonstrated by comparison with independent age assessments at a range of different sites (Rhodes, 2015), and the apparent close agreement between these age estimates suggests no significant (i.e., beyond uncertainty limits) instability of the IR50 signal (i.e., fading) for grains in this catchment. The IR50 ages of these samples are 1.580 ± 0.130 ka for HS15-L-18 and 1.530 ± 0.140 ka for HS15-L-20. Based on the confirmation of acceptable IR50 signal stability provided by sample HS15-L-19, we have confidence that the IR50 age estimates for the two lower samples within this section are representative of the youngest depositional (i.e., bleaching) event recorded by these gravel deposits and therefore represent the timing of active flow through channel C1.

The IR50 age of deeper sample HS15-L-20 almost exactly matches the abandonment age of channel C2 documented independently in trench T-4, supporting the idea that the IR50 ages likely provide better estimates of the ages of the sediments in pit 9 (see Supplemental Material), and are most comparable to our well-constrained ~19 mm/yr offset C slip rate averaged over the past 5.4 k.y. These earlier, faster late Holocene rates are thus not in conflict with our slower, longer-term average slip rate of the Conway segment of the Hope fault.
These comparisons reinforce our basic observation that slip rate along the Conway segment of the Hope fault was slower during latest Pleistocene-early Holocene time (~12-14 mm/yr) and faster during the mid- to late Holocene (~19 mm/yr).

Comparison of the Conway Segment Hossack Station Incremental Slip-Rate Record to the Late Holocene Hurunui-Hope River Incremental Slip-Rate Record

The new Hossack Station rates also facilitate comparisons with earlier estimates of incremental slip rate on the Hope fault. The only other incremental slip-rate record available for the Hope fault comes from Khajavi et al. (2018), who combined previously measured offsets from Cowan (1990), Cowan and McGlone (1991), and Langridge and Berryman (2005), as well as small offsets newly measured from lidar data in their study, with geochronologic constraints from these previous studies and a nearby paleoseismologic study (Khajavi et al., 2016) to document late Holocene slip-rate changes during the past ~2.3 k.y. Specifically, Khajavi et al. (2018) used the paleoearthquake timing of Khajavi et al. (2016), combined with a compilation of their small-offset measurements from the Hope River-Hurunui section of the fault, to determine earthquake-by-earthquake incremental slip for the past 1.6 k.y. In addition, they compared the rates derived from these individual earthquake offsets with a ca. 2.3 ka rate based on cumulative fault displacements and surface ages measured at three sites along the Hurunui segment (Cowan, 1990; Cowan and McGlone, 1991; Langridge and Berryman, 2005). Based on these data, Khajavi et al. (2018) reported a fast average rate of 25 +3.4/−3.2 mm/yr between ca. 1.6 and 2.3 ka. The cumulative slip constraint used on the young end of this interval overlaps with two closely spaced paleoearthquakes identified by Khajavi et al. (2016) at ca. 1.5 and 1.6 ka; these two events were followed by four younger earthquakes. Using the data presented by Khajavi et al. (2018), and assuming that their fifth and sixth earthquakes back mark the end of a previous fast period extending back to at least 2.3 ka, there is a marked inflection point in the rate of coseismic strain release along the Hurunui-Hope River segments of the Hope fault at ca. 1.5 ka. Prior to 1.5 ka, the Khajavi et al. (2018) data set suggests a slip rate of ~30 mm/yr, encompassing ~26 m of slip between 1.5 ka and 2.3 ka, which was followed by a much slower rate of ~6 mm/yr based on ~8 m of slip occurring during the four youngest events, including the historic 1888 CE Amuri rupture. Although the time spans of the incremental rates we measured at the Hossack Station site do not exactly match those discussed by Khajavi et al. (2018), both studies reveal a similar pattern of relatively slow slip rate during the latest Holocene (since 1.5 ka along the Hurunui-Hope River segments and since 1.1 ka at the Hossack Station site on the Conway segment) that was preceded by a period of faster slip (extending from 1.5 ka to at least 2.3 ka along the Hurunui-Hope River segments, versus a very fast interval between 1.1 and 1.6 ka and a fast but slightly slower incremental rate between 1.6 ka and 5.4 ka at the Hossack Station site). Thus, patterns of incremental slip-rate variability along the Hope fault appear to be relatively consistent across the major structural discontinuity of the Hanmer pull-apart basin over time scales that span multiple earthquakes and tens of meters of slip.
Although the relative pattern of incremental slip-rate variability may be consistent across the Hanmer Basin, the absolute measurements of cumulative slip along the Hurunui-Hope River segments are about a factor of two less than those observed in our study along the Conway segment. This is illustrated by the 1.6 ka offset estimates that are common to both studies. Specifically, Khajavi et al. (2018) reported ~15 m of fault slip since ca. 1.6 ka on the Hurunui-Hope River segments of the Hope fault, compared to our preferred slip estimate of 29 m since 1.6 ka (offset B) at the Hossack Station site. Much of this difference is likely due to the fact that the Hope fault is double stranded in the area of the Khajavi et al. (2018) measurements; the more southerly Kakapo strand of the Hope fault extends westward for a distance of ~40 km, subparallel to the northern strand segments, from the Kakapo fault-Hope River strand intersection at Glynne Wye (Fig. 1B). Thus, the Khajavi et al. (2018) rates do not span the entire width of the Hope fault zone. Assuming that there is no difference in temporal strain accumulation patterns along the Hope fault on either side of the Hanmer Basin, the apparent consistency in rate variability relative to the mismatch in cumulative displacement between the single-stranded Conway segment and the northern Hurunui-Hope Shelter strand of the double-stranded Hope fault to the west suggests that the difference in total dextral slip (~14 m since ca. 1.6 ka, equivalent to roughly 9 mm/yr) may be taken up by slip that is partitioned onto the southern, Kakapo segment of the Hope fault system. If correct, this would suggest that the slip rate of the Kakapo strand may be somewhat faster than previous estimates of 4.4-8.4 mm/yr averaged over the past ~5.3 k.y. (Knuepfer, 1988, 1992) and 4.7-8.0 mm/yr averaged since ca. 17 ± 2 ka (Cowan et al., 1989), at least during late Holocene time.

The broadly synchronous along-strike changes in late Holocene incremental rate along the Conway and Hope River-Hurunui segments suggest coordinated waxing and waning of slip rate along the entire Hope fault. Although this behavior would not be surprising for single earthquake sequences, which indeed likely occur along the Hope fault, these accelerations and decelerations in Hope fault slip span multiple earthquakes and tens of meters of fault slip. This observation indicates that whatever controls this nonconstant slip-rate behavior must operate over time spans longer than single earthquake cycles. The exact mechanisms that control such behavior remain incompletely understood, but could be related to changes in the strength of the fault through time (e.g., Dolan et al., 2007, 2016; Oskin et al., 2008; Zinke et al., 2017, 2019) and/or changes in the rate of elastic strain accumulation, which in turn could be controlled by either system-level tradeoffs among mechanically complementary faults within complex plate-boundary fault systems (e.g., Dolan et al., 2016; Wedmore et al., 2017) or changes in relative plate motion rates (e.g., Anderson, 1975; Pollitz, 1986; Romanowicz, 1993; Dolan et al., 2016; Meade and Loveless, 2017).
Whatever the exact cause of the variable incremental slip rate on the Hope fault, the new Hossack Station data add to a growing body of evidence that such slip-rate variations may be more common than previously thought along some faults (e.g., Wallace, 1987; Friedrich et al., 2003; Weldon et al., 2004; Dolan et al., 2007, 2016; Sieh et al., 2008; Gold and Cowgill, 2011; Goldfinger et al., 2013; Ninis et al., 2013; Onderdonk et al., 2015; Zinke et al., 2017, 2019; Khajavi et al., 2018). Future documentation of additional detailed, well-dated incremental slip-rate records from many more faults will provide constraints on the variability (or constancy) of fault slip rates that will, in turn, facilitate a more thorough understanding of the mechanical controls on spatial-temporal patterns of fault slip.

Implications for Probabilistic Seismic Hazard Modeling

Geologic slip rate is one of the most basic inputs for probabilistic seismic hazard analysis codes (e.g., Stirling et al., 2012; Dawson and Weldon, 2013; Field et al., 2013; Petersen et al., 2015). Typically, a multi-millennial average slip rate is preferred, because such a rate is thought to capture the overall behavior of a fault. However, the variability of incremental slip rates presented in this manuscript highlights the conundrum currently facing modeling decisions when slip rates must be selected as inputs for deformation models. For instance, with the Hossack Station data set, selecting the long-term rate (the ca. 13.8 ka rate averaged through the present day) underestimates the 5.4 ka average rate, which is another multi-millennial, "long-term" rate (Fig. 11D). Additionally, as previously discussed, both of these multi-millennial rates (slip rates BC and CD) are slower than the centennial rate AB (Fig. 11D). The slip rate for the Conway segment of the Hope fault as currently used in the 2010 New Zealand National Seismic Hazard Model (fault segment 403) is 20 mm/yr (Stirling et al., 2012). This rate is based on a combination of a mid-Holocene slip rate (Langridge et al., 2003) and a rate based on potentially unreliable weathering-rind age control (Knuepfer, 1992). The well-dated, longer-term (latest Pleistocene-Holocene) Conway segment slip rate of ~15 mm/yr (yellow swath on Fig. 11D) that we document in this paper is slower than the currently utilized value by ~25%. As noted above, although the longer-term rate we measure is less than the currently used value for slip rate along the Conway segment, the Hossack Station mid- to late Holocene slip rate BC (green swath on Fig. 11D, spanning 1.6 ka to 5.4 ka) is a closer match to the 20 mm/yr rate utilized in the New Zealand National Seismic Hazard Model. The fastest rate recorded at Hossack Station (rate AB of ~32 mm/yr) far exceeds the rate used for probabilistic seismic hazard analyses. Which slip rate (short-term, long-term, or a combination) best represents how a fault is behaving at present day, and how it will continue to behave in the near future, remains an open question. As deformation models and probabilistic seismic hazard codes advance to allow the use of higher-resolution geologic input data, variability in incremental slip rates, such as that documented herein along the Hope fault at the Hossack Station site as well as at other sites along the Hope fault (e.g., Khajavi et al., 2018), may be utilized to determine time-dependent hazard within regional plate-boundary fault systems (Van Dissen et al., 2020).
■ CONCLUSIONS

New incremental rate data demonstrate that the slip rate of the Conway segment of the Hope fault, the main plate-boundary strike-slip fault in this part of the South Island section of the Pacific-Australia plate boundary, has varied significantly during Holocene-latest Pleistocene time. These incremental slip rates range from a latest Holocene (1.1 ka-present) rate of 8.2 +2.7/−1.5 mm/yr, to a rate of 32.7 +124.9/−10.1 mm/yr averaged over 1.6-1.1 ka, to 19.1 ± 0.8 mm/yr between 5.4 and 1.6 ka, to 12.0 ± 0.9 mm/yr between 9.4 and 5.4 ka, to 13.7 +4.0/−3.4 mm/yr from 13.8 to 9.4 ka. We observe variation by a factor of ~1.5-4 within this slip-rate data set, similar to other faults in the Marlborough fault system, as observed in incremental slip-rate records measured using cumulative slip measurements along the Awatere (Zinke et al., 2017) and Clarence faults. These data add to a growing body of evidence that slip rate on some faults varies considerably over displacement scales of tens to hundreds of meters, and may vary in complex patterns across Hope fault segments, as shown by a comparison of our record of incremental slip rates to previous slip-rate estimates from farther west along the Hurunui-Hope River segments. Furthermore, our results, when combined with slip-rate estimates from Khajavi et al. (2018), suggest the potential recent (ca. 1.6 ka) importance of the Kakapo fault in strain partitioning within the Hope fault system. Not only are such variations in geologic slip rate of critical importance for understanding the mechanics and earthquake behavior of major fault systems, they are also of great importance in probabilistic seismic hazard analysis, in which fault slip rate represents a primary model input. Variable slip rates such as those that we document for the Hope fault are not currently represented in probabilistic seismic hazard analyses due to the current configuration of deformation and hazard modeling codes. Incorporation of variable rates into probabilistic hazard assessments is, however, a current direction in probabilistic seismic hazard research (e.g., Zeng, 2018; Hatem et al., 2020; Van Dissen et al., 2020). The Hossack Station record highlights the necessity of understanding the incremental displacement history (i.e., the dated path) of major faults in order to more fully understand how plate-boundary fault systems accommodate relative plate motion in time and space.
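As an arithmetic check on two of these intervals, the sketch below recomputes the rates between offsets B (29 ± 1.5 m at ca. 1.6 ka), C (101 ± 3 m at ca. 5.4 ka), and D (149 ± 3 m at ca. 9.4 ka) by Monte Carlo sampling of the offset bounds. This is an illustration only: it treats the quoted offset ranges as uniform distributions and holds the ages fixed at the interval endpoints, so it only approximates the full uncertainty treatment in the OxCal-based age models.

```typescript
// Cumulative offset markers from the text: [label, age (ka), offset (m), half-range (m)].
const markers: [string, number, number, number][] = [
  ["B", 1.6, 29.0, 1.5],
  ["C", 5.4, 101.0, 3.0],
  ["D", 9.4, 149.0, 3.0],
]

const uniform = (lo: number, hi: number) => lo + Math.random() * (hi - lo)

// Incremental rate between successive markers, in mm/yr
// (1 m per k.y. is numerically equal to 1 mm/yr).
const draws = 100_000
const rates: Record<string, number[]> = {}
for (let i = 0; i < draws; i++) {
  const offsets = markers.map(([, , off, hr]) => uniform(off - hr, off + hr))
  for (let j = 1; j < markers.length; j++) {
    const key = `${markers[j - 1][0]}-${markers[j][0]}`
    const rate = (offsets[j] - offsets[j - 1]) / (markers[j][1] - markers[j - 1][1])
    if (!rates[key]) rates[key] = []
    rates[key].push(rate)
  }
}

for (const [key, r] of Object.entries(rates)) {
  const mean = r.reduce((a, b) => a + b, 0) / r.length
  const sd = Math.sqrt(r.reduce((a, b) => a + (b - mean) ** 2, 0) / (r.length - 1))
  console.log(`rate ${key}: ${mean.toFixed(1)} ± ${(2 * sd).toFixed(1)} mm/yr (2σ)`)
}
// Expected means: rate B-C ≈ 18.9 mm/yr and rate C-D ≈ 12.0 mm/yr, consistent
// with the 19.1 ± 0.8 and 12.0 ± 0.9 mm/yr intervals reported above.
```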
SAN DIEGO — The talking heads on MLB Network ran through a segment on the Cubs that spelled it out with a “SHAKE IT UP” on-screen graphic: Call up Javier Baez, make him your third baseman and shift Kris Bryant to left field. Starlin Castro didn’t look up from his phone on Wednesday afternoon, relaxing in a leather chair with his feet up on a table in the middle of Petco Park’s visiting clubhouse. Addison Russell didn’t seem to be paying attention to the TVs either, standing by his locker and getting ready for batting practice. The Cubs are approaching potential franchise-altering decisions that will involve their All-Star shortstop and converted second baseman. Promoting Baez from Triple-A Iowa could shore up their infield defense, while moving Bryant to the outfield could generate more production out of an important corner spot. “Those are possibilities,” manager Joe Maddon admitted before downplaying those scenarios. The night before, Bryant, Castro and Russell combined for three errors that led to three unearned runs in a 4-3 loss to the San Diego Padres. Baez has a take-charge personality on the field that makes it hard to imagine him not yelling for that pop-up in shallow center that bounced out of Russell’s glove. In a game the Cubs felt they should have won, left fielder Chris Coghlan absolutely crushed two balls off James Shields and Craig Kimbrel, showing signs that maybe he’s about to get hot after those two home runs. “I know his batting average (.205) isn’t high, but who’s hit the ball harder with worse luck than he has this year?” Maddon said. “You got to look through that surface stuff. I think Coghlan’s had a pretty nice year so far. He’s been unlucky, man. That guy’s hit line drives that have been caught and even if, say, four of them had fallen ... he’s probably hitting about .240 or .250 and everybody’s happy. “I like depth in a long season. I don’t like pushing or pressing people to get here before it’s their time or before it’s really absolutely the necessary time to do it. “Right now, Javy’s taking care of business. It’s great. I like him a lot. He’s going to be a really good major-league player. But I also like what’s going on here a lot, too.” Coghlan has pretty much seen it all after becoming the 2009 National League Rookie of the Year with the Florida Marlins. He got injured, got released, signed a minor-league deal with the Cubs and put up an .804 OPS during last year’s bounce-back season. “I’ve played enough years that the average only matters at the end,” said Coghlan, who does have six homers and a .705 OPS. “The numbers are up there, and it looks bad. Trust me, I know that it’s not good. But that’s why they call it an average at the end of the year. “I don’t care what I’m hitting. I could be hitting .400 right now — and I finish the year at .190. I mean, .190’s the only one that matters, not .400 the first two months. “So every day you just look to grind out some at-bats, man, and hit the ball hard. Obviously, the harder you hit it, the better chance you have for it to fall. I feel like my numbers definitely don’t show or reflect the way that I’ve hit the ball. And in the long run, it will.” Maddon tried but couldn’t convince Theo Epstein’s front office to put Baez on the Opening Night roster.
The Cubs have Baez focusing on shortstop and second base at Iowa — where he’s hitting .308 with an .823 OPS this month — after he took an extended leave of absence in April to deal with the death of his younger sister. “Javy can make any defense better,” Maddon said. “I talked about that in camp: I thought he was one of the finest young infielders I’ve seen. But I’m not displeased with anybody out there right now, either. Sometimes, you just got to wait for your opportunity. “I hear that he is doing well. That’s good when you get thick and you have that kind of depth coming, because something’s going to happen. It always does. I hate to say it, but it does. And I’ve talked about how baseball has a cruel way of answering its own questions. “Right now, from Javy’s perspective, the biggest thing he has to do is to stay ready.”
MUMBAI, India--(BUSINESS WIRE)--Axalta Coating Systems (NYSE: AXTA), a leading global supplier of liquid and powder coatings, opened its expanded Technology Center Savli in Vadodara in the state of Gujarat, India. The technology investment demonstrates Axalta’s commitment to delivering innovative coating solutions in India, a strategic market in Asia-Pacific. The Technology Center includes an automotive coatings development laboratory and color laboratory. The automotive coatings development laboratory will focus on developing special paint formulations to ensure superior color match, aesthetics and quality. The new laboratory space will feature contemporary robotic spray applicators that can simulate line application conditions inside customer manufacturing plants. The equipment is designed to facilitate the development, approval, quality control, and customer line support required by auto-OEM customers. Additional capabilities at the facility also include color measurement tools and accelerated paint testing equipment. “Technology plays a crucial role in driving our business as well as serving our customers,” explained Dr. Barry Snyder, Axalta’s Senior Vice President and Chief Technology Officer at the opening ceremony. “This Technology Center is part of a global network of technology facilities that are designed to develop tailored solutions to meet local customer needs.” “At Axalta, we monitor automotive trends and forecast how color preferences are changing in the automotive market,” added Sobers Sethi, Axalta Vice President and President, Emerging Markets. “I am pleased that we can now offer innovative coating solutions directly to our customers. We’re confident that with this new state-of-the-art technology center, we will be able to meet the paint and coatings requirements of all our customers in India.” Axalta established its India business in 1996, and today operates a manufacturing plant in Vadodara within the state of Gujarat along with offices & regional training centers in Gurugram, Mumbai, Kolkata, Bengaluru and Vadodara. Axalta Coating Systems Axalta is a leading global company focused solely on coatings and providing customers with innovative, colorful, beautiful and sustainable solutions. From light OEM vehicles, commercial vehicles and refinish applications to electric motors, buildings and pipelines, our coatings are designed to prevent corrosion, increase productivity and enable the materials we coat to last longer. With more than 150 years of experience in the coatings industry, the 13,600 people of Axalta continue to find ways to serve our 100,000+ customers in more than 130 countries better every day with the finest coatings, application systems and technology. For more information visit axalta.com and follow us @Axalta on Twitter and on LinkedIn.
""" 输入一个整数数组,实现一个函数来调整该数组中数字的顺序, 使得所有的奇数位于数组的前半部分,所有的偶数位于数组的后半部分, 并保证奇数和奇数,偶数和偶数之间的相对位置不变。 """ # -*- coding:utf-8 -*- class Solution: def reOrderArray(self, array): # write code here # key 作为排序依据 return sorted(array, key=lambda x: x%2==0)
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.NoSuchElementException;

import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;

// XMLFile, XMLTag, SAXParser and NoSuchAttributeException are classes from the
// project under test; their imports depend on this project's package layout.

/**
 * Unit test for simple App.
 */
public class SAXParserTest extends TestCase {
	/**
	 * Create the test case
	 *
	 * @param testName
	 *            name of the test case
	 */
	public SAXParserTest(String testName) {
		super(testName);
	}

	/**
	 * @return the suite of tests being tested
	 */
	public static Test suite() {
		return new TestSuite(SAXParserTest.class);
	}

	/*
	 * === *** *** ** ** *** *** === === Tests after this point === === *** ***
	 * ** ** *** *** ===
	 */

	/**
	 * Rigorous Test :-)
	 */
	public void testSAXParser1() {
		System.out.println("testSAXParser1()");
		XMLFile result = SAXParser.parseString(
				"<?xml version=\"1.0\" encoding=\"UTF-8\"?><PLAYER id=\"adamsarota\"><NAME first=\"Adam\" last=\"Sarota\">thisIsMyNameValue<subtag>\n</subtag></NAME></PLAYER>");
		System.out.println(result.toString());
		assertTrue(true);
		try {
			System.out.println(result.getContent("PLAYER.NAME"));
		} catch (NoSuchElementException e) {
			System.err.println(e.getMessage());
			e.printStackTrace();
		}
		try {
			System.out.println(result.getElement("PLAYER.NAME").getAttribute("first"));
		} catch (NoSuchAttributeException | NoSuchElementException e) {
			System.err.println(e.getMessage());
			e.printStackTrace();
		}
		System.out.println();
	}

	/**
	 * Rigorous Test :-)
	 */
	public void testSAXParser2() {
		System.out.println("testSAXParser2()");
		// Build a small tag tree by hand: a root element with one child.
		ArrayList<XMLTag> tagstack = new ArrayList<XMLTag>();
		LinkedHashMap<String, String> atts1 = new LinkedHashMap<String, String>();
		atts1.put("name", "rootele");
		XMLTag tag1 = new XMLTag("rootelement", atts1);
		tagstack.add(tag1);
		LinkedHashMap<String, String> atts2 = new LinkedHashMap<String, String>();
		atts2.put("aname", "zelement");
		atts2.put("zname", "aelement");
		XMLTag tag2 = new XMLTag("childelement", atts2);
		tag2.setContent("1");
		tagstack.add(tag2);
		// Pop the child off the stack and attach it to its parent.
		XMLTag currenttag = tagstack.get(tagstack.size() - 1);
		tagstack.remove(tagstack.size() - 1);
		tagstack.get(tagstack.size() - 1).addElement(currenttag);
		XMLTag returntag = tagstack.get(tagstack.size() - 1);
		// System.out.println(returntag.hasElement("childelement"));
		System.out.println(returntag.toString(0));
		assertTrue(true);
	}

	public void indentOut(String str) {
	}

	/**
	 * Despite its name, this method escapes control characters (e.g. a real
	 * newline becomes the two characters "\n") so they are visible in output.
	 */
	public static String unEscapeString(String s) {
		StringBuilder sb = new StringBuilder();
		for (int i = 0; i < s.length(); i++)
			switch (s.charAt(i)) {
			case '\n':
				sb.append("\\n");
				break;
			case '\t':
				sb.append("\\t");
				break;
			// ... rest of escape characters
			default:
				sb.append(s.charAt(i));
			}
		return sb.toString();
	}
}
/**
 * A simple map that doesn't allow null values.
 *
 * @author Yeelp
 *
 * @param <Key> the type of keys of the map
 * @param <Value> the type of values stored in the map
 */
public class NonNullMap<Key, Value> extends HashMap<Key, Value> implements Map<Key, Value> {

	private static final long serialVersionUID = -7597325545229879253L;
	private Value defaultVal;

	@SuppressWarnings("unused")
	private NonNullMap() {
		throw new UnsupportedOperationException("A default value must be specified for the NonNullMap");
	}

	/**
	 * Create a new NonNullMap
	 *
	 * @param defaultVal the default value stored in the map. The NonNullMap will
	 *                   fall back to this value whenever it encounters a null
	 *                   Value.
	 */
	public NonNullMap(Value defaultVal) {
		super();
		this.defaultVal = defaultVal;
	}

	/**
	 * Create a new NonNullMap initialized with the specified keys and default value
	 *
	 * @param keys       the keys to use when initializing the map
	 * @param defaultVal the default value to initialize the map with. The
	 *                   NonNullMap will use this whenever it encounters null.
	 */
	@SafeVarargs
	public NonNullMap(Value defaultVal, Key... keys) {
		this(defaultVal);
		for(Key k : keys) {
			super.put(k, defaultVal);
		}
	}

	/**
	 * Get the default value the map uses. Note updating the default value here will
	 * change it across all mappings that are mapped to the default value.
	 *
	 * @return the default value
	 */
	public Value getDefaultValue() {
		return this.defaultVal;
	}

	@Override
	public boolean containsValue(Object value) {
		// The default value is always considered present, since get() falls back
		// to it for any key; comparing from defaultVal also tolerates a null argument.
		return this.defaultVal.equals(value) || super.containsValue(value);
	}

	/**
	 * {@inheritDoc}
	 *
	 * <p>
	 * For the NonNullMap, if the key has no mapping, the default value is returned
	 * instead.
	 */
	@Override
	public Value get(Object key) {
		return super.getOrDefault(key, this.defaultVal);
	}

	/**
	 * {@inheritDoc}
	 *
	 * <p>
	 * Since the NonNullMap disallows null keys and values, this method throws an
	 * {@link UnsupportedOperationException} if either the key or value is null.
	 *
	 * @throws UnsupportedOperationException if the key or value is null
	 */
	@Override
	public Value put(Key key, Value value) {
		if(value == null) {
			throw new UnsupportedOperationException("Null values disallowed for NonNullMap");
		}
		else if(key == null) {
			throw new UnsupportedOperationException("Null keys disallowed for NonNullMap");
		}
		return super.put(key, value);
	}

	/**
	 * Maps the specified key to the default value, and returns whatever the
	 * original value associated with this key was.
	 *
	 * @param key the key that should have its mapping set to the default value.
	 * @return the value once associated with this key. This method returns null
	 *         if, and only if, this map never contained the specified key.
	 */
	@Nullable
	public Value setDefault(Key key) {
		return super.put(key, this.defaultVal);
	}
}
Effects of Hydrogen Peroxide on Dental Unit Biofilms and Treatment Water Contamination

Objectives: To study the effects of various concentrations of hydrogen peroxide on mature waterline biofilms and in controlling planktonic (free-floating) organisms in simulated dental treatment water systems; and to study in vitro the effects of 2%, 3%, and 7% hydrogen peroxide on the removal of mature biofilms and inorganic compounds in dental waterlines. Methods: Four units of an automated dental unit water system simulation device were used for 12 weeks. All units were initially cleaned to control biofilms and inorganic deposits. H2O2 at concentrations of 1%, 2%, and 3% was used weekly for periodic cleaning in the three treatment group units (units 1, 2 & 3), with 0.05%, 0.15%, and 0.25% H2O2 in municipal water used as irrigant, respectively. The control unit (unit 4) did not have weekly cleanings and used municipal water as irrigant. Laser Scanning Confocal Microscopy and Scanning Electron Microscopy were used to study deposits on lines, and weekly heterotrophic plate counts were done to study effluent water contamination. A 24-hour in vitro challenge test with 7%, 3%, and 2% H2O2 on mature biofilms was conducted using harvested waterlines to study biofilm and inorganic deposit removal. Results: Heterotrophic plate counts of effluent water showed that the control unit reached contamination levels in excess of 400,000 CFU/mL, while all treatment units showed contamination levels <500 CFU/mL through most of the 12 weeks. All treatment units showed varying levels of biofilm and inorganic deposit control in this short 12-week study. The in vitro challenge test showed that although there was biofilm control, there was no eradication even when 7% H2O2 was used for 24 hours. Conclusions: 2% H2O2 used as a periodic cleaner, and diluted to 0.05% in municipal water for irrigation, was beneficial in controlling biofilm and planktonic contamination in dental unit water systems. However, to remove well-established biofilms, it may take more than 2 months when initial and multiple periodic cleanings are performed using H2O2.

Introduction

Biofilms (organic contamination) are routinely found in dental treatment water delivery systems. They can be observed colonizing within dental unit waterlines in as little as two weeks. 9 When viewed through a Scanning Electron Microscope (SEM), biofilms are characterized by microorganisms embedded in an extracellular matrix with thickness ranging from 30 to 50 microns that may allow chunks of material/biofilm to dislodge, thereby contaminating other areas of the dental treatment water system. 10 Inorganic deposits (minerals) derived from dissolved salts in municipal water also contaminate the lines. 11 These inorganic deposits are found interspersed with biofilms and are difficult to remove with most periodic cleaners/disinfectants. 11,12 Colonization and proliferation of many and varied species of microorganisms has been well documented in dental unit water systems. Apart from bacteria, amoebae species have also been observed. 21 Some of the microorganisms found in this environment have also been associated with hospital infections, and some in particular are of concern for the dental office. In one case, Mycobacterium xenopi was implicated in 19 cases of pulmonary disease in a hospital, with transmission occurring through infected aerosols when patients used a shower. 29
Water spray-related aerosols generated by high-speed handpieces, ultrasonic/piezoelectric scalers, and air/water syringes are commonplace in the dental environment, contaminating the immediate surroundings of patients seated in the chair. 31,32 These sprays and aerosols generated in the dental office could be a potential route for the transmission of microbes. 18,32,33 Atlas et al 33 found Legionella in treatment water from dental units, water faucets and drinking water fountains. Aerosols generated by dental handpieces were the source of sub-clinical infection with Legionella pneumophila in a dental school environment. 18 Fotos et al 34 investigated exposure of students and employees at a dental clinic and found that, of the 270 sera tested, 20% had significantly higher IgG antibody activity to the pooled Legionella sp. antigen as compared with known negative controls. In a similar sero-epidemiological study, Reinthaler et al 35 found a high prevalence of antibodies to Legionella pneumophila among dental personnel. These two cornerstone sero-epidemiological studies 34,35 on Legionella, a known pathogen, are of significant concern both as occupational exposure for dental care providers and as iatrogenic disease risk to patients. Other than microbes, very high doses of bacterial endotoxins (>100 EU/mL) were measured in dental unit water, with even municipal water containing more than 25 EU/mL. 36 Exposure of the patient to certain microbes associated with respiratory or enteric diseases, or even conjunctivitis, may be very plausible if the water quality is poor. 37 The types of organisms may range from amoebae and Legionella to E. coli, 21 seen in dental units connected to municipal water, or when connected to self-contained reservoirs, which may be contaminated by dental staff not following proper hand washing or aseptic procedures such as wearing gloves while handling self-contained reservoirs. 37 Considering the presence of these contaminants, control methods for cleaning and disinfecting the dental water system and providing quality irrigant/dental treatment water are warranted. To avoid water passively dripping from the handpieces, air/water syringes, and ultrasonic or piezoelectric scalers, devices are manufactured with a retraction mechanism. This mechanism can actively "suck back" contaminants from the oral cavity, introducing oral contaminants including microbes into the dental unit waterlines and the dental unit water system. Today, many dental water systems (with retraction mechanisms) are equipped with anti-retraction valves to prevent suck-back of contaminants from the oral cavity and/or are designed to give a short 'terminal flush' of water to push out any suck-back. 38 In in vitro experimental studies, even new and unused anti-retraction valves were shown to be quite unreliable, leading to microbial suck-back into the waterline system from the patient end. 39,40 Factors associated with biofilm formation in dental unit water systems include: long periods of stagnation, high surface-to-volume ratio, nutrient content of water for microbial survival, mineral content and hardness of water facilitating coating of the lumen, fluid mechanics such as laminar flow and low flow rate, microbial quality (bacteria, fungi, protozoans and nematodes) of the water entering the system, and failure of anti-retraction valves leading to contamination from the oral cavity of patients. 37,39,40
Flushing dental unit waterlines (DUWLs) with water at the beginning and end of each patient treatment session has been previously advocated. 41,42 This flushing protocol, as recommended earlier by the U.S. Department of Health and Human Services, 43,44 may diminish planktonic organisms for a short period of time but will not eliminate contamination or control biofilms. One study concluded that a two-minute flush reduced the counts of planktonic organisms, on average by one-third, but did not reduce counts to zero. 6 While it may provide a transient reduction in planktonic microbes, purely flushing the water for a few minutes prior to treatment is not effective in biofilm control. 21,45 Investigators have tested methods such as inline micro-filter devices 7,8 and flushing water lines with various disinfectant solutions, which include hydrogen peroxide-based chemicals, 46,47 chlorhexidine gluconate, 48,49 sodium hypochlorite, 50-52 povidone-iodine, 20 iodine, 53 mouthwash, 54 and silver and silver compounds. 55,56 Some of these periodic cleaning/disinfection methods, although effective in controlling planktonic organisms and possibly biofilms, do not eliminate existing biofilms or biofilm formation, due to the inherent contamination of source water when municipal water or only a low-grade antimicrobial in the irrigant is used. Therefore, in addition to initial and periodic cleaning/disinfection of the water systems, purification of water or providing an irrigant of acceptable microbial quality is necessary. Using untreated tap/municipal source water for dental treatment is not reliable with respect to microbial quality, as studies have repeatedly demonstrated planktonic counts ranging from zero to at least a few hundred colony-forming units, exceeding the maximum contamination level of 500 colony-forming units per milliliter (CFU/mL) set by the CDC's current recommendations for dental treatment water quality in the United States of America. 7,8,11,36,47,49,52,53,57,58 Silver compounds and ionic silver have been used as antimicrobials in health care, particularly in dressing materials for burn and wound care, and have little or no side effects but good antimicrobial properties. Silver citrate has been incorporated into the material composition of indwelling urinary catheters and has shown merit with respect to microbial and biofilm control. 62,63 Silver ion technology (silver citrate) in municipal water as a continuously present antimicrobial is also being used to treat municipal water for irrigation purposes. While use of sodium hypochlorite (NaOCl) in higher concentrations is effective in biofilm control, 7,8,11,19 and in low concentrations in "improving" the microbiological quality of dental treatment water, it is very corrosive and damaging to the dental unit water system. 51,52 High amounts of trihalomethanes (carcinogens) are produced when it is in contact with organic matter such as biofilms. 64 Constantly present low levels of NaOCl in the presence of organic matter can also increase total trihalomethane levels beyond those set by the U.S. Environmental Protection Agency. The use of NaOCl for the specified purpose of cleaning DUWLs has not been approved by the U.S. FDA. "Purely using any low level antimicrobial without initial and periodic cleaning of lines may expose patients and employees to endotoxins." 36
The objectives of this study were to determine the effects of various concentrations of hydrogen peroxide on mature waterline biofilms and in controlling planktonic (free-floating) organisms in simulated dental treatment water systems; an additional objective was to study in vitro the effects of 2%, 3%, and 7% hydrogen peroxide on the removal of mature biofilms and inorganic compounds in dental waterlines.

Materials and Methods

Naturally occurring biofilms of heterotrophic mesophilic microorganisms that had accumulated on dental lines utilized in dental suites were harvested and used in this study. No specific microbes were introduced into the lines. An automated dental unit water system simulation device (Figure 1A-C) was used to simulate the water flow in an operating dental suite. The simulation device was retrofitted with four dental unit waterlines (over 10 years old) to simulate the water systems of four operating dental suites. The waterlines had mature biofilms and heterotrophic contamination of more than 400,000 CFU/mL. Planktonic contamination was studied using heterotrophic plate counts, and biofilm presence was confirmed using Laser Scanning Confocal Microscopy (LSCM) as well as Scanning Electron Microscopy (SEM). Effluent water was neutralized with sodium thiosulphate prior to heterotrophic plate counts (HPC) in triplicate, to study contamination by planktonic microorganisms using R2A agar plates. R2A agar growth medium is a low-nutrient agar which, in combination with a low incubation temperature and an extended incubation time, is suitable for the recovery of stressed and chlorine-tolerant bacteria from drinking water. 65,66 The automated dental unit water simulation system prototype was initially designed, tested and used by Dr. R. Puttaiah, Mr. E. Gambal and Dr. S.E. Mills at the Dental Investigations Service, Brooks AFB, San Antonio, TX in 1995. 7,8 The automated dental unit water system simulator for in vitro use employed in this study was designed by Dr. R. Puttaiah, BCD TAMUS HSC, Dr. J. Zawada, A-dec Inc., and Dr. S. Seibert, BCD TAMUS HSC, and constructed by A-dec Inc., Newberg, OR (Figure 1). The simulation device uses 8 dental unit waterline systems built to scale, each of which functions as a dental unit water system. Each dental unit waterline system simulates a single dental suite. Each dental unit has 4 handpiece lines and 1 air/water syringe line attached to a control block. The source water (inlet water) can be derived from the municipality or a self-contained reservoir (to introduce different irrigants). All 8 units can be independently controlled (independent unique functions) or may be collectively controlled using an Allen-Bradley logic controller (Allen-Bradley & Rockwell Automation, Milwaukee, WI, USA) that turns the units 'on' and 'off' based on the algorithm provided to simulate water usage (period of flow), while manual valves control the flow volume. The algorithms were programmed into the RSLogix (Allen-Bradley & Rockwell Automation, Milwaukee, WI, USA) automation software using a personal computer, which in turn controls the logic controller based on the programmed algorithms. The algorithms used in this study simulated typical dental clinic use of about 600-650 mL per day, with intermittent flow (an hourly cumulative time of 12 minutes of random flow), over a 6-hour workday, 4 days per week. The units were shut off at night and on weekends.
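As a rough illustration of the usage algorithm described above, a schedule of this kind can be sketched as below. The structure and numbers are assumptions for illustration only, not the actual RSLogix program.

import random

def daily_flow_schedule(hours=6, flow_minutes_per_hour=12, seed=0):
    """Scatter ~12 minutes of cumulative flow per working hour across
    random 1-3 minute bursts (bursts may overlap within an hour; this
    is only a sketch of the intermittent-flow idea)."""
    rng = random.Random(seed)
    schedule = []  # (hour, start_minute, duration_minutes)
    for hour in range(hours):
        remaining = flow_minutes_per_hour
        while remaining > 0:
            burst = min(rng.randint(1, 3), remaining)
            schedule.append((hour, rng.randint(0, 59), burst))
            remaining -= burst
    return schedule

# 600 mL/day over 6 h x 12 min/h of open valve implies an average
# flow on the order of 600 / 72, i.e. roughly 8-9 mL per minute of flow.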
Preparation of the test system

Waterlines from operating dental units (10 years or older) were harvested and attached to the automated dental unit water system simulator. The simulator used municipal water as irrigant for 1 month to maintain viable biofilms and heterotrophic contamination. Municipal water pH was 7.0-7.5 and the total dissolved solids 180 ppm to 250 ppm. Line samples were removed from each unit to evaluate the biofilm at baseline using LSCM (Figure 2). Heterotrophic plate counts (HPC) of planktonic or free-floating microbes in effluent treatment water samples were collected from each unit to measure contamination levels. HPC of effluent water showed a maximum contamination of >400,000 CFU/mL from the collected water in each dental unit. To study the effects of hydrogen peroxide (H2O2) on mature waterline biofilms and in controlling planktonic organisms in dental treatment water, four units in the simulated dental water system were used in this study.
• Unit 1 (treatment 1) used 1% H2O2 for 4 initial cleanings, each with 5 minutes of contact within the lumen, followed by weekly cleaning. The irrigant was 0.05% H2O2 in municipal water for simulated dental care.
• Unit 2 (treatment 2) used 2% H2O2 for 4 initial cleanings, each with 5 minutes of contact within the lumen, followed by weekly cleaning. The irrigant was 0.15% H2O2 in municipal water for simulated dental care.
• Unit 3 (treatment 3) used 3% H2O2 for 4 initial cleanings, each with 5 minutes of contact within the lumen, followed by weekly cleaning. The irrigant was 0.25% H2O2 in municipal water for simulated dental care.
• Unit 4 (control) was initially cleaned using 60 ppm of active chlorine dioxide (ClO2) 4 times for 5 minutes of contact and used municipal water as irrigant for simulated dental care. No additional cleaning with ClO2 was conducted throughout the rest of the study.
Baseline and weekly water samples were collected in sterile containers. The lines hold approximately 20 mL of water; therefore, 10 mL of water was collected before use in the morning after the unit had remained stagnant overnight. These were pooled samples (4 handpiece and 1 air/water syringe lines from each simulation unit) of about 2 mL each. All external line surfaces near the effluent area (6 inches) of the tubing were cleaned with an alcohol swab twice before collection to control external contamination. The collected water was neutralized with sodium thiosulphate by mixing a 0.1 mL water sample in 0.9 mL sodium thiosulphate (10 mg in 125 mL sterile water, Millipore Whirl-pak bag with thiosulphate) and held for 30 seconds. A 1.0 mL aliquot of the neutralized sample was plated on R2A agar, incubated at room temperature (22°C) for 7 days, and the colonies counted. Water samples were plated in triplicate. Sterility controls were conducted in parallel. Mean heterotrophic CFUs/mL were converted into Log10 values (CFUs/mL) for normalization of the data and compared using one-way analysis of variance and Scheffe's post-hoc test at alpha 0.05. LSCM of biofilms and other deposits in the lumen of the lines was performed on luminal surfaces of lines, with 1 cm from each unit harvested at baseline (post initial cleaning) and at 60 days. The outer surface of the waterline to be harvested was wiped with an alcohol swab and handled aseptically. The blade used to slice the line was wiped clean with an alcohol swab.
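The count processing described above amounts to a dilution correction (0.1 mL of sample in 0.9 mL of neutralizer is a 1:10 dilution), a log10 transform, and a one-way ANOVA. A minimal sketch with hypothetical plate counts follows (Scheffe's post-hoc test, used in the paper, is not included here):

import numpy as np
from scipy import stats

DILUTION = 10  # 0.1 mL sample in 0.9 mL neutralizer -> 1:10

# Hypothetical triplicate plate counts (colonies per plate) for one week
plate_counts = {
    "control":     [4200, 3900, 4500],
    "treatment_1": [3, 5, 2],
    "treatment_2": [1, 0, 2],
    "treatment_3": [0, 1, 1],
}

# Per-plate log10(CFU/mL); +1 avoids log10(0) for sterile plates
groups = [np.log10(np.array(c) * DILUTION + 1) for c in plate_counts.values()]

f_stat, p_value = stats.f_oneway(*groups)
print(f"F = {f_stat:.2f}, p = {p_value:.4f}")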
A 1 cm section of line from each unit was removed, slit axially, and immediately stained using the LIVE/DEAD® BacLight™ assay (Molecular Probes, Invitrogen Corporation, Carlsbad, California, USA). (The BacLight Green (B-35000) and BacLight Red (B-35001) bacterial stains are fluorescent labeling reagents for detecting and monitoring bacteria; these two dyes are not nucleic acid stains. Bacteria stained with the BacLight Green and BacLight Red bacterial stains exhibit bright green and red fluorescence (absorption/emission ~480/516 and ~581/644 nm, respectively). The BacLight kits are well suited for use with LSCM. The LIVE/DEAD BacLight Bacterial Viability Kits employ two nucleic acid stains: the green-fluorescent SYTO® 9 stain and the red-fluorescent propidium iodide stain. These stains differ in their ability to penetrate healthy bacterial cells. When used alone, SYTO 9 stain labels both live and dead bacteria. In contrast, propidium iodide penetrates only bacteria with damaged membranes, reducing SYTO 9 fluorescence when both dyes are present. Thus, live bacteria with intact membranes fluoresce green, while dead bacteria with damaged membranes fluoresce red.) While samples were being observed using laser scanning confocal microscopy, the sample holding mount/immersion slide was wiped with an alcohol swab and sterile water was used for immersion. No neutralization of the biofilm samples was conducted. Following staining, the lines were immediately studied using LSCM. The biofilms were observed using a 40X water immersion lens and digital images were made. The complete luminal surface was scrutinized and the worst-case scenario recorded/imaged to identify any residual biofilm or other deposits. Although topographic images and channel graphs, including quantitative outputs, were generated, no quantitative tests or three-dimensional analyses were performed on the output, as we did not standardize the view, field, or aperture of the images as needed for quantification. 67 Only live/dead (green/red) images were used for the qualitative presence or absence of viable biofilm/cells. After LSCM, the same samples were desiccated and coated with gold-palladium for SEM, and the worst-case scenario (maximum contamination in the sample) was viewed and recorded/imaged at 1600X. SEM was conducted to see if there were biofilms, but mostly to study inorganic matter such as salt deposits from hardness in the municipal water previously used in the lines when they were in service in clinical dental units. To study the effects of 2%, 3% and 7% H2O2 on mature biofilms and inorganic compounds in dental waterlines, three 250 mL Monojet syringes were attached to lines (30 cm long) with roughly 10-14 year old biofilms. Treatment Group 1 used 2% H2O2, Treatment Group 2 used 3% H2O2, and Treatment Group 3 used 7% H2O2 to challenge the biofilms and inorganic matter in the lines. For each group, a fresh 10 mL of H2O2 at the respective concentration was loaded at 0, 5, 10, 20, 30, 60, 180, 360 and 1440 minutes. There were no active flushes between loadings at each time interval. A line sample (1 cm length) was gently harvested from each group at each time interval (from the distal end, away from the syringe) for LSCM (procedures for processing line samples were similar to those in Task 1) and SEM, to study the removal of both inorganic and organic deposits. The luminal surface was scrutinized to identify the worst-case scenario for both biofilm and inorganic deposits, which was then imaged.
A representative sample was not taken or considered, as we wanted to identify any residue. Outcomes were classified on a 5-level ordinal scale for biofilm/deposit presence, with 1 = presence of mature biofilm matrix; 2 = biofilm present & no mature matrix; 3 = scattered microbes & no biofilm; 4 = no microbes & no biofilms; 5 = inorganic deposits with no biofilm or microbes.

Results

Figure 3 shows the longitudinal observation of the contamination levels (HPC) of the effluent water from the control and treatment units, computed as CFUs/mL. After initial cleaning of all the units (treatment and control), all effluent counts started below the 500 CFU/mL maximum contamination level for treatment water set by the CDC (Table 1. Descriptive statistics of heterotrophic contamination in effluent water from simulated control and treatment units). The control unit showed unacceptable levels from week 1 onwards, reaching a maximum contamination level of 400,000 CFU/mL by the end of the 12-week study. Treatment Unit 1 showed a maximum contamination in week 2 and thereafter remained below 500 CFU/mL. Treatment Units 2 & 3 remained below the 500 CFU/mL limit throughout the 12-week study. A summary of descriptive statistics (Table 1) addresses the mean contamination levels (both absolute values and Log10 values of the CFUs/mL). One-way analysis of variance (α=0.05) and Scheffe's post-hoc tests were used to study any difference between the Log10 values of the effluent heterotrophic plate counts of the four units. The test for homogeneity of variance showed a significant difference between the contamination levels among the groups (P<.05). Scheffe's post-hoc test for multiple comparisons determined that there was no significant difference between any of the treatment groups (P>.05), while all treatment groups showed significantly lower contamination levels than the control group (P<.05). LSCM and SEM of the waterline surfaces, used in Task 1 to study the effects of periodic cleaning with H2O2 and the use of dilute H2O2 as irrigant in differing concentrations, are described as a template (Figure 4). At baseline, the control unit had a minimal amount of salt deposits and biofilms, while Treatment Unit 1 had a clean line surface with no visible salt or biofilm deposits in the field of view. Treatment Units 2 and 3 showed very little biofilm deposit but a significant amount of salt deposits at baseline. At the end of the study, both the control unit and Treatment Unit 1 showed residual biofilms. Treatment Units 2 and 3 showed cleaner line surfaces with respect to salt deposits but still showed residual biofilm. All Z-view samples (control & treatment groups) showed residual biofilm deposits ranging from traces to a large mass. In this 12-week study, only the 3% and 2% H2O2 showed better salt removal than the 1% H2O2. The in vitro test (Task 2) studying the effects of 2%, 3% and 7% H2O2 on biofilm removal was a 24-hour longitudinal challenge study. All three groups had biofilms and salts at baseline. The 2% H2O2 periodic cleaner/disinfectant produced minimal noticeable disruption of the mature biofilm, with some removal of inorganic deposits. The 3% H2O2 showed noticeable removal of biofilm and salt deposits. The 7% H2O2 removed biofilm and salt deposits similarly to the 3% H2O2.
Although minimal, some residual biofilm remained on the line surface in both the 3% and 7% H2O2 treatments (Figure 5).

Discussion

Contamination of dental unit water systems and the possibility of risks to patients have been addressed for over 50 years. 15,16 (Figure 3. The control unit's contamination started at <10 CFU/mL and reached unacceptable contamination levels by the end of the first week. Although all treatment units were highly contaminated prior to initial cleaning, their counts remained below 500 CFU/mL throughout the study period, except for one sample from Treatment Unit 1, which showed a single count of >500 CFU/mL in week 3 but remained below 500 CFU/mL for the remainder of the 12-week study.) Although the American Dental Association (ADA) set goals of <200 CFU/mL heterotrophic counts for dental treatment water contamination levels, only in the recent past did the ADA and the Centers for Disease Control & Prevention conclude that the maximum contamination of dental treatment water should be <500 CFU/mL and that biofilms in dental unit water systems should be controlled. 44,68,69 These guidelines are based on microbial characteristics, patient susceptibility, and the lack of and difficulties associated with epidemiological surveillance (morbidity and mortality) other than the sero-epidemiological studies demonstrating health risks for both patients and employees. 4 Hydrogen peroxide in different formulations (alkaline peroxide, as well as silver added to 6% H2O2) has been studied with respect to biofilm control in dental unit water systems. 56,70,71 In this study we used over-the-counter hydrogen peroxide, readily available in pharmacies, both for periodic cleaning and, diluted in municipal water, as a low-grade irrigant, along with a hydrogen peroxide marketed as a high-level disinfectant (Sporox, Sultan Chemicals, NY) as a periodic flush only. The main thrust of this study was to determine in vitro (but as close as possible to use in dental equipment) the best concentration of H2O2 for periodic cleaning of already existing deposits (both inorganic and biofilms) in dental unit water systems. In addition, we wanted to determine a much lower and safer concentration of H2O2 in municipal water as irrigant/coolant for use in dental treatment. After determining the periodic cleaning and irrigant concentrations, the prototype fuel-cell-technology-based H2O2 electrochemical generator could be calibrated to produce these concentrations on a turn-key basis. The prototype fuel-cell-based H2O2 generator that was developed and calibrated has demonstrated the capability to produce up to 3.3% H2O2 for greater than 4000 hours of use (based on testing of the prototype at Lynntech Inc., College Station, Texas, USA). Other than the biofilm challenge tests and control of microbial contamination of the dental water system, the effects of H2O2 on metal and non-metal components of the water system, elemental metal analysis using standardized metal coupons, and effects on composite bonding to enamel and dentin were also studied and will be published in series or in other suitable journals. These types of tests need to be performed not only for efficacy testing of germicides in microbial control, but also to determine deleterious effects, if any, on dental unit water systems as well as patient safety.
The information generated could be used by regulatory agencies such as the United States Environmental Protection Agency (US-EPA) for disinfectant/device registration, and by the United States Food and Drug Administration (US-FDA) in providing marketing clearance of the physical device as well as the electrochemically produced germicide, for efficacy as well as patient safety, respectively. H2O2 and ClO2 were chosen as the germicides for this study due to their history of safe use in mouthwashes and as solutions to decontaminate items used in patient care. They are relatively less harmful than sodium hypochlorite and do not produce disinfection by-products such as trihalomethanes in the presence of organic matter. 12,36,47,49,53,64 In this study, lines with naturally grown biofilms attached to an automated in vitro system were used to study the effects of the germicide (H2O2). Naturally grown biofilms interspersed with salts from hard water cannot be standardized and are hardier than rapidly grown, laboratory-developed biofilms without salt deposits. 72 Natural biofilms in dental waterlines, as observed in this study, were found interspersed with salt deposits. One of the issues was that the salts may have had a neutralizing/buffering effect on the inorganic chemical (H2O2) used as a biofilm control agent, as observed in this study. We proposed that contamination control in dental unit water systems should be a combination approach, addressing biofilm removal/control as well as the control of planktonic microbes that contribute to contamination of the dental irrigant/coolant. This combination approach will be helpful in meeting the CDC guideline of <500 CFU/mL in dental treatment water from dental units and ultrasonic units for general dental treatment on a consistent basis. This approach of multiple baseline decontaminations followed by scheduled periodic treatment of the system to control biofilms and inorganic components demonstrated control of planktonic microbes in dental treatment water (Table 1, Figure 3), as within 3 weeks all treatment groups showed mean counts of <10 CFU/mL. The control group, which was initially cleaned but used municipal water, had contamination levels exceeding the 500 CFU/mL mark by the second week and reached >400,000 CFU/mL by week 12. In contrast, biofilm eradication within 12 weeks was not accomplished (Figure 4). When concentrations as high as 3% were used for initial and periodic cleaning, the representative scans showed no biofilm or salt deposits by the end of the study; however, on further scrutiny of the complete line surface, we found occasional clumps of residual biofilm. Biofilm disruption and control were seen in all treatment units to varying degrees. In the in vitro biofilm challenge test (Figure 5), 7% H2O2 (a very high concentration) did not eradicate biofilms in the waterline, even when in contact with the line surface for 24 hours, demonstrating the tenacity of naturally occurring deposits (inorganic salts and biofilms) in dental waterlines. 7% H2O2, however, should not be used for cleaning lines, as it is very corrosive to dental unit water system components (even 1% H2O2 showed some corrosion over time). One possible explanation for the failure to eradicate biofilms could be a buffering effect of the salts lining the surface of the waterlines on the introduced H2O2, weakening its effect on the biofilms. More standardized studies need to be conducted to investigate this phenomenon.
Biofilm eradication was difficult, and even the control of biofilms could take a long time in older dental units that have never been cleaned before. Multiple initial cleanings followed by weekly cleanings with H2O2 at concentrations as low as 1%, together with regular use of the resulting 0.05% H2O2 in municipal water as an irrigant, controlled biofilms and planktonic contamination of the dental waterlines and treatment water, respectively. After reviewing the outcomes of various concentrations of H2O2 on dental unit water system components, standardized metal coupons, and composite bonding to enamel and dentin (completed in the broader study 12 that will be published separately), we determined that 2% H2O2, when used as a periodic cleaner (<5 minutes of contact with the lines), with a 0.05% mixture in municipal water for irrigation purposes, was effective in controlling biofilms and planktonic contamination.

Conclusions

Based on the findings of this study, we conclude that 2% H2O2 used as a periodic cleaner, and diluted to 0.05% in municipal water for irrigation, was beneficial in controlling biofilm and planktonic contamination in dental unit water systems and met the CDC's guideline of <500 CFU/mL of heterotrophic microorganisms in dental treatment water. However, to remove well-established biofilms, it may take more than 2 months when multiple initial cleanings and repeated weekly cleanings with 2% H2O2 are conducted.
Thermal Monitoring of Natural Source Zone Depletion Natural depletion of subsurface petroleum liquids releases energy in the form of heat. The rate of natural source zone depletion (NSZD) can be derived from subsurface temperature data. An energy balance is performed to resolve NSZD‐generated energy in terms of W/m2. Biodegradation rates are resolved by dividing the NSZD energy by the heat of reaction in joules/mol. Required temperature data are collected using data loggers, wireless connections, and automated data storage and analysis. Continuous thermal resolution of monthly NSZD rates at a field site indicates that apparent monthly NSZD rates vary through time, ranging from 10,000 to 77,000 L/ha/year. Temporal variations in observed apparent NSZD rates are attributed to processes governing the conversion of CH4 to CO2, as opposed to the actual rates of NSZD. Given a year or more of continuous NSZD rate data, it is anticipated that positive and negative biases in apparent NSZD rates will average out, and averaged apparent NSZD rates will converge to true NSZD rates. An 8.4% difference between average apparent NSZD rates over a 31‐month period using the thermal monitoring method and seven rounds of CO2 efflux measurements using CO2 traps supports the validity of both CO2 trap and thermal monitoring methods. A promising aspect of thermal monitoring methods is that continuous data provide a rigorous approach to resolving the true mean NSZD rates as compared to temporally sparse CO2 trap NSZD rate measurements. Overall, a vision is advanced of real‐time sensor‐based groundwater monitoring that can provide better data at lower costs and with greater safety, security, and sustainability.
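A back-of-the-envelope sketch of the conversion described in this abstract: the NSZD heat flux divided by the heat of reaction gives a molar degradation rate, which molar mass and density turn into a volumetric rate. All numbers below are illustrative assumptions (a decane-like LNAPL), not the site's data.

FLUX_W_M2 = 3.0      # assumed NSZD-generated heat flux, W/m^2 (J/s per m^2)
J_PER_MOL = 6.8e6    # assumed heat of reaction, ~6.8 MJ per mole of decane-like LNAPL
MOLAR_MASS = 0.142   # kg/mol (decane as a surrogate composition)
DENSITY = 730.0      # kg/m^3 (light NAPL, assumed)

SECONDS_PER_YEAR = 365.25 * 24 * 3600

mol_per_m2_yr = FLUX_W_M2 * SECONDS_PER_YEAR / J_PER_MOL
kg_per_m2_yr = mol_per_m2_yr * MOLAR_MASS
litres_per_ha_yr = kg_per_m2_yr / DENSITY * 1_000 * 10_000  # L per m^3, m^2 per ha

print(f"{litres_per_ha_yr:,.0f} L/ha/yr")  # ~27,000 L/ha/yr for these inputs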
def build_css_class(localized_fieldname, prefix=''):
    """Build a CSS class name ("<prefix>-<parts>") from a localized
    fieldname such as "title_de".

    Relies on a module-level helper ``_join_css_class(bits, n)`` (defined
    elsewhere in this module) that joins the bits when the last *n* pieces
    form a recognized language code, returning '' otherwise.
    """
    bits = localized_fieldname.split('_')
    css_class = ''
    if len(bits) == 1:
        # No language suffix at all, e.g. "title"
        css_class = str(localized_fieldname)
    elif len(bits) == 2:
        # Simple "<field>_<lang>" case, e.g. "title_de" -> "title-de"
        css_class = '-'.join(bits)
    elif len(bits) > 2:
        # Try a two-part language code first (e.g. "de_at"), then one part
        css_class = _join_css_class(bits, 2)
        if not css_class:
            css_class = _join_css_class(bits, 1)
    return '%s-%s' % (prefix, css_class) if prefix else css_class
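Assuming a `_join_css_class` helper that recognizes one- and two-part language-code suffixes (as the calls above imply), usage would look roughly like this:

build_css_class('title')           # -> 'title'
build_css_class('title_de')        # -> 'title-de'
build_css_class('title_de', 'mt')  # -> 'mt-title-de'

# A name like 'news_title_de_at' would go through _join_css_class to
# split off the two-part language code, giving roughly 'news_title-de-at'.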
import numpy as np
from tqdm import tqdm


def distribution_quantiles(mr, quantile, F_sv, norm_consts=None,
                           start_values=None, times=None,
                           method='brentq', tol=1e-8):
    """Compute the given quantile of the (cumulative) distribution F_sv
    at every time step of the model run ``mr``."""
    if times is None:
        times = mr.times

    if start_values is None:
        start_values = np.zeros((len(times),))

    if norm_consts is None:
        norm_consts = np.ones((len(times),))

    q_lst = []
    for ti in tqdm(range(len(times))):
        q_lst.append(
            mr.distribution_quantile(
                quantile,
                lambda a: F_sv(a, times[ti]),
                norm_const=norm_consts[ti],
                # Pass the per-time start value through; the original code
                # built start_values but then passed None here, leaving the
                # parameter unused.
                start_value=start_values[ti],
                method=method,
                tol=tol
            )
        )
    return np.array(q_lst)
import cv2 as cv import jpeg4py import numpy as np from PIL import Image davis_palette = np.repeat(np.expand_dims(np.arange(0, 256), 1), 3, 1).astype(np.uint8) davis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0], [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0], [0, 64, 128], [128, 64, 128]] def default_image_loader(path): """ The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader, but reverts to the opencv_loader if the former is not available. """ if default_image_loader.use_jpeg4py is None: # Try using jpeg4py im = jpeg4py_loader(path) if im is None: default_image_loader.use_jpeg4py = False print('using opencv_loader instead') else: default_image_loader.use_jpeg4py = True return im if default_image_loader.use_jpeg4py: return jpeg4py_loader(path) return opencv_loader(path) default_image_loader.use_jpeg4py = None def jpeg4py_loader(path): """ Image reading using jpeg4py. https://github.com/ajkxyz/jpeg4py """ try: return jpeg4py.JPEG(path).decode() except Exception as e: print("could not read image '{}'".format(path)) print(e) return None def opencv_loader(path): """ Read image using opencv's imread function and returns it in rgb format. """ try: im = cv.imread(path, cv.IMREAD_COLOR) # Convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print("could not read image '{}'".format(path)) print(e) return None def jpeg4py_loader_w_failsafe(path): """ Image reading using jpeg4py. https://github.com/ajkxyz/jpeg4py """ try: return jpeg4py.JPEG(path).decode() except: try: im = cv.imread(path, cv.IMREAD_COLOR) # Convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print("could not read image '{}'".format(path)) print(e) return None def opencv_seg_loader(path): """ Read segmentation annotation using opencv's imread function. """ try: return cv.imread(path) except Exception as e: print("could not read image '{}'".format(path)) print(e) return None def imread_indexed(filename): """ Load indexed image with given filename. Used to read segmentation annotations. """ im = Image.open(filename) annotation = np.atleast_3d(im)[..., 0] return annotation def imwrite_indexed(filename, array, color_palette=None): """ Save indexed image as png. Used to save segmentation annotation. """ if color_palette is None: color_palette = davis_palette if np.atleast_3d(array).shape[2] != 1: raise Exception('ERROR: saving indexed PNGs requires 2D array') im = Image.fromarray(array) im.putpalette(color_palette.ravel()) im.save(filename, format='PNG')
#include "DataFormats/ForwardDetId/interface/HFNoseDetId.h" #include "DataFormats/ForwardDetId/interface/HFNoseTriggerDetId.h" #include "DataFormats/ForwardDetId/interface/HGCalTriggerDetId.h" #include "DataFormats/ForwardDetId/interface/HFNoseDetIdToModule.h" #include "DataFormats/DetId/interface/DetId.h" #include <cmath> #include <iomanip> #include <iostream> #include <map> #include <string> void testCell(int type) { int N = (type == 0) ? HFNoseDetId::HFNoseFineN : HFNoseDetId::HFNoseCoarseN; const int waferu(0), waferv(0), layer(1), zside(1); std::map<std::pair<int, int>, int> triggers; int ntot(0); for (int u = 0; u < 2 * N; ++u) { for (int v = 0; v < 2 * N; ++v) { if (((v - u) < N) && (u - v) <= N) { HFNoseDetId id(zside, type, layer, waferu, waferv, u, v); std::cout << "ID " << std::hex << id.rawId() << std::dec << " " << id << " Trigger: " << id.triggerCellU() << ":" << id.triggerCellV() << std::endl; std::pair<int, int> trig = id.triggerCellUV(); std::map<std::pair<int, int>, int>::iterator itr = triggers.find(trig); if (itr == triggers.end()) { triggers[trig] = 0; itr = triggers.find(trig); } ++(itr->second); ++ntot; } } } std::cout << "Total of " << ntot << " cells in type " << type << " with " << triggers.size() << " trigger cells" << std::endl; int k(0); for (auto itr : triggers) { std::cout << "Trigger[" << k << "] (" << (itr.first).first << ":" << (itr.first).second << ") " << itr.second << std::endl; ++k; } } void testWafer(int layer, double rin, double rout) { const double waferSize(167.4408); const double rMaxFine(750.0), rMaxMiddle(1200.0); const int zside(1), cellu(0), cellv(0); const std::string waferType[2] = {"Virtual", "Real "}; double r = 0.5 * waferSize; double R = 2.0 * r / std::sqrt(3.0); double dy = 0.75 * R; double xc[6], yc[6]; int N = (int)(0.5 * rout / r) + 2; int nreal(0), nvirtual(0); int ntype[3] = {0, 0, 0}; for (int v = -N; v <= N; ++v) { for (int u = -N; u <= N; ++u) { int nr = 2 * v; int nc = -2 * u + v; double xpos = nc * r; double ypos = nr * dy; xc[0] = xpos + r; yc[0] = ypos + 0.5 * R; xc[1] = xpos; yc[1] = ypos + R; xc[2] = xpos - r; yc[2] = ypos + 0.5 * R; xc[3] = xpos - r; yc[3] = ypos - 0.5 * R; xc[4] = xpos; yc[4] = ypos - R; xc[5] = xpos + r; yc[5] = ypos - 0.5 * R; int cornerOne(0), cornerAll(1); for (int k = 0; k < 6; ++k) { double rpos = std::sqrt(xc[k] * xc[k] + yc[k] * yc[k]); if (rpos >= rin && rpos <= rout) cornerOne = 1; else cornerAll = 0; } if (cornerOne > 0) { double rr = std::sqrt(xpos * xpos + ypos * ypos); int type = (rr < rMaxFine) ? 0 : ((rr < rMaxMiddle) ? 1 : 2); HFNoseDetId id(zside, type, layer, u, v, cellu, cellv); std::cout << waferType[cornerAll] << " Wafer " << id << std::endl; if (cornerAll == 1) { ++nreal; ++ntype[type]; } else { ++nvirtual; } } } } std::cout << nreal << " full wafers of type 0:" << ntype[0] << " 1:" << ntype[1] << " 2:" << ntype[2] << " and " << nvirtual << " partial wafers for r-range " << rin << ":" << rout << std::endl; } void testTriggerCell(int type) { int N = (type == 0) ? 
HFNoseDetId::HFNoseFineN : HFNoseDetId::HFNoseCoarseN; const int waferu(0), waferv(0), layer(1); std::string error[2] = {"ERROR", "OK"}; int ntot(0), nerror(0); for (int iz = 0; iz <= 1; ++iz) { int zside = 2 * iz - 1; for (int u = 0; u < 2 * N; ++u) { for (int v = 0; v < 2 * N; ++v) { if (((v - u) < N) && (u - v) <= N) { HFNoseDetId id(zside, type, layer, waferu, waferv, u, v); HFNoseTriggerDetId idt((int)(HFNoseTrigger), id.zside(), id.type(), id.layer(), id.waferU(), id.waferV(), id.triggerCellU(), id.triggerCellV()); std::cout << "ID " << std::hex << id.rawId() << std::dec << " " << id << " Trigger: " << id.triggerCellU() << ":" << id.triggerCellV() << " Trigger " << idt << std::endl; int ok(0); std::vector<std::pair<int, int> > uvs = idt.cellUV(); for (auto const& uv : uvs) { HFNoseDetId idn(idt.zside(), idt.type(), idt.layer(), idt.waferU(), idt.waferV(), uv.first, uv.second); if (idn == id) { ok = 1; break; } } std::cout << "Trigger Cell: " << idt << " obtained from cell (" << error[ok] << ")" << std::endl; std::cout << "Check " << idt << " from rawId " << HGCalTriggerDetId(idt.rawId()) << " from DetId " << HGCalTriggerDetId(DetId(idt.rawId())) << std::endl; ++ntot; if (ok == 0) ++nerror; } } } } std::cout << "Total of " << ntot << " cells in type " << type << " with " << nerror << " errors for trigger cells" << std::endl; } void testModule(HFNoseDetId const& id) { HFNoseDetIdToModule hfn; HFNoseDetId module = hfn.getModule(id); std::vector<HFNoseDetId> ids = hfn.getDetIds(module); std::string ok = "***** ERROR *****"; for (auto const& id0 : ids) { if (id0 == id) { ok = ""; break; } } std::cout << "Module ID of " << id << " is " << module << " which has " << ids.size() << " cells " << ok << std::endl; for (unsigned int k = 0; k < ids.size(); ++k) std::cout << "ID[" << k << "] " << ids[k] << std::endl; } int main() { testCell(0); testWafer(1, 299.47, 1041.45); testWafer(8, 312.55, 1086.97); testTriggerCell(0); testModule(HFNoseDetId(1, 0, 1, 3, 3, 0, 5)); testModule(HFNoseDetId(-1, 0, 5, 2, -2, 7, 5)); return 0; }
def ensure_exists(self, app_id, json_file): logging.info('Check if app "%s" is deployed', app_id) app_exists = self.app_exists(app_id) if not app_exists: logging.info('Deploying app "%s"', app_id) json_contents = self._load_json(json_file) self.deploy_app(json.dumps(json_contents))
import static org.hamcrest.CoreMatchers.endsWith;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;

import org.apache.commons.lang3.StringUtils;
import org.junit.Test;

import com.google.common.base.Strings;

/**
 * This java example will demonstrate right padding a string
 * of size 10 with spaces and 0
 *
 * @author Justin Musgrove
 * @see <a href='http://www.leveluplunch.com/java/examples/right-pad-string/'>Right pad string</a>
 *
 */
public class RightPadString {

	@Test
	public void right_pad_string_with_zeros_java () {

		String rightPaddedString = String.format("%-10s", "levelup").replace(' ', '0');

		assertEquals("levelup000", rightPaddedString);
		assertEquals(10, rightPaddedString.length());
		assertThat(rightPaddedString, endsWith("0"));
	}

	@Test
	public void right_pad_string_with_spaces_java () {

		// String.format("%-10s", ...) already right-pads with spaces,
		// so no replace step is needed here.
		String rightPaddedString = String.format("%-10s", "levelup");

		assertEquals("levelup   ", rightPaddedString);
		assertEquals(10, rightPaddedString.length());
		assertThat(rightPaddedString, endsWith(" "));
	}

	@Test
	public void right_pad_string_with_zeros_guava () {

		String rightPaddedString = Strings.padEnd("levelup", 10, '0');

		assertEquals("levelup000", rightPaddedString);
		assertEquals(10, rightPaddedString.length());
		assertThat(rightPaddedString, endsWith("0"));
	}

	@Test
	public void right_pad_string_with_spaces_guava () {

		String rightPaddedString = Strings.padEnd("levelup", 10, ' ');

		assertEquals("levelup   ", rightPaddedString);
		assertEquals(10, rightPaddedString.length());
		assertThat(rightPaddedString, endsWith(" "));
	}

	@Test
	public void right_pad_string_with_zeros_apache_commons () {

		String rightPaddedString = StringUtils.rightPad("levelup", 10, "0");

		assertEquals("levelup000", rightPaddedString);
		assertEquals(10, rightPaddedString.length());
		assertThat(rightPaddedString, endsWith("0"));
	}

	@Test
	public void right_pad_string_with_spaces_apache_commons () {

		String rightPaddedString = StringUtils.rightPad("levelup", 10, " ");

		assertEquals("levelup   ", rightPaddedString);
		assertEquals(10, rightPaddedString.length());
		assertThat(rightPaddedString, endsWith(" "));
	}
}
#include "test.h"
#include "lcg.h"

#include <cassert>
#include <cmath>

template <class T>
void boxmuller(T* data, size_t count) {
    assert(count % 2 == 0);

    static const T twopi = T(2.0 * 3.14159265358979323846);

    LCG<T> r;
    for (size_t i = 0; i < count; i += 2) {
        T u1 = T(1) - r(); // [0, 1) -> (0, 1], keeps log() away from zero
        T u2 = r();

        T radius = std::sqrt(-2 * std::log(u1));
        T theta  = twopi * u2;

        data[i    ] = radius * std::cos(theta);
        data[i + 1] = radius * std::sin(theta);
    }
}

static void normaldistf_boxmuller(float* data, size_t count) {
    boxmuller(data, count);
}

static void normaldist_boxmuller(double* data, size_t count) {
    boxmuller(data, count);
}

REGISTER_TEST(boxmuller);
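A quick way to sanity-check the transform above is to reproduce it with NumPy and confirm the first two moments of the output; this is an independent sketch, not part of the test harness:

import numpy as np

rng = np.random.default_rng(0)
n = 500_000
u1 = 1.0 - rng.random(n)  # shift [0, 1) to (0, 1] so log() never sees zero
u2 = rng.random(n)
radius = np.sqrt(-2.0 * np.log(u1))
theta = 2.0 * np.pi * u2
z = np.concatenate([radius * np.cos(theta), radius * np.sin(theta)])
print(z.mean(), z.std())  # both should be close to 0 and 1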
class TrainingArguments:
    """
    Represents the arguments we use in our PyTorch integration scripts for
    training tasks.

    Using :class:`NmArgumentParser` we can turn this class into `argparse
    <https://docs.python.org/3/library/argparse.html#module-argparse>`__
    arguments that can be specified on the command line.

    :param train_batch_size: An int representing the training batch size.
    :param test_batch_size: An int representing the test batch size.
    :param arch_key: A str key representing the type of model to use,
        ex: resnet50.
    :param dataset: The dataset to use for training, ex: imagenet, imagenette,
        etc; Set to `imagefolder` for a custom dataset.
    :param dataset_path: Root path to dataset location.
    :param local_rank: DDP argument set by PyTorch in DDP mode, default -1.
    :param checkpoint_path: A path to a previous checkpoint to load the state
        from and resume the state for; Also works with SparseZoo recipes; Set
        to zoo to automatically download and load weights associated with a
        recipe.
    :param init_lr: float representing the initial learning rate for training,
        default=1e-9.
    :param optim_args: Additional arguments to be passed in to the optimizer
        as a json object.
    :param recipe_path: The path to the yaml file containing the modifiers and
        schedule to apply them with; Can also provide a SparseZoo stub
        prefixed with 'zoo:'.
    :param sparse_transfer_learn: Boolean to enable sparse transfer learning
        modifiers to enforce the sparsity for already sparse layers. The
        modifiers are added to the ones to be loaded from the recipe-path.
    :param eval_mode: bool to start evaluation mode so that the model can be
        evaluated on the desired dataset.
    :param optim: str representing the optimizer type to use, one of
        [SGD, Adam, RMSprop].
    :param logs_dir: The path to the directory for saving logs.
    :param save_best_after: int epoch number to start saving the best
        validation result after until the end of training.
    :param save_epochs: int epochs to save checkpoints at.
    :param use_mixed_precision: bool to train model using mixed precision.
        Supported environments are single GPU and multiple GPUs using
        DistributedDataParallel with one GPU per process.
    :param debug_steps: int representing the amount of steps to run for
        training and testing for debug mode, default=-1.
    :param pretrained: The type of pretrained weights to use, default is true
        to load the default pretrained weights for the model. Otherwise should
        be set to the desired weights type: [base, optim, optim-perf]; To not
        load any weights set to one of [none, false].
    :param pretrained_dataset: str representing the dataset to load pretrained
        weights for if pretrained is set; Default is None which will load the
        default dataset for the architecture; Ex can be set to imagenet,
        cifar10, etc.
    :param model_kwargs: json object containing keyword arguments to be passed
        to model constructor.
    :param dataset_kwargs: json object to load keyword arguments to be passed
        to dataset constructor.
    :param model_tag: A str tag to use for the model for saving results under
        save-dir, defaults to the model arch and dataset used.
    :param save_dir: The path to the directory for saving results,
        default="pytorch_vision".
    :param device: str representing the device to run on (can also include
        ids for data parallel), ex: {cpu, cuda, cuda:0,1}.
    :param loader_num_workers: int number of workers to use for data loading,
        default=4.
    :param loader_pin_memory: bool to use pinned memory for data loading,
        default=True.
""" train_batch_size: int = field( metadata={"help": "The batch size to use while training"} ) test_batch_size: int = field( metadata={"help": "The batch size to use while testing"} ) arch_key: str = field( metadata={ "help": "The type of model to use, ex: resnet50, vgg16, mobilenet " "put as help to see the full list (will raise an exception" "with the list)", } ) dataset: str = field( metadata={ "help": "The dataset to use for training, " "ex: imagenet, imagenette, cifar10, etc. " "Set to imagefolder for a generic dataset setup " "with an image folder structure setup like imagenet or" " loadable by a dataset in sparseml.pytorch.datasets" } ) dataset_path: str = field( metadata={ "help": "The root path to where the dataset is stored", } ) local_rank: int = field( default=-1, metadata={ "keep_underscores": True, "help": argparse.SUPPRESS, }, ) checkpoint_path: str = field( default=None, metadata={ "help": "A path to a previous checkpoint to load the state from " "and resume the state for. If provided, pretrained will " "be ignored . If using a SparseZoo recipe, can also " "provide 'zoo' to load the base weights associated with " "that recipe" }, ) init_lr: float = field( default=1e-9, metadata={ "help": "The initial learning rate to use while training, " "the actual initial value used should be set by the" " sparseml recipe" }, ) optim_args: json.loads = field( default_factory=lambda: { "momentum": 0.9, "nesterov": True, "weight_decay": 0.0001, }, metadata={ "help": "Additional args to be passed to the optimizer passed in" " as a json object", }, ) recipe_path: str = field( default=None, metadata={ "help": "The path to the yaml file containing the modifiers and " "schedule to apply them with. Can also provide a " "SparseZoo stub prefixed with 'zoo:' with an optional " "'?recipe_type=' argument" }, ) sparse_transfer_learn: Optional[bool] = field( default=False, metadata={ "help": "Enable sparse transfer learning modifiers to enforce the " "sparsity for already sparse layers. The modifiers are " "added to the ones to be loaded from the recipe-path" }, ) eval_mode: Optional[bool] = field( default=False, metadata={ "help": "Puts into evaluation mode so that the model can be " "evaluated on the desired dataset" }, ) optim: str = field( default="SGD", metadata={"help": "The optimizer type to use, one of [SGD, Adam, RMSprop]"}, ) logs_dir: str = field( default=os.path.join("pytorch_vision_train", "tensorboard-logs"), metadata={ "help": "The path to the directory for saving logs", }, ) save_best_after: int = field( default=-1, metadata={ "help": "start saving the best validation result after the given " "epoch completes until the end of training" }, ) save_epochs: List[int] = field( default_factory=lambda: [], metadata={"help": "epochs to save checkpoints at"} ) use_mixed_precision: Optional[bool] = field( default=False, metadata={ "help": "Trains model using mixed precision. Supported " "environments are single GPU and multiple GPUs using " "DistributedDataParallel with one GPU per process" }, ) debug_steps: int = field( default=-1, metadata={ "help": "Amount of steps to run for training and testing for a " "debug mode" }, ) pretrained: str = field( default=True, metadata={ "help": "The type of pretrained weights to use, " "default is true to load the default pretrained weights for " "the model. Otherwise should be set to the desired weights " "type: [base, optim, optim-perf]. 
To not load any weights set" "to one of [none, false]" }, ) pretrained_dataset: str = field( default=None, metadata={ "help": "The dataset to load pretrained weights for if pretrained is " "set. Default is None which will load the default dataset for " "the architecture. Ex can be set to imagenet, cifar10, etc", }, ) model_kwargs: json.loads = field( default_factory=lambda: {}, metadata={ "help": "Keyword arguments to be passed to model constructor, should " "be given as a json object" }, ) dataset_kwargs: json.loads = field( default_factory=lambda: {}, metadata={ "help": "Keyword arguments to be passed to dataset constructor, " "should be given as a json object", }, ) model_tag: str = field( default=None, metadata={ "help": "A tag to use for the model for saving results under save-dir, " "defaults to the model arch and dataset used", }, ) save_dir: str = field( default="pytorch_vision", metadata={ "help": "The path to the directory for saving results", }, ) device: str = field( default=default_device(), metadata={ "help": "The device to run on (can also include ids for data " "parallel), ex: cpu, cuda, cuda:0,1" }, ) loader_num_workers: int = field( default=4, metadata={"help": "The number of workers to use for data loading"} ) loader_pin_memory: bool = field( default=True, metadata={"help": "Use pinned memory for data loading"} ) def __post_init__(self): # add ddp args env_world_size = int(os.environ.get("WORLD_SIZE", 1)) self.world_size = env_world_size env_rank = int(os.environ.get("RANK", -1)) self.rank = env_rank self.is_main_process = self.rank in [ -1, 0, ] # non DDP execution or 0th DDP process # modify training batch size for give world size assert self.train_batch_size % self.world_size == 0, ( f"Invalid training batch size for world size {self.world_size} " f"given batch size {self.train_batch_size}. " f"world size must divide training batch size evenly." ) self.train_batch_size = self.train_batch_size // self.world_size if "preprocessing_type" not in self.dataset_kwargs and ( "coco" in self.dataset.lower() or "voc" in self.dataset.lower() ): if "ssd" in self.arch_key.lower(): self.dataset_kwargs["preprocessing_type"] = "ssd" elif "yolo" in self.arch_key.lower(): self.dataset_kwargs["preprocessing_type"] = "yolo" if self.local_rank != -1: torch.distributed.init_process_group(backend="nccl", init_method="env://") set_deterministic_seeds(0) self.approximate = False
import esphome.codegen as cg binary_ns = cg.esphome_ns.namespace("binary")
/* searches array for a given element num */
void search ( int *arr, int num )
{
	int i ;

	for ( i = 0 ; i < MAX ; i++ )
	{
		if ( arr[i] == num )
		{
			printf ( "\n\nThe element %d is present at position %d.", num, i + 1 ) ;
			return ;
		}
	}

	/* the loop only completes when num was not found */
	printf ( "\n\nThe element %d is not present in the array.", num ) ;
}
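For comparison, the same linear scan in Python; the 1-based position mirrors the C function's output, and the array bound is implicit in the list length rather than a MAX constant:

def search(arr, num):
    """Print the 1-based position of num in arr, or report that it is absent."""
    for i, value in enumerate(arr):
        if value == num:
            print(f"The element {num} is present at position {i + 1}.")
            return
    print(f"The element {num} is not present in the array.")


search([11, 2, 9, 13, 57], 13)  # The element 13 is present at position 4.
search([11, 2, 9, 13, 57], 7)   # The element 7 is not present in the array.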
/**
 * Creates a StatsdMeterRegistry bean if global metrics are enabled
 * and statsd is enabled. Both will be true by default when this
 * configuration is included in the project.
 *
 * @return A StatsdMeterRegistry
 */
@Bean
@Primary
@Singleton
@Requires(property = MICRONAUT_METRICS_ENABLED, value = StringUtils.TRUE, defaultValue = StringUtils.TRUE)
@Requires(property = STATSD_ENABLED, value = StringUtils.TRUE, defaultValue = StringUtils.TRUE)
@Requires(beans = CompositeMeterRegistry.class)
StatsdMeterRegistry statsdMeterRegistry() {
    return new StatsdMeterRegistry(statsdConfig, Clock.SYSTEM);
}
import java.util.Scanner; public class Main { public static void main(String[] args) { int[] ii = new int[1000100]; ii[0] = 1; ii[1] = 2; for(int i = 2; i < ii.length; i++) if(i % 2 == 0) ii[i] = ii[i-1] + 1; else ii[i] = ii[i-1] + 2; Scanner r = new Scanner(System.in); int a = r.nextInt(); int x = r.nextInt(); int y = r.nextInt(); if(y % a == 0)System.out.println(-1); else{ int level = y / a; if(level == 0 || (level-1) % 2 == 0){ if(-a < 2 * x && 2 * x < a)System.out.println(ii[level]); else System.out.println(-1); }else{ if(-a < x && x < 0)System.out.println(ii[level]); else if(0 < x && x < a)System.out.println(ii[level] + 1); else System.out.println(-1); } } } }
/**
 * @brief The function tests whether the particle is still within the cubic cell box. If the particle has moved outside the box, it returns 0. Otherwise, it returns 1.
 *
 * @param r REBOUND simulation to operate on
 * @param node is the pointer to a node cell
 * @return 0 if the particle is not in the cell, 1 if it is.
 */
static int reb_tree_particle_is_inside_cell(const struct reb_simulation* const r, struct reb_treecell *node){
	if (fabs(r->particles[node->pt].x-node->x) > node->w/2. ||
	    fabs(r->particles[node->pt].y-node->y) > node->w/2. ||
	    fabs(r->particles[node->pt].z-node->z) > node->w/2. ||
	    isnan(r->particles[node->pt].y)) {
		return 0;
	}
	return 1;
}
def associate_asset(self, mod_name, asset_name, source=None): name = asset_name.replace('\\', '/').split('/')[-1] if name in self._assets: asset = self._assets[name] if source is not None: t = 'associate_asset() for %s got source, but asset %r already exists.' raise TypeError(t % (mod_name, asset_name)) else: asset = Asset(asset_name, source) self.add_shared_asset(asset) assets = self._associated_assets.setdefault(mod_name, []) if asset.name not in [a.name for a in assets]: assets.append(asset) assets.sort(key=lambda x: x.i) return 'flexx/assets/shared/' + asset.name
import { createContext, FC, useContext, useEffect, useState } from "react";
import { formatPrice } from "../utils/format";
import api from "../services/api";

export interface IProduct {
  id: string;
  name: string;
  price: number;
  sellingPrice: number;
  imageUrl: string;
  priceFormatted: string;
  sellingPriceFormatted: string;
}

interface ProductContextData {
  products: IProduct[];
}

const ProductContext = createContext<ProductContextData>(
  {} as ProductContextData
);

const ProductProvider: FC = ({ children }) => {
  const [products, setProducts] = useState<IProduct[]>([]);

  useEffect(() => {
    async function loadProducts() {
      const response = await api.get<IProduct[]>("/items");

      const prods = response.data.map((product) => {
        return {
          ...product,
          priceFormatted: formatPrice(product.price),
          sellingPriceFormatted: formatPrice(product.sellingPrice),
        };
      });

      setProducts(prods);
    }

    loadProducts();
  }, []);

  return (
    <ProductContext.Provider value={{ products }}>
      {children}
    </ProductContext.Provider>
  );
};

function useProduct(): ProductContextData {
  const context = useContext(ProductContext);
  return context ?? ({} as ProductContextData);
}

export { ProductProvider, useProduct };
import Axios from 'axios'; import Config from '../Config'; export const backendApi = Axios.create({ baseURL: Config.backendServiceURL, });
// GetSubGroups will fill a passed groups array with all the // direct subgroups. func (g *Group) GetSubGroups(groups *[]*Group) { g.lock.Lock() for _, v := range g.subg { *groups = append(*groups, v) } g.lock.Unlock() }
import numpy as np
from numpy.typing import ArrayLike


def history(locs: ArrayLike, name: int) -> np.void:
    # order events by time, then by location; the stable sort keeps ties in input order
    locs = np.sort(locs, order=("time", "loc"), kind="stable")
    dt = [("locs", locs.dtype, locs.shape), ("name", np.int64)]
    return np.array([(locs, name)], dtype=dt)[0]
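A small usage sketch, assuming the input is a structured array with "time" and "loc" fields as the order= argument requires:

import numpy as np

locs = np.array(
    [(2.0, 5), (1.0, 3), (1.0, 1)],
    dtype=[("time", np.float64), ("loc", np.int64)],
)
record = history(locs, name=7)
print(record["name"])          # 7
print(record["locs"]["time"])  # [1. 1. 2.] -- sorted by time, then loc
print(record["locs"]["loc"])   # [1 3 5]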
/**
 * Simple Widget to display the FPS count of your application.
 * You can easily add it to an instance of ExtRenderLoop.
 *
 * @author Marvin Froehlich (aka Qudus)
 */
public class FPSCounter extends Label implements FPSListener {
    private char decimalSep = '.';
    private float lastFPS = -1.0f;
    private String prefix = null;
    private String postfix = null;

    /**
     * Sets the prefix to be prepended to the FPS value.
     */
    public void setPrefix( String prefix ) {
        this.prefix = prefix;

        setText( prefix, lastFPS, decimalSep, 2, postfix );
    }

    /**
     * @return the prefix to be prepended to the FPS value
     */
    public final String getPrefix() {
        return ( prefix );
    }

    /**
     * Sets the postfix to be appended to the FPS value.
     */
    public void setPostfix( String postfix ) {
        this.postfix = postfix;

        setText( prefix, lastFPS, decimalSep, 2, postfix );
    }

    /**
     * @return the postfix to be appended to the FPS value
     */
    public final String getPostfix() {
        return ( postfix );
    }

    /**
     * Changes the decimal separator to the given char.<br>
     * Use '\0' to not display any decimal places.
     *
     * @param decSep the new decimal separator
     */
    public void setDecimalSeparator( char decSep ) {
        this.decimalSep = decSep;

        setText( prefix, lastFPS, decimalSep, 2, postfix );
    }

    /**
     * @return the decimal separator used for the displayed FPS value
     */
    public final char getDecimalSeparator() {
        return ( decimalSep );
    }

    /**
     * @return the last notified FPS value
     */
    public final float getLastFPS() {
        return ( lastFPS );
    }

    /**
     * {@inheritDoc}
     */
    public void onFPSCountIntervalHit( float fps ) {
        this.lastFPS = fps;

        setText( prefix, lastFPS, decimalSep, 2, postfix );
    }

    /**
     * Creates a new FPSCounter with the given width and height.
     *
     * @param isHeavyWeight
     * @param width the new width of this Widget
     * @param height the new height of this Widget
     * @param backgroundTexture
     */
    public FPSCounter( boolean isHeavyWeight, float width, float height, Texture2D backgroundTexture ) {
        super( isHeavyWeight, width, height, "", null, null, TextAlignment.CENTER_CENTER );

        setBackgroundTexture( backgroundTexture );
    }

    /**
     * Creates a new FPSCounter with the given width and height.
     *
     * @param isHeavyWeight
     * @param width the new width of this Widget
     * @param height the new height of this Widget
     * @param backgroundTexture
     */
    public FPSCounter( boolean isHeavyWeight, float width, float height, String backgroundTexture ) {
        this( isHeavyWeight, width, height, HUDTextureUtils.getTexture( backgroundTexture, true ) );
    }

    /**
     * Creates a new FPSCounter with the given width and height.
     *
     * @param isHeavyWeight
     * @param width the new width of this Widget
     * @param height the new height of this Widget
     */
    public FPSCounter( boolean isHeavyWeight, float width, float height ) {
        this( isHeavyWeight, width, height, (Texture2D)null );
    }

    /**
     * Creates a new FPSCounter with the given width and height.
     *
     * @param width the new width of this Widget
     * @param height the new height of this Widget
     * @param backgroundTexture
     */
    public FPSCounter( float width, float height, Texture2D backgroundTexture ) {
        // forward the texture; it was silently dropped before
        this( false, width, height, backgroundTexture );
    }

    /**
     * Creates a new FPSCounter with the given width and height.
     *
     * @param width the new width of this Widget
     * @param height the new height of this Widget
     * @param backgroundTexture
     */
    public FPSCounter( float width, float height, String backgroundTexture ) {
        this( false, width, height, backgroundTexture );
    }

    /**
     * Creates a new FPSCounter with the given width and height.
     *
     * @param width the new width of this Widget
     * @param height the new height of this Widget
     */
    public FPSCounter( float width, float height ) {
        this( false, width, height );
    }
}
import Data.List

-- reverse application: pipe a value into a function
(|>) a b = b a

main = do
    s1 <- getLine
    s2 <- getLine
    let n = s1 |> (read :: String -> Int)
        d = s2 |> group |> map length |> length
        r = min (d + 2) (length s2)
    print r
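The d binding counts maximal runs of equal adjacent characters in s2 (group splits the string into runs, and the outer length counts them). The same computation in Python via itertools.groupby:

from itertools import groupby


def run_count(s: str) -> int:
    # number of maximal runs of equal adjacent characters, e.g. "aabba" -> 3
    return sum(1 for _key, _run in groupby(s))


print(run_count("aabba"))                          # 3
print(min(run_count("aabba") + 2, len("aabba")))   # 5, mirroring r above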
/**
 * Store the locale to the chosen storage, e.g. the session
 *
 * @param invocation the action invocation
 * @param locale the locale to store
 * @param storage the place to store this locale (like Storage.SESSION.toString())
 *
 * @return the locale
 */
protected Locale storeLocale(ActionInvocation invocation, Locale locale, String storage) {
    Map<String, Object> session = invocation.getInvocationContext().getSession();
    if (session != null) {
        synchronized (session) {
            if (locale == null) {
                storage = Storage.NONE.toString();
                locale = readStoredLocale(invocation, session);
            }
            if (Storage.SESSION.toString().equals(storage)) {
                session.put(attributeName, locale);
            }
        }
    }
    return locale;
}
def process(buf: Buffer): eth_frame = ethernet.Ethernet.read_from_buffer(buf) if eth_frame.next_header == ethernet.ETHER_TYPE_ARP: arp_frame = arp.ARP.read_from_buffer(buf) if arp_frame.operation == arp.OPERATION_REPLY: push_data( str(ip_address(arp_frame.target_protocol_address)), ethernet.mac_to_str(arp_frame.target_hardware_address) ) push_data( str(ip_address(arp_frame.sender_protocol_address)), ethernet.mac_to_str(arp_frame.sender_hardware_address) )
def _GetStdioLogTests(stdio_log_url, tests_failed_list): print('\nFetching builder stdio log file from: %s' % stdio_log_url) stdio_text_file = urllib2.urlopen(stdio_log_url) p_test = r'\[\d*/\d*\] (.*?) \(.*\)' p_exit = r'exit code \(as seen by runtest.py\)\: (\d+)' test_lines = [] exit_flag = False exit_code = None for line in stdio_text_file: if not exit_flag: m = re.match(p_test, line) if m: if line not in test_lines: test_lines.append(line) m = re.match(p_exit, line) if m: exit_flag = True exit_code = m.group(1) stdio_text_file.close() print(' Total run tests extracted: %s' % len(test_lines)) if test_lines: print(' Last run test line: "%s"' % test_lines[-1].strip()) stdio_tests_dict = {} for i, line in enumerate(test_lines): m = re.match(p_test, line) if m: long_test_name = m.group(1) if long_test_name in tests_failed_list: test_result = _FAIL else: test_result = _PASS stdio_tests_dict[long_test_name] = test_result else: print('Error: Invalid test line %s) %s' % (i, line.strip())) print(' Test Exit Code: %s' % exit_code) return stdio_tests_dict
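The two regexes do the heavy lifting above; this standalone sketch shows what they capture against representative log lines (the sample lines are invented for illustration):

import re

p_test = r'\[\d*/\d*\] (.*?) \(.*\)'
p_exit = r'exit code \(as seen by runtest.py\)\: (\d+)'

sample = [
    '[1/3] SuiteA.TestOne (12 ms)',
    '[2/3] SuiteA.TestTwo (7 ms)',
    'exit code (as seen by runtest.py): 1',
]
for line in sample:
    m = re.match(p_test, line)
    if m:
        print('test name:', m.group(1))
    m = re.match(p_exit, line)
    if m:
        print('exit code:', m.group(1))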
// HTTPRequest sends an HTTP request with headers
func HTTPRequest(method, uri string, body []byte, ctxRequest *http.Request, headers map[string]string) ([]byte, error) {
	if ctxRequest != nil {
		beforeRequest := time.Now().UnixNano()
		defer logRequest(ctxRequest, method, uri, beforeRequest)
	}

	req, err := http.NewRequest(method, uri, bytes.NewBuffer(body))
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	if ctxRequest != nil {
		// forward cookies and tracing information from the inbound request
		for _, cookie := range ctxRequest.Cookies() {
			req.AddCookie(cookie)
		}
		traceRequest(ctxRequest, req)
	}

	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("HTTP %s request failed - %s", method, err.Error())
	}
	if resp.StatusCode >= 400 || resp.StatusCode < 200 {
		return nil, fmt.Errorf("%s %s: %s", method, uri, resp.Status)
	}
	respBody, err := ioutil.ReadAll(resp.Body)
	_ = resp.Body.Close()
	return respBody, err
}
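The same pattern, forwarding cookies from an inbound request and failing on out-of-range statuses, as a Python sketch using the requests library; the function name and parameters are illustrative, not taken from the Go code:

import requests


def http_request(method, uri, body=None, headers=None, inbound_cookies=None):
    """Send an HTTP request, forwarding inbound cookies; fail on <200 or >=400."""
    resp = requests.request(
        method,
        uri,
        data=body,
        headers=headers or {},
        cookies=inbound_cookies or {},
        timeout=10,
    )
    # mirror the Go check: 2xx and 3xx pass, everything else is an error
    if resp.status_code >= 400 or resp.status_code < 200:
        raise RuntimeError(f"{method} {uri}: {resp.status_code} {resp.reason}")
    return resp.content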
// Filter implements yaml.Filter func (a *Add) Filter(object *yaml.RNode) (*yaml.RNode, error) { if a.FieldName == "" && a.FieldValue == "" { return nil, errors.Errorf("must specify either fieldName or fieldValue") } if a.Ref == "" { return nil, errors.Errorf("must specify ref") } return object, accept(a, object, a.SettersSchema) }
/* * Access functions for PCI config space using RTAS calls. */ int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { struct pci_controller *hose = bus->sysdata; unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->index << 24); int ret = -1; int rval; rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len); *val = ret; return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; }
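The config address built above is four byte-fields packed into one 32-bit word; a quick Python illustration of the packing (the helper name is made up for this sketch):

def rtas_config_addr(offset, devfn, bus_number, first_busno, hose_index):
    # byte 0: register offset, byte 1: device/function number,
    # byte 2: bus number relative to this controller, byte 3: controller index
    return ((offset & 0xFF)
            | ((devfn & 0xFF) << 8)
            | (((bus_number - first_busno) & 0xFF) << 16)
            | (hose_index << 24))


print(hex(rtas_config_addr(offset=0x04, devfn=0x88, bus_number=3,
                           first_busno=1, hose_index=2)))  # 0x2028804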
def main(fileGlob):
    for fileName in glob.glob(fileGlob):
        extract_references(fileName)
def create(exp):
    import FreeCAD, Part
    if type(exp) is Box:
        vec = FreeCAD.Vector(exp.position.x, exp.position.y, exp.position.z)
        if (exp.rotation.x, exp.rotation.y, exp.rotation.z) == (0, 0, 0):
            rotax = FreeCAD.Vector(0, 0, 1)
        else:
            rotax = FreeCAD.Vector(exp.rotation.x, exp.rotation.y, exp.rotation.z)
        p = Part.makeBox(exp.length, exp.width, exp.height)
        p.translate(vec)
        p.rotate(vec, rotax, exp.rotation.a)
        return p
    elif type(exp) is Cyl:
        vec = FreeCAD.Vector(exp.position.x, exp.position.y, exp.position.z)
        if (exp.rotation.x, exp.rotation.y, exp.rotation.z) == (0, 0, 0):
            rotax = FreeCAD.Vector(0, 0, 1)
        else:
            rotax = FreeCAD.Vector(exp.rotation.x, exp.rotation.y, exp.rotation.z)
        p = Part.makeCylinder(exp.radius, exp.height)
        p.translate(vec)
        p.rotate(vec, rotax, exp.rotation.a)
        return p
    elif type(exp) is Cmpd:
        # string comparison must use ==, not identity via `is`
        if exp.operator == '-':
            return create(exp.part1).cut(create(exp.part2))
        elif exp.operator == '+':
            return create(exp.part1).fuse(create(exp.part2))
    else:
        print "Illegal argument to create: ", exp
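The interpreter above only needs a few attributes on each expression node. A hedged sketch of what those node types could look like; these class definitions are assumptions for illustration, not the project's actual ones:

class Vec3:
    def __init__(self, x, y, z, a=0.0):
        # a is the rotation angle when the vector is used as a rotation
        self.x, self.y, self.z, self.a = x, y, z, a


class Box:
    def __init__(self, length, width, height, position, rotation):
        self.length, self.width, self.height = length, width, height
        self.position, self.rotation = position, rotation


class Cyl:
    def __init__(self, radius, height, position, rotation):
        self.radius, self.height = radius, height
        self.position, self.rotation = position, rotation


class Cmpd:
    def __init__(self, operator, part1, part2):
        self.operator, self.part1, self.part2 = operator, part1, part2


# a box with a cylinder cut out of it
expr = Cmpd('-',
            Box(10, 10, 2, Vec3(0, 0, 0), Vec3(0, 0, 0)),
            Cyl(1, 2, Vec3(5, 5, 0), Vec3(0, 0, 0)))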
/*
 * slot_deform_datarow
 * 		Extract data from the DataRow message into Datum/isnull arrays.
 * 		We always extract all attributes, as specified in tts_tupleDescriptor,
 * 		because there is no easy way to find a random attribute in the DataRow.
 */
static void
slot_deform_datarow(TupleTableSlot *slot)
{
	int			attnum;
	int			i;
	int			col_count;
	char	   *cur = slot->tts_dataRow;
	StringInfo	buffer;
	uint16		n16;
	uint32		n32;
	MemoryContext oldcontext;

	if (slot->tts_tupleDescriptor == NULL || slot->tts_dataRow == NULL)
		return;

	attnum = slot->tts_tupleDescriptor->natts;

	/* fast path: all attributes are already extracted */
	if (slot->tts_nvalid == attnum)
		return;

	Assert(slot->tts_dataRow);

	memcpy(&n16, cur, 2);
	cur += 2;
	col_count = ntohs(n16);

	if (col_count != attnum)
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("Tuple does not match the descriptor")));

	oldcontext = MemoryContextSwitchTo(slot->tts_mcxt);

	if (slot->tts_attinmeta == NULL)
		slot->tts_attinmeta = TupleDescGetAttInMetadata(slot->tts_tupleDescriptor);

	buffer = makeStringInfo();
	for (i = 0; i < attnum; i++)
	{
		int			len;

		memcpy(&n32, cur, 4);
		cur += 4;
		len = ntohl(n32);

		/* a length of -1 denotes a NULL value */
		if (len == -1)
		{
			slot->tts_values[i] = (Datum) 0;
			slot->tts_isnull[i] = true;
		}
		else
		{
			appendBinaryStringInfo(buffer, cur, len);
			cur += len;

			slot->tts_values[i] = InputFunctionCall(slot->tts_attinmeta->attinfuncs + i,
													buffer->data,
													slot->tts_attinmeta->attioparams[i],
													slot->tts_attinmeta->atttypmods[i]);
			slot->tts_isnull[i] = false;

			resetStringInfo(buffer);
		}
	}
	pfree(buffer->data);
	pfree(buffer);

	slot->tts_nvalid = attnum;

	MemoryContextSwitchTo(oldcontext);
}
def _read_until(self, content: str): reply = self._tn.read_until(content.encode('ascii')) reply = Connection.ASNI_REGEX.sub('', reply.decode('ascii')) return reply
<filename>src/main/java/net/aidantaylor/bukkit/sleepytime/SleepyTime.java
package net.aidantaylor.bukkit.sleepytime;

import java.util.ArrayList;
import java.util.List;

import org.bukkit.Bukkit;
import org.bukkit.ChatColor;
import org.bukkit.World;
import org.bukkit.configuration.file.FileConfiguration;
import org.bukkit.entity.Player;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.player.PlayerBedEnterEvent;
import org.bukkit.event.player.PlayerBedLeaveEvent;
import org.bukkit.event.player.PlayerJoinEvent;
import org.bukkit.event.player.PlayerQuitEvent;
import org.bukkit.plugin.java.JavaPlugin;

public final class SleepyTime extends JavaPlugin implements Listener {

	private boolean debug = true;
	private float playerAmount;

	private ArrayList<Player> sleeping = new ArrayList<Player>();
	private List<String> worlds;

	@Override
	public void onEnable() {
		getServer().getPluginManager().registerEvents(this, this);
		saveDefaultConfig();
		load();

		log(getName() + " has been enabled!", true);
	}

	@Override
	public void onDisable() {
		log(getName() + " has been disabled!", true);
	}

	public void load() {
		getConfig().options().copyDefaults(true);
		FileConfiguration configFile = getConfig();

		playerAmount = configFile.getInt("playerAmount");
		worlds = configFile.getStringList("worlds");
		debug = configFile.getBoolean("debug");

		playerAmount = playerAmount / 100;
		log("Player decimal " + playerAmount);
	}

	@EventHandler
	public void PlayerBedEnter(PlayerBedEnterEvent event) {
		Player player = event.getPlayer();

		if (!player.hasPermission("sleepytime.allow")) {
			return;
		}

		if (sleeping.indexOf(player) < 0) {
			sleeping.add(player);
		}

		log(player.getName() + " is sleeping");
		checkSleep(player);
	}

	@EventHandler
	public void PlayerBedLeave(PlayerBedLeaveEvent event) {
		Player player = event.getPlayer();

		// indexOf returns 0 for the first element, so test >= 0 here;
		// with > 0 the first sleeper would never be removed
		if (sleeping.indexOf(player) >= 0) {
			sleeping.remove(player);
		}

		log(player.getName() + " stopped sleeping");
	}

	@EventHandler
	public void onPlayerJoin(PlayerJoinEvent event) {
		checkSleep(event.getPlayer(), true);
	}

	@EventHandler
	public void onPlayerQuit(PlayerQuitEvent event) {
		checkSleep(event.getPlayer(), true);
	}

	public void checkSleep(Player player) {
		checkSleep(player, false);
	}

	public void checkSleep(Player player, boolean noOutput) {
		World world = player.getWorld();

		for (String world2 : worlds) {
			if (world.getName().indexOf(world2) >= 0 && world.getTime() >= 13000) {
				float onlinePlayers = Bukkit.getOnlinePlayers().size();
				float delta = Math.round(onlinePlayers * playerAmount);

				log("Sleep delta " + delta + " sleep decimal " + playerAmount);
				log("players sleeping " + sleeping.size() + " online players " + onlinePlayers);

				if (!noOutput) {
					sendMessage(ChatColor.YELLOW + "" + sleeping.size() + "/" + ((int) delta) + " are currently sleeping...");
				}

				if (sleeping.size() >= delta) {
					log("setting time to day");

					if (world.hasStorm()) {
						world.setStorm(false);
					}
					if (world.isThundering()) {
						world.setThundering(false);
					}

					world.setTime(23450);
					sleeping.clear();
				}
				break;
			}
		}
	}

	public void sendMessage(String ...strings) {
		String output = "";
		for (String s : strings) {
			output += s + " ";
		}

		for (Player player : Bukkit.getOnlinePlayers()) {
			for (String world2 : worlds) {
				if (player.getWorld().getName().indexOf(world2) >= 0) {
					player.sendMessage(output);
				}
			}
		}
	}

	public void log(String string) {
		log(string, false);
	}

	public void log(String string, boolean bypassdebug) {
		if (bypassdebug == true || debug == true) {
			getLogger().info(string);
		}
	}
}
// withPortMappings sets the container port mappings to the host func withPortMappings(portMappings []PortMapping) createOpt { return func(r *createOpts) *createOpts { r.PortMappings = portMappings return r } }
def contains_all(self, points):
    return self.contains(points).all()
// Tests that the log file sink cannot write to a log file // if the log file name is not specified. TEST_METHOD(LogFileCannotWriteWithoutFileName) { CTestLoggerService loggerService; loggerService.SetFileFlags(LoggingFlags_Errors); AssertSucceeded(loggerService.Initialize()); loggerService.LogError(L"Error"); AssertNoLogFile(loggerService); AssertSucceeded(loggerService.Shutdown()); }
As the right-wing leadership of the organized U.S. Jewish community defends Israel against international condemnation for its deadly seizure of a flotilla bearing humanitarian supplies for Gaza, a familiar clutch of neoconservative hawks is going on the offensive against what they see as the flotilla’s chief defender, Turkey.

Outraged by Prime Minister Recep Tayyip Erdogan’s repeated denunciations of the May 31 Israeli raid, as well as his co-sponsorship with Brazil of an agreement with Iran designed to promote renewed negotiations with the West on Tehran’s nuclear program, some neoconservatives are even demanding that the U.S. try to expel Ankara from NATO as one of several suggested actions aimed at punishing Erdogan’s AKP (Justice and Development Party) government.

"Turkey, as a member of NATO, is privy to intelligence information having to do with terrorism and with Iran," noted the latest report by the Jewish Institute for National Security Affairs (JINSA), a hard-line neoconservative group that promotes U.S.-Israeli military ties and has historically cultivated close ties to Turkey’s military, as well.

"If Turkey finds its best friends to be Iran, Hamas, Syria and Brazil (look for Venezuela in the future) the security of that information (and Western technology in weapons in Turkey’s arsenal) is suspect. The United States should seriously consider suspending military cooperation with Turkey as a prelude to removing it from the organization," suggested the group.

Its board of advisers includes many prominent champions of the 2003 Iraq invasion, including former Defense Policy Board chairman Richard Perle, former Central Intelligence Agency (CIA) director James Woolsey, and former U.N. Amb. John Bolton.

Neoconservative publications, notably the Wall Street Journal, the Weekly Standard and the National Review, have also been firing away at the AKP government since the raid.

"Turkey now represents a major element in the global panorama of radical Islam," declared the Standard‘s Stephen Schwartz, while Daniel Pipes, the controversial director of the Likudist Middle East Forum (MEF), echoed JINSA’s call for ousting Ankara from NATO and urged Washington to provide direct support for Turkey’s opposition parties in an article published by the National Review Online.

The Journal has been running editorials and op-eds attacking Turkey on virtually a daily basis since the raid, accusing its government, among other things, of having "an ingrained hostility toward the Jewish state, remarkable sympathies for nearby radical regimes, and an attitude toward extremist groups like the IHH (the Islamist group that sponsored the flotilla’s flagship, the Mavi Marmara) that borders on complicity."

On Monday, it ran an op-ed by long-time hawk Victor Davis Hanson that labeled the IHH "a terrorist organization with ties to al-Qaeda", while an earlier op-ed, by Robert Pollock, its editorial features editor, called Erdogan and his foreign minister, Ahmet Davutoglu, "demagogues appealing to the worst elements in their own country and the broader Middle East."

Meanwhile, in an op-ed published by The Forward, a Jewish weekly, Michael Rubin, a Perle protégé at the American Enterprise Institute (AEI), accused Turkey of having "become a conduit for the smuggling of weapons to Israel’s enemies", notably Lebanon’s Hezbollah.
The onslaught is ironic both because of the neoconservatives’ long cultivation of Turkey and their avowed support for promoting democratic governance — of which they have singled out Turkey for special praise — in the Muslim world.

Neoconservatives were among the most important promoters of the military alliance between Israel and Turkey that began to take shape in the late 1980s and was consolidated by the mid-1990s. In fact, Perle and another of his protégés, former undersecretary of defense for policy Douglas Feith, worked as paid lobbyists for Turkey during that period, in major part to persuade the powerful "Israel Lobby" to promote Ankara’s interests on Capitol Hill.

In 1996, the two men participated in a task force chaired by Perle that proposed to incoming Israeli Prime Minister Binyamin Netanyahu that he work with Turkey and Jordan to remove Iraqi President Saddam Hussein from power as part of an alliance designed to transform the strategic balance in the Middle East permanently in favor of Israel.

But the Turkey promoted by Perle and his fellow neocons in the 1980s and ’90s was one dominated by a secular business and political elite carefully monitored by an all-powerful military institution that mounted three coups d’état between 1960 and 1980 and intervened a fourth time in 1997 to oust an Islamist-led government.

Despite its close links to both the U.S. and Israel, however, the Turkish military badly disappointed the neocons in the run-up to Washington’s invasion of Iraq in March 2003. Instead of insisting that the civilian government at the time grant U.S. requests to use Turkish territory as a major launching pad into northern Iraq, the armed forces decided to defer to overwhelming parliamentary and public opposition to the invasion.

"I think for whatever reason they did not play the strong leadership role on that issue that we would have expected," complained then-Deputy Secretary of Defense Paul Wolfowitz, a long-time Perle friend and colleague who, despite his lavish praise of Turkey as a model Muslim democracy, headed repeated efforts by the George W. Bush administration to persuade Turkey’s national security council — where the military’s voice was dominant — to effectively overrule its parliament.

Erdogan, who became prime minister just a week before the invasion and whose political and economic reforms have been widely praised in the West, at first sought good relations with Israel. As late as 2007, he arranged for Shimon Peres to become the first Israeli president to address the Turkish parliament.

By then, however, many neocons had become concerned about Erdogan’s efforts to weaken the military’s power, his warm reception of a top Hamas leader in 2005, criticism of Israel’s military campaign against Hezbollah in 2006, and rapprochement with Syria.

When the military not so subtly threatened to intervene against Erdogan and the AKP in 2007, some neocons, notably Perle, suggested that the U.S. should not try to discourage it. Others, including the Standard‘s Schwartz and Pipes, encouraged it as the lesser of two evils, even as the Journal defended the AKP as "more democratic than the secularists."

Since Erdogan’s furious denunciation, at the Davos World Economic Forum (WEF) in January 2009, of Israel’s Cast Lead operation in Gaza — and of Peres personally — however, neocons of virtually all stripes, including those, like the Journal‘s editorial writers, who have praised the AKP as a democratizing force, have turned against Ankara.
And the flotilla incident, combined with Erdogan’s perceived defense of Iran’s nuclear program, has raised their animus to new heights.

"A combination of Islamist rule, resentment at exclusion from Europe, and a neo-Ottomanist ideology that envisions Turkey as a great power in the Middle East have made Turkey a state that is often plainly hostile not only to Israel but to American aims and interests," wrote Eliot Cohen, professor at Johns Hopkins University, in a Journal op-ed Monday.

(Inter Press Service)
/* tslint:disable */ /** * This file was automatically generated by json-schema-to-typescript. * DO NOT MODIFY IT BY HAND. Instead, modify the source JSONSchema file, * and run json-schema-to-typescript to regenerate this file. */ export type AddressToValidate = PartialAddress; export type PostalCode = string; export type CountryCode = string; export type AddressResidentialIndicator = "unknown" | "yes" | "no"; export type ValidateAddressRequestBody = AddressToValidate[]; export interface PartialAddress { name?: string; phone?: string; company_name?: string; address_line1?: string; address_line2?: string; address_line3?: string; city_locality?: string; state_province?: string; postal_code?: PostalCode; country_code?: CountryCode; address_residential_indicator?: AddressResidentialIndicator & string; }
package com.example.SpringArticle.Service; import com.example.SpringArticle.Entity.Article; import com.example.SpringArticle.dto.ArticleForm; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import javax.transaction.Transactional; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import static org.junit.jupiter.api.Assertions.*; @SpringBootTest class ArticleServiceTest { @Autowired ArticleService articleService; @Test void articles() { Article a = new Article(1L, "AAAA", "1111"); Article b = new Article(2L, "BBBB", "3333"); Article c = new Article(3L, "CCCC", "5555"); List<Article> expected = new ArrayList<Article>(Arrays.asList(a,b,c)); List<Article> articles = articleService.articles(); assertEquals(expected.toString(), articles.toString()); } @Test void detail_Success() { Long id = 1L; Article expected = new Article(1L, "AAAA", "1111"); Article actual = articleService.detail(id); assertEquals(expected.toString(), actual.toString()); } @Test void detail_Failed_noID() { Long id = -1L; Article expected = null; Article actual = articleService.detail(id); assertEquals(expected, actual); } @Test @Transactional void create_Success_TitleAndContent() { String title = "GGGG"; String content = "8888"; ArticleForm articleForm = new ArticleForm(null, title, content); Article expected = new Article(4L, title, content); Article actual = articleService.create(articleForm); assertEquals(expected.toString(), actual.toString()); } @Test @Transactional void create_Failed_ID() { String title = "HHHH"; String content = "9999"; ArticleForm articleForm = new ArticleForm(4L, title, content); Article expected = null; Article actual = articleService.create(articleForm); assertEquals(expected, actual); } @Test @Transactional void update_Success() { Long id = 1L; String title = "RRRR"; String content = "1111"; ArticleForm articleForm = new ArticleForm(1L, title, content); Article updated = articleService.update(id, articleForm); Article expected = articleService.detail(1L); Article actual = articleService.update(1L, articleForm); assertEquals(expected.toString(), actual.toString()); } @Test @Transactional void update_Failed_noID() { Long id = -1L; String title = "RRRR"; String content = "1111"; ArticleForm articleForm = new ArticleForm(id, title, content); Article expected = articleService.detail(id); Article actual = articleService.update(id, articleForm); assertEquals(expected, actual); } @Test @Transactional void delete_Success() { Long id = 1L; Article deleted = articleService.Delete(id); Article expected = articleService.detail(id); Article actual = articleService.Delete(id); assertEquals(expected, actual); } @Test @Transactional void delete_Failed_noID() { Long id = -1L; Article deleted = articleService.Delete(id); Article expected = articleService.detail(id); Article actual = articleService.detail(id); assertEquals(expected, actual); } }
We know that “money” plays a significant role in our lives. Though money is not everything, it can help us achieve our goals, so it is vital to increase our financial intelligence.

Realize what true wealth is: True wealth is not just having a luxurious house and cars. Real wealth is FAMILY, FITNESS and FREEDOM. Some people are very rich financially but have no family to enjoy it with, or have lost their family through continuous disputes; they are always tired from overworking, doing a job they hate. Such a person is rich only financially and still lacks true wealth.

Attract money instead of chasing it: The best way to make money is by attracting it, not by chasing it. The author compares money to a fickle cat: if you chase it around, it flees from you, but if you ignore it and concentrate on what tempts the cat, it comes to you. Money chasers ask questions like "How can I make passive money?" or "How can I make money starting a business?" What you should do instead is concentrate on solving people's problems. Chase needs, not money. A business that solves problems attracts money. Remember this truth: money is nothing but a reflection of the value you offer to others, so the more problems you solve for people, the more money you will make.

Construct a business system: How do you build wealth? Not by cutting your expenses while earning the same salary, but by growing your income while controlling your expenses. To do that, you have to build a business system. Here the "system" plays the key role: just having a business is not enough. Your business must be able to run without your constant presence; if it cannot, it is just a job, not a business. Building a system gives you freedom because it stops you from trading all of your time for money; you can spend your time on other activities, and the money still comes.

How do you build a system? Automation is the answer. An online presence keeps your store open 24/7 without your effort. (1) Find the parts of your business that still require your time. (2) Identify ways to automate them.

Be responsible and accountable: You must have both a responsible and an accountable attitude. Being responsible means that when something bad happens to you, you do not blame external factors such as customers, your family, your colleagues, friends, or the economy; you look at yourself and your choices. But that is not enough. Being accountable means taking precautions to ensure the bad thing will not happen again.

Execution, not ideas, is king: Many people think a good idea is what builds a strong business, but that is not true; good execution builds a good business. The best example is Google: there were many search engines before Google, and Google became the best because of its superior execution.

Focus: Staying focused is essential to building a successful business, because that is how you build momentum, so don't try to do multiple tasks at once. As the author says, "A scattered focus leads to scattered results." It is far better to focus on one task and do it in the best conceivable way.

It takes time and commitment to apply these lessons, but they are achievable.
Adopt the mindset, take steady action, and you will get results.
<filename>test.cc
#include <gtest/gtest.h>
#include <glog/logging.h>
#include <gflags/gflags.h>

TEST(Math, Adding) {
  LOG(INFO) << "Hello World!";
  EXPECT_EQ(2 + 2, 4);
}
/**
 * Created by Service_User on 2018/7/27.
 */
public class PlantInfoDialog {

    static List<String> plantList = new ArrayList<String>();

    private static ListView lv_planList;
    private static EditText et_plantName;
    private static Button btn_plantSearch;

    public static void showDialog(Context context) {
        AlertDialog.Builder builder = new AlertDialog.Builder(context);
        View view = LayoutInflater.from(context).inflate(R.layout.dialog_plantinfo, null);

        lv_planList = (ListView) view.findViewById(R.id.lv_planList);
        lv_planList.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
            }
        });

        et_plantName = (EditText) view.findViewById(R.id.et_plantName);
        btn_plantSearch = (Button) view.findViewById(R.id.btn_plantSearch);

        et_plantName.addTextChangedListener(new TextWatcher() {
            @Override
            public void beforeTextChanged(CharSequence s, int start, int count, int after) {
            }

            @Override
            public void onTextChanged(CharSequence s, int start, int before, int count) {
            }

            @Override
            public void afterTextChanged(Editable s) {
                if (s.length() == 0) {
                    plantList.clear();
                    httpPlantInfo();
                }
            }
        });

        // attach the inflated layout; without setView the dialog shows no content
        builder.setView(view);

        AlertDialog alertDialog = builder.create();
        alertDialog.setMessage("type");
        alertDialog.show();
    }

    public static void httpPlantInfo() {
        HttpClient.getRequest("https://api.apishop.net/common/plantFamily/queryPlantList"
                + "?apiKey=laUuwV4e99fe7400a5ea670e5c6cb78b74c84eeccbe3af4&pageSize=608", new Callback() {
            @Override
            public void onFailure(Call call, IOException e) {
                Log.d("addPlant", e.getMessage());
            }

            @Override
            public void onResponse(Call call, Response response) throws IOException {
                String result = response.body().string();
                Log.d("addPlant", result);
                jsonPlantInfo(result);
            }
        });
    }

    public static void jsonPlantInfo(String text) {
        try {
            JSONObject jsonObject = new JSONObject(text);
            String result = jsonObject.getString("result");
            JSONObject jsonObject1 = new JSONObject(result);
            String plants = jsonObject1.getString("plantList");
            JSONArray array = new JSONArray(plants);
            for (int i = 0; i < array.length(); i++) {
                JSONObject object = array.getJSONObject(i);
                String name = object.getString("name");
                plantList.add(name);
            }

            // views must be updated on the UI thread, not on the OkHttp worker thread
            lv_planList.post(new Runnable() {
                @Override
                public void run() {
                    ArrayAdapter<String> arrayAdapter = new ArrayAdapter<String>(AppContext.getInstance(),
                            android.R.layout.simple_list_item_1, plantList);
                    lv_planList.setAdapter(arrayAdapter);
                }
            });
        } catch (JSONException e) {
            e.printStackTrace();
        }
    }
}
Characterizing Microblogs with Topic Models As microblogging grows in popularity, services like Twitter are coming to support information gathering needs above and beyond their traditional roles as social networks. But most users’ interaction with Twitter is still primarily focused on their social graphs, forcing the often inappropriate conflation of “people I follow” with “stuff I want to read.” We characterize some information needs that the current Twitter interface fails to support, and argue for better representations of content for solving these challenges. We present a scalable implementation of a partially supervised learning model (Labeled LDA) that maps the content of the Twitter feed into dimensions. These dimensions correspond roughly to substance, style, status, and social characteristics of posts. We characterize users and tweets using this model, and present results on two information consumption oriented tasks.
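The key mechanism of Labeled LDA, constraining each document's admissible topics to its own label set during Gibbs sampling, is compact enough to sketch. A toy Python implementation of that constraint, written for illustration rather than as the authors' scalable system:

import numpy as np


def labeled_lda_gibbs(docs, labels, num_topics, iters=200, alpha=0.1, beta=0.01, seed=0):
    """Collapsed Gibbs sampling where each document may only use its own labels as topics."""
    rng = np.random.default_rng(seed)
    word_id = {w: i for i, w in enumerate(sorted({w for doc in docs for w in doc}))}
    V = len(word_id)
    n_tw = np.zeros((num_topics, V))             # topic-word counts
    n_dt = [np.zeros(num_topics) for _ in docs]  # per-document topic counts
    n_t = np.zeros(num_topics)                   # topic totals
    z = []                                       # current topic assignment per token
    for d, doc in enumerate(docs):
        zd = []
        for w in doc:
            t = rng.choice(labels[d])            # initialize within the label set
            zd.append(t)
            n_tw[t, word_id[w]] += 1
            n_dt[d][t] += 1
            n_t[t] += 1
        z.append(zd)
    for _ in range(iters):
        for d, doc in enumerate(docs):
            lab = np.array(labels[d])
            for i, w in enumerate(doc):
                t, wi = z[d][i], word_id[w]
                n_tw[t, wi] -= 1; n_dt[d][t] -= 1; n_t[t] -= 1
                # Labeled LDA's constraint: sample only over this document's labels
                p = (n_dt[d][lab] + alpha) * (n_tw[lab, wi] + beta) / (n_t[lab] + V * beta)
                t = rng.choice(lab, p=p / p.sum())
                z[d][i] = t
                n_tw[t, wi] += 1; n_dt[d][t] += 1; n_t[t] += 1
    return n_tw, word_id


# tiny usage sketch: label 0 might stand for a social dimension, label 1 for status
docs = [["rt", "great", "game"], ["traffic", "jam", "downtown"]]
labels = [[0], [1]]
n_tw, word_id = labeled_lda_gibbs(docs, labels, num_topics=2)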
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Faraday FTGMAC100 Gigabit Ethernet * * (C) Copyright 2009-2011 Faraday Technology * Po-Yu Chuang <[email protected]> */ #ifndef __FTGMAC100_H #define __FTGMAC100_H #define FTGMAC100_OFFSET_ISR 0x00 #define FTGMAC100_OFFSET_IER 0x04 #define FTGMAC100_OFFSET_MAC_MADR 0x08 #define FTGMAC100_OFFSET_MAC_LADR 0x0c #define FTGMAC100_OFFSET_MAHT0 0x10 #define FTGMAC100_OFFSET_MAHT1 0x14 #define FTGMAC100_OFFSET_NPTXPD 0x18 #define FTGMAC100_OFFSET_RXPD 0x1c #define FTGMAC100_OFFSET_NPTXR_BADR 0x20 #define FTGMAC100_OFFSET_RXR_BADR 0x24 #define FTGMAC100_OFFSET_HPTXPD 0x28 #define FTGMAC100_OFFSET_HPTXR_BADR 0x2c #define FTGMAC100_OFFSET_ITC 0x30 #define FTGMAC100_OFFSET_APTC 0x34 #define FTGMAC100_OFFSET_DBLAC 0x38 #define FTGMAC100_OFFSET_DMAFIFOS 0x3c #define FTGMAC100_OFFSET_REVR 0x40 #define FTGMAC100_OFFSET_FEAR 0x44 #define FTGMAC100_OFFSET_TPAFCR 0x48 #define FTGMAC100_OFFSET_RBSR 0x4c #define FTGMAC100_OFFSET_MACCR 0x50 #define FTGMAC100_OFFSET_MACSR 0x54 #define FTGMAC100_OFFSET_TM 0x58 #define FTGMAC100_OFFSET_PHYCR 0x60 #define FTGMAC100_OFFSET_PHYDATA 0x64 #define FTGMAC100_OFFSET_FCR 0x68 #define FTGMAC100_OFFSET_BPR 0x6c #define FTGMAC100_OFFSET_WOLCR 0x70 #define FTGMAC100_OFFSET_WOLSR 0x74 #define FTGMAC100_OFFSET_WFCRC 0x78 #define FTGMAC100_OFFSET_WFBM1 0x80 #define FTGMAC100_OFFSET_WFBM2 0x84 #define FTGMAC100_OFFSET_WFBM3 0x88 #define FTGMAC100_OFFSET_WFBM4 0x8c #define FTGMAC100_OFFSET_NPTXR_PTR 0x90 #define FTGMAC100_OFFSET_HPTXR_PTR 0x94 #define FTGMAC100_OFFSET_RXR_PTR 0x98 #define FTGMAC100_OFFSET_TX 0xa0 #define FTGMAC100_OFFSET_TX_MCOL_SCOL 0xa4 #define FTGMAC100_OFFSET_TX_ECOL_FAIL 0xa8 #define FTGMAC100_OFFSET_TX_LCOL_UND 0xac #define FTGMAC100_OFFSET_RX 0xb0 #define FTGMAC100_OFFSET_RX_BC 0xb4 #define FTGMAC100_OFFSET_RX_MC 0xb8 #define FTGMAC100_OFFSET_RX_PF_AEP 0xbc #define FTGMAC100_OFFSET_RX_RUNT 0xc0 #define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4 #define FTGMAC100_OFFSET_RX_COL_LOST 0xc8 /* * Interrupt status register & interrupt enable register */ #define FTGMAC100_INT_RPKT_BUF (1 << 0) #define FTGMAC100_INT_RPKT_FIFO (1 << 1) #define FTGMAC100_INT_NO_RXBUF (1 << 2) #define FTGMAC100_INT_RPKT_LOST (1 << 3) #define FTGMAC100_INT_XPKT_ETH (1 << 4) #define FTGMAC100_INT_XPKT_FIFO (1 << 5) #define FTGMAC100_INT_NO_NPTXBUF (1 << 6) #define FTGMAC100_INT_XPKT_LOST (1 << 7) #define FTGMAC100_INT_AHB_ERR (1 << 8) #define FTGMAC100_INT_PHYSTS_CHG (1 << 9) #define FTGMAC100_INT_NO_HPTXBUF (1 << 10) /* Interrupts we care about in NAPI mode */ #define FTGMAC100_INT_BAD (FTGMAC100_INT_RPKT_LOST | \ FTGMAC100_INT_XPKT_LOST | \ FTGMAC100_INT_AHB_ERR | \ FTGMAC100_INT_NO_RXBUF) /* Normal RX/TX interrupts, enabled when NAPI off */ #define FTGMAC100_INT_RXTX (FTGMAC100_INT_XPKT_ETH | \ FTGMAC100_INT_RPKT_BUF) /* All the interrupts we care about */ #define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \ FTGMAC100_INT_BAD) /* * Interrupt timer control register */ #define FTGMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0) #define FTGMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4) #define FTGMAC100_ITC_RXINT_TIME_SEL (1 << 7) #define FTGMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8) #define FTGMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12) #define FTGMAC100_ITC_TXINT_TIME_SEL (1 << 15) /* * Automatic polling timer control register */ #define FTGMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0) #define FTGMAC100_APTC_RXPOLL_TIME_SEL (1 << 4) #define FTGMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8) #define FTGMAC100_APTC_TXPOLL_TIME_SEL (1 << 12) /* * DMA burst 
length and arbitration control register */ #define FTGMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 0) #define FTGMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 3) #define FTGMAC100_DBLAC_RX_THR_EN (1 << 6) #define FTGMAC100_DBLAC_RXBURST_SIZE(x) (((x) & 0x3) << 8) #define FTGMAC100_DBLAC_TXBURST_SIZE(x) (((x) & 0x3) << 10) #define FTGMAC100_DBLAC_RXDES_SIZE(x) (((x) & 0xf) << 12) #define FTGMAC100_DBLAC_TXDES_SIZE(x) (((x) & 0xf) << 16) #define FTGMAC100_DBLAC_IFG_CNT(x) (((x) & 0x7) << 20) #define FTGMAC100_DBLAC_IFG_INC (1 << 23) /* * DMA FIFO status register */ #define FTGMAC100_DMAFIFOS_RXDMA1_SM(dmafifos) ((dmafifos) & 0xf) #define FTGMAC100_DMAFIFOS_RXDMA2_SM(dmafifos) (((dmafifos) >> 4) & 0xf) #define FTGMAC100_DMAFIFOS_RXDMA3_SM(dmafifos) (((dmafifos) >> 8) & 0x7) #define FTGMAC100_DMAFIFOS_TXDMA1_SM(dmafifos) (((dmafifos) >> 12) & 0xf) #define FTGMAC100_DMAFIFOS_TXDMA2_SM(dmafifos) (((dmafifos) >> 16) & 0x3) #define FTGMAC100_DMAFIFOS_TXDMA3_SM(dmafifos) (((dmafifos) >> 18) & 0xf) #define FTGMAC100_DMAFIFOS_RXFIFO_EMPTY (1 << 26) #define FTGMAC100_DMAFIFOS_TXFIFO_EMPTY (1 << 27) #define FTGMAC100_DMAFIFOS_RXDMA_GRANT (1 << 28) #define FTGMAC100_DMAFIFOS_TXDMA_GRANT (1 << 29) #define FTGMAC100_DMAFIFOS_RXDMA_REQ (1 << 30) #define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31) /* * Feature Register */ #define FTGMAC100_REVR_NEW_MDIO_INTERFACE BIT(31) /* * Receive buffer size register */ #define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff) /* * MAC control register */ #define FTGMAC100_MACCR_TXDMA_EN (1 << 0) #define FTGMAC100_MACCR_RXDMA_EN (1 << 1) #define FTGMAC100_MACCR_TXMAC_EN (1 << 2) #define FTGMAC100_MACCR_RXMAC_EN (1 << 3) #define FTGMAC100_MACCR_RM_VLAN (1 << 4) #define FTGMAC100_MACCR_HPTXR_EN (1 << 5) #define FTGMAC100_MACCR_LOOP_EN (1 << 6) #define FTGMAC100_MACCR_ENRX_IN_HALFTX (1 << 7) #define FTGMAC100_MACCR_FULLDUP (1 << 8) #define FTGMAC100_MACCR_GIGA_MODE (1 << 9) #define FTGMAC100_MACCR_CRC_APD (1 << 10) #define FTGMAC100_MACCR_PHY_LINK_LEVEL (1 << 11) #define FTGMAC100_MACCR_RX_RUNT (1 << 12) #define FTGMAC100_MACCR_JUMBO_LF (1 << 13) #define FTGMAC100_MACCR_RX_ALL (1 << 14) #define FTGMAC100_MACCR_HT_MULTI_EN (1 << 15) #define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16) #define FTGMAC100_MACCR_RX_BROADPKT (1 << 17) #define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18) #define FTGMAC100_MACCR_FAST_MODE (1 << 19) #define FTGMAC100_MACCR_SW_RST (1 << 31) /* * test mode control register */ #define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28) #define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27) #define FTGMAC100_TM_DEFAULT \ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV) /* * PHY control register */ #define FTGMAC100_PHYCR_MDC_CYCTHR_MASK 0x3f #define FTGMAC100_PHYCR_MDC_CYCTHR(x) ((x) & 0x3f) #define FTGMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16) #define FTGMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21) #define FTGMAC100_PHYCR_MIIRD (1 << 26) #define FTGMAC100_PHYCR_MIIWR (1 << 27) /* * PHY data register */ #define FTGMAC100_PHYDATA_MIIWDATA(x) ((x) & 0xffff) #define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff) /* * Flow control register */ #define FTGMAC100_FCR_FC_EN (1 << 0) #define FTGMAC100_FCR_FCTHR_EN (1 << 2) #define FTGMAC100_FCR_PAUSE_TIME(x) (((x) & 0xffff) << 16) /* * Transmit descriptor, aligned to 16 bytes */ struct ftgmac100_txdes { __le32 txdes0; /* Control & status bits */ __le32 txdes1; /* Irq, checksum and vlan control */ __le32 txdes2; /* Reserved */ __le32 txdes3; /* DMA buffer address */ } __attribute__ ((aligned(16))); #define 
FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff) #define FTGMAC100_TXDES0_CRC_ERR (1 << 19) #define FTGMAC100_TXDES0_LTS (1 << 28) #define FTGMAC100_TXDES0_FTS (1 << 29) #define FTGMAC100_TXDES0_TXDMA_OWN (1 << 31) #define FTGMAC100_TXDES1_VLANTAG_CI(x) ((x) & 0xffff) #define FTGMAC100_TXDES1_INS_VLANTAG (1 << 16) #define FTGMAC100_TXDES1_TCP_CHKSUM (1 << 17) #define FTGMAC100_TXDES1_UDP_CHKSUM (1 << 18) #define FTGMAC100_TXDES1_IP_CHKSUM (1 << 19) #define FTGMAC100_TXDES1_LLC (1 << 22) #define FTGMAC100_TXDES1_TX2FIC (1 << 30) #define FTGMAC100_TXDES1_TXIC (1 << 31) /* * Receive descriptor, aligned to 16 bytes */ struct ftgmac100_rxdes { __le32 rxdes0; /* Control & status bits */ __le32 rxdes1; /* Checksum and vlan status */ __le32 rxdes2; /* length/type on AST2500 */ __le32 rxdes3; /* DMA buffer address */ } __attribute__ ((aligned(16))); #define FTGMAC100_RXDES0_VDBC 0x3fff #define FTGMAC100_RXDES0_MULTICAST (1 << 16) #define FTGMAC100_RXDES0_BROADCAST (1 << 17) #define FTGMAC100_RXDES0_RX_ERR (1 << 18) #define FTGMAC100_RXDES0_CRC_ERR (1 << 19) #define FTGMAC100_RXDES0_FTL (1 << 20) #define FTGMAC100_RXDES0_RUNT (1 << 21) #define FTGMAC100_RXDES0_RX_ODD_NB (1 << 22) #define FTGMAC100_RXDES0_FIFO_FULL (1 << 23) #define FTGMAC100_RXDES0_PAUSE_OPCODE (1 << 24) #define FTGMAC100_RXDES0_PAUSE_FRAME (1 << 25) #define FTGMAC100_RXDES0_LRS (1 << 28) #define FTGMAC100_RXDES0_FRS (1 << 29) #define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31) /* Errors we care about for dropping packets */ #define RXDES0_ANY_ERROR ( \ FTGMAC100_RXDES0_RX_ERR | \ FTGMAC100_RXDES0_CRC_ERR | \ FTGMAC100_RXDES0_FTL | \ FTGMAC100_RXDES0_RUNT | \ FTGMAC100_RXDES0_RX_ODD_NB) #define FTGMAC100_RXDES1_VLANTAG_CI 0xffff #define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20) #define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20) #define FTGMAC100_RXDES1_PROT_IP (0x1 << 20) #define FTGMAC100_RXDES1_PROT_TCPIP (0x2 << 20) #define FTGMAC100_RXDES1_PROT_UDPIP (0x3 << 20) #define FTGMAC100_RXDES1_LLC (1 << 22) #define FTGMAC100_RXDES1_DF (1 << 23) #define FTGMAC100_RXDES1_VLANTAG_AVAIL (1 << 24) #define FTGMAC100_RXDES1_TCP_CHKSUM_ERR (1 << 25) #define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26) #define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27) #endif /* __FTGMAC100_H */
#[cfg(test)] mod tests { use twentyone::game::{Dealer, DealerRequest, Player, PlayerAction}; use twentyone::{cards, game}; #[test] fn deck_tests() { let mut deck = cards::create_deck(); cards::shuffle_deck(&mut deck); let card = deck.get(0).unwrap().clone(); // Draw card (returns first card of deck and removes it from the vector) assert_eq!(cards::draw_card(&mut deck).unwrap(), card); // Ensure that the vector length has been reduced from 52 to 51 assert_eq!(deck.len(), 51); } #[test] fn shoe_tests() { let mut shoe = cards::create_shoe(6); cards::shuffle_deck(&mut shoe); let card = shoe.get(0).unwrap().clone(); // Draw card (returns first card of deck and removes it from the vector) assert_eq!(cards::draw_card(&mut shoe).unwrap(), card); // Ensure that the vector length has been reduced from 312 to 311 assert_eq!(shoe.len(), 311); } #[test] fn hand_tests() { let mut deck = cards::create_deck(); cards::shuffle_deck(&mut deck); let mut hand: Vec<[char; 2]> = Vec::new(); let card = deck.get(0).unwrap().clone(); // Hit card from deck to hand cards::hit_card(&mut deck, &mut hand); assert_eq!(card, hand[0]); } #[test] fn game_tests() { let mut deck = cards::create_deck(); // Test hand value calculation let deck_slice = &deck[..13].iter().cloned().collect(); assert_eq!(game::get_hand_value(&deck_slice, false), 95); cards::shuffle_deck(&mut deck); let mut hand = Vec::new(); cards::hit_card(&mut deck, &mut hand); cards::hit_card(&mut deck, &mut hand); // Test hand splitting checks assert_eq!(game::can_split(&hand), hand[0][1] == hand[1][1]); } #[test] fn player_dealer_tests() { fn callback(request: DealerRequest, player: Option<&Player>, _: &Dealer) -> PlayerAction { match request { DealerRequest::Play(i) => { println!("Dealer requested play"); let value = game::get_hand_value(&player.unwrap().hands()[i], true); if value < 17 { println!("Hand is <17, hitting"); PlayerAction::Hit } else { println!("Hand is >=17, standing"); PlayerAction::Stand } } DealerRequest::Bet => { println!("Dealer requested bet"); PlayerAction::Bet(10) } DealerRequest::UpCard(card) => { println!("Dealer up card: {}{}", card[0], card[1]); PlayerAction::None } DealerRequest::HitCard(card) => { println!("Dealer hit card: {}{}", card[0], card[1]); PlayerAction::None } DealerRequest::DealerHand(hand) => { println!( "Dealer hand value was {}", game::get_hand_value(&hand, true) ); PlayerAction::None } DealerRequest::Error(_) => { println!("Dealer returned an error"); PlayerAction::None } DealerRequest::LowCards => { println!("Dealer low on cards, automatically creating new shoe"); PlayerAction::None } } } let mut shoe = cards::create_shoe(6); cards::shuffle_deck(&mut shoe); let mut dealer = Dealer::new(shoe, game::DEFAULT_CONFIG, &callback); // Mutable reference to players vector let players = dealer.players_mut(); let player = Player::new(1000); players.push(player); // Try playing 5 rounds for _ in 0..5 { println!("--- New Round ---"); dealer.play_round(true); } } }
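// Hedged addition, not part of the original suite: a standalone test module that
// uses only the calls exercised above. The None-on-empty behaviour of draw_card
// is an assumption about the API, not something the original tests confirm.
#[cfg(test)]
mod extra_tests {
    use twentyone::cards;

    #[test]
    fn empty_deck_test() {
        let mut deck = cards::create_deck();
        // Drain all 52 cards.
        for _ in 0..52 {
            cards::draw_card(&mut deck);
        }
        assert_eq!(deck.len(), 0);
        // Assumption: drawing from an empty deck yields None rather than panicking.
        assert!(cards::draw_card(&mut deck).is_none());
    }
}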
Branson, Missouri (CNN) Democrats could pick up the Senate majority this November -- thanks to Donald Trump voters in Missouri.

The Missouri Senate race has shaped up to be one of the most unexpected opportunities for Democrats to flip a seat this November. But unlike many Democrats looking for an advantage by tying their opponents to Trump, Missouri Secretary of State Jason Kander's campaign could actually benefit from Trump's outsider message. The underdog has staked his campaign on being a Washington outsider -- painting his opponent, incumbent Roy Blunt, as a lobbyist-beholden insider.

Kander's surprising success has caught the attention of national groups. Friday morning, Senate Leadership Fund released a new ad, first on CNN, attempting to tie Kander to other Democrats. The ad from the Republican group linked to Senate Majority Leader Mitch McConnell and his allies features Kander's face morphing into that of Hillary Clinton, President Barack Obama, Nancy Pelosi and Bernie Sanders. The ad will run in Missouri's major cities on broadcast and cable as part of an $8 million ad buy.

"Kander is a very talented politician and all of his ads portray him as someone who's beyond ideology and beyond party, and in fact it's somewhat reminiscent of the way that Barack Obama first presented himself in 2008," said Senate Leadership Fund President Steven Law. "The challenge here is to overcome that fresh-face image and drive home the fact that this is someone who has a long track record and very strong ideological views."

A Monmouth poll released this week has Blunt leading Kander by only 2 points, 46% to 44%, essentially tied within the 4.9 percentage point margin of error. CNN has ranked it a toss-up. Trump led Clinton by 5 points, 46% to 41%, just outside the margin of error, in the same poll.

But you won't find joint Clinton-Kander yard signs anywhere driving around blue St. Louis, and Clinton has spent no money on ads in Missouri, focusing on a path to 270 electoral votes that doesn't need Missouri's 10. Trump's campaign has followed suit, also spending no money on advertising and letting a mostly grass-roots campaign maintain his natural lead.

Kander, a 35-year-old Afghanistan veteran and former state legislator, may not mind Clinton's absence in the state. He maintains that he has not tailored his "new generation" message to Trump. His announcement video in February 2015, before Trump had announced his candidacy, already included his pitch about Washington needing new blood less beholden to partisan politics.

But the Trump effect has played into Kander's hands. While Kander calls Trump "not qualified to be President," he told CNN in an interview that he understands and can speak to the Trump voter.

"I understand why in this gridlock that exists in our national conversation, people would in their search for shaking up that conversation be interested in considering someone who is in my opinion not qualified," he said when asked about the Trump-Kander voter. "So people do want change, and they want authenticity."

In an hour-long debate between Blunt, Kander and three third-party candidates for the seat here in Branson two weeks ago, the presidential race barely registered. Blunt attacked Kander using Clinton only once, to say Clinton supported a compromise Blunt negotiated with Democratic Sen. Patty Murray of Washington on Zika funding. Trump never came up at all, and Blunt specifically avoided saying Trump's name when he said the "next president" will reshape the court.
Asked whether Trump's outsider message is a drag on his campaign in a very brief Q&A with reporters after the debate, Blunt tried to make the case that their campaigns were complementary before ending the availability.

"I think it's really in line with the things I've been talking about," Blunt said, citing burdensome regulations on Missouri. "If you're going to do something about Obamacare ... I think you're going to need something more than the third term of Barack Obama."

He only mentioned Trump once, to say, "I think Donald Trump is a guy who would return more responsibility back to the Congress," citing his own push for legislation that would require congressional votes on certain regulations.

After a tape of explosive sexually aggressive and lewd comments from Trump was leaked last week, Blunt condemned the comments without revoking his support for the Republican. "Donald Trump's statements were disrespectful and inappropriate, and he was right to apologize," Blunt tweeted.

National interest

Blunt is getting plenty of help from outside Missouri, as is Kander. Between outside groups, the campaigns and party committees, more than $16 million is scheduled to be spent on the race already, roughly $5 million for Kander and $11 million for Blunt, according to ad tracking firm CMAG/Kantar Media. That doesn't include spending from groups like the Koch brothers-linked Americans for Prosperity, which can run issue-based advertising attacking or benefiting candidates. Blunt called Susan B. Anthony List President Marjorie Dannenfelser last week to raise more money, and the PAC for the conservative anti-abortion group is planning to spend $500,000 in the state on in-person, digital and mail outreach.

Both candidates have been beneficiaries of the races in Florida and Ohio slipping away from Democrats. With those battlegrounds less winnable, Senate hopes have caused an influx of money into the Show Me State.

Attack ads have saturated Missouri airwaves. Blunt has been attacked as a Washington insider, including a Kander ad that notes Blunt's wife and adult children are all lobbyists and accuses him of conflicts of interest. Kander, meanwhile, has been attacked for his F rating from the National Rifle Association and in ads that try to paint him as an extreme liberal politician. Kander has fought back with one of the most memorable ads of the election cycle, in which he assembles an AR-15 semi-automatic rifle blindfolded while talking about his military record and support for background checks, saying "I would like to see Roy Blunt do this."

The lack of a strong focus on the state by the Clinton campaign is a benefit, not a drag, to Democrats' hopes for the statehouse and the Senate seat.

"It's in Kander's interest to have as much separation as possible, particularly outside of St. Louis and Kansas City," said University of Missouri-St. Louis politics professor Terrence Jones. "Kander is trying to run an outsider campaign against Blunt, and Hillary is another insider as Blunt is, so he'd just as soon be seen as separate from her."

The Kander, Clinton and gubernatorial campaigns have mostly run independently of each other while strategizing on how to boost all of them, according to state Democratic volunteers. Kander's campaign has been focused on voter registration drives. The campaign of Democratic gubernatorial hopeful Chris Koster, a former Republican state lawmaker, has been working on canvassing and get-out-the-vote efforts.
And the Clinton campaign has been recruiting volunteers, phone banking to encourage identified Democrats to come join the efforts.

Former Missouri Democratic Chairman Mike Kelley was bullish that Kander could pull off an underdog win with the strategy.

"This should be a layup for Roy Blunt in Missouri," Kelley said. "He's not had some major scandal that has harmed his campaign or his ability to get re-elected, and the fact that it's so close, I do believe that this Senate seat is in play. And with the resources coming in from out of state, with the solid gubernatorial candidate that we have, with the excitement, with the nominees by Republicans, this could be the perfect storm for Jason Kander to pull this off."

A split-ticket electorate

Key to Kander's chances will be whether he can find split-ticket voters. Clinton's supporters are likely to vote for him, but a winning coalition will require peeling off some Republican support from Blunt. While Missouri has been reliable for Republican presidential candidates in recent cycles, the governor's seat has been held by a Democrat and the other incumbent senator, Claire McCaskill, is also a Democrat.

Conversations with voters in the state also reveal a possibility of split tickets this fall.

The Trump office in St. Louis is run by volunteers unaffiliated with the party or the campaign, who raised money because they wanted a Trump-specific office. When the state GOP told them they could share space in existing offices, they turned elsewhere.

"They suggested we go to various existing offices for certain statewide or federal candidates. And we knew that the demographics didn't necessarily support that," said director Annette Read. "(The volunteers) weren't revolting, they just wanted a strictly Trump office."

Read said she wouldn't speak about Blunt's race, but noted one of the things she likes about Trump is his independence.

"I am not a fan of party politics, I'd rather we have no affiliation whatsoever," Read said. "I have always mainly supported Republican candidates because that's probably closest to my ideology, but I don't like the direction the Republican Party has gone. ... I see a lot of things behind the scenes, and (Blunt's) probably having a tougher year than most, but I'm not here to speak about those candidates."

Another volunteer in the office, Brinda Johnson, said she and her family are lifelong Democrats, and she even went to the convention for Clinton in 2008. But she said after she read a book that attacks the Clinton Foundation and family, she switched her allegiance -- but only at the top. "I know I won't vote a straight Republican ticket," she said.

Near that office in Fenton, Missouri, at the local Fenton Days Festival, wellness company marketer Jenny Lutterman said she is committed to voting for Trump and sees a need for a Republican president. But she also had doubts about Blunt -- reminiscent of Kander's attack lines.

"I wish there was more of that, that people didn't vote straight tickets, because that's what's happened, we've gotten so divided," Lutterman said. "I'm not too fond of Roy Blunt. I just feel like he's really not for the people as much as he could be."
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	tvec_t *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;
			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		tvec_t *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
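/*
 * Worked example of the cascade arithmetic above (explanatory addition, not
 * in the original; assumes the common TVR_BITS = 8, TVR_SIZE = 256 values):
 * a timer_jiffies of 0x1234 gives index = 0x1234 & TVR_MASK = 0x34. If tv1
 * holds no eligible timer, timer_jiffies is advanced to the next tv2
 * boundary (0x1234 + (0x100 - 0x34) = 0x1300) and shifted right by
 * TVR_BITS, so 0x13 indexes the tv2 wheel. The same pattern then walks
 * tv3..tv5 with TVN_BITS-sized slots.
 */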
/** * Abstraction of a figure drawable on a BitMap. * * @absFun see specific concrete class. * @repInv r and c are non negative. */ public abstract class Figure { /** row coordinate */ protected final int r; /** column coordinate */ protected final int c; /** * Constructs a new Figure of coordinates r and c. * * @param r * row coordinate. * @param c * column coordinate. * * @throws IllegalArgumentException * if r or c is negative. */ protected Figure(int r, int c) { if (r < 0 || c < 0) throw new IllegalArgumentException(); this.r = r; this.c = c; } /** * Implementation of the representation invariant. Returns true if the * representation respects all its requirements. Used in assertions. * * @return true if the representation of this is ok; false otherwise. */ public boolean repOk() { return r >= 0 && c >= 0; } /** * Draw this Figure on the given {@link BitMap}. * * @param b * BitMap to draw this Figure on. */ abstract void drawOn(BitMap b); }
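/*
 * Illustrative sketch, not from the original source: a minimal concrete
 * subclass. BitMap's API is an assumption here; a set(r, c) call that turns
 * on the bit at the given coordinates is presumed to exist.
 */
public class Dot extends Figure {

	public Dot(int r, int c) {
		super(r, c); // inherits the non-negativity check from Figure
	}

	@Override
	void drawOn(BitMap b) {
		b.set(r, c); // hypothetical BitMap call
	}
}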
/****************************************************************************** ** ** GMCache_ClearSampleCache ** ** Forces the Mixer to remove all entries from the sample cache ** NOTE: Do not abuse this function... It can obscure instrument resource ** leaks that may be important to track otherwise ** ** 2000.03.29 AER Function (reluctantly) created ** 2000.05.08 AER Function imported from MiniBAE ** ******************************************************************************/ OPErr GMCache_ClearSampleCache(GM_Mixer * pMixer) { register INT16 count; BAE_ASSERT(pMixer); if (pMixer) { for (count = 0; count < MAX_SAMPLES; count++) { if (pMixer->sampleCaches[count]) { PV_FreeCacheEntry(pMixer, pMixer->sampleCaches[count]); } } return NO_ERR; } return PARAM_ERR; }
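/*
 * Usage sketch (an assumption, not from the original source): clearing the
 * cache on whatever GM_Mixer the host owns, e.g. between tests that count
 * instrument resources. The wrapper name is illustrative.
 */
static void ExampleFlushSampleCache(GM_Mixer *pMixer)
{
    if (GMCache_ClearSampleCache(pMixer) != NO_ERR)
    {
        /* PARAM_ERR: pMixer was NULL */
    }
}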
/**
 * MzTab DataAccessController for mzTab files. The controller supports only files with protein identifications and
 * PSMs; peptide information is not stored because the link between both objects is missing. mzTab support is based
 * only on PSMs and core proteins; ambiguity proteins are not supported.
 *
 * @author Yasset Perez-Riverol
 * @author Rui Wang
 */
public class MzTabControllerImpl extends ReferencedIdentificationController {

    private static final Logger logger = LoggerFactory.getLogger(MzTabControllerImpl.class);

    private static Pattern mzTabVersion = Pattern.compile(".*(mzTab-version).*(1.0)");
    private static Pattern mzTabProteinSection = Pattern.compile(".*(protein_search_engine_score).*");
    private static Pattern mzTabPSMSection = Pattern.compile(".*(psm_search_engine_score).*");
    private static Pattern mzTabPeptideSection = Pattern.compile(".*(peptide_search_engine_score).*");
    private static Pattern mzTabQuantitationSection = Pattern.compile(".*(quantification_method).*");

    /**
     * Reader to get information from the mzTab file
     */
    private MzTabUnmarshallerAdaptor reader;

    /*
     * This is a set of controllers related to the MS information in the mzTab file;
     * one or more controllers can be related to the same file format. The Comparable
     * name of the file is an id of the file, and the controller is the
     * DataAccessController related to the file.
     */

    public MzTabControllerImpl(File file) {
        super(file, DataAccessMode.CACHE_AND_SOURCE);
        try {
            initialize();
        } catch (IOException e) {
            String msg = "Failed to create MzTab unmarshaller for mzTab file: " + file.getAbsolutePath() + "\n" + e.getMessage();
            logger.error(msg);
            throw new DataAccessException(msg, e);
        }
    }

    /**
     * Initialize the mzTab file reader
     * @throws IOException
     */
    protected void initialize() throws IOException {
        // create MzTab access utils
        File file = (File) getSource();
        reader = new MzTabUnmarshallerAdaptor(file, new FileOutputStream(file.getAbsolutePath() + "errors.out"));
        // set data source description
        this.setName(file.getName());
        // set the type
        this.setType(Type.MZTAB);
        // init ms data accession controller map
        this.msDataAccessControllers = new HashMap<Comparable, DataAccessController>();
        // set the content categories
        this.setContentCategories(ContentCategory.PROTEIN,
                ContentCategory.PEPTIDE,
                ContentCategory.SAMPLE,
                ContentCategory.SOFTWARE,
                ContentCategory.PROTEIN_GROUPS,
                ContentCategory.SPECTRUM,
                ContentCategory.STUDY_VARIABLE,
                ContentCategory.QUANTIFICATION);
        //Todo: first cases only support identification
        // set cache builder
        setCachingStrategy(new MzTabCachingStrategy());
        // populate cache
        populateCache();
    }

    /**
     * Get the mzTab reader
     *
     * @return MzTabUnmarshallerAdaptor mzTab file reader
     */
    public MzTabUnmarshallerAdaptor getReader() {
        return reader;
    }

    /**
     * Get md5 hash unique id
     *
     * @return String unique id
     */
    @Override
    public String getUid() {
        String uid = super.getUid();
        if (uid == null) {
            // create a new UUID
            File file = (File) this.getSource();
            try {
                uid = MD5Utils.generateHash(file.getAbsolutePath());
            } catch (NoSuchAlgorithmException e) {
                String msg = "Failed to generate unique id for mzTab file";
                logger.error(msg, e);
            }
        }
        return uid;
    }

    /**
     * Get a list of source files.
     *
     * @return List<SourceFile> a list of source file objects.
* @throws uk.ac.ebi.pride.utilities.data.controller.DataAccessException * */ public List<SourceFile> getSourceFiles() { List<SourceFile> sourceFiles; try { sourceFiles = MzTabTransformer.transformSourceFiles(reader.getSourceFiles()); } catch (Exception ex) { String msg = "Error while getting source files"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } return sourceFiles; } /** * Return the List of Organizations * @return Organization List */ public List<Organization> getOrganizationContacts() { logger.debug("Get organizational contact"); List<Organization> organizationList = new ArrayList<Organization>(); try { organizationList.addAll(MzTabTransformer.transformContactToOrganization(reader.getContacts())); } catch (Exception ex) { String msg = "Error while getting organizational contacts"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } return organizationList; } /** * Return a List of Persons * @return List of Persons */ public List<Person> getPersonContacts() { logger.debug("Get person contacts"); List<Person> personList = new ArrayList<Person>(); try { personList.addAll(MzTabTransformer.transformContactToPersons(reader.getContacts())); } catch (Exception ex) { String msg = "Error while getting person contacts"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } return personList; } /** * Get a list of samples * * @return List<Sample> a list of sample objects. */ @Override public List<Sample> getSamples() { ExperimentMetaData metaData = super.getExperimentMetaData(); if (metaData == null) { logger.debug("Get samples"); List<Sample> samples; try { samples = MzTabTransformer.transformSamples(reader.getSamples(), reader.getMetadata(), hasQuantData()); return samples; } catch (Exception ex) { String msg = "Error while getting samples"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } } else { return metaData.getSamples(); } } /** * Returns StudyVariables for mzTab files. The study variables contains * the final values of the expression information * * @return java.lang.Map A Map with all the Study Variables in the file */ @Override public Map<Comparable, StudyVariable> getStudyVariables() { ExperimentMetaData metaData = super.getExperimentMetaData(); if (metaData == null ) { logger.debug("Study Variables"); Map<Comparable, StudyVariable> studyVariables; try { studyVariables = MzTabTransformer.transformStudyVariables(reader.getMetadata(), hasQuantData()); return studyVariables; } catch (Exception ex) { String msg = "Error while getting samples"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } } else { return metaData.getStudyVariables(); } } /** * Get the List of File Spectra that the Mzidentml use to identified peptides * * @return List of SpectraData Files associated with mzidentml. */ public List<SpectraData> getSpectraDataFiles() { ExperimentMetaData metaData = super.getExperimentMetaData(); if (metaData == null) { return new ArrayList<SpectraData>(MzTabTransformer.transformMsRunMap(reader.getSourceFiles()).values()); } return metaData.getSpectraDatas(); } /** * Get a list of software * * @return List<Software> a list of software objects. 
*/ public List<Software> getSoftwares() { ExperimentMetaData metaData = super.getExperimentMetaData(); if (metaData == null) { logger.debug("Get software"); List<Software> softwares; try { softwares = MzTabTransformer.transformSoftwares(reader.getDataSoftwares()); return softwares; } catch (Exception ex) { String msg = "Error while getting software list"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } } else { return metaData.getSoftwares(); } } /** * Get a list of references * * @return List<Reference> a list of reference objects */ public List<Reference> getReferences() { logger.debug("Get references"); List<Reference> refs = new ArrayList<Reference>(); try { refs.addAll(MzTabTransformer.transformReferences(reader.getReferences())); } catch (Exception ex) { String msg = "Error while getting references"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } return refs; } /** * Get custom parameters * * @return ParamGroup a group of cv parameters and user parameters. */ @Override public ParamGroup getAdditional() { ExperimentMetaData metaData = super.getExperimentMetaData(); if (metaData == null) { logger.debug("Get additional params"); try { return MzTabTransformer.transformAdditional(reader.getAdditionalParams()); } catch (Exception ex) { String msg = "Error while getting additional params"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } } else { return metaData; } } /** * Get the protocol object * * @return Protocol protocol object. */ public ExperimentProtocol getProtocol() { logger.debug("Get protocol"); try { return MzTabTransformer.transformProtocol(reader.getProtocol()); } catch (Exception ex) { String msg = "Error while getting protocol"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } } /** * Get meta data related to this experiment * * @return MetaData meta data object */ @Override public ExperimentMetaData getExperimentMetaData() { ExperimentMetaData metaData = super.getExperimentMetaData(); if (metaData == null) { logger.debug("Get metadata"); try { // Get Accession for MzTab Object String accession = reader.getExpAccession(); // Get the Version of the MzTab File. 
String version = reader.getVersion(); //Get Source File List List<SourceFile> sources = getSourceFiles(); // Get Samples objects for MzTab Object List<Sample> samples = getSamples(); // Get all the softwares related with the object List<Software> softwares = getSoftwares(); // Get Contact Persons List<Person> persons = getPersonContacts(); // Get the Contact Organization List<Organization> organizations = getOrganizationContacts(); // Get Additional Information Related with the Project ParamGroup additional = getAdditional(); // Get the Experiment Title String title = reader.getExpTitle(); // Get The Experiment Short Label String shortLabel = reader.getExpTitle(); //Get Experiment Protocol //Todo: We need to check which information should be converted to Protocol ExperimentProtocol protocol = getProtocol(); // Get References From the Experiment List<Reference> references = getReferences(); List<SpectraData> spectraDatas = getSpectraDataFiles(); Map<Comparable, StudyVariable> studyVariables = getStudyVariables(); metaData = new ExperimentMetaData(additional, accession, title, version, shortLabel, samples, softwares, persons, sources, null, organizations, references, null, null, protocol,spectraDatas,studyVariables); // store it in the cache getCache().store(CacheEntry.EXPERIMENT_METADATA, metaData); } catch (Exception ex) { String msg = "Error while getting experiment meta data"; logger.error(msg, ex); throw new DataAccessException(msg, ex); } } return metaData; } @Override public IdentificationMetaData getIdentificationMetaData() { IdentificationMetaData metaData = super.getIdentificationMetaData(); if (metaData == null) { List<SpectrumIdentificationProtocol> spectrumIdentificationProtocolList = null; Protocol proteinDetectionProtocol = null; List<SearchDataBase> searchDataBaseList = getSearchDataBases(); metaData = new IdentificationMetaData(null, null, spectrumIdentificationProtocolList, proteinDetectionProtocol, searchDataBaseList); } return metaData; } @Override public MzGraphMetaData getMzGraphMetaData() { MzGraphMetaData metaData = super.getMzGraphMetaData(); if (metaData == null) { List<ScanSetting> scanSettings = null; List<DataProcessing> dataProcessings = null; List<InstrumentConfiguration> instrumentConfigurations = getInstrumentConfigurations(); metaData = new MzGraphMetaData(null, null, scanSettings, instrumentConfigurations, dataProcessings); } return metaData; } /** * Get identification using a identification id, gives the option to choose whether to use cache. * This implementation provides a way of by passing the cache. 
* * @param proteinId identification id * @param useCache true means to use cache * @return Identification identification object */ @Override public Protein getProteinById(Comparable proteinId, boolean useCache) { Protein ident = super.getProteinById(proteinId, useCache); if (ident == null && useCache) { logger.debug("Get new identification from file: {}", proteinId); try { // when protein groups are not present Tuple<Integer, uk.ac.ebi.pride.jmztab.model.Protein> rawProtein = reader.getProteinById(proteinId); Map<String, uk.ac.ebi.pride.jmztab.model.PSM> spectrumIdentificationItems = getScannedSpectrumIdentificationItems(proteinId); Map<String, uk.ac.ebi.pride.jmztab.model.Peptide> peptideItems = getScannedPeptideItems(proteinId); uk.ac.ebi.pride.jmztab.model.Metadata metadata = reader.getMetadata(); ident = MzTabTransformer.transformIdentification(rawProtein.getValue(), rawProtein.getKey(), spectrumIdentificationItems, peptideItems, metadata, hasQuantData()); if (ident != null) { cacheProtein(ident); } } catch (Exception ex) { throw new DataAccessException("Failed to retrieve protein identification: " + proteinId, ex); } } return ident; } private Map<String, uk.ac.ebi.pride.jmztab.model.PSM> getScannedSpectrumIdentificationItems(Comparable proteinId) { List<Comparable> spectrumIdentIds = null; if (getCache().hasCacheEntry(CacheEntry.PROTEIN_TO_PEPTIDE_EVIDENCES)) { spectrumIdentIds = ((Map<Comparable, List<Comparable>>) getCache().get(CacheEntry.PROTEIN_TO_PEPTIDE_EVIDENCES)).get(proteinId); } return reader.getSpectrumIdentificationsByIds(spectrumIdentIds); } private Map<String, uk.ac.ebi.pride.jmztab.model.Peptide> getScannedPeptideItems(Comparable proteinId) { List<Comparable> peptideIds = new ArrayList<Comparable>(); if(hasQuantData()) if (getCache().hasCacheEntry(CacheEntry.PROTEIN_TO_QUANTPEPTIDES)) { peptideIds = ((Map<Comparable, List<Comparable>>) getCache().get(CacheEntry.PROTEIN_TO_QUANTPEPTIDES)).get(proteinId); } return reader.getPeptideByIds(peptideIds); } /** * Get the number of peptides by Rank, in MzTab all peptides are rank 1. * * @return int the number of peptides. 
     */
    @Override
    public int getNumberOfPeptidesByRank(int rank) {
        int num;
        try {
            // this method is overridden to use the reader directly
            num = reader.getNumberOfPeptides(rank);
        } catch (Exception ex) {
            throw new DataAccessException("Failed to retrieve number of peptides", ex);
        }
        return num;
    }

    @Override
    public void close() {
        reader = null;
        super.close();
    }

    public List<SearchDataBase> getSearchDataBases() {
        IdentificationMetaData metaData = super.getIdentificationMetaData();
        if (metaData == null) {
            logger.debug("Get search databases");
            try {
                Map<Comparable, SearchDataBase> searchDataBaseMap = (Map<Comparable, SearchDataBase>) getCache().get(CacheEntry.SEARCH_DATABASE);
                List<SearchDataBase> databases;
                if (searchDataBaseMap == null) {
                    databases = MzTabTransformer.transformDatabases(reader.getDatabases());
                } else {
                    databases = new ArrayList<SearchDataBase>(searchDataBaseMap.values());
                }
                return databases;
            } catch (Exception ex) {
                String msg = "Error while getting search databases";
                logger.error(msg, ex);
                throw new DataAccessException(msg, ex);
            }
        } else {
            return metaData.getSearchDataBases();
        }
    }

    public List<InstrumentConfiguration> getInstrumentConfigurations() {
        MzGraphMetaData metaData = super.getMzGraphMetaData();
        if (metaData == null) {
            logger.debug("Get instrument configurations");
            List<InstrumentConfiguration> configs = new ArrayList<InstrumentConfiguration>();
            try {
                // NOTE: When there is no instrument, 'transformInstrument' returns 'null', and that is not legal for
                // a List, that's why the originating method has been changed to return an empty list
                configs.addAll(MzTabTransformer.transformInstrument(reader.getInstrument()));
                return configs;
            } catch (Exception ex) {
                String msg = "Error while getting instrument configurations";
                logger.error(msg, ex);
                throw new DataAccessException(msg, ex);
            }
        } else {
            return metaData.getInstrumentConfigurations();
        }
    }

    /**
     * Checks whether a file is a supported mzTab file: it should contain the protein and PSM sections, it must be
     * mzTab version 1.0, and it should have the mzTab file extension. It is important to know that PRIDE will
     * support the information for complete experiments: Quantitation and Identification. For quantitation
     * experiments the Protein and Peptide sections should be provided; for identification experiments the Protein
     * and PSM sections should be provided.
     *
     * @param file input file
     * @return boolean true means the file is an mzTab
     */
    public static boolean isValidFormat(File file) {
        BufferedReader reader = null;
        int quantitationCount = 0;
        int identificationCount = 0;
        if (!file.getName().toLowerCase().endsWith(Constants.MZTAB_EXT))
            return false;
        /*
         * To decide whether the mzTab file is supported, read the header line by line until the type appears.
         * It should contain proteins and PSMs to be a supported file.
         */
        try {
            reader = new BufferedReader(new FileReader(file));
            // scan at most the first 200 lines of the header
            for (int i = 0; i < 200; i++) {
                String line = reader.readLine();
                if (line == null)
                    break;
                if (mzTabProteinSection.matcher(line).find() || mzTabVersion.matcher(line).find()) {
                    quantitationCount++;
                    identificationCount++;
                }
                if (mzTabPSMSection.matcher(line).find())
                    identificationCount++;
                if (mzTabPeptideSection.matcher(line).find())
                    quantitationCount++;
            }
        } catch (Exception e) {
            logger.error("Failed to read file", e);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e) {
                    // do nothing here
                }
            }
        }
        // a count of 3 means the version line, the protein section and the PSM (or peptide) section were all seen
        return quantitationCount >= 3 || identificationCount >= 3;
    }

    /**
     * The present version of mzTab will not contain protein groups because ambiguity members are not protein groups.
     * In the future we are thinking of supporting ambiguity groups in a different way.
     *
     * @param proteinGroupId Protein Group Identifier
     * @return ProteinGroup A specific protein group in the file
     */
    @Override
    public ProteinGroup getProteinAmbiguityGroupById(Comparable proteinGroupId) {
        ProteinGroup proteinGroup = super.getProteinAmbiguityGroupById(proteinGroupId);
        if (proteinGroup == null) {
            try {
                // proteinGroup = MzTabTransformer.transformProteinAmbiguityGroupToProteinGroup(proteinGroupId);
                if (proteinGroup != null) {
                    // store identification into cache
                    getCache().store(CacheEntry.PROTEIN_GROUP, proteinGroupId, proteinGroup);
                    for (Protein protein : proteinGroup.getProteinDetectionHypothesis()) {
                        cacheProtein(protein);
                    }
                }
            } catch (Exception ex) {
                throw new DataAccessException("Failed to retrieve protein group: " + proteinGroupId, ex);
            }
        }
        return proteinGroup;
    }

    /**
     * Check if the file contains protein sequences by looking at the CV terms in the mzTab unmarshaller.
     *
     * @return TRUE if the file contains the SEQUENCE for each PROTEIN and FALSE if proteins are annotated without the sequence
     */
    @Override
    public boolean hasProteinSequence() {
        return reader.hasProteinSequence();
    }

    /**
     * Get the number of peptides.
     *
     * @return int the number of peptides.
*/ @Override public int getNumberOfPeptides() { int num; try { // this method is overridden to use the reader directly num = reader.getNumIdentifiedPeptides(); } catch (Exception ex) { throw new DataAccessException("Failed to retrieve number of peptides", ex); } return num; } @Override public boolean hasQuantData() { return reader.hasQuantitationData(); } @Override public QuantitativeSample getQuantSample() { QuantitativeSample sampleDesc = new QuantitativeSample(); Collection<Sample> samples = getSamples(); Map<Comparable, StudyVariable> studyVariables = getStudyVariables(); Set<Sample> sampleSet = new HashSet<Sample>(); if(studyVariables != null && !studyVariables.isEmpty() && samples != null && !samples.isEmpty()){ for(StudyVariable studyVariable: studyVariables.values()){ for(Assay assay: studyVariable.getAssays()){ sampleSet.add(assay.getSample()); } } int i = 0; Iterator<Sample> sampleIterator = sampleSet.iterator(); while(sampleIterator.hasNext()){ Sample currentSample = sampleIterator.next(); sampleDesc.addsubSample(i); List<CvParam> params = currentSample.getCvParams(); for(CvParam param: params){ if ("newt".equalsIgnoreCase(param.getCvLookupID())) { sampleDesc.setSpecies(i, param); } else if ("bto".equalsIgnoreCase(param.getCvLookupID())) { sampleDesc.setTissue(i, param); } else if ("cl".equalsIgnoreCase(param.getCvLookupID())) { sampleDesc.setCellLine(i, param); } else if ("go".equalsIgnoreCase(param.getCvLookupID())) { sampleDesc.setGOTerm(i, param); } else if ("doid".equalsIgnoreCase(param.getCvLookupID())) { sampleDesc.setDisease(i, param); } } sampleDesc.setDescription(i, CvUtilities.getCVTermFromCvReference(CvTermReference.PRIDE_SAMPLE_DESCRIPTION, currentSample.getName())); for(StudyVariable studyVariable: studyVariables.values()){ List<Assay> assays = studyVariable.getAssays(); for(Assay assay: assays){ Sample sampleAssay = assay.getSample(); if(currentSample == sampleAssay){ sampleDesc.setReagent(i, assay.getReagent()); } } } i++; } } return sampleDesc; } }
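/*
 * Usage sketch (hedged, not from the original source): a minimal entry point
 * that pre-checks a file with isValidFormat before paying the cost of full
 * construction and cache population. The file path is illustrative.
 */
class MzTabQuickStart {
    public static void main(String[] args) {
        java.io.File mzTab = new java.io.File("experiment.mztab"); // hypothetical path
        if (MzTabControllerImpl.isValidFormat(mzTab)) {
            MzTabControllerImpl controller = new MzTabControllerImpl(mzTab);
            try {
                System.out.println("quantitation data present: " + controller.hasQuantData());
            } finally {
                controller.close();
            }
        }
    }
}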
/* * This file is part of the MicroPython project, http://micropython.org/ * * The MIT License (MIT) * * Copyright (c) 2013, 2014 <NAME> * Copyright (c) 2014 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/dirent.h> #include <sys/unistd.h> #include "py/mpconfig.h" #include "py/runtime.h" #include "py/objtuple.h" #include "py/mphal.h" #include "extmod/vfs.h" #include "extmod/misc.h" // this is implemented in C++ extern int puppy_system_impl(const char*); STATIC mp_obj_t mod_os_system(mp_obj_t path_in) { const char *path = mp_obj_str_get_str(path_in); int res = puppy_system_impl(path); return MP_OBJ_NEW_SMALL_INT(res); } STATIC MP_DEFINE_CONST_FUN_OBJ_1(mod_os_system_obj, mod_os_system); STATIC mp_obj_t mod_os_pipe() { int fd[] = {0, 0}; int ok = pipe(fd); RAISE_ERRNO(ok, errno); mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(2, NULL)); t->items[0] = MP_OBJ_NEW_SMALL_INT(fd[0]); t->items[1] = MP_OBJ_NEW_SMALL_INT(fd[1]); return MP_OBJ_FROM_PTR(t); } STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_os_pipe_obj, mod_os_pipe); STATIC mp_obj_t mod_os_access(mp_obj_t path_in, mp_obj_t mode_in) { const char *path = mp_obj_str_get_str(path_in); mp_int_t mode = mp_obj_int_get_truncated(mode_in); int res = access(path, mode); return MP_OBJ_NEW_SMALL_INT(res); } STATIC MP_DEFINE_CONST_FUN_OBJ_2(mod_os_access_obj, mod_os_access); STATIC mp_obj_t mod_os_stat(mp_obj_t path_in) { struct stat sb; const char *path = mp_obj_str_get_str(path_in); int res = stat(path, &sb); RAISE_ERRNO(res, errno); mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(10, NULL)); t->items[0] = MP_OBJ_NEW_SMALL_INT(sb.st_mode); t->items[1] = MP_OBJ_NEW_SMALL_INT(sb.st_ino); t->items[2] = MP_OBJ_NEW_SMALL_INT(sb.st_dev); t->items[3] = MP_OBJ_NEW_SMALL_INT(sb.st_nlink); t->items[4] = MP_OBJ_NEW_SMALL_INT(sb.st_uid); t->items[5] = MP_OBJ_NEW_SMALL_INT(sb.st_gid); t->items[6] = mp_obj_new_int_from_uint(sb.st_size); t->items[7] = MP_OBJ_NEW_SMALL_INT(sb.st_atime); t->items[8] = MP_OBJ_NEW_SMALL_INT(sb.st_mtime); t->items[9] = MP_OBJ_NEW_SMALL_INT(sb.st_ctime); return MP_OBJ_FROM_PTR(t); } STATIC MP_DEFINE_CONST_FUN_OBJ_1(mod_os_stat_obj, mod_os_stat); STATIC mp_obj_t mod_os_unlink(mp_obj_t path_in) { const char *path = mp_obj_str_get_str(path_in); int r = unlink(path); RAISE_ERRNO(r, errno); return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_1(mod_os_unlink_obj, mod_os_unlink); STATIC mp_obj_t 
mod_os_getcwd() { const char *s = getcwd(NULL, 0); if (s == NULL) { return mp_const_none; } return mp_obj_new_str(s, strlen(s)); } MP_DEFINE_CONST_FUN_OBJ_0(mod_os_getcwd_obj, mod_os_getcwd); STATIC mp_obj_t mod_os_getenv(mp_obj_t var_in) { const char *s = getenv(mp_obj_str_get_str(var_in)); if (s == NULL) { return mp_const_none; } return mp_obj_new_str(s, strlen(s)); } MP_DEFINE_CONST_FUN_OBJ_1(mod_os_getenv_obj, mod_os_getenv); STATIC mp_obj_t mod_os_mkdir(mp_obj_t path_in) { const char *path = mp_obj_str_get_str(path_in); int r = mkdir(path, 0777); RAISE_ERRNO(r, errno); return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_1(mod_os_mkdir_obj, mod_os_mkdir); typedef struct _mp_obj_listdir_t { mp_obj_base_t base; mp_fun_1_t iternext; DIR *dir; } mp_obj_listdir_t; STATIC mp_obj_t listdir_next(mp_obj_t self_in) { mp_obj_listdir_t *self = MP_OBJ_TO_PTR(self_in); if (self->dir == NULL) { goto done; } struct dirent *dirent = readdir(self->dir); if (dirent == NULL) { closedir(self->dir); self->dir = NULL; done: return MP_OBJ_STOP_ITERATION; } mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(3, NULL)); t->items[0] = mp_obj_new_str(dirent->d_name, strlen(dirent->d_name)); if (dirent->d_type == DT_DIR) { t->items[1] = MP_OBJ_NEW_SMALL_INT(MP_S_IFDIR); } else if (dirent->d_type == DT_REG) { t->items[1] = MP_OBJ_NEW_SMALL_INT(MP_S_IFREG); } else { t->items[1] = MP_OBJ_NEW_SMALL_INT(dirent->d_type); } t->items[2] = MP_OBJ_NEW_SMALL_INT(0); return MP_OBJ_FROM_PTR(t); } STATIC mp_obj_t mod_os_ilistdir(size_t n_args, const mp_obj_t *args) { const char *path = "."; if (n_args > 0) { path = mp_obj_str_get_str(args[0]); } mp_obj_listdir_t *o = m_new_obj(mp_obj_listdir_t); o->base.type = &mp_type_polymorph_iter; o->dir = opendir(path); o->iternext = listdir_next; return MP_OBJ_FROM_PTR(o); } STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_os_ilistdir_obj, 0, 1, mod_os_ilistdir); STATIC mp_obj_t mod_os_errno(size_t n_args, const mp_obj_t *args) { if (n_args == 0) { return MP_OBJ_NEW_SMALL_INT(errno); } errno = mp_obj_get_int(args[0]); return mp_const_none; } MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_os_errno_obj, 0, 1, mod_os_errno); STATIC const mp_rom_map_elem_t mp_module_os_globals_table[] = { { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_uos) }, { MP_ROM_QSTR(MP_QSTR_errno), MP_ROM_PTR(&mod_os_errno_obj) }, { MP_ROM_QSTR(MP_QSTR_F_OK), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_access), MP_ROM_PTR(&mod_os_access_obj) }, { MP_ROM_QSTR(MP_QSTR_stat), MP_ROM_PTR(&mod_os_stat_obj) }, { MP_ROM_QSTR(MP_QSTR_pipe), MP_ROM_PTR(&mod_os_pipe_obj) }, { MP_ROM_QSTR(MP_QSTR_system), MP_ROM_PTR(&mod_os_system_obj) }, { MP_ROM_QSTR(MP_QSTR_unlink), MP_ROM_PTR(&mod_os_unlink_obj) }, { MP_ROM_QSTR(MP_QSTR_getenv), MP_ROM_PTR(&mod_os_getenv_obj) }, { MP_ROM_QSTR(MP_QSTR_getcwd), MP_ROM_PTR(&mod_os_getcwd_obj) }, { MP_ROM_QSTR(MP_QSTR_mkdir), MP_ROM_PTR(&mod_os_mkdir_obj) }, { MP_ROM_QSTR(MP_QSTR_ilistdir), MP_ROM_PTR(&mod_os_ilistdir_obj) }, }; STATIC MP_DEFINE_CONST_DICT(mp_module_os_globals, mp_module_os_globals_table); const mp_obj_module_t mp_module_os = { .base = { &mp_type_module }, .globals = (mp_obj_dict_t*)&mp_module_os_globals, };
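/*
 * Hedged usage sketch at the MicroPython level (not part of this file),
 * exercising the uos bindings defined above; output values depend on the host:
 *
 *   import uos
 *   print(uos.getcwd())        # current directory, or None on failure
 *   r, w = uos.pipe()          # (read_fd, write_fd) tuple
 *   for name, kind, _ in uos.ilistdir("."):
 *       print(name, kind)      # MP_S_IFDIR / MP_S_IFREG, or the raw d_type
 *
 * Note that mkdir() always uses mode 0777, and errno() with no argument reads
 * the C errno while errno(n) sets it.
 */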
<filename>src/main/java/gov/nasa/pds/web/ui/containers/tabularManagement/TabularLabelContainer.java package gov.nasa.pds.web.ui.containers.tabularManagement; import gov.nasa.arc.pds.tools.util.FileUtils; import gov.nasa.pds.tools.containers.VolumeContainerSimple; import gov.nasa.pds.tools.dict.DictIdentifier; import gov.nasa.pds.tools.dict.Dictionary; import gov.nasa.pds.tools.label.AttributeStatement; import gov.nasa.pds.tools.label.Label; import gov.nasa.pds.tools.label.Numeric; import gov.nasa.pds.tools.label.ObjectStatement; import gov.nasa.pds.tools.label.PointerStatement; import gov.nasa.pds.web.ui.containers.ColumnInfo; import gov.nasa.pds.web.ui.containers.LabelContainer; import gov.nasa.pds.web.ui.utils.TabularData; import java.io.File; import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; public class TabularLabelContainer extends LabelContainer { public TabularLabelContainer(URL labelUrl, VolumeContainerSimple volume, Dictionary dictionary) { super(labelUrl, volume, dictionary); // TODO Auto-generated constructor stub } @SuppressWarnings("nls") public List<PointerStatement> getTabularPointers() { List<PointerStatement> allPointers = this.labelObj.getPointers(); List<PointerStatement> tabularPointers = new ArrayList<PointerStatement>(); for (PointerStatement pointer : allPointers) { DictIdentifier id = pointer.getIdentifier(); if (id.getId().endsWith("TABLE") || id.getId().endsWith("SERIES") || id.getId().endsWith("SPECTRUM") || id.getId().endsWith("STRUCTURE")) { // || id.endsWith("SPREADSHEET")) // { tabularPointers.add(pointer); } } return tabularPointers; } public List<ObjectStatement> getAllObjects(final DictIdentifier identifier) { List<ObjectStatement> objectStatements = new ArrayList<ObjectStatement>(); List<ObjectStatement> innerLevelStatements = new ArrayList<ObjectStatement>(); // get all top level object statements which have the identifier List<ObjectStatement> topLevelStatements = this.labelObj.getObjects(); for (ObjectStatement os : topLevelStatements) { // object statement is at the top level if (os.getIdentifier().equals(identifier)) { objectStatements.add(os); } // find all object statements within data objects, if any if (!os.getObjects().isEmpty()) innerLevelStatements.addAll(os.getObjects()); } // search for object statements within inner data object statements for (ObjectStatement os : innerLevelStatements) { if (os.getIdentifier().equals(identifier)) { objectStatements.add(os); } } return objectStatements; } @SuppressWarnings("nls") @Override public TabularData getTabularData(final String tableType, long numRows) { // try to get table pointer PointerStatement tablePointer = findPointer(tableType); // initialize variables created in if and try below URL tabFileUrl = null; File tabularFile = null; Numeric startPosition = null; // if pointer found, try to get object def for it if (tablePointer != null) { // if no labelFile - using URL if (this.labelFile == null) { try { // get file - assume only one file Entry<Numeric, URI> entry = getURIEntry(tablePointer); tabFileUrl = entry.getValue().toURL(); startPosition = entry.getKey(); } catch (Exception e) { throw new RuntimeException( "Referenced tabular data file does not exist"); //$NON-NLS-1$ } } else { // get file - assume only one file Entry<Numeric, File> entry = getFileEntry(tablePointer); tabularFile = entry.getValue(); startPosition = entry.getKey(); if (!FileUtils.exists(tabularFile)) { throw new RuntimeException( "Referenced tabular data 
file does not exist"); //$NON-NLS-1$ } } } final long startByte = Label.getSkipBytes(this.labelObj, startPosition); final List<ObjectStatement> foundObjects = this.labelObj .getObjects(tableType); if (foundObjects.size() == 1) { final List<ColumnInfo> columns = new ArrayList<ColumnInfo>(); ObjectStatement tableObj = foundObjects.get(0); AttributeStatement tableFormat = tableObj .getAttribute("INTERCHANGE_FORMAT"); //$NON-NLS-1$ if (tableFormat != null) { String tableFormatString = tableFormat.getValue().toString(); if (tableFormatString.equalsIgnoreCase("BINARY")) { // return null; } } if (tableObj.getIdentifier().toString().equals("SPREADSHEET")) { for (ObjectStatement column : tableObj.getObjects("FIELD")) { //$NON-NLS-1$ columns.add(new ColumnInfo(column)); } } else { for (ObjectStatement column : tableObj.getObjects("COLUMN")) { //$NON-NLS-1$ columns.add(new ColumnInfo(column)); } } if (tabFileUrl != null) { if (tableObj.getIdentifier().toString().equals("SPREADSHEET")) { String delimiterName = tableObj.getAttribute( "FIELD_DELIMITER").getValue().toString(); @SuppressWarnings("unused") String delimiterValue; if (delimiterName.equalsIgnoreCase("COMMA")) delimiterValue = ","; else if (delimiterName.equalsIgnoreCase("SEMICOLON")) delimiterValue = ";"; else if (delimiterName.equalsIgnoreCase("TAB")) delimiterValue = "\t"; else if (delimiterName.equalsIgnoreCase("VERTICAL_BAR")) delimiterValue = "|"; else delimiterValue = ","; // COMMA, SEMICOLON, TAB, or VERTICAL_BAR // return new SpreadsheetData(tabFileUrl, columns, // startByte, // numRows, delimiterValue); } return new TabularData(tabFileUrl, columns, startByte, numRows); } return new TabularData(tabularFile, columns, startByte, numRows); } return null; } }
/*
 * Returns a StringBuffer containing n spaces appended together.
 * Used in many places throughout the program.
 */
public static StringBuffer spacer(int n) {
    StringBuffer space = new StringBuffer();
    for (int i = 0; i < n; i++) {
        space.append(" ");
    }
    return space;
}
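/*
 * Illustrative usage (not from the original source): padding a label to a
 * fixed column width before printing.
 */
public static void printPadded(String label, String value) {
    // e.g. printPadded("Name:", "value") prints "Name:        value"
    System.out.println(label + spacer(8) + value);
}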
Challenges of Drug-Facilitated Sexual Assault. This article provides the reader with an understanding of the numerous challenges of drug-facilitated sexual assaults (DFSA). The challenges are categorized as follows: the drugs, reporting the crime, evidence collection, and laboratory analysis of specimens. The discussion of the challenges associated with the drugs used to commit DFSA emphasizes the pharmacological effects of strong central nervous system depressants and how the pharmacokinetics and pharmacodynamics of these drugs create difficulties in an investigation. For example, while sexual assaults are generally considered to be a significantly underreported crime, the drug effects further complicate victims' reporting to law enforcement. Any delay in reporting decreases the ability of a laboratory to detect the presence of drugs or metabolites in useful evidentiary specimens such as blood and urine. Finally, differences in instrumentation and mission from one laboratory to the next will impact the ability to provide consistent identification of DFSA drugs or metabolites in these cases. Although the true prevalence of DFSA will never be fully known, acknowledgment of the many challenges that come with these cases provides insight as to how to improve the chances of successfully investigating DFSA allegations.
import { usesWindowServiceResponse } from '../messaging';

declare global {
  interface Window {
    urls: { [key: string]: string };
  }
}

// eslint-disable-next-line no-undef
const someUrlsUsedInModuleScope = window.urls;

export async function usesWindowService() {
  return usesWindowServiceResponse(someUrlsUsedInModuleScope);
}
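// Hedged usage sketch: consuming the service from another module. The relative
// import path './usesWindowService' is an assumption about the file layout, and
// the response shape comes from '../messaging'.
import { usesWindowService } from './usesWindowService';

usesWindowService().then((response) => console.log(response));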
package com.arif.restfulinpeace.dao;

import com.arif.restfulinpeace.model.User;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.stereotype.Repository;

@Repository
public interface UserRepository extends JpaRepository<User, Integer> {

	// Positional JPQL parameters must be numbered (?1); a bare ? is not valid JPQL.
	@Query("select p from User p where p.username = ?1")
	User getUserByUserName(String userName);

	@Query("select p from User p where p.id = ?1")
	User getUserById(Integer userId);
}
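// Usage sketch (hedged): constructor-injecting the repository into a service.
// The service class and method names are illustrative, not from the project.
import com.arif.restfulinpeace.dao.UserRepository;
import com.arif.restfulinpeace.model.User;
import org.springframework.stereotype.Service;

@Service
public class UserLookupService {

	private final UserRepository userRepository;

	public UserLookupService(UserRepository userRepository) {
		this.userRepository = userRepository;
	}

	public User findByName(String userName) {
		// Delegates to the JPQL query declared on the repository above.
		return userRepository.getUserByUserName(userName);
	}
}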
<gh_stars>100-1000 // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/notifications/scheduler/internal/notification_scheduler.h" #include <set> #include <string> #include <utility> #include <vector> #include "base/bind.h" #include "base/logging.h" #include "base/memory/weak_ptr.h" #include "base/optional.h" #include "base/threading/thread_task_runner_handle.h" #include "chrome/browser/notifications/scheduler/internal/background_task_coordinator.h" #include "chrome/browser/notifications/scheduler/internal/display_decider.h" #include "chrome/browser/notifications/scheduler/internal/impression_history_tracker.h" #include "chrome/browser/notifications/scheduler/internal/notification_entry.h" #include "chrome/browser/notifications/scheduler/internal/notification_scheduler_context.h" #include "chrome/browser/notifications/scheduler/internal/scheduled_notification_manager.h" #include "chrome/browser/notifications/scheduler/internal/scheduler_utils.h" #include "chrome/browser/notifications/scheduler/internal/stats.h" #include "chrome/browser/notifications/scheduler/public/display_agent.h" #include "chrome/browser/notifications/scheduler/public/notification_background_task_scheduler.h" #include "chrome/browser/notifications/scheduler/public/notification_params.h" #include "chrome/browser/notifications/scheduler/public/notification_scheduler_client.h" #include "chrome/browser/notifications/scheduler/public/notification_scheduler_client_registrar.h" #include "chrome/browser/notifications/scheduler/public/user_action_handler.h" namespace notifications { namespace { class NotificationSchedulerImpl; // Helper class to do async initialization in parallel for multiple subsystem // instances. class InitHelper { public: using InitCallback = base::OnceCallback<void(bool)>; InitHelper() : context_(nullptr) {} InitHelper(const InitHelper&) = delete; InitHelper& operator=(const InitHelper&) = delete; ~InitHelper() = default; // Initializes subsystems in notification scheduler, |callback| will be // invoked if all initializations finished or anyone of them failed. The // object should be destroyed along with the |callback|. void Init(NotificationSchedulerContext* context, ImpressionHistoryTracker::Delegate* delegate, InitCallback callback) { // TODO(xingliu): Initialize the databases in parallel, we currently // initialize one by one to work around a shared db issue. See // https://crbug.com/978680. context_ = context; callback_ = std::move(callback); context_->impression_tracker()->Init( delegate, base::BindOnce(&InitHelper::OnImpressionTrackerInitialized, weak_ptr_factory_.GetWeakPtr())); } private: void OnImpressionTrackerInitialized(bool success) { if (!success) { std::move(callback_).Run(false /*success*/); return; } context_->notification_manager()->Init( base::BindOnce(&InitHelper::OnNotificationManagerInitialized, weak_ptr_factory_.GetWeakPtr())); } void OnNotificationManagerInitialized(bool success) { std::move(callback_).Run(success); } NotificationSchedulerContext* context_; InitCallback callback_; base::WeakPtrFactory<InitHelper> weak_ptr_factory_{this}; }; // Helper class to display multiple notifications, and invoke a callback when // finished. class DisplayHelper { public: // Invoked with the total number of notification shown when all the display // flows are done. 
using FinishCallback = base::OnceCallback<void(int)>; DisplayHelper(const std::set<std::string>& guids, NotificationSchedulerContext* context, FinishCallback finish_callback) : guids_(guids), context_(context), finish_callback_(std::move(finish_callback)), shown_count_(0) { if (guids_.empty()) { std::move(finish_callback_).Run(0); return; } for (const auto& guid : guids) { context_->notification_manager()->DisplayNotification( guid, base::BindOnce(&DisplayHelper::BeforeDisplay, weak_ptr_factory_.GetWeakPtr(), guid)); } } DisplayHelper(const DisplayHelper&) = delete; DisplayHelper& operator=(const DisplayHelper&) = delete; ~DisplayHelper() = default; private: void BeforeDisplay(const std::string& guid, std::unique_ptr<NotificationEntry> entry) { if (!entry) { DLOG(ERROR) << "Notification entry is null"; MaybeFinish(guid, false /*shown*/); return; } // Inform the client to update notification data. base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&DisplayHelper::NotifyClientBeforeDisplay, weak_ptr_factory_.GetWeakPtr(), std::move(entry))); } void NotifyClientBeforeDisplay(std::unique_ptr<NotificationEntry> entry) { auto* client = context_->client_registrar()->GetClient(entry->type); if (!client) { MaybeFinish(entry->guid, false /*shown*/); return; } // Detach the notification data for client to rewrite. auto notification_data = std::make_unique<NotificationData>(std::move(entry->notification_data)); client->BeforeShowNotification( std::move(notification_data), base::BindOnce(&DisplayHelper::AfterClientUpdateData, weak_ptr_factory_.GetWeakPtr(), std::move(entry))); } void AfterClientUpdateData( std::unique_ptr<NotificationEntry> entry, std::unique_ptr<NotificationData> updated_notification_data) { if (!updated_notification_data) { stats::LogNotificationLifeCycleEvent( stats::NotificationLifeCycleEvent::kClientCancel, entry->type); MaybeFinish(entry->guid, false /*shown*/); return; } // Tracks user impression on the notification to be shown. context_->impression_tracker()->AddImpression( entry->type, entry->guid, entry->schedule_params.impression_mapping, updated_notification_data->custom_data, entry->schedule_params.ignore_timeout_duration); stats::LogNotificationShow(*updated_notification_data, entry->type); // Show the notification in UI. auto system_data = std::make_unique<DisplayAgent::SystemData>(); system_data->type = entry->type; system_data->guid = entry->guid; context_->display_agent()->ShowNotification( std::move(updated_notification_data), std::move(system_data)); MaybeFinish(entry->guid, true /*shown*/); } // Called when notification display flow is finished. Invokes // |finish_callback_| when all display flows are done. void MaybeFinish(const std::string& guid, bool shown) { if (base::Contains(guids_, guid) && shown) { shown_count_++; } guids_.erase(guid); if (guids_.empty() && finish_callback_) { std::move(finish_callback_).Run(shown_count_); } } std::set<std::string> guids_; NotificationSchedulerContext* context_; FinishCallback finish_callback_; int shown_count_; base::WeakPtrFactory<DisplayHelper> weak_ptr_factory_{this}; }; // Implementation of NotificationScheduler. 
class NotificationSchedulerImpl : public NotificationScheduler, public ImpressionHistoryTracker::Delegate { public: explicit NotificationSchedulerImpl( std::unique_ptr<NotificationSchedulerContext> context) : context_(std::move(context)) {} NotificationSchedulerImpl(const NotificationSchedulerImpl&) = delete; NotificationSchedulerImpl& operator=(const NotificationSchedulerImpl&) = delete; ~NotificationSchedulerImpl() override = default; private: // NotificationScheduler implementation. void Init(InitCallback init_callback) override { init_helper_ = std::make_unique<InitHelper>(); init_helper_->Init(context_.get(), this, base::BindOnce(&NotificationSchedulerImpl::OnInitialized, weak_ptr_factory_.GetWeakPtr(), std::move(init_callback))); } void Schedule( std::unique_ptr<NotificationParams> notification_params) override { context_->notification_manager()->ScheduleNotification( std::move(notification_params), base::BindOnce(&NotificationSchedulerImpl::OnNotificationScheduled, weak_ptr_factory_.GetWeakPtr())); } void OnNotificationScheduled(bool success) { if (success) { ScheduleBackgroundTask(); } } void DeleteAllNotifications(SchedulerClientType type) override { context_->notification_manager()->DeleteNotifications(type); } void GetClientOverview( SchedulerClientType type, ClientOverview::ClientOverviewCallback callback) override { context_->impression_tracker()->GetImpressionDetail( type, base::BindOnce( &NotificationSchedulerImpl::OnImpressionDetailQueryCompleted, weak_ptr_factory_.GetWeakPtr(), type, std::move(callback))); } void OnImpressionDetailQueryCompleted( SchedulerClientType type, ClientOverview::ClientOverviewCallback callback, ImpressionDetail impression_detail) { std::vector<const NotificationEntry*> notifications; context_->notification_manager()->GetNotifications(type, &notifications); ClientOverview result(std::move(impression_detail), notifications.size()); std::move(callback).Run(std::move(result)); } void OnInitialized(InitCallback init_callback, bool success) { // TODO(xingliu): Tear down internal components if initialization failed. init_helper_.reset(); std::move(init_callback).Run(success); NotifyClientsAfterInit(success); } void NotifyClientsAfterInit(bool success) { std::vector<SchedulerClientType> clients; context_->client_registrar()->GetRegisteredClients(&clients); for (auto type : clients) { base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&NotificationSchedulerImpl::NotifyClientAfterInit, weak_ptr_factory_.GetWeakPtr(), type, success)); } } void NotifyClientAfterInit(SchedulerClientType type, bool success) { std::vector<const NotificationEntry*> notifications; context_->notification_manager()->GetNotifications(type, &notifications); std::set<std::string> guids; for (const auto* notification : notifications) { DCHECK(notification); guids.emplace(notification->guid); } auto* client = context_->client_registrar()->GetClient(type); DCHECK(client); client->OnSchedulerInitialized(success, std::move(guids)); } // NotificationBackgroundTaskScheduler::Handler implementation. void OnStartTask(TaskFinishedCallback callback) override { stats::LogBackgroundTaskEvent(stats::BackgroundTaskEvent::kStart); // Updates the impression data to compute daily notification shown budget. context_->impression_tracker()->AnalyzeImpressionHistory(); // Show notifications. 
FindNotificationToShow(std::move(callback)); } void OnStopTask() override { stats::LogBackgroundTaskEvent(stats::BackgroundTaskEvent::kStopByOS); ScheduleBackgroundTask(); } void FindNotificationToShow(TaskFinishedCallback task_finish_callback) { DisplayDecider::Results results; ScheduledNotificationManager::Notifications notifications; context_->notification_manager()->GetAllNotifications(&notifications); DisplayDecider::ClientStates client_states; context_->impression_tracker()->GetClientStates(&client_states); std::vector<SchedulerClientType> clients; context_->client_registrar()->GetRegisteredClients(&clients); context_->display_decider()->FindNotificationsToShow( std::move(notifications), std::move(client_states), &results); display_helper_ = std::make_unique<DisplayHelper>( results, context_.get(), base::BindOnce(&NotificationSchedulerImpl::AfterNotificationsShown, weak_ptr_factory_.GetWeakPtr(), std::move(task_finish_callback))); } void AfterNotificationsShown(TaskFinishedCallback task_finish_callback, int shown_count) { stats::LogBackgroundTaskNotificationShown(shown_count); // Schedule the next background task based on scheduled notifications. ScheduleBackgroundTask(); stats::LogBackgroundTaskEvent(stats::BackgroundTaskEvent::kFinish); std::move(task_finish_callback).Run(false /*need_reschedule*/); } void ScheduleBackgroundTask() { BackgroundTaskCoordinator::Notifications notifications; context_->notification_manager()->GetAllNotifications(&notifications); BackgroundTaskCoordinator::ClientStates client_states; context_->impression_tracker()->GetClientStates(&client_states); context_->background_task_coordinator()->ScheduleBackgroundTask( std::move(notifications), std::move(client_states)); } void OnUserAction(const UserActionData& action_data) override { context_->impression_tracker()->OnUserAction(action_data); ScheduleBackgroundTask(); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&NotificationSchedulerImpl::NotifyClientAfterUserAction, weak_ptr_factory_.GetWeakPtr(), action_data)); } void NotifyClientAfterUserAction(const UserActionData& action_data) { auto* client = context_->client_registrar()->GetClient(action_data.client_type); if (!client) return; auto client_action_data = action_data; // Attach custom data if the impression is not expired. const auto* impression = context_->impression_tracker()->GetImpression( action_data.client_type, action_data.guid); if (impression) { client_action_data.custom_data = impression->custom_data; } client->OnUserAction(client_action_data); } void GetThrottleConfig(SchedulerClientType type, ThrottleConfigCallback callback) override { auto* client = context_->client_registrar()->GetClient(type); if (client) { client->GetThrottleConfig(std::move(callback)); } else { std::move(callback).Run(nullptr); } } std::unique_ptr<NotificationSchedulerContext> context_; std::unique_ptr<InitHelper> init_helper_; std::unique_ptr<DisplayHelper> display_helper_; base::WeakPtrFactory<NotificationSchedulerImpl> weak_ptr_factory_{this}; }; } // namespace // static std::unique_ptr<NotificationScheduler> NotificationScheduler::Create( std::unique_ptr<NotificationSchedulerContext> context) { return std::make_unique<NotificationSchedulerImpl>(std::move(context)); } NotificationScheduler::NotificationScheduler() = default; NotificationScheduler::~NotificationScheduler() = default; } // namespace notifications
/*---------------------------------------------------------------- * Author: Samuel Miravet-Verde * Written: 09/25/2015 * Last updated: 09/25/2015 * * Compilation: javac SumOfTwoDice.java * Execution: java SumOfTwoDice * * Generate 2 integers between 1 and 6, and print their sum. * * % java SumOfTwoDice * 5 * * % java SumOfTwoDice * 9 * * % java SumOfTwoDice * 3 * * % java SumOfTwoDice * 11 * * % java SumOfTwoDice * 8 * * % java SumOfTwoDice * 7 * ******************************************************************************/ public class SumOfTwoDice { public static void main(String[] args) { int SIDES = 6; int a = 1 + (int) (Math.random() * SIDES); int b = 1 + (int) (Math.random() * SIDES); int sum = a + b; System.out.println(sum); } }
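A side note on the dice example above: the `Math.random()` idiom works, but `java.util.Random` states the intent more directly. A minimal alternative sketch (the class name here is invented, not part of the original exercise):

import java.util.Random;

public class SumOfTwoDiceRandom {
    public static void main(String[] args) {
        final int SIDES = 6;
        Random rng = new Random();
        // nextInt(SIDES) returns 0..5, so add 1 to get a die face 1..6.
        int a = rng.nextInt(SIDES) + 1;
        int b = rng.nextInt(SIDES) + 1;
        System.out.println(a + b);
    }
}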
import java.util.List;
import java.util.Properties;

import org.ejml.simple.SimpleMatrix;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.util.CoreMap;

/**
 * Sentiment ingest uses the {@link StanfordCoreNLP} class to calculate sentiment scores.
 */
public class SentimentAnalyzer {

    private final StanfordCoreNLP pipeline;

    public SentimentAnalyzer() {
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize, ssplit, pos, parse, sentiment");
        pipeline = new StanfordCoreNLP(props);
    }

    public double getSentimentScore(String text) {
        List<CoreMap> annotations = getAnnotations(text);
        double sentimentType = getSentimentClass(annotations);
        double sentimentScore = getScore(annotations, sentimentType);
        return sentimentType * sentimentScore;
    }

    // Maps the per-sentence predicted classes (0 = very negative .. 4 = very
    // positive; 2 = neutral is skipped) to a value in [-1, 1].
    private double getSentimentClass(List<CoreMap> sentences) {
        double sum = 0;
        int numberOfSentences = 0;
        for (CoreMap sentence : sentences) {
            Tree sentiments = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            int predictedClass = RNNCoreAnnotations.getPredictedClass(sentiments);
            if (predictedClass != 2) {
                sum += predictedClass;
                numberOfSentences++;
            }
        }
        return numberOfSentences == 0 ? 0 : (sum / numberOfSentences - 2) / 2;
    }

    private double getScore(List<CoreMap> sentences, double sentimentType) {
        double sum0 = 0;
        double sum1 = 0;
        double sum3 = 0;
        double sum4 = 0;
        int numberOfSentences = 0;
        for (CoreMap sentence : sentences) {
            Tree sentiments = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            int predictedClass = RNNCoreAnnotations.getPredictedClass(sentiments);
            SimpleMatrix matrix = RNNCoreAnnotations.getPredictions(sentiments);
            if (predictedClass != 2) {
                sum0 += matrix.get(0); // very negative
                sum1 += matrix.get(1); // negative
                sum3 += matrix.get(3); // positive
                sum4 += matrix.get(4); // very positive
                numberOfSentences++;
            }
        }
        // Guard against division by zero (NaN) when every sentence was neutral.
        if (numberOfSentences == 0) {
            return 0;
        }
        double avg0 = sum0 / numberOfSentences;
        double avg1 = sum1 / numberOfSentences;
        double avg3 = sum3 / numberOfSentences;
        double avg4 = sum4 / numberOfSentences;
        if (sentimentType < -0.5) {
            return avg0;
        } else if (sentimentType < 0) {
            return avg1;
        } else if (sentimentType < 0.5) {
            return avg3;
        } else {
            return avg4;
        }
    }

    private List<CoreMap> getAnnotations(String text) {
        Annotation annotation = pipeline.process(text);
        return annotation.get(CoreAnnotations.SentencesAnnotation.class);
    }
}
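A hypothetical usage sketch for the analyzer above, assuming the Stanford CoreNLP jars and the English sentiment models are on the classpath; the demo class name is invented for illustration:

public class SentimentAnalyzerDemo {
    public static void main(String[] args) {
        SentimentAnalyzer analyzer = new SentimentAnalyzer();
        // The score multiplies the sign/strength of the averaged predicted
        // class by an averaged class probability, so the result is roughly
        // in [-1, 1], with values near 0 meaning neutral.
        System.out.println(analyzer.getSentimentScore("I love this movie."));
        System.out.println(analyzer.getSentimentScore("That was a terrible waste of time."));
    }
}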
/**
 * Figure out the type of the list:
 *   1) if t was explicit then use it
 *   2) if we have a field typed as a list, then use its definition
 *   3) if infer is false, then drop back to a list of Obj
 *   4) if infer is true, then return null and we'll infer the common type
 */
private Type toListOfType(Type t, Field curField, boolean infer)
{
  if (t != null) return t;
  if (curField != null)
  {
    Type ft = curField.type().toNonNullable();
    if (ft instanceof ListType) return ((ListType)ft).v;
  }
  if (infer) return null;
  return Sys.ObjType.toNullable();
}
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
// Developed by Minigraph
//
// Author: <NAME>

#pragma once

#include "pch.h"
#include "ParticleShaderStructs.h"

EmissionProperties CreateEmissionProperties()
{
    EmissionProperties emitProps;
    ZeroMemory(&emitProps, sizeof(EmissionProperties));
    emitProps.EmitPosW = emitProps.LastEmitPosW = XMFLOAT3(0.0f, 0.0f, 0.0f);
    emitProps.EmitDirW = XMFLOAT3(0.0f, 0.0f, 1.0f);
    emitProps.EmitRightW = XMFLOAT3(1.0f, 0.0f, 0.0f);
    emitProps.EmitUpW = XMFLOAT3(0.0f, 1.0f, 0.0f);
    emitProps.Restitution = 0.6f;
    emitProps.FloorHeight = -0.7f;
    emitProps.EmitSpeed = 1.0f;
    emitProps.Gravity = XMFLOAT3(0.0f, -5.0f, 0.0f);
    emitProps.MaxParticles = 500;
    return emitProps;
}
package volume

import (
	"context"
	"path"

	"github.com/alibaba/pouch/storage/volume/driver"
	"github.com/alibaba/pouch/storage/volume/types"

	"github.com/pkg/errors"
)

func (c *Core) volumePath(ctx context.Context, v *types.Volume, dv driver.Driver) (string, error) {
	p, err := dv.Path(ctx, v)
	if err != nil {
		return "", err
	}
	if !path.IsAbs(p) {
		return "", errors.Errorf("Volume path: %s not absolute", p)
	}

	return p, nil
}
import { Observer, ObserverCallbackFunc } from '../Observer'

describe('Observer', () => {
  let observer: Observer<number | null>

  beforeEach(() => {
    observer = new Observer(null)
  })

  it('subscribe duplicate', () => {
    const fn = jest.fn()
    observer.sub(fn)
    observer.sub(fn)
    observer.update(1)
    expect(fn).toBeCalledTimes(1)
  })

  it('sub', (done) => {
    const fn: ObserverCallbackFunc<number> = (now, pre) => {
      expect(now).toBe(1)
      expect(pre).toBe(null)
      expect(observer.value).toBe(1)
      done()
    }
    observer.sub(fn)
    observer.update(1)
  })

  it('update', () => {
    observer.update(1)
    expect(observer.value).toBe(1)
    observer.update(null)
    expect(observer.value).toBe(null)

    const fn = jest.fn()
    observer.sub(fn)
    observer.update(2, true)
    expect(fn).toBeCalledTimes(0)
  })

  it('sub multi', () => {
    const fn1 = jest.fn()
    const fn2 = jest.fn()
    observer.sub(fn1)
    observer.sub(fn2)
    observer.update(1)
    expect(fn1).toBeCalledTimes(1)
    expect(fn2).toBeCalledTimes(1)
  })

  it('unSub', () => {
    const fn = jest.fn()
    observer.sub(fn)
    observer.unSub(fn)
    observer.update(11)
    expect(fn).toBeCalledTimes(0)
    expect(observer.value).toBe(11)
  })
})
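The spec above pins down the Observer contract: duplicate subscriptions collapse, callbacks receive (now, pre), and update(value, true) mutates silently. As a cross-language illustration only, here is a minimal Java sketch of the same contract; the names simply mirror the TypeScript API and are not from the original project:

import java.util.LinkedHashSet;
import java.util.Set;
import java.util.function.BiConsumer;

class Observer<T> {
    private T value;
    // A LinkedHashSet deduplicates subscribers while keeping call order stable.
    private final Set<BiConsumer<T, T>> subs = new LinkedHashSet<>();

    Observer(T initial) {
        this.value = initial;
    }

    T value() {
        return value;
    }

    void sub(BiConsumer<T, T> fn) {
        subs.add(fn);
    }

    void unSub(BiConsumer<T, T> fn) {
        subs.remove(fn);
    }

    // silent == true stores the value without notifying, like update(v, true).
    void update(T now, boolean silent) {
        T pre = value;
        value = now;
        if (!silent) {
            for (BiConsumer<T, T> fn : subs) {
                fn.accept(now, pre);
            }
        }
    }

    void update(T now) {
        update(now, false);
    }
}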
# File system configuration file
# @author: <NAME> <<EMAIL>>

MAX_BLK_SIZE = 8
MAX_NUM_INODES = 10
NUM_OF_BLKS = 250
//! \exception std::runtime_error Raised if \a newSection has the same tag as a previously //! added section. void iMXImage::addSection(Section *newSection) { section_iterator_t it = beginSection(); for (; it != endSection(); ++it) { if ((*it)->getIdentifier() == newSection->getIdentifier()) { throw std::runtime_error("new section with non-unique tag"); } } m_sections.push_back(newSection); newSection->setImage(this); }
The surprise discovery of traces of European ancestry in the 24,000-year-old bones of a boy unearthed in the heart of Siberia has caught the attention of Canadian experts, who say the find could rewrite the story of the people who first populated ancient Canada and the rest of the Americas.

A study published in the journal Nature by a team of 31 researchers from the U.S. and Europe details how the four-year-old’s skeletal remains — excavated at the Mal’ta archeological site in south-central Siberia in the 1920s and kept since then at Russia’s Hermitage State Museum — yielded a DNA signature shared by modern European populations but also by many present-day aboriginal people in the Western Hemisphere.

The ancient boy’s DNA profile may help explain why a “European” strain of genetic material can be found among today’s New World indigenous communities, a mystery that many scientists had assumed was the result of contact in recent centuries with successive waves of colonizers from Europe.

Another controversial theory to explain the shared DNA is based on the idea that boat-using tribes of seal-hunters from ancient Europe might have migrated westward to the Americas along a North Atlantic ice edge.

But the Nature study offers another explanation: that the people who would eventually cross the so-called “Bering Land Bridge” between northeast Asia and Alaska near the end of the last ice age about 13,000 years ago had a key branch of ancestors in prehistoric Europe.

Those ancient ancestors, it’s now theorized, must have moved eastward into central and then eastern Asia, mingling with other populations and eventually crossing to the Western Hemisphere when low ocean levels created a temporary land corridor between the eastern tip of Russia and the western edge of Alaska.

“The result came as a complete surprise to us,” Danish archeologist Eske Willerslev, a University of Copenhagen researcher who led the study, stated in a summary of the findings. “Who would have thought that present-day Native Americans, who we learned in school derive from East Asians, share recent evolutionary history with contemporary western Eurasians?”

Successive waves of human migration from eastern Siberia along the land-bridge route are widely presumed to be the origin of the hundreds of aboriginal nations that were spread throughout the Americas before Renaissance-era Europeans began permanently settling in the New World about 500 years ago.

More than 50 million aboriginal people living today in North, Central and South America are believed to be descendants of those initial, bridge-crossing inhabitants of the New World.

The study’s findings are significant for “suggesting a huge percentage of the genome of Siberian people during the Ice Age is actually coming from Western Europe,” said Grant Zazula, a Yukon government paleontologist who has studied the relationship between mammoths and humans in ancient “Beringia” — the glacier-free grassland that covered parts of Alaska, Yukon and Siberia more than 10,000 years ago.

“If you look at the genomes of modern Native American people, South American native people and Canadian First Nations people, there’s a chunk of that genome that looks European – or is European,” said Zazula.
“It’s often thought that was a result of modern mixing of populations” in relatively recent times, he said, but the new study suggests a significant portion of the genetic heritage of the New World’s first inhabitants was “ancestrally coming from the Ice Age in Western Europe.” About 25,000 years ago, he said, “a group of people in Western Europe that started moving east, mixed with some people in central Siberia, and those people kept on going east and mixed with some east Asian people — maybe (from geographical) China, Korea, northern Japanese — and then eventually crossed the land bridge.” University of Alberta archeologist Jack Ives also highlighted that the “Mal’ta Boy” DNA profile “is indicating some degree of relatedness – a significant degree of relatedness – to First Nations populations in the New World, which had not been anticipated. So it’s quite significant.” Ives added that the importance of the finding is enhanced by the scarceness of evidence from a period so deep in human history: “In much of Siberia, as in much of the New World, human remains from this particular time interval are extremely rare.” Ives recently led a team of Canadian researchers who shed new light on the complicated way early New World peoples moved through the so-called “ice-free corridor” east of the Rocky Mountains, an important conduit for populations migrating into the interior of North America as the glaciers retreated at the end of the last Ice Age. He said the Siberian findings reinforce the links between seemingly disparate human populations. “On one primary level, it continues to tell us – if we could just take it into account more often – we’re all fairly closely related, and that European populations and First Nations populations may share more genetic heritage than we previously thought. It’s worth keeping in mind in the modern world.”
def resource_attributes(self): return { 'url': None, 'archive': 'gz', }
/* JTypes
 * #All about those types
 * Showing the various data types in Java
 * Farai Gandiya
 */

public class JTypes {

    public static void main(String[] args) {

        //Bytes are whole numbers between -128 and 127. They use one byte of memory
        System.out.println("Let's talk about bytes!");
        byte byte1 = -12;
        byte byte2 = 106;
        System.out.println("The first byte is " + byte1);
        System.out.println("The second byte is " + byte2);

        //Shorts are whole numbers between -32768 and 32767. They use 2 bytes of memory
        System.out.println("\nLet's talk about shorts!");
        short shorty1 = -1130;
        short shorty2 = 6545;
        System.out.println("The first short is " + shorty1);
        System.out.println("The second short is " + shorty2);

        //Ints are whole numbers between -2^31 and 2^31-1. These take up 4 bytes of memory
        System.out.println("\nLet's talk about ints!");
        int inty1 = 1000000000;
        int inty2 = -43234313;
        System.out.println("The first int is " + inty1);
        System.out.println("The second int is " + inty2);

        //Longs are whole numbers between -2^63 and 2^63-1. These take up 8 bytes of memory
        //Great for storing Zimbabwe Dollars or other really big numbers.
        System.out.println("\nLet's talk about longs!");
        long longjohn1 = 1000000000000L; //Always add the L at the end of a long
        long longjohn2 = -432343654232324333L;
        System.out.println("The first long is " + longjohn1);
        System.out.println("The second long is " + longjohn2);

        //Floats are "decimal" numbers between 2^-149 and (2-2^-23)*2^127. They use 4 bytes of memory
        //You should never use them because of their lack of precision, unless you have a really good reason
        System.out.println("\nLet's move onto floats!");
        float floaty1 = 127.973377662646f; //Always add the f after a float
        float floaty2 = -1333.332277636362f;
        //Notice how the whole number isn't shown, even though we typed it
        System.out.println("The first float is " + floaty1); //127.97338
        System.out.println("The second float is " + floaty2); //-1333.3323

        //Doubles are "decimal" numbers between 2^-1074 and (2-2^-52)*2^1023. They use 8 bytes of memory
        //These are more accurate than floats
        System.out.println("\nLet's talk about doubles");
        double dub1 = 127.973377662646;
        double dub2 = -1333.332277636362;
        //Notice how, unlike floats, the whole number is shown
        System.out.println("The first double is " + dub1);
        System.out.println("The second double is " + dub2);

        //Booleans are a value that is either true or false
        //These will be used... A LOT!
        //You don't always have to set them to true or false. An inline evaluation would work
        System.out.println("\nLet's talk about booleans!");
        boolean boo = 3 < 2; //false
        boolean hoo = 2 < 3; //true
        boolean bool1 = false;
        boolean bool2 = true;
        System.out.println("3<2 is " + boo);
        System.out.println("2<3 is " + hoo);
        System.out.println("bool1 is " + bool1);
        System.out.println("bool2 is " + bool2);

        //A char is a 2-byte representation of a Unicode character
        System.out.println("\nLet's talk about chars!");
        char A = 'A'; //single quotes are important!
        char q = 98; //supports all ASCII codes; 98 is 'b'
        char pi = '\u03C0'; //for unicode, use \\u followed by the code
        char snowman = '\u2603';
        //Some of these will print depending on the supported font
        //? means it can't
        System.out.println("This is " + A); //A
        System.out.println("98 is the ASCII code for " + q); //b
        System.out.println("U R A QT" + pi); //pi symbol
        System.out.println("Do you wanna build a " + snowman + "?"); //Little Snowman

        //Strings are a sequence of characters and numbers, even unicode and hex codes!
System.out.println("\nLet's talk about strings!"); String cheese = "I like myself a big block of cheese"; String fish = "My favorite fish is Surstr\u00f6ming";//uu00d6 is umlat System.out.println(cheese); System.out.println(fish); } }
// update updates the fetcher for the next scan request func (f *fetcher) update(resp *pb.ScanResponse, region hrpc.RegionInfo) { if resp.GetMoreResultsInRegion() { if resp.ScannerId != nil { f.scannerID = resp.GetScannerId() } } else { f.scannerID = noScannerID f.startRow = region.StopKey() } }
// @adjivas - github.com/adjivas. See the LICENSE // file at the top-level directory of this distribution and at // https://github.com/adjivas/linear_regression // // This file may not be copied, modified, or distributed // except according to those terms. extern crate linear_regression; #[test] fn test_estimate_price_zero () { assert_eq! ( format!("{}", linear_regression::formula::estimate_price::estimatePrice::new ( 42, (0, 0), ) ), "(estimatePrice(42) ⇒= 0+(0*42)) ⇒ 0" ); }
// Add2Tree method adds vanity package into vanity tree. func Add2Tree(p *models.VanityPackage) error { if err := processVanityPackage(p); err != nil { return err } host := Thumbai.AddHost(p.Host) if p.Path == "@" { host.AddRootVanity(p) } else if err := host.AddVanity2Tree(p.Path, p); err != nil { return err } return nil }
import React from 'react'
import classNames from 'classnames'
import { Loaders, Price, Space, Typography } from 'modules/ui'
import s from './AnalyticCard.module.scss'

export interface AnalyticCardProps extends React.HTMLAttributes<HTMLDivElement> {
  title?: string
  subtitle?: string
  price?: number
  percent?: number
  colorBg?: string
  colorTrain?: string
  colorTrack?: string
  loading?: boolean
}

/**
 * Cards contain analytic data about a single subject.
 */
export const AnalyticCard: React.FC<AnalyticCardProps> = ({
  title = '',
  subtitle = '',
  price = 0,
  percent = 0,
  colorBg,
  colorTrain,
  colorTrack,
  loading = false,
  className,
  style,
  ...otherProps
}) => {
  if (loading) return <Loaders />

  const props = {
    className: classNames(s.card, className),
    style: { backgroundColor: colorBg, ...style },
    ...otherProps
  }

  return (
    <div {...props}>
      <Space
        className={s.content}
        direction='horizontal'
        justify='between'
        align='center'
        block
      >
        <Space
          direction='vertical'
          size={5}
        >
          <Typography
            className={s.title}
            variant='h4'
          >
            {title}
          </Typography>
          <Typography
            className={s.subtitle}
            variant='text'
            type='secondary'
            fontSize={16}
          >
            {subtitle}
          </Typography>
          <Price
            className={s.price}
            amount={price}
          />
        </Space>
        <div className={s.percentContainer}>
          <svg
            className={s.circularChart}
            viewBox='0 0 36 36'
          >
            <path
              className={s.circleBg}
              d='M18 2.0845 a 15.9155 15.9155 0 0 1 0 31.831 a 15.9155 15.9155 0 0 1 0 -31.831'
            />
            <path
              className={s.track}
              d='M18 2.0845 a 15.9155 15.9155 0 0 1 0 31.831 a 15.9155 15.9155 0 0 1 0 -31.831'
              strokeDasharray={'100, 100'}
              style={{ stroke: colorTrack }}
            />
            <path
              className={s.train}
              d='M18 2.0845 a 15.9155 15.9155 0 0 1 0 31.831 a 15.9155 15.9155 0 0 1 0 -31.831'
              strokeDasharray={`${Math.abs(percent)}, 100`}
              style={{ stroke: colorTrain }}
            />
          </svg>
          <Typography
            className={s.value}
            variant='h4'
            type='strong'
          >
            {percent}%
          </Typography>
        </div>
      </Space>
    </div>
  )
}
def _add_oclass(self, oclass, graph): attr = "" for key, value in oclass.attributes.items(): attr += self.attribute.format(key.argname, value[0]) label = self.label.format(str(oclass), attr) if oclass.namespace in self._namespaces: graph.node(str(oclass), label=label, color="#EED5C6", style="filled") else: graph.node(str(oclass), label=label)
/** * @param root - root of the given BST * @param key - value of the node to be deleted * @return - root of the tree after deleting the node */ public Node deleteNode(Node root, int key) { if (root == null) { return root; } if (key < root.data) { root.left = deleteNode(root.left, key); } else if (key > root.data) { root.right = deleteNode(root.right, key); } else { if (root.left == null) { return root.right; } if (root.right == null) { return root.left; } root.data = minValue(root.right); root.right = deleteNode(root.right, root.data); } return root; }
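The method above assumes a Node type and a minValue helper that are not shown. A plausible sketch of both, for context only (the real definitions may differ); minValue would live in the same class as deleteNode:

class Node {
    int data;
    Node left, right;

    Node(int data) {
        this.data = data;
    }
}

// Assumed helper: returns the smallest value in a subtree, i.e. the in-order
// successor used when the node to delete has two children.
int minValue(Node root) {
    int min = root.data;
    while (root.left != null) {
        min = root.left.data;
        root = root.left;
    }
    return min;
}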
# Read everything from stdin: all values but the last are pack prices; the
# last value n is the required amount (presumably litres), converted below
# to millilitres.
*aa, n = map(int, open(0).read().split())
n *= 1000

# Pack sizes in millilitres, smallest to largest, matching the prices in aa.
bb = [250, 500, 1000, 2000]

# Cap each pack's price at twice the price of the next smaller pack, so a
# larger pack never costs more than the two smaller packs it replaces.
for i in range(1, 4):
    aa[i] = min(aa[i], aa[i - 1] * 2)

# Buy greedily from the largest pack down, carrying the remainder to the
# smaller sizes.
ans = 0
for b, a in zip(bb[::-1], aa[::-1]):
    ans += n // b * a
    n %= b
print(ans)
Bad Quaker Podcast With Ben Stone

The clash between Chris Cantwell and the FSP is important, but the underlying issue is more important than the actions of either of the parties involved.

Bigger Than Cantwell and the FSP

There is a controversy in and around the Free State Project, concerning Chris Cantwell and an article he wrote that was published at Cop Block dot org. (link)

Due in part to this article and due in part to Cantwell’s previous statements and behavior, a good many people in the Free State Project want nothing to do with Cantwell, his wild antics, or what they perceive as his radical and dangerous beliefs.

The Free State Project is a non-profit organization, currently seeking Federal 501(c)(3) non-profit status. It is composed of a voluntary membership along with about 7 members on its board of trustees. The board of trustees is the final decision-making body of the Free State Project (here further referred to as the FSP).

I’ll get into Cantwell’s Cop Block article in a moment, but first I want to touch on the relationship between the FSP and Cantwell. As Cantwell has acknowledged, the board of the FSP has every right to exclude him from their organization, exclude him from their sanctioned events, and even publicly and privately shun him. However, as Cantwell has stated, the FSP has no right and no power to say who can and can’t associate with Cantwell, what Cantwell may or may not say, nor where Cantwell may or may not live or travel. I assume the board of the FSP understands this.

On a personal note, this is a great time to point out the difference between leadership and authority. Leaders do what they think is best, and you may follow if you agree. Authoritarians dictate behavior, and force compliance. Dancing between leadership and authority is a burden every organization and every individual must deal with on a regular basis. I’ll say this in defense of Chris Cantwell: when I have talked to Chris at PorcFest he has never demanded I show my PorcFest identification bracelet, and I never asked to see his.

The FSP board notified Cantwell by email of their decision, and Cantwell has posted this email, his response, and his comments. (link) In his comments, Cantwell points out that the board never engaged him in debate on the topic, never asked him to explain his statements, and never offered a specific criticism of his article. They simply demanded he retract his statements or be banned from the FSP. It is my understanding that the board of the FSP may be reviewing their decision at some point.

Considering Cantwell’s request for a critique, I would like to offer a two-part analysis, starting with the Cop Block article in question, and finishing with Cantwell’s stated view that violence will be the final answer to the question of government aggression.

In the article at Cop Block, Cantwell begins setting up a straw man argument in the 5th paragraph, where he states:

“Mislead pacifists have told me for some time, that we have to meet government violence with non-violent resistance, and civil disobedience, because government agents don’t know how to deal with non-violent people. Clearly, there is an ever increasing amount of evidence that these people are completely misinformed.
Government agents know exactly how to deal with peaceful people, they deal with them using violence, and the acquisition of the Bearcat in Concord is just one in a long line of examples.”

Notice the “Mislead pacifists” classification and the leap-to-conclusion “because government agents don’t know how to deal with non-violent people” as the motivation for pacifist actions. The essence of a straw man is to discredit the opponent by misrepresenting his position. By starting with the assumption that pacifists are misled and placing that firmly in the conversation, then dismissing the pacifists based on the fact that cops handle non-violent protest with more violence, the pacifist is made to look foolish and naive. Cantwell’s conclusions should be ignored unless he can make an argument based on facts and not on fallacies that support his predetermined conclusion.

The informed pacifist doesn’t make the assumption that victory will be obtained by baffling a cop through nonviolence. It is not the goal of the informed pacifist to stump a cop by peaceful resistance. The goal of the pacifist is to show by their nonviolence that even in the face of violence they are peaceful. The purpose of this behavior is not to magically cause the frenzied attack cop to suddenly revert from his violent nature and begin hugging hippies and planting trees. The reason the pacifist remains nonviolent is to prove the greater point: you cannot achieve good by doing evil. You cannot achieve peace by acting violently. The true pacifist believes that violence, even in self defense, is bad, and you cannot achieve a good by doing a bad.

Here I must point out, I am not a pacifist. I believe that some situations can morally be solved with violence, and I believe some situations can only be solved by violence. Further I believe that in some situations humans are morally obligated to commit violence on other humans. So just to be clear, while I am defending pacifists against Cantwell’s straw man attack, I am not a pacifist.

In the article, Cantwell goes on to make a list:

Adam Kokesh is peaceful, that didn’t stop government agents from kidnapping and caging him.

Ian Freeman is peaceful, that didn’t stop government agents from kidnapping and caging him.

Derrick Horton is peaceful, that didn’t stop government agents from kidnapping and caging him.

Ademo Freeman is peaceful, that didn’t stop government agents from kidnapping and caging him.

Rich Paul is peaceful, that didn’t stop government agents from kidnapping and caging him.

Here we see a misdirect and a non-sequitur. Cantwell was building a straw man against pacifists, then he offers the arrests of Kokesh, Ian, Derrick, Ademo, and Rich as examples of how pacifism has failed. Are Kokesh, Ian, Derrick, Ademo, and Rich all pacifists, and have they failed? The misdirect is in the attempt to bring sympathy for victims of government aggression into the argument, and the non-sequitur (meaning it doesn’t follow) assumes the conclusion that the arrests of these men prove pacifism is a failure.

Considering the first name on Cantwell’s list, Adam Kokesh said May 6th 2013 on the Alex Jones show, speaking directly as to the nature of the proposed, and failed, July 4th “march”:

“Alex, this is an armed revolt against the American government, make no mistake about it.”

Are those the words of a pacifist? Are we to believe Adam is a pacifist?
Adam may or may not be peaceful, and his protest may or may not have been peaceful, but to say Kokesh is a pacifist is an example of Cantwell using his friend as a red herring to misdirect the argument, and draw a conclusion on a topic that has yet to be concluded. What will come of Kokesh’s protest? Cantwell simply doesn’t know, but would like you to believe that his arrest is proof of failure.

It’s important to point out what Cantwell clearly doesn’t want to address. This is not an argument about pacifism. Pacifism is a philosophy based on specific principles. Principles Cantwell clearly has no desire to grasp, even if he is capable. A person doesn’t become a pacifist because it’s the most efficient utilitarian method to achieve a goal. A person is a pacifist because they believe in the philosophy of pacifism and they follow through with their beliefs by remaining peaceful even when it would be in their best interest to turn violent. When Cantwell uses the label “pacifist” he is either showing his ignorance of the argument, or he is intentionally misdirecting the argument by using name calling, red herrings, straw men, and other forms of deception.

Now to motivation: why would Cantwell seek to drive Freestaters into violent actions, as their only solution and their only hope? Cantwell continues talking about the Free State Project:

“…They see this injustice, they want it to stop, and so they are coming together to make a stand against it. The only problem is, now that they have come together, they have absolutely no idea what to do, because their vision of a peaceful evolution to a voluntary society is being shattered on an almost daily basis by government violence.”

If I were a Freestater I would be highly insulted by this remark. Are the Freestaters really just stumbling around New Hampshire, without direction, baffled that the government didn’t roll over and die when they moved in?

Here we see a continuing flaw of those who tend to view things through the old mindset, no matter what label they attach to themselves and no matter what celebrities they cling to. It’s the notion that there are only two options being talked about: their path, which includes a violent bloody revolution, and everyone else, who have no plan, are clueless, and will sit around waiting for government goons to beat them, rob them, jail them, and ultimately kill them. This is the false dichotomy that Cantwell and many others would like to sell to the liberty movement, and unfortunately many people, both inside the movement and outside, are buying these ridiculous fallacies.

A common variation of this false dichotomy is the argument that you have to get involved in government, vote, support the right politician, support the right issues, and work to “move the government in the right direction”! And if you aren’t doing that then you have no plan, you’re just sitting around complaining, and you’re doomed to failure.

Then Cantwell reveals his heartfelt fantasy:

“So what to do? It’s a terribly unpopular thing to say, but the answer, at some point, is to kill government agents. The government agents know that, and that’s why they want a tank.”

Chris Cantwell, like many people around the world, is so fed up with government aggression, and the pathetic sheep that allow it to continue, that he has adopted what I call “the black hole sun” solution. You may also call it the “blaze of glory” solution. In desperation and frustration, people are ready to burn the house down to rid themselves of the bed bugs.
I know this thought pattern well, as I sat stewing in those juices for many years before my eyes were pried open and I was forced to see the light.

To sum up the remainder of Cantwell’s article, he correctly states the non-aggression principle, and lays out the reason that self defense is allowed by the non-aggression principle. He correctly states the moral implications of self defense, as I see them, and indicates, as I believe, there is nothing morally wrong with defending oneself against an aggressor, even if the aggressor holds some official title or edict. But then in his conclusion, he falls back to his straw man by lumping all the Keene New Hampshire activists into his twisted version of “pacifism” and proclaims them a failure, providing as proof the fact that the Keene PD has a Bearcat vehicle. He then repeats his challenge to confront government violence with more violence, and reiterates his false dichotomy by proclaiming the only alternative is “lying down”, “holding a sign”, “filing paperwork”, or “waving your finger at people”.

Stepping away from Chris Cantwell, and his article at Cop Block, I will now address the concept that violence and even revolution are a necessity for a future based on liberty.

Let’s reexamine the pacifists for a moment. The great difference between the true pacifist and the internet tough guy is that the pacifist lives his philosophy every day of his life, whereas the internet tough guy wants others to go live their fantasy for them, while they sit safely on their couch eating Cheetos. I wish I had a pre-1964 dime for every time I have seen an internet tough guy telling others that the “line has been crossed” and the “time for talk is over” and “we tried their way, now it’s time for action”. The internet would be so much more entertaining if these tough guys would shut their mouths, stand up, button their pants, wipe the french fry grease off their hands, and do what they want others to do for them. In that one fatal moment, the internet and the liberty movement would be cleansed of these clowns and their silly routines. But never fear, they aren’t likely to take my advice and walk their talk. When their grand day of action comes, they are more likely to film themselves in predawn light on a vacant street, making their threats, than actually confronting a government killer on his own terms.

Another characteristic of the doom-and-gloom, kill-’em-all way of thinking is the predominant lack of understanding of basic economics, and of the tendency of the market to provide what people want. Today, in the real world where we walk around and live our lives, the vast majority of humans want government. They fear their neighbors, so they believe they need those government police. Even more, they fear the foreign bogeyman who could, at any moment, attack and take away our freedom and force us to speak some other language or convert us all to some evil religion. So currently there is a market demand for aggressive government. So long as that demand exists, no amount of patriot bravado or revolutionary uprising can ever produce anything other than more government oppression and aggression.

Over and over, throughout history and throughout the globe, people have grown sick and tired of the government boot at their throat, and have beaten their plows into swords.
Over and over they have risen up, burned the castle, and hanged the tyrant, only to then watch helplessly as the leaders of the revolution become a worse government than the one they kicked out. The solution for this loop in history is to stop repeating the loop! Taking arms against a government can only produce one of two results: either the government will bring its might upon the rebels and crush them, or the rebels will win and become the government. To continue the same pattern in blind faith of a different result is madness.

Now don’t let it be said (yet again) that I only criticize and that I offer no answers. I do offer answers. But I offer those answers in a language that can only be understood by those who seek the truth, not preconceived notions. And I offer answers that don’t easily fit the bumper sticker culture or the 2 minute 30 second pop-music/pop-news headline-reading public.

Understanding the Zero Aggression Principle is the beginning, but it is not the end. Understanding the concepts of private property and the fallacy of public property is a step on the journey. Understanding the difference between what I may morally do and that which is expedient is a move toward wisdom. Bringing together that wisdom, an understanding of property and defense, and a clarified view of our enemy is required if victory is to be achieved and freedom for mankind is to be found. This knowledge cannot be taught in a brief emotional article. It must be revealed in the heart before it can be understood by the head.

The last thoughts I would like to express today are very old words of advice:

Know the art of war before you engage an enemy.

Every battle is won or lost before it’s fought.

The wise warrior should feign ignorance to his opponent. The strong warrior should feign weakness.

If you are far from your enemy, make him think you are near. Worry your enemy, make him lose sleep, and anger him.

If you are near your enemy, make him think you are far away. Convince him he is safe. Let him slumber while you loot his castle.

Never fight an enemy according to his strength.

Never fight an enemy on the field of his choosing.

Never allow your enemy to choose the method of battle.

Never allow your enemy to choose your weapon.

Always cheat at the enemy’s rules, but never cheat at what is right and true.

There is one weapon, one battlefield, and one entity that can wield that weapon on that battlefield, and defeat the State forever. The market is the natural mechanism that produces what humans desire. So long as humans desire government there will be government, but the moment humans stop desiring government and desire freedom, humans will have freedom. It’s not our job to fight government agents. It’s our job to sell the desire for freedom to a hungry market.

Ben Stone, 2013

Permission to reprint, copy, alter, claim credit for, or outright steal, is gladly given because IP law is an imaginary concept made up by statists.
def _any_is_conman(self) -> bool:
    return self._is_conman or any(col._is_conman for col in self._columns.values())