# Reads "A B K" and prints the K-th largest common divisor of A and B.
lst = input().split(" ")
A = int(lst[0])
B = int(lst[1])
K = int(lst[2])
lst = []
for i in range(1, min(A, B) + 1):
    if A % i == 0 and B % i == 0:
        lst.append(i)
print(lst[len(lst) - K])
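# A minimal alternative sketch for large inputs: every common divisor of A and
# B divides gcd(A, B), so it suffices to enumerate the divisors of the gcd in
# O(sqrt(gcd)) time. Defined but not called above; shown for comparison.
import math

def kth_largest_common_divisor(a, b, k):
    g = math.gcd(a, b)
    divs = set()
    for d in range(1, math.isqrt(g) + 1):
        if g % d == 0:
            divs.add(d)
            divs.add(g // d)
    return sorted(divs)[-k]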
|
/**
 * Creates a patch from the two passed in files, writing the result
 * to <code>os</code>.
 *
 * @param oldPath path of the original JAR file
 * @param newPath path of the new JAR file
 * @param os output stream the generated patch is written to
 * @param minimal whether to favor recording moves over re-embedding entries, yielding a smaller patch
 * @throws IOException if either JAR cannot be read or the patch cannot be written
 */
public static void createPatch( String oldPath, String newPath, OutputStream os, boolean minimal )
throws IOException
{
JarFile2 oldJar = new JarFile2( oldPath );
JarFile2 newJar = new JarFile2( newPath );
try
{
Iterator entries;
HashMap moved = new HashMap();
HashSet visited = new HashSet();
HashSet implicit = new HashSet();
HashSet moveSrc = new HashSet();
HashSet newEntries = new HashSet();
entries = newJar.getJarEntries();
if ( entries != null )
{
while ( entries.hasNext() )
{
JarEntry newEntry = (JarEntry) entries.next();
String newname = newEntry.getName();
String oldname = oldJar.getBestMatch( newJar, newEntry );
if ( oldname == null )
{
if ( _debug )
{
System.out.println( "NEW: " + newname );
}
newEntries.add( newname );
}
else
{
if ( oldname.equals( newname ) && !moveSrc.contains( oldname ) )
{
if ( _debug )
{
System.out.println( newname + " added to implicit set!" );
}
implicit.add( newname );
}
else
{
if ( !minimal && ( implicit.contains( oldname ) || moveSrc.contains( oldname ) ) )
{
if ( _debug )
{
System.out.println( "NEW: " + newname );
}
newEntries.add( newname );
}
else
{
if ( _debug )
{
System.err.println( "moved.put " + newname + " " + oldname );
}
moved.put( newname, oldname );
moveSrc.add( oldname );
}
if ( implicit.contains( oldname ) && minimal )
{
if ( _debug )
{
System.err.println( "implicit.remove " + oldname );
System.err.println( "moved.put " + oldname + " " + oldname );
}
implicit.remove( oldname );
moved.put( oldname, oldname );
moveSrc.add( oldname );
}
}
}
}
}
ArrayList deleted = new ArrayList();
entries = oldJar.getJarEntries();
if ( entries != null )
{
while ( entries.hasNext() )
{
JarEntry oldEntry = (JarEntry) entries.next();
String oldName = oldEntry.getName();
if ( !implicit.contains( oldName ) && !moveSrc.contains( oldName ) &&
!newEntries.contains( oldName ) )
{
if ( _debug )
{
System.err.println( "deleted.add " + oldName );
}
deleted.add( oldName );
}
}
}
if ( _debug )
{
// print out moved map
entries = moved.keySet().iterator();
if ( entries != null )
{
System.out.println( "MOVED MAP!!!" );
while ( entries.hasNext() )
{
String newName = (String) entries.next();
String oldName = (String) moved.get( newName );
System.out.println( "key is " + newName + " value is " + oldName );
}
}
// print out IMOVE map
entries = implicit.iterator();
if ( entries != null )
{
System.out.println( "IMOVE MAP!!!" );
while ( entries.hasNext() )
{
String newName = (String) entries.next();
System.out.println( "key is " + newName );
}
}
}
JarOutputStream jos = new JarOutputStream( os );
createIndex( jos, deleted, moved );
entries = newEntries.iterator();
if ( entries != null )
{
while ( entries.hasNext() )
{
String newName = (String) entries.next();
if ( _debug )
{
System.out.println( "New File: " + newName );
}
writeEntry( jos, newJar.getEntryByName( newName ), newJar );
}
}
jos.finish();
jos.close();
}
catch ( IOException ioE )
{
throw ioE;
}
finally
{
try
{
oldJar.getJarFile().close();
}
catch ( IOException e1 )
{
}
try
{
newJar.getJarFile().close();
}
catch ( IOException e1 )
{
}
}
} |
def active_authenticators(self, email, username, password):
    try:
        for authenticator in self.authenticators:
            filter_template = authenticator.filter_template
            if filter_template:
                filter_str = filter_template.format(email=email, username=username, password=password)
                passed_filter = eval(filter_str, {"__builtins__": None}, {'str': str})
                if not passed_filter:
                    continue
            options = authenticator.options
            options['redact_username_in_logs'] = self.redact_username_in_logs
            yield authenticator.plugin, options
    except Exception:
        log.exception("Active Authenticators Failure")
        raise
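A minimal sketch of how such a filter template is evaluated (the template string and field values below are hypothetical, not from the source): the template is expanded textually with format() and then eval'd with builtins disabled and only str exposed, exactly as in the loop above.

# Hypothetical template: activate this authenticator for one email domain only.
filter_template = "'{email}'.endswith('@example.com')"
filter_str = filter_template.format(email="user@example.com",
                                    username="alice", password="")
passed_filter = eval(filter_str, {"__builtins__": None}, {'str': str})
print(passed_filter)  # True
|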
/*************************************************************************
> File Name: b.cpp
> Author: ghost_lzw
> Mail: [email protected]
> Created Time: Tue 21 Jan 2020 05:07:31 PM DST
************************************************************************/
#include<bits/stdc++.h>
using namespace std;
#define endl '\n'
int main(){
ios::sync_with_stdio(false);
cin.tie(nullptr);
int n, m;
cin >> n >> m;
if(n == m){cout << "0 0" << endl;}
else {
if(m)cout << 1;
else cout << 0;
cout << " ";
cout << min(m * 2, n - m) << endl;
}
return 0;
}
|
/**
* A simple {@link Fragment} subclass.
*/
public class BlankFragment extends Fragment {
View view;
private Button btnNextMatch;
//countdown=====================
private String EVENT_DATE_TIME = "2018-12-31 10:30:00";
private String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
private LinearLayout linear_layout_1, linear_layout_2;
private TextView tv_days, tv_hour, tv_minute, tv_second;
private Handler handler = new Handler();
private Runnable runnable;
CountDownTimer countDownTimer;
long millis=0;
public BlankFragment() {
// Required empty public constructor
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// Inflate the layout for this fragment
view= inflater.inflate(R.layout.fragment_blank, container, false);
btnNextMatch=view.findViewById(R.id.btnNextMatchId);
btnNextMatch.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
NextMatchFragment nextMatchFragment = new NextMatchFragment();
FragmentManager fragmentManager = getFragmentManager();
FragmentTransaction fragmentTransaction = fragmentManager.beginTransaction();
fragmentTransaction.replace(R.id.changeLayout, nextMatchFragment);
fragmentTransaction.commit();
}
});
//countdown====================================
// initUI();
// countDownStart();
tv_days=view.findViewById(R.id.tv_days);
tv_hour=view.findViewById(R.id.tv_hour);
tv_minute=view.findViewById(R.id.tv_minute);
tv_second=view.findViewById(R.id.tv_second);
String date = "2018-06-14 18:00:00 GMT+03:00";
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
df.setTimeZone(TimeZone.getTimeZone("GMT+03:00"));
try {
millis = df.parse(date).getTime() - System.currentTimeMillis();
} catch (ParseException e) {
e.printStackTrace();
}
countDownTimer=new CountDownTimer(millis,1000) {
long different=0;
@Override
public void onTick(long millisUntilFinished) {
different=millisUntilFinished;
Log.d("rtime",String.valueOf(different));
long secondsInMilli = 1000;
long minutesInMilli = secondsInMilli * 60;
long hoursInMilli = minutesInMilli * 60;
long daysInMilli = hoursInMilli * 24;
long elapsedDays = different / daysInMilli;
different = different % daysInMilli;
long elapsedHours = different / hoursInMilli;
different = different % hoursInMilli;
long elapsedMinutes = different / minutesInMilli;
different = different % minutesInMilli;
long elapsedSeconds = different / secondsInMilli;
tv_days.setText(String.valueOf(elapsedDays));
tv_hour.setText(String.valueOf(elapsedHours));
tv_minute.setText(String.valueOf(elapsedMinutes));
tv_second.setText(String.valueOf(elapsedSeconds));
}
@Override
public void onFinish() {
tv_days.setVisibility(View.GONE);
tv_hour.setVisibility(View.GONE);
tv_minute.setVisibility(View.GONE);
tv_second.setVisibility(View.GONE);
MatchFragment matchFragment = new MatchFragment();
FragmentManager fragmentManager = getFragmentManager();
FragmentTransaction fragmentTransaction = fragmentManager.beginTransaction();
fragmentTransaction.replace(R.id.changeLayout, matchFragment);
fragmentTransaction.commit();
}
}.start();
return view;
}
} |
/////////////////////////////////////////////////
/// \brief This feature constructs an empty model and returns its pointer
/// from the given world.
class ConstructEmptyModelFeature : public virtual Feature
{
public: template <typename PolicyT, typename FeaturesT>
class World : public virtual Feature::World<PolicyT, FeaturesT>
{
public: using ModelPtrType = ModelPtr<PolicyT, FeaturesT>;
public: ModelPtrType ConstructEmptyModel(const std::string &_name);
};
public: template <typename PolicyT>
class Implementation : public virtual Feature::Implementation<PolicyT>
{
public: virtual Identity ConstructEmptyModel(
const Identity &_worldID, const std::string &_name) = 0;
};
}; |
def forward(self, tasks):
    """Predict a grammar for each task: extract features, map them to Q
    vectors, and convert each Q vector into a PCFG via the template CFG."""
    features = self.feature_extractor(tasks)
    q = self.q_predictor(features)
    q_dictionary = [self.q_vector_to_dictionary(q[b])
                    for b in range(len(tasks))]
    grammars = [self.template_cfg.Q_to_PCFG(q_dictionary[b])
                for b in range(len(tasks))]
    return grammars |
THE URGENCY OF VIEWING NON-TEST ASSESSMENTS AS HUMANISTIC ASSESSMENT
Assessing students with various characteristics is a challenge for teachers. The effort of being adaptable to each characteristic leads to joyful moments or even frustrating situations. How can we deal with the characteristics? This article is aimed at giving teachers preliminary discussion towards non-test assessments and how urgent they are that teachers should consider in facilitating students’ characteristics. Since syllabus still requires test admission as how to evaluate learning, non-test assessments can take position as students’ supplementary records. Briefly, this article is expected to help teachers cope with their students’ characteristics by supplementing non-test activities in evaluating students’ learning process. |
Platelet Number and Indexes during Acute Pancreatitis
ABSTRACT Aim Acute pancreatitis (AP) is an inflammatory disorder, the incidence of which has been increasing over recent years. Mean platelet volume (MPV) is an index of platelet activation and is influenced by inflammation. The objective of the present study is to assess whether MPV would be a convenient predictive parameter in patients with AP. Materials and methods A total of 140 AP patients (male/female: 63/77) and 70 healthy subjects (male/female: 23/47) were enrolled in this study. Age, sex, platelet count, and MPV were extracted from the hospital medical records at the time of admission as well as on the 1st day of remission of the disease. Results MPV levels at onset and remission of AP were 7.8 ± 1.6 and 7.7 ± 0.9, respectively, and there was no statistically significant difference between these groups. Platelet count at onset of AP, at remission of AP, and in control subjects was 203 ± 74 × 10³/μl, 234 ± 76 × 10³/μl and 251 ± 87 × 10³/μl, respectively, and there was a statistically significant difference between these groups: platelet count at onset and remission of AP was significantly lower than in control subjects. Conclusion Some studies in the literature suggest that MPV might be a useful indicator and prognostic factor for AP, but this study revealed that MPV values do not change in AP compared with controls. Therefore, further prospective studies investigating the factors affecting platelet size are required to determine whether MPV has a clinical implication and predictive value in patients with AP. How to cite this article: Kefeli A, Basyigit S, Yeniova AÖ, Küçükazman M, Nazligul Y, Aktas B. Platelet Number and Indexes during Acute Pancreatitis. Euroasian J Hepato-Gastroenterol 2014;4(2):67-69.
INTRODUCTION
Acute pancreatitis (AP) is a common clinical condition, the incidence of which has been increasing over recent years. 1 It is a disease of variable severity in which some patients experience mild, self-limited attacks, while others manifest a severe, highly morbid and frequently lethal attack. The exact mechanisms by which diverse etiological factors induce an attack are still unclear. Most cases are secondary to biliary disease or excess alcohol consumption. 2 AP is an inflammatory disorder characterized by a complex cascade of immunological events, which not only relates to pathogenesis but also bears importance in determining the course of the disease. At present, it is widely accepted that the premature activation of digestive enzymes within the pancreatic acinar cells initiates the disease.
MATERIALS AND METHODS
A total of 140 patients admitted to our Research Hospital from January 2008 to September 2011 and discharged with a diagnosis of AP were included in this study. Seventy healthy subjects were enrolled retrospectively into the study. Healthy controls were recruited from healthy adults without any history of acute/chronic inflammatory disorders or drug usage.
Diagnosis of AP was based on the presence of severe abdominal pain, tenderness in the mid-epigastrium, and a serum amylase level three times higher than normal. The following data were extracted from the hospital medical records: AP etiology, age, sex, radiologic imaging, and laboratory tests at onset and remission of disease. Remission was considered reached when the initial symptoms had disappeared, the patient had started to take oral nutrition, and amylase had returned to normal levels.
Platelet number and MPV were recorded at the time of admission as well as on the 1st day of remission of the disease.
Exclusion criteria were impaired pancreatic function (e.g. due to chronic pancreatitis or pancreatic carcinoma), impaired platelet function, heart failure, acute or chronic inflammatory disorders, cancer, and hepatic disease.
Data were analyzed by using a commercially available statistics software package (SPSS for Windows version 15.0, Chicago, Illinois, USA). Continuous variables were tested for normality by the Kolmogorov-Smirnov test. Values are presented as mean ± standard deviation or, in the case of non-normally distributed data, as median and range. Comparisons of percentages between different groups of patients were carried out using the chi-square test. Student's t-test was performed for all normally distributed data, and the Mann-Whitney U-test for non-normally distributed data in independent subgroups. Results are presented as mean ± SD, and p < 0.05 was regarded as statistically significant.
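As a rough illustration of the comparisons described above, a sketch using scipy with made-up arrays standing in for the study data (means, SDs and group sizes taken from the Results; numpy/scipy assumed available):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
# hypothetical platelet counts (x10^3/ul): AP at onset vs. healthy controls
onset = rng.normal(203, 74, 140)
controls = rng.normal(251, 87, 70)

# normality check (Kolmogorov-Smirnov on z-scores, a rough stand-in)
print(stats.kstest(stats.zscore(onset), "norm"))
# Student's t-test for normally distributed independent groups
print(stats.ttest_ind(onset, controls))
# Mann-Whitney U-test for non-normally distributed independent groups
print(stats.mannwhitneyu(onset, controls))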
RESULTS
A total of 140 patients with AP (63 males and 77 females) and 70 (23 males and 47 females) healthy control subjects were enrolled in this study. The mean ages of AP and control subjects were 57.9 ± 14.8 and 54.1 ± 16.1 years, respectively. There was no significant difference between the ages of the study participants.
The MPV levels at onset of AP and control subjects were 7.8 ± 1.6 and 7.8 ± 1.0 respectively, and there was no statistically significant difference between these groups ( Table 1).
The MPV levels at onset and remission of AP were 7.8 ± 1.6 and 7.7 ± 0.9, respectively, and there was no statistically significant difference between these groups (Table 1).
DISCUSSION
Acute pancreatitis is a systemic inflammatory process, which is often accompanied by thrombosis and bleeding disorders. 1 The role of platelets in the pathophysiology of the disease has not been elucidated yet. It was found that both thrombocytopenia and thrombocytosis have been associated with pancreatitis. 4 A study showed that an absence of thrombocytopenia (type 1) generally indicates a favorable prognosis, transient thrombocytopenia (type 2) is also associated with a favorable prognosis, and persistent thrombocytopenia (type 3) generally indicates a poor prognosis. 5 These results suggest that platelet count at admission and its change over the course of hospitalization can be useful measures for predicting prognosis in patients with AP. Two other studies that assessed platelet count as a parameter for evaluating prognosis in AP reported that the sequential organ failure assessment (SOFA) score, which enumerates the severity of organ failure, is useful for prognostic evaluation. Platelet count can be evaluated easily and promptly at admission in most clinical settings. Hence, assessment of platelet count is recommended from the standpoint of accuracy and convenience. 6,7 In this study, it was revealed that platelet count significantly decreased at the onset and the remission of AP compared with controls, but there was no thrombocytopenia. Measurement of platelets over the course of hospitalization may be one of the most accurate and convenient parameters for precisely assessing the prognosis of patients with acute pancreatitis.
There are several studies in the literature indicating that mean platelet levels may be associated with hypercoagulability in the course of AP, but the data are conflicting. Three of these studies reported that MPV, platelet distribution width (PDW) and platelet large cell ratio were decreased at the onset of the disease. The presence of large platelets and a significant difference between onset and remission of the disease were documented for MPV. They suggested that platelets are directly involved in the systemic inflammatory process of AP, compensated by an immediate bone marrow response. The exact reason for decreased MPV in AP is not clear, but it is speculated that platelets not only control thrombosis and homeostasis but may also regulate inflammatory processes. Moreover, several cytokines that have been found to play a crucial role in the pathogenesis of AP may affect MPV. Studies show increased levels of tumor necrosis factor-α (TNF-α), interleukin-1 (IL-1), IL-6 and monocyte chemotactic protein-1 (MCP-1) in AP. Among these mediators, IL-6 is suggested to be the main factor responsible for the decreased levels of MPV. 11,12 Another study showed that MPV levels were elevated in AP compared with controls and remained elevated when AP was in remission. They also found a positive correlation between MPV and pancreatic enzymes. Although inflammation markers were reduced at remission, MPV, D-dimer and fibrinogen continued to be elevated; indeed, there were significantly positive correlations among MPV, fibrinogen and D-dimer. Therefore, elevated MPV did not seem to be a consequence of acute inflammation in AP. Consequently, they suggest that MPV can reflect thrombotic status in AP. 13 These studies found that MPV changed in AP and suggest that MPV may be a convenient predictive parameter in patients with acute pancreatitis.
In this study, in contrast to other studies in the literature, it was revealed that MPV levels do not change at either the onset or the remission of AP compared with controls.
CONCLUSION
Some studies in the literature suggest that MPV might be a useful indicator for AP. This parameter, counted by clinical hematology analyzers, is a simple, effortless diagnostic tool for platelet function and activation and adds no extra cost or technical effort, but there are conflicting data about MPV values in AP. Therefore, further prospective studies investigating the factors affecting platelet size are required to determine whether MPV has a clinical implication and role in this disease. |
/**
 * Forces the calling thread to wait while there are 2 or fewer balls in the
 * circle, so a ball only leaves when enough balls remain.
 * @throws InterruptedException if the thread is interrupted while waiting
 */
public synchronized void consume()throws InterruptedException {
while (numOfSpritesinCircle <= 2){
wait();
}
numOfSpritesinCircle--;
notifyAll();
}
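For illustration, the same guarded-wait (monitor) idiom sketched in Python with threading.Condition; the class name and the matching produce() are illustrative additions, not from the source.

import threading

class Circle:
    def __init__(self, count=0):
        self.count = count
        self.cond = threading.Condition()

    def consume(self):
        with self.cond:
            while self.count <= 2:       # same guard as the Java version
                self.cond.wait()
            self.count -= 1
            self.cond.notify_all()

    def produce(self):
        with self.cond:
            self.count += 1
            self.cond.notify_all()       # wake any waiting consumers
|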
A top US general has told reporters that North Korea has likely achieved the capability to miniaturize nuclear weapons that could be placed on top of a rocket, Felicia Schwartz reports for The Wall Street Journal.
Gen. Curtis Scaparrotti, the commander of US forces on the Korean peninsula, told reporters at the Pentagon today that he believes that North Korea is likely able to miniaturize a nuclear device. However, the US has not yet seen evidence that North Korea has actually conducted a miniaturized nuclear weapon test.
Scaparrotti said at the briefing that he believes North Korea has "the capability to miniaturize a device at this point and they have the technology to actually deliver what they say they have."
Although he is unsure of where North Korea may have acquired the technology necessary to overcome the technological hurdle, the general said that the advance may have been aided by proliferation efforts from Iran or Pakistan.
"They have proliferation, relationships with other countries, Iran and Pakistan in particular," Scaparrotti said.
The miniaturization of warheads by North Korea could lead to a serious rebalancing of power dynamics in the region.
The Wall Street Journal:
Such nuclear warheads would be small enough to fit on a ballistic missile and would be a major improvement to Pyongyang's weapons technology. Gen. Scaparrotti said he believed North Korea also had developed a launcher that could carry an ICBM with a miniaturized warhead.
However, successfully fitting nuclear warheads on a missile and carrying out a successful launch is still technologically taxing. Experts believe that an actual launch may be currently beyond North Korea's ballistic capabilities.
This announcement comes on the heels of a recent charm offensive carried out by North Korea. Representatives of the Hermit Kingdom have visited South Korea and the EU, and Kim Jong-Un personally ordered the release of imprisoned American Jeffrey Fowle. |
Pollution for Promotion
This paper demonstrates that China's high environmental pollution levels can partly be explained by the incentives embedded in the country's political institutions. Guided by a simple career concerns model with the choice of dirty and clean technologies, I examine empirically how promotion incentives of provincial governors affect pollution. To find exogenous variation in promotion incentives, I explore within-governor variation in connections with key officials due to reshuffling at the center and document the fact that connections are complementary to economic performance for governors' promotion. The data confirms the model prediction that connections increase pollution. Auxiliary predictions of the model are also confirmed. |
/**
* @brief Adds a task listener.
* @param listener The listener
* @return None
*/
void Task::AddTaskListener(const std::shared_ptr<TaskListener> &listener)
{
APP_LOGI("Task.AddTaskListener listener called start");
taskListeners_.Offer(listener);
} |
<reponame>ETspielberg/alma-connector
package org.unidue.ub.libintel.almaconnector.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;
import org.unidue.ub.libintel.almaconnector.model.run.AlmaExportRun;
import java.util.Date;
import java.util.List;
@Repository
public interface AlmaExportRunRepository extends JpaRepository<AlmaExportRun, String> {
List<AlmaExportRun> findByIdentifierStartsWithOrderByRunIndex(String identifier);
List<AlmaExportRun> findAllByDesiredDateOrderByRunIndex(Date desiredDate);
AlmaExportRun save(AlmaExportRun almaExportRun);
}
|
// IsInstanceRunning returns whether the instance is in running state.
func (g *GCloud) IsInstanceRunning(name string) (bool, error) {
status, err := g.getInstanceStatus(name)
if err != nil {
return false, err
}
return status == instanceStatusRunning, nil
} |
<filename>src/util/config.h
/*******************************************************************\
Module:
Author: <NAME>, <EMAIL>
\*******************************************************************/
#ifndef CPROVER_UTIL_CONFIG_H
#define CPROVER_UTIL_CONFIG_H
#include <cstdint>
#include <list>
#include <string>
#include <util/cmdline.h>
#include <util/options.h>
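/* GNUC_PREREQ below packs a GCC version into a single integer as
 * (major << 20) + (minor << 10) + patchlevel, so e.g. GCC 5.4.0 encodes to
 * (5 << 20) + (4 << 10) + 0 = 5246976 and GNUC_PREREQ(3, 4, 0) asks whether
 * the compiler is at least GCC 3.4.0. */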
#ifndef GNUC_PREREQ
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define GNUC_PREREQ(maj, min, patch) \
((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
((maj) << 20) + ((min) << 10) + (patch))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define GNUC_PREREQ(maj, min, patch) \
((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
#else
#define GNUC_PREREQ(maj, min, patch) 0
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#if __has_attribute(noinline) || GNUC_PREREQ(3, 4, 0)
#define ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define ATTRIBUTE_NOINLINE __declspec(noinline)
#else
#define ATTRIBUTE_NOINLINE
#endif
#if __has_attribute(used) || GNUC_PREREQ(3, 1, 0)
#define ATTRIBUTE_USED __attribute__((__used__))
#else
#define ATTRIBUTE_USED
#endif
#if !defined(NDEBUG)
#define DUMP_METHOD ATTRIBUTE_NOINLINE ATTRIBUTE_USED
#else
#define DUMP_METHOD ATTRIBUTE_NOINLINE
#endif
class configt
{
public:
struct triple
{
std::string arch = "none";
std::string vendor = "unknown";
std::string os = "elf";
std::string flavor;
bool is_windows_abi() const;
bool is_freebsd() const;
bool is_macos() const;
std::string to_string() const;
};
#define dm(char, short, int, long, addr, word, long_dbl) \
((uint64_t)(char) | (uint64_t)(short) << 8 | (uint64_t)(int) << 16 | \
(uint64_t)(long) << 24 | (uint64_t)(addr) << 32 | (uint64_t)(word) << 40 | \
(uint64_t)(long_dbl) << 48)
enum data_model : uint64_t
{
/* 16-bit */
IP16 = dm(8, 16, 16, 32, 16, 16, 64), /* unsegmented 16-bit */
LP32 = dm(8, 16, 16, 32, 32, 16, 64), /* segmented 16-bit DOS, Win16 */
/* 32-bit */
IP32 = dm(8, 16, 32, 64, 32, 32, 96), /* Ultrix '82-'95 */
ILP32 = dm(8, 16, 32, 32, 32, 32, 96), /* Win32 || other 32-bit Unix */
/* 64-bit */
LLP64 = dm(8, 16, 32, 32, 64, 64, 128), /* Win64 */
ILP64 = dm(8, 16, 64, 64, 64, 64, 128), /* Unicos for Cray PVP systems */
LP64 = dm(8, 16, 32, 64, 64, 64, 128), /* other 64-bit Unix */
};
#undef dm
struct ansi_ct
{
// for ANSI-C
unsigned int_width;
unsigned long_int_width;
unsigned bool_width;
unsigned char_width;
unsigned short_int_width;
unsigned long_long_int_width;
unsigned pointer_width;
unsigned single_width;
unsigned double_width;
unsigned long_double_width;
unsigned pointer_diff_width;
unsigned word_size;
unsigned wchar_t_width;
bool char_is_unsigned;
bool use_fixed_for_float;
typedef enum
{
NO_ENDIANESS,
IS_LITTLE_ENDIAN,
IS_BIG_ENDIAN
} endianesst;
endianesst endianess;
triple target;
std::list<std::string> defines;
std::list<std::string> include_paths;
std::list<std::string> forces;
std::list<std::string> warnings;
typedef enum
{
LIB_NONE,
LIB_FULL
} libt;
libt lib;
void set_data_model(enum data_model dm);
} ansi_c;
std::string main;
bool set(const cmdlinet &cmdline, const messaget &msg);
optionst options;
static std::string this_architecture();
static std::string this_operating_system();
static triple host();
};
extern configt config;
#endif
|
Okay, that’s not true. But it’s no different than this:
Prosecutors Ask if 8 Banks Duped Rating Agencies
Wall Street played a crucial role in the mortgage market’s path to collapse. Investment banks bundled mortgage loans into securities and then often rebundled those securities one or two more times. Those securities were given high ratings and sold to investors, who have since lost billions of dollars on them. … At Goldman, there was even a phrase for the way bankers put together mortgage securities. The practice was known as “ratings arbitrage,” according to former workers. The idea was to find ways to put the very worst bonds into a deal for a given rating. The cheaper the bonds, the greater the profit to the bank. The rating agencies may have facilitated the banks’ actions by publishing their rating models on their corporate Web sites. The agencies argued that being open about their models offered transparency to investors. But several former agency workers said the practice put too much power in the bankers’ hands. “The models were posted for bankers who develop C.D.O.’s to be able to reverse engineer C.D.O.’s to a certain rating,” one former rating agency employee said in an interview, referring to collateralized debt obligations.
I just finished reading Michael Lewis’ The Big Short, and it’s pretty clear that the banks knew enough about the rating agencies’ models to pretty successfully turn shit into shinola. In fact, the agencies made enough of their ratings models public to make it absolutely certain that the banks would game the system. Not *dupe* the rating agencies, mind you, because the ratings agencies were willing partners.
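As a toy illustration of that reverse engineering (all numbers here are hypothetical): if a published model rates a pool on its average FICO score, a banker can solve for the largest fraction of cheap, low-FICO loans that still clears the cutoff, rather than filling the pool with uniformly sound loans.

# Fraction w of low-FICO loans that keeps the pool average exactly at the cutoff:
#   w*low + (1 - w)*high = cutoff  =>  w = (high - cutoff) / (high - low)
def max_risky_fraction(cutoff, low, high):
    return (high - cutoff) / (high - low)

# With a hypothetical published cutoff of 615, mixing 550- and 680-FICO loans:
print(max_risky_fraction(615, 550, 680))  # 0.5 -> half the pool can be the cheap loans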
But I thought about it a little bit more, and I was struck by another thought.
The Democratic House leadership wanted the cost projection of the healthcare bill to come in under a certain number. So what did they do? They duped, or rather gamed, the CBO scoring system to ensure that the bill they wrote would have the price tag they wanted it to have. The CBO is a respected and non-partisan office, but it is asked only to score what legislators give it, NOT what it thinks the legislators will do in other bills immediately or a few years down the line.
Essentially both the Wall Street banks and the Congressional leadership did the same thing: they were teaching to the test. They knew specifically what was needed to generate a favorable outcome from the "test," and they made sure to write exactly what they wanted in a way that still got the right score.
So who’s going to prosecute the Democratic leadership when this healthcare bill inevitably costs the American people more than they advertised? |
P2X7 receptors: role in bone cell formation and function.
The role of the P2X7 receptor (P2X7R) is being explored with intensive interest in the context of normal bone physiology, bone-related diseases and, to an extent, bone cancer. In this review, we cover the current understanding of P2X7R regulation of bone cell formation, function and survival. We will discuss how the P2X7R drives lineage commitment of undifferentiated bone cell progenitors, the vital role of P2X7R activation in bone mineralisation and its relatively unexplored role in osteocyte function. We also review how P2X7R activation is imperative for osteoclast formation and its role in bone resorption via orchestrating osteoclast apoptosis. Variations in the gene for the P2X7R (P2RX7) have implications for P2X7R-mediated processes and we review the relevance of these genetic variations in bone physiology. Finally, we highlight how targeting P2X7R may have therapeutic potential in bone disease and cancer. |
n = int(input())
l = list(map(int, input().split()))
total = sum(l)
majority = (total // 2) + 1
indices = [0]
parties = 0
alice = l[0]
majority = majority - alice
for i in range(1, n):
    seats = l[i]
    # a party may join the coalition only if Alice has at least twice its seats
    if 2 * seats <= alice and majority > 0:
        majority = majority - seats
        parties = parties + 1
        indices.append(i)
if majority > 0:
    print(0)
else:
    print(parties + 1)
    for i in indices:
        print(i + 1, end=' ') |
/**
* Created by piek on 9/8/14.
*/
public class SrlFrameNetTagger {
/**
* <externalRef confidence="0.165911" reference="nld-21-d_n-36759-n" resource="cdb2.0-nld-all.infv.0.0.no-allwords">
<externalRef resource="predicate-matrix1.1">
<externalRef reference="fn:Fluidic_motion" resource="fn"/>
<externalRef reference="fn-role:Area" resource="fn-role"/>
<externalRef reference="fn-role:Fluid" resource="fn-role"/>
<externalRef reference="fn:flow.v" resource="fn"/>
<externalRef reference="pb:flow.01" resource="pb"/>
<externalRef reference="fn-role:Goal" resource="fn-role"/>
<externalRef reference="fn-pb-role:Fluid#1" resource="fn-pb-role"/>
<externalRef reference="FN_MAPPING;SYNONYMS" resource=""/>
</externalRef>
*/
static final String layer = "srl";
static final String name = "vua-framenet-srl-tagger";
static final String version = "1.0";
static public void main (String[] args) {
String fns = "";
String ilins = "";
String [] rnss = null;
String pathToKafFile = "";
Double confidenceThreshold = -1.0;
Integer frameThreshold = -1;
String format = "naf";
/*
fns = "fn:";
ilins = "mcr:ili";
String [] rnss = {"fn-role:", "pb-role:", "fn-pb-role:"};
pathToKafFile = "/Tools/nwr-dutch-pipeline/vua-ontotagger-v1.0/example/test.srl.lexicalunits.pm.naf";
// pathToKafFile = "/Tools/ontotagger-v1.0/naf-example/spinoza-voorbeeld-ukb.ont.xml";
// pathToKafFile = "/Tools/ontotagger-v1.0/naf-example/89007714_06.tok.alpino.ner.ukb.pm.ht.srl.naf";
confidenceThreshold = new Double(0.25);
frameThreshold = new Integer(70);
format = "naf";
*/
for (int i = 0; i < args.length; i++) {
String arg = args[i];
if ((arg.equalsIgnoreCase("--kaf-file")) && (args.length>(i+1))) {
pathToKafFile = args[i+1];
}
else if ((arg.equalsIgnoreCase("--naf-file")) && (args.length>(i+1))) {
pathToKafFile = args[i+1];
}
else if ((arg.equalsIgnoreCase("--frame-ns")) && (args.length>(i+1))) {
fns = args[i+1];
}
else if ((arg.equalsIgnoreCase("--ili-ns")) && (args.length>(i+1))) {
ilins = args[i+1];
}
else if ((arg.equalsIgnoreCase("--role-ns")) && (args.length>(i+1))) {
rnss = args[i+1].split(";");
}
else if ((arg.equalsIgnoreCase("--format")) && (args.length>(i+1))) {
format = args[i+1];
}
else if ((arg.equalsIgnoreCase("--sense-conf")) && (args.length>(i+1))) {
try {
confidenceThreshold = Double.parseDouble(args[i + 1]);
} catch (NumberFormatException e) {
e.printStackTrace();
}
}
else if ((arg.equalsIgnoreCase("--frame-conf")) && (args.length>(i+1))) {
try {
frameThreshold = Integer.parseInt(args[i + 1]);
} catch (NumberFormatException e) {
e.printStackTrace();
}
}
}
String strBeginDate = eu.kyotoproject.util.DateUtil.createTimestamp();
String strEndDate = null;
KafSaxParser kafSaxParser = new KafSaxParser();
if (pathToKafFile.isEmpty()) {
//kafSaxParser.encoding = "UTF-8";
kafSaxParser.parseFile(System.in);
}
else {
if (pathToKafFile.toLowerCase().endsWith(".gz")) {
try {
InputStream fileStream = new FileInputStream(pathToKafFile);
InputStream gzipStream = new GZIPInputStream(fileStream);
kafSaxParser.parseFile(gzipStream);
} catch (IOException e) {
e.printStackTrace();
}
}
else if (pathToKafFile.toLowerCase().endsWith(".bz2")) {
try {
InputStream fileStream = new FileInputStream(pathToKafFile);
InputStream gzipStream = new CBZip2InputStream(fileStream);
kafSaxParser.parseFile(gzipStream);
} catch (IOException e) {
e.printStackTrace();
}
}
else {
kafSaxParser.parseFile(pathToKafFile);
}
}
processSrlLayer(kafSaxParser, fns, rnss,ilins, confidenceThreshold.doubleValue(), frameThreshold.intValue());
strEndDate = eu.kyotoproject.util.DateUtil.createTimestamp();
String host = "";
try {
host = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
LP lp = new LP(name,version, strBeginDate, strBeginDate, strEndDate, host);
kafSaxParser.getKafMetaData().addLayer(layer, lp);
if (format.equalsIgnoreCase("naf")) {
kafSaxParser.writeNafToStream(System.out);
/* try {
OutputStream fos = new FileOutputStream("/Tools/ontotagger-v1.0/naf-example/89007714_06.ont.srl.naf");
kafSaxParser.writeNafToStream(fos);
fos.close();
} catch (IOException e) {
e.printStackTrace();
}*/
}
else if (format.equalsIgnoreCase("kaf")) {
kafSaxParser.writeKafToStream(System.out);
}
}
static String getResourceFromSenseCode (KafSense kafSense) {
String resource = kafSense.getResource();
int idx = kafSense.getSensecode().indexOf(":");
// System.out.println("kafSense = " + kafSense.getSensecode());
if (idx>-1) {
String ns = kafSense.getSensecode().substring(0, idx);
// System.out.println("ns = " + ns);
if (ns.toLowerCase().equals("fn")) {
resource = "FrameNet";
}
else if (ns.toLowerCase().equals("pb")) {
resource = "PropBank";
}
else if (ns.toLowerCase().equals("fn-role")) {
resource = "FrameNet";
}
else if (ns.toLowerCase().equals("fn-pb-role")) {
resource = "FrameNet";
}
else if (ns.toLowerCase().equals("pb-role")) {
resource = "PropBank";
}
else if (ns.toLowerCase().equals("vn-role")) {
resource = "VerbNet";
}
else if (ns.toLowerCase().equals("nb")) {
resource = "NomBank";
}
else if (ns.toLowerCase().equals("vn")) {
resource = "VerbNet";
}
else if (ns.toLowerCase().equals("eso")) {
resource = "ESO";
}
else if (ns.toLowerCase().equals("ili")) {
resource = "WordNet";
}
else if (kafSense.getSensecode().toLowerCase().startsWith("mcr:ili")) {
resource = "WordNet";
}
}
return resource;
}
static String removeNameSpaceFromSenseCode (KafSense kafSense) {
String reference = removeNameSpaceFromSenseString(kafSense.getSensecode());
return reference;
}
static String removeNameSpaceFromSenseString (String kafSense) {
String reference = kafSense;
int idx = kafSense.indexOf(":");
if (idx>-1) {
reference = kafSense.substring(idx+1);
}
return reference;
}
static void fixExternalReference (KafSense kafSense) {
kafSense.setResource(getResourceFromSenseCode(kafSense));
kafSense.setSensecode(removeNameSpaceFromSenseCode(kafSense));
}
static public void processSrlLayer (KafSaxParser kafSaxParser,
String fns,
String [] rnss,
String ilins,
double confidenceThreshold,
int framethreshold) {
for (int i = 0; i < kafSaxParser.getKafEventArrayList().size(); i++) {
KafEvent event = kafSaxParser.getKafEventArrayList().get(i);
for (int j = 0; j < event.getSpanIds().size(); j++) {
String termId = event.getSpanIds().get(j);
/// we are assuming that predicates have a span size of one term!!!!
KafTerm kafTerm = kafSaxParser.getTerm(termId);
if (kafTerm!=null) {
HashMap<String, ArrayList<SenseFrameRoles>> frameMap = GetDominantMapping.getFrameMap(kafTerm, confidenceThreshold, fns, rnss, ilins);
if (frameMap.size()>0) {
//System.out.println("frameMap.size() = " + frameMap.size());
double topscore = GetDominantMapping.getTopScore(frameMap);
Set keySet = frameMap.keySet();
Iterator<String> keys = keySet.iterator();
while (keys.hasNext()) {
String key = keys.next();
// System.out.println("frame = " + key);
double score = 0;
ArrayList<SenseFrameRoles> data = frameMap.get(key);
if (data.size()> 0) {
/// we did get FN references and data so we use these for the output
for (int f = 0; f < data.size(); f++) {
SenseFrameRoles senseFrameRoles = data.get(f);
score += senseFrameRoles.getSense().getConfidence();
}
if ((100 * (score / topscore)) > framethreshold) {
KafSense frame = new KafSense();
frame.setSensecode(key);
fixExternalReference(frame);
// earlier duplicate setConfidence calls overwrote each other; keep the raw score
frame.setConfidence(score);
event.addExternalReferences(frame);
for (int k = 0; k < data.size(); k++) {
SenseFrameRoles senseFrameRoles = data.get(k);
KafSense sense = new KafSense();
sense.setSensecode(senseFrameRoles.getSense().getSensecode());
sense.setConfidence(senseFrameRoles.getSense().getConfidence());
sense.setResource(senseFrameRoles.getSense().getResource());
fixExternalReference(sense);
event.addExternalReferences(sense);
if (!senseFrameRoles.getIli().isEmpty()) {
KafSense ili = new KafSense();
ili.setSensecode(senseFrameRoles.getIli());
fixExternalReference(ili);
event.addExternalReferences(ili);
}
for (int m = 0; m < senseFrameRoles.getEsoClasses().size(); m++) {
String s = senseFrameRoles.getEsoClasses().get(m);
KafSense kafSense = new KafSense();
kafSense.setSensecode(s);
fixExternalReference(kafSense);
event.addExternalReferences(kafSense);
}
}
for (int k = 0; k < event.getParticipants().size(); k++) {
KafParticipant kafParticipant = event.getParticipants().get(k);
String role = kafParticipant.getRole();
for (int l = 0; l < data.size(); l++) {
SenseFrameRoles senseFrameRoles = data.get(l);
for (int m = 0; m < senseFrameRoles.getRoles().size(); m++) {
String fnPbRole = senseFrameRoles.getRoles().get(m);
String fnRole = matchPropBankFrameNetRole(fnPbRole, role);
if (!fnRole.isEmpty()) {
fnRole = removeNameSpaceFromSenseString(fnRole);
KafSense kafSense = new KafSense();
kafSense.setSensecode(key+"@"+fnRole);
fixExternalReference(kafSense);
kafParticipant.addExternalReferences(kafSense);
}
}
/// need to find a way to combine them with ESO classes
for (int m = 0; m < senseFrameRoles.getEsoRoles().size(); m++) {
String s = senseFrameRoles.getEsoRoles().get(m);
KafSense kafSense = new KafSense();
kafSense.setSensecode(s);
fixExternalReference(kafSense);
kafParticipant.addExternalReferences(kafSense);
}
}
role = normalizePropBankRole(role);
kafParticipant.setRole(role);
}
//// now check to participants of the events to add the roles
//System.out.println("score = " + score);
//System.out.println(data.toString());
}
}
}
}
else {
///// there is no framenetmapping, so we get the top senses of the predicate
for (int k = 0; k < kafTerm.getSenseTags().size(); k++) {
KafSense kafSense = kafTerm.getSenseTags().get(k);
KafSense refSense = new KafSense();
refSense.setResource(kafSense.getResource());
refSense.setConfidence(kafSense.getConfidence());
refSense.setSensecode(kafSense.getSensecode());
event.addExternalReferences(refSense);
if (kafSense.getChildren().size()>0) {
//// we take the first sense which has the highest score
KafSense child = kafSense.getChildren().get(0);
// event.addExternalReferences(child);
for (int c = 0; c < child.getChildren().size(); c++) {
KafSense grandChild = child.getChildren().get(c);
//<externalRef reference="mcr:ili-30-02604760-v" resource="mcr"/>
if (grandChild.getSensecode().startsWith(ilins)) {
fixExternalReference(grandChild);
event.addExternalReferences(grandChild);
}
}
}
}
}
}
}
}
}
static String matchPropBankFrameNetRole (String fnPbRole, String pbRole) {
///fn-pb-role:Entity#2
///fn-pb-role:Content#1
if (pbRole.toLowerCase().startsWith("arg") ||
(pbRole.startsWith("A"))) {
int idx = fnPbRole.lastIndexOf("#");
if (idx > -1) {
String suffix = fnPbRole.substring(idx+1);
if (pbRole.endsWith(suffix)) {
return fnPbRole.substring(0, idx);
}
}
}
return "";
}
static String normalizePropBankRole (String pbRole) {
String newRole = pbRole;
if (pbRole.toLowerCase().startsWith("arg")) {
newRole = "A"+pbRole.substring(3);
}
return newRole;
}
} |
/**
 * Returns the Cholesky decomposition of A'A. Since the SVD gives
 * A = U*S*V', we have A'A = V*S^2*V' = (V*S)*(V*S)', which is the
 * product computed below.
 */
public Cholesky CholeskyOfAtA() {
DenseMatrix VD = Matrix.zeros(V.nrows(), V.ncols());
for (int i = 0; i < V.nrows(); i++) {
for (int j = 0; j < V.ncols(); j++) {
VD.set(i, j, V.get(i, j) * s[j]);
}
}
return new Cholesky(VD.aat());
} |
// AnalyzersRun returns a list of IDs associated with the analyzers that were
// run.
func (r *Report) AnalyzersRun() string {
ids := []string{}
for _, analyzer := range r.Analyzers {
ids = append(ids, analyzer.ID())
}
return strings.Join(ids, ", ")
} |
Image caption: Can you be obese and healthy?
The idea of "healthy obesity" is a myth, research suggests.
Excess fat still carries health risks even when cholesterol, blood pressure and sugar levels are normal, according to a study of more than 60,000 people.
It has been argued that being overweight does not necessarily imply health risks if individuals remain healthy in other ways.
The research, published in Annals of Internal Medicine, contradicts this idea.
The study looked at findings from published studies tracking heart health and weight in more than 60,000 adults.
Researchers from the Mount Sinai Hospital, Toronto, found there was no healthy pattern of increased weight when heart health was monitored for more than 10 years.
They argue that people who are metabolically healthy but overweight probably have underlying risk factors that worsen over time.
Study leader Dr Ravi Retnakaran told BBC News: "This really casts doubt on the existence of healthy obesity.
"This data is suggesting that both patients who are obese who are metabolically unhealthy and patients who are obese who are metabolically healthy are both at increased risk of death from cardiovascular disease, such that benign obesity may indeed be a myth."
Heart risk
The British Heart Foundation says obesity is a known risk factor for heart disease and the research shows there is no healthy level of obesity.
Senior cardiac nurse, Doireann Maddock, said: "So, even if your blood pressure, cholesterol and blood sugar levels are normal, being obese can still put your heart at risk."
She said it was useful to think of lifestyle overall rather than individual risk factors.
"As well as watching your weight, if you stop smoking, get regular physical activity and keep your blood pressure and cholesterol levels at a healthy level, you can make a real difference in reducing your risk of heart disease.
"If you are concerned about your weight and want to know more about the changes you should make, visit your GP to talk it through." |
package com.github.fedorchuck.developers_notification.http;
import com.github.fedorchuck.developers_notification.Utils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.HashMap;
/**
* @author <a href="http://vl-fedorchuck.rhcloud.com/"><NAME></a>.
*/
public class HttpClientTest {
@Before
public void setUp() {
String stringHttpConfig = "{\"connect_timeout\":5000,\"user_agent\":\"Mozilla/5.0\"}";
Utils.setConfig(stringHttpConfig);
}
@Test
public void testGet() {
HttpClient client = new HttpClient();
HttpResponse response;
try {
HashMap<String, String> args = new HashMap<String, String>(1);
args.put("q", "qwerty");
response = client.get("https://www.google.com/search", args);
Assert.assertEquals(200, response.getStatusCode());
Assert.assertEquals("OK", response.getResponseMessage());
} catch (IOException e) {
Assert.fail(e.getMessage());
}
}
@Test
public void testPost() {
HttpClient client = new HttpClient();
HttpResponse response;
try {
response = client.post("https://chatapi.viber.com/pa/send_message", "{ \"tracking_data\": \"tracking data\", \"type\": \"picture\", \"text\": \"Photo description\" }");
Assert.assertEquals(200, response.getStatusCode());
Assert.assertEquals("OK", response.getResponseMessage());
} catch (IOException e) {
Assert.fail(e.getMessage());
}
}
@After
public void tearDown() {
Utils.resetConfig();
}
} |
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.internal.resolve.result;
import org.gradle.api.artifacts.ModuleVersionIdentifier;
import org.gradle.api.artifacts.component.ComponentIdentifier;
import org.gradle.internal.component.model.ComponentGraphResolveState;
import org.gradle.internal.component.model.ComponentGraphSpecificResolveState;
import org.gradle.internal.resolve.ModuleVersionResolveException;
import javax.annotation.Nullable;
/**
* The result of resolving a module version selector to a particular component.
*
* <p>Very similar to {@link org.gradle.internal.resolve.result.ComponentIdResolveResult}, could probably merge these.
*/
public interface ComponentResolveResult extends ResolveResult {
/**
* Returns the identifier of the component.
*/
ComponentIdentifier getId();
/**
* Returns the module version id of the component.
*
* @throws org.gradle.internal.resolve.ModuleVersionResolveException If resolution was unsuccessful and the id is unknown.
*/
ModuleVersionIdentifier getModuleVersionId() throws ModuleVersionResolveException;
/**
* Returns the graph resolution state for the component.
*
* @throws ModuleVersionResolveException If resolution was unsuccessful and the descriptor is not available.
*/
ComponentGraphResolveState getState() throws ModuleVersionResolveException;
/**
* Returns the graph specific resolution state for the component.
*
* @throws ModuleVersionResolveException If resolution was unsuccessful and the descriptor is not available.
*/
ComponentGraphSpecificResolveState getGraphState() throws ModuleVersionResolveException;
/**
* Returns the resolve failure, if any.
*/
@Override
@Nullable
ModuleVersionResolveException getFailure();
}
|
As of Friday morning, NFLPA records showed the Atlanta Falcons with $23,855,200 in cap space. The contracts for newly signed players Brooks Reed, Justin Durant, Leonard Hankerson and Mike Person are in the system while the contracts for Adrian Clayborn, O'Brien Schofield and Antone Smith are not in the system.
Durant's contract includes provisions obviously based on his injury history. He can earn much more money by remaining healthy.
According to ESPN Stats & Information, here is how the contracts for Durant, Person and Hankerson break down (Reed's details not available just yet):
JUSTIN DURANT, LB (three-year contract)
Base salaries: $1,400,000 (2015); $1,750,000 (2016); $2,000,000 (2017)
Signing bonus: $1,250,000
Cap numbers: $3,666,666 (2015); $3,416,666 (2016); $3,716,668 (2017)
Roster bonuses: $1,850,000 (2015, roster bonus of $805,000 if he plays 65 percent and is on roster last game of season in 2015, additional roster bonus of $65,625 per game active, max value $1.05M); $1,250,000 (2016, roster bonus of $78,125 per game active, max value $1.25M, escalator of up to $1.35M available based on playtime and playoffs); $1,300,000 (2017, roster bonus of $81,250 per game active, max value $1.3M, escalator of up to $1.65M available based on playtime and playoffs).
MIKE PERSON, G (three-year contract)
Base salaries: $745,000 (2015); $1,000,000 (2016); $1,105,000 (2017)
Signing bonus: $500,000
Cap numbers: $911,666 (2015); $1,116,666 (2016); $1,271,668 (2017)
Roster bonus: none
LEONARD HANKERSON, WR (one-year contract)
Base salary: $745,000
Signing bonus: $255,000
Cap number: $1,000,000
Roster bonus: none
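For context, those cap numbers line up with the league's standard proration rule: a signing bonus counts evenly against each contract year, while likely-to-be-earned roster bonuses count in full. Durant's 2015 figure is $1,400,000 (base) + $416,666 (one third of the $1,250,000 signing bonus) + $1,850,000 (roster bonuses) = $3,666,666; Person's is $745,000 + $166,666 (one third of $500,000) = $911,666; and Hankerson's one-year deal is simply $745,000 + $255,000 = $1,000,000. |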
package ca.jrvs.apps.trading.dao.config;
public class MarketDataConfig {
private String host;
private String token;
public String getHost() {
return host;
}
public void setHost(String host) {
if (host == null) {
throw new IllegalArgumentException("Input host is null");
}
this.host = host;
}
public String getToken() {
return token;
}
public void setToken(String token) {
if (token == null) {
throw new IllegalArgumentException("Input token is null");
}
this.token = token;
}
}
|
/// Creates a view (look-at) matrix for a camera looking at a target.
/// The basis vectors fill the columns of the upper 3x3 block and the
/// translation sits in the bottom row, so the matrix is laid out for
/// row-vector multiplication (v' = v * M) in a left-handed convention.
/// \param position The camera position
/// \param target The position the camera is looking at
/// \param up The up vector
/// \return The matrix
static inline Matrix4x4 Matrix4x4FromLookAt(Vector3 position, Vector3 target, Vector3 up)
{
Vector3 f = Vector3Normalize(Vector3Subtract(target, position));
Vector3 r = Vector3Normalize(Vector3Cross(up, f));
Vector3 u = Vector3Normalize(Vector3Cross(f, r));
return (Matrix4x4)
{
r.X, u.X, f.X, 0.0,
r.Y, u.Y, f.Y, 0.0,
r.Z, u.Z, f.Z, 0.0,
-Vector3Dot(r, position),
-Vector3Dot(u, position),
-Vector3Dot(f, position),
1.0
};
} |
/** Format a set of rules as an HTML text block to show in an instructions panel */
public static String createRulesPage(String title, Collection<Rule> rules) {
StringBuilder sb = new StringBuilder("<html><br><br>");
sb.append("<font face=\"");
sb.append(WidgetConstants.FONT_INSTRUCTION_PANEL.getName() + "\" size=\"");
sb.append(WidgetConstants.FONT_SIZE_HTML + "\">");
if(title != null) {
sb.append("<center>");
sb.append(title);
sb.append("</center><br><br>");
}
sb.append("<left>");
int i = 1;
if(rules != null && !rules.isEmpty()) {
for(Rule rule : rules) {
sb.append("&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;");
sb.append(i + ". ");
sb.append(rule.getRuleText());
sb.append("<br>");
i++;
}
}
sb.append("</left>");
sb.append("</font></html>");
return sb.toString();
} |
// BackRepoClassdiagram.CheckoutPhaseTwoInstance Checkouts staged instances of Classdiagram to the BackRepo
// Phase Two is the update of instance with the field in the database
func (backRepoClassdiagram *BackRepoClassdiagramStruct) CheckoutPhaseTwoInstance(backRepo *BackRepoStruct, classdiagramDB *ClassdiagramDB) (Error error) {
classdiagram := (*backRepoClassdiagram.Map_ClassdiagramDBID_ClassdiagramPtr)[classdiagramDB.ID]
_ = classdiagram
classdiagram.Classshapes = classdiagram.Classshapes[:0]
for _, classshapeDB_AssocEnd := range *backRepo.BackRepoClassshape.Map_ClassshapeDBID_ClassshapeDB {
if classshapeDB_AssocEnd.Classdiagram_ClassshapesDBID.Int64 == int64(classdiagramDB.ID) {
classshape_AssocEnd := (*backRepo.BackRepoClassshape.Map_ClassshapeDBID_ClassshapePtr)[classshapeDB_AssocEnd.ID]
classdiagram.Classshapes = append(classdiagram.Classshapes, classshape_AssocEnd)
}
}
sort.Slice(classdiagram.Classshapes, func(i, j int) bool {
classshapeDB_i_ID := (*backRepo.BackRepoClassshape.Map_ClassshapePtr_ClassshapeDBID)[classdiagram.Classshapes[i]]
classshapeDB_j_ID := (*backRepo.BackRepoClassshape.Map_ClassshapePtr_ClassshapeDBID)[classdiagram.Classshapes[j]]
classshapeDB_i := (*backRepo.BackRepoClassshape.Map_ClassshapeDBID_ClassshapeDB)[classshapeDB_i_ID]
classshapeDB_j := (*backRepo.BackRepoClassshape.Map_ClassshapeDBID_ClassshapeDB)[classshapeDB_j_ID]
return classshapeDB_i.Classdiagram_ClassshapesDBID_Index.Int64 < classshapeDB_j.Classdiagram_ClassshapesDBID_Index.Int64
})
return
} |
Image copyright: AFP. Image caption: Some villagers in Guinea have been scared by the appearance of health workers trying to combat Ebola
Eight members of a team trying to raise awareness about Ebola have been killed by villagers using machetes and clubs in Guinea, officials say.
Some of the bodies - of health workers, local officials and journalists - were found in a septic tank in a village school near the city of Nzerekore.
Correspondents say many villagers are suspicious of official attempts to combat the disease.
More than 2,600 people have now died from the Ebola outbreak in West Africa.
It is the world's worst outbreak of Ebola, with officials warning that more than 20,000 people could ultimately be infected.
Neighbouring Sierra Leone has begun a controversial three-day curfew to try to stop the spread of the disease.
Media caption: The BBC looks at the scale of the challenge the Ebola outbreak presents
The team disappeared after being pelted with stones by residents when they arrived in the village of Wome - in southern Guinea, where the Ebola outbreak was first recorded.
A journalist who managed to escape told reporters that she could hear villagers looking for them while she was hiding.
A government delegation, led by the health minister, had been dispatched to the region but they were unable to reach the village by road because a main bridge had been blocked.
'Killed in cold blood'
On Thursday night, government spokesman Albert Damantang Camara said the victims had been "killed in cold blood by the villagers".
The bodies showed signs of being attacked with machetes and clubs, officials say.
Six people have been arrested and the village is now reportedly deserted.
The motive for the killings has not been confirmed, but the BBC's Makeme Bamba in Guinea's capital, Conakry, says many villagers accuse the health workers of spreading the disease.
Others still do not believe that the disease exists.
Last month, riots erupted in Nzerekore, 50 km (30 miles) from Wome, after rumours that medics who were disinfecting a market were contaminating people.
Speaking on Thursday, French President Francois Hollande said France was setting up a military hospital in Guinea as part of his country's efforts to support the West African nations affected by the outbreak.
He said the hospital was a sign that France's contribution was not just financial, adding that it would be in "the forests of Guinea, in the heart of the outbreak".
Media caption: The BBC's Umaru Fofana reports on a chaotic lockdown in Sierra Leone
The World Health Organization said on Thursday that more than 700 new cases of Ebola have emerged in West Africa in just a week, showing that the outbreak was accelerating.
It said there had been more than 5,300 cases in total and that half of those were recorded in the past three weeks.
The epidemic has struck Liberia, Sierra Leone, Guinea, Nigeria and Senegal.
A three-day lockdown is underway in Sierra Leone in a bid to stop the disease spreading.
Ebola virus disease (EVD)
Symptoms include high fever, bleeding and central nervous system damage
Spread by body fluids, such as blood and saliva
Current outbreak has mortality rate of about 55%
Incubation period is two to 21 days
There is no proven vaccine or cure
|
<filename>src/core/notification.c
#include "notification.h"
char *MTR_MessageTypeToTitle(uint8_t messageType)
{
switch (messageType)
{
case MTR_DMT_INFO:
return MTR_NTT_INFO;
break;
case MTR_DMT_NOTE:
return MTR_NTT_NOTE;
break;
case MTR_DMT_WARNING:
return MTR_NTT_WARNING;
break;
case MTR_DMT_ERROR:
return MTR_NTT_ERROR;
break;
case MTR_DMT_FATAL:
return MTR_NTT_FATAL;
break;
case MTR_DMT_DEBUG:
return MTR_NTT_DEBUG;
break;
default:
return " ";
break;
}
return " ";
}
/*fa MTR_Notify yes */
void MTR_CALL MTR_Notify(const char *message, uint8_t level,
uint8_t messageType)
{
MTR_LogWrite(message, level, messageType);
MTR_ShowSimpleMessageBox(messageType, MTR_MessageTypeToTitle(messageType),
message);
}
|
/** Dummy distribution trait def for test (handles conversion of
* SimpleDistribution). */
private static class ConvertRelDistributionTraitDef
extends RelTraitDef<SimpleDistribution> {
@Override public Class<SimpleDistribution> getTraitClass() {
return SimpleDistribution.class;
}
@Override public String toString() {
return getSimpleName();
}
@Override public String getSimpleName() {
return "ConvertRelDistributionTraitDef";
}
@Override public @Nullable RelNode convert(RelOptPlanner planner, RelNode rel,
SimpleDistribution toTrait, boolean allowInfiniteCostConverters) {
if (toTrait == SIMPLE_DISTRIBUTION_ANY) {
return rel;
}
return new BridgeRel(rel.getCluster(), rel);
}
@Override public boolean canConvert(RelOptPlanner planner,
SimpleDistribution fromTrait, SimpleDistribution toTrait) {
return (fromTrait == toTrait)
|| (toTrait == SIMPLE_DISTRIBUTION_ANY)
|| (fromTrait == SIMPLE_DISTRIBUTION_SINGLETON
&& toTrait == SIMPLE_DISTRIBUTION_RANDOM);
}
@Override public SimpleDistribution getDefault() {
return SIMPLE_DISTRIBUTION_ANY;
}
} |
package rxfamily.entity;
/**
* Created by Administrator on 2018/1/18 0018.
*/
public class AddDriverLicenseEntity extends BaseEntity{
/**
* data : {"createDate":"2010-01-01","id":"<KEY>"}
*/
private DataBean data;
public DataBean getData() {
return data;
}
public void setData(DataBean data) {
this.data = data;
}
public static class DataBean {
/**
* createDate : 2010-01-01
* id : <KEY>
*/
private String createDate;
private String id;
public String getCreateDate() {
return createDate;
}
public void setCreateDate(String createDate) {
this.createDate = createDate;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
}
}
|
// run in its own goroutine
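// Usage sketch (assuming stdout/stderr pipes obtained from an exec.Cmd;
// the pipe names are illustrative). Each pipe gets its own goroutine:
//
//	go in.watchPipe(stdoutPipe, "stdout")
//	go in.watchPipe(stderrPipe, "stderr")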
func (in *TaskInstance) watchPipe(r io.Reader, name string) {
br := bufio.NewReader(r)
for {
sl, isPrefix, err := br.ReadLine()
if err == io.EOF {
return
}
if err != nil {
in.Printf("pipe %q closed: %v", name, err)
return
}
in.output.Add(&Line{
T: time.Now(),
Name: name,
Data: string(sl),
isPrefix: isPrefix,
instance: in,
})
}
panic("unreachable")
} |
#ifndef __PDDL__DETAIL__NORMALIZATION__DOMAIN_H
#define __PDDL__DETAIL__NORMALIZATION__DOMAIN_H
#include <pddl/ASTForward.h>
#include <pddl/Context.h>
#include <pddl/NormalizedASTForward.h>
namespace pddl
{
namespace detail
{
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Domain
//
////////////////////////////////////////////////////////////////////////////////////////////////////
normalizedAST::DomainPointer normalize(ast::DomainPointer &&domain);
////////////////////////////////////////////////////////////////////////////////////////////////////
}
}
#endif
|
On what would be the self-taught artist’s 125th birthday, Intuit: The Center for Intuitive and Outsider Art is holding a daylong celebration formally recognized by the mayor of Chicago.
One hundred and twenty-five years ago today, Henry Darger was born at home to a poor couple in Chicago. His mother died four years later; not long afterwards, Darger was separated from his sick father and placed in a Catholic boys’ home, followed by an asylum. When he was 16, Darger managed to escape and begin the rest of his life: a mostly solitary existence that consisted of janitorial and other menial labor at Chicago hospitals, attending mass, and creating what’s now considered some of the greatest art of the 20th century.
Intuit: The Center for Intuitive and Outsider Art is celebrating Darger’s birthday today, and so is his hometown: Mayor Rahm Emanuel has declared April 12, 2017, official Henry Darger Day in Chicago.
In Darger’s honor, Intuit is open to the public for free until 8:30pm. Programs are planned throughout the day, including a screening of the documentary about the artist, a conversation with his neighbor, birthday cake, and the opening of a new exhibition, Betwixt and Between: Henry Darger’s Vivian Girls. These characters are Darger’s most famous creations: mystical, penis-bearing girls whose crusades against child slavery he captured in drawings, collages, and a roughly 15,000-page novel. Darger is less widely known as a writer (his books have not yet been reproduced or published), and another show currently on view at Intuit explores the relationship between his images and text. A third exhibition examines his source materials, from coloring books to Catholic imagery — and, as if that weren’t enough, the center also houses a permanent installation of the room where Darger did most of his work for four decades. The hushed, cluttered space seems to overflow with inspiration.
As Mayor Emanuel’s official proclamation recounts, Darger died the day after his 81st birthday. His landlords had only just discovered his trove of art, and its greatness would not be understood until he was gone. Today, he “stands as one of the most venerated self-taught artists of all time and reminds every artist and arts patron of the extreme significance of cultivating and curating spaces for the study and presentation of intuitive and outsider art,” according to the decree, which goes on to “urge every Chicagoan to visit Intuit,” today and in the future. I’d encourage everyone passing through Chicago to do so as well — and those in New York to visit the American Folk Art Museum, which has the largest public collection of his work. Happy Henry Darger Day! |
// Respond to request with a message.
func (req *request) Respond(msg interface{}) error {
req.mu.Lock()
defer req.mu.Unlock()
if req.finished {
return ErrAlreadyResponded
}
req.finished = true
fail, ok := msg.(error)
if ok {
select {
case req.failure <- fail:
return nil
default:
panic("grid: respond called multiple times")
}
}
typeName, data, err := codec.Marshal(msg)
if err != nil {
return err
}
res := &Delivery{
Ver: Delivery_V1,
Data: data,
TypeName: typeName,
}
select {
case req.response <- res:
return nil
default:
panic("grid: respond called multiple times")
}
} |
//! git hook manager tool
use anyhow::{
bail,
Result,
};
use log::*;
use shared::find_root;
#[cfg(not(windows))]
use std::os::unix::fs::{
symlink,
PermissionsExt,
};
#[cfg(windows)]
use std::os::windows::fs::symlink_file;
use std::{
env,
fs,
io::Write,
path::Path,
};
const HOOKS: [&str; 18] = [
"applypatch-msg",
"post-applypatch",
"pre-commit",
"prepare-commit-msg",
"commit-msg",
"post-commit",
"pre-rebase",
"post-checkout",
"post-merge",
"pre-push",
"pre-receive",
"update",
"post-receive",
"post-update",
"push-to-checkout",
"pre-auto-gc",
"post-rewrite",
"sendemail-validate",
];
#[derive(structopt::StructOpt)]
enum Args {
/// Initialize the repo to use hooked
Init(Language),
/// Link pre existing hooks to your .git folder
Link,
}
/// Which language the repo should be initialized with for hooks
#[derive(Clone, Copy, structopt::StructOpt)]
enum Language {
/// Use Bash for your git hooks
Bash,
/// Use Python 3 for your git hooks
Python,
/// Use Ruby for your git hooks
Ruby,
}
#[paw::main]
fn main(args: Args) {
env::var("RUST_LOG")
.ok()
.map_or_else(|| env::set_var("RUST_LOG", "info"), drop);
pretty_env_logger::init();
if let Err(e) = match args {
Args::Init(lang) => init(lang),
Args::Link => link(),
} {
error!("{}", e);
std::process::exit(1);
}
}
fn init(lang: Language) -> Result<()> {
let root = find_root()?;
let git_hooks = &root.join(".git").join("hooks");
debug!("git_hooks base path: {}", git_hooks.display());
let root = root.join(".dev-suite").join("hooked");
debug!("root base path: {}", root.display());
let wrapper_dir = &root.join("wrapper");
fs::create_dir_all(&wrapper_dir)?;
for hook in &HOOKS {
let mut path = (&root).join(hook);
debug!("dev-suite hook path: {}", path.display());
let git_hook = &git_hooks.join(hook);
debug!("git_hook path: {}", git_hook.display());
let mut wrapper_hook = (&wrapper_dir).join(hook);
let _ = wrapper_hook.set_extension("sh");
let _ = match lang {
Language::Bash => path.set_extension("sh"),
Language::Python => path.set_extension("py"),
Language::Ruby => path.set_extension("rb"),
};
if path.exists() {
debug!("git hook {} already exists. Skipping creation.", hook);
} else {
debug!("Creating dev-suite hook.");
let mut file = fs::File::create(&path)?;
let mut wrapper = fs::File::create(&wrapper_hook)?;
trace!("File created.");
#[cfg(not(windows))]
{
let mut perms = file.metadata()?.permissions();
let mut wrapper_perms = wrapper.metadata()?.permissions();
debug!("Setting dev-suite hook to be executable.");
perms.set_mode(0o755);
wrapper_perms.set_mode(0o755);
file.set_permissions(perms)?;
wrapper.set_permissions(wrapper_perms)?;
trace!("Permissions were set.");
}
match lang {
Language::Bash => {
file.write_all(b"#!/usr/bin/env bash")?;
wrapper.write_all(
format!(
"#!C:\\Program Files\\Git\\bin\\sh.exe\n\
bash.exe .dev-suite/hooked/{}.sh\n",
hook
)
.as_bytes(),
)?;
}
Language::Python => {
file.write_all(b"#!/usr/bin/env python3")?;
wrapper.write_all(
format!(
"#!C:\\Program Files\\Git\\bin\\sh.exe\n\
py.exe .dev-suite/hooked/{}.py\n",
hook
)
.as_bytes(),
)?;
}
Language::Ruby => {
file.write_all(b"#!/usr/bin/env ruby")?;
wrapper.write_all(
format!(
"#!C:\\Program Files\\Git\\bin\\sh.exe\n\
ruby.exe .dev-suite/hooked/{}.rb\n",
hook
)
.as_bytes(),
)?;
}
}
debug!("Writing data to file.");
debug!("Created git hook {}.", hook);
}
#[cfg(not(windows))]
let link_path = path.canonicalize()?;
#[cfg(windows)]
let link_path = wrapper_hook.canonicalize()?;
inner_link(&link_path, &git_hook, hook)?;
}
info!(
"Created and symlinked tickets to .git/hooks from {}.",
root.display()
);
#[cfg(windows)]
{
warn!("Make sure to add the hooks into git with 'git add --chmod=+x .dev-suite\\hooked'");
warn!("If you don't they won't be set as executable on unix systems");
}
Ok(())
}
fn link() -> Result<()> {
let root = find_root()?;
let git_hooks = &root.join(".git").join("hooks");
debug!("git_hooks base path: {}", git_hooks.display());
let root = root.join(".dev-suite").join("hooked");
debug!("root base path: {}", root.display());
for hook in &HOOKS {
let path = {
#[cfg(windows)]
let mut path = root.join("wrapper").join(hook);
#[cfg(not(windows))]
let mut path = root.join(hook);
debug!("PATH: {}", path.display());
let mut path_python = path.clone();
let _ = path_python.set_extension("py");
let mut path_ruby = path.clone();
let _ = path_ruby.set_extension("rb");
let mut path_bash = path.clone();
let _ = path_bash.set_extension("sh");
if path_python.exists() {
path_python
} else if path_ruby.exists() {
path_ruby
} else if path_bash.exists() {
path_bash
} else {
let _ = path.set_extension("");
bail!(
"The path {} does not exist. Have you initialized the repo to use hooked?",
path.display()
);
}
};
let path = path.canonicalize()?;
debug!("dev-suite hook path: {}", path.display());
let git_hook = &git_hooks.join(hook);
debug!("git_hook path: {}", git_hook.display());
inner_link(&path, &git_hook, hook)?;
}
info!("Successfully symlinked all githooks to .git/hooks");
Ok(())
}
fn inner_link(path: &Path, git_hook: &Path, hook: &str) -> Result<()> {
if !git_hook.exists() {
debug!("Symlinking git hook {}.", hook);
#[cfg(not(windows))]
symlink(&path, &git_hook)?;
#[cfg(windows)]
symlink_file(&path, &git_hook)?;
debug!(
"Symlinked git hook {} to {}",
git_hook.display(),
path.display()
);
}
Ok(())
}
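// Usage sketch (assuming the binary is installed as `hooked`; the name is
// taken from the error messages above and may differ):
//
//     hooked init bash   # scaffold .dev-suite/hooked with Bash hooks
//     hooked link        # symlink existing hooks into .git/hooks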
|
/**
* If this method returns without throwing an exception, you can be sure that an equivalent
* LynxModule instance is registered with the LynxUsbDevice. However, it is not guaranteed that
* the exact LynxModule instance passed in is the one registered to the LynxUsbDevice. ALWAYS use
* the one connected to the connectedModules map parameter, not one originally passed in.
*/
private void connectModule(LynxUsbDevice lynxUsbDevice, LynxModule module, Map<Integer,String> moduleNames, Map<Integer,LynxModule> connectedModules, boolean enableCharging) throws InterruptedException {
try {
module = lynxUsbDevice.addConfiguredModule(module);
if (enableCharging) {
                // Note that we only ever enable charging for the first module attached so as to avoid
                // voltage regulators pushing and pulling at each other (our caller guarantees that
                // enableCharging will only be true for that first module).
module.enablePhoneCharging(true);
}
connectedModules.put(module.getModuleAddress(), module);
} catch (RobotCoreException|LynxNackException|RuntimeException e) {
lynxUsbDevice.noteMissingModule(module, moduleNames.get(module.getModuleAddress()));
}
} |
// AllEmpty returns a bool whether All of the variadic inputs are "empty" strings.
// see rstrings::IsEmpty for empty-checking logic
// if no inputs are passed, it defaults to true
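// A minimal usage sketch (exact results depend on how rstrings.IsEmpty
// defines "empty"):
//
//	AllEmpty()          // true (vacuously)
//	AllEmpty("", "")    // true, assuming IsEmpty("") is true
//	AllEmpty("", "x")   // false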
func AllEmpty(a ...string) bool {
acc := true
for _, v := range a {
acc = acc && rstrings.IsEmpty(v)
}
return acc
} |
/**
 * Alternative to using ::converter; tries to look up the class in the type
 * converter registry. If no converter is registered for the class, the
 * requireNonNull check below throws a NullPointerException immediately.
 *
 * @param clazz class of converter
 * @return this
 */
default R converterOfType(Class<T> clazz) {
TypeConverter<T> tc = TypeConverterRegistry.get(clazz);
Objects.requireNonNull(
tc, "Failed to find type converter in registry for '" + clazz.getName() + "'");
return converter(tc);
} |
// Sanitizes a tag so that it is a valid XML id.
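// Example: fixTag("a b/c&d") returns "a-b_c.d". Note that replaceAll takes a
// regex, but " ", "/" and "&" are all literal in regex syntax, so the calls
// below behave like plain substring replacements.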
private String fixTag(String tag) {
return tag.replaceAll(" ", "-")
.replaceAll("/", "_")
.replaceAll("&", ".");
} |
/**
* Factory method to create a mock object of a given type.
*
* @param <T> the type of the created mock object
* @param type the type of the mock object to create
* @return the created mock object
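 * <p>Typical usage with EasyMock (a sketch; {@code Runnable} is chosen purely
 * for illustration):
 * <pre>
 *   Runnable task = createMock( Runnable.class );
 *   task.run();
 *   EasyMock.expectLastCall();
 *   EasyMock.replay( task );
 * </pre>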
*/
protected final <T> T createMock( final Class<T> type )
{
final T mock = EasyMock.createMock( type );
mocks.add( mock );
return mock;
} |
def find_directives(self, name, exclude=True): |
import argparse
import json
import multiprocessing as mp
import operator
import time
import sys
import mergelife
import ml_evolve as ev
import numpy as np
bestGenome = None
evalCount = 0
startTime = 0
timeLastUpdate = 0
totalEvalCount = 0
runCount = 1
noImprovement = 0
waitingCount = 0
population = []
def subprocessScore(inputQueue, outputQueue):
while True:
genome = inputQueue.get()
rule_str = genome['rule']
width = config['config']['cols']
height = config['config']['rows']
ml_inst = mergelife.new_ml_instance(height, width, rule_str)
result = mergelife.objective_function(ml_inst, config['config']['evalCycles'], config['objective'])
outputQueue.put({'rule': rule_str, 'score': result['score'], 'run': genome['run']})
def report(config, inputQueue, genome):
global bestGenome, evalCount, totalEvalCount, timeLastUpdate, startTime, runCount, noImprovement, population
requestStop = False
evalCount += 1
totalEvalCount += 1
if bestGenome is None or genome['score'] > bestGenome['score']:
bestGenome = genome
else:
noImprovement += 1
if noImprovement > config['config']['patience']:
requestStop = True
now = time.time()
if requestStop or (now - timeLastUpdate > 60):
elapsed = now - startTime
perSec = totalEvalCount / elapsed
perMin = int(perSec * 60.0)
print("Run #{}, Eval #{}: {}, evals/min={}".format(runCount, evalCount, bestGenome, perMin))
timeLastUpdate = now
if requestStop:
print("No improvement for {}, stopping...".format(config['config']['patience']))
if bestGenome['score'] > config['config']['scoreThreshold']:
render(config, bestGenome['rule'])
noImprovement = 0
runCount += 1
evalCount = 0
population = []
bestGenome = None
randomPopulation(config, inputQueue)
def randomPopulation(config, queue):
global waitingCount
for i in range(config['config']['populationSize']):
queue.put({'score': None, 'rule': mergelife.random_update_rule(), 'run': runCount})
waitingCount += config['config']['populationSize']
def evolve(config):
global timeLastUpdate, waitingCount, startTime
cpus = mp.cpu_count()
print("Forking for {}".format(cpus))
processes = []
startTime = time.time()
timeLastUpdate = startTime
cycles = config['config']['evalCycles']
inputQueue = mp.Queue()
outputQueue = mp.Queue()
for i in range(cpus):
# parent_conn, child_conn = mp.Pipe()
# p = mp.Process(target=subprocessScore, args=(parent_conn,))
p = mp.Process(target=subprocessScore, args=(inputQueue, outputQueue,))
p.start()
processes.append({'process': p})
randomPopulation(config, inputQueue)
population = []
while True:
g = outputQueue.get()
waitingCount -= 1
if g['run'] == runCount:
if len(population) < config['config']['populationSize']:
population.append(g)
report(config, inputQueue, g)
else:
target_idx = ev.select_tournament(population, cycles, operator.lt)
population[target_idx] = g
report(config, inputQueue, g)
if waitingCount < cpus * 2:
if np.random.uniform() < config['config']['crossover']:
# Crossover
parent1_idx = ev.select_tournament(population, cycles, operator.gt)
parent2_idx = parent1_idx
while parent1_idx == parent2_idx:
parent2_idx = ev.select_tournament(population, cycles, operator.gt)
parent1 = population[parent1_idx]['rule']
parent2 = population[parent2_idx]['rule']
if parent1 != parent2:
child1, child2 = ev.crossover(parent1, parent2, cycles)
inputQueue.put({'rule': child1, 'score': None, 'run': runCount})
inputQueue.put({'rule': child2, 'score': None, 'run': runCount})
waitingCount += 2
else:
# Mutate
parent_idx = ev.select_tournament(population, cycles, operator.gt)
parent = population[parent_idx]['rule']
child = ev.mutate(parent)
inputQueue.put({'rule': child, 'score': None, 'run': runCount})
waitingCount += 1
for p in processes:
p['process'].join()
def render(config, ruleText):
width = config['config']['cols']
height = config['config']['rows']
steps = config['config']['renderSteps']
ml_inst = mergelife.new_ml_instance(height, width, ruleText)
for i in range(steps):
mergelife.update_step(ml_inst)
filename = ruleText + ".png"
mergelife.save_image(ml_inst, filename)
print("Saved {}".format(filename))
def score(config, ruleText):
width = config['config']['cols']
height = config['config']['rows']
ml_inst = mergelife.new_ml_instance(height, width, ruleText)
result = mergelife.objective_function(ml_inst, config['config']['evalCycles'], config['objective'], True)
print("Final result: {}".format(result['score']))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Mergelife Utility')
parser.add_argument('--rows', nargs=1, type=int, help="the number of rows in the MergeLife grid")
parser.add_argument('--cols', nargs=1, type=int, help="the number of cols in the MergeLife grid")
parser.add_argument('--renderSteps', nargs=1, type=int, help="the number of steps to render")
parser.add_argument('--zoom', nargs=1, type=int, help="the pixel size for rendering")
parser.add_argument('--config', nargs=1, type=str, help="the path to a config file")
    parser.add_argument('command', nargs=argparse.REMAINDER, metavar='command', type=str, choices=['evolve', 'score', 'render'],
                        help='the command to run: evolve, score or render')
args = parser.parse_args()
if args.config is None:
config = {'config': {}}
else:
with open(args.config[0]) as f:
config = json.load(f)
# Override with command line params, if they are there
if args.rows is not None:
config['config']['rows'] = args.rows[0]
if args.cols is not None:
config['config']['cols'] = args.cols[0]
if args.renderSteps is not None:
config['config']['renderSteps'] = args.renderSteps[0]
if args.config is not None:
config['config']['config'] = args.config[0]
if args.zoom is not None:
config['config']['zoom'] = args.zoom[0]
# Default values
if 'cols' not in config['config']:
config['config']['cols'] = 100
if 'rows' not in config['config']:
config['config']['rows'] = 100
    if 'evalCycles' not in config['config']:
        config['config']['evalCycles'] = 250
    if 'zoom' not in config['config']:
        config['config']['zoom'] = 5
if args.command[0] == 'render':
if len(args.command)<2:
print("Must specify what rule hex-code you wish to render.")
sys.exit(0)
else:
render(config, args.command[1])
elif args.command[0] == 'score':
score(config, args.command[1])
elif args.command[0] == 'evolve':
evolve(config)
|
# Copyright The IETF Trust 2011-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import hashlib
import io
import json
import math
import os
import re
import six
from collections import defaultdict
from six.moves.urllib.parse import quote
from django.conf import settings
from django.contrib import messages
from django.forms import ValidationError
from django.utils.html import escape
from django.urls import reverse as urlreverse
import debug # pyflakes:ignore
from ietf.doc.models import Document, DocHistory, State, DocumentAuthor, DocHistoryAuthor
from ietf.doc.models import DocAlias, RelatedDocument, RelatedDocHistory, BallotType, DocReminder
from ietf.doc.models import DocEvent, ConsensusDocEvent, BallotDocEvent, NewRevisionDocEvent, StateDocEvent
from ietf.doc.models import TelechatDocEvent
from ietf.name.models import DocReminderTypeName, DocRelationshipName
from ietf.group.models import Role
from ietf.ietfauth.utils import has_role
from ietf.utils import draft, text
from ietf.utils.mail import send_mail
from ietf.mailtrigger.utils import gather_address_lists
from ietf.utils import log
def save_document_in_history(doc):
"""Save a snapshot of document and related objects in the database."""
def get_model_fields_as_dict(obj):
return dict((field.name, getattr(obj, field.name))
for field in obj._meta.fields
if field is not obj._meta.pk)
# copy fields
fields = get_model_fields_as_dict(doc)
fields["doc"] = doc
fields["name"] = doc.canonical_name()
dochist = DocHistory(**fields)
dochist.save()
# copy many to many
for field in doc._meta.many_to_many:
if field.remote_field.through and field.remote_field.through._meta.auto_created:
hist_field = getattr(dochist, field.name)
hist_field.clear()
hist_field.set(getattr(doc, field.name).all())
# copy remaining tricky many to many
def transfer_fields(obj, HistModel):
mfields = get_model_fields_as_dict(item)
# map doc -> dochist
for k, v in mfields.items():
if v == doc:
mfields[k] = dochist
HistModel.objects.create(**mfields)
for item in RelatedDocument.objects.filter(source=doc):
transfer_fields(item, RelatedDocHistory)
for item in DocumentAuthor.objects.filter(document=doc):
transfer_fields(item, DocHistoryAuthor)
return dochist
def get_state_types(doc):
res = []
if not doc:
return res
res.append(doc.type_id)
if doc.type_id == "draft":
if doc.stream_id and doc.stream_id != "legacy":
res.append("draft-stream-%s" % doc.stream_id)
res.append("draft-iesg")
res.append("draft-iana-review")
res.append("draft-iana-action")
res.append("draft-rfceditor")
return res
def get_tags_for_stream_id(stream_id):
if stream_id == "ietf":
return ["w-expert", "w-extern", "w-merge", "need-aut", "w-refdoc", "w-refing", "rev-wg", "rev-wglc", "rev-ad", "rev-iesg", "sheph-u", "no-adopt", "other"]
elif stream_id == "iab":
return ["need-ed", "w-part", "w-review", "need-rev", "sh-f-up"]
elif stream_id == "irtf":
return ["need-ed", "need-sh", "w-dep", "need-rev", "iesg-com"]
elif stream_id == "ise":
return ["w-dep", "w-review", "need-rev", "iesg-com"]
else:
return []
def can_adopt_draft(user, doc):
if not user.is_authenticated:
return False
if has_role(user, "Secretariat"):
return True
#The IRTF chair can adopt a draft into any RG
if has_role(user, "IRTF Chair"):
return (doc.stream_id in (None, "irtf")
and doc.group.type_id == "individ")
roles = Role.objects.filter(name__in=("chair", "delegate", "secr"),
group__type__in=("wg", "rg", "ag", ),
group__state="active",
person__user=user)
role_groups = [ r.group for r in roles ]
return (doc.stream_id in (None, "ietf", "irtf")
and (doc.group.type_id == "individ" or (doc.group in role_groups and len(role_groups)>1))
and roles.exists())
def can_unadopt_draft(user, doc):
if not user.is_authenticated:
return False
if has_role(user, "Secretariat"):
return True
if doc.stream_id == 'irtf':
if has_role(user, "IRTF Chair"):
return True
return user.person.role_set.filter(name__in=('chair','delegate','secr'),group=doc.group).exists()
elif doc.stream_id == 'ietf':
return user.person.role_set.filter(name__in=('chair','delegate','secr'),group=doc.group).exists()
elif doc.stream_id == 'ise':
return user.person.role_set.filter(name='chair',group__acronym='ise').exists()
elif doc.stream_id == 'iab':
return False # Right now only the secretariat can add a document to the IAB stream, so we'll
# leave it where only the secretariat can take it out.
else:
return False
def two_thirds_rule( recused=0 ):
# For standards-track, need positions from 2/3 of the non-recused current IESG.
active = Role.objects.filter(name="ad",group__type="area",group__state="active").count()
return int(math.ceil((active - recused) * 2.0/3.0))
def needed_ballot_positions(doc, active_positions):
'''Returns text answering the question "what does this document
need to pass?". The return value is only useful if the document
is currently in IESG evaluation.'''
yes = [p for p in active_positions if p and p.pos_id == "yes"]
noobj = [p for p in active_positions if p and p.pos_id == "noobj"]
blocking = [p for p in active_positions if p and p.pos.blocking]
recuse = [p for p in active_positions if p and p.pos_id == "recuse"]
answer = []
if len(yes) < 1:
answer.append("Needs a YES.")
if blocking:
if len(blocking) == 1:
answer.append("Has a %s." % blocking[0].pos.name.upper())
else:
if blocking[0].pos.name.upper().endswith('S'):
answer.append("Has %d %ses." % (len(blocking), blocking[0].pos.name.upper()))
else:
answer.append("Has %d %ss." % (len(blocking), blocking[0].pos.name.upper()))
needed = 1
if doc.type_id == "draft" and doc.intended_std_level_id in ("bcp", "ps", "ds", "std"):
needed = two_thirds_rule(recused=len(recuse))
elif doc.type_id == "statchg":
if isinstance(doc,Document):
related_set = doc.relateddocument_set
elif isinstance(doc,DocHistory):
related_set = doc.relateddochistory_set
else:
related_set = RelatedDocHistory.objects.none()
for rel in related_set.filter(relationship__slug__in=['tops', 'tois', 'tohist', 'toinf', 'tobcp', 'toexp']):
if (rel.target.document.std_level_id in ['bcp','ps','ds','std']) or (rel.relationship_id in ['tops','tois','tobcp']):
needed = two_thirds_rule(recused=len(recuse))
break
else:
if len(yes) < 1:
return " ".join(answer)
have = len(yes) + len(noobj)
if have < needed:
more = needed - have
if more == 1:
answer.append("Needs one more YES or NO OBJECTION position to pass.")
else:
answer.append("Needs %d more YES or NO OBJECTION positions to pass." % more)
else:
if blocking:
answer.append("Has enough positions to pass once %s positions are resolved." % blocking[0].pos.name.upper())
else:
answer.append("Has enough positions to pass.")
return " ".join(answer)
def create_ballot(request, doc, by, ballot_slug, time=None):
closed = close_open_ballots(doc, by)
for e in closed:
messages.warning(request, "Closed earlier open ballot created %s on '%s' for %s" % (e.time.strftime('%Y-%m-%d %H:%M'), e.ballot_type, e.doc.name, ))
if time:
e = BallotDocEvent(type="created_ballot", by=by, doc=doc, rev=doc.rev, time=time)
else:
e = BallotDocEvent(type="created_ballot", by=by, doc=doc, rev=doc.rev)
e.ballot_type = BallotType.objects.get(doc_type=doc.type, slug=ballot_slug)
e.desc = 'Created "%s" ballot' % e.ballot_type.name
e.save()
def create_ballot_if_not_open(request, doc, by, ballot_slug, time=None):
ballot_type = BallotType.objects.get(doc_type=doc.type, slug=ballot_slug)
if not doc.ballot_open(ballot_slug):
if time:
e = BallotDocEvent(type="created_ballot", by=by, doc=doc, rev=doc.rev, time=time)
else:
e = BallotDocEvent(type="created_ballot", by=by, doc=doc, rev=doc.rev)
e.ballot_type = ballot_type
e.desc = 'Created "%s" ballot' % e.ballot_type.name
e.save()
return e
else:
if request:
messages.warning(request, "There already exists an open '%s' ballot for %s. No new ballot created." % (ballot_type, doc.name))
return None
def close_ballot(doc, by, ballot_slug):
b = doc.ballot_open(ballot_slug)
if b:
e = BallotDocEvent(type="closed_ballot", doc=doc, rev=doc.rev, by=by)
e.ballot_type = BallotType.objects.get(doc_type=doc.type,slug=ballot_slug)
e.desc = 'Closed "%s" ballot' % e.ballot_type.name
e.save()
return b
def close_open_ballots(doc, by):
closed = []
for t in BallotType.objects.filter(doc_type=doc.type_id):
e = close_ballot(doc, by, t.slug )
if e:
closed.append(e)
return closed
def get_chartering_type(doc):
chartering = ""
if doc.get_state_slug() not in ("notrev", "approved"):
if doc.group.state_id in ("proposed", "bof"):
chartering = "initial"
elif doc.group.state_id == "active":
chartering = "rechartering"
return chartering
def augment_events_with_revision(doc, events):
"""Take a set of events for doc and add a .rev attribute with the
revision they refer to by checking NewRevisionDocEvents."""
event_revisions = list(NewRevisionDocEvent.objects.filter(doc=doc).order_by('time', 'id').values('id', 'rev', 'time'))
if doc.type_id == "draft" and doc.get_state_slug() == "rfc":
# add fake "RFC" revision
e = doc.latest_event(type="published_rfc")
if e:
event_revisions.append(dict(id=e.id, time=e.time, rev="RFC"))
event_revisions.sort(key=lambda x: (x["time"], x["id"]))
for e in sorted(events, key=lambda e: (e.time, e.id), reverse=True):
while event_revisions and (e.time, e.id) < (event_revisions[-1]["time"], event_revisions[-1]["id"]):
event_revisions.pop()
# Check for all subtypes which have 'rev' fields:
for sub in ['newrevisiondocevent', 'submissiondocevent', ]:
if hasattr(e, sub):
e = getattr(e, sub)
break
if not hasattr(e, 'rev'):
if event_revisions:
cur_rev = event_revisions[-1]["rev"]
else:
cur_rev = "00"
e.rev = cur_rev
def add_links_in_new_revision_events(doc, events, diff_revisions):
"""Add direct .txt links and diff links to new_revision events."""
prev = None
diff_urls = dict(((name, revision), url) for name, revision, time, url in diff_revisions)
for e in sorted(events, key=lambda e: (e.time, e.id)):
if not e.type == "new_revision":
continue
for sub in ['newrevisiondocevent', 'submissiondocevent', ]:
if hasattr(e, sub):
e = getattr(e, sub)
break
if not (e.doc.name, e.rev) in diff_urls:
continue
full_url = diff_url = diff_urls[(e.doc.name, e.rev)]
if doc.type_id in "draft": # work around special diff url for drafts
full_url = "https://tools.ietf.org/id/" + diff_url + ".txt"
# build links
links = r'<a href="%s">\1</a>' % full_url
        if prev is not None:
            links += ' (<a href="%s?url1=%s&url2=%s">diff from previous</a>)' % (settings.RFCDIFF_BASE_URL, quote(prev, safe="~"), quote(diff_url, safe="~"))
# replace the bold filename part
e.desc = re.sub(r"<b>(.+-[0-9][0-9].txt)</b>", links, e.desc)
prev = diff_url
def add_events_message_info(events):
for e in events:
if not e.type == "added_message":
continue
e.message = e.addedmessageevent.message
e.msgtype = e.addedmessageevent.msgtype
e.in_reply_to = e.addedmessageevent.in_reply_to
def get_unicode_document_content(key, filename, codec='utf-8', errors='ignore'):
try:
with io.open(filename, 'rb') as f:
raw_content = f.read().decode(codec,errors)
except IOError:
if settings.DEBUG:
error = "Error; cannot read ("+filename+")"
else:
error = "Error; cannot read ("+key+")"
return error
return raw_content
def get_document_content(key, filename, split=True, markup=True):
log.unreachable("2017-12-05")
try:
with io.open(filename, 'rb') as f:
raw_content = f.read()
except IOError:
if settings.DEBUG:
error = "Error; cannot read ("+filename+")"
else:
error = "Error; cannot read ("+key+")"
return error
# if markup:
# return markup_txt.markup(raw_content, split)
# else:
# return raw_content
return text.decode(raw_content)
def tags_suffix(tags):
return ("::" + "::".join(t.name for t in tags)) if tags else ""
def add_state_change_event(doc, by, prev_state, new_state, prev_tags=[], new_tags=[], timestamp=None):
"""Add doc event to explain that state change just happened."""
if prev_state and new_state:
assert prev_state.type_id == new_state.type_id
if prev_state == new_state and set(prev_tags) == set(new_tags):
return None
e = StateDocEvent(doc=doc, rev=doc.rev, by=by)
e.type = "changed_state"
e.state_type = (prev_state or new_state).type
e.state = new_state
e.desc = "%s changed to <b>%s</b>" % (e.state_type.label, new_state.name + tags_suffix(new_tags))
if prev_state:
e.desc += " from %s" % (prev_state.name + tags_suffix(prev_tags))
if timestamp:
e.time = timestamp
e.save()
return e
def update_reminder(doc, reminder_type_slug, event, due_date):
reminder_type = DocReminderTypeName.objects.get(slug=reminder_type_slug)
try:
reminder = DocReminder.objects.get(event__doc=doc, type=reminder_type, active=True)
except DocReminder.DoesNotExist:
reminder = None
if due_date:
# activate/update reminder
if not reminder:
reminder = DocReminder(type=reminder_type)
reminder.event = event
reminder.due = due_date
reminder.active = True
reminder.save()
else:
# deactivate reminder
if reminder:
reminder.active = False
reminder.save()
def prettify_std_name(n, spacing=" "):
if re.match(r"(rfc|bcp|fyi|std)[0-9]+", n):
return n[:3].upper() + spacing + n[3:]
else:
return n
def default_consensus(doc):
    # if someone edited the consensus, return that; otherwise
    # ietf stream => True and irtf stream => False
    e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
    if e:
        return e.consensus
    if doc.stream_id == "ietf":
        return True
    elif doc.stream_id == "irtf":
        return False
    else: # ise, iab, legacy: unknown
        return None
def nice_consensus(consensus):
mapping = {
None: "Unknown",
True: "Yes",
False: "No"
}
return mapping[consensus]
def has_same_ballot(doc, date1, date2=datetime.date.today()):
""" Test if the most recent ballot created before the end of date1
is the same as the most recent ballot created before the
end of date 2. """
ballot1 = doc.latest_event(BallotDocEvent,type='created_ballot',time__lt=date1+datetime.timedelta(days=1))
ballot2 = doc.latest_event(BallotDocEvent,type='created_ballot',time__lt=date2+datetime.timedelta(days=1))
return ballot1==ballot2
def make_notify_changed_event(request, doc, by, new_notify, time=None):
    # FOR REVIEW: This preserves the behavior from when
    # drafts and charters had separate edit_notify
    # functions. If it should be unified, there should
    # also be a migration function to make historic
    # events match.
if doc.type.slug=='charter':
event_type = 'changed_document'
else:
event_type = 'added_comment'
e = DocEvent(type=event_type, doc=doc, rev=doc.rev, by=by)
e.desc = "Notification list changed to %s" % (escape(new_notify) or "none")
if doc.notify:
e.desc += " from %s" % escape(doc.notify)
if time:
e.time = time
e.save()
return e
def update_telechat(request, doc, by, new_telechat_date, new_returning_item=None):
on_agenda = bool(new_telechat_date)
prev = doc.latest_event(TelechatDocEvent, type="scheduled_for_telechat")
prev_returning = bool(prev and prev.returning_item)
prev_telechat = prev.telechat_date if prev else None
prev_agenda = bool(prev_telechat)
if new_returning_item == None:
returning = prev_returning
else:
returning = new_returning_item
if returning == prev_returning and new_telechat_date == prev_telechat:
# fully updated, nothing to do
return
# auto-set returning item _ONLY_ if the caller did not provide a value
if ( new_returning_item != None
and on_agenda
and prev_agenda
and new_telechat_date != prev_telechat
and prev_telechat < datetime.date.today()
and has_same_ballot(doc,prev.telechat_date)
):
returning = True
e = TelechatDocEvent()
e.type = "scheduled_for_telechat"
e.by = by
e.doc = doc
e.rev = doc.rev
e.returning_item = returning
e.telechat_date = new_telechat_date
if on_agenda != prev_agenda:
if on_agenda:
e.desc = "Placed on agenda for telechat - %s" % (new_telechat_date)
else:
e.desc = "Removed from agenda for telechat"
elif on_agenda and new_telechat_date != prev_telechat:
e.desc = "Telechat date has been changed to <b>%s</b> from <b>%s</b>" % (
new_telechat_date, prev_telechat)
else:
# we didn't reschedule but flipped returning item bit - let's
# just explain that
if returning:
e.desc = "Set telechat returning item indication"
else:
e.desc = "Removed telechat returning item indication"
e.save()
has_short_fuse = doc.type_id=='draft' and new_telechat_date and (( new_telechat_date - datetime.date.today() ) < datetime.timedelta(days=13))
from ietf.doc.mails import email_update_telechat
if has_short_fuse:
email_update_telechat(request, doc, e.desc+"\n\nWARNING: This may not leave enough time for directorate reviews!\n")
else:
email_update_telechat(request, doc, e.desc)
return e
def rebuild_reference_relations(doc,filename=None):
if doc.type.slug != 'draft':
return None
if not filename:
if doc.get_state_slug() == 'rfc':
filename=os.path.join(settings.RFC_PATH,doc.canonical_name()+".txt")
else:
filename=os.path.join(settings.INTERNET_DRAFT_PATH,doc.filename_with_rev())
try:
with io.open(filename, 'rb') as file:
refs = draft.Draft(file.read().decode('utf8'), filename).get_refs()
except IOError as e:
return { 'errors': ["%s :%s" % (e.strerror, filename)] }
doc.relateddocument_set.filter(relationship__slug__in=['refnorm','refinfo','refold','refunk']).delete()
warnings = []
errors = []
unfound = set()
for ( ref, refType ) in refs.items():
refdoc = DocAlias.objects.filter( name=ref )
count = refdoc.count()
if count == 0:
unfound.add( "%s" % ref )
continue
elif count > 1:
errors.append("Too many DocAlias objects found for %s"%ref)
else:
# Don't add references to ourself
if doc != refdoc[0].document:
RelatedDocument.objects.get_or_create( source=doc, target=refdoc[ 0 ], relationship=DocRelationshipName.objects.get( slug='ref%s' % refType ) )
if unfound:
warnings.append('There were %d references with no matching DocAlias'%len(unfound))
ret = {}
if errors:
ret['errors']=errors
if warnings:
ret['warnings']=warnings
if unfound:
ret['unfound']=list(unfound)
return ret
def set_replaces_for_document(request, doc, new_replaces, by, email_subject, comment=""):
addrs = gather_address_lists('doc_replacement_changed',doc=doc)
to = set(addrs.to)
cc = set(addrs.cc)
relationship = DocRelationshipName.objects.get(slug='replaces')
old_replaces = doc.related_that_doc("replaces")
events = []
e = DocEvent(doc=doc, rev=doc.rev, by=by, type='changed_document')
new_replaces_names = ", ".join(d.name for d in new_replaces) or "None"
old_replaces_names = ", ".join(d.name for d in old_replaces) or "None"
e.desc = "This document now replaces <b>%s</b> instead of %s" % (new_replaces_names, old_replaces_names)
e.save()
events.append(e)
if comment:
events.append(DocEvent.objects.create(doc=doc, rev=doc.rev, by=by, type="added_comment", desc=comment))
for d in old_replaces:
if d not in new_replaces:
other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document)
to.update(other_addrs.to)
cc.update(other_addrs.cc)
RelatedDocument.objects.filter(source=doc, target=d, relationship=relationship).delete()
if not RelatedDocument.objects.filter(target=d, relationship=relationship):
s = 'active' if d.document.expires > datetime.datetime.now() else 'expired'
d.document.set_state(State.objects.get(type='draft', slug=s))
for d in new_replaces:
if d not in old_replaces:
other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document)
to.update(other_addrs.to)
cc.update(other_addrs.cc)
RelatedDocument.objects.create(source=doc, target=d, relationship=relationship)
d.document.set_state(State.objects.get(type='draft', slug='repl'))
if d.document.stream_id in ('irtf','ise','iab'):
repl_state = State.objects.get(type_id='draft-stream-%s'%d.document.stream_id, slug='repl')
d.document.set_state(repl_state)
events.append(StateDocEvent.objects.create(doc=d.document, rev=d.document.rev, by=by, type='changed_state', desc="Set stream state to Replaced",state_type=repl_state.type, state=repl_state))
# make sure there are no lingering suggestions duplicating new replacements
RelatedDocument.objects.filter(source=doc, target__in=new_replaces, relationship="possibly-replaces").delete()
email_desc = e.desc.replace(", ", "\n ")
if comment:
email_desc += "\n" + comment
from ietf.doc.mails import html_to_text
send_mail(request, list(to),
"DraftTracker Mail System <<EMAIL>>",
email_subject,
"doc/mail/change_notice.txt",
dict(text=html_to_text(email_desc),
doc=doc,
url=settings.IDTRACKER_BASE_URL + doc.get_absolute_url()),
cc=list(cc))
return events
def check_common_doc_name_rules(name):
"""Check common rules for document names for use in forms, throws
ValidationError in case there's a problem."""
errors = []
if re.search("[^a-z0-9-]", name):
errors.append("The name may only contain digits, lowercase letters and dashes.")
if re.search("--", name):
errors.append("Please do not put more than one hyphen between any two words in the name.")
if re.search("-[0-9]{2}$", name):
errors.append("This name looks like ends in a version number. -00 will be added automatically. Please adjust the end of the name.")
if errors:
raise ValidationError(errors)
def get_initial_notify(doc,extra=None):
# With the mailtrigger based changes, a document's notify should start empty
receivers = []
if extra:
if isinstance(extra, six.string_types):
extra = extra.split(', ')
receivers.extend(extra)
return ", ".join(set([x.strip() for x in receivers]))
def uppercase_std_abbreviated_name(name):
if re.match('(rfc|bcp|std|fyi) ?[0-9]+$', name):
return name.upper()
else:
return name
def extract_complete_replaces_ancestor_mapping_for_docs(names):
"""Return dict mapping all replaced by relationships of the
replacement ancestors to docs. So if x is directly replaced by y
and y is in names or replaced by something in names, x in
replaces[y]."""
replaces = defaultdict(set)
checked = set()
front = names
while True:
if not front:
break
relations = ( RelatedDocument.objects.filter(source__name__in=front, relationship="replaces")
.select_related("target").values_list("source__name", "target__docs__name") )
if not relations:
break
checked.update(front)
front = []
for source_doc, target_doc in relations:
replaces[source_doc].add(target_doc)
if target_doc not in checked:
front.append(target_doc)
return replaces
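# Worked example for the function above (a sketch): if draft-b has a
# "replaces" relationship to draft-a and names == ["draft-b"], the result is
# {"draft-b": {"draft-a"}}; if draft-a in turn replaces draft-z, that link is
# pulled in too: {"draft-b": {"draft-a"}, "draft-a": {"draft-z"}}.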
def make_rev_history(doc):
# return document history data for inclusion in doc.json (used by timeline)
    def get_predecessors(doc, predecessors=None):
        # use a None default: a mutable default list would be shared
        # between calls
        if predecessors is None:
            predecessors = []
        if hasattr(doc, 'relateddocument_set'):
            for alias in doc.related_that_doc('replaces'):
                if alias.document not in predecessors:
                    predecessors.append(alias.document)
                    get_predecessors(alias.document, predecessors)
        return predecessors
    def get_ancestors(doc, ancestors=None):
        if ancestors is None:
            ancestors = []
        if hasattr(doc, 'relateddocument_set'):
            for alias in doc.related_that('replaces'):
                if alias.document not in ancestors:
                    ancestors.append(alias.document)
                    get_ancestors(alias.document, ancestors)
        return ancestors
def get_replaces_tree(doc):
tree = get_predecessors(doc)
tree.extend(get_ancestors(doc))
return tree
history = {}
docs = get_replaces_tree(doc)
if docs is not None:
docs.append(doc)
for d in docs:
for e in d.docevent_set.filter(type='new_revision').distinct():
if hasattr(e, 'newrevisiondocevent'):
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e.newrevisiondocevent.rev + "/"
history[url] = {
'name': d.name,
'rev': e.newrevisiondocevent.rev,
'published': e.time.isoformat(),
'url': url,
}
if d.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
history[url]['pages'] = d.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
if doc.type_id == "draft":
e = doc.latest_event(type='published_rfc')
else:
e = doc.latest_event(type='iesg_approved')
if e:
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=e.doc))
history[url] = {
'name': e.doc.canonical_name(),
'rev': e.doc.canonical_name(),
'published': e.time.isoformat(),
'url': url
}
if hasattr(e, 'newrevisiondocevent') and doc.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
history[url]['pages'] = doc.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
history = list(history.values())
return sorted(history, key=lambda x: x['published'])
def get_search_cache_key(params):
from ietf.doc.views_search import SearchForm
fields = set(SearchForm.base_fields) - set(['sort',])
kwargs = dict([ (k,v) for (k,v) in list(params.items()) if k in fields ])
key = "doc:document:search:" + hashlib.sha512(json.dumps(kwargs, sort_keys=True).encode('utf-8')).hexdigest()
return key
def label_wrap(label, items, joiner=',', max=50):
lines = []
if not items:
return lines
line = '%s: %s' % (label, items[0])
for item in items[1:]:
if len(line)+len(joiner+' ')+len(item) > max:
lines.append(line+joiner)
line = ' '*(len(label)+len(': ')) + item
else:
line += joiner+' '+item
if line:
lines.append(line)
return lines
def join_justified(left, right, width=72):
count = max(len(left), len(right))
left = left + ['']*(count-len(left))
right = right + ['']*(count-len(right))
lines = []
i = 0
while True:
l = left[i]
r = right[i]
if len(l)+1+len(r) > width:
left = left + ['']
right = right[:i] + [''] + right[i:]
r = right[i]
count += 1
lines.append( l + ' ' + r.rjust(width-len(l)-1) )
i += 1
if i >= count:
break
return lines
def build_doc_meta_block(doc, path):
def add_markup(path, doc, lines):
is_hst = doc.is_dochistory()
rev = doc.rev
if is_hst:
doc = doc.doc
name = doc.name
rfcnum = doc.rfc_number()
errata_url = settings.RFC_EDITOR_ERRATA_URL.format(rfc_number=rfcnum) if not is_hst else ""
ipr_url = "%s?submit=draft&id=%s" % (urlreverse('ietf.ipr.views.search'), name)
for i, line in enumerate(lines):
# add draft links
line = re.sub(r'\b(draft-[-a-z0-9]+)\b', r'<a href="%s/\g<1>">\g<1></a>'%(path, ), line)
# add rfcXXXX to RFC links
line = re.sub(r' (rfc[0-9]+)\b', r' <a href="%s/\g<1>">\g<1></a>'%(path, ), line)
# add XXXX to RFC links
line = re.sub(r' ([0-9]{3,5})\b', r' <a href="%s/rfc\g<1>">\g<1></a>'%(path, ), line)
# add draft revision links
line = re.sub(r' ([0-9]{2})\b', r' <a href="%s/%s-\g<1>">\g<1></a>'%(path, name, ), line)
if rfcnum:
# add errata link
line = re.sub(r'Errata exist', r'<a class="text-warning" href="%s">Errata exist</a>'%(errata_url, ), line)
if is_hst or not rfcnum:
# make current draft rev bold
line = re.sub(r'>(%s)<'%rev, r'><b>\g<1></b><', line)
line = re.sub(r'IPR declarations', r'<a class="text-warning" href="%s">IPR declarations</a>'%(ipr_url, ), line)
line = line.replace(r'[txt]', r'[<a href="%s">txt</a>]' % doc.href())
lines[i] = line
return lines
#
now = datetime.datetime.now()
draft_state = doc.get_state('draft')
block = ''
meta = {}
if doc.type_id == 'draft':
revisions = []
ipr = doc.related_ipr()
if ipr:
meta['ipr'] = [ "IPR declarations" ]
if doc.is_rfc() and not doc.is_dochistory():
if not doc.name.startswith('rfc'):
meta['from'] = [ "%s-%s"%(doc.name, doc.rev) ]
meta['errata'] = [ "Errata exist" ] if doc.tags.filter(slug='errata').exists() else []
meta['obsoletedby'] = [ alias.document.rfc_number() for alias in doc.related_that('obs') ]
meta['obsoletedby'].sort()
meta['updatedby'] = [ alias.document.rfc_number() for alias in doc.related_that('updates') ]
meta['updatedby'].sort()
meta['stdstatus'] = [ doc.std_level.name ]
else:
dd = doc.doc if doc.is_dochistory() else doc
revisions += [ '(%s)%s'%(d.name, ' '*(2-((len(d.name)-1)%3))) for d in dd.replaces() ]
revisions += doc.revisions()
if doc.is_dochistory() and doc.doc.is_rfc():
revisions += [ doc.doc.canonical_name() ]
else:
revisions += [ d.name for d in doc.replaced_by() ]
meta['versions'] = revisions
        if not doc.is_dochistory() and draft_state.slug == 'active' and now > doc.expires:
# Active past expiration date
meta['active'] = [ 'Document is active' ]
meta['state' ] = [ doc.friendly_state() ]
intended_std = doc.intended_std_level if doc.intended_std_level else None
if intended_std:
if intended_std.slug in ['ps', 'ds', 'std']:
meta['stdstatus'] = [ "Standards Track" ]
else:
meta['stdstatus'] = [ intended_std.name ]
elif doc.type_id == 'charter':
meta['versions'] = doc.revisions()
#
# Add markup to items that needs it.
if 'versions' in meta:
meta['versions'] = label_wrap('Versions', meta['versions'], joiner="")
for label in ['Obsoleted by', 'Updated by', 'From' ]:
item = label.replace(' ','').lower()
if item in meta and meta[item]:
meta[item] = label_wrap(label, meta[item])
#
left = []
right = []
#right = [ '[txt]']
for item in [ 'from', 'versions', 'obsoletedby', 'updatedby', ]:
if item in meta and meta[item]:
left += meta[item]
for item in ['stdstatus', 'active', 'state', 'ipr', 'errata', ]:
if item in meta and meta[item]:
right += meta[item]
lines = join_justified(left, right)
block = '\n'.join(add_markup(path, doc, lines))
#
return block
|
/**
* @author "S. Coutin (CINES)"
*
*/
class FileBasedInterface {
/**
* Used to convert the data object to a string fitting with the rp_console output file format
*
* @return
* The string representing the data object
*/
private String toTextFileOutput(DataObject dataObject) {
StringBuilder sb= new StringBuilder();
sb.append(dataObject.getOperation() +";");
sb.append(dataObject.getStatus() +";");
sb.append(dataObject.getStatusMessage() +";");
sb.append(dataObject.getLaunchDate() + ";");
sb.append(dataObject.getEndDate() + ";");
sb.append(dataObject.getFileName() + ";");
sb.append(dataObject.getLocalFilePath() + ";");
sb.append(dataObject.getRemoteDirPath() + ";");
sb.append(dataObject.getRor() + ";");
sb.append(dataObject.getEudatPid() + ";");
return sb.toString();
}
    /**
     * Writes the operation results to the rp_pilot output file, one line per
     * data object, and writes an additional line to the error file for each
     * data object whose status is ERROR.
     *
     * @param replicaResult
     *         the list of data objects resulting from the operation
     */
protected void writeOperationResultToFile (ArrayList<DataObject> replicaResult) {
FileWriter fw = null;
BufferedWriter bw = null;
boolean writeHeader = false;
FileWriter fwErr = null;
BufferedWriter bwErr = null;
boolean writeHeaderErr = false;
// Get file name from the properties
try {
File file = new File(B2safeRpPilot.prop.getProperty("replicationResultFile").trim());
File fileErr = new File(B2safeRpPilot.prop.getProperty("operationErrorResultFile").trim());
// if file doesn't exist, then create it
if (!file.exists()) {
file.createNewFile();
writeHeader = true;
}
// if file error doesn't exist, then create it
if (!fileErr.exists()) {
fileErr.createNewFile();
writeHeaderErr = true;
}
fw = new FileWriter(file.getAbsoluteFile(), true);
bw = new BufferedWriter(fw);
// Write header
if (writeHeader) {
bw.write("Operation;Status;StatusMessage;LaunchDate;EndDate;FileName;LocalFilePath;remoteDirPath;ror;eudatPid;");
bw.newLine();
}
fwErr = new FileWriter(fileErr.getAbsoluteFile(), true);
bwErr = new BufferedWriter(fwErr);
// Write header
if (writeHeaderErr) {
bwErr.write("Operation;Status;StatusMessage;LaunchDate;EndDate;FileName;LocalFilePath;remoteDirPath;ror;eudatPid;");
bwErr.newLine();
}
// write one line per data object and one line in the error file if status is ERROR
for (DataObject dataObject : replicaResult) {
bw.write(toTextFileOutput(dataObject));
bw.newLine();
if (dataObject.getStatus().equals("ERROR")) {
bwErr.write(toTextFileOutput(dataObject));
bwErr.newLine();
}
}
bw.close();
bwErr.close();
} catch (FileNotFoundException ex) {
B2safeRpPilot.log.error("Text file not found");
} catch (IOException ex) {
B2safeRpPilot.log.error("Text file IO exception");
}
finally
{
try
{
//Close the stream of file
if (bw!= null) bw.close();
                if (bwErr != null) bwErr.close();
}
catch (IOException ex)
{
B2safeRpPilot.log.error("Text file IO exception");
}
}
}
protected ArrayList<DataObject> initToReplicateDOList(){
return textFileToListDO(B2safeRpPilot.prop.getProperty("localIngestFileList").trim());
}
protected ArrayList<DataObject> initToDeleteDOList(){
return textFileToListDO(B2safeRpPilot.prop.getProperty("localDeleteFileList").trim());
}
protected ArrayList<DataObject> initToRetrieveDOList(){
return textFileToListDO(B2safeRpPilot.prop.getProperty("localRetrieveFileList").trim());
}
private ArrayList<DataObject> textFileToListDO(String textFilePath){
BufferedReader reader=null;
File toReadTextFile=null;
boolean headerLine = true; // used to jump over the header line
ArrayList<DataObject> resultDOList = new ArrayList<DataObject>();
try {
//Read Dataobject in file
B2safeRpPilot.log.debug("Text file to read is : "+ textFilePath);
toReadTextFile = new File(textFilePath);
reader = new BufferedReader(new FileReader(toReadTextFile));
String line;
DataObject dataObject;
if(reader!= null)
{
while ((line=reader.readLine())!=null)
{
B2safeRpPilot.log.debug("line is ["+ line+"]");
// Jumps over the header line
if (headerLine) {
headerLine = false;
}
else {
// Check if the line is not empty
if (line.trim().length()>0)
{
//We split each lines from text file to an array of String
String tab[]=line.split(";");
// If the line is not empty, set data object values from the fields
if (tab.length>0)
{
dataObject = new DataObject();
dataObject.setFileName(tab[0]);
if(tab.length>=1 && !tab[1].equals("")) {
dataObject.setLocalFilePath(tab[1]);
}
if(tab.length>=2 && !tab[2].equals("")) {
if ( !tab[2].endsWith("/") ) tab[2] += "/";
dataObject.setRemoteDirPath(tab[2]);
}
if(tab.length>3) {
dataObject.setRor(tab[3]);
//dataObject.addOneEudatMetadata(new AVUMetaData("ROR",tab[3]));
}
else {
dataObject.setRor("None");
}
if(tab.length>4) {
dataObject.setEudatPid(tab[4]);
}
resultDOList.add(dataObject);
}
}
}
}
}
} catch (FileNotFoundException ex) {
B2safeRpPilot.log.error("Text file not found : "+ textFilePath);
} catch (IOException ex) {
B2safeRpPilot.log.error("Text file IO exception : "+ textFilePath);
}
finally
{
try
{
//Close the stream of file
if (reader!= null) reader.close();
}
catch (IOException ex)
{
B2safeRpPilot.log.error("Text file IO exception : "+ textFilePath);
}
}
return resultDOList;
}
} |
import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable } from 'rxjs';
@Injectable({
providedIn: 'root',
})
export class StatisticsService {
  private statisticUrl = 'http://localhost:5000/api/statistics'; // base URL of the statistics API
private httpOptions = {
headers: new HttpHeaders({
'Content-Type': 'application/json',
}),
};
constructor(private http: HttpClient) {}
////////// Service for Manager //////////
/** GET overall */
// done
getOverall(year: number): Observable<any> {
const url = `${this.statisticUrl}/overall/${year}`;
    console.log('Statistics overall API ', url);
return this.http.get<any>(url, this.httpOptions);
}
/** GET by faculty */
// done
getByFaculty(id: number): Observable<any> {
const url = `${this.statisticUrl}/${id}`;
    console.log('Statistics by id API ', url);
return this.http.get<any>(url, this.httpOptions);
}
}
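// Usage sketch in a component (names illustrative):
//
//   this.statisticsService.getOverall(2021).subscribe((data) => {
//     this.overall = data;
//   });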
|
def base58_encode(raw_bytes):
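    # Sketch: with the common `base58` PyPI package, b58encode returns bytes,
    # e.g. base58_encode(b"hello") == b'Cn8eVZg'.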
return base58.b58encode(raw_bytes) |
X, Y = map(int, input().split())
def comb(n, r, mod=10 ** 9 + 7):
n1 = n + 1
r = min(n, n - r)
numer = denom = 1
for i in range(1, r + 1):
numer = numer * (n1 - i) % mod
denom = denom * i % mod
return numer * pow(denom, mod - 2, mod) % mod
# x + y coordinates increase by 3 per one move
# x + y should be factor of 3
ans = 0
if (X + Y) % 3 == 0 and 2 * Y >= X and 2 * X >= Y:
# The number of up move and right move
up, right = (2 * Y - X) // 3, (2 * X - Y) // 3
# up + right = (X + Y) % 3
ans = comb(up + right, right)
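    # Worked example: X = 3, Y = 3 gives up = right = 1, so
    # ans = comb(2, 1) = 2 -- one (+1, +2) move and one (+2, +1) move,
    # taken in either order.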
print(ans) |
// ModWord returns the modulo of two Words.
// Note: the % operator does not support floats, so
// calling this with TypeF64 panics.
func ModWord(a Word, b Word, t TypeRepresentation) (out Word) {
switch t {
case TypeU64:
out = WordU64(a.AsU64 % b.AsU64)
case TypeI64:
out = WordI64(a.AsI64 % b.AsI64)
case TypeF64:
panic("unsupported modulo for type f64")
}
return out
} |
/**
* Creates a sequence of all values in another sequence after the predicate
* fails once.
* @param predFn The predicate.
* @param seq The input sequence.
* @param <T> The type of values in the input sequence.
* @return The output sequence.
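 * <p>Example (a sketch; {@code seqOf} is an illustrative factory, not
 * necessarily part of this library): {@code dropWhile(x -> x < 3,
 * seqOf(1, 2, 3, 1))} yields the sequence (3, 1) -- the predicate only
 * has to fail once.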
*/
public static <T> Seq<T> dropWhile(Predicate<? super T> predFn,
Seq<T> seq) {
while (!seq.empty() && predFn.test(seq.first()))
seq = seq.rest();
return upcast(seq);
} |
package com.adventofcode.day10;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.*;
public class AdapterArray {
private static final String JOLTAGE_ADAPTERS = "resources/day_10_joltage_adapters_input.txt";
private static final int MAX_JOLT_DIFFERENCE = 3;
private static final int STARTING_JOLTAGE = 0;
private static LinkedList<Integer> adapters = new LinkedList<>();
private static Map<Integer, Integer> joltDifferences = new HashMap<>();
public static void main(String[] args) {
readAdaptersInOrder();
addInitialAndFinalJoltage();
System.out.println("Adapters:" + adapters);
initializeJoltDifferences();
countDifferences();
System.out.println(joltDifferences);
System.out.println("Number of 1-jolt differences multiplied by the number of 3-jolt differences: "
+ calculateProductOf1JoltAnd3JoltsDifferences());
}
private static void readAdaptersInOrder() {
try (BufferedReader br = new BufferedReader(new FileReader(JOLTAGE_ADAPTERS))) {
String line;
while ((line = br.readLine()) != null) {
adapters.add(Integer.valueOf(line));
}
adapters.sort(Comparator.naturalOrder());
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
private static void addInitialAndFinalJoltage() {
adapters.addFirst(STARTING_JOLTAGE);
adapters.addLast(adapters.getLast() + MAX_JOLT_DIFFERENCE);
}
private static void initializeJoltDifferences() {
for (int i = 1; i <= MAX_JOLT_DIFFERENCE; i++) {
joltDifferences.put(i, 0);
}
}
private static void countDifferences() {
int referenceJoltage = STARTING_JOLTAGE;
int difference;
for (int i = 1; i < adapters.size(); i++) {
difference = adapters.get(i) - referenceJoltage;
joltDifferences.put(difference, joltDifferences.get(difference) + 1);
referenceJoltage = adapters.get(i);
}
}
private static int calculateProductOf1JoltAnd3JoltsDifferences() {
return joltDifferences.get(1) * joltDifferences.get(3);
}
}
|
<filename>GoLang/SortTriangle/SortTriangle.go
package main
import (
"fmt"
"math"
)
func main() {
var totalTriangle int
var inputBuffer float64
fmt.Scan(&totalTriangle)
var triangles = make([][]float64, 0)
for i := 0; i < totalTriangle; i++ {
tmp := make([]float64, 0)
for j := 0; j < 3; j++ {
fmt.Scan(&inputBuffer)
tmp = append(tmp, inputBuffer)
}
triangles = append(triangles, tmp)
}
sort(&triangles, totalTriangle)
print(triangles, totalTriangle)
}
func area(triangle []float64) float64 {
	a := triangle[0]
	b := triangle[1]
	c := triangle[2]
	p := (a + b + c) / 2
	// Heron's formula: area^2 = p * (p-a) * (p-b) * (p-c)
	areaSquared := p * (p - a) * (p - b) * (p - c)
	return math.Sqrt(areaSquared)
}
// bubble sort
func sort(triangles *[][]float64, n int) {
for i := 0; i < n-1; i++ {
for j := 0; j < n-i-1; j++ {
			if area((*triangles)[j]) > area((*triangles)[j+1]) {
				(*triangles)[j], (*triangles)[j+1] = (*triangles)[j+1], (*triangles)[j]
			}
}
}
}
func print(triangles [][]float64, n int) {
for i := 0; i < n; i++ {
for j := 0; j < 3; j++ {
fmt.Print(math.Trunc(triangles[i][j]))
fmt.Print(" ")
}
fmt.Print("\n")
}
}
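
// Sanity check for the corrected Heron's formula: a 3-4-5 right triangle has
// semi-perimeter p = 6 and area sqrt(6*3*2*1) = 6.
func exampleArea() {
	fmt.Println(area([]float64{3, 4, 5})) // 6
}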
|
// The command returning non-0 does NOT constitute an error -- that
// is communicated back via the command return code, and the calling
// function is responsible for determining how to handle that
func (cmdResult *CommandResult) Run() (err error) {
var stdout, stderr bytes.Buffer
cmdResult.ExecCmd = exec.Command(cmdResult.CmdPath, cmdResult.CmdArgs...)
	cmdResult.CmdString = cmdResult.ExecCmd.String()
Debugf("Running command: %s", cmdResult.CmdString)
cmdResult.ExecCmd.Stdout = &stdout
cmdResult.ExecCmd.Stderr = &stderr
cmdResult.CmdErr = cmdResult.ExecCmd.Run()
cmdResult.Ran = true
if cmdResult.CmdErr != nil {
if exitError, ok := cmdResult.CmdErr.(*exec.ExitError); ok {
cmdResult.Rc = exitError.ExitCode()
} else {
cmdResult.Rc = CmdRcCannotGet
Error(cmdResult.CmdErr)
err = fmt.Errorf("Unable to determine command return code")
}
} else {
cmdResult.Rc = 0
}
cmdResult.OutBytes, cmdResult.ErrBytes = stdout.Bytes(), stderr.Bytes()
if cmdResult.Rc != CmdRcCannotGet {
Debugf("Command return code: %d", cmdResult.Rc)
}
if len(cmdResult.OutString()) > 0 {
Debugf("Command stdout:\n%s", cmdResult.OutString())
} else {
Debugf("No stdout from command")
}
if len(cmdResult.ErrString()) > 0 {
Debugf("Command stderr:\n%s", cmdResult.ErrString())
} else {
Debugf("No stderr from command")
}
return
} |
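
// Usage sketch (field names as used above; assumes "log" is imported and the
// binary path is illustrative):
func exampleRun() {
	cmd := &CommandResult{CmdPath: "/bin/ls", CmdArgs: []string{"-l"}}
	if err := cmd.Run(); err != nil {
		log.Fatal(err) // infrastructure failure -- a non-zero exit code is not an error
	}
	Debugf("rc=%d stdout:\n%s", cmd.Rc, cmd.OutString())
}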
// LoadWords loads a list of unique words from the file at the given path
func LoadWords(path string) (words []string, err error) {
var text *os.File
text, err = os.Open(path)
if err != nil {
return
}
defer text.Close()
wordsMap := make(map[string]int)
words = make([]string, 0, 100000)
s := bufio.NewScanner(text)
for s.Scan() {
word := s.Text()
_, exists := wordsMap[word]
if !exists {
words = append(words, word)
wordsMap[word] = 1
}
}
return
} |
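
// Usage sketch (the dictionary path is illustrative; assumes "fmt" and "log"
// are imported):
func exampleLoadWords() {
	words, err := LoadWords("/usr/share/dict/words")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %d unique words\n", len(words))
}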
/**
* Moves the cursor to the previous row in this <code>ResultSet</code> object.
* <p/>
* When a call to the <code>previous</code> method returns <code>false</code>, the cursor is positioned before the
* first row. Any invocation of a <code>ResultSet</code> method which requires a current row will result in a
* <code>SQLException</code> being thrown.
* <p/>
* If an input stream is open for the current row, a call to the method <code>previous</code> will implicitly close
 * it. A <code>ResultSet</code> object's warning chain is cleared when a new row is read.
* <p/>
*
* @return <code>true</code> if the cursor is now positioned on a valid row; <code>false</code> if the cursor is
* positioned before the first row
* @throws java.sql.SQLException if a database access error occurs; this method is called on a closed result set or
* the result set type is <code>TYPE_FORWARD_ONLY</code>
* @throws java.sql.SQLFeatureNotSupportedException
* if the JDBC driver does not support this method
* @since 1.2
*/
public boolean previous() throws SQLException {
if (queryResult.getResultSetType() != ResultSetType.SELECT) {
return false;
}
final SelectQueryResult sqr = (SelectQueryResult) queryResult;
        // only move back if the cursor is not already before the first row
        if (sqr.getRowPointer() > 0) {
sqr.moveRowPointerTo(sqr.getRowPointer() - 1);
return true;
}
return false;
} |
// GetUsers returns the members in an App
// Pass in an optional AppFilterOptions struct to filter the results
// The Users in the app are returned
func (a *AppsService) GetUsers(appID string, opt *AppFilterOptions) (appUsers []AppUser, resp *Response, err error) {
	pagesRetrieved := 0
	// opt is documented as optional, so guard against a nil pointer
	if opt == nil {
		opt = new(AppFilterOptions)
	}
	var u string
	if opt.NextURL != nil {
u = opt.NextURL.String()
} else {
u = fmt.Sprintf("apps/%v/users", appID)
if opt.Limit == 0 {
opt.Limit = defaultLimit
}
u, _ = addOptions(u, opt)
}
	req, err := a.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}
	resp, err = a.client.Do(req, &appUsers)
	if err != nil {
		return nil, resp, err
	}
	pagesRetrieved++
	if (opt.NumberOfPages > 0 && pagesRetrieved < opt.NumberOfPages) || opt.GetAllPages {
		for {
			if pagesRetrieved == opt.NumberOfPages {
break
}
if resp.NextURL != nil {
var userPage []AppUser
pageOpts := new(AppFilterOptions)
pageOpts.NextURL = resp.NextURL
pageOpts.Limit = opt.Limit
pageOpts.NumberOfPages = 1
userPage, resp, err = a.GetUsers(appID, pageOpts)
if err != nil {
return appUsers, resp, err
}
appUsers = append(appUsers, userPage...)
				pagesRetrieved++
} else {
break
}
}
}
return appUsers, resp, err
} |
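
// Usage sketch: fetch every member of an app in one call by requesting all
// pages (the app ID is illustrative; assumes "log" is imported):
func exampleGetUsers(apps *AppsService) {
	users, _, err := apps.GetUsers("my-app-id", &AppFilterOptions{GetAllPages: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("app has %d users\n", len(users))
}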
Ingo Fender
+41 61 280 8415
[email protected]
Jacob Gyntelberg
+41 61 280 8891
[email protected]
Overview: global financial crisis spurs unprecedented policy actions
Financial stability concerns took centre stage once again over the period
between end-August and end-November. In the wake of the mid-September
failure of Lehman Brothers, global financial markets seized up and entered a
new and deeper state of crisis. As money market funds and other investors
were forced to write off their Lehman-related investments, counterparty
concerns mounted in the context of large-scale redemption-driven asset sales.
The ensuing sell-off affected all but the safest assets and left key parts of
the global financial system dysfunctional. With credit and money markets
essentially frozen and equity prices plummeting, banks and other financial
firms saw their access to funding eroded and their capital base shrink, owing to
accumulating mark-to-market losses. Credit spreads surged to record levels,
equity prices saw historic declines and volatilities soared across markets,
indicating extreme financial market stress. Government bond yields declined in
very volatile conditions, as recession concerns and safe haven flows
increasingly outweighed the impact of anticipated increases in fiscal deficits. At
the same time, yield curves steepened from the front end, reflecting repeated
downward adjustments in policy rates.
Emerging market assets also experienced broad-based price declines, as
depressed levels of risk appetite and associated pressures in the industrialised
world spilled over into emerging financial markets. With confidence in the
continued viability of key parts of the international banking system collapsing,
the authorities in several countries embarked on an unprecedented wave of
policy initiatives to arrest the plunge in asset prices and contain systemic risks.
Market developments over the period under review went through four
more or less distinct stages. Stage one, which led into the Lehman bankruptcy
in mid-September, was marked by the takeover of two major US housing
finance agencies by the authorities in the United States. Stage two
encompassed the immediate implications of the Lehman bankruptcy and the
widespread crisis of confidence it triggered. Stage three, starting in late
September, was characterised by fast-paced and increasingly broad policy
actions, as responses to the crisis evolved from case-by-case reactions to a
more international, system-wide approach. In the fourth and final stage, from
mid-October, pricing patterns were increasingly dominated by recession fears,
while markets continued to struggle with the uncertainties surrounding the large
number of newly announced policy initiatives. |
/**
 * Validates the assertion signature. If the assertion is unsigned, the enclosing SAML response was not
 * signed either, and the SP requires assertions to be signed, the validation fails.
 *
 * @param signature the assertion signature, or null if the assertion is unsigned
 * @param context the SAML message context
 * @param engine the signature trust engine used for verification
*/
protected void validateAssertionSignature(final Signature signature, final ExtendedSAMLMessageContext context,
final SignatureTrustEngine engine) {
if (signature != null) {
validateSignature(signature, context.getPeerEntityMetadata().getEntityID(), engine);
} else if (((SPSSODescriptor) context.getLocalEntityRoleMetadata()).getWantAssertionsSigned()
&& !context.isInboundSAMLMessageAuthenticated()) {
throw new SamlException("Assertion or response must be signed");
}
} |
Trump Can Have This Iran Deal or No Iran Deal
Ever since Donald Trump told the American Israel Public Affairs Committee (AIPAC) that his “No. 1 priority is to dismantle the disastrous deal with Iran,” the nuclear agreement has faced frequent predictions of its demise. Trump’s election was seen as heralding the death knell of the deal: On the campaign trail, after all, he said the Islamic Republic was the world’s leading state sponsor of terrorism, a threat across the Middle East, and a country that has covert cells ready to inflict carnage around the globe. Allowing Iran access to billions of dollars in exchange for curbs on its nuclear program, he argued, was not in America’s or the world’s interests.
Opponents of the deal have clung to the AIPAC speech ever since Trump delivered it in March. But their hopes that he will abolish the agreement, or at least pare it back, always rested on shaky ground. Trump was unable to sustain his own argument during the speech, shifting dramatically just six minutes after he’d promised to scrap the agreement. “At the very least, we must enforce the terms of the previous deal to hold Iran totally accountable,” he said.
Confirmation hearings for senior officials in the incoming administration have laid bare the gulf between Trump’s campaign rhetoric and realistic policy options. His new defense secretary said at his confirmation hearing that America must honor the deal, and his nominee for CIA director placed the emphasis on enforcement, saying the agency must be “rigorously objective” on Iran. Neither spoke of a renegotiation.
International inspectors say Tehran is complying with the agreement. The one technical breach — excess production of heavy water that can be used to produce plutonium, a possible route to a bomb — was quickly rectified when Iran shipped it out of the country last November. Officials in Tehran said they had seen the heavy water restriction as a guideline, not a hard target. Iran has its own grievances, blaming U.S. banking restrictions for making it hard for European money to reach Tehran. Iran’s argument that this amounts to a breach is difficult to sustain. Such financial restrictions have long been in force under sanctions imposed for non-nuclear reasons, such as human rights or terrorism, which fell outside the nuclear deal.
There is no doubt that initial hopes for a broader Iran-U.S. détente withered in 2016. In Tehran, the regime’s opinion of the United States remains defined by the 1979 revolution: Just days before Trump’s electoral victory in November, Iran’s supreme leader, Ayatollah Ali Khamenei, said the campaign had proved what he referred to as the moral shortcomings of the United States. The one communication channel that Khamenei allowed — between Foreign Minister Mohammad Javad Zarif and his U.S. counterpart, John Kerry — has also expired, with no signs of a replacement. The U.S. Navy and ships from Iran’s Islamic Revolutionary Guard Corps continue to skirmish around the Strait of Hormuz. A serious clash seems possible.
The debate surrounding the Iran deal’s future under Trump, however, has largely ignored one salient fact: The nuclear agreement was never between Washington and Tehran. It involves five other major partners — Britain, China, France, Russia, and Germany — none of which are interested in renegotiating the “better deal” that Trump has said he can get. The agreement has also been enshrined in a U.N. Security Council resolution, which if violated by the United States would enrage not only Tehran but also the other signatories.
“If unreasonable moves are made by Trump, and Iran continues to abide by the nuclear commitments, Europe, Russia, and China are highly likely to side with Iran, and the unified stance on sanctions in pre-2013 days will be broken,” said Ellie Geranmayeh, a policy fellow at the European Council on Foreign Relations.
The fracturing of this international consensus would make any multilateral effort, akin to the past sanctions regime that brought Iran to the negotiating table in the first place, impossible. Iran has already begun to open its doors to foreign investors: It has increased its global oil exports to pre-sanctions levels and signed major business contracts with foreign companies, including multibillion-dollar orders with Airbus and Boeing to replace its civilian air fleet. The latter contract was Iran’s first deal with a U.S. aviation firm since the Islamic revolution of 1979, marking a concrete sign of change within the regime.
Europe’s desire to do business has been led by Germany — though the gains have been smaller than anticipated. European banks, which were previously fined by U.S. regulators for breaching sanctions, remain wary of doing business in Tehran. Russia, China, and increasingly Turkey have endeavored to fill the gap, seeking to make deals in local currencies rather than the dollar.
The quest for investment explains Iran’s determination to stick to the nuclear deal. Khamenei, who has the final word on all policy matters in the Islamic Republic, backed the accord for economic reasons. The 77-year-old supreme leader wants Iran to overtake Saudi Arabia as the Middle East’s dominant economic power, adding to Tehran’s political and military strength. Insiders in Tehran say this was the biggest factor in his decision to support President Hassan Rouhani’s government in the nuclear talks. Despite his skepticism of diplomacy, Khamenei conceded that Shiite Iran could never supplant Sunni Saudi Arabia economically unless sanctions were lifted.
The nuclear deal has already served as a catalyst for economic growth in Iran. When Rouhani was elected in 2013, the economy was in a deep recession. For the six-month period ending in September last year, it grew at 7.4 percent. No wonder Rouhani wants to keep the deal in place.
“Renegotiation is out of the question,” the Iranian president said last week.
But there remains one way Trump could unilaterally sabotage the agreement. As president, he could allow waivers of past Iran sanctions, signed by former President Barack Obama under executive order, to lapse. Doing so would reinstitute penalties against non-Americans for dealing with Iran in banking, insurance, energy, shipping, and many other industries. This would unwind the whole agreement, according to Geranmayeh. “If Trump fails to renew these [waivers], sanctions snap back, essentially,” she said.
Refusing to sign the waivers would seem to go against the advice of retired Gen. James Mattis, Trump’s choice for secretary of defense and a frequent critic of Iran. At his confirmation hearing on Jan. 12, Mattis pointed out the undesirable consequences of the United States going rogue.
“It is an imperfect arms control agreement; it’s not a friendship treaty. But when America gives her word, we have to live up to it and work with our allies,” he said.
Trump’s pick for CIA director, Rep. Mike Pompeo, is also outspoken on Iran, but he struck a different note in his confirmation hearing. The Kansas congressman named Russia, China, North Korea, and the Islamic State when asked to list the biggest threats to the United States, omitting Iran from the category. “While I opposed the Iran deal as a member of Congress, if confirmed, my role will change,” to verifying that Iran was complying with its terms, Pompeo said. The Iranians, he added, are “professionals at cheating.”
In Tehran, the biggest concerns are Trump’s general unpredictability, the “Iranophobia” of his cabinet appointees, and that pressure from Congress could derail the deal. On Jan. 21, one day after Trump’s inauguration, Israeli Prime Minister Benjamin Netanyahu said he intended to discuss Iran with the new president. The Iranian government isn’t sanguine about Trump, and both Khamenei and Rouhani have become increasingly bellicose about the United States since November.
But it’s also true that Iran no longer feels isolated. Under the anti-Western presidency of Mahmoud Ahmadinejad, Iran was not considered a worthy diplomatic partner by many countries. Rouhani was elected to change that — and he has. The president, who faces a re-election race in May, still aims to stop Tehran and Washington from slipping toward confrontation, but he and other Iranian officials believe they are better positioned to respond to a hostile U.S. administration than before.
“Iran has the option of restarting its nuclear program if it is forced to do so,” said Foad Izadi, a U.S.-educated professor at the University of Tehran.
For all of Trump’s barbs, his stance on Iran has been littered with as many contradictions as in other policy areas. He has said he is not interested in regime change in other countries and argued that Iran and Russia are fighting terrorism in Syria and Iraq more effectively than the United States. Such statements, combined with his “Make America Great Again” slogan, suggest that as president he will place greater emphasis on domestic policy than on international affairs.
If he tries to reverse the Iran deal, however, he could very well find himself disappointed. Trump admitted in the AIPAC speech that he was a “newcomer to politics.” Managing relations with Tehran will certainly be a challenging introduction.
Chip Somodevilla/Getty Images |
<filename>src/main/java/com/active4j/hr/yc/util/YcBaoxianType.java
package com.active4j.hr.yc.util;
public enum YcBaoxianType {
}
|
<gh_stars>0
//! MIPS CP0 Context register.
//!
//! See Figure 6.5 in IDT R30xx Manual on page 6-4.
use bit_field::BitField;
/// Context Register.
#[derive(Clone, Copy, Debug)]
pub struct ContextRegister {
pub bits: u32,
}
impl Default for ContextRegister {
fn default() -> Self {
Self::new()
}
}
impl ContextRegister {
/// Returns a new Context register.
pub fn new() -> Self {
ContextRegister { bits: 0 }
}
register_field!(get_ptebase, set_ptebase, 21, 31);
register_field!(get_badvpn, set_badvpn, 2, 20);
}
impl From<u32> for ContextRegister {
fn from(val: u32) -> Self {
ContextRegister { bits: val }
}
}
impl From<ContextRegister> for u32 {
fn from(val: ContextRegister) -> Self {
val.bits
}
}
|
/**
* A Callable that pauses execution of its calling threads.
*/
public static class PauseCallable<T> implements Callable<T>
{
private final long _pauseCall;
private final CountDownLatch _paused;
private final CountDownLatch _resume;
private final Callable<T> _target;
private final AtomicLong _calls = new AtomicLong(0);
public PauseCallable(long pauseCall, CountDownLatch paused, CountDownLatch resume, Callable<T> target)
{
_pauseCall = pauseCall;
_paused = paused;
_resume = resume;
_target = target;
}
/** The number of times this object has been called. */
public long getCalls()
{
return _calls.get();
}
@Override
public T call()
throws Exception
{
if (_calls.incrementAndGet() >= _pauseCall)
{
_paused.countDown();
_resume.await();
}
return _target.call();
}
} |
<reponame>lucasgabrielsantos/CursoPOO
package cursoemvideo.aula02;
public class Aula02_1 {
public static void main(String[] args) {
Cadeira cad1 = new Cadeira();
cad1.tipo = "Plástico";
cad1.cor = "Preta";
cad1.peso = 5;
cad1.temgente();
cad1.temninguem();
cad1.status();
cad1.sentar();
Cadeira cad2 = new Cadeira();
cad2.tipo = "Escritório";
cad2.cor = "Cinza";
cad2.peso = 6;
cad2.temgente();
cad2.status();
cad2.sentar();
}
}
|
<reponame>chusopr/cloudbreak
package com.sequenceiq.it.cloudbreak.filesystem;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.springframework.beans.factory.annotation.Value;
import org.testng.Assert;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Optional;
import org.testng.annotations.Parameters;
import org.testng.annotations.Test;
import com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint;
import com.sequenceiq.cloudbreak.client.CloudbreakClient;
import com.sequenceiq.it.IntegrationTestContext;
import com.sequenceiq.it.cloudbreak.AbstractCloudbreakIntegrationTest;
import com.sequenceiq.it.cloudbreak.CloudbreakITContextConstants;
import com.sequenceiq.it.cloudbreak.CloudbreakUtil;
import com.sequenceiq.it.cloudbreak.SshUtil;
import com.sequenceiq.it.util.ResourceUtil;
public class FilesystemTest extends AbstractCloudbreakIntegrationTest {
@Value("${integrationtest.defaultPrivateKeyFile}")
private String defaultPrivateKeyFile;
private final Map<String, String> fsParams = new HashMap<>();
@BeforeMethod
public void setContextParameters() {
Assert.assertNotNull(getItContext().getContextParam(CloudbreakITContextConstants.STACK_ID), "Stack id is mandatory.");
Assert.assertNotNull(getItContext().getContextParam(CloudbreakITContextConstants.CLOUDPROVIDER_PARAMETERS, Map.class),
"Cloudprovider parameters are mandatory.");
}
@Test
@Parameters({"filesystemType", "filesystemName", "folderPrefix", "wasbContainerName", "sshCommand", "sshUser", "sshChecker"})
public void testFileSystem(String filesystemType, String filesystemName, String folderPrefix, @Optional("it-container") String wasbContainerName,
String sshCommand, @Optional("cloudbreak") String sshUser, String sshChecker) throws IOException {
//GIVEN
        Assert.assertTrue(new File(defaultPrivateKeyFile).exists(), "Private cert file not found: " + defaultPrivateKeyFile);
fsParams.put("filesystemType", filesystemType);
fsParams.put("filesystemName", filesystemName);
fsParams.put("folderPrefix", folderPrefix);
fsParams.put("wasbContainerName", wasbContainerName);
IntegrationTestContext itContext = getItContext();
String stackId = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
StackV1Endpoint stackV1Endpoint = itContext.getContextParam(CloudbreakITContextConstants.CLOUDBREAK_CLIENT, CloudbreakClient.class).stackV1Endpoint();
String masterIp = CloudbreakUtil.getAmbariIp(stackV1Endpoint, stackId, itContext);
Map<String, String> cloudProviderParams = itContext.getContextParam(CloudbreakITContextConstants.CLOUDPROVIDER_PARAMETERS, Map.class);
sshCommand = ResourceUtil.readStringFromResource(applicationContext, sshCommand.replaceAll("\n", ""));
if ("WASB".equals(filesystemType)) {
FilesystemUtil.createWasbContainer(cloudProviderParams, filesystemName, wasbContainerName);
}
//WHEN
boolean sshResult = SshUtil.executeCommand(masterIp, defaultPrivateKeyFile, sshCommand, sshUser, SshUtil.getSshCheckMap(sshChecker));
//THEN
Assert.assertTrue(sshResult, "Ssh command executing was not successful");
}
@AfterTest
public void cleanUpFilesystem() throws Exception {
IntegrationTestContext itContext = getItContext();
Map<String, String> cloudProviderParams = itContext.getContextParam(CloudbreakITContextConstants.CLOUDPROVIDER_PARAMETERS, Map.class);
FilesystemUtil.cleanUpFiles(applicationContext, cloudProviderParams, fsParams.get("filesystemType"), fsParams.get("filesystemName"),
fsParams.get("folderPrefix"), fsParams.get("wasbContainerName"));
}
} |
/**
* Globus Information System.
*
 * Creating a Globus Resource Factory initializes the Grid-Globus bindings.
* The current resource system provides information about the Globus implementation.
*/
public class GlobusRSFactory extends VRSFactory
{
    // "grid" is covered by the info system, as "grid" is generic, and "globus" is
    // implementation specific.
    // "voms" should be covered by GridInfosystem; this (minimal) resource system
    // is only for globus stuff.
private static String schemes[]={"globus"};
static
{
// Static Initializer! -> registers Globus Bindings
GlobusUtil.init();
}
@Override
public void clear()
{
}
@Override
public String getName()
{
return "GlobusRS";
}
@Override
public String[] getResourceTypes()
{
return null;
}
@Override
public String[] getSchemeNames()
{
return schemes;
}
@Override
public VResourceSystem createNewResourceSystem(VRSContext context,
ServerInfo info, VRL location) throws VlException
{
return new GlobusInfoSystem(context,location);
}
} |
<reponame>mkannwischer/PQClean<filename>crypto_kem/kyber768-90s/avx2/reduce.h
#ifndef PQCLEAN_KYBER76890S_AVX2_REDUCE_H
#define PQCLEAN_KYBER76890S_AVX2_REDUCE_H
#include "params.h"
#include <immintrin.h>
void PQCLEAN_KYBER76890S_AVX2_reduce_avx(__m256i *r, const __m256i *PQCLEAN_KYBER76890S_AVX2_qdata);
void PQCLEAN_KYBER76890S_AVX2_tomont_avx(__m256i *r, const __m256i *PQCLEAN_KYBER76890S_AVX2_qdata);
#endif
|
package edu.tamu.app.controller;
import static edu.tamu.weaver.response.ApiStatus.SUCCESS;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import edu.tamu.app.model.User;
import edu.tamu.app.model.repo.UserRepo;
import edu.tamu.weaver.auth.annotation.WeaverCredentials;
import edu.tamu.weaver.auth.model.Credentials;
import edu.tamu.weaver.response.ApiResponse;
/**
* User Controller
*/
@RestController
@RequestMapping("/users")
public class UserController {
@Autowired
private UserRepo userRepo;
/**
* Get credentials.
*
* @param credentials
* @WeaverCredentials Credentials
* @return ApiResponse
*/
@RequestMapping("/credentials")
@PreAuthorize("hasRole('USER')")
public ApiResponse credentials(@WeaverCredentials Credentials credentials) {
return new ApiResponse(SUCCESS, credentials);
}
/**
* Get all users.
*
* @return ApiResponse
*/
@RequestMapping
@PreAuthorize("hasRole('MANAGER')")
public ApiResponse allUsers() {
return new ApiResponse(SUCCESS, userRepo.findAll());
}
/**
* Update user.
*
* @param user
* @RequestBody User
* @return ApiResponse
*/
@RequestMapping("/update")
@PreAuthorize("hasRole('MANAGER')")
public ApiResponse updateUser(@RequestBody User user) {
user = userRepo.update(user);
return new ApiResponse(SUCCESS, user);
}
/**
* Delete user.
*
* @param user
* @RequestBody User
* @return ApiResponse
*/
@RequestMapping("/delete")
@PreAuthorize("hasRole('MANAGER')")
public ApiResponse delete(@RequestBody User user) {
userRepo.delete(user);
return new ApiResponse(SUCCESS);
}
} |
Renal replacement lipomatosis and xanthogranulomatous pyelonephritis: differential diagnosis.
Renal replacement lipomatosis (RRL) is a relatively uncommon entity, although misdiagnosis, mainly as xanthogranulomatous pyelonephritis (XGP), due to lack of awareness by urologists, radiologists, and pathologists may be responsible for underreporting [1,2]. We illustrate a case of RRL that was initially misdiagnosed as XGP, and compare it with a classic case of XGP, underscoring the similarities and the differences between them.
/******************************************************************
* Create a natural language paraphrase of a logical statement. This is the
 * entry point for paraphrasing, but kifExprPara does most of the work.
*
* @param stmt The statement to be paraphrased.
* @param phraseMap An association list of relations and their natural language format statements.
* @param termMap An association list of terms and their natural language format statements.
* @return A String, which is the paraphrased statement.
*/
public static String nlStmtPara(String stmt, Map phraseMap, Map termMap, String language) {
String theStmt;
Stack phraser = new Stack();
int pos = stmt.indexOf("(");
if (pos != -1) {
theStmt = stmt.substring(pos);
}
else {
theStmt = stmt;
System.out.println("Error in NLformatter.nlStmtPara(): statement: " + stmt + " has no opening parenthesis");
return theStmt;
}
String delimit = " ()\"";
StringTokenizer st = new StringTokenizer(theStmt, delimit, true);
if (phraseMap == null) {
System.out.println("Error in NLformatter.nlStmtPara(): phrase map is null.");
return "";
}
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (token.equalsIgnoreCase(" ")) continue;
if (token.equalsIgnoreCase("\"")) {
StringBuffer sb = new StringBuffer();
sb.append(token);
while (st.hasMoreTokens() && !(token = st.nextToken()).equalsIgnoreCase("\""))
sb.append(token);
sb.append(token);
token = sb.toString();
}
if (token.equalsIgnoreCase(")")) {
String paraExp = kifExprPara(phraser,phraseMap,termMap,language);
if (paraExp != null)
phraser.push(paraExp);
else {
System.out.println("Error in NLformatter.nlStmtPara(): English formatting error: " + theStmt + " with stack:");
while (phraser.size() > 0)
System.out.println(" stack: " + phraser.pop().toString());
}
continue;
}
phraser.push(token);
}
if (phraser.size() > 1)
System.out.println("Error in NLformatter.nlStmtPara(): English paraphrasing doesn't complete.");
else if (phraser.size() < 1) {
System.out.println("Error in NLformatter.nlStmtPara(): English paraphrasing fails.");
return null;
}
return phraser.pop().toString();
} |
<filename>src/seed-data/Fermentables/grains.ts
// NOTE: TFermentables is assumed to be defined in the project's shared types;
// the import path below is illustrative and may need adjusting.
import { TFermentables } from '../types';

const grains: TFermentables[] = [
{
name: '<NAME>',
type: 'Grain',
potential: 1.027,
yield: 0.587,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 3,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.06,
notes:
'Acid malt contains acids from natural lactic acids. Used by German brewers to adjust malt PH without chemicals to adhere to German purity laws. Also enhances the head retention.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.015,
moisture: 0.028,
color: 22,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 20,
'max-in-batch': 0.2,
protein: 0.1,
notes:
'Roasted specialty malt used in some English browns, milds and old ales to add color and a biscuit taste. Intense flavor - so limit use. Low diastatic power so must be mashed with well modified malts.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.036,
yield: 0.78,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 26,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 29,
'max-in-batch': 0.1,
protein: 0.118,
notes: 'Provides a very strong malt flavor and aroma to your beer.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.032,
yield: 0.7,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.135,
notes:
'Adds significant body to Porters and Stouts. High haze producing protein prevents use in light beers.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.028,
yield: 0.609,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.15,
protein: 0.117,
notes:
'Raw, unmalted barley can be used to add body to your beer. Use in homebrew requires very fine milling combined with a decoction or multi-stage mash. Performs best when used in small quantities with well modified grains.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.036,
yield: 0.79,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.4,
protein: 0.16,
notes:
'Raw barley that has been popped to open the kernels. Used in place of raw barley for faster conversion and higher s. High in haze producing protein',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.036,
yield: 0.79,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 23,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 6,
'max-in-batch': 0.1,
protein: 0.105,
notes:
'Use for English ales, brown ales and porters. Adds a biscuit like flavor and aroma. Can be used as a substitute for toasted malt.',
},
{
name: 'Black (<NAME>',
type: 'Grain',
potential: 1.025,
yield: 0.55,
'coarse-fine-diff': 0.015,
moisture: 0.06,
color: 500,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.132,
notes:
'Dark color and dry roasted flavor characteristic of Stouts and Porters. Used for Coloring in small amounts, or flavoring of Stouts and Porters in larger amounts.',
},
{
name: '<NAME> (Stout)',
type: 'Grain',
potential: 1.025,
yield: 0.55,
'coarse-fine-diff': 0.015,
moisture: 0.05,
color: 500,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.132,
notes:
'Unmalted barley roasted at high temperature to create a dry, coffee like flavor. Imparts a sharp acrid flavor characteristic of dry stouts. Gives dryness to a stout or porter.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.032,
yield: 0.7,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 65,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0,
notes:
'Imparts a dry, biscuit flavor. Used in nut brown ales, porters and some Belgian ales.',
},
{
name: 'Brumalt',
type: 'Grain',
potential: 1.033,
yield: 0.717,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 23,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.07,
notes:
'Dark German malt developed to add malt flavor of Alt, Marzen and Oktoberfest beers. Helps create authentic maltiness without having to do a decoction mash.',
},
{
name: 'Cara-Pils/Dextrine',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Significantly increases foam/head retention and body of the beer.',
},
{
name: 'CaraFoam',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Significantly increases foam/head retention and body of the beer.',
},
{
name: 'Dextrine',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Significantly increases foam/head retention and body of the beer.',
},
{
name: 'Caraamber',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 30,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: 'Caramel/Crystal Malt - 10L',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 10,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME> - 20L',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 20,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME> - 30L',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 30,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME> - 40L',
type: 'Grain',
potential: 1.034,
yield: 0.74,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 40,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME> - 60L',
type: 'Grain',
potential: 1.034,
yield: 0.74,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 60,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: 'Caramel/Cr<NAME> - 80L',
type: 'Grain',
potential: 1.034,
yield: 0.74,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 80,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: 'Caramel/Cr<NAME> - 90L',
type: 'Grain',
potential: 1.034,
yield: 0.74,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 90,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: 'Caramel/Cr<NAME> - 120L',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 120,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.102,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME> - 140L',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 140,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.102,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME> - 160L',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 160,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.102,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.033,
yield: 0.717,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 56,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0,
notes:
'Caramel, copper colored malt. Used in Belgian ales and German bocks.',
},
{
name: 'Carared',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 20,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0.132,
notes: 'Adds body, color and improves head retention.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.034,
yield: 0.739,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 22,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0,
notes:
'Light Belgian crystal malt. Used in light Trappist and Abbey style Belgian ales.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.028,
yield: 0.6,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 350,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.132,
notes:
'Dark malt that gives a rich red or brown color and nutty flavor. Maintains some malty flavor, not as dark as roasted malt.',
},
{
name: 'Cho<NAME> (UK)',
type: 'Grain',
potential: 1.034,
yield: 0.73,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 450,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.105,
notes:
'Dark malt that gives a rich red or brown color and nutty flavor. Maintains some malty flavor, not as dark as roasted malt.',
},
{
name: 'Corn (Flaked)',
type: 'Grain',
potential: 1.03,
yield: 0.825,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 3,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 1,
protein: 0.1,
notes: 'Generally a neutral flavor, used to reduce maltiness of beer.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.013,
moisture: 0.035,
color: 1,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 10,
'max-in-batch': 1,
protein: 0.1,
notes: 'A base malt from the UK.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.013,
moisture: 0.035,
color: 20,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 10,
'max-in-batch': 0.15,
protein: 0.11,
notes:
'Aromatic malt from Banberg, Germany. Promotes a full flavor and rounds off beer color. Promotes deep red color and malty flavor.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 4,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 53,
'max-in-batch': 1,
protein: 0.106,
notes:
'Also called "English Mild" - a light specialty malt used in Brown Ales',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.013,
moisture: 0.05,
color: 9,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 72,
'max-in-batch': 0.8,
protein: 0.115,
notes:
'Malty-sweet flavor characteristic and adds a reddish amber color to the beer. Does not contribute signficantly to body or head retention.',
},
{
name: '<NAME> - 10L',
type: 'Grain',
potential: 1.035,
yield: 0.77,
'coarse-fine-diff': 0.028,
moisture: 0.05,
color: 10,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 50,
'max-in-batch': 0.8,
protein: 0.135,
notes:
'Malty-sweet flavor characteristic and adds a slight orange color to the beer.',
},
{
name: '<NAME> - 20L',
type: 'Grain',
potential: 1.035,
yield: 0.75,
'coarse-fine-diff': 0.028,
moisture: 0.05,
color: 20,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 25,
'max-in-batch': 0.8,
protein: 0.135,
notes:
'Malty-sweet flavor characteristic and adds a orange to deep orange color to the beer.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 1,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.3,
protein: 0.09,
notes:
'Adds body, mouth feel and head retention to the beer. Adds substantial protein haze to light beers. Protein rest recommended unless flakes are pre-gelatinized.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 1,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.09,
notes:
'Malted oats. Adds body, mouth feel and head retention to the beer Creates chill haze in lighter beers, so is primarily used in dark ones.',
},
{
name: '<NAME> (2 Row) - Belgium',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 3,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 60,
'max-in-batch': 1,
protein: 0.105,
notes: 'Base malt for all beer styles',
},
{
name: '<NAME> (2 Row) - UK',
type: 'Grain',
potential: 1.036,
yield: 0.78,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 3,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 45,
'max-in-batch': 1,
protein: 0.101,
notes: 'Base malt for all English beer styles',
},
{
name: '<NAME> (2 Row) - USA',
type: 'Grain',
potential: 1.036,
yield: 0.79,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 140,
'max-in-batch': 1,
protein: 0.123,
notes: 'Base malt for all beer styles',
},
{
name: '<NAME> (6 Row) - USA',
type: 'Grain',
potential: 1.035,
yield: 0.76,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 150,
'max-in-batch': 1,
protein: 0.13,
notes: 'Base malt for all beer styles',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.034,
yield: 0.74,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 3,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.2,
protein: 0,
notes:
'Robust smoky malt that provides a smoky flavor. Used in scottish ales and wee heavy ales.',
},
{
name: 'Pilsner (2 Row) - Belgium',
type: 'Grain',
potential: 1.036,
yield: 0.79,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 105,
'max-in-batch': 1,
protein: 0.105,
notes: 'Belgian base malt for Continental lagers',
},
{
name: 'Pilsner (2 Row) - Germany',
type: 'Grain',
potential: 1.037,
yield: 0.81,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 110,
'max-in-batch': 1,
protein: 0.11,
notes: 'German base for Pilsners and Bohemian Lagers',
},
{
name: 'Pilsner (2 Row) - UK',
type: 'Grain',
potential: 1.036,
yield: 0.78,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 1,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 60,
'max-in-batch': 1,
protein: 0.1,
notes: 'Pilsner base malt',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.032,
yield: 0.7,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 1,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.25,
protein: 0.1,
notes:
'Used to add fermentable sugar without increasing body. Produces a milder, less grainy tasting beer.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.025,
yield: 0.55,
'coarse-fine-diff': 0.015,
moisture: 0.05,
color: 300,
'recommend-mash': false,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.132,
notes:
'Roasted at high temperature to create a burnt, grainy, coffee like flavor. Imparts a red to deep brown color to beer, and very strong roasted flavor.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.029,
yield: 0.63,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 5,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 75,
'max-in-batch': 0.15,
protein: 0.103,
notes:
'Adds a dry, crisp character to the beer. Yields a deep red color , and a distinctive rye flavor',
},
{
name: 'Rye, Flaked',
type: 'Grain',
potential: 1.036,
yield: 0.783,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0,
notes:
'Imparts a dry, crisp rye flavor to rye beers. Can be easier to mash than raw rye.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.037,
yield: 0.8,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 9,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 1,
protein: 0.115,
notes:
'Malt that has been smoked over an open fire. Creates a distinctive smoke flavor and aroma.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.03,
yield: 0.652,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 180,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0,
notes:
'Extreme caramel aroma and flavored malt. Used in dark Belgian Abbey and Trappist ales.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.033,
yield: 0.72,
'coarse-fine-diff': 0.015,
moisture: 0.025,
color: 50,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 6,
'max-in-batch': 0.1,
protein: 0.105,
notes:
'Use for English ales, nut brown ales and porters. Adds a toasted, biscuit like flavor and aroma',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.033,
yield: 0.717,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 27,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.117,
notes:
'Similar to Biscuit or Victory malt, this malt adds reddish/orange color and improved body without sweetness along with a toasted flavor.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.034,
yield: 0.73,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 25,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 50,
'max-in-batch': 0.15,
protein: 0.132,
notes:
'Toasted malt that adds a Biscuit or toasted flavor to English ales.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.036,
yield: 0.78,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 4,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 50,
'max-in-batch': 0.9,
protein: 0.11,
notes:
'Kiln dried malt darker than Pale Malt, but not as dark as Munich Malt. Imparts a golden to orange color to the beer.',
},
{
name: '<NAME> - Belgium',
type: 'Grain',
potential: 1.037,
yield: 0.81,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 74,
'max-in-batch': 0.6,
protein: 0.115,
notes: 'Malted wheat for use in Wheat beers',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.039,
yield: 0.84,
'coarse-fine-diff': 0.015,
moisture: 0.035,
color: 9,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 10,
'max-in-batch': 0.2,
protein: 0.115,
notes:
'Dark malted wheat base for use in dark wheat styles such as Dunkleweizen.',
},
{
name: 'Wheat Malt - Germany',
type: 'Grain',
potential: 1.039,
yield: 0.84,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 95,
'max-in-batch': 0.6,
protein: 0.125,
notes: 'Malted wheat base for use in all wheat styles',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.035,
yield: 0.77,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.4,
protein: 0.16,
notes:
'Flaked wheat adds to increased body and foam retention. May be used in small amounts to improve head retention and body.',
},
{
name: 'Wheat, Roasted',
type: 'Grain',
potential: 1.025,
yield: 0.543,
'coarse-fine-diff': 0.015,
moisture: 0.04,
color: 425,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.1,
protein: 0.13,
notes:
'Also called Chocolate Wheat Malt. Adds a deep, dark brown color to dunkelweizens and other dark beer styles.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.036,
yield: 0.79,
'coarse-fine-diff': 0.015,
moisture: 0.09,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 0,
'max-in-batch': 0.4,
protein: 0.16,
notes:
'Unmodified wheat that has been popped to open the kernels. Protein rest recommended when mashing.',
},
{
name: '<NAME>',
type: 'Grain',
potential: 1.04,
yield: 0.86,
'coarse-fine-diff': 0.022,
moisture: 0.04,
color: 2,
'recommend-mash': true,
'add-after-boil': false,
'diastatic-power': 130,
'max-in-batch': 0.6,
protein: 0.145,
notes: 'White wheat gives a malty flavor not available from raw wheat.',
},
];
export default grains;
|
ALBANY, N.Y. -- Gov. Andrew Cuomo on Tuesday evening announced a plan to cut taxes on New Yorkers who make between $40,000 and $300,000 a year.
Cuomo told news outlets during a presentation inside the governor's mansion that the plan, if adopted, would save average middle-class taxpayers $250 a year. When fully implemented over the next few years, the savings will rise to $700 annually, he said.
The tax rate decrease proposal is as follows: For those making $40,000 to $150,000, their tax rate would decrease from 6.85 percent to 6.45 percent in the upcoming fiscal year, and the rate would decrease further to 5.5 percent when "fully effective."
For those making between $150,000 and $300,000, the rate would decrease from 6.85 percent to 6.65 percent in 2018 and to 6.0 percent when "fully effective."
The governor did not say when the cuts would be fully implemented under his proposal.
But to afford the tax cut, the so-called "millionaires' tax" will have to stay in effect, the governor said. That tax is an 8.82 percent tax rate for those who make more than $1.6 million a year. The tax was set to expire this year, but Cuomo will push to see it renewed.
"Frankly, we don't have the resources to lose the millionaires' revenues now and have the state function the way it should," Cuomo said.
Earlier in the presentation, Cuomo outlined the fiscal stress the state is facing, including a $3.5 billion deficit.
He also said the tax on high-income earners is crucial to funding other initiatives, including a proposed $163 million plan to cover full tuition costs for SUNY and CUNY students.
There are 45,000 millionaires who pay under that tax rate, Cuomo said. Half of them don't live in New York, and 3 percent live in Upstate New York, he said.
However, John Flanagan (R-Long Island), the Senate's majority leader, has said he opposes the tax and that Cuomo should expect a budget battle. He said Tuesday that the tax might discourage successful people and corporations from moving here, possibly bringing jobs with them.
The budget presentation, which was broadcast live on the governor's website, had angered many in Albany before it even began. Breaking with tradition, Cuomo initially did not schedule a public presentation of the budget.
Instead, he met with lawmakers behind closed doors, angering local news outlets over the lack of transparency. He ultimately agreed to make the public presentation. |
// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
/* Copyright 2013-2018 IBM Corp. */
#ifndef __ERRORLOG_H
#define __ERRORLOG_H
#include <compiler.h>
#include <opal.h>
#include <stdint.h>
#include <ccan/list/list.h>
/* Classification of error/events type reported on OPAL */
/* Platform Events/Errors: Report Machine Check Interrupt */
#define OPAL_PLATFORM_ERR_EVT 0x01
/* INPUT_OUTPUT: Report all I/O related events/errors */
#define OPAL_INPUT_OUTPUT_ERR_EVT 0x02
/* RESOURCE_DEALLOC: Hotplug events and errors */
#define OPAL_RESOURCE_DEALLOC_ERR_EVT 0x03
/* MISC: Miscellaneous error */
#define OPAL_MISC_ERR_EVT 0x04
/* OPAL Subsystem IDs listed for reporting events/errors */
#define OPAL_PROCESSOR_SUBSYSTEM 0x10
#define OPAL_MEMORY_SUBSYSTEM 0x20
#define OPAL_IO_SUBSYSTEM 0x30
#define OPAL_IO_DEVICES 0x40
#define OPAL_CEC_HARDWARE 0x50
#define OPAL_POWER_COOLING 0x60
#define OPAL_MISC_SUBSYSTEM 0x70
#define OPAL_SURVEILLANCE_ERR 0x7A
#define OPAL_PLATFORM_FIRMWARE 0x80
#define OPAL_SOFTWARE 0x90
#define OPAL_EXTERNAL_ENV 0xA0
/*
* During reporting an event/error the following represents
* how serious the logged event/error is. (Severity)
*/
#define OPAL_INFO 0x00
#define OPAL_RECOVERED_ERR_GENERAL 0x10
/* 0x2X series is to denote set of Predictive Error */
/* 0x20 Generic predictive error */
#define OPAL_PREDICTIVE_ERR_GENERAL 0x20
/* 0x21 Predictive error, degraded performance */
#define OPAL_PREDICTIVE_ERR_DEGRADED_PERF 0x21
/* 0x22 Predictive error, fault may be corrected after reboot */
#define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT 0x22
/*
* 0x23 Predictive error, fault may be corrected after reboot,
* degraded performance
*/
#define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_BOOT_DEGRADE_PERF 0x23
/* 0x24 Predictive error, loss of redundancy */
#define OPAL_PREDICTIVE_ERR_LOSS_OF_REDUNDANCY 0x24
/* 0x4X series for Unrecoverable Error */
/* 0x40 Generic Unrecoverable error */
#define OPAL_UNRECOVERABLE_ERR_GENERAL 0x40
/* 0x41 Unrecoverable error bypassed with degraded performance */
#define OPAL_UNRECOVERABLE_ERR_DEGRADE_PERF 0x41
/* 0x44 Unrecoverable error bypassed with loss of redundancy */
#define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY 0x44
/* 0x45 Unrecoverable error bypassed with loss of redundancy and performance */
#define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY_PERF 0x45
/* 0x48 Unrecoverable error bypassed with loss of function */
#define OPAL_UNRECOVERABLE_ERR_LOSS_OF_FUNCTION 0x48
/* 0x50 In case of PANIC */
#define OPAL_ERROR_PANIC 0x50
/*
* OPAL Event Sub-type
* This field provides additional information on the non-error
* event type
*/
#define OPAL_NA 0x00
#define OPAL_MISCELLANEOUS_INFO_ONLY 0x01
#define OPAL_PREV_REPORTED_ERR_RECTIFIED 0x10
#define OPAL_SYS_RESOURCES_DECONFIG_BY_USER 0x20
#define OPAL_SYS_RESOURCE_DECONFIG_PRIOR_ERR 0x21
#define OPAL_RESOURCE_DEALLOC_EVENT_NOTIFY 0x22
#define OPAL_CONCURRENT_MAINTENANCE_EVENT 0x40
#define OPAL_CAPACITY_UPGRADE_EVENT 0x60
#define OPAL_RESOURCE_SPARING_EVENT 0x70
#define OPAL_DYNAMIC_RECONFIG_EVENT 0x80
#define OPAL_NORMAL_SYS_PLATFORM_SHUTDOWN 0xD0
#define OPAL_ABNORMAL_POWER_OFF 0xE0
/* Max user dump size is 14K */
#define OPAL_LOG_MAX_DUMP 14336
/* Origin of error, elog_origin */
#define ORG_SAPPHIRE 1
#define ORG_POWERNV 2
/* Multiple user data sections */
struct elog_user_data_section {
__be32 tag;
__be16 size;
__be16 component_id;
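/* variable-length payload; the [1] is the classic pre-C99 trailing-array
 * idiom, with the actual length presumably given by the size field */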
char data_dump[1];
} __packed;
/*
* All the information regarding an error/event to be reported
* needs to populate this structure using pre-defined interfaces
* only
*/
struct errorlog {
uint16_t component_id;
uint8_t error_event_type;
uint8_t subsystem_id;
uint8_t event_severity;
uint8_t event_subtype;
uint8_t user_section_count;
uint8_t elog_origin;
uint32_t user_section_size;
uint32_t reason_code;
uint32_t additional_info[4];
uint32_t plid;
uint32_t log_size;
uint64_t elog_timeout;
char user_data_dump[OPAL_LOG_MAX_DUMP];
struct list_node link;
};
struct opal_err_info {
uint32_t reason_code;
uint8_t err_type;
uint16_t cmp_id;
uint8_t subsystem;
uint8_t sev;
uint8_t event_subtype;
};
/* Component IDs */
/* In the PEL error log format, the Creator ID is the hypervisor,
 * but we can have various component IDs to distinguish
 * which component in the hypervisor is reporting the error.
 * This is 2 bytes long:
 * the first byte corresponds to the Component ID,
 * the second byte is reserved for the Reason code.
 * The Component ID is mapped to a readable 4-digit ASCII
 * character name in the FSP and displayed.
 */
/* SAPPHIRE components */
#define OPAL_CODEUPDATE 0x4355 /* CU */
#define OPAL_CONSOLE 0x434E /* CN */
#define OPAL_CEC 0x4345 /* CE */
#define OPAL_CHIP 0x4348 /* CH */
#define OPAL_ELOG 0x454C /* EL */
#define OPAL_NVRAM 0x4E56 /* NV */
#define OPAL_RTC 0x5254 /* RT */
#define OPAL_SURVEILLANCE 0x5355 /* SU */
#define OPAL_SYSPARAM 0x5350 /* SP */
#define OPAL_LPC 0x4C50 /* LP */
#define OPAL_UART 0x5541 /* UA */
#define OPAL_OCC 0x4F43 /* OC */
#define OPAL_OP_PANEL 0x4F50 /* OP */
#define OPAL_PHB 0x5048 /* PH */
#define OPAL_PSI 0x5053 /* PS */
#define OPAL_VPD 0x5650 /* VP */
#define OPAL_XSCOM 0x5853 /* XS */
#define OPAL_PCI 0x5043 /* PC */
#define OPAL_MISC 0x4D49 /* MI */
#define OPAL_ATTN 0x4154 /* AT */
#define OPAL_MEM_ERR 0x4D45 /* ME */
#define OPAL_CENTAUR 0x4354 /* CT */
#define OPAL_MFSI 0x4D46 /* MF */
#define OPAL_DUMP 0x4455 /* DU */
#define OPAL_LED 0x4C45 /* LE */
#define OPAL_SENSOR 0x5345 /* SE */
#define OPAL_SLW 0x534C /* SL */
#define OPAL_FSP 0x4650 /* FP */
#define OPAL_I2C 0x4943 /* IC */
#define OPAL_IPMI 0x4950 /* IP */
/* SAPPHIRE SRC component ID*/
#define OPAL_SRC_COMPONENT_CODE_UPDATE 0x1000
#define OPAL_SRC_COMPONENT_XSCOM 0x1100
#define OPAL_SRC_COMPONENT_PCI 0x1200
#define OPAL_SRC_COMPONENT_MISC 0x1300
#define OPAL_SRC_COMPONENT_ATTN 0x1400
#define OPAL_SRC_COMPONENT_MEM_ERR 0x1500
#define OPAL_SRC_COMPONENT_CENTAUR 0x1600
#define OPAL_SRC_COMPONENT_MFSI 0x1700
#define OPAL_SRC_COMPONENT_DUMP 0x1800
#define OPAL_SRC_COMPONENT_LED 0x1900
#define OPAL_SRC_COMPONENT_VPD 0x1a00
#define OPAL_SRC_COMPONENT_CONSOLE 0x1b00
#define OPAL_SRC_COMPONENT_SENSOR 0x2000
#define OPAL_SRC_COMPONENT_SLW 0x2100
#define OPAL_SRC_COMPONENT_FSP 0x2200
#define OPAL_SRC_COMPONENT_I2C 0x2300
#define OPAL_SRC_COMPONENT_IPMI 0x2400
#define OPAL_SRC_COMPONENT_CEC 0x3000
#define OPAL_SRC_COMPONENT_CHIP 0x4000
#define OPAL_SRC_COMPONENT_ELOG 0x5000
#define OPAL_SRC_COMPONENT_NVRAM 0x6000
#define OPAL_SRC_COMPONENT_RTC 0x7000
#define OPAL_SRC_COMPONENT_SURVEILLANCE 0x8000
#define OPAL_SRC_COMPONENT_SYSPARAM 0x9000
#define OPAL_SRC_COMPONENT_LPC 0xa000
#define OPAL_SRC_COMPONENT_UART 0xb000
#define OPAL_SRC_COMPONENT_OCC 0xc000
#define OPAL_SRC_COMPONENT_OP_PANEL 0xd000
#define OPAL_SRC_COMPONENT_PHB 0xe000
#define OPAL_SRC_COMPONENT_PSI 0xf000
enum opal_reasoncode {
/* code update */
OPAL_RC_CU_FLASH = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x10,
OPAL_RC_CU_INIT = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x11,
OPAL_RC_CU_SG_LIST = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x12,
OPAL_RC_CU_COMMIT = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x13,
OPAL_RC_CU_MSG = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x14,
OPAL_RC_CU_NOTIFY = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x15,
OPAL_RC_CU_MARKER_LID = OPAL_SRC_COMPONENT_CODE_UPDATE | 0x16,
/* NVRAM */
OPAL_RC_NVRAM_INIT = OPAL_SRC_COMPONENT_NVRAM | 0x10,
OPAL_RC_NVRAM_OPEN = OPAL_SRC_COMPONENT_NVRAM | 0x11,
OPAL_RC_NVRAM_SIZE = OPAL_SRC_COMPONENT_NVRAM | 0x12,
OPAL_RC_NVRAM_WRITE = OPAL_SRC_COMPONENT_NVRAM | 0x13,
OPAL_RC_NVRAM_READ = OPAL_SRC_COMPONENT_NVRAM | 0x14,
/* CENTAUR */
OPAL_RC_CENTAUR_INIT = OPAL_SRC_COMPONENT_CENTAUR | 0x10,
OPAL_RC_CENTAUR_RW_ERR = OPAL_SRC_COMPONENT_CENTAUR | 0x11,
/* MFSI */
OPAL_RC_MFSI_RW_ERR = OPAL_SRC_COMPONENT_MFSI | 0x10,
/* UART */
OPAL_RC_UART_INIT = OPAL_SRC_COMPONENT_UART | 0x10,
/* OCC */
OPAL_RC_OCC_RESET = OPAL_SRC_COMPONENT_OCC | 0x10,
OPAL_RC_OCC_LOAD = OPAL_SRC_COMPONENT_OCC | 0x11,
OPAL_RC_OCC_PSTATE_INIT = OPAL_SRC_COMPONENT_OCC | 0x12,
OPAL_RC_OCC_TIMEOUT = OPAL_SRC_COMPONENT_OCC | 0x13,
/* RTC */
OPAL_RC_RTC_READ = OPAL_SRC_COMPONENT_RTC | 0x10,
OPAL_RC_RTC_TOD = OPAL_SRC_COMPONENT_RTC | 0x11,
/* SURVEILLANCE */
OPAL_RC_SURVE_INIT = OPAL_SRC_COMPONENT_SURVEILLANCE | 0x10,
OPAL_RC_SURVE_STATUS = OPAL_SRC_COMPONENT_SURVEILLANCE | 0x11,
OPAL_RC_SURVE_ACK = OPAL_SRC_COMPONENT_SURVEILLANCE | 0x12,
OPAL_INJECTED_HIR = OPAL_SRC_COMPONENT_SURVEILLANCE | 0x13,
/* SYSPARAM */
OPAL_RC_SYSPARM_INIT = OPAL_SRC_COMPONENT_SYSPARAM | 0x10,
OPAL_RC_SYSPARM_MSG = OPAL_SRC_COMPONENT_SYSPARAM | 0x11,
/* LPC */
OPAL_RC_LPC_READ = OPAL_SRC_COMPONENT_LPC | 0x10,
OPAL_RC_LPC_WRITE = OPAL_SRC_COMPONENT_LPC | 0x11,
OPAL_RC_LPC_SYNC = OPAL_SRC_COMPONENT_LPC | 0x12,
OPAL_RC_LPC_SYNC_PERF = OPAL_SRC_COMPONENT_LPC | 0x13,
/* OP_PANEL */
OPAL_RC_PANEL_WRITE = OPAL_SRC_COMPONENT_OP_PANEL | 0x10,
/* PSI */
OPAL_RC_PSI_INIT = OPAL_SRC_COMPONENT_PSI | 0x10,
OPAL_RC_PSI_IRQ_RESET = OPAL_SRC_COMPONENT_PSI | 0x11,
OPAL_RC_PSI_TIMEOUT = OPAL_SRC_COMPONENT_PSI | 0x12,
/* XSCOM */
OPAL_RC_XSCOM_RW = OPAL_SRC_COMPONENT_XSCOM | 0x10,
OPAL_RC_XSCOM_INDIRECT_RW = OPAL_SRC_COMPONENT_XSCOM | 0x11,
OPAL_RC_XSCOM_RESET = OPAL_SRC_COMPONENT_XSCOM | 0x12,
OPAL_RC_XSCOM_BUSY = OPAL_SRC_COMPONENT_XSCOM | 0x13,
/* PCI */
OPAL_RC_PCI_INIT_SLOT = OPAL_SRC_COMPONENT_PCI | 0x10,
OPAL_RC_PCI_ADD_SLOT = OPAL_SRC_COMPONENT_PCI | 0x11,
OPAL_RC_PCI_SCAN = OPAL_SRC_COMPONENT_PCI | 0x12,
OPAL_RC_PCI_RESET_PHB = OPAL_SRC_COMPONENT_PCI | 0x10,
/* ATTN */
OPAL_RC_ATTN = OPAL_SRC_COMPONENT_ATTN | 0x10,
/* MEM_ERR */
OPAL_RC_MEM_ERR_RES = OPAL_SRC_COMPONENT_MEM_ERR | 0x10,
OPAL_RC_MEM_ERR_DEALLOC = OPAL_SRC_COMPONENT_MEM_ERR | 0x11,
/* DUMP */
OPAL_RC_DUMP_INIT = OPAL_SRC_COMPONENT_DUMP | 0x10,
OPAL_RC_DUMP_LIST = OPAL_SRC_COMPONENT_DUMP | 0x11,
OPAL_RC_DUMP_ACK = OPAL_SRC_COMPONENT_DUMP | 0x12,
OPAL_RC_DUMP_MDST_INIT = OPAL_SRC_COMPONENT_DUMP | 0x13,
OPAL_RC_DUMP_MDST_UPDATE = OPAL_SRC_COMPONENT_DUMP | 0x14,
OPAL_RC_DUMP_MDST_ADD = OPAL_SRC_COMPONENT_DUMP | 0x15,
OPAL_RC_DUMP_MDST_REMOVE = OPAL_SRC_COMPONENT_DUMP | 0x16,
/* LED */
OPAL_RC_LED_SPCN = OPAL_SRC_COMPONENT_LED | 0x10,
OPAL_RC_LED_BUFF = OPAL_SRC_COMPONENT_LED | 0x11,
OPAL_RC_LED_LC = OPAL_SRC_COMPONENT_LED | 0x12,
OPAL_RC_LED_STATE = OPAL_SRC_COMPONENT_LED | 0x13,
OPAL_RC_LED_SUPPORT = OPAL_SRC_COMPONENT_LED | 0x14,
/* SENSOR */
OPAL_RC_SENSOR_INIT = OPAL_SRC_COMPONENT_SENSOR | 0x10,
OPAL_RC_SENSOR_READ = OPAL_SRC_COMPONENT_SENSOR | 0x11,
OPAL_RC_SENSOR_ASYNC_COMPLETE
= OPAL_SRC_COMPONENT_SENSOR | 0x12,
/* SLW */
OPAL_RC_SLW_INIT = OPAL_SRC_COMPONENT_SLW | 0x10,
OPAL_RC_SLW_SET = OPAL_SRC_COMPONENT_SLW | 0x11,
OPAL_RC_SLW_GET = OPAL_SRC_COMPONENT_SLW | 0x12,
OPAL_RC_SLW_REG = OPAL_SRC_COMPONENT_SLW | 0x13,
/* FSP */
OPAL_RC_FSP_POLL_TIMEOUT = OPAL_SRC_COMPONENT_FSP | 0x10,
OPAL_RC_FSP_MBOX_ERR = OPAL_SRC_COMPONENT_FSP | 0x11,
OPAL_RC_FSP_DISR_HIR_MASK = OPAL_SRC_COMPONENT_FSP | 0x12,
/* I2C */
OPAL_RC_I2C_INIT = OPAL_SRC_COMPONENT_I2C | 0x10,
OPAL_RC_I2C_START_REQ = OPAL_SRC_COMPONENT_I2C | 0x11,
OPAL_RC_I2C_TIMEOUT = OPAL_SRC_COMPONENT_I2C | 0x12,
OPAL_RC_I2C_TRANSFER = OPAL_SRC_COMPONENT_I2C | 0x13,
OPAL_RC_I2C_RESET = OPAL_SRC_COMPONENT_I2C | 0x14,
/* IPMI */
OPAL_RC_IPMI_REQ = OPAL_SRC_COMPONENT_IPMI | 0x10,
OPAL_RC_IPMI_RESP = OPAL_SRC_COMPONENT_IPMI | 0x11,
OPAL_RC_IPMI_DMA_ERROR_RESP
= OPAL_SRC_COMPONENT_IPMI | 0x12,
/* Platform error */
OPAL_RC_ABNORMAL_REBOOT = OPAL_SRC_COMPONENT_CEC | 0x10,
/* FSP console */
OPAL_RC_CONSOLE_HANG = OPAL_SRC_COMPONENT_CONSOLE | 0x10,
};
#define OPAL_ELOG_SEC_DESC 0x44455343
#define DEFINE_LOG_ENTRY(reason, type, id, subsys, \
severity, subtype) static struct opal_err_info err_##reason = \
{ .reason_code = reason, .err_type = type, .cmp_id = id, \
.subsystem = subsys, .sev = severity, .event_subtype = subtype }
/* This is a wrapper around the error log function, which creates
 * and commits the error to the FSP.
 * Used for simple error logging.
 * Returns a Log ID. If an error involves a service processor that needs
 * to be kicked, this Log ID can be sent to the service processor to explain
 * *why* we kicked it. Log ID = -1 on error.
 */
uint32_t log_simple_error(struct opal_err_info *e_info,
const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
#define e_info(reason_code) err_##reason_code
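/*
 * Illustrative usage sketch (not part of the original header); the reason
 * code and severity are example values taken from the definitions above:
 *
 *	DEFINE_LOG_ENTRY(OPAL_RC_UART_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_UART,
 *			 OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
 *			 OPAL_NA);
 *
 *	log_simple_error(&e_info(OPAL_RC_UART_INIT),
 *			 "UART: init failed with rc %d", rc);
 */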
struct errorlog *opal_elog_create(struct opal_err_info *e_info,
uint32_t tag) __warn_unused_result;
void log_add_section(struct errorlog *buf, uint32_t tag);
void log_append_data(struct errorlog *buf, unsigned char *data, uint16_t size);
void log_append_msg(struct errorlog *buf,
const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
void log_commit(struct errorlog *elog);
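/*
 * Illustrative multi-section sketch (assumes an err_<reason> entry defined
 * via DEFINE_LOG_ENTRY above; `data` and `size` are hypothetical):
 *
 *	struct errorlog *buf = opal_elog_create(&e_info(OPAL_RC_DUMP_INIT), 0);
 *	if (buf) {
 *		log_append_msg(buf, "DUMP: init failed");
 *		log_add_section(buf, OPAL_ELOG_SEC_DESC);
 *		log_append_data(buf, data, size);
 *		log_commit(buf);
 *	}
 */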
/* Called by the backend after an error has been logged. If the
 * error could not be logged successfully, success is set to false. */
void opal_elog_complete(struct errorlog *elog, bool success);
int elog_init(void);
#endif /* __ERRORLOG_H */
|
use std::fmt::Display;
use regex::Regex;
use crate::{
errors::PbCommitMessageLintsError,
external::vcs::Vcs,
lints::{
duplicate_trailers::lint_duplicated_trailers,
missing_jira_issue_key::lint_missing_jira_issue_key,
missing_pivotal_tracker_id::lint_missing_pivotal_tracker_id,
Lints::{DuplicatedTrailers, JiraIssueKeyMissing, PivotalTrackerIdMissing},
},
};
use std::{convert::TryFrom, fs::File, io::Read, path::PathBuf};
pub struct CommitMessage {
contents: String,
}
impl CommitMessage {
#[must_use]
pub fn new(contents: String) -> CommitMessage {
CommitMessage { contents }
}
pub fn matches_pattern(&self, re: &Regex) -> bool {
re.is_match(&self.contents)
}
#[must_use]
pub fn get_trailer(&self, trailer: &str) -> Vec<&str> {
self.contents
.lines()
.filter(|line: &&str| CommitMessage::line_has_trailer(trailer, line))
.collect::<Vec<_>>()
}
fn line_has_trailer(trailer: &str, line: &str) -> bool {
line.starts_with(&format!("{}:", trailer))
}
}
impl TryFrom<PathBuf> for CommitMessage {
type Error = PbCommitMessageLintsError;
fn try_from(value: PathBuf) -> Result<Self, Self::Error> {
let mut file = File::open(value)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)
.map_err(PbCommitMessageLintsError::from)
.map(move |_| CommitMessage::new(buffer))
}
}
impl Display for CommitMessage {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.contents)
}
}
#[cfg(test)]
mod test_commit_message {
use pretty_assertions::assert_eq;
use regex::Regex;
use crate::lints::CommitMessage;
#[test]
fn with_trailers() {
let commit = CommitMessage::new(
r#"Some Commit Message
Anything: Some Trailer
Anything: Some Trailer
Another: Trailer
"#
.into(),
);
assert_eq!(vec!["Another: Trailer"], commit.get_trailer("Another"));
assert_eq!(
vec!["Anything: Some Trailer", "Anything: Some Trailer"],
commit.get_trailer("Anything")
)
}
#[test]
fn regex_matching() {
let commit = CommitMessage::new(
r#"Some Commit Message
Anything: Some Trailer
Anything: Some Trailer
Another: Trailer
"#
.into(),
);
assert_eq!(
true,
commit.matches_pattern(&Regex::new("[AB]nything:").unwrap())
);
assert_eq!(
false,
commit.matches_pattern(&Regex::new("N[oO]thing:").unwrap())
);
}
}
/// The lints that are supported
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum Lints {
DuplicatedTrailers,
PivotalTrackerIdMissing,
JiraIssueKeyMissing,
}
const CONFIG_DUPLICATED_TRAILERS: &str = "duplicated-trailers";
const CONFIG_PIVOTAL_TRACKER_ID_MISSING: &str = "pivotal-tracker-id-missing";
const CONFIG_JIRA_ISSUE_KEY_MISSING: &str = "jira-issue-key-missing";
impl Lints {
pub fn iterator() -> impl Iterator<Item = Lints> {
static LINTS: [Lints; 3] = [
DuplicatedTrailers,
PivotalTrackerIdMissing,
JiraIssueKeyMissing,
];
LINTS.iter().copied()
}
#[must_use]
pub fn config_key(self) -> String {
format!("pb.lint.{}", self)
}
#[must_use]
pub fn lint(self, commit_message: &CommitMessage) -> Option<LintProblem> {
match self {
Lints::DuplicatedTrailers => lint_duplicated_trailers(commit_message),
Lints::PivotalTrackerIdMissing => lint_missing_pivotal_tracker_id(commit_message),
Lints::JiraIssueKeyMissing => lint_missing_jira_issue_key(commit_message),
}
}
}
impl std::fmt::Display for Lints {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.name())
}
}
impl From<Lints> for &'static str {
    fn from(from: Lints) -> Self {
        from.name()
    }
}
impl std::convert::TryFrom<&str> for Lints {
type Error = PbCommitMessageLintsError;
fn try_from(from: &str) -> Result<Self, Self::Error> {
Lints::iterator()
.zip(Lints::iterator().map(|lint| format!("{}", lint)))
.filter_map(
|(lint, name): (Lints, String)| if name == from { Some(lint) } else { None },
)
.collect::<Vec<Lints>>()
.first()
.copied()
.ok_or_else(|| PbCommitMessageLintsError::LintNotFoundError(from.into()))
}
}
impl std::convert::From<Lints> for String {
fn from(from: Lints) -> Self {
format!("{}", from)
}
}
/// Get the lints that are currently enabled
///
/// # Errors
///
/// If there's an error reading from the configuration source
pub fn get_lint_configuration(config: &dyn Vcs) -> Result<Vec<Lints>, PbCommitMessageLintsError> {
Ok(vec![
get_config_or_default(config, Lints::DuplicatedTrailers, true)?,
get_config_or_default(config, Lints::PivotalTrackerIdMissing, false)?,
get_config_or_default(config, Lints::JiraIssueKeyMissing, false)?,
]
.into_iter()
.flatten()
.collect())
}
fn get_config_or_default(
config: &dyn Vcs,
lint: Lints,
default: bool,
) -> Result<Option<Lints>, PbCommitMessageLintsError> {
Ok(config
.get_bool(&lint.config_key())?
.or(Some(default))
.filter(|lint_value| lint_value == &true)
.map(|_| lint))
}
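// Resulting behaviour sketch (hypothetical config values):
//   key unset, default true  -> Ok(Some(lint))   lint enabled by default
//   key set to "false"       -> Ok(None)         lint disabled
//   key set to "true"        -> Ok(Some(lint))   lint enabled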
#[cfg(test)]
mod tests_lints {
use std::convert::TryInto;
use pretty_assertions::assert_eq;
use crate::lints::{Lints, Lints::PivotalTrackerIdMissing};
#[test]
fn it_is_convertible_to_string() {
let string: String = Lints::PivotalTrackerIdMissing.into();
assert_eq!("pivotal-tracker-id-missing".to_string(), string)
}
#[test]
fn it_can_be_created_from_string() {
let lint: Lints = "pivotal-tracker-id-missing".try_into().unwrap();
assert_eq!(PivotalTrackerIdMissing, lint)
}
#[test]
fn it_is_printable() {
assert_eq!(
"pivotal-tracker-id-missing",
&format!("{}", Lints::PivotalTrackerIdMissing)
)
}
}
mod missing_pivotal_tracker_id;
mod duplicate_trailers;
mod missing_jira_issue_key;
#[cfg(test)]
mod tests_get_lint_configuration {
use std::collections::HashMap;
use pretty_assertions::assert_eq;
use crate::{
errors::PbCommitMessageLintsError,
external::vcs::InMemory,
lints::{
get_lint_configuration,
Lints,
Lints::{DuplicatedTrailers, JiraIssueKeyMissing, PivotalTrackerIdMissing},
},
};
#[test]
fn defaults() {
let mut strings = HashMap::new();
let config = InMemory::new(&mut strings);
let actual = get_lint_configuration(&config);
let expected = Ok(vec![DuplicatedTrailers]);
assert_eq!(
expected, actual,
"Expected the list of lint identifiers to be {:?}, instead got {:?}",
expected, actual
)
}
#[test]
fn disabled_duplicated_trailers() {
let mut strings = HashMap::new();
strings.insert("pb.lint.duplicated-trailers".into(), "false".into());
let config = InMemory::new(&mut strings);
let actual = get_lint_configuration(&config);
let expected: Result<Vec<Lints>, PbCommitMessageLintsError> = Ok(vec![]);
assert_eq!(
expected, actual,
"Expected the list of lint identifiers to be {:?}, instead got {:?}",
expected, actual
)
}
#[test]
fn enabled_duplicated_trailers() {
let mut strings = HashMap::new();
strings.insert("pb.lint.duplicated-trailers".into(), "true".into());
let config = InMemory::new(&mut strings);
let actual = get_lint_configuration(&config);
let expected: Result<Vec<Lints>, PbCommitMessageLintsError> = Ok(vec![DuplicatedTrailers]);
assert_eq!(
expected, actual,
"Expected the list of lint identifiers to be {:?}, instead got {:?}",
expected, actual
)
}
#[test]
fn enabled_pivotal_tracker_id() {
let mut strings = HashMap::new();
strings.insert("pb.lint.pivotal-tracker-id-missing".into(), "true".into());
let config = InMemory::new(&mut strings);
let actual = get_lint_configuration(&config);
let expected: Result<Vec<Lints>, PbCommitMessageLintsError> =
Ok(vec![DuplicatedTrailers, PivotalTrackerIdMissing]);
assert_eq!(
expected, actual,
"Expected the list of lint identifiers to be {:?}, instead got {:?}",
expected, actual
)
}
#[test]
fn enabled_jira_issue_key_missing() {
let mut strings = HashMap::new();
strings.insert("pb.lint.jira-issue-key-missing".into(), "true".into());
let config = InMemory::new(&mut strings);
let actual = get_lint_configuration(&config);
let expected: Result<Vec<Lints>, PbCommitMessageLintsError> =
Ok(vec![DuplicatedTrailers, JiraIssueKeyMissing]);
assert_eq!(
expected, actual,
"Expected the list of lint identifiers to be {:?}, instead got {:?}",
expected, actual
)
}
#[test]
fn disabled_jira_issue_key_missing() {
let mut strings = HashMap::new();
strings.insert("pb.lint.jira-issue-key-missing".into(), "false".into());
let config = InMemory::new(&mut strings);
let actual = get_lint_configuration(&config);
let expected = Ok(vec![DuplicatedTrailers]);
assert_eq!(
expected, actual,
"Expected the list of lint identifiers to be {:?}, instead got {:?}",
expected, actual
)
}
}
#[cfg(test)]
mod tests_can_enable_lints_via_a_command {
use std::collections::HashMap;
use pretty_assertions::assert_eq;
use crate::{
external::vcs::InMemory,
lints::{set_lint_status, Lints::PivotalTrackerIdMissing},
};
#[test]
fn we_can_enable_lints() {
let mut strings = HashMap::new();
strings.insert("pb.lint.pivotal-tracker-id-missing".into(), "false".into());
let mut config = InMemory::new(&mut strings);
set_lint_status(&[PivotalTrackerIdMissing], &mut config, true).unwrap();
let expected = "true".to_string();
let actual = strings
.get("pb.lint.pivotal-tracker-id-missing")
.unwrap()
.clone();
assert_eq!(expected, actual);
}
#[test]
fn we_can_disable_lints() {
let mut strings = HashMap::new();
strings.insert("pb.lint.pivotal-tracker-id-missing".into(), "true".into());
let mut config = InMemory::new(&mut strings);
set_lint_status(&[PivotalTrackerIdMissing], &mut config, false).unwrap();
let expected = "false".to_string();
let actual = strings
.get("pb.lint.pivotal-tracker-id-missing")
.unwrap()
.clone();
assert_eq!(expected, actual);
}
}
/// # Errors
///
/// Errors if writing to the VCS config fails
pub fn set_lint_status(
lints: &[Lints],
vcs: &mut dyn Vcs,
status: bool,
) -> Result<(), PbCommitMessageLintsError> {
lints
.iter()
.try_for_each(|lint| vcs.set_str(&lint.config_key(), &status.to_string()))?;
Ok(())
}
#[must_use]
pub fn lint(commit_message: &CommitMessage, lints: Vec<Lints>) -> Vec<LintProblem> {
lints
.into_iter()
.flat_map(|lint| lint.lint(commit_message))
.collect::<Vec<LintProblem>>()
}
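// Illustrative usage sketch for `lint` above (the commit text is made up):
//
//     let message = CommitMessage::new("Some commit\n\nAnything: T\nAnything: T".into());
//     for problem in lint(&message, vec![Lints::DuplicatedTrailers]) {
//         eprintln!("{}", problem);
//     }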
#[derive(Debug, Eq, PartialEq)]
pub struct LintProblem {
help: String,
code: LintCode,
}
impl LintProblem {
#[must_use]
pub fn new(help: String, code: LintCode) -> LintProblem {
LintProblem { help, code }
}
#[must_use]
pub fn code(self) -> LintCode {
self.code
}
}
impl Display for LintProblem {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.help)
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[repr(i32)]
pub enum LintCode {
DuplicatedTrailers = 3,
PivotalTrackerIdMissing,
JiraIssueKeyMissing,
}
impl Lints {
#[must_use]
pub fn name(self) -> &'static str {
match self {
DuplicatedTrailers => CONFIG_DUPLICATED_TRAILERS,
PivotalTrackerIdMissing => CONFIG_PIVOTAL_TRACKER_ID_MISSING,
JiraIssueKeyMissing => CONFIG_JIRA_ISSUE_KEY_MISSING,
}
}
}
|
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from collections import OrderedDict
from dbmodels import Dbinfo, Dbtype, Check_item, Alarm_threshold, Alarm_level
from alarm_message import alarm_message
from check_instruction import check_instruction
conn = sa.create_engine('mysql+pymysql://opr:Opr*[email protected]/dbops')
Session = sessionmaker(bind=conn)
session = Session()
dbs = session.query(Dbinfo)
# dbs = session.query(Dbinfo).filter_by(dbname='KMJTOA')
db_types = session.query(Dbtype).all()
alarm_levels = session.query(Alarm_level).order_by(Alarm_level.level_id).all()
check_items = session.query(Check_item).all()
thresholds = session.query(Alarm_threshold).all()
check_item_dict = {}
check_func_dict = {}
check_insn_dict = {}
alarm_level_dict = OrderedDict()
#alarm_thrd_dict = defaultdict(lambda: (True, 0.01))
alarm_thrd_dict = {}
alarm_msg_dict = {}
db_type_dict = {}
# map check names to handler function names; check_func_dict is filled in
# both directions (check_id -> function name and function name -> check_id)
check_name_to_func = {
    'connectivity': 'check_connectivity',
    'connect_num': 'check_connect_num',
    'fra usage': 'check_fra_usage',
}
for check_item in check_items:
    func_name = check_name_to_func.get(check_item.check_name)
    if func_name is not None:
        alarm_msg_dict[check_item.check_id] = alarm_message[check_item.check_name]
        check_func_dict[check_item.check_id] = func_name
        check_func_dict[func_name] = check_item.check_id
    check_item_dict[check_item.check_id] = (check_item.check_name, check_item.active, check_item.frequency)
for db_type in db_types:
    db_type_dict[db_type.db_type_id] = db_type.db_type_name
# only these (check name, db type) combinations have instructions defined
supported_checks = [
    ('connectivity', 'mysql'),
    ('connectivity', 'oracle'),
    ('connect_num', 'mysql'),
    ('connect_num', 'oracle'),
    ('fra usage', 'oracle'),
]
for check_item in check_items:
    for db_type in db_types:
        key = (check_item.check_name, db_type.db_type_name)
        if key in supported_checks:
            check_insn_dict[(check_item.check_id, db_type.db_type_id)] = check_instruction[key]
for lev in alarm_levels:
    alarm_level_dict[lev.level_id] = lev.level_name
for thrd in thresholds:
    alarm_thrd_dict[(thrd.db_id, thrd.check_id, thrd.level_id)] = (thrd.active, thrd.threshold)
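# Illustrative lookup sketch (uses the dictionaries built above; db_id,
# check_id and level_id are hypothetical values):
#   check_id = check_func_dict['check_connectivity']
#   active, threshold = alarm_thrd_dict.get((db_id, check_id, level_id), (False, None))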
class DbTypeError(ValueError):
    pass
|
/* Method: Push data info with config param, Note: it's saved into database! */
static SFakeDataHandle FakeDb_PushDataInfo(SFakeConfigParam * configParamPtr,
SFakeDataInfo * dataInfoPtr)
{
assert (configParamPtr != NULL);
assert (dataInfoPtr != NULL);
{
SFakeDataItem * dataItem = NULL;
dataItem = (SFakeDataItem *)malloc(sizeof(SFakeDataItem));
if (dataItem != NULL)
{
dataItem->configParam = *configParamPtr;
dataItem->dataInfoPtr = dataInfoPtr;
FakeDb_PushDataItem(dataItem);
}
else
{
gFakeLog.Error("Memory not enough for SFakeDataItem "
"size: %d!",
(int)sizeof(SFakeDataItem));
}
return (SFakeDataHandle)dataItem;
}
} |
#include<bits/stdc++.h>
#define ll long long
using namespace std;
int main(){
ll n,k;
cin>>n>>k;
ll t=n;
ll c2=0,c5=0;
while(n%2==0){
c2++;
n/=2;
}
while(n%5==0){
c5++;
n/=5;
}
// use integer exponentiation; pow() works in double precision and can
// round incorrectly for large results
ll m2=1,m5=1;
for(ll i=0;i<max(k-c2,0LL);i++)m2*=2;
for(ll i=0;i<max(k-c5,0LL);i++)m5*=5;
cout<<max(t,t*m2*m5)<<endl;
}
/**
* Index this directory and return the data store.
*
* @param name the name of the data store
* @param dir the root directory
* @return the data store
*/
public Store indexDirectory(final String name,
final String dir)
{
Store store = new Store(name, dir);
File file = new File(dir);
Node root = visitDirectories(dir, file, null);
if (bInterrupted)
{
return null;
}
store.setDataStore(root);
return store;
} |
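// Illustrative call sketch (the receiver and paths are hypothetical):
//   Store store = indexer.indexDirectory("docs", "/home/user/docs");
//   if (store == null) { /* indexing was interrupted */ }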
Correlation Between High-Density Lipoprotein and Monocyte Subsets in Patients with Stable Coronary Heart Disease
Background High-density lipoprotein (HDL) consists of heterogeneous particles with a variety of structures and functions. Its role in atherosclerosis has been gradually recognized. Studies have shown dysfunction of small HDL in patients with coronary artery disease (CAD). Monocytes play an important role in atherosclerosis and can be divided into 3 subgroups based on the expression of the surface markers CD14 and CD16. This study aimed to investigate the association between HDL and monocyte subsets in CAD patients. Material/Methods A total of 90 patients with stable CAD were selected for this study. Monocytes were divided into classical monocytes (CM, CD14++CD16−), intermediate monocytes (IM, CD14++CD16+), and non-classical monocytes (NCM, CD14+CD16++). HDL components in serum were determined by high-resolution polyacrylamide gel electrophoresis (Quantimetrix HDL Lipoprint system), following the published HDL subfractions assay for patients with cardiovascular diseases and dyslipoproteinemia. Results Serum level of small HDL was positively correlated with circulating proinflammatory NCM (r=0.30; p=0.004), negatively correlated with CM, and not correlated with IM. We also found that disease severity was not associated with diabetes mellitus, glycosylated hemoglobin, hypertension, smoking history, or statin dosage. Conclusions Our study confirmed that small HDL level is associated with an increase in NCM and a decrease in CM, suggesting a proinflammatory relationship between small HDL and intrinsic immune function during the progression of stable CAD.
Background
Cardiovascular disease remains the leading cause of death in elderly patients worldwide despite the rapid development of cardiovascular drugs. Several clinical and epidemiological studies have indicated that the level of high-density lipoprotein (HDL) is closely related to cardiovascular disease, and treatment research has therefore focused on how to raise HDL levels. Nevertheless, increasing HDL level with cholesteryl ester transfer protein (CETP) inhibitors has not reduced the risk of coronary heart disease, but has instead increased morbidity and mortality in patients with the disease. Some researchers have suggested that the function of HDL may be damaged in this pathological environment; it is therefore crucial to assess its function. HDL consists of heterogeneous lipoprotein particles characterized by specific structures, metabolic functions, and resistance to atherosclerosis. Small HDL in healthy people has been confirmed to slow the progression of atherosclerosis by increasing cholesterol efflux and through antioxidant and anti-inflammatory activity. However, its function is abnormal in patients with atherosclerosis and dyslipidemia. Clinical research shows that small HDL is associated with the incidence and severity of CAD, whereas large HDL shows a negative correlation.
Monocytes play an important role in the inflammatory reaction during atherosclerosis. Monocytes can be divided into 3 subgroups based on the expression of surface CD14 and CD16: classical monocytes (CM, CD14++CD16−), intermediate monocytes (IM, CD14++CD16+), and non-classical monocytes (NCM, CD14+CD16++). Both IM and NCM are proinflammatory cells, whose proportions are related to the occurrence of CAD, intima-media thickness, and plaque stability. Total cholesterol, low-density lipoprotein (LDL) cholesterol, and triglycerides are related to proinflammatory NCM, whereas HDL shows a negative correlation. A previous study of 900 CAD patients showed that IM has predictive value for cardiovascular disease. In the current study, we explored the correlation between different HDL components and monocyte subsets in CAD patients.
Patients and study design
All subjects were CAD patients treated in our hospital between September 2009 and August 2010. Inclusion criteria were: age above 18 years and stable CAD diagnosed by selective coronary angiography. Exclusion criteria were: recent acute coronary syndrome (ST-segment elevation myocardial infarction, non-ST-elevation myocardial infarction, or unstable angina), a history of percutaneous coronary intervention (PCI), cardiac failure, cancer, and acute or chronic liver or renal failure. All selected patients provided signed informed consent. The study was approved by the Institutional Ethics Committee of our hospital.
Blood sample
Before selective coronary arteriography, a blood sample was collected from the antecubital vein of each patient into 3.8% sodium citrate, serum-separation, and EDTA tubes (Greiner Bio-One, Frickenhausen, Germany) and centrifuged at 3000 rpm at 4°C for 15 min.
Flow cytometry
White blood cells and monocyte subsets were detected by flow cytometry. The staining and gating of cells are shown in Figure 1. CD45-PerCP monoclonal antibody (BD Biosciences, catalog number 345809, San Diego, CA, USA), CD14-FITC monoclonal antibody (BD Biosciences, catalog number 345784), CD16-APC-H7 monoclonal antibody (BD Biosciences, catalog number 560195), CD3-APC monoclonal antibody (BD Biosciences, catalog number 345767), CD19 (BD Biosciences, catalog number 345791), CD56 (Becton Dickinson, catalog number 341027), and isotype controls were used for staining. The cells were incubated in the dark for 15 min, mixed with 1.5 ml of lysate (BD FACS lysing solution, BD Biosciences), incubated in the dark for another 15 min, and washed 3 times with PBS. Cells were resuspended in 3 ml of fixing solution and analyzed by flow cytometry. Data analyses were performed using FACS Canto II and FACS Diva software (BD Biosciences). CD45+CD3−CD19−CD56− cells with characteristic forward scatter (FSC) and side scatter (SSC) were gated as monocytes. The monocytes were divided into CM, IM, and NCM as previously described. The absolute number of monocytes was calculated based on the number of white blood cells and CD45+ cells detected by flow cytometry.
Detection of lipid
The serum levels of total cholesterol, HDL, LDL, and triglycerides were determined. HDL subgroup components were quantified using the Quantimetrix HDL Lipoprint System® (Quantimetrix Corporation, Redondo Beach, CA, USA). HDL was divided into 10 subgroups based on location on SDS-PAGE: subgroups 1-3, 4-7, and 8-10 represented large, medium, and small HDL particles, respectively (Figure 2).
Statistical analysis
All statistical analyses were performed using SPSS 20.0 software (Chicago, IL, USA). Categorical variables are expressed as count or percentage and compared by χ² or Fisher's exact tests. Numerical data are presented as mean±standard deviation (x̄±s) and analyzed by one-way ANOVA. Skewed data were compared by ANOVA after logarithmic transformation. Correlations were assessed by Pearson correlation analysis. The 3 monocyte subsets (CM, IM, and NCM) were incorporated into the linear regression model. Clinical features, statin usage, and lipid parameters were also incorporated into the model when they were correlated with monocyte subsets or small HDL level (p<0.2). P values smaller than 0.05 were considered statistically significant.
Patient information
A total of 90 patients diagnosed with stable CAD by angiography were enrolled in this study, including 72 males (80%) and 21 smokers (23%). The mean age of the patients was 64.1±10.0 years. Among these patients, 25, 36, and 29 cases had single-, 2-, and 3-vessel coronary artery disease, respectively; 31% and 52% of patients received high- and low-dose statin treatment, respectively, and 17% received no statin therapy.
Correlation of HDL subsets with lipid parameters and cardiovascular risk factors
Small HDL level was significantly correlated with triglycerides, VLDL, LDL, and total cholesterol. Diabetes mellitus, glycosylated hemoglobin, hypertension, and smoking history were not related to HDL subgroup, statin dosage, or severity of disease.
Discussion
Many epidemiological and prospective studies have clearly shown that serum HDL level is negatively correlated with the risk of coronary heart disease. HDL exerts a variety of protective effects on arteries, including promotion of cholesterol efflux and antioxidant, anti-inflammatory, cytoprotective, vasodilatory, and antithrombotic effects. Moreover, several studies have also confirmed that small HDL particles can potentially prevent atherosclerosis.
During dyslipidemia, including elevation of triglycerides or total cholesterol, small HDL level is significantly increased whereas the number of large HDL particles is substantially reduced, leading to significant changes in HDL metabolism and in the distribution of subsets. This study confirmed that serum level of small HDL was correlated with lipid indexes such as total cholesterol, LDL, VLDL, and triglycerides, but not with total HDL, lipoprotein, or statin usage in 90 patients with stable CAD diagnosed by angiography.
The level of small HDL changes in patients with dyslipidemia or obesity and in patients with cardiovascular disease. A study of 115 patients with CAD suggested that large HDL level was significantly increased, as revealed by coronary angiography. Another 10-year follow-up study covering 1000 patients revealed that small and large HDLs have prognostic value for the progression of ischemic heart disease. The incidence of CAD in females is more closely related to smaller HDL particles. Furthermore, small HDL level in patients with acute ischemic shock is significantly higher than that in healthy populations. A clinical study of 60 patients confirmed that small HDL is associated with non-calcified plaques on coronary artery CT and intravascular ultrasound. In another study, covering 102 patients with myocardial infarction and 200 healthy controls, large and medium HDL were negatively correlated with early-stage acute myocardial infarction, whereas small HDL level was up-regulated in young patients with acute myocardial infarction. Furthermore, small HDL level is also increased in patients with acute coronary syndrome, whereas large HDL level is reduced.
The heterogeneity of monocytes and its association with atherosclerosis have been confirmed by detecting the expression of the surface markers CD14 and CD16. The number of CD16+ monocytes is increased in acute and chronic inflammation and in atherosclerosis, and these cells are rapidly activated by inflammatory stimulation. Our study demonstrated that the increase in small HDL level was associated with the distribution of proinflammatory monocyte subsets in patients with stable CAD. Specifically, serum level of small HDL was positively correlated with the cell density of NCM (CD14+CD16++), negatively correlated with CM (CD14++CD16−), and not correlated with IM (CD14++CD16+). Several studies have shown that the correlation between total HDL or LDL and monocyte subsets disappears after adjusting for BMI. In this study, the correlation between small HDL and monocyte subsets was independent of age, sex, smoking history, BMI, diabetes, and statin usage.
The patients were divided into 3 subgroups based on the level of small HDL. Monocyte subsets significantly promoted atherosclerosis and other inflammatory responses in the group with the highest level of small HDL. In addition, the proportion of NCM was increased while that of CM was decreased. However, monocyte subsets were not correlated with serum CRP, IL-6, or IL-10. Currently, the association between CD16+ monocytes and hsCRP level remains controversial. While some studies report that CD16+ monocytes were correlated with hsCRP level in patients with unstable angina, others have shown that CD16+ monocytes were related to TNF-α level instead of hsCRP or IL-6 levels. Furthermore, it has been confirmed that several colony-stimulating factors (CSFs) are expressed in various vascular cells and can affect the progression of atherosclerosis by regulating macrophage phenotype and cholesterol uptake. In this study, the plasma level of G-CSF was significantly correlated with small HDL. The association between HDL subgroups and monocyte subsets was only analyzed at a single time point; therefore, its relationship to functional changes during atherosclerosis cannot be determined.
Conclusions
Our study has demonstrated that small HDL level is associated with an elevation in NCM and a reduction in CM, revealing the proinflammatory correlation between small HDL and intrinsic immune function in stable CAD.
package intset
import (
"testing"
)
func initIntSet(begin, end int) *IntSet {
s := &IntSet{}
for x := begin; x <= end; x++ {
s.Add(x)
}
return s
}
func initElems(begin, end int) []int {
var elems []int
for x := begin; x <= end; x++ {
elems = append(elems, x)
}
return elems
}
func TestIntSet_Elems(t *testing.T) {
s := initIntSet(0, 100)
got := s.Elems()
want := initElems(0, 100)
if !equal(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
}
func equal(s1, s2 []int) bool {
if len(s1) != len(s2) {
return false
}
for i, v := range s1 {
if v != s2[i] {
return false
}
}
return true
}
|
//-----------------------------------------------------------------------------
// culls a node to the frustum or area frustum
//-----------------------------------------------------------------------------
bool CBuildWorldListsJob::R_CullNode( mnode_t *pNode )
{
if ( !m_bViewerInSolidSpace && pNode->area > 0 )
{
if ( ( m_RenderAreaBits[pNode->area>>3] & GetBitForBitnum(pNode->area&7) ) == 0 )
return true;
return CullNodeSIMD( m_pAreaFrustum->Element( pNode->area ), pNode );
}
return CullNodeSIMD( *m_pFrustum, pNode );
} |
package charts.builder.spreadsheet;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.poi.hssf.util.HSSFColor;
import org.apache.poi.ss.formula.eval.ErrorEval;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.CellValue;
import org.apache.poi.ss.usermodel.Color;
import org.apache.poi.ss.usermodel.FormulaError;
import org.apache.poi.ss.usermodel.FormulaEvaluator;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.apache.poi.ss.util.CellReference;
import org.apache.poi.xssf.usermodel.XSSFColor;
import org.apache.tika.io.IOUtils;
import play.Logger;
import charts.builder.DataSource;
import charts.builder.Value;
import charts.builder.spreadsheet.external.ResolvedRef;
import charts.builder.spreadsheet.external.SimpleCellLink;
import charts.builder.spreadsheet.external.UnresolvedRef;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
public abstract class SpreadsheetDataSource implements DataSource {
private Workbook workbook;
private FormulaEvaluator evaluator;
private final int defaultSheet;
private class SpreadsheetCellValue implements Value {
private final Cell cell;
public SpreadsheetCellValue(Cell cell) {
this.cell = cell;
}
@Override
public String getValue() {
String result = "";
try {
CellValue cellValue = evaluator().evaluate(cell);
if (cellValue == null) {
return "";
}
switch (cellValue.getCellType()) {
case Cell.CELL_TYPE_BOOLEAN:
result = Boolean.toString(cellValue.getBooleanValue());
break;
case Cell.CELL_TYPE_NUMERIC:
double val = cellValue.getNumberValue();
result = Double.toString(val);
break;
case Cell.CELL_TYPE_STRING:
result = cellValue.getStringValue();
break;
case Cell.CELL_TYPE_BLANK:
result = "";
break;
case Cell.CELL_TYPE_ERROR:
result = ErrorEval.getText(cellValue.getErrorValue());
break;
// CELL_TYPE_FORMULA will never happen
case Cell.CELL_TYPE_FORMULA:
result = "#FORMULAR";
break;
default:
result = "#DEFAULT";
}
} catch(RuntimeException e) {
if(cell.getCellType() == Cell.CELL_TYPE_FORMULA) {
switch(cell.getCachedFormulaResultType()) {
case Cell.CELL_TYPE_NUMERIC:
double val = cell.getNumericCellValue();
result = Double.toString(val);
break;
case Cell.CELL_TYPE_ERROR:
FormulaError fe = FormulaError.forInt(cell.getErrorCellValue());
result = fe.getString();
break;
case Cell.CELL_TYPE_STRING:
result = cell.getStringCellValue();
break;
case Cell.CELL_TYPE_BOOLEAN:
result = Boolean.toString(cell.getBooleanCellValue());
break;
default:
result = "";
}
}
}
return result;
}
@Override
public String toString() {
return asString();
}
@Override
public String asString() {
return getValue();
}
@Override
public Double asDouble() {
String s = getValue();
try {
return new Double(s);
} catch (NumberFormatException e) {
return null;
}
}
@Override
public Integer asInteger() {
String s = getValue();
try {
return new Integer(Math.round(Float.parseFloat(s)));
} catch (NumberFormatException e) {
return null;
}
}
@Override
public java.awt.Color asColor() {
for(Color c : Lists.newArrayList(cell.getCellStyle().getFillForegroundColorColor(),
cell.getCellStyle().getFillBackgroundColorColor())) {
if (c instanceof HSSFColor && (((HSSFColor)c).getTriplet() != null)) {
final short[] rgb = ((HSSFColor)c).getTriplet();
return new java.awt.Color(rgb[0], rgb[1], rgb[2]);
}
if (c instanceof XSSFColor && (((XSSFColor)c).getRgb() != null)) {
final byte[] rgb = ((XSSFColor)c).getRgb();
// Convert bytes to unsigned integers
return new java.awt.Color(rgb[0] & 0xFF, rgb[1] & 0xFF, rgb[2] & 0xFF);
}
}
return null;
}
@Override
public Date asDate() {
try {
return cell.getDateCellValue();
} catch(Exception e) {
final String s = getValue();
// TODO it would be better if we could somehow parse an arbitrary date format
// http://stackoverflow.com/questions/3850784/recognise-an-arbitrary-date-string
// http://stackoverflow.com/questions/3389348/parse-any-date-in-java
final SimpleDateFormat sdf = new SimpleDateFormat("dd/MM/yyyy");
try {
return sdf.parse(s);
} catch(Exception e2) {
throw e;
}
}
}
@Override
public Double asPercent() {
Double value = asDouble();
if(!cell.getCellStyle().getDataFormatString().contains("%") && (value!=null)) {
value = value / 100.0;
}
return value;
}
}
private static class EmptyCell implements Value {
@Override
public String getValue() {
return null;
}
@Override
public String asString() {
return null;
}
@Override
public Double asDouble() {
return null;
}
@Override
public Integer asInteger() {
return null;
}
@Override
public java.awt.Color asColor() {
return null;
}
@Override
public Date asDate() {
return null;
}
@Override
public Double asPercent() {
return null;
}
}
public SpreadsheetDataSource() {
defaultSheet = 0;
}
SpreadsheetDataSource(Workbook workbook, FormulaEvaluator evaluator, int defaultSheet) {
this.workbook = workbook;
this.evaluator = evaluator;
this.defaultSheet = defaultSheet;
}
void init(Workbook workbook, FormulaEvaluator evaluator) {
this.workbook = workbook;
this.evaluator = evaluator;
}
/**
* select value from 1st sheet
*
* @param row
* - starts with 0
* @param col
* - starts with 0
* @throws MissingDataException
*/
public Value select(int row, int col) throws MissingDataException {
return select(null, row, col);
}
public Value select(String sheetname, int row, int col)
throws MissingDataException {
String cellref = new CellReference(row, col).formatAsString();
if (StringUtils.isNotBlank(sheetname)) {
cellref = sheetname + "!" + cellref;
}
return select(cellref);
}
public Value select(String sheetname, String selector) throws MissingDataException {
return select(sheetname+"!"+selector);
}
@Override
public Value select(String selector) throws MissingDataException {
Cell cell = selectCell(selector);
return cell!=null?new SpreadsheetCellValue(cell):new EmptyCell();
}
private Cell selectCell(String selector) throws MissingDataException {
// currently only CellReference selectors are supported like
// [sheet!]<row><column>
// e.g. Coral!A1 or just B20 which will select the cell from the first
// sheet.
CellReference cr = new CellReference(selector);
Sheet sheet;
String sheetName = cr.getSheetName();
if (sheetName != null) {
sheet = getSheet(sheetName);
if (sheet == null) {
throw new MissingDataException(String.format(
"Sheet '%s' does not exist in workbook", sheetName));
}
} else {
sheet = workbook.getSheetAt(defaultSheet);
if (sheet == null) {
throw new MissingDataException(
String.format("Sheet does not exist in workbook"));
}
}
Row row = sheet.getRow(cr.getRow());
if (row == null) {
return null;
}
Cell cell = row.getCell(cr.getCol());
if (cell == null) {
return null;
}
return cell;
}
private Sheet getSheet(String name) {
Sheet sheet = workbook.getSheet(name);
String strippedName = StringUtils.strip(name);
if (sheet == null) {
for (int i = 0; i < workbook.getNumberOfSheets(); i++) {
if (strippedName.equalsIgnoreCase(StringUtils.strip(workbook
.getSheetName(i)))) {
sheet = workbook.getSheetAt(i);
break;
}
}
}
if (sheet == null) {
for (int i = 0; i < workbook.getNumberOfSheets(); i++) {
if (StringUtils.containsIgnoreCase(
StringUtils.strip(workbook.getSheetName(i)), strippedName)) {
sheet = workbook.getSheetAt(i);
break;
}
}
}
return sheet;
}
public boolean hasSheet(String name) {
return getSheet(name) != null;
}
public String getSheetname(int i) {
Sheet sheet = workbook.getSheetAt(i);
if(sheet != null) {
return sheet.getSheetName();
} else {
return null;
}
}
public int sheets() {
return workbook.getNumberOfSheets();
}
public abstract SpreadsheetDataSource toSheet(int sheet);
public SpreadsheetDataSource toSheet(String sheetname) {
Sheet s = getSheet(sheetname);
if(s!= null) {
return toSheet(workbook.getSheetIndex(s));
} else {
return null;
}
}
public String getDefaultSheet() {
return workbook.getSheetName(defaultSheet);
}
public Integer getColumnCount(int row) {
return getColumnCount(defaultSheet, row);
}
public Integer getColumnCount(int i, int row) {
Sheet sheet = workbook.getSheetAt(i);
if(sheet != null) {
Row r = sheet.getRow(row);
if(r != null) {
return Integer.valueOf(r.getLastCellNum());
}
}
return null;
}
public List<Value> selectRow(int row) throws MissingDataException {
List<Value> result = Lists.newArrayList();
Integer max = getColumnCount(row);
if(max == null) {
return result;
}
for(int col = 0;col <= max;col++) {
result.add(select(row, col));
}
return result;
}
public List<Value> selectColumn(int column) throws MissingDataException {
return selectColumn(column, 100);
}
public List<Value> selectColumn(int column, int limit) throws MissingDataException {
List<Value> result = Lists.newArrayList();
Sheet sheet = workbook.getSheetAt(defaultSheet);
int max = Math.min(sheet.getLastRowNum(), limit);
for(int row = 0; row <= max;row++) {
result.add(select(row, column));
}
return result;
}
public static boolean containsString(List<Value> values, String s) {
for(Value v : values) {
if(StringUtils.equals(v.asString(),s)) {
return true;
}
}
return false;
}
Workbook workbook() {
return workbook;
}
FormulaEvaluator evaluator() {
return evaluator;
}
public boolean hasExternalReferences() {
for (int si = 0; si < workbook.getNumberOfSheets();si++) {
Sheet sheet = workbook.getSheetAt(si);
for (Row row : sheet) {
for (Cell cell : row) {
if (externalReference(cell) != null) {
return true;
}
}
}
}
return false;
}
public Set<UnresolvedRef> externalReferences() {
Set<UnresolvedRef> urefs = Sets.newHashSet();
for(int si = 0; si < workbook.getNumberOfSheets();si++) {
Sheet sheet = workbook.getSheetAt(si);
for(Row row : sheet) {
for(Cell cell : row) {
UnresolvedRef uref = externalReference(cell);
if(uref != null) {
//Logger.debug(String.format(
// "found external reference source '%s', source cell '%s', destination cell '%s'",
// uref.source(), uref.link().source(), uref.link().destination()));
urefs.add(uref);
}
}
}
}
return urefs;
}
abstract UnresolvedRef externalReference(Cell cell);
protected UnresolvedRef uref(String sIdOrName, final String sSelector,
final String dSelector) {
return new UnresolvedRef(sIdOrName,
new SimpleCellLink(sSelector, dSelector));
}
public InputStream updateExternalReferences(Set<ResolvedRef> refs) throws IOException {
boolean dirty = false;
for (ResolvedRef ref : refs) {
try {
final Cell dCell = selectCell(ref.link().destination());
if (dCell == null)
continue;
if (ref.source().isDefined()) {
final SpreadsheetDataSource source = ref.source().get();
try {
final Cell sCell = source.selectCell(ref.link().source());
dirty |= updatePrecalculatedValue(dCell, sCell, source.evaluator());
} catch (MissingDataException e) {
dirty |= updatePrecalculatedError(dCell, FormulaError.REF);
}
} else {
dirty |= updatePrecalculatedError(dCell, FormulaError.REF);
}
} catch (Exception e) {
e.printStackTrace();
}
}
try {
evaluateAll();
} catch(RuntimeException e) {
Logger.debug("evaluateAll() failed on updateExternalReferences," +
"some cached formula results may be out of date", e);
}
return dirty ? writeToTempFile() : null;
}
// evaluate all formula cells except external references. The XSSF evaluator seems to
// have issues with external references even if setIgnoreMissingWorkbooks(...) is set
// to true. This workaround will probably not fully resolve the issue, as formulas
// that depend on problematic external references might still fail.
private void evaluateAll() {
for(int i=0; i<workbook.getNumberOfSheets(); i++) {
Sheet sheet = workbook.getSheetAt(i);
for(Row r : sheet) {
for (Cell c : r) {
if (c.getCellType() == Cell.CELL_TYPE_FORMULA && !isExternalReference(c)) {
try {
evaluator.evaluateFormulaCell(c);
} catch(RuntimeException e) {
CellReference cr = new CellReference(c);
Logger.debug(String.format("failed to evaluate cell %s!%s, formula %s." +
" some cached formula results may be out of date",
sheet.getSheetName(), cr.formatAsString(), c.getCellFormula()), e);
}
}
}
}
}
}
private boolean isExternalReference(Cell cell) {
return externalReference(cell) != null;
}
private boolean updatePrecalculatedValue(Cell destination,
Cell source, FormulaEvaluator sEvaluator) {
if(source != null) {
switch(source.getCellType()) {
case Cell.CELL_TYPE_BLANK:
return updatePrecalculatedBlank(destination);
case Cell.CELL_TYPE_BOOLEAN:
return updatePrecalculatedBoolean(destination, source.getBooleanCellValue());
case Cell.CELL_TYPE_ERROR:
return updatePrecalculatedError(destination,
FormulaError.forInt(source.getErrorCellValue()));
case Cell.CELL_TYPE_FORMULA:
try {
return updatePrecalculatedCellValue(destination, sEvaluator.evaluate(source));
} catch(Exception e) {
switch(source.getCachedFormulaResultType()) {
case Cell.CELL_TYPE_NUMERIC:
return updatePrecalculatedNumeric(destination, source.getNumericCellValue());
case Cell.CELL_TYPE_STRING:
return updatePrecalculatedString(destination, source.getStringCellValue());
case Cell.CELL_TYPE_BOOLEAN:
return updatePrecalculatedBoolean(destination, source.getBooleanCellValue());
case Cell.CELL_TYPE_ERROR:
return updatePrecalculatedError(destination,
FormulaError.forInt(source.getErrorCellValue()));
}
}
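// note: if none of the cached result types above matched, control
// falls through to the CELL_TYPE_NUMERIC case below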
case Cell.CELL_TYPE_NUMERIC:
return updatePrecalculatedNumeric(destination, source.getNumericCellValue());
case Cell.CELL_TYPE_STRING:
return updatePrecalculatedString(destination, source.getStringCellValue());
default:
return false;
}
} else {
return updatePrecalculatedError(destination, FormulaError.REF);
}
}
private boolean updatePrecalculatedCellValue(Cell destination, CellValue val) {
if(val != null) {
switch(val.getCellType()) {
case Cell.CELL_TYPE_BOOLEAN:
return updatePrecalculatedBoolean(destination, val.getBooleanValue());
case Cell.CELL_TYPE_NUMERIC:
return updatePrecalculatedNumeric(destination, val.getNumberValue());
case Cell.CELL_TYPE_STRING:
return updatePrecalculatedString(destination, val.getStringValue());
case Cell.CELL_TYPE_BLANK:
return updatePrecalculatedBlank(destination);
case Cell.CELL_TYPE_ERROR:
return updatePrecalculatedError(destination,
FormulaError.forInt(val.getErrorValue()));
default: return false;
}
} else {
return updatePrecalculatedError(destination, FormulaError.REF);
}
}
private boolean updatePrecalculatedBlank(Cell destination) {
return updatePrecalculatedNumeric(destination, 0);
}
private boolean updatePrecalculatedNumeric(Cell destination, double sVal) {
if(isFormula(destination)) {
try {
double dVal = destination.getNumericCellValue();
if(dVal != sVal) {
destination.setCellValue(sVal);
return true;
}
} catch(Exception e) {
destination.setCellValue(sVal);
return true;
}
}
return false;
}
private boolean updatePrecalculatedString(Cell destination, String sVal) {
if(isFormula(destination)) {
try {
String dVal = destination.getStringCellValue();
if(!StringUtils.equals(sVal, dVal)) {
destination.setCellValue(sVal);
return true;
}
} catch(Exception e) {
destination.setCellValue(sVal);
return true;
}
}
return false;
}
private boolean updatePrecalculatedError(Cell destination, FormulaError sError) {
if(isFormula(destination)) {
try {
FormulaError dError = FormulaError.forInt(destination.getErrorCellValue());
if(sError != dError) {
destination.setCellErrorValue(sError.getCode());
return true;
}
} catch(Exception e) {
destination.setCellErrorValue(sError.getCode());
return true;
}
}
return false;
}
private boolean updatePrecalculatedBoolean(Cell destination, boolean sVal) {
if(isFormula(destination)) {
try {
boolean dVal = destination.getBooleanCellValue();
if(dVal != sVal) {
destination.setCellValue(sVal);
return true;
}
} catch(Exception e) {
destination.setCellValue(sVal);
return true;
}
}
return false;
}
private boolean isFormula(Cell cell) {
return (cell != null) && (cell.getCellType() == Cell.CELL_TYPE_FORMULA);
}
private InputStream writeToTempFile() throws IOException {
final File f = File.createTempFile("spreadsheet", "poi");
FileOutputStream out = new FileOutputStream(f);
workbook.write(out);
IOUtils.closeQuietly(out);
return new FileInputStream(f) {
@Override
public void close() throws IOException {
super.close();
FileUtils.deleteQuietly(f);
}
};
}
public int getColumns(int row) {
return workbook.getSheetAt(defaultSheet).getRow(row).getLastCellNum();
}
public int getRows() {
return workbook.getSheetAt(defaultSheet).getLastRowNum();
}
public Iterable<Value> rangeSelect(final int row1, final int column1,
final int row2, final int column2) {
if(row1 == row2) {
return rangeColumnSelect(row1, column1, column2);
} else if(column1 == column2 ) {
return rangeRowSelect(column1, row1, row2);
} else {
throw new IllegalArgumentException("can only select from 1 row or 1 column");
}
}
public Iterable<Value> rangeRowSelect(final int column, final int row1, final int row2) {
return new Iterable<Value>() {
@Override
public Iterator<Value> iterator() {
return rangeIterator(row1, row2, new ValueSelector() {
@Override
public Value select(int i) throws MissingDataException {
return SpreadsheetDataSource.this.select(i, column);
}});
}};
}
public Iterable<Value> rangeColumnSelect(final int row, final int column1, final int column2) {
return new Iterable<Value>() {
@Override
public Iterator<Value> iterator() {
return rangeIterator(column1, column2, new ValueSelector() {
@Override
public Value select(int i) throws MissingDataException {
return SpreadsheetDataSource.this.select(row, i);
}});
}};
}
private interface ValueSelector {
public Value select(int i) throws MissingDataException;
}
private Iterator<Value> rangeIterator(final int from, final int to,
final ValueSelector selector) {
return new Iterator<Value> () {
boolean hasNext = true;
int i = from;
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public Value next() {
if(hasNext) {
hasNext = !(i == to);
try {
return selector.select(i);
} catch(MissingDataException e) {
return new EmptyCell();
} finally {
i += from < to ? 1 : -1;
}
} else {
throw new NoSuchElementException();
}
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}};
}
}
|
import { IElectronDashboardState } from "../type_defs/IDashboardState"
import { ipcRenderer } from "electron"
import { ChannelNames } from "../../../../ipc/ChannelNames"
import { ISaveDashboardFromDialogResp } from "../../../../server/dashboard/dashboardIpcManager"
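// Sends the dashboard state over IPC and resolves with the save outcome.
// Note: the promise never rejects; failures surface as isSuccess === false.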
export const saveDashboardFromDialog = async (state: IElectronDashboardState): Promise<boolean> => {
return new Promise(function (resolve, reject) {
ipcRenderer.send('' + ChannelNames.saveDashboardFromDialog, state)
ipcRenderer.once('' + ChannelNames.saveDashboardFromDialogResp, (event, obj: ISaveDashboardFromDialogResp) => {
resolve(obj.isSuccess)
})
})
} |
/* { dg-do compile } */
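/* Check that subscripting a vector variable works, including when the
   vector is pinned to a hard register (xmm0) under SSE2. */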
typedef int U __attribute__ ((vector_size (16)));
int
foo (int i)
{
#if __SSE2__
register
#endif
U u
#if __SSE2__
asm ("xmm0")
#endif
;
return u[i];
}
|
Electron Enhanced Atomic Layer Deposition (EE-ALD)
Electron enhanced atomic layer deposition (EE-ALD) can dramatically reduce the temperatures required for film growth. Temperature reduction is possible because of electron stimulated desorption (ESD) of surface species. The desorption process creates highly reactive "dangling bond" surface sites. Precursors can then adsorb efficiently on the dangling bonds. Our work has demonstrated the EE-ALD of GaN, BN, Si and Co at room temperature. Film growth was performed using alternating exposures of chemical precursors and low energy electrons. In situ ellipsometry measurements have monitored linear film growth versus number of reaction cycles. Additionally, we have observed the dependence of the EE-ALD growth rates on electron energy. Maximum growth rates have varied from 0.3 A/cycle for Si films at 100-150 eV to 3.2 A/cycle for BN films at 80-160 eV. Recent measurements have also obtained maximum growth rates of 0.5 A/cycle for Co films at 125 eV. EE-ALD is also topographically selective. This area selectivity is derived from the directionality of the electron flux. Surfaces normal to the incident electrons receive full electron flux, whereas surfaces parallel to the incident electrons receive no electron flux. EE-ALD should be useful for the bottom-up fill of high aspect ratio structures.
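As a rough illustration of the arithmetic (a sketch of ours, not part of the original work), the quoted maximum rates convert directly to film thickness under the linear growth that the in situ ellipsometry indicates; the 100-cycle count below is an arbitrary example:

# Back-of-the-envelope thickness estimate from linear EE-ALD growth.
# Rates are the maxima quoted above, in angstroms per cycle; linearity
# is assumed per the in situ ellipsometry observations.
RATES_A_PER_CYCLE = {"Si": 0.3, "BN": 3.2, "Co": 0.5}

def thickness_nm(material, cycles):
    """Estimated film thickness in nanometres after the given cycle count."""
    return RATES_A_PER_CYCLE[material] * cycles / 10.0  # 10 A = 1 nm

for material in RATES_A_PER_CYCLE:
    print(material, thickness_nm(material, 100), "nm after 100 cycles")
|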
#pragma once
#include "misc/functionnal.hh"
#include "misc/parse-error.hh"
#include "misc/random.hh"
#include "misc/random-mt.hh"
#include "misc/rect.hh"
#include "misc/timer.hh"
#include "misc/xor-shift.hh"
|
As interest in the alleged warming of the planet wanes, the global warming inquisition is hoping to make an example of a heretical reporter whose only sin is healthy skepticism.
The enviro-Left is busy attempting to subject London-based Paul Ingrassia, an American journalist brought in by Reuters to beef up its worldwide news operation, to a digital auto-da-fé for insisting that the 2,800 journalists at the news agency at least try to provide fair and balanced accounts of the events of the day.
Ingrassia, by the way, won a Pulitzer Prize and a Gerald Loeb Award in 1993 for his news coverage of management turmoil at General Motors.
This newfound interest in objectivity at Reuters, where the word militant is still preferred over terrorist, appears to mean the agency is running fewer stories about climate change.
That’s fewer, not none. Reuters still diligently covers climate-related issues.
But that’s not good enough for those who embrace the increasingly shaky theory of anthropogenic global warming with religious zeal.
“It is just not responsible in our opinion to be cutting back on an issue that is having such a profound impact on every sector of the economy,” emoted Mindy Lubber, who runs the Ceres sustainable business network, which represents companies and investors worth more than $11 trillion in assets. “This is a financial risk that needs to be looked at and addressed.”
ThinkProgress, a hard-left blog run by John Podesta’s Center for American Progress Action Fund, referred to Ingrassia in a headline as “Openly Hostile to Climate Coverage.”
As Steven Hayward writes at Powerline, a slew of media outlets “are all on the chase, proving 1) the dependence of the climate campaign on a media monopoly, and 2) that the environmental version of the Brezhnev Doctrine lives—what’s there is theirs, and don’t dare change your news coverage.”
The mainstream media freakout began when a disgruntled former Reuters reporter who had covered the global warming beat threw an online temper tantrum after leaving the company. Singapore-based David Fogarty blogged about the editorial direction Reuters took after it hired journalism industry heavyweight Ingrassia, an experienced business reporter and editor, in 2011 to overhaul the company’s approach to news gathering.
Fogarty said he met Ingrassia at a work-related event in 2012 at which the veteran journalist allegedly acknowledged being a climate change skeptic. “Not a rabid skeptic, just someone who wanted to see more evidence mankind was changing the global climate,” Fogarty wrote.
“From very early in 2012, I was repeatedly told that climate and environment stories were no longer a top priority for Reuters and I was asked to look at other areas. Being stubborn, and passionate about my climate change beat, I largely ignored the directive.
“By mid-October, I was informed that climate change just wasn’t a big story for the present, but that it would be if there was a significant shift in global policy, such as the US introducing an emissions cap-and-trade system.”
Fogarty left the company in December, two months after his climate beat was excised. Although two full-time environment beat reporters now cover the subject area for Reuters, Fogarty resorted to conspiracy theorizing.
He claimed there is a growing “climate of fear” within Reuters that makes reporters reluctant to write about climate change.
Smelling blood, the George Soros-funded slander shop Media Matters for America promptly hopped on the anti-Ingrassia bandwagon, hastily beatifying Fogarty by calling him a “whistleblower” in a headline.
MMfA claimed that “Reuters’ coverage of climate change declined by nearly 50 percent under the regime of the current managing editor, lending credence to a former reporter’s claim that a ‘climate of fear’ has gripped the agency.”
But the facts suggest what happened was more akin to an outbreak of journalistic rigor, something disgraced ex-journalist David Brock’s cherry-picking character assassins at Media Matters would have difficulty recognizing.
The so-called study by the Democratic Party’s leading public relations agency examined how many climate change or global warming stories Reuters ran in two distinct periods. The first period was Oct. 19, 2010 to April 19, 2011, before Ingrassia worked at Reuters, and April 19, 2012 to Oct. 19, 2012, after he joined the company.
In the latter period “Reuters filed 48 percent fewer articles on climate change under the new regime, despite the fact that the latter period featured the United Nations Conference on Sustainable Development in Rio de Janeiro, a continuing fight over the European Union’s proposal to impose a carbon tax on international flights, record heat in the U.S. and other noteworthy developments,” according to Media Matters.
Of course none of those events would be of much interest to ordinary news consumers. Only climate change zealots, hardcore leftists, and unusually adventurous investors would consider most of those events to be highly newsworthy.
What else was happening in the world from mid-April to mid-October of last year?
It turns out there were plenty of exigent, newsworthy events that Reuters might have considered to be more worthy of coverage than an old, scientifically dubious doom-and-gloom theory embraced by an affective former U.S. vice president but cared about by few people outside of the green movement.
As the U.S. economy sputtered along and the national debt continued to balloon, there was an unusually nasty, bruising presidential election cycle that happened to be the most expensive and media-saturated in history. Terrorists attacked a U.S. mission in Benghazi, Libya, killing four Americans including a sitting U.S. ambassador, whom they may have sexually tortured. Before the Obama administration eventually admitted that the incident in Libya was a coordinated Islamic terrorist attack, it blamed an obscure anti-Islam filmmaker for the sacking of the mission and made him a real-life political prisoner. The U.S. Supreme Court upheld the constitutionally questionable Affordable Care Act, a.k.a. Obamacare.
Facebook’s hyped-to-the-nth-degree public stock offering floundered. France elected dogmatic socialist Francois Hollande, who promptly launched a new reign of terror against that nation’s long-suffering taxpayers, leading prominent citizens like actor Gerard Depardieu to flee.
The Middle East and North Africa continued to be rocked by fallout from the so-called Arab Spring. The Eurozone crisis, festering since 2009, continued to bring misery to those living in or investing in countries bound by the Maastricht Treaty. London hosted the summer Olympics. A psychopathic dictator who succeeded his father, another psychopathic dictator, made great strides in solidifying his reign in reportedly nuclear North Korea. Gay marriage became legal in several countries.
In short, there was no shortage of interesting, important things to write about.
And naturally, as the scientific case for global warming continues to fall apart the likelihood of the formation of huge markets for trading carbon also declines commensurately. This means the possibility of high-dollar carbon trading will get less media attention.
Strangely, even the ever-watchful guardians of liberal journalism at the venerable Columbia Journalism Review dismiss the green-generated hype. CJR writer Alexis Sobel Fitts volunteers that most U.S. newsrooms have scaled back their climate change coverage since 2010:
In 2011, Environment & Energy Publishing, which produces Greenwire, ClimateWire, and four other news services, estimated they reduced climate coverage by about 13 percent. According to an assessment published by The Daily Climate, The New York Times cut its global warming article count by 15 percent, and the Guardian slashed coverage by 21 percent that same year.
The Times, it should also be recalled, actually closed its news department assigned to cover environmental issues in January of 2013.
Fitts adds parenthetically that “Reuters, too, dropped its climate coverage by 27 percent in 2011, before Ingrassia came aboard.”
Fitts writes that several unidentified Reuters reporters spoke to her on background about a change in the news agency’s editorial stance. Since Ingrassia came aboard “they’ve felt pressure from management to add ‘balance’ to climate change stories by including the views of global-warming skeptics.”
“I’m really glad someone outside the company is looking into this,” she quoted one staffer saying. “I think this is the most worrying thing any of us have seen here.”
How dare Reuters strive to tell more than one side of the story.
Meanwhile, it is worth pointing out that the warming trend ended 15 years ago and since then global temperatures have held steady, if not decreased, while carbon dioxide emissions worldwide have skyrocketed.
“The world added roughly 100 billion tonnes of carbon to the atmosphere between 2000 and 2010. That is about a quarter of all the CO2 put there by humanity since 1750,” the Economist reported in the spring.
The attacks on Ingrassia come as fresh evidence of panic emanates from the environmentalist Left. Activists seem to be realizing that they are losing the battle over this speculative phenomenon known as anthropogenic global warming.
To boost their sagging fortunes, desperate environmentalists are making particularly outrageous claims.
A new “metastudy” spoon-fed to incurious media outlets purports to show a clear link between rising temperatures and violence, especially on the African continent.
President Obama declared war on the coal industry and its workers a few weeks ago. Ignoring the science, the Alarmist-in-Chief declared that climate change was having “profound impacts” on the planet and must be dealt with.
Obama called for America to take the lead in a “coordinated assault” on the perceived problem and snarled, “We don’t have time for a meeting of the flat-earth society.”
The Obama administration is also attempting to stifle climate change skeptics who work for the federal government.
Interior Secretary Sally Jewell told Interior Department staffers this week that fighting climate change is a “privilege” and a “moral imperative.”
“I hope there are no climate change deniers in the Department of Interior,” she said. Labeling climate change skeptics “deniers” is a crude but oft-used smear employed by global warming true believers to blacken the names of their adversaries by associating them with anti-Semitic fanatics who deny the Holocaust happened during World War Two.
Marlo Lewis of the Competitive Enterprise Institute opined that “[s]uch moralizing would be funny were it not for the chilling effect it is bound to have in an agency already mired in group think.”
The British-born Jewell, who was packaged throughout her confirmation process earlier this year as a reasonable environmental activist, started her career as an oil industry engineer. Before taking up her post in Obama’s cabinet, she was CEO of Recreational Equipment Inc. (REI).
Under Ingrassia’s able leadership Reuters may even cover the Jewell story.
Climate change heretics at the Interior Department and elsewhere in the U.S. government would be well advised to hold their tongues — or lose their jobs.
This post first appeared at FrontPage Magazine. |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <mariana-trench/Assert.h>
#include <mariana-trench/CallPositionFrames.h>
#include <mariana-trench/Features.h>
#include <mariana-trench/JsonValidation.h>
#include <mariana-trench/Log.h>
namespace marianatrench {
namespace {
void materialize_via_type_of_ports(
const Method* callee,
Context& context,
const Frame& frame,
const std::vector<const DexType * MT_NULLABLE>& source_register_types,
std::vector<const Feature*>& via_type_of_features_added,
FeatureMayAlwaysSet& inferred_features) {
if (!frame.via_type_of_ports().is_value() ||
frame.via_type_of_ports().elements().empty()) {
return;
}
// Materialize via_type_of_ports into features and add them to the inferred
// features
for (const auto& port : frame.via_type_of_ports().elements()) {
if (!port.is_argument() ||
port.parameter_position() >= source_register_types.size()) {
ERROR(
1,
"Invalid port {} provided for via_type_of ports of method {}.{}",
port,
callee->get_class()->str(),
callee->get_name());
continue;
}
const auto* feature = context.features->get_via_type_of_feature(
source_register_types[port.parameter_position()]);
via_type_of_features_added.push_back(feature);
inferred_features.add_always(feature);
}
}
void materialize_via_value_of_ports(
const Method* callee,
Context& context,
const Frame& frame,
const std::vector<std::optional<std::string>>& source_constant_arguments,
FeatureMayAlwaysSet& inferred_features) {
if (!frame.via_value_of_ports().is_value() ||
frame.via_value_of_ports().elements().empty()) {
return;
}
// Materialize via_value_of_ports into features and add them to the inferred
// features
for (const auto& port : frame.via_value_of_ports().elements()) {
if (!port.is_argument() ||
port.parameter_position() >= source_constant_arguments.size()) {
ERROR(
1,
"Invalid port {} provided for via_value_of ports of method {}.{}",
port,
callee->get_class()->str(),
callee->get_name());
continue;
}
const auto* feature = context.features->get_via_value_of_feature(
source_constant_arguments[port.parameter_position()]);
inferred_features.add_always(feature);
}
}
} // namespace
CallPositionFrames::CallPositionFrames(std::initializer_list<Frame> frames)
: position_(nullptr) {
for (const auto& frame : frames) {
add(frame);
}
}
void CallPositionFrames::add(const Frame& frame) {
if (position_ == nullptr) {
position_ = frame.call_position();
} else {
mt_assert(position_ == frame.call_position());
}
frames_.update(frame.kind(), [&](const Frames& old_frames) {
auto new_frames = old_frames;
new_frames.add(frame);
return new_frames;
});
}
bool CallPositionFrames::leq(const CallPositionFrames& other) const {
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
return frames_.leq(other.frames_);
}
bool CallPositionFrames::equals(const CallPositionFrames& other) const {
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
return frames_.equals(other.frames_);
}
void CallPositionFrames::join_with(const CallPositionFrames& other) {
mt_if_expensive_assert(auto previous = *this);
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
frames_.join_with(other.frames_);
mt_expensive_assert(previous.leq(*this) && other.leq(*this));
}
void CallPositionFrames::widen_with(const CallPositionFrames& other) {
mt_if_expensive_assert(auto previous = *this);
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
frames_.widen_with(other.frames_);
mt_expensive_assert(previous.leq(*this) && other.leq(*this));
}
void CallPositionFrames::meet_with(const CallPositionFrames& other) {
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
frames_.meet_with(other.frames_);
}
void CallPositionFrames::narrow_with(const CallPositionFrames& other) {
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
frames_.narrow_with(other.frames_);
}
void CallPositionFrames::difference_with(const CallPositionFrames& other) {
mt_assert(is_bottom() || other.is_bottom() || position_ == other.position());
frames_.difference_like_operation(
other.frames_, [](const Frames& frames_left, const Frames& frames_right) {
auto frames_copy = frames_left;
frames_copy.difference_with(frames_right);
return frames_copy;
});
}
void CallPositionFrames::map(const std::function<void(Frame&)>& f) {
frames_.map([&](const Frames& frames) {
auto new_frames = frames;
new_frames.map(f);
return new_frames;
});
}
void CallPositionFrames::add_inferred_features(
const FeatureMayAlwaysSet& features) {
if (features.empty()) {
return;
}
map([&features](Frame& frame) { frame.add_inferred_features(features); });
}
LocalPositionSet CallPositionFrames::local_positions() const {
// Ideally this can be stored within `CallPositionFrames` instead of `Frame`.
// Local positions should be the same for a given (callee, call_position).
auto result = LocalPositionSet::bottom();
for (const auto& [_, frames] : frames_.bindings()) {
for (const auto& frame : frames) {
result.join_with(frame.local_positions());
}
}
return result;
}
void CallPositionFrames::add_local_position(const Position* position) {
map([position](Frame& frame) { frame.add_local_position(position); });
}
void CallPositionFrames::set_local_positions(
const LocalPositionSet& positions) {
map([&positions](Frame& frame) { frame.set_local_positions(positions); });
}
void CallPositionFrames::add_inferred_features_and_local_position(
const FeatureMayAlwaysSet& features,
const Position* MT_NULLABLE position) {
if (features.empty() && position == nullptr) {
return;
}
map([&features, position](Frame& frame) {
if (!features.empty()) {
frame.add_inferred_features(features);
}
if (position != nullptr) {
frame.add_local_position(position);
}
});
}
CallPositionFrames CallPositionFrames::propagate(
const Method* callee,
const AccessPath& callee_port,
const Position* call_position,
int maximum_source_sink_distance,
Context& context,
const std::vector<const DexType * MT_NULLABLE>& source_register_types,
const std::vector<std::optional<std::string>>& source_constant_arguments)
const {
if (is_bottom()) {
return CallPositionFrames::bottom();
}
CallPositionFrames result;
auto partitioned_by_kind = partition_map<const Kind*>(
[](const Frame& frame) { return frame.kind(); });
for (const auto& [kind, frames] : partitioned_by_kind) {
std::vector<std::reference_wrapper<const Frame>> non_crtex_frames;
std::vector<std::reference_wrapper<const Frame>> crtex_frames;
for (const auto& frame : frames) {
if (frame.get().is_crtex_producer_declaration()) {
crtex_frames.push_back(frame);
} else {
non_crtex_frames.push_back(frame);
}
}
result.join_with(propagate_crtex_frames(
callee,
callee_port,
call_position,
maximum_source_sink_distance,
context,
source_register_types,
crtex_frames));
// Non-CRTEX frames can be joined into the same callee
std::vector<const Feature*> via_type_of_features_added;
auto non_crtex_frame = propagate_frames(
callee,
callee_port,
call_position,
maximum_source_sink_distance,
context,
source_register_types,
source_constant_arguments,
non_crtex_frames,
via_type_of_features_added);
if (!non_crtex_frame.is_bottom()) {
result.add(non_crtex_frame);
}
}
return result;
}
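// Groups every frame by a caller-supplied key (e.g. its kind), returning
// references into the underlying frames_ storage.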
template <class T>
std::unordered_map<T, std::vector<std::reference_wrapper<const Frame>>>
CallPositionFrames::partition_map(
const std::function<T(const Frame&)>& map) const {
std::unordered_map<T, std::vector<std::reference_wrapper<const Frame>>>
result;
for (const auto& [_, frames] : frames_.bindings()) {
for (const auto& frame : frames) {
auto value = map(frame);
result[value].push_back(std::cref(frame));
}
}
return result;
}
Frame CallPositionFrames::propagate_frames(
const Method* callee,
const AccessPath& callee_port,
const Position* call_position,
int maximum_source_sink_distance,
Context& context,
const std::vector<const DexType * MT_NULLABLE>& source_register_types,
const std::vector<std::optional<std::string>>& source_constant_arguments,
std::vector<std::reference_wrapper<const Frame>> frames,
std::vector<const Feature*>& via_type_of_features_added) const {
if (frames.size() == 0) {
return Frame::bottom();
}
const auto* kind = frames.begin()->get().kind();
int distance = std::numeric_limits<int>::max();
auto origins = MethodSet::bottom();
auto field_origins = FieldSet::bottom();
auto inferred_features = FeatureMayAlwaysSet::bottom();
for (const Frame& frame : frames) {
// Only frames sharing the same kind can be propagated this way.
mt_assert(frame.kind() == kind);
if (frame.distance() >= maximum_source_sink_distance) {
continue;
}
distance = std::min(distance, frame.distance() + 1);
origins.join_with(frame.origins());
field_origins.join_with(frame.field_origins());
// Note: This merges user features with existing inferred features.
inferred_features.join_with(frame.features());
materialize_via_type_of_ports(
callee,
context,
frame,
source_register_types,
via_type_of_features_added,
inferred_features);
materialize_via_value_of_ports(
callee, context, frame, source_constant_arguments, inferred_features);
}
return Frame(
kind,
callee_port,
callee,
/* field_callee */ nullptr, // Since propagate is only called at method
// callsites and not field accesses
call_position,
distance,
std::move(origins),
std::move(field_origins),
std::move(inferred_features),
/* locally_inferred_features */ FeatureMayAlwaysSet::bottom(),
/* user_features */ FeatureSet::bottom(),
/* via_type_of_ports */ {},
/* via_value_of_ports */ {},
/* local_positions */ {},
/* canonical_names */ {});
}
CallPositionFrames CallPositionFrames::propagate_crtex_frames(
const Method* callee,
const AccessPath& callee_port,
const Position* call_position,
int maximum_source_sink_distance,
Context& context,
const std::vector<const DexType * MT_NULLABLE>& source_register_types,
std::vector<std::reference_wrapper<const Frame>> frames) const {
if (frames.size() == 0) {
return CallPositionFrames::bottom();
}
CallPositionFrames result;
const auto* kind = frames.begin()->get().kind();
for (const Frame& frame : frames) {
// Only frames sharing the same kind can be propagated this way.
mt_assert(frame.kind() == kind);
std::vector<const Feature*> via_type_of_features_added;
auto propagated = propagate_frames(
callee,
callee_port,
call_position,
maximum_source_sink_distance,
context,
source_register_types,
{}, // TODO: Support via-value-of for crtex frames
{std::cref(frame)},
via_type_of_features_added);
if (propagated.is_bottom()) {
continue;
}
auto canonical_names = frame.canonical_names();
if (!canonical_names.is_value() || canonical_names.elements().empty()) {
WARNING(
2,
"Encountered crtex frame without canonical names. Frame: `{}`",
frame);
continue;
}
CanonicalNameSetAbstractDomain instantiated_names;
for (const auto& canonical_name : canonical_names.elements()) {
auto instantiated_name = canonical_name.instantiate(
propagated.callee(), via_type_of_features_added);
if (!instantiated_name) {
continue;
}
instantiated_names.add(*instantiated_name);
}
auto canonical_callee_port =
propagated.callee_port().canonicalize_for_method(propagated.callee());
// All fields should be propagated like other frames, except the crtex
// fields. Ideally, origins should contain the canonical names as well,
// but canonical names are strings and cannot be stored in MethodSet.
// Frame is not propagated if none of the canonical names instantiated
// successfully.
if (instantiated_names.is_value() &&
!instantiated_names.elements().empty()) {
result.add(Frame(
kind,
canonical_callee_port,
propagated.callee(),
propagated.field_callee(),
propagated.call_position(),
/* distance (always leaves for crtex frames) */ 0,
propagated.origins(),
propagated.field_origins(),
propagated.inferred_features(),
propagated.locally_inferred_features(),
propagated.user_features(),
propagated.via_type_of_ports(),
propagated.via_value_of_ports(),
propagated.local_positions(),
/* canonical_names */ instantiated_names));
}
}
return result;
}
} // namespace marianatrench
|
Where has Marcel Reece been in the Raiders offense? There is little doubt that Marcel is a difference maker with the ball in his hands but he has not been able to get going yet this year in OC Greg Olson's offense. The Raiders need to do something about this mismanagement of assets because Reece is too damn good of a player not to utilize.
Marcel on the season has touched the ball only 19 times in 6 games so far this year. He has 9 carries for 47 yards with a TD and 10 receptions for 86 yards with a receiving TD in 2013, so why has he not been more involved? When he gets the ball he is productive and an undeniable mismatch with his 4.4 speed in a 250+ pound frame.
According to Greg Olson, Reece has been at the top of progressions in a number of play calls but the ball just isn't going his way. This is something that needs to be fixed, even if it is at the expense of Darren McFadden getting his touches.
"Again," said Coach Olson, "unless you're handing him the football, which we have to monitor what we're doing because we have to make sure that Darren McFadden is getting enough touches, and with Marcel it's changed a little bit this year because of the quarterback position. We've become more of a read-option kind of an offense. A lot of times in the passing game, he may be involved, or may be in the progression, but if he's not catching it in the passing game, the best chance of getting him the football is to hand the ball off."
At this point even if it does mean less carries for Darren McFadden then so be it. If the passes are not heading in Marcel's direction then hand it off to him, he is a playmaker that needs to be utilized. Darren has not been making the difference the Raiders need in the rushing department this season (though the depleted offensive line shoulders part of the blame there) so they don't have much to lose by giving some of his carries to the more powerful Reece anyway.
"We'll continue, again, as we look at it." continued Olson, "Every game or every week when we finish a game, we go back through a stat sheet and look at how many times the guys were targeted and how many times they actually touched the ball. We're aware of that with Marcel. We're constantly aware of it throughout the game. We'll call some things up in the passing game."
Being aware that one of the team's best players hadn't been getting enough touches so far this year isn't good enough. We have all been painfully aware of Reece's targets, or lack thereof, and it is starting to get silly. Olson is a coach who builds around his best players' talents, so something clearly needs to be done about this. Unfortunately Greg Olson says that Reece is already a top priority in the passing game and that it just hasn't worked out that way so far this year.
"He's number one in progression and for some reason or another, we don't get the ball to him. We'll keep continuing to put him in the position to be at that number one in the progression, but the only way you can really and truly ensure it, is to hand him the ball."
If he is already the number one progression on many plays but he isn't getting thrown to, then the coaches need to talk to Terrelle Pryor about getting the ball to him. If Pryor can't do better at throwing to Marcel then they absolutely need to hand him the ball more often. You simply cannot have one of the best players on the team not getting his touches; averaging barely over 3 touches a game is not getting the job done.
The bye week is here now and that means going over what has worked and what hasn't a little bit more than in other weeks. Well, what works is getting Reece the ball and what hasn't worked is not getting Reece the ball. Hopefully we will see him more involved when football resumes against the Pittsburgh Steelers; it's clearly an area that needs to be addressed. Only 19 touches in 6 games for Marcel Reece is not going to cut it going forward.
#pragma once
#include "../Math.h"
#pragma pack (push, 1)
struct HomogenousPool
{
typedef MultiPrecision::Float Float;
struct Scale
{
static const uint32_t s_Threshold = 20; // 2^20 ~ 1mln
static bool IsSane(const Float& x, uint32_t nThreshold)
{
// should be (nThreshold > x.m_Order >= -nThreshold)
// (2*nThreshold > x.m_Order + nThreshold >= 0)
uint32_t val = nThreshold + x.m_Order;
return (val < nThreshold * 2);
}
};
struct Pair
{
Amount s;
Amount b;
Pair get_Fraction(const Float& kRatio) const
{
Pair p;
p.s = Float(s) * kRatio;
p.b = Float(b) * kRatio;
return p;
}
Pair get_Fraction(Amount w1, Amount wTotal) const
{
assert(wTotal);
return get_Fraction(Float(w1) / Float(wTotal));
}
Pair operator - (const Pair& p) const
{
Pair ret;
ret.s = s - p.s;
ret.b = b - p.b;
return ret;
}
};
enum Mode {
Neutral, // s doesn't change during the trade (i.e. farming)
Burn, // s is decreased during the trade (i.e. exchange, s burns-out)
Grow, // s grows during the trade (i.e. redistribution)
};
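// One accounting epoch: pooled sell/buy balances plus a running integral
// (m_Sigma, scaled by m_kScale) from which each user's pro-rata share settles.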
struct Epoch
{
uint32_t m_Users;
Pair m_Balance;
Float m_Sigma;
Float m_kScale;
template <Mode m>
void Trade_(const Pair& d)
{
if (!m_Balance.s)
{
assert(!d.s && !d.b);
return;
}
Float kScale_div_s = m_kScale / Float(m_Balance.s);
if (d.b)
{
// dSigma = m_kScale * db / s
m_Sigma = m_Sigma + kScale_div_s * Float(d.b);
Strict::Add(m_Balance.b, d.b);
}
if (d.s)
{
if constexpr (Mode::Burn == m)
{
assert(m_Balance.s >= d.s);
m_Balance.s -= d.s;
}
else
{
if constexpr (Mode::Grow == m)
Strict::Add(m_Balance.s, d.s);
}
if constexpr (Mode::Neutral != m)
// m_kScale *= sNew / sOld;
m_kScale = kScale_div_s * Float(m_Balance.s);
}
}
struct User
{
Float m_Sigma0;
Float m_SellScaled;
void Add_(Epoch& e, Amount valSell)
{
assert(valSell);
m_Sigma0 = e.m_Sigma;
m_SellScaled = Float(valSell) / e.m_kScale;
Strict::Add(e.m_Balance.s, valSell);
e.m_Users++; // won't overflow, 4bln isn't feasible
}
void DelRO_(const Epoch& e, Pair& out) const
{
assert(e.m_Users);
if (1 == e.m_Users)
out = e.m_Balance;
else
{
out.s = std::min<Amount>(e.m_Balance.s, m_SellScaled * e.m_kScale);
out.b = std::min<Amount>(e.m_Balance.b, m_SellScaled * (e.m_Sigma - m_Sigma0));
}
}
template <bool bReadOnly>
void Del_(Epoch& e, Pair& out) const
{
DelRO_(e, out);
if constexpr (!bReadOnly)
{
e.m_Users--;
e.m_Balance = e.m_Balance - out;
}
}
};
};
template <Mode m>
struct SingleEpoch
{
Epoch m_Active;
Amount get_TotalSell() const {
return m_Active.m_Balance.s;
}
void Reset()
{
_POD_(*this).SetZero();
m_Active.m_kScale = 1u;
}
typedef Epoch::User User;
void UserAdd(User& u, Amount valSell)
{
u.Add_(m_Active, valSell);
}
template <bool bReadOnly = false>
void UserDel(User& u, Pair& out)
{
u.Del_<bReadOnly>(m_Active, out);
if constexpr (!bReadOnly)
{
if (!m_Active.m_Users)
Reset();
}
}
void Trade(const Pair& d)
{
assert(d.s);
m_Active.Trade_<m>(d);
}
};
template <Mode m>
struct MultiEpoch
{
Epoch m_Active;
Epoch m_Draining;
Amount get_TotalSell() const {
// won't overflow, we test for overflow when user joins
return m_Active.m_Balance.s + m_Draining.m_Balance.s;
}
uint32_t m_iActive;
void Init()
{
_POD_(*this).SetZero();
ResetActiveScale();
m_iActive = 1;
}
struct User
:public Epoch::User
{
uint32_t m_iEpoch;
};
void UserAdd(User& u, Amount valSell)
{
u.m_iEpoch = m_iActive;
u.Add_(m_Active, valSell);
Env::Halt_if(get_TotalSell() < m_Active.m_Balance.s); // overflow test
}
template <bool bReadOnly = false, class Storage>
void UserDel(User& u, Pair& out, Storage& stor)
{
if (u.m_iEpoch == m_iActive)
{
u.template Del_<bReadOnly>(m_Active, out);
if constexpr (!bReadOnly)
{
if (!m_Active.m_Users)
ResetActive();
}
}
else
{
if (u.m_iEpoch + 1 == m_iActive)
u.template Del_<bReadOnly>(m_Draining, out);
else
{
Epoch e;
stor.Load(u.m_iEpoch, e);
u.template Del_<bReadOnly>(e, out);
if constexpr (!bReadOnly)
{
if (e.m_Users)
stor.Save(u.m_iEpoch, e);
else
stor.Del(u.m_iEpoch);
}
}
}
}
void Trade(const Pair& d)
{
assert(d.s);
// Active epoch must always be valid
// Account for draining epoch iff not empty
if (m_Draining.m_Users)
{
Amount totalSell = get_TotalSell();
assert(d.s <= totalSell);
Pair d0 = d.get_Fraction(m_Active.m_Balance.s, totalSell);
m_Active.Trade_<m>(d0);
d0 = d - d0;
m_Draining.Trade_<m>(d0);
}
else
m_Active.Trade_<m>(d);
}
template <class Storage>
void OnPostTrade(Storage& stor)
{
if (!Scale::IsSane(m_Active.m_kScale, Scale::s_Threshold))
{
UnloadDraining(stor);
_POD_(m_Draining) = m_Active;
ResetActive();
m_iActive++;
}
if (!Scale::IsSane(m_Draining.m_kScale, Scale::s_Threshold * 2))
{
UnloadDraining(stor);
_POD_(m_Draining).SetZero();
}
}
private:
void ResetActive()
{
_POD_(m_Active).SetZero();
ResetActiveScale();
}
void ResetActiveScale()
{
m_Active.m_kScale.m_Num = m_Active.m_kScale.s_HiBit;
m_Active.m_kScale.m_Order = 0;
}
template <class Storage>
void UnloadDraining(Storage& stor)
{
if (m_Draining.m_Users)
stor.Save(m_iActive - 1, m_Draining);
}
};
};
typedef HomogenousPool::MultiEpoch<HomogenousPool::Mode::Burn> ExchangePool;
typedef HomogenousPool::SingleEpoch<HomogenousPool::Mode::Grow> DistributionPool;
template <typename TWeight, typename TValue, uint32_t nDims>
struct StaticPool
{
typedef MultiPrecision::Float Float;
TWeight m_Weight;
TValue m_pValue[nDims];
Float m_pSigma[nDims];
void AddValue(TValue v, uint32_t iDim)
{
// dSigma = d / s0
m_pSigma[iDim] = m_pSigma[iDim] + Float(v) / Float(m_Weight);
Strict::Add(m_pValue[iDim], v);
}
bool IsEmpty() const {
return !m_Weight;
}
void Reset()
{
_POD_(*this).SetZero();
}
struct User
{
TWeight m_Weight;
Float m_pSigma0[nDims];
};
void Add(User& u)
{
Strict::Add(m_Weight, u.m_Weight);
for (uint32_t i = 0; i < nDims; i++)
u.m_pSigma0[i] = m_pSigma[i];
}
void Remove(TValue* pRet, const User& u)
{
if (m_Weight == u.m_Weight)
{
for (uint32_t i = 0; i < nDims; i++)
pRet[i] = m_pValue[i];
Reset();
}
else
{
assert(m_Weight > u.m_Weight);
m_Weight -= u.m_Weight;
Float w(u.m_Weight);
for (uint32_t i = 0; i < nDims; i++)
{
pRet[i] = w * (m_pSigma[i] - u.m_pSigma0[i]);
pRet[i] = std::min(pRet[i], m_pValue[i]);
m_pValue[i] -= pRet[i];
}
}
}
};
#pragma pack (pop)
|
/**
* Adds a fractal panel to the main frame.
*/
public void addFractal(JPanel pnl) {
frm.remove(fractal);
fractal = pnl;
c.gridx = 0;
c.gridy = 2;
c.fill = GridBagConstraints.BOTH;
c.weightx = 1.0;
c.weighty = 1.0;
c.insets = new Insets(10, 10, 10, 10);
frm.add(fractal, c);
frm.revalidate();
frm.repaint();
} |
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"fmt"
"io"
"os"
"github.com/zhuzhengyang/etcd/Godeps/_workspace/src/github.com/spf13/cobra"
"github.com/zhuzhengyang/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/zhuzhengyang/etcd/clientv3"
"github.com/zhuzhengyang/etcd/clientv3/mirror"
"github.com/zhuzhengyang/etcd/etcdserver/api/v3rpc/rpctypes"
)
// NewSnapshotCommand returns the cobra command for "snapshot".
func NewSnapshotCommand() *cobra.Command {
return &cobra.Command{
Use: "snapshot [filename]",
Short: "Snapshot streams a point-in-time snapshot of the store",
Run: snapshotCommandFunc,
}
}
// snapshotCommandFunc streams a point-in-time snapshot of the entire store
// to stdout or to the named file.
func snapshotCommandFunc(cmd *cobra.Command, args []string) {
switch {
case len(args) == 0:
snapshotToStdout(mustClientFromCmd(cmd))
case len(args) == 1:
snapshotToFile(mustClientFromCmd(cmd), args[0])
default:
err := fmt.Errorf("snapshot takes at most one argument")
ExitWithError(ExitBadArgs, err)
}
}
// snapshotToStdout streams a snapshot over stdout
func snapshotToStdout(c *clientv3.Client) {
// must explicitly fetch first revision since no retry on stdout
wr := <-c.Watch(context.TODO(), "", clientv3.WithPrefix(), clientv3.WithRev(1))
if wr.Err() == nil {
wr.CompactRevision = 1
}
if rev := snapshot(os.Stdout, c, wr.CompactRevision+1); rev != 0 {
err := fmt.Errorf("snapshot interrupted by compaction %v", rev)
ExitWithError(ExitInterrupted, err)
}
os.Stdout.Sync()
}
// snapshotToFile atomically writes a snapshot to a file
func snapshotToFile(c *clientv3.Client, path string) {
partpath := path + ".part"
f, err := os.Create(partpath)
if err != nil {
exiterr := fmt.Errorf("could not open %s (%v)", partpath, err)
ExitWithError(ExitBadArgs, exiterr)
}
// Defer the close only once Create is known to have succeeded.
defer f.Close()
rev := int64(1)
for rev != 0 {
f.Seek(0, 0)
f.Truncate(0)
rev = snapshot(f, c, rev)
}
f.Sync()
if err := os.Rename(partpath, path); err != nil {
exiterr := fmt.Errorf("could not rename %s to %s (%v)", partpath, path, err)
ExitWithError(ExitIO, exiterr)
}
}
// snapshot reads all of a watcher; returns compaction revision if incomplete
// TODO: stabilize snapshot format
func snapshot(w io.Writer, c *clientv3.Client, rev int64) int64 {
s := mirror.NewSyncer(c, "", rev)
rc, errc := s.SyncBase(context.TODO())
for r := range rc {
for _, kv := range r.Kvs {
fmt.Fprintln(w, kv)
}
}
err := <-errc
if err != nil {
if err == rpctypes.ErrCompacted {
// will get correct compact revision on retry
return rev + 1
}
// failed for some unknown reason, retry on same revision
return rev
}
wc := s.SyncUpdates(context.TODO())
for wr := range wc {
if wr.Err() != nil {
return wr.CompactRevision
}
for _, ev := range wr.Events {
fmt.Fprintln(w, ev)
}
rev := wr.Events[len(wr.Events)-1].Kv.ModRevision
if rev >= wr.Header.Revision {
break
}
}
return 0
}
|
World Wildlife Fund calls for public pressure on the Palaszczuk government to reduce habitat destruction
Tree clearing may have killed 180 koalas in Queensland in two years, says wildlife group
Tree clearing may have killed as many as 180 koalas in south-east Queensland in the two years after the former state government relaxed vegetation protection laws, according to an analysis by the World Wildlife Fund.
The environmental group says a crisis gripping koala populations has its root in a surge in tree clearing given the political green light in both Queensland and New South Wales.
The koala deaths in south-east Queensland, compounding a trend that has wiped out half the koala population statewide in the last two decades, came from the bulldozing of 44 sq km of bushland between mid-2013 and mid-2015, WWF scientist Martin Taylor argues.
The wave of deaths pushed the iconic animal further towards local extinctions in former strongholds, particularly to Brisbane’s north.
They were followed by an ongoing surge in fatal koala injuries from vehicles and dog attacks that has the RSPCA fearing for their long-term survival in the region.
In NSW, there are also fears of local koala populations being wiped out after total numbers fell by an estimated 26% in the past two decades, according to a separate WWF report by University of Queensland academic Christine Adams-Hosking.
The report declared tree clearing, also relaxed by the NSW state government in late 2016, a major factor.
Taylor, using koala density maps from a state government study, calculated that the cleared bushland in south-east Queensland, equivalent to 4,400 rugby league fields, would have supported 179 koalas.
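(A rough check of that arithmetic: by the report's figures each rugby league field is about one hectare, so 4,400 fields comes to roughly 44 sq km, implying a density of about four koalas per square kilometre of cleared habitat.)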
“Bulldoze their trees and you kiss the koalas goodbye – they’re forced to look for new homes and are then killed by cars or dogs,” Taylor said.
“The only solution is state government action to rein in excessive tree clearing.”
The Palaszczuk government, in a hung parliament, tried and failed to restore tree protections after losing the vote of former Labor MP-turned-independent, Billy Gordon.
It is likely to take its proposed reforms to the next election, due by January. WWF has called on the public to lobby key Queensland politicians to reduce tree clearing.
Only 2% of the more than 2,000 koalas treated in south-east Queensland wildlife hospitals for bone fractures over 13 years survived, almost all of them injured in vehicle collisions or dog attacks.
RSPCA Queensland’s Wacol hospital treated a “staggering” 323 koalas in the year to 1 April, spokesman Michael Beatty said.
Beatty said their plight was “of course linked to habitat destruction”.
“There are also increasing concerns about where koalas can be safely re-released,” he said.
The Adams-Hosking report found steep, long-term declines that would see populations around the region extinct within a few koala generations (six to eight years).
On the “Koala Coast” to Brisbane’s south-east, the population had plummeted by about 80% between 1996 and 2014.
In Pine Rivers to Brisbane’s north, it fell by about 55%.
The destruction of forest across Queensland is forcing koalas into “increasingly fragmented pockets of habitat”, the WWF report finds.
Between 1995 and 2009, before the resurgence in clearing, koala numbers in southern inland Queensland plunged from an estimated 59,000 to 11,600, a reduction of 80%.
The report predicted koalas in Ballina in northern NSW were heading for extinction, with deaths outnumbering births.
Numbers in the Pilliga Forests had sunk 80% since the 1990s.
In the NSW town of Gunnedah, known as the “koala capital of the world”, a quarter of the population died during the 2009 heatwaves as they struggled to find water and tree shade.
In the NSW Port Macquarie-Hastings council area, at least 10% of a population of 2,000 koalas are admitted to hospital every year. |
#include <bits/stdc++.h>
using namespace std;
#define sd(x) scanf("%d",&x)
#define su(x) scanf("%u",&x)
#define slld(x) scanf("%lld",&x)
#define sc(x) scanf("%c",&x)
#define ss(x) scanf("%s",x)
#define sf(x) scanf("%f",&x)
#define slf(x) scanf("%lf",&x)
#define ll long long int
#define mod(x,n) (x+n)%n
int RMQ[100007][18][7];
int pow2[100007];
void init()
{
int i;
pow2[1] = 0;
for(i=2;i<=100000;i++)
{
if( (1<<(pow2[i-1]+1)) <= i )
pow2[i] = pow2[i-1]+1;
else
pow2[i] = pow2[i-1];
// if(i<=20)
// printf("%d\n", pow2[i] );
}
}
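// Build a sparse table over column x: RMQ[j][i][x] holds the maximum of
// rows [j, j+2^i) for that column.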
void createRMQ(int x,int n,int m)
{
int i,j;
for(i=1;(1<<i)<=n;i++)
{
for(j=0;(j+(1<<i))-1<n;j++)
{
RMQ[j][i][x] = max( RMQ[j][i-1][x],RMQ[j+(1<<(i-1))][i-1][x] );
}
}
}
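// Range maximum over rows [l, r] of column x using two overlapping
// power-of-two blocks; an empty range yields 0.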
int maxQuery(int x,int l,int r)
{
int z = r-l+1;
if(z==0)
return 0;
return max( RMQ[l][pow2[z]][x], RMQ[r-(1<<pow2[z])+1][pow2[z]][x] );
}
bool chk(int l,int r,int k,int m)
{
if(r-l+1==0)
return true;
int i;ll z = 0;
for(i=0;i<m;i++)
{
z+=maxQuery(i,l,r);
}
if(z<=k)
return true;
return false;
}
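// Binary search on the answer: the largest window length for which some
// window's per-column maxima sum to at most k.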
int binary(int n,int m,int k)
{
int first = 0,last = n,mid,i;
bool flag;
while(first+1<last)
{
mid = (first+last)/2;
flag = false;
for(i=0;i+mid-1<n;i++)
{
if(chk(i,i+mid-1,k,m))
{
flag = true;
break;
}
}
if(flag)
first = mid;
else
last = mid;
}
mid = last;
flag = false;
for(i=0;i+mid-1<n;i++)
{
if(chk(i,i+mid-1,k,m))
{
flag = true;
break;
}
}
if(flag)
return last;
return first;
}
int main()
{
// freopen("input_file_name.in","r",stdin);
// freopen("output_file_name.out","w",stdout);
int i,j,k,l,n,m,ans;
init();
sd(n); sd(m); sd(k);
for(i=0;i<n;i++)
{
for(j=0;j<m;j++)
{
sd(RMQ[i][0][j]);
}
}
for(i=0;i<m;i++)
{
createRMQ(i,n,m);
}
ans = binary(n,m,k);
for(i=0;i+ans-1<n;i++)
{
if(chk(i,i+ans-1,k,m))
{
for(j=0;j<m;j++)
{
printf("%d ", maxQuery(j,i,i+ans-1) );
}
printf("\n");
return 0;
}
}
return 0;
} |
/** Compare logP from logit model with that calculated by JulesLogit */
public void testLogitModelLogP() throws LearnerException {
JulesLogit logit = new JulesLogit();
logit.nodeCost(LogitFN.getX(node4,cdmsData),LogitFN.getZ(node4,cdmsData));
Value.Structured y = logit.getParams();
Value.Vector x = LogitFN.getX(node4,cdmsData);
Value.Vector z = LogitFN.getZ(node4,cdmsData);
assertEquals(Logit.logit.logP(x,y,z), -logit.emlcost,0.0000000001);
} |
def add(self, widgetType, widgetName, widget, group=None):
widgGroup = self.group(widgetType, group)
if widgetName in widgGroup:
raise ItemLookupError("Duplicate key: '" + widgetName + "' already exists")
else:
widgGroup[widgetName] = widget
widget.APPJAR_TYPE = widgetType |
/// Send a .caidx file to the remote.
///
pub async fn send_index(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let file = read(path)?;
let name = Path::new(path.file_name().expect("This must be a valid filename"));
self.push_file(name, &file).await?;
Ok(())
} |
// Copyright 2012-2017 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pci
import (
"bytes"
)
// Devices contains a slice of one or more PCI devices.
type Devices []*PCI
// String stringifies the PCI devices. Currently it just calls the device String().
func (d Devices) String() string {
var buffer bytes.Buffer
for _, pci := range d {
buffer.WriteString(pci.String())
buffer.WriteString("\n")
}
return buffer.String()
}
// SetVendorDeviceName sets all numeric IDs of all the devices
// using the pci device SetVendorDeviceName.
func (d Devices) SetVendorDeviceName() {
for _, p := range d {
p.SetVendorDeviceName()
}
}
// ReadConfig reads the config info for all the devices.
func (d Devices) ReadConfig() error {
for _, p := range d {
if err := p.ReadConfig(); err != nil {
return err
}
}
return nil
}
// ReadConfigRegister reads the config info for all the devices.
func (d Devices) ReadConfigRegister(offset, size int64) ([]uint64, error) {
var vals []uint64
for _, p := range d {
val, err := p.ReadConfigRegister(offset, size)
if err != nil {
return nil, err
}
vals = append(vals, val)
}
return vals, nil
}
// WriteConfigRegister writes the config info for all the devices.
func (d Devices) WriteConfigRegister(offset, size int64, val uint64) error {
for _, p := range d {
if err := p.WriteConfigRegister(offset, size, val); err != nil {
return err
}
}
return nil
}
|