content
stringlengths
10
4.9M
Four-dimensional Monte Carlo simulations demonstrating how the extent of intensity-modulation impacts motion effects in proton therapy lung treatments. PURPOSE To compare motion effects in intensity modulated proton therapy (IMPT) lung treatments with different levels of intensity modulation. METHODS Spot scanning IMPT treatment plans were generated for ten lung cancer patients for 2.5Gy(RBE) and 12Gy(RBE) fractions and two distinct energy-dependent spot sizes (σ ∼8-17 mm and ∼2-4 mm). IMPT plans were generated with the target homogeneity of each individual field restricted to <20% (IMPT20%). These plans were compared to full IMPT (IMPTfull), which had no restriction on the single field homogeneity. 4D Monte Carlo simulations were performed upon the patient 4DCT geometry, including deformable image registration and incorporating the detailed timing structure of the proton delivery system. Motion effects were quantified via comparison of the results of the 4D simulations (4D-IMPT20%, 4D-IMPTfull) with those of a 3D Monte Carlo simulation (3D-IMPT20%, 3D-IMPTfull) upon the planning CT using the equivalent uniform dose (EUD), V95 and D1-D99. The effects in normal lung were quantified using mean lung dose (MLD) and V90%. RESULTS For 2.5Gy(RBE), the mean EUD for the large spot size is 99.9% ± 2.8% for 4D-IMPT20% compared to 100.1% ± 2.9% for 4D-IMPTfull. The corresponding values are 88.6% ± 8.7% (4D-IMPT20%) and 91.0% ± 9.3% (4D-IMPTfull) for the smaller spot size. The EUD value is higher in 69.7% of the considered deliveries for 4D-IMPTfull. The V95 is also higher in 74.7% of the plans for 4D-IMPTfull, implying that IMPTfull plans experience less underdose compared to IMPT20%. However, the target dose homogeneity is improved in the majority (67.8%) of plans for 4D-IMPT20%. 
The higher EUD and V95 suggest that the degraded homogeneity in IMPTfull is actually due to the introduction of hot spots in the target volume, perhaps resulting from the sharper in-target dose gradients.
<gh_stars>1-10 /* Package set implements a `Set` and a `SyncSet` for common set operations. This is pretty standard data structure implementation that uses a `map[interface{}]struct{}` and also serves as a template for more specific set type implementations. The SyncSet provides thread-safe concurrent access to a set using locking. */ package set import ( "errors" "fmt" ) type void struct{} var member void // Errors that might be returned from various set operations. var ( ErrEmptySet = errors.New("empty sets do not support this operation") ) // New creates and returns a new set, you can also use `make(Set)`. func New(elems ...interface{}) (s Set) { s = make(Set) for _, elem := range elems { s.Add(elem) } return s } // Set is an unordered collection of distinct objects of any type. Common uses include // membership testing, deduplication, and mathematical operations such as intersection, // union, difference, and symmetric difference. type Set map[interface{}]void //=========================================================================== // Element Operations //=========================================================================== // Add element to the set. Can also use `s[elem] = struct{}{}`. func (s Set) Add(elem interface{}) { s[elem] = member } // Remove element from the set. Returns an error if elem is not contained in the set. func (s Set) Remove(elem interface{}) error { if _, ok := s[elem]; !ok { return fmt.Errorf("%v is not a member of the set", elem) } delete(s, elem) return nil } // Discard element from the set if it is present. Can also use `delete(s, elem)`. func (s Set) Discard(elem interface{}) { delete(s, elem) } // Pop removes and returns an arbitrary element from teh set. Returns an error if the // set is empty. Note that it is far more efficient to range over the set then pop // until empty. Also note that a type assertion is required to used the returned elem. 
func (s Set) Pop() (interface{}, error) { for elem := range s { delete(s, elem) return elem, nil } return nil, ErrEmptySet } // Clear the set in place, removing all elements. func (s Set) Clear() { for elem := range s { delete(s, elem) } } //=========================================================================== // Set Comparisons //=========================================================================== // Contains returns true if the elem is included in the set. func (s Set) Contains(elem interface{}) bool { _, ok := s[elem] return ok } // IsNull returns true iff the set has zero elements or is nil func IsNull(s Set) bool { return s == nil || len(s) == 0 } // IsEmpty returns true iff the set has zero elements func (s Set) IsEmpty() bool { return len(s) == 0 } // IsDisjoint returns true if the set has no elements in common with other. Sets are // disjoint if and only if their intersection is the empty set. func (s Set) IsDisjoint(other Set) bool { if len(other) < len(s) { // Loop through the smaller set to determine disjointedness return other.IsDisjoint(s) } for elem := range s { if other.Contains(elem) { return false } } return true } // IsSubset tests if every element in the set is in other. func (s Set) IsSubset(other Set) bool { if len(s) > len(other) { // It can't be a subset if it has more items than the other return false } for elem := range s { if !other.Contains(elem) { return false } } return true } // IsSuperset tests if every element of other is in the set. func (s Set) IsSuperset(other Set) bool { return other.IsSubset(s) } //=========================================================================== // Set Math //=========================================================================== // Union returns a new set with elements from the set and all others. 
func (s Set) Union(others ...Set) Set { r := s.Copy() for _, other := range others { for elem := range other { r.Add(elem) } } return r } // Intersection returns a new set with elements common to the set and all others. func (s Set) Intersection(others ...Set) Set { // TODO: find the smallest set to perform the intersection on and benchmark if // that makes a difference in the performance of this computation. r := make(Set) outer: for elem := range s { // Determine if the element is in all other sets otherwise skip it for _, other := range others { if !other.Contains(elem) { continue outer } } // Because we didn't continue outer, this elem is in all other sets r.Add(elem) } return r } // Difference returns a new set with elements in the set that are not in the others. func (s Set) Difference(others ...Set) Set { r := make(Set) outer: for elem := range s { // Determine if element is in any other set and if so, skip it for _, other := range others { if other.Contains(elem) { continue outer } } // Because we didn't continue outer, this elem is not in any other set r.Add(elem) } return r } // SymmetricDifference returns a new set with elements in either the set or other // but not both. Note that this method can only accept a single input set. func (s Set) SymmetricDifference(other Set) Set { r := make(Set) for elem := range s { if other.Contains(elem) { continue } r.Add(elem) } for elem := range other { if s.Contains(elem) { continue } r.Add(elem) } return r } // Copy returns a shallow copy of the set. func (s Set) Copy() Set { r := make(Set) for elem := range s { r.Add(elem) } return r }
<gh_stars>0 // import anypackage.in.which.you.put.Exevalator; /** * Function available in expressions. */ class MyFunction implements Exevalator.FunctionInterface { /** * Invoke the function. * * @param arguments An array storing values of arguments. * @return The return value of the function. */ @Override public double invoke(double[] arguments) { if (arguments.length != 2) { throw new Exevalator.Exception("Incorrected number of args"); } return arguments[0] + arguments[1]; } } /** * An example to create a function for available in expressions. */ public class Example5 { public static void main(String[] args) { // Create an instance of Exevalator Engine Exevalator exevalator = new Exevalator(); // Connects the function available for using it in expressions MyFunction fun = new MyFunction(); exevalator.connectFunction("fun", fun); // Evaluate the value of an expression double result = exevalator.eval("fun(1.2, 3.4)"); // Display the result System.out.println("result: " + result); } }
<gh_stars>0 package uk.gov.hmcts.ccd.domain.service.search.elasticsearch; import static java.lang.String.format; import uk.gov.hmcts.ccd.ApplicationParams; public abstract class CaseSearchRequestFactory<T> { private final ApplicationParams applicationParams; private final CaseSearchQuerySecurity caseSearchQuerySecurity; protected CaseSearchRequestFactory(ApplicationParams applicationParams, CaseSearchQuerySecurity caseSearchQuerySecurity) { this.applicationParams = applicationParams; this.caseSearchQuerySecurity = caseSearchQuerySecurity; } public final T create(String caseTypeId, String query) { String securedQuery = caseSearchQuerySecurity.secureQuery(caseTypeId, query); return createSearchRequest(caseTypeId, securedQuery); } protected abstract T createSearchRequest(String caseTypeId, String query); protected String getCaseIndexName(String caseTypeId) { return format(applicationParams.getCasesIndexNameFormat(), caseTypeId.toLowerCase()); } protected String getCaseIndexType() { return applicationParams.getCasesIndexType(); } }
package org.opendatakit.utilities;

import org.junit.After;
import org.junit.Before;
import org.junit.FixMethodOrder;
import org.junit.runners.MethodSorters;
import org.opendatakit.TestConsts;
import org.opendatakit.database.service.DbHandle;
import org.opendatakit.services.database.AndroidConnectFactory;
import org.opendatakit.services.database.OdkConnectionFactorySingleton;
import org.opendatakit.services.database.utilities.ODKDatabaseImplUtils;

import java.util.List;

/**
 * Created by wrb on 9/21/2015.
 *
 * Put the actual unit tests in the Abstract class as there are two setups.
 *
 * In ODKDatabaseImplUtilsKeepState it keeps the database initialized between tests whereas
 * in ODKDatabaseImplUtilsResetState, it wipes the database from the file system between each test
 */
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ODKDatabaseImplUtilsKeepState extends AbstractODKDatabaseUtilsTest {

    // Tracks whether the connection factory has been configured in this JVM;
    // static so the one-time setup is shared by every test in the class.
    private static boolean initialized = false;
    private static final String APPNAME = TestConsts.APPNAME;
    // Stable handle identifying this test class's database connection.
    private static final DbHandle uniqueKey = new DbHandle(AbstractODKDatabaseUtilsTest.class.getSimpleName() + AndroidConnectFactory.INTERNAL_TYPE_SUFFIX);

    @Override
    protected String getAppName() {
        return APPNAME;
    }

    /**
     * Acquires the shared database connection before each test. On the very
     * first run it configures the connection factory singleton and drops all
     * existing tables; on later runs it only verifies the database is clean.
     */
    @Before
    public synchronized void setUp() throws Exception {
        ODKFileUtils.verifyExternalStorageAvailability();
        ODKFileUtils.assertDirectoryStructure(APPNAME);

        boolean beganUninitialized = !initialized;
        if (beganUninitialized) {
            initialized = true;
            // Used to ensure that the singleton has been initialized properly
            AndroidConnectFactory.configure();
        }
        // +1 referenceCount if db is returned (non-null); released in tearDown()
        db = OdkConnectionFactorySingleton.getOdkConnectionFactoryInterface()
            .getConnection(getAppName(), uniqueKey);
        if (beganUninitialized) {
            // start clean
            List<String> tableIds = ODKDatabaseImplUtils.get().getAllTableIds(db);

            // Drop any leftover tables so the first test starts from a clean slate
            for (String id : tableIds) {
                ODKDatabaseImplUtils.get().deleteTableAndAllData(db, id);
            }
        } else {
            // Already initialized: the previous test's tearDown should have
            // left no tables behind; verify and clean just in case.
            verifyNoTablesExistNCleanAllTables();
        }
    }

    /**
     * Verifies no tables were left behind by the test, then releases the
     * reference taken on the connection in setUp().
     */
    @After
    public void tearDown() throws Exception {
        verifyNoTablesExistNCleanAllTables();
        if (db != null) {
            db.releaseReference();
        }
    }
}
Top twelve myths about fiber The recent popularity of fiber in medicine and nutrition is based on old wives' tales and intentional lies that have little to do with either science or medicine. These lies and tales are retold ad nauseam in medical offices, web sites, diet books, college courses, government pamphlets, and on and on. Tell a lie one time too many, and it soon turns into a pervasive and believable myth. I don't have to tell you, just how dangerous medical doctrines are, when they are built on mythology. — Fiber is bad? Konstantin, have you gone postal?.. Well, judge for yourself! Find a flaw, or prove otherwise. If you don't find any, you'll go postal too. If you continue to have doubts after reviewing the presented facts, just follow the links to the primary sources, and keep researching these facts until you are fully convinced that there is no quoting out of context going on here. Myth #1: For maximum health, obtain 30 to 40 g of fiber daily from fresh fruits and vegetables. Reality: Here is how many fresh fruits you'll need to eat throughout the day in order to obtain those 30 to 40 grams (1-1.4 oz) of daily fiber: That comes to five apples, three pears, and two oranges. A small apple contains 3.6 g of fiber and 15.5 g of sugars. A small pear—4.6 g and 14.5 g; and a small orange—2.3 g and 11.3 g respectively (USDA National Nutrient Database; NDB #s: 09003; 09200; 09252). These ten small (not medium or large) fruits will provide you with 36.4 g of indigestible fiber and a whopping 143.6 g of digestible sugars, or an equivalent of that many (ten) tablespoons of plain table sugar! And that‘s before accounting for all the other carbs consumed throughout the day for breakfast, lunch, dinner, and from snacks and beverages. So ask yourself this question: even if you are a 100% healthy 25-year-old muscle-bound athlete, would you ever ingest that much sugar willingly? The answer is obvious—no way! 
Well, maybe under the influence of a controlled substance or torture. But certainly not while of sound mind! But that's exactly what's being recommended for “health purposes” to American children and adults. It‘s not surprising that so many are suffering from the ravages of diabetes and obesity—the total daily carbohydrate requirement for an average adult is under 200 grams, even less for children. The ratio of digestible carbohydrates (sugars) to fiber in vegetables, cereals, breads, beans, and legumes is, on average, similar to fruits. Thus, no matter how hard you try to mix'n'match, you'll be getting screwed all the same. Incidentally, that's the meaning of those brass screws in the cereal bowl on the front cover of Fiber Menace. This myth—that fruits and vegetables are the best source of fiber—is probably the most pervasive and damaging of all. If fiber is what you‘re really after, you‘re better off getting it from fiber supplements. These, after all, have almost no digestible carbs. But, then, of course, you run into those other persistent falsehoods... Myth #2: Fiber reduces blood sugar levels and prevents diabetes, metabolic disorders, and weight gain. Reality: That's a blatant deception. If you consume 100 g of plain table sugar at once, the blood absorbs all 100 g of sugar almost as soon as it reaches the small intestine, where the assimilation takes place. If you add 30 g of fiber into the mix, the fiber will first clog the stomach for a while. Second, it will partially block intestinal absorption, which, in turn, will extend the rate of sugar assimilation into the blood, from, let‘s say, one hour to three. But at the end of those extra three hours the blood will still absorb exactly the same 100 g of sugar—not an iota more, not an iota less. 
If you are a diabetic, the only difference will be that you‘ll require more extended (long-acting) insulin (for type 1 diabetes) or larger doses of medicine (for type 2 diabetes) to deal with slow-digesting sugars, and your blood glucose test will not spike as high after the meal. But you‘re fooling no one but a glucose meter. In all other respects, the damage will be all the same, or even worse. That's because the carbohydrate load on the cells, liver, pancreas, and kidneys from extended sugar digestion, elevated insulin, and high triglycerides (assimilated fatty acids) is much higher. And that‘s even before taking into account the negative impact of fiber on the digestive organs, or hyperinsulinemia and triglycerides on the heart, blood vessels, and blood pressure. Once inside the large intestine, most of that fiber will get fermented into volatile gases (cause bloating, cramping, and flatulence); short-chain fatty acids (at 2.5 to 4 calories per g, in excess cause anal itching, diarrhea, and hemorrhoidal inflammation); and alcohols (at 7 calories per g). Most of those substances get assimilated into your blood as fast as bacteria can make it happen. Now, on top of nauseating gases and extra calories from fatty acids, you are also getting hit with alcohols, including methanol, which is quite toxic even in trace amounts. So if you still can't pin down the causes of that nagging migraine, or lousy sleep pattern, or anxiety, or depression, or fatigue, then just shut down that little distillery inside your gut. 'Sober up,' and enjoy some peace and quiet. Myth #3: Fiber-rich foods improve digestion by slowing down the digestive process. Reality: Fiber indeed slows down the “digestive process,” because it interferes with digestion in the stomach and, later, clogs the intestines the “whole nine yards.” The myth is that it can be good for health and the digestive process. 
Here is what you get from delayed digestion: indigestion (dyspepsia), heartburn (GERD), gastritis (the inflammation of the stomach‘s mucosal membrane), peptic ulcers, enteritis (the inflammation of the intestinal mucosal membrane), and further down the chain, constipation, irritable bowel syndrome, ulcerative colitis, and Crohn‘s disease. All this, in fact, is the core message of Fiber Menace: fiber slows down the digestive process! And slow digestion is ruinous for your health. Don‘t mess with fiber unless your gut is made of steel! Myth #4: Fiber speeds food through the digestive tract, helping to protect it against cancer. Reality: Not true. In fact, this claim directly contradicts the claim that fiber-rich foods slow down the digestive process. For a reality check, here‘s an excerpt from a college-level physiology textbook that reveals the truth: Colonic Motility Energy-rich meals with a high fat content increase motility [the rate of intestinal propulsion]; carbohydrates and proteins have no effect. R.F. Schmidt, G. Thews; Human Physiology, 2nd edition. 29.7:730 This, incidentally, is why low-fat diets and constipation commonly accompany each other. And don't count on getting any cancer protection from fiber, either. That‘s yet another oft-repeated deception. Myth #5: Fiber promotes a healthy digestive tract and reduces cancer risk. Reality: Not true. Here's what doctors-in-the-know have to say on the subject of the colon cancer/fiber connection: Lack of Effect of a Low-Fat, High-Fiber Diet on the Recurrence of Colorectal Adenomas “Adopting a diet that is low in fat and high in fiber, fruits, and vegetables does not influence the risk of recurrence of colorectal adenomas.” Arthur Schatzkin, M.D et al. The New England Journal of Medicine; April 20, 2000; 342:1149-1155. 
PMID: 10770979 The excerpt below comes, of all places, from the Harvard School of Public Health: Fiber and colon cancer “For years, Americans have been told to consume a high-fiber diet to lower the risk of colon cancer—mainly on the basis of results from relatively small studies. Larger and better-designed studies have failed to show a link between fiber and colon cancer.” Fiber: Start Roughing It Harvard School of Public Health Not convinced yet? Well, here is even more damning evidence from the U.S. Food and Drug Administration: Letter Regarding Dietary Supplement Health Claim for Fiber With Respect to Colorectal Cancer “Based on its review of the scientific evidence, FDA finds that (1) the most directly relevant, scientifically probative, and therefore most persuasive evidence (i.e., randomized, controlled clinical trials with fiber as a test substance) consistently finds that dietary fiber has no [preventive] effect on incidence of adenomatous polyps, a precursor of and surrogate marker for colorectal cancer; and (2) other available human evidence does not adequately differentiate dietary fiber from other components of diets rich in foods of plant origin, and thus is inconclusive as to whether diet-disease associations can be directly attributed to dietary fiber. FDA has concluded from this review that the totality of the publicly available scientific evidence not only demonstrates lack of significant scientific agreement as to the validity of a [preventive] relationship between dietary fiber and colorectal cancer, but also provides strong evidence that such a relationship does not exist.” U. S. Food and Drug Administration Center for Food Safety and Applied Nutrition Office of Nutritional Products, Labeling, and Dietary Supplements; October 10, 2000 [link] Alas, the story doesn't end there. Adding insult to injury, Chapter 10, Colon Cancer cites studies that demonstrate the connection between increased fiber consumption and colon cancer. 
Also, countries with the highest and lowest consumption of meat are compared. Not surprisingly, the countries with the lowest consumption of meat and, correspondingly, the highest consumption of carbohydrates, including fiber, have the highest rate of digestive cancers, particularly of the stomach. Myth #6: Fiber offers protection from breast cancer. Reality: A blatant, preposterous lie. According to the recent massive study jointly conducted by the U.S. Center for Disease Control and Prevention, the Ministry of Health of Mexico, and the American Institute for Cancer Research, it‘s the opposite: women with the highest consumption of carbohydrates, and, correspondingly, of fiber, had the highest rates of breast cancer: Carbohydrates and the Risk of Breast Cancer among Mexican Women “In this population, a high percentage of calories from carbohydrate, but not from fat, was associated with increased breast cancer risk.” Isabelle Romieu, et al; Cancer, Epidemiology, Biomarkers & Prevention; 2004 13: 1283–1289. PMID: 15298947 A similar relationship had been established between the risk of colorectal cancers and the consumption of carbohydrates: Digestible‘ Carbohydrate [sic] May Boost Colorectal Cancer Risk “...people consuming the highest amounts of digestible carbohydrates had a higher risk for developing colorectal cancer compared with those eating the lowest amounts.” Joene Hendry; Reuters Health; June 27, 2002. Although these studies single out carbohydrates as the culprit behind various cancers, where there's smoke, there's also fire: carbs and fiber are as inseparable as Siamese twins, as I have already explained in Myth #1. Myth #7: Fiber lowers blood cholesterol levels, triglycerides, and prevents heart disease. 
The myths about fiber‘s role in coronary heart disease (CHD) and the management of elevated cholesterol have their roots in some dubious research, which culminated in “reduced mineral absorption and myriad of gastrointestinal disturbances” after the study participants were given supplements containing a mixture of guar gum, pectin, soy fiber, pea fiber, and corn bran along with a low-fat and reduced cholesterol diet. The total reduction of LDL cholesterol after 15 weeks was from “7% to 8%”. As any cardiologist will tell you, the reduction of “bad” cholesterol from, let's say, 180 to 166 mg/dL (-8%) is completely meaningless. Besides, if you cause someone to have a “myriad of gastrointestinal disturbances” in the process, that person is more likely to die prematurely from malnutrition and cancer than of stroke or heart attack. Even then, this marginal reduction of cholesterol had little to do with fiber, and everything to do with the reduction of dietary fats. LDL cholesterol happens to be a major precursor to bile. The moment a person is placed on a low-fat diet, their cholesterol level drops because their liver no longer needs to produce as much bile. In addition, intestinal inflammation caused by soluble fiber blocks the ability of bile components to get absorbed back into the bloodstream, further lowering the cholesterol level. This is as basic as the physiology of nutrition gets, and it makes the whole claim of a fiber-cholesterol connection a deliberate con. There is another dimension to the con used to ‘prove‘ fiber‘s role in reducing cholesterol. Most of the studies of fiber's cholesterol-lowering effect — particularly psyllium — used The American Heart Association's (AHA) Step I diet. The Step I diet is high in carbohydrates and low in fat by design, with less than 10% of total energy derived from saturated fat. 
During clinical studies among people using the Step I diet without added fiber, their total cholesterol fell by 8%, LDL cholesterol fell by 6%, and HDL cholesterol fell by 16%. In other words, the Step I diet on its own, without any extra fiber and/or digestive side effects, demonstrates an almost identical drop in cholesterol as with added fiber. In legalese, this particular 'coincidence' is called fraud, plain and simple. — So one fraud more, one fraud less… What‘s the worry, Konstantin, if my cholesterol goes down? — Well, there is a legitimate worry, at least, according to this respected source: Problem with American Heart Association "Step 1" diet “Although the AHA Step I diet decreased total and LDL cholesterol levels in this group of women, it decreased HDL cholesterol by an even greater proportion. In women, a low HDL cholesterol concentration is a stronger independent predictor of cardiovascular disease risk than is elevated total cholesterol or LDL cholesterol. Therefore, women who follow AHA guidelines for lowering their serum cholesterol may actually be increasing their risk of heart disease” Alan R. Gaby, M.D. Townsend Letter for Doctors and Patients Amazingly, back in 2001, the AHA replaced the Step I diet with the Step II, TLC, and ATP III diets [link], which are even more restrictive in terms of fat, and even more permissive in terms of carbohydrates. For more details on this controversy, please watch (video) my investigative report Dietary Fiber: A Heart Savior From Heaven, Or A Death Wish From Hell? And don't get me started on triglycerides... First, nothing raises triglycerides as profoundly as a high-fiber diet does, because, paraphrasing the smoke-fire cliché, where there's fiber, there're carbohydrates, usually eight to ten times as much. This fact — the more fiber you consume, particularly from natural sources, the higher your level of triglycerides from carbohydrates intake — has been dodging Dr. 
Dean Ornish, one of the most prominent proponents of a high-carb/high-fiber diet. Second, once inside the colon, fiber itself gets fermented by intestinal bacteria. Among the the byproducts of bacterial fermentation are short-chain fatty acids — butyrate, acetate, and propionate. Most of these fatty acids get assimilated directly into the bloodstream to provide energy. According to the Dietary Reference Intakes manual “current data indicate that the [energy] yield is in the range of 1.5 to 2.5” calories per each gram of consumed fiber [link]. If you aren't starving, the absorbed fatty acids unused for energy get metabolized by the liver into triglycerides for further storage as body fat. Granted, a few calories here, a few calories there, may not seem like a lot. Still, if you are consuming 30 to 40 grams of fiber daily plus whatever hidden fiber fillers you are ingesting unknowingly along with processed food, it all adds up to epidemics of obesity, diabetes, and heart disease. That‘s why I called Fiber Menace's section that discusses these myths Fiber‘s effect on heart disease: a bargain with the devil. Indeed! Myth #8: Fiber satisfies hunger and reduces appetite. Reality: That‘s yet another dubious benefit of fiber. Because fiber rapidly absorbs water and expands in the stomach up to five times its original size and weight, it indeed pacifies the appetite for a short while. Unfortunately, while faking satiety, expanded fiber also stretches out the stomach‘s chamber, and each new fill-up requires progressively more and more fiber to accomplish the same trick. When a person becomes overweight beyond the point of no return, surgeons suture the stretched-out stomach or squeeze it with a bridle (LAP-BAND©) in order to reduce its capacity and “speed up” satiety. This particular aspect of fiber‘s impact on appetite, satiety, and obesity is explained in Chapter 3, Atkins Goes To South Beach. 
As with other “true myths,” it‘s not so much that “it ain‘t so,” but that filling up the stomach with fiber is actually not good for health and weight loss. Myth #9: Fiber prevents gallstones and kidney stones. Reality: I‘ve seen several observational studies that claim fiber can prevent gallstones. It isn‘t true. It‘s common knowledge that diabetes and obesity are consistently associated with higher risk for gallstones, and both of these conditions are the direct outcome of excessive consumption of carbohydrates, and correspondingly, of fiber. Beyond these few studies, there isn‘t a shred of physiological, anatomical, clinical, or nutritional evidence that connects gallstone formation with fiber consumption. Here‘s an excerpt from Fiber Menace that sheds further light on the gallstone-fiber connection: Fiber‘s effect on the small intestine: Not welcome at any price Gallstones are formed from concentrated bile salts when the outflow of bile from the gallbladder is blocked. […] before they can form, something else must first obstruct the biliary ducts. Just like with pancreatitis, that “something” is either inflammatory disease or obstruction caused by fiber. Women [in the West] are affected by gallstones far more than men, because they are more likely to maintain a “healthy” diet, which nowadays means a diet that is low in fat and high in fiber. Since the gallbladder concentrates bile pending a fatty meal, no fat in the meal means no release of bile. The longer the concentrated bile remains in the gallbladder, the higher the chance for gallstones to form [from bile salts -ed.]. Konstantin Monastyrsky; Fiber Menace, p. 25 Just as with gallstones, kidney stones are also common among people who suffer from diabetes and obesity, because excessive consumption of carbohydrates increases the excretion of urine, changes its chemistry, and predisposes to kidney stones. 
To investigate this myth further, I consulted PubMed, a service of the National Library of Medicine, which is the most thorough compendium of medical research. I reviewed eighty-one articles published between 1972 and 2005 that mention the words “fiber” and “kidney stones” in the same breath. Not a single one of them connected kidney stones to fiber consumption, while several specifically pointed out that an increased consumption of carbohydrates is one of the major contributing factors. One article suggested that a diet free of digestible carbs, but containing fiber, makes urine composition less stones-prone. You don‘t have to be Dr. Watson to deduce that fiber—an indigestible substance—can‘t materially affect urine chemistry, because what can‘t get digested also can‘t reach the kidneys. Besides, it wasn't the presence of fiber that did the “trick,” for those investigators, but the reduction in carbs. Using this kind of methodology, one can also conclude that the wearing of black underpants along with a carb-free diet may prevent kidney stones, too. Some “science!..." Myth #10: Fiber prevents diverticular disease. The therapeutic and preventative role of fiber in diverticular disease is steeped in its own mythology. Let‘s review those myths, as detailed in the article entitled Diverticular Disease by the National Institutes of Health. For starters, even the opening statement reveals that the beneficial role of fiber in the prevention and treatment of diverticular disease is just conjecture (a theory) without any proof: “Although not proven, the dominant theory is that a low-fiber diet is the main cause of diverticular disease.” [link] Here are the other “dominant” falsehoods from the same source: “The [diverticular] disease was first noticed in the United States in the early 1900s. At about the same time, processed foods were introduced into the American diet. Many processed foods contain refined, low-fiber flour. 
Unlike whole-wheat flour, refined flour has no wheat bran.” Not true. The “disease was first noticed” in the early 1900s not because of dietary changes in the American diet, but because in 1895 Wilhelm Conrad Röntgen accidentally discovered X-rays. Before X-rays became commonplace, people were dying from undiagnosed and unknown internal diseases because there were no non-invasive diagnostic tools, no exploratory surgeries, and autopsies were extremely rare. Secondly, since diverticular disease affects primarily people over 50, dietary changes in the early 1900s wouldn‘t even show up in people until the late 1930s or early 1940s. “Diverticular disease is common in developed or industrialized countries—particularly the United States, England, and Australia—where low-fiber diets are common.” Not true. Also common in these countries is watching television, drinking beer, and driving a car. But just like any other conjecture, it doesn‘t mean these activities cause diverticular disease. Diverticular disease is more common in developed Western countries not because the traditional Western diet is low in fiber, but because of excessive consumption of fiber and fiber laxatives. If Westerners consumed even more fiber, the incidence of diverticular disease would be even higher, as described in the next myth. “The [diverticular] disease is rare in countries such as Asia and Africa, where people eat high-fiber vegetable diets.” Not true. 
(a) High-fiber diets are prevalent only among the poor and very poor, usually in rural areas; (b) poor people in these regions die well before the age commonly associated with diverticular disease in the West; (c) no reliable healthcare system exists in rural Africa and Asia to provide reliable and relevant health statistics regarding diverticular disease; (d) when Africans do have access to hospitals, doctors have concluded: “The study shows that the African colon has a number of pathological lesions contrary to previous reported literature.” (Ogutu EO, at al; Colonoscopic findings in Kenyan African patients; East Afr Med J. 1998 Sep;75(9):540-3); and (e) affluent Africans and Asians consume very little fiber—as is apparent to anyone who‘s ever visited an authentic Asian (Japanese, Chinese, Thai, Korean, Indian) or African (Moroccan, Ethiopian, Kenyan, South African) restaurant, where the dominant dishes are meat, fish, and sea food, and the side dishes are primarily white rice, whose fiber content is just 0.4%. “Both kinds of fiber help make stools soft and easy to pass,” which is good for diverticular disease. Not true. Insoluble fiber is a bulking laxative. It makes stools large and hard to pass. That‘s why fiber is called “roughage.” Soluble fiber is a hyperosmolar laxative and diarrhea-causing agent. It does make stools watery, but it also causes bowel inflammation, bloating, and flatulence, and isn‘t suitable for extended use. “Fiber also prevents constipation,” which is essential for diverticular disease. Not true. Fiber DOES NOT prevent constipation. Just like aspirin can relieve pain, natural and medicinal fiber can 'relieve' constipation in people because it is a potent laxative. But fiber can‘t prevent constipation, just like aspirin can‘t prevent migraines or arthritis. In fact, if any aspirin manufacturer made such an outlandish claim, the FDA would shut it down. 
Also, note that fiber DOES NOT relieve chronic constipation, only sporadic constipation in healthy people. When a few legitimate attempts were made to prove fiber‘s effectiveness for “chronic constipation,” according to the American College of Gastroenterology Functional Gastrointestinal Disorders Task Force (2005), they really didn‘t pan out as explained in Fiber Menace's Introduction: Guidelines for the Treatment of Chronic Constipation: What is the Evidence? Specifically, there are 3 RCTs [randomized controlled trials] of wheat bran in patients with chronic constipation, but only 1 is placebo-controlled. This trial did not demonstrate a significant improvement in stool frequency or consistency when compared with placebo—neither did 2 trials that compared wheat bran with corn biscuit or corn bran. Philip S. Schoenfeld, MD, MSEd, MSc; Medscape Today from WebMD Why? Because people who are affected by chronic constipation are also likely to be affected by hemorrhoidal disease and anorectal nerve damage. In this case, large, rough stools are not only undesirable, but are outright damaging. if you already have diverticular disease, your goal is not “large stools more often,” but small stools without straining, and fiber is never going to help you accomplish this reasonable and easily attainable goal. Myth #11: Fiber is safe and effective for the treatment and prevention of diarrhea. Reality: Actually, it‘s the complete opposite—fiber, particularly soluble, is the most common cause of diarrhea in children and adults. That‘s why it‘s recommended as a laxative to begin with. The idea of fiber as a preventive treatment for diarrhea is one of the most preposterous and harmful fiber-related frauds. 
Soluble fiber is widely present in fruits, vegetables, laxatives, and processed foods, such as yogurt, ice cream, sour cream, cream cheese, soy milk, non-dairy creamers, preserves, jellies, candies, cakes, snack bars, canned soups, frozen dinners, sauces, dressings, and endless others. It‘s always expertly concealed from scrutiny behind obscure names such as agar-agar, algae, alginate, β-glucan, cellulose gum, carrageen, fructooligosaccharides, guaran, guar gum, hemicellulose, inulin, Irish moss, kelp, lignin, mucilage, pectin, oligofructose, polydextrose, polylos, resistant dextrin, resistant starch, red algae, and others. These inexpensive industrial fillers are added as stabilizers and volumizers to practically all processed foods, because they hold water, maintain shape, and fake “fattiness.” Besides, they are cheaply bought by the ton, and are resold retail by the gram for immense profit. Once inside the body, these fiber fillers remain indigestible, hold onto water just as tight, and prevent absorption. This property—the malabsorption of fluids—lies behind soluble fiber‘s laxative effect: under normal circumstances a very limited amount of fluids enter the large intestine. When their amount exceeds the colon‘s holding capacity, you get hit with diarrhea. In other words, the term “laxative” is just a euphemism for a “diarrheal” agent. If you overdose on a fiber laxative, you‘ll end up with diarrhea. If you “overdose” on fiber from food, you‘ll end up with exactly the same diarrhea. But since fiber in food can‘t be measured up as reliably as fiber in capsules, wafers, or powders, it‘s much easier to “overdose” the latter fiber and cause severe diarrhea. 
Besides, fiber is even more offensive than synthetic laxatives, because the byproducts of its fermentation cause intestinal inflammation, flatulence, bloating, and cramping — just as described in medical references: Malabsorption Syndromes Colonic bacteria ferment unabsorbed carbohydrates into CO2, methane, H2, and short-chain fatty acids (butyrate, propionate, acetate, and lactate). These fatty acids cause diarrhea. The gases cause abdominal distention and bloating. Gastrointestinal Disorders The Merck Manual of Diagnosis and Therapy The diarrheal effect of soluble fiber is particularly harmful for children, because their smaller intestines need lesser amounts to provoke diarrhea. According to the Centers for Disease Control and Prevention: The Management of Acute Diarrhea in Children …diarrhea remains one of the most common pediatric illnesses. Each year, children less than 5 years of age experience 20-35 million episodes of diarrhea, which result in 2-3.5 million doctor visits, greater than 200,000 hospitalizations, and 325-425 deaths. Centers for Disease Control and Prevention MMWR 1992;41(No. RR-16) That's from 1992, the latest statistic I could find. It must be much worse today because fiber is so much more prevalent. And if you analyze the most basic facts, you'll understand immediately why this travesty is taking place. Consider this: A single adult dose of Metamucil® —a popular fiber laxatives made from psyllium seed husks—contains 2 g of soluble fiber in 6 capsules. One apple, one orange, and one banana—not an unusual number of fruits a child may eat throughout the day—contain a total 4 g of soluble fiber, or an equivalent of 12 capsules of Metamucil for a much larger adult. And that‘s on top of juices, cereals, yogurts, ice creams, candies, cakes, and all other processed food consumed on the same day, all loaded with fiber. 
No wonder that “diarrhea remains one of the most common pediatric illnesses” in the United States, and there is an acute shortage of pediatricians nationwide. God bless our kids. With nutrition like this, they need a lot of blessings. Myth #12: Fiber has been consumed for generations. Though it may seem as if fibermania has been around since the Earth was born, its mainstream acceptance as a health food is actually quite recent. According to Dr. James Whorton‘s book, Inner Hygiene: Constipation and the Pursuit of Health in Modern Society: …the ‘dietary fiber hypothesis,‘ as it was initially known, was put forward in the 1970s, and much of it was accepted as a major addition to medicine and nutrition by the 1980s. As the Introduction to Fiber Menace explains, the original intent for adding fiber wasn‘t for anyone‘s good health, good stools, or longevity, but rather to curb sexuality and build 'character.' Then, in the early 20th century, fiber‘s supposed 'health benefits' were hijacked by the purveyors of grain cereals, such as Kellogg. All other 'cure-all' benefits of fiber—cholesterol reducer, heart disease curative, diabetes antidote, cancer preventive—are recent 'innovations' ruthlessly promoted by the likes of Kellogg Company (All-Bran®, Raisin Bran®), General Mills (FiberOne®), and Procter & Gamble, the makers of Metamucil®. Kellogg Company alone spends over $3.5 billion annually to promote its immensely profitable products, including high-fiber ones. Yet not long ago, fiber in cereals — a.k.a. miller‘s or wheat bran — was sold to feedlots as cattle feed for pennies in a pound. Now, in the best tradition of P.T. Barnum, this byproduct of industrial milling has become a curative 'health food.' Paraphrasing Scott Adams — never underestimate the power of greed. With so much profit riding on cereals, laxatives, and fiber-enriched foods, they could declare fiber the President of the United States if they so desired. 
Procter & Gamble, for example, markets Metamucil® Fiber Capsules Plus Calcium to, among other things, “build strong bones.” Here is an example of these properties promoted on Proctor & Gamble's Metamucil® web site (screen capture modified to fit this page, highlights are mine; click the picture to open actual web page): — What is the grossest irony here (besides other blatant deceptions already deciphered above)? The soluble fiber in Metamucil blocks the absorption of fats and fat-soluble minerals, required for assimilation of fat-soluble vitamin D and essential minerals, including calcium. So if you take this 'snake oil' to protect your bones, not only will you not get much calcium from it, you'll also be suffering a precipitous loss of calcium from your bones by interfering with vitamin D absorption. Oh, well... Author's note As you can see, it isn't me who has gone postal over fiber. I am just a messenger—and a darn lucky one, because my diet is mercifully fiber-free, and fiber no longer wrecks my health. Change will not come any time soon unless you e-mail a link to this site to your relatives, friends, colleagues, bosses, elected representatives; your favorite radio station, television channel, newspaper, and internet columnists; your children's teachers and school administrators, and, of course, to your doctors. If you want to effect change, help them to learn the true facts! For a greater impact, consider gifting Fiber Menace to people you love and to people of considerable influence. Amazon will gladly send your gift to any person you choose. When people see those brass screws inside the cereal bowl, they act, because nothing motivates people as much as fear does for their own health and future. And that's how you bring about effective change! Konstantin Monastyrsky
//
// $Date: 2008-08-20 16:59:17 +0300 (K, 20 aug 2008) $ $Revision: 599 $
//
/*
 * Reading in ending groups, creating new ones and writing them to file.
 * (An "ending group" is one line of the LGR file: "nr|form-ending/form-ending/...".)
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "../../../lib/etana/post-fsc.h"
#include "noomen.h"

extern tyvestr tyvi[];
//extern FSXSTRING inbuf; /* the line being processed */
extern FSXSTRING sliik;
extern int tyybinr;

FSWCHAR *lgr[ MAX_LGR ]; /* pointers to the ending groups held in memory; slot 0 unused */
//FSWCHAR lopugr[ sizeof(FSWCHAR) * BUFSIZE ]; /* one ending group built from a chain */

/*
 * loe_lgr() -- read every line of the LGR file into the global lgr[] array,
 * one malloc'ed wide string per line, starting at index 1.
 * Exits the process on out-of-memory.
 *
 * NOTE(review): there is no bound check against MAX_LGR in this loop; an LGR
 * file with MAX_LGR or more lines would overflow lgr[] -- confirm the input
 * is trusted/bounded.
 */
void loe_lgr( CPFSFile *lgrf )
    {
    int i;
    CFSWString inbuf;

    for (i=0; i < MAX_LGR; i++)
        lgr[i] = NULL; /* initialisation: mark every slot unused */
//    for (i=1 ;; i++)   /* each pass of this loop
//                        * analyses one
//                        * line of the LGR file */
//        {
//        if( fgets( inbuf, BUFSIZE, lgrf ) == NULL )
//            if ( !( feof( lgrf ) ) )
//                {
//                printf( "\nError reading file LGR\n" );
//                exit( 1 );
//                }
//            else
//                break;
    lgrf->Seek(0L);
    for (i=1; lgrf->ReadLine(&inbuf)==true; i++)
        {
        /* +1 for the terminating NUL of the wide-character copy */
        lgr[i] = (FSWCHAR *)malloc( (inbuf.GetLength()+1)*(sizeof(FSWCHAR)) );
        if (!lgr[i])
            {
            printf("\nloe_lgr()-l ei j�tku m�lu lgr[] sisselugemiseks");
            exit(1);
            }
        PFSStrCpy(lgr[i], inbuf.GetLength()+1, (const FSWCHAR *)inbuf);
        }
    }

/*
 * lisa_lgr() -- from the ending/form chains held in memory, builds strings
 * of the LGR-file form "nr|form-ending/form-ending/...";
 * records for every stem in memory which numbered ending group it has;
 * if such an ending group is not yet in the LGR file, appends it there.
 */
void lisa_lgr( CPFSFile *lgrf )
    {
    int i,j;
    // char *k;
    struct lyli *koht;      /* cursor into a stem's chain of endings/forms */
    FSXSTRING lopugr;       /* the ending-group string built for one stem */
    FSXSTRING tmp;
    int k;                  /* index of '|' inside an existing group string */

    // for (i=0; *(tyvi[i].tyv); i++) /* look at every stem */
    for (i=0; tyvi[i].tyv.GetLength() > 0; i++) /* look at every stem */
        {
        lopugr = FSWSTR("");
        koht = tyvi[i].yks->next; /* the 1st link exists only for the algorithm's sake */
        if ( !koht ) /* the stem has no endings at all */
            {
            if ( tyybinr == 0 && i == 0 )
                ; /* need not have endings; OK */
            else /* this stem is here for nothing */
                {
                // strcpy( tyvi[i].grnr, "*" ); /* so that tee_rida() would know */
                tyvi[i].grnr = FSWSTR("*");
                continue;
                }
            }
        else
            {
            for (;;) /* all endings/forms from the chain into lopugr[] */
                {
                /* strcat( lopugr, koht->vorm );
                strcat( lopugr, "-" );
                strcat( lopugr, koht->lp );
                strcat( lopugr, "/" ); */
                lopugr += koht->vorm;
                lopugr += FSWSTR("-");
                lopugr += koht->lp;
                lopugr += FSWSTR("/");
                koht = koht->next;
                if (!koht)
                    break;
                }
            }
        // strcat( lopugr, "\n" );
        lopugr += FSWSTR("\n");
#if !defined(NDEBUG)
        ///unusedint dbtv_lopugr_GetLength=lopugr.GetLength();
#endif
        if ( lopugr.GetLength() > BUFSIZE )
            {
            printf("\nl�pugrupp liiga suur");
            exit(1);
            }
        for (j=1; lgr[j]; j++) /* compare lopugr[] with every known ending group */
            {
            if (j >= MAX_LGR - 1)
                {
                printf("\nlisa_lgr()-s liiga palju lgr[]-sid: %d", j);
                exit(1);
                }
            tmp = lgr[j];
            // k = strchr( lgr[j], '|' );
            k = tmp.Find(FSWSTR("|"));
            if (k==-1)
                {
                printf("\nlisa_lgr() ei leia %d-nda lgr-i nr", j);
                exit(1);
                }
            // if (!strcmp( k+1, lopugr )) /* such an lgr already exists */
            if (lopugr == (const FSWCHAR *)tmp.Mid(k+1)) /* such an lgr already exists */
                {
                // *k = '\0';
                // strcpy( tyvi[i].grnr, lgr[j] ); /* group number into tyvi[] */
                // *k = '|'; /* restore the original state */
                tyvi[i].grnr = (const FSWCHAR *)tmp.Left(k); /* group number into tyvi[] */
                break;
                }
            }
        if (!lgr[j]) /* a new ending group was found */
            {
            /* store "j|<lopugr>" in a new slot and append it to the LGR file;
             * +6 leaves room for the group number, the '|' and the NUL */
            // lgr[j] = malloc( strlen(lopugr)+6 );
            lgr[j] = (FSWCHAR *)malloc( (lopugr.GetLength() + 6) * sizeof(FSWCHAR) );
            if (!lgr[j])
                {
                printf("\nloe_lgr()-l ei j�tku m�lu lopugr jaoks");
                exit(1);
                }
            // itoa( j, lgr[j], 10 );
            // strcpy( tyvi[i].grnr, lgr[j] ); /* group number into tyvi[] */
            // strcat( lgr[j], "|" );
            // strcat( lgr[j], lopugr );
            tmp.Format(FSWSTR("%d"), j);
            tyvi[i].grnr = (const FSWCHAR *)tmp;
            tmp += FSWSTR("|");
            tmp += lopugr;
            PFSStrCpy(lgr[j], lopugr.GetLength() + 6, (const FSWCHAR *)tmp);
            // if ( fputs( lgr[j], lgrf) == EOF )
            if ((lgrf->WriteString((const FSWCHAR *)tmp,
                (int)(tmp.GetLength()))) == false)
                {
                printf("\ntulemuse kirjut. LGR-faili eba�nnestus");
                exit(1);
                }
            }
        }
    }
#include <stdio.h>
#include <stdlib.h>   /* fix: abs() was used without a declaration */

/* Returns the smaller of a and b. */
int MIN(int a, int b)
{
    return a < b ? a : b;
}

/*
 * Circular-tram problem: a loop of length s, start position x1, target x2.
 * Walking costs t2 time units per distance unit; the tram (at position p,
 * moving in direction d, +1 or -1) costs t1 per unit.  Prints the minimum
 * of walking directly vs. waiting for / riding the tram.
 */
int main(void)
{
    int s, x1, x2, t1, t2, p, d, ans;

    if (scanf("%d %d %d %d %d %d %d", &s, &x1, &x2, &t1, &t2, &p, &d) != 7)
        return 1;   /* fix: previously the input was used unchecked */

    if (x1 == x2)
        ans = 0;    /* fix: this case left ans uninitialized (UB) */
    else if (d == 1 && x1 < x2 && p <= x1)
        ans = MIN(abs(x2 - x1) * t2, abs(x2 - p) * t1);
    else if (d == 1 && x1 < x2 && p > x1)
        ans = MIN(abs(x2 - x1) * t2, (abs(s - p) + s + x2) * t1);
    else if (d == -1 && x1 < x2)
        ans = MIN(abs(x2 - x1) * t2, (p + x2) * t1);
    else if (d == -1 && x1 > x2 && p >= x1)
        ans = MIN(abs(x1 - x2) * t2, abs(p - x2) * t1);
    else if (d == -1 && x1 > x2 && p < x1)
        ans = MIN(abs(x1 - x2) * t2, (abs(s - x2) + s + p) * t1);
    else /* d == 1 && x1 > x2 */
        ans = MIN(abs(x1 - x2) * t2, (abs(s - p) + abs(s - x2)) * t1);

    printf("%d\n", ans);
    return 0;
}
def full_message(self, course_key):
    """Return this status message, optionally combined with the course-specific one.

    Fix: the cached value was fetched twice (``cache.get`` in the condition
    and again in the return), costing an extra cache round-trip; fetch once.
    A falsy cached value is still treated as a miss, as before.

    :param course_key: key of the course whose extra message (if any) is
        appended after a ``<br />``; falsy to return only the base message.
    :return: the (possibly combined) HTML message; also stored in the cache.
    """
    cache_key = f"status_message.{str(course_key)}"
    cached = cache.get(cache_key)
    if cached:
        return cached
    msg = self.message
    if course_key:
        try:
            course_home_message = self.coursemessage_set.get(course_key=course_key)
            if course_home_message:
                msg = HTML("{} <br /> {}").format(HTML(msg), HTML(course_home_message.message))
        except CourseMessage.DoesNotExist:
            # No per-course message configured; fall back to the base message.
            pass
    cache.set(cache_key, msg)
    return msg
/**
 * Graph stream viewer: renders the nodes and edges of an ACO (ant colony
 * optimization) TSP instance using the GraphStream library.
 *
 * @author Min Cai
 */
public class GraphStreamViewer {
    private ACOHelper acoHelper;   // the ACO instance being visualized
    private Graph graph;           // the GraphStream graph mirroring it

    static {
        // Select GraphStream's Java2D renderer before any graph/viewer is created.
        System.setProperty("org.graphstream.ui.renderer", "org.graphstream.ui.j2dviewer.J2DGraphRenderer");
    }

    /**
     * Create a graph stream viewer.
     *
     * @param acoHelper the ACO helper whose nodes and edges are displayed
     */
    public GraphStreamViewer(ACOHelper acoHelper) {
        this.acoHelper = acoHelper;

        this.graph = new SingleGraph("ACO for TSP");

//        this.graph.addAttribute("ui.quality");
//        this.graph.addAttribute("ui.antialias");

        // One graph node per ACO node, placed at the node's (x, y) and
        // labeled with its name.
        acoHelper.getNodes().forEach(node -> {
            Node graphNode = graph.addNode(node.getName());
            graphNode.addAttribute("xy", node.getX(), node.getY());
            graphNode.addAttribute("label", String.format("%s", node.getName()));
        });

        // One graph edge per ACO edge, keyed "<from>-<to>".
        acoHelper.getEdges().forEach(edge -> {
            Edge graphEdge = graph.addEdge(
                    edge.getNodeFrom().getName() + "-" + edge.getNodeTo().getName(),
                    edge.getNodeFrom().getName(), edge.getNodeTo().getName());
//            graphEdge.addAttribute("label", String.format("%.4f", edge.getPheromone()));
//            graphEdge.addAttribute("ui.style", "shape: angle;");
//            graphEdge.addAttribute("ui.style", "stroke-width: 10;");
//            graphEdge.addAttribute("ui.style", "fill-color: white;");
        });

        graph.addAttribute("ui.stylesheet", "url('src/main/java/archimulator/util/ai/aco/stylesheet.css')");

        // display(false) disables auto-layout -- presumably because node
        // positions are already set explicitly above; confirm if layout ever
        // looks wrong.
        this.graph.display(false);
    }

    /**
     * Standalone demo entry point: displays a Petersen-generator graph,
     * unrelated to the ACO constructor above.
     */
    public static void main(String[] args) {
        Graph graph = new MultiGraph("random walk");
        Generator gen = new PetersenGraphGenerator();
        gen.addSink(graph);
        gen.begin();
        for (int i = 0; i < 400; i++) {
            gen.nextEvents();
        }
        gen.end();
        graph.addAttribute("ui.quality");
        graph.addAttribute("ui.antialias");
        graph.display(false);
    }

    /**
     * Get the ACO helper.
     *
     * @return the ACO helper
     */
    public ACOHelper getAcoHelper() {
        return acoHelper;
    }

    /**
     * Get the graph.
     *
     * @return the graph
     */
    public Graph getGraph() {
        return graph;
    }
}
def sql_summary(lines, show_times=False, show_details=False, **options):
    """Parse SQL log ``lines`` and print a summary table (or per-entry details).

    Each line is matched against the module-level ``regex``; entries with the
    same group key are merged via ``Entry.collect``.

    :param lines: iterable of raw log lines.
    :param show_times: in summary mode, sort by total time and show timing columns.
    :param show_details: print every entry (sorted by total time) plus the
        slowest single call, instead of the summary table.
    :param options: passed through to the ``p`` printer.
    :raises Exception: if sqlparse is unavailable or a line does not match.
    """
    if sqlparse is None:
        raise Exception("sql_summary() requires the sqlparse package")
    d = {}
    for l in lines:
        # Strip quoting so identifiers compare equal regardless of dialect.
        l = l.replace('"', '')
        l = l.replace('`', '')
        m = re.match(regex, l)
        if m is None:
            raise Exception("Invalid line {!r}".format(l))
        g = m.groupdict()
        entry = Entry(g['sql'], g['time'])
        k = entry.group_key()
        if k in d:
            d[k].collect(entry)
        else:
            d[k] = entry
    if not d:
        print("No sql queries found")
        return
    if show_details:
        for e in sorted(d.values(), key=lambda x: x.total_time):
            p(e, **options)
        print("-------------------")
        print("The slowest SQL call was:")
        # Fix: this was `d[max(d, key=lambda x: x.time)]`, which applied the
        # key function to the dict *keys* (group-key strings, which have no
        # `.time`) and then indexed d with an Entry.  Take the max over the
        # entries themselves.
        e = max(d.values(), key=lambda x: x.time)
        p(e, **options)
        print("-------------------")
    else:
        if show_times:
            headers = 'total_time count table stmt_type time'.split()
            values = sorted(d.values(), key=lambda x: -x.total_time)
        else:
            headers = 'table stmt_type count'.split()
            values = sorted(d.values(), key=lambda x: x.table)
        rows = [[getattr(e, h) for h in headers] for e in values]
        rows.sort()
        print(rstgen.table(headers, rows))
/**
 * Persist recent list asynchronously on a background thread.
 * <p>
 * No-op when {@code ids} has not been loaded yet (is null); otherwise fires
 * an {@code AsyncTask} whose background step calls {@code save()}, and
 * returns immediately without waiting for the write to finish.
 *
 * @return this recent list
 */
public RecentRepositories saveAsync() {
    if (ids != null)
        new AsyncTask<Void, Void, Void>() {

            @Override
            protected Void doInBackground(Void... params) {
                // Runs off the UI thread; save() does the actual persistence.
                save();
                return null;
            }
        }.execute();
    return this;
}
<gh_stars>1-10
package model

// Permission : Permission to assign.
// The values mirror HTTP method names, so a Permission presumably grants the
// right to call the corresponding method -- confirm against the consumers.
type Permission string

// List of possible Permission values.
//
// NOTE(review): underscore names (Permission_GET) are un-idiomatic Go
// (PermissionGet would be conventional), but renaming would break callers,
// so they are kept as-is.
const (
	Permission_GET    Permission = "GET"
	Permission_DELETE Permission = "DELETE"
	Permission_POST   Permission = "POST"
	Permission_PUT    Permission = "PUT"
	Permission_PATCH  Permission = "PATCH"
)
def addHelpSection(self, accessLevel : int, sectionName : str):
    """Create a new, empty help section at the given access level.

    Registers ``sectionName`` under ``self.helpSections[accessLevel]`` and
    seeds ``self.helpSectionEmbeds`` with a single one-page embed.

    :param int accessLevel: index of the access level; must be in
        ``[0, self.numAccessLevels)``.
    :param str sectionName: name of the new section.
    :raises IndexError: if accessLevel is out of range.
    :raises ValueError: if the section already exists at this access level.
    """
    if accessLevel < 0 or accessLevel > self.numAccessLevels - 1:
        raise IndexError("accessLevel must be at least 0, and less than " + str(self.numAccessLevels))
    # Fix: the duplicate check previously tested `sectionName in
    # self.helpSections`, i.e. against the access-level keys of the outer
    # dict, so an existing section was never detected.  Check the per-level
    # section dict instead.
    if sectionName in self.helpSections[accessLevel]:
        raise ValueError("The given section name already exists in this DB '" + sectionName + "'")
    self.helpSections[accessLevel][sectionName] = []
    self.helpSectionEmbeds[accessLevel][sectionName] = [Embed(title="BB " + bbConfig.accessLevelNames[accessLevel] + " Commands",
                                                              description=bbData.helpIntro + "\n__" + sectionName.title() + "__")]
    self.helpSectionEmbeds[accessLevel][sectionName][0].set_footer(text="Page 1 of 1")
    self.totalEmbeds[accessLevel] += 1
// relocateSpec creates a spec for where the new machine will be located. func (c *vsphereClientImpl) relocateSpec(ctx context.Context, s *vsphereSettings) (types.VirtualMachineRelocateSpec, error) { var spec types.VirtualMachineRelocateSpec var morRP types.ManagedObjectReference var morDS types.ManagedObjectReference rp, err := c.Finder.ResourcePool(ctx, s.ResourcePool) if err != nil { err = errors.Wrapf(err, "error finding pool %s", s.ResourcePool) grip.Error(err) return spec, err } morRP = rp.Common.Reference() spec.Pool = &morRP if s.Datastore != "" { ds, err := c.Finder.Datastore(ctx, s.Datastore) if err != nil { err = errors.Wrapf(err, "error finding datastore %s", s.Datastore) grip.Error(err) return spec, err } morDS = ds.Common.Reference() spec.Datastore = &morDS } grip.Info(message.Fields{ "message": "created spec to relocate clone", "resource_pool": morRP, "datastore": morDS, }) return spec, nil }
Below is a recent interview with the Hungarian intelligence analyst László Földi, who has been featured in several videos here in the past. In this clip Mr. Földi and the presenter discuss the shadowy political forces that have generated the current “refugee crisis” in Europe for their own ends. In particular he addresses the important question: “Cui bono?” Many thanks to CrossWare for the translation, and to Vlad Tepes for the subtitling: Transcript: 0:00 The national security expert László Földi is our guest. Good evening. —Good evening. 0:04 Let’s stay with the civilian organizations a bit, because with the 0:08 under-secretary we talked about this previously, that a civilian organization 0:12 could be a state security risk. 0:16 Is it possible to examine such risk with counter-intelligence tools, or 0:20 must Parliament be asked for permission beforehand? 0:24 It is not important whether it is a civilian organization or not, 0:28 this is not how we should phrase this, but everybody who endangers the safety of the state 0:32 that actually commits such an act. 0:36 So what needs to be measured, 0:40 this is done individually, or with a group, 0:44 or in as a civil organization, just a form, yes or no, 0:48 the authorities must decide that what it does 0:52 violates the national security of the state, the activity 0:56 of the organization. In that meaning, yes, 1:00 it is possible to disband any groups, no matter what was their original purpose, 1:04 or whatever ideology they try to align themselves with. 1:08 This example from Florence we were talking about, could it be included here? 1:12 Or we can say only hot-headed young people invented a cause for themselves, 1:16 and they represent that cause no matter what? 1:20 The situation is: the laws must be obeyed. 1:24 That is the basics. Whoever does not comply with the law is committing a crime. 1:28 It does not matter if they are older, or doing it 1:32 out of youthful vigor. 
If we examine the migration, 1:36 the issue is not how much the migrants want integration, 1:40 but the fact they do not obey laws. 1:44 While they are escaping and running away [from their homelands], I can accept 1:48 that there is no law there to obey, but 1:52 they travel through countries until they reach their destination, 1:56 where they are not in danger at all. 2:00 From that point they have only one task: 2:04 Obey the law. If they do not do that, they are criminals. 2:08 Would you confirm the V4 [Visegrad Four] point of view, 2:12 that we must provide or strengthen the security 2:16 of the external borders of the European Union? 2:20 I think that is a very basic question, in the EU Charter, 2:24 all kind of documents like the NATO Charter or the EU defense document, 2:28 contains such things, that Europe must be defended if anyone 2:32 violates that order, or the sovereignty of the states is violated. 2:36 These are now interesting-sounding sentences, 2:40 and this Australian general who gave this interview, 2:44 he does not really understand why the EU — Jim Molan right? — Yes. 2:48 So he does not understand why we do not solve this problem, 2:52 because it would be very easy to handle this professionally, and he explains it there: 2:56 we must not let the ships in; they must be returned, etc. etc. etc. Listing it. 3:00 One thing he does not take into consideration: he is a soldier, a professional, 3:04 an officer. In Europe the problem is that we could not defend our borders, 3:08 or the European officers could not implement the same logic, 3:12 or follow the same path, but the fact is, in Europe there are such political forces — 3:16 not just in Europe — that created this whole situation itself. 3:20 Australia did not invite anybody there; no political force did such a thing. 3:24 Europe is not playing defense, pulling the short stick, 3:28 but just the opposite: they generated the whole invasion. 3:32 Whose interest is it, to do such a thing? 
3:36 Well, this got pointed out at many levels. There are economic groups 3:40 in the background that hope to gain from the migration industry, 3:44 such political forces in the background 3:48 that hope these masses will become their voter base, 3:52 and they will support them, and there are groups in Europe who simply 3:56 wants to convert the continent into a watered-down 4:00 multicultural system opposed against the nation-states. 4:04 These groups really created this invasion, 4:08 they really messed up these poor people here, 4:12 who very often become criminals or will at least be frustrated. 4:16 Jim Molan is that man, who… 4:20 General Jim Molan, who gave the interview to Politico talking about the Australian 4:24 Situation. He said something very interesting. He said 4:28 we should ruin the human smuggler’s business model. 4:32 Those people come here 4:36 and paid the 10-20 thousand euros or dollars. 4:40 Take them back where they came from and they would figure out instantly, 4:44 Oops, they lost 20,000 dollars or euros. 4:48 They would not jump into a deal like that again, and they could tell their friends, too. 4:52 Yes, but I’d like to emphasize the human smugglers just built on something; they didn’t start it, 4:56 So the problem is really this, and the human smugglers… 5:00 Could it be that the business groups and human smugglers have a connection? 5:04 Yes, that is possible. They could have a business relationship between them; 5:08 this is a multi-billion-euro business, and the profit is going somewhere, 5:12 not just to criminals but others, especially in a system 5:16 that was perfectly built for them. Let’s not kid around, 5:20 a gigantic logistical system is required to move such large masses, 5:24 across many borders and countries. By itself, 5:28 this could not spontaneously come into existence at any level. 5:32 The human smugglers are simply tools in the hands of 5:36 the system, which wants to “remake” Europe in this way. 
5:40 So not only a political correlation exists here, 5:44 but also an economic one; so politicians and human traffickers are working together? 5:48 Yes those are facts, already well-established facts. 5:52 Because there some political forces in Europe who 5:56 themselves have become part of the migrant industry, 6:00 and participate in the profit-gathering. 6:04 Now you have published a book about the migration crisis. What is your 6:08 Expectation for the near future? Let’s take a one-year period, 6:12 and after a five-year interval, what will change in Europe, 6:16 and what will change in the migration crisis? 6:19 In one year, nothing. It will be interesting how the 2017 6:22 European elections… parliamentary elections turn out. 6:26 That will determine the next five years. 6:30 That is true. What is the event 6:34 that could turn around the outcome of the crisis? 6:38 Those who are thinking about a minimal defense system, 6:42 — it is enough, the minimal defense? Yes, it’s enough 6:46 to stop the process. It is not enough 6:50 to restore order out of the anarchy that we have now; 6:54 that will be a much longer and more arduous process. 6:58 We would need very hard-hearted politicians to take on these issues, 7:02 to work with the problems, the way the Australians are doing it. 7:06 In Australia the politicians are not worse or more hard-core than anywhere else, but 7:10 they defend the interests of their country and their people. 7:14 This is about political correctness (PC) in Europe? Yes, a significant portion
def create_random_bytes(
    min_length: Optional[int] = None,
    max_length: Optional[int] = None,
    lower_case: bool = False
) -> bytes:
    """Return a random ASCII-hex byte string with length between the bounds.

    Draws ``length`` random bytes (``length`` uniform in
    ``[min_length, max_length]``), hex-encodes them (doubling the length),
    and truncates back to at most ``max_length`` bytes at a random cut point.

    :param min_length: lower bound; defaults to 0.
    :param max_length: upper bound; defaults to ``(min_length + 1) * 2``.
    :param lower_case: lowercase the result (hexlify output is already
        lowercase, so this is effectively a no-op kept for compatibility).
    :return: a bytes object of hex characters.
    """
    if min_length is None:
        min_length = 0
    if max_length is None:
        # Fix: was `min_length + 1 * 2`, which precedence reduces to
        # `min_length + 2`; the `* 2` only makes sense as compensating for
        # hexlify doubling the length, i.e. `(min_length + 1) * 2`.
        max_length = (min_length + 1) * 2
    length = random.randint(min_length, max_length)
    result = hexlify(urandom(length))
    if lower_case:
        result = result.lower()
    if max_length and len(result) > max_length:
        # `min_length or 0` was dead code: min_length is always an int here.
        end = random.randint(min_length, max_length)
        return result[0:end]
    return result
/**
 * Returns a flattened JSON string.
 * <p>
 * Objects (and arrays that can be treated as objects) are rendered from the
 * flattened map; any other root value is converted directly to JSON.
 *
 * @return a flattened JSON string
 */
public String flatten() {
    flattenAsMap();
    boolean mapLike = source.isObject() || isObjectifiableArray();
    if (!mapLike) {
        return javaObj2Json(flattenedMap.get(ROOT));
    }
    return flattenedMap.toString(printMode);
}
// class union advanced.union1(advanced.a) advanced.union1(advanced.b) // primative union advanced.union2(123) advanced.union2("foo")
I think it's safe to say that we as Cape Bretoners are not Hollywood types. As I walk down Charlotte Street on my way to work, past the parade of flannel and baseball caps, "glamorous" just isn't the word that comes to mind. Combined with a near obsessive compulsion to be ever-humble, the limelight does little more to a Cape Bretoner than make us uncomfortably warm. Also, at some point in history, humanity banded together and decided actors and musicians were the professions most worthy of our attention, relegating the important jobs of mining and fishing to the cultural afterthought of "I'm sure glad they exist!" Yet even with our penchant for humility and professional obscurity, a select few of us have found ways to make lasting impacts far beyond the shores of our little island. Whether it was for battling a smallpox epidemic or fighting Germans in European trenches, the following Cape Bretoners went above and beyond their duty as mere humans, and became... Episode 1: The Man who Replaced his Hands with Oscars Harold Russell won two more Oscars than Leonardo DiCaprio, was one of only two non-professional actors to win an Academy Award, and is the only person to ever win two Oscars for the same role. Also, Harold had no hands. Yeah, actually it was a bit more impressive than that. Even though Harold Russell was born in North Sydney in 1914, it's surprising to me he isn't a household name around our island. A big reason for that is, like me, he moved to America at a young age, and it was in America that Captain Russell earned his unexpected fame and accolades, but let's not hold that against him. At the ripe ole age of 7, Harold and his family moved to Massachusetts after his father's death, where Harold took to being American like the English take to drinking tea. It was this fervent sense of national pride that led Harold to an Army recruitment office the day after the Japanese attack on Pearl Harbor. 
Unfortunately for him (but fortunately for us), Harold never saw the Pacific front. As an Army instructor in North Carolina, Sergeant Russell was filming a training video in which he had to handle explosives. During the filming, two mistakes were made. 1. The explosive was needlessly and dangerously live, and 2. the fuse on the live explosive was defective. Said explosive ever-so-rudely blew both Harold's hands clean off. Records regarding the incident are sparse, but the Army did conclude that the experience hurt really, really bad. Sergeant Russell was given two hooks and scratched fighting the Japanese off his bucketlist. He would have made an incredibly charismatic pirate, don't you think? Still holding the raw magnetism to be in front of the camera (without the ability to hold much else), Harold was cast in The Best Years of Our Lives, and played a Navy sailor who lost both hands during the war. A role Russell didn't really need to reach too terribly far for. Now, the more astute readers amongst you might be thinking "Rory! It's theoretically impossible to win two Oscars for the same role!" And you would be 100% right. Good job! Giving Mr. Russell two Academy Awards was actually a huge, pretentious mistake (marking the first and last time Hollywood was ever pretentious). The Academy, thinking Russell had little chance of winning an Oscar against "real, professional actors," gave him an Honorary Award for his service to his fellow veterans. This was about an hour before Harold won an actual Best Supporting Actor for his role, and made the Academy look silly. Knowing that taking an honorary Oscar away from a handless veteran was a PR nightmare waiting to happen, The Academy let Russell keep both, making it the only time in Oscar history any person was awarded two Oscars for the same performance. Proof that Harold Russell enjoyed winning Oscars and making Hollywood types look dumb at the same time. 
Harold Russell walked away from acting after winning twice as many Oscars as Al Pacino, because there "weren't many roles for actors without hands." This remains the most sensible acting career decision in Hollywood history. I would like to think Harold made custom mounts to attach his awards to his wrists, and called himself "Johnny Oscarhands," but in reality he spent the rest of his life fighting for handicapped rights and writing to Presidents. Harold Russell is unique as the only Cape Bretoner to ever win an Oscar, not to mention the only person in history to win 2 for the same role. And as a fellow Caper, I'm proud he did it in the weirdest way possible. Next Episode: The Man who Enjoyed World War 1 a bit too much....
// Copyright 1998-2016 Epic Games, Inc. All Rights Reserved. #include "BlueprintCompilerCppBackendModulePrivatePCH.h" #include "BlueprintCompilerCppBackendUtils.h" #include "Editor/UnrealEd/Public/Kismet2/StructureEditorUtils.h" #include "Engine/InheritableComponentHandler.h" #include "Engine/DynamicBlueprintBinding.h" void FEmitDefaultValueHelper::OuterGenerate(FEmitterLocalContext& Context , const UProperty* Property , const FString& OuterPath , const uint8* DataContainer , const uint8* OptionalDefaultDataContainer , EPropertyAccessOperator AccessOperator , bool bAllowProtected) { // Determine if the given property contains an instanced default subobject reference. We only get here if the values are not identical. auto IsInstancedSubobjectLambda = [&](int32 ArrayIndex) -> bool { if (auto ObjectProperty = Cast<UObjectProperty>(Property)) { check(DataContainer); check(OptionalDefaultDataContainer); auto ObjectPropertyValue = ObjectProperty->GetObjectPropertyValue_InContainer(DataContainer, ArrayIndex); auto DefaultObjectPropertyValue = ObjectProperty->GetObjectPropertyValue_InContainer(OptionalDefaultDataContainer, ArrayIndex); if (ObjectPropertyValue && ObjectPropertyValue->IsDefaultSubobject() && DefaultObjectPropertyValue && DefaultObjectPropertyValue->IsDefaultSubobject() && ObjectPropertyValue->GetFName() == DefaultObjectPropertyValue->GetFName()) { return true; } } return false; }; if (Property->HasAnyPropertyFlags(CPF_EditorOnly | CPF_Transient)) { UE_LOG(LogK2Compiler, Verbose, TEXT("FEmitDefaultValueHelper Skip EditorOnly or Transient property: %s"), *Property->GetPathName()); return; } for (int32 ArrayIndex = 0; ArrayIndex < Property->ArrayDim; ++ArrayIndex) { if (!OptionalDefaultDataContainer || (!Property->Identical_InContainer(DataContainer, OptionalDefaultDataContainer, ArrayIndex) && !IsInstancedSubobjectLambda(ArrayIndex))) { FString PathToMember; auto PropertyOwnerAsBPGC = Cast<UBlueprintGeneratedClass>(Property->GetOwnerClass()); if 
(PropertyOwnerAsBPGC && !Context.Dependencies.WillClassBeConverted(PropertyOwnerAsBPGC)) { ensure(EPropertyAccessOperator::None != AccessOperator); const FString OperatorStr = (EPropertyAccessOperator::Dot == AccessOperator) ? TEXT("&") : TEXT(""); const FString ContainerStr = (EPropertyAccessOperator::None == AccessOperator) ? TEXT("this") : FString::Printf(TEXT("%s(%s)"), *OperatorStr, *OuterPath); PathToMember = FString::Printf(TEXT("FUnconvertedWrapper__%s(%s).GetRef__%s()"), *FEmitHelper::GetCppName(PropertyOwnerAsBPGC), *ContainerStr , *UnicodeToCPPIdentifier(Property->GetName(), false, nullptr)); } else if (Property->HasAnyPropertyFlags(CPF_NativeAccessSpecifierPrivate) || (!bAllowProtected && Property->HasAnyPropertyFlags(CPF_NativeAccessSpecifierProtected))) { ensure(EPropertyAccessOperator::None != AccessOperator); const FString OperatorStr = (EPropertyAccessOperator::Dot == AccessOperator) ? TEXT("&") : TEXT(""); const FString ContainerStr = (EPropertyAccessOperator::None == AccessOperator) ? TEXT("this") : OuterPath; const FString StaticArrayIndexArg = FString::Printf(TEXT(", %d"), ArrayIndex); const FString GetPtrStr = FEmitHelper::AccessInaccessibleProperty(Context, Property, ContainerStr, OperatorStr, StaticArrayIndexArg); PathToMember = Context.GenerateUniqueLocalName(); Context.AddLine(FString::Printf(TEXT("auto& %s = %s;"), *PathToMember, *GetPtrStr)); } else { const FString AccessOperatorStr = (EPropertyAccessOperator::None == AccessOperator) ? TEXT("") : ((EPropertyAccessOperator::Pointer == AccessOperator) ? TEXT("->") : TEXT(".")); const bool bStaticArray = (Property->ArrayDim > 1); const FString ArrayPost = bStaticArray ? 
FString::Printf(TEXT("[%d]"), ArrayIndex) : TEXT(""); PathToMember = FString::Printf(TEXT("%s%s%s%s"), *OuterPath, *AccessOperatorStr, *FEmitHelper::GetCppName(Property), *ArrayPost); } const uint8* ValuePtr = Property->ContainerPtrToValuePtr<uint8>(DataContainer, ArrayIndex); const uint8* DefaultValuePtr = OptionalDefaultDataContainer ? Property->ContainerPtrToValuePtr<uint8>(OptionalDefaultDataContainer, ArrayIndex) : nullptr; InnerGenerate(Context, Property, PathToMember, ValuePtr, DefaultValuePtr); } } } void FEmitDefaultValueHelper::GenerateGetDefaultValue(const UUserDefinedStruct* Struct, FEmitterLocalContext& Context) { check(Struct); const FString StructName = FEmitHelper::GetCppName(Struct); Context.Header.AddLine(FString::Printf(TEXT("static %s GetDefaultValue()"), *StructName)); Context.Header.AddLine(TEXT("{")); Context.Header.IncreaseIndent(); Context.Header.AddLine(FString::Printf(TEXT("%s DefaultData__;"), *StructName)); { TGuardValue<FCodeText*> OriginalDefaultTarget(Context.DefaultTarget, &Context.Header); FStructOnScope StructData(Struct); FStructureEditorUtils::Fill_MakeStructureDefaultValue(Struct, StructData.GetStructMemory()); for (auto Property : TFieldRange<const UProperty>(Struct)) { OuterGenerate(Context, Property, TEXT("DefaultData__"), StructData.GetStructMemory(), nullptr, EPropertyAccessOperator::Dot); } } Context.Header.AddLine(TEXT("return DefaultData__;")); Context.Header.DecreaseIndent(); Context.Header.AddLine(TEXT("}")); } void FEmitDefaultValueHelper::InnerGenerate(FEmitterLocalContext& Context, const UProperty* Property, const FString& PathToMember, const uint8* ValuePtr, const uint8* DefaultValuePtr, bool bWithoutFirstConstructionLine) { auto OneLineConstruction = [](FEmitterLocalContext& LocalContext, const UProperty* LocalProperty, const uint8* LocalValuePtr, FString& OutSingleLine, bool bGenerateEmptyStructConstructor) -> bool { bool bComplete = true; FString ValueStr = HandleSpecialTypes(LocalContext, LocalProperty, 
LocalValuePtr); if (ValueStr.IsEmpty()) { ValueStr = LocalContext.ExportTextItem(LocalProperty, LocalValuePtr); auto StructProperty = Cast<const UStructProperty>(LocalProperty); if (ValueStr.IsEmpty() && StructProperty) { check(StructProperty->Struct); if (bGenerateEmptyStructConstructor) { ValueStr = FString::Printf(TEXT("%s{}"), *FEmitHelper::GetCppName(StructProperty->Struct)); //don;t override existing values } bComplete = false; } else if (ValueStr.IsEmpty()) { UE_LOG(LogK2Compiler, Error, TEXT("FEmitDefaultValueHelper Cannot generate initilization: %s"), *LocalProperty->GetPathName()); } } OutSingleLine += ValueStr; return bComplete; }; auto StructProperty = Cast<const UStructProperty>(Property); check(!StructProperty || StructProperty->Struct); auto ArrayProperty = Cast<const UArrayProperty>(Property); check(!ArrayProperty || ArrayProperty->Inner); if (!bWithoutFirstConstructionLine) { FString ValueStr; const bool bComplete = OneLineConstruction(Context, Property, ValuePtr, ValueStr, false); if (!ValueStr.IsEmpty()) { Context.AddLine(FString::Printf(TEXT("%s = %s;"), *PathToMember, *ValueStr)); } // array initialization "array_var = TArray<..>()" is complete, but it still needs items. 
if (bComplete && !ArrayProperty) { return; } } if (StructProperty) { for (auto LocalProperty : TFieldRange<const UProperty>(StructProperty->Struct)) { OuterGenerate(Context, LocalProperty, PathToMember, ValuePtr, DefaultValuePtr, EPropertyAccessOperator::Dot); } } if (ArrayProperty) { FScriptArrayHelper ScriptArrayHelper(ArrayProperty, ValuePtr); for (int32 Index = 0; Index < ScriptArrayHelper.Num(); ++Index) { const uint8* LocalValuePtr = ScriptArrayHelper.GetRawPtr(Index); FString ValueStr; const bool bComplete = OneLineConstruction(Context, ArrayProperty->Inner, LocalValuePtr, ValueStr, true); Context.AddLine(FString::Printf(TEXT("%s.Add(%s);"), *PathToMember, *ValueStr)); if (!bComplete) { const FString LocalPathToMember = FString::Printf(TEXT("%s[%d]"), *PathToMember, Index); // There is no point in doing diff with a "clean" struct, since we don't know what is really set by the native constructor. InnerGenerate(Context, ArrayProperty->Inner, LocalPathToMember, LocalValuePtr, nullptr, true); } } } } FString FEmitDefaultValueHelper::HandleSpecialTypes(FEmitterLocalContext& Context, const UProperty* Property, const uint8* ValuePtr) { //TODO: Use Path maps for Objects if (auto ObjectProperty = Cast<UObjectProperty>(Property)) { UObject* Object = ObjectProperty->GetPropertyValue(ValuePtr); if (Object) { { UClass* ObjectClassToUse = Context.GetFirstNativeOrConvertedClass(ObjectProperty->PropertyClass); const FString MappedObject = Context.FindGloballyMappedObject(Object, ObjectClassToUse); if (!MappedObject.IsEmpty()) { return MappedObject; } } const bool bCreatingSubObjectsOfClass = (Context.CurrentCodeType == FEmitterLocalContext::EGeneratedCodeType::SubobjectsOfClass); { auto BPGC = Context.GetCurrentlyGeneratedClass(); auto CDO = BPGC ? 
BPGC->GetDefaultObject(false) : nullptr; if (BPGC && Object && CDO && Object->IsIn(BPGC) && !Object->IsIn(CDO) && bCreatingSubObjectsOfClass) { return HandleClassSubobject(Context, Object, FEmitterLocalContext::EClassSubobjectList::MiscConvertedSubobjects, true, true); } } if (!bCreatingSubObjectsOfClass && Property->HasAnyPropertyFlags(CPF_InstancedReference)) { const FString CreateAsInstancedSubobject = HandleInstancedSubobject(Context, Object, Object->HasAnyFlags(RF_ArchetypeObject)); if (!CreateAsInstancedSubobject.IsEmpty()) { return CreateAsInstancedSubobject; } } } else if (ObjectProperty->HasMetaData(FBlueprintMetadata::MD_LatentCallbackTarget)) { return TEXT("this"); } } if (auto StructProperty = Cast<UStructProperty>(Property)) { if (TBaseStructure<FTransform>::Get() == StructProperty->Struct) { check(ValuePtr); const FTransform* Transform = reinterpret_cast<const FTransform*>(ValuePtr); const auto Rotation = Transform->GetRotation(); const auto Translation = Transform->GetTranslation(); const auto Scale = Transform->GetScale3D(); return FString::Printf(TEXT("FTransform(FQuat(%f, %f, %f, %f), FVector(%f, %f, %f), FVector(%f, %f, %f))") , Rotation.X, Rotation.Y, Rotation.Z, Rotation.W , Translation.X, Translation.Y, Translation.Z , Scale.X, Scale.Y, Scale.Z); } if (TBaseStructure<FVector>::Get() == StructProperty->Struct) { const FVector* Vector = reinterpret_cast<const FVector*>(ValuePtr); return FString::Printf(TEXT("FVector(%f, %f, %f)"), Vector->X, Vector->Y, Vector->Z); } if (TBaseStructure<FGuid>::Get() == StructProperty->Struct) { const FGuid* Guid = reinterpret_cast<const FGuid*>(ValuePtr); return FString::Printf(TEXT("FGuid(0x%08X, 0x%08X, 0x%08X, 0x%08X)"), Guid->A, Guid->B, Guid->C, Guid->D); } } return FString(); } struct FNonativeComponentData { FString NativeVariablePropertyName; UActorComponent* ComponentTemplate; UObject* ObjectToCompare; //// FString ParentVariableName; bool bSetNativeCreationMethod; FNonativeComponentData() : 
ComponentTemplate(nullptr) , ObjectToCompare(nullptr) , bSetNativeCreationMethod(false) { } void EmitProperties(FEmitterLocalContext& Context) { ensure(!NativeVariablePropertyName.IsEmpty()); if (bSetNativeCreationMethod) { Context.AddLine(FString::Printf(TEXT("%s->CreationMethod = EComponentCreationMethod::Native;"), *NativeVariablePropertyName)); } if (!ParentVariableName.IsEmpty()) { Context.AddLine(FString::Printf(TEXT("%s->AttachParent = %s;"), *NativeVariablePropertyName, *ParentVariableName)); } UClass* ComponentClass = ComponentTemplate->GetClass(); for (auto Property : TFieldRange<const UProperty>(ComponentClass)) { FEmitDefaultValueHelper::OuterGenerate(Context, Property, NativeVariablePropertyName , reinterpret_cast<const uint8*>(ComponentTemplate) , reinterpret_cast<const uint8*>(ObjectToCompare) , FEmitDefaultValueHelper::EPropertyAccessOperator::Pointer); } } }; FString FEmitDefaultValueHelper::HandleNonNativeComponent(FEmitterLocalContext& Context, const USCS_Node* Node, TSet<const UProperty*>& OutHandledProperties, TArray<FString>& NativeCreatedComponentProperties, const USCS_Node* ParentNode, TArray<FNonativeComponentData>& ComponenntsToInit) { check(Node); check(Context.CurrentCodeType == FEmitterLocalContext::EGeneratedCodeType::CommonConstructor); FString NativeVariablePropertyName; UBlueprintGeneratedClass* BPGC = CastChecked<UBlueprintGeneratedClass>(Context.GetCurrentlyGeneratedClass()); if (UActorComponent* ComponentTemplate = Node->GetActualComponentTemplate(BPGC)) { const FString VariableCleanName = Node->VariableName.ToString(); const UObjectProperty* VariableProperty = FindField<UObjectProperty>(BPGC, *VariableCleanName); if (VariableProperty) { NativeVariablePropertyName = FEmitHelper::GetCppName(VariableProperty); OutHandledProperties.Add(VariableProperty); } else { NativeVariablePropertyName = VariableCleanName; } Context.AddCommonSubObject_InConstructor(ComponentTemplate, NativeVariablePropertyName); if (ComponentTemplate->GetOuter() 
== BPGC) { FNonativeComponentData NonativeComponentData; NonativeComponentData.NativeVariablePropertyName = NativeVariablePropertyName; NonativeComponentData.ComponentTemplate = ComponentTemplate; UClass* ComponentClass = ComponentTemplate->GetClass(); check(ComponentClass != nullptr); UObject* ObjectToCompare = ComponentClass->GetDefaultObject(false); if (ComponentTemplate->HasAnyFlags(RF_InheritableComponentTemplate)) { ObjectToCompare = Node->GetActualComponentTemplate(Cast<UBlueprintGeneratedClass>(BPGC->GetSuperClass())); } else { Context.AddLine(FString::Printf(TEXT("%s%s = CreateDefaultSubobject<%s>(TEXT(\"%s\"));") , (VariableProperty == nullptr) ? TEXT("auto ") : TEXT("") , *NativeVariablePropertyName , *FEmitHelper::GetCppName(ComponentClass) , *VariableCleanName)); NonativeComponentData.bSetNativeCreationMethod = true; NativeCreatedComponentProperties.Add(NativeVariablePropertyName); FString ParentVariableName; if (ParentNode) { const FString CleanParentVariableName = ParentNode->VariableName.ToString(); const UObjectProperty* ParentVariableProperty = FindField<UObjectProperty>(BPGC, *CleanParentVariableName); ParentVariableName = ParentVariableProperty ? FEmitHelper::GetCppName(ParentVariableProperty) : CleanParentVariableName; } else if (USceneComponent* ParentComponentTemplate = Node->GetParentComponentTemplate(CastChecked<UBlueprint>(BPGC->ClassGeneratedBy))) { ParentVariableName = Context.FindGloballyMappedObject(ParentComponentTemplate, USceneComponent::StaticClass()); } NonativeComponentData.ParentVariableName = ParentVariableName; } NonativeComponentData.ObjectToCompare = ObjectToCompare; ComponenntsToInit.Add(NonativeComponentData); } } // Recursively handle child nodes. 
for (auto ChildNode : Node->ChildNodes) { HandleNonNativeComponent(Context, ChildNode, OutHandledProperties, NativeCreatedComponentProperties, Node, ComponenntsToInit); } return NativeVariablePropertyName; } struct FDependenciesHelper { private: // Keep sync with FTypeSingletonCache::GenerateSingletonName static FString GenerateZConstructor(UField* Item) { FString Result; if (!ensure(Item)) { return Result; } for (UObject* Outer = Item; Outer; Outer = Outer->GetOuter()) { if (!Result.IsEmpty()) { Result = TEXT("_") + Result; } if (Cast<UClass>(Outer) || Cast<UScriptStruct>(Outer)) { FString OuterName = FEmitHelper::GetCppName(CastChecked<UField>(Outer), true); Result = OuterName + Result; // Structs can also have UPackage outer. if (Cast<UClass>(Outer) || Cast<UPackage>(Outer->GetOuter())) { break; } } else { Result = Outer->GetName() + Result; } } // Can't use long package names in function names. if (Result.StartsWith(TEXT("/Script/"), ESearchCase::CaseSensitive)) { Result = FPackageName::GetShortName(Result); } const FString ClassString = Item->IsA<UClass>() ? TEXT("UClass") : TEXT("UScriptStruct"); const FString PostFix = Item->IsA<UClass>() ? 
TEXT("_NoRegister") : TEXT(""); return FString(TEXT("Z_Construct_")) + ClassString + TEXT("_") + Result + PostFix + TEXT("()"); } public: static void AddDependenciesInConstructor(FEmitterLocalContext& Context) { const bool bUseZConstructorInGeneratedCode = true; if (Context.Dependencies.ConvertedClasses.Num()) { Context.AddLine(TEXT("// List of all referenced converted classes")); } for (auto LocStruct : Context.Dependencies.ConvertedClasses) { FString ClassConstructor; if (bUseZConstructorInGeneratedCode) { ClassConstructor = GenerateZConstructor(Context.Dependencies.FindOriginalClass(LocStruct)); Context.AddLine(FString::Printf(TEXT("extern UClass* %s;"), *ClassConstructor)); } else { ClassConstructor = FEmitHelper::GetCppName(LocStruct, true) + TEXT("::StaticClass()"); } Context.AddLine(FString::Printf(TEXT("CastChecked<UDynamicClass>(GetClass())->ReferencedConvertedFields.Add(%s);"), *ClassConstructor)); } if (Context.Dependencies.ConvertedStructs.Num()) { Context.AddLine(TEXT("// List of all referenced converted structures")); } for (auto LocStruct : Context.Dependencies.ConvertedStructs) { FString StructConstructor; if (bUseZConstructorInGeneratedCode) { StructConstructor = GenerateZConstructor(LocStruct); Context.AddLine(FString::Printf(TEXT("extern UScriptStruct* %s;"), *StructConstructor)); } else { StructConstructor = FEmitHelper::GetCppName(LocStruct, true) + TEXT("::StaticStruct()"); } Context.AddLine(FString::Printf(TEXT("CastChecked<UDynamicClass>(GetClass())->ReferencedConvertedFields.Add(%s);"), *StructConstructor)); } if (Context.Dependencies.Assets.Num()) { Context.AddLine(TEXT("// List of all referenced assets")); } for (auto LocAsset : Context.Dependencies.Assets) { const FString AssetStr = Context.FindGloballyMappedObject(LocAsset, UObject::StaticClass(), true, false); Context.AddLine(FString::Printf(TEXT("CastChecked<UDynamicClass>(GetClass())->UsedAssets.Add(%s);"), *AssetStr)); } } static void 
AddStaticFunctionsForDependencies(FEmitterLocalContext& Context) { auto SourceClass = Context.GetCurrentlyGeneratedClass(); auto OriginalClass = Context.Dependencies.FindOriginalClass(SourceClass); const FString CppClassName = FEmitHelper::GetCppName(OriginalClass); // __StaticDependenciesAssets Context.AddLine(FString::Printf(TEXT("void %s::__StaticDependenciesAssets(TArray<FBlueprintDependencyData>& AssetsToLoad)"), *CppClassName)); Context.AddLine(TEXT("{")); Context.IncreaseIndent(); for (UObject* LocAsset : Context.Dependencies.Assets) { auto GetConvertedClass = [&](const UClass* AssetType) -> const UClass* { const UBlueprintGeneratedClass* BPGC = Cast<const UBlueprintGeneratedClass>(AssetType); if (AssetType->IsChildOf<UUserDefinedEnum>()) { return UEnum::StaticClass(); } else if (AssetType->IsChildOf<UUserDefinedStruct>()) { return UScriptStruct::StaticClass(); } else if (BPGC && Context.Dependencies.WillClassBeConverted(BPGC)) { return UDynamicClass::StaticClass(); } return AssetType; }; const UClass* ClassToUse = GetConvertedClass(LocAsset->GetClass()); Context.AddLine(FString::Printf(TEXT("AssetsToLoad.Add({FName(TEXT(\"%s\")), FName(TEXT(\"%s\")), FName(TEXT(\"%s\")), FName(TEXT(\"%s\"))});") , *LocAsset->GetOutermost()->GetPathName() , *LocAsset->GetName() , *ClassToUse->GetOutermost()->GetPathName() , *ClassToUse->GetName())); } Context.DecreaseIndent(); Context.AddLine(TEXT("}")); // Register Helper Struct const FString RegisterHelperName = FString::Printf(TEXT("FRegisterHelper__%s"), *CppClassName); Context.AddLine(FString::Printf(TEXT("struct %s"), *RegisterHelperName)); Context.AddLine(TEXT("{")); Context.IncreaseIndent(); Context.AddLine(FString::Printf(TEXT("%s()"), *RegisterHelperName)); Context.AddLine(TEXT("{")); Context.IncreaseIndent(); Context.AddLine(FString::Printf( TEXT("FConvertedBlueprintsDependencies::Get().RegisterClass(TEXT(\"%s\"), &%s::__StaticDependenciesAssets);") , *OriginalClass->GetOutermost()->GetPathName() , 
*CppClassName)); Context.DecreaseIndent(); Context.AddLine(TEXT("}")); Context.AddLine(FString::Printf(TEXT("static %s Instance;"), *RegisterHelperName)); Context.DecreaseIndent(); Context.AddLine(TEXT("};")); Context.AddLine(FString::Printf(TEXT("%s %s::Instance;"), *RegisterHelperName, *RegisterHelperName)); } }; void FEmitDefaultValueHelper::GenerateConstructor(FEmitterLocalContext& Context) { auto BPGC = CastChecked<UBlueprintGeneratedClass>(Context.GetCurrentlyGeneratedClass()); const FString CppClassName = FEmitHelper::GetCppName(BPGC); UClass* SuperClass = BPGC->GetSuperClass(); const bool bSuperHasOnlyDefaultConstructor = SuperClass && SuperClass->HasMetaData(TEXT("OnlyDefaultConstructorDeclared")); Context.AddLine(FString::Printf(TEXT("%s::%s(const FObjectInitializer& ObjectInitializer) : Super(%s)") , *CppClassName , *CppClassName , bSuperHasOnlyDefaultConstructor ? TEXT("") : TEXT("ObjectInitializer"))); Context.AddLine(TEXT("{")); Context.IncreaseIndent(); // When CDO is created create all subobjects owned by the class { TArray<UActorComponent*> ActorComponentTempatesOwnedByClass = BPGC->ComponentTemplates; // Gather all CT from SCS and IH, the remaining ones are generated for class.. 
if (auto SCS = BPGC->SimpleConstructionScript) { for (auto Node : SCS->GetAllNodes()) { ActorComponentTempatesOwnedByClass.RemoveSwap(Node->ComponentTemplate); } } if (auto IH = BPGC->GetInheritableComponentHandler()) { TArray<UActorComponent*> AllTemplates; IH->GetAllTemplates(AllTemplates); ActorComponentTempatesOwnedByClass.RemoveAllSwap([&](UActorComponent* Component) -> bool { return AllTemplates.Contains(Component); }); } Context.AddLine(FString::Printf(TEXT("if(HasAnyFlags(RF_ClassDefaultObject) && (%s::StaticClass() == GetClass()))"), *CppClassName)); Context.AddLine(TEXT("{")); Context.IncreaseIndent(); Context.AddLine(TEXT("ensure(0 == CastChecked<UDynamicClass>(GetClass())->MiscConvertedSubobjects.Num());")); Context.AddLine(TEXT("ensure(0 == CastChecked<UDynamicClass>(GetClass())->ReferencedConvertedFields.Num());")); Context.AddLine(TEXT("ensure(0 == CastChecked<UDynamicClass>(GetClass())->UsedAssets.Num());")); Context.AddLine(TEXT("ensure(0 == CastChecked<UDynamicClass>(GetClass())->DynamicBindingObjects.Num());")); Context.AddLine(TEXT("ensure(0 == CastChecked<UDynamicClass>(GetClass())->ComponentTemplates.Num());")); Context.AddLine(TEXT("ensure(0 == CastChecked<UDynamicClass>(GetClass())->Timelines.Num());")); Context.AddLine(TEXT("ensure(nullptr == CastChecked<UDynamicClass>(GetClass())->AnimClassImplementation);")); Context.CurrentCodeType = FEmitterLocalContext::EGeneratedCodeType::SubobjectsOfClass; FDependenciesHelper::AddDependenciesInConstructor(Context); auto CreateAndInitializeClassSubobjects = [&](bool bCreate, bool bInitilize) { for (auto ComponentTemplate : ActorComponentTempatesOwnedByClass) { if (ComponentTemplate) { HandleClassSubobject(Context, ComponentTemplate, FEmitterLocalContext::EClassSubobjectList::ComponentTemplates, bCreate, bInitilize); } } for (auto TimelineTemplate : BPGC->Timelines) { if (TimelineTemplate) { HandleClassSubobject(Context, TimelineTemplate, FEmitterLocalContext::EClassSubobjectList::Timelines, bCreate, 
bInitilize); } } for (auto DynamicBindingObject : BPGC->DynamicBindingObjects) { if (DynamicBindingObject) { HandleClassSubobject(Context, DynamicBindingObject, FEmitterLocalContext::EClassSubobjectList::DynamicBindingObjects, bCreate, bInitilize); } } FBackendHelperUMG::CreateClassSubobjects(Context, bCreate, bInitilize); }; CreateAndInitializeClassSubobjects(true, false); CreateAndInitializeClassSubobjects(false, true); FBackendHelperAnim::CreateAnimClassData(Context); Context.DecreaseIndent(); Context.AddLine(TEXT("}")); } // Components that must be fixed after serialization TArray<FString> NativeCreatedComponentProperties; { Context.CurrentCodeType = FEmitterLocalContext::EGeneratedCodeType::CommonConstructor; // Let's have an easy access to generated class subobjects Context.AddLine(TEXT("{")); // no shadow variables Context.IncreaseIndent(); UObject* CDO = BPGC->GetDefaultObject(false); UObject* ParentCDO = BPGC->GetSuperClass()->GetDefaultObject(false); check(CDO && ParentCDO); Context.AddLine(TEXT("")); FString NativeRootComponentFallback; TSet<const UProperty*> HandledProperties; // Generate ctor init code for native class default subobjects that are always instanced (e.g. components). // @TODO (pkavan) - We can probably make this faster by generating code to index through the DSO array instead (i.e. in place of HandleInstancedSubobject which will generate a lookup call per DSO). TArray<UObject*> NativeDefaultObjectSubobjects; BPGC->GetDefaultObjectSubobjects(NativeDefaultObjectSubobjects); for (auto DSO : NativeDefaultObjectSubobjects) { if (DSO && DSO->GetClass()->HasAnyClassFlags(CLASS_DefaultToInstanced)) { // Determine if this is an editor-only subobject. bool bIsEditorOnlySubobject = false; if (const UActorComponent* ActorComponent = Cast<UActorComponent>(DSO)) { bIsEditorOnlySubobject = ActorComponent->IsEditorOnly(); } // Skip ctor code gen for editor-only subobjects, since they won't be used by the runtime. 
Any dependencies on editor-only subobjects will be handled later (see HandleInstancedSubobject). if (!bIsEditorOnlySubobject) { const FString VariableName = HandleInstancedSubobject(Context, DSO, false, true); // Keep track of which component can be used as a root, in case it's not explicitly set. if (NativeRootComponentFallback.IsEmpty()) { USceneComponent* SceneComponent = Cast<USceneComponent>(DSO); if (SceneComponent && !SceneComponent->AttachParent && SceneComponent->CreationMethod == EComponentCreationMethod::Native) { NativeRootComponentFallback = VariableName; } } } } } // Check for a valid RootComponent property value; mark it as handled if already set in the defaults. bool bNeedsRootComponentAssignment = false; static const FName RootComponentPropertyName(TEXT("RootComponent")); const UObjectProperty* RootComponentProperty = FindField<UObjectProperty>(BPGC, RootComponentPropertyName); if (RootComponentProperty) { if (RootComponentProperty->GetObjectPropertyValue_InContainer(CDO)) { HandledProperties.Add(RootComponentProperty); } else if (!NativeRootComponentFallback.IsEmpty()) { Context.AddLine(FString::Printf(TEXT("RootComponent = %s;"), *NativeRootComponentFallback)); HandledProperties.Add(RootComponentProperty); } else { bNeedsRootComponentAssignment = true; } } // Generate ctor init code for the SCS node hierarchy (i.e. non-native components). SCS nodes may have dependencies on native DSOs, but not vice-versa. TArray<const UBlueprintGeneratedClass*> BPGCStack; const bool bErrorFree = UBlueprintGeneratedClass::GetGeneratedClassesHierarchy(BPGC, BPGCStack); if (bErrorFree) { TArray<FNonativeComponentData> ComponentsToInit; // Start at the base of the hierarchy so that dependencies are handled first. 
for (int32 i = BPGCStack.Num() - 1; i >= 0; --i) { if (BPGCStack[i]->SimpleConstructionScript) { for (auto Node : BPGCStack[i]->SimpleConstructionScript->GetRootNodes()) { if (Node) { const FString NativeVariablePropertyName = HandleNonNativeComponent(Context, Node, HandledProperties, NativeCreatedComponentProperties, nullptr, ComponentsToInit); if (bNeedsRootComponentAssignment && Node->ComponentTemplate && Node->ComponentTemplate->IsA<USceneComponent>() && !NativeVariablePropertyName.IsEmpty()) { // Only emit the explicit root component assignment statement if we're looking at the child BPGC that we're generating ctor code // for. In all other cases, the root component will already be set up by a chained parent ctor call, so we avoid stomping it here. if (i == 0) { Context.AddLine(FString::Printf(TEXT("RootComponent = %s;"), *NativeVariablePropertyName)); HandledProperties.Add(RootComponentProperty); } bNeedsRootComponentAssignment = false; } } } } } for (auto& ComponentToInit : ComponentsToInit) { ComponentToInit.EmitProperties(Context); } } // Generate ctor init code for generated Blueprint class property values that may differ from parent class defaults (or that otherwise belong to the generated Blueprint class). for (auto Property : TFieldRange<const UProperty>(BPGC)) { const bool bNewProperty = Property->GetOwnerStruct() == BPGC; const bool bIsAccessible = bNewProperty || !Property->HasAnyPropertyFlags(CPF_NativeAccessSpecifierPrivate); if (bIsAccessible && !HandledProperties.Contains(Property)) { OuterGenerate(Context, Property, TEXT(""), reinterpret_cast<const uint8*>(CDO), bNewProperty ? 
nullptr : reinterpret_cast<const uint8*>(ParentCDO), EPropertyAccessOperator::None, true);
	}
}

// Close the generated ctor body and its enclosing emitted scope.
Context.DecreaseIndent();
Context.AddLine(TEXT("}"));
}

Context.DecreaseIndent();
Context.AddLine(TEXT("}"));

// Constructor emission is finished; subsequent emission happens in regular (non-ctor) context.
Context.CurrentCodeType = FEmitterLocalContext::EGeneratedCodeType::Regular;

{
	// Emit a PostLoadSubobjects override that restamps CreationMethod on natively created
	// components (the generated code sets it back to Native after load).
	Context.AddLine(FString::Printf(TEXT("void %s::PostLoadSubobjects(FObjectInstancingGraph* OuterInstanceGraph)"), *CppClassName));
	Context.AddLine(TEXT("{"));
	Context.IncreaseIndent();
	Context.AddLine(TEXT("Super::PostLoadSubobjects(OuterInstanceGraph);"));
	for (auto& ComponentToFix : NativeCreatedComponentProperties)
	{
		Context.AddLine(FString::Printf(TEXT("if(ensure(%s))"), *ComponentToFix));
		Context.AddLine(TEXT("{"));
		Context.IncreaseIndent();
		Context.AddLine(FString::Printf(TEXT("%s->CreationMethod = EComponentCreationMethod::Native;"), *ComponentToFix));
		Context.DecreaseIndent();
		Context.AddLine(TEXT("}"));
	}
	Context.DecreaseIndent();
	Context.AddLine(TEXT("}"));
}

FDependenciesHelper::AddStaticFunctionsForDependencies(Context);
FBackendHelperUMG::EmitWidgetInitializationFunctions(Context);
}

// Emits code that creates and/or initializes a class-level (non-CDO) subobject inside the
// generated class constructor. Returns the local variable name bound to the subobject, or an
// empty string on failure. Note: "bInitilize" [sic] is an inherited spelling in the interface.
FString FEmitDefaultValueHelper::HandleClassSubobject(FEmitterLocalContext& Context, UObject* Object, FEmitterLocalContext::EClassSubobjectList ListOfSubobjectsType, bool bCreate, bool bInitilize)
{
	ensure(Context.CurrentCodeType == FEmitterLocalContext::EGeneratedCodeType::SubobjectsOfClass);
	FString LocalNativeName;
	if (bCreate)
	{
		// The outer must exist (and be nameable) before we can emit a NewObject call for this object.
		FString OuterStr = Context.FindGloballyMappedObject(Object->GetOuter());
		if (OuterStr.IsEmpty())
		{
			// Recursively emit creation code for the outer chain first.
			OuterStr = HandleClassSubobject(Context, Object->GetOuter(), ListOfSubobjectsType, bCreate, bInitilize);
			if (OuterStr.IsEmpty())
			{
				return FString();
			}
			// The recursive call may already have created this object as a side effect; reuse it.
			const FString AlreadyCreatedObject = Context.FindGloballyMappedObject(Object);
			if (!AlreadyCreatedObject.IsEmpty())
			{
				return AlreadyCreatedObject;
			}
		}
		// Objects directly outered to the generated class are registered on its subobject lists.
		const bool AddAsSubobjectOfClass = Object->GetOuter() == Context.GetCurrentlyGeneratedClass();
		LocalNativeName = Context.GenerateUniqueLocalName();
		// Bind the new local name so later emission can refer to this subobject.
		Context.AddClassSubObject_InConstructor(Object, LocalNativeName);
		UClass* ObjectClass = Object->GetClass();
		Context.AddLine(FString::Printf(
			TEXT("auto %s = NewObject<%s>(%s, TEXT(\"%s\"));")
			, *LocalNativeName
			, *FEmitHelper::GetCppName(ObjectClass)
			, *OuterStr
			, *Object->GetName()));
		if (AddAsSubobjectOfClass)
		{
			// Also emit code registering the object on the appropriate UDynamicClass subobject list.
			Context.RegisterClassSubobject(Object, ListOfSubobjectsType);
			Context.AddLine(FString::Printf(TEXT("CastChecked<UDynamicClass>(GetClass())->%s.Add(%s);")
				, Context.ClassSubobjectListName(ListOfSubobjectsType)
				, *LocalNativeName));
		}
	}

	if (bInitilize)
	{
		// When not created above, the object must already be mapped from an earlier pass.
		if (LocalNativeName.IsEmpty())
		{
			LocalNativeName = Context.FindGloballyMappedObject(Object);
		}
		ensure(!LocalNativeName.IsEmpty());
		// Diff each property against the class default object so only non-default values are emitted.
		auto CDO = Object->GetClass()->GetDefaultObject(false);
		for (auto Property : TFieldRange<const UProperty>(Object->GetClass()))
		{
			OuterGenerate(Context, Property, LocalNativeName
				, reinterpret_cast<const uint8*>(Object)
				, reinterpret_cast<const uint8*>(CDO)
				, EPropertyAccessOperator::Pointer);
		}
	}
	return LocalNativeName;
}

// Emits code that resolves (or, when bCreateInstance is set, creates) an instanced default
// subobject of the generated class's CDO and initializes its properties against its archetype.
// Returns the local variable name bound to the subobject; re-entrant calls for an
// already-handled object return the existing name without emitting anything.
FString FEmitDefaultValueHelper::HandleInstancedSubobject(FEmitterLocalContext& Context, UObject* Object, bool bCreateInstance, bool bSkipEditorOnlyCheck)
{
	check(Object);

	// Make sure we don't emit initialization code for the same object more than once.
	FString LocalNativeName = Context.FindGloballyMappedObject(Object);
	if (!LocalNativeName.IsEmpty())
	{
		return LocalNativeName;
	}
	else
	{
		LocalNativeName = Context.GenerateUniqueLocalName();
	}

	// Record the name in whichever constructor-emission context we are currently in.
	if (Context.CurrentCodeType == FEmitterLocalContext::EGeneratedCodeType::SubobjectsOfClass)
	{
		Context.AddClassSubObject_InConstructor(Object, LocalNativeName);
	}
	else if (Context.CurrentCodeType == FEmitterLocalContext::EGeneratedCodeType::CommonConstructor)
	{
		Context.AddCommonSubObject_InConstructor(Object, LocalNativeName);
	}

	UClass* ObjectClass = Object->GetClass();

	// Determine if this is an editor-only subobject. When handling as a dependency, we'll create a "dummy" object in its place (below).
	bool bIsEditorOnlySubobject = false;
	if (!bSkipEditorOnlyCheck)
	{
		if (UActorComponent* ActorComponent = Cast<UActorComponent>(Object))
		{
			bIsEditorOnlySubobject = ActorComponent->IsEditorOnly();
			if (bIsEditorOnlySubobject)
			{
				// Replace the potentially editor-only class with a base actor/scene component class that's
				// available to the runtime. We'll create a "dummy" object of this type to stand in for the
				// editor-only subobject below.
				ObjectClass = ObjectClass->IsChildOf<USceneComponent>() ? USceneComponent::StaticClass() : UActorComponent::StaticClass();
			}
		}
	}

	auto BPGC = Context.GetCurrentlyGeneratedClass();
	auto CDO = BPGC ? BPGC->GetDefaultObject(false) : nullptr;
	if (!bIsEditorOnlySubobject && ensure(CDO) && (CDO == Object->GetOuter()))
	{
		// The object is a genuine default subobject of the CDO: either create it or look up
		// the instance that the native super-class constructor already created.
		if (bCreateInstance)
		{
			Context.AddLine(FString::Printf(TEXT("auto %s = CreateDefaultSubobject<%s>(TEXT(\"%s\"));")
				, *LocalNativeName, *FEmitHelper::GetCppName(ObjectClass), *Object->GetName()));
		}
		else
		{
			Context.AddLine(FString::Printf(TEXT("auto %s = CastChecked<%s>(GetDefaultSubobjectByName(TEXT(\"%s\")));")
				, *LocalNativeName, *FEmitHelper::GetCppName(ObjectClass), *Object->GetName()));
		}

		// Diff against the archetype so only values that differ from inherited defaults are emitted.
		const UObject* ObjectArchetype = Object->GetArchetype();
		for (auto Property : TFieldRange<const UProperty>(ObjectClass))
		{
			OuterGenerate(Context, Property, LocalNativeName
				, reinterpret_cast<const uint8*>(Object)
				, reinterpret_cast<const uint8*>(ObjectArchetype)
				, EPropertyAccessOperator::Pointer);
		}
	}
	else
	{
		// Editor-only dummy, or the object is not outered to the CDO: fall back to a plain NewObject.
		// NOTE(review): OuterStr is looked up from Object itself yet is passed as the Outer argument
		// to the emitted NewObject call — confirm whether Object->GetOuter() was intended here.
		const FString OuterStr = Context.FindGloballyMappedObject(Object);
		if (OuterStr.IsEmpty())
		{
			ensure(false);
			return FString();
		}
		Context.AddLine(FString::Printf(TEXT("auto %s = NewObject<%s>(%s, TEXT(\"%s\"));")
			, *LocalNativeName
			, *FEmitHelper::GetCppName(ObjectClass)
			, *OuterStr
			, *Object->GetName()));
	}
	return LocalNativeName;
}
Subsurface structure of St. Elmo submarine volcanic mound (Austin, Texas) and its volcanic conduits imaged using geophysical methods Geophysical survey techniques including electrical resistivity imaging and magnetics were utilized to study the late-Cretaceous submarine volcanic mound of St. Elmo railroad cut located in south Austin, TX. The St. Elmo site cut exposes a sequence of late Cretaceous volcaniclastic rocks in contact with carbonate rocks. Resistivity imaging results provided subsurface evidence that the St. Elmo submarine mound has high resistivity limestone blocks scattered randomly within the resistivity sections. These limestone blocks are interpreted to be erratic blocks of Austin Chalk, which were probably torn from the walls of the volcanic conduits (vents) and ejected. Magnetic anomalies correlate well with the resistivity anomalies, strengthening the interpretation of the resistivity data. Resistivity results from the residential site, not far from the St. Elmo volcanic mound, are interpreted as a conduit which is similar to the conduit anomalies observed on the St. Elmo railroad cut site. Introduction Near-surface geophysical imaging is used in a wide range of volcanic settings to understand the internal structure of volcanic centers and the nature of volcanic deposits . In this study, electrical resistivity imaging is used to resolve near-vent structures. Electrical imaging has a specific capability to resolve subsurface features of the buried volcanic deposits based on electrical resistivity contrasts in some volcanic deposits. Such electrical resistivity contrasts occur in the Cretaceous strata of Austin Chalk of south-central Texas, where they contain electrically conductive (low resistivity) volcanic domes or mounds . These volcanic mounds, referred to locally Corresponding author: [email protected] as "serpentine plugs", are part of a period of distributed monogenetic volcanism . 
Within this province there are approximately 200 occurrences of igneous outcrops emplaced during the deposition of the Austin Chalk. These bodies consist of shallowly emplaced igneous structures associated with vents, and pyroclastic rocks and lavas erupted on the now-deeply eroded and sub-aerially exposed shallow Cretaceous seafloor. These outcrops tend to align along NE-SW regional faults and fractures of the Balcones faults of the Miocene age (~20 Ma). Thus, this distributed volcanic field is of interest because it is among the youngest volcanism found in the south-central USA, is associated with a major tectonic feature that was active long after the cessation of volcanism, and provides an opportunity to study the geophysics of vents formed in a shallow marine environment. Saribudak , have demonstrated that these tuff mounds have all the hallmarks of smallvolume submarine volcanic vents , including shallow diatreme structures in which Austin Chalk is brecciated and reworked, tuff-ring structures, remobilized palagonitized tuff as gravity currents (submarine lahars), and intrusive features (dikes, small sills and conduits, as shown in Figure 2). In this study, a volcano-sedimentary section was mapped at the St. Elmo railroad cut, which was previously identified as a submarine tuff mound . Both resistivity imaging and magnetic methods were used. The purpose of these geophysical surveys was to image the near-vent volcanic facies, especially their relationship to the Austin Chalk limestone . Geophysical data of submarine vents (conduits) is understandably sparse, so the St. Elmo site provides an excellent opportunity to study the vent structure in a now terrestrial setting. In addition, three resistivity profiles were surveyed on eastern and western sides of a residence located about 360 m to the northwest of the St. Elmo site. The house was recently built on a small hilltop. 
We were hired to characterize the subsurface in terms of geology and geological structure to help assess the foundation's integrity. Resistivity results helped identify the volcanic rocks around the house, which improved our understanding of the vent system comprising St. Elmo volcanic mound. Previous geophysical work Since the volcanic mounds were first described in the Balcones Magmatic Province (BMP) by Udden and Bybe in 1916, significant hydrocarbon traps have been exploited in and around tuff mounds. Magnetic surveys over tuff mounds led directly to the discovery of similar oil fields, such as Hilbig, Jim Smith, Yoast, and Chapmon-Abbott fields in Texas . The integrated geophysical work on Pilot Knob, a volcanic vent in south Austin, included the application of magnetic and gravity methods, such as the study published by Romberg and Barnes in 1954 (Figure 3). In that seminal study, gravity and magnetic data were collected along several traverses with station spacing of 700 m in some sections and 1,400 m in others. The results revealed strong positive gravity and magnetic anomalies over the central basalt mass, and weaker anomalies caused by attendant flows and dikes. They also concluded that "serpentinized" pyroclastic rocks show weak, negative anomalies. More recently, resistivity and magnetic data were collected along two profiles across Pilot Knob , which is located about 10 km to the southeast of the current study area (Figure 3). Both the resistivity and magnetic data together indicate significant anomalies, which were interpreted to be due to volcanic features: a conduit, a dike and a crater. They also indicate the large tuff mound is cut by faults. More geophysical results (2D and 3D resistivity and magnetics) over the known outcrops of tuff and lava at the Williamson Creek site revealed a volcanic vent . Results of these geophysical studies , for the first time, revealed the presence of volcanic features that included vents, dikes and craters. 
This study is the extension of those previous studies, with particular emphasis on the structures of tuff mounds. Geological background The Austin Chalk Group lies within the Balcones Fault Zone, and outcrops in the Austin area. This unit comprises all of the surface geology, other than volcanic rocks. Young and Woodruff . In the railroad cut below St. Elmo road, palagonitized tuff outcrops are believed to have erupted from a nearby volcanic edifice . The majority of the volcanic outcrop consists of pyroclas- Figure 3: Map showing the current study area, Williamson Creek and Pilot Knob sites, and volcanic oil fields (modified from Saribudak and Caran ). tic deposits, including brown lapilli tuff comprised of poorly sorted tephra, some layers of altered ash and limestone clasts . Clasts of limestone are present at all levels in the tuff, and they range in size from around 1 to 25 cm. The lapilli tuff at this location is interpreted to result from an eruption-fed density current on the basis of the presence of low angle cross-bedding, and intercalation of ash layers . The limestone clasts indicate near-vent deposition and sufficient explosive energy to erode the conduit wall. Thus the depositional environment of the St. Elmo railroad cut area was geologically interpreted to be the central part of a tuff mound. Several layers of interlayered ash and tephra deposits with limestone clasts and lapilli tuff sections observed within the tuff mound represent an eruptive cycle of a volcano near the tuff mound . A detailed geologic map of St. Elmo Bridge and surrounding area was published first by Young et al. , and is redrawn in Figure 5. The geological map shows St. Elmo railroad cut and the residential site on the map with letters of A and B, respectively. A thick sequence of tuff (20 m) exposed around the South Austin Hospital during its construction is shown on the map. The site falls into the Balcones Fault Zone (BFZ), and there is a significant fault between the St. 
Elmo site and the residential site, which are indicated Resistivity method The electrical resistivity method inolves imaging the bulk electrical resistivity distribution of the subsurface in two, three, or four dimensions (the three dimensions of space plus time when monitoring is performed) from the meter to hundreds of meter scale depending on the electrode spacing . Resistivity has been well documented in the geophysical literature . Resistivity values (Ω m) measured by the method are highly affected by several variables, including the presence of water or moisture, the amount and distribution of pore space in the material, and temperature . In this study, the Advanced Geosciences, Inc. (AGI) SuperSting R1 and R8 resistivity meters were used with a dipole-dipole electrode array. Compared to other arrays, this array is more sensitive to horizontal variations in the subsurface and, when the data are inverted, provides a 2D resistivity model that can be interpreted in terms of near-surface geology and geological structure. A contact resistance test was performed before data collection. Contact resistance measures the resistance to current flow at electrodes caused by imperfect electrical contact with the earth. Poor data quality or anomalous data can result from high or highly variable electrode contact resistance along a profile. To decrease the effect of contact resistance along each profile a saltwater solution was added to each electrode before the con-tact resistance test was performed. Typical contact resistance for profiles varied between 100 and 250 Ω. Seven resistivity profiles were surveyed in the study area, on and off, between the years of 2013 and 2019. A 2D inversion of the resistivity imaging profile was obtained using AGI's 2D EarthImager commercial software . A topographic correction was applied to the resistivity profile where needed. 
The final output, which is the inverted section, represents the result of an iterative process that tends to minimize the difference between measured and calculated resistivity values. The Root Mean Squared (RMS) error gives a measure of this difference. In this study, RMS values of six inverted resistivity sections range between 3 and 8, and the seventh profile has a RMS value of 11. These are acceptable results, being of the same magnitude of the values that characterize inversion of data collected in volcanic environments . Magnetic method The magnetometer measures magnetic field. The unit of measurement for the magnetometer is the nanoTesla (nT). In this study, a Geometrics G-858 Cesium magnetometer was employed in the collection of the data along profiles P5, P6, and P7 which are located on the western and eastern parts of the site, respectively (Figure 6). The collection rate of the magnetic data was 10 Hz, which corresponds to better than one data point every 0.3 m along the magnetic profiles. A base station was established in the vicinity of the site to record the daily variations of Earth's external magnetic field. The magnetic survey time was less than 30 minutes, and there were no significant diurnal variations. For this reason, a diurnal correction was not applied to the magnetic data. A low-pass filter was applied to the magnetic Figure 5: A geological map of the study and surrounding areas. The map is based on the original map published by Young et al. . Note that a significant fault of the BFZ is crossing south of the residential site (B). data to reduce noise. The site presented challenging conditions for the magnetometer surveys due to the presence of some fences, roads with busy traffic, the bridge, and the railroad track. However, the locations of magnetic profiles were a minimum distance of 13 m from the railroad tracks, which did not appear to have any significant effect on the magnetic readings. 
The presence of the bridge did affect the readings, and these anomalous data were removed from the dataset along profile P5. The locations of profiles P6 and P7 were restricted to south of the bridge only. A strongly enforced ferrous property fence further to the north on the eastern part of the railroad track made data collection there impossible. Thus we stayed in the southern section, and the only challenge to the quality of the magnetic data being collected there was the road traffic. To avoid potential interference, the magnetometer was paused approximately 30 m before any car drove by. In addition, we collected the magne-tic data in two directions (north to south and south to north); both datasets were comparable, indicating that the effect of ambient noise was not significant. St. Elmo Four resistivity profiles (P1, P2, P3, and P4) and three magnetic profiles (P5, P6, and P7) were surveyed at the site. Locations of the geophysical profiles are shown on a site map ( Figure 6A) and on a detailed, schematic geological map of the site, which shows the locations of late Cretaceous volcaniclastic and carbonate rocks of Austin Group (Dessau Chalk) where they are in a geological contact ( Figure 6B). The geological contact is interpreted as fault between the volcaniclastic rocks and the Austin chalk by Caran Figure 6B). The azimuth of this fault is considerably more northerly than that of most Balcones faults in this area, and the westerly dip of the fault is opposite to the faults of the Balcones system . Two resistivity profiles (P1 and P2) were established to the immediate East and West of the railroad tracks beneath St. Elmo Bridge. The spacing between the profiles was 8 m, and the electrode spacing was held at 6 m on both profiles. A train runs every few hours, and extra caution was taken while working at this locality. For this reason, contact resistance through the rails was not measurable. 
Because the trains always sound their horns, we were able to shut the resistivity meter long before the train arrived at our survey location in order to avoid anomalous electrical measurements. We restarted the survey after the train disappeared from the view. The third and fourth resistivity profiles (P3 and P4) were located in the northern section of St. Elmo Bridge above the railroad track in the western and eastern parts of the site, respectively. The electrode spacing was held at~2.5 m on both surveys. Note that both profiles are located higher (2.5 m) and farther (8 m) from the railroad tracks (see Figure 6A for location). Figure 7 shows the resistivity data collected along the West (P1) and East (P2) of St. Elmo railroad track, respectively. The two profiles are separated by about 8 meters. Locations of the contact between the boundaries of Austin Chalk and pyroclastic rocks, as well as the first and last piers of the bridge of St. Elmo Road are also shown on resistivity profiles as reference points. St. Elmo Resistivity Profiles It has been previously shown that railroad tracks have an effect on the apparent resistivity data . Apparent resistivity measurements made in proximity to a rail track with any type array configuration are likely to be reduced by preferential current channeling through the conductive rails, with greater distortion of the data at larger depths of investigation. The effect is minimized, however, for the dipole-dipole configuration, which is therefore recommended for resistivity surveys undertaken parallel to conductive rails . Despite the presence of railroad tracks, the quality of the resistivity data on both profiles of P1 and P2, based on their RMS values, is good to excellent. Lower and upper resistivity values of both profiles are fixed similarly so that a correlation between the Figure 6 for locations). 
Boundaries of exposed pyroclastic and limestone outcrops, the contact, and most southern and northernmost bridge piers are shown for reference purposes. two can be made correctly. Low resistivity values (5 to 15 Ω m), which are shown in blue on Figure 7 and correspond to the pyroclastic outcrops in the field, cover the majority of the resistivity profile P1. There are high resistivity blocks within the pyroclastic rocks to the north of the geological contact. The contact, which is interpreted to be a fault by Caran et al. , separates low resistivity rocks (pyroclastic or tuff) in the North from the moderate resistivity rocks (limestone) in the south superficially. However, this separation is only valid at the surface because an irregular limestone block of significant size extends in the vicinity of the contact and terminates against tuff (see white-dashed line in Figure 7A). In return, tuff underlying the limestone outcrop continue across the contact and ascend, with an irregular geometry ( Figure 7A). Overall, the resistivity data along P1 indicates a tuff mound, which is shown with the blue (low resistivity) color along the profile, which extends as deep as~35 m. The resistivity data along profile P2, which is located 8 m to the east of profile P1, is provided in Figure 7B. Profile P2 indicates more chaotic distribution of low, medium and high resistivity values. The geological contact appears to separate pyroclastic rocks (low resistivity) in the north from the carbonate rocks (medium to high resistivity), but it is again only valid at shallow depths. Large, randomly distributed limestone blocks are located within the resistivity section. These limestone blocks are probably erratic blocks of Austin Chalk (Dessau Formation) which were torn from the walls of the conduit during eruption and reworked in the nearsurface diatreme. The resistivity data on profile P2 indicate two conduit-like anomalies continuous at depth, which are labeled C1 and C2 (see Figure 7B). 
The resistivity data on profiles P3 and P4, which were surveyed on the West and East sides of the railroad track, respectively, are shown in Figure 8. The data reflect topographic correction, and the maximum depth of investigation is about 35 m. Low resistivity values (3 to 20 Ω m) underlie the entire section along profile P3, which in turn, are overlain by an exposed, thin terrace deposit ( Figure 8A). The resistivity data indicate mostly tuff overlain by the terrace deposit layer, and do not indicate any presence of limestone blocks. In contrast to profile P3, resistivity data on profile P4 display a chaotic distribution of limestone and tuff units as shown in Figure 8B. The resistivity data indicate a conduit-like anomaly (C3) ascending and cutting through Austin limestone blocks. It should also be noted that profile P4 crosses the geological contact between Austin limestone and tuff at station 133 m (see Figure 8B). The geological layers underlying the terrace deposit, which has a thickness of a few meters, do not show any displacement at the contact. This observation indicates that the contact between tuff and limestone units is not due to a fault. We must point out that the resistivity values obtained from four profiles along the railroad track show similar ranges of resistivity values with the Williamson Creek vent, where there was no anthropogenic effect of any kind . This observation suggests that the resistivity values obtained at this site have not been significantly compromised by the railroad tracks. St. Elmo Magnetic Profiles The observed magnetic data of profile P5 contains very short-wavelength noise associated with material deposited at the surface by human activity (trash, pipes, etc.). The magnetic values vary between 46,300 and 47,800 nT. The average background magnetic value is about 47,400 nT in the south Austin area. The highest magnetic values occur near the St. 
Elmo Bridge, which is probably due to the metallic sources in and around the bridge (Figure 9). We applied a 4-meter low-pass filter to the observed data presented in Figure 9. Due to the high magnetic noise originating from the bridge, that portion of the survey was removed from the data. The remaining magnetic data indicate two significant anomalies: 1) A magnetic high starting at the contact between the chalk and volcanic rocks near the 60-m station; 2) A magnetic low anomaly starting at about the 174-m station within the volcanic rocks. These two anomalies were present in the raw data on both survey directions from both north to south and south to north. Two magnetic profiles (P6 and P7) were collected on the east terrace of the railroad track. Both magnetic data sets indicate a magnetic high anomaly between the stations at 53 m and 62 m, and are annotated as A and B (see Figure 10). Locations of magnetic high anomalies (A and B) observed on profiles P6 and P7 align well with the location of the volcanic conduit (C1) anomaly interpreted on the resistivity profile P2 (see Figure 7B). Residential Results The residential site is located 350 m to the northwest of the St Elmo railroad cut, where we conducted two resistivity surveys (P8 and P9) along the western and eastern part of the house, respectively (Figure 11). We conducted another resistivity profile (P10) 30 m to the south of profile P9. We were not able to conduct magnetic surveys along resistivity profiles due to ferrous cultural sources, such as the house itself, the concrete driveway around the house, air-conditioning units, and cars. The house displayed significant cracks on the walls, windows and doors near the cistern (old water well). An eyewitness account of the cistern indicated that the water well might have been 4 to 6 m deep and had been backfilled, but the backfill exhibited continuous settlement which motivated the owner of the house to pursue geophysical testing. 
The objective of the resistivity study then was to characterize the subsurface in terms of geological layers and structure, and help determine the cause of the deformation of the house. Residential Resistivity Profiles The resistivity data obtained along profiles P8 and P9 are provided in Figure 12. The spacing between both profiles was about~20 m. Locations of the cistern and corners of the house are also provided for reference purposes. The resistivity data indicate a significant lowresistivity anomaly between the stations at 24 m and 38 m on both profiles. This anomaly has sharp contacts Volcanica 2(2): 253 -268. doi: 10.30909/vol.02.02.253268 on both sides at the depth of the Austin Chalk, which are shown with higher resistivity values of green, yellow and red colors. Thus the source of the low resistivity anomaly in the center of the profiles could be due to a volcanic conduit, and is marked as C4. The geometry and dimension of the conduit anomaly C4 are similar and comparable to the conduit-like anomalies of C1, C2 and C3 observed on resistivity profiles P1, P2 and P4. The resistivity data along profile P10 is provided in Figure 13. We surveyed this resistivity profile 30 m to the south of profile P9. The purpose was an attempt to delineate the horizontal extension of the conduit-like anomaly (C4) observed on profile P9. The resistivity data do not indicate any conduit anomaly as observed on both profiles of P8 and P9. Instead, low resistivity values, between 6 and 20 Ω m, are dominant, and layered along the entire length of the profile. Relatively high resistivity values, between 20 and 200 Ω m, are only observed in the southwest corner of the profile. Resistivity profiles and borehole locations of the residential site are shown using Google Maps™ and are shown in Figure 14. 
As part of the foundation evaluation, five soil borings were taken around the perimeter of the foundation of the house, and results were provided in two interim reports by two foundation companies . The borehole data and their subsurface interpretations were taken and modified from these reports and are provided here. The depth of these boreholes varied between 4 and 8 m. A picture of borehole operation at the study area is provided in Figure 15. Boreholes B-1 and B-2 are located within the low resistivity anomaly that was interpreted to be due to a volcanic conduit. The contact between Austin Chalk and tuff deposit start with a "suspected ash layer" at Discussion Small-volume shallow marine volcanism is relatively common on Earth, but relatively poorly resolved due to the logistical and technical difficulty in studying these features in subaqueous environments . The BMP offers an excellent opportunity to perform high-resolution geophysical investigations of shallow marine vents that are exposed and eroded. Nevertheless, the study of these features is hampered because Austin is a rapidly developing urban area. Here, electrical resistivity and magnetic surveys were designed around anthropogenic features (houses, railroad tracks) to optimize resolution. While limited to the shallow subsurface (up to 35 m in this case) the methods provide a Figure 15: Picture showing the location of the cistern (old water well) and drilling at borehole location B-1. Resistivity profile P8 was surveyed along the corridor between the wooden fence and the house. much clearer perspective on the near vent volcanic features than is otherwise possible. These techniques may work well in a variety of volcanic fields in urban areas, such as Mexico City and Auckland . The critical feature revealed by the resistivity results from St. 
Elmo railroad cut site is that high resistivity limestone blocks or clusters of blocks are located in zones within the vent complex that are otherwise characterized by low resistivity. This interpretation is founded on the results of Williamson Creek volcanic mound, which is located about 2 km to the south of St. Elmo site . At the Williamson Creek site, scattered high resistivity blocks were also observed on resistivity profiles. Some of these limestone blocks out-crops within the volcanic mound. In addition, there have been also numerous similar observations made over oil fields containing inclusions of fragments of Austin Chalk or older rocks within the volcanic eruption centers . These isolated high resistivity zones are more common near the edges of the vent complex. Some resistivity profile lines (e.g. P3 and P10) are relatively free of high resistivity zones. Furthermore, in the near-vent Figure 16: Correlation of data from five boreholes. Note that all boreholes are shown aligned; however, boreholes B-4 and B-5 are actually located to the southeast of boreholes B-1, B-2 and B-3 (see Figure 14). facies, two low resistivity zones can be distinguished (roughly 15 Ω m and roughly 6 Ω m). This variation in resistivity may be related to variations within conduits and near-vent deposits. Slightly higher resistivity is expected in near-vent facies enriched with limestone lithic fragments, whereas the lowest resistivities are expected in zones of relatively clean and limestone-free conduit. Finally, the boreholes and limited outcrop data show that the moderately low resistivity zones correspond to altered volcaniclastic rocks commonly occurring in near-vent facies in distributed volcanic fields-in this case, with limestone lithics. Thus, the geophysical surveys show that the conduits retain structure, with predominantly central areas of uniformly low resistivity, and marginal areas of rapid variation in resistivity, with low resistivity zones. 
Comparing the scale and form of the inverted resistivity anomalies with geologic data from other regions suggests that these tuff mounds of the BMP are largely near-vent facies developed in the shallowest parts of diatremes through reworking of juvenile material (palagonitized basalt) and lithic material (brecciated Austin Chalk) during short-lived explosive subaqueous eruptions. Geologic investigations of shallow diatremes have shown that they most frequently have complex structure, with zones of predominantly brecciated host rock (in this case Austin Chalk), zones of intense mixing and reworking of pyroclastic and lithic material, predominantly but not exclusively at the margins of the conduit, and some zones of predominantly juvenile material (in this case basalt) . These features are commonly associated with vent complexes in distributed volcanic fields . Resistivity results from the residential site, not far from the St. Elmo volcanic mound, indicate a conduit which is similar to the conduit anomalies observed on the St. Elmo railroad cut site. Borehole data obtained from tuff deposits within the conduit anomaly display multiple ash layers. Conclusions Results from three resistivity profiles are interpreted as subsurface evidence that the St. Elmo submarinemound has high resistivity limestone blocks scattered randomly within the resistivity sections as deep as 35 m. The distribution of these chaotic limestone blocks is more evident on the eastern resistivity profiles than the western resistivity profiles. These limestone blocks are interpreted to be erratic blocks of Austin Chalk, which were probably torn from the walls of the volcanic vent and ejected. Resistivity data from eastern profiles indicate three conduit-like anomalies, and are probably part of the eruption center in the study area. Magnetic data along one profile correlates well with the location of contact between the mound and the limestone unit. 
Magnetic data along two other profiles show a high magnetic anomaly that correlates
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.runtime_auto) {
		/*
		 * Drop the "auto" permission and take a usage reference so the
		 * device is brought to (and stays in) the resumed state.
		 */
		dev->power.runtime_auto = false;
		atomic_inc(&dev->power.usage_count);
		rpm_resume(dev, 0);
	}

	spin_unlock_irq(&dev->power.lock);
}
/** * \brief Transforms an \ref error from a T gate. Converts an X error to a Y error * * \param[in] error The \ref error to transform */ void qc_T_transform_error(ERROR *error) { if (error->op == X) { error->op = Y; } }
<filename>ferrite-session/src/internal/session/choice/internal/utils/mod.rs
//! Internal helper utilities for the `choice` session machinery.
//! The submodules are private; their public items are re-exported
//! below so callers can reach everything through `utils::*`.

mod receiver_to_selector;
mod run_case_cont;
mod unit_to_session;

// Flatten the submodule APIs into this module's namespace.
pub use receiver_to_selector::*;
pub use run_case_cont::*;
pub use unit_to_session::*;
/**
 * Determines if a location counts as in-city, and has passed the required wait
 * time since the city was founded.
 *
 * @param room_data *loc The location to check.
 * @param empire_data *emp The empire to check.
 * @param bool check_wait If TRUE, requires the city wait time to have passed.
 * @param bool *too_soon Optional (may be NULL): set to TRUE if there was a city
 *        but it was founded too recently.
 * @return bool TRUE if in-city, FALSE if not.
 */
bool is_in_city_for_empire(room_data *loc, empire_data *emp, bool check_wait, bool *too_soon) {
	struct empire_city_data *city;
	int dist, wait;
	
	// Default the out-parameter; guarded so callers that don't care may pass NULL.
	if (too_soon) {
		*too_soon = FALSE;
	}
	
	if (!emp) {
		return FALSE;
	}
	if (ROOM_BLD_FLAGGED(loc, BLD_SECONDARY_TERRITORY)) {
		// buildings flagged as secondary territory always count as in-city
		return TRUE;
	}
	
	// Only pay for the config lookup when the wait is actually checked;
	// the value is never read if check_wait is FALSE (short-circuit below).
	wait = check_wait ? (config_get_int("minutes_to_full_city") * SECS_PER_REAL_MIN) : 0;
	
	for (city = EMPIRE_CITY_LIST(emp); city; city = city->next) {
		dist = compute_distance(loc, city->location);
		
		// Inside the city radius, or inside the tripled radius for
		// locations that qualify for the large-city radius.
		if (dist <= city_type[city->type].radius || (LARGE_CITY_RADIUS(loc) && dist <= (3 * city_type[city->type].radius))) {
			if (!check_wait || (get_room_extra_data(city->location, ROOM_EXTRA_FOUND_TIME) + wait) < time(0)) {
				return TRUE;
			}
			else if (too_soon) {
				// In range of a city, but it was founded too recently.
				*too_soon = TRUE;
			}
		}
	}
	
	return FALSE;
}
<gh_stars>1-10 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * * This file is released under the GPL. */ #include "dm-zoned.h" #include <linux/module.h> #include <linux/crc32.h> #include <linux/sched/mm.h> #define DM_MSG_PREFIX "zoned metadata" /* * Metadata version. */ #define DMZ_META_VER 1 /* * On-disk super block magic. */ #define DMZ_MAGIC ((((unsigned int)('D')) << 24) | \ (((unsigned int)('Z')) << 16) | \ (((unsigned int)('B')) << 8) | \ ((unsigned int)('D'))) /* * On disk super block. * This uses only 512 B but uses on disk a full 4KB block. This block is * followed on disk by the mapping table of chunks to zones and the bitmap * blocks indicating zone block validity. * The overall resulting metadata format is: * (1) Super block (1 block) * (2) Chunk mapping table (nr_map_blocks) * (3) Bitmap blocks (nr_bitmap_blocks) * All metadata blocks are stored in conventional zones, starting from * the first conventional zone found on disk. */ struct dmz_super { /* Magic number */ __le32 magic; /* 4 */ /* Metadata version number */ __le32 version; /* 8 */ /* Generation number */ __le64 gen; /* 16 */ /* This block number */ __le64 sb_block; /* 24 */ /* The number of metadata blocks, including this super block */ __le32 nr_meta_blocks; /* 28 */ /* The number of sequential zones reserved for reclaim */ __le32 nr_reserved_seq; /* 32 */ /* The number of entries in the mapping table */ __le32 nr_chunks; /* 36 */ /* The number of blocks used for the chunk mapping table */ __le32 nr_map_blocks; /* 40 */ /* The number of blocks used for the block bitmaps */ __le32 nr_bitmap_blocks; /* 44 */ /* Checksum */ __le32 crc; /* 48 */ /* Padding to full 512B sector */ u8 reserved[464]; /* 512 */ }; /* * Chunk mapping entry: entries are indexed by chunk number * and give the zone ID (dzone_id) mapping the chunk on disk. * This zone may be sequential or random. 
If it is a sequential * zone, a second zone (bzone_id) used as a write buffer may * also be specified. This second zone will always be a randomly * writeable zone. */ struct dmz_map { __le32 dzone_id; __le32 bzone_id; }; /* * Chunk mapping table metadata: 512 8-bytes entries per 4KB block. */ #define DMZ_MAP_ENTRIES (DMZ_BLOCK_SIZE / sizeof(struct dmz_map)) #define DMZ_MAP_ENTRIES_SHIFT (ilog2(DMZ_MAP_ENTRIES)) #define DMZ_MAP_ENTRIES_MASK (DMZ_MAP_ENTRIES - 1) #define DMZ_MAP_UNMAPPED UINT_MAX /* * Meta data block descriptor (for cached metadata blocks). */ struct dmz_mblock { struct rb_node node; struct list_head link; sector_t no; unsigned int ref; unsigned long state; struct page *page; void *data; }; /* * Metadata block state flags. */ enum { DMZ_META_DIRTY, DMZ_META_READING, DMZ_META_WRITING, DMZ_META_ERROR, }; /* * Super block information (one per metadata set). */ struct dmz_sb { sector_t block; struct dmz_mblock *mblk; struct dmz_super *sb; }; /* * In-memory metadata. */ struct dmz_metadata { struct dmz_dev *dev; sector_t zone_bitmap_size; unsigned int zone_nr_bitmap_blocks; unsigned int zone_bits_per_mblk; unsigned int nr_bitmap_blocks; unsigned int nr_map_blocks; unsigned int nr_useable_zones; unsigned int nr_meta_blocks; unsigned int nr_meta_zones; unsigned int nr_data_zones; unsigned int nr_rnd_zones; unsigned int nr_reserved_seq; unsigned int nr_chunks; /* Zone information array */ struct dm_zone *zones; struct dm_zone *sb_zone; struct dmz_sb sb[2]; unsigned int mblk_primary; u64 sb_gen; unsigned int min_nr_mblks; unsigned int max_nr_mblks; atomic_t nr_mblks; struct rw_semaphore mblk_sem; struct mutex mblk_flush_lock; spinlock_t mblk_lock; struct rb_root mblk_rbtree; struct list_head mblk_lru_list; struct list_head mblk_dirty_list; struct shrinker mblk_shrinker; /* Zone allocation management */ struct mutex map_lock; struct dmz_mblock **map_mblk; unsigned int nr_rnd; atomic_t unmap_nr_rnd; struct list_head unmap_rnd_list; struct list_head 
map_rnd_list; unsigned int nr_seq; atomic_t unmap_nr_seq; struct list_head unmap_seq_list; struct list_head map_seq_list; atomic_t nr_reserved_seq_zones; struct list_head reserved_seq_zones_list; wait_queue_head_t free_wq; }; /* * Various accessors */ unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone) { return ((unsigned int)(zone - zmd->zones)); } sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) { return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift; } sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) { return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift; } unsigned int dmz_nr_chunks(struct dmz_metadata *zmd) { return zmd->nr_chunks; } unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd) { return zmd->nr_rnd; } unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd) { return atomic_read(&zmd->unmap_nr_rnd); } /* * Lock/unlock mapping table. * The map lock also protects all the zone lists. */ void dmz_lock_map(struct dmz_metadata *zmd) { mutex_lock(&zmd->map_lock); } void dmz_unlock_map(struct dmz_metadata *zmd) { mutex_unlock(&zmd->map_lock); } /* * Lock/unlock metadata access. This is a "read" lock on a semaphore * that prevents metadata flush from running while metadata are being * modified. The actual metadata write mutual exclusion is achieved with * the map lock and zone state management (active and reclaim state are * mutually exclusive). */ void dmz_lock_metadata(struct dmz_metadata *zmd) { down_read(&zmd->mblk_sem); } void dmz_unlock_metadata(struct dmz_metadata *zmd) { up_read(&zmd->mblk_sem); } /* * Lock/unlock flush: prevent concurrent executions * of dmz_flush_metadata as well as metadata modification in reclaim * while flush is being executed. 
*/
/* Serialize flush execution (see comment above). */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}

/*
 * Allocate a metadata block. If the cache is over its maximum size, an
 * unused cached block (LRU head) is recycled instead of allocating a
 * fresh one. Returns NULL on allocation failure.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		/* LRU list only holds blocks with ref == 0 and not dirty */
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			/* Detach from cache indexes and rebrand for mblk_no */
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block (GFP_NOIO: may be called on the I/O path) */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}

/*
 * Free a metadata block and its backing page, and drop the cache count.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}

/*
 * Insert a metadata block in the rbtree, keyed by block number.
 * Note: the comparison direction here mirrors the one used by the
 * lookup in dmz_get_mblock_fast(), so the tree ordering is consistent.
 * Caller must hold mblk_lock.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}

/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
*/ static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd, sector_t mblk_no) { struct rb_root *root = &zmd->mblk_rbtree; struct rb_node *node = root->rb_node; struct dmz_mblock *mblk; while (node) { mblk = container_of(node, struct dmz_mblock, node); if (mblk->no == mblk_no) { /* * If this is the first reference to the block, * remove it from the LRU list. */ mblk->ref++; if (mblk->ref == 1 && !test_bit(DMZ_META_DIRTY, &mblk->state)) list_del_init(&mblk->link); return mblk; } node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right; } return NULL; } /* * Metadata block BIO end callback. */ static void dmz_mblock_bio_end_io(struct bio *bio) { struct dmz_mblock *mblk = bio->bi_private; int flag; if (bio->bi_status) set_bit(DMZ_META_ERROR, &mblk->state); if (bio_op(bio) == REQ_OP_WRITE) flag = DMZ_META_WRITING; else flag = DMZ_META_READING; clear_bit_unlock(flag, &mblk->state); smp_mb__after_atomic(); wake_up_bit(&mblk->state, flag); bio_put(bio); } /* * Read an uncached metadata block from disk and add it to the cache. */ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, sector_t mblk_no) { struct dmz_mblock *mblk, *m; sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; struct bio *bio; if (dmz_bdev_is_dying(zmd->dev)) return ERR_PTR(-EIO); /* Get a new block and a BIO to read it */ mblk = dmz_alloc_mblock(zmd, mblk_no); if (!mblk) return ERR_PTR(-ENOMEM); bio = bio_alloc(GFP_NOIO, 1); if (!bio) { dmz_free_mblock(zmd, mblk); return ERR_PTR(-ENOMEM); } spin_lock(&zmd->mblk_lock); /* * Make sure that another context did not start reading * the block already. 
*/ m = dmz_get_mblock_fast(zmd, mblk_no); if (m) { spin_unlock(&zmd->mblk_lock); dmz_free_mblock(zmd, mblk); bio_put(bio); return m; } mblk->ref++; set_bit(DMZ_META_READING, &mblk->state); dmz_insert_mblock(zmd, mblk); spin_unlock(&zmd->mblk_lock); /* Submit read BIO */ bio->bi_iter.bi_sector = dmz_blk2sect(block); bio_set_dev(bio, zmd->dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); return mblk; } /* * Free metadata blocks. */ static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd, unsigned long limit) { struct dmz_mblock *mblk; unsigned long count = 0; if (!zmd->max_nr_mblks) return 0; while (!list_empty(&zmd->mblk_lru_list) && atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks && count < limit) { mblk = list_first_entry(&zmd->mblk_lru_list, struct dmz_mblock, link); list_del_init(&mblk->link); rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); count++; } return count; } /* * For mblock shrinker: get the number of unused metadata blocks in the cache. */ static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) { struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker); return atomic_read(&zmd->nr_mblks); } /* * For mblock shrinker: scan unused metadata blocks and shrink the cache. */ static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) { struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker); unsigned long count; spin_lock(&zmd->mblk_lock); count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan); spin_unlock(&zmd->mblk_lock); return count ? count : SHRINK_STOP; } /* * Release a metadata block. 
*/
/*
 * Release a metadata block: drop one reference. When the last reference
 * goes away, an errored block is discarded immediately, while a clean
 * block is parked on the LRU list for possible reuse.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{

	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			/* Errored block: remove from cache and free */
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			/* Clean and unused: make it reclaimable */
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}

/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		/* Read failed: drop our reference and report I/O error */
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(zmd->dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}

/*
 * Mark a metadata block dirty and queue it for the next flush,
 * unless it is already on the dirty list.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}

/*
 * Issue a metadata block write BIO.
*/
/*
 * Issue an asynchronous write BIO for one cached metadata block to the
 * given super block set. Completion is signalled through
 * dmz_mblock_bio_end_io() clearing DMZ_META_WRITING.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(zmd->dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return -ENOMEM;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}

/*
 * Read/write a metadata block synchronously (submit_bio_wait).
 * On error, the block device state is re-checked.
 */
static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
			  struct page *page)
{
	struct bio *bio;
	int ret;

	if (dmz_bdev_is_dying(zmd->dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(zmd->dev);
	return ret;
}

/*
 * Write super block of the specified metadata set.
*/
/*
 * Write the super block of the specified metadata set with an
 * incremented generation, then flush the device cache. The CRC is
 * computed over the full 4KB block with the crc field zeroed, seeded
 * with the generation number.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	sector_t block = zmd->sb[set].block;
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);
	sb->version = cpu_to_le32(DMZ_META_VER);

	sb->gen = cpu_to_le64(sb_gen);

	sb->sb_block = cpu_to_le64(block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	/* crc must be zero while the checksum is computed */
	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
	if (ret == 0)
		/* Make the new super block durable before it is trusted */
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}

/*
 * Write all dirty metadata blocks of write_list to the specified set,
 * plugged and asynchronously, then wait for every submitted write and
 * collect errors. Finishes with a device cache flush on success.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion (only as many as were actually submitted) */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			/* Clear the error bit so the block can be retried */
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(zmd->dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}

/*
 * Log dirty metadata blocks.
*/ static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd, struct list_head *write_list) { unsigned int log_set = zmd->mblk_primary ^ 0x1; int ret; /* Write dirty blocks to the log */ ret = dmz_write_dirty_mblocks(zmd, write_list, log_set); if (ret) return ret; /* * No error so far: now validate the log by updating the * log index super block generation. */ ret = dmz_write_sb(zmd, log_set); if (ret) return ret; return 0; } /* * Flush dirty metadata blocks. */ int dmz_flush_metadata(struct dmz_metadata *zmd) { struct dmz_mblock *mblk; struct list_head write_list; int ret; if (WARN_ON(!zmd)) return 0; INIT_LIST_HEAD(&write_list); /* * Make sure that metadata blocks are stable before logging: take * the write lock on the metadata semaphore to prevent target BIOs * from modifying metadata. */ down_write(&zmd->mblk_sem); /* * This is called from the target flush work and reclaim work. * Concurrent execution is not allowed. */ dmz_lock_flush(zmd); if (dmz_bdev_is_dying(zmd->dev)) { ret = -EIO; goto out; } /* Get dirty blocks */ spin_lock(&zmd->mblk_lock); list_splice_init(&zmd->mblk_dirty_list, &write_list); spin_unlock(&zmd->mblk_lock); /* If there are no dirty metadata blocks, just flush the device cache */ if (list_empty(&write_list)) { ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); goto err; } /* * The primary metadata set is still clean. Keep it this way until * all updates are successful in the secondary set. That is, use * the secondary set as a log. */ ret = dmz_log_dirty_mblocks(zmd, &write_list); if (ret) goto err; /* * The log is on disk. It is now safe to update in place * in the primary metadata set. 
*/ ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary); if (ret) goto err; ret = dmz_write_sb(zmd, zmd->mblk_primary); if (ret) goto err; while (!list_empty(&write_list)) { mblk = list_first_entry(&write_list, struct dmz_mblock, link); list_del_init(&mblk->link); spin_lock(&zmd->mblk_lock); clear_bit(DMZ_META_DIRTY, &mblk->state); if (mblk->ref == 0) list_add_tail(&mblk->link, &zmd->mblk_lru_list); spin_unlock(&zmd->mblk_lock); } zmd->sb_gen++; out: dmz_unlock_flush(zmd); up_write(&zmd->mblk_sem); return ret; err: if (!list_empty(&write_list)) { spin_lock(&zmd->mblk_lock); list_splice(&write_list, &zmd->mblk_dirty_list); spin_unlock(&zmd->mblk_lock); } if (!dmz_check_bdev(zmd->dev)) ret = -EIO; goto out; } /* * Check super block. */ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb) { unsigned int nr_meta_zones, nr_data_zones; struct dmz_dev *dev = zmd->dev; u32 crc, stored_crc; u64 gen; gen = le64_to_cpu(sb->gen); stored_crc = le32_to_cpu(sb->crc); sb->crc = 0; crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE); if (crc != stored_crc) { dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)", crc, stored_crc); return -ENXIO; } if (le32_to_cpu(sb->magic) != DMZ_MAGIC) { dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)", DMZ_MAGIC, le32_to_cpu(sb->magic)); return -ENXIO; } if (le32_to_cpu(sb->version) != DMZ_META_VER) { dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)", DMZ_META_VER, le32_to_cpu(sb->version)); return -ENXIO; } nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1) >> dev->zone_nr_blocks_shift; if (!nr_meta_zones || nr_meta_zones >= zmd->nr_rnd_zones) { dmz_dev_err(dev, "Invalid number of metadata blocks"); return -ENXIO; } if (!le32_to_cpu(sb->nr_reserved_seq) || le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) { dmz_dev_err(dev, "Invalid number of reserved sequential zones"); return -ENXIO; } nr_data_zones = 
zmd->nr_useable_zones - (nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq)); if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) { dmz_dev_err(dev, "Invalid number of chunks %u / %u", le32_to_cpu(sb->nr_chunks), nr_data_zones); return -ENXIO; } /* OK */ zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks); zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq); zmd->nr_chunks = le32_to_cpu(sb->nr_chunks); zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks); zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks); zmd->nr_meta_zones = nr_meta_zones; zmd->nr_data_zones = nr_data_zones; return 0; } /* * Read the first or second super block from disk. */ static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set) { return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block, zmd->sb[set].mblk->page); } /* * Determine the position of the secondary super blocks on disk. * This is used only if a corruption of the primary super block * is detected. */ static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd) { unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks; struct dmz_mblock *mblk; int i; /* Allocate a block */ mblk = dmz_alloc_mblock(zmd, 0); if (!mblk) return -ENOMEM; zmd->sb[1].mblk = mblk; zmd->sb[1].sb = mblk->data; /* Bad first super block: search for the second one */ zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks; for (i = 0; i < zmd->nr_rnd_zones - 1; i++) { if (dmz_read_sb(zmd, 1) != 0) break; if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC) return 0; zmd->sb[1].block += zone_nr_blocks; } dmz_free_mblock(zmd, mblk); zmd->sb[1].mblk = NULL; return -EIO; } /* * Read the first or second super block from disk. 
*/
/*
 * Read the super block of the given set into a freshly allocated
 * cached metadata block, recording it in zmd->sb[set].
 */
static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[set].mblk = mblk;
	zmd->sb[set].sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		zmd->sb[set].mblk = NULL;
		return ret;
	}

	return 0;
}

/*
 * Recover a metadata set: copy all metadata blocks from the valid
 * (source) set over the invalid (destination) set, then finalize the
 * recovery by writing the destination super block last.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	else {
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
	}

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks (block 0, the super block, is written last) */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}

/*
 * Get super block from disk.
*/
/*
 * Load both super blocks from disk, validate them, recover whichever
 * copy is invalid from the other, and select the set with the highest
 * generation as the primary one.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	ret = dmz_get_sb(zmd, 0);
	if (ret) {
		dmz_dev_err(zmd->dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[0].sb);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		/* Secondary set sits right after the primary meta zones */
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
		ret = dmz_get_sb(zmd, 1);
	} else
		/* Primary is corrupt: scan for the secondary by magic */
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[1].sb);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_dev_err(zmd->dev, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 0);

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 1);

	if (ret) {
		dmz_dev_err(zmd->dev, "Recovery failed");
		return -EIO;
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	return 0;
}

/*
 * Initialize a zone descriptor.
*/ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data) { struct dmz_metadata *zmd = data; struct dm_zone *zone = &zmd->zones[idx]; struct dmz_dev *dev = zmd->dev; /* Ignore the eventual last runt (smaller) zone */ if (blkz->len != dev->zone_nr_sectors) { if (blkz->start + blkz->len == dev->capacity) return 0; return -ENXIO; } INIT_LIST_HEAD(&zone->link); atomic_set(&zone->refcount, 0); zone->chunk = DMZ_MAP_UNMAPPED; switch (blkz->type) { case BLK_ZONE_TYPE_CONVENTIONAL: set_bit(DMZ_RND, &zone->flags); break; case BLK_ZONE_TYPE_SEQWRITE_REQ: case BLK_ZONE_TYPE_SEQWRITE_PREF: set_bit(DMZ_SEQ, &zone->flags); break; default: return -ENXIO; } if (dmz_is_rnd(zone)) zone->wp_block = 0; else zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start); if (blkz->cond == BLK_ZONE_COND_OFFLINE) set_bit(DMZ_OFFLINE, &zone->flags); else if (blkz->cond == BLK_ZONE_COND_READONLY) set_bit(DMZ_READ_ONLY, &zone->flags); else { zmd->nr_useable_zones++; if (dmz_is_rnd(zone)) { zmd->nr_rnd_zones++; if (!zmd->sb_zone) { /* Super block zone */ zmd->sb_zone = zone; } } } return 0; } /* * Free zones descriptors. */ static void dmz_drop_zones(struct dmz_metadata *zmd) { kfree(zmd->zones); zmd->zones = NULL; } /* * Allocate and initialize zone descriptors using the zone * information from disk. */ static int dmz_init_zones(struct dmz_metadata *zmd) { struct dmz_dev *dev = zmd->dev; int ret; /* Init */ zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3; zmd->zone_nr_bitmap_blocks = max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT); zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks, DMZ_BLOCK_SIZE_BITS); /* Allocate zone array */ zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL); if (!zmd->zones) return -ENOMEM; dmz_dev_info(dev, "Using %zu B for zone information", sizeof(struct dm_zone) * dev->nr_zones); /* * Get zone information and initialize zone descriptors. 
At the same * time, determine where the super block should be: first block of the * first randomly writable zone. */ ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone, zmd); if (ret < 0) { dmz_drop_zones(zmd); return ret; } return 0; } static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx, void *data) { struct dm_zone *zone = data; clear_bit(DMZ_OFFLINE, &zone->flags); clear_bit(DMZ_READ_ONLY, &zone->flags); if (blkz->cond == BLK_ZONE_COND_OFFLINE) set_bit(DMZ_OFFLINE, &zone->flags); else if (blkz->cond == BLK_ZONE_COND_READONLY) set_bit(DMZ_READ_ONLY, &zone->flags); if (dmz_is_seq(zone)) zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start); else zone->wp_block = 0; return 0; } /* * Update a zone information. */ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) { unsigned int noio_flag; int ret; /* * Get zone information from disk. Since blkdev_report_zones() uses * GFP_KERNEL by default for memory allocations, set the per-task * PF_MEMALLOC_NOIO flag so that all allocations are done as if * GFP_NOIO was specified. */ noio_flag = memalloc_noio_save(); ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1, dmz_update_zone_cb, zone); memalloc_noio_restore(noio_flag); if (ret == 0) ret = -EIO; if (ret < 0) { dmz_dev_err(zmd->dev, "Get zone %u report failed", dmz_id(zmd, zone)); dmz_check_bdev(zmd->dev); return ret; } return 0; } /* * Check a zone write pointer position when the zone is marked * with the sequential write error flag. 
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	unsigned int wp = 0;
	int ret;

	/* Remember the write pointer position we were tracking. */
	wp = zone->wp_block;

	/* Refresh wp_block (and condition flags) from the device. */
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
		     dmz_id(zmd, zone), zone->wp_block, wp);

	/*
	 * If the device write pointer is behind the position we tracked,
	 * the blocks in between were never actually written: remove them
	 * from the zone valid-blocks bitmap.
	 */
	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}

/* Return the zone descriptor for zone number @zone_id. */
static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return &zmd->zones[zone_id];
}

/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	/*
	 * Only issue the reset command if the zone has data or is in the
	 * write-error state; an already-empty healthy zone needs no I/O.
	 */
	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zmd->dev;

		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       dev->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    dmz_id(zmd, zone), ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}

static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);

/*
 * Initialize chunk mapping.
*/ static int dmz_load_mapping(struct dmz_metadata *zmd) { struct dmz_dev *dev = zmd->dev; struct dm_zone *dzone, *bzone; struct dmz_mblock *dmap_mblk = NULL; struct dmz_map *dmap; unsigned int i = 0, e = 0, chunk = 0; unsigned int dzone_id; unsigned int bzone_id; /* Metadata block array for the chunk mapping table */ zmd->map_mblk = kcalloc(zmd->nr_map_blocks, sizeof(struct dmz_mblk *), GFP_KERNEL); if (!zmd->map_mblk) return -ENOMEM; /* Get chunk mapping table blocks and initialize zone mapping */ while (chunk < zmd->nr_chunks) { if (!dmap_mblk) { /* Get mapping block */ dmap_mblk = dmz_get_mblock(zmd, i + 1); if (IS_ERR(dmap_mblk)) return PTR_ERR(dmap_mblk); zmd->map_mblk[i] = dmap_mblk; dmap = (struct dmz_map *) dmap_mblk->data; i++; e = 0; } /* Check data zone */ dzone_id = le32_to_cpu(dmap[e].dzone_id); if (dzone_id == DMZ_MAP_UNMAPPED) goto next; if (dzone_id >= dev->nr_zones) { dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u", chunk, dzone_id); return -EIO; } dzone = dmz_get(zmd, dzone_id); set_bit(DMZ_DATA, &dzone->flags); dzone->chunk = chunk; dmz_get_zone_weight(zmd, dzone); if (dmz_is_rnd(dzone)) list_add_tail(&dzone->link, &zmd->map_rnd_list); else list_add_tail(&dzone->link, &zmd->map_seq_list); /* Check buffer zone */ bzone_id = le32_to_cpu(dmap[e].bzone_id); if (bzone_id == DMZ_MAP_UNMAPPED) goto next; if (bzone_id >= dev->nr_zones) { dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u", chunk, bzone_id); return -EIO; } bzone = dmz_get(zmd, bzone_id); if (!dmz_is_rnd(bzone)) { dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u", chunk, bzone_id); return -EIO; } set_bit(DMZ_DATA, &bzone->flags); set_bit(DMZ_BUF, &bzone->flags); bzone->chunk = chunk; bzone->bzone = dzone; dzone->bzone = bzone; dmz_get_zone_weight(zmd, bzone); list_add_tail(&bzone->link, &zmd->map_rnd_list); next: chunk++; e++; if (e >= DMZ_MAP_ENTRIES) dmap_mblk = NULL; } /* * At this point, only meta zones and mapped data zones were * fully initialized. 
All remaining zones are unmapped data * zones. Finish initializing those here. */ for (i = 0; i < dev->nr_zones; i++) { dzone = dmz_get(zmd, i); if (dmz_is_meta(dzone)) continue; if (dmz_is_rnd(dzone)) zmd->nr_rnd++; else zmd->nr_seq++; if (dmz_is_data(dzone)) { /* Already initialized */ continue; } /* Unmapped data zone */ set_bit(DMZ_DATA, &dzone->flags); dzone->chunk = DMZ_MAP_UNMAPPED; if (dmz_is_rnd(dzone)) { list_add_tail(&dzone->link, &zmd->unmap_rnd_list); atomic_inc(&zmd->unmap_nr_rnd); } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) { list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list); atomic_inc(&zmd->nr_reserved_seq_zones); zmd->nr_seq--; } else { list_add_tail(&dzone->link, &zmd->unmap_seq_list); atomic_inc(&zmd->unmap_nr_seq); } } return 0; } /* * Set a data chunk mapping. */ static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, unsigned int dzone_id, unsigned int bzone_id) { struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; int map_idx = chunk & DMZ_MAP_ENTRIES_MASK; dmap[map_idx].dzone_id = cpu_to_le32(dzone_id); dmap[map_idx].bzone_id = cpu_to_le32(bzone_id); dmz_dirty_mblock(zmd, dmap_mblk); } /* * The list of mapped zones is maintained in LRU order. * This rotates a zone at the end of its map list. */ static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) { if (list_empty(&zone->link)) return; list_del_init(&zone->link); if (dmz_is_seq(zone)) { /* LRU rotate sequential zone */ list_add_tail(&zone->link, &zmd->map_seq_list); } else { /* LRU rotate random zone */ list_add_tail(&zone->link, &zmd->map_rnd_list); } } /* * The list of mapped random zones is maintained * in LRU order. This rotates a zone at the end of the list. 
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	/* Keep a data zone and its buffer zone at the same LRU position. */
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}

/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	/*
	 * Drop both locks (map first, then metadata) while sleeping so that
	 * reclaim can make progress and free a zone; reacquire in the
	 * opposite order before returning to the caller.
	 */
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}

/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns 0 if the zone cannot be locked or if it is already locked
 * and 1 otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	/* 1 only if we are the ones who set the RECLAIM bit. */
	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	/* Pair the release with a barrier before waking any waiters. */
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}

/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* Release locks while waiting so reclaim itself is not blocked. */
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}

/*
 * Select a random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone;

	if (list_empty(&zmd->map_rnd_list))
		return ERR_PTR(-EBUSY);

	/*
	 * Walk the mapped random zones in LRU order; for a buffer zone,
	 * reclaim targets the data zone it buffers.
	 */
	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
		if (dmz_is_buf(zone))
			dzone = zone->bzone;
		else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return ERR_PTR(-EBUSY);
}

/*
 * Select a buffered sequential zone for reclaim.
*/ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) { struct dm_zone *zone; if (list_empty(&zmd->map_seq_list)) return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_seq_list, link) { if (!zone->bzone) continue; if (dmz_lock_zone_reclaim(zone)) return zone; } return ERR_PTR(-EBUSY); } /* * Select a zone for reclaim. */ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd) { struct dm_zone *zone; /* * Search for a zone candidate to reclaim: 2 cases are possible. * (1) There is no free sequential zones. Then a random data zone * cannot be reclaimed. So choose a sequential zone to reclaim so * that afterward a random zone can be reclaimed. * (2) At least one free sequential zone is available, then choose * the oldest random zone (data or buffer) that can be locked. */ dmz_lock_map(zmd); if (list_empty(&zmd->reserved_seq_zones_list)) zone = dmz_get_seq_zone_for_reclaim(zmd); else zone = dmz_get_rnd_zone_for_reclaim(zmd); dmz_unlock_map(zmd); return zone; } /* * Get the zone mapping a chunk, if the chunk is mapped already. * If no mapping exist and the operation is WRITE, a zone is * allocated and used to map the chunk. * The zone returned will be set to the active state. */ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op) { struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK; unsigned int dzone_id; struct dm_zone *dzone = NULL; int ret = 0; dmz_lock_map(zmd); again: /* Get the chunk mapping */ dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id); if (dzone_id == DMZ_MAP_UNMAPPED) { /* * Read or discard in unmapped chunks are fine. But for * writes, we need a mapping, so get one. 
*/ if (op != REQ_OP_WRITE) goto out; /* Allocate a random zone */ dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!dzone) { if (dmz_bdev_is_dying(zmd->dev)) { dzone = ERR_PTR(-EIO); goto out; } dmz_wait_for_free_zones(zmd); goto again; } dmz_map_zone(zmd, dzone, chunk); } else { /* The chunk is already mapped: get the mapping zone */ dzone = dmz_get(zmd, dzone_id); if (dzone->chunk != chunk) { dzone = ERR_PTR(-EIO); goto out; } /* Repair write pointer if the sequential dzone has error */ if (dmz_seq_write_err(dzone)) { ret = dmz_handle_seq_write_err(zmd, dzone); if (ret) { dzone = ERR_PTR(-EIO); goto out; } clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags); } } /* * If the zone is being reclaimed, the chunk mapping may change * to a different zone. So wait for reclaim and retry. Otherwise, * activate the zone (this will prevent reclaim from touching it). */ if (dmz_in_reclaim(dzone)) { dmz_wait_for_reclaim(zmd, dzone); goto again; } dmz_activate_zone(dzone); dmz_lru_zone(zmd, dzone); out: dmz_unlock_map(zmd); return dzone; } /* * Write and discard change the block validity of data zones and their buffer * zones. Check here that valid blocks are still present. If all blocks are * invalid, the zones can be unmapped on the fly without waiting for reclaim * to do it. */ void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone) { struct dm_zone *bzone; dmz_lock_map(zmd); bzone = dzone->bzone; if (bzone) { if (dmz_weight(bzone)) dmz_lru_zone(zmd, bzone); else { /* Empty buffer zone: reclaim it */ dmz_unmap_zone(zmd, bzone); dmz_free_zone(zmd, bzone); bzone = NULL; } } /* Deactivate the data zone */ dmz_deactivate_zone(dzone); if (dmz_is_active(dzone) || bzone || dmz_weight(dzone)) dmz_lru_zone(zmd, dzone); else { /* Unbuffered inactive empty data zone: reclaim it */ dmz_unmap_zone(zmd, dzone); dmz_free_zone(zmd, dzone); } dmz_unlock_map(zmd); } /* * Allocate and map a random zone to buffer a chunk * already mapped to a sequential zone. 
*/ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd, struct dm_zone *dzone) { struct dm_zone *bzone; dmz_lock_map(zmd); again: bzone = dzone->bzone; if (bzone) goto out; /* Allocate a random zone */ bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!bzone) { if (dmz_bdev_is_dying(zmd->dev)) { bzone = ERR_PTR(-EIO); goto out; } dmz_wait_for_free_zones(zmd); goto again; } /* Update the chunk mapping */ dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone), dmz_id(zmd, bzone)); set_bit(DMZ_BUF, &bzone->flags); bzone->chunk = dzone->chunk; bzone->bzone = dzone; dzone->bzone = bzone; list_add_tail(&bzone->link, &zmd->map_rnd_list); out: dmz_unlock_map(zmd); return bzone; } /* * Get an unmapped (free) zone. * This must be called with the mapping lock held. */ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags) { struct list_head *list; struct dm_zone *zone; if (flags & DMZ_ALLOC_RND) list = &zmd->unmap_rnd_list; else list = &zmd->unmap_seq_list; again: if (list_empty(list)) { /* * No free zone: if this is for reclaim, allow using the * reserved sequential zones. */ if (!(flags & DMZ_ALLOC_RECLAIM) || list_empty(&zmd->reserved_seq_zones_list)) return NULL; zone = list_first_entry(&zmd->reserved_seq_zones_list, struct dm_zone, link); list_del_init(&zone->link); atomic_dec(&zmd->nr_reserved_seq_zones); return zone; } zone = list_first_entry(list, struct dm_zone, link); list_del_init(&zone->link); if (dmz_is_rnd(zone)) atomic_dec(&zmd->unmap_nr_rnd); else atomic_dec(&zmd->unmap_nr_seq); if (dmz_is_offline(zone)) { dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone)); zone = NULL; goto again; } return zone; } /* * Free a zone. * This must be called with the mapping lock held. 
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
		atomic_inc(&zmd->unmap_nr_rnd);
	} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
		   zmd->nr_reserved_seq) {
		/* Refill the reserved sequential zone pool first. */
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zmd->unmap_seq_list);
		atomic_inc(&zmd->unmap_nr_seq);
	}

	/* A zone became available: wake up anyone in dmz_wait_for_free_zones(). */
	wake_up_all(&zmd->free_wq);
}

/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &zmd->map_rnd_list);
	else
		list_add_tail(&dzone->link, &zmd->map_seq_list);
}

/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = dmz_id(zmd, zone->bzone);
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;
	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}

/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
*/ static unsigned int dmz_set_bits(unsigned long *bitmap, unsigned int bit, unsigned int nr_bits) { unsigned long *addr; unsigned int end = bit + nr_bits; unsigned int n = 0; while (bit < end) { if (((bit & (BITS_PER_LONG - 1)) == 0) && ((end - bit) >= BITS_PER_LONG)) { /* Try to set the whole word at once */ addr = bitmap + BIT_WORD(bit); if (*addr == 0) { *addr = ULONG_MAX; n += BITS_PER_LONG; bit += BITS_PER_LONG; continue; } } if (!test_and_set_bit(bit, bitmap)) n++; bit++; } return n; } /* * Get the bitmap block storing the bit for chunk_block in zone. */ static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block) { sector_t bitmap_block = 1 + zmd->nr_map_blocks + (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) + (chunk_block >> DMZ_BLOCK_SHIFT_BITS); return dmz_get_mblock(zmd, bitmap_block); } /* * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone. */ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone, struct dm_zone *to_zone) { struct dmz_mblock *from_mblk, *to_mblk; sector_t chunk_block = 0; /* Get the zones bitmap blocks */ while (chunk_block < zmd->dev->zone_nr_blocks) { from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block); if (IS_ERR(from_mblk)) return PTR_ERR(from_mblk); to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block); if (IS_ERR(to_mblk)) { dmz_release_mblock(zmd, from_mblk); return PTR_ERR(to_mblk); } memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE); dmz_dirty_mblock(zmd, to_mblk); dmz_release_mblock(zmd, to_mblk); dmz_release_mblock(zmd, from_mblk); chunk_block += zmd->zone_bits_per_mblk; } to_zone->weight = from_zone->weight; return 0; } /* * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone, * starting from chunk_block. 
*/ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone, struct dm_zone *to_zone, sector_t chunk_block) { unsigned int nr_blocks; int ret; /* Get the zones bitmap blocks */ while (chunk_block < zmd->dev->zone_nr_blocks) { /* Get a valid region from the source zone */ ret = dmz_first_valid_block(zmd, from_zone, &chunk_block); if (ret <= 0) return ret; nr_blocks = ret; ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks); if (ret) return ret; chunk_block += nr_blocks; } return 0; } /* * Validate all the blocks in the range [block..block+nr_blocks-1]. */ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) { unsigned int count, bit, nr_bits; unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks; struct dmz_mblock *mblk; unsigned int n = 0; dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks", dmz_id(zmd, zone), (unsigned long long)chunk_block, nr_blocks); WARN_ON(chunk_block + nr_blocks > zone_nr_blocks); while (nr_blocks) { /* Get bitmap block */ mblk = dmz_get_bitmap(zmd, zone, chunk_block); if (IS_ERR(mblk)) return PTR_ERR(mblk); /* Set bits */ bit = chunk_block & DMZ_BLOCK_MASK_BITS; nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits); if (count) { dmz_dirty_mblock(zmd, mblk); n += count; } dmz_release_mblock(zmd, mblk); nr_blocks -= nr_bits; chunk_block += nr_bits; } if (likely(zone->weight + n <= zone_nr_blocks)) zone->weight += n; else { dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u", dmz_id(zmd, zone), zone->weight, zone_nr_blocks - n); zone->weight = zone_nr_blocks; } return 0; } /* * Clear nr_bits bits in bitmap starting from bit. * Return the number of bits cleared. 
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		/* Word-aligned and at least a full word remaining? */
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		/* Fall back to clearing one bit at a time. */
		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			/* Only dirty the metadata block if bits changed. */
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	/* Keep the cached zone weight consistent with the bitmap. */
	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
			     dmz_id(zmd, zone), zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}

/*
 * Get a block bit value.
*/ static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block) { struct dmz_mblock *mblk; int ret; WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks); /* Get bitmap block */ mblk = dmz_get_bitmap(zmd, zone, chunk_block); if (IS_ERR(mblk)) return PTR_ERR(mblk); /* Get offset */ ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS, (unsigned long *) mblk->data) != 0; dmz_release_mblock(zmd, mblk); return ret; } /* * Return the number of blocks from chunk_block to the first block with a bit * value specified by set. Search at most nr_blocks blocks from chunk_block. */ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks, int set) { struct dmz_mblock *mblk; unsigned int bit, set_bit, nr_bits; unsigned int zone_bits = zmd->zone_bits_per_mblk; unsigned long *bitmap; int n = 0; WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks); while (nr_blocks) { /* Get bitmap block */ mblk = dmz_get_bitmap(zmd, zone, chunk_block); if (IS_ERR(mblk)) return PTR_ERR(mblk); /* Get offset */ bitmap = (unsigned long *) mblk->data; bit = chunk_block & DMZ_BLOCK_MASK_BITS; nr_bits = min(nr_blocks, zone_bits - bit); if (set) set_bit = find_next_bit(bitmap, zone_bits, bit); else set_bit = find_next_zero_bit(bitmap, zone_bits, bit); dmz_release_mblock(zmd, mblk); n += set_bit - bit; if (set_bit < zone_bits) break; nr_blocks -= nr_bits; chunk_block += nr_bits; } return n; } /* * Test if chunk_block is valid. If it is, the number of consecutive * valid blocks from chunk_block will be returned. 
*/ int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block) { int valid; valid = dmz_test_block(zmd, zone, chunk_block); if (valid <= 0) return valid; /* The block is valid: get the number of valid blocks from block */ return dmz_to_next_set_block(zmd, zone, chunk_block, zmd->dev->zone_nr_blocks - chunk_block, 0); } /* * Find the first valid block from @chunk_block in @zone. * If such a block is found, its number is returned using * @chunk_block and the total number of valid blocks from @chunk_block * is returned. */ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t *chunk_block) { sector_t start_block = *chunk_block; int ret; ret = dmz_to_next_set_block(zmd, zone, start_block, zmd->dev->zone_nr_blocks - start_block, 1); if (ret < 0) return ret; start_block += ret; *chunk_block = start_block; return dmz_to_next_set_block(zmd, zone, start_block, zmd->dev->zone_nr_blocks - start_block, 0); } /* * Count the number of bits set starting from bit up to bit + nr_bits - 1. */ static int dmz_count_bits(void *bitmap, int bit, int nr_bits) { unsigned long *addr; int end = bit + nr_bits; int n = 0; while (bit < end) { if (((bit & (BITS_PER_LONG - 1)) == 0) && ((end - bit) >= BITS_PER_LONG)) { addr = (unsigned long *)bitmap + BIT_WORD(bit); if (*addr == ULONG_MAX) { n += BITS_PER_LONG; bit += BITS_PER_LONG; continue; } } if (test_bit(bit, bitmap)) n++; bit++; } return n; } /* * Get a zone weight. 
*/ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone) { struct dmz_mblock *mblk; sector_t chunk_block = 0; unsigned int bit, nr_bits; unsigned int nr_blocks = zmd->dev->zone_nr_blocks; void *bitmap; int n = 0; while (nr_blocks) { /* Get bitmap block */ mblk = dmz_get_bitmap(zmd, zone, chunk_block); if (IS_ERR(mblk)) { n = 0; break; } /* Count bits in this block */ bitmap = mblk->data; bit = chunk_block & DMZ_BLOCK_MASK_BITS; nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); n += dmz_count_bits(bitmap, bit, nr_bits); dmz_release_mblock(zmd, mblk); nr_blocks -= nr_bits; chunk_block += nr_bits; } zone->weight = n; } /* * Cleanup the zoned metadata resources. */ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) { struct rb_root *root; struct dmz_mblock *mblk, *next; int i; /* Release zone mapping resources */ if (zmd->map_mblk) { for (i = 0; i < zmd->nr_map_blocks; i++) dmz_release_mblock(zmd, zmd->map_mblk[i]); kfree(zmd->map_mblk); zmd->map_mblk = NULL; } /* Release super blocks */ for (i = 0; i < 2; i++) { if (zmd->sb[i].mblk) { dmz_free_mblock(zmd, zmd->sb[i].mblk); zmd->sb[i].mblk = NULL; } } /* Free cached blocks */ while (!list_empty(&zmd->mblk_dirty_list)) { mblk = list_first_entry(&zmd->mblk_dirty_list, struct dmz_mblock, link); dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", (u64)mblk->no, mblk->ref); list_del_init(&mblk->link); rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); } while (!list_empty(&zmd->mblk_lru_list)) { mblk = list_first_entry(&zmd->mblk_lru_list, struct dmz_mblock, link); list_del_init(&mblk->link); rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); } /* Sanity checks: the mblock rbtree should now be empty */ root = &zmd->mblk_rbtree; rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", (u64)mblk->no, mblk->ref); mblk->ref = 0; dmz_free_mblock(zmd, mblk); } /* Free the 
zone descriptors */ dmz_drop_zones(zmd); mutex_destroy(&zmd->mblk_flush_lock); mutex_destroy(&zmd->map_lock); } /* * Initialize the zoned metadata. */ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata) { struct dmz_metadata *zmd; unsigned int i, zid; struct dm_zone *zone; int ret; zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL); if (!zmd) return -ENOMEM; zmd->dev = dev; zmd->mblk_rbtree = RB_ROOT; init_rwsem(&zmd->mblk_sem); mutex_init(&zmd->mblk_flush_lock); spin_lock_init(&zmd->mblk_lock); INIT_LIST_HEAD(&zmd->mblk_lru_list); INIT_LIST_HEAD(&zmd->mblk_dirty_list); mutex_init(&zmd->map_lock); atomic_set(&zmd->unmap_nr_rnd, 0); INIT_LIST_HEAD(&zmd->unmap_rnd_list); INIT_LIST_HEAD(&zmd->map_rnd_list); atomic_set(&zmd->unmap_nr_seq, 0); INIT_LIST_HEAD(&zmd->unmap_seq_list); INIT_LIST_HEAD(&zmd->map_seq_list); atomic_set(&zmd->nr_reserved_seq_zones, 0); INIT_LIST_HEAD(&zmd->reserved_seq_zones_list); init_waitqueue_head(&zmd->free_wq); /* Initialize zone descriptors */ ret = dmz_init_zones(zmd); if (ret) goto err; /* Get super block */ ret = dmz_load_sb(zmd); if (ret) goto err; /* Set metadata zones starting from sb_zone */ zid = dmz_id(zmd, zmd->sb_zone); for (i = 0; i < zmd->nr_meta_zones << 1; i++) { zone = dmz_get(zmd, zid + i); if (!dmz_is_rnd(zone)) goto err; set_bit(DMZ_META, &zone->flags); } /* Load mapping table */ ret = dmz_load_mapping(zmd); if (ret) goto err; /* * Cache size boundaries: allow at least 2 super blocks, the chunk map * blocks and enough blocks to be able to cache the bitmap blocks of * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow * the cache to add 512 more metadata blocks. 
*/ zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16; zmd->max_nr_mblks = zmd->min_nr_mblks + 512; zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count; zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan; zmd->mblk_shrinker.seeks = DEFAULT_SEEKS; /* Metadata cache shrinker */ ret = register_shrinker(&zmd->mblk_shrinker); if (ret) { dmz_dev_err(dev, "Register metadata cache shrinker failed"); goto err; } dmz_dev_info(dev, "Host-%s zoned block device", bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ? "aware" : "managed"); dmz_dev_info(dev, " %llu 512-byte logical sectors", (u64)dev->capacity); dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors", dev->nr_zones, (u64)dev->zone_nr_sectors); dmz_dev_info(dev, " %u metadata zones", zmd->nr_meta_zones * 2); dmz_dev_info(dev, " %u data zones for %u chunks", zmd->nr_data_zones, zmd->nr_chunks); dmz_dev_info(dev, " %u random zones (%u unmapped)", zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd)); dmz_dev_info(dev, " %u sequential zones (%u unmapped)", zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq)); dmz_dev_info(dev, " %u reserved sequential data zones", zmd->nr_reserved_seq); dmz_dev_debug(dev, "Format:"); dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)", zmd->nr_meta_blocks, zmd->max_nr_mblks); dmz_dev_debug(dev, " %u data zone mapping blocks", zmd->nr_map_blocks); dmz_dev_debug(dev, " %u bitmap blocks", zmd->nr_bitmap_blocks); *metadata = zmd; return 0; err: dmz_cleanup_metadata(zmd); kfree(zmd); *metadata = NULL; return ret; } /* * Cleanup the zoned metadata resources. */ void dmz_dtr_metadata(struct dmz_metadata *zmd) { unregister_shrinker(&zmd->mblk_shrinker); dmz_cleanup_metadata(zmd); kfree(zmd); } /* * Check zone information on resume. 
*/ int dmz_resume_metadata(struct dmz_metadata *zmd) { struct dmz_dev *dev = zmd->dev; struct dm_zone *zone; sector_t wp_block; unsigned int i; int ret; /* Check zones */ for (i = 0; i < dev->nr_zones; i++) { zone = dmz_get(zmd, i); if (!zone) { dmz_dev_err(dev, "Unable to get zone %u", i); return -EIO; } wp_block = zone->wp_block; ret = dmz_update_zone(zmd, zone); if (ret) { dmz_dev_err(dev, "Broken zone %u", i); return ret; } if (dmz_is_offline(zone)) { dmz_dev_warn(dev, "Zone %u is offline", i); continue; } /* Check write pointer */ if (!dmz_is_seq(zone)) zone->wp_block = 0; else if (zone->wp_block != wp_block) { dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)", i, (u64)zone->wp_block, (u64)wp_block); zone->wp_block = wp_block; dmz_invalidate_blocks(zmd, zone, zone->wp_block, dev->zone_nr_blocks - zone->wp_block); } } return 0; }
// GetAccounts is part of idb.IndexerDB.
//
// It streams matching accounts on the returned channel and also returns the
// round at which the query is evaluated. On any setup error, a single
// AccountRow carrying the error is sent, the channel is closed, and the
// function returns early (the channel is buffered with capacity 1 so this
// send never blocks).
func (db *IndexerDb) GetAccounts(ctx context.Context, opts idb.AccountQueryOptions) (<-chan idb.AccountRow, uint64) {
	out := make(chan idb.AccountRow, 1)

	// AssetGT/AssetLT filters only make sense when filtering on a
	// specific asset (HasAssetID); reject the combination otherwise.
	if opts.HasAssetID != 0 {
		opts.IncludeAssetHoldings = true
	} else if (opts.AssetGT != nil) || (opts.AssetLT != nil) {
		err := fmt.Errorf("AssetGT=%d, AssetLT=%d, but HasAssetID=%d", uintOrDefault(opts.AssetGT), uintOrDefault(opts.AssetLT), opts.HasAssetID)
		out <- idb.AccountRow{Error: err}
		close(out)
		return out, 0
	}

	// Repeatable-read transaction so the round lookup, block header read
	// and the account query all see one consistent snapshot.
	tx, err := db.db.BeginTx(ctx, readonlyRepeatableRead)
	if err != nil {
		err = fmt.Errorf("account tx err %v", err)
		out <- idb.AccountRow{Error: err}
		close(out)
		return out, 0
	}

	round, err := db.getMaxRoundAccounted(ctx, tx)
	if err != nil {
		err = fmt.Errorf("account round err %v", err)
		out <- idb.AccountRow{Error: err}
		close(out)
		tx.Rollback(ctx)
		return out, round
	}

	// Fetch and decode the block header of that round; downstream row
	// processing needs it (via getAccountsRequest.blockheader).
	row := tx.QueryRow(ctx, `SELECT header FROM block_header WHERE round = $1`, round)
	var headerjson []byte
	err = row.Scan(&headerjson)
	if err != nil {
		err = fmt.Errorf("account round header %d err %v", round, err)
		out <- idb.AccountRow{Error: err}
		close(out)
		tx.Rollback(ctx)
		return out, round
	}
	blockheader, err := encoding.DecodeBlockHeader(headerjson)
	if err != nil {
		err = fmt.Errorf("account round header %d err %v", round, err)
		out <- idb.AccountRow{Error: err}
		close(out)
		tx.Rollback(ctx)
		return out, round
	}

	// Optional guard: refuse queries whose accounts hold too many
	// resources (see checkAccountResourceLimit for the policy).
	if opts.MaxResources != 0 {
		err = db.checkAccountResourceLimit(ctx, tx, opts)
		if err != nil {
			out <- idb.AccountRow{Error: err}
			close(out)
			tx.Rollback(ctx)
			return out, round
		}
	}

	query, whereArgs := db.buildAccountQuery(opts, false)
	req := &getAccountsRequest{
		opts:        opts,
		blockheader: blockheader,
		query:       query,
		out:         out,
		start:       time.Now(),
	}
	req.rows, err = tx.Query(ctx, query, whereArgs...)
	if err != nil {
		err = fmt.Errorf("account query %#v err %v", query, err)
		out <- idb.AccountRow{Error: err}
		close(out)
		tx.Rollback(ctx)
		return out, round
	}
	// Hand off to a goroutine that yields rows into out; it owns the
	// channel and the transaction from here on, closing/rolling back
	// when iteration finishes (rollback ends a read-only tx cleanly).
	go func() {
		db.yieldAccountsThread(req)
		close(req.out)
		tx.Rollback(ctx)
	}()
	return out, round
}
def _compare_document(self, system, reference):
    """Score one document's system annotations against its reference annotations.

    Computes eight parallel F1 variants (plain span match, exact/discounted
    severity, exact/discounted category, both, and top-category) from pairwise
    span overlaps, and collects the severity/category labels of every
    overlapping (system, reference) pair for later confusion analysis.

    Returns:
        (f1_scores, annotation_labels): an 8-tuple of floats in [0, 1] and a
        dict of label lists keyed by 'system_severities', 'system_categories',
        'reference_severities', 'reference_categories'.
    """
    num_f1_scores = 8
    # all_matched[k, i, j] = overlap count between system[i] and reference[j]
    # under match criterion k (ordering fixed by count_overlap's return tuple).
    all_matched = np.zeros((num_f1_scores, len(system), len(reference)))
    annotation_labels = defaultdict(list)
    for i, system_annotation in enumerate(system):
        for j, reference_annotation in enumerate(reference):
            (
                matched,
                severity_exact_matched,
                category_exact_matched,
                both_exact_matched,
                category_exact_top_matched,
                severity_discount_matched,
                category_discount_matched,
                both_discount_matched,
            ) = reference_annotation.count_overlap(
                system_annotation,
                severity_match=self.severity_match,
                category_match=self.category_match
            )
            all_matched[:, i, j] = [
                matched,
                severity_exact_matched,
                category_exact_matched,
                both_exact_matched,
                category_exact_top_matched,
                severity_discount_matched,
                category_discount_matched,
                both_discount_matched,
            ]
            # Record labels only for pairs that overlap at all.
            if matched > 0:
                annotation_labels['system_severities'].append(
                    system_annotation.severity)
                annotation_labels['system_categories'].append(
                    system_annotation.category)
                annotation_labels['reference_severities'].append(
                    reference_annotation.severity)
                annotation_labels['reference_categories'].append(
                    reference_annotation.category)

    lengths_sys = np.array([len(annotation) for annotation in system])
    lengths_ref = np.array([len(annotation) for annotation in reference])

    # Precision: each system annotation is credited with its best-matching
    # reference annotation. Empty system output is perfect precision by
    # convention; empty reference with non-empty system output is zero.
    if lengths_sys.sum() == 0:
        precision = np.ones(num_f1_scores)
    elif lengths_ref.sum() == 0:
        precision = np.zeros(num_f1_scores)
    else:
        precision_by_annotation = all_matched.max(2) / lengths_sys
        precision = precision_by_annotation.mean(1)

    # Recall: symmetric to precision with the roles swapped.
    if lengths_ref.sum() == 0:
        recall = np.ones(num_f1_scores)
    elif lengths_sys.sum() == 0:
        recall = np.zeros(num_f1_scores)
    else:
        recall_by_annotation = all_matched.max(1) / lengths_ref
        recall = recall_by_annotation.mean(1)

    # Harmonic mean with a guarded divide so P=R=0 yields F1=0, not NaN.
    f1 = np.divide(
        2*precision*recall, precision + recall,
        out=np.zeros_like(precision),
        where=precision + recall != 0
    )
    assert 0. <= f1.min() and f1.max() <= 1.
    return tuple(f1), dict(annotation_labels)
/*
 * #%L
 * de.metas.elasticsearch.server
 * %%
 * Copyright (C) 2018 metas GmbH
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program. If not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>.
 * #L%
 */

/**
 * Builder for an {@code ESPOModelDenormalizer}: configures which columns of a
 * PO model table are exported to Elasticsearch, how each column's value is
 * denormalized, and which columns participate in full text search.
 * <p>
 * Column selection combines explicit include/exclude calls with automatic
 * detection based on the column's display type and the indexer profile.
 */
final class ESPOModelDenormalizerBuilder
{
	// Audit columns excluded by excludeStandardColumns().
	private static final List<String> COLUMNNAMES_StandardColumns = ImmutableList.of("Created", "CreatedBy", "Updated", "UpdatedBy", "IsActive");

	private final IESDenormalizerFactory factory;
	private final ESModelIndexerProfile profile;
	private final POInfo poInfo;
	private final String modelTableName;
	private final String keyColumnName;
	// Columns that bypass the include/exclude filters (currently the key column).
	private final Set<String> columnsToAlwaysInclude = new HashSet<>();
	// Whitelist; when non-empty, only these columns (plus always-include) survive.
	private final Set<String> columnsToInclude = new HashSet<>();
	private final Set<String> columnsToExclude = new HashSet<>();
	// Explicitly registered per-column denormalizers (take precedence over autodetection).
	private final Map<String, ESPOModelDenormalizerColumn> columnDenormalizersByColumnName = new HashMap<>();
	// Per-column index type overrides set via index().
	private final Map<String, ESIndexType> columnsIndexType = new HashMap<>();
	// Last column passed to includeColumn(); target of a subsequent index() call.
	private String currentColumnName;

	ESPOModelDenormalizerBuilder(
			@NonNull final IESDenormalizerFactory factory,
			@NonNull final ESModelIndexerProfile profile,
			@NonNull final String modelTableName)
	{
		Check.assumeNotEmpty(modelTableName, "modelTableName is not empty");
		this.factory = factory;
		this.profile = profile;
		this.modelTableName = modelTableName;
		poInfo = POInfo.getPOInfo(modelTableName);
		keyColumnName = poInfo.getKeyColumnName();
		columnsToAlwaysInclude.add(keyColumnName);

		excludeColumn("DocAction"); // FIXME: hardcoded
	}

	/**
	 * Assembles the denormalizer: first the explicitly registered column
	 * denormalizers, then autodetected ones for every remaining eligible column.
	 */
	public ESPOModelDenormalizer build()
	{
		final Map<String, ESPOModelDenormalizerColumn> columnDenormalizersEffective = new HashMap<>();
		// LinkedHashSet keeps the full-text field order deterministic.
		final Set<String> fullTextSearchFieldNames = new LinkedHashSet<>();

		//
		// Add registered denormalizers
		for (final Map.Entry<String, ESPOModelDenormalizerColumn> entry : columnDenormalizersByColumnName.entrySet())
		{
			final String columnName = entry.getKey();
			if (!isIncludeColumn(columnName))
			{
				continue;
			}

			final ESPOModelDenormalizerColumn valueExtractorAndDenormalizer = entry.getValue();
			if (valueExtractorAndDenormalizer == null)
			{
				continue;
			}

			columnDenormalizersEffective.put(columnName, valueExtractorAndDenormalizer);
			if (isFullTextSearchField(columnName))
			{
				fullTextSearchFieldNames.add(columnName);
			}
		}

		//
		// Autodetect column normalizers
		for (int columnIndex = 0, columnsCount = poInfo.getColumnCount(); columnIndex < columnsCount; columnIndex++)
		{
			final String columnName = poInfo.getColumnName(columnIndex);

			// skip if it was explicitly banned
			if (!isIncludeColumn(columnName))
			{
				continue;
			}

			// skip if already considered
			if (columnDenormalizersEffective.containsKey(columnName))
			{
				continue;
			}

			// skip if not eligible for column auto-generation
			if (!isEligibleForColumnAutoGeneration(columnName))
			{
				continue;
			}

			// Generate and add
			final ESPOModelDenormalizerColumn valueExtractorAndDenormalizer = generateColumn(columnName);
			if (valueExtractorAndDenormalizer == null)
			{
				continue;
			}

			columnDenormalizersEffective.put(columnName, valueExtractorAndDenormalizer);
			if (isFullTextSearchField(columnName))
			{
				fullTextSearchFieldNames.add(columnName);
			}
		}

		return new ESPOModelDenormalizer(profile, modelTableName, keyColumnName, columnDenormalizersEffective, fullTextSearchFieldNames);
	}

	/**
	 * A column is full-text searchable when it is a (non-password) text column
	 * or a Location column.
	 */
	private boolean isFullTextSearchField(String columnName)
	{
		final int displayType = poInfo.getColumnDisplayType(columnName);

		// Exclude passwords
		if (DisplayType.isPassword(columnName, displayType))
		{
			return false;
		}

		if (DisplayType.isText(displayType))
		{
			return true;
		}
		else if (displayType == DisplayType.Location)
		{
			return true;
		}
		else
		{
			return false;
		}
	}

	/**
	 * Autodetects a denormalizer for the given column based on its display
	 * type, or returns {@code null} if the column cannot be handled.
	 */
	private final ESPOModelDenormalizerColumn generateColumn(final String columnName)
	{
		final int displayType = poInfo.getColumnDisplayType(columnName);
		final ESIndexType indexType = getIndexType(columnName, displayType);

		//
		// ID column
		if (DisplayType.ID == displayType)
		{
			return ESPOModelDenormalizerColumn.passThrough(ESDataType.String, indexType);
		}

		//
		// Parent link column
		// NOTE: don't skip parent columns because it might be that we have a value deserializer registered for it
		// if (poInfo.isColumnParent(columnName))
		// {
		// return ESModelDenormalizerColumn.passThrough(ESDataType.String, indexType);
		// }

		//
		// Text
		if (DisplayType.isText(displayType))
		{
			final String analyzer = getAnalyzer(columnName);
			return ESPOModelDenormalizerColumn.passThrough(ESDataType.String, indexType, analyzer);
		}

		//
		// Numeric
		if (DisplayType.isNumeric(displayType))
		{
			final ESDataType dataType = DisplayType.Integer == displayType ? ESDataType.Integer : ESDataType.Double;
			return ESPOModelDenormalizerColumn.passThrough(dataType, indexType);
		}

		//
		// Date
		if (DisplayType.isDate(displayType))
		{
			return ESPOModelDenormalizerColumn.rawValue(DateDenormalizer.of(displayType, indexType));
		}

		//
		// Boolean
		if (DisplayType.YesNo == displayType)
		{
			return ESPOModelDenormalizerColumn.passThrough(ESDataType.Boolean, indexType);
		}

		//
		// List/Button list
		final int AD_Reference_ID = poInfo.getColumnReferenceValueId(columnName);
		// Buttons without a reference list carry no denormalizable value.
		if (DisplayType.Button == displayType && AD_Reference_ID <= 0)
		{
			return null;
		}
		if ((DisplayType.List == displayType || DisplayType.Button == displayType) && AD_Reference_ID > 0)
		{
			return ESPOModelDenormalizerColumn.of(PORawValueExtractor.instance, AD_Ref_List_Denormalizer.of(AD_Reference_ID));
		}

		//
		// Lookups:
		// * generic: TableDir, Table, Search
		// * special: ASI, Location, Locator, Color etc
		if (DisplayType.isAnyLookup(displayType))
		{
			final String refTableName = poInfo.getReferencedTableNameOrNull(columnName);
			if (refTableName == null)
			{
				return null;
			}

			final IESModelDenormalizer valueModelDenormalizer = factory.getModelValueDenormalizer(profile, refTableName);
			if (valueModelDenormalizer == null)
			{
				return null;
			}

			return ESPOModelDenormalizerColumn.of(valueModelDenormalizer);
		}

		return null;
	}

	/**
	 * For the FULL_TEXT_SEARCH profile only text and lookup columns are
	 * auto-generated; other profiles accept every column type.
	 */
	private boolean isEligibleForColumnAutoGeneration(final String columnName)
	{
		final int displayType = poInfo.getColumnDisplayType(columnName);

		// ID column
		if (DisplayType.ID == displayType)
		{
			return true;
		}

		if (profile == ESModelIndexerProfile.FULL_TEXT_SEARCH)
		{
			return DisplayType.isText(displayType) || DisplayType.isAnyLookup(displayType);
		}

		return true;
	}

	/**
	 * Evaluation order: always-include wins, then the whitelist (if any), then
	 * the exclude list.
	 */
	private boolean isIncludeColumn(final String columnName)
	{
		if (columnsToAlwaysInclude.contains(columnName))
		{
			return true;
		}

		if (!columnsToInclude.isEmpty() && !columnsToInclude.contains(columnName))
		{
			return false;
		}

		if (columnsToExclude.contains(columnName))
		{
			return false;
		}

		return true;
	}

	/**
	 * Whitelists a column and makes it the target of a following {@link #index(ESIndexType)} call.
	 */
	public ESPOModelDenormalizerBuilder includeColumn(final String columnName)
	{
		Check.assumeNotEmpty(columnName, "columnName is not empty");
		columnsToInclude.add(columnName);
		currentColumnName = columnName;
		return this;
	}

	public ESPOModelDenormalizerBuilder excludeColumn(final String columnName)
	{
		columnsToExclude.add(columnName);
		return this;
	}

	/** Excludes the standard audit columns (Created/Updated/IsActive etc.). */
	public ESPOModelDenormalizerBuilder excludeStandardColumns()
	{
		columnsToExclude.addAll(COLUMNNAMES_StandardColumns);
		return this;
	}

	/**
	 * Sets the index type of the column most recently passed to
	 * {@link #includeColumn(String)}; fails if no column was included yet.
	 */
	public ESPOModelDenormalizerBuilder index(final ESIndexType esIndexType)
	{
		Check.assumeNotEmpty(currentColumnName, "lastIncludedColumn is not empty");
		Check.assumeNotNull(esIndexType, "Parameter esIndexType is not null");
		columnsIndexType.put(currentColumnName, esIndexType);
		return this;
	}

	private ESIndexType getIndexType(final String columnName, final int displayType)
	{
		final ESIndexType esIndexType = columnsIndexType.get(columnName);
		return esIndexType != null ? esIndexType : getDefaultIndexType(displayType);
	}

	private ESIndexType getDefaultIndexType(final int displayType)
	{
		if (profile == ESModelIndexerProfile.FULL_TEXT_SEARCH)
		{
			if (DisplayType.isText(displayType))
			{
				return ESIndexType.Analyzed;
			}
		}

		// fallback
		return ESIndexType.NotAnalyzed;
	}

	// NOTE: columnName is currently unused; the profile default is applied to all columns.
	private String getAnalyzer(final String columnName)
	{
		return profile.getDefaultAnalyzer();
	}
}
Resolution and Prevention of Feline Immunodeficiency Virus-Induced Neurological Deficits by Treatment with the Protease Inhibitor TL-3 ABSTRACT In vivo tests were performed to assess the influence of the protease inhibitor TL-3 on feline immunodeficiency virus (FIV)-induced central nervous system (CNS) deficits. Twenty cats were divided into four groups of five animals each. Group 1 received no treatment, group 2 received TL-3 only, group 3 received FIV strain PPR (FIV-PPR) only, and group 4 received FIV-PPR and TL-3. Animals were monitored for immunological and virological status, along with measurements of brain stem auditory evoked potential (BAEP) changes. Groups 1 and 2 remained FIV negative, and groups 3 and 4 became virus positive and seroconverted by 3 to 5 weeks postinoculation. No adverse effects were noted with TL-3 only. The average peak viral load for the virus-only group 3 animals was 1.32 × 10⁶ RNA copies/ml, compared to 6.9 × 10⁴ copies/ml for TL-3-treated group 4 cats. Group 3 (virus-only) cats exhibited marked progressive delays in BAEPs starting at 2 weeks post virus exposure, which is typical of infection with FIV-PPR. In contrast, TL-3-treated cats of group 4 exhibited BAEPs similar to those of control and drug-only cats. At 97 days postinfection, treatments were switched; i.e., group 4 animals were taken off TL-3 and group 3 animals were treated with TL-3. BAEPs in group 3 animals returned to control levels, while BAEPs in group 4 animals remained at control levels. After 70 days on TL-3, group 3 was removed from the drug treatment regimen. Delays in BAEPs immediately increased to levels observed prior to TL-3 treatment. The findings show that early TL-3 treatment can effectively eliminate FIV-induced changes in the CNS. Furthermore, TL-3 can counteract FIV effects on the CNS of infected cats, although continued treatment is required to maintain unimpaired CNS function.
# [Project Ku CES Noty-Service] Konkuk Univ. Class Empty Seat Notification Service for EE.
# 0.1.0va, 19.12.27. First launched.
# written by acoustikue(<NAME>)
#                                 __  _ __
#    ____ __________  __  _______/ /_(_) /____  _____
#   / __ `/ ___/ __ \/ / / / ___/ __/ / //_/ / / / _ \
#  / /_/ / /__/ /_/ / /_/ (__  ) /_/ / ,< / /_/ /  __/
#  \__,_/\___/\____/\__,_/____/\__/_/_/|_|\__,_/\___/
#
# Visual Studio Code
#
# This project requires pyfcm library,
# thus first install it by
#   $ pip3 install pyfcm
# In initial version, many of original KENS/KNS functions are refactored,
# since KLNS can be distinguished by only methods of sending notifications to devices.
# This is a test code.

import os, sys
import platform
import copy

from config_url import showConfigUrl

# for import from parent directory
sys.path.append(
    os.path.dirname(
        os.path.abspath(os.path.dirname(__file__)))
)

# Here the module's base directory will be set to folder where it is located.
# Every config files and other saved files must be in folder below the base directory.
PROJECT_CODE = 'klns'
PROJECT_OS = 'Windows'
PROJECT_SYS = ''
PROJECT_VERSION = '0.1.0va'

CURRENT_CODE = 'klns_w'
CURRENT_OS = str(platform.system())
CURRENT_SYS = CURRENT_OS + ' ' + str(platform.release()) + ' ' + str(platform.version())

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FCM_DIR = ''
MESSAGE_DIR = ''
LOG_DIR = ''

# Platform-specific directory layout (path separators differ).
if CURRENT_OS == 'Windows':
    LOG_DIR = BASE_DIR + '\\log\\'
    FCM_DIR = BASE_DIR + '\\fcm\\'
    MESSAGE_DIR = BASE_DIR + '\\fcm\\message\\'
elif CURRENT_OS == 'Linux':
    LOG_DIR = BASE_DIR + '/log/'
    FCM_DIR = BASE_DIR + '/fcm/'
    MESSAGE_DIR = BASE_DIR + '/fcm/message/'

# First!!
# Make directories if they do not exist yet.
# exist_ok avoids a race between the isdir() check and makedirs().
os.makedirs(LOG_DIR, exist_ok=True)
os.makedirs(FCM_DIR, exist_ok=True)
os.makedirs(MESSAGE_DIR, exist_ok=True)

PROJECT_BANNER = '[ku_ces_noty_service] ' + PROJECT_CODE + ', ' + PROJECT_VERSION
PROJECT_BANNER += (', ' + CURRENT_SYS + '\n\tCopyright (C) 2019 <NAME>')

# Scripts
# platform
# Make sure to print just necessary information, for simple logs.
# print(PROJECT_BANNER)
#
# Parameter: -
# Returns: -
# Author: acoustikue
def showConfigScript():
    """Print every configuration variable of this module, for debug logs."""
    # if KENS_ENABLE is True:
    #     print('\tKENS_ENABLE(1). KENS module will be loaded.')
    print('Script config:')
    print('\tPROJECT_CODE \t' + PROJECT_CODE)
    print('\tPROJECT_OS \t' + PROJECT_OS)
    print('\tPROJECT_SYS \t' + PROJECT_SYS)
    print('\tPROJECT_VERSION \t' + PROJECT_VERSION)
    print('\tCURRENT_CODE \t' + CURRENT_CODE)
    print('\tCURRENT_OS \t' + CURRENT_OS)
    print('\tCURRENT_SYS \t' + CURRENT_SYS)
    print('\tBASE_DIR \t' + BASE_DIR)
    # BUGFIX: the original printed DB_DIR, which is never defined in this
    # module and raised a NameError; print the directories that do exist.
    print('\tLOG_DIR \t' + LOG_DIR)
    print('\tFCM_DIR \t' + FCM_DIR)
    print('\tMESSAGE_DIR \t' + MESSAGE_DIR)


# Executing this script?
if __name__ == '__main__':
    # First print banner
    print(PROJECT_BANNER)
    print('\tExecuting KnsfConfig script. Running in debug mode.\n')

    # Show configuration variables
    showConfigScript()
    print('')
    showConfigUrl()
# Round-based elimination between 'R' and 'D' parties (Python 2 script).
# Each participant, in circular order, bans the next opposing participant;
# the surviving party's letter is printed.
#
# NOTE(review): as flattened here, only the LAST stdin line is simulated even
# though the first line (arrCount) announces a line count — TODO confirm the
# intended loop structure against the original file's indentation.
import sys
import time

lineNo = 0
# Read all of stdin: line 0 is the declared number of cases, every later line
# overwrites loe, so loe ends up holding the final input line.
for line in sys.stdin:
    if lineNo == 0:
        arrCount = int(line)
    else:
        loe = line.strip(' ')
    lineNo = lineNo + 1

fp = 0          # circular cursor over the participant list
sp = -1         # presumably a second pointer; unused below
nloe = ''       # unused scratch string
# Drop the trailing newline and explode into a mutable list of characters.
loe = list(loe[:len(loe) - 1])
rtimes = 0      # leftover profiling accumulators (see commented time.time() calls)
rdels = 0
# Positions of each party's members; eliminated members are marked with -1.
dindices = [i for i, x in enumerate(loe) if x == "D"]
rindices = [i for i, x in enumerate(loe) if x == "R"]
dindex = len(dindices)  # count of surviving D members
rindex = len(rindices)  # count of surviving R members
nextd = 0       # cursor into dindices pointing at the next surviving D
nextr = 0       # cursor into rindices pointing at the next surviving R
while len(loe) >= 1:
    #print fp
    if loe[fp] == 'R':
        # With at most one D left, this R bans it and R wins.
        if dindex <= 1:
            print 'R'
            break;
        # Ban the next surviving D (mark as inactive with "I" / -1).
        loe[dindices[nextd]] = "I"
        dindices[nextd] = -1
        dindex = dindex - 1
        # Advance cursors past eliminated entries.
        while (dindices[nextd] == -1):
            nextd = (nextd + 1) % len(dindices)
        nextr = (nextr + 1) % len(rindices)
        while (rindices[nextr] == -1):
            nextr = (nextr + 1) % len(rindices)
    elif loe[fp] == 'D':
        #rinits = time.time()
        # With at most one R left, this D bans it and D wins.
        if rindex <= 1:
            print 'D'
            break;
        loe[rindices[nextr]] = "I"
        rindices[nextr] = -1
        rindex = rindex - 1
        while (rindices[nextr] == -1):
            nextr = (nextr + 1) % len(rindices)
        nextd = (nextd + 1) % len(dindices)
        while (dindices[nextd] == -1):
            nextd = (nextd + 1) % len(dindices)
        #rfinals = time.time()
        #rtimes = rtimes + rfinals - rinits
    # Move to the next seat; "I" seats are simply skipped by the if/elif.
    fp = (fp + 1) % len(loe)
import * as React from "react" import { Link } from "gatsby" import SimpleSlider from "../slider/slider" import "./hero-slider.scss" const HeroSliderSettings = { responsive: [ { breakpoint: 1140, settings: { slidesToShow: 1, }, }, ], } const HeroSlider = ({ items, currentLocale }) => ( <section className="hero-slider"> <SimpleSlider customSettings={HeroSliderSettings}> {items.map((props, id) => ( <HeroSliderItem key={id.toString()} {...props} currentLocale={currentLocale} /> ))} </SimpleSlider> </section> ) const HeroSliderItem = ({ name, title, description, ctaText, ctaLink, externalCtaLink, image, currentLocale, }) => { const sliderItemImage = { backgroundImage: `url(${image.file.url})`, } return ( <div className="hero-slider__item"> <div className="hero-slider__inner-wrap container"> <div className="hero-slider__text"> <span className="hero-slider__name subtitle-1">{name}</span> <h1 className="hero-slider__title">{title}</h1> <p className="hero-slider__description"> {description.childMarkdownRemark.rawMarkdownBody} </p> {Boolean(ctaLink?.slug) && ( <Link to={ctaLink ? `/${currentLocale}/${ctaLink.slug}` : null} className="hero-slider__cta cta-link" > {ctaText} </Link> )} {/* {Boolean(externalCtaLink !== null) && ( <a href={externalCtaLink} target="_blank" className="hero-slider__cta cta-link" > {ctaText} </a> )} */} </div> <div className="hero-slider__image" style={sliderItemImage} /> </div> </div> ) } export default HeroSlider
/**
 * Raised when the user enters a non-matching password during confirmation.
 */
public class SameNamePasswordException extends Exception {

	private static final long serialVersionUID = 1L;

	/**
	 * Builds the exception with the supplied detail message.
	 *
	 * @param message text describing what went wrong
	 **/
	public SameNamePasswordException(String message) {
		super(message);
	}

	/**
	 * Renders the exception for display: the fully qualified class name
	 * followed by the detail message on the next line.
	 *
	 * @return display message
	 **/
	@Override
	public String toString() {
		return "java.Tetris.SameNamePasswordException\n" + getMessage();
	}
}
-- File: testsuite/tests/th/T7021.hs
{-# LANGUAGE TemplateHaskell #-}
-- | Regression test for GHC ticket #7021: splices a Template Haskell
-- expression ('test', provided by module T7021a) into a typed binding.
module T7021 where

import T7021a

func :: a -> Int
func = $(test)
// NextWorkerID increments the global variable // workerID by 1 and is protected by a mutex lock func NextWorkerID() int64 { mu.Lock() workerID = workerID + 1 defer mu.Unlock() return workerID }
/*
 * Report a DLPI error for 'cmd' and close the handle.
 *
 * Store a copy of linkname associated with the DLPI handle.
 * Save errno before closing the dlpi handle so that the
 * correct error value is used if 'err' is a system error.
 */
void
pr_errdlpi(dlpi_handle_t dh, const char *cmd, int err)
{
	int save_errno = errno;
	char linkname[DLPI_LINKNAME_MAX];

	/* Copy the name first: dlpi_linkname()'s storage dies with the handle. */
	(void) strlcpy(linkname, dlpi_linkname(dh), sizeof (linkname));
	dlpi_close(dh);
	/* Restore errno clobbered by dlpi_close() before formatting the error. */
	errno = save_errno;

	pr_err("%s on \"%s\": %s", cmd, linkname, dlpi_strerror(err));
}
// File: vm/actor/tests/common/mod.rs
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT

use actor::{
    self, ACCOUNT_ACTOR_CODE_ID, CRON_ACTOR_CODE_ID, INIT_ACTOR_CODE_ID, MARKET_ACTOR_CODE_ID,
    MINER_ACTOR_CODE_ID, MULTISIG_ACTOR_CODE_ID, PAYCH_ACTOR_CODE_ID, POWER_ACTOR_CODE_ID,
    REWARD_ACTOR_CODE_ID, SYSTEM_ACTOR_CODE_ID, VERIFIED_ACTOR_CODE_ID,
};
use address::Address;
use cid::{multihash::Blake2b256, Cid};
use clock::ChainEpoch;
use crypto::DomainSeparationTag;
use encoding::{de::DeserializeOwned, Cbor};
use ipld_blockstore::BlockStore;
use message::{Message, UnsignedMessage};
use runtime::{ActorCode, Runtime, Syscalls};
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, VecDeque};
use vm::{ActorError, ExitCode, MethodNum, Randomness, Serialized, TokenAmount};

/// Mock implementation of the actor `Runtime` for unit tests: records
/// expectations (caller validation, sends, actor creation) which the test
/// arms beforehand and `verify()` checks afterwards.
pub struct MockRuntime<'a, BS: BlockStore> {
    pub epoch: ChainEpoch,
    pub caller_type: Cid,
    pub miner: Address,
    //pub value_received: TokenAmount,
    pub id_addresses: HashMap<Address, Address>,
    pub actor_code_cids: HashMap<Address, Cid>,
    pub new_actor_addr: Option<Address>,
    pub message: UnsignedMessage,

    // TODO: syscalls: syscaller

    // Actor State
    pub state: Option<Cid>,
    pub balance: TokenAmount,

    // VM Impl
    pub in_call: bool,
    pub store: &'a BS,
    pub in_transaction: bool,

    // Expectations
    pub expect_validate_caller_any: Cell<bool>,
    pub expect_validate_caller_addr: RefCell<Option<Vec<Address>>>,
    pub expect_validate_caller_type: RefCell<Option<Vec<Cid>>>,
    pub expect_sends: VecDeque<ExpectedMessage>,
    pub expect_create_actor: Option<ExpectCreateActor>,
}

#[derive(Clone, Debug)]
pub struct ExpectCreateActor {
    pub code_id: Cid,
    pub address: Address,
}

#[derive(Clone, Debug)]
pub struct ExpectedMessage {
    pub to: Address,
    pub method: MethodNum,
    pub params: Serialized,
    pub value: TokenAmount,

    // returns from applying expectedMessage
    pub send_return: Serialized,
    pub exit_code: ExitCode,
}

impl<'a, BS: BlockStore> MockRuntime<'a, BS> {
    /// Creates a fresh runtime at epoch 0 with empty state and no armed expectations.
    pub fn new(bs: &'a BS, message: UnsignedMessage) -> Self {
        Self {
            epoch: 0,
            caller_type: Cid::default(),
            miner: Address::new_id(0),
            id_addresses: HashMap::new(),
            actor_code_cids: HashMap::new(),
            new_actor_addr: None,
            message: message,
            state: None,
            balance: 0u8.into(),

            // VM Impl
            in_call: false,
            store: bs,
            in_transaction: false,

            // Expectations
            expect_validate_caller_any: Cell::new(false),
            expect_validate_caller_addr: RefCell::new(None),
            expect_validate_caller_type: RefCell::new(None),
            expect_sends: VecDeque::new(),
            expect_create_actor: None,
        }
    }

    // Panics unless we are inside `call()`; guards runtime-only entry points.
    fn require_in_call(&self) {
        assert!(
            self.in_call,
            "invalid runtime invocation outside of method call",
        )
    }

    fn check_argument(&self, predicate: bool, msg: String) -> Result<(), ActorError> {
        if !predicate {
            return Err(ActorError::new(ExitCode::SysErrorIllegalArgument, msg));
        }
        Ok(())
    }

    // Stores a CBOR value in the blockstore and returns its Cid.
    fn put<C: Cbor>(&self, o: &C) -> Result<Cid, ActorError> {
        Ok(self.store.put(&o, Blake2b256).unwrap())
    }

    fn _get<T: DeserializeOwned>(&self, cid: Cid) -> Result<T, ActorError> {
        Ok(self.store.get(&cid).unwrap().unwrap())
    }

    /// Decodes the current actor state root; panics if no state was created.
    #[allow(dead_code)]
    pub fn get_state<T: DeserializeOwned>(&self) -> Result<T, ActorError> {
        let data: T = self
            .store
            .get(&self.state.as_ref().unwrap())
            .unwrap()
            .unwrap();
        Ok(data)
    }

    /// Arms an expectation that the actor validates its caller against `addr`.
    pub fn expect_validate_caller_addr(&self, addr: &[Address]) {
        assert!(addr.len() > 0, "addrs must be non-empty");
        *self.expect_validate_caller_addr.borrow_mut() = Some(addr.to_vec());
    }

    #[allow(dead_code)]
    pub fn expect_validate_caller_any(&self) {
        self.expect_validate_caller_any.set(true);
    }

    /// Dispatches a method invocation to the actor identified by `to_code`.
    /// State changes are rolled back when the invocation errors.
    pub fn call(
        &mut self,
        to_code: &Cid,
        method_num: MethodNum,
        params: &Serialized,
    ) -> Result<Serialized, ActorError> {
        self.in_call = true;
        let prev_state = self.state.clone();
        let res = match to_code {
            x if x == &*SYSTEM_ACTOR_CODE_ID => {
                actor::system::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*INIT_ACTOR_CODE_ID => {
                actor::init::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*CRON_ACTOR_CODE_ID => {
                actor::cron::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*ACCOUNT_ACTOR_CODE_ID => {
                actor::account::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*POWER_ACTOR_CODE_ID => {
                actor::power::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*MINER_ACTOR_CODE_ID => {
                actor::miner::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*MARKET_ACTOR_CODE_ID => {
                actor::market::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*PAYCH_ACTOR_CODE_ID => {
                actor::paych::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*MULTISIG_ACTOR_CODE_ID => {
                actor::multisig::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*REWARD_ACTOR_CODE_ID => {
                actor::reward::Actor.invoke_method(self, method_num, params)
            }
            x if x == &*VERIFIED_ACTOR_CODE_ID => {
                actor::verifreg::Actor.invoke_method(self, method_num, params)
            }
            _ => Err(ActorError::new(
                ExitCode::SysErrForbidden,
                "invalid method id".to_owned(),
            )),
        };

        // Revert state on failure, mimicking VM transaction semantics.
        if res.is_err() {
            self.state = prev_state;
        }
        self.in_call = false;
        return res;
    }

    /// Asserts that every armed expectation was consumed, then resets them.
    pub fn verify(&mut self) {
        assert!(
            !self.expect_validate_caller_any.get(),
            "expected ValidateCallerAny, not received"
        );
        assert!(
            self.expect_validate_caller_addr.borrow().as_ref().is_none(),
            "expected ValidateCallerAddr {:?}, not received",
            self.expect_validate_caller_addr.borrow().as_ref().unwrap()
        );
        assert!(
            self.expect_validate_caller_type.borrow().as_ref().is_none(),
            "expected ValidateCallerType {:?}, not received",
            self.expect_validate_caller_type.borrow().as_ref().unwrap()
        );
        assert!(
            self.expect_sends.is_empty(),
            "expected all message to be send, unsent messages {:?}",
            self.expect_sends
        );
        assert!(
            self.expect_create_actor.is_none(),
            "expected actor to be created, uncreated actor: {:?}",
            self.expect_create_actor
        );

        self.reset();
    }

    /// Clears all armed expectations (NOTE: expect_sends is not cleared here).
    pub fn reset(&mut self) {
        self.expect_validate_caller_any.set(false);
        *self.expect_validate_caller_addr.borrow_mut() = None;
        *self.expect_validate_caller_type.borrow_mut() = None;
        self.expect_create_actor = None;
    }

    /// Arms an expectation that the actor sends exactly this message next.
    #[allow(dead_code)]
    pub fn expect_send(
        &mut self,
        to: Address,
        method: MethodNum,
        params: Serialized,
        value: TokenAmount,
        send_return: Serialized,
        exit_code: ExitCode,
    ) {
        self.expect_sends.push_back(ExpectedMessage {
            to,
            method,
            params,
            value,
            send_return,
            exit_code,
        })
    }

    #[allow(dead_code)]
    pub fn expect_create_actor(&mut self, code_id: Cid, address: Address) {
        let a = ExpectCreateActor { code_id, address };
        self.expect_create_actor = Some(a);
    }

    /// Rewrites the message's `from` and records the caller's code Cid.
    #[allow(dead_code)]
    pub fn set_caller(&mut self, code_id: Cid, address: Address) {
        self.message = UnsignedMessage::builder()
            .to(self.message.to().clone())
            .from(address.clone())
            .value(self.message.value().clone())
            .build()
            .unwrap();
        self.caller_type = code_id.clone();
        self.actor_code_cids.insert(address, code_id);
    }
}

impl<BS: BlockStore> Runtime<BS> for MockRuntime<'_, BS> {
    fn message(&self) -> &UnsignedMessage {
        self.require_in_call();
        &self.message
    }

    fn curr_epoch(&self) -> ChainEpoch {
        self.require_in_call();
        self.epoch
    }

    fn validate_immediate_caller_accept_any(&self) {
        self.require_in_call();
        assert!(
            self.expect_validate_caller_any.get(),
            "unexpected validate-caller-any"
        );
        self.expect_validate_caller_any.set(false);
    }

    // Checks the validation matches the armed expectation, then checks the
    // actual caller is in the allowed set; the expectation is consumed either way.
    fn validate_immediate_caller_is<'a, I>(&self, addresses: I) -> Result<(), ActorError>
    where
        I: IntoIterator<Item = &'a Address>,
    {
        self.require_in_call();

        let addrs: Vec<Address> = addresses.into_iter().cloned().collect();

        self.check_argument(addrs.len() > 0, "addrs must be non-empty".to_owned())?;

        assert!(
            self.expect_validate_caller_addr.borrow().is_some(),
            "unexpected validate caller addrs"
        );
        assert!(
            &addrs == self.expect_validate_caller_addr.borrow().as_ref().unwrap(),
            "unexpected validate caller addrs {:?}, expected {:?}",
            addrs,
            self.expect_validate_caller_addr.borrow().as_ref()
        );

        for expected in &addrs {
            if self.message().from() == expected {
                *self.expect_validate_caller_addr.borrow_mut() = None;
                return Ok(());
            }
        }
        *self.expect_validate_caller_addr.borrow_mut() = None;
        return Err(ActorError::new(
            ExitCode::ErrForbidden,
            format!(
                "caller address {:?} forbidden, allowed: {:?}",
                self.message().from(),
                &addrs
            ),
        ));
    }

    fn validate_immediate_caller_type<'a, I>(&self, types: I) -> Result<(), ActorError>
    where
        I: IntoIterator<Item = &'a Cid>,
    {
        self.require_in_call();
        let types: Vec<Cid> = types.into_iter().cloned().collect();

        self.check_argument(types.len() > 0, "types must be non-empty".to_owned())?;

        assert!(
            self.expect_validate_caller_type.borrow().is_some(),
            "unexpected validate caller code"
        );
        assert!(
            &types == self.expect_validate_caller_type.borrow().as_ref().unwrap(),
            "unexpected validate caller code {:?}, expected {:?}",
            types,
            self.expect_validate_caller_type
        );

        for expected in &types {
            if &self.caller_type == expected {
                *self.expect_validate_caller_type.borrow_mut() = None;
                return Ok(());
            }
        }

        *self.expect_validate_caller_type.borrow_mut() = None;
        Err(self.abort(
            ExitCode::ErrForbidden,
            format!(
                "caller type {:?} forbidden, allowed: {:?}",
                self.caller_type, types
            ),
        ))
    }

    fn current_balance(&self) -> Result<TokenAmount, ActorError> {
        self.require_in_call();
        Ok(self.balance.clone())
    }

    // ID addresses resolve to themselves; others via the id_addresses map
    // (panics when the mapping is missing).
    fn resolve_address(&self, address: &Address) -> Result<Address, ActorError> {
        self.require_in_call();
        if address.protocol() == address::Protocol::ID {
            return Ok(address.clone());
        }
        let resolved = self.id_addresses.get(&address).unwrap();
        return Ok(resolved.clone());
    }

    fn get_actor_code_cid(&self, addr: &Address) -> Result<Cid, ActorError> {
        self.require_in_call();
        let ret = self.actor_code_cids.get(&addr).unwrap();
        Ok(ret.clone())
    }

    fn get_randomness(
        &self,
        _personalization: DomainSeparationTag,
        _rand_epoch: ChainEpoch,
        _entropy: &[u8],
    ) -> Result<Randomness, ActorError> {
        unimplemented!()
    }

    // Constructs the actor's initial state; fails if state already exists.
    fn create<C: Cbor>(&mut self, obj: &C) -> Result<(), ActorError> {
        if self.state.is_some() == true {
            return Err(self.abort(
                ExitCode::SysErrorIllegalActor,
                "state already constructed".to_owned(),
            ));
        }
        self.state = Some(self.store.put(obj, Blake2b256).unwrap());
        Ok(())
    }

    fn state<C: Cbor>(&self) -> Result<C, ActorError> {
        Ok(self
            .store
            .get(&self.state.as_ref().unwrap())
            .unwrap()
            .unwrap())
    }

    // Runs `f` on a mutable copy of the state and persists the result; nested
    // transactions are rejected.
    fn transaction<C: Cbor, R, F>(&mut self, f: F) -> Result<R, ActorError>
    where
        F: FnOnce(&mut C, &mut Self) -> R,
    {
        if self.in_transaction {
            return Err(self.abort(ExitCode::SysErrorIllegalActor, "nested transaction"));
        }
        let mut read_only = self.state()?;
        self.in_transaction = true;
        let ret = f(&mut read_only, self);
        self.state = Some(self.put(&read_only).unwrap());
        self.in_transaction = false;
        Ok(ret)
    }

    fn store(&self) -> &BS {
        self.store
    }

    // Pops the next armed ExpectedMessage, asserts it matches, debits the
    // balance, and replays the canned return/exit code.
    fn send(
        &mut self,
        to: &Address,
        method: MethodNum,
        params: &Serialized,
        value: &TokenAmount,
    ) -> Result<Serialized, ActorError> {
        self.require_in_call();
        if self.in_transaction {
            return Err(self.abort(
                ExitCode::SysErrorIllegalActor,
                "side-effect within transaction",
            ));
        }

        assert!(
            !self.expect_sends.is_empty(),
            "unexpected expectedMessage to: {:?} method: {:?}, value: {:?}, params: {:?}",
            to,
            method,
            value,
            params
        );
        let expected_msg = self.expect_sends.pop_front().unwrap();

        // NOTE(review): the mismatch message formats self.expect_sends[0],
        // which is the *next* expectation (the matched one was just popped)
        // and panics when the queue is now empty — TODO confirm intended.
        assert!(&expected_msg.to == to && expected_msg.method == method && &expected_msg.params == params && &expected_msg.value == value,
            "expectedMessage being sent does not match expectation.\nMessage -\t to: {:?} method: {:?} value: {:?} params: {:?}\nExpected -\t {:?}",
            to, method, value, params, self.expect_sends[0]);

        if value > &self.balance {
            return Err(self.abort(
                ExitCode::SysErrSenderStateInvalid,
                format!(
                    "cannot send value: {:?} exceeds balance: {:?}",
                    value, self.balance
                ),
            ));
        }
        self.balance -= value;

        match expected_msg.exit_code {
            ExitCode::Ok => return Ok(expected_msg.send_return),
            x => {
                return Err(ActorError::new(x, "Expected message Fail".to_string()));
            }
        }
    }

    fn abort<S: AsRef<str>>(&self, exit_code: ExitCode, msg: S) -> ActorError {
        ActorError::new(exit_code, msg.as_ref().to_owned())
    }

    // Returns the pre-seeded address exactly once.
    fn new_actor_address(&mut self) -> Result<Address, ActorError> {
        self.require_in_call();
        let ret = self
            .new_actor_addr
            .as_ref()
            .expect("unexpected call to new actor address")
            .clone();
        self.new_actor_addr = None;
        return Ok(ret);
    }

    fn create_actor(&mut self, code_id: &Cid, address: &Address) -> Result<(), ActorError> {
        self.require_in_call();
        if self.in_transaction {
            return Err(self.abort(
                ExitCode::SysErrorIllegalActor,
                "side-effect within transaction".to_owned(),
            ));
        }
        let expect_create_actor = self
            .expect_create_actor
            .take()
            .expect("unexpected call to create actor");

        assert!(&expect_create_actor.code_id == code_id && &expect_create_actor.address == address,
            "unexpected actor being created, expected code: {:?} address: {:?}, actual code: {:?} address: {:?}",
            expect_create_actor.code_id, expect_create_actor.address, code_id, address);
        Ok(())
    }

    fn delete_actor(&mut self, _beneficiary: &Address) -> Result<(), ActorError> {
        self.require_in_call();
        if self.in_transaction {
            return Err(self.abort(
                ExitCode::SysErrorIllegalActor,
                "side-effect within transaction".to_owned(),
            ));
        }
        todo!("implement me???")
    }

    fn syscalls(&self) -> &dyn Syscalls {
        unimplemented!()
    }

    fn total_fil_circ_supply(&self) -> Result<TokenAmount, ActorError> {
        unimplemented!()
    }
}
<reponame>7coil/react-uwp<filename>src/styles/getAcrylicTextureStyle.ts import { isSupportBackdropFilter } from "../utils/browser/backdropFilterDetector"; export { isSupportBackdropFilter }; export interface AcrylicConfig { tintColor: string; blurSize: number; background?: string; } export function getAcrylicTextureStyle(config: AcrylicConfig, useFluentDesign = true) { const { tintColor, blurSize, background } = config; let style: React.CSSProperties = useFluentDesign ? { /** * Add theme.baseLow color. */ background: background ? `${background}, ${tintColor}` : tintColor, backgroundBlendMode: "exclusion", /** * Add blur filter. */ backdropFilter: `blur(${blurSize}px)`, transform: "translate3d(0, 0, 0)" } : { /** * Add theme.baseLow color. */ background: background ? `linear-gradient(${tintColor}, ${tintColor}), ${background}` : tintColor, backgroundBlendMode: "overlay", transform: "translate3d(0, 0, 0)" }; return style; }
/**
 * Statistics of object database size, aggregated by table names. Enabled with
 * {@link ReportType#DATABASE_TABLE_SIZE}.
 * <p>
 * Thread unsafe (assumes external synchronization) for concurrent updates of
 * the same object.
 *
 * @author mzawirski
 */
public class DatabaseSizeStats {
    public static final String DB_STATS_REPORT_PERIOD_SEC_PROP_NAME = "swift.dbReportPeriodSec";
    public static final String DEFAULT_STATS_REPORT_PERIOD_SEC = "10.0";

    private final MetadataStatsCollector statsCollector;
    // Per-table aggregates, keyed by table name.
    private final ConcurrentHashMap<String, TableStats> tablesStats = new ConcurrentHashMap<>();
    // Sum of all object sizes across every table (reported under "ALL").
    private final AtomicInteger totalSize = new AtomicInteger();

    public DatabaseSizeStats(final MetadataStatsCollector statsCollector) {
        this.statsCollector = statsCollector;
    }

    /**
     * Starts the periodic reporting task (if table reporting is enabled),
     * using the period from {@value #DB_STATS_REPORT_PERIOD_SEC_PROP_NAME}.
     */
    public void init(Properties props) {
        final double periodSec = Double.parseDouble(props.getProperty(DB_STATS_REPORT_PERIOD_SEC_PROP_NAME,
                DEFAULT_STATS_REPORT_PERIOD_SEC));
        if (statsCollector.isDatabaseTableReportEnabled()) {
            new PeriodicTask(0.0, periodSec) {
                // TODO: implement stop()
                public void run() {
                    // Report the grand total first, then one entry per table.
                    statsCollector.recordDatabaseTableStats("ALL", totalSize.get());
                    for (final Entry<String, TableStats> entry : tablesStats.entrySet()) {
                        statsCollector.recordDatabaseTableStats(entry.getKey(), entry.getValue().getSize());
                    }
                };
            };
        }
    }

    /**
     * Informs of object removal.
     *
     * @param id
     */
    public void removeObject(CRDTIdentifier id) {
        // A null CRDT signals deletion (size becomes 0).
        updateObject(id, null);
    }

    /**
     * Informs of object update that could have potentially caused changes in
     * its size.
     *
     * @param id
     *            object id
     * @param crdt
     *            object
     */
    public void updateObject(CRDTIdentifier id, final ManagedCRDT<?> crdt) {
        if (ReportType.DATABASE_TABLE_SIZE.isEnabled()) {
            // Optimistic concurrency control optimized for get().
            TableStats tableStats = tablesStats.get(id.getTable());
            if (tableStats == null) {
                // putIfAbsent resolves the race: keep whichever instance won.
                final TableStats existingStats = tablesStats.putIfAbsent(id.getTable(), tableStats = new TableStats());
                if (existingStats != null) {
                    tableStats = existingStats;
                }
            }
            tableStats.updateObject(id.getKey(), crdt);
        }
    }

    /** Size aggregate for a single table: per-key sizes plus their sum. */
    private class TableStats {
        private final AtomicInteger tableSize = new AtomicInteger();
        private final ConcurrentHashMap<String, Integer> keySizes = new ConcurrentHashMap<>();

        public int getSize() {
            return tableSize.get();
        }

        public void updateObject(final String key, final ManagedCRDT crdt) {
            final int newSize;
            Integer oldSize;
            if (crdt == null) {
                // Removal: drop the key and subtract its previous size.
                newSize = 0;
                oldSize = keySizes.remove(key);
            } else {
                newSize = computeSize(crdt);
                oldSize = keySizes.put(key, newSize);
            }
            if (oldSize == null) {
                oldSize = 0;
            }
            // Apply the delta to both the table total and the global total.
            final int deltaSize = newSize - oldSize;
            tableSize.addAndGet(deltaSize);
            totalSize.addAndGet(deltaSize);
        }
    }

    // Size = number of bytes the CRDT occupies when Kryo-serialized.
    private int computeSize(ManagedCRDT crdt) {
        final Kryo kryo = statsCollector.getFreshKryo();
        final Output buffer = statsCollector.getFreshKryoBuffer();
        crdt.write(kryo, buffer);
        return buffer.position();
    }
}
def sync_open_rooms(self, default_connector=None, filter_token=None):
    """Mirror the server's open livechat rooms into local LiveChatRoom rows.

    Every open room is upserted (keyed by connector/token/room_id) and
    marked open. When ``filter_token`` is given, only rooms whose visitor
    token contains it are synced.

    NOTE(review): the original code only processed rooms when
    ``filter_token`` was truthy, so a plain ``sync_open_rooms()`` call
    synced nothing; it also raised TypeError on rooms without a visitor
    token. Both are treated as bugs here.

    :param default_connector: connector to attach created/updated rooms to
    :param filter_token: optional substring filter on the visitor token
    """
    rooms = self.get_open_rooms()
    # Resolve the model once instead of on every iteration.
    LiveChatRoom = apps.get_model("envelope.LiveChatRoom")
    for room in rooms:
        token = room.get("v", {}).get("token")
        # Skip rooms that don't match the optional visitor-token filter;
        # a missing token (None) can never match.
        if filter_token and filter_token not in (token or ""):
            continue
        room_item, created = LiveChatRoom.objects.get_or_create(
            connector=default_connector,
            token=token,
            room_id=room.get("_id", {}),
        )
        room_item.open = True
        room_item.save()
        if created:
            print("ROOM CREATED:", room["v"]["token"])
        else:
            print("ROOM UPDATED:", room["v"]["token"])
/// Adds the downloaded binary in Installer to a Windows PATH pub fn add_binary_to_path(installer: &Installer) -> Result<(), InstallerError> { let windows_path = get_windows_path_var()?; let bin_path = installer.get_bin_dir_path()?.to_string(); if let Some(old_path) = windows_path { if let Some(new_path) = add_to_path(&old_path, &bin_path) { apply_new_path(&new_path)?; } } Ok(()) }
<filename>app/client/src/pages/CommitCreate.tsx<gh_stars>1-10 import React, { useState, Fragment } from 'react' import { useQuery, useLazyQuery, useMutation } from '@apollo/client' import { Link, Router, RouteComponentProps } from '@reach/router' import { Button, Form, Input } from 'antd' import * as queries from '../store/queries' import * as QT from '../store/queryTypes' // function CreateMergeCommitForm(symbolId: string, action: QT.CommitAction) { // const [createCommit, { data, loading, error }] = useMutation<QT.createCommit, QT.createCommitVariables>( // queries.CREATE_COMMIT // ) // const { register, handleSubmit, setValue, errors } = useForm({ // defaultValues: { // // title: page?.title, // choice: 1, // content: "some content goes here", // } // }) // if (loading) return <p>Loading...</p> // if (error) return <p>ERROR: {error.message}</p> // // if (data?.createPost.id) return <Link to={`/post/${data.createPost.id}`}>Post Created</Link> // const onSubmit = (data: any) => { // createCommit({ // variables: { // data: { // // symbolId?: string, // action: QT.CommitAction.MERGE, // content: "string" // } // } // }) // } // return ( // <> // <form onSubmit={handleSubmit(onSubmit)}> // <div> // <label htmlFor="parent">parent</label> // <input // name="parent" // placeholder="parent merge to..." 
//           ref={register}
//         />
//       </div>
//       <button type="submit">Submit</button>
//     </form>
//   </>
// )
// }

interface CommitFormProps {
  commit?: QT.commitDetail
}

// export const CommitForm: React.FC<CommitFormProps> = ({ commit }) => {
//   const { register, handleSubmit, setValue, errors } = useForm({
//     defaultValues: {
//       // title: page?.title,
//       choice: 1,
//       content: "some content goes here",
//     }
//   })
//   // if (commit === undefined) return <CreateCommitForm />
//   if (loading) return <p>Loading...</p>
//   if (error) return <p>ERROR: {error.message}</p>
//   // if (data?.createPost.id) return <Link to={`/post/${data.createPost.id}`}>Post Created</Link>
//   // const onSubmit = (data: any) => {
//   //   updateCommitReview({
//   //     variables: {
//   //       id: review.id,
//   //       data: {
//   //         choice: data.choice
//   //       }
//   //     }
//   //   })
//   // }
//   return (
//     <>
//       {/* <form onSubmit={handleSubmit(onSubmit)}>
//         <div>
//           <label htmlFor="choice">choice</label>
//           <input
//             name="choice"
//             placeholder="choice..."
//             ref={register}
//           />
//         </div>
//         <button type="submit">Submit</button>
//       </form> */}
//     </>
//   )
// }

// antd form geometry: label column / control column spans.
const layout = {
  labelCol: { span: 8 },
  wrapperCol: { span: 16 },
};
// Geometry for the trailing (submit) row, indented past the label column.
const tailLayout = {
  wrapperCol: { offset: 8, span: 16 },
};

/**
 * Form that fires the CREATE_COMMIT mutation on submit.
 *
 * NOTE(review): the rendered fields are Username/Password but onFinish
 * ignores `values` and always sends a CREATE action with empty content —
 * presumably placeholder UI copied from an antd example; confirm intent.
 * The mutation result (`data`, `loading`, `error`) is also never rendered.
 */
function CommitForm() {
  const [form] = Form.useForm()
  const [createCommit, { data, loading, error }] = useMutation<QT.createCommit, QT.createCommitVariables>(
    queries.CREATE_COMMIT
  )
  // Submit handler: creates a commit with an empty JSON content payload.
  const onFinish = (values: any) => {
    console.log('Success:', values)
    createCommit({
      variables: {
        data: {
          // symbolId?: string | null;
          action: QT.CommitAction.CREATE,
          content: JSON.stringify({})
        }
      }
    })
  }
  const onFinishFailed = (errorInfo: any) => {
    console.log('Failed:', errorInfo);
  }
  return (
    <Form
      {...layout}
      form={form}
      name="basic"
      initialValues={{ remember: true }}
      onFinish={onFinish}
      onFinishFailed={onFinishFailed}
    >
      <Form.Item
        label="Username"
        name="username"
        rules={[{ required: true, message: 'Please input your username!' }]}
      >
        <Input />
      </Form.Item>
      <Form.Item
        label="Password"
        name="password"
        rules={[{ required: true, message: 'Please input your password!' }]}
      >
        <Input.Password />
      </Form.Item>
      <Form.Item {...tailLayout}>
        <Button type="primary" htmlType="submit">
          Submit
        </Button>
      </Form.Item>
    </Form>
  )
}

interface CommitCreateProps extends RouteComponentProps { }

/** Route component for the commit-creation page; just renders CommitForm. */
export const CommitCreate: React.FC<CommitCreateProps> = () => {
  // NOTE(review): getMe still issues the ME query (a network side effect)
  // but its result is never consumed, and both useState pairs below are
  // unused — confirm whether these are placeholders for planned UI or can
  // be deleted.
  const getMe = useQuery<QT.me>(queries.ME)
  const [showResult, setShowResult] = useState(false)
  const [showCreateReview, setShowCreateReview] = useState(false)
  return <CommitForm />
}
<reponame>dozedoff/commonj<gh_stars>1-10
/*
 * The MIT License (MIT)
 * Copyright (c) 2022 <NAME>
 * http://opensource.org/licenses/MIT
 */
package com.github.dozedoff.commonj.io;

import java.sql.Connection;
import java.sql.SQLException;

import com.j256.ormlite.support.ConnectionSource;

/**
 * Interface for database connection pools.
 * 
 * @author <NAME>
 * 
 */
public interface ConnectionPool {
	/**
	 * Stop the pool and clean up resources.
	 */
	public void stopPool();

	/**
	 * Start the pool so it can provide connections.
	 * 
	 * @throws Exception
	 *             if there is an error starting the pool
	 */
	public void startPool() throws Exception;

	/**
	 * Return a connection to the pool.
	 * 
	 * @param cn
	 *            to return
	 */
	public void returnConnection(Connection cn);

	/**
	 * Get a connection from the pool.
	 * 
	 * @return a database connection
	 * @throws SQLException
	 *             if there is an error getting a connection
	 */
	public Connection getConnection() throws SQLException;

	/**
	 * Get the ORMLite {@link ConnectionSource} backing this pool.
	 * 
	 * @return a connection source for use with ORMLite DAOs
	 * @throws SQLException
	 *             if there is an error getting the connection source
	 */
	public ConnectionSource getConnectionSource() throws SQLException;
}
<filename>main/svx/source/form/fmcontrollayout.cxx /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ // MARKER(update_precomp.py): autogen include statement, do not remove #include "precompiled_svx.hxx" #include "fmcontrollayout.hxx" #include "fmprop.hrc" /** === begin UNO includes === **/ #include <com/sun/star/form/FormComponentType.hpp> #include <com/sun/star/awt/VisualEffect.hpp> #include <com/sun/star/i18n/ScriptType.hpp> #include <com/sun/star/lang/Locale.hpp> #include <com/sun/star/awt/FontDescriptor.hpp> #include <com/sun/star/style/XStyleFamiliesSupplier.hpp> #include <com/sun/star/lang/XServiceInfo.hpp> #include <com/sun/star/container/XChild.hpp> /** === end UNO includes === **/ #include <comphelper/processfactory.hxx> #include <i18npool/mslangid.hxx> #include <unotools/syslocale.hxx> #include <toolkit/helper/vclunohelper.hxx> #include <tools/debug.hxx> #include <tools/diagnose_ex.h> #include <vcl/outdev.hxx> //........................................................................ namespace svxform { //........................................................................ 
    using namespace ::utl;
    /** === begin UNO using === **/
    using ::com::sun::star::uno::Reference;
    using ::com::sun::star::uno::XInterface;
    using ::com::sun::star::uno::UNO_QUERY;
    using ::com::sun::star::uno::UNO_QUERY_THROW;
    using ::com::sun::star::uno::UNO_SET_THROW;
    using ::com::sun::star::uno::Exception;
    using ::com::sun::star::uno::RuntimeException;
    using ::com::sun::star::uno::Any;
    using ::com::sun::star::uno::makeAny;
    using ::com::sun::star::uno::Sequence;
    using ::com::sun::star::uno::Type;
    using ::com::sun::star::beans::XPropertySet;
    using ::com::sun::star::beans::XPropertySetInfo;
    using ::com::sun::star::lang::Locale;
    using ::com::sun::star::awt::FontDescriptor;
    using ::com::sun::star::style::XStyleFamiliesSupplier;
    using ::com::sun::star::lang::XServiceInfo;
    using ::com::sun::star::container::XNameAccess;
    using ::com::sun::star::container::XChild;
    /** === end UNO using === **/
    namespace FormComponentType = ::com::sun::star::form::FormComponentType;
    namespace VisualEffect = ::com::sun::star::awt::VisualEffect;
    namespace ScriptType = ::com::sun::star::i18n::ScriptType;

    //--------------------------------------------------------------------
    namespace
    {
        //....................................................................
        /** Walks up the model hierarchy, starting at the given node, until a
            node supporting the requested interface is found.

            @return the first node (the given one or an ancestor, via
                XChild::getParent) supporting INTERFACE_TYPE, or an empty
                reference if no such node exists.
        */
        template< class INTERFACE_TYPE >
        Reference< INTERFACE_TYPE > getTypedModelNode( const Reference< XInterface >& _rxModelNode )
        {
            Reference< INTERFACE_TYPE > xTypedNode( _rxModelNode, UNO_QUERY );
            if ( xTypedNode.is() )
                return xTypedNode;
            else
            {
                // not the requested type: recurse into the parent, if there is one
                Reference< XChild > xChild( _rxModelNode, UNO_QUERY );
                if ( xChild.is() )
                    return getTypedModelNode< INTERFACE_TYPE >( xChild->getParent() );
                else
                    return NULL;
            }
        }
        //....................................................................
        /** Determines, from the document's service name, the style family and
            the style name which hold the document's default text style.

            NOTE(review): when the document does not support XServiceInfo at
            all, the function returns true without touching the output
            parameters - callers seem to rely on the exception-throwing
            initialization that follows; confirm this is intended.

            @return false only if the document supports XServiceInfo but is of
                an unrecognized type, true otherwise.
        */
        static bool lcl_getDocumentDefaultStyleAndFamily( const Reference< XInterface >& _rxDocument, ::rtl::OUString& _rFamilyName,
            ::rtl::OUString& _rStyleName ) SAL_THROW(( Exception ))
        {
            bool bSuccess = true;
            Reference< XServiceInfo > xDocumentSI( _rxDocument, UNO_QUERY );
            if ( xDocumentSI.is() )
            {
                if (   xDocumentSI->supportsService( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.text.TextDocument" ) ) )
                    || xDocumentSI->supportsService( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.text.WebDocument" ) ) )
                    )
                {
                    // text documents: default paragraph style "Standard"
                    _rFamilyName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "ParagraphStyles" ) );
                    _rStyleName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "Standard" ) );
                }
                else if ( xDocumentSI->supportsService( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.sheet.SpreadsheetDocument" ) ) ) )
                {
                    // spreadsheets: default cell style "Default"
                    _rFamilyName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "CellStyles" ) );
                    _rStyleName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "Default" ) );
                }
                else if (   xDocumentSI->supportsService( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.drawing.DrawingDocument" ) ) )
                         || xDocumentSI->supportsService( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.presentation.PresentationDocument" ) ) )
                         )
                {
                    // drawings/presentations: graphics style "standard"
                    _rFamilyName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "graphics" ) );
                    _rStyleName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "standard" ) );
                }
                else
                    bSuccess = false;
            }
            return bSuccess;
        }
        //....................................................................
        /** Initializes the font of the given control model from the default
            text style of the surrounding document, honoring the locale which
            the document uses for its content.
        */
        static void lcl_initializeControlFont( const Reference< XPropertySet >& _rxModel )
        {
            try
            {
                Reference< XPropertySet > xStyle( ControlLayouter::getDefaultDocumentTextStyle( _rxModel ), UNO_SET_THROW );
                Reference< XPropertySetInfo > xStylePSI( xStyle->getPropertySetInfo(), UNO_SET_THROW );

                // determine the script type associated with the system locale
                const LocaleDataWrapper& rSysLocaleData = SvtSysLocale().GetLocaleData();
                const sal_Int16 eSysLocaleScriptType = MsLangId::getScriptType( MsLangId::convertLocaleToLanguage( rSysLocaleData.getLocale() ) );

                // depending on this script type, use the right property from the document's style which controls the
                // default locale for document content
                const sal_Char* pCharLocalePropertyName = "CharLocale";
                switch ( eSysLocaleScriptType )
                {
                case ScriptType::LATIN:
                    // already defaulted above
                    break;
                case ScriptType::ASIAN:
                    pCharLocalePropertyName = "CharLocaleAsian";
                    break;
                case ScriptType::COMPLEX:
                    pCharLocalePropertyName = "CharLocaleComplex";
                    break;
                default:
                    OSL_ENSURE( false, "lcl_initializeControlFont: unexpected script type for system locale!" );
                    break;
                }
                ::rtl::OUString sCharLocalePropertyName = ::rtl::OUString::createFromAscii( pCharLocalePropertyName );

                // read the locale from the script-specific property, if present
                Locale aDocumentCharLocale;
                if ( xStylePSI->hasPropertyByName( sCharLocalePropertyName ) )
                {
                    OSL_VERIFY( xStyle->getPropertyValue( sCharLocalePropertyName ) >>= aDocumentCharLocale );
                }
                // fall back to CharLocale property at the style
                if ( !aDocumentCharLocale.Language.getLength() )
                {
                    sCharLocalePropertyName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "CharLocale" ) );
                    if ( xStylePSI->hasPropertyByName( sCharLocalePropertyName ) )
                    {
                        OSL_VERIFY( xStyle->getPropertyValue( sCharLocalePropertyName ) >>= aDocumentCharLocale );
                    }
                }
                // fall back to the system locale
                if ( !aDocumentCharLocale.Language.getLength() )
                {
                    aDocumentCharLocale = rSysLocaleData.getLocale();
                }

                // retrieve a default font for this locale, and set it at the control
                Font aFont = OutputDevice::GetDefaultFont( DEFAULTFONT_SANS, MsLangId::convertLocaleToLanguage( aDocumentCharLocale ), DEFAULTFONT_FLAGS_ONLYONE );
                FontDescriptor aFontDesc = VCLUnoHelper::CreateFontDescriptor( aFont );
                _rxModel->setPropertyValue(
                    ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "FontDescriptor" ) ),
                    makeAny( aFontDesc )
                );
            }
            catch( const Exception& )
            {
                DBG_UNHANDLED_EXCEPTION();
            }
        }
    }

    //====================================================================
    //= ControlLayouter
    //====================================================================
    //--------------------------------------------------------------------
    Reference< XPropertySet > ControlLayouter::getDefaultDocumentTextStyle( const Reference< XPropertySet >& _rxModel )
    {
        // the style family collection
        Reference< XStyleFamiliesSupplier > xSuppStyleFamilies( getTypedModelNode< XStyleFamiliesSupplier >( _rxModel.get() ), UNO_SET_THROW );
        Reference< XNameAccess > xStyleFamilies( xSuppStyleFamilies->getStyleFamilies(), UNO_SET_THROW );

        // the names of the family, and the style - depends on the document type we live in
        ::rtl::OUString sFamilyName, sStyleName;
        if ( !lcl_getDocumentDefaultStyleAndFamily( xSuppStyleFamilies.get(), sFamilyName, sStyleName ) )
            throw RuntimeException( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "unknown document type!" ) ), NULL );

        // the concrete style
        Reference< XNameAccess > xStyleFamily( xStyleFamilies->getByName( sFamilyName ), UNO_QUERY_THROW );
        return Reference< XPropertySet >( xStyleFamily->getByName( sStyleName ), UNO_QUERY_THROW );
    }

    //--------------------------------------------------------------------
    void ControlLayouter::initializeControlLayout( const Reference< XPropertySet >& _rxControlModel, DocumentType _eDocType )
    {
        DBG_ASSERT( _rxControlModel.is(), "ControlLayouter::initializeControlLayout: invalid model!" );
        if ( !_rxControlModel.is() )
            return;

        try
        {
            Reference< XPropertySetInfo > xPSI( _rxControlModel->getPropertySetInfo(), UNO_SET_THROW );

            // the control type
            sal_Int16 nClassId = FormComponentType::CONTROL;
            _rxControlModel->getPropertyValue( FM_PROP_CLASSID ) >>= nClassId;

            // the document type
            if ( _eDocType == eUnknownDocumentType )
                _eDocType = DocumentClassification::classifyHostDocument( _rxControlModel.get() );

            // let's see what the configuration says about the visual effect
            OConfigurationNode aConfig = getLayoutSettings( _eDocType );
            Any aVisualEffect = aConfig.getNodeValue( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "VisualEffect" ) ) );
            if ( aVisualEffect.hasValue() )
            {
                ::rtl::OUString sVisualEffect;
                OSL_VERIFY( aVisualEffect >>= sVisualEffect );
                sal_Int16 nVisualEffect = VisualEffect::NONE;
                if ( sVisualEffect.equalsAscii( "flat" ) )
                    nVisualEffect = VisualEffect::FLAT;
                else if ( sVisualEffect.equalsAscii( "3D" ) )
                    nVisualEffect = VisualEffect::LOOK3D;

                if ( xPSI->hasPropertyByName( FM_PROP_BORDER ) )
                {
                    // these control types draw their own frame, so the border
                    // property is left untouched for them
                    if (   ( nClassId != FormComponentType::COMMANDBUTTON )
                        && ( nClassId != FormComponentType::RADIOBUTTON )
                        && ( nClassId != FormComponentType::CHECKBOX )
                        && ( nClassId != FormComponentType::GROUPBOX )
                        && ( nClassId != FormComponentType::FIXEDTEXT )
                        && ( nClassId != FormComponentType::SCROLLBAR )
                        && ( nClassId != FormComponentType::SPINBUTTON )
                        )
                    {
                        _rxControlModel->setPropertyValue( FM_PROP_BORDER, makeAny( nVisualEffect ) );
                        if (   ( nVisualEffect == VisualEffect::FLAT )
                            && ( xPSI->hasPropertyByName( FM_PROP_BORDERCOLOR ) )
                            )
                            // light gray flat border
                            _rxControlModel->setPropertyValue( FM_PROP_BORDERCOLOR, makeAny( (sal_Int32)0x00C0C0C0 ) );
                    }
                }
                if ( xPSI->hasPropertyByName( FM_PROP_VISUALEFFECT ) )
                    _rxControlModel->setPropertyValue( FM_PROP_VISUALEFFECT, makeAny( nVisualEffect ) );
            }

            // the font (only if we use the document's ref devices for rendering control text, otherwise, the
            // default font of VCL controls is assumed to be fine)
            if ( useDocumentReferenceDevice( _eDocType ) && xPSI->hasPropertyByName( FM_PROP_FONT ) )
                lcl_initializeControlFont( _rxControlModel );
        }
        catch( const Exception& )
        {
            OSL_ENSURE( sal_False, "ControlLayouter::initializeControlLayout: caught an exception!" );
        }
    }

    //--------------------------------------------------------------------
    ::utl::OConfigurationNode ControlLayouter::getLayoutSettings( DocumentType _eDocType )
    {
        // configuration path is per document module (Writer, Calc, ...)
        ::rtl::OUString sConfigName = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "/org.openoffice.Office.Common/Forms/ControlLayout/" ) );
        sConfigName += DocumentClassification::getModuleIdentifierForDocumentType( _eDocType );
        return OConfigurationTreeRoot::createWithServiceFactory(
            ::comphelper::getProcessServiceFactory(),    // TODO
            sConfigName );
    }

    //--------------------------------------------------------------------
    bool ControlLayouter::useDynamicBorderColor( DocumentType _eDocType )
    {
        OConfigurationNode aConfig = getLayoutSettings( _eDocType );
        Any aDynamicBorderColor = aConfig.getNodeValue( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "DynamicBorderColors" ) ) );
        bool bDynamicBorderColor = false;
        OSL_VERIFY( aDynamicBorderColor >>= bDynamicBorderColor );
        return bDynamicBorderColor;
    }

    //--------------------------------------------------------------------
    bool ControlLayouter::useDocumentReferenceDevice( DocumentType _eDocType )
    {
        if ( _eDocType == eUnknownDocumentType )
            return false;
        OConfigurationNode aConfig = getLayoutSettings( _eDocType );
        Any aUseRefDevice = aConfig.getNodeValue( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "UseDocumentTextMetrics" ) ) );
        bool bUseRefDevice = false;
        OSL_VERIFY( aUseRefDevice >>= bUseRefDevice );
        return bUseRefDevice;
    }

//........................................................................
} // namespace svxform
//........................................................................
// LoginHandler handles the login procedure. func LoginHandler(cont *gin.Context) { state := RandToken(32) session := sessions.Default(cont) session.Set("state", state) session.Save() scopes := strings.Join(confTemp.Scopes, " ") linkStr := string(confTemp.Endpoint.AuthURL + "?client_id=" + confTemp.ClientID + "&redirect_uri=" + confTemp.RedirectURL + "&response_type=code&scope=" + scopes + "&state=" + state) cont.JSON(http.StatusOK, gin.H{"link": linkStr}) }
/**
 * Entity Container Builder.
 * <p>
 * Collects the container's name, namespace, base container, and its entity
 * sets, singletons, function imports and action imports (each keyed by name,
 * in insertion order), then produces an {@link EntityContainerImpl} via
 * {@link #build()}. All setters return {@code this} for chaining.
 */
public static final class Builder {
    private String name;
    private String namespace;
    private String baseEntityContainerName;
    private final Map<String, EntitySet> entitySetsBuilder = new LinkedHashMap<>();
    private final Map<String, Singleton> singletonsBuilder = new LinkedHashMap<>();
    private final Map<String, FunctionImport> functionImportsBuilder = new LinkedHashMap<>();
    private final Map<String, ActionImport> actionImportsBuilder = new LinkedHashMap<>();

    /** Sets the container name. */
    public Builder setName(String value) {
        this.name = value;
        return this;
    }

    /** Sets the container namespace. */
    public Builder setNamespace(String value) {
        this.namespace = value;
        return this;
    }

    /** Sets the name of the base entity container this one extends. */
    public Builder setBaseEntityContainerName(String value) {
        this.baseEntityContainerName = value;
        return this;
    }

    /** Registers one entity set under its own name (replacing any previous one). */
    public Builder addEntitySet(EntitySet set) {
        this.entitySetsBuilder.put(set.getName(), set);
        return this;
    }

    /** Registers all given entity sets, in iteration order. */
    public Builder addEntitySets(Iterable<EntitySet> sets) {
        for (EntitySet set : sets) {
            addEntitySet(set);
        }
        return this;
    }

    /** Registers one singleton under its own name (replacing any previous one). */
    public Builder addSingleton(Singleton single) {
        this.singletonsBuilder.put(single.getName(), single);
        return this;
    }

    /** Registers all given singletons, in iteration order. */
    public Builder addSingletons(Iterable<Singleton> singles) {
        for (Singleton single : singles) {
            addSingleton(single);
        }
        return this;
    }

    /** Registers one function import under its own name (replacing any previous one). */
    public Builder addFunctionImport(FunctionImport fnImport) {
        this.functionImportsBuilder.put(fnImport.getName(), fnImport);
        return this;
    }

    /** Registers all given function imports, in iteration order. */
    public Builder addFunctionImports(Iterable<FunctionImport> fnImports) {
        for (FunctionImport fnImport : fnImports) {
            addFunctionImport(fnImport);
        }
        return this;
    }

    /** Registers one action import under its own name (replacing any previous one). */
    public Builder addActionImport(ActionImport actImport) {
        this.actionImportsBuilder.put(actImport.getName(), actImport);
        return this;
    }

    /** Registers all given action imports, in iteration order. */
    public Builder addActionImports(Iterable<ActionImport> actImports) {
        for (ActionImport actImport : actImports) {
            addActionImport(actImport);
        }
        return this;
    }

    /** Builds the immutable container from the collected state. */
    public EntityContainerImpl build() {
        return new EntityContainerImpl(this);
    }
}
/// Finds in the name section all functions from specified banned modules and /// returns their names and indexes. fn find_banned_fns_idxs<'a>( module: &'a Module, banned_modules: &[&str], ) -> Result<HashMap<u32, &'a str>, Error> { use parity_wasm::elements::NameSection; // name section is actually Map<fn_idx, fn_name> for each function in a module module .names_section() .ok_or_else(|| err_msg("Name section is absent, verification is aborted.")) .and_then(|name_sec| { if let NameSection::Function(fn_name_sec) = name_sec { let banned_fns: HashMap<u32, &'a str> = fn_name_sec .names() .iter() .filter_map(|(idx, name)| { banned_modules .iter() .find(|m| name.starts_with(*m)) .map(|_| (idx, name.as_str())) }) .collect(); Ok(banned_fns) } else { Err(err_msg( "Name section for functions is absent, verification is aborted.", )) } }) }
/* Frees a block of memory that was successfully allocated by ** a prior call the vcms_alloc. ** ** The handle should be considered invalid upon return from this ** call. ** ** Whether any memory is actually freed up or not as the result of ** this call will depends on many factors, if all goes well it will ** be freed. If something goes wrong, the memory will likely end up ** being freed up as part of the vcsm_exit process. In the end the ** memory is guaranteed to be freed one way or another. */ void vcsm_free( unsigned int handle ) { int rc; void *usr_ptr = NULL; if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) { vcos_log_error( "[%s]: [%d]: invalid device or handle!", __func__, getpid() ); goto out; } if (using_vc_sm_cma) { VCSM_PAYLOAD_ELEM_T *elem; elem = vcsm_payload_list_find_handle(handle); if (!elem) { vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. elem %p\n", __func__, handle, elem); goto out; } rc = munmap( elem->mem, elem->size ); vcos_log_trace( "[%s]: ioctl unmap fd: %d, addr %p, size %u. 
rc %d", __func__, elem->fd, elem->mem, elem->size, rc ); close(elem->fd); vcsm_payload_list_release(elem); } else { struct vmcs_sm_ioctl_free alloc_free; struct vmcs_sm_ioctl_size sz; struct vmcs_sm_ioctl_map map; memset( &sz, 0, sizeof(sz) ); memset( &alloc_free, 0, sizeof(alloc_free) ); memset( &map, 0, sizeof(map) ); sz.handle = handle; rc = ioctl( vcsm_handle, VMCS_SM_IOCTL_SIZE_USR_HDL, &sz ); vcos_log_trace( "[%s]: [%d]: ioctl size-usr-hdl %d (hdl: %x) - size %u", __func__, getpid(), rc, sz.handle, sz.size ); if ( (rc < 0) || (sz.size == 0) ) { goto out; } usr_ptr = (void *) vcsm_usr_address( sz.handle ); if ( usr_ptr != NULL ) { munmap( usr_ptr, sz.size ); vcos_log_trace( "[%s]: [%d]: ioctl unmap hdl: %x", __func__, getpid(), sz.handle ); } else { vcos_log_trace( "[%s]: [%d]: freeing unmapped area (hdl: %x)", __func__, getpid(), map.handle ); } alloc_free.handle = sz.handle; rc = ioctl( vcsm_handle, VMCS_SM_IOCTL_MEM_FREE, &alloc_free ); vcos_log_trace( "[%s]: [%d]: ioctl mem-free %d (hdl: %x)", __func__, getpid(), rc, alloc_free.handle ); } out: return; }
As we layered up for the New Hampshire winter, my friend and I were indulging in a popular pastime – comparing our gear. We both enjoy the outdoors and have spent our fair share of time working in forestry or on farms, so we share any tips we can get. Such as, TitleNine makes great sports bras. Oiselle has soft waistbands on their running gear. And a go-girl is a must have for peeing during a long multi-pitch. But even though we’ve spent our whole lives looking for the alternative deal, we’re still surprised by how hard it is to find women’s athletic clothing that fits right and doesn’t make you look like an idiot. The outdoor industry has exploded in the past few decades, focusing on creating lightweight, highly efficient gear from base layers to backpacks. As far as women’s gear is concerned, we’ve come a long way. But despite a newfound focus on the female demographic, I can’t help but feel like a lot of companies are still falling short of delivering the same quality to women that has been available in men’s gear for years. So, here is an open letter to marketing companies around the world about what you can you do better, along with a reader tip about which companies are ahead of their time and already getting it right. 1. Make the arms larger My arms are muscular. This is because I climb and I like to do active things. Hence why I’m trying to buy wicking shirts and flannel work shirts. I’ve come to understand that, despite my personal opinions, ‘muscular’ is not necessarily the ideal female form in society. In fact, when I injured myself and had to stop climbing for two months last year, I got endless compliments on how skinny I looked. And a miracle also happened – my arms fit into all of my shirts! Having grown up in my brother’s hand-me-down flannels, I was pretty excited when outdoor companies started making ‘women’s flannels’. Imagine my disappointment when I realized that that meant much less arm space. And when it comes to why, the ads say it all. 
Men’s flannels are shown in ads being worn by modern day, romanticized lumber jacks, but women’s shirts are modeled by waifish women who look as though they couldn’t even lift an axe, much less use it. Why would a woman want a shirt that was stereotypically worn by people cutting wood if it wouldn’t fit her while she actually chops wood? What is the message here? That women should dress the way men do, but remain weaker? Women’s arms are getting a bigger workout today than serving turkey dinner; let’s try to keep up. …So, who does it right? Well, for one, Duluth makes a pretty solid line of women’s work shirts. 2. Change the colors It’s hard enough to stand your ground as a woman in outdoor sports. Now try doing it dressed like a female Power Ranger. The colors available for women’s clothing are a horrible mix of pastels and neons; Line up women on a hiking or biking trip and it looks like Abba visiting a nursery. OK, so it does play right into marketing stereotypes that I care about the color of my clothes, but I do think that color affects how we’re perceived. Women want to be taken seriously – as mountaineers, climbers, hikers, skiers, snowboarders, runners, and cyclists. You name the sport and I guarantee you there is a humiliating teal outfit for it. Even hunting clothing, which is designed for one primary purpose – camouflage – is tainted by female stereotypes: There’s nothing like bright, glaring pink to simulate a natural environment for wary deer. And you know what? Some women want to wear neon pink and yellow, and that’s OK. But in a market that is gradually incorporating more and more fitness boutiques like LuluLemon and Fabletics, it would be nice if the more serious gear companies – especially the ones who try to convince us that they support women’s equality and athleticism – offered the same color choices for both genders. It is 2015. We’ve had the right to vote for almost a century – can we please have adult colors now? Who does it right? 
Patagonia and the North Face are expensive, but slightly more likely to use palettes which don’t assault your eyes. Unfortunately, I still haven’t found a company that reliably sells their women’s gear in the more subtle burnt oranges, olive greens, and slate grey colors that men’s wear comes in – but please tell me if you know of one. 3. Make work clothes for women. Seriously. I don’t want to hear about how there are totally work pants for women out there. I know there are. But frankly, they suck. To illustrate the divide, here is the picture you get when you visit the website for Dickies, a well known supplier of work pants, and click on ‘Men’: And here are the categories for work pants underneath that picture: Cargo Pants, Coveralls, Bib Overalls, Painter’s Wear, High Visibility, Flame Resistant, Built to Work. What about the women’s department, which primarily caters to women who stare meaningfully into the distant scenery on their farm? If you want to guess how many of those categories repeat in the women’s pants department, the answer would be none (I don’t know what I expected from a company named Dickies…). In all fairness, a more comprehensive search of their site turns up women’s work pants that look like they might be durable enough, but the selection is limited and includes no female flame-resistant wear. There is a huge flame-resistant wear section for men, complete with a page-long explanation for why their gear is essential to occupations dealing with fire. The long list of reasons I should protect myself from fire damage as a firewoman, female smoke jumper, female hot shot, or wildland firefighter would be a lot more helpful if it was put out by a company which also actually made fire-resistant clothing for women. The presence of women in fire-fighting jobs is growing every year; maybe the gear could grow – or shrink – with it. Of course, there is always reliable old Carhartt – your friendly, post-misogynism blue collar company. 
Except that, despite a recent effort to make double-knee work pants which fit women (I own four pairs and can’t complain), there is still a noticeable divide. Sure, I can get pants for chainsawing, but what about rain gear? Does Carhartt create its famous heavy-duty rain pants for the thousands of female foresters and trail maintainers out there? That would be a ‘no’. In my mind, feminism can be summed up in one word: pants. We finally have pants that we can go to the office in. Now how about pants that can resist fire, water, motor oil, paint thinner, and all the other elements that women are encountering on a daily basis in this brave new world of non-gender-specific jobs? Who does it right? Basically, Carhartt has a pretty decent women’s line, although for rain pants you’re on your own. Speaking of pants…. 4. Help us cover our asses Side by side, here are men’s and women’s long compression shorts from UnderArmour, which illustrates how much variety can exist in the word ‘long’: The above shorts aren’t bad, but the vast majority of base layer shorts you find for women still seem to look like this: Booty shorts have a time and place. For some women, they are the most comfortable thing to wear while being active. But for many women, no amount of hiking, biking, or exercise will ever lead to their thighs not touching. So please consider this, trendy sport companies. Not only do the standard short sizes that you offer make me feel like a sausage who has outgrown its casing, but they cause chafing that can create a serious, long-term problem. I am happy to buy men’s shorts for now, but I’m growing tired of explaining the opening in the front, so let’s transcend women’s shorts and begin making women’s longs. Who does it right? Shredly and LaSportiva both make some pretty great shorts for women. I’ve got to give marketers some credit. As perspectives on women’s athleticism are changing, they have improved a lot. 
I’m by no means advocating for all gear to be exactly equal – the differences in many female-specific products are necessary. In fact, that’s the whole point of this article. Outdoor gear is not like a bic pen – you can’t just re-market men’s clothing by changing the name to women’s clothing. Women need smaller gear that accommodates our bodies. We get colder because of our fat distribution which means that the ‘female temperature rating’ on sleeping bags is one of the first specs I look at. And making backpacks that curve to fit women’s shoulders is only one example of how companies like ULA and Osprey really listen to feedback from women. But still, when I’m standing in the women’s shoe department of REI looking at ‘fit flops’, I can’t shake the feeling that marketing teams must view women as an alien species. So, how do they make contact with us? What does the complex, enigmatic female brain desire? Simple: Men’s gear – made to fit women. Did I get something wrong? Did I forget something? Let me know in the comments – improving gear choices is a community effort, and one I know I’m not working on alone!
def solve_partition_function(self, A, h, k):
    """Compute log(Z), where Z sums exp(get_f(A, h, s)) over all candidate strings s.

    The candidate strings are produced by ``__generate_strings(n, k)`` with
    ``n = len(A)``; presumably these are the k-ary strings of length n — TODO
    confirm against the generator. Numerical stability is obtained with the
    standard max-shift (log-sum-exp) trick: the largest score is factored out
    before exponentiating.

    Args:
        A: sequence whose length fixes the string length n; forwarded to get_f.
        h: forwarded to get_f unchanged.
        k: forwarded to the string generator.

    Returns:
        float: log(Z). If no strings are generated this is -inf, matching the
        behaviour of summing over an empty set.
    """
    n = len(A)
    states = self.__generate_strings(n, k)
    scores = [get_f(A, h, state) for state in states]
    # Shift every score by the maximum so np.exp cannot overflow.
    peak = max(scores, default=-np.inf)
    shifted = [score - peak for score in scores]
    return np.log(np.sum(np.exp(shifted))) + peak
Blood-membrane interaction: C3a, an indicator of biocompatibility. An interest in hemodialysis-related patient symptomatology dating back to the beginnings of maintenance hemodialysis (HD) therapy prompted our investigations to determine the etiology of such discomfort. Since the discovery by Craddock et al (1) of the activation of the alternative complement pathway by hemodialyzer membrane, our efforts toward defining the relationship of symptoms to complement activation have been done in collaboration with the Minnesota group. Results of an earlier blinded study (2) and continuing investigations of HD membranes provide data which support the contention that complement activation, although not necessarily etiologic in the symptoms related to dialysis, serves as an indicator of membrane compatibility. These newer data reveal C3a rises caused in vivo and in vitro by a Japanese processed cuprammonium membrane for dialysis to be similar to those caused by cuprophane. In vivo C3a elevation using Travenol CA-110 hemodialyzers of cellulose acetate is significantly lower and is similar to earlier results obtained with cellulose acetate of CD manufacture.
Platinum is one of the costly metals used as catalysts in new technologies employed for industrial chemical processes, renewable energy sources, pollution control and many other purposes. In particular, it is used for fuel cells, devices that turn chemical energy directly into electrical energy, without combustion. Research has shown that the greatest efficiency is achieved when the catalyst is available in the form of nanoparticles (smaller than 10⁻⁹ m). Simply put, the greater the dispersion of the material and the smaller the size of the particles, the more it is available for catalysis. Unfortunately, the laws of thermodynamics cause the particles to "stick" to one another and form larger clusters, which is why the material becomes less effective over time. So what can be done to maintain maximal dispersion of the "nanopowder"? A group of SISSA/CNR IOM scientists (with the collaboration of the Univerzita Karlova in Prague) has studied a way to produce tiny platinum grains consisting of one atom only and to keep them dispersed in a stable manner, by exploiting the properties of the substrate on which they rest. "Theoretical work demonstrated that irregularities in the surface known as steps and observed in experiments conducted at the Trieste Synchrotron tend to attract and separate the nanoparticles, causing them to remain literally attached in the form of single atoms", explains Stefano Fabris, CNR-IOM/SISSA research fellow. "The particles adhering to the steps were no longer visible even using an atomic resolution microscope" explains Nguyen-Dung Tran, a SISSA PhD student. "However, their presence was detected by spectroscopy, so they were indeed there, but they were no longer visible or free to move around". "Our computer simulations solved this dilemma, showing that the particles on the steps are reduced to single atoms" adds Matteo Farnesi Camellone (CNR-IOM), another author of the study.
"If the surface is engineered to contain a large number of these defects, then the force that binds the particles to the substrate effectively offsets the aggregation force", explains Fabris. The theoretical work, led by Fabris, allowed the researchers to develop a "system model" on the computer able to predict the behaviour of the material. The model's predictions were confirmed by the experimental measurements. Materials like this can be used for fuel cell electrodes, with far lower costs than the current ones. "Reducing the amount of platinum used in fuel cell electrodes is a priority, not only to contain costs but also to ensure environmental sustainability, as also indicated by the recent European directives" concludes Fabris. The European project ChipCAT, which funded this research, aims precisely to achieve this goal. ###
/**
 * XML parsing helpers built on top of the JDK SAX parser.
 *
 * @author ispong
 * @since 0.0.1
 */
@Slf4j
public class XmlUtils {

    /**
     * Parse XML from an input stream with a caller-supplied SAX handler.
     *
     * @param inputStream stream containing the XML document
     * @param targetClass SAX handler class; a fresh instance is created via
     *                    ReflectUtils and receives all parse events
     * @param <T>         handler type
     * @return the handler instance after the whole stream has been parsed
     * @since 0.0.1
     */
    public static <T extends DefaultHandler> T parseXmlInputStream(InputStream inputStream, Class<T> targetClass) {
        // NOTE(review): the factory keeps its default settings, so DTDs/external
        // entities are processed. If this ever parses untrusted XML, harden it
        // against XXE via saxParserFactory.setFeature(...) before newSAXParser().
        SAXParserFactory saxParserFactory = SAXParserFactory.newInstance();
        try {
            SAXParser saxParser = saxParserFactory.newSAXParser();
            T target = ReflectUtils.newInstance(targetClass);
            saxParser.parse(inputStream, target);
            return target;
        } catch (ParserConfigurationException | SAXException | IOException e) {
            // Wrap checked parser/IO failures in the project's runtime exception.
            throw new OxygenException(e.getMessage());
        }
    }

    /**
     * Parse XML from a string with a caller-supplied SAX handler.
     *
     * @param data        XML document text
     * @param targetClass SAX handler class
     * @param <T>         handler type
     * @return the handler instance after the document has been parsed
     * @since 0.0.1
     */
    public static <T extends DefaultHandler> T parseXmlString(String data, Class<T> targetClass) {
        // Fix: String.getBytes() without a charset uses the platform default,
        // which corrupts non-ASCII documents on JVMs whose default is not UTF-8.
        return parseXmlInputStream(
                new ByteArrayInputStream(data.getBytes(java.nio.charset.StandardCharsets.UTF_8)),
                targetClass);
    }
}
<reponame>Ventiv-Technology/DockerManager
/**
 * Copyright (c) 2014 - 2015 Ventiv Technology
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.ventiv.docker.manager.dockerjava;

import com.github.dockerjava.core.command.AbstrDockerCmd;

import java.util.List;

/**
 * docker-java command implementation that requests the history of an image;
 * executing it yields a {@code List<ImageHistoryCmd.ImageHistory>}.
 *
 * Created by jcrygier on 5/1/15.
 */
public class ImageHistoryCmdImpl extends AbstrDockerCmd<ImageHistoryCmd, List<ImageHistoryCmd.ImageHistory>> implements ImageHistoryCmd {

    // Name (or name:tag) of the image whose history is requested.
    private String imageName;

    public ImageHistoryCmdImpl(ImageHistoryCmd.Exec execution, String imageName) {
        super(execution);
        // Route through the builder-style setter so the field is assigned in one place.
        withImageName(imageName);
    }

    @Override
    public String getImageName() {
        return imageName;
    }

    /** Builder-style setter; returns this command so calls can be chained. */
    @Override
    public ImageHistoryCmd withImageName(String imageName) {
        this.imageName = imageName;
        return this;
    }
}
package com.example.ronensabag.animationsandgestures.scrollDetector; import android.annotation.SuppressLint; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.support.v7.widget.Toolbar; import android.view.MenuItem; import android.widget.TextView; import com.example.ronensabag.animationsandgestures.R; public class ScrollDetectorActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_scroll_detector); setSupportActionBar((Toolbar) findViewById(R.id.scroll_toolbar)); if (getSupportActionBar() != null) { getSupportActionBar().setHomeButtonEnabled(true); getSupportActionBar().setDisplayHomeAsUpEnabled(true); } final float expandViewMinHeight = getResources().getDimension(R.dimen.scroll_detector_expend_view_min_height); final float expandViewMaxHeight = getResources().getDimension(R.dimen.scroll_detector_expend_view_max_height); initExpendView(expandViewMinHeight, expandViewMaxHeight); } @Override public boolean onOptionsItemSelected(MenuItem item) { switch (item.getItemId()) { case android.R.id.home: onBackPressed(); return true; } return super.onOptionsItemSelected(item); } @SuppressLint("ClickableViewAccessibility") private void initExpendView(final float expandViewMinHeight, float expandViewMaxHeight) { final TextView expendableView = findViewById(R.id.expendableView); final float maxScroll = expandViewMaxHeight - expandViewMinHeight; ScrollDetector scrollDetector = new ScrollDetector(this, maxScroll, new ScrollDetector.ScrollPercentageUpdate() { @Override public void onScrollPercentageUpdate(float newPercentage) { expendableView.getLayoutParams().height = (int)(maxScroll * newPercentage) + (int)expandViewMinHeight; int percentage = (int)(newPercentage * 100f); expendableView.setText(String.format(getString(R.string.expand_view_pattern), percentage)); expendableView.requestLayout(); } }); 
expendableView.setOnTouchListener(scrollDetector); } }
<filename>go/src/infra/tools/migrator/internal/plugsupport/repo.go
// Copyright 2020 The LUCI Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.

package plugsupport

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"sync"

	"google.golang.org/protobuf/encoding/prototext"

	"go.chromium.org/luci/common/data/stringset"
	"go.chromium.org/luci/common/errors"
	"go.chromium.org/luci/common/logging"
	lucipb "go.chromium.org/luci/common/proto"
	configpb "go.chromium.org/luci/common/proto/config"
	"go.chromium.org/luci/common/sync/parallel"

	"infra/tools/migrator"
)

// localBranch is the name of the local work branch created in every checkout;
// `git cl` operates on this branch when uploading the config fix.
const localBranch = "fix_config"

// errSkipped is a sentinel returned by discoverRepo when *all* LUCI projects
// in a repo are excluded by the projects_re filter. Callers compare with ==.
var errSkipped = errors.New("the repo is skipped by projects_re filter")

// repo represents a single git checkout hosting one or more LUCI projects.
type repo struct {
	projectDir ProjectDir // the root migrator project directory
	checkoutID string     // how to name the checkout directory on disk

	projects []*configpb.Project // LUCI projects located within this repo

	root string // the absolute path to the repo checkout
}

// configRootKey is a key for "git config".
//
// It stores the lucicfg config root directory for the given project.
func configRootKey(projID string) string {
	return fmt.Sprintf("migrator.%s.configRoot", projID)
}

// generatedConfigRootKey is a key for "git config".
//
// It stores the generated-configs directory for the given project.
func generatedConfigRootKey(projID string) string {
	return fmt.Sprintf("migrator.%s.generatedConfigRoot", projID)
}

// projectsMetadataFile is a path to the file with projects metadata.
//
// The file lives inside .git so it never shows up in the work tree diff.
func projectsMetadataFile(repoRoot string) string {
	return filepath.Join(repoRoot, ".git", "luci-projects.cfg")
}

// discoverRepo looks at the checkout directory on disk and returns the
// corresponding &repo{...} if it is a valid git checkout with all necessary
// metadata.
//
// Returns ErrNotExist if there's no checkout there or errSkipped if all
// projects in the repo are skipped by the projects_re filter. Any other error
// indicates there's a checkout, but it appears to be broken.
func discoverRepo(ctx context.Context, projectDir ProjectDir, checkoutID string, filter Filter) (*repo, error) {
	root := projectDir.CheckoutDir(checkoutID)
	// The metadata file is the marker of a valid checkout; a missing file
	// surfaces as an os.IsNotExist error to the caller.
	projects, err := readProjectsMetadata(projectsMetadataFile(root))
	if err != nil {
		return nil, err
	}
	// Skip the repo if *all* projects there are skipped by the filter. Keep *all*
	// projects if at least one project matches the filter: partially skipping
	// projects in a multi-project repo leads to hard-to-reason-about states.
	if len(filter.Apply(projects)) == 0 {
		return nil, errSkipped
	}
	r := &repo{
		projectDir: projectDir,
		checkoutID: checkoutID,
		projects:   projects,
		root:       root,
	}
	// load(ctx, false) verifies per-project git config keys without rewriting
	// the metadata file.
	if err := r.load(ctx, false); err != nil {
		return nil, err
	}
	return r, nil
}

// discoverAllRepos discovers all checked out repositories in the project dir.
func discoverAllRepos(ctx context.Context, dir ProjectDir) ([]*repo, error) {
	filter, err := dir.LoadProjectFilter()
	if err != nil {
		return nil, err
	}
	infos, err := ioutil.ReadDir(string(dir))
	if err != nil {
		return nil, err
	}
	var repos []*repo
	for _, info := range infos {
		// Dot- and underscore-prefixed directories are internal, not checkouts.
		if !info.IsDir() || strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") {
			continue
		}
		// Broken checkouts are logged and skipped rather than failing the whole
		// scan; filtered-out and absent checkouts are silently ignored.
		switch r, err := discoverRepo(ctx, dir, info.Name(), filter); {
		case err == nil:
			repos = append(repos, r)
		case err != errSkipped && !os.IsNotExist(err):
			logging.Errorf(ctx, "Error when scanning checkout %q: %s", info.Name(), err)
		}
	}
	return repos, nil
}

// visitReposInParallel calls the callback for checked out repositories that
// contain at least one non-skipped project.
//
// Repositories that only contain projects skipped by `projects_re` filter are
// skipped themselves too.
//
// The callback gets a per-repo context with the reports sink configured. The
// report dump is written into `dumpPath` file and also returned. The callback
// should communicate errors through the report.
func visitReposInParallel(ctx context.Context, projectDir ProjectDir, dumpPath string, cb func(ctx context.Context, r *repo)) (*migrator.ReportDump, error) {
	repos, err := discoverAllRepos(ctx, projectDir)
	if err != nil {
		return nil, err
	}
	ctx = InitReportSink(ctx)
	// Callbacks always return nil: failures are reported through the sink, so
	// the WorkPool error (if any) carries no information here.
	parallel.WorkPool(32, func(ch chan<- func() error) {
		for _, r := range repos {
			r := r // capture the loop variable for the closure
			ch <- func() error {
				ctx := logging.SetField(ctx, "checkout", r.checkoutID)
				cb(ctx, r)
				return nil
			}
		}
	})
	dump := DumpReports(ctx)
	// Write the reports out as CSV.
	if dumpPath != "" {
		scanOut, err := os.Create(dumpPath)
		if err != nil {
			return nil, err
		}
		defer scanOut.Close()
		if err := dump.WriteToCSV(scanOut); err != nil {
			return nil, err
		}
	}
	return dump, nil
}

// git returns an object that can execute git commands in the repo.
func (r *repo) git(ctx context.Context) gitRunner {
	return gitRunner{ctx: ctx, root: r.root}
}

// localProject returns a reference to the local checked out project.
//
// The config roots are read back from the per-checkout git config keys written
// by prepRepoForProject.
func (r *repo) localProject(ctx context.Context, projID string) migrator.LocalProject {
	git := r.git(ctx)
	return &localProject{
		id: migrator.ReportID{
			Checkout: r.checkoutID,
			Project:  projID,
		},
		repo: r,
		ctx:  ctx,
		relConfigRoot:          git.read("config", configRootKey(projID)),
		relGeneratedConfigRoot: git.read("config", generatedConfigRootKey(projID)),
	}
}

// initialize either creates or loads the repo checkout.
//
// Returns newCheckout == true when a fresh checkout was created.
func (r *repo) initialize(ctx context.Context, remoteURL, remoteRef string) (newCheckout bool, err error) {
	r.root = r.projectDir.CheckoutDir(r.checkoutID)
	switch _, err = os.Stat(r.root); {
	case os.IsNotExist(err):
		return true, r.create(ctx, remoteURL, remoteRef)
	case err == nil:
		// Existing checkout: validate it and refresh the metadata file.
		return false, r.load(ctx, true)
	default:
		return false, errors.Annotate(err, "statting checkout").Err()
	}
}

// load verifies the checkout has all LUCI projects we need.
func (r *repo) load(ctx context.Context, writeMetadata bool) error {
	git := r.git(ctx)
	// Every project must have both config-root keys recorded in git config;
	// their absence means the checkout predates this project set.
	for _, proj := range r.projects {
		configRoot := git.read("config", configRootKey(proj.Id))
		generatedConfigRoot := git.read("config", generatedConfigRootKey(proj.Id))
		if configRoot == "" || generatedConfigRoot == "" {
			return errors.Reason(
				"the checkout %q is lacking LUCI project %q; you may need to rerun the migration with -squeaky -clean flags",
				r.checkoutID, proj.Id,
			).Err()
		}
	}
	// git.err latches the first failure of the reads above.
	if git.err != nil {
		return git.err
	}
	// Make sure the metadata file is up-to-date (has no extra entries).
	if writeMetadata {
		if err := writeProjectsMetadata(projectsMetadataFile(r.root), r.projects); err != nil {
			return err
		}
	}
	return nil
}

// create initializes a new repo checkout.
//
// The checkout is prepared in a temp directory and only renamed into its final
// location at the very end, so a failed create leaves no half-valid checkout.
func (r *repo) create(ctx context.Context, remoteURL, remoteRef string) error {
	// We do this because `git cl` makes very broad assumptions about ref names.
	var originRef string
	if prefix := "refs/heads/"; strings.HasPrefix(remoteRef, prefix) {
		originRef = strings.Replace(remoteRef, prefix, "refs/remotes/origin/", 1)
	} else if prefix := "refs/branch-heads/"; strings.HasPrefix(remoteRef, prefix) {
		originRef = strings.Replace(remoteRef, prefix, "refs/remotes/branch-heads/", 1)
	} else {
		return errors.Reason("malformed remote ref, must be `refs/heads/` or `refs/branch-heads/`: %q", remoteRef).Err()
	}
	// Bail early if the migrator config is broken.
	migratorCfg, err := r.projectDir.LoadConfigFile()
	if err != nil {
		return errors.Annotate(err, "bad migrator config in %q", r.projectDir).Err()
	}
	git := gitRunner{ctx: ctx, root: r.projectDir.CheckoutTemp(r.checkoutID)}
	// 0777 is further restricted by the process umask.
	if err = os.Mkdir(git.root, 0777); err != nil {
		return errors.Annotate(err, "creating repo checkout").Err()
	}
	// "sso://" simplifies authenticating into internal repos.
	remoteURL = strings.Replace(remoteURL, "https://", "sso://", 1)
	// Bail early with a clear error message if we have no read access.
	git.run("ls-remote", remoteURL, remoteRef)
	if git.err != nil {
		return errors.Reason("no read access to %q ref %q", remoteURL, remoteRef).Err()
	}
	// Fetch the state into the git guts, but do not check out it yet.
	// A shallow, blob-less (partial) clone keeps the checkout small.
	git.run("init")
	for key, val := range migratorCfg.GetGit().Config {
		git.run("config", key, val)
	}
	git.run("config", "extensions.PartialClone", "origin")
	git.run("config", "depot-tools.upstream", originRef)
	git.run("remote", "add", "origin", remoteURL)
	git.run("config", "remote.origin.fetch", "+"+remoteRef+":"+originRef)
	git.run("config", "remote.origin.partialclonefilter", "blob:none")
	git.run("fetch", "--depth", "1", "origin")
	// Figure out what directories we need to have in the checkout.
	toAdd := stringset.Set{}
	for _, proj := range r.projects {
		if err := r.prepRepoForProject(&git, originRef, proj, toAdd); err != nil {
			return errors.Annotate(err, "when examining LUCI project %q", proj.Id).Err()
		}
	}
	// We do a sparse checkout iff the stuff we want is somewhere deeper than
	// the root of the repo. Otherwise the whole checkout is the config
	// directory.
	if !toAdd.Has(".") {
		git.run("sparse-checkout", "init")
		git.run(append([]string{"sparse-checkout", "add"}, toAdd.ToSortedSlice()...)...)
	}
	git.run("new-branch", localBranch)
	// All git.run calls above latch their first error here.
	if git.err != nil {
		return git.err
	}
	if err := writeProjectsMetadata(projectsMetadataFile(git.root), r.projects); err != nil {
		return err
	}
	// Atomically move the fully-initialized temp checkout into place.
	return os.Rename(git.root, r.root)
}

// fetch fetches the most recent state of the remote ref.
func (r *repo) fetch(ctx context.Context) error {
	git := r.git(ctx)
	git.run("fetch", "--depth", "1", "origin")
	return git.err
}

// reset updates the repo to the state as if it was just fetched.
//
// Fetches the most recent state of the remote ref, recreates fix_config branch.
func (r *repo) reset(ctx context.Context) error { git := r.git(ctx) git.run("fetch", "--depth", "1", "origin") git.run("reset", "--hard", "FETCH_HEAD") git.run("clean", "-ffxd") git.run("-c", "advice.detachedHead=false", "checkout", "FETCH_HEAD") // Delete the branch only if it actually exists, otherwise the command fails. if git.read("config", fmt.Sprintf("branch.%s.remote", localBranch)) != "" { git.run("branch", "-D", localBranch) } git.run("new-branch", localBranch) return git.err } // prepRepoForProject figures out what directories we need to check out. func (r *repo) prepRepoForProject(git *gitRunner, originRef string, proj *configpb.Project, toAdd stringset.Set) error { // Path where generated configs (e.g. project.cfg) are. generatedRoot := proj.GetGitilesLocation().GetPath() if generatedRoot == "" { generatedRoot = "." } // Need to checkout all generated files themselves. toAdd.Add(generatedRoot) // Run from generatedRoot all the way up to "."; We need to add all OWNERS // files. for cur := generatedRoot; cur != "."; cur = path.Dir(cur) { toAdd.Add(filepath.Join(cur, "DIR_METADATA")) toAdd.Add(filepath.Join(cur, "OWNERS")) toAdd.Add(filepath.Join(cur, "PRESUBMIT.py")) } // Attempt to read project.cfg from the git guts. It contains lucicfg metadata // describing how to find the root of the lucicfg config tree. var projectCfg configpb.ProjectCfg blob := git.read("cat-file", "-p", fmt.Sprintf("%s:%s/project.cfg", originRef, generatedRoot)) if blob != "" { if err := lucipb.UnmarshalTextML(blob, &projectCfg); err != nil { return errors.Annotate(err, "failed to unmarshal project.cfg").Err() } } // We need to checkout the directory with lucicfg's main package. Grab its // location from the project config metadata but fallback to a heuristic of // finding the main.star for projects that don't have the metadata yet. 
var configRoot string if packageDir := projectCfg.GetLucicfg().GetPackageDir(); packageDir != "" { configRoot = path.Join(generatedRoot, packageDir) } else { // Go up until we see main.star. for configRoot = generatedRoot; configRoot != "."; configRoot = path.Dir(configRoot) { if git.check("cat-file", "-t", originRef+":"+configRoot+"/main.star") { break } } } toAdd.Add(configRoot) // Store these directories for reuse in load(...) and localProject(...). git.run("config", configRootKey(proj.Id), configRoot) git.run("config", generatedConfigRootKey(proj.Id), generatedRoot) return git.err } // reportID returns ID to use for reports about this specific checkout. func (r *repo) reportID() migrator.ReportID { return migrator.ReportID{Checkout: r.checkoutID} } // report adds a report about this checkout to the sink. func (r *repo) report(ctx context.Context, tag, description string, opts ...migrator.ReportOption) { getReportSink(ctx).add(r.reportID(), tag, description, opts...) } // writeProjectsMetadata writes a metadata file with []configpb.Project. func writeProjectsMetadata(path string, projects []*configpb.Project) error { blob, err := (prototext.MarshalOptions{Indent: " "}).Marshal(&configpb.ProjectsCfg{ Projects: projects, }) if err != nil { return err } return ioutil.WriteFile(path, blob, 0600) } // readProjectsMetadata reads the file written by writeProjectsMetadata. func readProjectsMetadata(path string) ([]*configpb.Project, error) { blob, err := ioutil.ReadFile(path) if err != nil { return nil, err } var cfg configpb.ProjectsCfg if err := (prototext.UnmarshalOptions{}).Unmarshal(blob, &cfg); err != nil { return nil, err } return cfg.Projects, nil } type gitRunner struct { root string err error ctx context.Context } // Sets up redirection for cmd.Std{out,err} to `log`. // // If cmd.Std{err,out} are non-nil prior to running this, they're left alone. // // The `log` function will be invoked with each line parsed from Std{out,err}. 
// It should actually log this somewhere. `fromStdout` will be true if the line
// originated from the process' Stdout, false otherwise.
//
// If cmd.Args[-1] is exactly the string "2>&1" (i.e. migrator.TieStderr), then
// this will tie Stderr to Stdout. This means that `fromStdout` will always be
// true.
func redirectIOAndWait(cmd *exec.Cmd, log func(fromStdout bool, line string)) error {
	var wg sync.WaitGroup
	// shuttleStdio pumps one pipe line-by-line into `log` on its own goroutine.
	shuttleStdio := func(reader io.Reader, stdout bool) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			scanner := bufio.NewReader(reader)
			for {
				line, err := scanner.ReadBytes('\n')
				line = bytes.TrimRight(line, "\r\n")
				// EOF with no trailing partial line: clean end of stream.
				if err == io.EOF && len(line) == 0 {
					break
				}
				log(stdout, fmt.Sprintf("%s: %s", cmd.Args[0], line))
				if err != nil {
					// Non-EOF read errors are programmer/system bugs here.
					if err != io.EOF {
						panic(err)
					}
					break
				}
			}
		}()
	}
	// Pop the trailing TieStderr marker before handing args to the process.
	tieStderr := false
	if cmd.Args[len(cmd.Args)-1] == migrator.TieStderr {
		tieStderr = true
		cmd.Args = cmd.Args[:len(cmd.Args)-1]
	}
	if cmd.Stdout == nil {
		outReader, err := cmd.StdoutPipe()
		if err != nil {
			panic(err)
		}
		shuttleStdio(outReader, true)
	}
	if cmd.Stderr == nil {
		if tieStderr {
			cmd.Stderr = cmd.Stdout
		} else {
			errReader, err := cmd.StderrPipe()
			if err != nil {
				panic(err)
			}
			shuttleStdio(errReader, false)
		}
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Drain both pipes fully before Wait (Wait closes the pipes).
	wg.Wait()
	return cmd.Wait()
}

// check runs a git command and reports only whether it exited successfully.
func (r *gitRunner) check(args ...string) bool {
	cmd := exec.CommandContext(r.ctx, "git", args...)
	cmd.Dir = r.root
	return cmd.Run() == nil
}

// run executes a git command for its side effects, latching the first error
// into r.err; it is a no-op if a previous command already failed.
func (r *gitRunner) run(args ...string) {
	if r.err != nil {
		return
	}
	// git uses stderr for normal logging, but uses 'fatal' to indicate that bad
	// stuff happened. See the log function on redirectIOAndWait below.
	fatalLine := false
	args = append(args, migrator.TieStderr)
	logging.Infof(r.ctx, "running git %q", args)
	cmd := exec.CommandContext(r.ctx, "git", args...)
	cmd.Dir = r.root
	err := redirectIOAndWait(cmd, func(fromStdout bool, line string) {
		if strings.HasPrefix(line, "git: fatal: ") {
			fatalLine = true
		}
		// Everything from the first fatal line onwards is logged as an error.
		if !fatalLine {
			logging.Infof(r.ctx, "%s", line)
		} else {
			logging.Errorf(r.ctx, "%s", line)
		}
	})
	r.err = errors.Annotate(err, "running git %q", args).Err()
}

// read executes a git command and returns its trimmed stdout.
func (r *gitRunner) read(args ...string) string {
	if r.err != nil {
		return ""
	}
	logging.Debugf(r.ctx, "running git %q", args)
	buf := &bytes.Buffer{}
	cmd := exec.CommandContext(r.ctx, "git", args...)
	cmd.Stdout = buf
	cmd.Dir = r.root
	err := redirectIOAndWait(cmd, func(fromStdout bool, line string) {
		logging.Errorf(r.ctx, "%s", line)
	})
	// Ignore exit status of "git config <key>" commands. Non-zero exit code
	// usually means the config key is absent.
	if len(args) != 2 || args[0] != "config" {
		r.err = errors.Annotate(err, "running git %q", args).Err()
	}
	return strings.TrimSpace(buf.String())
}

// gerritCL returns the "host/c/issue" URL of the CL associated with the local
// branch (as recorded by `git cl`), or "" if there is none.
func (r *gitRunner) gerritCL() string {
	if host := r.read("config", fmt.Sprintf("branch.%s.gerritserver", localBranch)); host != "" {
		issue := r.read("config", fmt.Sprintf("branch.%s.gerritissue", localBranch))
		if issue != "" {
			return fmt.Sprintf("%s/c/%s", host, issue)
		}
	}
	return ""
}
A comparative study of the effects of ketoconazole and fluconazole on 17-beta estradiol production by rat ovaries in vitro. In this study we have compared the effects of ketoconazole and fluconazole, a novel triazole antifungal agent, on 17-beta estradiol production in rat ovaries in vitro. For both compounds there was a lag phase, immediately after addition to the test system, during which the rate of estradiol synthesis remained at control values. This may have been due to the time required for uptake of the compound and transfer to its site of action or for depletion of endogenous pools of intermediates. After the lag phase both compounds produced a reduction in the rate of estradiol synthesis. At any given concentration, fluconazole produced a reduction which was substantially less than that observed with ketoconazole. Indeed 2 microM ketoconazole reduced the rate of estradiol production by greater than 90% while 10 microM fluconazole caused only a 70% reduction. These findings are consistent with reports that these compounds are inhibitors of cytochrome P450 and with the reduced sensitivity of mammalian cytochrome P450 to fluconazole as compared with ketoconazole.
/**
 * Call the graph resource, return user information.
 *
 * Loads the "graph" OAuth2 authorized client for the currently authenticated
 * principal (scoped to the current HTTP request) and uses it to call the
 * Microsoft Graph "me" endpoint.
 *
 * @return Response with graph data
 */
@PreAuthorize("hasAuthority('SCOPE_Obo.Graph.Read')")
@GetMapping("call-graph-with-repository")
public String callGraphWithRepository() {
    Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
    ServletRequestAttributes attributes =
            (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes();
    OAuth2AuthorizedClient authorizedClient = oAuth2AuthorizedClientRepository
            .loadAuthorizedClient("graph", authentication, attributes.getRequest());
    return callMicrosoftGraphMeEndpoint(authorizedClient);
}
/*----------------------------------------------------------*/
/*                                                          */
/*                      LIBMESH V 7.0                       */
/*                                                          */
/*----------------------------------------------------------*/
/*                                                          */
/*  Description:       handle .meshb file format I/O        */
/*  Author:            <NAME>                               */
/*  Creation date:     dec 08 2015                          */
/*  Last modification: jan 19 2016                          */
/*                                                          */
/*----------------------------------------------------------*/

/*----------------------------------------------------------*/
/* Headers' macros                                          */
/*----------------------------------------------------------*/

/* When building the Fortran 77 API, procedure names may need a trailing
   underscore and arguments arrive as pointers; these macros abstract that. */
#ifdef F77API

#ifdef F77_NO_UNDER_SCORE
#define NAMF77(c,f) f
#define APIF77(x) x
#else
#define NAMF77(c,f) f ## _
#define APIF77(x) x ## _
#endif

#define VALF77(v) *v
#define TYPF77(t) t*
#define PRCF77(p) *((int *)p)

#else

#define NAMF77(c,f) c
#define VALF77(v) v
#define TYPF77(t) t
#define PRCF77(p) p

#endif

/*----------------------------------------------------------*/
/* Includes                                                 */
/*----------------------------------------------------------*/

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <ctype.h>
#include <setjmp.h>
#include <fcntl.h>
#include <unistd.h>
#include "libmesh7.h"

/*----------------------------------------------------------*/
/* Defines                                                  */
/*----------------------------------------------------------*/

/* File type bits (combined into GmfMshSct.typ). */
#define Asc 1
#define Bin 2
#define MshFil 4
#define SolFil 8

/* Keyword categories. */
#define InfKwd 1
#define RegKwd 2
#define SolKwd 3
#define CmtKwd 4

#define WrdSiz 4
#define FilStrSiz 64
#define BufSiz 10000
#define MaxArg 20

/*----------------------------------------------------------*/
/* Structures                                               */
/*----------------------------------------------------------*/

/* Per-keyword state: line count, file position and the expanded
   per-line field format string. */
typedef struct
{
	int typ, SolSiz, NmbWrd, NmbTyp, TypTab[ GmfMaxTyp ];
	long long NmbLin, pos;
	char fmt[ GmfMaxTyp*9 ];
}KwdSct;

/* One open mesh/solution file: format/version flags, the keyword table,
   stdio/file handles, and I/O buffers sharing the DblBuf storage. */
typedef struct
{
	int dim, ver, mod, typ, cod, FilDes;
	long long NexKwdPos, siz, pos;
	jmp_buf err; /* longjmp target used for error recovery during I/O */
	KwdSct KwdTab[ GmfMaxKwd + 1 ];
	FILE *hdl;
	int *IntBuf;
	float *FltBuf;
	char *buf;
	char FilNam[ GmfStrSiz ];
	double DblBuf[1000/8];
	unsigned char blk[ BufSiz + 1000 ];
}GmfMshSct;

/*----------------------------------------------------------*/
/* Global variables                                         */
/*----------------------------------------------------------*/

/* Keyword table: {plural name, singular name, counter flag, field format}.
   Format letters: i = int, r = real, d = dimension-sized reals, s = solution
   fields, c = comment, n = list — presumably; confirm against the reader. */
const char *GmfKwdFmt[ GmfMaxKwd + 1 ][4] =
{	{"Reserved", "", "", ""},
	{"MeshVersionFormatted", "", "", "i"},
	{"Reserved", "", "", ""},
	{"Dimension", "", "", "i"},
	{"Vertices", "Vertex", "i", "dri"},
	{"Edges", "Edge", "i", "iii"},
	{"Triangles", "Triangle", "i", "iiii"},
	{"Quadrilaterals", "Quadrilateral", "i", "iiiii"},
	{"Tetrahedra", "Tetrahedron", "i", "iiiii"},
	{"Prisms", "Prism", "i", "iiiiiii"},
	{"Hexahedra", "Hexahedron", "i", "iiiiiiiii"},
	{"IterationsAll", "IterationAll","","i"},
	{"TimesAll", "TimeAll","","r"},
	{"Corners", "Corner", "i", "i"},
	{"Ridges", "Ridge", "i", "i"},
	{"RequiredVertices", "RequiredVertex", "i", "i"},
	{"RequiredEdges", "RequiredEdge", "i", "i"},
	{"RequiredTriangles", "RequiredTriangle", "i", "i"},
	{"RequiredQuadrilaterals", "RequiredQuadrilateral", "i", "i"},
	{"TangentAtEdgeVertices", "TangentAtEdgeVertex", "i", "iii"},
	{"NormalAtVertices", "NormalAtVertex", "i", "ii"},
	{"NormalAtTriangleVertices", "NormalAtTriangleVertex", "i", "iii"},
	{"NormalAtQuadrilateralVertices", "NormalAtQuadrilateralVertex", "i", "iiii"},
	{"AngleOfCornerBound", "", "", "r"},
	{"TrianglesP2", "TriangleP2", "i", "iiiiiii"},
	{"EdgesP2", "EdgeP2", "i", "iiii"},
	{"SolAtPyramids", "SolAtPyramid", "i", "sr"},
	{"QuadrilateralsQ2", "QuadrilateralQ2", "i", "iiiiiiiiii"},
	{"ISolAtPyramids", "ISolAtPyramid", "i", "iiiii"},
	{"SubDomainFromGeom", "SubDomainFromGeom", "i", "iii"},
	{"TetrahedraP2", "TetrahedronP2", "i", "iiiiiiiiiii"},
	{"Fault_NearTri", "Fault_NearTri", "i", "i"},
	{"Fault_Inter", "Fault_Inter", "i", "i"},
	{"HexahedraQ2", "HexahedronQ2", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiii"},
	{"ExtraVerticesAtEdges", "ExtraVerticesAtEdge", "i", "in"},
	{"ExtraVerticesAtTriangles", "ExtraVerticesAtTriangle", "i", "in"},
	{"ExtraVerticesAtQuadrilaterals", "ExtraVerticesAtQuadrilateral", "i", "in"},
	{"ExtraVerticesAtTetrahedra", "ExtraVerticesAtTetrahedron", "i", "in"},
	{"ExtraVerticesAtPrisms", "ExtraVerticesAtPrism", "i", "in"},
	{"ExtraVerticesAtHexahedra", "ExtraVerticesAtHexahedron", "i", "in"},
	{"VerticesOnGeometricVertices", "VertexOnGeometricVertex", "i", "iir"},
	{"VerticesOnGeometricEdges", "VertexOnGeometricEdge", "i", "iirr"},
	{"VerticesOnGeometricTriangles", "VertexOnGeometricTriangle", "i", "iirrr"},
	{"VerticesOnGeometricQuadrilaterals", "VertexOnGeometricQuadrilateral", "i", "iirrr"},
	{"EdgesOnGeometricEdges", "EdgeOnGeometricEdge", "i", "iir"},
	{"Fault_FreeEdge", "Fault_FreeEdge", "i", "i"},
	{"Polyhedra", "Polyhedron", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii"},
	{"Polygons", "Polygon", "", "iiiiiiiii"},
	{"Fault_Overlap", "Fault_Overlap", "i", "i"},
	{"Pyramids", "Pyramid", "i", "iiiiii"},
	{"BoundingBox", "", "", "drdr"},
	/* NOTE(review): this row has only 3 initializers (others have 4); the 4th
	   slot is implicitly NULL and the "i"/"drdrdrdr" columns look shifted —
	   verify against the upstream libMeshb table. */
	{"Body","i", "drdrdrdr"},
	{"PrivateTable", "PrivateTable", "i", "i"},
	{"Fault_BadShape", "Fault_BadShape", "i", "i"},
	{"End", "", "", ""},
	{"TrianglesOnGeometricTriangles", "TriangleOnGeometricTriangle", "i", "iir"},
	{"TrianglesOnGeometricQuadrilaterals", "TriangleOnGeometricQuadrilateral", "i", "iir"},
	{"QuadrilateralsOnGeometricTriangles", "QuadrilateralOnGeometricTriangle", "i", "iir"},
	{"QuadrilateralsOnGeometricQuadrilaterals", "QuadrilateralOnGeometricQuadrilateral", "i", "iir"},
	{"Tangents", "Tangent", "i", "dr"},
	{"Normals", "Normal", "i", "dr"},
	{"TangentAtVertices", "TangentAtVertex", "i", "ii"},
	{"SolAtVertices", "SolAtVertex", "i", "sr"},
	{"SolAtEdges", "SolAtEdge", "i", "sr"},
	{"SolAtTriangles", "SolAtTriangle", "i", "sr"},
	{"SolAtQuadrilaterals", "SolAtQuadrilateral", "i", "sr"},
	{"SolAtTetrahedra", "SolAtTetrahedron", "i", "sr"},
	{"SolAtPrisms", "SolAtPrism", "i", "sr"},
	{"SolAtHexahedra", "SolAtHexahedron", "i", "sr"},
	{"DSolAtVertices", "DSolAtVertex", "i", "sr"},
	{"ISolAtVertices", "ISolAtVertex", "i", "i"},
	{"ISolAtEdges", "ISolAtEdge", "i", "ii"},
	{"ISolAtTriangles", "ISolAtTriangle", "i", "iii"},
	{"ISolAtQuadrilaterals", "ISolAtQuadrilateral", "i", "iiii"},
	{"ISolAtTetrahedra", "ISolAtTetrahedron", "i", "iiii"},
	{"ISolAtPrisms", "ISolAtPrism", "i", "iiiiii"},
	{"ISolAtHexahedra", "ISolAtHexahedron", "i", "iiiiiiii"},
	{"Iterations", "","","i"},
	{"Time", "","","r"},
	{"Fault_SmallTri", "Fault_SmallTri","i","i"},
	{"CoarseHexahedra", "CoarseHexahedron", "i", "i"},
	{"Comments", "Comment", "i", "c"},
	{"PeriodicVertices", "PeriodicVertex", "i", "ii"},
	{"PeriodicEdges", "PeriodicEdge", "i", "ii"},
	{"PeriodicTriangles", "PeriodicTriangle", "i", "ii"},
	{"PeriodicQuadrilaterals", "PeriodicQuadrilateral", "i", "ii"},
	{"PrismsP2", "PrismP2", "i", "iiiiiiiiiiiiiiiiiii"},
	{"PyramidsP2", "PyramidP2", "i", "iiiiiiiiiiiiiii"},
	{"QuadrilateralsQ3", "QuadrilateralQ3", "i", "iiiiiiiiiiiiiiiii"},
	{"QuadrilateralsQ4", "QuadrilateralQ4", "i", "iiiiiiiiiiiiiiiiiiiiiiiiii"},
	{"TrianglesP3", "TriangleP3", "i", "iiiiiiiiiii"},
	{"TrianglesP4", "TriangleP4", "i", "iiiiiiiiiiiiiiii"},
	{"EdgesP3", "EdgeP3", "i", "iiiii"},
	{"EdgesP4", "EdgeP4", "i", "iiiiii"}
 };

/*----------------------------------------------------------*/
/* Prototypes of local procedures                           */
/*----------------------------------------------------------*/

static void ScaWrd(GmfMshSct *, void *);
static void ScaDblWrd(GmfMshSct *, void *);
static long long GetPos(GmfMshSct *);
static void RecWrd(GmfMshSct *, const void *);
static void RecDblWrd(GmfMshSct *, const void *);
static void RecBlk(GmfMshSct *, const void *, int);
static void SetPos(GmfMshSct *, long long);
static int ScaKwdTab(GmfMshSct *);
static void ExpFmt(GmfMshSct *, int);
static void ScaKwdHdr(GmfMshSct *, int);
static void SwpWrd(char *, int);
static int SetFilPos(GmfMshSct *, long long);
static long long GetFilPos(GmfMshSct *msh);
static long long GetFilSiz(GmfMshSct *);
static void CalF77Prc(long long, long long, void *, int, void **);
/*----------------------------------------------------------*/ /* Fscanf and fgets checking for errors */ /*----------------------------------------------------------*/ #define safe_fscanf(hdl, format, ptr, JmpErr) \ do { \ if( fscanf(hdl, format, ptr) != 1 ) \ longjmp( JmpErr, -1); \ } while(0) #define safe_fgets(ptr, siz, hdl, JmpErr) \ do { \ if( fgets(ptr, siz, hdl) == NULL ) \ longjmp( JmpErr, -1); \ } while(0) /*----------------------------------------------------------*/ /* Open a mesh file in read or write mod */ /*----------------------------------------------------------*/ long long GmfOpenMesh(char *FilNam, int mod, ...) { int KwdCod, res, *PtrVer, *PtrDim; long long MshIdx; char str[ GmfStrSiz ]; va_list VarArg; GmfMshSct *msh; /*---------------------*/ /* MESH STRUCTURE INIT */ /*---------------------*/ if(!(msh = calloc(1, sizeof(GmfMshSct)))) return(0); MshIdx = (long long)msh; /* Save the current stack environment for longjmp */ if(setjmp(msh->err) != 0) { if(msh->hdl != NULL) fclose(msh->hdl); if(msh->FilDes != 0) close(msh->FilDes); free(msh); return(0); } /* Copy the FilNam into the structure */ if(strlen(FilNam) + 7 >= GmfStrSiz) longjmp(msh->err, -1); strcpy(msh->FilNam, FilNam); /* Store the opening mod (read or write) and guess the filetype (binary or ascii) depending on the extension */ msh->mod = mod; msh->buf = (void *)msh->DblBuf; msh->FltBuf = (void *)msh->DblBuf; msh->IntBuf = (void *)msh->DblBuf; if(strstr(msh->FilNam, ".meshb")) msh->typ |= (Bin | MshFil); else if(strstr(msh->FilNam, ".mesh")) msh->typ |= (Asc | MshFil); else if(strstr(msh->FilNam, ".solb")) msh->typ |= (Bin | SolFil); else if(strstr(msh->FilNam, ".sol")) msh->typ |= (Asc | SolFil); else longjmp(msh->err, -1); /* Open the file in the required mod and initialyse the mesh structure */ if(msh->mod == GmfRead) { /*-----------------------*/ /* OPEN FILE FOR READING */ /*-----------------------*/ va_start(VarArg, mod); PtrVer = va_arg(VarArg, int *); PtrDim = va_arg(VarArg, 
int *); va_end(VarArg); /* Read the endian coding tag, the mesh version and the mesh dimension (mandatory kwd) */ if(msh->typ & Bin) { /* Create the name string and open the file */ msh->FilDes = open(msh->FilNam, O_RDONLY, 0666); if(msh->FilDes <= 0) longjmp(msh->err, -1); /* Read the endian coding tag, the mesh version and the mesh dimension (mandatory kwd) */ if(read(msh->FilDes, &msh->cod, WrdSiz) != WrdSiz) longjmp(msh->err, -1); if( (msh->cod != 1) && (msh->cod != 16777216) ) longjmp(msh->err, -1); ScaWrd(msh, (unsigned char *)&msh->ver); if( (msh->ver < 1) || (msh->ver > 4) ) longjmp(msh->err, -1); if( (msh->ver >= 3) && (sizeof(long long) != 8) ) longjmp(msh->err, -1); ScaWrd(msh, (unsigned char *)&KwdCod); if(KwdCod != GmfDimension) longjmp(msh->err, -1); GetPos(msh); ScaWrd(msh, (unsigned char *)&msh->dim); } else { /* Create the name string and open the file */ if(!(msh->hdl = fopen(msh->FilNam, "rb"))) longjmp(msh->err, -1); do { res = fscanf(msh->hdl, "%s", str); }while( (res != EOF) && strcmp(str, "MeshVersionFormatted") ); if(res == EOF) longjmp(msh->err, -1); safe_fscanf(msh->hdl, "%d", &msh->ver, msh->err); if( (msh->ver < 1) || (msh->ver > 4) ) longjmp(msh->err, -1); do { res = fscanf(msh->hdl, "%s", str); }while( (res != EOF) && strcmp(str, "Dimension") ); if(res == EOF) longjmp(msh->err, -1); safe_fscanf(msh->hdl, "%d", &msh->dim, msh->err); } if( (msh->dim != 2) && (msh->dim != 3) ) longjmp(msh->err, -1); (*PtrVer) = msh->ver; (*PtrDim) = msh->dim; /*------------*/ /* KW READING */ /*------------*/ /* Read the list of kw present in the file */ if(!ScaKwdTab(msh)) return(0); return(MshIdx); } else if(msh->mod == GmfWrite) { /*-----------------------*/ /* OPEN FILE FOR WRITING */ /*-----------------------*/ msh->cod = 1; /* Check if the user provided a valid version number and dimension */ va_start(VarArg, mod); msh->ver = va_arg(VarArg, int); msh->dim = va_arg(VarArg, int); va_end(VarArg); if( (msh->ver < 1) || (msh->ver > 4) ) longjmp(msh->err, 
-1); if( (msh->ver >= 3) && (sizeof(long long) != 8) ) longjmp(msh->err, -1); if( (msh->dim != 2) && (msh->dim != 3) ) longjmp(msh->err, -1); /* Create the mesh file */ if(msh->typ & Bin) { msh->FilDes = creat(msh->FilNam, 0666); if(msh->FilDes <= 0) longjmp(msh->err, -1); } else if(!(msh->hdl = fopen(msh->FilNam, "wb"))) longjmp(msh->err, -1); /*------------*/ /* KW WRITING */ /*------------*/ /* Write the mesh version and dimension */ if(msh->typ & Asc) { fprintf(msh->hdl, "%s %d\n\n", GmfKwdFmt[ GmfVersionFormatted ][0], msh->ver); fprintf(msh->hdl, "%s %d\n", GmfKwdFmt[ GmfDimension ][0], msh->dim); } else { RecWrd(msh, (unsigned char *)&msh->cod); RecWrd(msh, (unsigned char *)&msh->ver); GmfSetKwd(MshIdx, GmfDimension, 0); RecWrd(msh, (unsigned char *)&msh->dim); } return(MshIdx); } else { free(msh); return(0); } } /*----------------------------------------------------------*/ /* Close a meshfile in the right way */ /*----------------------------------------------------------*/ int GmfCloseMesh(long long MshIdx) { int res = 1; GmfMshSct *msh = (GmfMshSct *)MshIdx; RecBlk(msh, msh->buf, 0); /* In write down the "End" kw in write mode */ if(msh->mod == GmfWrite) { if(msh->typ & Asc) fprintf(msh->hdl, "\n%s\n", GmfKwdFmt[ GmfEnd ][0]); else GmfSetKwd(MshIdx, GmfEnd, 0); } /* Close the file and free the mesh structure */ if(msh->typ & Bin) close(msh->FilDes); else if(fclose(msh->hdl)) res = 0; free(msh); return(res); } /*----------------------------------------------------------*/ /* Read the number of lines and set the position to this kwd*/ /*----------------------------------------------------------*/ long long GmfStatKwd(long long MshIdx, int KwdCod, ...) 
{ int i, *PtrNmbTyp, *PtrSolSiz, *TypTab; GmfMshSct *msh = (GmfMshSct *)MshIdx; KwdSct *kwd; va_list VarArg; if( (KwdCod < 1) || (KwdCod > GmfMaxKwd) ) return(0); kwd = &msh->KwdTab[ KwdCod ]; if(!kwd->NmbLin) return(0); /* Read further arguments if this kw is a sol */ if(kwd->typ == SolKwd) { va_start(VarArg, KwdCod); PtrNmbTyp = va_arg(VarArg, int *); *PtrNmbTyp = kwd->NmbTyp; PtrSolSiz = va_arg(VarArg, int *); *PtrSolSiz = kwd->SolSiz; TypTab = va_arg(VarArg, int *); for(i=0;i<kwd->NmbTyp;i++) TypTab[i] = kwd->TypTab[i]; va_end(VarArg); } return(kwd->NmbLin); } /*----------------------------------------------------------*/ /* Set the current file position to a given kwd */ /*----------------------------------------------------------*/ int GmfGotoKwd(long long MshIdx, int KwdCod) { GmfMshSct *msh = (GmfMshSct *)MshIdx; KwdSct *kwd = &msh->KwdTab[ KwdCod ]; if( (KwdCod < 1) || (KwdCod > GmfMaxKwd) || !kwd->NmbLin ) return(0); return(SetFilPos(msh, kwd->pos)); } /*----------------------------------------------------------*/ /* Write the kwd and set the number of lines */ /*----------------------------------------------------------*/ int GmfSetKwd(long long MshIdx, int KwdCod, ...) 
{ int i, *TypTab; long long NmbLin=0, CurPos; va_list VarArg; GmfMshSct *msh = (GmfMshSct *)MshIdx; KwdSct *kwd; RecBlk(msh, msh->buf, 0); if( (KwdCod < 1) || (KwdCod > GmfMaxKwd) ) return(0); kwd = &msh->KwdTab[ KwdCod ]; /* Read further arguments if this kw has a header */ if(strlen(GmfKwdFmt[ KwdCod ][2])) { va_start(VarArg, KwdCod); NmbLin = va_arg(VarArg, long long); if(!strcmp(GmfKwdFmt[ KwdCod ][3], "sr")) { kwd->NmbTyp = va_arg(VarArg, int); TypTab = va_arg(VarArg, int *); for(i=0;i<kwd->NmbTyp;i++) kwd->TypTab[i] = TypTab[i]; } va_end(VarArg); } /* Setup the kwd info */ ExpFmt(msh, KwdCod); if(!kwd->typ) return(0); else if(kwd->typ == InfKwd) kwd->NmbLin = 1; else kwd->NmbLin = NmbLin; /* Store the next kwd position in binary file */ if( (msh->typ & Bin) && msh->NexKwdPos ) { CurPos = GetFilPos(msh); if(!SetFilPos(msh, msh->NexKwdPos)) return(0); SetPos(msh, CurPos); if(!SetFilPos(msh, CurPos)) return(0); } /* Write the header */ if(msh->typ & Asc) { fprintf(msh->hdl, "\n%s\n", GmfKwdFmt[ KwdCod ][0]); if(kwd->typ != InfKwd) fprintf(msh->hdl, "%zd\n", kwd->NmbLin); /* In case of solution field, write the extended header */ if(kwd->typ == SolKwd) { fprintf(msh->hdl, "%d ", kwd->NmbTyp); for(i=0;i<kwd->NmbTyp;i++) fprintf(msh->hdl, "%d ", kwd->TypTab[i]); fprintf(msh->hdl, "\n\n"); } } else { RecWrd(msh, (unsigned char *)&KwdCod); msh->NexKwdPos = GetFilPos(msh); SetPos(msh, 0); if(kwd->typ != InfKwd) { if(msh->ver < 4) { i = (int)kwd->NmbLin; RecWrd(msh, (unsigned char *)&i); } else RecDblWrd(msh, (unsigned char *)&kwd->NmbLin); } /* In case of solution field, write the extended header at once */ if(kwd->typ == SolKwd) { RecWrd(msh, (unsigned char *)&kwd->NmbTyp); for(i=0;i<kwd->NmbTyp;i++) RecWrd(msh, (unsigned char *)&kwd->TypTab[i]); } } /* Reset write buffer position */ msh->pos = 0; /* Estimate the total file size and check whether it crosses the 2GB threshold */ msh->siz += kwd->NmbLin * kwd->NmbWrd * WrdSiz; return(1); } 
/*----------------------------------------------------------*/ /* Read a full line from the current kwd */ /*----------------------------------------------------------*/ extern int NAMF77(GmfGetLin, gmfgetlin)(TYPF77(long long) MshIdx, TYPF77(int) KwdCod, ...) { int i, j; float *FltSolTab; double *DblSolTab; va_list VarArg; GmfMshSct *msh = (GmfMshSct *) VALF77(MshIdx); KwdSct *kwd = &msh->KwdTab[ VALF77(KwdCod) ]; if( (VALF77(KwdCod) < 1) || (VALF77(KwdCod) > GmfMaxKwd) ) return(0); /* Save the current stack environment for longjmp */ if(setjmp(msh->err) != 0) return(0); /* Start decoding the arguments */ va_start(VarArg, KwdCod); switch(kwd->typ) { case InfKwd : case RegKwd : case CmtKwd : { if(msh->typ & Asc) { for(i=0;i<kwd->SolSiz;i++) if(kwd->fmt[i] == 'r') if(msh->ver <= 1) safe_fscanf(msh->hdl, "%f", va_arg(VarArg, float *), msh->err); else safe_fscanf(msh->hdl, "%lf", va_arg(VarArg, double *), msh->err); else if(kwd->fmt[i] == 'i') if(msh->ver <= 3) safe_fscanf(msh->hdl, "%d", va_arg(VarArg, int *), msh->err); else safe_fscanf(msh->hdl, "%ld", va_arg(VarArg, long *), msh->err); else if(kwd->fmt[i] == 'c') safe_fgets(va_arg(VarArg, char *), WrdSiz * FilStrSiz, msh->hdl, msh->err); } else { for(i=0;i<kwd->SolSiz;i++) if(kwd->fmt[i] == 'r') if(msh->ver <= 1) ScaWrd(msh, (unsigned char *)va_arg(VarArg, float *)); else ScaDblWrd(msh, (unsigned char *)va_arg(VarArg, double *)); else if(kwd->fmt[i] == 'i') if(msh->ver <= 3) ScaWrd(msh, (unsigned char *)va_arg(VarArg, int *)); else ScaDblWrd(msh, (unsigned char *)va_arg(VarArg, long *)); else if(kwd->fmt[i] == 'c') fread(va_arg(VarArg, char *), WrdSiz, FilStrSiz, msh->hdl); } }break; case SolKwd : { if(msh->ver == 1) { FltSolTab = va_arg(VarArg, float *); if(msh->typ & Asc) for(j=0;j<kwd->SolSiz;j++) safe_fscanf(msh->hdl, "%f", &FltSolTab[j], msh->err); else for(j=0;j<kwd->SolSiz;j++) ScaWrd(msh, (unsigned char *)&FltSolTab[j]); } else { DblSolTab = va_arg(VarArg, double *); if(msh->typ & Asc) 
for(j=0;j<kwd->SolSiz;j++) safe_fscanf(msh->hdl, "%lf", &DblSolTab[j], msh->err); else for(j=0;j<kwd->SolSiz;j++) ScaDblWrd(msh, (unsigned char *)&DblSolTab[j]); } }break; } va_end(VarArg); return(1); } /*----------------------------------------------------------*/ /* Write a full line from the current kwd */ /*----------------------------------------------------------*/ extern int NAMF77(GmfSetLin, gmfsetlin)(TYPF77(long long) MshIdx, TYPF77(int) KwdCod, ...) { int i, j, pos, *IntBuf; long *LngBuf; float *FltSolTab, *FltBuf; double *DblSolTab, *DblBuf; va_list VarArg; GmfMshSct *msh = (GmfMshSct *) VALF77(MshIdx); KwdSct *kwd = &msh->KwdTab[ VALF77(KwdCod) ]; if( ( VALF77(KwdCod) < 1) || ( VALF77(KwdCod) > GmfMaxKwd) ) return(0); /* Start decoding the arguments */ va_start(VarArg, KwdCod); if(kwd->typ != SolKwd) { if(msh->typ & Asc) { for(i=0;i<kwd->SolSiz;i++) { if(kwd->fmt[i] == 'r') { if(msh->ver <= 1) fprintf(msh->hdl, "%g ", VALF77(va_arg(VarArg, TYPF77(double)))); else fprintf(msh->hdl, "%.15g ", VALF77(va_arg(VarArg, TYPF77(double)))); } else if(kwd->fmt[i] == 'i') { if(msh->ver <= 3) fprintf(msh->hdl, "%d ", VALF77(va_arg(VarArg, TYPF77(int)))); else fprintf(msh->hdl, "%ld ", VALF77(va_arg(VarArg, TYPF77(long)))); } else if(kwd->fmt[i] == 'c') fprintf(msh->hdl, "%s", va_arg(VarArg, char *)); } } else { pos = 0; for(i=0;i<kwd->SolSiz;i++) { if(kwd->fmt[i] == 'r') { if(msh->ver <= 1) { FltBuf = (void *)&msh->buf[ pos ]; *FltBuf = (float) VALF77(va_arg(VarArg, TYPF77(double))); pos += 4; } else { DblBuf = (void *)&msh->buf[ pos ]; *DblBuf = VALF77(va_arg(VarArg, TYPF77(double))); pos += 8; } } else if(kwd->fmt[i] == 'i') { if(msh->ver <= 3) { IntBuf = (void *)&msh->buf[ pos ]; *IntBuf = VALF77(va_arg(VarArg, TYPF77(int))); pos += 4; } else { LngBuf = (void *)&msh->buf[ pos ]; *LngBuf = VALF77(va_arg(VarArg, TYPF77(long))); pos += 8; } } else if(kwd->fmt[i] == 'c') { memset(&msh->buf[ pos ], 0, FilStrSiz * WrdSiz); strncpy(&msh->buf[ pos ], va_arg(VarArg, char 
*), FilStrSiz * WrdSiz); pos += FilStrSiz; } } RecBlk(msh, msh->buf, kwd->NmbWrd); } } else { if(msh->ver == 1) { FltSolTab = va_arg(VarArg, float *); if(msh->typ & Asc) for(j=0;j<kwd->SolSiz;j++) fprintf(msh->hdl, "%g ", (double)FltSolTab[j]); else RecBlk(msh, (unsigned char *)FltSolTab, kwd->NmbWrd); } else { DblSolTab = va_arg(VarArg, double *); if(msh->typ & Asc) for(j=0;j<kwd->SolSiz;j++) fprintf(msh->hdl, "%.15g ", DblSolTab[j]); else RecBlk(msh, (unsigned char *)DblSolTab, kwd->NmbWrd); } } va_end(VarArg); if(msh->typ & Asc) fprintf(msh->hdl, "\n"); return(1); } /*----------------------------------------------------------*/ /* Private procedure for transmesh : copy a whole line */ /*----------------------------------------------------------*/ #ifdef TRANSMESH int GmfCpyLin(int InpIdx, int OutIdx, int KwdCod) { char s[ WrdSiz * FilStrSiz ]; double d; float f; int i, a; long long l; GmfMshSct *InpMsh = GmfMshTab[ InpIdx ], *OutMsh = GmfMshTab[ OutIdx ]; KwdSct *kwd = &InpMsh->KwdTab[ KwdCod ]; /* Save the current stack environment for longjmp */ if(setjmp(InpMsh->err) != 0) return(0); for(i=0;i<kwd->SolSiz;i++) { if(kwd->fmt[i] == 'r') { if(InpMsh->ver == 1) { if(InpMsh->typ & Asc) safe_fscanf(InpMsh->hdl, "%f", &f, InpMsh->err); else ScaWrd(InpMsh, (unsigned char *)&f); d = (double)f; } else { if(InpMsh->typ & Asc) safe_fscanf(InpMsh->hdl, "%lf", &d, InpMsh->err); else ScaDblWrd(InpMsh, (unsigned char *)&d); f = (float)d; } if(OutMsh->ver == 1) if(OutMsh->typ & Asc) fprintf(OutMsh->hdl, "%g ", (double)f); else RecWrd(OutMsh, (unsigned char *)&f); else if(OutMsh->typ & Asc) fprintf(OutMsh->hdl, "%.15g ", d); else RecDblWrd(OutMsh, (unsigned char *)&d); } else if(kwd->fmt[i] == 'i') { if(InpMsh->ver <= 3) { if(InpMsh->typ & Asc) safe_fscanf(InpMsh->hdl, "%d", &a, InpMsh->err); else ScaWrd(InpMsh, (unsigned char *)&a); l = (long long)a; } else { if(InpMsh->typ & Asc) safe_fscanf(InpMsh->hdl, "%zd", &l, InpMsh->err); else ScaDblWrd(InpMsh, (unsigned char *)&l); a 
= (int)l; } if(OutMsh->ver <= 3) { if(OutMsh->typ & Asc) fprintf(OutMsh->hdl, "%d ", a); else RecWrd(OutMsh, (unsigned char *)&a); } else { if(OutMsh->typ & Asc) fprintf(OutMsh->hdl, "%zd ", l); else RecDblWrd(OutMsh, (unsigned char *)&l); } } else if(kwd->fmt[i] == 'c') { memset(s, 0, FilStrSiz * WrdSiz); if(InpMsh->typ & Asc) safe_fgets(s, WrdSiz * FilStrSiz, InpMsh->hdl, InpMsh->err); else read(InpMsh->FilDes, s, WrdSiz * FilStrSiz); if(OutMsh->typ & Asc) fprintf(OutMsh->hdl, "%s", s); else write(OutMsh->FilDes, s, WrdSiz * FilStrSiz); } } if(OutMsh->typ & Asc) fprintf(OutMsh->hdl, "\n"); return(1); } #endif /*----------------------------------------------------------*/ /* Bufferized reading of all keyword's lines */ /*----------------------------------------------------------*/ extern int NAMF77(GmfGetBlock, gmfgetblock)(TYPF77(long long) MshIdx, TYPF77(int) KwdCod, void *prc, ...) { char *UsrDat[ GmfMaxTyp ], *FilBuf=NULL, *FilPos; char *StrTab[5] = { "", "%f", "%lf", "%d", "%lld" }; int b, i, j, LinSiz, *FilPtrI32, *UsrPtrI32, FilTyp[ GmfMaxTyp ], UsrTyp[ GmfMaxTyp ]; int NmbBlk, NmbArg, SizTab[5] = {0,4,8,4,8}; long long NmbLin, *FilPtrI64, *UsrPtrI64, BegIdx, EndIdx=0; float *FilPtrR32, *UsrPtrR32; double *FilPtrR64, *UsrPtrR64; void (*UsrPrc)(long long, long long, void *) = NULL, *UsrArg, *ArgTab[ MaxArg ]; size_t UsrLen[ GmfMaxTyp ]; va_list VarArg; GmfMshSct *msh = (GmfMshSct *) VALF77(MshIdx); KwdSct *kwd = &msh->KwdTab[ VALF77(KwdCod) ]; /* Save the current stack environment for longjmp */ if(setjmp(msh->err) != 0) { if(FilBuf) free(FilBuf); return(0); } /* Check mesh and keyword */ if( (VALF77(KwdCod) < 1) || (VALF77(KwdCod) > GmfMaxKwd) || !kwd->NmbLin ) return(0); /* Make sure it's not a simple information keyword */ if( (kwd->typ != RegKwd) && (kwd->typ != SolKwd) ) return(0); /* Start decoding the arguments */ va_start(VarArg, prc); LinSiz = 0; /* Get the user's preporcessing procedure and argument adresses, if any */ #ifdef F77API if(PRCF77(prc)) 
{ UsrPrc = (void (*)(long long, long long, void *))prc; NmbArg = *(va_arg(VarArg, int *)); for(i=0;i<NmbArg;i++) ArgTab[i] = va_arg(VarArg, void *); } #else if(prc) { UsrPrc = (void (*)(long long, long long, void *))prc; UsrArg = va_arg(VarArg, void *); } #endif for(i=0;i<kwd->SolSiz;i++) { /* Get the user's data type and pointers to first and second adress to compute the stride */ UsrTyp[i] = VALF77(va_arg(VarArg, TYPF77(int))); UsrDat[i] = va_arg(VarArg, char *); UsrLen[i] = (size_t)(va_arg(VarArg, char *) - UsrDat[i]); /* Get the file's data type */ if(kwd->fmt[i] == 'r') if(msh->ver <= 1) FilTyp[i] = GmfFloat; else FilTyp[i] = GmfDouble; else if(msh->ver <= 3) FilTyp[i] = GmfInt; else FilTyp[i] = GmfLong; /* Compute the file stride */ LinSiz += SizTab[ FilTyp[i] ]; } va_end(VarArg); /* Move file pointer to the keyword data */ SetFilPos(msh, kwd->pos); /* Read the whole kwd data */ if(msh->typ & Asc) { for(i=0;i<kwd->NmbLin;i++) for(j=0;j<kwd->SolSiz;j++) { safe_fscanf(msh->hdl, StrTab[ UsrTyp[j] ], UsrDat[j], msh->err); UsrDat[j] += UsrLen[j]; } /* Call the user's preprocessing procedure */ if(UsrPrc) #ifdef F77API CalF77Prc(1, kwd->NmbLin, UsrPrc, NmbArg, ArgTab); #else UsrPrc(1, kwd->NmbLin, UsrArg); #endif } else { /* Allocate a small buffer and split the main loop into chunks */ if(!(FilBuf = malloc((size_t)(BufSiz * LinSiz)))) return(0); NmbBlk = kwd->NmbLin / BufSiz; for(b=0;b<=NmbBlk;b++) { if(b == NmbBlk) NmbLin = kwd->NmbLin - b * BufSiz; else NmbLin = BufSiz; /* Read a chunk of data */ if(read(msh->FilDes, FilBuf, (size_t)(LinSiz * NmbLin)) == -1) longjmp(msh->err, -1); FilPos = FilBuf; BegIdx = EndIdx+1; EndIdx += NmbLin; /* Then decode it and store it in the user's data structure */ for(i=0;i<NmbLin;i++) for(j=0;j<kwd->SolSiz;j++) { if(msh->cod != 1) SwpWrd(FilPos, SizTab[ FilTyp[j] ]); if(FilTyp[j] == GmfInt) { FilPtrI32 = (int *)FilPos; if(UsrTyp[j] == GmfInt) { UsrPtrI32 = (int *)UsrDat[j]; *UsrPtrI32 = *FilPtrI32; } else { UsrPtrI64 = (long long 
*)UsrDat[j]; *UsrPtrI64 = (long long)*FilPtrI32; } } else if(FilTyp[j] == GmfLong) { FilPtrI64 = (long long *)FilPos; if(UsrTyp[j] == GmfLong) { UsrPtrI64 = (long long *)UsrDat[j]; *UsrPtrI64 = *FilPtrI64; } else { UsrPtrI32 = (int *)UsrDat[j]; *UsrPtrI32 = (int)*FilPtrI64; } } else if(FilTyp[j] == GmfFloat) { FilPtrR32 = (float *)FilPos; if(UsrTyp[j] == GmfFloat) { UsrPtrR32 = (float *)UsrDat[j]; *UsrPtrR32 = *FilPtrR32; } else { UsrPtrR64 = (double *)UsrDat[j]; *UsrPtrR64 = (double)*FilPtrR32; } } else if(FilTyp[j] == GmfDouble) { FilPtrR64 = (double *)FilPos; if(UsrTyp[j] == GmfDouble) { UsrPtrR64 = (double *)UsrDat[j]; *UsrPtrR64 = *FilPtrR64; } else { UsrPtrR32 = (float *)UsrDat[j]; *UsrPtrR32 = (float)*FilPtrR64; } } FilPos += SizTab[ FilTyp[j] ]; UsrDat[j] += UsrLen[j]; } /* Call the user's preprocessing procedure */ if(UsrPrc) #ifdef F77API CalF77Prc(BegIdx, EndIdx, UsrPrc, NmbArg, ArgTab); #else UsrPrc(BegIdx, EndIdx, UsrArg); #endif } free(FilBuf); } return(1); } /*----------------------------------------------------------*/ /* Bufferized writing of all keyword's lines */ /*----------------------------------------------------------*/ extern int NAMF77(GmfSetBlock, gmfsetblock)(TYPF77(long long) MshIdx, TYPF77(int) KwdCod, void *prc, ...) 
{ char *UsrDat[ GmfMaxTyp ], *FilBuf=NULL, *FilPos; char *StrTab[5] = { "", "%g", "%.15g", "%d", "%lld" }; int i, j, LinSiz, *FilPtrI32, *UsrPtrI32, FilTyp[ GmfMaxTyp ], UsrTyp[ GmfMaxTyp ]; int NmbBlk, NmbLin, b, SizTab[5] = {0,4,8,4,8}, NmbArg; long long *FilPtrI64, *UsrPtrI64, BegIdx, EndIdx=0; float *FilPtrR32, *UsrPtrR32; double *FilPtrR64, *UsrPtrR64; void (*UsrPrc)(long long, long long, void *) = NULL, *UsrArg, *ArgTab[ MaxArg ]; size_t UsrLen[ GmfMaxTyp ]; va_list VarArg; GmfMshSct *msh = (GmfMshSct *) VALF77(MshIdx); KwdSct *kwd = &msh->KwdTab[ VALF77(KwdCod) ]; /* Save the current stack environment for longjmp */ if(setjmp(msh->err) != 0) { if(FilBuf) free(FilBuf); return(0); } /* Check mesh and keyword */ if( (VALF77(KwdCod) < 1) || (VALF77(KwdCod) > GmfMaxKwd) || !kwd->NmbLin ) return(0); /* Make sure it's not a simple information keyword */ if( (kwd->typ != RegKwd) && (kwd->typ != SolKwd) ) return(0); /* Start decoding the arguments */ va_start(VarArg, prc); LinSiz = 0; /* Get the user's postprocessing procedure and argument adresses, if any */ #ifdef F77API if(PRCF77(prc)) { UsrPrc = (void (*)(long long, long long, void *))prc; NmbArg = *(va_arg(VarArg, int *)); for(i=0;i<NmbArg;i++) ArgTab[i] = va_arg(VarArg, void *); } #else if(prc) { UsrPrc = (void (*)(long long, long long, void *))prc; UsrArg = va_arg(VarArg, void *); } #endif for(i=0;i<kwd->SolSiz;i++) { /* Get the user's data type and pointers to first and second adress to compute the stride */ UsrTyp[i] = VALF77(va_arg(VarArg, TYPF77(int))); UsrDat[i] = va_arg(VarArg, char *); UsrLen[i] = (size_t)(va_arg(VarArg, char *) - UsrDat[i]); /* Get the file's data type */ if(kwd->fmt[i] == 'r') if(msh->ver <= 1) FilTyp[i] = GmfFloat; else FilTyp[i] = GmfDouble; else if(msh->ver <= 3) FilTyp[i] = GmfInt; else FilTyp[i] = GmfLong; /* Compute the file stride */ LinSiz += SizTab[ FilTyp[i] ]; } va_end(VarArg); /* Write the whole kwd data */ if(msh->typ & Asc) { if(UsrPrc) #ifdef F77API CalF77Prc(1, 
kwd->NmbLin, UsrPrc, NmbArg, ArgTab); #else UsrPrc(1, kwd->NmbLin, UsrArg); #endif for(i=0;i<kwd->NmbLin;i++) for(j=0;j<kwd->SolSiz;j++) { if(UsrTyp[j] == GmfFloat) { UsrPtrR32 = (float *)UsrDat[j]; fprintf(msh->hdl, StrTab[ UsrTyp[j] ], (double)*UsrPtrR32); } else if(UsrTyp[j] == GmfDouble) { UsrPtrR64 = (double *)UsrDat[j]; fprintf(msh->hdl, StrTab[ UsrTyp[j] ], *UsrPtrR64); } else if(UsrTyp[j] == GmfInt) { UsrPtrI32 = (int *)UsrDat[j]; fprintf(msh->hdl, StrTab[ UsrTyp[j] ], *UsrPtrI32); } else if(UsrTyp[j] == GmfLong) { UsrPtrI64 = (long long *)UsrDat[j]; fprintf(msh->hdl, StrTab[ UsrTyp[j] ], *UsrPtrI64); } if(j < kwd->SolSiz -1) fprintf(msh->hdl, " "); else fprintf(msh->hdl, "\n"); UsrDat[j] += UsrLen[j]; } } else { if(!(FilBuf = malloc((size_t)BufSiz * (size_t)LinSiz))) return(0); NmbBlk = kwd->NmbLin / BufSiz; for(b=0;b<=NmbBlk;b++) { if(b == NmbBlk) NmbLin = kwd->NmbLin - b * BufSiz; else NmbLin = BufSiz; FilPos = FilBuf; BegIdx = EndIdx+1; EndIdx += NmbLin; if(UsrPrc) #ifdef F77API CalF77Prc(BegIdx, EndIdx, UsrPrc, NmbArg, ArgTab); #else UsrPrc(BegIdx, EndIdx, UsrArg); #endif for(i=0;i<NmbLin;i++) for(j=0;j<kwd->SolSiz;j++) { if(FilTyp[j] == GmfInt) { FilPtrI32 = (int *)FilPos; if(UsrTyp[j] == GmfInt) { UsrPtrI32 = (int *)UsrDat[j]; *FilPtrI32 = *UsrPtrI32; } else { UsrPtrI64 = (long long *)UsrDat[j]; *FilPtrI32 = (int)*UsrPtrI64; } } else if(FilTyp[j] == GmfLong) { FilPtrI64 = (long long *)FilPos; if(UsrTyp[j] == GmfLong) { UsrPtrI64 = (long long *)UsrDat[j]; *FilPtrI64 = *UsrPtrI64; } else { UsrPtrI32 = (int *)UsrDat[j]; *FilPtrI64 = (long long)*UsrPtrI32; } } else if(FilTyp[j] == GmfFloat) { FilPtrR32 = (float *)FilPos; if(UsrTyp[j] == GmfFloat) { UsrPtrR32 = (float *)UsrDat[j]; *FilPtrR32 = *UsrPtrR32; } else { UsrPtrR64 = (double *)UsrDat[j]; *FilPtrR32 = (float)*UsrPtrR64; } } else if(FilTyp[j] == GmfDouble) { FilPtrR64 = (double *)FilPos; if(UsrTyp[j] == GmfDouble) { UsrPtrR64 = (double *)UsrDat[j]; *FilPtrR64 = *UsrPtrR64; } else { UsrPtrR32 = 
(float *)UsrDat[j]; *FilPtrR64 = (double)*UsrPtrR32; } } FilPos += SizTab[ FilTyp[j] ]; UsrDat[j] += UsrLen[j]; } if(write(msh->FilDes, FilBuf, (size_t)(LinSiz * NmbLin)) == -1) { free(FilBuf); return(0); } } free(FilBuf); } return(1); } /*----------------------------------------------------------*/ /* Find every kw present in a meshfile */ /*----------------------------------------------------------*/ static int ScaKwdTab(GmfMshSct *msh) { int KwdCod, c; long long NexPos, EndPos; char str[ GmfStrSiz ]; if(msh->typ & Asc) { /* Scan each string in the file until the end */ while(fscanf(msh->hdl, "%s", str) != EOF) { /* Fast test in order to reject quickly the numeric values */ if(isalpha(str[0])) { /* Search which kwd code this string is associated with, then get its header and save the curent position in file (just before the data) */ for(KwdCod=1; KwdCod<= GmfMaxKwd; KwdCod++) if(!strcmp(str, GmfKwdFmt[ KwdCod ][0])) { ScaKwdHdr(msh, KwdCod); break; } } else if(str[0] == '#') while((c = fgetc(msh->hdl)) != '\n' && c != EOF); } } else { /* Get file size */ EndPos = GetFilSiz(msh); /* Jump through kwd positions in the file */ do { /* Get the kwd code and the next kwd position */ ScaWrd(msh, ( char *)&KwdCod); NexPos = GetPos(msh); if(NexPos > EndPos) longjmp(msh->err, -1); /* Check if this kwd belongs to this mesh version */ if( (KwdCod >= 1) && (KwdCod <= GmfMaxKwd) ) ScaKwdHdr(msh, KwdCod); /* Go to the next kwd */ if(NexPos && !(SetFilPos(msh, NexPos))) longjmp(msh->err, -1); }while(NexPos && (KwdCod != GmfEnd)); } return(1); } /*----------------------------------------------------------*/ /* Read and setup the keyword's header */ /*----------------------------------------------------------*/ static void ScaKwdHdr(GmfMshSct *msh, int KwdCod) { int i; KwdSct *kwd = &msh->KwdTab[ KwdCod ]; if(!strcmp("i", GmfKwdFmt[ KwdCod ][2])) if(msh->typ & Asc) safe_fscanf(msh->hdl, "%zd", &kwd->NmbLin, msh->err); else if(msh->ver <= 3) { ScaWrd(msh, (unsigned char *)&i); 
kwd->NmbLin = i; } else ScaDblWrd(msh, (unsigned char *)&kwd->NmbLin); else kwd->NmbLin = 1; if(!strcmp("sr", GmfKwdFmt[ KwdCod ][3])) { if(msh->typ & Asc) { safe_fscanf(msh->hdl, "%d", &kwd->NmbTyp, msh->err); for(i=0;i<kwd->NmbTyp;i++) safe_fscanf(msh->hdl, "%d", &kwd->TypTab[i], msh->err); } else { ScaWrd(msh, (unsigned char *)&kwd->NmbTyp); for(i=0;i<kwd->NmbTyp;i++) ScaWrd(msh, (unsigned char *)&kwd->TypTab[i]); } } ExpFmt(msh, KwdCod); kwd->pos = GetFilPos(msh); } /*----------------------------------------------------------*/ /* Expand the compacted format and compute the line size */ /*----------------------------------------------------------*/ static void ExpFmt(GmfMshSct *msh, int KwdCod) { int i, j, TmpSiz=0, IntWrd, FltWrd; char chr; const char *InpFmt = GmfKwdFmt[ KwdCod ][3]; KwdSct *kwd = &msh->KwdTab[ KwdCod ]; /* Set the kwd's type */ if(!strlen(GmfKwdFmt[ KwdCod ][2])) kwd->typ = InfKwd; else if(!strcmp(InpFmt, "sr")) kwd->typ = SolKwd; else kwd->typ = RegKwd; /* Get the solution-field's size */ if(kwd->typ == SolKwd) for(i=0;i<kwd->NmbTyp;i++) switch(kwd->TypTab[i]) { case GmfSca : TmpSiz += 1; break; case GmfVec : TmpSiz += msh->dim; break; case GmfSymMat : TmpSiz += (msh->dim * (msh->dim+1)) / 2; break; case GmfMat : TmpSiz += msh->dim * msh->dim; break; } /* Scan each character from the format string */ i = kwd->SolSiz = kwd->NmbWrd = 0; while(i < (int)strlen(InpFmt)) { chr = InpFmt[ i++ ]; if(chr == 'd') { chr = InpFmt[i++]; for(j=0;j<msh->dim;j++) kwd->fmt[ kwd->SolSiz++ ] = chr; } else if(chr == 's') { chr = InpFmt[i++]; for(j=0;j<TmpSiz;j++) kwd->fmt[ kwd->SolSiz++ ] = chr; } else kwd->fmt[ kwd->SolSiz++ ] = chr; } if(msh->ver <= 1) FltWrd = 1; else FltWrd = 2; if(msh->ver <= 3) IntWrd = 1; else IntWrd = 2; for(i=0;i<kwd->SolSiz;i++) switch(kwd->fmt[i]) { case 'i' : kwd->NmbWrd += IntWrd; break; case 'c' : kwd->NmbWrd += FilStrSiz; break; case 'r' : kwd->NmbWrd += FltWrd;break; } } 
/*----------------------------------------------------------*/
/* Read a four bytes word from a mesh file                  */
/*----------------------------------------------------------*/

static void ScaWrd(GmfMshSct *msh, void *ptr)
{
   /* A short read means a truncated or corrupt file: abort through
      the mesh structure's error jump buffer */
   if(read(msh->FilDes, ptr, WrdSiz) != WrdSiz)
      longjmp(msh->err, -1);

   /* cod != 1 flags a file written with the opposite endianness */
   if(msh->cod != 1)
      SwpWrd((char *)ptr, WrdSiz);
}


/*----------------------------------------------------------*/
/* Read an eight bytes word from a mesh file                */
/*----------------------------------------------------------*/

static void ScaDblWrd(GmfMshSct *msh, void *ptr)
{
   if(read(msh->FilDes, ptr, WrdSiz * 2) != WrdSiz * 2)
      longjmp(msh->err, -1);

   if(msh->cod != 1)
      SwpWrd((char *)ptr, 2 * WrdSiz);
}


/*----------------------------------------------------------*/
/* Read a 4 or 8 bytes position in mesh file                */
/*----------------------------------------------------------*/

static long long GetPos(GmfMshSct *msh)
{
   int      IntVal;
   long long pos;

   /* File format version 3 and above store positions on 8 bytes,
      older versions on 4 bytes */
   if(msh->ver >= 3)
      ScaDblWrd(msh, (unsigned char*)&pos);
   else
   {
      ScaWrd(msh, (unsigned char*)&IntVal);
      pos = (long long)IntVal;
   }

   return(pos);
}


/*----------------------------------------------------------*/
/* Write a four bytes word to a mesh file                   */
/*----------------------------------------------------------*/

static void RecWrd(GmfMshSct *msh, const void *wrd)
{
   /* NOTE(review): the return value of write() is ignored here and in
      RecDblWrd/RecBlk, unlike the read path which longjmp()s; left
      unchanged because the error jump buffer may not be armed on the
      write path — confirm before tightening. */
   write(msh->FilDes, wrd, WrdSiz);
}


/*----------------------------------------------------------*/
/* Write an eight bytes word to a mesh file                 */
/*----------------------------------------------------------*/

static void RecDblWrd(GmfMshSct *msh, const void *wrd)
{
   write(msh->FilDes, wrd, WrdSiz * 2);
}


/*----------------------------------------------------------*/
/* Write a block of four bytes word to a mesh file          */
/*----------------------------------------------------------*/

static void RecBlk(GmfMshSct *msh, const void *blk, int siz)
{
   /* Copy this line-block into the main mesh buffer */
   if(siz)
   {
      memcpy(&msh->blk[ msh->pos ], blk, (size_t)(siz * WrdSiz));
      msh->pos += siz * WrdSiz;
   }

   /* When the buffer is full or this procedure is called with a 0
      size, flush the cache on disk */
   if( (msh->pos > BufSiz) || (!siz && msh->pos) )
   {
      write(msh->FilDes, msh->blk, (size_t)msh->pos);
      msh->pos = 0;
   }
}


/*----------------------------------------------------------*/
/* Write a 4 or 8 bytes position in a mesh file             */
/*----------------------------------------------------------*/

static void SetPos(GmfMshSct *msh, long long pos)
{
   int IntVal;

   if(msh->ver >= 3)
      RecDblWrd(msh, (unsigned char*)&pos);
   else
   {
      IntVal = (int)pos;
      RecWrd(msh, (unsigned char*)&IntVal);
   }
}


/*----------------------------------------------------------*/
/* Endianness conversion                                    */
/*----------------------------------------------------------*/

static void SwpWrd(char *wrd, int siz)
{
   char swp;
   int  i;

   /* Reverse the byte order in place */
   for(i=0;i<siz/2;i++)
   {
      swp = wrd[ siz-i-1 ];
      wrd[ siz-i-1 ] = wrd[i];
      wrd[i] = swp;
   }
}


/*----------------------------------------------------------*/
/* Set current position in a file                           */
/*----------------------------------------------------------*/

static int SetFilPos(GmfMshSct *msh, long long pos)
{
   /* Binary meshes go through the raw file descriptor,
      ASCII meshes through the stdio handle */
   if(msh->typ & Bin)
      return((lseek(msh->FilDes, pos, 0) != -1));
   else
      return((fseek(msh->hdl, pos, SEEK_SET) == 0));
}


/*----------------------------------------------------------*/
/* Get current position in a file                           */
/*----------------------------------------------------------*/

static long long GetFilPos(GmfMshSct *msh)
{
   if(msh->typ & Bin)
      return(lseek(msh->FilDes, 0, 1));
   else
      return(ftell(msh->hdl));
}


/*----------------------------------------------------------*/
/* Move the position to the end of file and return the size */
/*----------------------------------------------------------*/

static long long GetFilSiz(GmfMshSct *msh)
{
   long long CurPos, EndPos = 0;

   if(msh->typ & Bin)
   {
      /* Save the position, seek to the end, then restore */
      CurPos = lseek(msh->FilDes, 0, 1);
      EndPos = lseek(msh->FilDes, 0, 2);
      lseek(msh->FilDes, CurPos, 0);
   }
   else
   {
      CurPos = ftell(msh->hdl);

      if(fseek(msh->hdl, 0, SEEK_END) != 0)
         longjmp(msh->err, -1);

      EndPos = ftell(msh->hdl);

      if(fseek(msh->hdl, CurPos, SEEK_SET) != 0)
         longjmp(msh->err, -1);
   }

   return(EndPos);
}


/*----------------------------------------------------------*/
/* Fortran 77 API                                           */
/*----------------------------------------------------------*/

#ifdef F77API

long long APIF77(gmfopenmesh)(char *FilNam, int *mod, int *ver, int *dim, int StrSiz)
{
   int  i;
   char TmpNam[ GmfStrSiz ];

   /* Robustness fix: clamp the Fortran string length so copying the
      name plus the added terminator cannot overflow TmpNam */
   if(StrSiz >= GmfStrSiz)
      StrSiz = GmfStrSiz - 1;

   /* Fortran strings are not null terminated: copy then terminate */
   for(i=0;i<StrSiz;i++)
      TmpNam[i] = FilNam[i];

   TmpNam[ StrSiz ] = 0;

   /* In read mode the version and dimension are outputs,
      in write mode they are inputs */
   if(*mod == GmfRead)
      return(GmfOpenMesh(TmpNam, *mod, ver, dim));
   else
      return(GmfOpenMesh(TmpNam, *mod, *ver, *dim));
}

int APIF77(gmfclosemesh)(long long *idx)
{
   return(GmfCloseMesh(*idx));
}

int APIF77(gmfgotokwd)(long long *MshIdx, int *KwdIdx)
{
   return(GmfGotoKwd(*MshIdx, *KwdIdx));
}

int APIF77(gmfstatkwd)(long long *MshIdx, int *KwdIdx, int *NmbTyp, int *SolSiz, int *TypTab)
{
   /* Solution keywords ("sr" format) carry extra type information */
   if(!strcmp(GmfKwdFmt[ *KwdIdx ][3], "sr"))
      return(GmfStatKwd(*MshIdx, *KwdIdx, NmbTyp, SolSiz, TypTab));
   else
      return(GmfStatKwd(*MshIdx, *KwdIdx));
}

int APIF77(gmfsetkwd)(long long *MshIdx, int *KwdIdx, int *NmbLin, int *NmbTyp, int *TypTab)
{
   if(!strcmp(GmfKwdFmt[ *KwdIdx ][3], "sr"))
      return(GmfSetKwd(*MshIdx, *KwdIdx, *NmbLin, *NmbTyp, TypTab));
   else if(strlen(GmfKwdFmt[ *KwdIdx ][2]))
      return(GmfSetKwd(*MshIdx, *KwdIdx, *NmbLin));
   else
      return(GmfSetKwd(*MshIdx, *KwdIdx));
}


/*----------------------------------------------------------*/
/* Duplication macros                                       */
/*----------------------------------------------------------*/

#define DUP(s,n) DUP ## n (s)
#define DUP1(s) s
#define DUP2(s) DUP1(s),s
#define DUP3(s) DUP2(s),s
#define DUP4(s) DUP3(s),s
#define DUP5(s) DUP4(s),s
#define DUP6(s) DUP5(s),s
#define DUP7(s) DUP6(s),s
#define DUP8(s) DUP7(s),s
#define DUP9(s) DUP8(s),s
#define DUP10(s) DUP9(s),s
#define DUP11(s) DUP10(s),s
#define DUP12(s) DUP11(s),s
#define DUP13(s) DUP12(s),s
#define DUP14(s) DUP13(s),s
#define DUP15(s) DUP14(s),s
#define DUP16(s) DUP15(s),s
#define DUP17(s) DUP16(s),s
#define DUP18(s) DUP17(s),s
#define DUP19(s) DUP18(s),s
#define DUP20(s) DUP19(s),s

#define ARG(a,n) ARG ## n (a)
#define ARG1(a) a[0]
#define ARG2(a) ARG1(a),a[1]
#define ARG3(a) ARG2(a),a[2]
#define ARG4(a) ARG3(a),a[3]
#define ARG5(a) ARG4(a),a[4]
#define ARG6(a) ARG5(a),a[5]
#define ARG7(a) ARG6(a),a[6]
#define ARG8(a) ARG7(a),a[7]
#define ARG9(a) ARG8(a),a[8]
#define ARG10(a) ARG9(a),a[9]
#define ARG11(a) ARG10(a),a[10]
#define ARG12(a) ARG11(a),a[11]
#define ARG13(a) ARG12(a),a[12]
#define ARG14(a) ARG13(a),a[13]
#define ARG15(a) ARG14(a),a[14]
#define ARG16(a) ARG15(a),a[15]
#define ARG17(a) ARG16(a),a[16]
#define ARG18(a) ARG17(a),a[17]
#define ARG19(a) ARG18(a),a[18]
#define ARG20(a) ARG19(a),a[19]


/*----------------------------------------------------------*/
/* Call a fortran thread with 1 to 20 arguments             */
/*----------------------------------------------------------*/

/* Expand one case of the argument-count switch: cast the untyped
   procedure pointer to a function taking n trailing void* arguments
   and invoke it with the first n entries of ArgTab.  The literal n
   is substituted before DUP/ARG expand, so DUP(void *, n) pastes to
   DUPn.  Replaces 20 hand-duplicated case blocks. */
#define CALF77CASE(n) case n : { \
      void (*prc1)(long long *, long long *, DUP(void *, n)) = \
         (void (*)(long long *, long long *, DUP(void *, n)))prc; \
      prc1(&BegIdx, &EndIdx, ARG(ArgTab, n)); \
   } break;

static void CalF77Prc(long long BegIdx, long long EndIdx, void *prc, int NmbArg, void **ArgTab)
{
   switch(NmbArg)
   {
      CALF77CASE(1)
      CALF77CASE(2)
      CALF77CASE(3)
      CALF77CASE(4)
      CALF77CASE(5)
      CALF77CASE(6)
      CALF77CASE(7)
      CALF77CASE(8)
      CALF77CASE(9)
      CALF77CASE(10)
      CALF77CASE(11)
      CALF77CASE(12)
      CALF77CASE(13)
      CALF77CASE(14)
      CALF77CASE(15)
      CALF77CASE(16)
      CALF77CASE(17)
      CALF77CASE(18)
      CALF77CASE(19)
      CALF77CASE(20)
   }
}

#undef CALF77CASE

#endif
/// Generates trait implementations for auxiliary traits for the account wrapper.
///
/// # Note
///
/// Auxiliary traits currently include:
///
/// - `Clone`: To allow cloning contract references in the long run.
/// - `Debug`: To better debug internal contract state.
///
/// Both impls are bounded on the environment's `AccountId` implementing the
/// respective trait, and are emitted with the wrapper's span so compiler
/// diagnostics point at the originating item.
fn generate_auxiliary_trait_impls(&self) -> TokenStream2 {
    let span = self.span();
    let call_forwarder_ident = self.ident();
    // Everything below is quoted source code, not executed here: it is
    // returned as a token stream to be spliced into the generated module.
    quote_spanned!(span=>
        // Manual impl (rather than #[derive(Clone)]) because the derive
        // would bound `E: Clone`, which is not required.
        impl<E> ::core::clone::Clone for #call_forwarder_ident<E>
        where
            E: ::ink_env::Environment,
            <E as ::ink_env::Environment>::AccountId: ::core::clone::Clone,
        {
            #[inline]
            fn clone(&self) -> Self {
                Self {
                    builder: <<Self as ::ink_lang::codegen::TraitCallBuilder>::Builder
                        as ::core::clone::Clone>::clone(&self.builder),
                }
            }
        }

        // Debug only surfaces the wrapped account id, since that is the
        // only state the forwarder's builder carries that is meaningful
        // to display.
        impl<E> ::core::fmt::Debug for #call_forwarder_ident<E>
        where
            E: ::ink_env::Environment,
            <E as ::ink_env::Environment>::AccountId: ::core::fmt::Debug,
        {
            fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                f.debug_struct(::core::stringify!(#call_forwarder_ident))
                    .field("account_id", &self.builder.account_id)
                    .finish()
            }
        }
    )
}
#include <bits/stdc++.h>
#include <unordered_map>
#include <unordered_set>
#include <string>

#define pii pair<int, int>
#define pb push_back
#define f first
#define s second

typedef long long ll;

const int MOD = 1e9 + 7;
const int INF = 1e9 + 7;
const int MAXN = 1e5 + 5;

using namespace std;

// Input string (assumed to contain only 'a'/'b' — the two branches below
// only distinguish 'a' from "not 'a'").
char str[MAXN];

// cnt[0]: 'a' at odd positions, cnt[1]: non-'a' at odd positions,
// cnt[2]: 'a' at even positions, cnt[3]: non-'a' at even positions.
ll cnt[4];

// ans.f accumulates pairs of equal characters at positions of the SAME
// parity (the counter is bumped before the query, so the i==j pair is
// included); ans.s accumulates pairs of equal characters at positions of
// OPPOSITE parity.
pair<ll,ll> ans;

int main() {
    scanf("%s", str);
    int n = strlen(str);
    memset(cnt, 0, sizeof cnt);
    for (int i = 0; i < n; i++) {
        // Record the current character in the parity bucket first so that
        // the single-character pair (j == i) counts toward ans.f.
        if ((i & 1) == 0)
            cnt[2 + str[i] - 'a']++;
        else
            cnt[str[i] - 'a']++;
        if (str[i] == 'a') {
            if ((i & 1) == 0) {
                ans.f += (ll) cnt[2];
                ans.s += (ll) cnt[0];
            } else {
                ans.f += (ll) cnt[0];
                ans.s += (ll) cnt[2];
            }
        } else {
            if ((i & 1) == 0) {
                ans.f += (ll) cnt[3];
                ans.s += (ll) cnt[1];
            } else {
                ans.f += (ll) cnt[1];
                ans.s += (ll) cnt[3];
            }
        }
    }
    // BUG FIX: "%I64d" is an MSVC-only length modifier and is undefined
    // behavior elsewhere; "%lld" is the standard C99/C++11 specifier for
    // long long.  Output order (s then f) is preserved.
    printf("%lld %lld\n", ans.s, ans.f);
    return 0;
}
def layout_document(html, root_box, context, max_loops=8):
    """Paginate the document and yield one finished page at a time.

    Runs up to ``max_loops`` pagination passes until page-dependent state
    (the total ``pages`` counter and target lookups) stops changing, then
    re-resolves counter lookups and bookmark labels, and finally decorates
    each page with fixed boxes, margin boxes, the footnote area and
    backgrounds before yielding it.

    :param html: source document, passed through to ``make_all_pages``.
    :param root_box: root of the box tree to paginate.
    :param context: layout context holding pagination state
        (``page_maker``, ``footnotes``, ``string_set``, …).
    :param max_loops: hard cap on repagination passes, preventing an
        infinite loop when counters never stabilise.
    """
    initialize_page_maker(context, root_box)
    pages = []
    original_footnotes = []
    actual_total_pages = 0

    for loop in range(max_loops):
        if loop > 0:
            PROGRESS_LOGGER.info(
                'Step 5 - Creating layout - Repagination #%d', loop)
            # Restart the pass from the footnotes as they were before the
            # first pagination consumed them.
            context.footnotes = original_footnotes.copy()
        initial_total_pages = actual_total_pages
        if loop == 0:
            # Keep a pristine copy: pagination mutates context.footnotes.
            original_footnotes = context.footnotes.copy()
        pages = list(make_all_pages(context, root_box, html, pages))
        actual_total_pages = len(pages)

        # Decide whether another pagination pass is needed.
        reloop_content = False
        reloop_pages = False
        for page_data in context.page_maker:
            _, _, _, page_state, remake_state = page_data
            page_counter_values = page_state[1]
            # Expose the now-known total page count to page counters.
            page_counter_values['pages'] = [actual_total_pages]
            if remake_state['content_changed']:
                reloop_content = True
            if remake_state['pages_wanted']:
                # Only worth relooping if the page count actually changed.
                reloop_pages = initial_total_pages != actual_total_pages
        if not reloop_content and not reloop_pages:
            break

    # Deduplicate bookmark labels and re-parse counter lookups now that
    # the final page counter values are known.  ::before and ::after
    # pseudo-elements are tracked separately from their owning element.
    watch_elements = []
    watch_elements_before = []
    watch_elements_after = []
    for i, page in enumerate(pages):
        # Page i's state lives at page_maker index i + 1.
        _, _, _, page_state, _ = context.page_maker[i + 1]
        page_counter_values = page_state[1]
        for child in page.descendants():
            if child.bookmark_label:
                if child.element_tag.endswith('::before'):
                    checklist = watch_elements_before
                elif child.element_tag.endswith('::after'):
                    checklist = watch_elements_after
                else:
                    checklist = watch_elements
                # Keep the first bookmark per element; blank out
                # duplicates (e.g. from boxes split across pages).
                if child.element in checklist:
                    child.bookmark_label = ''
                else:
                    checklist.append(child.element)
            if hasattr(child, 'missing_link'):
                for (box, css_token), item in (
                        context.target_collector.counter_lookup_items.items()):
                    if child.missing_link == box and css_token != 'content':
                        # Deduplicated (blanked) bookmarks are skipped.
                        if (css_token == 'bookmark-label'
                                and not child.bookmark_label):
                            continue
                        item.parse_again(page_counter_values)
                        if css_token == 'bookmark-label':
                            child.bookmark_label = box.bookmark_label
            # Collect this page's string sets, keyed per page number.
            string_sets = child.string_set
            if string_sets and string_sets != 'none':
                for string_set in string_sets:
                    string_name, text = string_set
                    context.string_set[string_name][i+1].append(text)

    # Final assembly: fixed boxes from the other pages, margin boxes,
    # footnote area and backgrounds; then hand the page out lazily.
    for i, page in enumerate(pages):
        root_children = []
        root, footnote_area = page.children
        root_children.extend(layout_fixed_boxes(context, pages[:i], page))
        root_children.extend(root.children)
        root_children.extend(layout_fixed_boxes(context, pages[i + 1:], page))
        root.children = root_children
        # Page numbers are 1-based.
        context.current_page = i + 1
        state = context.page_maker[context.current_page][3]
        page.children = (root,)
        if footnote_area.children:
            page.children += (footnote_area,)
        page.children += tuple(make_margin_boxes(context, page, state))
        layout_backgrounds(page, context.get_image_from_uri)
        yield page
import { TransformStream } from "isomorphic-streams";

/**
 * Builds a transform that lets through only the chunks for which
 * `predicate` returns `true`.
 *
 * If the predicate (or enqueuing the chunk) throws, the stream is put
 * into an errored state with the thrown value as the reason.
 *
 * @param predicate - Decides whether a given chunk is forwarded
 * @returns A `TransformStream` suitable for `readable.pipeThrough(...)`
 */
export function filter<I>(predicate: (value: I) => boolean): TransformStream<I, I> {
  return new TransformStream<I, I>({
    transform(chunk, controller) {
      try {
        const keep = predicate(chunk);
        if (keep) {
          controller.enqueue(chunk);
        }
      } catch (reason) {
        controller.error(reason);
      }
    },
  });
}
Tourism and Virtual Reality: a bibliometric analysis of scientific production from the Scopus database. To identify the most developed terms in the field of tourism and virtual reality, a quantitative analysis was carried out in February 2020. This analysis focused on seven hundred and eighty-four (784) publications from the Scopus database, published between 1995 and February 2022 and limited to the following subject areas: Business, Management and Accounting; Computer Science; Social Sciences; and Engineering. A bibliometric analysis was performed with the VOSviewer software, applying term co-occurrence mapping and co-authorship analysis by authors and countries. Five clusters were found for the co-occurrence of terms and seven clusters for the co-authorship of authors and countries.
// node is the basic unit of the tree: it holds one integer payload and
// pointers to its two children (left unset until assigned by the caller).
class node {
public:
    node *left;   // left subtree, or nullptr when absent
    node *right;  // right subtree, or nullptr when absent
    int data;     // payload stored in this node
};  // BUG FIX: the terminating semicolon was missing (compile error)
# SPDX-License-Identifier: Apache-2.0 # To register converter for sparkml operators, import associated modules here. from . import bucketed_random_projection_lsh from . import aft_survival_regression from . import element_wise_product from . import min_hash_lsh from . import word2vec from . import index_to_string from . import chi_sq_selector from . import one_vs_rest from . import gbt_classifier from . import dct from . import pca from . import polynomial_expansion from . import tokenizer from . import naive_bayes from . import vector_slicer from . import stop_words_remover from . import bucketizer from . import random_forest_classifier from . import random_forest_regressor from . import decision_tree_regressor from . import decision_tree_classifier from . import vector_indexer from . import linear_regressor from . import imputer from . import scaler from . import normalizer from . import binarizer from . import string_indexer from . import linear_classifier from . import onehot_encoder from . import vector_assembler
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/api/servicemanagement/v1/servicemanager.proto
// NOTE(review): generated file — any change here will be lost on the next
// protoc run; edit the .proto source instead.

package com.google.api.servicemanagement.v1;

public interface ListServiceRolloutsResponseOrBuilder extends
    // @@protoc_insertion_point(interface_extends:google.api.servicemanagement.v1.ListServiceRolloutsResponse)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <pre>
   * The list of rollout resources.
   * </pre>
   *
   * <code>repeated .google.api.servicemanagement.v1.Rollout rollouts = 1;</code>
   */
  java.util.List<com.google.api.servicemanagement.v1.Rollout>
      getRolloutsList();
  /**
   * <pre>
   * The list of rollout resources.
   * </pre>
   *
   * <code>repeated .google.api.servicemanagement.v1.Rollout rollouts = 1;</code>
   */
  com.google.api.servicemanagement.v1.Rollout getRollouts(int index);
  /**
   * <pre>
   * The list of rollout resources.
   * </pre>
   *
   * <code>repeated .google.api.servicemanagement.v1.Rollout rollouts = 1;</code>
   */
  int getRolloutsCount();
  /**
   * <pre>
   * The list of rollout resources.
   * </pre>
   *
   * <code>repeated .google.api.servicemanagement.v1.Rollout rollouts = 1;</code>
   */
  java.util.List<? extends com.google.api.servicemanagement.v1.RolloutOrBuilder>
      getRolloutsOrBuilderList();
  /**
   * <pre>
   * The list of rollout resources.
   * </pre>
   *
   * <code>repeated .google.api.servicemanagement.v1.Rollout rollouts = 1;</code>
   */
  com.google.api.servicemanagement.v1.RolloutOrBuilder getRolloutsOrBuilder(
      int index);

  /**
   * <pre>
   * The token of the next page of results.
   * </pre>
   *
   * <code>optional string next_page_token = 2;</code>
   */
  java.lang.String getNextPageToken();
  /**
   * <pre>
   * The token of the next page of results.
   * </pre>
   *
   * <code>optional string next_page_token = 2;</code>
   */
  com.google.protobuf.ByteString
      getNextPageTokenBytes();
}
// NextOrFailure calls next.ServeDNS when next is not nill, otherwise it will return, a ServerFailure // and a nil error. func NextOrFailure(name string, next Handler, ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { if next != nil { if span := ot.SpanFromContext(ctx); span != nil { child := span.Tracer().StartSpan(next.Name(), ot.ChildOf(span.Context())) defer child.Finish() ctx = ot.ContextWithSpan(ctx, child) } return next.ServeDNS(ctx, w, r) } return dns.RcodeServerFailure, Error(name, errors.New("no next plugin found")) }
/**
 * Restores the console threshold log level. Based on if {@code Class} or {@code Method} is annotated by
 * {@link ConsoleLogLevelThreshold}.
 * <p>
 * {@link #restoreConsoleLog()}
 *
 * @since 11.1
 */
protected void restoreConsoleThresholdLogLevel(FeaturesRunner runner, FrameworkMethod method) {
    // Resolve the annotation from the test method or its class (resolution
    // order is defined by getAnnotation, declared elsewhere in this file).
    ConsoleLogLevelThreshold consoleLogThreshold = getAnnotation(runner, method, ConsoleLogLevelThreshold.class);
    // NOTE(review): this relies on getAnnotation returning a non-null
    // annotation proxy whose value() is null when the annotation is absent
    // (plain reflection annotations can never return null from value()).
    // If getAnnotation can itself return null, this line throws an NPE —
    // confirm against getAnnotation's contract.
    if (consoleLogThreshold.value() != null) {
        restoreConsoleLog();
    }
}
/* * Copyright Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags and * the COPYRIGHT.txt file distributed with this work. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.teiid.query.function; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.TreeSet; import org.teiid.api.exception.query.InvalidFunctionException; import org.teiid.core.CoreConstants; import org.teiid.core.types.DataTypeManager; import org.teiid.core.types.Transform; import org.teiid.metadata.AggregateAttributes; import org.teiid.metadata.FunctionMethod; import org.teiid.metadata.FunctionParameter; import org.teiid.query.QueryPlugin; import org.teiid.query.function.metadata.FunctionCategoryConstants; import org.teiid.query.resolver.util.ResolverUtil; import org.teiid.query.sql.symbol.AggregateSymbol; import org.teiid.query.sql.symbol.AggregateSymbol.Type; import org.teiid.query.sql.symbol.Constant; import org.teiid.query.sql.symbol.Expression; import org.teiid.query.sql.symbol.Function; import org.teiid.translator.SourceSystemFunctions; /** * The function library is the primary way for the system to find out what * functions are available, resolve function signatures, and invoke system * and user-defined functions. 
*/ public class FunctionLibrary { // Special type conversion functions public static final String CONVERT = "convert"; //$NON-NLS-1$ public static final String CAST = "cast"; //$NON-NLS-1$ // Special lookup function public static final String LOOKUP = "lookup"; //$NON-NLS-1$ // Special user function public static final String USER = "user"; //$NON-NLS-1$ // Special environment variable lookup function public static final String ENV = "env"; //$NON-NLS-1$ public static final String SYS_PROP = "sys_prop"; //$NON-NLS-1$ public static final String ENV_VAR = "env_var"; //$NON-NLS-1$ public static final String SESSION_ID = "session_id"; //$NON-NLS-1$ // Misc. public static final String DECODESTRING = "decodestring"; //$NON-NLS-1$ public static final String DECODEINTEGER = "decodeinteger"; //$NON-NLS-1$ public static final String COMMAND_PAYLOAD = "commandpayload"; //$NON-NLS-1$ public static final String CONCAT = "CONCAT"; //$NON-NLS-1$ public static final String CONCAT2 = "CONCAT2"; //$NON-NLS-1$ public static final String CONCAT_OPERATOR = "||"; //$NON-NLS-1$ public static final String SUBSTRING = "substring"; //$NON-NLS-1$ public static final String NVL = "NVL"; //$NON-NLS-1$ public static final String IFNULL = "IFNULL"; //$NON-NLS-1$ public static final String FROM_UNIXTIME = "from_unixtime"; //$NON-NLS-1$ public static final String TIMESTAMPADD = "timestampadd"; //$NON-NLS-1$ public static final String PARSETIME = "parsetime"; //$NON-NLS-1$ public static final String PARSEDATE = "parsedate"; //$NON-NLS-1$ public static final String FORMATTIME = "formattime"; //$NON-NLS-1$ public static final String FORMATDATE = "formatdate"; //$NON-NLS-1$ public static final String NULLIF = "nullif"; //$NON-NLS-1$ public static final String COALESCE = "coalesce"; //$NON-NLS-1$ public static final String SPACE = "space"; //$NON-NLS-1$ public static final String ARRAY_GET = "array_get"; //$NON-NLS-1$ public static final String JSONARRAY = "jsonarray"; //$NON-NLS-1$ public static final 
String MVSTATUS = "mvstatus"; //$NON-NLS-1$ public static final Set<String> INTERNAL_SCHEMAS = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER); static { INTERNAL_SCHEMAS.add(CoreConstants.SYSTEM_MODEL); INTERNAL_SCHEMAS.add(CoreConstants.SYSTEM_ADMIN_MODEL); INTERNAL_SCHEMAS.add(CoreConstants.ODBC_MODEL); } // Function tree for system functions (never reloaded) private FunctionTree systemFunctions; // Function tree for user-defined functions private FunctionTree[] userFunctions; /** * Construct the function library. This should be called only once by the * FunctionLibraryManager. */ public FunctionLibrary(FunctionTree systemFuncs, FunctionTree... userFuncs) { systemFunctions = systemFuncs; userFunctions = userFuncs; } public FunctionTree[] getUserFunctions() { return userFunctions; } public FunctionTree getSystemFunctions() { return systemFunctions; } /** * Get all function categories, sorted in alphabetical order * @return List of function category names, sorted in alphabetical order */ public List<String> getFunctionCategories() { // Remove category duplicates TreeSet<String> categories = new TreeSet<String>(); categories.addAll( systemFunctions.getCategories() ); if (this.userFunctions != null) { for (FunctionTree tree: this.userFunctions) { categories.addAll(tree.getCategories()); } } ArrayList<String> categoryList = new ArrayList<String>(categories); return categoryList; } /** * Get all function in a category. * @param category Category name * @return List of {@link FunctionMethod}s in a category */ public List<FunctionMethod> getFunctionsInCategory(String category) { List<FunctionMethod> forms = new ArrayList<FunctionMethod>(); forms.addAll(systemFunctions.getFunctionsInCategory(category)); if (this.userFunctions != null) { for (FunctionTree tree: this.userFunctions) { forms.addAll(tree.getFunctionsInCategory(category)); } } return forms; } /** * Find function form based on function name and # of arguments. 
* @param name Function name * @param numArgs Number of arguments * @return Corresponding form or null if not found */ public boolean hasFunctionMethod(String name, int numArgs) { List<FunctionMethod> methods = systemFunctions.findFunctionMethods(name, numArgs); if (!methods.isEmpty()) { return true; } if(this.userFunctions != null) { for (FunctionTree tree: this.userFunctions) { methods = tree.findFunctionMethods(name, numArgs); if (!methods.isEmpty()) { return true; } } } return false; } /** * Find a function descriptor given a name and the types of the arguments. * This method matches based on case-insensitive function name and * an exact match of the number and types of parameter arguments. * @param name Name of the function to resolve * @param types Array of classes representing the types * @return Descriptor if found, null if not found */ public FunctionDescriptor findFunction(String name, Class<?>[] types) { // First look in system functions FunctionDescriptor descriptor = systemFunctions.getFunction(name, types); // If that fails, check the user defined functions if(descriptor == null && this.userFunctions != null) { for (FunctionTree tree: this.userFunctions) { descriptor = tree.getFunction(name, types); if (descriptor != null) { break; } } } return descriptor; } /** * Find a function descriptor given a name and the types of the arguments. * This method matches based on case-insensitive function name and * an exact match of the number and types of parameter arguments. 
* @param name Name of the function to resolve * @param types Array of classes representing the types * @return Descriptor if found, null if not found */ public List<FunctionDescriptor> findAllFunctions(String name, Class<?>[] types) { // First look in system functions FunctionDescriptor descriptor = systemFunctions.getFunction(name, types); // If that fails, check the user defined functions if(descriptor == null && this.userFunctions != null) { List<FunctionDescriptor> result = new LinkedList<FunctionDescriptor>(); for (FunctionTree tree: this.userFunctions) { descriptor = tree.getFunction(name, types); if (descriptor != null) { //pushdown function takes presedence //TODO: there may be multiple translators contributing functions with the same name / types //need "conformed" logic so that the right pushdown can occur if (CoreConstants.SYSTEM_MODEL.equals(descriptor.getSchema())) { return Arrays.asList(descriptor); } result.add(descriptor); } } return result; } if (descriptor != null) { return Arrays.asList(descriptor); } return Collections.emptyList(); } public static class ConversionResult { public ConversionResult(FunctionMethod method) { this.method = method; } public FunctionMethod method; public boolean needsConverion; } /** * Get the conversions that are needed to call the named function with arguments * of the given type. In the case of an exact match, the list will contain all nulls. * In other cases the list will contain one or more non-null values where the value * is a conversion function that can be used to convert to the proper types for * executing the function. 
* @param name Name of function * @param returnType * @param args * @param types Existing types passed to the function * @throws InvalidFunctionException */ public ConversionResult determineNecessaryConversions(String name, Class<?> returnType, Expression[] args, Class<?>[] types, boolean hasUnknownType) throws InvalidFunctionException { //First find existing functions with same name and same number of parameters final Collection<FunctionMethod> functionMethods = new LinkedList<FunctionMethod>(); functionMethods.addAll( this.systemFunctions.findFunctionMethods(name, types.length) ); if (this.userFunctions != null) { for (FunctionTree tree: this.userFunctions) { functionMethods.addAll( tree.findFunctionMethods(name, types.length) ); } } //Score each match, reject any where types can not be converted implicitly //Score of current method (lower score means better match with less converts //Current best score (lower score is best. Higher score results in more implicit conversions int bestScore = Integer.MAX_VALUE; boolean ambiguous = false; FunctionMethod result = null; boolean isSystem = false; boolean narrowing = false; outer: for (FunctionMethod nextMethod : functionMethods) { int currentScore = 0; boolean nextNarrowing = false; final List<FunctionParameter> methodTypes = nextMethod.getInputParameters(); //Holder for current signature with converts where required //Iterate over the parameters adding conversions where required or failing when //no implicit conversion is possible for(int i = 0; i < types.length; i++) { final String tmpTypeName = methodTypes.get(Math.min(i, methodTypes.size() - 1)).getRuntimeType(); Class<?> targetType = DataTypeManager.getDataTypeClass(tmpTypeName); Class<?> sourceType = types[i]; if (sourceType == null) { currentScore++; continue; } if (sourceType.isArray()&& targetType.isArray() && sourceType.getComponentType().equals(targetType.getComponentType())) { currentScore++; continue; } if (sourceType.isArray()) { if 
(isVarArgArrayParam(nextMethod, types, i, targetType)) { //vararg array parameter continue; } //treat the array as object type until proper type handling is added sourceType = DataTypeManager.DefaultDataClasses.OBJECT; } try { Transform t = getConvertFunctionDescriptor(sourceType, targetType); if (t != null) { if (t.isExplicit()) { if (!(args[i] instanceof Constant) || ResolverUtil.convertConstant(DataTypeManager.getDataTypeName(sourceType), tmpTypeName, (Constant)args[i]) == null) { continue outer; } nextNarrowing = true; currentScore++; } else { currentScore++; } } } catch (InvalidFunctionException e) { continue outer; } } //If the method is valid match and it is the current best score, capture those values as current best match if (currentScore > bestScore) { continue; } if (hasUnknownType) { if (returnType != null) { try { Transform t = getConvertFunctionDescriptor(DataTypeManager.getDataTypeClass(nextMethod.getOutputParameter().getRuntimeType()), returnType); if (t != null) { if (t.isExplicit()) { //there still may be a common type, but use any other valid conversion over this one currentScore += types.length + 1; nextNarrowing = true; } else { currentScore++; } } } catch (InvalidFunctionException e) { //there still may be a common type, but use any other valid conversion over this one currentScore += (types.length * types.length); } } } if (nextNarrowing && result != null && !narrowing) { continue; } boolean useNext = false; if (!nextNarrowing && narrowing) { useNext = true; } boolean isSystemNext = nextMethod.getParent() == null || INTERNAL_SCHEMAS.contains(nextMethod.getParent().getName()); if ((isSystem && isSystemNext) || (!isSystem && !isSystemNext && result != null)) { int partCount = partCount(result.getName()); int nextPartCount = partCount(nextMethod.getName()); if (partCount < nextPartCount) { //the current is more specific //this makes us more consistent with the table resolving logic continue outer; } if (nextPartCount < partCount) { useNext = 
true; } } else if (isSystemNext) { useNext = true; } if (currentScore == bestScore && !useNext) { ambiguous = true; boolean useCurrent = false; List<FunctionParameter> bestParams = result.getInputParameters(); for (int j = 0; j < types.length; j++) { String t1 = bestParams.get(Math.min(j, bestParams.size() - 1)).getRuntimeType(); String t2 = methodTypes.get((Math.min(j, methodTypes.size() - 1))).getRuntimeType(); if (types[j] == null || t1.equals(t2)) { continue; } String commonType = ResolverUtil.getCommonRuntimeType(new String[] {t1, t2}); if (commonType == null) { continue outer; //still ambiguous } if (commonType.equals(t1)) { if (!useCurrent) { useNext = true; } } else if (commonType.equals(t2)) { if (!useNext) { useCurrent = true; } } else { continue outer; } } if (useCurrent) { ambiguous = false; //prefer narrower } else { String sysName = result.getProperty(FunctionMethod.SYSTEM_NAME, false); String sysNameOther = nextMethod.getProperty(FunctionMethod.SYSTEM_NAME, false); if (sysName != null && sysName.equalsIgnoreCase(sysNameOther)) { ambiguous = false; } } } if (currentScore < bestScore || useNext) { ambiguous = false; if (currentScore == 0 && isSystemNext) { return new ConversionResult(nextMethod); } bestScore = currentScore; result = nextMethod; isSystem = isSystemNext; narrowing = nextNarrowing; } } if (ambiguous) { throw GENERIC_EXCEPTION; } ConversionResult cr = new ConversionResult(result); if (result != null) { cr.needsConverion = (bestScore != 0); } return cr; } private int partCount(String name) { int result = 0; int index = 0; while (true) { index = name.indexOf('.', index+1); if (index > 0) { result++; } else { break; } } return result; } public FunctionDescriptor[] getConverts(FunctionMethod method, Class<?>[] types) { final List<FunctionParameter> methodTypes = method.getInputParameters(); FunctionDescriptor[] result = new FunctionDescriptor[types.length]; for(int i = 0; i < types.length; i++) { //treat all varags as the same type final 
String tmpTypeName = methodTypes.get(Math.min(i, methodTypes.size() - 1)).getRuntimeType(); Class<?> targetType = DataTypeManager.getDataTypeClass(tmpTypeName); Class<?> sourceType = types[i]; if (sourceType == null) { result[i] = findTypedConversionFunction(DataTypeManager.DefaultDataClasses.NULL, targetType); } else if (sourceType != targetType){ if (isVarArgArrayParam(method, types, i, targetType)) { //vararg array parameter continue; } result[i] = findTypedConversionFunction(sourceType, targetType); } } return result; } public boolean isVarArgArrayParam(FunctionMethod method, Class<?>[] types, int i, Class<?> targetType) { return i == types.length - 1 && method.isVarArgs() && i == method.getInputParameterCount() - 1 && types[i].isArray() && targetType.isAssignableFrom(types[i].getComponentType()); } private static final InvalidFunctionException GENERIC_EXCEPTION = new InvalidFunctionException(QueryPlugin.Event.TEIID30419); private Transform getConvertFunctionDescriptor(Class<?> sourceType, Class<?> targetType) throws InvalidFunctionException { //If exact match no conversion necessary if(sourceType.equals(targetType)) { return null; } Transform result = DataTypeManager.getTransform(sourceType, targetType); //Else see if an implicit conversion is possible. if(result == null){ throw GENERIC_EXCEPTION; } return result; } /** * Find conversion function and set return type to proper type. * @param sourceType The source type class * @param targetType The target type class * @return A CONVERT function descriptor or null if not possible */ public FunctionDescriptor findTypedConversionFunction(Class<?> sourceType, Class<?> targetType) { //TODO: should array to string be prohibited? FunctionDescriptor fd = findFunction(CONVERT, new Class[] {sourceType, DataTypeManager.DefaultDataClasses.STRING}); if (fd != null) { return copyFunctionChangeReturnType(fd, targetType); } return null; } /** * Return a copy of the given FunctionDescriptor with the specified return type. 
 * @param fd FunctionDescriptor to be copied.
 * @param returnType The return type to apply to the copied FunctionDescriptor.
 * @return The copy of FunctionDescriptor.
 */
public FunctionDescriptor copyFunctionChangeReturnType(FunctionDescriptor fd, Class<?> returnType) {
    if(fd != null) {
        FunctionDescriptor fdImpl = fd;
        FunctionDescriptor copy = fdImpl.clone();
        copy.setReturnType(returnType);
        return copy;
    }
    return fd;
}

/**
 * @return true if the function is a two-argument CONVERT or CAST call
 *         (the two spellings are treated identically, case-insensitively).
 */
public static boolean isConvert(Function function) {
    Expression[] args = function.getArgs();
    String funcName = function.getName();
    return args.length == 2 && (funcName.equalsIgnoreCase(FunctionLibrary.CONVERT) || funcName.equalsIgnoreCase(FunctionLibrary.CAST));
}

/**
 * Return a list of the most general forms of built-in aggregate functions.
 * <br>count(*) - is not included
 * <br>textagg - is not included due to its non standard syntax
 *
 * @param includeAnalytic - true to include analytic functions that must be windowed
 * @return
 */
public List<FunctionMethod> getBuiltInAggregateFunctions(boolean includeAnalytic) {
    ArrayList<FunctionMethod> result = new ArrayList<FunctionMethod>();
    if (this.systemFunctions != null) {
        // st_extent is registered as a system function rather than an
        // AggregateSymbol.Type, so it is added to the list explicitly.
        FunctionDescriptor stExtent = this.systemFunctions.getFunction(SourceSystemFunctions.ST_EXTENT, new Class[] {DataTypeManager.DefaultDataClasses.GEOMETRY});
        result.add(stExtent.getMethod());
    }
    for (Type type : AggregateSymbol.Type.values()) {
        AggregateAttributes aa = new AggregateAttributes();
        if (type.isAnalytical()) {
            if (!includeAnalytic) {
                continue;
            }
            aa.setAnalytic(true);
            // NOTE(review): this setAllowsDistinct(false) is a dead store --
            // the unconditional setAllowsDistinct(true) below overwrites it;
            // only the per-case settings in the switch actually stick.
            aa.setAllowsDistinct(false);
            aa.setAllowsOrderBy(false);
        }
        String returnType = null;
        String[] argTypes = null;
        aa.setAllowsDistinct(true);
        switch (type) {
        case TEXTAGG:
        case USER_DEFINED:
            // excluded from the generic list (see javadoc above)
            continue;
        case DENSE_RANK:
        case RANK:
        case ROW_NUMBER:
            aa.setAllowsDistinct(false);
            returnType = AggregateSymbol.LONG_RANKS?DataTypeManager.DefaultDataTypes.LONG:DataTypeManager.DefaultDataTypes.INTEGER;
            argTypes = new String[] {};
            break;
        case ANY:
        case SOME:
        case EVERY:
            returnType = DataTypeManager.DefaultDataTypes.BOOLEAN;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.BOOLEAN};
            break;
        case COUNT_BIG:
            returnType = DataTypeManager.DefaultDataTypes.LONG;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT};
            break;
        case COUNT:
            returnType = DataTypeManager.DefaultDataTypes.INTEGER;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT};
            break;
        case MAX:
        case MIN:
        case AVG:
        case SUM:
            // most general (object) form; typed variants resolve elsewhere
            returnType = DataTypeManager.DefaultDataTypes.OBJECT;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT};
            break;
        case STDDEV_POP:
        case STDDEV_SAMP:
        case VAR_POP:
        case VAR_SAMP:
            returnType = DataTypeManager.DefaultDataTypes.DOUBLE;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.DOUBLE};
            break;
        case STRING_AGG:
            returnType = DataTypeManager.DefaultDataTypes.OBJECT;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT};
            aa.setAllowsOrderBy(true);
            break;
        case ARRAY_AGG:
            returnType = DataTypeManager.DefaultDataTypes.OBJECT;
            argTypes = new String[] {DataTypeManager.getDataTypeName(DataTypeManager.getArrayType(DataTypeManager.DefaultDataClasses.OBJECT))};
            aa.setAllowsOrderBy(true);
            aa.setAllowsDistinct(false);
            break;
        case JSONARRAY_AGG:
            returnType = DataTypeManager.DefaultDataTypes.JSON;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT};
            aa.setAllowsOrderBy(true);
            aa.setAllowsDistinct(false);
            break;
        case XMLAGG:
            returnType = DataTypeManager.DefaultDataTypes.XML;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.XML};
            aa.setAllowsOrderBy(true);
            aa.setAllowsDistinct(false);
            break;
        case FIRST_VALUE:
        case LAST_VALUE:
            returnType = DataTypeManager.DefaultDataTypes.OBJECT;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT};
            break;
        case LEAD:
        case LAG:
            returnType = DataTypeManager.DefaultDataTypes.OBJECT;
            argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT, DataTypeManager.DefaultDataTypes.INTEGER, DataTypeManager.DefaultDataTypes.OBJECT};
            break;
        case NTILE:
returnType = DataTypeManager.DefaultDataTypes.INTEGER; argTypes = new String[] {DataTypeManager.DefaultDataTypes.INTEGER}; break; case PERCENT_RANK: returnType = DataTypeManager.DefaultDataTypes.DOUBLE; argTypes = new String[] {}; break; case CUME_DIST: returnType = DataTypeManager.DefaultDataTypes.DOUBLE; argTypes = new String[] {}; break; case NTH_VALUE: returnType = DataTypeManager.DefaultDataTypes.OBJECT; argTypes = new String[] {DataTypeManager.DefaultDataTypes.OBJECT, DataTypeManager.DefaultDataTypes.INTEGER}; break; } FunctionMethod fm = FunctionMethod.createFunctionMethod(type.name(), type.name(), FunctionCategoryConstants.AGGREGATE, returnType, argTypes); fm.setAggregateAttributes(aa); result.add(fm); } return result; } public boolean userFunctionExists(String resourceName) { String[] parts = resourceName.split("\\.", 2); //$NON-NLS-1$ if (parts.length < 0) { return false; } for (FunctionTree tree : userFunctions) { if (parts[0].equalsIgnoreCase(tree.getSchemaName())) { return tree.hasFunctionWithName(parts[1]); } } return false; } }
/** * Apply the function. * @param args The function arguments including uri and rest config. * @param context Stellar context */ @Override public Object apply(List<Object> args, Context context) throws ParseException { String uriString = getArg(0, String.class, args); Map<String, Object> functionRestConfig = null; Map<String, Object> queryParameters = new HashMap<>(); if (args.size() > 1) { functionRestConfig = getArg(1, Map.class, args); if (args.size() == 3) { queryParameters = getArg(2, Map.class, args); } } Map<String, Object> globalRestConfig = (Map<String, Object>) getGlobalConfig(context).get(STELLAR_REST_SETTINGS); Map<String, Object> getRestConfig = (Map<String, Object>) getGlobalConfig(context).get(STELLAR_REST_GET_SETTINGS); RestConfig restConfig = buildRestConfig(globalRestConfig, getRestConfig, functionRestConfig); try { HttpGet httpGet = buildGetRequest(uriString, queryParameters); return executeRequest(restConfig, httpGet); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage(), e); } catch (IOException e) { LOG.error(e.getMessage(), e); return restConfig.getErrorValueOverride(); } }
package com.wukong.concurrency.imooccache; /** * 描述: 线程池看缓存效果 */ public class ImoocCache11 { }
<reponame>owanturist/react-inner-store<gh_stars>0
import { useCallback } from "react"
import { act, renderHook } from "@testing-library/react-hooks"

import { Sweety, useWatchSweety } from "../../src"
import { Counter, WithSpy, WithStore } from "../common"

// Each tuple is [case name, hook under test]. Here the watcher closure is
// re-created on every render ("inline"), so useWatchSweety must re-run it
// on each render and store change.
describe.each([
  [
    "without comparator",
    ({ store, spy }: WithStore & WithSpy) => {
      return useWatchSweety(() => {
        spy()
        return store.getState()
      })
    },
  ],
  [
    "with inline comparator",
    ({ store, spy }: WithStore & WithSpy) => {
      return useWatchSweety(
        () => {
          spy()
          return store.getState()
        },
        (prev, next) => Counter.compare(prev, next),
      )
    },
  ],
  [
    "with memoized comparator",
    ({ store, spy }: WithStore & WithSpy) => {
      return useWatchSweety(() => {
        spy()
        return store.getState()
      }, Counter.compare)
    },
  ],
])("inline watcher %s", (_, useHook) => {
  // Shared fixture: a spy-instrumented watcher over a fresh store, rendered once.
  const setup = () => {
    const spy = vi.fn()
    const store = Sweety.of({ count: 1 })

    const { rerender } = renderHook(useHook, {
      initialProps: { store, spy },
    })

    return { spy, store, rerender }
  }

  it.concurrent("should call watcher 2 times on init", () => {
    const { spy } = setup()

    // 1st extracts the watcher result
    // 2nd subscribes to the included stores' changes
    expect(spy).toHaveBeenCalledTimes(2)
  })

  it.concurrent("should call watcher 2 times on subsequent renders", () => {
    const { spy, store, rerender } = setup()

    spy.mockReset()

    rerender({ spy, store })

    expect(spy).toHaveBeenCalledTimes(2)
  })

  it.concurrent(
    "should call watcher 3 times when a watching store changes",
    () => {
      const { spy, store } = setup()

      spy.mockReset()

      act(() => {
        store.setState(Counter.inc)
      })

      // 1st executes watcher to extract new result
      // --it causes reconciliation--
      // 2nd extracts the watcher result
      // 3rd subscribes to the included stores' changes
      expect(spy).toHaveBeenCalledTimes(3)
    },
  )
})

// Same case matrix, but the watcher is wrapped in useCallback so it keeps
// its identity across renders; plain re-renders must NOT re-run it.
describe.each([
  [
    "without comparator",
    ({ store, spy }: WithStore & WithSpy) => {
      return useWatchSweety(
        useCallback(() => {
          spy()
          return store.getState()
        }, [store, spy]),
      )
    },
  ],
  [
    "with inline comparator",
    ({ store, spy }: WithStore & WithSpy) => {
      return useWatchSweety(
        useCallback(() => {
          spy()
          return store.getState()
        }, [store, spy]),
        (prev, next) => Counter.compare(prev, next),
      )
    },
  ],
  [
    "with memoized comparator",
    ({ store, spy }: WithStore & WithSpy) => {
      return useWatchSweety(
        useCallback(() => {
          spy()
          return store.getState()
        }, [store, spy]),
        Counter.compare,
      )
    },
  ],
])("memoized watcher %s", (__, useHook) => {
  const setup = () => {
    const spy = vi.fn()
    const store = Sweety.of({ count: 1 })

    const { rerender } = renderHook(useHook, {
      initialProps: { store, spy },
    })

    return { spy, store, rerender }
  }

  it.concurrent("should call watcher 2 times on init", () => {
    const { spy } = setup()

    // 1st extracts the watcher result
    // 2nd subscribes to the included stores' changes
    expect(spy).toHaveBeenCalledTimes(2)
  })

  it.concurrent("should not call watcher on subsequent renders", () => {
    const { spy, store, rerender } = setup()

    spy.mockReset()

    rerender({ spy, store })

    expect(spy).not.toHaveBeenCalled()
  })

  it.concurrent(
    "should call watcher 1 time when a watching store changes",
    () => {
      const { spy, store } = setup()

      spy.mockReset()

      act(() => {
        store.setState(Counter.inc)
      })

      // 1st executes watcher to extract new result
      expect(spy).toHaveBeenCalledTimes(1)
    },
  )
})
package org.socraticgrid.hl7.services.uc.functional; import org.socraticgrid.hl7.services.uc.model.Message; public class SimpleMessageSumarizer { public String getMesageSummary(Message msg){ StringBuffer buf = new StringBuffer(); buf.append(msg.getHeader().getMessageType().name()); buf.append(" message"); return buf.toString(); } }
A, B, T = map(int, input().split()) for x in range(1, 21): if (x*A < T+0.5) & ((x+1)*A > T+0.5): print(B*x) break elif x*A > T+0.5: print(0) break
First terrestrial discovery of an extremely rare mineral called ringwoodite confirms theory about huge water ‘reservoirs’ 410 to 660 km beneath the surface of our planet, says a team of researchers led by Prof Graham Pearson from the University of Alberta, Canada. Ringwoodite is a form of the mineral peridot, believed to exist in large quantities under high pressures in the transition zone. Ringwoodite has been found in meteorites but, until now, no terrestrial sample has ever been unearthed because scientists haven’t been able to conduct fieldwork at extreme depths. Analysis of the mineral shows it contains a significant amount of water – 1.5 per cent of its weight. The mineral was found in 2008 in the Juina area of Mato Grosso, Brazil, where artisan miners unearthed the host diamond from shallow river gravels. The diamond had been brought to the Earth’s surface by a volcanic rock known as kimberlite – the most deeply derived of all volcanic rocks. “This sample really provides extremely strong confirmation that there are local wet spots deep in the Earth in this area. That particular zone in the Earth, the transition zone, might have as much water as all the world’s oceans put together,” Prof Pearson said. The discovery was almost accidental in that scientists had been looking for another mineral when they paid about USD 20 for a 3-mm-wide, dirty-looking brown diamond. The ringwoodite itself is invisible to the naked eye, buried beneath the surface, so it was fortunate that it was found by Prof Pearson’s team member, John McNeill, in 2009. “It’s so small, this inclusion, it’s extremely difficult to find, never mind work on, so it was a bit of a piece of luck, this discovery, as are many scientific discoveries,” said Prof Pearson, who is the first author of a paper appearing in the journal Nature. The sample underwent years of analysis using spectroscopy and X-ray diffraction before it was officially confirmed as ringwoodite. 
The discovery confirms about 50 years of theoretical and experimental work by geophysicists, seismologists and other scientists trying to understand the makeup of the Earth’s interior. Scientists have been deeply divided about the composition of the transition zone and whether it is full of water or desert-dry. Knowing water exists beneath the crust has implications for the study of volcanism and plate tectonics, affecting how rock melts, cools and shifts below the crust. “One of the reasons the Earth is such a dynamic planet is because of the presence of some water in its interior. Water changes everything about the way a planet works,” Prof Pearson concluded. ______ D.G. Pearson et al. 2014. Hydrous mantle transition zone indicated by ringwoodite included within diamond. Nature 507, 221–224; doi: 10.1038/nature13080
However: women, non-binary, trans and genderqueer folks to the front!
You are welcome to contribute drawings, collages, gifs, and other artistic media you might be inspired to create (remember, think “digital zine”), in addition to your write-up. I also encourage you to include links to their vimeo pages, distro sites, websites, etc. (Also: you’re allowed to write your own page on yourself and your work too! Don’t be shy!) How to sign up: Write your name in Column B next to the filmmaker of your choosing. Write your email address in Column C. Then, work on your write-up/ zine-page! Send a finished word doc or pdf and other related or attached media to Kelly Gallagher at [email protected]. Kelly will then work to begin compiling the digital zine which will be made available free on this site. There is no due date for digital zine page submissions! Submissions are rolling!
<filename>apps/app/ui-tests-app/list-view/main-view-model.ts import { Observable } from "tns-core-modules/data/observable"; import { ObservableArray } from "tns-core-modules/data/observable-array"; export class Item extends Observable { private _name: string; private _id: number; private _age: number; constructor(name: string, id: number, age: number) { super(); this._name = name; this._id = id; this._age = age; } get name(): string { return this._name; } set name(value: string) { if (this._name !== value) { this._name = value; this.notifyPropertyChange("name", value) } } get id(): number { return this._id; } set id(value: number) { if (this._id !== value) { this._id = value; this.notifyPropertyChange("id", value) } } get age(): number { return this._age; } set age(value: number) { if (this._age !== value) { this._age = value; this.notifyPropertyChange("age", value) } } public toString() { return `${this.name} ${this.id}`; } } export class ViewModel extends Observable { private _items: ObservableArray<Item>; get items(): ObservableArray<Item> { this._items = new ObservableArray<Item>(); for (let i = 0; i < 100; i++) { this._items.push(new Item(`Item`, i, 0)); } return this._items; } }
/** * Diff for missing/deleted resources. */ private void diffViewForMissingAndRemoved() { URL lastCommitedFileURL = null; try { lastCommitedFileURL = GitRevisionURLHandler.encodeURL( VersionIdentifier.INDEX_OR_LAST_COMMIT, file.getFileLocation()); } catch (MalformedURLException e1) { if (logger.isDebugEnabled()) { logger.debug(e1, e1); } } showDiffFrame(null, lastCommitedFileURL, null); }
Long may this continue: here we are, heading into Christmas, and the 20 men who started the season as Barclays Premier League managers all retain their jobs. If it was down to me, they would all still be in their roles on May 24 next year, when the campaign reaches its conclusion. I’d certainly back a rule being brought in that prevented clubs sacking their manager during a season, as there are so many implications from one decision. Alan Pardew came under fire earlier this season and was favourite to be the first boss sacked in the top flight Newcastle have won six of their last nine games, including victory over previously unbeaten Chelsea Think about the negative domino effect that is created: if one club is performing poorly and sacks its manager, that means another club will lose their manager through no fault of their own. They, in turn, then have no option but to try to lure someone else away... and on it goes. Why should owners and chairman be able to do that? If the man they started out with in August isn’t delivering, they shouldn’t be allowed to correct their own mistake by inflicting huge change elsewhere: they make the appointments, so they should live with the consequences. How long this stability continues is anyone’s guess, given that there is a lot of scrutiny on some managers, but it is looking like this will be the first season since the opening Premier League campaign in 1992-93 that no managerial changes will have taken place before Christmas. What I have found fascinating this year is how some managers, who were seemingly walking a tightrope not so long ago when they were in the middle of bad runs, are now being hailed for doing ‘brilliant’ jobs. Use Alan Pardew as an example. The outsiders’ perception is that he will have changed his methods to generate Newcastle’s improvement but the reality will be different. New signings, like Ayoze Perez, have settled, while Moussa Sissoko is showing the form he did when first arriving here. 
Sam Allardyce is another case in point. West Ham’s owners wanted him to play more attacking football and it looks like that is what he has given them. Has Allardyce, with more than 20 years’ coaching experience, completely altered his approach in one summer? Sam Allardyce also came under fire early in the season after an underwhelming start by West Ham Yet signings like Diafra Sakho (front) have propelled the Hammers to fourth in the Premier League table Or is it down to two new signings — Enner Valencia and Diafra Sakho — making an instant impression and Stewart Downing hitting the form of his life? David Moyes’ recruitment at Everton was excellent. Players such as Tim Cahill and Seamus Coleman were real finds, but during his time at Manchester United he was constantly reminded about how his one big signing, Marouane Fellaini, was struggling. Similarly, look at Brendan Rodgers. He said this week ‘it wasn’t rocket science’ to see why Liverpool’s fortunes had dipped. Losing strikers of the calibre of Luis Suarez and Daniel Sturridge would have an impact on any side. It shows how vital it is to buy well and keep your good players. I’ve long felt managers receive too much criticism when things are going badly and they can receive too much praise when they are winning. Of course, as Sir Alex Ferguson says, the manager must be the most important person at a club — but is he more important than players in securing results? As in any walk of life, there will be some who are more capable of influencing situations than others. Ferguson was a serial winner wherever he went and he could be worth up to 10 points a season for Manchester United. Likewise, Jose Mourinho gets results no matter what club he is at. Both men would tell you they have been blessed to work with great squads of players. 
Some would say they were fortunate to be able to utilise such talent, but let’s not forget how good they have been at attracting the right men to add to their teams; they know how essential recruitment is. Brendan Rodgers has struggled in his second season at Liverpool after losing Luis Suarez The Uruguayan hitman now plays in Barcelona, leaving Liverpool with a daunting hole to fill Every manager I speak to tells me the most important part of the job is recruitment and it is impossible to disagree. When you speak to someone who loses their job abruptly, the first reason they will give is that their signings didn’t work out. Nothing more needs to be said after that. That is why you have to wonder whether there is any point in sacking a manager in the middle of a season. Yes, clubs tend to benefit with an initial boost for the first five or six games when a new man comes in but then, after the honeymoon period, results tend to level back out. Again, there is evidence of men who have come in and transformed a club’s fortunes. Tony Pulis, at Crystal Palace last season, is probably the most spectacular example of someone rescuing a team that looked to be dead and buried. Owen Coyle (with Bolton) and the late Alan Ball (at Southampton in 1994), however, are the only others who have joined a club in the relegation zone after Christmas and been able to keep them in the Premier League. Alan Ball (left) embraces Matthew Le Tissier - the pair helped Southampton escape the drop in 1994 Crystal Palace were bottom when Tony Pulis took over last December; by May he had taken them to 11th Changing managers doesn’t mean a club is waving a magic wand to make things better and that is why I keep coming back to players being absolutely essential to the prospects of those who oversee operations from the sidelines. Take Louis van Gaal and Manchester United. 
When he first arrived at Old Trafford and he had those poor results early on, it was questioned whether he could adapt to the English game or if the methods that had served him well in Europe and with the Dutch national team would pay off. Four months on and United are thriving, but is it just down to Van Gaal? No. United spent huge money to buy top talents, such as Angel di Maria, and have others in Robin van Persie and Wayne Rooney who are making a difference. Their balance is starting to look right. And that is essential. If players perform, managers flourish and then you have a winning formula. If players lose their way, the manager, ultimately, will pay. Louis van Gaal has steered United to five straight wins and his side are being talked about as title contenders De Gea could be player of the year You have to go back to 1985 for the last time a goalkeeper was recognised as being the outstanding player of a season. Back then, Neville Southall — a man I grew up watching — received the FWA award for the remarkable form he showed when Everton won the League Championship and last week at Old Trafford I began to wonder whether David de Gea might emulate him. There has been a lot of debate about where De Gea stands at the moment in terms of the goalkeepers we have in this country, but there is no doubt in my mind he is now the No 1, having just edged past Chelsea’s Thibaut Courtois. David de Gea has been nothing short of exceptional this season, pulling off a string of fine saves Thibaut Courtois, who took over from De Gea at Atletico Madrid, has impressed in his first year in England Neville Southall won the FWA award in 1985 helping the Toffees to win the First Division title by 13 points I thought his performance against Liverpool was amazing. He never did anything wrong at any stage in the game and, most importantly, gave his defenders the confidence that he would save them when he was called into action. 
He looks a different goalkeeper to the one who first arrived in England. If De Gea keeps playing as he has been, it will be difficult to overlook him at the end of the season. With Manuel Neuer in contention for the Ballon d’Or, we are seeing what a difference a top-class goalkeeper makes to a team. History shows how hard it is for No 1s to be singled out — Pat Jennings (1976) and Peter Shilton (1978) are the only keepers to win the PFA gong — but if De Gea maintains this consistency, it might be hard to overlook him. This week I'm excited about... Sky signing Thierry Henry Thierry Henry is the best of the Premier League era There has been widespread acclaim for Thierry after he announced his retirement this week — and all of the praise has been richly deserved. At the peak of his powers, it was almost impossible to stop him. I will never forget the moment, during one game at Anfield, when he engaged overdrive and ran away from me with a change of speed I had not witnessed. It felt as though I was trying to chase after someone on a motorbike. He is rightly regarded as the best player of the Barclays Premier League era and looking back at some of the footage that has been shown of him in recent days provided a reminder of just how ahead of the game Thierry was; the medals he won accurately reflect his talent.
<reponame>DreamTeamGDL/SmartGL import IPoint from "./interfaces/IPoint"; import IBufferSettings from "./interfaces/IBufferSettings"; import {mat4} from "gl-matrix"; export default class GLVector implements Iterable<IPoint> { public readonly size: number; public readonly program: WebGLProgram; private readonly gl: WebGLRenderingContext; private points: IPoint[]; private transformMatrix: mat4 | null = null; private attributeAddress: number = -1; private bufferObj: WebGLBuffer | null = null; private rawArray: Float32Array | null = null; private bufferSettings: IBufferSettings[] = []; private currentSetting = 0; public static reuse(other: GLVector): GLVector { const vector = new GLVector(other.gl, other.size); vector.attributeAddress = other.attributeAddress; vector.bufferObj = other.bufferObj; return vector; } public constructor( gl: WebGLRenderingContext, elementSize: number, vertex: string = "shader-vs", fragment: string = "shader-fs" ) { this.gl = gl; this.size = elementSize; this.points = []; const vertexCodeElement = document.getElementById(vertex); const fragmentCodeElement = document.getElementById(fragment); if (vertexCodeElement == null || fragmentCodeElement == null) { throw new Error("Could not find shaders"); } const vertexShaderSrc = vertexCodeElement.innerText; const fragmentShaderSrc = fragmentCodeElement.innerText; const vertexShader = this.createShader(this.gl.VERTEX_SHADER, vertexShaderSrc); const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, fragmentShaderSrc); this.program = this.createProgram(vertexShader, fragmentShader); } public get buffer(): WebGLBuffer { if (this.bufferObj == null) { this.bufferObj = this.initBuffer(); } return this.bufferObj; } public get transform(): mat4 { if (this.transformMatrix == null) { this.transformMatrix = mat4.create(); } return this.transformMatrix; } public set transform(matrix: mat4) { this.transformMatrix = matrix; } public get attribute(): number { return this.attributeAddress; } public set 
attributeName(name: string) { this.attributeAddress = this.gl.getAttribLocation(this.program, name); } public addPoint(point: IPoint): GLVector { this.points.push(point); return this; } public clear() { this.points = []; } public addBufferSetting(setting: IBufferSettings) { this.bufferSettings.push(setting); } public nextBufferSettings(): IBufferSettings { const settings = this.bufferSettings[this.currentSetting]; this.currentSetting++; this.currentSetting %= this.bufferSettings.length; return settings; } public hasBufferSettings(): boolean { return this.bufferSettings.length != 0; } public get array(): Float32Array { const array: number[] = []; this.points.forEach(point => { const rawPoint = point.asArray(); for (let i = 0; i < this.size; i++) { array.push(rawPoint[i]); } }); this.rawArray = new Float32Array(array); return this.rawArray; } public updateBuffer(): void { const values = this.array; this.gl.bindBuffer(this.gl.ARRAY_BUFFER, this.bufferObj); this.gl.bufferData(this.gl.ARRAY_BUFFER, values, this.gl.STATIC_DRAW); } public initBuffer(): WebGLBuffer { const buffer = this.gl.createBuffer(); if (buffer == null) throw new Error("Buffer could not be allocated"); this.updateBuffer(); return buffer; } public [Symbol.iterator](): Iterator<IPoint> { return this.points.values(); } private createShader(type: number, source: string): WebGLShader { const shader = this.gl.createShader(type); if (shader == null) throw new Error("Shader could not be created"); this.gl.shaderSource(shader, source); this.gl.compileShader(shader); const success = this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS); if (!success) { const log = this.gl.getShaderInfoLog(shader); this.gl.deleteShader(shader); throw new Error(`Could not create shader: \n${log}`); } return shader; } private createProgram(...shaders: WebGLShader[]): WebGLProgram { const program = this.gl.createProgram(); if (program == null) throw new Error("Program could not be created"); shaders.forEach(shader => 
this.gl.attachShader(program, shader)); this.gl.linkProgram(program); const success = this.gl.getProgramParameter(program, this.gl.LINK_STATUS); if (!success) { const log = this.gl.getProgramInfoLog(program); this.gl.deleteProgram(program); throw new Error(`Could not create shader: \n${log}`); } return program; } }
/** * * @author vincent * @author walker * */ public class Shielded implements ShieldedInterface{ private double myTotalShieldHealth; private double myCurrentShieldHealth; private double myTotalTime; private double myTimeRemaining; public Shielded(){ myTotalShieldHealth=0.0; myCurrentShieldHealth = 0.0; myTotalTime = 0.0; myTimeRemaining = 0.0; } public Shielded (double health, double time) { myTotalShieldHealth=health; myCurrentShieldHealth=health; setTotalTime(time); myTimeRemaining=time; } @Override public double getCurrentHealth () { return myCurrentShieldHealth; } @Override public void decreaseCurrentHealth (double health) { myCurrentShieldHealth-=health; } @Override public double getTotalShieldHealth () { return myTotalShieldHealth; } @Override public void setTotalShieldHealth (double health) { myTotalShieldHealth=health; } @Override public void setCurrentShieldHealth (double health) { myCurrentShieldHealth=health; } @Override public double getTimeRemaining () { return myTimeRemaining; } @Override public double getTotalTime () { return myTotalTime; } @Override public void setTotalTime (double time) { myTotalTime=time; } @Override public void decreaseRemainingTime (double time) { myTimeRemaining-=time; } @Override public void setTimeRemaining (double time) { myTimeRemaining=time; } }
import random print('주사위를 굴립니다') com = random.randint(1, 6) user = random.randint(1, 6) print('컴퓨터의 주사위 눈은 ' + str(com) + '입니다') print('당신의 주사위 눈은 ' + str(user) + '입니다')
// AsSha256OfOrderedMap hash function // My tests showed this is unnecessary because apparently, the order of the key is the same // if the content of the row is the same. So, two rows could have different field order, but // it should be always the same for the specific row func AsSha256OfOrderedMap(o map[string]interface{}, orderedKey []string) string { type ordered struct { key string value interface{} } var list []ordered for _, j := range orderedKey { a := o[j] list = append(list, ordered{ key: j, value: a, }) } return AsSha256(list) }
<reponame>silloi/yuutsu-records /** * @file Vuex内で共通して使う型、インターフェース */ import { Store as CounterStore } from '@/store/counter' export type RootStore = CounterStore export type BaseAxiosAction = { key?: symbol } export type AxiosAction<T, P = {}> = BaseAxiosAction & (T extends void ? {} : { data: T }) & P
/** * A simple state-machine for parsing CSV records. * * The parser starts in this state. */ static void state_normal (struct CSV_context *ctx) { if (ctx->c_in == ctx->delimiter) { ctx->state = STATE_STOP; return; } switch (ctx->c_in) { case -1: ctx->state = STATE_EOF; break; case '"': ctx->state = STATE_QUOTED; break; case '\r': break; case '\n': if (ctx->field_num > 0) ctx->state = STATE_STOP; break; case '#': if (ctx->field_num == 0) ctx->state = STATE_COMMENT; else PUTC (ctx->c_in); break; default: PUTC (ctx->c_in); break; } }
package com.symantec.cpe.analytics; import io.dropwizard.Configuration; import javax.validation.Valid; public class KafkaMonitorConfiguration extends Configuration { @Valid private String zookeeperUrls = "localhost:2181"; @Valid private int refreshSeconds = 60; @Valid private String statsDHost = "localhost"; @Valid private int statsDPort = 8125; @Valid private String statsDPrefix = "kafka-monitoring"; @Valid private boolean pushToStatsD = false; public String getZookeeperUrls() { return zookeeperUrls; } public void setZookeeperUrls(String zookeeperUrls) { this.zookeeperUrls = zookeeperUrls; } public int getRefreshSeconds() { return refreshSeconds; } public void setRefreshSeconds(int refreshSeconds) { this.refreshSeconds = refreshSeconds; } public String getStatsDHost() { return statsDHost; } public void setStatsDHost(String statsDHost) { this.statsDHost = statsDHost; } public int getStatsDPort() { return statsDPort; } public void setStatsDPort(int statsDPort) { this.statsDPort = statsDPort; } public String getStatsDPrefix() { return statsDPrefix; } public void setStatsDPrefix(String statsDPrefix) { this.statsDPrefix = statsDPrefix; } public boolean isPushToStatsD() { return pushToStatsD; } public void setPushToStatsD(boolean pushToStatsD) { this.pushToStatsD = pushToStatsD; } }
/** * Returns true if the message corresponding to this messageSeq and epoch is complete, returns false otherwise. */ public boolean isFragmentedMessageComplete(Integer messageSeq, Integer epoch) { FragmentKey key = new FragmentKey(messageSeq, epoch); FragmentCollector collector = fragments.get(key); if (collector == null) { return false; } return collector.isMessageComplete(); }
<filename>marklogic-junit5/src/test/java/com/marklogic/junit5/spring/XmlNodeTest.java package com.marklogic.junit5.spring; import com.marklogic.client.io.StringHandle; import com.marklogic.junit5.MarkLogicNamespaceProvider; import com.marklogic.junit5.NamespaceProvider; import com.marklogic.junit5.XmlNode; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; public class XmlNodeTest extends AbstractSpringMarkLogicTest { @Override protected NamespaceProvider getNamespaceProvider() { return new MarkLogicNamespaceProvider("m", "org:example"); } @Test public void test() { getDatabaseClient().newXMLDocumentManager().write("/test/1.xml", new StringHandle("" + "<message xmlns='org:example'>" + "<color important='true'>red</color>" + "<color>blue</color>" + "<size>medium</size>" + "<parent><kid>hello</kid></parent>" + "</message>")); XmlNode xml = readXmlDocument("/test/1.xml"); assertEquals("/test/1.xml", xml.getUri()); xml.assertElementValue("/m:message/m:size", "medium"); assertEquals("medium", xml.getElementValue("/m:message/m:size")); assertEquals("true", xml.getAttributeValue("/m:message/m:color[. = 'red']", "important")); xml.assertElementExists("/m:message"); xml.assertElementCount("/m:message/m:color", 2); xml.getXmlNode("/m:message/m:parent").assertElementExists("/m:parent/m:kid[. = 'hello']"); assertEquals(2, xml.getXmlNodes("/m:message/m:color").size()); XmlNode other = new XmlNode(xml); Assertions.assertNotNull(other.getInternalDoc()); xml.prettyPrint(); assertNotNull(xml.getPrettyXml()); } }
import torch
from torch.nn import functional as F
from torch.nn.modules import loss


class DistributionLoss(loss._Loss):
    """Cross-entropy between a student's output distribution and a teacher's
    soft labels, computed as -dot(log_softmax(student), softmax(teacher))
    averaged over the batch."""

    def forward(self, model_output, real_output):
        # NOTE(review): mutating self inside forward is unusual; size_average is
        # forced to True on every call, so the sum branch below is unreachable.
        self.size_average = True

        # Target is ignored at training time. Loss is defined as KL divergence
        # between the model output and the refined labels.
        if real_output.requires_grad:
            raise ValueError("real network output should not require gradients.")

        model_output_log_prob = F.log_softmax(model_output, dim=1)
        real_output_soft = F.softmax(real_output, dim=1)
        del model_output, real_output  # free the raw logits early

        # Loss is -dot(model_output_log_prob, real_output). Prepare tensors
        # for batch matrix multiplication: (B,1,C) x (B,C,1) -> (B,1,1).
        real_output_soft = real_output_soft.unsqueeze(1)
        model_output_log_prob = model_output_log_prob.unsqueeze(2)

        # Compute the loss, and average/sum over the batch.
        cross_entropy_loss = -torch.bmm(real_output_soft, model_output_log_prob)
        if self.size_average:
            cross_entropy_loss = cross_entropy_loss.mean()
        else:
            cross_entropy_loss = cross_entropy_loss.sum()

        # NOTE(review): comment below is stale — only the scalar loss is returned.
        # Return a pair of (loss_output, model_output). Model output will be
        # used for top-1 and top-5 evaluation.
        # model_output_log_prob = model_output_log_prob.squeeze(2)
        return cross_entropy_loss


class DistillationLoss(torch.nn.Module):
    """Weighted sum of hard-label cross-entropy and teacher-distribution loss:
    alpha * distribution_loss + (1 - alpha) * cross_entropy."""

    def __init__(self, alpha=0.9):
        super(DistillationLoss, self).__init__()
        self.criterion1 = torch.nn.CrossEntropyLoss()  # hard labels
        self.criterion2 = DistributionLoss()           # teacher soft labels
        self.alpha = alpha

    def forward(self, stu_model_output, tea_model_output, target):
        # Returns (combined loss, plain cross-entropy) so callers can log both.
        loss1 = self.criterion1(stu_model_output, target)
        loss2 = self.criterion2(stu_model_output, tea_model_output)
        loss = self.alpha * loss2 + (1. - self.alpha) * loss1
        return loss, loss1


class KdLoss(torch.nn.Module):
    """Hinton-style knowledge distillation: temperature-scaled KL divergence
    against the teacher plus hard-label cross-entropy."""

    def __init__(self, alpha=0.9, T=5):
        super(KdLoss, self).__init__()
        self.alpha = alpha
        self.T = T  # softmax temperature
        # NOTE(review): KLDivLoss is used with its default reduction, which
        # averages over all elements rather than per batch item ('batchmean');
        # confirm this matches the intended scaling before reuse.
        self.criterion = torch.nn.KLDivLoss()

    def forward(self, outputs, teacher_outputs, labels):
        alpha = self.alpha
        T = self.T
        # KL term is scaled by T^2 to keep gradient magnitudes comparable
        # across temperatures (Hinton et al., 2015).
        KD_loss = self.criterion(F.log_softmax(outputs / T, dim=1),
                                 F.softmax(teacher_outputs / T, dim=1)) * (alpha * T * T) + \
            F.cross_entropy(outputs, labels) * (1. - alpha)
        return KD_loss
MIPSers: MIPS extension release 6 simulator MIPSers is a simulator based on the current Release 6 version of Microprocessor without Interlocked Pipeline Stages (MIPS). Currently, there are no existing MIPS simulators that support MIPS Release 6. Hence, MIPSers is the only simulator that can demonstrate the major changes in the instruction set such as branch without branch delay slots, multiplication and division without the use of HI/LO registers, selection operations, floating point comparison and bit swap operations. Users can use this tool to visualize the new introductions made by Imagination Technologies in MIPS Architecture. The design and performance of the simulator was assessed through manual verification and comparisons with other simulators.
/*
 * Copyright (c) 2015 EMC Corporation
 * All Rights Reserved
 */
package com.emc.storageos.api.service.impl.resource;

import static com.emc.storageos.api.mapper.ComputeMapper.map;
import static com.emc.storageos.api.mapper.DbObjectMapper.toNamedRelatedResource;

import java.net.URI;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;

import com.emc.storageos.api.mapper.ComputeMapper;
import com.emc.storageos.api.service.impl.resource.utils.ComputeSystemUtils;
import com.emc.storageos.api.service.impl.response.BulkList;
import com.emc.storageos.db.client.constraint.ContainmentConstraint;
import com.emc.storageos.db.client.constraint.URIQueryResultList;
import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.ComputeElement;
import com.emc.storageos.db.client.model.ComputeVirtualPool;
import com.emc.storageos.db.client.model.Cluster;
import com.emc.storageos.db.client.model.Host;
import com.emc.storageos.db.client.model.StringSet;
import com.emc.storageos.db.client.model.DiscoveredDataObject.RegistrationStatus;
import com.emc.storageos.db.client.util.NullColumnValueGetter;
import com.emc.storageos.db.exceptions.DatabaseException;
import com.emc.storageos.model.BulkIdParam;
import com.emc.storageos.model.ResourceTypeEnum;
import com.emc.storageos.model.compute.ComputeElementBulkRep;
import com.emc.storageos.model.compute.ComputeElementList;
import com.emc.storageos.model.compute.ComputeElementRestRep;
import com.emc.storageos.security.authorization.CheckPermission;
import com.emc.storageos.security.authorization.DefaultPermissions;
import com.emc.storageos.security.authorization.Role;
import com.emc.storageos.services.OperationTypeEnum;
import com.emc.storageos.svcs.errorhandling.resources.APIException;
import com.emc.storageos.svcs.errorhandling.resources.BadRequestException;
import com.emc.storageos.volumecontroller.impl.monitoring.RecordableBourneEvent;
import com.emc.storageos.volumecontroller.impl.monitoring.RecordableEventManager;
import com.emc.storageos.volumecontroller.impl.monitoring.cim.enums.RecordType;
import com.google.common.base.Function;

/**
 * REST service for ViPR compute elements (blades): listing, retrieval,
 * register/deregister, and bulk queries. Deregistering an element also removes
 * it from every compute virtual pool that matched it; registering refreshes
 * dynamically-matched pools.
 */
@Path("/vdc/compute-elements")
@DefaultPermissions(readRoles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR },
        writeRoles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
public class ComputeElementService extends TaskResourceService {

    private static final String EVENT_SERVICE_TYPE = "ComputeElement";
    private static final String EVENT_SERVICE_SOURCE = "ComputeElementService";

    @Autowired
    private RecordableEventManager _evtMgr;

    @Autowired
    private ComputeVirtualPoolService computeVirtualPoolService;

    @Override
    public String getServiceType() {
        return EVENT_SERVICE_TYPE;
    }

    private static final Logger _log = LoggerFactory.getLogger(ComputeElementService.class);

    @Override
    protected URI getTenantOwner(URI id) {
        // Compute elements are system-level resources: no tenant owner.
        return null;
    }

    /**
     * Loads the compute element with the given id, failing with a validation
     * error when it does not exist.
     */
    @Override
    protected ComputeElement queryResource(URI id) {
        ArgValidator.checkUri(id);
        ComputeElement ce = _dbClient.queryObject(ComputeElement.class, id);
        ArgValidator.checkEntity(ce, id, isIdEmbeddedInURL(id));
        return ce;
    }

    @Override
    protected ResourceTypeEnum getResourceType() {
        return ResourceTypeEnum.COMPUTE_ELEMENT;
    }

    /**
     * Gets the compute element with the passed id from the database.
     *
     * @param id the URN of a ViPR compute element.
     *
     * @return A reference to the registered compute element.
     *
     * @throws BadRequestException When the compute element is not registered.
     */
    protected ComputeElement queryRegisteredResource(URI id) {
        ArgValidator.checkUri(id);
        ComputeElement ce = _dbClient.queryObject(ComputeElement.class, id);
        ArgValidator.checkEntityNotNull(ce, id, isIdEmbeddedInURL(id));
        if (!RegistrationStatus.REGISTERED.toString().equalsIgnoreCase(ce.getRegistrationStatus())) {
            throw APIException.badRequests.resourceNotRegistered(ComputeElement.class.getSimpleName(), id);
        }
        return ce;
    }

    /**
     * Gets the ids and self links for all compute elements.
     *
     * @brief List compute elements
     * @return A ComputeElementList reference specifying the ids and self links for
     *         the compute elements.
     */
    @GET
    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR })
    public ComputeElementList getComputeElements() {
        ComputeElementList computeElements = new ComputeElementList();
        // true -> only active (non-deleted) ids.
        List<URI> ids = _dbClient.queryByType(ComputeElement.class, true);
        for (URI id : ids) {
            ComputeElement computeElement = _dbClient.queryObject(ComputeElement.class, id);
            if (computeElement != null && !computeElement.getInactive()) {
                computeElements.getComputeElements().add(toNamedRelatedResource(computeElement));
            }
        }
        return computeElements;
    }

    /**
     * Gets the data for a compute element.
     *
     * @param id the URN of a ViPR compute element.
     *
     * @brief Show compute element
     * @return A ComputeElementRestRep reference specifying the data for the
     *         compute element with the passed id.
     */
    @GET
    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @Path("/{id}")
    @CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR })
    public ComputeElementRestRep getComputeElement(@PathParam("id") URI id) {
        ArgValidator.checkFieldUriType(id, ComputeElement.class, "id");
        ComputeElement ce = queryResource(id);
        ArgValidator.checkEntity(ce, id, isIdEmbeddedInURL(id));
        // Include the associated host and its cluster (if any) in the response.
        Host associatedHost = getAssociatedHost(ce, _dbClient);
        Cluster cluster = null;
        if (associatedHost != null && !NullColumnValueGetter.isNullURI(associatedHost.getCluster())) {
            cluster = _dbClient.queryObject(Cluster.class, associatedHost.getCluster());
        }
        return ComputeMapper.map(ce, associatedHost, cluster);
    }

    /**
     * Allows the user to deregister a registered compute element so that it is no
     * longer used by the system. This simply sets the registration_status of
     * the compute element to UNREGISTERED.
     *
     * @param id the URN of a ViPR compute element to deregister.
     *
     * @brief Unregister compute element
     * @return Status indicating success or failure.
     */
    @POST
    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @Path("/{id}/deregister")
    @CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
    public ComputeElementRestRep deregisterComputeElement(@PathParam("id") URI id) {
        ArgValidator.checkFieldUriType(id, ComputeElement.class, "id");
        ComputeElement ce = queryResource(id);
        // A provisioned element (one backing a host) must not be deregistered.
        URIQueryResultList uris = new URIQueryResultList();
        _dbClient.queryByConstraint(ContainmentConstraint.Factory
                .getHostComputeElementConstraint(ce.getId()), uris);
        List<Host> hosts = _dbClient.queryObject(Host.class, uris, true);
        if (!hosts.isEmpty()) {
            throw APIException.badRequests.unableToDeregisterProvisionedComputeElement(ce.getLabel(),
                    hosts.get(0).getHostName());
        }
        if (RegistrationStatus.REGISTERED.toString().equalsIgnoreCase(ce.getRegistrationStatus())) {
            ce.setRegistrationStatus(RegistrationStatus.UNREGISTERED.toString());
            _dbClient.persistObject(ce);
            // Remove the element being deregistered from all CVPs it is part of.
            URIQueryResultList cvpList = new URIQueryResultList();
            _log.debug("Looking for CVPs this blade is in");
            _dbClient.queryByConstraint(ContainmentConstraint.Factory
                    .getMatchedComputeElementComputeVirtualPoolConstraint(id), cvpList);
            Iterator<URI> cvpListItr = cvpList.iterator();
            while (cvpListItr.hasNext()) {
                ComputeVirtualPool cvp = _dbClient.queryObject(ComputeVirtualPool.class, cvpListItr.next());
                _log.debug("Found cvp:" + cvp.getLabel() + "containing compute element being deregistered");
                // Rebuild the matched-element set without this element.
                StringSet currentElements = new StringSet();
                if (cvp.getMatchedComputeElements() != null) {
                    currentElements.addAll(cvp.getMatchedComputeElements());
                    currentElements.remove(ce.getId().toString());
                }
                cvp.setMatchedComputeElements(currentElements);
                _dbClient.updateAndReindexObject(cvp);
                _log.debug("Removed ce from cvp");
            }
            // Record the compute element deregister event.
            // recordComputeElementEvent(OperationTypeEnum.DEREGISTER_COMPUTE_ELEMENT,
            // COMPUTE_ELEMENT_DEREGISTERED_DESCRIPTION, ce.getId());
            recordAndAudit(ce, OperationTypeEnum.DEREGISTER_COMPUTE_ELEMENT, true, null);
        }
        return ComputeMapper.map(ce, null, null);
    }

    /**
     * Manually register the discovered compute element with the passed id on the
     * registered compute system with the passed id.
     *
     * @param computeElementId The id of the compute element.
     *
     * @brief Register compute system compute element
     * @return A reference to a ComputeElementRestRep specifying the data for the
     *         registered compute element.
     */
    @POST
    @Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
    @Path("/{id}/register")
    public ComputeElementRestRep registerComputeElement(@PathParam("id") URI id) {
        ArgValidator.checkFieldUriType(id, ComputeElement.class, "id");
        ComputeElement ce = _dbClient.queryObject(ComputeElement.class, id);
        ArgValidator.checkEntity(ce, id, isIdEmbeddedInURL(id));
        // NOTE(review): checkEntity above presumably already rejects a null ce,
        // which would make this null check unreachable — verify.
        if (ce == null) {
            throw APIException.badRequests.computeElementNotFound(id);
        }
        if (ce.getComputeSystem() == null) {
            throw APIException.badRequests.computeElementNotBelongingToSystem(id, null);
        } else {
            // The owning compute system must itself be registered.
            ComputeSystemUtils.queryRegisteredSystem(ce.getComputeSystem(), _dbClient,
                    isIdEmbeddedInURL(ce.getComputeSystem()));
        }
        // If not registered, register it; otherwise do nothing.
        if (RegistrationStatus.UNREGISTERED.toString().equalsIgnoreCase(ce.getRegistrationStatus())) {
            registerComputeElement(ce);
            // Refresh every CVP that uses dynamic matching so it can pick up this element.
            List<URI> cvpIds = _dbClient.queryByType(ComputeVirtualPool.class, true);
            Iterator<ComputeVirtualPool> iter = _dbClient.queryIterativeObjects(ComputeVirtualPool.class, cvpIds);
            while (iter.hasNext()) {
                ComputeVirtualPool cvp = iter.next();
                if (cvp.getUseMatchedElements()) {
                    _log.debug("Compute pool " + cvp.getLabel()
                            + " configured to use dynamic matching -- refresh matched elements");
                    computeVirtualPoolService.getMatchingCEsforCVPAttributes(cvp);
                    _dbClient.updateAndReindexObject(cvp);
                }
            }
        }
        return map(ce, null, null);
    }

    /** Marks the element REGISTERED, persists it, and records the event/audit entry. */
    private void registerComputeElement(ComputeElement ce) {
        ce.setRegistrationStatus(RegistrationStatus.REGISTERED.toString());
        _dbClient.updateAndReindexObject(ce);
        recordAndAudit(ce, OperationTypeEnum.REGISTER_COMPUTE_ELEMENT, true, null);
    }

    /**
     * Retrieves resource representations based on input ids.
     *
     * @param param POST data containing the id list.
     * @brief List data of compute element resources
     * @return list of representations.
     *
     * @throws DatabaseException When an error occurs querying the database.
     */
    @POST
    @Path("/bulk")
    @Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    @Override
    public ComputeElementBulkRep getBulkResources(BulkIdParam param) {
        return (ComputeElementBulkRep) super.getBulkResources(param);
    }

    /** Maps each queried element (plus its host/cluster, if any) into a REST rep. */
    @Override
    public ComputeElementBulkRep queryBulkResourceReps(List<URI> ids) {
        Iterator<ComputeElement> _dbIterator =
                _dbClient.queryIterativeObjects(getResourceClass(), ids);
        return new ComputeElementBulkRep(BulkList.wrapping(_dbIterator,
                new Function<ComputeElement, ComputeElementRestRep>() {
                    @Override
                    public ComputeElementRestRep apply(ComputeElement ce) {
                        Host associatedHost = getAssociatedHost(ce, _dbClient);
                        Cluster cluster = null;
                        if (associatedHost != null && !NullColumnValueGetter.isNullURI(associatedHost.getCluster())) {
                            cluster = _dbClient.queryObject(Cluster.class, associatedHost.getCluster());
                        }
                        ComputeElementRestRep restRep = ComputeMapper.map(ce, associatedHost, cluster);
                        return restRep;
                    }
                }));
    }

    /** Returns the (single) host provisioned on this compute element, or null. */
    private Host getAssociatedHost(ComputeElement ce, DbClient dbClient) {
        Host associatedHost = null;
        URIQueryResultList uris = new URIQueryResultList();
        _dbClient.queryByConstraint(ContainmentConstraint.Factory
                .getHostComputeElementConstraint(ce.getId()), uris);
        List<Host> hosts = _dbClient.queryObject(Host.class, uris, true);
        // we expect to find just one host that uses this CE
        if (hosts != null && !hosts.isEmpty()) {
            associatedHost = hosts.get(0);
        }
        return associatedHost;
    }

    @SuppressWarnings("unchecked")
    @Override
    public Class<ComputeElement> getResourceClass() {
        return ComputeElement.class;
    }

    /**
     * Record ViPR Event for the completed operations
     *
     * @param computeElement
     * @param type
     * @param description
     */
    private void recordComputeEvent(ComputeElement computeElement, OperationTypeEnum typeEnum,
            boolean status) {
        RecordableBourneEvent event = new RecordableBourneEvent(
                /* String */typeEnum.getEvType(status),
                /* tenant id */null,
                /* user id ?? */URI.create("ViPR-User"),
                /* project ID */null,
                /* CoS */null,
                /* service */EVENT_SERVICE_TYPE,
                /* resource id */computeElement.getId(),
                /* description */typeEnum.getDescription(),
                /* timestamp */System.currentTimeMillis(),
                /* extensions */null,
                /* native guid */computeElement.getNativeGuid(),
                /* record type */RecordType.Event.name(),
                /* Event Source */EVENT_SERVICE_SOURCE,
                /* Operational Status codes */"",
                /* Operational Status Descriptions */"");
        try {
            _evtMgr.recordEvents(event);
        } catch (Exception ex) {
            // Event recording is best-effort: log and continue.
            _log.error("Failed to record event. Event description: {}. Error: {}.",
                    typeEnum.getDescription(), ex);
        }
    }

    /** Records the ViPR event and the audit log entry for the given operation. */
    private void recordAndAudit(ComputeElement ce, OperationTypeEnum typeEnum, boolean status,
            String operationalStage) {
        recordComputeEvent(ce, typeEnum, status);
        auditOp(typeEnum, status, operationalStage,
                ce.getId().toString(), ce.getLabel(), ce.getNativeGuid(), ce.getUuid(), ce.getOriginalUuid());
    }
}
<reponame>jbalmant/auth<filename>auth/models/__init__.py<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- from auth.app import db from datetime import datetime class BaseModel (db.Model): __abstract__ = True created_dt = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) updated_dt = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) from auth.models.user import User
// Upload fileUrl to Google Photos by Cloud Upload plug-in private void cloudUpload(String fileUrl){ int lastIndex = fileUrl.lastIndexOf('/'); String dirAndFileName = fileUrl.substring(fileUrl.lastIndexOf('/', lastIndex-1) + 1); String filePath = "/storage/emulated/0/DCIM/"+dirAndFileName; LOGGER.d("cloudUpload: " + filePath); Intent intent=new Intent(); ArrayList<String> photoList = new ArrayList(); photoList.add(filePath); intent.setClassName("com.theta360.cloudupload","com.theta360.cloudupload.MainActivity"); intent.putStringArrayListExtra("com.theta360.cloudupload.photoList", photoList); startActivityForResult(intent, CLOUD_UPLOAD_REQUSEST_CODE); }
package s3_test

import (
	"io/ioutil"
	"log"
	"os"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/ctrox/csi-s3/pkg/s3"
	"github.com/kubernetes-csi/csi-test/pkg/sanity"
)

// Runs the CSI sanity suite once per supported mounter backend. Each context
// starts its own driver instance on a dedicated unix socket against a local
// MinIO endpoint; the mount/staging temp dirs are shared across contexts and
// removed after the suite. "<KEY>" placeholders stand in for real credentials.
var _ = Describe("S3Driver", func() {
	mntDir, _ := ioutil.TempDir("", "mnt")
	stagingDir, _ := ioutil.TempDir("", "staging")
	AfterSuite(func() {
		os.RemoveAll(mntDir)
		os.RemoveAll(stagingDir)
	})

	Context("goofys", func() {
		socket := "/tmp/csi-goofys.sock"
		csiEndpoint := "unix://" + socket
		cfg := &s3.Config{
			AccessKeyID:     "FJDSJ",
			SecretAccessKey: "<KEY>",
			Endpoint:        "http://127.0.0.1:9000",
			Mounter:         "goofys",
		}
		// Remove a stale socket from a previous run; only a real error is fatal.
		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
			Expect(err).NotTo(HaveOccurred())
		}
		driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
		if err != nil {
			log.Fatal(err)
		}
		go driver.Run()
		Describe("CSI sanity", func() {
			sanityCfg := &sanity.Config{
				TargetPath:  mntDir,
				StagingPath: stagingDir,
				Address:     csiEndpoint,
			}
			sanity.GinkgoTest(sanityCfg)
		})
	})

	Context("s3fs", func() {
		socket := "/tmp/csi-s3fs.sock"
		csiEndpoint := "unix://" + socket
		cfg := &s3.Config{
			AccessKeyID:     "FJDSJ",
			SecretAccessKey: "<KEY>",
			Endpoint:        "http://127.0.0.1:9000",
			Mounter:         "s3fs",
		}
		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
			Expect(err).NotTo(HaveOccurred())
		}
		driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
		if err != nil {
			log.Fatal(err)
		}
		go driver.Run()
		Describe("CSI sanity", func() {
			sanityCfg := &sanity.Config{
				TargetPath:  mntDir,
				StagingPath: stagingDir,
				Address:     csiEndpoint,
			}
			sanity.GinkgoTest(sanityCfg)
		})
	})

	Context("s3ql", func() {
		socket := "/tmp/csi-s3ql.sock"
		csiEndpoint := "unix://" + socket
		cfg := &s3.Config{
			AccessKeyID:     "FJDSJ",
			SecretAccessKey: "<KEY>",
			Endpoint:        "http://127.0.0.1:9000",
			Mounter:         "s3ql",
		}
		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
			Expect(err).NotTo(HaveOccurred())
		}
		driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
		if err != nil {
			log.Fatal(err)
		}
		go driver.Run()
		// NOTE(review): this defer runs when the enclosing Describe closure
		// returns (i.e. during spec construction), not after the tests — and
		// AfterSuite removes mntDir anyway; looks redundant, verify intent.
		defer os.RemoveAll(mntDir)
		Describe("CSI sanity", func() {
			sanityCfg := &sanity.Config{
				TargetPath:  mntDir,
				StagingPath: stagingDir,
				Address:     csiEndpoint,
			}
			sanity.GinkgoTest(sanityCfg)
		})
	})

	Context("s3backer", func() {
		socket := "/tmp/csi-s3backer.sock"
		csiEndpoint := "unix://" + socket
		cfg := &s3.Config{
			AccessKeyID:     "FJDSJ",
			SecretAccessKey: "<KEY>",
			Endpoint:        "http://127.0.0.1:9000",
			Mounter:         "s3backer",
		}
		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
			Expect(err).NotTo(HaveOccurred())
		}
		// Clear loop device so we cover the creation of it
		os.Remove(s3.S3backerLoopDevice)
		driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
		if err != nil {
			log.Fatal(err)
		}
		go driver.Run()
		Describe("CSI sanity", func() {
			sanityCfg := &sanity.Config{
				TargetPath:  mntDir,
				StagingPath: stagingDir,
				Address:     csiEndpoint,
			}
			sanity.GinkgoTest(sanityCfg)
		})
	})

	Context("rclone", func() {
		socket := "/tmp/csi-rclone.sock"
		csiEndpoint := "unix://" + socket
		cfg := &s3.Config{
			AccessKeyID:     "FJDSJ",
			SecretAccessKey: "<KEY>",
			Endpoint:        "http://127.0.0.1:9000",
			Mounter:         "rclone",
		}
		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
			Expect(err).NotTo(HaveOccurred())
		}
		driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
		if err != nil {
			log.Fatal(err)
		}
		go driver.Run()
		Describe("CSI sanity", func() {
			sanityCfg := &sanity.Config{
				TargetPath:  mntDir,
				StagingPath: stagingDir,
				Address:     csiEndpoint,
			}
			sanity.GinkgoTest(sanityCfg)
		})
	})
})
<gh_stars>0 package rabbitmq import "github.com/kozgot/go-log-processing/postprocessor/pkg/models" // MessageProducer encapsulates methods used to publish data for ES uploader service. type MessageProducer interface { PublishEvent(event models.SmcEvent) PublishConsumption(cons models.ConsumtionValue) Connect() CloseChannelAndConnection() }
// The virtual machine scale set extension profile. func (o VirtualMachineScaleSetVMProfileOutput) ExtensionProfile() VirtualMachineScaleSetExtensionProfilePtrOutput { return o.ApplyT(func(v VirtualMachineScaleSetVMProfile) *VirtualMachineScaleSetExtensionProfile { return v.ExtensionProfile }).(VirtualMachineScaleSetExtensionProfilePtrOutput) }
/** * Create a new thread, starting with execution of lf_thread * getting passed arguments. The new handle is stored in thread. * * @return 0 on success, 1 otherwise. */ int lf_thread_create(_lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) { uintptr_t handle = _beginthread((windows_thread)lf_thread,0,arg); thread->handle = (HANDLE)handle; if(thread->handle == (HANDLE)-1){ return 1; }else{ return 0; } }