The Representation of the “Other” in Turkish TV Advertisements Turkey, which has a rich cultural mosaic, consists of a combination of many ‘Others’, including cultural, religious and ethnic ‘Others’; the ‘Other’ as a gender role; refugees, emigrants, etc. In such a multicultural climate, our research aim is to identify the stereotypes that represent the ‘Other’ in TV advertisements on Turkey’s mainstream channels. For this purpose, we examined 101 prime-time TV commercials that were broadcast on the five most watched mainstream TV channels between September and December 2020. Having conducted a quantitative and qualitative content analysis of the TV commercials, we revealed the symbolic annihilation of the ‘Others’ in the Turkish advertising environment, which is in accordance with the conservative perception of the country. In line with international research, we came to the conclusion that white Turkish men of medium-high socioeconomic status were heavily represented in prime-time Turkish TV advertising. Nevertheless, it was also revealed that the gendered visibility of the others, as well as the portrayals of women, were framed only as the ‘Other’ in the Turkish TV ads. Moreover, our research findings overwhelmingly reflect the hegemonic culture and highly traditional views on gender roles.
/*
 * Project Euler Solutions
 * Copyright (c) 2018 <NAME>
 *
 * LICENSE:
 * https://github.com/DominicRutkowski/Project-Euler/blob/master/LICENSE
 *
 * PROBLEM:
 * https://projecteuler.net/problem=14
 */
package com.dominicrutkowski.projecteuler.solutions;

import com.dominicrutkowski.projecteuler.Solution;

public class P0014 extends Solution {

    private static final int MAX = 1000000;
    private static int[] terms = new int[MAX];

    @Override
    public String run() {
        terms[1] = 1;
        int max = 1;
        int maxTerms = 1;
        for (int i = 2; i < MAX; i++) {
            int terms = numberOfTerms(i, 0);
            P0014.terms[i] = terms;
            if (terms > maxTerms) {
                maxTerms = terms;
                max = i;
            }
        }
        return Integer.toString(max);
    }

    private int numberOfTerms(long n, int tailCount) {
        if (n < P0014.terms.length && terms[(int) n] != 0) {
            return terms[(int) n] + tailCount;
        } else if ((n & 1) == 0) {
            return numberOfTerms(n >> 1, tailCount + 1);
        } else {
            return numberOfTerms(3 * n + 1, tailCount + 1);
        }
    }
}
import numpy as np

def count_time_mask(array: np.ndarray) -> int:
    """Count the columns of `array` that are entirely zero."""
    count_zero_columns = 0
    for col_index in range(array.shape[1]):
        # Cheap first-row check before summing the whole column.
        if array[0][col_index] == 0:
            if np.sum(array[:, col_index]) == 0:
                count_zero_columns += 1
    return count_zero_columns
Effects of Radio-Frequency Sputtering Power on Low Temperature Formation of MoS₂ Thin Films on Soda-Lime Glass Substrates. For the realization of an economical and reliable fabrication process for molybdenum disulfide (MoS₂) layers, MoS₂ thin films were directly formed on a soda-lime glass substrate by RF sputtering and subsequent rapid thermal annealing (RTA) in a temperature range of 400-550 °C. Using scanning electron microscopy and atomic force microscopy, more stable surface morphologies of the MoS₂ layers were observed at lower RF sputtering powers, irrespective of the RTA temperature. Even at an RTA temperature of less than 550 °C, the Raman spectra exhibited more distinct E¹₂g and A₁g peaks for the MoS₂ layers sputtered at lower RF powers. The X-ray photoelectron spectroscopy results revealed that more distinct peaks were observed at a higher RTA temperature, and the peak positions moved to higher energies at a lower RF sputtering power. Based on the Hall measurements, higher carrier densities were obtained for the MoS₂ layers sputtered at lower RF powers.
/**
 * Returns a {@link CloudObjectTranslator} that produces a {@link CloudObject} that is of kind
 * "length_prefix".
 */
static CloudObjectTranslator<LengthPrefixCoder> lengthPrefix() {
    return new CloudObjectTranslator<LengthPrefixCoder>() {
        @Override
        public CloudObject toCloudObject(LengthPrefixCoder target, SdkComponents sdkComponents) {
            return addComponents(
                CloudObject.forClassName(CloudObjectKinds.KIND_LENGTH_PREFIX),
                Collections.<Coder<?>>singletonList(target.getValueCoder()),
                sdkComponents);
        }

        @Override
        public LengthPrefixCoder fromCloudObject(CloudObject object) {
            List<Coder<?>> components = getComponents(object);
            checkArgument(components.size() == 1, "Expecting 1 component, got %s", components.size());
            return LengthPrefixCoder.of(components.get(0));
        }

        @Override
        public Class<? extends LengthPrefixCoder> getSupportedClass() {
            return LengthPrefixCoder.class;
        }

        @Override
        public String cloudObjectClassName() {
            return CloudObjectKinds.KIND_LENGTH_PREFIX;
        }
    };
}
// Exercises RunAsUser() with current user token.
TEST(VistaUtilsTest, RunAsCurrentUserTest) {
  CString cmd_path = ConcatenatePath(app_util::GetSystemDir(), _T("cmd.exe"));
  EnclosePath(&cmd_path);

  CString exit_path = cmd_path + _T(" /c exit 702");
  EXPECT_SUCCEEDED(vista::RunAsCurrentUser(exit_path, NULL, NULL));

  scoped_process process;
  EXPECT_SUCCEEDED(
      vista::RunAsCurrentUser(exit_path, NULL, address(process)));
  ASSERT_EQ(WAIT_OBJECT_0, WaitForSingleObject(get(process), 16 * kMsPerSec));
  DWORD exit_code;
  ASSERT_TRUE(::GetExitCodeProcess(get(process), &exit_code));
  ASSERT_EQ(702, exit_code);
  reset(process);

  scoped_handle stdout_pipe;
  CString echo_path = cmd_path + _T(" /c \"echo Hello World\"");
  EXPECT_SUCCEEDED(vista::RunAsCurrentUser(echo_path,
                                           address(stdout_pipe),
                                           address(process)));
  ASSERT_EQ(WAIT_OBJECT_0, WaitForSingleObject(get(process), 16 * kMsPerSec));
  char buffer[32] = {0};
  DWORD bytes_read = 0;
  ASSERT_TRUE(::ReadFile(get(stdout_pipe), buffer, arraysize(buffer) - 1,
                         &bytes_read, NULL));
  ASSERT_EQ(13, bytes_read);
  ASSERT_EQ(CString(_T("Hello World\r\n")),
            AnsiToWideString(buffer, bytes_read + 1));
}
import os
import re

import jinja2


def generate_config_from_template(template_path: str,
                                  config_path: str,
                                  remove_comments: bool = False,
                                  **template_kwargs: str):
    """
    Parameters
    ----------
    template_path: str
        The path to the config template
    config_path: str
        The path to which the rendered config should be written
    remove_comments: bool
        If `True`, removes comments from the rendered config before
        writing it to disk
    template_kwargs:
        Keyword arguments to pass to your template, e.g.
        `path='config.yaml', foo='bar'`

    Example config:

    ```yaml
    !Experiment
    foo: {{ bar }}
    baz: {{ skittles }}
    ```

    If saved as config.yaml.template, then invoking:

    ```python
    generate_config_from_template('config.yaml.template', 'config.yaml',
                                  bar='pickles', skittles='yum')
    ```

    the following config will be written to 'config.yaml':

    ```yaml
    !Experiment
    foo: pickles
    baz: yum
    ```
    """
    dirname = os.path.dirname(template_path)
    basename = os.path.basename(template_path)
    loader = jinja2.FileSystemLoader(searchpath=dirname)
    env = jinja2.Environment(loader=loader, autoescape=True)
    template = env.get_template(basename)
    with open(config_path, 'w') as f:
        for line in template.render(**template_kwargs).split('\n'):
            if remove_comments:
                line = re.sub('# .*', '', line).rstrip()
            if line:
                f.write(line + '\n')
# Read k, n and w from one line; the i-th item costs i*k, so the total
# is k*(1 + 2 + ... + w). Print the shortfall relative to n (0 if none).
L = input().split(" ")
for i in range(len(L)):
    L[i] = int(L[i])
myiter = iter(L)
k, n, w = [next(myiter) for i in range(3)]
summe = 0
for i in range(1, w + 1):
    summe += i * k
if n >= summe:
    print("0")
else:
    print(summe - n)
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// setCookieHandle adds cookies to the response.
func setCookieHandle(w http.ResponseWriter, req *http.Request) {
	// Cookie with an expiry time.
	c1 := http.Cookie{
		Name:     "first_cookie",
		Value:    "vanyar",
		HttpOnly: true,
		Expires:  time.Now().Add(time.Hour),
	}
	// No expiry time set: a session cookie.
	c2 := http.Cookie{
		Name:     "second_cookie",
		Value:    "noldor",
		HttpOnly: true,
	}
	http.SetCookie(w, &c1)
	http.SetCookie(w, &c2)
}

// getCookieHandle reads cookies from the request.
func getCookieHandle(w http.ResponseWriter, req *http.Request) {
	c, err := req.Cookie("flash")
	if err != nil {
		if err == http.ErrNoCookie {
			fmt.Fprintln(w, "No flash cookie found")
		}
	} else {
		fmt.Fprintln(w, "flash=", c)
	}
	cs := req.Cookies()
	fmt.Fprintln(w, "cs=", cs)
}

func main() {
	http.HandleFunc("/setCookie", setCookieHandle)
	http.HandleFunc("/getCookie", getCookieHandle)
	log.Println("listening...")
	http.ListenAndServe("127.0.0.1:9999", nil)
}
/**
 * Tuple with four elements.
 */
public static class T4<T, U, V, W> implements Serializable {

    private static final long serialVersionUID = -1452077726779806296L;

    public final T _1;
    public final U _2;
    public final V _3;
    public final W _4;

    public T4(T t, U u, V v, W w) {
        super();
        _1 = t;
        _2 = u;
        _3 = v;
        _4 = w;
    }

    @Override
    public String toString() {
        return String.format("(%s, %s, %s, %s)", _1, _2, _3, _4);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((_1 == null) ? 0 : _1.hashCode());
        result = prime * result + ((_2 == null) ? 0 : _2.hashCode());
        result = prime * result + ((_3 == null) ? 0 : _3.hashCode());
        result = prime * result + ((_4 == null) ? 0 : _4.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null) return false;
        if (!(obj instanceof T4)) return false;
        @SuppressWarnings("rawtypes")
        T4 other = (T4) obj;
        if (_1 == null) {
            if (other._1 != null) return false;
        } else if (!_1.equals(other._1)) return false;
        if (_2 == null) {
            if (other._2 != null) return false;
        } else if (!_2.equals(other._2)) return false;
        if (_3 == null) {
            if (other._3 != null) return false;
        } else if (!_3.equals(other._3)) return false;
        if (_4 == null) {
            if (other._4 != null) return false;
        } else if (!_4.equals(other._4)) return false;
        return true;
    }

    public T4<T, U, V, W> withFirst(T n) {
        return new T4<T, U, V, W>(n, _2, _3, _4);
    }

    public T4<T, U, V, W> withSecond(U n) {
        return new T4<T, U, V, W>(_1, n, _3, _4);
    }

    public T4<T, U, V, W> withThird(V n) {
        return new T4<T, U, V, W>(_1, _2, n, _4);
    }

    public T4<T, U, V, W> withFourth(W n) {
        return new T4<T, U, V, W>(_1, _2, _3, n);
    }
}
// RemoveResource removes the resource of the given kind from the set of owned objects.
func RemoveResource(name string, gk schema.GroupKind) {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	delete(rc.resources[gk], name)
}
/* write a lump of data at a specified offset */
static int tdb_write(TDB_CONTEXT *tdb, tdb_off offset, const char *buf, tdb_len len)
{
	if (tdb_oob(tdb, offset + len) != 0) {
		return -1;
	}

	if (tdb->map_ptr) {
		memcpy(offset + (char *)tdb->map_ptr, buf, len);
	} else {
		if (lseek(tdb->fd, offset, SEEK_SET) != offset ||
		    write(tdb->fd, buf, len) != (ssize_t)len) {
			tdb->ecode = TDB_ERR_IO;
			return -1;
		}
	}
	return 0;
}
package io.virtualapp.abs.reflect;

/**
 * @author Lody
 */
public class ReflectException extends RuntimeException {

    private static final long serialVersionUID = 663038727503637969L;

    public ReflectException(Throwable cause) {
        super(cause);
    }
}
/**
 * Observe the duration of an already started timer with a specific name and timer ID.
 * To start a timer, please use the startTimer method.
 *
 * @param name
 * @param timerID
 * @return
 */
public static boolean observeDuration(String name, String timerID) {
    try {
        timersByName.get(name).get(timerID).getFirst().observeDuration();
        timersByName.get(name).get(timerID).removeFirst();
        Simulator.getLogger().debug("[SimulatorHistogram] duration of name " + name
                + " and id " + timerID + " was observed at time " + System.currentTimeMillis());
        return true;
    } catch (Exception e) {
        Simulator.getLogger().error("[SimulatorHistogram] timer with name " + name
                + " and ID " + timerID + " is not initialized");
        return false;
    }
}
/// Note: Does not apply boundary conditions to the damping matrix itself
pub fn compute_damping_matrix_into<D>(
    model: &dyn ElasticityModelParallel<f64, D>,
    u: DVectorSlice<f64>,
    stiffness_matrix: &mut CsrMatrix<f64>,
    damping_matrix: &mut CsrMatrix<f64>,
    mass_matrix: &CsrMatrix<f64>,
    mass_damping_coefficient: Option<f64>,
    stiffness_damping_coefficient: Option<f64>,
    material_model: &(dyn Sync + solid::ElasticMaterialModel<f64, D>),
) -> bool
where
    D: DimName,
    DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
{
    profile!("assemble damping matrix");

    if mass_damping_coefficient.is_none() && stiffness_damping_coefficient.is_none() {
        return false;
    }

    if stiffness_damping_coefficient.is_some() {
        compute_stiffness_matrix_into(
            model,
            0.0,
            DVectorSlice::from(u),
            DVectorSlice::from(u),
            stiffness_matrix,
            None,
            material_model,
            // Always use projection to ensure that the damping matrix is
            // at least positive semidefinite
            true,
        );
    }

    // Compute damping matrix as `D = gamma * M + alpha * K`
    compute_jacobian_combination_into(
        damping_matrix,
        stiffness_matrix,
        None,
        mass_matrix,
        stiffness_damping_coefficient.map(|alpha| -alpha),
        None,
        mass_damping_coefficient,
        None,
    );

    true
}
# Find the smallest number >= t whose digit sum is divisible by 4.
t = int(input())
while 1:
    t = str(t)
    y = list(t)
    x = list(map(int, y))
    p = sum(x)
    if p % 4 == 0:
        z = "".join(map(str, x))
        print(z)
        break
    t = int(t) + 1
//! get max params for this vertex
inline void ScdVertexData::max_params(int &i, int &j, int &k) const
{
    i = i_max();
    j = j_max();
    k = k_max();
}
// TestMultiRangeEmptyAfterTruncate exercises a code path in which a
// multi-range request deals with a range without any active requests after
// truncation. In that case, the request is skipped.
func TestMultiRangeEmptyAfterTruncate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _ := startNoSplitServer(t)
	ctx := context.TODO()
	defer s.Stopper().Stop(ctx)
	db := s.DB()
	if err := setupMultipleRanges(ctx, db, "c", "d"); err != nil {
		t.Fatal(err)
	}

	// Delete the keys within a transaction. The deletions only touch
	// [a,b) and [e,f), so the range [c,d) ends up with no active requests.
	if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error {
		b := txn.NewBatch()
		b.DelRange("a", "b", false)
		b.DelRange("e", "f", false)
		return txn.CommitInBatch(ctx, b)
	}); err != nil {
		t.Fatalf("unexpected error on transactional DeleteRange: %s", err)
	}
}
Complications of Distal Biceps Tendon Repairs Surgical repair is the most reliable method of restoring flexion and supination strength of the elbow and forearm after acute rupture of the distal biceps tendon. Although there may be small measurable deficits in power, endurance, and terminal forearm rotation when carefully evaluated, most patients regain near normal upper extremity motion and function and can return to preinjury activities. There are currently 2 basic surgical approaches for distal biceps tendon repair: using 1 anterior incision, or using 1 anterior and 1 lateral incision. Anterior repair alone has the advantage of a minimal risk of heterotopic bone formation, but carries a greater chance of injury to the posterior interosseous nerve. In turn, the 2-incision technique markedly diminishes the risk of radial nerve palsy, but is associated with a greater likelihood of heterotopic bone formation limiting forearm rotation. Rerupture of the distal biceps tendon after repair is uncommon with either technique, and the risk of all complications seems to increase with a delay in surgical intervention after rupture. When motion-limiting heterotopic ossification does occur, surgical resection can proceed once the process becomes mature as defined by plain radiographs. Fortunately, functional forearm motion can commonly be restored in these cases with careful attention to surgical details and postoperative rehabilitation.
/// Field is the field the filter is referring to and `value` is the passed filter,
/// e.g. `where: { <field>: <value> }`.
/// `value` can be either a flat scalar (for shorthand filter notation) or an object (full filter syntax).
fn extract_scalar_filters(field: &ScalarFieldRef, value: ParsedInputValue) -> QueryGraphBuilderResult<Vec<Filter>> {
    match value {
        ParsedInputValue::Single(pv) => Ok(vec![field.equals(pv)]),
        ParsedInputValue::Map(mut filter_map) => {
            let mode = match filter_map.remove("mode") {
                Some(i) => parse_query_mode(i)?,
                None => QueryMode::Default,
            };

            let mut filters: Vec<Filter> = filter_map
                .into_iter()
                .map(|(k, v)| scalar::parse(&k, field, v, false))
                .collect::<QueryGraphBuilderResult<Vec<Vec<_>>>>()?
                .into_iter()
                .flatten()
                .collect();

            filters.iter_mut().for_each(|f| f.set_mode(mode.clone()));
            Ok(filters)
        }
        x => Err(QueryGraphBuilderError::InputError(format!(
            "Invalid scalar filter input: {:?}",
            x
        ))),
    }
}
package sommarengine.core;

public interface Layer {
    void update();
    Application getApplication();
}
/*
 * main.h
 *
 *  Created on: 10/08/2011
 *      Author: bsr
 */

#ifndef MAIN_H_
#define MAIN_H_

#include <QtGui>
#include <QWidget>

#define VER "CPC Builder 0.2.0 (" __DATE__ ")"

#endif /* MAIN_H_ */
/**
 * Rounds Person Days to the next higher Quarter of a Person Day.
 */
Double roundPersonDays(Double personDays) {
    return Math.ceil(personDays * 4.0) / 4.0;
}
#!/usr/bin/env python
# logs/get_nr_visits.py
# Count unique visitor IPs across one or more web server log files.
import sys

if len(sys.argv) <= 1:
    sys.stderr.write("usage: " + sys.argv[0] + " <logfile ..>\n")
    sys.exit(1)

nr_visits = 0
ips_already_seen = []

for logfile in sys.argv[1:]:
    print(logfile)
    fp = open(logfile, "r")
    line = fp.readline()
    while line:
        tokens = line.split()
        ip = tokens[0]
        date = tokens[3]
        month = date.split("/")[1]  # date example: [13/Sep/2014:02:37:53
        # print(ip, date, month)
        if ip not in ips_already_seen:
            nr_visits += 1
            ips_already_seen.append(ip)
        line = fp.readline()
    fp.close()

print(ips_already_seen)
print(len(ips_already_seen))
print(nr_visits)
// ConstructionCombine implements the /construction/combine endpoint.
func (rs *RosettaService) ConstructionCombine(ctx context.Context, request *rtypes.ConstructionCombineRequest) (*rtypes.ConstructionCombineResponse, *rtypes.Error) {
	txn, err := decodeTxn(request.UnsignedTransaction)
	if err != nil {
		return nil, errInvalidTxn(err)
	}
	for _, sig := range request.Signatures {
		var sigHash crypto.Hash
		copy(sigHash[:], sig.SigningPayload.Bytes)
		for sigIndex := range txn.TransactionSignatures {
			if txn.SigHash(sigIndex, stypes.ASICHardforkHeight+1) == sigHash {
				txn.TransactionSignatures[sigIndex].Signature = sig.Bytes
				break
			}
		}
	}
	return &rtypes.ConstructionCombineResponse{
		SignedTransaction: base64.StdEncoding.EncodeToString(encoding.Marshal(txn)),
	}, nil
}
On the basis of 10 years' experience of cooperation between the departments of neurosurgery and maxillofacial surgery at the Province Hospital in Rzeszów, the methods of surgical-orthopaedic management of craniofacial injuries were assessed. Out of 95 cases of craniofacial injuries treated in the years 1976-1985, nasal liquorrhoea was present in 42 cases (44.2%). For immobilization of the maxillary block, Federspiel's traction was used in 32 cases, and Adams' interosseous wiring in 5 cases. In 6 cases intermaxillary immobilization was applied. In this group of 42 cases of craniofacial injuries complicated by liquorrhoea, the liquorrhoea was stopped in 24 (57.1%).
package io.rebloom.client;

import org.junit.Before;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.util.Pool;

public class TestBase {

    private final Pool<Jedis> pool = new JedisPool();
    protected final Client cl = new Client(pool);

    @Before
    public void clearDB() {
        try (Jedis jedis = pool.getResource()) {
            jedis.flushDB();
        }
    }
}
Locomotor depression by the opioid benzodiazepine tifluadom in mice. Several doses of tifluadom, an opioid benzodiazepine with affinity for opioid kappa receptors, were tested for analgesia and locomotor activity in two strains of mice, C57BL/6 and DBA/2. The analgesic properties of the compound were confirmed in both strains by a tail flick test. As concerns locomotor activity, both strains, which in previous research showed opposite responses to opiates, were depressed by tifluadom, suggesting the involvement of kappa receptors in this effect.
def show_image_with_matches(self, result, save_to_disk=False):
    cv2.imshow("Matches between images", result)
    if save_to_disk:
        cv2.imwrite("matches_between_images.jpg", result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def rescale(data): return data / 255.0
package com.pesna.screens;

import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.graphics.glutils.ShapeRenderer.ShapeType;
import com.pesna.Main;
import com.pesna.entities.Bear;
import com.pesna.entities.EnemyBot;
import com.pesna.objects.ScreenObject;

import java.util.LinkedList;

public class ErrorScreen implements IScreen {

    public void draw(Main _reference) {
        _reference.batch.setProjectionMatrix(_reference.camera.combined);
        _reference.shapeRenderer.setProjectionMatrix(_reference.camera.combined);

        int gwidth = Gdx.graphics.getWidth();
        int gheight = Gdx.graphics.getHeight();

        _reference.shapeRenderer.begin(ShapeType.Filled);
        _reference.shapeRenderer.setColor(1, 0, 0, 1);
        _reference.shapeRenderer.circle(gwidth / 2, gheight / 2, 50, 30);
        _reference.shapeRenderer.setColor(0.5f, 0.5f, 0.5f, 1);
        _reference.shapeRenderer.circle(gwidth / 2, gheight / 2 - 30, 8);
        _reference.shapeRenderer.ellipse(gwidth / 2 - 8, gheight / 2 - 15, 16, 50);
        _reference.shapeRenderer.end();
    }

    /**
     * Called in the screen manager (via main loop) for update.
     */
    public void update(Main _reference) {}

    @Override
    public void SpellForceAdd(ScreenObject newObject) {}

    @Override
    public void ObjectForceAdd(ScreenObject newObject) {}

    @Override
    public LinkedList<Bear> GetLevelEnemy() {
        return null;
    }
}
/**
 * @author michaelGRU
 *
 * Formatting with printf()
 */
public class StringFormating {
    public static void main(String[] args) {
        // format: %[flags][width][.precision]conversion
        // flags can 1. display numbers with comma separators (,)
        // 2. pad numbers with leading zeros (0)
        // 3. left-justify numbers (-) *by default the number is right justified
        // s = strings
        // d = decimals
        // f = floating-point num
        // t = date/time

        // Example 1
        double money = 12345.67;
        System.out.printf("Your number is %.2f\n", money);    // round to 2 decimal places
        System.out.printf("Your number is %,.2f\n", money);   // adding a (,) flag
        System.out.printf("Your number is %020.2f\n", money); // adding leading 0s
    }
}
Long term measurements from the Mátra Gravitational and Geophysical Laboratory

Summary of the long term data taking related to one of the proposed next generation ground-based gravitational-wave detector locations is presented here. Results of seismic and infrasound noise, electromagnetic attenuation and cosmic muon radiation measurements are reported from the underground Mátra Gravitational and Geophysical Laboratory near Gyöngyösoroszi, Hungary. The collected seismic data of more than two years are evaluated from the point of view of the Einstein Telescope, a proposed third generation underground gravitational wave observatory. Applying our results to the site selection will significantly improve the signal-to-noise ratio of the multi-messenger astrophysics era, especially in the low frequency regime.

Introduction. Preparation for the next generation ground-based gravitational-wave detectors requires careful pre-analysis of the proposed locations. As for previous gravitational wave detectors, identifying noise sources and construction risks is a key task. In the case of underground installation and improved low frequency operation, this must be based on long-term measurements to identify the noise types and their origins. Parallel and combined measurements applying the most novel techniques for environmental tests are also beneficial for the design of the proposed facility and for high-accuracy measurements with it. The main advantage of underground operation for gravitational observatories is the improved sensitivity in the low frequency regime, 1-10 Hz. The observation of gravitational waves with earth-based detectors and the birth of multi-messenger astronomy in the last years revived interest in the scientific and technological challenges of underground operation. The scientific value of the existing plans for building an all-in-one observation facility has increased considerably. Its improved sensitivity and extended frequency range, together with capabilities of measuring polarization and direction, offer an unprecedented discovery potential. In order to explore the technological and scientific background of underground operation, the Mátra Gravitational and Geophysical Laboratory (MGGL) was established by the MTA Wigner Research Centre for Physics along a horizontal tunnel of the mine, 1280 m from the entrance and 88 m below the surface; the related research activities have been extended to other areas of the mine and its surroundings in the last years. The initial instrumentation and the first results were reported in Refs. In this paper we report the results of continued research, summarizing the geophysical characteristics of the Mátra Mountains as an Einstein Telescope site candidate. In this respect the analysis of the collected seismic noise data of more than two years is probably the most informative. We report them together with geophysical data, rock characteristics, infrasound, electromagnetic and muon flux measurements. The paper is organized as follows. First we introduce the geological and seismological environment of the Mátra Mountains. A short report on the mechanical properties of the grey andesite rock of the Mátra is part of the geophysical survey. Then the seismological data collected in the last two years are shown and analysed, including both the data from the broadband seismometers of the Wigner RCP and the custom made seismometers of the Warsaw University. In the third section underground infrasound measurements are reported.
Then the damping of electromagnetic waves in the andesitic rocks of Mátra is estimated using underground and surface electromagnetic measurements. Finally the evaluation of the collected muon flux data is shown, demonstrating the tomographic capabilities of the installed detector technology with rock density maps. In the following sections we shortly introduce the instruments and elaborate the long term data, whenever it is possible. This includes all seismic information that was collected in the MGGL. In the following, the complete data collection period will be referred to as Run-1, as compared to the previous, shorter Run-0 data of our previous report.

The geophysical environment of MGGL. The lithological composition of the Mátra mountain range is moderately blocky andesite, as shown in Fig. 1, where the green areas denote various andesite types formed in the same geological era. In the following we characterise the seismic activity of the surrounding area and also the mechanical properties of the typical hard rock from the Gyöngyösoroszi mine. In order to facilitate comparison with other sites, we also show quantitative measures by calculating the related ground displacement and seismic hazard of the Mátra Mountains.

2.1. Seismicity of the Mátra Mountains and the surrounding areas. In general, larger earthquakes are more important for the stability of the underground facility, while small ones are not critical regarding the observations. The seismicity of the Carpathian basin as a whole can be considered moderate. The known earthquakes of the area with magnitude larger than 3.0 are shown in Fig. 3. According to the collected historical data, on average one earthquake with magnitude M ≥ 5.0 can be observed in the Carpatho-Pannonian region annually. Also, the seismic activity from neighbouring open pit mines can be identified by the ET1H seismological station deployed in the MGGL with the help of the stations of the Hungarian National Seismological Network. The level of seismic activity in the Mátra Mountains is quite low. Fig. 2 shows the epicentres of the known M ≥ 3 earthquakes in the area (19.69-20.18E, 47.8-48.0N) based on the data of the MTA CSFK GGI Earthquake Catalogue and the Hungarian National Seismological Bulletins. Only earthquakes with magnitude greater than 3.0 are shown, as lower magnitude events can be misclassified quarry explosions. It can be seen that only three small earthquakes (M ≤ 3.5) were ever observed in the area, occurring in 1879, 1895 and 1980.

2.1.1. Ground displacements caused by seismic events. We have selected four events to characterise the ground displacement in the MGGL caused by different seismic events. For the computations of the displacements, 100 sps streams were used. Before the instrument correction, the data were detrended and filtered by a second order high pass Butterworth filter with a corner frequency of 1 Hz. As the seismicity in the Mátra area is mainly determined by the mining activities in the quarries, we computed the ground displacement for characteristic explosions carried out in the three most active mines. Table 1 shows the maximum displacement amplitudes for the three components in the case of the selected events. The magnitude of the displacement is similar for the mines Gyöngyössolymos and Kisnána, while the maximum displacement for the explosion belonging to the Kisnána mine is an order of magnitude smaller. As the recent Tenk earthquake (2013-04-22, M=4.8, epicentral distance=42 km) occurred relatively close to the MGGL, it may be of interest.
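The displacement pre-processing described above lends itself to a compact numerical sketch. The following minimal Python example is our own illustration, not the authors' pipeline; scipy and the synthetic input array are assumptions. It detrends a 100 sps velocity stream, applies the second order 1 Hz high-pass Butterworth filter, and integrates to displacement to read off the maximum amplitude:

```python
# Minimal sketch of the described pre-processing, assuming scipy;
# the velocity array is a synthetic placeholder, not measured data.
import numpy as np
from scipy import signal

def peak_displacement(velocity: np.ndarray, fs: float = 100.0) -> float:
    """Maximum displacement amplitude of one velocity component."""
    v = signal.detrend(velocity)                        # remove linear trend
    b, a = signal.butter(2, 1.0, btype="highpass", fs=fs)
    v = signal.filtfilt(b, a, v)                        # zero-phase 1 Hz high-pass
    x = np.cumsum(v) / fs                               # integrate velocity -> displacement
    return float(np.max(np.abs(x)))

velocity = np.random.randn(6000)                        # 60 s placeholder stream
print(peak_displacement(velocity))
```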
Unfortunately, at the time of the earthquake the ET1H station was not yet installed. However, we can use the data of the Piszkéstető station, which is located on the surface, 4.88 km from the MGGL, and was operational. The fourth row of Table 1 shows the displacements observed there. It can be seen that the Tenk earthquake produced around 20-30 times larger displacements than the strongest explosion. It must be noted that in the MGGL, due to the large subsurface depth, this value would probably have been smaller (Table 2). These correspond to 50%, 39%, 10%, 5% and 2% exceedance in 50 years, respectively.

2.2.1. Measured elastic moduli and rheological parameters. As is well known, the static elastic moduli of rocks measured in the laboratory are different from the dynamic ones determined from wave propagation speeds. The difference is usually attributed to various heterogeneities, like microcracks, porosity and grain structure. The Kluitenberg-Verhás body of thermodynamic rheology, which is derived from non-equilibrium thermodynamics with internal variables, provides a simple modelling possibility and explanation. If the material relaxation times of geometric effects are in the order of the operational domain of the low frequency part of ET, then rheological properties can influence the reliable detection of gravitational waves. Therefore we have performed laboratory measurements in order to determine the rheological properties of the grey andesite of Mátra. The investigated samples were cut from blocks originating from construction works in the vicinity of the MGGL. This is a middle grey, small grained, isotropic pyroxene type andesitic rock, a differentiated upper Miocene (Tortonian) formation, about 14.5 million years old, and considered typical in this region. The measurements were performed in the rock mechanics laboratory of Kőmérő Ltd. with the help of a hydraulic instrument (maximal compression 150 kN); an HBM C6A (1 MN) load sensor and an HBM Spider 8 & CatmanEasy system collected the data. The diameter and the length of the cylindrical sample were 37.99 mm and 78.33 mm, and its mass was 0.2213 kg. The loading speed was 0.7 kN/s in every cycle. A hysteresis type measurement with increasing stress amplitudes was chosen to determine the rheological parameters. In our calculations only the loading parts of the last two cycles are considered. The force and the axial deformation are shown on the left side of Fig. 4 as functions of time. The right side of Fig. 4 shows the axial stress as a function of the axial and lateral deformations on the positive and negative horizontal axes, respectively. The rheological hysteresis and also the apparent permanent deformation are visible at the end of the creep periods. Therefore andesitic rocks from Mátra show a clear deviation from ideal elasticity in properly designed laboratory experiments.

2.2.2. Kluitenberg-Verhás body and static-dynamic elasticity. The isotropic Kluitenberg-Verhás body is given by the following relations between the stress σ and strain ε:

$$\sigma_d + \tau_d\,\dot\sigma_d = E_{0d}\,\varepsilon_d + E_{1d}\,\dot\varepsilon_d + E_{2d}\,\ddot\varepsilon_d, \qquad (1)$$
$$\sigma_s + \tau_s\,\dot\sigma_s = E_{0s}\,\varepsilon_s + E_{1s}\,\dot\varepsilon_s + E_{2s}\,\ddot\varepsilon_s. \qquad (2)$$

Here the dot denotes time differentiation; the subscripts d and s refer to the deviatoric and spherical stresses and deformations, as well as to the related material parameters, like the τ_d and τ_s deviatoric and spherical relaxation times.
In our case a cylindrical laboratory sample was prepared according to ISRM (International Society for Rock Mechanics) standards, therefore the uniaxial stress σ and the axial and lateral deformations ε_a and ε_l determine the spherical and deviatoric stresses and strains as σ_d = 2σ/3, σ_s = σ/3, ε_d = 2(ε_a − ε_l)/3 and ε_s = (ε_a + 2ε_l)/3. The elastic moduli E_{0d} = 2G and E_{0s} = 3K are the well known static Lamé coefficients, and E_{1d} and E_{1s} are the deviatoric and spherical viscoelastic material coefficients. If the E_{2d} and E_{2s} dynamic parameters are negligible, then the Kluitenberg-Verhás body simplifies to the Poynting-Thomson-Zener body, which is the standard model of creep and relaxation phenomena in rock mechanics (also known under the names generalized Kelvin-Voigt and Hill-Maxwell body). Then it is convenient to transform equations (1) and (2) into a hierarchical form:

$$\sigma_d + \tau_d\,\dot\sigma_d = E_{0d}\left(\varepsilon_d + b_d\,\tau_d\,\dot\varepsilon_d\right), \qquad (3)$$
$$\sigma_s + \tau_s\,\dot\sigma_s = E_{0s}\left(\varepsilon_s + b_s\,\tau_s\,\dot\varepsilon_s\right). \qquad (4)$$

Here the parameters b_d = E_{1d}/(τ_d E_{0d}) and b_s = E_{1s}/(τ_s E_{0s}) measure the deviation from the ideal elastic Hooke body. If b_d = b_s = 1, then the material is apparently completely elastic. The propagation speeds of longitudinal and transversal waves determine the material parameters E_{d,dyn} = b_d E_{0d} and E_{s,dyn} = b_s E_{0s}, respectively. These are called dynamic Lamé coefficients. Both for the static and dynamic cases the Young modulus and the Poisson coefficient are calculated from the Lamé coefficients.

Material parameters. One can determine the static and dynamic elastic moduli from the cyclic loading of the sample described above, and the dynamic ones can also be determined from direct laboratory measurements of the sound speeds. This direct laboratory measurement of the propagation speeds of longitudinal and transversal waves gives a dynamic Young modulus of (38.6 ± 1.1) GPa and a dynamic Poisson coefficient of 0.18 ± 0.01. Furthermore, the time dependent data of the chosen rock sample were analysed using the differential equations of the rheological models. In particular, we have assumed that the measured deformation values are given, and we have determined the best parameters of the differential equations (1) and (2) of the Kluitenberg-Verhás body to obtain the stress, both for the deviatoric and spherical components. According to these calculations, the coefficients of the second derivatives of the deformation, E_{2d} and E_{2s}, can be neglected, and the Poynting-Thomson-Zener model, (3) and (4), was applied. The obtained best fit parameters are shown in Table 3.

Table 3. The static and dynamic elastic coefficients according to the rheological data evaluation of the loading parts of the 3rd and 4th cycles of the laboratory experiment. The dynamic Young modulus and dynamic Poisson ratio ν are consistent with the values obtained from the propagation speeds of the P and S waves.

Table 4. The rheological parameter b = E_1/(E_0 τ), characterising the deviation from the ideal elastic regime both in the deviatoric and spherical cases.

According to our measurements, the typical grey andesite of Mátra is not ideally elastic, and the deviation from elasticity is of rheological origin. The experimental parameters can be obtained both from the wave propagation speed measurements and from the cyclic loading experiments. These coefficients are consistent, as shown by our preliminary calculations. The obtained transition regime, which is proportional to the inverse relaxation times, is 0.02-0.1 Hz, which is below the low frequency sensitivity of ET.
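To make the role of the b parameters concrete, here is a small illustrative integration of the Poynting-Thomson-Zener relation (3): given a prescribed strain history, it produces the corresponding stress for trial parameters (E0, b, τ), which is the forward step one would iterate when fitting the loading cycles. This is our own sketch under made-up parameter values, not the authors' fitting code:

```python
# Toy forward model of the Poynting-Thomson-Zener body (3):
# sigma + tau*dsigma/dt = E0*(eps + b*tau*deps/dt).
# Parameter values below are illustrative only.
import numpy as np

def ptz_stress(eps, dt, E0, b, tau):
    """Explicit Euler integration of the PTZ stress response."""
    deps = np.gradient(eps, dt)
    sigma = np.zeros_like(eps)
    for i in range(1, len(eps)):
        rhs = E0 * (eps[i - 1] + b * tau * deps[i - 1])
        sigma[i] = sigma[i - 1] + dt * (rhs - sigma[i - 1]) / tau
    return sigma

t = np.linspace(0.0, 10.0, 1001)
eps = 1e-3 * np.sin(0.5 * t)           # prescribed toy strain history
sigma = ptz_stress(eps, t[1] - t[0], E0=30e9, b=1.2, tau=5.0)
```

For b = 1 the response relaxes onto the elastic line σ = E0·ε, which is exactly the "apparently completely elastic" case named in the text.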
However, our experimental methodology prevents the detection of faster relaxation modes, therefore further investigations are necessary.

Seismological measurements I. The ET site selection preparation measurements selected the three best candidate sites for the Einstein Telescope in Europe. The selection was based on the low seismic noise level in the critical low frequency range 1-10 Hz. The survey investigated several underground locations, each of them for about a week. In order to have a better estimate of the average seismic noise, it is reasonable to collect at least two years of data and analyse the annual and seasonal changes in noise level originating from natural sources and human activity. The above mentioned survey was the main motivation for the establishment of the MGGL. Due to the ongoing reclamation activity in the Gyöngyösoroszi mine, the vicinity of the laboratory was also subject to maintenance works. Therefore in our case both internal and external human activities contributed to the noise level. Seismological data collection was performed by two Guralp CMG-3T low noise, broadband seismometers, and also by the custom made seismic sensor developed at the Warsaw University. The Guralp instruments are sensitive to ground vibrations with a flat velocity response in the frequency range 0.008-50 Hz. The Guralp seismometers were calibrated and cross calibrated, and operated according to the protocol of the seismometers of the Hungarian National Seismological Network. The self noise of the seismometers was below the low noise model from 0.02 Hz to 10 Hz. The custom made Warsaw seismometer uses one vertical and two horizontal geophone sensors mounted firmly in a single aluminium block placed inside a metal housing, along with a data acquisition system. The geophones (LGT-2.5 and LGT-2.5H) used as the sensors have a lower corner frequency of 2.5 Hz. The data acquisition system sampled the analog signal with a frequency of 125 Hz and 32-bit resolution. The Warsaw seismometer was calibrated by comparing the data with a Trillium seismometer, and also with a Guralp CMG-3T during a data calibration session at the MTA Wigner Research Centre for Physics in January 2015. One of the Guralp instruments (ET1H) and the Warsaw seismometer (WARS) were permanently installed in the MGGL. The seismometers were deployed on separate concrete piers which were connected to the bedrock. Between the piers and the seismometers a granite plate was placed. The other Guralp instrument (hereafter GU02) was used in a measurement campaign in the first two weeks of June 2017 in a measurement cabin, constructed next to the main tunnel and prepared for seismometer installation. The mutual performance of the ET1H and WARS seismometers is demonstrated in Fig. 6, where the averages of the days 2017-07-01 and 2017-07-02 are calculated for each instrument, together with the ratio of the averages in the given frequency range. The agreement is similar on other arbitrarily chosen days during the measurement campaign. Therefore the cross-calibration of the instruments is satisfactory. The data acquisition periods of the two instruments differ slightly due to operational problems; however, the amount of collected data is similar. In the following, first we analyse the ET1H data, then the WARS data. The methods and the elaboration are different, in order to show more aspects of the noise measurements. Data acquisition in the vertical direction was interrupted by a gap (until 2016-10-16, for 25 days); therefore 741 days of data were collected for the horizontal directions and 716 days in the vertical direction.
In our analysis we followed the data processing method of Ref. The particular problems observed in the long term data analysis suggested some methodological improvements and additional characteristics. These were reported in Ref., where the precise definitions and the justification of the methodology of data processing can be found. In this section some basic definitions are recalled for clarification. The velocity Power Spectral Density (PSD) is defined as

$$P_v(f_k) = \frac{2}{f_s\,N\,W}\left|\sum_{n=0}^{N-1} w_n\,\bigl(v_n - \langle v\rangle\bigr)\,e^{-2\pi i k n/N}\right|^2,$$

where f_s is the sampling rate (in our case 100 Hz), N is the length of the analysed data sample (N = 5000), and W is the normalization factor of the applied window function w_n. We did not take advantage of the fast Fourier algorithm, which would have come at the expense of increasing the lowest resolvable frequency. The summed coefficients represent the Fourier transform of the deviation of the raw velocity data v from its average value ⟨v⟩. In our analysis, PSDs were calculated from 50 s data samples with f_s = 100 Hz sampling rate. Before further processing, the raw data were high-pass filtered with f_HP = 0.02 Hz, and the overlap is 3/4 due to the window function. To characterize spectral properties the acceleration Amplitude Spectral Density (ASD) will be used, and the displacement rms will be applied as a cumulative property. This is the square root of the integral of the displacement PSD between two frequency values:

$$\mathrm{rms} = \sqrt{\frac{1}{T}\sum_{k=l}^{K} P_x(f_k)},$$

where l is the low cut-off index (in Beker's paper this is chosen to correspond to 2 Hz), K is the high cut-off index (usually the Nyquist frequency), and T = N/f_s. In our case l is chosen to correspond to 1 Hz or 2 Hz and K to either the Nyquist frequency or 10 Hz, because we use the rms_2Hz of Beker and also calculate the rms_2-10Hz and rms_1-10Hz values. With these new measures we can drop the irrelevant frequency interval above 10 Hz and consider the technologically already available 1-2 Hz region. The commonly used comparative measure for the spectral properties of the sites is a particular value of the amplitude spectral density, the so-called Black Forest line:

$$\mathrm{ASD}_{\mathrm{BF}} = 2\times 10^{-8}\ \mathrm{m\,s^{-2}}/\sqrt{\mathrm{Hz}};$$

this constant level reproduces the rms values listed below. It is worth giving the various rms values corresponding to the reference spectral density represented by the Black Forest line: rms^{BF}_2Hz ≈ 0.1 nm, rms^{BF}_2-10Hz = 0.1 nm and rms^{BF}_1-10Hz = 0.29 nm. Another important aspect of the long term data evaluation is the use of percentiles and the choice of intermediate averaging periods. Previous studies applied the mode of the data for the representation of a characteristic mean value. However, the median, and also the other percentiles, are less sensitive to discretisation and the inevitable averaging. Moreover, they naturally select the representative data without the necessity of filtering short large noise bursts. Therefore in the following the analysis of site properties is mostly based on the median of the data. However, we will demonstrate the most important differences in the spectral representation and also calculate the mode related rms_2Hz of Beker for a clear comparison with the previous studies. It is also worth mentioning that the basic Fourier length used in the Fourier transformation was 50 s for the Guralp instruments and 128 s for WARS. For the ET1H and GU02 Guralp instruments we have introduced a short time averaging (STA) period of 300 s, and for the two-year data the use of daily averages was convenient. The daily periods were called intrinsic averaging (INA), because it considers the natural periodicity of the data.
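As a numerical illustration of these definitions (our own sketch, not the published pipeline), the band-limited displacement rms of a constant acceleration ASD can be checked directly; integrating the Black Forest level over 2-10 Hz reproduces the quoted rms of about 0.1 nm:

```python
# Cross-check of the band rms values quoted above: convert a constant
# acceleration ASD to displacement PSD via x = a/(2*pi*f)^2 and
# integrate over the band. Values follow the text; code is illustrative.
import numpy as np

def band_rms_from_accel_asd(asd_accel, f):
    """Displacement rms from an acceleration ASD sampled at frequencies f."""
    psd_disp = (asd_accel / (2 * np.pi * f) ** 2) ** 2
    return np.sqrt(np.trapz(psd_disp, f))

f = np.linspace(2.0, 10.0, 4001)          # integration band, Hz
bf = 2e-8 * np.ones_like(f)               # Black Forest line level
print(band_rms_from_accel_asd(bf, f))     # ~1e-10 m, i.e. ~0.1 nm
```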
In other words, in order to obtain the long term 10th, 50th and 90th percentiles, first the short term averages (STA) were calculated, and then particular chosen periods in each day (INA) enable the comparison of working hours, night time or whole day data. See for more details about the chosen methodology.

3.2. Long term seismic results. A particular factor in our noise analysis is the ongoing mine reclamation activity in the Gyöngyösoroszi mine. As a result, the investigation and identification of various noise sources originating from external and internal human activity, machine noise, construction works, train noise, etc. proved to be difficult. In November 2016 a three-shift operation period started, with increased industrial noise present also during the nights. In order to compare the noise types of these kinds of activities, we have defined three periods of each day for our study: (a) the whole day, (b) the night period (20:00-2:00 UTC) and (c) the working period (9:00-15:00 UTC). In the following we present a comparative analysis of the long term data considering seasonal changes, external and internal human noise, and also depth dependence for a shorter, two week long measurement campaign performed by our second Guralp instrument, GU02, at -404 m depth.

3.2.1. Complete Run-1 results. The acceleration ASDs for the two-year observation period are shown in Fig. 7. The borderlines of the blue colored area are the 10th and 90th percentiles of the daily 300 s data. The dotted black lines are the modes of the 1800 s averages, according to the methodology of Ref. The medians and modes are closer to the Black Forest level in the horizontal directions. It is also remarkable that the mode underestimates the median, that is, most of the days are noisier than the mode at a given frequency. This is well represented in the corresponding rms values given in Tab. 5, too. Here the first line is rms_2Hz, calculated from the mode of the data. It is worth recalling that it was 0.12 nm in the previous short term measurements of Beker at the same place. As can be seen, the rms calculated from the median is much larger, by about 20%. It is also worth mentioning that the Black Forest line reference value of rms_1-10Hz is 0.29 nm, and the rms_1-10Hz normalized to this value is less than the rms_2-10Hz; therefore the site is less noisy at the lower frequencies, as can be seen in Fig. 7 as well. The role of the human and industrial noises is shown in Fig. 8, where the spectral densities for the working and night periods are plotted in the North-South and vertical directions. It is remarkable that half of the frequency range is below the median of the data (blue line); the asymmetric relative position of the blue area in the working period indicates the presence of short noisy periods between 9:00-15:00 UTC. We have to mention that from the end of 2016 the reclamation works were performed close to the MGGL in a three-shift schedule. In order to illustrate the cultural and industrial noises, the frequency dependence of the ratio of the working and night periods is plotted in Fig. 8. The cultural noises start at about 0.7 Hz and reach their maxima between 2-3 Hz and 10-20 Hz. We may suppose that the night shifts cause less noise, the main works being done during the daytime. Then the observed noise level in the night periods can be considered as an upper limit for an operating underground GW detector facility, where the equipment is optimized for low noise operation.
The corresponding rms values for the night and working periods are given in Table 6. We can see that the rms of the noisier working periods can be two times higher than in the calm night ones. The differences at longer periods in Run-1 were analysed by three different methods. First, the acceleration ASDs of the night periods were plotted for each year in Fig. 10. The 90th percentiles at higher frequencies show the increasing industrial noise at night due to the three-shift schedule of the mine works. Interestingly, this is not apparent in the mode and in the median of the data, because the particular activity seemingly produced only short noisy periods.

Table 6. The rms values in nm for the night and working periods in Run-1, according to the panels of Fig. 8.

In Table 7 the rms_2-10Hz values of the night periods of each year are shown, corresponding to the middle panels of Fig. 10. There is no significant annual variation during Run-1 of the MGGL. The seasonal averages are plotted in Fig. 11, with the rms values in Table 8, which shows that the calmest seasons are the spring and the summer.

Table 8. Seasonal variation of the rms values of the seasons according to the data shown in Fig. 11. The first part is the rms_2Hz of the mode and the second one is the rms_1-10Hz of the median.

Finally, the seasonal changes are represented with a timeline plot of the daily rms values in Fig. 12. As we can see, there is an annual trend in the curves: the late spring and the early summer are the most silent periods in the 1-10 Hz frequency range. Note that this seasonal difference is not apparent in the rms_2Hz timeline, which is in the bottom panel of Fig. 12. This seasonal variation of the lowest frequency noise is in agreement with what is observable also in Fig. 11. For the two-week measurement interval, the percentiles were computed directly from the 300 s averages without INA. The modes were calculated from 1800 s averages as in the previous section. In Fig. 13 the comparison of the two-week and the total Run-1 ET1H data is plotted. One can see that this two-week period is representative; there are no significant differences between the plots, in spite of the INA in the case of Run-1. We have plotted the ET1H and GU02 data together in Fig. 14. There it is apparent that the main attenuation is in the interval of 1-4 Hz for the horizontal and 1-7 Hz for the vertical direction. This interval is crucial for the low frequency noise budget of the proposed ET; furthermore, this frequency range dominates all the rms values. The corresponding rms values for GU02 are in Tab. 9. In Fig. 15 the night and working periods are shown for the GU02 station, with the corresponding rms values in Tab. 10. The ratios of the night and working periods are also shown in Fig. 16.

3.3. Summary. We analysed the long term Run-1 data of the ET1H station and compared the data from a representative two-week period of the -88 m deep ET1H station with the -404 m deep GU02 station. For frequencies higher than 2 Hz there are no significant annual changes, but for 1-2 Hz the spring-summer time shows an annual minimum. Comparing the deeper and shallower noise data, we have observed that the decrease of the seismic noise spectral amplitudes in the 1-8 Hz frequency range is approximately 60%. This range is crucial for the low frequency performance of the Einstein Telescope. We emphasize that in almost 90% of the observation period, the noise level at night was detected below the Black Forest line (see Fig. 15).
The related average horizontal rms_2-10Hz = 0.0742 nm and rms_1-10Hz = 0.213 nm. The first value is equal to the mode related rms_2Hz value, which is 0.0745 nm.

Study of seismic noise with the Warsaw seismometer. As described in Section 3, the custom made seismometer of the Warsaw University (WARS) is located near the ET1H seismometer; therefore they should measure the same noise, although at low frequencies there can be differences due to the scattering of seismic waves in the mine. The y-axis of the sensor is pointing in the direction of 25 degrees NW.

4.1. The collected and analysed data: The Run-1 data of the WARS seismometer start from 24 May 2016 and last until 2 July 2018. In this period 654 days were used for the analysis, with the rest of the days being unavailable. Days not included in the study will be referred to as "missing days" henceforth. The data for the study were recorded in each hour. Out of these 654 days, 30 days do not have complete 24 hours and will be referred to as "gap days" henceforth. After eliminating the missing durations from the gap days, a total of 15290 hours of complete data were available for analysis.

4.2. Analysis of the WARS data: The acceleration power spectral density (PSD) was calculated by Welch's method. The data for each hour were divided into 2048 sample long segments, with each segment windowed by a Hann window function, using half the segment length as overlap. No zero padding was used. The acceleration amplitude spectral density (ASD) for each hour was calculated by taking the square root of the acceleration PSD. The data were available for each of the three axes for all of the 15290 hours, so a total of 3×15290 ASDs were generated.

4.3. Monthly analysis of the WARS data: The ASDs generated for each hour were used to study the variation of the ASDs with time over the period of each month, i.e. spectrograms were generated for every month of the period of the study. This was done for all three axes to study the variation in detail. We present an example acceleration ASD, binned on a plane spanned by the frequency and the logarithm of the acceleration ASD. We then plotted it using color shading to present how often each value occurs in the data. The log ASD axis was divided into twenty bins per decade. We show the results for the two axes in Fig. 18. The variation of the ASD spans about 0.7 dex at low frequencies below 5 Hz and grows to almost 1.5 dex at about 20 Hz.

4.5. Complete daily analysis: In order to investigate the several types of spectra during different working times of the day in more detail, we generated the acceleration ASD for periods corresponding to the working shifts in the mine, 09:00-15:00 UTC and 20:00-02:00 UTC. The acceleration ASDs for these periods were then binned and plotted for each axis for the whole data set. We present these in the panels of Fig. 19 for the working and in the panels of Fig. 20 for the night periods, respectively, as in the case of the ET1H station. These plots show less variation than the plots for the entire data set. The lack of the characteristic bimodality at higher frequencies indicates the lack of noisy periods at night. The working period shows additional lines that are not visible at night. The night spectra have very little variability, less than 0.3 dex over the entire frequency band.

4.6. Site parameterization. The values of rms_2Hz were calculated for each hour of available data, and the cumulative distribution function was plotted for the three axes of the detector in Fig. 21.
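The hourly ASD estimate described in Section 4.2 maps directly onto a standard library call. Below is a minimal sketch (assuming scipy; the input array is a random placeholder, not a WARS recording) with the stated 2048-sample Hann segments and half-segment overlap:

```python
# Sketch of the hourly Welch ASD estimate of Section 4.2, assuming scipy;
# the input is placeholder data, not a WARS recording.
import numpy as np
from scipy import signal

def hourly_asd(accel, fs=125.0):
    """Acceleration ASD: Welch PSD with 2048-sample Hann windows,
    half-segment overlap and no zero padding, then a square root."""
    f, psd = signal.welch(accel, fs=fs, window="hann",
                          nperseg=2048, noverlap=1024)
    return f, np.sqrt(psd)

one_hour = np.random.randn(int(3600 * 125))   # one hour at 125 sps
f, asd = hourly_asd(one_hour)
```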
The median rms_2Hz ≈ 0.18 nm in the vertical direction and ≈ 0.16 nm in the horizontal directions. The two horizontal rms_2Hz distributions are different below the median. It is unclear whether this is an instrumental effect or whether it corresponds to the properties of the seismicity in the MGGL.

4.7. Conclusion: In conclusion, our measurements show that the seismic noise level is very low at the Mátra site. In spite of the spectral differences, the median rms_2Hz values are in agreement with the ET1H data of Table 5, measured by a different device.

An electromagnetic study on the signal attenuation in the ELF range. Since the mirrors of the proposed ET gravitational wave interferometer are going to be stabilized by means of magnetic fields, man-made and natural electromagnetic (EM) signals may also result in contaminated time windows of the gravitational wave observation. Magnetic noise from local sources can be identified based on correlation analysis utilising a network of extremely low frequency (ELF) field observatories. However, global EM components in the frequency range of interest may result in an undesirable noise load. The global thunderstorm activity exciting the Earth-ionosphere cavity continuously maintains a background signal at certain resonance frequencies called Schumann resonances. Installation of the gravitational wave detector in a subsurface location may lead to lower EM noise even in the ELF range, 3-20 Hz. An EM investigation has been carried out in the close vicinity of the MGGL, which aims to provide an estimation of the attenuation of the EM signal with depth in the Mátra andesite rock. ELF range geomagnetic observation stations have been installed at the backfilled end of a 140 m deep cave and in the close vicinity of the surface projection of the subsurface measurement site. For technical reasons, the subsurface and surface recordings have been run with no overlap in time. Therefore an indirect processing method has been applied, utilising the recordings of the very low-noise ELF EM observation site of the Hylaty station as a reference observation. The amplitude ratio between the subsurface and the surface station has been estimated based on the experimental transfer functions of the subsurface-Hylaty and the surface-Hylaty relations.

5.1. The data acquisition system and the observation. The study is based on two mobile observations and observatory data: time series from a subsurface site, a surface station and a reference station. The subsurface measurement location could be accessed from the town of Mátraszentimre by an elevator crossing levels down to a tunnel at 140 m depth. The observation site has been hundreds of meters away from the vertical shaft of the elevator and from all active electronic facilities at that level. Notice that active loads have been in operation during the measurement at other levels of the mine, around 50 m and 20 m distance vertically. The subsurface data acquisition system consists of a Lemi-423 wide band magnetotelluric station and two Lemi-120 induction coils. The coils have not been fitted to a common plane, in order to increase the separation distance and reduce the inductive coupling effect between the coils. The orientation of the coils has been set to the orientation of the tunnel: NNW-SSE and ENE-WSW, with NNW 22° to North. The timing of the subsurface station has been initialised at the surface by means of GPS synchronisation, and the station has been transported to the proposed site in operating status with no sensor connected.
During the whole measurement session (Period I), the internal clock of the station was not synchronized due to the lack of GPS or NTP access. The station was powered by a fully charged 84 Ah battery. The data logger has been installed 40 m away from the sensors along the tunnel. The connector of the coils and the whole instrumentation have been waterproof sealed, since high humidity and leaking water could lead to a short circuit of the connector part. The sampling frequency has been set to 2 kHz. The surface measurement has been carried out after the subsurface measurement (Period II), by means of the same Lemi station that had been set up at the subsurface site before. The coils have been buried 50 cm deep in a common horizontal plane in perpendicular orientation. The separation of the sensors was more than 10 meters, and the Lemi-423 station has been buried 40 m away. The Lemi instruments and the wires have been buried to prevent the harmful activity of forest animals. The orientation of the coils at the surface station has been set to NNE-SSW and WNW-ESE, adapting to the local environmental conditions. The sampling frequency has been set to 2 kHz at this mobile station, too. The Hylaty geophysical station proved to be the optimal ELF observation facility as a reference station, based on data quality and geographic aspects, too. The Hylaty station provides very low noise ELF geomagnetic data at around an 888 Hz sampling rate, which is optimal as a reference for the proposed study.

5.2. Data processing. 5.2.1. Data preparation. The primary goal of the study is to provide an estimation of the EM attenuation of the overlying andesite block in the lower ELF range, especially at the first Schumann resonance frequency. In terms of the measurement configuration, this is identical to the ratio of the absolute values of the empirical amplitude transfer functions of the subsurface and the surface stations at a certain frequency. Since the surface and subsurface observation time periods were not overlapping, a direct comparison of the amplitude spectra was not possible. The concept of the processing is based on the utilization of high-quality, low-noise ELF reference station data covering all the studied time intervals. The indirect method is basically as follows: determination of the Tc_deep/ref power transfer coefficient in Period I, then of Tc_surface/ref in Period II, and finally the estimation of the attenuation from the ratio of the two coefficients. The amplitude attenuation has been derived as the square root of the power transfer coefficient of the subsurface-surface relation. The observations took 4 and 7 days in Periods I and II, respectively. After resampling the three datasets to a common frequency of 800 Hz, both mobile datasets have been rotated into the coordinate system of the reference site. The magnetic components of the subsurface and the surface observation have been transformed by 22° and 55°, respectively. Since the infrastructure of the mine has been operating during the whole observation campaign, the time and frequency domain identification of low magnetic noise intervals formed the basis of the subsequent processing. The variation of the x component in the time and time-frequency domains is plotted in Fig. 22. An intermittent broadband load with maximum at 50 Hz is present throughout the whole observation period, and besides that, a peak appears around 15-17 Hz as well, for several minutes at a time with no regular timing.
These large power load periods are related to the operation phases of the elevator of the mine shaft, while the latter peak originates from the horizontal transportation and the lighting on the active levels of the mine. Both were present during the whole observation period. The low-noise segments have been identified based on the dynamic magnetic spectra covering Period I. Finally, the remaining subsurface data consist of thirty-one 80-100 second long time series of relatively quiet time segments, selected for further processing. The low-noise windows are concentrated in the early afternoon hours of 24 March, 2018, 12:00-16:00 UTC. In order to have a similar relative position between the observation site and the source area, the same temporal pattern of time segments was selected in Period II from the surface magnetic variation time series on 13 April, 2018. From the reference data, the same temporal pattern of time sections was selected for the estimation of the transfer functions in Period I and Period II, too. Since the internal clock of the data logger has a characteristic drift per day, post-synchronisation of the subsurface data was applied first. The method is based on the comparison of the same natural signal recorded at both stations: the cross correlation function of the subsurface and the Hylaty data was estimated on the 31 pairs of selected time series sections. Preparation of the magnetic data sections by the application of a 5.6-25 Hz bandpass Butterworth filter was necessary to attenuate the strong effect of the 50 Hz peak of the power grid on the cross correlation function. The time delay according to the cross correlation was 4 samples, corresponding to 5 ms, for both channels. It has been corrected assuming a linear drift of the internal clock. The total magnetic time series has been correlated in both the subsurface-Hylaty and the surface-Hylaty relations. The cross correlations of the bandpass filtered total variations are overlay plotted for each section pair in Fig. 23, after post-synchronisation. The cross correlated time sections contain a minimum of 64,000 samples each at 800 Hz sampling rate. After the same processing, the cross correlation between the surface registration and the reference data has also been demonstrated, see Fig. 23. The sharp and high peak at zero demonstrates that the subsurface station observed the same natural signal as the reference station. In the frequency domain, the correlated components have also been confirmed by computing the spectral coherence between the magnetic variations of the subsurface-Hylaty and surface-Hylaty datasets, see Fig. 24. The spectral coherence plots indicate definite correlation at the first three Schumann resonance frequencies for both the subsurface-Hylaty and the surface-Hylaty relation. Finally, four datasets have been prepared for the estimation of the transfer functions of the subsurface-Hylaty and surface-Hylaty relations, and ultimately for the determination of the surface-subsurface transformation:
(1) B_I.mine: 31 selected time segments in Period I, subsurface magnetic variation;
(2) B_I.Hylaty: the same 31 time segments in Period I, reference site magnetic variation;
(3) B_II.surface: 31 selected time segments in Period II with the same daily distribution, surface site magnetic variation;
(4) B_II.Hylaty: the same 31 time segments in Period II, reference site magnetic variation.

5.3. Results.
The processing steps introduced above resulted in four datasets, which are accurately synchronised, transformed to the same coordinate system and resampled to a common sampling rate of 800 Hz. The data have thus been prepared for time domain and spectral analysis. Additionally, it has been confirmed that the subsurface observation and the Hylaty reference station observed the same natural signal, whose variation is considered to be homogeneous on the scale of their relative distance of 300 km. The same has been demonstrated for the surface-Hylaty observation. The analysis of the distribution of the spectral power values at individual frequencies demonstrated a non-normal distribution of the spectral power. The mean, the median and the interquartile range (IQR) of the spectral power are plotted in Fig. 25. In the first approach no outlier removal algorithm has been utilised; instead, the simplest robust statistic, the median of the spectral power, has been determined at each frequency value. For the estimation of the signal attenuation with depth, the median of the power spectral density (PSD) has been computed for each of the four observations. All four median PSD functions have been determined from the Fourier transform of 1024-sample-long consecutive time windows, with 512-sample overlap between adjoining segments, using a Hamming window. For an estimation of the power attenuation with depth, the power of the fundamental Schumann spectral component has been determined for all four observations. The following subsection introduces the method which has been applied to obtain an accurate estimate of the fundamental Schumann resonance spectral power peaks of the four observations.

5.3.1. Polynomial baseline approximation. The method to provide an estimate of the ELF range attenuation is based on the utilisation of an advanced background baseline removal technique. In this method the baseline is estimated by fitting a low-order polynomial, but instead of obtaining the parameters by minimising the sum of squares of the residuals, a non-quadratic cost function is adopted. Third order polynomials have been considered to fit the median PSDs in a limited frequency range, see the left four subplots of Fig. 27. All polynomials have been fitted by means of an asymmetric truncated quadratic cost function. The fitted baseline polynomials have been subtracted from the power spectra, plotted in the right four subplots of Fig. 27. The power ratio obtained at 7.81 Hz in the subsurface-Hylaty relation is 1.126, and in the surface-Hylaty relation it is 1.219, which results in a power ratio of 0.924. The square root of the power ratio yields the estimated amplitude attenuation: 96.1%. The ratio of the signal amplitude at the surface and at depth h can generally be computed by writing the exponential decay of the magnetic signal amplitude as A(h) = A(0) e^{-h/δ}. By substituting the empirical amplitude ratio and the 140 m depth of the subsurface station, the characteristic depth of the amplitude attenuation, the so-called skin depth δ, can be estimated. This yields δ = 3520 m. The power decrease of the signal with depth is a consequence of the nonzero electrical conductivity of the overlying andesite rock. An estimate of the bulk resistivity of the andesite block is possible in the simplified approach of a homogeneous half-space approximation. This estimate is considered valid as long as the homogeneous half-space assumption is realistic.
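To make the processing chain concrete, the following Python sketch outlines the indirect attenuation estimate. It is a minimal illustration under stated assumptions: B_I_mine, B_I_Hylaty, B_II_surface and B_II_Hylaty stand for the hypothetical lists of prepared 800 Hz time-series segments, and the asymmetric truncated quadratic baseline fit of the paper is replaced here by an ordinary least-squares polynomial fit, so this is not the authors' implementation.

import numpy as np
from scipy.signal import welch

FS = 800.0      # common sampling rate after resampling [Hz]
F_SR1 = 7.81    # fundamental Schumann resonance frequency [Hz]

def median_psd(segments, fs=FS):
    """Median PSD over a list of time segments, using 1024-sample
    Hamming windows with 512-sample overlap, as described above."""
    out = [welch(seg, fs=fs, window="hamming", nperseg=1024, noverlap=512)
           for seg in segments]
    f = out[0][0]
    return f, np.median([p for _, p in out], axis=0)

def peak_power(f, psd, f0=F_SR1, band=(5.0, 11.0), order=3):
    """Subtract a low-order polynomial baseline fitted in a limited band
    (plain least squares here, not the asymmetric cost of the paper)
    and return the residual power at the peak frequency f0."""
    m = (f >= band[0]) & (f <= band[1])
    resid = psd[m] - np.polyval(np.polyfit(f[m], psd[m], order), f[m])
    return resid[np.argmin(np.abs(f[m] - f0))]

# Hypothetical inputs: the four prepared datasets of 31 segments each.
# tc_deep = peak_power(*median_psd(B_I_mine)) / peak_power(*median_psd(B_I_Hylaty))
# tc_surf = peak_power(*median_psd(B_II_surface)) / peak_power(*median_psd(B_II_Hylaty))
# power_ratio = tc_deep / tc_surf           # 1.126 / 1.219 ~= 0.924
# amp_ratio   = np.sqrt(power_ratio)        # ~= 0.961
# delta       = -140.0 / np.log(amp_ratio)  # skin depth, ~3520 m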
A simple approximation formula connects the skin depth and the bulk electrical resistivity: δ ≈ 503 √(ρ/f), with δ in m, ρ in Ωm and f in Hz, i.e. ρ ≈ f (δ/503)². Substituting the estimated skin-depth value at the fundamental Schumann resonance frequency, a bulk resistivity of 387 Ωm is obtained. The estimated resistivity lies within the wide characteristic range of the electrical resistivity of andesite rock reported in the literature (170-45,000 Ωm).

5.3.2. Time domain investigation. In a recent electromagnetic study, the measured attenuation of the ELF range magnetic signal could have been explained by the assumed distortion effect of iron artifacts of the mine infrastructure. For this reason, a time domain inspection of the bandpass filtered subsurface data has been carried out. The bandpass filter was tuned below the common power grid frequency and below the higher Schumann resonance frequencies in order to obtain comparable hodograph patterns. Based on this, cut-off frequencies of 5.6 Hz and 10 Hz have been set. Time windows of about 100 samples have been picked from the subsurface data and the corresponding reference time series. The magnetic variation of one such time segment is plotted and compared in Fig. 28. The hodograph shows that no strong deviation of the subsurface signal is present, confirming that the iron parts of the mine infrastructure had no recognizable effect on the subsurface observation and on the estimated attenuation rate at the subsurface measurement site.

The noisy environment, the amount of data utilised and the indirect comparison of the signal transformation all increase the scatter of the results and the variance of the power spectra. Considering all these circumstances, the obtained attenuation value confirms that, in less noisy intervals, parallel measurements of the surface and subsurface relation could significantly improve the quality of the data and may provide a more accurate estimate of the amplitude attenuation in the ELF range. The application of outlier removal may further improve the quality of the estimate, too. The main goal of the study was to provide an estimate of the magnetic signal attenuation with depth. The results confirm that even an indirect estimation method can be promising. The estimate of the attenuation coefficient of the overlying andesite block might be made more accurate by means of data observed in an upcoming EM-silent time window, and even more so with the possibility of parallel recordings at the surface and subsurface sites. These two circumstances should improve the accuracy of the estimation significantly.

6. Infrasound measurements

6.1. Introduction. The sensitivity of second generation gravitational-wave detectors at low frequencies is limited fundamentally by the thermal noise of the suspension last stage and of the test masses, by the seismic noise, and by the gravitational gradient noise (so-called Newtonian noise (NN)) caused by changes in the density of the medium near the detector. Seismicity is one source of this type of noise, but changes in the density of air caused by pressure waves also contribute to NN. Inspired by recent theoretical investigations of NN from infrasound below the Earth's surface, we have continued our infrasound measurements in MGGL. For these measurements, a new infrasound microphone (ISM) was developed by the Institute for Nuclear Research of the Hungarian Academy of Sciences (Atomki). The first instrument was installed at MGGL in 2016 with the purpose, on the one hand, of testing its long-term performance and, on the other hand, of characterizing the ambient infrasound noise of the underground site.
In this study, Bowman's low- and median-noise models were used as a reference for our site characterization. According to Ref., the main sources of infrasound noise on the surface of the Earth are microbaroms and the wind. These sources are not present underground, but other sources related to the miners' work and machines produce infrasound. Our results show that ambient infrasound can presumably be suppressed by going underground, but the local noise sources and the geometry of the underground site influence the possible extent of the noise suppression at different frequencies.

6.2. The instrument. The ISM consists of two main parts: an industrially manufactured capacitive sensor and a C-V converter developed by Atomki. The capacitive sensor is a differential pressure sensor with two inlets. One inlet is connected to the environment, while the other is connected to a reference volume. A diaphragm is placed between the two pressure inlets. The diaphragm is deflected when the difference between the environmental and the background pressure changes. The deflection is converted into an electrical signal by the sensor, then the signal is amplified to the −10 to 10 V range. A leak with a diameter of 250 microns connects the background volume to the environment. It makes it possible to slowly equalize the background pressure and the environmental pressure, and hence determines the lowest frequency of the measuring range. The main characteristics of the two versions of the ISM are given in Table 11.

Table 11. The characteristics of the two infrasound sensors, ISM1 and ISM2, developed in ATOMKI.

The drawback of the structure of the instrument is that fast changes in temperature, dT/dt > 2 °C/h, introduce artifacts in the spectrum below 1 Hz. When the frequency of the temperature changes is in the measuring range of the detectors, the pressure change induced in the background volume by the temperature change cannot be distinguished from the pressure change due to environmental infrasound. Therefore a high-resolution temperature sensor is built into the detector.

6.3. Data acquisition system and data processing. The microphones have analog output. In order to sample, digitize and store the measurement data, a low-cost data acquisition system based on a Raspberry Pi 3 model B is added to the sensors. The spectral properties of the infrasound background are characterised by the Pressure Amplitude Spectral Density (PASD), and the analysis of the data is similar to the seismological noise evaluation in Section 3. The collected data were downsampled to 100 Hz, then divided into 163.84 s long segments. For each segment, the PASD was computed. For each day, the 5th, 50th and 95th percentiles of the corresponding PASD data were determined.

6.4. Comparative measurements with the MB3 microbarometer. In order to compare the performance of ISM1 with that of MB3 microbarometers, comparative measurements were performed on 2018-04-26 at Infrasound station no. 3 of the Piszkéstető observatory in the Mátra mountain range, Hungary. This station is part of the Hungarian National Infrasound Network, a permanent infrasound network operated by the Kövesligethy Radó Seismological Observatory of the Hungarian Academy of Sciences. At Piszkéstető, the MB3 microbarometers are placed in a reservoir, which is partly covered by soil. The MB3 is equipped with a wind reducing system of long pipes attached to the microbarometer.
During our measurements, the pipes were detached, and the MB3 and ISM1 were placed next to each other in order to detect, as far as possible, the same pressure. The measurement lasted for three hours. Our aim was to calibrate our instrument, that is, to determine the real PASD values in Pa/√Hz from the PASD_ISM1 given in ADC units/√Hz computed from the measurement data. During the data processing, the raw signals of both MB3 and ISM1 were cut into 128-second-long segments, and the PASD_MB3 and PASD_ISM1 of each segment were computed. Then the values of the PASD_MB3 and PASD_ISM1 pairs from the same time interval, corresponding to a given frequency, were collected. The number of pairs corresponding to a given frequency is equal to the number of segments used for calibration. For a given frequency, the ratios of the PASD_MB3 and PASD_ISM1 values were computed (C = PASD_MB3/PASD_ISM1). The distribution of these ratios corresponding to a given frequency can be approximated by a normal distribution. The standard deviation (σ_C) of the distribution was estimated using the median absolute deviation (for a normal distribution, σ ≈ 1.4826 × MAD). Two standard deviations were chosen to express the uncertainty of the ratio in Fig. 29.

6.6. Relationship between seismic and infrasound noise in the MGGL. In order to examine the relation between the signal of the seismometer and the signal of ISM1, we computed the magnitude squared coherence estimate for each pair of 128 s long data segments from the deep measurement campaign of MGGL, using Welch's method. In the case of the ET1H seismometer's signal, we treated the three directions, East, North and Z (vertical), separately. This way, we obtained three datasets for the coherence: C_{vE,p}, C_{vN,p} and C_{vZ,p}. We determined the average of the coherence values at each frequency for the three datasets. A peak between 3 and 4 Hz can be observed in each plot of Fig. 31, which indicates that a common source has an effect on both the seismometer and ISM1. There was no similar coherence with the farther GU02 seismometer. The drainage system or the ventilation system may be responsible for the peak. We will confirm or refute this hypothesis with the forthcoming experiments in MGGL.

7. Measurements of the inhomogeneity and the rock density at large scale with cosmic muon tracking

A novel cosmic muon tomography technique was used to explore the precise, large-scale density structure of the Mátra mountain above the MGGL within the Gyöngyösoroszi mine. The idea is to measure the angular distribution of the flux of the highly penetrating muon component of cosmic rays, which are created in the upper atmosphere and reach the surface of the Earth. This technique has been applied successfully by our group in various cases such as cave research, cosmic background measurements for underground laboratories, civil engineering, and volcano muography. To reach full 2π coverage, the muon flux measurements were continued using a portable tracking system called the Muontomograph. This has been developed by the Wigner Research Centre for Physics and is described in detail in Ref. Based on the Run-0 experience, we made modifications to the Muontomograph in order to get better performance in the MGGL laboratory for Run-1. The position of the MGGL and the acceptance directions of the detector relative to the surface topology are shown in Fig. 32. We adjusted the trigger levels for higher efficiency, and a noisy chamber was also removed, thus the number of chambers was reduced to five.
Since we had covered the zenith and two 90°-tilted directions in Run-0, the four Run-1 measurements were all tilted 45° from the zenith. With this choice we could cover the upper hemisphere fully, without any missing direction. The first measurement was directed to 20.5° for 77 days, then we continued with the 110.5°, 200.5° and 290.5° directions for 62 days, 77 days and 64 days, respectively. Further details of the measurements are summarized in Table 12. Data were monitored and downloaded continuously from the detector via an Ethernet connection during the data taking period in MGGL, for online checking of the data quality. Comparing the Run-0 and Run-1 data in Table 12, one can see that, although the event number dropped by an order of magnitude, the number of tracks increased relative to the number of events. This means that the efficiency became better, since noise and fake events were well suppressed in Run-1.

7.2. Results of the rock inhomogeneity measurements. The muon flux distribution can be obtained after applying a track reconstruction algorithm and merging the maps according to the partial overlap and geometry normalization. The Run-0 and Run-1 flux maps are shown in the left and right panels of Fig. 33, respectively. The muon flux distribution is plotted by color-scale contours as a function of azimuth and zenith angles. The cosmic muon (track) rate was 0.005-0.02 Hz for both runs, which is sufficient to provide a flux measurement with 5−50% statistical errors (depending on the zenith angle). The measured flux has a maximum value of 0.7 m⁻² sr⁻¹ s⁻¹ at 15° zenith angle to the West. In Fig. 33 the white 'triangles' at 20° zenith angle (left panel) and the 'dial' at the zenith (right panel) represent the directions which are outside the acceptance of the given run. The detector-to-surface distance is indicated with dark contour lines in Fig. 33. For Run-0, we used Shuttle Radar Topography Mission (SRTM) satellite data for the elevation map, with an estimated relative error of 10% (10 m at the zenith). However, comparing the SRTM model with other available elevation data, the accuracy of the SRTM model turned out to be much poorer than previously expected, likely because of the varying height of the vegetation. Thus the elevation contours obtained from the National Hungarian Grid (EOV) have been used instead. Applying these new detector-to-surface data gives a better correlation for both Run-0 and Run-1. Merging all the data of Run-0 and Run-1, the muon flux distribution can be obtained, which is plotted in Fig. 34 up to a zenith angle of 60°. The correlation between the muon flux map and the detector-to-surface distance contour lines is excellent. We found fluctuations on the order of ±0.05 m⁻² sr⁻¹ s⁻¹, as was obtained in Run-0 as well. One can observe that the surface distance and the muon flux match each other reasonably in Fig. 34. Combining this with the acceptance plotted on the surface map in Fig. 32, the maximal flux matches the shortest distance-to-surface directions on the hill's western slopes, while to the East the muon flux drops at large zenith angles with the longest travel lengths. The collected ∼340,000 tracks of the muon flux measurement provided high enough statistics to estimate the rock thickness, based on the muon absorption model. This model has been used successfully in other, similar measurements down to depths of a few hundred meters. In Fig. 35 the angular distribution of the rock density is presented.
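As a schematic of how a density value can be derived for each viewing direction, the sketch below inverts a generic flux-versus-opacity curve. This is a minimal illustration, not the actual zenith-angle-dependent absorption model of the reference: the function flux_vs_opacity is a hypothetical placeholder for such a model.

import numpy as np

def density_from_flux(measured_flux, path_len_m, flux_vs_opacity):
    """Schematic inversion of a muon absorption model for one viewing
    direction. flux_vs_opacity is a hypothetical, monotonically
    decreasing model Phi(X) giving the surviving muon flux for an
    opacity X [kg/m^2] along the line of sight.

    Returns the average rock density in kg/m^3 (divide by 1000 for
    the kg dm^-3 units used in the text)."""
    X = np.linspace(1e4, 1e6, 2000)   # opacity grid [kg/m^2]
    phi = flux_vs_opacity(X)          # model flux on the grid
    # invert by interpolation; np.interp needs increasing x, so flip
    X_meas = np.interp(measured_flux, phi[::-1], X[::-1])
    return X_meas / path_len_m

Applied per direction, with the EOV-based detector-to-surface distances as path lengths, this kind of inversion yields a density map like the one in Fig. 35.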
The average of the rock density is 2.6 ± 0.1 kg dm⁻³, which is the density of the andesite-based host rock. The data do not show any large-scale density inhomogeneities or cavities above the MGGL. However, a minor, ∼0.2 kg dm⁻³, positive density deviation ridge is observed along the 40°−50° zenith angles from the southwest to the northwest. This may correspond to several percent of metallic ore in the unexplored part of the Gyöngyösoroszi mine. A negative, ∼0.5 kg dm⁻³ density anomaly valley was also observed at the lowest, 50°−60° zenith angles from the south to the north. This is the direction of the known tunnels and caverns of the mine, although here the uncertainty of the measurement is also significant at large zenith angles.

Figure 35. The angular distribution of the density based on the Run-0 and Run-1 measurements from the MGGL, plotted as a function of azimuth and zenith angles from the detector position. Color-scale contours show the rock densities, dark contour lines show the detector-to-surface distance in meters.

7.3. Conclusions. Based on the muon-flux measurements, the improved detector-to-surface data, and the muon absorption model, we can state that the rock density map correlates well with the unexplored and mined areas above the MGGL. This unique measurement supports placing the Einstein Telescope at the Gyöngyösoroszi mine, since the lack of large-scale density inhomogeneities would reduce the higher-order tensor-like gravitational corrections during the measurements.

Summary

The Mátra Gravitational and Geophysical Laboratory was established to investigate, on a long-term basis, the conditions and requirements of next generation gravitational wave detectors in the case of underground construction and operation. More generally, the aim was to measure various geological and rheological properties of the base rock, in addition to testing experimentally novel theoretical approaches to noise penetration and suppression. The Mátra mountain range is also studied as a possible site of the planned Einstein Telescope. Our first investigations were published in Ref., including the technical details of the measurements. In this paper we have summarized the first two years of measurements of the laboratory by different methods: geophysical environment, electromagnetic attenuation, infrasound noise, cosmic muon tomography of the surrounding rock mass, and long-term seismic noise. The timeline of the data taking periods of the various measurements is shown in Fig. 36. Our topical results are:

Geophysical environment: A short survey of the geological and seismological conditions in the Mátra mountains reveals a homogeneous composition of hard andesite rock with low seismic activity. We performed rock mechanics experiments and showed that a typical grey andesite from the vicinity of the laboratory is not ideally elastic. We characterised and modeled the deviation from ideal elasticity and determined the corresponding rheological parameters (see Tables 3 and 4). The independent measurements of the dynamic elastic moduli verify our novel rheological model, whose properties are important when calculating the Newtonian noise from rock deformations.

Electromagnetic attenuation: With EM noise measurements we estimated the electromagnetic attenuation by the andesite rock mass in the lower ELF range, especially at the frequency of the first Schumann resonance component. Comparing the data with those of the external surface reference station, we obtained a skin depth of 3520 m.
For the bulk resistivity at the Schumann resonance frequency, 387 Ωm was obtained. This value, supporting the validity of this measurement, fits well within the literature range for volcanic andesite rocks (170-45,000 Ωm).

Infrasound noise: Using the custom-made infrasound detector designed by MTA Atomki, we measured the pressure amplitude spectral density in the MGGL (see Fig. 30). This is relatively noisy in the frequency range 1−7 Hz. The recorded infrasound noise was found to be in accordance with the seismic noise at 3-4 Hz in the Laboratory.

Cosmic muon tomography: The 404-day-long measurement with the Muontomograph, merging the Run-0 and Run-1 data, allowed us to map the rock density and its inhomogeneities above the MGGL at large scale (see Fig. 35). We verified this novel measurement technique by obtaining the typical andesite rock density (2.6 ± 0.1 kg dm⁻³). According to these investigations, we found no large-scale density inhomogeneities (≤ 0.2 kg dm⁻³) in the rock mass.

Seismic noise: The long-term seismic noise was registered by two seismometers inside the MGGL, at −88 m depth, for almost the whole two-year period. In parallel, as a cross check, a third seismometer measured the seismic noise for a two-week period. This instrument was located farther from the MGGL in the mine, at −404 m depth. The seismic noise measurements were found to be consistent. In the MGGL we had the opportunity to combine data taken at the same position, and in addition we could test the insights gained from the noise analysis. According to our experience, the proper evaluation of long-term data requires some refinements of the methods applied in the ET Design Report. Our suggestions have been published in Ref., and in the present study we evaluated the data accordingly. Therefore we also calculated and show here the median of the spectra and the integrated noise measures rms_2-10Hz and rms_1-10Hz derived from the median. The analysis of the data from the Guralp ET1H and GU02 stations is based on these characteristics, and the evaluation of the data from the WARS station also took into account the problems of the more traditional methods. We found that the long-term data do not show yearly changes in the median, but the intensity of the noisy periods reflects the changes in the activity inside the mine (see Fig. 10 and Tab. 7). The seasonal changes show a minimal noise level in the late spring and early summer period (see Fig. 11). The comparison of the noise levels at the shallower and the deeper locations revealed that the two-week measurement is unexpectedly representative of the whole Run-0 and Run-1 period (see Fig. 13). We also compared working and night periods in the MGGL in order to estimate an achievable minimum noise level (see Fig. 9). Considering our whole analysis, the median spectrum of the night noise at −404 m in the mine, shown in Fig. 37, is convincingly representative in this respect. Here the percentiles are calculated from the spectra of the 50 s long periods of data. One can see that in 90% of these periods the noise level is below the Black Forest line. In general, the noise levels of our long-term measurements in the shallower MGGL, and also of the two-week data from the deeper location, are remarkably similar to the noise levels of the previous study; the mode rms_2Hz values are almost the same. As we have already mentioned, the median rms values are preferable, because they are more stable and less sensitive to the uncertainties in the data and in the evaluation.
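For illustration, such an integrated noise measure can be obtained from a median spectrum by integrating the squared amplitude spectral density over the band. The short sketch below assumes a displacement ASD as input; it is a minimal reading of the definition, not the evaluation pipeline itself.

import numpy as np

def band_rms(freq, asd, f_lo=2.0, f_hi=10.0):
    """Band-limited rms from an amplitude spectral density
    (e.g. displacement ASD in m/sqrt(Hz)):
    rms^2 = integral of ASD^2 df over [f_lo, f_hi]."""
    m = (freq >= f_lo) & (freq <= f_hi)
    return np.sqrt(np.trapz(asd[m] ** 2, freq[m]))

# e.g. with the median spectrum of a station:
# rms_2_10 = band_rms(f, median_asd)             # rms_{2-10Hz}
# rms_1_10 = band_rms(f, median_asd, f_lo=1.0)   # rms_{1-10Hz}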
The ET1H and WARS median rms_2Hz values are close to each other. However, from the point of view of operational requirements, a higher percentile limit is a better estimate of the sensitivity of the detector in continuous operation. Therefore, the 90th percentile rms values, given in Tab. 13, estimate the noise level for a continuous operation of the Einstein Telescope. According to the present study, a reliable and comparable characterisation of long-term seismic noise data requires the evaluation of various noise measures with a uniform methodology. The separation of reducible and irreducible cultural noise, the seasonal changes and the attenuation with depth are important aspects of a site description. It is also very important to publish the raw seismological data for open evaluation and comparison. For the ET1H station of the MGGL and for the GU02 station these are available at .

Table 13. The rms values of the 90th percentile in the E direction for different frequency ranges, for the MGGL seismometer ET1H and for the deeper location GU02. The first two columns are calculated from the 90th percentile of the 300 s data of the two-week interval, the last one from the average of the daily 90th percentiles.

Conclusions

During the Run-0 and Run-1 periods of the MGGL in 2016-2018 we performed long-term monitoring of the Mátra mountain range as a possible underground site of the planned Einstein Telescope. We used various standard methods in parallel with novel approaches for investigating the geophysical environment, the electromagnetic attenuation, the infrasound noise, the cosmic muon tomography of the surrounding rock mass, and the long-term seismic noise. The collected data enabled us to cross check and compare standard measurements and techniques applied in earlier investigations with the new ones. Alongside this, the geological and rheological properties of the base rock were summarized in this paper, in addition to the analysis of the noise background relevant for a next generation, underground-based gravitational wave detector, especially in the low frequency regime at 1-10 Hz. We strongly believe that applying our results to the site selection will significantly improve the signal-to-noise ratio in the multi-messenger astrophysics era. Our conclusion is that for the background noise analysis it is necessary to perform long-term data taking and to apply the state-of-the-art techniques presented here.
/// <reference path="../../typings/_custom.d.ts" />

import {bind, Inject, Injectable} from 'angular2/di';

import {GooglePlus} from './GooglePlusService';

// Plain data shape of a Google account profile.
interface IAccount {
  id:string;
  user:string;
  userName:string;
  profilePicture:string;
}

// Account model constructed from a raw profile object.
export class Account implements IAccount {
  id:string;
  user:string;
  userName:string;
  profilePicture:string;

  constructor(account:IAccount) {
    this.id = account.id;
    this.user = account.user;
    this.userName = account.userName;
    this.profilePicture = account.profilePicture;
  }
}

// Holds the authenticated accounts and exposes lookup/add operations.
@Injectable()
export class AccountService {
  private accounts:Array<Account>;

  constructor(private Plus:GooglePlus) {
    this.accounts = [];
  }

  // Overloaded getter: with no argument returns all accounts,
  // with a user id returns the matching account.
  get():Array<Account>;
  get(user?:string):Account;
  get(user?:string):any {
    return (user) ?
      this.accounts.find((account) => account.user === user) :
      this.accounts;
  }

  // Runs the Google+ authentication flow and caches the resulting account.
  add() {
    return this.Plus.authenticate()
      .map((profile:IAccount) => {
        let account = new Account(profile);
        this.accounts.push(account);
        return account;
      });
  }
}

// DI bindings used when bootstrapping the application.
export var accountInjectables: Array<any> = [
  bind(AccountService).toClass(AccountService)
];
package com.java.demo.widgets;

/*
 Custom BottomNavigationBar
 */

import android.content.Context;
import android.util.AttributeSet;

import com.ashokvarma.bottomnavigation.BottomNavigationBar;
import com.ashokvarma.bottomnavigation.BottomNavigationItem;
import com.ashokvarma.bottomnavigation.ShapeBadgeItem;
import com.ashokvarma.bottomnavigation.TextBadgeItem;
import com.java.demo.R;

public class BottomNavBar extends BottomNavigationBar {
    private TextBadgeItem mCartBadge;
    private ShapeBadgeItem mMsgBadge;
    private BottomNavigationItem homeItem;
    private BottomNavigationItem categoryItem;
    private BottomNavigationItem cartItem;
    private BottomNavigationItem msgItem;
    private BottomNavigationItem userItem;

    public BottomNavBar(Context context) {
        this(context, null);
    }

    public BottomNavBar(Context context, AttributeSet attrs) {
        super(context, attrs);
        init();
    }

    private void init() {
        // Home
        homeItem = new BottomNavigationItem(R.drawable.btn_nav_home_press, "Home")
                .setInactiveIconResource(R.drawable.btn_nav_home_normal)
                .setActiveColorResource(R.color.colorPrimary)
                .setInActiveColorResource(R.color.colorAccent);
        // Category
        categoryItem = new BottomNavigationItem(R.drawable.btn_nav_category_press, "Category")
                .setInactiveIconResource(R.drawable.btn_nav_category_normal)
                .setActiveColorResource(R.color.colorPrimary)
                .setInActiveColorResource(R.color.colorAccent);
        // Cart (orders)
        cartItem = new BottomNavigationItem(R.drawable.btn_nav_cart_press, "Cart")
                .setInactiveIconResource(R.drawable.btn_nav_cart_normal)
                .setActiveColorResource(R.color.colorPrimary)
                .setInActiveColorResource(R.color.colorAccent);
//        mCartBadge = new TextBadgeItem();
//        cartItem.setBadgeItem(mCartBadge);
        // Messages
        msgItem = new BottomNavigationItem(R.drawable.btn_nav_msg_press, "Msg")
                .setInactiveIconResource(R.drawable.btn_nav_msg_normal)
                .setActiveColorResource(R.color.colorPrimary)
                .setInActiveColorResource(R.color.colorAccent);
//        mMsgBadge = new ShapeBadgeItem();
//        mMsgBadge.setShape(ShapeBadgeItem.SHAPE_OVAL);
//        msgItem.setBadgeItem(mMsgBadge);
        // User
        userItem = new BottomNavigationItem(R.drawable.btn_nav_user_press, "User")
                .setInactiveIconResource(R.drawable.btn_nav_user_normal)
                .setActiveColorResource(R.color.colorPrimary)
                .setInActiveColorResource(R.color.colorAccent);
        // Fixed mode: every item keeps its label visible, even when not selected
        setMode(BottomNavigationBar.MODE_FIXED);
        setBackgroundStyle(BottomNavigationBar.BACKGROUND_STYLE_STATIC);
        setBarBackgroundColor("#ffffff");
        addItem(homeItem)
                .addItem(categoryItem)
                .addItem(cartItem)
                .addItem(msgItem)
                .addItem(userItem)
                .setFirstSelectedPosition(0)
                .initialise();
    }
}
Finance Minister Mathias Cormann wants to help families get ahead and businesses become more successful, while ensuring energy is reliable and as affordable as possible. But a ReachTEL poll of the electorates of Prime Minister Malcolm Turnbull and his predecessor Tony Abbott, reported in Fairfax Media, found many voters do not believe the government is going the right way about it with its planned business tax cuts, and want more money spent on renewable energy. More than half of voters in Wentworth and Warringah think the present 30 per cent business tax rate is about right or too low, while six in 10 favour policies closer to Labor's 50 per cent renewable energy target. The government has indicated it will use the autumn session of parliament to push ahead with its 10-year business tax plan to cut the rate from 30 per cent to 25 per cent. The first leg would lower the rate to 27.5 per cent for businesses with a turnover of up to $10 million. "We are focused as a government on families getting ahead and businesses being the most successful they can be and hire more Australians," Senator Cormann told ABC radio on Tuesday. "We make no apologies for that." © AAP 2019
/** * Create a jid from pre-vetted and normalized parts. * * If length arguments are 0 and their associated strings are non-NULL, strings * are assumed to be null-terminated. */ static bool _create_jid_from_normalized_parts( jw_jid_ctx *ctx, const uint8_t *normalizedLocalpart, size_t normalizedLocalpartLen, const uint8_t *normalizedDomain, size_t normalizedDomainLen, const uint8_t *normalizedResource, size_t normalizedResourceLen, jw_jid **jid, jw_err *err) { JW_LOG_TRACE_FUNCTION_NO_ARGS; assert(NULL != ctx); assert(NULL != jid); assert(NULL != normalizedDomain); assert(0 < normalizedDomainLen); size_t jidstrLen = 0; jw_jid_part componentTypeMask = 0; if (NULL != normalizedLocalpart) { assert(0 < normalizedLocalpartLen); jidstrLen += normalizedLocalpartLen + 1; componentTypeMask |= JW_JID_LOCALPART; } jidstrLen += normalizedDomainLen; componentTypeMask |= JW_JID_DOMAIN; if (NULL != normalizedResource) { assert(0 < normalizedResourceLen); jidstrLen += normalizedResourceLen + 1; componentTypeMask |= JW_JID_RESOURCE; } jw_jid *allocatedJid = jw_data_malloc(sizeof(_jw_jid)+jidstrLen+1); if (NULL == allocatedJid) { JABBERWERX_ERROR(err, JW_ERR_NO_MEMORY); return false; } uint8_t * curPtr = allocatedJid->str; if (NULL != normalizedLocalpart) { memcpy(curPtr, normalizedLocalpart, normalizedLocalpartLen); curPtr += normalizedLocalpartLen; *curPtr = '@'; ++curPtr; } memcpy(curPtr, normalizedDomain, normalizedDomainLen); curPtr += normalizedDomainLen; if (NULL != normalizedResource) { *curPtr = '/'; ++curPtr; memcpy(curPtr, normalizedResource, normalizedResourceLen); curPtr += normalizedResourceLen; } *curPtr = '\0'; return _create_jid(ctx, allocatedJid, allocatedJid->str, jidstrLen, componentTypeMask, normalizedLocalpartLen, normalizedDomainLen, normalizedResourceLen, jid, err); }
package view;

import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.util.AttributeSet;
import android.util.TypedValue;
import android.widget.ProgressBar;

import com.hm.horizontalprogressbar.R;

/**
 * Created by Administrator on 2017/2/28 0028.
 */

public class HorizonalProgessBar extends ProgressBar {

    /**
     * Default widget attribute values
     */
    /**
     * Default text size
     */
    private static final int DEFAULT_TEXT_SIZE = 10;
    /**
     * Default offset between text and bar
     */
    private static final int DEFAULT_TEXT_OFFSET = 10;
    /**
     * Default text color
     */
    private static final int DEFAULT_TEXT_COLOR = 0xFFD3D6DA;
    /**
     * Default color of the unreached bar
     */
    private static final int DEFAULT_UNREACH_COLOR = 0xFF00FF00;
    /**
     * Default height of the unreached bar
     */
    private static final int DEFAULT_UNREACH_HEIGHT = 2;
    /**
     * Default color of the reached bar
     */
    private static final int DEFAULT_REACH_COLOR = 0xFF895412;
    /**
     * Default height of the reached bar
     */
    private static final int DEFAULT_REACH_HEIGHT = 2;

    protected int mTextSize = sp2px(DEFAULT_TEXT_SIZE);
    protected int mTextColor = DEFAULT_TEXT_COLOR;
    protected int mTextOffSet = dp2px(DEFAULT_TEXT_OFFSET);
    protected int mReachColor = DEFAULT_REACH_COLOR;
    protected int mReachHeight = dp2px(DEFAULT_REACH_HEIGHT);
    protected int mUnReachColor = DEFAULT_UNREACH_COLOR;
    protected int mUnReachHeight = dp2px(DEFAULT_UNREACH_HEIGHT);

    protected int mRealWidth = 0;

    protected Paint mPaint = new Paint();

    public HorizonalProgessBar(Context context) {
        this(context, null);
    }

    public HorizonalProgessBar(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public HorizonalProgessBar(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        initAttr(attrs);
        mPaint.setTextSize(mTextSize);
    }

    @Override
    protected synchronized void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        int widthSize = MeasureSpec.getSize(widthMeasureSpec);
        int heightSize = measureHeight(heightMeasureSpec);
        setMeasuredDimension(widthSize, heightSize);
        mRealWidth = getMeasuredWidth() - getPaddingRight() - getPaddingLeft();
    }

    @Override
    protected synchronized void onDraw(Canvas canvas) {
        canvas.save();
        canvas.translate(getPaddingLeft(), getHeight() / 2);
        float radio = getProgress() * 1.0f / getMax();
        float progressX = radio * mRealWidth;
        boolean unNeedDrawUnReach = false;
        String textStr = getProgress() + "%";
        int textWidth = (int) mPaint.measureText(textStr);
        if (progressX + textWidth > mRealWidth) {
            progressX = mRealWidth - textWidth;
            unNeedDrawUnReach = true;
        }
        float endX = progressX - mTextOffSet / 2;
        // Draw the first (reached) line
        if (endX > 0) {
            mPaint.setColor(mReachColor);
            mPaint.setStrokeWidth(mReachHeight);
            canvas.drawLine(0, 0, endX, 0, mPaint);
        }
        // Draw the progress text
        mPaint.setColor(mTextColor);
        int textY = (int) (-(mPaint.ascent() + mPaint.descent()) / 2);
        canvas.drawText(textStr, progressX, textY, mPaint);
        // Draw the second (unreached) line
        if (!unNeedDrawUnReach){
            int startX = (int) (progressX + textWidth + mTextOffSet/2);
            mPaint.setColor(mUnReachColor);
            mPaint.setStrokeWidth(mUnReachHeight);
            canvas.drawLine(startX,0,mRealWidth,0,mPaint);
        }
        canvas.restore();
    }

    private int measureHeight(int heightMeasureSpec) {
        int resultSize = 0;
        int heightMode = MeasureSpec.getMode(heightMeasureSpec);
        int heightSize = MeasureSpec.getSize(heightMeasureSpec);
        if (heightMode == MeasureSpec.EXACTLY) {
            resultSize = heightSize;
        } else {
            int textHeight = (int) (mPaint.ascent() - mPaint.descent());
            resultSize = getPaddingBottom() + getPaddingTop()
                    + Math.max(Math.max(mUnReachHeight, mReachHeight), Math.abs(textHeight));
            if (heightMode == MeasureSpec.AT_MOST) {
                resultSize = Math.min(heightSize, resultSize);
            }
        }
        return resultSize;
    }

    protected int dp2px(int dpValue) {
        return (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, dpValue, getResources().getDisplayMetrics());
    }

    protected int sp2px(int spValue) {
        return (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_SP, spValue, getResources().getDisplayMetrics());
    }

    private void initAttr(AttributeSet attrs) {
        TypedArray ta = getContext().obtainStyledAttributes(attrs, R.styleable.HorizonalProgessBar);
        mTextSize = (int) ta.getDimension(R.styleable.HorizonalProgessBar_progressbar_text_size, mTextSize);
        mTextColor = ta.getColor(R.styleable.HorizonalProgessBar_progressbar_text_color, mTextColor);
        mTextOffSet = (int) ta.getDimension(R.styleable.HorizonalProgessBar_progressbar_text_offset, mTextOffSet);
        mReachColor = ta.getColor(R.styleable.HorizonalProgessBar_progressbar_reach_color, mReachColor);
        mReachHeight = (int) ta.getDimension(R.styleable.HorizonalProgessBar_progressbar_reach_height, mReachHeight);
        mUnReachColor = ta.getColor(R.styleable.HorizonalProgessBar_progressbar_unreach_color, mUnReachColor);
        mUnReachHeight = (int) ta.getDimension(R.styleable.HorizonalProgessBar_progressbar_unreach_height, mUnReachHeight);
        ta.recycle();
    }
}
/** * Call it to do the parse work. */ public void parse() { Debugger.logI("Parse begin..."); if (null == mCursor) { Debugger.logW("Curosr is null."); return; } onParseStart(); try { while (mCursor.moveToNext()) { ++mPosition; onNewRow(mCursor); if (isBlockReady()) { onBlockReady(); } } } catch (final IllegalStateException e) { Debugger.logE(new Object[] {}, ">>>>>>>>>>Catched IllegalStateException!"); onBlockReadyForEx(); } finally { onParseOver(); Debugger.logI("Parse finished."); } }
/* * Define an attribute, optionally with an interface (a locator list) * and a set of attribute-dependencies. * * Attribute dependencies MAY NOT be interface attributes. * * Since an empty locator list is logically different from "no interface", * all locator lists include a dummy head node, which we discard here. */ int defattr(const char *name, struct nvlist *locs, struct nvlist *deps, int devclass) { struct attr *a, *dep; struct nvlist *nv; int len; if (locs != NULL && devclass) panic("defattr(%s): locators and devclass", name); if (deps != NULL && devclass) panic("defattr(%s): dependencies and devclass", name); for (nv = deps; nv != NULL; nv = nv->nv_next) { dep = nv->nv_ptr; if (dep->a_iattr) { error("`%s' dependency `%s' is an interface attribute", name, dep->a_name); return (1); } } a = ecalloc(1, sizeof *a); if (ht_insert(attrtab, name, a)) { free(a); error("attribute `%s' already defined", name); nvfreel(locs); return (1); } a->a_name = name; if (locs != NULL) { a->a_iattr = 1; a->a_locs = locs->nv_next; nvfree(locs); } else { a->a_iattr = 0; a->a_locs = NULL; } if (devclass) { char *classenum = alloca(strlen(name) + 4), *cp; int errored = 0; strcpy(classenum, "DV_"); strcat(classenum, name); for (cp = classenum + 3; *cp; cp++) { if (!errored && (!isalnum(*cp) || (isalpha(*cp) && !islower(*cp)))) { error("device class names must be lower-case alphanumeric characters"); errored = 1; } *cp = toupper(*cp); } a->a_devclass = intern(classenum); } else a->a_devclass = NULL; len = 0; for (nv = a->a_locs; nv != NULL; nv = nv->nv_next) len++; a->a_loclen = len; a->a_devs = NULL; a->a_refs = NULL; a->a_deps = deps; a->a_expanding = 0; expandattr(a, NULL); return (0); }
import torch
import cv2

from yolact import Yolact
# -*- coding:utf-8 -*- # filename: units/api.py # by スノル from locals import * from .has_unit import (BaseHasUnit, HasUnit, HasUnitComplex) from .bases import (Unit, genUnit) from .value_type import (ValueUnit, genValue) V = genValue
def send(self, address, amount, parent_window): dest_address = self.fetch_destination(address) if dest_address is None or not self.wallet.is_valid(dest_address): QMessageBox.warning(parent_window, _('Error'), _('Invalid Bitcoin Address') + ':\n' + address, _('OK')) return False convert_amount = lambda amount: \ int(D(unicode(amount)) * bitcoin(1)) amount = convert_amount(amount) if self.wallet.use_encryption: password_dialog = PasswordDialog(parent_window) password = password_dialog.run() if not password: return else: password = None fee = 0 if amount < bitcoin(1) / 10: fee = bitcoin(1) / 1000 try: tx = self.wallet.mktx([(dest_address, amount)], "", password, fee) except BaseException as error: QMessageBox.warning(parent_window, _('Error'), str(error), _('OK')) return False h = self.wallet.send_tx(tx) self.waiting_dialog(lambda: False if self.wallet.tx_event.isSet() else _("Sending transaction, please wait...")) status, message = self.wallet.receive_tx(h) if not status: import tempfile dumpf = tempfile.NamedTemporaryFile(delete=False) dumpf.write(tx) dumpf.close() print "Dumped error tx to", dumpf.name QMessageBox.warning(parent_window, _('Error'), message, _('OK')) return False QMessageBox.information(parent_window, '', _('Your transaction has been sent.') + '\n' + message, _('OK')) return True
/* $Id: mode.c,v 1.28 2008/11/06 20:45:12 pekberg Exp $ ****************************************************************************** LibGGI Mode management. Copyright (C) 1997 <NAME> [<EMAIL>] Copyright (C) 1998 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ****************************************************************************** */ #include "config.h" #include <ggi/internal/gg_replace.h> #include <ggi/internal/ggi.h> #include <ggi/internal/ggi_debug.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> /* Static variables */ static ggi_mode _ggiDefaultMode = { GGI_AUTO, /* frames */ {GGI_AUTO,GGI_AUTO}, /* visible size */ {GGI_AUTO,GGI_AUTO}, /* virtual size */ {GGI_AUTO,GGI_AUTO}, /* size in mm (don't care) */ GT_AUTO, /* graphtype */ {GGI_AUTO,GGI_AUTO}, /* dots per pixel */ /* 0 */ }; void _ggiSetDefaultMode(const char *str) { ggiParseMode(str, &_ggiDefaultMode); } static void _ggiCheck4Defaults(ggi_mode *tm) { #define DOCHECK(what) \ if (tm->what == GGI_AUTO) tm->what=_ggiDefaultMode.what DOCHECK(frames); DOCHECK(visible.x); DOCHECK(visible.y); DOCHECK(virt.x); DOCHECK(virt.y); DOCHECK(dpp.x); DOCHECK(dpp.y); DOCHECK(graphtype); #undef DOCHECK } int ggiSetMode(ggi_visual_t v, ggi_mode *tm) { struct ggi_visual *vis = GGI_VISUAL(v); int retval; ggi_mode oldmode; APP_ASSERT(vis != NULL, "ggiSetMode: vis == NULL"); APP_ASSERT(tm != NULL, "ggiSetMode: tm == NULL"); #ifdef DEBUG if ((_ggiDebug & DEBUG_CORE) || (_ggiDebug & DEBUG_MODE)) { fprintf(stderr, "LibGGI: ggiSetMode(%p, ", (void *)vis); ggiFPrintMode(stderr, tm); fprintf(stderr, ") called\n"); } #endif ggLock(vis->mutex); DPRINT_MODE("ggiSetMode: trying (vis %dx%d virt %dx%d)\n", tm->visible.x,tm->visible.y,tm->virt.x,tm->virt.y); _ggiCheck4Defaults(tm); memcpy(&oldmode, tm, sizeof(ggi_mode)); DPRINT_MODE("ggiSetMode: trying2 (vis %dx%d virt %dx%d)\n", tm->visible.x,tm->visible.y,tm->virt.x,tm->virt.y); DPRINT_MODE("ggiSetMode: calling %p\n",vis->opdisplay->setmode); retval=vis->opdisplay->setmode(vis,tm); if (retval < 0) { fprintf(stderr, "LibGGI: Failed to set mode: "); ggiFPrintMode(stderr, &oldmode); fprintf(stderr, "\n"); } else { int i; ggi_color col; DPRINT_CORE("ggiSetMode: set to frame 0, origin = {0,0}\n"); ggiSetDisplayFrame(v, 0); ggiSetReadFrame(v, 0); ggiSetOrigin(v,0,0); DPRINT_CORE("ggiSetMode: set GC\n"); /* Set clipping rectangle to the full (virtual) screen */ ggiSetGCClipping(v,0,0,tm->virt.x,tm->virt.y); DPRINT_CORE("ggiSetMode: success (vis %dx%d virt %dx%d)\n", tm->visible.x,tm->visible.y,tm->virt.x,tm->virt.y); /* Set foreground and 
background to black */ col.r = 0; col.g = 0; col.b = 0; ggiSetGCForeground(v, ggiMapColor(v, &col)); ggiSetGCBackground(v, ggiMapColor(v, &col)); /* Clear frames to black */ for (i = 0; i < tm->frames; i++) { DPRINT_CORE("ggiSetMode: SetWriteFrame %d\n", i); ggiSetWriteFrame(v, i); #ifdef DEBUG if (vis->w_frame) { DPRINT_CORE("ggiSetMode: frame address: " "%p\n", vis->w_frame->write); } #endif DPRINT_CORE("ggiSetMode: FillScreen %d\n", i); ggiFillscreen(v); } ggiSetWriteFrame(v, 0); ggiFlush(v); } DPRINT_CORE("ggiSetMode: done!\n"); ggUnlock(vis->mutex); return retval; } int ggiCheckMode(ggi_visual_t v, ggi_mode *tm) { struct ggi_visual *vis = GGI_VISUAL(v); APP_ASSERT(vis != NULL, "ggiCheckMode: vis == NULL"); APP_ASSERT(tm != NULL, "ggiCheckMode: tm == NULL"); DPRINT_CORE("ggiCheckMode(%p, %p) called\n", vis, tm); _ggiCheck4Defaults(tm); return vis->opdisplay->checkmode(vis,tm); } int ggiGetMode(ggi_visual_t v,ggi_mode *tm) { struct ggi_visual *vis = GGI_VISUAL(v); APP_ASSERT(vis != NULL, "ggiGetMode: vis != NULL"); APP_ASSERT(tm != NULL, "ggiGetMode: tm != NULL"); DPRINT_CORE("ggiGetMode(%p, %p) called\n", vis, tm); return vis->opdisplay->getmode(vis,tm); } int ggiCheckTextMode(ggi_visual_t v, int cols,int rows, int vcols,int vrows, int fontsizex,int fontsizey, ggi_graphtype type, ggi_mode *md) { int rc; ggi_mode mode; DPRINT_CORE("ggiCheckTextMode(%p, %d, %d, %d, %d, %d, %d, 0x%x, %p) called\n", v, cols, rows, vcols, vrows, fontsizex, fontsizey, type, md); mode.frames = GGI_AUTO; mode.visible.x = cols; mode.visible.y = rows; mode.virt.x = vcols; mode.virt.y = vrows; mode.size.x = mode.size.y = GGI_AUTO; mode.graphtype = type; mode.dpp.x = fontsizex; mode.dpp.y = fontsizey; rc = ggiCheckMode(v,&mode); if (md) *md = mode; /* give back the mode if asked for. */ return rc; } int ggiCheckGraphMode(ggi_visual_t v,int xsize,int ysize, int xvirtual,int yvirtual,ggi_graphtype type, ggi_mode *md) { int rc; ggi_mode mode; DPRINT_CORE("ggiCheckGraphMode(%p, %d, %d, %d, %d, 0x%x, %p) called\n", v, xsize, ysize, xvirtual, yvirtual, type, md); mode.frames = GGI_AUTO; mode.visible.x = xsize; mode.visible.y = ysize; mode.virt.x = xvirtual; mode.virt.y = yvirtual; mode.size.x = mode.size.y = GGI_AUTO; mode.graphtype = type; mode.dpp.x = mode.dpp.y = GGI_AUTO; rc = ggiCheckMode(v,&mode); if (md) *md = mode; /* give back the mode if asked for. */ return rc; } int ggiCheckSimpleMode(ggi_visual_t visual, int xsize, int ysize, int frames, ggi_graphtype type, ggi_mode *md) { int rc; ggi_mode mode; DPRINT_CORE("ggiCheckSimpleMode(%p, %d, %d, %d, 0x%x, %p) called\n", visual, xsize, ysize, frames, type, md); mode.frames = frames; mode.visible.x = xsize; mode.visible.y = ysize; mode.virt.x = mode.virt.y = GGI_AUTO; mode.size.x = mode.size.y = GGI_AUTO; mode.graphtype = type; mode.dpp.x = mode.dpp.y = GGI_AUTO; rc = ggiCheckMode(visual, &mode); if (md) *md = mode; /* give back the mode if asked for. */ return rc; } int ggiSNPrintMode(char *s, size_t size, const ggi_mode *m) { int n, t; size_t l; #define SNPRINTF(a) \ do { \ n = snprintf a; \ if (n == -1) { \ s = NULL; \ l = 0; \ } else if ((size_t)n >= l) { \ s = NULL; \ l = 0; \ } else { \ s += n; \ l -= n; \ } \ t += n; \ } while(0) #define PUTC(c) \ do { \ t += 1; \ if (l <= 1) { \ s = NULL; \ l = 0; \ } else { \ *s++ = c; \ *s = 0; \ l -= 1; \ } \ } while(0) t = 0; l = (s == NULL) ? 
0 : size; if (m->visible.x != GGI_AUTO || m->visible.y != GGI_AUTO) SNPRINTF((s, l, "%dx%d.", m->visible.x, m->visible.y)); if (m->virt.x != GGI_AUTO || m->virt.y != GGI_AUTO) SNPRINTF((s, l, "V%dx%d.", m->virt.x, m->virt.y)); if (m->frames != GGI_AUTO) SNPRINTF((s, l, "F%"PRId32".", m->frames)); if (m->dpp.x != GGI_AUTO || m->dpp.y != GGI_AUTO) SNPRINTF((s, l, "D%dx%d.", m->dpp.x, m->dpp.y)); PUTC('['); switch (GT_SCHEME(m->graphtype)) { case GT_AUTO: break; case GT_TEXT: PUTC('T'); break; case GT_TRUECOLOR: PUTC('C'); break; case GT_GREYSCALE: PUTC('K'); break; case GT_PALETTE: PUTC('P'); break; default: PUTC('?'); break; } if (GT_DEPTH(m->graphtype) != GT_AUTO) SNPRINTF((s, l, "%"PRIu32, GT_DEPTH(m->graphtype))); if (GT_SIZE(m->graphtype) != GT_AUTO) SNPRINTF((s, l, "/%"PRIu32, GT_SIZE(m->graphtype))); if (GT_SUBSCHEME(m->graphtype) & GT_SUB_REVERSE_ENDIAN) PUTC('R'); if (GT_SUBSCHEME(m->graphtype) & GT_SUB_HIGHBIT_RIGHT) PUTC('H'); if (GT_SUBSCHEME(m->graphtype) & GT_SUB_PACKED_GETPUT) PUTC('G'); PUTC(']'); #undef SNPRINTF #undef PUTC return t; } int ggiFPrintMode(FILE *s, const ggi_mode *m) { char buf[256]; ggiSNPrintMode(buf, sizeof(buf), m); return fprintf(s, buf); } /*******************/ /* parse mode */ /*******************/ /* * format = size virt dpp frames graphtype. (in any order) * * size = ['S'] X 'x' Y [ 'x' depth ] * virt = 'V' X 'x' Y * dpp = 'D' X 'x' Y * frames = 'F' frames * graphtype = '[' scheme [depth] ['/' size] [subscheme] ']' | scheme depth * scheme = 'C' | 'P' | 'K' | 'T' * subscheme = ['R'] ['H'] ['G'] (in any order) * * Anything and Everything (!) can be omitted, all ommitted values * default to GGI_AUTO (and GT_AUTO for the graphtype). * Whitespace and '.' symbols ignored. Case insensitive. * * Examples include: * 640x480 just a visible size * 640x480#640x960 same size, but double-height virtual screen * #1024x768 only virtual size defined * * 80x40[T] (default-bitsized) text mode with 80x40 characters * #x100[T] text mode with 100 virtual lines * 640x400[8] 640x400 at 8 bits per pixel * 640x400[GT_8BIT] same as above, but palettized * * 320x200x15 320x200 with 32768 colors * 320x200[C15] 320x200 with 32768 colors (hicolor) * 320x200[C/16] 320x200 with 16 bit pixels (also hicolor) * 320x200[C24/32] 320x200, 32 bit pixels, 16777216 colors (truecolor) * 320x200[GT_32BIT] same as above * 320x200[K2H] 320x200, 2 bit pixels, greyscale, high bit right * * The only way of specifying GGI_AUTO is omitting the parameter; * * Returncode: * 0 on success, i.e. the string was correct. * ignored characters, like GT_ and a position information * do not make it fail, and a missing ] from the bitdepth field * is ignored, no failure * -1 if there is text that can not be parsed. * This text is printed to stderr. * All parameters parsed so far are written into m! * * So m contains all parameters that have been successfully parsed. * For most applications there will be no need for testing parsemode * for failure. 
*/ #define SKIPSPACE while ((*s) && (isspace((uint8_t)*s) || (*s == '.'))) s++ /* scan the integer from the string pointer s */ #define SCANINT(x,def) \ SKIPSPACE; x = def; \ if (isdigit((uint8_t) *s)) { \ x = *s++ - '0'; \ while (isdigit((uint8_t) *s)) { \ x = x*10 + (*s++ - '0'); \ } \ } \ SKIPSPACE #define CHECKGTMODE(str,len,val) \ if (strncasecmp(s, str, len) == 0) \ { m->graphtype = val; s += len; continue; } int ggiParseMode(const char * s, ggi_mode * m) { int depth; int subscheme; if (s == NULL) s = ""; DPRINT_CORE("ggiParseMode(\"%s\", %p) called\n", s, m); *m = _ggiDefaultMode; while (*s) { SKIPSPACE; /* visible */ if ((tolower((uint8_t)*s)=='s') || isdigit((uint8_t)*s)) { if (! isdigit((uint8_t)*s)) s++; SCANINT(m->visible.x, GGI_AUTO); if (tolower((uint8_t) *s) == 'x') { s++; SCANINT(m->visible.y, GGI_AUTO); } if (tolower((uint8_t) *s) == 'x') { s++; SCANINT(depth, GT_AUTO); GT_SETDEPTH(m->graphtype, depth); } continue; } /* virtual */ if ((*s=='#') || (tolower((uint8_t) *s)=='v')) { s++; SCANINT(m->virt.x, GGI_AUTO); if (tolower((uint8_t) *s) == 'x') { s++; SCANINT(m->virt.y, GGI_AUTO); } continue; } /* frames */ if (tolower((uint8_t) *s)=='f') { s++; SCANINT(m->frames, GGI_AUTO); continue; } if (tolower((uint8_t) *s)=='d') { /* dpp */ s++; SCANINT(m->dpp.x, GGI_AUTO); if (tolower((uint8_t) *s) == 'x') { s++; SCANINT(m->dpp.y, GGI_AUTO); } continue; } if (tolower((uint8_t) *s)=='p') { /* palette */ s++; SCANINT(depth, GT_AUTO); GT_SETSCHEME(m->graphtype, GT_PALETTE); GT_SETDEPTH(m->graphtype, depth); continue; } if (tolower((uint8_t) *s)=='c') { /* truecolor */ s++; SCANINT(depth, GT_AUTO); GT_SETSCHEME(m->graphtype, GT_TRUECOLOR); GT_SETDEPTH(m->graphtype, depth); continue; } if (tolower((uint8_t) *s)=='k') { /* greyscale */ s++; SCANINT(depth, GT_AUTO); GT_SETSCHEME(m->graphtype, GT_GREYSCALE); GT_SETDEPTH(m->graphtype, depth); continue; } if (tolower((uint8_t) *s)=='t') { /* text */ s++; SCANINT(depth, GT_AUTO); GT_SETSCHEME(m->graphtype, GT_TEXT); GT_SETDEPTH(m->graphtype, depth); continue; } if (*s != '[') { break; } s++; CHECKGTMODE("GT_1BIT]", 8, GT_1BIT); CHECKGTMODE("GT_2BIT]", 8, GT_2BIT); CHECKGTMODE("GT_4BIT]", 8, GT_4BIT); CHECKGTMODE("GT_8BIT]", 8, GT_8BIT); CHECKGTMODE("GT_15BIT]", 9, GT_15BIT); CHECKGTMODE("GT_16BIT]", 9, GT_16BIT); CHECKGTMODE("GT_24BIT]", 9, GT_24BIT); CHECKGTMODE("GT_32BIT]", 9, GT_32BIT); CHECKGTMODE("GT_TEXT16]", 10, GT_TEXT16); CHECKGTMODE("GT_TEXT32]", 10, GT_TEXT32); if (tolower((uint8_t) *s) == 't') { /* text */ GT_SETSCHEME(m->graphtype, GT_TEXT); s++; } else if (tolower((uint8_t) *s) == 'p') { /* palette */ GT_SETSCHEME(m->graphtype, GT_PALETTE); s++; } else if (tolower((uint8_t) *s) == 'c') { /* truecolor */ GT_SETSCHEME(m->graphtype, GT_TRUECOLOR); s++; } else if (tolower((uint8_t) *s) == 'k') { /* greyscale */ GT_SETSCHEME(m->graphtype, GT_GREYSCALE); s++; } SCANINT(depth, GT_AUTO); GT_SETDEPTH(m->graphtype, depth); if (*s == '/') { s++; SCANINT(depth, GT_AUTO); GT_SETSIZE(m->graphtype, depth); } subscheme = GT_AUTO; next_subscheme: if (tolower((uint8_t) *s) == 'r') { /* reverse endian */ s++; SKIPSPACE; subscheme |= GT_SUB_REVERSE_ENDIAN; goto next_subscheme; } if (tolower((uint8_t) *s) == 'h') { /* high bit right */ s++; SKIPSPACE; subscheme |= GT_SUB_HIGHBIT_RIGHT; goto next_subscheme; } if (tolower((uint8_t) *s) == 'g') { /* packed get/put */ s++; SKIPSPACE; subscheme |= GT_SUB_PACKED_GETPUT; goto next_subscheme; } GT_SETSUBSCHEME(m->graphtype, subscheme); if (*s == ']') { s++; } else { fprintf(stderr,"ggiParseMode: missing 
`]' "
				"or bad graphtype.\n");
			break;
		}
	}

	if (*s) {
		fprintf(stderr, "ggiParseMode: trailing text `%s' "
			"ignored. Parsed mode is ", s);
		ggiFPrintMode(stderr, m);
		fprintf(stderr, "\n");
		return -1;
	}

	return 0;
}

#undef SKIPSPACE
#undef SCANINT
#undef CHECKGTMODE

#if 0 /* the ORIGINAL version */
int ggiParseMode(const char * s, ggi_mode * m)
{
	int bitdepth=0;
	int negative=0;	/* negative flag for positions */
	int xposition=0;
	int yposition=0;

	DPRINT_CORE("ggiParseMode(%p, %p) called\n", s, m);

	*m = _ggiDefaultMode;

#define SKIPSPACE while ( (*s!='\000') && (isspace((uint8_t)*s)) ) s++;

	/* scan the integer from the string pointer s */
#define SCANINT(x) SKIPSPACE; \
	if (isdigit((uint8_t)*s)){ \
		x=*s-'0'; \
		s++; \
		while (isdigit((uint8_t)*s)){ \
			x = x*10+ ((int)*s) -'0'; \
			s++; \
		} \
	} \
	SKIPSPACE

	/* first number is visible-x: */
	SCANINT(m->visible.x);
	if (tolower((uint8_t)*s) == 'x') {	/* now for the y */
		s++;
		SCANINT(m->visible.y);
	}

	if (*s == '#'){		/* virtual starts here */
		s++;
		SCANINT(m->virt.x);
		if (tolower((uint8_t)*s) == 'x') {	/* now for the y */
			s++;
			SCANINT(m->virt.y);
		}
	}

	if (tolower((uint8_t)*s) == 'd') {	/* dpp starts here */
		s++;
		SCANINT(m->dpp.x);
		if (tolower((uint8_t)*s) == 'x') {	/* now for the y */
			s++;
			SCANINT(m->dpp.y);
		}
	}

	if (tolower((uint8_t)*s) == 'f') {	/* frames starts here */
		s++;
		SCANINT(m->frames);
	}

	if (*s == '[') {	/* graphtype starts here */
		s++;

#define CHECKGTMODE(str,len,val) \
	if (strncasecmp(s,(str),(len)) == 0) \
	{ m->graphtype = (val); s += len; }

		CHECKGTMODE("GT_1BIT]", 8, GT_1BIT)
		else CHECKGTMODE("GT_2BIT]", 8, GT_2BIT)
		else CHECKGTMODE("GT_4BIT]", 8, GT_4BIT)
		else CHECKGTMODE("GT_8BIT]", 8, GT_8BIT)
		else CHECKGTMODE("GT_15BIT]", 9, GT_15BIT)
		else CHECKGTMODE("GT_16BIT]", 9, GT_16BIT)
		else CHECKGTMODE("GT_24BIT]", 9, GT_24BIT)
		else CHECKGTMODE("GT_32BIT]", 9, GT_32BIT)
		else CHECKGTMODE("GT_TEXT16]", 10, GT_TEXT16)
		else CHECKGTMODE("GT_TEXT32]", 10, GT_TEXT32)
		else {
			/* scheme */
			if (tolower((uint8_t)*s) == 't') {		/* text mode */
				GT_SETSCHEME(m->graphtype, GT_TEXT);
				s++;
			} else if (tolower((uint8_t)*s) == 'p') {	/* palette mode */
				GT_SETSCHEME(m->graphtype, GT_PALETTE);
				s++;
			} else if (tolower((uint8_t)*s) == 'c') {	/* truecolor mode */
				GT_SETSCHEME(m->graphtype, GT_TRUECOLOR);
				s++;
			} else if (tolower((uint8_t)*s) == 'k') {	/* greyscale mode */
				GT_SETSCHEME(m->graphtype, GT_GREYSCALE);
				s++;
			}

			bitdepth = GT_AUTO;
			SCANINT(bitdepth);
			GT_SETDEPTH(m->graphtype, bitdepth);

			if (*s == '/') {
				s++;
				bitdepth = GT_AUTO;
				SCANINT(bitdepth);
				GT_SETSIZE(m->graphtype, bitdepth);
			}

			if (*s == ']') {
				s++;
			} else {
				fprintf(stderr,"ggiParseMode: warning: ] "
					"missing or bad graphtype.\n");
			}
		}
#undef CHECKGTMODE
	}

	if ((*s=='-') || (*s=='+')){	/* x position starts */
		if (*s=='-'){
			negative=1;
		}
		s++;
		SCANINT(xposition);
		if (negative){
			negative=0;
			xposition = - xposition;
		}
		fprintf(stderr,"X position %d ignored.\n",xposition);
	}

	if ((*s=='-') || (*s=='+')){	/* y position starts */
		if (*s=='-'){
			negative=1;
		}
		s++;
		SCANINT(yposition);
		if (negative){
			negative=0;
			yposition = - yposition;
		}
		fprintf(stderr,"Y position %d ignored.\n",yposition);
	}

	if (*s !='\000'){
		fprintf(stderr,"trailing text %s ignored.\n"
			"parsed mode is ",s);
		ggiFPrintMode(stderr,m);
		fprintf(stderr,"\n");
		return -1;
	}

#undef SCANINT
	return 0;
}
#endif

/***************************************/
/*   PHYsical SiZe (physz) handling    */
/***************************************/

int _ggi_physz_parse_option(const char *optstr, int *physzflag, ggi_coord *physz)
{
	/* This function parses a string gotten through the -physz=
option, * contained in optstr, and fills out the values physzflag and physz * based on what is in that string. The latter two are stored in * the visual's target private area (not all targets use the -physz * option.) * * physz gets the integer values in the option string. physzflag can * contain two flags, one (GGI_PHYSZ_OVERRIDE) designating that the * values are not defaults, rather overrides for a misconfigured * display. The second, GGI_PHYSZ_DPI designates that the sizes * in the string are in dots-per-inch, otherwise the sizes are * assumed to be the full size of the display in millimeters. * (which may not be the same as the size of the visual, on targets * where the visual is a subregion of a display system such as X). */ char *endptr; const char *nptr = optstr; *physzflag = 0; physz->x =physz->y = GGI_AUTO; /* The 'N' is there by default, if the option was not filled in. */ if (*nptr == 'N' || *nptr == 'n') return GGI_OK; /* Check if we should *always* override the display system values */ if (*nptr == '=') { nptr++; *physzflag |= GGI_PHYSZ_OVERRIDE; } physz->x = strtoul(nptr, &endptr, 0); if (*nptr == '\0' || *endptr != ',') { *physzflag = 0; physz->x = physz->y = GGI_AUTO; return GGI_EARGINVAL; } nptr = endptr + 1; physz->y = strtoul(nptr, &endptr, 0); if (*nptr != '\0' && (*endptr == 'd' || *endptr == 'D') && (*(endptr + 1) == 'p' || *(endptr + 1) == 'P') && (*(endptr + 2) == 'i' || *(endptr + 2) == 'I')) { endptr += 3; *physzflag |= GGI_PHYSZ_DPI; } if (*nptr == '\0' || *endptr != '\0') { *physzflag = 0; physz->x =physz->y = GGI_AUTO; return GGI_EARGINVAL; } return GGI_OK; } int _ggi_physz_figure_visible(ggi_mode *mode, int def_x, int def_y, int physzflag, const ggi_coord *screen_size, const ggi_coord *screen_res) { /* This function validates/suggests values in mode->size to * designate the physical screen size in millimeters. * * mode->dpp is assumed to already contain valid values. * def_x and def_y are the default visible sizes from the target * private area * * screen_size is the screen size in dpi, if physzflag is * GGI_PHYSZ_DPI, otherwise screen_size is in mm. * * screen_res is the screen size in pixels or 0 on fullscreen * targets. 
	 */
	ggi_coord size, res;
	ggi_mode tmp;

	DPRINT_MODE("_ggi_physz_figure_visible(%p) called\n", mode);

	LIB_ASSERT(mode != NULL, "Invalid mode");
	LIB_ASSERT(screen_size != NULL, "Invalid screen size");
	LIB_ASSERT(screen_res != NULL, "Invalid screen resolution");

	memset(&tmp, GGI_AUTO, sizeof(tmp));
	size = *screen_size;
	res = *screen_res;

	if ((mode->visible.x == GGI_AUTO) && (mode->virt.x == GGI_AUTO)
	    && (mode->size.x == GGI_AUTO)) {
		tmp.visible.x = tmp.virt.x = def_x;
	} else if ((mode->visible.x == GGI_AUTO) && (mode->virt.x == GGI_AUTO)) {
		if (size.x == GGI_AUTO) size.x = mode->size.x;
		if (res.x == GGI_AUTO) res.x = def_x;

		if (physzflag & GGI_PHYSZ_DPI) {
			/* pixels = mm / 25.4 * dpi / dpp, i.e. the inverse of
			   the computation in _ggi_physz_figure_size() below. */
			tmp.visible.x = mode->size.x * size.x * 10
				/ 254 / mode->dpp.x;
		} else {
			tmp.visible.x = mode->size.x * res.x / size.x;
		}
	} else if (mode->visible.x == GGI_AUTO) {
		tmp.visible.x = mode->virt.x;
	} else if (mode->virt.x == GGI_AUTO) {
		tmp.virt.x = mode->visible.x;
	}

	if ((mode->visible.y == GGI_AUTO) && (mode->virt.y == GGI_AUTO)
	    && (mode->size.y == GGI_AUTO)) {
		tmp.visible.y = tmp.virt.y = def_y;
	} else if ((mode->visible.y == GGI_AUTO) && (mode->virt.y == GGI_AUTO)) {
		if (size.y == GGI_AUTO) size.y = mode->size.y;
		if (res.y == GGI_AUTO) res.y = def_y;

		if (physzflag & GGI_PHYSZ_DPI) {
			/* pixels = mm / 25.4 * dpi / dpp, as above. */
			tmp.visible.y = mode->size.y * size.y * 10
				/ 254 / mode->dpp.y;
		} else {
			tmp.visible.y = mode->size.y * res.y / size.y;
		}
	} else if (mode->visible.y == GGI_AUTO) {
		tmp.visible.y = mode->virt.y;
	} else if (mode->virt.y == GGI_AUTO) {
		tmp.virt.y = mode->visible.y;
	}

	DPRINT_MODE("_ggi_physz_figure_visible: mode dpp (%i,%i), size (%i,%i)\n",
		mode->dpp.x, mode->dpp.y, mode->size.x, mode->size.y);
	DPRINT_MODE("_ggi_physz_figure_visible: visible (%i,%i), virt (%i,%i)\n",
		tmp.visible.x, tmp.visible.y, mode->virt.x, mode->virt.y);

	if ((mode->virt.x != GGI_AUTO) && (tmp.visible.x > mode->virt.x)) {
		tmp.visible.x = mode->virt.x;
	}
	if ((mode->virt.y != GGI_AUTO) && (tmp.visible.y > mode->virt.y)) {
		tmp.visible.y = mode->virt.y;
	}
	if (tmp.visible.x <= 0) tmp.visible.x = 0;
	if (tmp.visible.y <= 0) tmp.visible.y = 0;

	if ((mode->visible.x != GGI_AUTO && mode->visible.x != tmp.visible.x) ||
	    (mode->visible.y != GGI_AUTO && mode->visible.y != tmp.visible.y)) {
		DPRINT_MODE("_ggi_physz_figure_visible: "
			"physical size (%i,%i) doesn't match (%i,%i)\n",
			mode->size.x, mode->size.y,
			mode->visible.x, mode->visible.y);
		return GGI_ENOMATCH;
	}

	mode->visible = tmp.visible;
	mode->virt = tmp.virt;

	DPRINT_MODE("_ggi_physz_figure_visible: visible (%i,%i), virt (%i,%i)\n",
		mode->visible.x, mode->visible.y, mode->virt.x, mode->virt.y);
	DPRINT_MODE("_ggi_physz_figure_visible: leaving\n");
	return GGI_OK;
}

int _ggi_physz_figure_size(ggi_mode *mode, int physzflag, const ggi_coord *op_sz,
			int dpix, int dpiy, int dsx, int dsy)
{
	/* This function validates/suggests values in mode->size to
	 * designate the physical screen size in millimeters.
	 *
	 * mode->visible and mode->dpp are assumed to already contain
	 * valid values.
	 *
	 * The physzflag and op_sz parameters are from the visual's
	 * target private area, as set by the above _ggi_physz_parse_option
	 * function.
	 *
	 * The dpix, dpiy parameters contain the dpi of the display.
	 *
	 * The dsx, dsy parameters contain the size in pixels of the
	 * entire display, which on visuals using a subregion of
	 * a display system, such as X, is the size of the entire screen.
	 */
	long xsize, ysize;
	int err = GGI_OK;

	xsize = ysize = 0;

	if (physzflag & GGI_PHYSZ_DPI) {
		xsize = (physzflag & GGI_PHYSZ_OVERRIDE) ? op_sz->x : dpix;
		ysize = (physzflag & GGI_PHYSZ_OVERRIDE) ?
op_sz->y : dpiy; if (xsize <= 0 || ysize <= 0) { xsize = op_sz->x; ysize = op_sz->y; } if (xsize <= 0 || ysize <= 0) goto nosize; /* find absolute size in mm */ xsize = mode->visible.x * mode->dpp.x * 254 / xsize / 10; ysize = mode->visible.y * mode->dpp.y * 254 / ysize / 10; } else if (physzflag & GGI_PHYSZ_MM) { xsize = (physzflag & GGI_PHYSZ_OVERRIDE) ? op_sz->x : dpix; ysize = (physzflag & GGI_PHYSZ_OVERRIDE) ? op_sz->y : dpiy; if (xsize <= 0 || ysize <= 0) { xsize = op_sz->x; ysize = op_sz->y; } if (xsize <= 0 || ysize <= 0) goto nosize; /* Now xsize and ysize are in mm, but scale them * to mode->visible */ xsize = xsize * mode->visible.x / dsx; ysize = ysize * mode->visible.y / dsy; } else { if (physzflag & GGI_PHYSZ_OVERRIDE) { xsize = op_sz->x; ysize = op_sz->y; } else if (dpix > 0 && dpiy > 0) { xsize = (dsx * mode->dpp.x * 254 / dpix / 10); ysize = (dsy * mode->dpp.y * 254 / dpiy / 10); } if (xsize <= 0 || ysize <= 0) { xsize = op_sz->x; ysize = op_sz->y; } if (xsize <= 0 || ysize <= 0) goto nosize; if (dsx <= 0 || dsy <= 0) goto nosize; xsize = xsize * mode->visible.x / dsx; ysize = ysize * mode->visible.y / dsy; } if ((mode->size.x != xsize && mode->size.x != GGI_AUTO) || (mode->size.y != ysize && mode->size.y != GGI_AUTO)) { DPRINT_MODE("_ggi_physz_figure_size: " "physical size (%i,%i) doesn't match (%i,%i)\n", xsize, ysize, mode->size.x, mode->size.y); err = GGI_ENOMATCH; } mode->size.x = (int)xsize; mode->size.y = (int)ysize; return err; nosize: if ((mode->size.x != GGI_AUTO) || (mode->size.y != GGI_AUTO)) err = GGI_ENOMATCH; return err; }
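/*
 * Hedged usage sketch (illustration only, not part of the original file):
 * round-tripping a mode string through ggiParseMode() and ggiSNPrintMode().
 * It assumes the usual GGI headers declare these entry points; the block is
 * guarded with #if 0 so it does not affect compilation.
 */
#if 0
#include <stdio.h>
#include <ggi/ggi.h>

static void demo_parse_mode(void)
{
	ggi_mode mode;
	char buf[256];

	/* "640x480 visible, 24-bit truecolor stored in 32-bit pixels" */
	if (ggiParseMode("640x480[C24/32]", &mode) != 0) {
		fprintf(stderr, "demo: mode string rejected\n");
		return;
	}

	/* Convert back into the canonical textual form. */
	ggiSNPrintMode(buf, sizeof(buf), &mode);
	printf("parsed mode: %s\n", buf);
}
#endif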
/**
 * This controller contains an action to handle HTTP requests to the
 * application's home page.
 */
public class HomeController extends Controller {

	FormFactory formFactory;
	MessagesApi messages;

	@Inject
	public HomeController(FormFactory formFactory, MessagesApi messages) {
		super();
		this.formFactory = formFactory;
		this.messages = messages;
	}

	/**
	 * An action that renders an HTML page with a welcome message. The
	 * configuration in the <code>routes</code> file means that this method will
	 * be called when the application receives a <code>GET</code> request with a
	 * path of <code>/</code>.
	 */
	public Result index() {
		return ok(views.html.index.render("This is HOMEPAGE!"));
	}

	public Result openAttributeToolForm(Http.Request request) {
		Form<AttributeTool> attrToolForm = formFactory.form(AttributeTool.class);
		List<String> options = new ArrayList<String>();
		options.add("Excel to XML");
		options.add("XML to Excel");
		return ok(views.html.attributeTool.render(attrToolForm, options, request, messages.preferred(request)));
	}

	public Result attributeTool(Http.Request request) {
		DynamicForm form = formFactory.form().bindFromRequest(request);
		String errors = FileValidationUtil.validateFormForValidFiles(form);
		if (!errors.isEmpty()) {
			return redirect(routes.HomeController.openAttributeToolForm()).flashing("info", errors);
		}
		String inputFilePath = form.get("inputFilePath");
		String outputFilePath = form.get("outputFilePath");
		String fileName = form.get("fileName");
		String configFilePath = form.get("configFilePath");
		String delimeter = form.get("delimeter");
		if (FileValidationUtil.isNullOrBlank(outputFilePath)) {
			outputFilePath = FileValidationUtil.getDefaultOutputDirectoryFromInput(inputFilePath);
		}
		String selectedOpt = formFactory.form().bindFromRequest(request).get("types");
		FileConversionHandler handler;
		if (selectedOpt.equals("Excel to XML")) {
			if (FileValidationUtil.isNullOrBlank(fileName)) {
				fileName = FileValidationUtil.setDefaultXMLFilenameFromInput(inputFilePath);
			}
			handler = new AttributeExcelFileHandler();
		} else {
			if (FileValidationUtil.isNullOrBlank(fileName)) {
				fileName = FileValidationUtil.setDefaultExcelFilenameFromInput(inputFilePath);
			}
			handler = new AttributeXMLFileHandler();
		}
		AttributeTool attrTool = new AttributeTool();
		String response = attrTool.convertFile(inputFilePath, outputFilePath, fileName, configFilePath,
				delimeter, handler);
		return redirect(routes.HomeController.openAttributeToolForm()).flashing("info", response);
	}

	public Result openAttributeLinksToolForm(Http.Request request) {
		Form<AttributeLinksTool> attrLinksToolForm = formFactory.form(AttributeLinksTool.class);
		List<String> options = new ArrayList<String>();
		options.add("Excel to XML");
		options.add("XML to Excel");
		return ok(
				views.html.attributeLinksTool.render(attrLinksToolForm, options, request, messages.preferred(request)));
	}

	public Result attributeLinksTool(Http.Request request) {
		DynamicForm form = formFactory.form().bindFromRequest(request);
		String errors = FileValidationUtil.validateFormForValidFiles(form);
		if (!errors.isEmpty()) {
			return redirect(routes.HomeController.openAttributeLinksToolForm()).flashing("info", errors);
		}
		String inputFilePath = form.get("inputFilePath");
		String outputFilePath = form.get("outputFilePath");
		String fileName = form.get("fileName");
		String configFilePath = form.get("configFilePath");
		String delimeter = form.get("delimeter");
		if (FileValidationUtil.isNullOrBlank(outputFilePath)) {
			outputFilePath = FileValidationUtil.getDefaultOutputDirectoryFromInput(inputFilePath);
		}
		String selectedOpt
= formFactory.form().bindFromRequest(request).get("types");
		FileConversionHandler handler;
		if (selectedOpt.equals("Excel to XML")) {
			if (FileValidationUtil.isNullOrBlank(fileName)) {
				fileName = FileValidationUtil.setDefaultXMLFilenameFromInput(inputFilePath);
			}
			handler = new AttributeLinkExcelFileHandler();
		} else {
			if (FileValidationUtil.isNullOrBlank(fileName)) {
				fileName = FileValidationUtil.setDefaultExcelFilenameFromInput(inputFilePath);
			}
			handler = new AttributeLinkXMLFileHandler();
		}
		AttributeLinksTool attrLinksTool = new AttributeLinksTool();
		String response = attrLinksTool.convertFile(inputFilePath, outputFilePath, fileName, configFilePath,
				delimeter, handler);
		return redirect(routes.HomeController.openAttributeLinksToolForm()).flashing("info", response);
	}

	public Result openLOVSchemaForm(Http.Request request) {
		Form<LOVSchema> lovSchema = formFactory.form(LOVSchema.class);
		List<String> options = new ArrayList<String>();
		options.add("Excel to XML");
		options.add("XML to Excel");
		return ok(views.html.lovSchema.render(lovSchema, options, request, messages.preferred(request)));
	}

	public Result lovSchema(Http.Request request) {
		DynamicForm form = formFactory.form().bindFromRequest(request);
		String errors = FileValidationUtil.validateFormForValidFiles(form);
		if (!errors.isEmpty()) {
			return redirect(routes.HomeController.openLOVSchemaForm()).flashing("info", errors);
		}
		String inputFilePath = form.get("inputFilePath");
		String outputFilePath = form.get("outputFilePath");
		String fileName = form.get("fileName");
		String configFilePath = form.get("configFilePath");
		String delimeter = form.get("delimeter");
		if (FileValidationUtil.isNullOrBlank(outputFilePath)) {
			outputFilePath = FileValidationUtil.getDefaultOutputDirectoryFromInput(inputFilePath);
		}
		String selectedOpt = formFactory.form().bindFromRequest(request).get("types");
		FileConversionHandler handler;
		if (selectedOpt.equals("Excel to XML")) {
			if (FileValidationUtil.isNullOrBlank(fileName)) {
				fileName = FileValidationUtil.setDefaultXMLFilenameFromInput(inputFilePath);
			}
			handler = new LovExcelFileHandler();
		} else {
			if (FileValidationUtil.isNullOrBlank(fileName)) {
				fileName = FileValidationUtil.setDefaultExcelFilenameFromInput(inputFilePath);
			}
			handler = new LovXMLFileHandler();
		}
		LOVSchema lovSchema = new LOVSchema();
		String response = lovSchema.convertFile(inputFilePath, outputFilePath, fileName, configFilePath,
				delimeter, handler);
		return redirect(routes.HomeController.openLOVSchemaForm()).flashing("info", response);
	}

	public Result openBGPReportForm(Http.Request request) {
		Form<BGPReport> bgpReport = formFactory.form(BGPReport.class);
		return ok(views.html.bgpreport.render(bgpReport, request, messages.preferred(request)));
	}

	public Result bgpReport(Http.Request request) {
		DynamicForm form = formFactory.form().bindFromRequest(request);
		String errors = FileValidationUtil.validateFormForValidFiles(form);
		if (!errors.isEmpty()) {
			return redirect(routes.HomeController.openBGPReportForm()).flashing("info", errors);
		}
		BGPReport bgpReport = new BGPReport();
		String response = bgpReport.generateReport(form.get("inputServerPath"), form.get("username"),
				form.get("password"), form.get("contextID"), form.get("inputFilePath"));
		return redirect(routes.HomeController.openBGPReportForm()).flashing("info", response);
	}

	public Result openMILConversionForm(Http.Request request) {
		Form<MILConversion> milConversionForm = formFactory.form(MILConversion.class);
		List<String> options = new ArrayList<String>();
		options.add("TypeA");
		options.add("TypeC");
		return
ok(views.html.milConversion.render(milConversionForm, options, request, messages.preferred(request))); } public Result milConversion(Http.Request request) { DynamicForm form = formFactory.form().bindFromRequest(request); String errors = FileValidationUtil.validateFormForValidFiles(form); if (!errors.isEmpty()) { return redirect(routes.HomeController.openMILConversionForm()).flashing("info", errors); } String inputFilePath = form.get("inputFilePath"); String outputFilePath = form.get("outputFilePath"); String fileName = form.get("fileName"); String configFilePath = form.get("configFilePath"); if (FileValidationUtil.isNullOrBlank(outputFilePath)) { outputFilePath = FileValidationUtil.getDefaultOutputDirectoryFromInput(inputFilePath); } if (FileValidationUtil.isNullOrBlank(fileName)) { fileName = FileValidationUtil.setDefaultXMLFilenameFromInput(inputFilePath); } String selectedOpt = formFactory.form().bindFromRequest(request).get("types"); boolean isSkeleton = true; if (selectedOpt.equals("TypeA")) { isSkeleton = true; } else if (selectedOpt.equals("TypeC")) { isSkeleton = false; } MILConversion milConversion = new MILConversion(); String response = milConversion.convertFile(inputFilePath, outputFilePath, fileName, configFilePath, isSkeleton); return redirect(routes.HomeController.openMILConversionForm()).flashing("info", response); } public Result openXMLExtractorForm(Http.Request request) { Form<XMLExtractor> extractorForm = formFactory.form(XMLExtractor.class); List<String> nodeTypes = new ArrayList<>(); Arrays.asList(XMLExtractorInputUtil.class.getFields()).forEach(field -> { if (Modifier.isPublic(field.getModifiers())) { nodeTypes.add(field.getName()); } }); return ok(views.html.xmlExtractor.render(extractorForm, nodeTypes, request, messages.preferred(request))); } public Result XMLExtractor(Http.Request request) { DynamicForm form = formFactory.form().bindFromRequest(request); String errors = FileValidationUtil.validateFormForValidFiles(form); if (!errors.isEmpty()) { return redirect(routes.HomeController.openXMLExtractorForm()).flashing("info", errors); } XMLExtractorInputUtil xmlExtractorInputUtil = new XMLExtractorInputUtil(); try { int i = 1; while (form.get("stepNodeIDs_" + i) != null) { String nodeType = form.get("stepNodeType_" + i); String nodeIDs = form.get("stepNodeIDs_" + i); if (!nodeIDs.isEmpty()) { // set the value Method method = xmlExtractorInputUtil.getClass().getMethod("set" + nodeType, new Class[] { nodeIDs.getClass() }); method.invoke(xmlExtractorInputUtil, nodeIDs); } i++; } } catch (NoSuchMethodException | IllegalAccessException e) { e.printStackTrace(); } catch (InvocationTargetException e) { e.printStackTrace(); } String inputFilePath = form.get("inputFilePath"); xmlExtractorInputUtil.setInputFilePath(inputFilePath); XMLExtractor extractor = new XMLExtractor(); String response = extractor.extractFile(xmlExtractorInputUtil); return redirect(routes.HomeController.openXMLExtractorForm()).flashing("info", response); } }
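/*
 * Hedged sketch (not from the original sources): the conventional Play
 * "conf/routes" entries this controller appears to assume. The actual routes
 * file is not shown here, so the paths and verbs below are illustrative only;
 * only the action names are taken from the code above.
 *
 *   GET   /                    controllers.HomeController.index()
 *   GET   /attributeTool       controllers.HomeController.openAttributeToolForm(request: Request)
 *   POST  /attributeTool       controllers.HomeController.attributeTool(request: Request)
 *   GET   /attributeLinksTool  controllers.HomeController.openAttributeLinksToolForm(request: Request)
 *   POST  /attributeLinksTool  controllers.HomeController.attributeLinksTool(request: Request)
 *   ...and likewise for lovSchema, bgpReport, milConversion and XMLExtractor.
 */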
/**
 * mtsdram - write controller register data
 * @dcr_host: A pointer to the DCR mapping.
 * @idcr_n: The indirect DCR register to write.
 * @value: The data to write.
 *
 * This routine writes the provided data to the controller's specified
 * indirect DCR register.
 */
static inline void mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n,
			   u32 value)
{
	/* A void function must not return a value; just perform the write. */
	__mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
		 dcr_host->base + SDRAM_DCR_DATA_OFFSET,
		 idcr_n, value);
}
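/*
 * Hedged usage sketch (illustration only, guarded out of compilation): writing
 * one indirect SDRAM controller register through mtsdram(). SDRAM0_CFG0 and
 * the bit value are hypothetical names, not taken from the surrounding code.
 */
#if 0
#define SDRAM0_CFG0	0x20	/* hypothetical indirect register number */

static void sdram_enable_example(const dcr_host_t *dcr_host)
{
	/* A read-modify-write would use the matching mfsdram() accessor. */
	mtsdram(dcr_host, SDRAM0_CFG0, 0x80000000u /* hypothetical enable bit */);
}
#endif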
/**
 * The handler used in the health_check_type table. Performs health checks via URLs.
 */
public class GlobalAWSHandler extends SurveillerHandler {

	private static final Logger logger = Logger.getLogger(GlobalAWSHandler.class);

	private static final String PARAM_KEY_URL = "URL";

	/**
	 * Get health check status via URL
	 * @see com.tmobile.kardio.surveiller.handler.SurveillerHandler#getSurveillerStatus()
	 **/
	@Override
	public StatusVO getSurveillerStatus() {
		XmlReader reader = null;
		try {
			if (paramDetails.get(PARAM_KEY_URL) == null) {
				logger.error("Configuration Error : The RSS URL is null in DB");
				throw new IllegalArgumentException("Configuration Error : The RSS URL is null in DB");
			}
			URL url = new URL(paramDetails.get(PARAM_KEY_URL));
			reader = new XmlReader(url);
			SyndFeed feed = new SyndFeedInput().build(reader);
			if (feed.getEntries().size() == 0) {
				return new StatusVO(Status.UP);
			} else {
				SyndEntry entry = (SyndEntry) feed.getEntries().get(0);
				if (entry.getTitle().contains("operating normally")
						|| (entry.getDescription().getValue().contains("operating normally"))) {
					return new StatusVO(Status.UP);
				} else {
					logger.debug(feed.getTitle() + " - " + entry.getTitle() + " for " + paramDetails.get(PARAM_KEY_URL));
					return new StatusVO(Status.DOWN, feed.getTitle() + " - " + entry.getTitle()
							+ ". Date - " + entry.getPublishedDate());
				}
			}
		} catch (Exception e) {
			logger.error("Got Exception while connecting", e);
			return new StatusVO(Status.DOWN, e.getMessage() + ". URL=" + paramDetails.get(PARAM_KEY_URL));
		} finally {
			try {
				if (reader != null) {
					reader.close();
				}
			} catch (Exception e) {
				logger.error("Got Exception while closing reader", e);
			}
		}
	}
}
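/*
 * Hedged illustration (not from the original sources): the AWS status RSS
 * shape this handler appears to parse. An empty channel (no item) or an item
 * whose title/description contains "operating normally" maps to Status.UP;
 * anything else maps to Status.DOWN with the entry title and published date.
 *
 *   <rss version="2.0">
 *     <channel>
 *       <title>Amazon EC2 (us-east-1) Service Status</title>
 *       <item>
 *         <title>Service is operating normally: ...</title>
 *         <description>The service is operating normally.</description>
 *         <pubDate>Mon, 01 Jan 2024 00:00:00 GMT</pubDate>
 *       </item>
 *     </channel>
 *   </rss>
 */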
import { Table, Card } from 'antd';
import React, { ReactElement } from 'react';

const TableAmortizacion = ({ style = {} }): ReactElement => {
  const data = [
    {
      cuota: '1',
      fecha: '5 de Febrero, 2020',
      capital: 32.15,
      interes: 0.15,
      balance: 5800.0
    },
    {
      cuota: '2',
      fecha: '5 de Diciembre, 2024',
      capital: 42.15,
      interes: 0.18,
      balance: 1358.1
    }
  ];

  const columns = [
    { title: 'Cuota', dataIndex: 'cuota', key: 'cuota' },
    { title: 'Fecha', dataIndex: 'fecha', key: 'fecha' },
    { title: 'Capital', dataIndex: 'capital', key: 'capital' },
    { title: 'Interés', dataIndex: 'interes', key: 'interes' },
    { title: 'Balance', dataIndex: 'balance', key: 'balance' }
  ];

  return (
    <Card title="Amortización">
      <Table columns={columns} dataSource={data} style={style} />
    </Card>
  );
};

export default TableAmortizacion;
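// Hedged usage sketch (illustration only): the component takes a single
// optional `style` prop, forwarded to the underlying antd <Table>.
//
//   <TableAmortizacion style={{ marginTop: 16 }} />
//
// The rows and columns are currently hard-coded; a likely next step (an
// assumption, not something the original file does) would be to accept the
// amortization rows as a prop instead of the inline `data` array.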
#ifndef UNTITLED_PLAYER_H
#define UNTITLED_PLAYER_H

#include "../mesh/imageloader.h"
#include "../mesh/objloader.h"
#include <cmath>
#include <iostream>
#include <GL/glut.h>

using namespace std;

class Player {
private:
    double radius, defaultRadius, halfRadius;
    double Bcolor;
    double x, y, z;
    bool underWater;
    double thetaHelice, thetaLeme, thetaPlayer, vX, vY, thetaX, thetaY;
    int slice, stack;
    double cam2X, cam2Y, cam2Z, cameraDistance;
    vector<mesh*> submarine;
    mesh cannon;
    int frame, velHelice;

public:
    Player(double radius, double x, double y, vector<mesh*> submarine, mesh cannon){
        this->radius = radius;
        this->defaultRadius = radius;
        this->halfRadius = radius/2.0;
        this->x = x;
        this->y = y;
        this->z = 0;
        this->thetaHelice = 0;
        this->thetaLeme = 0;
        this->velHelice = 0;
        this->thetaPlayer = this->thetaX = 0;
        this->thetaY = 30;
        this->vX = 0;
        this->vY = 1;
        this->Bcolor = 0;
        this->slice = 16;
        this->stack = 16;
        this->cameraDistance = 70;
        this->cam2X = x;
        this->cam2Y = y + this->cameraDistance;
        this->cam2Z = -30;
        this->submarine = submarine;
        this->cannon = cannon;
        this->frame = 0;
    }

    double getHalfRadius(){return this->halfRadius;}
    double getRadius(){return this->radius;}
    double getXCod(){return this->x;}
    double getYCod(){return this->y;}
    double getZCod(){return this->z;}
    // Setter with no meaningful result: declared void (clamps depth to [0, 7*radius]).
    void addZCod(double z){if(this->z+z>=0 && this->z+z<=7*this->radius)this->z+=z;}
    double getXCAM2Cod(){return this->cam2X;}
    double getYCAM2Cod(){return this->cam2Y;}
    double getZCAM2Cod(){return this->cam2Z;}
    double getThetaPlayer(){return this->thetaPlayer;}
    double getThetaXYTorpedo(){return this->thetaX;}
    double getThetaYZTorpedo(){return this->thetaY;}
    double getThetaLeme(){return this->thetaLeme;}
    double getVX(){return this->vX;}
    double getVY(){return this->vY;}
    void drawPlayer(GLuint textura);
    void desenhaCilindro(double radius);
    void addLeme(double theta){ if( (this->thetaLeme + theta <= 45) && (this->thetaLeme + theta >= -45) ) this->thetaLeme += theta;}
    double getBetaAngle(){ return this->thetaHelice/360 + this->thetaPlayer;}
    void movePlayer(double x, double y){ this->x += x; this->y += y;}
    void addRadius(double radius){ this->radius += radius;}
    bool deepWater(){ return this->radius <= this->halfRadius;}
    bool upWater(){ return this->radius >= 2*this->halfRadius;}
    // Setter with no meaningful result: declared void.
    void addBlue(double blue){this->Bcolor += blue;}
    void setVelHelice( int vel ){ this->velHelice = vel;}
    double getTorpedoXPosition();
    double getTorpedoYPosition();
    double getDefaultRadius(){ return this->defaultRadius; }
    double getMisselXPosition();
    double getMisselYPosition();
    double getMisselZPosition();

    void updateDirectionStats(double theta, double vX, double vY){
        this->thetaPlayer += theta;
        this->vX = vX;
        this->vY = vY;
        if(this->thetaPlayer > 360) this->thetaPlayer -= 360;
        if(this->thetaPlayer < -360) this->thetaPlayer += 360;
    }

    void cam3rdPerson( double camXYAngle, double camXZAngle ) {
        double thetaXCam = -(atan2(vY,vX) - 3.14/2) + ( camXYAngle * 3.14 ) / 180;
        double thetaYCam = ( camXZAngle * 3.14 ) / 180;

        double lookX = 0;
        double lookY = - this->cameraDistance;
        double lookZ = 0;

        double rotX = lookX;
        double rotY = lookY * cos( thetaYCam ) + lookZ * sin( thetaYCam );
        double rotZ = -lookY * sin( thetaYCam ) + lookZ * cos( thetaYCam );

        lookX = rotX;
        lookY = rotY;
        lookZ = rotZ;

        rotX = lookX * cos( thetaXCam ) + lookY * sin( thetaXCam );
        rotY = -lookX * sin( thetaXCam ) + lookY * cos( thetaXCam );
        rotZ = lookZ;

        double ix = this->x - rotX;
        double iy = this->y - rotY;
        double iz = this->z - rotZ;

        gluLookAt( ix, iy, iz - 30, x, y, z, 0, 0, -1);
    }

    void camCannon( double camXYAngle, double
camXZAngle ) { double thetaXCam = -( atan2(vY,vX) - 3.14/2 ) + ( -thetaX / 180 ); double thetaYCam = ( thetaY ) / 180; double xc = x - 0.65*vX*radius/* * cos(thetaXCam) */; double yc = y - 0.65*vY*radius/* * sin(thetaXCam) *//* * cos(thetaYCam) */; double zc = z - 0.5*radius/* * sin(thetaYCam) */; double xl = x; double yl = y; double zl = z; double lookX = 0/* xc - xl */; double lookY = 1 * radius/* yc - yl */; double lookZ = 0.4 * radius/* zc - zl */; double rotX = lookX; double rotY = lookY * cos( thetaYCam ) + lookZ * sin( thetaYCam ); double rotZ = lookY * sin( thetaYCam ) + lookZ * cos( thetaYCam ); lookX = rotX; lookY = rotY; lookZ = rotZ; rotX = lookX * cos( thetaXCam ) + lookY * sin( thetaXCam ); rotY = -lookX * sin( thetaXCam ) + lookY * cos( thetaXCam ) ; rotZ = lookZ; double ix = xl - rotX; double iy = yl - rotY; double iz = zl - rotZ; //printf("%lf %lf %lf\n", rotX, rotY, rotZ); gluLookAt( xc, yc, zc, x - rotX, y - rotY, z - rotZ, 0, 0, -1); } void updateThetaTorpedo( double dx, double dy){ if(((this->thetaX + dx)<=30) && ((this->thetaX + dx)>=-30)) this->thetaX += dx; if(((this->thetaY + dy)<=60) && ((this->thetaY + dy)>=0)) this->thetaY += dy; /* this->thetaX = dx; this->thetaY = dy; */ } double getTorpedoZPosition(){ return this->z - 0.5*radius; } }; #endif
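/*
 * Hedged note (illustration only, not used by the class): cam3rdPerson() and
 * camCannon() above rotate the camera offset vector by hand. The pattern is a
 * pitch rotation about the X axis followed by a yaw rotation about the Z axis:
 *
 *   rotateX(v, t) = (  v.x,
 *                      v.y*cos(t) + v.z*sin(t),
 *                     -v.y*sin(t) + v.z*cos(t) )
 *   rotateZ(v, t) = (  v.x*cos(t) + v.y*sin(t),
 *                     -v.x*sin(t) + v.y*cos(t),
 *                      v.z )
 *
 * so the eye position is roughly player_pos - rotateZ(rotateX(look, thetaYCam), thetaXCam),
 * which is exactly the sequence of rot* assignments in both camera methods.
 */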
n,x=map(int,input().split()) a,p=[1],[1] for i in range(n): a+=[a[i]*2+3]; p+=[p[i]*2+1] def f(n,x): return int(x>0) if n<1 else p[n-1]+1+f(n-1,x-2-a[n-1]) if x>a[n]//2 else f(n-1,x-1) print(f(n,x))
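# Hedged, commented equivalent of the golfed solution above (illustration
# only; same recursion, same answer). Here `a` plays the role of `layers`
# (total layers in a level-i burger: bun + burger(i-1) + patty + burger(i-1)
# + bun, so a[i] = 2*a[i-1] + 3) and `p` the role of `patties`
# (p[i] = 2*p[i-1] + 1).
#
# def eaten_patties(level, x):
#     if level == 0:
#         return 1 if x > 0 else 0          # a level-0 burger is one patty
#     if x <= a[level] // 2:                # still inside bottom bun + lower burger
#         return eaten_patties(level - 1, x - 1)
#     # past the middle patty: whole lower burger + middle patty + part of upper
#     return p[level - 1] + 1 + eaten_patties(level - 1, x - 2 - a[level - 1])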
/**
 * If the file is determined to be already minified (by checking whether the file's base name, with its
 * extension removed, ends with one of the suffixes in {@code MINIFIED_FILENAME_SUFFIXES}, e.g. ".min"
 * or "-min"), then it will copy the file to the destination rather than minifying it again.
 *
 * @param sourceFile Source file
 * @param destDir The output directory where the minified code should end up
 * @return true if and only if the file was recognised as pre-minified (and therefore copied)
 * @throws IOException If an error is encountered reading or writing the source or destination file.
 */
private boolean maybeCopyPreminifiedFileToDest(final File sourceFile, final File destDir) throws IOException {
	final Log log = minifierParameters.getLog();
	final String path = sourceFile.getName();
	final String pathNoExt = removeExtension(path);
	for (String s : MINIFIED_FILENAME_SUFFIXES) {
		if (pathNoExt.endsWith(s)) {
			String pathNoSuffix = pathNoExt.substring(0, pathNoExt.length() - s.length()) + "." + getExtension(path);
			File destFile = new File(destDir, getMinifiedFilepath(pathNoSuffix));
			log.debug(String.format("Copying pre-minified file '%s' to destination '%s'; file ends in '%s'",
					path, destFile.getName(), s));
			copyFile(sourceFile, destFile);
			return true;
		}
	}
	return false;
}
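/*
 * Hedged illustration (not from the original sources): assuming
 * MINIFIED_FILENAME_SUFFIXES contains ".min" and "-min" as the Javadoc above
 * suggests, a source file named "jquery.min.js" is detected as pre-minified:
 * the suffix is stripped to give "jquery.js", and the file is copied to
 * getMinifiedFilepath("jquery.js") under destDir instead of being minified a
 * second time. A file named "styles-min.css" would be handled the same way
 * via the "-min" suffix.
 */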
#pragma once

//STD

//LIBS

//SELF
#include "Window.hpp"
#include "Renderer.hpp"

namespace paperbag
{
    class GUI
    {
    public:
        GUI(Window* window, Renderer* renderer);
        ~GUI();

        void input(SDL_Event& event);
        void update();
        void render();

    private:
        Window* window;
        Renderer* renderer;
    };
}
// Acknowledge acknowledges a global alert. // // See more: https://docs.opsmanager.mongodb.com/current/reference/api/global-alerts/ func (s *GlobalAlertsServiceOp) Acknowledge(ctx context.Context, alertID string, body *atlas.AcknowledgeRequest) (*GlobalAlert, *Response, error) { if alertID == "" { return nil, nil, atlas.NewArgError("alertID", "must be set") } path := fmt.Sprintf("%s/%s", globalAlertsBasePath, alertID) req, err := s.Client.NewRequest(ctx, http.MethodPatch, path, body) if err != nil { return nil, nil, err } root := new(GlobalAlert) resp, err := s.Client.Do(ctx, req, root) return root, resp, err }
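// Hedged usage sketch (illustration only, not part of this package): invoking
// Acknowledge from a configured Ops Manager client. The client variable, the
// alert ID, and the exact fields of atlas.AcknowledgeRequest are assumptions
// here -- consult the atlas package for the real request shape.
//
//	ctx := context.Background()
//	body := &atlas.AcknowledgeRequest{} // fill in the acknowledged-until timestamp per the atlas package
//	alert, _, err := client.GlobalAlerts.Acknowledge(ctx, "533dc40ae4b00835ff81eaee", body)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("%+v\n", alert)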
import { KeyValue } from './key-value.interface'; export interface Template { path: string; var: KeyValue[]; }
//! Syntactic pass, aka parsing.
//!
//! Expression parser.

use std::{cell, fmt};

use crate::basic::mem;
use crate::basic::com::{Range, Span, Store, MultiStore};

use crate::model::tt;
use crate::model::ast::*;

use super::{gen, typ};
use super::com::RawParser;

pub fn parse_expression<'a, 'tree>(raw: &mut RawParser<'a, 'tree>) -> ExpressionId {
    let mut parser = ExprParser::new(*raw);
    let expr = parser.parse();
    *raw = parser.into_raw();
    expr
}

pub fn parse_pattern<'a, 'tree>(raw: &mut RawParser<'a, 'tree>) -> PatternId {
    let mut parser = PatternParser::new(*raw);
    let stmt = parser.parse();
    *raw = parser.into_raw();
    stmt
}

pub fn parse_statement<'a, 'tree>(raw: &mut RawParser<'a, 'tree>) -> StatementId {
    let mut parser = StmtParser::new(*raw);
    let stmt = parser.parse();
    *raw = parser.into_raw();
    stmt
}

//
//  Implementation Details (Expression)
//
struct ExprParser<'a, 'tree> {
    raw: RawParser<'a, 'tree>,
}

impl<'a, 'tree> ExprParser<'a, 'tree> {
    fn new(raw: RawParser<'a, 'tree>) -> ExprParser<'a, 'tree> {
        ExprParser { raw: raw }
    }

    fn into_raw(self) -> RawParser<'a, 'tree> { self.raw }

    fn parse(&mut self) -> ExpressionId {
        use crate::model::tt::Kind as K;

        fn binop(kind: tt::Kind) -> Option<(Operator, Precedence)> {
            use crate::model::ast::BinaryOperator as B;

            //  Ordered from higher precedence to lower; higher binding tighter.
            match kind {
                K::SignDoubleSlash => Some((B::FloorBy, 7)),
                K::SignStar => Some((B::Times, 7)),

                K::SignDash => Some((B::Minus, 6)),
                K::SignPlus => Some((B::Plus, 6)),

                K::SignLeft => Some((B::LessThan, 5)),
                K::SignLeftEqual => Some((B::LessThanOrEqual, 5)),
                K::SignRight => Some((B::GreaterThan, 5)),
                K::SignRightEqual => Some((B::GreaterThanOrEqual, 5)),

                K::SignBangEqual => Some((B::Different, 4)),
                K::SignDoubleEqual => Some((B::Equal, 4)),

                K::KeywordAnd => Some((B::And, 3)),

                K::KeywordXor => Some((B::Xor, 2)),

                K::KeywordOr => Some((B::Or, 1)),

                _ => None
            }.map(|(b, p)| (Operator::Bin(b), Precedence(p)))
        }

        //  An expression is an expression, optionally followed by a binary
        //  operator and another expression.

        //  Note:   an expression immediately followed by a tuple expression is
        //          a constructor or function call expression.

        //  Use the Shunting Yard algorithm to parse this "expr [op expr]" into
        //  an expression tree.
        let mut yard = ShuntingYard::new(self.raw.tree());

        while let Some(node) = self.raw.peek() {
            //  An expression.
            let (expr, range) = match node {
                tt::Node::Run(_) => {
                    let path = self.raw.parse_path(self.raw.tree());

                    match self.parse_tokens(&mut yard, path) {
                        LoopResult::Done(result) => result,
                        LoopResult::Continue => continue,
                        LoopResult::Break => break,
                    }
                },
                tt::Node::Braced(..) => {
                    let mut raw = &mut self.raw;
                    let tree = raw.tree();
                    if let Some(generics) = gen::try_parse_generic_variables(&mut raw, tree) {
                        yard.push_generics(generics);
                        continue;
                    }

                    self.parse_braced(node)
                },
                tt::Node::Bytes(..) => self.parse_bytes(node),
                tt::Node::String(..) => self.parse_string(node),
                tt::Node::UnexpectedBrace(..) => unimplemented!(),
            };
            yard.push_expression(expr, range);

            //  Optionally followed by a binary operator and another expression.
            if let Some(tt::Node::Run(tokens)) = self.raw.peek() {
                if let Some((op, prec)) = binop(tokens[0].kind()) {
                    self.raw.pop_tokens(1);
                    yard.push_operator(op, tokens[0].offset() as u32, prec);
                    continue;   // go get right hand side!
                }
            }

            match self.raw.peek_kind() {
                //  It might be a field access
                Some(K::NameField) |
                //  It might be a constructor or function call.
                Some(K::ParenthesisOpen) |
                //  It is a pack of generic arguments.
Some(K::BracketOpen) => continue, _ => break, }; } let (expr, generics, range) = yard.pop_expression(); assert!(generics.is_none()); self.raw.tree().borrow_mut().push_expression(expr, range) } fn parse_tokens( &mut self, yard: &mut ShuntingYard, path: Path ) -> LoopResult<(Expression, Range)> { use crate::model::tt::Kind as K; use self::LoopResult::*; let token = self.raw.peek_token().expect("Token"); let kind = token.kind(); match kind { K::KeywordIf => Done(self.parse_if_else()), K::KeywordLoop => Done(self.parse_loop()), K::KeywordNot => { self.raw.pop_tokens(1); yard.push_operator( Operator::Pre(PrefixOperator::Not), token.offset() as u32, Precedence(8) ); Continue }, K::LitBoolFalse | K::LitBoolTrue => Done(self.parse_bool(kind)), K::LitIntegral => Done(self.parse_integral()), K::NameField => { let field = self.parse_field_identifier(); yard.push_field(field); Continue }, K::NameValue => { self.raw.pop_tokens(1); let variable = self.raw.resolve_variable(token); let range = if let Some(range) = path.range(&*self.raw.tree().borrow()) { range.extend(token.span()) } else { token.span() }; Done((Expression::Var(variable, path), range)) }, K::SignBind => Break, _ => { let raw = &mut self.raw; let ty = typ::try_parse_type(raw, raw.tree(), path); if let Some(ty) = ty { Done(self.parse_constructor(ty)) } else { unimplemented!("Expected type, got {:?}\n{:?}", token, self.raw); } }, } } fn parse_block(&mut self) -> Block { if let Some(tt::Node::Braced(o, n, c)) = self.raw.peek() { self.raw.pop_node(); return self.parse_braces(n, o, c); } unimplemented!("Expected block, got {:?}", self.raw.peek()); } fn parse_bool(&mut self, kind: tt::Kind) -> (Expression, Range) { use self::tt::Kind::*; let value = match kind { LitBoolFalse => false, LitBoolTrue => true, _ => panic!("Unexpected kind {:?}", kind), }; let token = self.raw.pop_kind(kind).expect("true/false"); (Expression::Lit(Literal::Bool(value)), token.span()) } fn parse_braced(&mut self, node: tt::Node<'a>) -> (Expression, Range) { use crate::model::tt::Kind as K; if let tt::Node::Braced(o, n, c) = node { self.raw.pop_node(); match o.kind() { K::BraceOpen => { let block = self.parse_braces(n, o, c); (Expression::Block(block), block.span()) }, K::ParenthesisOpen => self.parse_parens(n, o, c), _ => unimplemented!(), } } else { unreachable!("Not a Braced node: {:?}", node); } } fn parse_braces(&self, ns: &'a [tt::Node], o: tt::Token, c: tt::Token) -> Block { let mut raw = self.raw.spawn(ns); let statements = parse_statements_impl(&mut raw); let expression = if let Some(_) = raw.peek_kind() { Some(ExprParser::new(raw).parse()) } else { None }; Block { statements, expression, open: o.offset() as u32, close: c.offset() as u32, } } fn parse_bytes(&mut self, node: tt::Node) -> (Expression, Range) { if let tt::Node::Bytes(_, f, _) = node { self.raw.pop_node(); let (fragments, result) = self.parse_string_impl(f); (Expression::Lit(Literal::Bytes(fragments, result)), node.span()) } else { unreachable!("Not a Bytes node: {:?}", node); } } fn parse_constructor(&mut self, ty: TypeId) -> (Expression, Range) { let sep = tt::Kind::SignBind; let (c, range) = parse_constructor_impl(&mut self.raw, parse_expression, sep, ty); (Expression::Constructor(c), range) } fn parse_field_identifier(&mut self) -> FieldIdentifier { let token = self.raw.pop_kind(tt::Kind::NameField).expect("Token"); let source = self.raw.source(token); debug_assert!(!source.is_empty()); if source[0] < b'0' || source[0] > b'9' { let id = self.raw.resolve_identifier(token); return 
FieldIdentifier::Name(id); } if let Some(i) = parse_integral_impl(source, false) { assert!(i >= 0); FieldIdentifier::Index(i as _, token.span()) } else { unimplemented!("Cannot parse {:?} from {:?}", source, token) } } fn parse_if_else(&mut self) -> (Expression, Range) { let if_ = self.raw.pop_kind(tt::Kind::KeywordIf).expect(":if"); let condition = self.parse(); // FIXME(matthieum): possibly missing. let true_expr = self.parse_block(); if let Some(else_) = self.raw.pop_kind(tt::Kind::KeywordElse) { // FIXME(matthieum): only ":if" and "{ ... }" are legal. let false_expr = self.parse_block(); return ( Expression::If(IfElse { condition, true_expr: self.insert_block(true_expr), false_expr: self.insert_block(false_expr), if_: if_.offset() as u32, else_: else_.offset() as u32, }), if_.span().extend(false_expr.span()), ); } // FIXME(matthieum): ";" is legal. unimplemented!() } fn parse_integral(&mut self) -> (Expression, Range) { let token = self.raw.pop_kind(tt::Kind::LitIntegral).expect("Token"); let source = self.raw.source(token); if let Some(i) = parse_integral_impl(source, true) { (Expression::Lit(Literal::Integral(i)), token.span()) } else { unimplemented!("Cannot parse {:?} from {:?}", source, token) } } fn parse_loop(&mut self) -> (Expression, Range) { let loop_ = self.raw.pop_kind(tt::Kind::KeywordLoop).expect(":loop"); let loop_ = loop_.offset() as u32; if let Some(tt::Node::Braced(o, ns, c)) = self.raw.peek() { self.raw.pop_node(); let mut raw = self.raw.spawn(ns); let statements = parse_statements_impl(&mut raw); assert!(raw.peek().is_none()); let open = o.offset() as u32; let close = c.offset() as u32; let loop_ = Loop { statements, loop_, open, close }; return (Expression::Loop(loop_), loop_.span()); } unimplemented!("Expected braces after :loop"); } fn parse_parens( &mut self, ns: &'a [tt::Node<'a>], o: tt::Token, c: tt::Token ) -> (Expression, Range) { (Expression::Tuple(self.parse_tuple(ns, o, c)), o.span().extend(c.span())) } fn parse_string(&mut self, node: tt::Node) -> (Expression, Range) { if let tt::Node::String(_, f, _) = node { self.raw.pop_node(); let (fragments, result) = self.parse_string_impl(f); (Expression::Lit(Literal::String(fragments, result)), node.span()) } else { unreachable!("Not a String node: {:?}", node); } } fn parse_tuple( &mut self, ns: &'a [tt::Node<'a>], o: tt::Token, c: tt::Token ) -> Tuple<Expression> { let sep = tt::Kind::SignBind; let tree = self.raw.tree(); self.raw.parse_tuple(tree, parse_expression, sep, ns, o, c) } fn parse_string_impl(&self, f: &[StringFragment]) -> (Id<[StringFragment]>, mem::InternId) { use self::StringFragment::*; let mut buffer = vec!(); for &fragment in f { match fragment { Text(tok) => buffer.extend(self.raw.source(tok)), SpecialCharacter(tok) => match self.raw.source(tok) { b"N" => buffer.push(b'\n'), _ => unimplemented!(), }, _ => unimplemented!(), } } ( self.raw.tree().borrow_mut().push_string_fragments(f), self.raw.intern_bytes(&buffer), ) } fn insert(&self, expr: Expression, range: Range) -> ExpressionId { self.raw.tree().borrow_mut().push_expression(expr, range) } fn insert_block(&self, block: Block) -> ExpressionId { self.insert(Expression::Block(block), block.span()) } } #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)] enum Operator { Bin(BinaryOperator), Pre(PrefixOperator), } #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)] struct Precedence(u8); struct ShuntingYard<'a> { tree: &'a cell::RefCell<Tree>, op_stack: Vec<(Operator, u32, Precedence)>, expr_stack: 
Vec<(Expression, Option<Id<GenericVariablePack>>, Range)>, } impl<'a> ShuntingYard<'a> { fn new(tree: &'a cell::RefCell<Tree>) -> ShuntingYard<'a> { ShuntingYard { tree, op_stack: vec!(), expr_stack: vec!(), } } fn push_expression(&mut self, expr: Expression, range: Range) { // Function calls are distinguished from regular expression by having // a normal expression immediately followed by a tuple expression // without an intervening operator. if let Expression::Tuple(arguments) = expr { if let Some((callee, generics, range)) = self.pop_trailing_expression() { // Method calls are distinguished from Function calls by // having a tuple expression following a field access. let expr = if let Expression::FieldAccess(fa) = callee { MethodCall { receiver: fa.accessed, method: fa.field, generics, arguments, }.into() } else { let function = self.insert(callee, range); FunctionCall { function, generics, arguments, }.into() }; let range = range.extend(arguments.span()); self.expr_stack.push((expr, None, range)); return; } } self.expr_stack.push((expr, None, range)); } fn push_generics(&mut self, generics: Id<GenericVariablePack>) { if let Some((_, g, _)) = self.expr_stack.last_mut() { assert!(g.is_none()); *g = Some(generics); } else { unreachable!("Cannot push generics without an expression!"); } } fn push_field(&mut self, field: FieldIdentifier) { if let Some((accessed, generics, range)) = self.pop_trailing_expression() { assert!(generics.is_none()); let accessed = self.insert(accessed, range); self.expr_stack.push(( FieldAccess { accessed, field, }.into(), None, range.extend(field.span()), )); return; } unimplemented!("{:?} -> {:?}", field, self); } fn push_operator( &mut self, op: Operator, pos: u32, prec: Precedence) { self.pop_operators(prec); self.op_stack.push((op, pos, prec)); } fn pop_expression(&mut self) -> (Expression, Option<Id<GenericVariablePack>>, Range) { self.pop_operators(Precedence(0)); if let Some(r) = self.expr_stack.pop() { r } else { unreachable!("Could not pop expression - {:?}", self); } } fn pop_operators(&mut self, threshold: Precedence) { while let Some((op, pos)) = self.pop_operator_impl(threshold) { match op { Operator::Bin(op) => { let (right_hand, right_generics, right_range) = self.expr_stack.pop().expect("Right"); let (left_hand, left_generics, left_range) = self.expr_stack.pop().expect("Left"); assert!(right_generics.is_none() && left_generics.is_none()); let left = self.insert(left_hand, left_range); let right = self.insert(right_hand, right_range); self.expr_stack.push(( Expression::BinOp(op, pos, left, right), None, left_range.extend(right_range), )); }, Operator::Pre(op) => { let (expr, generics, range) = self.expr_stack.pop().expect("Expression"); assert!(generics.is_none()); let expr = self.insert(expr, range); self.expr_stack.push(( Expression::PreOp(op, pos, expr), None, Range::new(pos as usize, 1).extend(range), )); }, }; } } /// Pops the last expression if there was no operator afterwards fn pop_trailing_expression(&mut self) -> Option<(Expression, Option<Id<GenericVariablePack>>, Range)> { let last_op = self.op_stack.last().map(|&(_, pos, _)| pos).unwrap_or(0); if let Some((_, _, range)) = self.expr_stack.last().cloned() { if last_op as usize <= range.offset() { return self.expr_stack.pop(); } } None } fn pop_operator_impl(&mut self, threshold: Precedence) -> Option<(Operator, u32)> { if let Some((op, pos, prec)) = self.op_stack.last().cloned() { if threshold <= prec { self.op_stack.pop(); return Some((op, pos)); } } None } fn insert(&self, expr: 
Expression, range: Range) -> ExpressionId { self.tree.borrow_mut().push_expression(expr, range) } } impl<'a> fmt::Debug for ShuntingYard<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!( f, "expr_stack: {:?}, op_stack: {:?}", self.expr_stack, self.op_stack ) } } fn parse_integral_impl(raw: &[u8], allow_underscores: bool) -> Option<i64> { let mut value = 0; for byte in raw { match *byte { b'0'..=b'9' => { value *= 10; value += (byte - b'0') as i64; }, b'_' if allow_underscores => (), _ => return None, } } Some(value) } // // Implementation Details (Pattern) // #[derive(Debug)] struct PatternParser<'a, 'tree> { raw: RawParser<'a, 'tree>, } impl<'a, 'tree> PatternParser<'a, 'tree> { fn new(raw: RawParser<'a, 'tree>) -> Self { PatternParser { raw: raw } } fn into_raw(self) -> RawParser<'a, 'tree> { self.raw } fn parse(&mut self) -> PatternId { use crate::model::tt::Kind as K; match self.raw.peek_kind() { Some(K::NameType) => self.parse_type_name(), Some(K::NameValue) => self.parse_value_name(), Some(K::ParenthesisOpen) => self.parse_parens(), Some(K::SignUnderscore) => self.parse_underscore(), Some(k) => unimplemented!("Expected identifier or tuple, got {:?}", k), None => unimplemented!("Expected identifier or tuple, got nothing"), } } fn parse_parens(&mut self) -> PatternId { match self.raw.peek() { Some(tt::Node::Braced(o, n, c)) => { self.raw.pop_node(); self.parse_tuple(n, o, c) }, n => unimplemented!("Expected tuple, got {:?}", n), } } fn parse_type_name(&mut self) -> PatternId { let raw = &mut self.raw; if let Some(ty) = typ::try_parse_type(raw, raw.tree(), Path::empty()) { let sep = tt::Kind::SignColon; let (c, range) = parse_constructor_impl(raw, parse_pattern, sep, ty); self.insert(Pattern::Constructor(c), range) } else { unimplemented!("parse_type_name - {:?}", self) } } fn parse_underscore(&mut self) -> PatternId { let u = self.raw.pop_kind(tt::Kind::SignUnderscore).expect("underscore"); self.insert(Pattern::Ignored(u.span()), u.span()) } fn parse_value_name(&mut self) -> PatternId { let name = self.raw.pop_kind(tt::Kind::NameValue).expect("name"); self.insert(Pattern::Var(self.raw.resolve_variable(name)), name.span()) } fn parse_tuple( &mut self, ns: &'a [tt::Node<'a>], o: tt::Token, c: tt::Token ) -> PatternId { let sep = tt::Kind::SignColon; let tree = self.raw.tree(); let tup = self.raw.parse_tuple(tree, parse_pattern, sep, ns, o, c); self.insert(Pattern::Tuple(tup), tup.span()) } fn insert(&self, pat: Pattern, range: Range) -> PatternId { self.raw.tree().borrow_mut().push_pattern(pat, range) } } // // Implementation Details (Statement) // struct StmtParser<'a, 'tree> { raw: RawParser<'a, 'tree>, } impl<'a, 'tree> StmtParser<'a, 'tree> { fn new(raw: RawParser<'a, 'tree>) -> Self { StmtParser { raw: raw } } fn into_raw(self) -> RawParser<'a, 'tree> { self.raw } fn parse(&mut self) -> StatementId { use crate::model::tt::Kind as K; match self.raw.peek_kind() { Some(K::KeywordReturn) => self.parse_return(), Some(K::KeywordSet) => self.parse_set(), Some(K::KeywordVar) => self.parse_var(), Some(k) => unimplemented!( "Expected :return, :set or :var, got {:?}", k ), None => unimplemented!( "Expected :return, :set or :var, got nothing" ), } } fn parse_return(&mut self) -> StatementId { use crate::model::tt::Kind as K; let ret = self.pop(K::KeywordReturn).expect(":return"); let expr = if self.raw.peek_kind() != Some(K::SignSemiColon) { Some(parse_expression(&mut self.raw)) } else { None }; let expr_range = expr.map(|e| 
self.raw.tree().borrow().get_expression_range(e)); let semi = self.pop(K::SignSemiColon).unwrap_or( expr_range.map(|e| e.end_offset() as u32 - 1).unwrap_or(ret + 6) ); self.insert(Statement::Return(Return { expr, ret, semi })) } fn parse_set(&mut self) -> StatementId { let set = self.pop(tt::Kind::KeywordSet).expect(":set"); let left = parse_expression(&mut self.raw); let (expr, bind, semi) = self.parse_bind(); self.insert(Statement::Set(VariableReBinding { left, expr, set, bind, semi })) } fn parse_var(&mut self) -> StatementId { let var = self.pop(tt::Kind::KeywordVar).expect(":var"); let pattern = parse_pattern(&mut self.raw); // TODO(matthieum): parse type. let (expr, bind, semi) = self.parse_bind(); self.insert(Statement::Var(VariableBinding { pattern, type_: None, expr, var, colon: 0, bind, semi, })) } fn parse_bind(&mut self) -> (ExpressionId, u32, u32) { let bind = self.pop(tt::Kind::SignBind).unwrap_or(0); let expr = parse_expression(&mut self.raw); let expr_range = self.raw.tree().borrow().get_expression_range(expr); let semi = self.pop(tt::Kind::SignSemiColon) .unwrap_or(expr_range.end_offset() as u32 - 1); (expr, bind, semi) } fn pop(&mut self, kind: tt::Kind) -> Option<u32> { self.raw .pop_kind(kind) .map(|t| t.offset() as u32) } fn insert(&self, stmt: Statement) -> StatementId { self.raw.tree().borrow_mut().push_statement(stmt) } } // // Implementation Details (Tuple) // enum LoopResult<T> { Done(T), Continue, Break, } fn parse_constructor_impl<'a, 'tree, T: Copy>( raw: &mut RawParser<'a, 'tree>, inner_parser: fn(&mut RawParser<'a, 'tree>) -> Id<T>, separator: tt::Kind, type_: TypeId, ) -> (Constructor<T>, Range) where Tree: Store<T> + MultiStore<Id<T>> { let range = raw.tree().borrow().get_type_range(type_); let (arguments, range) = if let Some(tt::Node::Braced(o, ns, c)) = raw.peek() { assert_eq!(o.kind(), tt::Kind::ParenthesisOpen); raw.pop_node(); let tree = raw.tree(); let arguments = raw.parse_tuple(tree, inner_parser, separator, ns, o, c); (arguments, range.extend(arguments.span())) } else { (Default::default(), range) }; (Constructor { type_, arguments }, range) } fn parse_statements_impl<'a, 'tree>(raw: &mut RawParser<'a, 'tree>) -> Id<[StatementId]> { let mut stmts = vec!(); while let Some(tok) = raw.peek().map(|n| n.front()) { if tok.kind() != tt::Kind::KeywordReturn && tok.kind() != tt::Kind::KeywordSet && tok.kind() != tt::Kind::KeywordVar { break; } stmts.push(parse_statement(raw)); } raw.tree().borrow_mut().push_statement_ids(&stmts) } // // Tests // #[cfg(test)] mod tests { use std::ops; use crate::model::ast::*; use super::super::com::tests::Env; #[test] fn basic_add() { let env = LocalEnv::new(b"1 + 2"); let e = env.factory().expr(); e.bin_op(e.int(1, 0), e.int(2, 4)).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_var() { let env = LocalEnv::new(b" :var fool := 1234;"); let (e, _, _, p, s, _, _) = env.factories(); s.var(p.var(6, 4), e.int(1234, 14)).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn basic_var_automatic_insertion() { let env = LocalEnv::new(b" :var fool 1234"); let (e, _, _, p, s, _, _) = env.factories(); s.var(p.var(6, 4), e.int(1234, 11)) .bind(0) .semi_colon(14) .build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn basic_bytes() { let env = LocalEnv::new(b"b'1 + 2'"); let e = env.factory().expr(); e.literal(0, 8).push_text(2, 5).bytes().build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_constructor() { let env = 
LocalEnv::new(b"True"); let (e, _, _, _, _, _, t) = env.factories(); e.constructor(t.simple(0, 4)).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_constructor_arguments() { let env = LocalEnv::new(b"Some(1)"); let (e, _, _, _, _, _, t) = env.factories(); e.constructor(t.simple(0, 4)).push(e.int(1, 5)).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_nested_constructor() { let env = LocalEnv::new(b"Bool::True"); let (e, _, _, _, _, _, t) = env.factories(); e.constructor(t.nested(6, 4).push(0, 4).build()).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_function_call() { let env = LocalEnv::new(b"basic(1, 2)"); let (e, _, _, _, _, _, _) = env.factories(); let (one, two) = (e.int(1, 6), e.int(2, 9)); e.function_call(e.var(0, 5), 5, 10).push(one).push(two).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_nested_function_call() { let env = LocalEnv::new(b"Nested::basic(1, 2)"); let (e, _, _, _, _, _, _) = env.factories(); let (one, two) = (e.int(1, 14), e.int(2, 17)); e.function_call( e.nested(8, 5).push(0, 6).build(), 13, 18 ) .push(one) .push(two) .build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_generic_function_call() { let env = LocalEnv::new(b"basic[Int](1, 2)"); let (e, g, _, _, _, _, t) = env.factories(); let (one, two) = (e.int(1, 11), e.int(2, 14)); let generics = g.variables_tree().push(g.variable_type(t.simple(6, 3))).build(); e.function_call(e.var(0, 5), 10, 15).generics(generics).push(one).push(two).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_function_call_statement() { let env = LocalEnv::new(b":var a := basic(1, 2);"); let (e, _, _, p, s, _, _) = env.factories(); let (one, two) = (e.int(1, 16), e.int(2, 19)); let fun = e.function_call(e.var(10, 5), 15, 20) .push(one) .push(two) .build(); s.var(p.var(5, 1), fun).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn basic_if_else() { let env = LocalEnv::new(b":if true { 1 } :else { 0 }"); let e = env.factory().expr(); let (cond, one, two) = (e.bool_(true, 4), e.int(1, 11), e.int(0, 23)); e.if_else(cond, e.block(one).build(), e.block(two).build()).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn basic_string() { let env = LocalEnv::new(b"'1 + 2'"); let e = env.factory().expr(); e.literal(0, 7).push_text(1, 5).string().build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn block_basic() { let env = LocalEnv::new(b"{\n :var fool := 1234;\n fool\n}"); let (e, _, _, p, s, _, _) = env.factories(); let int = e.int(1234, 19); e.block(e.var(29, 4)) .range(0, 35) .push_stmt(s.var(p.var(11, 4), int).build()) .build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn block_return() { let env = LocalEnv::new(b"{ :return 1; }"); let (e, _, _, _, s, _, _) = env.factories(); e.block_expression_less() .push_stmt(s.ret().expr(e.int(1, 10)).build()) .build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn boolean_basic() { let env = LocalEnv::new(b":var x := true;"); let (e, _, _, p, s, _, _) = env.factories(); s.var(p.var(5, 1), e.bool_(true, 10)).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn constructor_keyed() { let env = LocalEnv::new(b"Person(.name := jack_jack, .age := 1)"); let (e, _, _, _, _, _, t) = env.factories(); e.constructor(t.simple(0, 6)) .name(7, 
5).separator(13).push(e.var(16, 9)) .name(27, 4).separator(32).push(e.int(1, 35)) .build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn field_access_basic() { let env = LocalEnv::new(b"tup.42"); let e = env.factory().expr(); e.field_access(e.var(0, 3)).index(42).range(3, 3).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn field_access_keyed() { let env = LocalEnv::new(b"tup.x"); let e = env.factory().expr(); e.field_access(e.var(0, 3)).name(3, 2).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn field_access_recursive() { let env = LocalEnv::new(b"tup.42.53"); let e = env.factory().expr(); let field = e.field_access(e.var(0, 3)).index(42).range(3, 3).build(); e.field_access(field).index(53).range(6, 3).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn loop_empty() { let env = LocalEnv::new(b":loop { }"); let e = env.factory().expr(); e.loop_(0).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn method_call_basic() { let env = LocalEnv::new(b"foo.42()"); let e = env.factory().expr(); e.method_call(e.var(0, 3), 6, 7).index(42).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn method_call_keyed() { let env = LocalEnv::new(b"foo.bar()"); let e = env.factory().expr(); e.method_call(e.var(0, 3), 7, 8).name(3, 4).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn method_call_recursive() { let env = LocalEnv::new(b"foo.42.53()"); let e = env.factory().expr(); let field = e.field_access(e.var(0, 3)).index(42).range(3, 3).build(); e.method_call(field, 9, 10).index(53).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn method_call_single_argument() { let env = LocalEnv::new(b"foo.bar(42)"); let e = env.factory().expr(); e.method_call(e.var(0, 3), 7, 10) .name(3, 4) .push(e.int(42, 8)) .build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn method_call_generic() { let env = LocalEnv::new(b"foo.bar[3]()"); let (e, g, _, _, _, _, _) = env.factories(); let generics = g.variables_tree().push(g.variable_literal(Literal::Integral(3), 8, 1)).build(); e.method_call(e.var(0, 3), 10, 11).generics(generics).name(3, 4).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn set_basic() { let env = LocalEnv::new(b" :set fool := 1234;"); let (e, _, _, _, s, _, _) = env.factories(); s.set(e.var(6, 4), e.int(1234, 14)).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn set_field() { let env = LocalEnv::new(b" :set foo.0 := 1234;"); let (e, _, _, _, s, _, _) = env.factories(); s.set( e.field_access(e.var(6, 3)).index(0).build(), e.int(1234, 15) ).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn var_constructor() { let env = LocalEnv::new(b":var Some(x) := Some(1);"); let (e, _, _, p, s, _, t) = env.factories(); s.var( p.constructor(t.simple(5, 4)).push(p.var(10, 1)).build(), e.constructor(t.simple(16, 4)).push(e.int(1, 21)).build(), ).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn var_constructor_keyed() { let env = LocalEnv::new(b":var Person(.name: n, .age: a) := p;"); let (e, _, _, p, s, _, t) = env.factories(); let pat = p.constructor(t.simple(5, 6)) .name(12, 5).push(p.var(19, 1)) .name(22, 4).push(p.var(28, 1)) .build(); s.var(pat, e.var(34, 1)).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn var_ignored() { let 
env = LocalEnv::new(b":var _ := 1;"); let (e, _, _, p, s, _, _) = env.factories(); s.var(p.ignored(5), e.int(1, 10)).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn var_ignored_nested() { let env = LocalEnv::new(b":var (_, b) := (1, 2);"); let (e, _, _, p, s, _, _) = env.factories(); s.var( p.tuple().push(p.ignored(6)).push(p.var(9, 1)).build(), e.tuple().push(e.int(1, 16)).push(e.int(2, 19)).build(), ).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn var_tuple() { let env = LocalEnv::new(b":var (a, b) := (1, 2);"); let (e, _, _, p, s, _, _) = env.factories(); s.var( p.tuple().push(p.var(6, 1)).push(p.var(9, 1)).build(), e.tuple().push(e.int(1, 16)).push(e.int(2, 19)).build(), ).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn var_tuple_keyed() { let env = LocalEnv::new(b":var (.x: a, .y: b) := foo();"); let (e, _, _, p, s, _, _) = env.factories(); s.var( p.tuple() .name(6, 2).push(p.var(10, 1)) .name(13, 2).push(p.var(17, 1)) .build(), e.function_call(e.var(23, 3), 26, 27).build(), ).build(); assert_eq!(env.actual_statement(), env.expected_tree()); } #[test] fn shunting_yard_prefix() { let env = LocalEnv::new(b":not a :or b :and c"); let e = env.factory().expr(); let (a, b, c) = (e.var(5, 1), e.var(11, 1), e.var(18, 1)); e.bin_op( e.pre_op(a).build(), e.bin_op(b, c).and().build() ).or().build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn shunting_yard_simple() { let env = LocalEnv::new(b"1 + 2 * 3 < 4 // 5"); let e = env.factory().expr(); let (two, three, one) = (e.int(2, 4), e.int(3, 8), e.int(1, 0)); let mult = e.bin_op(two, three).times().build(); let (four, five) = (e.int(4, 12), e.int(5, 17)); let left = e.bin_op(one, mult).build(); let right = e.bin_op(four, five).floor_by().build(); e.bin_op(left, right).less_than().build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn tuple_basic() { let env = LocalEnv::new(b"(1)"); let e = env.factory().expr(); e.tuple().push(e.int(1, 1)).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn tuple_keyed() { let env = LocalEnv::new(b"(.x := 1, .y := 2)"); let e = env.factory().expr(); e.tuple() .name(1, 2).separator(4).push(e.int(1, 7)) .name(10, 2).separator(13).push(e.int(2, 16)) .build(); assert_eq!(env.actual_expression(), env.expected_tree()); } #[test] fn tuple_nested() { let env = LocalEnv::new(b"(1, (2, 3), 4)"); let e = env.factory().expr(); let one = e.int(1, 1); let inner = e.tuple().push(e.int(2, 5)).push(e.int(3, 8)).build(); e.tuple().push(one).push(inner).push(e.int(4, 12)).build(); assert_eq!(env.actual_expression(), env.expected_tree()); } struct LocalEnv { env: Env, } impl LocalEnv { fn new(source: &[u8]) -> LocalEnv { LocalEnv { env: Env::new(source), } } fn actual_expression(&self) -> Tree { let mut raw = self.env.raw(); super::parse_expression(&mut raw); let result = self.env.actual_tree().borrow().clone(); println!("actual_expression: {:#?}", result); println!(); result } fn actual_statement(&self) -> Tree { let mut raw = self.env.raw(); super::parse_statement(&mut raw); let result = self.env.actual_tree().borrow().clone(); println!("actual_statement: {:#?}", result); println!();
result } fn expected_tree(&self) -> Tree { let result = self.env.expected_tree().borrow().clone(); println!("expected_tree: {:#?}", result); println!(); result } } impl ops::Deref for LocalEnv { type Target = Env; fn deref(&self) -> &Env { &self.env } } }
/** * @brief Sends CAN messages from server to an in-memory FIFO queue */ int fixtureServerSendCAN(const uint32_t arbitration_id, const uint8_t *data, const uint8_t size) { assert(size <= 8); assert(g_clientRecvQueueIdx < CAN_MESSAGE_QUEUE_SIZE); struct CANMessage *msg = &g_clientRecvQueue[g_clientRecvQueueIdx++]; memmove(msg->data, data, size); msg->arbId = arbitration_id; msg->size = size; printf("s>0x%03x [%02d]: ", arbitration_id, g_clientRecvQueueIdx); PRINTHEX(data, size); return ISOTP_RET_OK; }
/** Called when an "event" happens on an already-connected socket. This can only be an error or EOF. */ static void error_cb(struct bufferevent *bev, short what, void *arg) { conn_t *conn = arg; int errcode = EVUTIL_SOCKET_ERROR(); if (what & BEV_EVENT_ERROR) { log_debug("%s for %s: what=0x%04x errno=%d", __func__, safe_str(conn->peername), what, errcode); } else { log_debug("%s for %s: what=0x%04x", __func__, safe_str(conn->peername), what); } obfs_assert(!(what & BEV_EVENT_CONNECTED)); if (what & BEV_EVENT_ERROR) { log_info("Error talking to %s: %s", safe_str(conn->peername), evutil_socket_error_to_string(errcode)); } else if (what & BEV_EVENT_EOF) { log_info("EOF from %s", safe_str(conn->peername)); } else if (what & BEV_EVENT_TIMEOUT) { log_info("Timeout talking to %s", safe_str(conn->peername)); } error_or_eof(conn); }
// Mounted checks if the given path is mounted as the fs type.
// Solaris supports only ZFS for now.
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
	cs := C.CString(filepath.Dir(mountPath))
	defer C.free(unsafe.Pointer(cs))
	buf := C.getstatfs(cs)
	defer C.free(unsafe.Pointer(buf))
	// f_basetype must be the NUL-terminated string "zfs" ('z' = 122, 'f' = 102, 's' = 115).
	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || (buf.f_basetype[3] != 0) {
		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
		return false, ErrPrerequisites
	}
	return true, nil
}
#pragma once

#include <optional>
#include <string_view>

#include "Runtime/GCNTypes.hpp"
#include "Runtime/World/CActor.hpp"

#include <zeus/CAABox.hpp>

namespace urde {

class CScriptAiJumpPoint : public CActor {
private:
  float xe8_apex;
  zeus::CAABox xec_touchBounds;
  bool x108_24_inUse : 1 = false;
  TUniqueId x10a_occupant = kInvalidUniqueId;
  TUniqueId x10c_currentWaypoint = kInvalidUniqueId;
  TUniqueId x10e_nextWaypoint = kInvalidUniqueId;
  float x110_timeRemaining = 0.f;

public:
  CScriptAiJumpPoint(TUniqueId, std::string_view, const CEntityInfo&, zeus::CTransform&, bool, float);

  void Accept(IVisitor& visitor) override;
  void Think(float, CStateManager&) override;
  void AcceptScriptMsg(EScriptObjectMessage, TUniqueId, CStateManager&) override;
  void AddToRenderer(const zeus::CFrustum&, CStateManager&) override {}
  void Render(CStateManager&) override {}
  std::optional<zeus::CAABox> GetTouchBounds() const override;
  bool GetInUse(TUniqueId uid) const;
  TUniqueId GetJumpPoint() const { return x10c_currentWaypoint; }
  TUniqueId GetJumpTarget() const { return x10e_nextWaypoint; }
  float GetJumpApex() const { return xe8_apex; }
};

} // namespace urde
package com.glmis.service.personnel; import com.glmis.domain.personnel.YesOrNo; import com.glmis.service.BasicService; import org.springframework.stereotype.Service; /** * Created by dell on 2016/11/17. */ @Service public class YesOrNoService extends BasicService<YesOrNo,Long>{ }
def _reset_gensym():
    """Reset the module-level gensym counter back to zero."""
    global _gensym_counter
    _gensym_counter = 0
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include "ipv6text.h"

#ifdef _WIN32
#ifndef snprintf
#define snprintf _snprintf
#endif
#endif

void IPv6IPToString( char *pszOutText, const unsigned char *ip )
{
	// Find the longest run of consecutive zero quads.
	// If there's a tie, we want the leftmost one.
	int idxLongestRunStart = -1;
	int nLongestRun = 1; // It must be at least 2 quads in a row, a single 0 must not be compressed
	int nCurrentRun = 0;
	int idxQuad;
	for ( idxQuad = 0 ; idxQuad < 8 ; ++idxQuad )
	{
		// Zero
		if ( ip[idxQuad*2] || ip[idxQuad*2 + 1] )
		{
			// Terminate run
			nCurrentRun = 0;
		}
		else
		{
			// Extend (or begin) run
			++nCurrentRun;

			// Longer than previously found run?
			if ( nCurrentRun > nLongestRun )
			{
				nLongestRun = nCurrentRun;
				idxLongestRunStart = idxQuad - nCurrentRun + 1;
			}
		}
	}

	// Print the quads
	char *p = pszOutText;
	idxQuad = 0;
	bool bNeedColon = false;
	while ( idxQuad < 8 )
	{
		// Run of compressed zeros?
		if ( idxQuad == idxLongestRunStart )
		{
			*(p++) = ':';
			*(p++) = ':';
			bNeedColon = false;
			idxQuad += nLongestRun;
		}
		else
		{
			// Colon to separate from previous, unless
			// we are first or immediately follow compressed zero "::"
			if ( bNeedColon )
				*(p++) = ':';

			// Next quad should print a separator
			bNeedColon = true;

			// Assemble 16-bit quad value from the two bytes
			unsigned quad = ( (unsigned)ip[idxQuad*2] << 8U ) | ip[idxQuad*2 + 1];

			// Manually do the hex number formatting.
			// Lowercase hex digits, with leading zeros omitted
			static const char hexdigits[] = "0123456789abcdef";
			if ( quad >= 0x0010 )
			{
				if ( quad >= 0x0100 )
				{
					if ( quad >= 0x1000 )
						*(p++) = hexdigits[ quad >> 12U ];
					*(p++) = hexdigits[ ( quad >> 8U ) & 0xf ];
				}
				*(p++) = hexdigits[ ( quad >> 4U ) & 0xf ];
			}

			// Least significant digit, which is always printed
			*(p++) = hexdigits[ quad & 0xf ];

			// On to the next one
			++idxQuad;
		}
	}

	// String terminator
	*p = '\0';
}

void IPv6AddrToString( char *pszOutText, const unsigned char *ip, uint16_t port, uint32_t scope )
{
	char *p = pszOutText;

	// Open bracket
	*(p++) = '[';

	// Print in the IP
	IPv6IPToString( p, ip );

	// Find the end of the string
	while (*p)
		++p;

	if ( scope )
	{
		// And now the scope. Max 32-bit scope number is 10 digits
		snprintf( p, 12, "%%%u", (unsigned int)scope );

		// Find the end of the string
		while (*p)
			++p;
	}

	// And now the rest. Max 16-bit port number is 5 digits
	snprintf( p, 8, "]:%u", (unsigned int)port );
}

static inline int ParseIPv6Addr_HexDigitVal( char c )
{
	if ( c >= '0' && c <= '9' ) return c - '0';
	if ( c >= 'a' && c <= 'f' ) return c - ('a' - 0xa);
	if ( c >= 'A' && c <= 'F' ) return c - ('A' - 0xa);
	return -1;
}

static inline int ParseIPv6Addr_DecimalDigitVal( char c )
{
	if ( c >= '0' && c <= '9' ) return c - '0';
	return -1;
}

bool ParseIPv6Addr_IsSpace( char c )
{
	// Newlines don't count, intentionally
	return c == ' ' || c == '\t';
}

bool ParseIPv6Addr( const char *pszText, unsigned char *pOutIP, int *pOutPort, uint32_t *pOutScope )
{
	while ( ParseIPv6Addr_IsSpace( *pszText ) )
		++pszText;
	const char *s = pszText;

	// Skip opening bracket, if present
	if ( *s == '[' )
	{
		++s;
		while ( ParseIPv6Addr_IsSpace( *s ) )
			++s;
	}

	// Special case for leading "::"
	bool bQuadMustFollow = true;
	unsigned char *d = pOutIP;
	unsigned char *pZeroFill = NULL;
	unsigned char *pEndIP = pOutIP + 16;
	if ( s[0] == ':' && s[1] == ':' )
	{
		pZeroFill = d;
		s += 2;
		bQuadMustFollow = false;
	}

	// Parse quads until we get to the end
	for (;;)
	{
		// Next thing must be a quad, or end of input. Is it a quad?
		int quadDigit = ParseIPv6Addr_HexDigitVal( *s );
		if ( quadDigit < 0 )
		{
			if ( bQuadMustFollow )
				return false;
			break;
		}

		// No room for more quads?
		if ( d >= pEndIP )
			return false;

		++s;
		int quad = quadDigit;

		// Now parse up to three additional characters
		quadDigit = ParseIPv6Addr_HexDigitVal( *s );
		if ( quadDigit >= 0 )
		{
			quad = ( quad << 4 ) | quadDigit;
			++s;
			quadDigit = ParseIPv6Addr_HexDigitVal( *s );
			if ( quadDigit >= 0 )
			{
				quad = ( quad << 4 ) | quadDigit;
				++s;
				quadDigit = ParseIPv6Addr_HexDigitVal( *s );
				if ( quadDigit >= 0 )
				{
					quad = ( quad << 4 ) | quadDigit;
					++s;
				}
			}
		}

		// Stash it in the next slot, ignoring for now the issue
		// of compressed zeros
		*(d++) = (unsigned char)( quad >> 8 );
		*(d++) = (unsigned char)quad;

		// Only valid character for the IP portion is a colon.
		// Anything else ends the IP portion
		if ( *s != ':' )
			break;

		// Compressed zeros?
		if ( s[1] == ':' )
		{
			// Eat '::'
			s += 2;

			// Can only have one range of compressed zeros
			if ( pZeroFill )
				return false;

			// Remember where to insert the compressed zeros
			pZeroFill = d;

			// An IP can end with '::'
			bQuadMustFollow = false;
		}
		else
		{
			// If they have filled the entire IP with no compressed zeros,
			// then this is unambiguously a port number. That's not
			// necessarily the best style, but it *is* unambiguous
			// what it should mean, so let's allow it. If there
			// are compressed zeros, then this is ambiguous, and we will
			// always interpret it as a quad.
			if ( !pZeroFill && d >= pEndIP )
				break; // leave ':' as next character, for below

			// Eat ':'
			++s;

			// A single colon must be followed by another quad
			bQuadMustFollow = true;
		}
	}

	// End of the IP. Do we have compressed zeros?
	if ( pZeroFill )
	{
		// How many zeros do we need to fill?
		intptr_t nZeros = pEndIP - d;
		if ( nZeros <= 0 )
			return false;

		// Shift the quads after the bytes to the end
		memmove( pZeroFill+nZeros, pZeroFill, d-pZeroFill );

		// And now fill the zeros
		memset( pZeroFill, 0, nZeros );
	}
	else
	{
		// No compressed zeros. Just make sure we filled the IP exactly
		if ( d != pEndIP )
			return false;
	}

	if ( *s == '%' )
	{
		++s;

		// Parse scope number
		uint32_t unScope = 0;
		int nScopeDigit = ParseIPv6Addr_DecimalDigitVal( *s );
		if ( nScopeDigit < 0 )
			return false;
		unScope = (uint32_t)nScopeDigit;
		for (;;)
		{
			++s;
			if ( *s == '\0' || *s == ']' || ParseIPv6Addr_IsSpace( *s ) )
				break;
			nScopeDigit = ParseIPv6Addr_DecimalDigitVal( *s );
			if ( nScopeDigit < 0 )
				return false;
			unScope = unScope * 10 + nScopeDigit;
		}
		if ( pOutScope )
			*pOutScope = unScope;
	}
	else
	{
		if ( pOutScope )
			*pOutScope = 0;
	}

	// If we started with a bracket, then the next character MUST be a bracket.
	// (And this is the only circumstance in which a closing bracket would be legal)
	if ( *pszText == '[' )
	{
		while ( ParseIPv6Addr_IsSpace( *s ) )
			++s;
		if ( *s != ']' )
			return false;
		++s;
	}

	// Now we are definitely at the end of the IP. Do we have a port?
	// We support all of the syntaxes mentioned in RFC5952 section 6 other
	// than the ambiguous case
	if ( *s == ':' || *s == '#' || *s == '.' || *s == 'p' || *s == 'P' )
	{
		++s;
	}
	else
	{
		while ( ParseIPv6Addr_IsSpace( *s ) )
			++s;
		if ( *s == '\0' )
		{
			// Parsed IP without port OK
			if ( pOutPort )
				*pOutPort = -1;
			return true;
		}
		if ( strncmp( s, "port", 4 ) == 0 )
		{
			s += 4;
			while ( ParseIPv6Addr_IsSpace( *s ) )
				++s;
		}
		else
		{
			// Extra stuff after the IP which isn't whitespace or a port
			return false;
		}
	}

	// We have a port. If they didn't ask for it, that's considered a parse failure.
	if ( !pOutPort )
		return false;

	// Parse port number
	int nPort = ParseIPv6Addr_DecimalDigitVal( *s );
	if ( nPort < 0 )
		return false;
	for (;;)
	{
		++s;
		if ( *s == '\0' || ParseIPv6Addr_IsSpace( *s ) )
			break;
		int portDigit = ParseIPv6Addr_DecimalDigitVal( *s );
		if ( portDigit < 0 )
			return false;
		nPort = nPort * 10 + portDigit;
		if ( nPort > 0xffff )
			return false;
	}

	// Consume trailing whitespace; confirm nothing else in the input
	while ( ParseIPv6Addr_IsSpace( *s ) )
		++s;
	if ( *s != '\0' )
		return false;

	*pOutPort = nPort;
	return true;
}
President Donald Trump boards Air Force One at Chennault International Airport in Lake Charles, La., following a visit with those helping with the impacted of Hurricane Harvey, Saturday, Sept. 2, 2017. AP Photo/Susan Walsh President Donald Trump issued a set of tweets Sunday morning attacking North Korea after the country appeared to conduct its sixth nuclear test. "North Korea has conducted a major Nuclear Test. Their words and actions continue to be very hostile and dangerous to the United States," Trump tweeted. Before the test, North Korea claimed that leader Kim Jong Un had inspected a hydrogen bomb meant for a new intercontinental ballistic missile. South Korea's military said Sunday that North Korea is believed to have conducted its sixth nuclear test after it detected a strong earthquake. Trump also went after China's and South Korea's efforts to counter North Korea. "North Korea is a rogue nation which has become a great threat and embarrassment to China, which is trying to help but with little success," Trump tweeted. "South Korea is finding, as I have told them, that their talk of appeasement with North Korea will not work, they only understand one thing!" On Tuesday, North Korea launched a missile that flew over the northern Japanese island of Hokkaido. Japanese Prime Minister Shinzo Abe called the launch "an unprecedented, grave, and serious threat" that damaged the security of the region. North Korea conducted its fifth test last year in September. If confirmed, the latest test would mark yet another big step forward in North Korean attempts to obtain a nuclear-armed missile. The Associated Press contributed reporting to this article.
// AUTOGENERATED FILE - DO NOT MODIFY!
// This file generated by Djinni from foo_listener.djinni

#pragma once // python_cdef_ignore

#include <stdbool.h> // python_cdef_ignore
#include <stdint.h> // python_cdef_ignore

struct DjinniWrapperFooListener;

void foo_listener___delete(struct DjinniObjectHandle * djinni_this);

void foo_listener___wrapper_dec_ref(struct DjinniWrapperFooListener * dh);
void foo_listener___wrapper_add_ref(struct DjinniWrapperFooListener * dh);

struct DjinniWrapperFooListener * make_proxy_object_from_handle_cw__foo_listener(struct DjinniObjectHandle * c_ptr);
struct DjinniObjectHandle * get_handle_from_proxy_object_cw__foo_listener(struct DjinniWrapperFooListener * dw);
bool equal_handles_cw__foo_listener(struct DjinniWrapperFooListener * dw1, struct DjinniWrapperFooListener * dw2);

void foo_listener_add_callback_on_string_change(struct DjinniString *(* ptr)(struct DjinniObjectHandle * , struct DjinniString *));
void foo_listener_add_callback_get_private_int(int32_t(* ptr)(struct DjinniObjectHandle * ));
void foo_listener_add_callback_on_changes_string_returned(struct DjinniString *(* ptr)(struct DjinniObjectHandle * , int32_t, float, struct DjinniString *, struct DjinniBinary *, bool, uint64_t));
void foo_listener_add_callback_on_changes_binary_returned(struct DjinniBinary *(* ptr)(struct DjinniObjectHandle * , int32_t, float, struct DjinniString *, struct DjinniBinary *, bool, uint64_t));
void foo_listener_add_callback_on_changes_date_returned(uint64_t(* ptr)(struct DjinniObjectHandle * , int32_t, float, struct DjinniString *, struct DjinniBinary *, bool, uint64_t));
void foo_listener_add_callback_on_changes_int_returned(int32_t(* ptr)(struct DjinniObjectHandle * , int32_t, float, struct DjinniString *, struct DjinniBinary *, bool, uint64_t));
void foo_listener_add_callback_on_changes_record_returned(struct DjinniRecordHandle *(* ptr)(struct DjinniObjectHandle * , int32_t, int32_t));
void foo_listener_add_callback_on_changes_string_optional_returned(struct DjinniString *(* ptr)(struct DjinniObjectHandle * , struct DjinniBoxedI32 *, float, struct DjinniString *, struct DjinniBinary *, int32_t, uint64_t));
void foo_listener_add_callback_on_changes_int_optional_returned(struct DjinniBoxedI32 *(* ptr)(struct DjinniObjectHandle * , struct DjinniBoxedI32 *, float, struct DjinniString *, struct DjinniBinary *, int32_t, uint64_t));
void foo_listener_add_callback_cause_py_exception(void(* ptr)(struct DjinniObjectHandle * , struct DjinniString *));
void foo_listener_add_callback_cause_zero_division_error(void(* ptr)(struct DjinniObjectHandle * ));
void foo_listener_add_callback___delete(void(* ptr)(struct DjinniObjectHandle * ));
/* See LICENSE for license details */

/*
*/

#include <appl_status.h>
#include <appl_types.h>
#include <object/appl_object.h>
#include <options/appl_options.h>
#include <misc/appl_unused.h>

#if defined APPL_DEBUG
#include <debug/appl_debug_handle.h>
#endif /* #if defined APPL_DEBUG */

//
//
//
enum appl_status appl_options::v_count(
    unsigned long int * const r_count) const
{
    appl_unused(
        r_count);

    return appl_raise_not_implemented();
} // v_count()

//
//
//
enum appl_status appl_options::v_get(
    unsigned long int const i_index,
    unsigned char const * * const r_buf_min,
    unsigned char const * * const r_buf_max) const
{
    appl_unused(
        i_index,
        r_buf_min,
        r_buf_max);

    return appl_raise_not_implemented();
} // v_get()

//
//
//
enum appl_status appl_options::v_write(
    unsigned char const * const p_buf_min,
    unsigned char const * const p_buf_max,
    unsigned long int * const p_count,
    char * const p_ready)
{
    appl_unused(
        p_buf_min,
        p_buf_max,
        p_count,
        p_ready);

    return appl_raise_not_implemented();
} // v_write()

//
//
//
enum appl_status appl_options::v_append_argument(
    unsigned char const * const p_buf_min,
    unsigned char const * const p_buf_max)
{
    appl_unused(
        p_buf_min,
        p_buf_max);

    return appl_raise_not_implemented();
} // v_append_argument()

//
//
//
enum appl_status appl_options::v_reset(void)
{
    return appl_raise_not_implemented();
} // v_reset()

//
//
//
appl_options::appl_options(
    struct appl_context * const p_context) :
    appl_object(
        p_context)
{
}

//
//
//
appl_options::~appl_options()
{
}

/* end-of-file: appl_options.cpp */
/** * Initializes and creates an account corresponding to the specified * accountProperties and registers the resulting ProtocolProvider in the * <tt>context</tt> BundleContext parameter. This method has a persistent * effect. Once created the resulting account will remain installed until * removed through the uninstallAccount method. * * @param accountProperties a set of protocol (or implementation) specific * properties defining the new account. * @return the AccountID of the newly loaded account */ public AccountID loadAccount(Map<String, String> accountProperties) { AccountID accountID = createAccount(accountProperties); loadAccount(accountID); return accountID; }
def shorten(thelist, maxlen, shorten):
    """Truncate thelist to at most maxlen items.

    shorten selects which part to drop: "right" keeps the first maxlen
    items, "left" keeps the last maxlen items, "both" drops items from
    both ends and keeps the middle, and "middle" drops the middle and
    keeps both ends.
    """
    if len(thelist) <= maxlen:
        return thelist
    if shorten == "right":
        return thelist[0:maxlen]
    elif shorten == "left":
        # keep the last maxlen items (the original slice [-maxlen-1:-1]
        # was off by one and dropped the final element)
        return thelist[-maxlen:]
    elif shorten == "both":
        excess = len(thelist) - maxlen
        left = int(excess / 2)
        right = excess - left
        return thelist[left:-right]
    elif shorten == "middle":
        # split the maxlen kept items between the two ends
        left = int(maxlen / 2)
        right = maxlen - left
        return thelist[0:left] + thelist[-right:]
    else:
        raise Exception("Not a valid value for the shorten setting: {}".format(shorten))
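A quick sanity check of the four modes (illustrative values only):

data = list(range(10))             # [0, 1, ..., 9]
print(shorten(data, 4, "right"))   # [0, 1, 2, 3]
print(shorten(data, 4, "left"))    # [6, 7, 8, 9]
print(shorten(data, 4, "both"))    # [3, 4, 5, 6]
print(shorten(data, 4, "middle"))  # [0, 1, 8, 9]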
#!/usr/bin/env python
# Apply a gaussian blur to a smearing matrix
# By jba 09/18/18
# Usage: ./apply_blur.py <path-to-smearing-file.dat> <path-to-output-smearing-file.dat> [opt. blur size]
# After running, the code will plot the mean and std dev. You can use this to confirm that you haven't
# blurred too much. If the standard deviation is getting too big, move to a smaller number of bins for blurring
# Code should work with python 2.6, 3.7, or similar
import sys
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt

# look for input, give helpful advice if it is problematic
infile = 0
outfilename = 0
blur_size = 3
if len(sys.argv) < 3 or len(sys.argv) > 4:
    print("Usage: ./apply_blur.py <path-to-smearing-file.dat> <path-to-output-smearing-file.dat> [opt. blur size (bins)]")
    print("Check the plotted statistics after running to decide if the blurring is reasonable")
    sys.exit()
else:
    try:
        infile = open(sys.argv[1], "r")
    except:
        print("problem encountered trying to open " + sys.argv[1])
        raise
    try:
        outfile = open(sys.argv[2], "w")
        outfile.close()
        outfilename = sys.argv[2]
    except:
        print("problem encountered trying to open " + sys.argv[2])
        raise
    if len(sys.argv) == 4:
        blur_size = int(sys.argv[3])


def find_between(s, first, last):
    try:
        start = s.index(first) + len(first)
        end = s.index(last, start)
        return s[start:end]
    except ValueError:
        return ""


# This little bit of code below is meant to ensure that input is correctly implemented
# whether this is run in python 2.x or 3.x
try:
    input = raw_input
except NameError:
    pass


###############################################################################
# Here is a function to normalize the edet columns to sum to one
def NormalizeEdet(matrix):
    # recall that edet is the first index!
    # Need a bit of extra work to ensure no division by zero!
    matsum = matrix.sum(axis=0)
    ncols = len(matsum)
    for icol in range(ncols):
        if matsum[icol] > 0:
            matrix[:, icol] = matrix[:, icol] / matsum[icol]
    return matrix


###############################################################################
# We need a function to write out the smearing matrix after it is computed!
def WriteMatrix(filename, channelname, matrix):
    outfile = open(filename, "w")
    outfile.write("energy(#" + channelname + ")<\n")
    lastelem = str(len(matrix[1, :]) - 1)
    # To start the first line...
    outfile.write("@energy = ")
    for irow in range(0, matrix.shape[0] - 1):
        our_line = "{0," + lastelem + "," + ",".join([x for x in matrix[irow, :].astype(str)]) + "}:\n"
        outfile.write(our_line)
    # the last line needs a semicolon instead of a colon
    our_line = "{0," + lastelem + "," + ",".join([x for x in matrix[-1, :].astype(str)]) + "};\n"
    outfile.write(our_line)
    outfile.write(">")
    outfile.close()
    return


###############################################################################
# Now we have the smearing matrix file open, time to parse it.
smearrows = []
flavorname = 0  # we might as well get the name from the top line
isfirst = False
islast = False
# The first row should be ignorable, as should the second, up to an @energy = {
for iline in infile.readlines():
    if "#" in iline:
        flavorname = find_between(iline, "#", ")")
        continue  # this is a header line, move on
    else:
        rowvals = find_between(iline, "{", "}")
        rowvals_str = rowvals.split(",")[2:]
        smearrows.append([float(x) for x in rowvals_str])  # first two entries don't really matter for this
    if ";" in iline:  # this is the last one
        break

print("Read in " + str(len(smearrows)) + " lines")

z_mesh = np.array(smearrows)
blurred_z_mesh = ndimage.gaussian_filter(z_mesh, blur_size)

# Now we can set up some masking to bring things like thresholds back.
z_mesh_column_filter = np.sum(z_mesh, axis=0, keepdims=True)
blurred_z_mesh = z_mesh_column_filter * blurred_z_mesh

# Now let's re-normalize the matrix, as the blurring might mess up normalization
blurred_normalized_z_mesh = NormalizeEdet(blurred_z_mesh)

# Now we write the output as requested
WriteMatrix(outfilename, flavorname, blurred_normalized_z_mesh)
# and we're done, unless we want to keep going to analyze things a bit...


###############################################################################
def WeightedAvgAndSTD(matrix):
    """
    Return the weighted average and standard deviation.
    We are assuming 0.5 to 100 MeV
    """
    edet_min = 0.5
    edet_max = 100
    edet_step = (edet_max - edet_min) / float(matrix.shape[0])
    # Use a lambda function to evaluate detected energies (bin centers)
    edet = np.fromfunction(lambda i, j: edet_min + edet_step * (i + 0.5), matrix.shape, dtype=float)
    average = np.ma.average(edet, weights=matrix, axis=0)
    # Fast and numerically precise:
    variance = np.ma.average((edet - average) ** 2, weights=matrix, axis=0)
    return (average, np.sqrt(variance))


###############################################################################
# Let's analyze the mean and sigma before and after blurring!
def PlotMeanAndSigma(matrix1, matrix2):
    mean1, std1 = WeightedAvgAndSTD(matrix1)
    mean2, std2 = WeightedAvgAndSTD(matrix2)
    plt.figure(1)
    plt.subplot(121)
    plt.plot(mean1, label="before mean")
    plt.plot(mean2, label="after mean")
    legmean = plt.legend(loc="best")
    plt.subplot(122)
    plt.plot(std1, label="before stdev")
    plt.plot(std2, label="after stdev")
    legstd = plt.legend(loc="best")
    print("Close the plot window to finish the program")
    plt.show()
    return


PlotMeanAndSigma(z_mesh, blurred_normalized_z_mesh)
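For illustration, the core blur-and-renormalize step above can be sanity-checked on a toy matrix (values are made up; only the column sums matter):

import numpy as np
from scipy import ndimage

z = np.eye(4)  # toy smearing matrix: each true-energy column sums to one
blurred = ndimage.gaussian_filter(z, 1)
# zero out columns that were empty before blurring, then renormalize
blurred *= z.sum(axis=0, keepdims=True)
colsum = blurred.sum(axis=0, keepdims=True)
blurred = np.divide(blurred, colsum, out=np.zeros_like(blurred), where=colsum > 0)
print(blurred.sum(axis=0))  # -> [1. 1. 1. 1.]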
Back in September, TFB reader Brandon took us through the history and variations of Russian Kalashnikov magazine patterns in a two-part article that’s well worth reading if you haven’t already. Having said that, if you don’t have the patience for articles, or if you simply can’t get enough of AK magazine patterns, Ian McCollum of Forgotten Weapons has released a brief video overview of the subject: One of the points that Ian makes in the video is that the magazines in use with Russian forces during the 7.62mm AK’s heyday were a little different from the popular idea of ribbed steel AK magazines that most Americans associate with the rifles. While the ribbed steel pattern was used in large quantities, it only became the overwhelmingly iconic “AK pattern” through foreign production that lasted several decades. In Russia, these magazines were just one step in a long line of magazine development that, for the 7.62x39mm, lasted through the end of the 1960s. Western intelligence on Soviet forces was often informed by the lesser states of the Communist bloc, which has resulted in a particular image of the AKM-plus-ribbed-steel-mag-wielding Soviet trooper, even though in reality Soviet forces would have been using a mix of slab-sided, ribbed, aluminum waffle, and a large number of 7.62×39 resin magazines as well.
Reconstruction of Protein-Protein Interaction Network of Insulin Signaling in Homo Sapiens Diabetes is one of the most prevalent diseases in the world. Type 1 diabetes is characterized by the failure to synthesize and secrete insulin because of destroyed pancreatic β-cells. Type 2 diabetes, on the other hand, is characterized by the decreased synthesis and secretion of insulin because of a defect in pancreatic β-cells, as well as by the failure to respond to insulin because of malfunctioning insulin signaling. In order to understand the signaling mechanisms of the response to insulin, it is necessary to identify all components in the insulin signaling network. Here, an interaction network consisting of proteins that have a statistically high probability of being biologically related to insulin signaling in Homo sapiens was reconstructed by integrating Gene Ontology (GO) annotations and interactome data. Furthermore, within this reconstructed network, the interacting proteins that mediate the signal from the insulin hormone to glucose transportation were identified using linear paths. The identification of the key components functioning in insulin action on glucose metabolism is crucial for efforts to prevent and treat type 2 diabetes mellitus. Introduction Signaling allows living cells to communicate by processing biological information. Mammalian cells integrate information from complex intracellular signaling pathways to make decisions in response to changes in the environment. Using systematic genome-wide and pathway-specific protein-protein interaction screens, a framework of the interconnectivity of a large number of human proteins, including therapeutically relevant disease-associated proteins, has been generated from these pathways. Recent developments in these protein-protein interaction networks have improved the understanding of disease mechanisms, the identification of drug targets, and the adaptation of living cells to the environment. In mammalian cells, the balance between hepatic glucose production and glucose utilization by tissues such as liver, adipose, muscle, brain, and kidney provides glucose homeostasis. In healthy individuals, increased blood glucose levels result in the secretion of insulin from the β-cells of the pancreas. Insulin triggers the transport of glucose into peripheral tissues by the glucose transporter GLUT4 while inhibiting hepatic glucose production. Upon stimulation by the insulin (INS) hormone, the insulin receptor (INSR) phosphorylates insulin receptor substrate (IRS) proteins, which activate two main signaling pathways. The phosphatidylinositol 3-kinase (PI3K)-AKT/protein kinase B (PKB) pathway is responsible for the metabolic actions of insulin, such as glucose uptake, glycogen synthesis, gene expression, and protein synthesis. The Ras-mitogen-activated protein kinase (MAPK) pathway controls cell growth and differentiation by regulating the expression of some genes and cooperating with the PI3K pathway. Defects in insulin signaling pathways may decrease the ability of peripheral tissues to respond to insulin (insulin resistance), causing type 2 diabetes. Besides its primary role in glucose homeostasis, the insulin signaling mechanism also regulates ion and amino acid transport, lipid metabolism, glycogen synthesis, gene transcription and mRNA turnover, protein synthesis and degradation, and DNA synthesis through a complex, highly integrated network activated by the insulin receptor.
Most of the research published so far reports experimental and computational work to decipher small-scale mechanisms around key proteins in insulin metabolism. However, it is very important to capture the global picture of insulin signaling in order to understand the mechanisms underlying diabetes, including crosstalk with other signaling networks. This need motivated us to reconstruct the insulin signaling network in Homo sapiens with the aim of identifying all known components together with new candidate proteins of insulin signaling. In this study, a computational framework integrating interactome data with GO annotations was used to build a large-scale protein interaction network composed of candidate proteins for insulin signaling in Homo sapiens. The reconstructed insulin signaling network was decomposed into linear paths ending in glucose transportation in order to identify the proteins functioning in this metabolic action of insulin. The topology of the reconstructed insulin signaling network governing glucose transportation was then analyzed to determine whether the network properties are biologically feasible and to obtain detailed information about the signaling mechanisms. Moreover, graph theoretic analysis identifies the proteins that are well or poorly connected in the interaction network. This study provides a comprehensive insulin signaling network with an indication of key components, which will facilitate a deeper understanding of the mechanisms underlying insulin-resistant states and the pathophysiology of insulin deficiency. Figure 1 represents an overview of the computational approach integrating Gene Ontology (GO) annotations and interactome data for the reconstruction of a protein interaction network, which was used to predict candidate proteins in insulin signaling in humans. All known interacting human proteins obtained from the BioGRID version 2.0.61 release were used as inputs to the algorithm. BioGRID (The Biological General Repository for Interaction Datasets) uses the results of high-throughput experiments and conventional studies. The GO annotations (in terms of cellular component, molecular function, and biological process) of the core proteins that are known to have certain functions in insulin signaling were collected (http://www.ebi.ac.uk/QuickGO/) to form an annotation collection table (see Supplementary material available online at doi:10.1155/2010/690925). The relevance of the human proteins to insulin signaling was tested by employing this annotation collection table. Proteins with all three GO terms matching those in the annotation collection table were added to the network. Thus, a high probability of having a role in the insulin interaction network is ensured for these proteins. In the second step, the interaction data among these proteins were obtained from the BioGRID version 2.0.61 release, and the network architecture was constructed. Network Decomposition Analysis. Network decomposition analysis is based on the decomposition of a protein interaction network into linear paths starting from inputs (ligands) and extending to outputs (cellular responses). In the reconstructed insulin signaling network, the linear paths from the insulin receptor to the glucose transporter GLUT4 were found by the NetSearch algorithm, and the specific part of the protein network governing insulin action on glucose metabolism was identified.
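The paper reports no code; purely as an illustration, the annotation-matching filter described above can be sketched in Python, with toy GO terms and hypothetical data structures standing in for the QuickGO and BioGRID inputs:

# Toy stand-ins for the annotation collection table and the per-protein
# GO annotations; the real inputs come from QuickGO and BioGRID.
annotation_collection = {
    "component": {"GO:0005829"},  # cytosol
    "function": {"GO:0004672"},   # protein kinase activity
    "process": {"GO:0008286"},    # insulin receptor signaling pathway
}
go_annotations = {
    "AKT1": {"component": {"GO:0005829"}, "function": {"GO:0004672"}, "process": {"GO:0008286"}},
    "XYZ1": {"component": {"GO:0005634"}, "function": {"GO:0003677"}, "process": {"GO:0006355"}},
}

def passes_filter(protein_terms, collection):
    # keep a protein only if it matches the collection table in all
    # three GO aspects, as described in the text
    return all(protein_terms[aspect] & collection[aspect]
               for aspect in ("component", "function", "process"))

candidates = [p for p, terms in go_annotations.items()
              if passes_filter(terms, annotation_collection)]
print(candidates)  # -> ['AKT1']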
The participation of the proteins in linear paths can be considered an indication of their importance in signal transduction, since any state of the signaling network is a combination of the linear paths. Therefore, the participation percentages of each protein in the linear paths of the reconstructed insulin signaling network were calculated to gain insight into the roles of the proteins in the signal transduction from INS to GLUT4. Graph Theoretic Analysis. The topology of the reconstructed protein-protein interaction network functioning in glucose transportation was determined by graph theoretic analysis based on properties such as the degree (connectivity) of nodes, the number of hubs (highly connected nodes), the shortest path lengths between indirectly connected nodes, the network diameter, and the mean path length. The graph properties of the network were found using the Network Analyzer plugin (ver. 2.6.1) of Cytoscape (ver. 2.6.3). The input to the calculation is the list of binary interacting proteins. Observing the connectivity distribution of the proteins allows us to identify highly connected proteins, which participate in significant numbers of interactions and play critical roles in the organization of the cellular protein interaction network. The mean path length and the network diameter are calculated as the average and the maximum of the shortest path lengths, respectively. Results and Discussion In the present study, the protein-protein interaction network of insulin signaling was reconstructed in Homo sapiens with special emphasis on the glucose transportation mechanism. During the reconstruction of a protein interaction network, the main problem is the existence of false positives and false negatives in the available interaction data, obtained mostly by high-throughput screens. Several approaches have been developed to improve the quality of the data by integrating different biological features, including GO annotations. Compared to metabolic and regulatory networks, the reconstruction and analysis of signaling networks are very limited. Previous signaling network reconstruction methods focused on the integration of protein-protein interaction data with microarray gene expression profiles or on a detailed literature survey of published knowledge. Here, we used a computational framework integrating interactome data with GO annotations. Reconstruction of Insulin Signaling Network in H. Sapiens. 30 proteins related to human insulin signaling were identified from the literature and GO annotations (Table 1). Through the literature search only experimental cases were considered, and the proteins that are
In addition to that, some core proteins were collected via their GO function and process terms which indicate insulin signaling explicitly. Therefore, each of these proteins is known to be essential for insulin actions. For instance, the binding of insulin receptor substrate-1 (IRS1) to the phosphorylated insulin receptor (INSR) leads to the activation of phosphatidylinositol 3-kinase (PI3K) whose regulatory subunits (PIK3R1 and PIK3R3) play pivotal roles in the metabolic and mitogenic actions of insulin. AKT1 plays an important role in GLUT4 translocation via phosphorylating and regulating components of GLUT4 complex . By reconstructing the protein-protein interaction network this study unravels the mechanisms around these insulin signaling proteins. 8211 interacting human proteins obtained from BioGrid 2.0.61 were tested through the GO annotations of the core proteins. If there is at least one annotation for each of the GO terms (component, function, and process) that are included in the annotation collection table, the corresponding protein was added to the network. Consequently, 6248 proteins passed this selection criterion increasing their probability to have function in insulin signal transduction. However, only 3588 of these proteins have interactome data, and of these, 365 proteins cannot be included into the network as the GO terms of their interacting partners do not coincide with those in the annotation collection table. Eventually, an interaction network of 3223 nodes and 10537 edges is obtained for insulin signaling. When the isolated smaller parts are removed the resulting protein-protein interaction network consists of 3056 proteins and 10401 interactions among them (see Supplementary material). Two of the core proteins CILP and PHIP are not included in the reconstructed network, since CILP has no interaction data, and PHIP's interacting partners does not fulfill the selection criterion based on GO annotations. Network Decomposition Analysis. In a protein interaction network, a signaling pathway for a specific signaling output can be identified using linear paths starting from membrane-bound receptors and ending at that particular cellular response . The linear paths of the reconstructed network (3056 proteins and 10401 interactions) were found using NetSearch algorithm of Steffen and coworkers . INSR (insulin receptor) and GLUT4 (glucose transporter 4) proteins were used as the input and the output of the signaling network, respectively, for the identification of the proteins that have roles in the insulin signal transduction triggered by binding of insulin to its receptor and ending with metabolic action of glucose transportation. The shortest path length between INSR and GLUT4 was found as 4, since the shortest 7 linear paths include 5 proteins connected linearly by 4 interactions. In order to determine the optimum path length for the identification of the linear paths functioning specifically in glucose transportation, the paths were searched by increasing the maximum path length by one each time ( Table 2). The number of core proteins and the interacting proteins included in the linear paths were investigated to determine the critical path length and participating proteins that have roles in glucose transportation response of the signaling network. Between INSR and GLUT4, a path length of 6 resulting in 7176 linear paths was chosen to be optimum, as it provides a balance between smaller path length and participating core proteins. 
The criterion of a small path length is reasonable, since signaling mechanisms are known to give such responses very quickly. Increasing the maximum path length from 6 to 7 causes the number of core proteins that participate in the linear paths to increase only by one, from 17 to 18, despite a nearly twofold increase in the number of interacting proteins. Increasing the path length beyond 6 would result in nearly the same signaling mechanisms around these 17 core proteins, with longer paths covering more proteins in the insulin signaling network. Therefore, these 498 proteins and 2887 interactions (see Supplementary material) that function in the linear paths at a path length of 6 constitute the insulin signaling pathway having roles in glucose translocation. Bottleneck proteins are known as the key connectors that are central to many shortest paths in an interaction network. To identify the bottlenecks in the signal transduction from INSR to GLUT4, the percentage of each protein contributing to the 7176 linear paths was calculated. INSR, GLUT4, and DAXX (death domain-associated protein 6) participate in all the linear paths since they are the input, the output, and the unique protein that connects GLUT4 to the network as its interacting partner, respectively. The following 10 proteins with the highest participation in these linear paths (Table 3) should be investigated with special care owing to their critical roles in transducing the signal from INSR to GLUT4. These proteins are frequently encountered in linear paths, as many are bound to the input or output proteins. SMAD2 (mothers against decapentaplegic homolog 2), MAPK1 (mitogen-activated protein kinase 1), and JAK2 (tyrosine protein kinase JAK2) interact with INSR in the reconstructed network. AR (androgen receptor), MDM2 (E3 ubiquitin protein ligase Mdm2), NR3C1 (glucocorticoid receptor), UBE2I (SUMO-conjugating enzyme UBC 9), HDAC1 (histone deacetylase 1), and PML (probable transcription factor PML) interact with DAXX, which is the only protein having an interaction with GLUT4 in the network. On the other hand, TP53 (tumor protein p53) was also found to be a bottleneck in the PPI network, since it has interactions with other bottleneck proteins such as MAPK1, MDM2, UBE2I, HDAC1, PML, and NR3C1. Since these bottleneck proteins control most of the signal transduction from insulin to the glucose transporter protein, their mutations may cause the glucose transportation system to fail, resulting in insulin resistance. The most promising result concerning the architecture of the reconstructed network is about the DAXX protein, since it connects GLUT4 to the network. Although its physical interaction with GLUT4 has been reported, its functional roles in the insulin signaling mechanism remain elusive, apart from very few studies. Therefore, the ending part of the interaction network functioning through glucose transportation should be investigated thoroughly to discover the effects of DAXX on GLUT4. Graph Theoretic Analysis. The reconstructed insulin signaling network was represented by an undirected interaction graph with 498 nodes and 2887 edges. The topological analysis was performed using the Network Analyzer plugin of Cytoscape. The network diameter and the mean path length were found to be 5 and 2.9, respectively, indicating a small-world topology. A comparative analysis of the graph theoretic properties of several protein interaction networks (Table 4) similarly reveals the small-world architecture.
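Purely as an illustrative sketch (using a built-in stand-in graph rather than the actual insulin network), the reported graph-theoretic quantities can be computed with networkx:

import networkx as nx

g = nx.karate_club_graph()  # stand-in for the 498-node insulin network
print("diameter:", nx.diameter(g))
print("mean path length:", round(nx.average_shortest_path_length(g), 2))
print("top hubs:", sorted(dict(g.degree).items(), key=lambda kv: kv[1], reverse=True)[:4])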
The comparison of the number of nodes and edges of the present network with those of the other PPI networks indicates that the reconstructed insulin signaling network is highly connected; its average connectivity is 11.6. The small network diameter and the low mean path length result from this architecture, since any two nodes in the network are connected by short paths through a high number of neighbouring proteins. The connectivity (k, the number of links per node) distribution of the nodes in the reconstructed graph was found to be scale-free (Figure 2), following nearly a power-law model, P(k) ≈ k^(−γ) with γ = 1.53 (R² = 0.83). Having small-world properties with scale-free topology is a general characteristic of complex biological networks. The node of GLUT4, with only one edge, was excluded from the inner diagram of Figure 2 since it is an outlier point. The hubs of the insulin signaling network were determined to be GRB2 (growth factor receptor-bound protein 2), HDAC1, AR, and TP53, having connectivity values of 88, 84, 83, and 74, respectively. GRB2 has a vital role in signaling by receptor protein tyrosine kinases, where its SH2 and SH3 domains bind to the receptors and effectors, and it functions in insulin signaling through many proteins, including IRS1. It was reported that HDAC inhibition in human primary myotubes increases endogenous GLUT4 gene expression. Investigating all HDAC proteins in the reconstructed network (HDAC1, 2, 3, 4, 5, 9) may provide potential drug targets for the treatment and management of insulin resistance and type 2 diabetes. Similar to HDAC1, TP53 has a repressive effect on the transcriptional activity of the GLUT4 gene promoters. Mutations within its DNA-binding domain were found to impair this repressive effect, resulting in increased glucose metabolism and cell energy supply, facilitating tumor growth. AR functions mainly as a ligand-activated transcription factor. Besides, it was reported to induce the rapid activation of kinase signaling cascades. In addition to having a high degree in the protein interaction network of insulin signaling, AR was also found to have the highest participation in the linear paths from INSR to GLUT4 (Table 3). This is one of the promising results of this study, indicating critical nodes in the insulin signaling governing glucose transportation. Conclusions There is a growing need for a comprehensive protein-protein interaction network of insulin signaling, especially covering its part in glucose metabolism, with the aim of addressing the type 2 diabetes problem. Here, we integrated GO annotations and interactome data for the reconstruction of a protein interaction network of insulin signalling, considering the relevance of the proteins as well as their interactions. Starting with 30 insulin signalling-related proteins, the proposed method resulted in an interaction network of 3056 proteins and 10,401 protein-protein interactions for human insulin signaling. The linear paths transducing the signal from the insulin receptor to the glucose transporter protein include 498 proteins with 2887 physical interactions and constitute the network of signaling for glucose transportation. The key components of the reconstructed network were identified as bottlenecks and hubs, since they are crucial for signal processing, being central to many signaling paths and having many neighboring proteins, respectively.
The mechanisms around these components, for example, directed interactions and activation or inhibition effects in the reconstructed insulin signaling network, are potential targets for further analyses to gain insight into the causes and consequences of type 2 diabetes. Additionally, the DAXX protein requires special attention, being the unique protein that connects the flowing information to the GLUT4 protein. Finally, other putative insulin signaling proteins having interactions with GLUT4 should be sought to obtain a robust network. This large-scale protein-protein interaction network allows us to consider any signaling node within its global working mechanism, as required by the holistic perspective of the systems biology approach.
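Returning to the connectivity analysis above: a minimal sketch of estimating the power-law exponent γ by least squares in log-log space (toy degree data; the paper does not specify its exact fitting procedure):

import numpy as np

degrees = np.array([1, 1, 1, 2, 2, 3, 3, 4, 5, 8, 13, 21])  # toy data
ks, counts = np.unique(degrees, return_counts=True)
pk = counts / counts.sum()
# fit log P(k) = -gamma * log k + const; the fitted slope is -gamma
slope, const = np.polyfit(np.log(ks), np.log(pk), 1)
print("estimated gamma:", -slope)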
/** * Store information about read of given table. * * @param tableName read table */ public void addReadTable(String tableName) { if (!modifiedTables.contains(tableName.toUpperCase())) { readTables.add(tableName.toUpperCase()); } }
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};

use std::collections::hash_map::DefaultHasher;
use std::fs::File;
use std::hash::{Hash, Hasher};
use std::io::Read;
use std::path::Path;

use crate::{Cascades, Foundations, Freecells};

pub type GameStateId = u64;

/// Represents the state of the board at one point during the course of a game.
///
/// The game state is defined by the cards that are in each of the three parts of the board, the
/// cascades, the foundations and the freecells.
///
/// A game state is valid if these conditions hold:
/// - Each of the 52 cards exists exactly once
/// - Each card on a foundation is of the correct suit
/// - The cards on each foundation are ordered correctly
///
/// # Examples
///
/// ```
/// // TODO [v1] Add code examples (once FromStr is implemented for GameState)
/// ```
#[derive(Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GameState {
    #[allow(missing_docs)]
    pub cascades: Cascades,
    #[allow(missing_docs)]
    pub foundations: Foundations,
    #[allow(missing_docs)]
    pub freecells: Freecells,
}

impl GameState {
    /// Parses the contents of a file into a GameState.
    ///
    /// This is equivalent to reading the contents of the file as `&str` and using that as input
    /// for `GameState::from_str`
    // TODO consider borrowing the file name: `file_name: &P` it doesn't really need to be consumed here, right?
    pub fn from_file<P: AsRef<Path>>(file_name: P) -> Result<GameState, String> {
        let mut file = match File::open(file_name) {
            Ok(file) => file,
            Err(_) => return Err("File could not be read".to_string()),
        };
        let mut contents = String::new();
        if file.read_to_string(&mut contents).is_err() {
            return Err("File contents could not be read".to_string())
        }
        contents.parse()
    }

    /// Returns `true` if all cards are on the foundations, `false` otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// # use freecell::GameState;
    /// assert!("foundations: KC KS KH KD".parse::<GameState>().unwrap().is_solved());
    /// assert!(
    ///     !"
    ///     foundations: KC KS KH QD
    ///     cascade: KD
    ///     ".parse::<GameState>().unwrap().is_solved()
    /// );
    /// ```
    pub fn is_solved(&self) -> bool {
        self.foundations.0[0].len() == 13
            && self.foundations.0[1].len() == 13
            && self.foundations.0[2].len() == 13
            && self.foundations.0[3].len() == 13
    }

    // TODO [v1] document
    pub fn id(&self) -> GameStateId {
        let mut hasher = DefaultHasher::new();
        self.hash(&mut hasher);
        hasher.finish()
    }
}
// String converts planResults to a string
func (p *planResults) String() string {
	s := ""
	for _, r := range p.ranges {
		if r.firstPort == r.lastPort {
			s += fmt.Sprintf("%d\t", r.firstPort)
		} else {
			s += fmt.Sprintf("%d:%d\t", r.firstPort, r.lastPort)
		}
		switch r.result {
		case planResultPass:
			s += "pass\n"
		case planResultReject:
			s += "reject\n"
		case planResultDrop:
			s += "drop\n"
		}
	}
	return s
}
Here's a trivia question: What state has installed more solar panels than any other except California? If you said New Jersey, your solar-energy IQ is brilliant. And if you know why several thousand huge mirrors are splayed across the Mojave Desert in California, consider yourself a solar energy genius, CBS News correspondent Tony Guida reports. "Every single day when the Sun comes up, we are bathed with enough energy in one day to supply all the power we need for five years across the globe," Stephen Smith, executive director of the Southern Alliance for Clean Energy, said. Lyle Rawlings has been making a living off that fact for 25 years - he's known as the godfather of solar power in New Jersey. His company's installation of solar panels on a warehouse roof will provide 90 percent of the facility's power. But technology doesn't come cheap. "Solar power is more expensive than fossil fuel power so our business actually depends on government subsidies," says Rawlings. But so do the oil business … and the coal business … and the nuclear industry too. The outlook for solar power, though, is brighter than any of those established technologies. General Electric estimates its solar energy division will top $1 billion in annual revenue in three years. JPMorgan Chase and Wells Fargo are investors in the biggest solar plant built in a generation. "Solar is the one technology that is decreasing in price, increasing in opportunity and it can be the key part of the solution if we make the right investments today," says Smith. Which brings us back to those mirrors in the Mojave Desert. No less a cutting-edge giant than Google is heavily invested in similar technology for turning sunlight into power. They're betting, just as New Jersey's godfather of solar, that this country will build a vibrant solar industry before oil and coal run out. "For us in the solar industry, the holy grail is the point at which the cost of solar power is about equal to the cost of fossil fuel," says Rawlings. Experts say that should happen in seven to 10 years. And by 2025, one report concludes, solar power can provide 10 percent of the nation's power needs. But that all depends on the government - the sun sets on tax credits for solar energy later this year.
import sys
import logging
import logging.handlers


def setup_logging():
    # Setup logging format
    logfmt = (
        "[%(asctime)s] %(levelname)s - %(name)s (%(filename)s:%(lineno)d): %(message)s"
    )
    datefmt = "%d/%m/%Y %H:%M:%S"

    # Setup log levels by module
    # asyncio
    logging.getLogger("asyncio").setLevel(logging.WARN)
    # discord.py
    logging.getLogger("discord.client").setLevel(logging.WARN)
    logging.getLogger("discord.gateway").setLevel(logging.WARN)
    logging.getLogger("discord.http").setLevel(logging.WARN)
    logging.getLogger("discord.state").setLevel(logging.WARN)
    # aiosqlite
    logging.getLogger("aiosqlite").setLevel(logging.INFO)

    # Stdout output
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(logging.Formatter(fmt=logfmt, datefmt=datefmt))

    # File output, rotated daily, keeping one week of backups
    file_handler = logging.handlers.TimedRotatingFileHandler(
        filename="bot.log", when="D", backupCount=7
    )
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(fmt=logfmt, datefmt=datefmt))

    logging.basicConfig(
        level=logging.DEBUG,
        encoding="utf-8",
        format=logfmt,
        datefmt=datefmt,
        handlers=[stdout_handler, file_handler],
    )
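A minimal usage sketch (the importing module path is hypothetical):

import logging
from bot_logging import setup_logging  # hypothetical module holding the function above

setup_logging()
log = logging.getLogger(__name__)
log.info("bot starting")  # goes to stdout and to the rotating bot.log file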
The British Beer and Pub Association (BBPA) has revealed that 893 pubs were forced to call last orders for good in 2009. The statistics show that key services are disappearing from village life "at an alarming rate", according to the National Housing Federation. About 400 village shops closed in 2008 while rural schools shut down at the rate of one a month in England between 1997 and 2008, said the organisation - which represents England's housing associations. The National Housing Federation said the closures reflected declining demand for services in villages where local families had been priced out of the area by wealthy commuters, pensioners and second-home owners, and where too few new homes had been built. David Orr, the Federation's chief executive, said: "The cornerstones of traditional village life, such as the local school, the shop and the pub, are disappearing from the rural landscape at an alarming rate. "Rural towns and villages need to have mixed, working communities, otherwise there is a very real danger our countryside will become little more than a theme park for weekenders. "While there are a range of issues at play here, affordable housing lies at the centre of the battle to save traditional village life. "Unless we build more affordable homes for local people, they will continue to be priced out of rural areas and services they support will vanish with them." Brigid Simmonds, BBPA chief executive, added: "Along with local shops, post offices and schools, village pubs are pivotal to the life of local communities across Britain. "Pubs act as much more than a social venue. They are a focal point for sports teams, local groups and meetings. In addition they provide a range of community services like post offices and shops. "We need a climate that allows these community businesses to thrive."
Clinical Reasoning in Musculoskeletal Practice: Students’ Conceptualizations Background: Qualitative research on physical therapist students’ conceptualizations of clinical reasoning (CR) is sparse. Objectives: The purpose of this study was to explore CR from students’ perspectives. Design: For this study, a qualitative, cross-sectional design was used. Methods: Thirty-one students were randomly selected from years 2, 3, and 4 of an undergraduate physical therapist program in New Zealand. Students were interviewed about their understanding of CR and how they used it in practice in a recent musculoskeletal placement. Interviews were recorded and transcribed verbatim. A 3-stage analysis included the categorization of students’ conceptualizations on the basis of the meaning and the structure of each experience and the identification of cross-category themes. Results: Five qualitatively different categories were identified: A—applying knowledge and experience to the problem, patient, or situation; B—analyzing and reanalyzing to deduce the problem and treatment; C—rationalizing or justifying what and why; D—combining knowledge to reach a conclusion; and E—problem solving and pattern building. Cross-category analysis revealed 5 general themes: forms of CR, spatiotemporal aspects, the degree of focus on the patient, attributions of confidence, and the role of clinical experience. Conclusions: Categories formed a continuum of CR from less to more sophistication and complexity. Students were distributed evenly across categories, except for category E, which included only students from years 3 and 4. Each category comprised a logical, coherent experiential field. The general themes as critical dimensions suggest a new way of exploring CR and suggest a possible pathway of development, but further research is required. These findings have implications for teaching and the development of physical therapy curricula.
// CountCommits returns the number of commits in the OpenBuildService
// repository saved at path for email.
// It returns ErrNoChangesFileFound in case it couldn't locate any
// .changes file, and forwards other errors that might occur.
func CountCommits(path string, email string) (count int, err error) {
	var changesFiles []string
	err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
		// Propagate errors from Walk before touching info, which may be nil.
		if err != nil {
			return err
		}
		if info.IsDir() && info.Name() == ".osc" {
			return filepath.SkipDir
		}
		if strings.HasSuffix(path, ".changes") {
			changesFiles = append(changesFiles, path)
		}
		return nil
	})
	if err != nil {
		return
	}

	if len(changesFiles) == 0 {
		err = ErrNoChangesFileFound
		return
	}

	for _, changesFile := range changesFiles {
		var changes []byte
		changes, err = ioutil.ReadFile(changesFile)
		if err != nil {
			return
		}
		count += strings.Count(string(changes), email)
	}
	return count, nil
}
/**
 * Synchronous call from the RIL to us to return current radio state.
 * RADIO_STATE_UNAVAILABLE should be the initial state.
 */
static RIL_RadioState currentState() {
    return sState;
}
/**
 * Check the local cache version against an expected version.
 *
 * @param expectedVersion the minimum version the local cache must reach
 * @return a tuple of (success, actual version)
 */
public Tuple<Boolean, Long> checkLocalCache(long expectedVersion) {
  final AtomicLong version = new AtomicLong(0);
  try {
    retryer.call(
        () -> {
          clientManagerAddressRepository.wakeup();
          fetchClientOffAddressService.wakeup();
          version.set(fetchClientOffAddressService.lastLoadVersion());
          return version.get() >= expectedVersion;
        });
    return new Tuple(true, version.get());
  } catch (Throwable t) {
    LOGGER.error(
        "[checkLocalCache]client manager check version, expected: {}, actual: {} error.",
        expectedVersion,
        version.get(),
        t);
    return new Tuple(false, version.get());
  }
}
import express from "express";
import {
  ImageUpload,
  PDFUpload,
  VideoUpload
} from "./../../middlewares/fileStorage";
import { isAuth } from "./../../middlewares/isAuth";
import {
  uploadMultipleImgs,
  uploadMultipleVids,
  uploadSingleImg,
  uploadSinglePdf,
  uploadSingleVid
} from "./controller";

const router = express.Router();

router.post(
  "/image/single",
  isAuth,
  ImageUpload.single("image"),
  uploadSingleImg
);
router.post(
  "/image/multiple",
  isAuth,
  ImageUpload.array("images"),
  uploadMultipleImgs
);
router.post(
  "/video/single",
  isAuth,
  VideoUpload.single("video"),
  uploadSingleVid
);
router.post(
  "/video/multiple",
  isAuth,
  VideoUpload.array("videos"), // field name fixed from the "vidoes" typo
  uploadMultipleVids
);
// Single PDF upload: use .single() rather than .array() to match the handler.
router.post("/pdf/single", isAuth, PDFUpload.single("pdf"), uploadSinglePdf);

export default router;
/// A bridge that forwards all requests from a certain TCP port to gpg-agent on Windows.
///
/// `to_path` should point to the path of the gnupg UDS.
pub async fn bridge(from_addr: String, to_path: Option<String>) -> io::Result<()> {
    // Attempt to setup gpg-agent if it's not up yet.
    let _ = ping_gpg_agent().await;
    let mut listener = TcpListener::bind(&from_addr).await?;
    let meta = Arc::new(Mutex::new(AgentMeta {
        path: to_path,
        args: None,
    }));
    loop {
        let (socket, _) = listener.accept().await?;
        let meta = meta.clone();
        let (port, nounce) = {
            let mut m = meta.lock().unwrap();
            if m.args.is_none() {
                if m.path.is_none() {
                    m.path = Some(load_gpg_extra_socket_path().await?);
                }
                m.args = Some(load_port_nounce(m.path.as_ref().unwrap()).await?);
            }
            m.args.unwrap()
        };
        tokio::spawn(async move {
            if let Err(e) = delegate(socket, port, nounce).await {
                error!("failed to delegate tcp: {:?}", e);
                meta.lock().unwrap().args.take();
            }
        });
    }
}
def bounds_args(self):
    min_lat = self.lat_origin - ((self.nlats - 1) * self['cellsize'])
    max_lon = self.lon_origin + ((self.nlons - 1) * self['cellsize'])
    return (self.lat_origin, self.lon_origin, min_lat, max_lon)
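The method above is easiest to read with a concrete grid in mind: the origin is treated as the north-west corner, so it returns (max_lat, min_lon, min_lat, max_lon). A hypothetical illustration follows; the GridHeader class is an assumption standing in for whatever object carries lat_origin, lon_origin, nlats, nlons and a 'cellsize' key:

class GridHeader(dict):
    """Hypothetical stand-in for the object that bounds_args is defined on."""
    def __init__(self, lat_origin, lon_origin, nlats, nlons, cellsize):
        super().__init__(cellsize=cellsize)
        self.lat_origin = lat_origin
        self.lon_origin = lon_origin
        self.nlats = nlats
        self.nlons = nlons

    def bounds_args(self):
        # Same body as above: the origin is the NW corner of the grid.
        min_lat = self.lat_origin - ((self.nlats - 1) * self['cellsize'])
        max_lon = self.lon_origin + ((self.nlons - 1) * self['cellsize'])
        return (self.lat_origin, self.lon_origin, min_lat, max_lon)

# A 3x4 grid of 0.5-degree cells whose origin is the NW corner:
h = GridHeader(lat_origin=10.0, lon_origin=20.0, nlats=3, nlons=4, cellsize=0.5)
print(h.bounds_args())  # (10.0, 20.0, 9.0, 21.5): (max_lat, min_lon, min_lat, max_lon)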
package org.gradle.profiler.jfr;

import org.gradle.profiler.InstrumentingProfiler;
import org.gradle.profiler.JvmArgsCalculator;
import org.gradle.profiler.ScenarioSettings;

import java.io.File;
import java.util.function.Consumer;

public class JfrProfiler extends InstrumentingProfiler {
    private final JFRArgs jfrArgs;

    JfrProfiler(JFRArgs jfrArgs) {
        this.jfrArgs = jfrArgs;
    }

    @Override
    public String toString() {
        return "JFR";
    }

    @Override
    public void summarizeResultFile(File resultFile, Consumer<String> consumer) {
        if (resultFile.getName().endsWith(".jfr")) {
            consumer.accept("JFR recording: " + resultFile.getAbsolutePath());
        } else if (resultFile.getName().endsWith(".jfr-flamegraphs")) {
            consumer.accept("JFR Flame Graphs: " + resultFile.getAbsolutePath());
        }
    }

    @Override
    protected SnapshotCapturingProfilerController doNewController(ScenarioSettings settings) {
        return new JFRControl(jfrArgs, settings.computeJfrProfilerOutputLocation());
    }

    @Override
    protected JvmArgsCalculator jvmArgsWithInstrumentation(ScenarioSettings settings, boolean startRecordingOnProcessStart, boolean captureSnapshotOnProcessExit) {
        File jfrFile = settings.computeJfrProfilerOutputLocation();
        return new JFRJvmArgsCalculator(jfrArgs, startRecordingOnProcessStart, captureSnapshotOnProcessExit, jfrFile);
    }

    @Override
    public void validate(ScenarioSettings settings, Consumer<String> reporter) {
        validateMultipleIterationsWithCleanupAction(settings, reporter);
    }

    @Override
    protected boolean canRestartRecording(ScenarioSettings settings) {
        return !settings.getScenario().getInvoker().isReuseDaemon();
    }

    @Override
    public boolean isCreatesStacksFiles() {
        return true;
    }
}
export { soxa } from "https://deno.land/x/[email protected]/mod.ts";
// WithUpdateCallback defines the callback called upon recovering a message
// from the log.
func WithUpdateCallback(cb UpdateCallback) ProcessorOption {
	return func(o *poptions, gg *GroupGraph) {
		o.updateCallback = cb
	}
}
def testBuildCpuUsageFilter(self):
    instances_filter = gcp_mocks.FAKE_MONITORING._BuildCpuUsageFilter(
        ['0000000000000000001', '0000000000000000002'])
    self.assertEqual(
        instances_filter,
        ('metric.type = "compute.googleapis.com/instance/'
         'cpu/utilization" AND (resource.label.instance_id = '
         '"0000000000000000001" OR resource.label.instance_id = '
         '"0000000000000000002")'))
1960 live album by Bob Newhart

The Button-Down Mind of Bob Newhart is a 1960 live album by comedian Bob Newhart. Recorded at the Tidelands Club in Houston, Texas, the debut album by Newhart was number one on the Billboard pop album chart and won Album of the Year at the 1961 Grammy Awards, where Newhart was named Best New Artist. It was the first comedy album to win Album of the Year, and the only time a comedian had won Best New Artist.

Newhart wanted the title to be The Most Celebrated New Comedian Since Attila the Hun, but Warner Bros. executives created the album's title and Newhart had to settle for his idea as a subtitle.[3] The album was a 2006 entry into the Library of Congress' National Recording Registry.[4]

On release, the album debuted atop the Billboard 200, staying at number one for 14 weeks, and on the chart for two years, selling over 600,000 copies near release. Eventually the album went on to be the 20th best-selling album of all time on the Billboard charts.[5]

In popular culture

It is the album that Pete Campbell listens to in Mad Men Season 1, Episode 4, "New Amsterdam". In the pilot episode of The Marvelous Mrs. Maisel, Joel Maisel performs routines from the album at a cafe, passing them off as his own work. This is anachronistic, as the episode is set in 1958, two years before the album was released.

Charts

In 1961, the album reached number 1 on the Billboard Pop Albums (Billboard 200) chart (mono).
/**
 * Created by lon on 12/3/16.
 */
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';

import { EqualValidatorDirective } from '../services/equal-validator.directive';
import { FrontRoutingModule } from './front-routing.module';
import { HomeComponent } from './home/home.component';
import { LoginComponent } from './login/login.component';
import { RegisterComponent } from './register/register.component';
import { GlobalModule } from '../global/global.module';
import { FrontComponent } from './front.component';
import { EventComponent } from './event/event.component';

@NgModule({
  imports: [
    CommonModule,
    FormsModule,
    ReactiveFormsModule,
    FrontRoutingModule,
    GlobalModule
  ],
  declarations: [
    FrontComponent,
    HomeComponent,
    LoginComponent,
    RegisterComponent,
    EqualValidatorDirective,
    EventComponent
  ],
  providers: [],
  exports: []
})
export class FrontModule {}
package br.com.splessons.lesson12.security.service;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.AuthenticationException;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.web.authentication.WebAuthenticationDetailsSource;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

import javax.servlet.http.HttpServletRequest;

@Component
public class AuthenticationResolver {

    @Autowired
    private CustomUserDetailsService customUserDetailsService;

    @Transactional
    public Authentication getAuthentication(HttpServletRequest request, Long userId) throws AuthenticationException {
        UserDetails userDetails = customUserDetailsService.loadUserById(userId);
        UsernamePasswordAuthenticationToken authentication =
                new UsernamePasswordAuthenticationToken(userDetails, userDetails.getUsername(), userDetails.getAuthorities());
        authentication.setDetails(new WebAuthenticationDetailsSource().buildDetails(request));
        return authentication;
    }
}
/**
 * Test of getIssueDate method, of class Invoice.
 */
@Test
public void testGetIssueDate() {
    String expected = "30/12/2019";
    invoice.setIssueDate(expected);
    String obtained = invoice.getIssueDate();
    assertEquals(expected, obtained);
}
// FetchTxByShaList returns the most recent tx of the name fully spent or not
func (db *LevelDb) FetchTxByShaList(txShaList []*btcwire.ShaHash) []*btcdb.TxListReply {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	replies := make([]*btcdb.TxListReply, len(txShaList))
	for i, txsha := range txShaList {
		tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
		btxspent := []bool{}
		if err == nil {
			// Unpack the spent bitmap: one bit per output.
			btxspent = make([]bool, len(tx.TxOut))
			for idx := range tx.TxOut {
				byteidx := idx / 8
				byteoff := uint(idx % 8)
				btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
			}
		}
		if err == btcdb.ErrTxShaMissing {
			// Could still be a fully spent tx; check the spent-tx store.
			sTxList, fSerr := db.getTxFullySpent(txsha)
			if fSerr == nil && len(sTxList) != 0 {
				idx := len(sTxList) - 1
				stx := sTxList[idx]

				tx, blockSha, _, _, err = db.fetchTxDataByLoc(
					stx.blkHeight, stx.txoff, stx.txlen, []byte{})
				if err == nil {
					btxspent = make([]bool, len(tx.TxOut))
					for i := range btxspent {
						btxspent[i] = true
					}
				}
			}
		}
		txlre := btcdb.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
		replies[i] = &txlre
	}
	return replies
}
/**
 * In addition to the normal base implementation, the <code>GameLoop</code> performs registered actions at the required
 * time and tracks some detailed metrics.
 */
@Override
protected void process() {
    if (this.getTimeScale() > 0) {
        super.process();
        this.executeTimedActions();
    }

    this.trackUpdateRate();
}
/**
 * Pax Exam test wrapper which starts (and stops) an etcd test server via
 * launcher. We need to do this because jetcd-launcher uses Testcontainers,
 * which cannot run inside OSGi (due to the use of TCL loadClass(String) in
 * DockerClientProviderStrategy). And even if we were to use the launcher only
 * in the config() method (which "is executed before the OSGi container is
 * launched, so it does run in plain Java"), the Pax Exam probe still needs to
 * load the entire test class and all of its references (launcher with
 * Testcontainers) into OSGi, which is a PITA. There is also no easy way to stop
 * the testcontainer after. It is therefore simplest to just launch the etcd
 * server before getting into Pax Exam's world, like this does.
 *
 * @author Michael Vorburger.ch
 */
public class PaxExamWrapperTest {

    private static final String ETCD_ENDPOINT_SYSTEM_PROPERTY_NAME = "etcd.endpoint";

    private static EtcdCluster cluster;

    @BeforeClass
    public static void beforeClass() {
        cluster = EtcdClusterFactory.buildCluster("karaf", 1, false);
        cluster.start();
    }

    @AfterClass
    public static void afterClass() {
        if (cluster != null) {
            cluster.close();
        }
    }

    @Test
    public void testClientServiceChecks() throws Throwable {
        URI endpoint = cluster.getClientEndpoints().get(0);
        System.setProperty(ETCD_ENDPOINT_SYSTEM_PROPERTY_NAME, endpoint.toString());

        Optional<Failure> failure = JUnitCore.runClasses(ClientServiceChecks.class).getFailures().stream().findFirst();
        if (failure.isPresent()) {
            throw failure.get().getException();
        }
    }

    static String getClientEndpoints() {
        return System.getProperty(ETCD_ENDPOINT_SYSTEM_PROPERTY_NAME);
    }
}
/* eslint-disable no-console */
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
//@ts-nocheck
import TopicDetection from 'topic-detection'

let detector: TopicDetection

export async function classifyText(input: any) {
  if (!detector) {
    await initClassifier()
  }
  const scores = detector.topics(input)
  // Find the topic with the highest score
  let higher = -1
  let _prop = ''
  for (let prop in scores) {
    if (Object.prototype.hasOwnProperty.call(scores, prop)) {
      if (scores[prop] > higher) {
        higher = scores[prop]
        _prop = prop
      }
    }
  }
  if (higher > 0 && _prop !== '') {
    // Capitalize the winning topic
    return _prop.charAt(0).toUpperCase() + _prop.slice(1)
  } else {
    return ''
  }
}

export async function initClassifier() {
  if (detector) {
    return
  }
  detector = new TopicDetection()
}
Dr Pavel Kravchenko holds a PhD in technical sciences and is the founder of Distributed Lab. In this opinion piece, the first of a two-part series, Kravchenko argues that tokenization of assets using blockchains will have more profound effects on the world's markets than simply reducing back-office record-keeping costs.

Would you swallow a random pill that you saw on the counter in the pharmacy? Of course not. You don't know anything about it! But what if this pill came in a package with details from the manufacturer? And you had a prescription from your doctor? Further, what if you could independently test the pill's chemical composition and make sure it matches the label and prescription? Or (flash forward to 2049), suppose you could verify that the chemical composition of the tablet is suitable for your DNA and confirmed by clinical studies.

Would that pill be more valuable to you? Undeniably. Its value increases depending on how much reliable information about it you have, even though the properties of the pill did not change.

Today, financial assets are too much like that loose pill on the counter. You don't know enough about where it's been, what's in it, or what it will do to you. But the process we call tokenization is going to make many assets a lot more attractive to a lot more investors, in part by providing an unprecedented level of information.

Why crypto took off

Stepping back, let's consider the legacy financial system. Quite apart from information asymmetry, there are other forms of friction that discourage investment. Even though we tend to think the global financial markets are as liquid as possible, that is only actually true for people and organizations already in the "system" - i.e. brokers and financial institutions. The end client is forced to go through all the levels of hell in the form of know-your-customer (KYC) and compliance checks at each and every opening of an account, signing of contracts, paying of commissions, etc. This also applies to investments into growing enterprises, access to which is only granted to accredited investors.

Strict regulation of the market for end users has led to demand for alternatives, which has unexpectedly let off steam through the cryptocurrency market. As soon as people started to believe that this market let them not only enter, but also withdraw freely, liquidity surged, cryptocurrency grew by factors of 10, and the number of initial coin offerings (ICOs) rose to hundreds per month.

Despite the hype and inevitable disappointment in investing in totally unregulated assets (where the level of fraud constitutes 90 percent, according to the People's Bank of China), it is clear that the democratization of trade leads to a sharp increase in the attractiveness of assets. Every business or nation would, or should, like this to happen in its economy.

Barriers to exchange

As someone who, for a couple of years, was involved in the equities market, I can say red tape is the main reason why a client can change their mind about opening an account. A secondary issue is the low usability of trading software - it is necessary either to study up or to entrust the work to a third party. More fundamental problems - such as the need for trust in intermediaries, poor infrastructure integration, and the speed of settlements - are in third place.

Indirectly, tokenization has created a fashion for extremely simple, convenient systems, where within 20 minutes you can get money onto the exchange, trade, and withdraw capital.
Of course, there is a risk that it will never be possible to withdraw money, but it is sometimes easier to accept such risk than the infinite dragging-on of compliance procedures.

The age of tokenization

One way or another, a term appeared in the blockchain space that had been coined, as it were, in the security management process. Balances of accounts on blockchains began to be called "tokens," due to the fact that they were items to be simply and safely transmitted. In essence, tokenization is the process of transforming the storage and management of an asset, where each asset is assigned a digital counterpart. Ideally, everything that happens in a digital accounting system should have legal implications, just as changes in a real estate register lead to a change in ownership of land.

The age of tokenization introduces the important innovation that assets are managed directly by the owner instead of through issuing orders to a middleman. The difference in approaches is easily explained by the example of the difference between the banking system and bitcoin. With a bank account, the client sends an instruction to a bank, where it is executed by someone, and the client identifies themselves through their login and password. In the case of bitcoin, the transaction initiator uses their digital signature, which in itself is a sufficient condition for the transaction to be executed.

Nothing prevents the use of the same mechanism for traditional asset management. Certainly, this will require a change in infrastructure, but it will bring many benefits. It will reduce costs, and increase the speed and security of trades.

Every trading infrastructure includes a depository, an exchange, a clearing house and client software. Tokenization assumes that all these components will be far more integrated. And blockchain technology will allow decentralizing the entire infrastructure, distributing the storage and processing functions between all the parties involved. This decentralization will make the system more resilient, since there will be no single point of failure; it will reduce the need for trust in a central provider; and it will allow instant audits, since multiple parties have real-time access to the ledger.

Unexpected results

In addition to the most obvious benefits from the transition to a digital domain - increased speed, security and convenience of operations, as well as less need for intermediaries - tokenization allows for unexpected results. Among them is the addition of properties to assets that are not initially inherent: the ability to prove the history of ownership, the opportunity to divide assets into the smallest fractions (bitcoin, for example, is divisible to the eighth decimal place), and the ability to integrate principles of management into the asset itself.

For example, suppose there are several partners in a real estate development who need to vote on a proposed renovation. With a wallet that holds their tokenized property, they can take the vote more efficiently, without having to meet face-to-face or trust a proxy to represent their wishes. A sketch of how such token-weighted voting might look is shown after this article.

All these things will make a tokenized asset more valuable than a non-tokenized asset with the same fundamentals, just as easy access to reliable information about a pill would give you more confidence to take it.

In the next article of this series: What stands in the way of tokenization.

Pills image via Shutterstock
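To make the voting example above concrete, here is a minimal, illustrative sketch of token-weighted voting in Python. It is not any particular blockchain's API: the Proposal class, the balances, and all names are assumptions invented for illustration, and a real system would verify digital signatures proving control of the tokens rather than trusting the caller.

# Illustrative only: a toy model of token-weighted voting over a tokenized asset.
# All classes and names here are hypothetical, not a real blockchain API.
from collections import defaultdict

class Proposal:
    def __init__(self, description):
        self.description = description
        self.votes = defaultdict(int)   # choice -> total token weight
        self.voted = set()              # owners who have already voted

    def cast_vote(self, owner, balance, choice):
        # A real system would check a digital signature here instead of
        # trusting the 'owner' string (see the bank-vs-bitcoin comparison above).
        if owner in self.voted:
            raise ValueError("owner already voted")
        self.voted.add(owner)
        self.votes[choice] += balance

    def result(self):
        return max(self.votes, key=self.votes.get)

# Three partners hold fractions of a tokenized property (1000 tokens total).
balances = {"alice": 500, "bob": 300, "carol": 200}
renovation = Proposal("Renovate the lobby")
renovation.cast_vote("alice", balances["alice"], "yes")
renovation.cast_vote("bob", balances["bob"], "no")
renovation.cast_vote("carol", balances["carol"], "yes")
print(renovation.result())  # "yes" wins with 700 of 1000 token-weight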
# -*- encoding: utf-8 -*-
"""
@File    : main.py
@Time    : 2021/2/26 9:26
@Author  : chise
@Email   : <EMAIL>
@Software: PyCharm
@info    :
"""
if __name__ == '__main__':
    print("Hello CookieCutter")
// Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
func (o ClusterMasterInstanceFleetInstanceTypeConfigOutput) EbsConfigs() ClusterMasterInstanceFleetInstanceTypeConfigEbsConfigArrayOutput {
	return o.ApplyT(func(v ClusterMasterInstanceFleetInstanceTypeConfig) []ClusterMasterInstanceFleetInstanceTypeConfigEbsConfig {
		return v.EbsConfigs
	}).(ClusterMasterInstanceFleetInstanceTypeConfigEbsConfigArrayOutput)
}
import sys
import os.path

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import pylab

__mnist__ = input_data.read_data_sets("MNIST_data/", one_hot=True)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def main():
    """ Main """
    one_conv_deep_mnist()
    #simple_mnist()


def one_conv_deep_mnist():
    x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
    _y = tf.placeholder(dtype=tf.float32, shape=[None, 10])

    # 1. First convolution layer
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_img = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_img, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # 2. Fully connected layer
    W_fc1 = weight_variable([14*14*32, 1024])
    b_fc1 = bias_variable([1024])
    # Reshape 2d image to flat for input in fully connected layer
    h_pool1_flat = tf.reshape(h_pool1, [-1, 14*14*32])
    # output of fully connected layer
    h_fc1 = tf.nn.relu(tf.matmul(h_pool1_flat, W_fc1) + b_fc1)

    # 3. Softmax layer: the logits must stay linear, so no ReLU here
    W_softmax = weight_variable([1024, 10])
    b_softmax = bias_variable([10])
    y_conv = tf.matmul(h_fc1, W_softmax) + b_softmax

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=_y, logits=y_conv))
    train_fn = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    result = tf.equal(tf.argmax(_y, axis=1), tf.argmax(y_conv, axis=1))
    calculate_accuracy = tf.reduce_mean(tf.cast(result, tf.float32))

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    saver = tf.train.Saver()
    file_path = "deep_model/test.model"
    if os.path.isfile(file_path + ".meta"):
        # Restore model
        saver.restore(sess, file_path)
    else:
        costs = []
        for i in range(10000):
            batch = __mnist__.train.next_batch(100)
            if i % 100 == 0:
                train_accuracy = sess.run(calculate_accuracy, feed_dict={x: batch[0], _y: batch[1]})
                print "\nTrain accuracy: {0}".format(train_accuracy)
            _, temp_cost = sess.run([train_fn, cross_entropy], feed_dict={x: batch[0], _y: batch[1]})
            costs.append(temp_cost)
            sys.stderr.write('\rEpoch: %d/%d' % (i+1, 10000))
            sys.stderr.flush()
        # Save model
        save_path = saver.save(sess, file_path)
        print "\nSaved at {0}".format(save_path)

    #x1 = __mnist__.train.images[0].reshape(1,784)
    #y1 = __mnist__.train.labels[0].reshape(1,10)
    #test_accuracy = sess.run(calculate_accuracy, feed_dict={x: x1, _y: y1})
    test_accuracy = sess.run(calculate_accuracy, feed_dict={x: __mnist__.test.images, _y: __mnist__.test.labels})
    print "\nTest accuracy: {0}".format(test_accuracy)
    #print "\nLabel: {0}".format(np.argmax(y1))
    #pylab.imshow(x1.reshape(28,28))
    #pylab.show()


def simple_mnist():
    x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
    y_ = tf.placeholder(dtype=tf.float32, shape=[None, 10])

    w = weight_variable([784, 10])
    b = bias_variable([10])
    y = tf.matmul(x, w) + b

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_fn = tf.train.GradientDescentOptimizer(0.5).minimize(cost)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Add an op to save and restore model
    saver = tf.train.Saver()
    file_path = "tmp/test.model"
    if os.path.isfile(file_path + ".meta"):
        saver.restore(sess, file_path)
    else:
        costs = []
        for _ in range(2000):
            batch_xs, batch_ys = __mnist__.train.next_batch(100)
            _, temp_cost = sess.run([train_fn, cost], feed_dict={x: batch_xs, y_: batch_ys})
            costs.append(temp_cost)
        save_path = saver.save(sess, file_path)
        print "Saved at {0}".format(save_path)

    result = tf.equal(tf.argmax(y, axis=1), tf.argmax(y_, axis=1))
    calculate_accuracy = tf.reduce_mean(tf.cast(result, tf.float32))
    accuracy = sess.run(calculate_accuracy, feed_dict={x: __mnist__.train.images, y_: __mnist__.train.labels})
    print accuracy
    #plt.plot(costs, '-')
    #plt.show()


if __name__ == "__main__":
    main()
import { NodeDef } from "node-red";

declare namespace statusChart {

    interface appConfigBase {
        confsel: string;
        item: string;
        label: string;
        params: any;
        group: any;
        templateScope: string;
        width: number;
        height: number;
        tab: string;
        order: any;
        fwdInMessages?: any;
        storeOutMessages?: any;
    }

    type graphObject = {
        statusColor: string;
        statusValue: string;
        statusLabel: string;
    }

    interface statusColorListDef {
        [key: string]: string;
    }

    type graphDataObject = {
        datetime: Date;
        value: number | string;
        widthRatio: number;
        statusColor: string;
        label: string;
    }

    interface makeMsgBase {
        [key: string]: any;
        templateScope?: string;
        template?: string;
        graphObject?: graphObject[];
        graphData?: graphDataObject[];
    }

    interface nodeRedMsgBase {
        [key: string]: any;
        _msgid?: string;
        payload?: any;
        topic?: string;
    }

    interface nodeConf extends NodeDef, appConfigBase {}
    interface graphDataDef extends graphDataObject {}
    interface makeMegDef extends makeMsgBase, nodeRedMsgBase {}
    interface inputNodeMsgDef extends nodeRedMsgBase, graphDataObject {}
}

export default statusChart;