#include <stdio.h>

#define MAXN 10010

int a[MAXN];
int n;

/* Returns 1 if a[x] would remain at position x after sorting a[l..r],
   i.e. exactly x-l elements of the range are smaller than a[x]. */
int work(int l, int r, int x)
{
    int val = a[x], tot = 0;
    int i;
    for (i = l; i <= r; i++) {
        if (a[i] < val) {
            tot++;
        }
    }
    return (l + tot == x) ? 1 : 0;
}

int main(void)
{
    int m, i;
    scanf("%d%d", &n, &m);
    for (i = 1; i <= n; i++) {
        scanf("%d", &a[i]);
    }
    for (i = 1; i <= m; i++) {
        int l, r, x;
        scanf("%d%d%d", &l, &r, &x);
        /* A single-element range is trivially in place. */
        if (l == r || work(l, r, x) == 1) {
            printf("Yes\n");
        } else {
            printf("No\n");
        }
    }
    return 0;
}
/**
 * This class is intended to notify observers of the existence of native instances of
 * xwalk_content. It receives a callback when native xwalk_content instances are created or
 * destroyed. Observers are notified when the first instance is created or the
 * last instance is destroyed.
 */
@JNINamespace("xwalk")
public class XWalkContentLifecycleNotifier {

    /**
     * Observer interface to be implemented by deriving xwalk lifecycle observers.
     */
    public static interface Observer {
        public void onFirstXWalkViewCreated();
        public void onLastXWalkViewDestroyed();
    }

    private static final ObserverList<Observer> sLifecycleObservers =
            new ObserverList<Observer>();
    private static int sNumXWalkViews = 0;

    private XWalkContentLifecycleNotifier() {}

    public static void addObserver(Observer observer) {
        sLifecycleObservers.addObserver(observer);
    }

    public static void removeObserver(Observer observer) {
        sLifecycleObservers.removeObserver(observer);
    }

    public static boolean hasXWalkViewInstances() {
        return sNumXWalkViews > 0;
    }

    // Called on UI thread.
    @CalledByNative
    private static void onXWalkViewCreated() {
        ThreadUtils.assertOnUiThread();
        assert sNumXWalkViews >= 0;
        sNumXWalkViews++;
        if (sNumXWalkViews == 1) {
            // First XWalkView created, notify observers.
            for (Observer observer : sLifecycleObservers) {
                observer.onFirstXWalkViewCreated();
            }
        }
    }

    // Called on UI thread.
    @CalledByNative
    private static void onXWalkViewDestroyed() {
        ThreadUtils.assertOnUiThread();
        assert sNumXWalkViews > 0;
        sNumXWalkViews--;
        if (sNumXWalkViews == 0) {
            // Last XWalkView destroyed, notify observers.
            for (Observer observer : sLifecycleObservers) {
                observer.onLastXWalkViewDestroyed();
            }
        }
    }
}
n = int(input())
l = list(map(int, input().rstrip().split()))

def thanos(l):
    # Length of the longest sorted list obtainable by repeatedly
    # discarding either half of the list ("Thanos sort").
    j = sorted(l)
    if j == l:
        return len(j)
    p = len(j) // 2
    k1 = l[:p]
    k2 = l[p:]
    return max(thanos(k1), thanos(k2))

print(thanos(l))
#include <stdio.h>

int main(void)
{
    int r1, r2, c1, c2;
    scanf("%d %d %d %d", &r1, &c1, &r2, &c2);

    int ans1, ans2, ans3;

    /* Rook: one move if on the same rank or file, otherwise two. */
    if (r1 == r2 || c1 == c2) {
        ans1 = 1;
    } else {
        ans1 = 2;
    }

    /* Bishop: unreachable (0) if the squares differ in color; one move if on
       a common diagonal, otherwise two. */
    if ((r1 + c1) % 2 == (r2 + c2) % 2) {
        if ((r1 + c1) == (r2 + c2) || (r1 - c1) == (r2 - c2)) {
            ans2 = 1;
        } else {
            ans2 = 2;
        }
    } else {
        ans2 = 0;
    }

    /* King: Chebyshev distance, the larger of the absolute differences. */
    int rdiff = r1 - r2;
    int cdiff = c1 - c2;
    if (rdiff < 0) rdiff = -rdiff;
    if (cdiff < 0) cdiff = -cdiff;
    ans3 = (rdiff >= cdiff) ? rdiff : cdiff;

    printf("%d %d %d\n", ans1, ans2, ans3);
    return 0;
}
def centering(self, axis=0):
    """Shift all atoms so the structure is centered along the requested axes."""
    _aList = axis_list(axis)
    _wasCart = self.coordSys == "C"
    if _wasCart:
        self.coordSys = "D"

    _center = self.center
    _shift = np.array([0.5, 0.5, 0.5], dtype=self._dtype) - _center
    # Zero the shift along axes that were not requested.
    for i in range(3):
        ia = i + 1
        if ia not in _aList:
            _shift[i] = 0.0
    self.__move_all(_shift)

    if _wasCart:
        self.coordSys = "C"
package validator

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"strings"
)

// Body checks for various possible request body decoding errors.
func (v *productValidator) Body(err error) error {
	var syntaxError *json.SyntaxError
	var unmarshalTypeError *json.UnmarshalTypeError

	switch {
	// Catch any syntax errors in the JSON.
	case errors.As(err, &syntaxError):
		return fmt.Errorf("request body contains badly-formed JSON at position: %d", syntaxError.Offset)

	// In some circumstances Decode() may also return an
	// io.ErrUnexpectedEOF error for syntax errors in the JSON.
	case errors.Is(err, io.ErrUnexpectedEOF):
		return fmt.Errorf("request body contains badly-formed JSON")

	// Catch any type errors.
	case errors.As(err, &unmarshalTypeError):
		return fmt.Errorf("invalid value for the %q field at position: %d", unmarshalTypeError.Field, unmarshalTypeError.Offset)

	// Catch the error caused by extra unexpected fields.
	case strings.HasPrefix(err.Error(), "json: unknown field "):
		fieldErr := strings.TrimPrefix(err.Error(), "json: ")
		return errors.New(fieldErr)

	// An io.EOF error is returned by Decode() if the request body is empty.
	case errors.Is(err, io.EOF):
		return errors.New("request body must not be empty")

	// Otherwise log the error.
	default:
		log.Println(err.Error())
		return errors.New("error decoding JSON")
	}
}
Well, that didn't take long. I had thought that after Attachmate bought Novell it would be keeping its open-source teams working. Indeed, Attachmate CEO Jeff Hawn had told me that, "Business will operate as usual." While Attachmate will be keeping SUSE Linux as a spin-off company, Mono, the open-source implementation of Windows' .NET, is being shut down, and there have been hundreds of additional Novell layoffs. So much for business as usual. In a statement, Hawn told me, "We have re-established Nuremberg [Germany] as the headquarters of our SUSE business unit and the prioritization and resourcing of certain development efforts--including Mono--will now be determined by the business unit leaders there. This change led to the release of some US based employees today. As previously stated, all technology road-maps remain intact with resources being added to those in a manner commensurate with customer demand." At this time, I do not know what other development efforts are being put on the back-burner. Nor do I know if Miguel de Icaza, the founder and driving engine of Mono, has been let go. I've sent several requests for comment to him, but I haven't received a reply. De Icaza, who is usually very outspoken, has also not tweeted nor written on his blogs about the fate of Mono and his own future with Novell. My understanding is that all of the Mono team, approximately 30 individuals, have been let go. The Salt Lake City KSL television station reports that "Novell Inc. laid off hundreds of employees Monday from its Provo office, just days after the company was sold, according to employees." Under Attachmate's rule, Provo was to be Novell's headquarters. The KSL Novell source said, "800 people lost their jobs company-wide, with most of those in Provo." The sales force, human resources, corporate operations, and the legal department were particularly hard hit. Even before this, Novell's top brass had been shown the door. If the 800 number is correct, that would be about 25% of the company's worldwide workforce or 50% of its U.S. workers. These lower-level firings, though, came as a shock to the Novell staff. The source continued that in the aftermath of the Novell sale, "We all expected something. Nobody expected what we got." He concluded that Attachmate/Novell plans to drop all but four of its product lines. Mono, because it implements a proprietary Microsoft software stack and the C# language, has always had its enemies in open-source circles. Frankly, I didn't like some of .NET's licensing; I wasn't comfortable with how close it brought open-source programming to Microsoft; and, having nothing to do with software development politics or licensing, I didn't like that Mono would forever be trying to catch up with its proprietary big brother, .NET. On the other hand, there have been several great programs written in Mono. As far as I'm concerned, Banshee is the best open-source media player out there. I'm not the only one to think so: Canonical decided to make Banshee its new default media player in Ubuntu 11.04. But, without its corporate backer, what happens now to Mono? I really hadn't expected this. Microsoft sponsored Attachmate's purchase of Novell. I had presumed they'd be happy to see Mono keep going. I was wrong. While some people are happy about Mono's demise and the stormy weather ahead for Novell, other open-source figures aren't so sure that the end of Mono is really a good thing. Bradley M.
Kuhn, Executive Director and President of the Software Freedom Conservancy, wrote that while "I have been critical of Mono … Mono should exist, for at least one important reason: some developers write lots and lots of new code on Microsoft systems in C#. If those developers decide they want to abandon Microsoft platforms tomorrow and switch to GNU/Linux, we don't want them to change their minds and decide to stay with Microsoft merely because GNU/Linux lacks a C# implementation." But will Mono continue? I'm sure it will in one form or another. Open-source software, no matter how encumbered it may be with possible copyright or patent problems, is hard to kill. Whether it will continue to be an important software development environment is another question entirely. I fear the answer is no. While some programs, such as Banshee and the F-Spot photo manager, will continue on, it's hard for me to see developers choosing to start significant new projects in Mono. As for Novell? Well, I'm just glad its founder, the brilliant and cantankerous Ray Noorda, is no longer around to see the end of his company. It would have broken his heart. Related Stories: Novell Deals Done; Attachmate reveals Novell, SUSE, & Linux Plans; Microsoft gets Novell's Patents rights but must share them with Open-Source Software; OpenSUSE 11.4: SUSE Linux Revitalized
A Comparative Study of Neural Networks with Metaheuristics for Electricity Consumption Forecast Modelling
This paper studied the efficiency of Artificial Neural Networks (ANNs) combined with metaheuristic algorithms for modelling electricity consumption in Thailand. The objective was to compare an ANN trained with the Backpropagation algorithm (ANN-BP) against ANNs combined with different metaheuristic algorithms: ANN with Harmony Search (ANN-HS), ANN with Artificial Bee Colony (ANN-ABC), ANN with Teaching-Learning-Based Optimization (ANN-TLBO), and ANN with the Jaya Algorithm (ANN-JA). The models used population, Gross Domestic Product (GDP), imports of goods and services, and exports of goods and services as inputs. The experimental results showed that the ANN-TLBO model was the most efficient, while ANN-JA was a competitive metaheuristic alternative that could be applied in future modelling studies.
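Since the abstract only names the approach, the following is a minimal, hypothetical Python sketch of the setup it describes: a small feed-forward ANN whose weights are fitted by a generic population-based metaheuristic (a simple elite-resampling search standing in for HS/ABC/TLBO/JA), on synthetic stand-ins for the four named inputs. Nothing here comes from the paper itself.

```python
# Hypothetical sketch, not the paper's code: fit a one-hidden-layer ANN by a
# population-based search. The four features stand in for population, GDP,
# imports, and exports; the data is synthetic.
import numpy as np

rng = np.random.default_rng(0)

# Synthetic yearly records: [population, GDP, imports, exports] -> consumption
X = rng.normal(size=(40, 4))
y = X @ np.array([0.5, 1.2, 0.3, 0.4]) + 0.1 * rng.normal(size=40)

H = 8                  # hidden units
n_w = 4 * H + H        # input->hidden weights plus hidden->output weights

def predict(w, X):
    W1 = w[: 4 * H].reshape(4, H)   # input->hidden weights
    w2 = w[4 * H :]                 # hidden->output weights
    return np.tanh(X @ W1) @ w2

def mse(w):
    return float(np.mean((predict(w, X) - y) ** 2))

# Elite-resampling search as a placeholder for HS/ABC/TLBO/JA: keep the best
# candidates each generation and resample perturbed copies around them.
pop = rng.normal(size=(30, n_w))
for gen in range(200):
    fitness = np.array([mse(w) for w in pop])
    elite = pop[np.argsort(fitness)[:10]]
    children = elite.repeat(2, axis=0) + 0.05 * rng.normal(size=(20, n_w))
    pop = np.concatenate([elite, children])

best = min(pop, key=mse)
print(f"best training MSE: {mse(best):.4f}")
```

The real metaheuristics differ only in how new candidate weight vectors are generated from the current population; the ANN evaluation loop stays the same.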
// DropDatabase does the drop-database work according to the drop database plan.
func (s *Scope) DropDatabase(ts uint64) error {
	p, _ := s.Plan.(*plan.DropDatabase)
	if _, err := p.E.Database(p.Id); err != nil {
		if p.IfExistFlag {
			return nil
		}
		return err
	}
	return p.E.Delete(ts, p.Id)
}
package org.mirrentools.orion.service;

import java.util.Map;

import javax.servlet.http.HttpServletResponse;
import javax.websocket.Session;

import org.mirrentools.orion.common.LoginSession;
import org.mirrentools.orion.entity.Project;
import org.mirrentools.orion.entity.ProjectApi;
import org.mirrentools.orion.entity.ProjectApiGroup;
import org.mirrentools.orion.entity.ProjectApiTemplate;
import org.mirrentools.orion.entity.ProjectShare;

/**
 * Service interface for projects.
 *
 * @author <a href="https://mirrentools.org">Mirren</a>
 */
public interface ProjectService {
    /**
     * Gets the project list.
     *
     * @param loginSession the user's session
     * @return
     */
    Map<String, Object> getProjectList(LoginSession loginSession);

    /**
     * Gets a project.
     *
     * @param loginSession the user's session
     * @param id           the project id
     * @return
     */
    Map<String, Object> getProject(LoginSession loginSession, String id);

    /**
     * Saves a project.
     *
     * @param loginSession the user's session
     * @param project      the project
     * @return
     */
    Map<String, Object> saveProject(LoginSession loginSession, Project project);

    /**
     * Saves a project that may contain groups and APIs.
     *
     * @param loginSession the user's session
     * @param json         the project JSON
     * @return
     */
    Map<String, Object> saveProjectfromJson(LoginSession loginSession, String json);

    /**
     * Saves a project and pushes progress to the front-end WebSocket.
     *
     * @param loginSession the user's session
     * @param json         the project JSON
     * @param session      the WebSocket session
     */
    void saveProjectfromJsonWebSocket(LoginSession loginSession, String json, Session session);

    /**
     * Makes a copy of a project.
     *
     * @param loginSession the user's session
     * @param key          the project id
     * @return
     */
    Map<String, Object> copyProject(LoginSession loginSession, String key);

    /**
     * Updates a project.
     *
     * @param loginSession the user's session
     * @param project      the project information
     * @return
     */
    Map<String, Object> updateProject(LoginSession loginSession, Project project);

    /**
     * Moves a project up in the sort order.
     *
     * @param key the project id
     * @return
     */
    Map<String, Object> projectMoveUp(String key);

    /**
     * Moves a project down in the sort order.
     *
     * @param key the project id
     * @return
     */
    Map<String, Object> projectMoveDown(String key);

    /**
     * Deletes a project.
     *
     * @param loginSession the user's session
     * @param key          the project id
     * @return
     */
    Map<String, Object> deleteProject(LoginSession loginSession, String key);

    /**
     * Gets a project's share records.
     *
     * @param loginSession the user's session
     * @param key          the project id
     * @return
     */
    Map<String, Object> findProjectShare(LoginSession loginSession, String key);

    /**
     * Creates a project share.
     *
     * @param loginSession the user's session
     * @param share        the project share information
     * @return
     */
    Map<String, Object> saveProjectShare(LoginSession loginSession, ProjectShare share);

    /**
     * Updates a project share.
     *
     * @param loginSession the user's session
     * @param share        the project share information
     * @return
     */
    Map<String, Object> updateProjectShare(LoginSession loginSession, ProjectShare share);

    /**
     * Deletes a project share.
     *
     * @param loginSession the user's session
     * @param sid          the project share id
     * @return
     */
    Map<String, Object> deleteProjectShare(LoginSession loginSession, String sid);

    /**
     * Gets the API groups of the given project.
     *
     * @param projectId
     * @return
     */
    Map<String, Object> getApiGroupList(LoginSession loginSession, String projectId);

    /**
     * Gets the given API group.
     *
     * @param groupId the group id
     * @return
     */
    Map<String, Object> getApiGroup(LoginSession loginSession, String groupId);

    /**
     * Saves an API group for the given project.
     *
     * @param loginSession the user's session
     * @param group        the group information
     * @return
     */
    Map<String, Object> saveApiGroup(LoginSession loginSession, ProjectApiGroup group);

    /**
     * Updates an API group of the given project.
     *
     * @param loginSession the user's session
     * @param group        the group information
     */
    Map<String, Object> updateApiGroup(LoginSession loginSession, ProjectApiGroup group);

    /**
     * Moves an API group up.
     *
     * @param id
     * @return
     */
    Map<String, Object> moveUpApiGroup(String id);

    /**
     * Moves an API group down.
     *
     * @param id
     * @return
     */
    Map<String, Object> moveDownApiGroup(String id);

    /**
     * Deletes an API group of the given project.
     *
     * @param loginSession the user's session
     * @param groupId      the group id
     * @return
     */
    Map<String, Object> deleteApiGroup(LoginSession loginSession, String groupId);

    /**
     * Adds an API.
     *
     * @param loginSession the user's session
     * @param api          the API information
     * @return
     */
    Map<String, Object> saveApi(LoginSession loginSession, ProjectApi api);

    /**
     * Gets the APIs of a group by its id.
     *
     * @param groupId the group id
     * @return
     */
    Map<String, Object> findApis(LoginSession loginSession, String groupId);

    /**
     * Gets an API by its id.
     *
     * @param apiId the API id
     * @return
     */
    Map<String, Object> getApi(LoginSession loginSession, String apiId);

    /**
     * Copies the given API.
     *
     * @param apiId the API id
     * @return
     */
    Map<String, Object> copyApi(LoginSession loginSession, String apiId);

    /**
     * Updates an API.
     *
     * @param loginSession the user's session
     * @param api          the API information
     * @return
     */
    Map<String, Object> updateApi(LoginSession loginSession, ProjectApi api);

    /**
     * Moves an API up in the sort order.
     *
     * @param id
     * @return
     */
    Map<String, Object> moveUpApi(String id);

    /**
     * Moves an API down in the sort order.
     *
     * @param id
     * @return
     */
    Map<String, Object> moveDownApi(String id);

    /**
     * Deletes the given API.
     *
     * @param loginSession the user's session
     * @param apiId        the API id
     * @return
     */
    Map<String, Object> deleteApi(LoginSession loginSession, String apiId);

    /***
     * Gets the list of API templates, returning template ids and names.
     *
     * @param loginSession
     * @return
     */
    Map<String, Object> findApiTemplateList(LoginSession loginSession);

    /***
     * Gets template details.
     *
     * @param loginSession
     * @param tid
     * @return
     */
    Map<String, Object> getApiTemplate(LoginSession loginSession, String tid);

    /**
     * Adds a template.
     *
     * @param loginSession
     * @param template
     * @return
     */
    Map<String, Object> postApiTemplate(LoginSession loginSession, ProjectApiTemplate template);

    /**
     * Deletes the given template.
     *
     * @param loginSession
     * @return
     */
    Map<String, Object> deleteApiTemplate(LoginSession loginSession, String tid);

    /**
     * Gets the project JSON string from a share record.
     *
     * @param sid the share id
     * @param pwd the viewing password
     * @return
     */
    Map<String, Object> getJsonByShare(String sid, String pwd);

    /**
     * Gets the project's JSON string.
     *
     * @param projectId
     * @return
     */
    String getJson(LoginSession loginSession, String projectId);

    /**
     * Downloads the JSON file.
     *
     * @param response
     * @param projectId the project id
     */
    void downJson(HttpServletResponse response, LoginSession loginSession, String projectId);
}
// ***********************************************************************
// Author : the_hunter
// Created : 04-01-2020
//
// Last Modified By : the_hunter
// Last Modified On : 04-01-2020
// ***********************************************************************

#include <toml/pch.h>

/// <summary>
/// Creates a command that looks up a TOML value by key.
/// </summary>
GetStringCommand::GetStringCommand(const cell handle, const char* key, amx::string& string)
    : handle_(handle), string_(string)
{
    find_value_ = [key](TomlHolder* holder) -> toml_t&
    {
        return toml_find(holder, key);
    };
}

/// <summary>
/// Creates a command that looks up a TOML array element by index.
/// </summary>
GetStringCommand::GetStringCommand(const cell handle, cell index, amx::string& string)
    : handle_(handle), string_(string)
{
    find_value_ = [index](TomlHolder* holder) -> toml_t&
    {
        return toml::find(*holder->toml(), index);
    };
}

/// <summary>
/// Converts the found value to a string and returns its length.
/// </summary>
cell GetStringCommand::execute()
{
    auto* const holder = Storage::get(handle_);
    const auto& value = find_value_(holder);

    switch (value.type()) {
    case toml::value_t::integer:
        string_ = std::to_string(value.as_integer()).c_str(); // cppcheck-suppress danglingTemporaryLifetime
        break;

    case toml::value_t::boolean:
        string_ = std::to_string(value.as_boolean()).c_str(); // cppcheck-suppress danglingTemporaryLifetime
        break;

    case toml::value_t::floating:
        string_ = std::to_string(value.as_floating()).c_str(); // cppcheck-suppress danglingTemporaryLifetime
        break;

    default:
        string_ = value.as_string().str.c_str(); // cppcheck-suppress danglingTemporaryLifetime
    }

    return string_.length(); // cppcheck-suppress danglingTemporaryLifetime
}

/// <summary>
/// Native: reads a string value by key, falling back to a default value.
/// </summary>
cell AMX_NATIVE_CALL toml_get_string(Amx* amx, cell* params)
{
    enum Args { Count, Handle, Key, Buffer, BufferSize, DefaultValue, ThrowError };

    const auto buf_size = params[BufferSize];

    if (!check_string_buffer_size(amx, buf_size, __func__)) {
        return 0;
    }

    const auto handle = params[Handle];
    auto* const key = AmxxApi::get_amx_string(amx, params[Key]);
    auto string = amx::string(amx, params[Buffer], buf_size);

    GetStringCommand command(handle, key, string);
    const auto result = execute(command, amx, -1, params[ThrowError], __func__);

    if (result != -1) {
        return result;
    }

    return AmxxApi::set_amx_string(amx, params[Buffer],
                                   AmxxApi::get_amx_string(amx, params[DefaultValue]), buf_size);
}

/// <summary>
/// Native: reads a string value by array index, falling back to a default value.
/// </summary>
cell AMX_NATIVE_CALL toml_array_get_string(Amx* amx, cell* params)
{
    enum Args { Count, Handle, Index, Buffer, BufferSize, DefaultValue, ThrowError };

    const auto buf_size = params[BufferSize];

    if (!check_string_buffer_size(amx, buf_size, __func__)) {
        return 0;
    }

    const auto handle = params[Handle];
    const auto index = params[Index];
    auto string = amx::string(amx, params[Buffer], buf_size);

    GetStringCommand command(handle, index, string);
    const auto result = execute(command, amx, -1, params[ThrowError], __func__);

    if (result != -1) {
        return result;
    }

    return AmxxApi::set_amx_string(amx, params[Buffer],
                                   AmxxApi::get_amx_string(amx, params[DefaultValue]), buf_size);
}
/// Set the permission of a file.
///
/// @return `OK` for success, `FAIL` for failure.
int os_setperm(const char_u *name, int perm)
{
  uv_fs_t request;
  int result = uv_fs_chmod(uv_default_loop(), &request,
                           (const char *)name, perm, NULL);
  uv_fs_req_cleanup(&request);

  if (result == kLibuvSuccess) {
    return OK;
  }

  return FAIL;
}
package common

import "time"

const ScrapInterval = time.Second * 1

type KubernetesData interface{}

type ScrapperArgs interface {
	GetNamespaceField() string
}

type ListScrapperArgs struct {
	Namespace string
}

func (l ListScrapperArgs) GetNamespaceField() string {
	return l.Namespace
}

type DetailScrapperArgs struct {
	Namespace string
	Name      string
}

func (d DetailScrapperArgs) GetNamespaceField() string {
	return d.Namespace
}
package backtest

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func Test_NextIDFunc(t *testing.T) {
	Convey("Test NextID", t, func() {
		nextID := NextIDFunc()
		for i := 1; i < 10; i++ {
			So(nextID(), ShouldEqual, i)
		}
	})
}
A year after her defeat by Donald Trump in the 2016 presidential election, Hillary Clinton says “there are lots of questions about its legitimacy” due to Russian interference and widespread voter suppression efforts. In an interview with Mother Jones in downtown Manhattan, Clinton said Russian meddling in the election “was one of the major contributors to the outcome.” The Russians used “weaponized false information,” she said, in “a very successful disinformation campaign” that “wasn’t just influencing voters—it was determining the outcome.” Republican efforts to make it harder to vote—through measures such as voter ID laws, shortened early voting periods, and new obstacles to registration—likewise “contributed to the outcome,” Clinton said. These moves received far less attention than Russian interference but arguably had a more demonstrable impact on the election result. According to an MIT study, more than 1 million people did not vote in 2016 because they encountered problems registering or at the polls. Clinton lost the election by a total of 78,000 votes in Michigan, Pennsylvania, and Wisconsin. “In a couple of places, most notably Wisconsin, I think it had a dramatic impact on the outcome,” Clinton said of voter suppression. Wisconsin’s new voter ID law required a Wisconsin driver’s license or one of several other types of ID to cast a ballot. It blocked or deterred up to 23,000 people from voting in reliably Democratic Milwaukee and Madison, and potentially 45,000 people statewide, according to a University of Wisconsin study. Clinton lost the state by fewer than 23,000 votes. African Americans, who overwhelmingly supported Clinton, were more than three times as likely as whites not to vote because of the law. “It seems likely that it cost me the election [in Wisconsin] because of the tens of thousands of people who were turned away and the margin being so small,” Clinton said. She noted that this was the first presidential election in more than 50 years without the full protections of the Voting Rights Act after the Supreme Court gutted the law in a 2013 ruling, and 14 states had new voting restrictions in effect for the first time. “So many places have really tried to make it as difficult as they possibly could for young people, for African Americans, the elderly, to vote,” she said. For Clinton and others who question the legitimacy of the election results, particularly due to Russian interference, there’s not an obvious next step. “We don’t have a method for contesting that in our system,” she said. “That’s why I’ve long advocated for an independent commission to get to the bottom of what happened.” On Wednesday, Democrats in Congress introduced articles of impeachment against Trump, but Clinton thinks that’s the wrong approach. “I think we need the investigations to continue,” she said, “and I think that it’s premature.” But Clinton stood by a claim she made during a presidential debate last year, that if Trump were elected president, he would be Putin’s “puppet.” Asked if she still felt that way, Clinton said, “I do.” “I don’t know how the president of the United States, with all of the concerns about the integrity of our elections, could meet with Putin just recently and basically say, ‘Well, you know, he told me again he didn’t do it,’” she said. “I can’t believe that he’s so naïve.
I think that he hopes or expects the rest of us to be naïve, or at least the people who support him to be naïve. But this is a serious cyberattack on America.” The impact of Russian interference in the election can be measured in a few tangible ways. Operatives in Russia published about 80,000 Facebook posts that reached 126 million Americans, as Russia-linked Facebook ads targeted swing states like Michigan and Wisconsin. More than 36,000 Russia-linked Twitter accounts generated 1.4 million tweets about the election that had 288 million impressions. The constant drumbeat of stories based on Clinton campaign and Democratic Party emails obtained by Russian-backed hackers is one reason that then-FBI Director James Comey’s 11th-hour letter hurt Clinton so much. Clinton admitted that her campaign had “shortcomings” that contributed to her loss, but she said the stakes of Russian meddling were bigger than just the election result. “If we don’t figure out what they did to us and take adequate steps to prevent it, they’re only going to get better,” she said. “This is the first time we’ve ever been attacked by a foreign adversary and then they suffer no real consequences.” The interview came amid a slew of new allegations of sexual assault against prominent figures in politics and entertainment. Asked why the various sexual assault allegations against Trump haven’t stuck when similar ones have recently ended careers, Clinton said she couldn’t explain it. “I don’t understand a lot about how he got away with so many attacks and insults and behaviors that allowed him to win the presidency,” she said. “I think part of it is because a lot of people really saw him more as an entertainment figure.” She added, “It’s something that people are going to be scratching their heads about a long time.”
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Quicksort usually has a running time of n x log(n), but is there an algorithm that can sort even faster? In general,
 * this is not possible. Most sorting algorithms are comparison sorts, i.e. they sort a list just by comparing
 * the elements to one another. A comparison sort algorithm cannot beat n x log(n) (worst-case) running time,
 * since n x log(n) represents the minimum number of comparisons needed to know where to place each element.
 *
 * @author Leandro Franchi
 */
public class CountingSort1 {
    public static List<Integer> countingSort(List<Integer> arr) {
        if (arr == null || arr.isEmpty()) return new ArrayList<>();
        // Size the tally array by the largest value present; orElse(0) guards the
        // (already excluded) empty case instead of the original orElseGet(null), which
        // would throw a NullPointerException.
        int n = arr.stream().mapToInt(x -> x).max().orElse(0);
        Integer[] resultArray = new Integer[n + 1];
        Arrays.fill(resultArray, 0);
        // Tally how many times each value occurs.
        arr.forEach(i -> resultArray[i]++);
        return Arrays.asList(resultArray);
    }
}
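The class above only produces the tallies; for illustration, here is a minimal Python sketch (hypothetical, not part of the original class) that completes the full counting sort described in the Javadoc, running in O(n + k) for n values in the range 0..k:

```python
# Counting sort for small non-negative integers: tally each value, then emit
# every value as many times as it occurred. No element comparisons are made,
# which is how it sidesteps the n*log(n) comparison-sort lower bound.
def counting_sort(arr):
    counts = [0] * (max(arr) + 1)
    for v in arr:
        counts[v] += 1                  # phase 1: tally (what the Java class returns)
    out = []
    for value, count in enumerate(counts):
        out.extend([value] * count)     # phase 2: expand tallies into sorted output
    return out

print(counting_sort([4, 1, 3, 1, 2]))   # [1, 1, 2, 3, 4]
```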
{-|
Module      : Test.Data.Serialization.Yaml
Description : Test suite for Data.Serialization.Yaml
Copyright   :
License     : GPL-2
Maintainer  : <EMAIL>
Stability   : experimental
Portability : POSIX
-}
module Test.Data.Serialization.Yaml (
    ) where
package api

import (
	"encoding/json"

	"github.com/pkg/errors"
)

// Payload is a basic structure to encapsulate a generic structure.
type Payload struct {
	Type  string          `json:"type"`
	Props json.RawMessage `json:"props,omitempty"`
}

// Unmarshal decodes the basic payload structure.
func (payload *Payload) Unmarshal(raw []byte) error {
	return json.Unmarshal(raw, payload)
}

// MarshalPayloadEntity builds a basic payload structure from an entity.
func MarshalPayloadEntity(typeName string, entity Entity) Payload {
	props, _ := json.Marshal(entity)
	return Payload{
		Type:  typeName,
		Props: props,
	}
}

// Entity returns the appropriate entity as an interface
// based on its type.
func (payload Payload) Entity() (Entity, error) {
	if payload.Type == "" {
		return nil, errors.New("Empty payload")
	}

	entityfunc, ok := transform[payload.Type]
	if !ok {
		if payload.Type == "metadata" {
			return nil, errors.New("Cannot create entity from metadata section")
		}
		return nil, errors.New("Could not determine a suitable type")
	}

	entity, _ := entityfunc()
	if entity == nil {
		return nil, errors.New("Could not determine a suitable type")
	}

	// If there is a payload present we want to try and unmarshal it
	if payload.Props != nil {
		if err := entity.Unmarshal(payload.Props); err != nil {
			return entity, err
		}
	}

	return entity, nil
}

// SafeEntity returns the appropriate entity as an interface
// based on its type and is at the section level.
func (payload Payload) SafeEntity() (Entity, error) {
	if payload.Type == "" {
		return nil, errors.New("Empty payload")
	}

	entity, safe := transform[payload.Type]()
	if entity == nil || !safe {
		return nil, errors.New("Could not determine a suitable type")
	}

	// If there is a payload present we want to try and unmarshal it
	if payload.Props != nil {
		if err := entity.Unmarshal(payload.Props); err != nil {
			return entity, err
		}
	}

	return entity, nil
}

// UnmarshalEntity returns the appropriate entity as an interface
// based on its type.
func (payload Payload) UnmarshalEntity(raw []byte) (Entity, error) {
	// Deserialize the initial payload from a JSON structure
	if err := payload.Unmarshal(raw); err != nil {
		return nil, err
	}

	// Extract the entity interface of the payload and validate it
	return payload.Entity()
}

// Valid returns whether the entity validates.
func (payload Payload) Valid() (bool, error) {
	entity, err := payload.Entity()
	if err != nil {
		return false, err
	}
	return entity.Valid()
}

// PayloadProperties is a structure of JSON where it is an object
// of named properties with each value being that of a Payload.
type PayloadProperties map[string]Payload

// Save the entity to data storage.
func (entity *PayloadProperties) Save(context DatabaseService, account int) (int, error) {
	return 0, nil
}

// Delete the entity from data storage.
func (entity *PayloadProperties) Delete(context DatabaseService, account int) (int, error) {
	return 0, nil
}

// Get the entity from data storage.
func (entity *PayloadProperties) Get(context DatabaseService, account int) (int, error) {
	return 0, nil
}
Development and validation of a liquid-chromatographic procedure for serum creatinine. We evaluated and compared three analytical procedures for measuring creatinine by testing pooled sera and sera from individual human subjects. In two of the methods, an enzymic and a conventional colorimetric procedure, creatinine was separated from serum proteins with tungstic acid and detected at 520 nm after reaction with alkaline picrate (Jaffé reagent). The amount of enzymatically active creatinine was estimated by computing the difference between the creatinine concentrations in specimens incubated with and without creatinine deiminase (EC 3.5.4.21; creatinine iminohydrolase). For the other procedure we measured creatinine at 236 nm in serum ultrafiltrates injected on a reversed-phase high-performance liquid-chromatographic column, with allopurinol as an internal standard. Studies with serum pools showed that the chromatographic procedure was more precise than the enzymic and conventional tests. The routine Jaffé method was not specific and produced erroneously high creatinine results for serum pools supplemented with ascorbate, pyruvate, acetone, and glucose. There was little or no interference with the chromatographic or enzymic techniques. Recovery of creatinine added to serum specimens was about 95% for the chromatographic procedures, but only 82% for both Jaffé methods.
Global variability of substance abuse: is latitude a unique etiological factor? Worldwide substance abuse consequences are a major problem challenging health planners and providers. To mediate these problems effectively, further information on the variability of substance abuse prevalence and associated causes is needed. There is some evidence suggesting that latitude may present a unique etiology for substance abuse because of northerly conditions such as extreme light and dark cycles and longer periods of cold, harsh environment. This hypothesis is investigated by reviewing the known literature and applying methods for evaluating latitude as a geophysical grouping characteristic on archival substance abuse data. Conclusions are based on previous findings and examples of alcohol-attributable mortality for populations from six northern areas and the United States.
/**
    \file   GenericReconReferenceKSpaceDelayedBufferGadget.h
    \brief  The generic chain requires that reference data is acquired right before the imaging data,
            depending on the triggering scheme. Sometimes, for the separate acceleration mode, ref data
            for all SLC may be acquired all at once at the beginning of the scan. This gadget will buffer
            the ref data for every slice and only send it downstream when imaging data for that slice arrives.
    \author <NAME>
*/

#pragma once

#include "GenericReconBase.h"
#include "hoNDArray_utils.h"
#include "hoNDArray_elemwise.h"

namespace Gadgetron {

    class EXPORTGADGETSMRICORE GenericReconReferenceKSpaceDelayedBufferGadget : public GenericReconKSpaceReadoutBase
    {
    public:
        GADGET_DECLARE(GenericReconReferenceKSpaceDelayedBufferGadget);

        typedef GenericReconKSpaceReadoutBase BaseClass;

        GenericReconReferenceKSpaceDelayedBufferGadget();
        ~GenericReconReferenceKSpaceDelayedBufferGadget();

        /// ------------------------------------------------------------------------------------
        /// parameters to control the reconstruction
        /// ------------------------------------------------------------------------------------

    protected:
        // --------------------------------------------------
        // variables for protocol
        // --------------------------------------------------

        // for every encoding space
        // calibration mode
        std::vector<Gadgetron::ismrmrdCALIBMODE> calib_mode_;

        // number of slices
        std::vector<size_t> SLC_;

        // reference data buffer for every slice, encoding-slice-acquisition
        std::vector<std::vector<std::vector<GadgetContainerMessage<ISMRMRD::AcquisitionHeader>*>>> ref_buf_;

        // check whether imaging data has arrived yet
        std::vector<std::vector<bool>> imaging_data_arrived_;

        // --------------------------------------------------
        // variable for recon
        // --------------------------------------------------

        // ref buffer for every slice

        // --------------------------------------------------
        // gadget functions
        // --------------------------------------------------

        // default interface function
        virtual int process_config(ACE_Message_Block* mb);
        virtual int process(Gadgetron::GadgetContainerMessage<ISMRMRD::AcquisitionHeader>* m1);

        // close call
        int close(unsigned long flags);
    };
}
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module is the entry to run spark processing script.

This module contains code related to Spark Processors, which are used
for Processing jobs. These jobs let customers perform data pre-processing,
post-processing, feature engineering, data validation, and model evaluation
on SageMaker using Spark and PySpark.
"""
from __future__ import absolute_import

import json
import logging
import os.path
import shutil
import subprocess
import tempfile
import time
import urllib.request
from enum import Enum
from io import BytesIO
from urllib.parse import urlparse

from sagemaker import image_uris
from sagemaker.local.image import _ecr_login_if_needed, _pull_image
from sagemaker.processing import ProcessingInput, ProcessingOutput, ScriptProcessor
from sagemaker.s3 import S3Uploader
from sagemaker.session import Session
from sagemaker.spark import defaults

logger = logging.getLogger(__name__)


class _SparkProcessorBase(ScriptProcessor):
    """Handles Amazon SageMaker processing tasks for jobs using Spark.

    Base class for either PySpark or SparkJars.
    """

    _default_command = "smspark-submit"
    _conf_container_base_path = "/opt/ml/processing/input/"
    _conf_container_input_name = "conf"
    _conf_file_name = "configuration.json"

    _valid_configuration_keys = ["Classification", "Properties", "Configurations"]
    _valid_configuration_classifications = [
        "core-site",
        "hadoop-env",
        "hadoop-log4j",
        "hive-env",
        "hive-log4j",
        "hive-exec-log4j",
        "hive-site",
        "spark-defaults",
        "spark-env",
        "spark-log4j",
        "spark-hive-site",
        "spark-metrics",
        "yarn-env",
        "yarn-site",
        "export",
    ]

    _submit_jars_input_channel_name = "jars"
    _submit_files_input_channel_name = "files"
    _submit_py_files_input_channel_name = "py-files"
    _submit_deps_error_message = (
        "Please specify a list of one or more S3 URIs, "
        "local file paths, and/or local directory paths"
    )

    # history server vars
    _history_server_port = "15050"
    _history_server_url_suffix = f"/proxy/{_history_server_port}"
    _spark_event_log_default_local_path = "/opt/ml/processing/spark-events/"

    def __init__(
        self,
        role,
        instance_type,
        instance_count,
        framework_version=None,
        py_version=None,
        container_version=None,
        image_uri=None,
        volume_size_in_gb=30,
        volume_kms_key=None,
        output_kms_key=None,
        max_runtime_in_seconds=None,
        base_job_name=None,
        sagemaker_session=None,
        env=None,
        tags=None,
        network_config=None,
    ):
        """Initialize a ``_SparkProcessorBase`` instance.

        The _SparkProcessorBase handles Amazon SageMaker processing tasks for
        jobs using SageMaker Spark.

        Args:
            framework_version (str): The version of SageMaker PySpark.
            py_version (str): The version of python.
            container_version (str): The version of spark container.
            role (str): An AWS IAM role name or ARN. The Amazon SageMaker
                training jobs and APIs that create Amazon SageMaker endpoints
                use this role to access training data and model artifacts.
                After the endpoint is created, the inference code might use
                the IAM role, if it needs to access an AWS resource.
            instance_type (str): Type of EC2 instance to use for processing,
                for example, 'ml.c4.xlarge'.
            instance_count (int): The number of instances to run the Processing
                job with. Defaults to 1.
            volume_size_in_gb (int): Size in GB of the EBS volume to use for
                storing data during processing (default: 30).
            volume_kms_key (str): A KMS key for the processing volume.
            output_kms_key (str): The KMS key id for all ProcessingOutputs.
            max_runtime_in_seconds (int): Timeout in seconds. After this
                amount of time Amazon SageMaker terminates the job regardless
                of its current status.
            base_job_name (str): Prefix for processing name. If not specified,
                the processor generates a default job name, based on the
                training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which
                manages interactions with Amazon SageMaker APIs and any other
                AWS services needed. If not specified, the processor creates
                one using the default AWS configuration chain.
            env (dict): Environment variables to be passed to the processing job.
            tags ([dict]): List of tags to be passed to the processing job.
            network_config (sagemaker.network.NetworkConfig): A NetworkConfig
                object that configures network isolation, encryption of
                inter-container traffic, security group IDs, and subnets.
        """
        self.history_server = None
        self._spark_event_logs_s3_uri = None

        session = sagemaker_session or Session()
        region = session.boto_region_name
        self.image_uri = self._retrieve_image_uri(
            image_uri, framework_version, py_version, container_version, region, instance_type
        )

        env = env or {}
        command = [_SparkProcessorBase._default_command]

        super(_SparkProcessorBase, self).__init__(
            role=role,
            image_uri=self.image_uri,
            instance_count=instance_count,
            instance_type=instance_type,
            command=command,
            volume_size_in_gb=volume_size_in_gb,
            volume_kms_key=volume_kms_key,
            output_kms_key=output_kms_key,
            max_runtime_in_seconds=max_runtime_in_seconds,
            base_job_name=base_job_name,
            sagemaker_session=session,
            env=env,
            tags=tags,
            network_config=network_config,
        )

    def get_run_args(
        self,
        code,
        inputs=None,
        outputs=None,
        arguments=None,
    ):
        """Returns a RunArgs object.

        For processors (:class:`~sagemaker.spark.processing.PySparkProcessor`,
        :class:`~sagemaker.spark.processing.SparkJar`) that have special
        run() arguments, this object contains the normalized arguments for
        passing to :class:`~sagemaker.workflow.steps.ProcessingStep`.

        Args:
            code (str): This can be an S3 URI or a local path to a file with
                the framework script to run.
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input
                files for the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs
                for the processing job. These can be specified as either path strings
                or :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
        """
        return super().get_run_args(
            code=code,
            inputs=inputs,
            outputs=outputs,
            arguments=arguments,
        )

    def run(
        self,
        submit_app,
        inputs=None,
        outputs=None,
        arguments=None,
        wait=True,
        logs=True,
        job_name=None,
        experiment_config=None,
        kms_key=None,
    ):
        """Runs a processing job.

        Args:
            submit_app (str): .py or .jar file to submit to Spark as the primary application
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job.
                These must be provided as :class:`~sagemaker.processing.ProcessingInput`
                objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs
                for the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
            wait (bool): Whether the call should wait until the job completes (default: True).
            logs (bool): Whether to show the logs produced by the job.
                Only meaningful when wait is True (default: True).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            experiment_config (dict[str, str]): Experiment management configuration.
                Optionally, the dict can contain three keys:
                'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
                The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                  automatically created and the job's Trial Component associated with the Trial.
                * If `TrialName` is supplied and the Trial already exists,
                  the job's Trial Component will be associated with the Trial.
                * If both `ExperimentName` and `TrialName` are not supplied,
                  the trial component will be unassociated.
                * `TrialComponentDisplayName` is used for display in Studio.
            kms_key (str): The ARN of the KMS key that is used to encrypt the
                user code file (default: None).
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        super().run(
            submit_app,
            inputs,
            outputs,
            arguments,
            wait,
            logs,
            job_name,
            experiment_config,
            kms_key,
        )

    def _extend_processing_args(self, inputs, outputs, **kwargs):
        """Extends processing job args such as inputs."""
        if kwargs.get("spark_event_logs_s3_uri"):
            spark_event_logs_s3_uri = kwargs.get("spark_event_logs_s3_uri")
            self._validate_s3_uri(spark_event_logs_s3_uri)

            self._spark_event_logs_s3_uri = spark_event_logs_s3_uri
            self.command.extend(
                [
                    "--local-spark-event-logs-dir",
                    _SparkProcessorBase._spark_event_log_default_local_path,
                ]
            )

            output = ProcessingOutput(
                source=_SparkProcessorBase._spark_event_log_default_local_path,
                destination=spark_event_logs_s3_uri,
                s3_upload_mode="Continuous",
            )

            outputs = outputs or []
            outputs.append(output)

        if kwargs.get("configuration"):
            configuration = kwargs.get("configuration")
            self._validate_configuration(configuration)
            inputs = inputs or []
            inputs.append(self._stage_configuration(configuration))

        return inputs, outputs

    def start_history_server(self, spark_event_logs_s3_uri=None):
        """Starts a Spark history server.

        Args:
            spark_event_logs_s3_uri (str): S3 URI where Spark events are stored.
""" if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image_uri): logger.info("Pulling spark history server image...") _pull_image(self.image_uri) history_server_env_variables = self._prepare_history_server_env_variables( spark_event_logs_s3_uri ) self.history_server = _HistoryServer( history_server_env_variables, self.image_uri, self._get_network_config() ) self.history_server.run() self._check_history_server() def terminate_history_server(self): """Terminates the Spark history server.""" if self.history_server: logger.info("History server is running, terminating history server") self.history_server.down() self.history_server = None def _retrieve_image_uri( self, image_uri, framework_version, py_version, container_version, region, instance_type ): """Builds an image URI.""" if not image_uri: if (py_version is None) != (container_version is None): raise ValueError( "Both or neither of py_version and container_version should be set" ) if container_version: container_version = f"v{container_version}" return image_uris.retrieve( defaults.SPARK_NAME, region, version=framework_version, instance_type=instance_type, py_version=py_version, container_version=container_version, ) return image_uri def _validate_configuration(self, configuration): """Validates the user-provided Hadoop/Spark/Hive configuration. This ensures that the list or dictionary the user provides will serialize to JSON matching the schema of EMR's application configuration: https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html """ emr_configure_apps_url = ( "https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html" ) if isinstance(configuration, dict): keys = configuration.keys() if "Classification" not in keys or "Properties" not in keys: raise ValueError( f"Missing one or more required keys in configuration dictionary " f"{configuration} Please see {emr_configure_apps_url} for more information" ) for key in keys: if key not in self._valid_configuration_keys: raise ValueError( f"Invalid key: {key}. Must be one of {self._valid_configuration_keys}. " f"Please see {emr_configure_apps_url} for more information." ) if key == "Classification": if configuration[key] not in self._valid_configuration_classifications: raise ValueError( f"Invalid classification: {key}. Must be one of " f"{self._valid_configuration_classifications}" ) if isinstance(configuration, list): for item in configuration: self._validate_configuration(item) def _stage_configuration(self, configuration): """Serializes and uploads the user-provided EMR application configuration to S3. This method prepares an input channel. Args: configuration (Dict): the configuration dict for the EMR application configuration. """ serialized_configuration = BytesIO(json.dumps(configuration).encode("utf-8")) s3_uri = ( f"s3://{self.sagemaker_session.default_bucket()}/{self._current_job_name}/" f"input/{self._conf_container_input_name}/{self._conf_file_name}" ) S3Uploader.upload_string_as_file_body( body=serialized_configuration, desired_s3_uri=s3_uri, sagemaker_session=self.sagemaker_session, ) conf_input = ProcessingInput( source=s3_uri, destination=f"{self._conf_container_base_path}{self._conf_container_input_name}", input_name=_SparkProcessorBase._conf_container_input_name, ) return conf_input def _stage_submit_deps(self, submit_deps, input_channel_name): """Prepares a list of paths to jars, py-files, or files dependencies. This prepared list of paths is provided as `spark-submit` options. 
        The submit_deps list may include a combination of S3 URIs and local paths.
        Any S3 URIs are appended to the `spark-submit` option value without modification.
        Any local file paths are copied to a temp directory, uploaded to a default S3 URI,
        and included as a ProcessingInput channel to provide as local files to the SageMaker
        Spark container.

        :param submit_deps (list[str]): List of one or more dependency paths to include.
        :param input_channel_name (str): The `spark-submit` option name associated with
            the input channel.
        :return (Optional[ProcessingInput], str): Tuple of (left) optional ProcessingInput
            for the input channel, and (right) comma-delimited value for `spark-submit` option.
        """
        if not submit_deps:
            raise ValueError(
                f"submit_deps value may not be empty. {self._submit_deps_error_message}"
            )
        if not input_channel_name:
            raise ValueError("input_channel_name value may not be empty.")

        input_channel_s3_uri = (
            f"s3://{self.sagemaker_session.default_bucket()}"
            f"/{self._current_job_name}/input/{input_channel_name}"
        )

        use_input_channel = False
        spark_opt_s3_uris = []

        with tempfile.TemporaryDirectory() as tmpdir:
            for dep_path in submit_deps:
                dep_url = urlparse(dep_path)
                # S3 URIs are included as-is in the spark-submit argument
                if dep_url.scheme in ["s3", "s3a"]:
                    spark_opt_s3_uris.append(dep_path)
                # Local files are copied to temp directory to be uploaded to S3
                elif not dep_url.scheme or dep_url.scheme == "file":
                    if not os.path.isfile(dep_path):
                        raise ValueError(
                            f"submit_deps path {dep_path} is not a valid local file. "
                            f"{self._submit_deps_error_message}"
                        )
                    logger.info(
                        "Copying dependency from local path %s to tmpdir %s", dep_path, tmpdir
                    )
                    shutil.copy(dep_path, tmpdir)
                else:
                    raise ValueError(
                        f"submit_deps path {dep_path} references unsupported filesystem "
                        f"scheme: {dep_url.scheme} {self._submit_deps_error_message}"
                    )

            # If any local files were found and copied, upload the temp directory to S3
            if os.listdir(tmpdir):
                logger.info(
                    "Uploading dependencies from tmpdir %s to S3 %s", tmpdir, input_channel_s3_uri
                )
                S3Uploader.upload(
                    local_path=tmpdir,
                    desired_s3_uri=input_channel_s3_uri,
                    sagemaker_session=self.sagemaker_session,
                )
                use_input_channel = True

        # If any local files were uploaded, construct a ProcessingInput to provide
        # them to the Spark container and form the spark-submit option from a
        # combination of S3 URIs and container's local input path
        if use_input_channel:
            input_channel = ProcessingInput(
                source=input_channel_s3_uri,
                destination=f"{self._conf_container_base_path}{input_channel_name}",
                input_name=input_channel_name,
            )
            spark_opt = ",".join(spark_opt_s3_uris + [input_channel.destination])
        # If no local files were uploaded, form the spark-submit option from a list of S3 URIs
        else:
            input_channel = None
            spark_opt = ",".join(spark_opt_s3_uris)

        return input_channel, spark_opt

    def _get_network_config(self):
        """Runs container with different network config based on different env."""
        if self._is_notebook_instance():
            return "--network host"

        return f"-p 80:80 -p {self._history_server_port}:{self._history_server_port}"

    def _prepare_history_server_env_variables(self, spark_event_logs_s3_uri):
        """Gets all parameters required to run history server."""
        # prepare env variables
        history_server_env_variables = {}

        if spark_event_logs_s3_uri:
            history_server_env_variables[
                _HistoryServer.arg_event_logs_s3_uri
            ] = spark_event_logs_s3_uri
        # this variable will be previously set by run() method
        elif self._spark_event_logs_s3_uri is not None:
            history_server_env_variables[
                _HistoryServer.arg_event_logs_s3_uri
            ] = self._spark_event_logs_s3_uri
        else:
            raise ValueError(
                "SPARK_EVENT_LOGS_S3_URI not present. You can specify spark_event_logs_s3_uri "
                "either in run() or start_history_server()"
            )

        history_server_env_variables.update(self._config_aws_credentials())
        region = self.sagemaker_session.boto_region_name
        history_server_env_variables["AWS_REGION"] = region

        if self._is_notebook_instance():
            history_server_env_variables[
                _HistoryServer.arg_remote_domain_name
            ] = self._get_notebook_instance_domain()

        return history_server_env_variables

    def _is_notebook_instance(self):
        """Determine whether it is a notebook instance."""
        return os.path.isfile("/opt/ml/metadata/resource-metadata.json")

    def _get_notebook_instance_domain(self):
        """Get the instance's domain."""
        region = self.sagemaker_session.boto_region_name
        with open("/opt/ml/metadata/resource-metadata.json") as file:
            data = json.load(file)
            notebook_name = data["ResourceName"]

        return f"https://{notebook_name}.notebook.{region}.sagemaker.aws"

    def _check_history_server(self, ping_timeout=40):
        """Print message indicating the status of history server.

        Pings port 15050 to check whether the history server is up.
        Times out after `ping_timeout`.

        Args:
            ping_timeout (int): Timeout in seconds (defaults to 40).
        """
        # ping port 15050 to check history server is up
        timeout = time.time() + ping_timeout

        while True:
            if self._is_history_server_started():
                if self._is_notebook_instance():
                    logger.info(
                        "History server is up on %s%s",
                        self._get_notebook_instance_domain(),
                        self._history_server_url_suffix,
                    )
                else:
                    logger.info(
                        "History server is up on http://0.0.0.0%s", self._history_server_url_suffix
                    )
                break

            if time.time() > timeout:
                logger.error(
                    "History server failed to start. Please run 'docker logs history_server' "
                    "to see logs"
                )
                break

            time.sleep(1)

    def _is_history_server_started(self):
        """Check if history server is started."""
        try:
            response = urllib.request.urlopen(f"http://localhost:{self._history_server_port}")
            return response.status == 200
        except Exception:  # pylint: disable=W0703
            return False

    # TODO (guoqioa@): method only checks urlparse scheme, need to perform deep s3 validation
    def _validate_s3_uri(self, spark_output_s3_path):
        """Validate whether the URI uses an S3 scheme.

        In the future, this validation will perform deeper S3 validation.

        Args:
            spark_output_s3_path (str): The URI of the Spark output S3 Path.
        """
        if urlparse(spark_output_s3_path).scheme != "s3":
            raise ValueError(
                f"Invalid s3 path: {spark_output_s3_path}. Please enter something like "
                "s3://bucket-name/folder-name"
            )

    def _config_aws_credentials(self):
        """Configure AWS credentials."""
        try:
            creds = self.sagemaker_session.boto_session.get_credentials()
            access_key = creds.access_key
            secret_key = creds.secret_key
            token = creds.token

            return {
                "AWS_ACCESS_KEY_ID": str(access_key),
                "AWS_SECRET_ACCESS_KEY": str(secret_key),
                "AWS_SESSION_TOKEN": str(token),
            }
        except Exception as e:  # pylint: disable=W0703
            logger.info("Could not get AWS credentials: %s", e)
            return {}

    def _handle_script_dependencies(self, inputs, submit_files, file_type):
        """Handle script dependencies.

        The method extends inputs and command based on input files and file_type.
        """
        if not submit_files:
            return inputs

        input_channel_name_dict = {
            FileType.PYTHON: self._submit_py_files_input_channel_name,
            FileType.JAR: self._submit_jars_input_channel_name,
            FileType.FILE: self._submit_files_input_channel_name,
        }

        files_input, files_opt = self._stage_submit_deps(
            submit_files, input_channel_name_dict[file_type]
        )

        inputs = inputs or []
        if files_input:
            inputs.append(files_input)
        if files_opt:
            self.command.extend([f"--{input_channel_name_dict[file_type]}", files_opt])

        return inputs


class PySparkProcessor(_SparkProcessorBase):
    """Handles Amazon SageMaker processing tasks for jobs using PySpark."""

    def __init__(
        self,
        role,
        instance_type,
        instance_count,
        framework_version=None,
        py_version=None,
        container_version=None,
        image_uri=None,
        volume_size_in_gb=30,
        volume_kms_key=None,
        output_kms_key=None,
        max_runtime_in_seconds=None,
        base_job_name=None,
        sagemaker_session=None,
        env=None,
        tags=None,
        network_config=None,
    ):
        """Initialize an ``PySparkProcessor`` instance.

        The PySparkProcessor handles Amazon SageMaker processing tasks for jobs
        using SageMaker PySpark.

        Args:
            framework_version (str): The version of SageMaker PySpark.
            py_version (str): The version of python.
            container_version (str): The version of spark container.
            role (str): An AWS IAM role name or ARN. The Amazon SageMaker
                training jobs and APIs that create Amazon SageMaker endpoints
                use this role to access training data and model artifacts.
                After the endpoint is created, the inference code might use
                the IAM role, if it needs to access an AWS resource.
            instance_type (str): Type of EC2 instance to use for processing,
                for example, 'ml.c4.xlarge'.
            instance_count (int): The number of instances to run the Processing
                job with. Defaults to 1.
            volume_size_in_gb (int): Size in GB of the EBS volume to use for
                storing data during processing (default: 30).
            volume_kms_key (str): A KMS key for the processing volume.
            output_kms_key (str): The KMS key id for all ProcessingOutputs.
            max_runtime_in_seconds (int): Timeout in seconds. After this
                amount of time Amazon SageMaker terminates the job regardless
                of its current status.
            base_job_name (str): Prefix for processing name. If not specified,
                the processor generates a default job name, based on the
                training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which
                manages interactions with Amazon SageMaker APIs and any other
                AWS services needed. If not specified, the processor creates
                one using the default AWS configuration chain.
            env (dict): Environment variables to be passed to the processing job.
            tags ([dict]): List of tags to be passed to the processing job.
            network_config (sagemaker.network.NetworkConfig): A NetworkConfig
                object that configures network isolation, encryption of
                inter-container traffic, security group IDs, and subnets.
""" super(PySparkProcessor, self).__init__( role=role, instance_count=instance_count, instance_type=instance_type, framework_version=framework_version, py_version=py_version, container_version=container_version, image_uri=image_uri, volume_size_in_gb=volume_size_in_gb, volume_kms_key=volume_kms_key, output_kms_key=output_kms_key, max_runtime_in_seconds=max_runtime_in_seconds, base_job_name=base_job_name, sagemaker_session=sagemaker_session, env=env, tags=tags, network_config=network_config, ) def get_run_args( self, submit_app, submit_py_files=None, submit_jars=None, submit_files=None, inputs=None, outputs=None, arguments=None, job_name=None, configuration=None, spark_event_logs_s3_uri=None, ): """Returns a RunArgs object. This object contains the normalized inputs, outputs and arguments needed when using a ``PySparkProcessor`` in a :class:`~sagemaker.workflow.steps.ProcessingStep`. Args: submit_app (str): Path (local or S3) to Python file to submit to Spark as the primary application. This is translated to the `code` property on the returned `RunArgs` object. submit_py_files (list[str]): List of paths (local or S3) to provide for `spark-submit --py-files` option submit_jars (list[str]): List of paths (local or S3) to provide for `spark-submit --jars` option submit_files (list[str]): List of paths (local or S3) to provide for `spark-submit --files` option inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for the processing job. These must be provided as :class:`~sagemaker.processing.ProcessingInput` objects (default: None). outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for the processing job. These can be specified as either path strings or :class:`~sagemaker.processing.ProcessingOutput` objects (default: None). arguments (list[str]): A list of string arguments to be passed to a processing job (default: None). job_name (str): Processing job name. If not specified, the processor generates a default job name, based on the base job name and current timestamp. configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive. List or dictionary of EMR-style classifications. https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html spark_event_logs_s3_uri (str): S3 path where spark application events will be published to. """ self._current_job_name = self._generate_current_job_name(job_name=job_name) if not submit_app: raise ValueError("submit_app is required") extended_inputs, extended_outputs = self._extend_processing_args( inputs=inputs, outputs=outputs, submit_py_files=submit_py_files, submit_jars=submit_jars, submit_files=submit_files, configuration=configuration, spark_event_logs_s3_uri=spark_event_logs_s3_uri, ) return super().get_run_args( code=submit_app, inputs=extended_inputs, outputs=extended_outputs, arguments=arguments, ) def run( self, submit_app, submit_py_files=None, submit_jars=None, submit_files=None, inputs=None, outputs=None, arguments=None, wait=True, logs=True, job_name=None, experiment_config=None, configuration=None, spark_event_logs_s3_uri=None, kms_key=None, ): """Runs a processing job. 
Args: submit_app (str): Path (local or S3) to Python file to submit to Spark as the primary application submit_py_files (list[str]): List of paths (local or S3) to provide for `spark-submit --py-files` option submit_jars (list[str]): List of paths (local or S3) to provide for `spark-submit --jars` option submit_files (list[str]): List of paths (local or S3) to provide for `spark-submit --files` option inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for the processing job. These must be provided as :class:`~sagemaker.processing.ProcessingInput` objects (default: None). outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for the processing job. These can be specified as either path strings or :class:`~sagemaker.processing.ProcessingOutput` objects (default: None). arguments (list[str]): A list of string arguments to be passed to a processing job (default: None). wait (bool): Whether the call should wait until the job completes (default: True). logs (bool): Whether to show the logs produced by the job. Only meaningful when wait is True (default: True). job_name (str): Processing job name. If not specified, the processor generates a default job name, based on the base job name and current timestamp. experiment_config (dict[str, str]): Experiment management configuration. Optionally, the dict can contain three keys: 'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'. The behavior of setting these keys is as follows: * If `ExperimentName` is supplied but `TrialName` is not a Trial will be automatically created and the job's Trial Component associated with the Trial. * If `TrialName` is supplied and the Trial already exists the job's Trial Component will be associated with the Trial. * If both `ExperimentName` and `TrialName` are not supplied the trial component will be unassociated. * `TrialComponentDisplayName` is used for display in Studio. configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive. List or dictionary of EMR-style classifications. https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html spark_event_logs_s3_uri (str): S3 path where spark application events will be published to. kms_key (str): The ARN of the KMS key that is used to encrypt the user code file (default: None). """ self._current_job_name = self._generate_current_job_name(job_name=job_name) if not submit_app: raise ValueError("submit_app is required") extended_inputs, extended_outputs = self._extend_processing_args( inputs=inputs, outputs=outputs, submit_py_files=submit_py_files, submit_jars=submit_jars, submit_files=submit_files, configuration=configuration, spark_event_logs_s3_uri=spark_event_logs_s3_uri, ) super().run( submit_app=submit_app, inputs=extended_inputs, outputs=extended_outputs, arguments=arguments, wait=wait, logs=logs, job_name=self._current_job_name, experiment_config=experiment_config, kms_key=kms_key, ) def _extend_processing_args(self, inputs, outputs, **kwargs): """Extends inputs and outputs. Args: inputs: Processing inputs. outputs: Processing outputs. kwargs: Additional keyword arguments passed to `super()`. 
""" self.command = [_SparkProcessorBase._default_command] extended_inputs = self._handle_script_dependencies( inputs, kwargs.get("submit_py_files"), FileType.PYTHON ) extended_inputs = self._handle_script_dependencies( extended_inputs, kwargs.get("submit_jars"), FileType.JAR ) extended_inputs = self._handle_script_dependencies( extended_inputs, kwargs.get("submit_files"), FileType.FILE ) return super()._extend_processing_args(extended_inputs, outputs, **kwargs) class SparkJarProcessor(_SparkProcessorBase): """Handles Amazon SageMaker processing tasks for jobs using Spark with Java or Scala Jars.""" def __init__( self, role, instance_type, instance_count, framework_version=None, py_version=None, container_version=None, image_uri=None, volume_size_in_gb=30, volume_kms_key=None, output_kms_key=None, max_runtime_in_seconds=None, base_job_name=None, sagemaker_session=None, env=None, tags=None, network_config=None, ): """Initialize a ``SparkJarProcessor`` instance. The SparkProcessor handles Amazon SageMaker processing tasks for jobs using SageMaker Spark. Args: framework_version (str): The version of SageMaker PySpark. py_version (str): The version of python. container_version (str): The version of spark container. role (str): An AWS IAM role name or ARN. The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. After the endpoint is created, the inference code might use the IAM role, if it needs to access an AWS resource. instance_type (str): Type of EC2 instance to use for processing, for example, 'ml.c4.xlarge'. instance_count (int): The number of instances to run the Processing job with. Defaults to 1. volume_size_in_gb (int): Size in GB of the EBS volume to use for storing data during processing (default: 30). volume_kms_key (str): A KMS key for the processing volume. output_kms_key (str): The KMS key id for all ProcessingOutputs. max_runtime_in_seconds (int): Timeout in seconds. After this amount of time Amazon SageMaker terminates the job regardless of its current status. base_job_name (str): Prefix for processing name. If not specified, the processor generates a default job name, based on the training image name and current timestamp. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the processor creates one using the default AWS configuration chain. env (dict): Environment variables to be passed to the processing job. tags ([dict]): List of tags to be passed to the processing job. network_config (sagemaker.network.NetworkConfig): A NetworkConfig object that configures network isolation, encryption of inter-container traffic, security group IDs, and subnets. """ super(SparkJarProcessor, self).__init__( role=role, instance_count=instance_count, instance_type=instance_type, framework_version=framework_version, py_version=py_version, container_version=container_version, image_uri=image_uri, volume_size_in_gb=volume_size_in_gb, volume_kms_key=volume_kms_key, output_kms_key=output_kms_key, max_runtime_in_seconds=max_runtime_in_seconds, base_job_name=base_job_name, sagemaker_session=sagemaker_session, env=env, tags=tags, network_config=network_config, ) def get_run_args( self, submit_app, submit_class=None, submit_jars=None, submit_files=None, inputs=None, outputs=None, arguments=None, job_name=None, configuration=None, spark_event_logs_s3_uri=None, ): """Returns a RunArgs object. 
This object contains the normalized inputs, outputs and arguments
        needed when using a ``SparkJarProcessor`` in a
        :class:`~sagemaker.workflow.steps.ProcessingStep`.

        Args:
            submit_app (str): Path (local or S3) to Jar file to submit to Spark
                as the primary application. This is translated to the `code`
                property on the returned `RunArgs` object
            submit_class (str): Java class reference to submit to Spark
                as the primary application
            submit_jars (list[str]): List of paths (local or S3) to provide for
                `spark-submit --jars` option
            submit_files (list[str]): List of paths (local or S3) to provide for
                `spark-submit --files` option
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive.
                List or dictionary of EMR-style classifications.
                https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html
            spark_event_logs_s3_uri (str): S3 path where spark application events will
                be published to.
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        if not submit_app:
            raise ValueError("submit_app is required")

        extended_inputs, extended_outputs = self._extend_processing_args(
            inputs=inputs,
            outputs=outputs,
            submit_class=submit_class,
            submit_jars=submit_jars,
            submit_files=submit_files,
            configuration=configuration,
            spark_event_logs_s3_uri=spark_event_logs_s3_uri,
        )

        return super().get_run_args(
            code=submit_app,
            inputs=extended_inputs,
            outputs=extended_outputs,
            arguments=arguments,
        )

    def run(
        self,
        submit_app,
        submit_class=None,
        submit_jars=None,
        submit_files=None,
        inputs=None,
        outputs=None,
        arguments=None,
        wait=True,
        logs=True,
        job_name=None,
        experiment_config=None,
        configuration=None,
        spark_event_logs_s3_uri=None,
        kms_key=None,
    ):
        """Runs a processing job.

        Args:
            submit_app (str): Path (local or S3) to Jar file to submit to Spark
                as the primary application
            submit_class (str): Java class reference to submit to Spark
                as the primary application
            submit_jars (list[str]): List of paths (local or S3) to provide for
                `spark-submit --jars` option
            submit_files (list[str]): List of paths (local or S3) to provide for
                `spark-submit --files` option
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
            wait (bool): Whether the call should wait until the job completes (default: True).
            logs (bool): Whether to show the logs produced by the job.
                Only meaningful when wait is True (default: True).
            job_name (str): Processing job name.
If not specified, the processor generates a default job name, based on the base job name and current timestamp. experiment_config (dict[str, str]): Experiment management configuration. Optionally, the dict can contain three keys: 'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'. The behavior of setting these keys is as follows: * If `ExperimentName` is supplied but `TrialName` is not a Trial will be automatically created and the job's Trial Component associated with the Trial. * If `TrialName` is supplied and the Trial already exists the job's Trial Component will be associated with the Trial. * If both `ExperimentName` and `TrialName` are not supplied the trial component will be unassociated. * `TrialComponentDisplayName` is used for display in Studio. configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive. List or dictionary of EMR-style classifications. https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html spark_event_logs_s3_uri (str): S3 path where spark application events will be published to. kms_key (str): The ARN of the KMS key that is used to encrypt the user code file (default: None). """ self._current_job_name = self._generate_current_job_name(job_name=job_name) if not submit_app: raise ValueError("submit_app is required") extended_inputs, extended_outputs = self._extend_processing_args( inputs=inputs, outputs=outputs, submit_class=submit_class, submit_jars=submit_jars, submit_files=submit_files, configuration=configuration, spark_event_logs_s3_uri=spark_event_logs_s3_uri, ) super().run( submit_app=submit_app, inputs=extended_inputs, outputs=extended_outputs, arguments=arguments, wait=wait, logs=logs, job_name=self._current_job_name, experiment_config=experiment_config, kms_key=kms_key, ) def _extend_processing_args(self, inputs, outputs, **kwargs): self.command = [_SparkProcessorBase._default_command] if kwargs.get("submit_class"): self.command.extend(["--class", kwargs.get("submit_class")]) else: raise ValueError("submit_class is required") extended_inputs = self._handle_script_dependencies( inputs, kwargs.get("submit_jars"), FileType.JAR ) extended_inputs = self._handle_script_dependencies( extended_inputs, kwargs.get("submit_files"), FileType.FILE ) return super()._extend_processing_args(extended_inputs, outputs, **kwargs) class _HistoryServer: """History server class that is responsible for starting history server.""" _container_name = "history_server" _entry_point = "smspark-history-server" arg_event_logs_s3_uri = "event_logs_s3_uri" arg_remote_domain_name = "remote_domain_name" _history_server_args_format_map = { arg_event_logs_s3_uri: "--event-logs-s3-uri {} ", arg_remote_domain_name: "--remote-domain-name {} ", } def __init__(self, cli_args, image_uri, network_config): self.cli_args = cli_args self.image_uri = image_uri self.network_config = network_config self.run_history_server_command = self._get_run_history_server_cmd() def run(self): """Runs the history server.""" self.down() logger.info("Starting history server...") subprocess.Popen( self.run_history_server_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) def down(self): """Stops and removes the container.""" subprocess.call(["docker", "stop", self._container_name]) subprocess.call(["docker", "rm", self._container_name]) logger.info("History server terminated") # This method belongs to _HistoryServer because _CONTAINER_NAME(app name) belongs # to _HistoryServer. 
# In the future, dynamically creating a new app name and picking an
    # available port should also belong to _HistoryServer rather than
    # PySparkProcessor.
    def _get_run_history_server_cmd(self):
        """Gets the history server command."""
        env_options = ""
        ser_cli_args = ""
        for key, value in self.cli_args.items():
            if key in self._history_server_args_format_map:
                ser_cli_args += self._history_server_args_format_map[key].format(value)
            else:
                env_options += f"--env {key}={value} "

        cmd = (
            f"docker run {env_options.strip()} --name {self._container_name} "
            f"{self.network_config} --entrypoint {self._entry_point} {self.image_uri} "
            f"{ser_cli_args.strip()}"
        )

        return cmd


class FileType(Enum):
    """Enum of file types."""

    JAR = 1
    PYTHON = 2
    FILE = 3
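For orientation, a minimal usage sketch of the PySparkProcessor API defined above. The role ARN, bucket names, and script paths are placeholders to substitute with values from your own account; the framework version must match one of the published SageMaker Spark container releases.

from sagemaker.spark.processing import PySparkProcessor

spark_processor = PySparkProcessor(
    base_job_name="sm-spark",
    framework_version="3.1",
    role="arn:aws:iam::111122223333:role/SageMakerRole",  # placeholder role ARN
    instance_count=2,
    instance_type="ml.m5.xlarge",
    max_runtime_in_seconds=1200,
)

spark_processor.run(
    submit_app="./code/preprocess.py",            # primary PySpark script
    submit_py_files=["./code/helpers.py"],        # shipped via spark-submit --py-files
    arguments=["--input", "s3://my-bucket/raw"],  # forwarded to the script
    spark_event_logs_s3_uri="s3://my-bucket/spark-events",
)

# Once event logs are published, the Spark UI can be inspected locally:
spark_processor.start_history_server(
    spark_event_logs_s3_uri="s3://my-bucket/spark-events"
)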
/* * Allocate the static TLS area. Return a pointer to the TCB. The * static area is based on negative offsets relative to the tcb. * * The TCB contains an errno pointer for the system call layer, but because * we are the RTLD we really have no idea how the caller was compiled so * the information has to be passed in. errno can either be: * * type 0 errno is a simple non-TLS global pointer. * (special case for e.g. libc_rtld) * type 1 errno accessed by GOT entry (dynamically linked programs) * type 2 errno accessed by %gs:OFFSET (statically linked programs) */ struct tls_tcb * allocate_tls(Obj_Entry *objs) { Obj_Entry *obj; size_t data_size; size_t dtv_size; struct tls_tcb *tcb; Elf_Addr *dtv; Elf_Addr addr; data_size = (tls_static_space + RTLD_STATIC_TLS_ALIGN_MASK) & ~RTLD_STATIC_TLS_ALIGN_MASK; tcb = malloc(data_size + sizeof(*tcb)); tcb = (void *)((char *)tcb + data_size); dtv_size = (tls_max_index + 2) * sizeof(Elf_Addr); dtv = malloc(dtv_size); bzero(dtv, dtv_size); #ifdef RTLD_TCB_HAS_SELF_POINTER tcb->tcb_self = tcb; #endif tcb->tcb_dtv = dtv; tcb->tcb_pthread = NULL; dtv[0] = tls_dtv_generation; dtv[1] = tls_max_index; for (obj = objs; obj; obj = obj->next) { if (obj->tlsoffset) { addr = (Elf_Addr)tcb - obj->tlsoffset; memset((void *)(addr + obj->tlsinitsize), 0, obj->tlssize - obj->tlsinitsize); if (obj->tlsinit) memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); dtv[obj->tlsindex + 1] = addr; } } return(tcb); }
/** * Scan is a structure which describes Cloud Key Visualizer scan information. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Cloud Spanner API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class Scan extends com.google.api.client.json.GenericJson { /** * Additional information provided by the implementer. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.Map<String, java.lang.Object> details; /** * The upper bound for when the scan is defined. * The value may be {@code null}. */ @com.google.api.client.util.Key private String endTime; /** * The unique name of the scan, specific to the Database service implementing this interface. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String name; /** * Output only. Cloud Key Visualizer scan data. Note, this field is not available to the ListScans * method. * The value may be {@code null}. */ @com.google.api.client.util.Key private ScanData scanData; /** * A range of time (inclusive) for when the scan is defined. The lower bound for when the scan is * defined. * The value may be {@code null}. */ @com.google.api.client.util.Key private String startTime; /** * Additional information provided by the implementer. * @return value or {@code null} for none */ public java.util.Map<String, java.lang.Object> getDetails() { return details; } /** * Additional information provided by the implementer. * @param details details or {@code null} for none */ public Scan setDetails(java.util.Map<String, java.lang.Object> details) { this.details = details; return this; } /** * The upper bound for when the scan is defined. * @return value or {@code null} for none */ public String getEndTime() { return endTime; } /** * The upper bound for when the scan is defined. * @param endTime endTime or {@code null} for none */ public Scan setEndTime(String endTime) { this.endTime = endTime; return this; } /** * The unique name of the scan, specific to the Database service implementing this interface. * @return value or {@code null} for none */ public java.lang.String getName() { return name; } /** * The unique name of the scan, specific to the Database service implementing this interface. * @param name name or {@code null} for none */ public Scan setName(java.lang.String name) { this.name = name; return this; } /** * Output only. Cloud Key Visualizer scan data. Note, this field is not available to the ListScans * method. * @return value or {@code null} for none */ public ScanData getScanData() { return scanData; } /** * Output only. Cloud Key Visualizer scan data. Note, this field is not available to the ListScans * method. * @param scanData scanData or {@code null} for none */ public Scan setScanData(ScanData scanData) { this.scanData = scanData; return this; } /** * A range of time (inclusive) for when the scan is defined. The lower bound for when the scan is * defined. * @return value or {@code null} for none */ public String getStartTime() { return startTime; } /** * A range of time (inclusive) for when the scan is defined. The lower bound for when the scan is * defined. 
* @param startTime startTime or {@code null} for none */ public Scan setStartTime(String startTime) { this.startTime = startTime; return this; } @Override public Scan set(String fieldName, Object value) { return (Scan) super.set(fieldName, value); } @Override public Scan clone() { return (Scan) super.clone(); } }
"""Plotting utilities."""
import datetime as dt

import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
from astropy import coordinates as apc
from astropy import time as apt
from astropy import units as apu
from scipy import interpolate as interp

from . import beams
from . import sky_models
from .. import const


def plot_sky_model():
    """Plot a Haslam sky model."""
    # Loading Haslam map
    map408, (lon, lat, gc) = sky_models.haslam_408MHz_map()
    ipole = 2.65
    icenter = 2.4
    sigma_deg = 8.5
    i1 = ipole - (ipole - icenter) * np.exp(-(1 / 2) * (np.abs(lat) / sigma_deg) ** 2)
    Tcmb = 2.725
    s = (map408 - Tcmb) * (90 / 408) ** (-i1) + Tcmb
    hp.cartview(
        np.log10(s),
        nest="true",
        coord=("G", "C"),
        flip="geo",
        title="",
        notext="true",
        min=2.7,
        max=4.3,
        unit=r"log($T_{\mathrm{sky}}$)",
        rot=[5.76667 * 15, 0, 0],
        cmap="jet",
    )

    hp.graticule(local=True)

    beam = beams.feko_read(
        "mid_band", 0, frequency_interpolation=False, az_antenna_axis=90
    )

    beam90 = beam[20, :, :]
    beam90n = beam90 / np.max(beam90)

    FWHM = np.zeros((360, 2))
    EL_raw = np.arange(0, 91, 1)
    EL_new = np.arange(0, 90.01, 0.01)
    for j in range(len(beam90[0, :])):
        func = interp.interp1d(EL_raw, beam90n[:, j])
        beam90n_interp = func(EL_new)

        # Track the elevation whose normalized gain is closest to the
        # half-power level (0.5) for this azimuth cut.
        min_diff = 100
        for el, gain in zip(EL_new, beam90n_interp):
            diff = np.abs(gain - 0.5)
            if diff < min_diff:
                min_diff = diff
                FWHM[j, 0] = j
                FWHM[j, 1] = 90 - el

    def get_ra_dec(time_iter_utc):
        time_iter_utc_dt = dt.datetime(*time_iter_utc)
        alt_az = apc.SkyCoord(
            alt=(90 - FWHM[:, 1]) * apu.deg,
            az=FWHM[:, 0] * apu.deg,
            frame="altaz",
            obstime=apt.Time(time_iter_utc_dt, format="datetime"),
            location=const.edges_location,
        )
        ra_dec = alt_az.icrs
        ra = np.asarray(ra_dec.ra)
        dec = np.asarray(ra_dec.dec)
        ra[ra > 180] -= 360

        return ra, dec

    # Converting Beam Contours from Local to Equatorial coordinates
    ra_start, dec_start = get_ra_dec([2014, 1, 1, 3, 31, 0])
    ra_middle, dec_middle = get_ra_dec([2014, 1, 1, 9, 30, 0])
    ra_end, dec_end = get_ra_dec([2014, 1, 1, 15, 29, 0])

    plt.plot(np.arange(-180, 181, 1), -26.7 * np.ones(361), "y--", linewidth=2)
    plt.plot(ra_start, dec_start, "w", linewidth=3)
    plt.plot(ra_middle, dec_middle, "w--", linewidth=3)
    plt.plot(ra_end, dec_end, "w:", linewidth=3)

    plt.plot(-6 * (360 / 24), -26.7, "x", color="1", markersize=5, mew=2)
    plt.plot(0 * (360 / 24), -26.7, "x", color="1", markersize=5, mew=2)
    plt.plot(6 * (360 / 24), -26.7, "x", color="1", markersize=5, mew=2)

    off_x = -4
    off_y = -12
    for i in range(0, 26, 2):
        plt.text(-180 + i * 15 + off_x, -90 + off_y, str(i))
    plt.text(-60, -115, "galactic hour angle [hr]")

    off_y = -3
    # Step must be negative to count down from +90 to -90 declination.
    for j in range(90, -120, -30):
        off_x = -15 if j > 0 else (-10 if j == 0 else -19)
        plt.text(-180 + off_x, j + off_y, str(j))
    plt.text(-210, 45, "declination [degrees]", rotation=90)
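The per-azimuth FWHM loop above can be collapsed into array operations. A vectorized sketch follows (an illustration assuming the same beam90n, EL_raw, and EL_new definitions; it is not part of the module):

import numpy as np
from scipy import interpolate as interp


def fwhm_contour(beam90n, el_raw, el_new):
    """Vectorized half-power contour: interpolate every azimuth cut at once,
    then pick the elevation whose normalized gain is nearest 0.5."""
    # interp1d with axis=0 interpolates along elevation for all azimuth columns
    beam_fine = interp.interp1d(el_raw, beam90n, axis=0)(el_new)
    half_idx = np.argmin(np.abs(beam_fine - 0.5), axis=0)
    return 90 - el_new[half_idx]  # zenith angle of the half-power point per cut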
#pragma once

#include "renderer/tasks/task.h"

namespace LRender {
	class Renderer::Task::Select final : public Task {
	public:
		Select(float x, float y);

		void perform(Renderer &renderer) override final;

	private:
		const float x;
		const float y;
	};
}
import { FC, useMemo, useState } from "react";
import { Row, Col, Form, Tab, Tabs } from "react-bootstrap";
import { useForm } from "react-hook-form";
import { yupResolver } from "@hookform/resolvers/yup";
import cx from "classnames";
import { Link } from "react-router-dom";
import { faExclamationTriangle } from "@fortawesome/free-solid-svg-icons";

import { Studio_findStudio as Studio } from "src/graphql/definitions/Studio";
import { StudioEditDetailsInput, ValidSiteTypeEnum } from "src/graphql";
import { Icon } from "src/components/fragments";
import StudioSelect from "src/components/studioSelect";
import EditImages from "src/components/editImages";
import { EditNote, NavButtons, SubmitButtons } from "src/components/form";
import URLInput from "src/components/urlInput";
import { renderStudioDetails } from "src/components/editCard/ModifyEdit";

import { StudioSchema, StudioFormData } from "./schema";
import DiffStudio from "./diff";

interface StudioProps {
  studio: Studio;
  callback: (data: StudioEditDetailsInput, editNote: string) => void;
  showNetworkSelect?: boolean;
  saving: boolean;
}

const StudioForm: FC<StudioProps> = ({
  studio,
  callback,
  showNetworkSelect = true,
  saving,
}) => {
  const {
    register,
    control,
    handleSubmit,
    watch,
    formState: { errors },
  } = useForm<StudioFormData>({
    resolver: yupResolver(StudioSchema),
    defaultValues: {
      name: studio.name,
      images: studio.images,
      urls: studio.urls ?? [],
      studio: studio.parent
        ? {
            id: studio.parent.id,
            name: studio.parent.name,
          }
        : undefined,
    },
  });

  const [file, setFile] = useState<File | undefined>();

  const fieldData = watch();
  const [oldStudioChanges, newStudioChanges] = useMemo(
    () => DiffStudio(StudioSchema.cast(fieldData), studio),
    [fieldData, studio]
  );

  const [activeTab, setActiveTab] = useState("details");

  const onSubmit = (data: StudioFormData) => {
    const callbackData: StudioEditDetailsInput = {
      name: data.name,
      urls: data.urls.map((u) => ({
        url: u.url,
        site_id: u.site.id,
      })),
      image_ids: data.images.map((i) => i.id),
      parent_id: data.studio?.id,
    };
    callback(callbackData, data.note);
  };

  const metadataErrors = [
    { error: errors.name?.message, tab: "details" },
    {
      error: errors.urls?.find((u) => u?.url?.message)?.url?.message,
      tab: "links",
    },
  ].filter((e) => e.error) as { error: string; tab: string }[];

  return (
    <Form className="StudioForm" onSubmit={handleSubmit(onSubmit)}>
      <Tabs
        activeKey={activeTab}
        onSelect={(key) => key && setActiveTab(key)}
        className="d-flex"
      >
        <Tab eventKey="details" title="Details" className="col-xl-6">
          <Form.Group controlId="name" className="mb-3">
            <Form.Label>Name</Form.Label>
            <Form.Control
              className={cx({ "is-invalid": errors.name })}
              placeholder="Name"
              defaultValue={studio.name}
              {...register("name")}
            />
            <Form.Control.Feedback type="invalid">
              {errors?.name?.message}
            </Form.Control.Feedback>
          </Form.Group>

          {showNetworkSelect && (
            <Form.Group controlId="network" className="mb-3">
              <Form.Label>Network</Form.Label>
              <StudioSelect
                excludeStudio={studio.id}
                control={control}
                initialStudio={studio.parent}
                isClearable
                networkSelect
              />
            </Form.Group>
          )}

          <NavButtons onNext={() => setActiveTab("links")} />
        </Tab>

        <Tab eventKey="links" title="Links" className="col-xl-9">
          <Form.Group className="mb-3">
            <Form.Label>Links</Form.Label>
            <URLInput
              control={control}
              type={ValidSiteTypeEnum.STUDIO}
              errors={errors.urls}
            />
          </Form.Group>

          <NavButtons onNext={() => setActiveTab("images")} />
        </Tab>

        <Tab eventKey="images" title="Images" className="col-xl-6">
          <EditImages
control={control} maxImages={1} file={file} setFile={(f) => setFile(f)} /> <NavButtons onNext={() => setActiveTab("confirm")} disabled={!!file} /> <div className="d-flex"> {/* dummy element for feedback */} <div className="ms-auto"> <span className={file ? "is-invalid" : ""} /> <Form.Control.Feedback type="invalid"> Upload or remove image to continue. </Form.Control.Feedback> </div> </div> </Tab> <Tab eventKey="confirm" title="Confirm" className="mt-3 col-xl-9"> {renderStudioDetails(newStudioChanges, oldStudioChanges, true)} <Row className="my-4"> <Col md={{ span: 8, offset: 4 }}> <EditNote register={register} error={errors.note} /> </Col> </Row> {metadataErrors.length > 0 && ( <div className="text-end my-4"> <h6> <Icon icon={faExclamationTriangle} color="red" /> <span className="ms-1">Errors</span> </h6> <div className="d-flex flex-column text-danger"> {metadataErrors.map(({ error, tab }) => ( <Link to="#" key={error} onClick={() => setActiveTab(tab)}> {error} </Link> ))} </div> </div> )} <SubmitButtons disabled={!!file || saving} /> </Tab> </Tabs> </Form> ); }; export default StudioForm;
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "flutter/shell/platform/linux/public/flutter_linux/fl_method_channel.h"

#include "flutter/shell/platform/linux/fl_method_call_private.h"
#include "flutter/shell/platform/linux/fl_method_channel_private.h"
#include "flutter/shell/platform/linux/fl_method_codec_private.h"

#include <gmodule.h>

struct _FlMethodChannel {
  GObject parent_instance;

  // Messenger to communicate on
  FlBinaryMessenger* messenger;

  // Channel name
  gchar* name;

  // Codec to en/decode messages
  FlMethodCodec* codec;

  // Function called when a method call is received
  FlMethodChannelMethodCallHandler method_call_handler;
  gpointer method_call_handler_data;
};

// Added here to stop the compiler from optimising this function away
G_MODULE_EXPORT GType fl_method_channel_get_type();

G_DEFINE_TYPE(FlMethodChannel, fl_method_channel, G_TYPE_OBJECT)

// Called when a binary message is received on this channel
static void message_cb(FlBinaryMessenger* messenger,
                       const gchar* channel,
                       GBytes* message,
                       FlBinaryMessengerResponseHandle* response_handle,
                       gpointer user_data) {
  FlMethodChannel* self = FL_METHOD_CHANNEL(user_data);

  if (self->method_call_handler == nullptr)
    return;

  g_autofree gchar* method = nullptr;
  g_autoptr(FlValue) args = nullptr;
  g_autoptr(GError) error = nullptr;
  if (!fl_method_codec_decode_method_call(self->codec, message, &method, &args,
                                          &error)) {
    g_warning("Failed to decode method call: %s", error->message);
    return;
  }

  g_autoptr(FlMethodCall) method_call =
      fl_method_call_new(method, args, self, response_handle);
  self->method_call_handler(self, method_call, self->method_call_handler_data);
}

// Called when a response is received to a sent message
static void message_response_cb(GObject* object,
                                GAsyncResult* result,
                                gpointer user_data) {
  GTask* task = G_TASK(user_data);
  // The result is only guaranteed to stay alive for the duration of this
  // callback, so take a reference before storing it in the task.
  g_task_return_pointer(task, g_object_ref(result), g_object_unref);
}

static void fl_method_channel_dispose(GObject* object) {
  FlMethodChannel* self = FL_METHOD_CHANNEL(object);

  if (self->messenger != nullptr)
    fl_binary_messenger_set_message_handler_on_channel(
        self->messenger, self->name, nullptr, nullptr);

  g_clear_object(&self->messenger);
  g_clear_pointer(&self->name, g_free);
  g_clear_object(&self->codec);

  G_OBJECT_CLASS(fl_method_channel_parent_class)->dispose(object);
}

static void fl_method_channel_class_init(FlMethodChannelClass* klass) {
  G_OBJECT_CLASS(klass)->dispose = fl_method_channel_dispose;
}

static void fl_method_channel_init(FlMethodChannel* self) {}

G_MODULE_EXPORT FlMethodChannel* fl_method_channel_new(
    FlBinaryMessenger* messenger,
    const gchar* name,
    FlMethodCodec* codec) {
  g_return_val_if_fail(FL_IS_BINARY_MESSENGER(messenger), nullptr);
  g_return_val_if_fail(name != nullptr, nullptr);
  g_return_val_if_fail(FL_IS_METHOD_CODEC(codec), nullptr);

  FlMethodChannel* self =
      FL_METHOD_CHANNEL(g_object_new(fl_method_channel_get_type(), nullptr));

  self->messenger = FL_BINARY_MESSENGER(g_object_ref(messenger));
  self->name = g_strdup(name);
  self->codec = FL_METHOD_CODEC(g_object_ref(codec));

  fl_binary_messenger_set_message_handler_on_channel(
      self->messenger, self->name, message_cb, self);

  return self;
}

G_MODULE_EXPORT void fl_method_channel_set_method_call_handler(
    FlMethodChannel* self,
    FlMethodChannelMethodCallHandler handler,
    gpointer user_data) {
  g_return_if_fail(FL_IS_METHOD_CHANNEL(self));

  self->method_call_handler = handler;
  self->method_call_handler_data = user_data;
}
G_MODULE_EXPORT void fl_method_channel_invoke_method(
    FlMethodChannel* self,
    const gchar* method,
    FlValue* args,
    GCancellable* cancellable,
    GAsyncReadyCallback callback,
    gpointer user_data) {
  g_return_if_fail(FL_IS_METHOD_CHANNEL(self));
  g_return_if_fail(method != nullptr);

  g_autoptr(GTask) task =
      callback != nullptr ? g_task_new(self, cancellable, callback, user_data)
                          : nullptr;

  g_autoptr(GError) error = nullptr;
  g_autoptr(GBytes) message =
      fl_method_codec_encode_method_call(self->codec, method, args, &error);
  if (message == nullptr) {
    // g_task_return_error() takes ownership of the error, so steal it from
    // the autoptr to avoid a double free.
    if (task != nullptr)
      g_task_return_error(task, g_steal_pointer(&error));
    return;
  }

  fl_binary_messenger_send_on_channel(
      self->messenger, self->name, message, cancellable,
      callback != nullptr ? message_response_cb : nullptr,
      g_steal_pointer(&task));
}

G_MODULE_EXPORT FlMethodResponse* fl_method_channel_invoke_method_finish(
    FlMethodChannel* self,
    GAsyncResult* result,
    GError** error) {
  g_return_val_if_fail(FL_IS_METHOD_CHANNEL(self), nullptr);
  g_return_val_if_fail(g_task_is_valid(result, self), nullptr);

  g_autoptr(GTask) task = G_TASK(result);
  GAsyncResult* r = G_ASYNC_RESULT(g_task_propagate_pointer(task, nullptr));

  g_autoptr(GBytes) response =
      fl_binary_messenger_send_on_channel_finish(self->messenger, r, error);
  if (response == nullptr)
    return nullptr;

  return fl_method_codec_decode_response(self->codec, response, error);
}

gboolean fl_method_channel_respond(
    FlMethodChannel* self,
    FlBinaryMessengerResponseHandle* response_handle,
    FlMethodResponse* response,
    GError** error) {
  g_return_val_if_fail(FL_IS_METHOD_CHANNEL(self), FALSE);
  g_return_val_if_fail(FL_IS_BINARY_MESSENGER_RESPONSE_HANDLE(response_handle),
                       FALSE);
  g_return_val_if_fail(FL_IS_METHOD_SUCCESS_RESPONSE(response) ||
                           FL_IS_METHOD_ERROR_RESPONSE(response) ||
                           FL_IS_METHOD_NOT_IMPLEMENTED_RESPONSE(response),
                       FALSE);

  g_autoptr(GBytes) message = nullptr;
  if (FL_IS_METHOD_SUCCESS_RESPONSE(response)) {
    FlMethodSuccessResponse* r = FL_METHOD_SUCCESS_RESPONSE(response);
    message = fl_method_codec_encode_success_envelope(
        self->codec, fl_method_success_response_get_result(r), error);
  } else if (FL_IS_METHOD_ERROR_RESPONSE(response)) {
    FlMethodErrorResponse* r = FL_METHOD_ERROR_RESPONSE(response);
    message = fl_method_codec_encode_error_envelope(
        self->codec, fl_method_error_response_get_code(r),
        fl_method_error_response_get_message(r),
        fl_method_error_response_get_details(r), error);
  } else if (FL_IS_METHOD_NOT_IMPLEMENTED_RESPONSE(response))
    message = nullptr;
  else
    g_assert_not_reached();

  return fl_binary_messenger_send_response(self->messenger, response_handle,
                                           message, error);
}
def reply(self, text: str, **kwargs) -> None:
    text_bytes = text.encode("UTF-8")
    if len(text_bytes) > MAX_MESSAGE_LENGTH:
        end = b'\n\n...'
        # Trim to the byte limit, then decode with errors="ignore" so that a
        # multi-byte character cut in half at the slice point is dropped
        # instead of raising UnicodeDecodeError.
        text_bytes = text_bytes[:MAX_MESSAGE_LENGTH - len(end)]
        text = text_bytes.decode("UTF-8", errors="ignore") + end.decode("UTF-8")

    self._message = self._context.bot.send_message(
        chat_id=self._message.chat_id,
        parse_mode=ParseMode.MARKDOWN,
        reply_to_message_id=self._message.message_id,
        text=text,
        **kwargs
    )
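The errors="ignore" decode above matters because slicing UTF-8 at an arbitrary byte offset can split a multi-byte character. A standalone illustration:

text = "héllo"               # 'é' encodes to two bytes in UTF-8
raw = text.encode("UTF-8")

try:
    raw[:2].decode("UTF-8")  # the cut falls inside 'é'
except UnicodeDecodeError as exc:
    print("strict decode fails:", exc)

print(raw[:2].decode("UTF-8", errors="ignore"))  # -> 'h'; the partial byte is dropped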
Current priming solutions for cardiopulmonary bypass contain a potent coronary vasodilator

We studied the effects of acetate (Ac), the buffer in crystalloids used during cardiopulmonary bypass (CPB) (e.g. Normosol-R and Plasma-Lyte, Ac = 27 mEq/L), on coronary resistance (R) for the empty beating heart, and on systemic vascular resistance (SVR) of 19 dogs on CPB. Control R with Ac = 0 (Rc) was obtained by adjusting flow to give a pressure of 90 ± 10 mmHg (R = 532 ± 32 dynes·sec/cm⁵·g; other Rs are given as a percentage of their respective Rc). Continuous addition of Ac = 1.6 or 7.1 mEq/L of blood resulted in a rapid decrease in R to 34 ± 3 and 17 ± 2% of Rc, followed by a recovery to higher steady values of 53 ± 3 and 27 ± 2% respectively (p < 0.008). A bolus administration of 2 mEq and 6 mEq into the coronary circuit resulted in R dropping to 30 ± 5 and 17 ± 6% respectively. Rs obtained with Ac were compared to the Rs obtained for control 20-sec reactive hyperaemia (35 ± 1%), after 30 minutes of global ischaemia (14 ± 1%), and with the addition of bicarbonate (98 ± 2%). Ac did not change heart rate, but caused an increase in O2 consumption from 3.4 ± 0.4 to 4.8 ± 0.7 ml/min/100 g (p < 0.038). Systemically, a bolus of 6 mEq of Ac caused an immediate drop in SVR to 45 ± 5% of control. The property of Ac as a potent coronary and vascular dilator may need to be considered when Ac is used during cardiac surgery. Statistical comparisons were made with the paired t-test.
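To make the percentage reporting concrete, the values convert back to absolute resistances through the control value; for example, the steady state after continuous Ac = 1.6 mEq/L works out to

$$R_{\text{steady}} \approx 0.53 \times R_c = 0.53 \times 532 \approx 282\ \text{dynes·sec/cm}^5\text{·g}$$

(an illustrative conversion from the figures above, not a number reported by the study).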
n, naib = map(int, input().split())
a = list(map(int, input().split()))
s = sum(a)

# Find the smallest number of extra maximum grades (naib) that must be added
# so the average rounds up to naib, i.e. (s + naib*dop) / (n + dop) >= naib - 0.5.
dop = 0
while (s + naib * dop) / (n + dop) < naib - 0.5:
    dop += 1
print(dop)
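The loop runs in time proportional to the answer, but the condition also admits a closed form: multiplying both sides of (s + naib*dop)/(n + dop) >= naib - 0.5 by 2(n + dop) and rearranging gives dop >= n*(2*naib - 1) - 2*s. A constant-time equivalent, assuming the same input format:

n, naib = map(int, input().split())
grades = list(map(int, input().split()))
# Answer is the rearranged bound, clamped at zero when no extra grades are needed.
print(max(0, n * (2 * naib - 1) - 2 * sum(grades)))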
/** * Recursively removes all the listeners of all children of this animator */ public static void removeAnimationListenersRecursive(Animator animator) { if (animator instanceof AnimatorSet) { ArrayList<Animator> animators = ((AnimatorSet) animator).getChildAnimations(); for (int i = animators.size() - 1; i >= 0; i--) { removeAnimationListenersRecursive(animators.get(i)); } } animator.removeAllListeners(); }
def score_event(self, game): score_time = 0 if self.x >= consts.SCREEN_WIDTH or self.x <= 0: if self.x >= consts.SCREEN_WIDTH: game.first_paddle.score += 1 game.ball = Ball(x_direction_right=False) else: game.second_paddle.score += 1 game.ball = Ball(x_direction_right=True) game.state = 'score' score_time = time() return score_time
Venezuela has been in hyperinflation, causing the price of basic goods to soar; the inflation rate is expected to reach 1,600%. Many residents have turned to Bitcoin mining as a way through the money crisis.

Bitcoin mining uses computer hardware to solve the mathematical puzzles that add new blocks to the currency's blockchain, and the network rewards the miners who solve them with newly issued bitcoins. Those bitcoins are then traded against conventional currency, which creates a market for non-miners and gives miners a way to cash out. To move the proceeds, miners use exchange websites and phone payment applications such as Venmo.

The digital currency is attractive in Venezuela because electricity there is cheap, so miners can make up to $500 per month. That $500 lets citizens buy scarce goods such as diapers and insulin, a substantial amount for most Venezuelan households. To put the money crisis in perspective, an average lunch costs 200,000 bolivars, equal to $8-$10, according to the Panam Post.

Cryptocurrency is now common in Venezuela: residents from professors to shopkeepers, miners and non-miners alike, buy and sell through the online exchanges.

Although bitcoins help Venezuelans, the authorities have moved to shut the currency down. Maduro denounces those who profit from the economic crisis as "capitalist parasites." No laws ban or promote the currency, but that has not stopped the authorities, who have been cracking down on mining operations by arresting miners on spurious charges. Joel Padron, who owns a courier service and began mining to improve his income, was the first to be charged with energy theft and possession of contraband; the charges kept him detained for 14 weeks. Beyond the arrests, corrupt police officers shake down miners for their own profit.

The arrests have not made the currency lose relevance, though. Rodrigo Souza, the founder of BlinkTrade, says people have not stopped mining. Many miners are continuing with the cryptocurrency ether (ETH), which increases profits while lowering risk. Bitcoin and ETH rest on the same principle, but ETH is more affordable: all an ETH miner needs is free software and a PC with a video card, so to police officers the mining rigs look like ordinary computers. That is how the risk is lowered.

That Venezuelans have turned to a new system for economic relief opens many eyes to how high inflation really is. Cryptocurrency is benefiting citizens in this time of need and continues to gain ground on the national currency.
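The "mathematical puzzles" mentioned above are proof-of-work searches: miners look for a nonce that makes a block's hash fall below a target. A toy Python illustration of the principle (real Bitcoin mining hashes 80-byte block headers with double SHA-256 at vastly higher difficulty; this is only a sketch):

import hashlib

def mine(header: bytes, difficulty_prefix: str = "0000") -> int:
    """Find a nonce whose double SHA-256 digest starts with the required
    run of zero hex digits (a stand-in for Bitcoin's difficulty target)."""
    nonce = 0
    while True:
        payload = header + nonce.to_bytes(8, "little")
        digest = hashlib.sha256(hashlib.sha256(payload).digest()).hexdigest()
        if digest.startswith(difficulty_prefix):
            return nonce
        nonce += 1

print(mine(b"example block header"))  # four zero hex digits: ~65,000 attempts on average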
California wine industry expected to benefit from Trans-Pacific Partnership

The North Coast wine industry likely stands to benefit from a trade pact that was reached early Monday by the United States and 11 other Pacific Rim countries writing new commerce rules for nearly 40 percent of the world's gross domestic product.

The pact, known as the Trans-Pacific Partnership, eventually would end more than 18,000 tariffs that the participating countries have placed on U.S. exports, including autos, machinery, technology, consumer goods, chemicals and agricultural products as varied as avocados from California and wheat, pork and beef from the Plains states. The agreement now goes to Congress, which must ratify the pact before it takes effect.

Most notably, local vintners could see lower tariffs from Japan, the third-largest export market for American wines last year at $88 million, according to the Wine Institute, which represents California producers.

Without the pact, American wine producers would be at a disadvantage in Japan, which previously struck trade deals with rivals Australia and Chile. Japan's agreement with Chile will end tariffs in 2019, while its deal with Australia eliminates a 15 percent tariff on bottled Australian wine over the next seven years, said Tom LaFaille, vice president and international trade counsel for the Wine Institute. All three countries are part of the new trade agreement reached Monday, as is New Zealand, another large wine-producing country.

"We make the best wines in the world and we really need to compete on a level playing field," LaFaille said.

American negotiators have not released details of the massive agreement, including the size and pace of the tariff rollbacks on U.S. products. But Canada publicly noted that Japanese tariffs on wine, sparkling wine and icewine made in Canada will be eliminated over 10 years.

"We're cautiously optimistic that the tariff phaseout period will be beneficial to California wine exports," LaFaille said.

American wine producers are hopeful the trade pact will lead to greater exports to Japan, much like the jump in sales that followed a 2012 free trade agreement with South Korea. Over the past three years, U.S. wine and beer exports to South Korea have increased from $18 million to $30 million.

The Japanese market should be ripe for American wines, which have gained market share around the world over the past two decades as their reputation for quality has grown, led by brands from Napa and Sonoma. Since the end of the 1990s, U.S. wine exports have increased by more than 50 percent in volume, while over the same period French wine exports have fallen almost 20 percent in volume, said Damien Wilson, Hamel Family Faculty Chair in Wine Business at Sonoma State University.

As exports have grown, American vintners also have been able to sell higher-priced wines overseas. The average price of U.S. wine, at $5.42 per liter, has almost reached parity with that of France, at $5.46 per liter, Wilson said.

In addition to seeking a reduction in tariffs, the U.S. wine industry argued for other items during the trade negotiations. It wanted to streamline cross-border transactions and create additional protections for label-of-origin standards, a big concern given foreign competitors who occasionally pass off wines produced elsewhere as coming from California or Napa.
def Reconstruct(self, input_data):
    # PReLU is a layer with trainable weights, so it must be applied as its
    # own layer rather than passed as Conv2D's `activation` argument;
    # otherwise its parameters are not properly tracked by the model.
    x = Conv2D(filters=3,
               kernel_size=3,
               padding='same',
               kernel_initializer=VarianceScaling(scale=2.0,
                                                  mode="fan_in",
                                                  distribution="untruncated_normal"),
               name='Rec_C')(input_data)
    x = PReLU(name='Rec_C_prelu')(x)
    return x
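For context, the names used above correspond to these imports (tf.keras paths assumed, since the original module's import block is not shown; standalone Keras exposes the same names under keras.*):

from tensorflow.keras.layers import Conv2D, PReLU
from tensorflow.keras.initializers import VarianceScaling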
/** * Allows to get loaded particles from project asset directory. * @author Kotcrab */ public class ParticleCacheModule extends ProjectModule implements WatchListener { private FileAccessModule fileAccess; private AssetsWatcherModule watcherModule; @Override public void init () { watcherModule.addListener(this); } public ParticleEffect get (VisAssetDescriptor assetDescriptor, float scaleFactor) { if (assetDescriptor instanceof PathAsset == false) throw new UnsupportedAssetDescriptorException(assetDescriptor); PathAsset path = (PathAsset) assetDescriptor; return get(fileAccess.getAssetsFolder().child(path.getPath()), scaleFactor); } private ParticleEffect get (FileHandle file, float scaleFactor) { ParticleEffect effect = new ParticleEffect(); try { effect.load(file, file.parent()); } catch (GdxRuntimeException e) { throw new EditorRuntimeException(e); } effect.scaleEffect(scaleFactor); return effect; } @Override public void dispose () { watcherModule.removeListener(this); } @Override public void fileChanged (FileHandle file) { if (ProjectPathUtils.isParticle(file)) { App.eventBus.post(new ResourceReloadedEvent(EnumSet.of(ResourceType.PARTICLES))); } } @Override public void fileDeleted (FileHandle file) { } @Override public void fileCreated (FileHandle file) { } }
Defining System Boundaries of an Institution Nitrogen Footprint

Abstract

A nitrogen (N) footprint quantifies the amount of reactive nitrogen released to the environment and can be measured at different scales. The N footprint of a university includes activities and consumption within its geographic boundaries as well as activities that support the institution. Determining system bounds of an N footprint depends on the institution's mission and provides a common baseline for comparison. A comparison of three scopes of the N footprint, which describe how emissions are directly related to an institution's activities, was conducted for seven institutions. Scopes follow the established definition for the carbon footprint. In this article, the authors propose a new system bounds definition (core campus versus adjunct). Two case studies were explored: how the N footprint of Dickinson College changed with air travel, and how the N footprint of the Marine Biological Laboratory changed with scientific research. Of the three scopes, scope 3 was consistently the largest proportion of the N footprint for all seven institutions. The core campus activities of Dickinson College made up 99 percent of its N footprint, with air travel making up the remaining 1 percent. The Marine Biological Laboratory's core campus activities made up 51 percent of its N footprint and the scientific research made up the remaining 49 percent. Institutions should define system bounds based on their mission and stay consistent with their boundaries following the baseline year. The core campus footprint could be used to compare institution footprints using consistent system bounds. How institutions define their boundaries will impact the recorded amount of nitrogen as well as how the institution will set reduction strategies.

Introduction

A nitrogen (N) footprint quantifies the amount of reactive nitrogen released to the environment as a result of an entity's resource consumption. 1 Reactive nitrogen (Nr) is defined as all nitrogen except nitrogen gas (N2). 2 Humans create reactive nitrogen by breaking the triple bond of N2 through the Haber-Bosch process, the cultivation of legumes, and the combustion of fossil fuels. 3 The detrimental impacts to ecosystems and human health caused by Nr indicate the importance of managing nitrogen emissions in order to reduce its impact on the environment.

This article examines how changing system boundaries affects the N footprint results of different institutions. Setting system boundaries is important because it provides a common baseline for comparison as well as allows different entities within an institution to have direct interaction with their footprint. The following topics are addressed:
The various system bounds discussed in this article focus on the scopes, the core campus activities, other components of energy consumption, and research activities.

System Bounds and Scopes

The N footprint can be measured at different scales, from an individual person to institutions, regions, and the globe. 1,3,4,5 The N footprint of an institution, such as a university, includes activities and consumption within the geographic boundaries of the institution as well as supporting activities and consumption that occur outside the boundaries (e.g., food production, commuting, electricity generation). 3 To calculate an institution's N footprint, the system bounds must be clearly and carefully designed. Setting boundaries determines what will be included in the footprint. For example, in tracking carbon dioxide (CO2) emissions, the Climate Registry describes the parameters used for defining the boundaries, which include scopes, gases, activity types (e.g., stationary combustion), and geographic/operation boundaries. 6

One framework for determining system bounds is to allocate emissions into three scopes based on where they occur relative to the given institution as well as how directly those emissions are related to the institution's activities. Scopes are groupings of sectors that account for emissions that occur as a result of direct or indirect activity by the institution. The sectors allocated to scopes are defined by the World Resources Institute and the World Business Council on Sustainable Development. 7 Both the carbon footprint tool for institutions (the Carbon Management and Assessment Platform) and the N footprint tool collect data for scopes 1 through 3. 8

Scope 1 includes direct emissions that occur from institution-owned-and-operated equipment. In scope 1, both footprints report on-campus stationary sources, direct transportation sources, research animals, and the amount of fertilizer applied to landscape. Scope 2 accounts for indirect emissions from purchased utilities; both footprints report purchased electricity. Finally, scope 3 includes losses from other indirect consequences of institutional activities and consumption patterns (e.g., commuting and food production) that may not necessarily occur within institution-owned equipment (e.g., wastewater). For the carbon footprint tool, scope 3 is considered optional.
Institutions that have signed the American College and University Presidents' Climate Commitment (ACUPCC) are required to report scope 3 emissions for commuting and air travel that is paid for by the institution. There are many types of scope 3 emissions. The consumption associated with these emissions is often not collected in a centralized way, making them difficult to track and report. Some institutions determine which parts of scope 3 to include based on finances or control boundaries. 9 In contrast, all institutions in the N footprint network include scope 3 in their calculations and have included food purchases in their footprint. Including food in the N footprint is important because virtual nitrogen (any nitrogen used in the food production process but not contained in the food we consume) associated with food is one of the largest contributors of reactive nitrogen for most higher education institutions. 1 In addition, many activities in scope 3 (such as food purchases) are necessary to support an institution's operations. Given the significance of scope 3, it is important to identify other methods of defining system bounds that better emphasize relevant environmental impacts that are also integral to an institution's activities.

The N footprints for the seven institutions that completed the calculation were compared by scope. Each institution similarly calculated its N footprint by following methods established by the Nitrogen Footprint Tool. 10 The average proportion for each scope was calculated, and the contribution and importance of scope 3 was considered for the N footprint.

Core Campus Footprint versus Adjunct Footprint

Another way of defining system boundaries is by distinguishing core facilities and activities from adjunct facilities and activities. For example, a university may have a main campus at which its core undergraduate and graduate education activities are conducted, a separate facility that is primarily research and not an education facility, a hospital, and/or a farm. The core footprint encompasses the main campus and its educational activities, while an adjunct footprint covers such separately operated facilities.

The core campus footprint includes any activity that occurs on the campus and contributes to the core functioning of that institution. It also includes the upstream emissions associated with the production or distribution of that activity. The core campus footprint supports baseline institutional activities, including on-campus energy usage, public transit, commuting, fertilizer, and food. The adjunct footprint includes all off-campus research activities that are not directly related to the core campus activities. It should be noted that some research is encompassed within the core campus footprint; the adjunct footprint focuses on major off-campus research initiatives. By using scopes to determine the system bounds of both core campus footprints and adjunct footprints, institutions can capture the full depth of each scope within the campus. The distinction between the two footprints will assist institutions in setting reduction strategies appropriate for each footprint.

This article compares the two levels of footprint system bounds (core campus footprint and adjunct footprint) of six institutions, with a focus on case studies for Dickinson College and the Marine Biological Laboratory. In addition, it explores a specific example of research activities for each institution.
Case Study A: Dickinson College and Air Travel

Dickinson College, a small residential liberal arts school located in Carlisle, Pennsylvania, emphasizes global study, sustainability, and civic engagement. The first N footprint was calculated for the period August 2011 through July 2012 as a senior undergraduate thesis, and was updated for August 2013 through July 2014. 11,12 In addition, Dickinson reports its carbon footprint annually to the ACUPCC and has set goals to reduce greenhouse gas (GHG) emissions 25 percent from the 2008 level by 2020, and to reach net zero emissions in that same year. Dickinson set its N footprint boundaries based on the boundaries used in its carbon footprint. Dickinson owns a college farm where some of the food grown is supplied to the college and the remainder is sold to Community Supported Agriculture members or at the local farmers' market. The farm operations included in the N footprint were all college food purchases from the farm, fertilizer purchased by the farm, and all fuels and electricity used. For Dickinson, accounting for air travel included any air travel directly financed by the school. The Campus Carbon Calculator™ allows for the accounting of "directly encouraged emissions," which include study abroad air travel and regular commuting by faculty, staff, and students. If Dickinson directly financed a visiting faculty member or student, then their travel was included in the footprint. Air travel is a scope 3 activity that not all institutions track. The Dickinson footprint was compared with and without air travel.

Case Study B: Marine Biological Laboratory and Scientific Research

The Marine Biological Laboratory (MBL) is a private nonprofit scientific research institution affiliated with the University of Chicago and located in Woods Hole, Massachusetts. It has a small number of year-round employees (approximately 250), but in the summer season the MBL grows in size due to visiting scientists and students. The first N footprint was calculated for the period January to December 2011 as a research project for the Semester in Environmental Science; the footprint was updated for January to December 2013. 13,14 The MBL set its system boundaries based on the geographic bounds of its campus. Although the MBL has numerous field sites, they were excluded because experiments there are temporary and only a few MBL employees reside at field sites. In addition, certain scientific activities (e.g., fertilization experiments) were not included in the core campus footprint because setting a reduction goal should not entail altering the research of scientists. The scenario unique to the MBL is its research activities. Therefore, its footprint was compared with and without research activities in its base year.

Scopes

Of the three scopes, scope 3 is consistently the largest proportion of each institution's N footprint (Figure 1). On average, scope 3 contributes 70 percent of the N footprint for the seven institutions that have calculated their footprint. The range is from 37 percent to 87 percent. Scope 2 is the second-largest contributor, making up an average of 22 percent (range: 4% to 50%). Scope 1 has the smallest contribution, averaging just 7 percent (range: 3% to 12%) of the total N footprint. According to carbon footprint accounting protocols, only scopes 1 and 2 must be calculated and reported; scope 3 is optional. If only scopes 1 and 2 were reported for the N footprint, then just 30 percent of the total N footprint would be accounted for.
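The averages reported above are simple means of the per-institution scope shares. The sketch below shows that computation on placeholder shares for seven hypothetical institutions; the numbers are invented to be roughly consistent with the reported ranges, not the study's data.

# Placeholder scope shares (scope 1, scope 2, scope 3) for seven hypothetical
# institutions; each row sums to 1.0.
shares = [
    (0.05, 0.25, 0.70), (0.03, 0.10, 0.87), (0.12, 0.50, 0.38),
    (0.08, 0.30, 0.62), (0.06, 0.20, 0.74), (0.10, 0.04, 0.86),
    (0.05, 0.22, 0.73),
]
n = len(shares)
avg = [sum(inst[i] for inst in shares) / n for i in range(3)]
print({"scope %d" % (i + 1): round(a, 2) for i, a in enumerate(avg)})
# Reporting only scopes 1 and 2 would capture this fraction of the footprint:
print("scopes 1+2 capture:", round(avg[0] + avg[1], 2))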
Food production makes up the largest proportion of scope 3. By including this scope, an important sector that makes up the majority of all calculated N footprints can be included (Figure 2).

Core Campus Footprint versus Adjunct Footprint

In addition to their main campus, institutions often have research facilities such as a hospital, veterinary school, or agricultural facilities. These institutionally owned operations can be large enough that they have their own geographic boundaries and activities that span all three scopes and most of the sectors. However, their activities are focused on research, which is often part of the mission of an institution. The contributions of the core footprint versus the adjunct footprint can vary widely based on an institution's activities (Figure 3). Two examples that represent the extremes of institutional focus are Dickinson College and the MBL (Figure 4). Dickinson is an undergraduate college with limited research, whereas the MBL is a research institution with limited coursework. The footprints reflect these activities. The core campus activities make up 99 percent of the Dickinson N footprint but just 51 percent of the MBL N footprint. Adjunct facilities make up almost half (49%) of the N footprint for the MBL when all research activities are accounted for. Calculating a footprint that initially focuses just on the core campus activities is beneficial because it makes footprints more comparable across institutions. Comparing institutions with different system bounds can lead to misleading or erroneous conclusions. The core campus footprint resolves this issue by focusing on activities consistent across institutions. In addition, reduction strategies that are focused on core campus activities will not affect an institution's research mission.

Case Study A: Dickinson College and Air Travel

Dickinson College strongly encourages students to study abroad and take advantage of the global education opportunities it offers. Roughly 60 percent of Dickinson students will study abroad during their academic career, and more than 40 percent of faculty members have led a study abroad program. 15 Other air travel associated with Dickinson's campus includes athletic teams traveling for games and tournaments and faculty members traveling for research and development. Although air travel is a small portion of Dickinson's overall footprint, it is part of the overall educational mission and is included in its carbon footprint. Therefore, air travel was included in the N footprint. In 2014, Dickinson released a total of 85 metric tons N, which included all air travel (for study abroad, athletics, and research and development). (See Figure 5.) Air travel of all three types totaled approximately 1.3 metric tons N, which is about 1.5 percent of the total nitrogen released by Dickinson. Removing air travel from the footprint dropped the total by 1.3 metric tons N, to 84. Many institutions base their N footprint calculations on annual per person totals. In order to ensure a uniform comparison, Dickinson calculated the total kilograms of nitrogen released per person annually and found that for food production, food consumption, utilities, and fertilizer, nitrogen per person was 22, 0.57, 4.1, and 0.52 kg N, respectively. When excluding air travel, transportation releases were 0.40 kg N/person; when including air travel, the release of nitrogen per person doubled to 0.82 kg N/person.
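The per-person figures above are just sector totals divided by the campus population. The sketch below redoes the transportation comparison using the 0.40 and 0.82 kg N/person values quoted in the text; the population of 3,000 is a hypothetical placeholder used only to back out sector totals, not Dickinson's actual headcount.

# Per-capita normalization: kg N per person = sector total (kg N) / population.
population = 3000  # hypothetical placeholder, not Dickinson's actual population
transport_no_air_kg = 0.40 * population  # implied transportation total, no air travel
air_travel_kg = 0.42 * population        # implied air-travel addition (0.82 - 0.40)
per_person_no_air = transport_no_air_kg / population
per_person_with_air = (transport_no_air_kg + air_travel_kg) / population
print(per_person_no_air, per_person_with_air)  # 0.40 then 0.82: roughly double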
Case Study B: Marine Biological Laboratory and Scientific Research

The total N footprint for the Marine Biological Laboratory in 2013 was 7.5 metric tons N. The footprint is based on the campus size, 43,942 gross square meters, which includes cottages that are rented during the summer but excludes the warehouses and storage facilities in other towns. The cottages were included in the footprint because they are owned by the MBL and are utilized by visiting scientists during the summer season. Also included is a dining facility on campus that caters to students, visiting scientists, employees, and visitors. Upstream food production nitrogen losses released 4.0 metric tons N in 2013, and food consumption released 0.4 metric tons N. Commuting for year-round employees was accounted for, which contributed 0.4 metric tons N to the overall footprint. The MBL N footprint can be divided into core campus activities (food, utilities, transportation) and research-related activities (fertilizer application for scientific experiments, research animals). When accounting only for core activities, the N footprint was 6.5 metric tons N. However, research activities contribute a significant addition: research animals added 1.0 metric tons N, and a fertilizer application experiment in 2013 added 5.2 metric tons N (Figure 6). Although the fertilizer experiment lasted for a short period of time, it made up over 40 percent of the MBL's N footprint that year. The kilograms of nitrogen per person on the MBL campus for food production, food consumption, utilities, and transportation are 3.8, 0.32, 1.5, and 0.36 kg N, respectively. However, research increased the use of fertilizer and the number of research animals. Fertilizer increased the nitrogen loss from 0 kg N/person to 4.9 kg N/person; research animals increased the nitrogen loss from 0 kg N/person to 0.92 kg N/person. Including research activities is important in accounting for all nitrogen losses associated with an institution's activities.

Other Examples of the Adjunct Footprint

Two other examples of research activities that can be a large contributor to an institution's total N footprint are agricultural research and a hospital. Colorado State University (CSU) and the University of Virginia (UVA) are institutions that have agricultural research and a hospital, respectively. CSU has a substantial agricultural research program, which includes multiple agricultural research facilities. Core campus activities make up just 46 percent of CSU's total N footprint; the remainder (54%) is a result of agricultural research. 16 Excluding the agricultural research sector would miss an important source of nitrogen losses for CSU. However, including agriculture could be misleading when comparing CSU to other institutions. UVA has a hospital where its medical students work and which also provides services to the general public. The hospital is included in UVA's carbon footprint. The sectors that could be affected by adding an institution's hospital include food (production and consumption) and utilities. Preliminary results indicate that the hospital contributes a large proportion of the UVA N footprint. UVA includes the hospital in its reduction goals and sustainability efforts; however, because the hospital is not part of UVA's core campus activities, it should not be included in comparisons to other institutions.
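The MBL split can be checked with simple arithmetic: core activities plus the research additions give the research-year total, and each component's share follows by division. The sketch below uses only the figures quoted in the case study.

# Figures quoted in the text (metric tons N, 2013).
core = 6.5                  # food, utilities, transportation
research_animals = 1.0
fertilizer_experiment = 5.2
total_with_research = core + research_animals + fertilizer_experiment
print(total_with_research)  # 12.7 metric tons N
share = fertilizer_experiment / total_with_research
print(round(share, 2))      # ~0.41, i.e., "over 40 percent" of that year's footprint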
Recommendations for Defining System Bounds

Based on this research, it is recommended that institutions define their core campus footprint system bounds based on their institution's mission. A core campus footprint is the necessary starting point for calculating an institution's N footprint, but if an institution has a research component, it should be included in the adjunct boundaries for completeness. These decisions should be made for the baseline calculation year and followed for all subsequent updates to the N footprint (a minimal sketch of recording bounds at baseline and checking later updates against them follows the conclusion). If an institution has calculated or intends to calculate its carbon footprint, the same system bounds should be followed for both footprint calculations. Consistent system bounds across footprints will aid in setting reduction strategies that can reduce both footprints. 17 A drawback of including research activities is that the total N footprint will be larger; however, adding research activities results in a more complete picture of an institution's contribution to nitrogen pollution. Another drawback is the difficulty of setting reduction strategies. Potentially, the adjunct footprint could have its own set of reduction strategies, separate from the core campus footprint, which would encourage institutions to think critically about creative ways to reduce their footprints. It is especially important to include a secondary campus when reduction strategies can be applied to it. When comparing N footprints, it is important to compare the core campus footprints so that only comparable activities are included for each institution.

Conclusion

How institutions define their system boundaries when calculating their N footprint will impact not only the recorded amount of nitrogen released, but also how the institution will go about setting reduction strategies. Boundaries should represent the institution's mission and focus, and should also follow the same bounds used for a carbon footprint being calculated simultaneously.
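As a closing illustration of the consistency recommendation, here is one minimal way an institution might record its system bounds in the baseline year and reject later updates whose bounds have drifted. The field names and activity categories are hypothetical, not a published schema.

# Record the system bounds once, at baseline, and validate updates against them.
BASELINE_BOUNDS = {
    "baseline_year": 2014,
    "core_campus": ["food", "utilities", "transportation", "fertilizer"],
    "adjunct": ["agricultural_research", "hospital"],
    "matches_carbon_footprint_bounds": True,
}

def check_update(update_bounds):
    """Raise if a footprint update's bounds drift from the baseline definition."""
    for key in ("core_campus", "adjunct"):
        if update_bounds.get(key) != BASELINE_BOUNDS[key]:
            raise ValueError("system bounds changed for %r; redefine the baseline "
                             "rather than drifting between updates" % key)

check_update({"core_campus": ["food", "utilities", "transportation", "fertilizer"],
              "adjunct": ["agricultural_research", "hospital"]})  # passes silently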
// htmlNoEntitiesEscape escapes and writes to w the string s as htmlEscape // does but without escaping the HTML entities. func htmlNoEntitiesEscape(w strWriter, s string) error { last := 0 for i := 0; i < len(s); i++ { var esc string switch s[i] { case '"': esc = "&#34;" case '\'': esc = "&#39;" case '<': esc = "&lt;" case '>': esc = "&gt;" default: continue } if last != i { _, err := w.WriteString(s[last:i]) if err != nil { return err } } _, err := w.WriteString(esc) if err != nil { return err } last = i + 1 } if last != len(s) { _, err := w.WriteString(s[last:]) return err } return nil }
# coding: utf-8

# Usage:
#   python tests/test_pyccel_mpi.py --compiler=mpif90

# here the execution is handled by os.system, since some tests need a specific
# number of processors

import os

from pyccel.commands.console import pyccel
from utils import clean_tests

# ...
def test_core(n_procs=2):
    print('============== testing core ================')

    ignored = ['poisson_fd_1d.py', 'scatter.py']

    base_dir = os.getcwd()
    path_dir = os.path.join(base_dir, 'tests/scripts/mpi/core')

    files = sorted(os.listdir(path_dir))
    files = [f for f in files if not(f in ignored) and (f.endswith(".py"))]

    # we give here tests that only work with a given number of procs
    d_tests = {'sendrecv.py': 2, 'sendrecv_replace.py': 2,
               'line.py': 2, 'column.py': 2,
               'cart2d_1.py': 4, 'cart2d_2.py': 4}

    for f in files:
        f_name = os.path.join(path_dir, f)

        # we only convert and compile the generated code
        pyccel(files=[f_name], openmp=False)

        # then we use 'mpirun'
        binary = f_name.split('.py')[0]

        _n_procs = n_procs
        if f in d_tests:
            _n_procs = d_tests[f]

        cmd = 'mpirun -n {n_procs} {binary}'.format(n_procs=_n_procs, binary=binary)

        print('> {0}'.format(cmd))
        os.system(cmd)

        print('> testing {0}: done'.format(str(f)))
# ...

# ...
def test_examples(n_procs=2):
    print('============== testing examples ================')

    ignored = []

    base_dir = os.getcwd()
    path_dir = os.path.join(base_dir, 'tests/scripts/mpi/')

    files = sorted(os.listdir(path_dir))
    files = [f for f in files if not(f in ignored) and (f.endswith(".py"))]

    # we give here tests that only work with a given number of procs
    d_tests = {}

    for f in files:
        f_name = os.path.join(path_dir, f)

        # we only convert and compile the generated code
        pyccel(files=[f_name], openmp=False)

        # then we use 'mpirun'
        binary = f_name.split('.py')[0]

        _n_procs = n_procs
        if f in d_tests:
            _n_procs = d_tests[f]

        cmd = 'mpirun -n {n_procs} {binary}'.format(n_procs=_n_procs, binary=binary)

        print('> {0}'.format(cmd))
        os.system(cmd)

        print('> testing {0}: done'.format(str(f)))
# ...

################################
if __name__ == '__main__':
    clean_tests()
    test_core()
    test_examples()
    clean_tests()
/**
 */
package br.ufes.inf.nemo.frameweb.model.frameweb.impl;

import br.ufes.inf.nemo.frameweb.model.frameweb.AuthPermName;
import br.ufes.inf.nemo.frameweb.model.frameweb.AuthPermission;
import br.ufes.inf.nemo.frameweb.model.frameweb.FramewebPackage;

import org.eclipse.emf.common.notify.Notification;

import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.InternalEObject;

import org.eclipse.emf.ecore.impl.ENotificationImpl;

/**
 * <!-- begin-user-doc -->
 * An implementation of the model object '<em><b>Auth Permission</b></em>'.
 * <!-- end-user-doc -->
 * <p>
 * The following features are implemented:
 * </p>
 * <ul>
 *   <li>{@link br.ufes.inf.nemo.frameweb.model.frameweb.impl.AuthPermissionImpl#getAuthPermName <em>Auth Perm Name</em>}</li>
 * </ul>
 *
 * @generated
 */
public class AuthPermissionImpl extends DomainClassImpl implements AuthPermission {
	/**
	 * The cached value of the '{@link #getAuthPermName() <em>Auth Perm Name</em>}' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getAuthPermName()
	 * @generated
	 * @ordered
	 */
	protected AuthPermName authPermName;

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected AuthPermissionImpl() {
		super();
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return FramewebPackage.Literals.AUTH_PERMISSION;
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public AuthPermName getAuthPermName() {
		if (authPermName != null && authPermName.eIsProxy()) {
			InternalEObject oldAuthPermName = (InternalEObject) authPermName;
			authPermName = (AuthPermName) eResolveProxy(oldAuthPermName);
			if (authPermName != oldAuthPermName) {
				if (eNotificationRequired())
					eNotify(new ENotificationImpl(this, Notification.RESOLVE,
							FramewebPackage.AUTH_PERMISSION__AUTH_PERM_NAME, oldAuthPermName, authPermName));
			}
		}
		return authPermName;
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public AuthPermName basicGetAuthPermName() {
		return authPermName;
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void setAuthPermName(AuthPermName newAuthPermName) {
		AuthPermName oldAuthPermName = authPermName;
		authPermName = newAuthPermName;
		if (eNotificationRequired())
			eNotify(new ENotificationImpl(this, Notification.SET,
					FramewebPackage.AUTH_PERMISSION__AUTH_PERM_NAME, oldAuthPermName, authPermName));
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object eGet(int featureID, boolean resolve, boolean coreType) {
		switch (featureID) {
		case FramewebPackage.AUTH_PERMISSION__AUTH_PERM_NAME:
			if (resolve)
				return getAuthPermName();
			return basicGetAuthPermName();
		}
		return super.eGet(featureID, resolve, coreType);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eSet(int featureID, Object newValue) {
		switch (featureID) {
		case FramewebPackage.AUTH_PERMISSION__AUTH_PERM_NAME:
			setAuthPermName((AuthPermName) newValue);
			return;
		}
		super.eSet(featureID, newValue);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eUnset(int featureID) {
		switch (featureID) {
		case FramewebPackage.AUTH_PERMISSION__AUTH_PERM_NAME:
			setAuthPermName((AuthPermName) null);
			return;
		}
		super.eUnset(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public boolean eIsSet(int featureID) {
		switch (featureID) {
		case FramewebPackage.AUTH_PERMISSION__AUTH_PERM_NAME:
			return authPermName != null;
		}
		return super.eIsSet(featureID);
	}

} //AuthPermissionImpl
Blood-brain barrier injury during spirochetal infection

The most severe and a relatively common complication of untreated relapsing fever (RF) borreliosis is brain hemorrhage; the mortality rate can reach as high as 70% in the epidemic form. Subjects with epidemic RF who are stable prior to treatment have extraordinarily high levels of circulating IL-10. Our studies in murine RF borreliosis have shown that IL-10 protects the microcirculation of the brain and other organs from hemorrhagic and thrombotic complications. Mortality is higher in infected mice deficient only in IL-10 than in mice normal for IL-10 but deficient in both B and T cells. In this study we examined, morphologically and molecularly, the brains of mice with relapsing-remitting and persistent infection with B. turicatae (Bt), with or without IL-10 deficiency, for alterations in the BBB. The analysis shows significant BBB disruption in IL-10-deficient infected mice, often resulting in brain hemorrhage. Immunofluorescence microscopy of vessels near hemorrhagic areas shows rupture of the basal lamina, detachment of astrocyte foot processes and pericytes, leakage of serum proteins, decrease in tight junction proteins, endothelial cell activation and apoptosis, and recruitment of inflammatory cells. These results indicate that IL-10 protects the BBB at multiple levels during spirochetal infection to prevent injury. Studies funded by NIH grant R21 NS057545-02 to Diego Cadavid.
/** * Created by lkacimi on 5/4/2017. */ public class SNPRC_EHRUserSchema extends SimpleUserSchema { public SNPRC_EHRUserSchema(User user, Container container, DbSchema dbschema) { super(SNPRC_EHRSchema.NAME, null, user, container, dbschema); } public SNPRC_EHRUserSchema(User user, Container container) { super(SNPRC_EHRSchema.NAME, null, user, container, SNPRC_EHRSchema.getInstance().getSchema()); } @Override protected TableInfo createWrappedTable(String name, @NotNull TableInfo schemaTable, ContainerFilter cf) { String nameLowercased = name.toLowerCase(); switch(nameLowercased){ case SNPRC_EHRSchema.TABLE_VALID_VETS: case SNPRC_EHRSchema.TABLE_VALID_BIRTH_CODES: case SNPRC_EHRSchema.TABLE_VALID_DEATH_CODES: case SNPRC_EHRSchema.TABLE_VALID_INSTITUTIONS: return getCustomPermissionTable(createSourceTable(nameLowercased), cf, ManageLookupTablesPermission.class); case SNPRC_EHRSchema.TABLE_GROUP_CATEGORIES: case SNPRC_EHRSchema.TABLE_ANIMAL_GROUPS: return getCustomPermissionTable(createSourceTable(nameLowercased), cf, ManageGroupMembersPermission.class); } return super.createWrappedTable(name, schemaTable, cf); } private TableInfo getCustomPermissionTable(TableInfo schemaTable, ContainerFilter cf, Class<? extends Permission> perm) { CustomPermissionsTable ret = new CustomPermissionsTable(this, schemaTable, cf); ret.addPermissionMapping(InsertPermission.class, perm); ret.addPermissionMapping(UpdatePermission.class, perm); ret.addPermissionMapping(DeletePermission.class, perm); return ret.init(); } }
#ifndef NOTES_H_
#define NOTES_H_

#include <string>  // required for the std::string references declared below

namespace vottie {
namespace notes {

/**
 * @class Notes
 */
class Notes {
 public:
  Notes() {}
  ~Notes() {}

  int printHeader(std::string &title);
  int printBody(std::string &note);
  int printFooter();
  int parse(char const *request_uri);

 private:
  void indexing();
};

}
}  // end of namespace

#endif  // NOTES_H_
package org.artifactory.webapp.servlet.redirection;

import org.apache.commons.lang.StringUtils;
import org.artifactory.util.PathUtils;
import org.artifactory.webapp.servlet.RequestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.servlet.RequestDispatcher;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

/**
 * @author <NAME>
 */
public class SamlRedirectionHandler implements RedirectionHandler {
    private static final Logger log = LoggerFactory.getLogger(SamlRedirectionHandler.class);

    @Override
    public boolean shouldRedirect(ServletRequest request) {
        HttpServletRequest httpRequest = (HttpServletRequest) request;
        String path = httpRequest.getRequestURI();
        path = PathUtils.trimLeadingSlashes(path);
        path = path.toLowerCase();
        return path.endsWith("/webapp/saml/loginrequest") ||
                path.endsWith("/webapp/saml/loginresponse") ||
                path.endsWith("/webapp/saml/logoutrequest");
    }

    @Override
    public void redirect(ServletRequest request, ServletResponse response) {
        try {
            String path = RequestUtils.getServletPathFromRequest((HttpServletRequest) request);
            String targetUrl = StringUtils.replace(path, "webapp", "ui");
            RequestDispatcher dispatcher = request.getRequestDispatcher(targetUrl);
            dispatcher.forward(request, response);
        } catch (Exception e) {
            log.error("Failed to redirect SAML request.",e);
        }
    }
}
/**
 * @Classname UploadUtil
 * @Description None
 * @Date 2019/6/27 20:06
 * @Created by WDD
 */
public class UploadUtil {

    // Directory under the project root -- the SpringBoot static directory is
    // effectively served from the root path (SpringBoot default)
    public final static String IMG_PATH_PREFIX = "static/upload/imgs";

    public static File getImgDirFile(){
        // Build the path of the "folder" that stores uploaded files
        String fileDirPath = "src/main/resources/" + IMG_PATH_PREFIX;
        File fileDir = new File(fileDirPath);
        if(!fileDir.exists()){
            // Create the folder recursively
            fileDir.mkdirs();
        }
        return fileDir;
    }
}
/** * Contains static validation methods for checking if a name conforms to Asgard naming standards. */ public class NameValidation { private static final Pattern NAME_HYPHEN_CHARS_PATTERN = Pattern.compile("^[" + NameConstants.NAME_HYPHEN_CHARS + "]+"); private static final Pattern NAME_CHARS_PATTERN = Pattern.compile("^[" + NameConstants.NAME_CHARS + "]+"); private static final Pattern PUSH_FORMAT_PATTERN = Pattern.compile(".*?" + NameConstants.PUSH_FORMAT); private static final Pattern LABELED_VARIABLE_PATTERN = Pattern.compile("^(.*?-)?" + NameConstants.LABELED_VARIABLE + ".*?$"); private NameValidation() { } /** * Validates if provided value is non-null and non-empty. * * @param value the string to validate * @param variableName name of the variable to include in error messages * @return the value parameter if valid, throws an exception otherwise */ public static String notEmpty(String value, String variableName) { if (value == null) { throw new NullPointerException("ERROR: Trying to use String with null " + variableName); } if (value.isEmpty()) { throw new IllegalArgumentException("ERROR: Illegal empty string for " + variableName); } return value; } /** * Validates a name of a cloud object. The name can contain letters, numbers, dots, and underscores. * * @param name the string to validate * @return true if the name is valid */ public static boolean checkName(String name) { return checkMatch(name, NAME_CHARS_PATTERN); } /** * Validates a name of a cloud object. The name can contain hyphens in addition to letters, numbers, dots, and * underscores. * * @param name the string to validate * @return true if the name is valid */ public static boolean checkNameWithHyphen(String name) { return checkMatch(name, NAME_HYPHEN_CHARS_PATTERN); } /** * The detail part of an auto scaling group name can include letters, numbers, dots, underscores, and hyphens. * Restricting the ASG name this way allows safer assumptions in other code about ASG names, like a promise of no * spaces, hash marks, percent signs, or dollar signs. * * @deprecated use checkNameWithHyphen * @param detail the detail string to validate * @return true if the detail is valid */ @Deprecated public static boolean checkDetail(String detail) { return checkMatch(detail, NAME_HYPHEN_CHARS_PATTERN); } /** * Determines whether a name ends with the reserved format -v000 where 0 represents any digit, or starts with the * reserved format z0 where z is any letter, or contains a hyphen-separated token that starts with the z0 format. * * @param name to inspect * @return true if the name ends with the reserved format */ public static Boolean usesReservedFormat(String name) { return checkMatch(name, PUSH_FORMAT_PATTERN) || checkMatch(name, LABELED_VARIABLE_PATTERN); } private static boolean checkMatch(String input, Pattern pattern) { return input != null && pattern.matcher(input).matches(); } }
def add_songs(self, pl_id): songs = self.user.get_playlist_songs(pl_id) self.songs_table.add_songs(songs, True)
package com.eoenesapps.foundboard;

import androidx.appcompat.app.AppCompatActivity;

import android.media.MediaPlayer;
import android.os.Bundle;
import android.view.View;

import com.eoenesapps.myapplication.R;

public class MainActivity extends AppCompatActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
    }

    public void playSound(View v) {
        // Place music files in a resource folder called "raw", then reference them by file name
        MediaPlayer mp = MediaPlayer.create(this, R.raw.heavyrain); // Placeholder file; change "heavyrain" to change which sound is played
        mp.start();
    }
}
async def _chunked_upload(self, stream: streams.BaseStream, path: WaterButlerPath, conflict: str='replace') -> dict:
    """Upload ``stream`` to ``path`` in chunks: create an upload session,
    send the parts, then finalize the session with the given conflict policy."""
    session_id = await self._create_upload_session()
    await self._upload_parts(stream, session_id)
    return await self._complete_session(stream, session_id, path, conflict=conflict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-18 <NAME> and contributors
# See LICENSE.rst for details.

import pytest

from luma.led_matrix.device import max7219
from luma.core.render import canvas

from helpers import setup_function, serial, call, assert_invalid_dimensions  # noqa: F401


def test_init_cascaded():
    device = max7219(serial, cascaded=4)
    assert device.width == 32
    assert device.height == 8


def test_init_reversed():
    device = max7219(serial, cascaded=4, blocks_arranged_in_reverse_order=True)
    assert device.blocks_arranged_in_reverse_order is True


def test_init_8x8():
    device = max7219(serial)
    assert device.cascaded == 1
    serial.data.assert_has_calls([
        call([11, 7]),
        call([9, 0]),
        call([15, 0]),
        call([10, 7]),
        call([1, 0]),
        call([2, 0]),
        call([3, 0]),
        call([4, 0]),
        call([5, 0]),
        call([6, 0]),
        call([7, 0]),
        call([8, 0]),
        call([12, 1])
    ])


def test_init_16x8():
    device = max7219(serial, width=16, height=8)
    assert device.cascaded == 2
    serial.data.assert_has_calls([
        call([11, 7, 11, 7]),
        call([9, 0, 9, 0]),
        call([15, 0, 15, 0]),
        call([10, 7, 10, 7]),
        call([1, 0, 1, 0]),
        call([2, 0, 2, 0]),
        call([3, 0, 3, 0]),
        call([4, 0, 4, 0]),
        call([5, 0, 5, 0]),
        call([6, 0, 6, 0]),
        call([7, 0, 7, 0]),
        call([8, 0, 8, 0]),
        call([12, 1, 12, 1])
    ])


def test_init_invalid_dimensions():
    assert_invalid_dimensions(max7219, serial, 59, 22)


def test_hide():
    device = max7219(serial, cascaded=5)
    serial.reset_mock()
    device.hide()
    serial.data.assert_called_once_with([12, 0] * 5)


def test_show():
    device = max7219(serial, cascaded=3)
    serial.reset_mock()
    device.show()
    serial.data.assert_called_once_with([12, 1] * 3)


def test_contrast():
    device = max7219(serial, cascaded=6)
    serial.reset_mock()
    device.contrast(0x6B)
    serial.data.assert_called_once_with([10, 6] * 6)


def test_display_16x8():
    device = max7219(serial, cascaded=2)
    serial.reset_mock()
    with canvas(device) as draw:
        draw.rectangle(device.bounding_box, outline="white")

    serial.data.assert_has_calls([
        call([1, 0x81, 1, 0xFF]),
        call([2, 0x81, 2, 0x81]),
        call([3, 0x81, 3, 0x81]),
        call([4, 0x81, 4, 0x81]),
        call([5, 0x81, 5, 0x81]),
        call([6, 0x81, 6, 0x81]),
        call([7, 0x81, 7, 0x81]),
        call([8, 0xFF, 8, 0x81])
    ])


def test_display_16x16():
    device = max7219(serial, width=16, height=16)
    serial.reset_mock()
    with canvas(device) as draw:
        draw.rectangle(device.bounding_box, outline="white")

    serial.data.assert_has_calls([
        call([1, 0x80, 1, 0xFF, 1, 0x01, 1, 0xFF]),
        call([2, 0x80, 2, 0x80, 2, 0x01, 2, 0x01]),
        call([3, 0x80, 3, 0x80, 3, 0x01, 3, 0x01]),
        call([4, 0x80, 4, 0x80, 4, 0x01, 4, 0x01]),
        call([5, 0x80, 5, 0x80, 5, 0x01, 5, 0x01]),
        call([6, 0x80, 6, 0x80, 6, 0x01, 6, 0x01]),
        call([7, 0x80, 7, 0x80, 7, 0x01, 7, 0x01]),
        call([8, 0xFF, 8, 0x80, 8, 0xFF, 8, 0x01])
    ])


def test_normal_alignment():
    device = max7219(serial, cascaded=2, block_orientation=0)
    serial.reset_mock()
    with canvas(device) as draw:
        draw.rectangle((0, 0, 15, 3), outline="white")

    serial.data.assert_has_calls([
        call([1, 0x09, 1, 0x0F]),
        call([2, 0x09, 2, 0x09]),
        call([3, 0x09, 3, 0x09]),
        call([4, 0x09, 4, 0x09]),
        call([5, 0x09, 5, 0x09]),
        call([6, 0x09, 6, 0x09]),
        call([7, 0x09, 7, 0x09]),
        call([8, 0x0F, 8, 0x09])
    ])


def test_block_realignment_minus90():
    device = max7219(serial, cascaded=2, block_orientation=-90)
    serial.reset_mock()
    with canvas(device) as draw:
        draw.rectangle((0, 0, 15, 3), outline="white")

    serial.data.assert_has_calls([
        call([1, 0x00, 1, 0x00]),
        call([2, 0x00, 2, 0x00]),
        call([3, 0x00, 3, 0x00]),
        call([4, 0x00, 4, 0x00]),
        call([5, 0xFF, 5, 0xFF]),
call([6, 0x80, 6, 0x01]), call([7, 0x80, 7, 0x01]), call([8, 0xFF, 8, 0xFF]) ]) def test_block_realignment_plus90(): device = max7219(serial, cascaded=2, block_orientation=90) serial.reset_mock() with canvas(device) as draw: draw.rectangle((0, 0, 15, 3), outline="white") serial.data.assert_has_calls([ call([1, 0xFF, 1, 0xFF]), call([2, 0x01, 2, 0x80]), call([3, 0x01, 3, 0x80]), call([4, 0xFF, 4, 0xFF]), call([5, 0x00, 5, 0x00]), call([6, 0x00, 6, 0x00]), call([7, 0x00, 7, 0x00]), call([8, 0x00, 8, 0x00]) ]) def test_block_realignment_plus180(): device = max7219(serial, cascaded=2, block_orientation=180) serial.reset_mock() with canvas(device) as draw: draw.rectangle((0, 0, 15, 3), outline="white") serial.data.assert_has_calls([ call([1, 0xF0, 1, 0x90]), call([2, 0x90, 2, 0x90]), call([3, 0x90, 3, 0x90]), call([4, 0x90, 4, 0x90]), call([5, 0x90, 5, 0x90]), call([6, 0x90, 6, 0x90]), call([7, 0x90, 7, 0x90]), call([8, 0x90, 8, 0xF0]) ]) def test_reversed_max7219(): device = max7219(serial, cascaded=4, blocks_arranged_in_reverse_order=True) serial.reset_mock() with canvas(device) as draw: draw.rectangle((0, 0, 15, 3), outline="white") serial.data.assert_has_calls([ call([1, 15, 1, 9, 1, 0, 1, 0]), call([2, 9, 2, 9, 2, 0, 2, 0]), call([3, 9, 3, 9, 3, 0, 3, 0]), call([4, 9, 4, 9, 4, 0, 4, 0]), call([5, 9, 5, 9, 5, 0, 5, 0]), call([6, 9, 6, 9, 6, 0, 6, 0]), call([7, 9, 7, 9, 7, 0, 7, 0]), call([8, 9, 8, 15, 8, 0, 8, 0]) ]) def test_unknown_block_orientation(): with pytest.raises(AssertionError): max7219(serial, cascaded=2, block_orientation="sausages")
#include "vhls_target.h"

#include "Linebuffer.h"
#include "halide_math.h"

void vhls_target(
hls::stream<AxiPackedStencil<uint16_t, 1, 1> > &arg_0,
hls::stream<AxiPackedStencil<uint16_t, 1, 1> > &arg_1)
{
#pragma HLS DATAFLOW
#pragma HLS INLINE region
#pragma HLS INTERFACE s_axilite port=return bundle=config
#pragma HLS INTERFACE axis register port=arg_0
#pragma HLS INTERFACE axis register port=arg_1

 // alias the arguments
 hls::stream<AxiPackedStencil<uint16_t, 1, 1> > &_hw_input_stencil_stream = arg_0;
 hls::stream<AxiPackedStencil<uint16_t, 1, 1> > &_hw_output_stencil_stream = arg_1;

 // dispatch_stream(_hw_input_stencil_stream, 2, 1, 1, 100, 1, 1, 100, 1, "hw_output", 1, 0, 100, 0, 100);
 hls::stream<AxiPackedStencil<uint16_t, 1, 1> > _hw_input_stencil_stream_to_hw_output;
#pragma HLS STREAM variable=_hw_input_stencil_stream_to_hw_output depth=1
#pragma HLS RESOURCE variable=_hw_input_stencil_stream_to_hw_output core=FIFO_SRL

 for (int _dim_1 = 0; _dim_1 <= 99; _dim_1 += 1)
 for (int _dim_0 = 0; _dim_0 <= 99; _dim_0 += 1)
 {
#pragma HLS PIPELINE
  PackedStencil<uint16_t, 1, 1> _tmp_stencil = _hw_input_stencil_stream.read();
  if (_dim_0 >= 0 && _dim_0 <= 99 && _dim_1 >= 0 && _dim_1 <= 99)
  {
   _hw_input_stencil_stream_to_hw_output.write(_tmp_stencil);
  }
 }
 (void)0;

 // produce hw_output.stencil.stream
 for (int _hw_output_y___scan_dim_1 = 0; _hw_output_y___scan_dim_1 < 0 + 100; _hw_output_y___scan_dim_1++)
 {
  for (int _hw_output_x___scan_dim_0 = 0; _hw_output_x___scan_dim_0 < 0 + 100; _hw_output_x___scan_dim_0++)
  {
#pragma HLS PIPELINE II=1
   Stencil<uint16_t, 1, 1> _hw_input_stencil;
#pragma HLS ARRAY_PARTITION variable=_hw_input_stencil.value complete dim=0
   _hw_input_stencil(0, 0, 0) = 0;
   // produce hw_input.stencil
   _hw_input_stencil = _hw_input_stencil_stream_to_hw_output.read();
   (void)0;
   // consume hw_input.stencil
   Stencil<uint16_t, 1, 1> _hw_output_stencil;
#pragma HLS ARRAY_PARTITION variable=_hw_output_stencil.value complete dim=0
   _hw_output_stencil(0, 0, 0) = 0;
   // produce hw_output.stencil
   uint16_t _273 = _hw_input_stencil(0, 0);
   uint16_t _274 = (uint16_t)(5);
   uint16_t _275 = _273 * _274;
   _hw_output_stencil(0, 0) = _275;
   // consume hw_output.stencil
   AxiPackedStencil<uint16_t, 1, 1> _hw_output_stencil_packed = _hw_output_stencil;
   if (_hw_output_x___scan_dim_0 == 99 && _hw_output_y___scan_dim_1 == 99)
   {
    _hw_output_stencil_packed.last = 1;
   }
   else
   {
    _hw_output_stencil_packed.last = 0;
   }
   _hw_output_stencil_stream.write(_hw_output_stencil_packed);
   (void)0;
  } // for _hw_output_x___scan_dim_0
 } // for _hw_output_y___scan_dim_1
} // kernel hls_target_vhls_target
Congenital segmental dilatation of intestine with different morphology: A case report

Congenital segmental dilatation of intestine is rare, with over 150 cases reported so far. The normal intestine abruptly transits to a dilated segment of variable length and diameter (3-4 times) without any evidence of mechanical obstruction. The dilated segment is hypomotile but with normal neuronal connections and ganglion cells. Often, it is a solitary lesion, but multiple sites may be involved. Ileum is the most common site, followed by colon, jejunum, and duodenum. The extent may range from a small lesion to as much as near-total colon.

INTRODUCTION

Congenital segmental dilatation of intestine is rare, with over 150 cases reported so far. The normal intestine abruptly transits to a dilated segment of variable length and diameter (3-4 times) without any evidence of mechanical obstruction. The dilated segment is hypomotile but with normal neuronal connections and ganglion cells. Often, it is a solitary lesion, but multiple sites may be involved. Ileum is the most common site, followed by colon, jejunum, and duodenum. The extent may range from a small lesion to as much as near-total colon. Swenson et al. proposed diagnostic criteria for congenital segmental dilatation of the intestine, and most of the reported cases followed them, except a few. Herein, we report a morphologically different case of congenital segmental dilatation of ileum in a patient with anorectal malformation.

CASE REPORT

A 1-day-old male neonate, weighing 2.5 kg, presented with failure to pass meconium since birth. The baby was born at term by spontaneous vaginal delivery. Antenatal follow-up and the perinatal period were uneventful. On examination, the baby had abdominal distension and an imperforate anus. X-ray abdomen (prone cross-table lateral and erect) showed a high variety of anorectal malformation (Fig. 1A, 1B). Ultrasound of the abdomen revealed normal kidneys. Echocardiography showed a small patent ductus arteriosus (PDA). The laboratory tests were within normal ranges. After optimization, the patient underwent sigmoid loop colostomy over a skin bridge as the first-stage surgery in the management of high variety anorectal malformation. The next day, the patient passed negligible meconium through the stoma. The abdomen distended further, and the stoma was non-functional. The repeat X-ray abdomen showed dilated small bowel loops. A fusiform area of bowel gas was noted in the right hemiabdomen that was also present on the initial radiographs. Laboratory tests showed a rising C-reactive protein and declining thrombocytes. The next day, repeated X-rays had the same findings; thus, the parents were counseled for reoperation. A segmentally dilated portion of the mid-ileum was found at exploratory laparotomy, with proximal dilated loops and comparatively collapsed distal bowel (Fig. 2). The abrupt transition from normal bowel to segmental dilatation was absent. The dilated segment was resected, and an ileostomy was formed; anastomosis was avoided as the baby was becoming septic. Serial biopsies were taken from the ileostomy site and colon, which excluded Hirschsprung's disease. The postoperative recovery remained uneventful. The baby passed adequate stool from the ileostomy and was discharged in good condition on ad libitum feeds. The patient is on our follow-up for definitive surgery. We have prioritized ileostomy reversal first, followed by anorectoplasty and colostomy reversal.

DISCUSSION

Congenital segmental dilatation of intestine is encountered mostly in neonates (>60%), with a male preponderance.
That the literature contains merely case reports and small case series attests to its scarcity. Sakaguchi et al. identified 28 cases in a nationwide survey of Japan over a decade. We also published a case report in 2012 and a series of 3 cases in 2016. Two patterns of presentation exist: early, with neonatal intestinal obstruction; and late, with chronic constipation and related sequelae such as failure to thrive, malnutrition, malabsorption, and anemia. Ileal segmental dilatation usually presents early, whereas colonic segmental dilatation appears to escape the early presentation, though this is not a rule. Associated anomalies may alter the presentation, as in the index case; we initially missed the segmental dilatation because the colostomy was formed through a small incision, and we identified it later at exploratory laparotomy for persistent intestinal obstruction. We have reported a similar case of imperforate anus with congenital pouch colon where the segmental dilatation was identified on repeat surgery for early postoperative intestinal obstruction. Antenatal and preoperative diagnoses are seldom made, with most cases diagnosed at surgery. Abdominal X-ray, ultrasound, CT scan, and GIT contrast study may help the diagnosis preoperatively, though a high index of suspicion is needed. It can mimic other surgical conditions on radiological investigations: pneumoperitoneum on an abdominal radiograph and a duplication cyst on contrast imaging are a few examples. In the index case, we initially ignored the discrete fusiform gas shadow on the abdominal radiographs and took note only when the finding persisted on subsequent radiographs.

On literature search, we identified and classified the varied morphology of congenital segmental dilatation of the intestine (Table 1).

Table 1 (excerpt)
Type IV, Tubular: longer lesions with any type of transition. (7,9,12)
Type V, Complex: multiple segmental dilatations (3) or associated with congenital pouch colon. (13)

The diagnostic criteria laid down by Swenson et al. appear exclusive, but we believe the anomaly has a developmental spectrum, and reported variations in morphology are common. The reported cases differ in site, size, shape, and histology. Adhikari et al. reported a case of a small congenital segmental dilatation of the jejunum which lacked an abrupt transition between the normal bowel and the segmental dilatation. They initially left it unresected as mechanical obstruction was absent; on persistent postoperative intestinal obstruction, the lesion was resected at repeat surgery. Such lesions may represent a milder morphological variation (forme fruste) in the spectrum. The index case also appears to be a forme fruste of congenital segmental dilatation of intestine.

Complete surgical resection and anastomosis is curative; however, at certain locations (such as the duodenum), tapering is recommended. Occasionally a temporary stoma is needed in patients who are unfit or in sepsis. Similarly, in the index case, as the baby was in sepsis, we avoided anastomosis and formed a stoma.

To conclude, congenital segmental dilatation of the intestine is a rare entity with morphological variations. Rarer still is its forme fruste variant. Occasionally these small lesions are missed at the initial surgery, thus complicating the treatment course. Complete surgical resection is the treatment of choice in most locations.
-- Hey Emacs, this is a -*- haskell -*- file {- | Auto-generated module holding version information. -} module Idly.Version ( version ) where -- | The version of the sources. version :: String version = "(ci wrapper) version 0.1"
/** Prints out useful information about an OpenGL program including a * listing of the active attribute variables and active uniform * variables. * * @param program The program that you want information about. */ void kuhl_print_program_info(GLuint program) { GLint numVarsInProg = 0; glGetProgramiv(program, GL_ACTIVE_ATTRIBUTES, &numVarsInProg); char buf[1024] = ""; int buflen = 0; buflen += snprintf(buf+buflen, 1024-buflen, "GLSL prog %d: Active attributes: ", program); for(int i=0; i<numVarsInProg; i++) { char attribName[1024]; GLint arraySize = 0; GLenum type = 0; GLsizei actualLength = 0; glGetActiveAttrib(program, i, 1024, &actualLength, &arraySize, &type, attribName); GLint location = glGetAttribLocation(program, attribName); buflen += snprintf(buf+buflen, 1024-buflen, "%s@%d ", attribName, location); } if(numVarsInProg == 0) buflen += snprintf(buf+buflen, 1024-buflen, "[none!]"); msg(INFO, "%s", buf); kuhl_errorcheck(); numVarsInProg = 0; glGetProgramiv(program, GL_ACTIVE_UNIFORMS, &numVarsInProg); buflen = 0; buflen += snprintf(buf+buflen, 1024-buflen, "GLSL prog %d: Active uniforms: ", program); for(int i=0; i<numVarsInProg; i++) { char attribName[1024]; GLint arraySize = 0; GLenum type = 0; GLsizei actualLength = 0; glGetActiveUniform(program, i, 1024, &actualLength, &arraySize, &type, attribName); GLint location = glGetUniformLocation(program, attribName); buflen += snprintf(buf+buflen, 1024-buflen, "%s@%d ", attribName, location); } if(numVarsInProg == 0) buflen += snprintf(buf+buflen, 1024-buflen, "[none!]"); msg(INFO, "%s", buf); kuhl_errorcheck(); GLint attachedShaderCount=0; GLint binarySize=0; GLint deleteStatus=GL_FALSE; glGetProgramiv(program, GL_ATTACHED_SHADERS, &attachedShaderCount); glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &binarySize); glGetProgramiv(program, GL_DELETE_STATUS, &deleteStatus); msg(INFO, "GLSL prog %d: AttachedShaderCount=%d Size=%d %s\n", program, attachedShaderCount, binarySize, deleteStatus == GL_TRUE ? "DELETED!" : ""); kuhl_errorcheck(); }
/* Generated by RuntimeBrowser Image: /System/Library/PrivateFrameworks/GeoServices.framework/GeoServices */ @interface GEOUserActionUsageLogMessage : PBCodable <NSCopying> { struct { unsigned int locationBucket : 1; unsigned int uiTarget : 1; } _has; int _locationBucket; GEOMapLaunchDetails * _mapLaunchDetails; GEOPlaceActionDetails * _placeActionDetails; NSString * _providerId; GEORouteDetails * _routeDetails; GEOMapsServerMetadata * _serverMetadata; int _uiTarget; NSString * _usageEventKey; NSString * _usageEventValue; } @property (nonatomic) bool hasLocationBucket; @property (nonatomic, readonly) bool hasMapLaunchDetails; @property (nonatomic, readonly) bool hasPlaceActionDetails; @property (nonatomic, readonly) bool hasProviderId; @property (nonatomic, readonly) bool hasRouteDetails; @property (nonatomic, readonly) bool hasServerMetadata; @property (nonatomic) bool hasUiTarget; @property (nonatomic, readonly) bool hasUsageEventKey; @property (nonatomic, readonly) bool hasUsageEventValue; @property (nonatomic) int locationBucket; @property (nonatomic, retain) GEOMapLaunchDetails *mapLaunchDetails; @property (nonatomic, retain) GEOPlaceActionDetails *placeActionDetails; @property (nonatomic, retain) NSString *providerId; @property (nonatomic, retain) GEORouteDetails *routeDetails; @property (nonatomic, retain) GEOMapsServerMetadata *serverMetadata; @property (nonatomic) int uiTarget; @property (nonatomic, retain) NSString *usageEventKey; @property (nonatomic, retain) NSString *usageEventValue; - (int)StringAsLocationBucket:(id)arg1; - (int)StringAsUiTarget:(id)arg1; - (void)copyTo:(id)arg1; - (id)copyWithZone:(struct _NSZone { }*)arg1; - (void)dealloc; - (id)description; - (id)dictionaryRepresentation; - (bool)hasLocationBucket; - (bool)hasMapLaunchDetails; - (bool)hasPlaceActionDetails; - (bool)hasProviderId; - (bool)hasRouteDetails; - (bool)hasServerMetadata; - (bool)hasUiTarget; - (bool)hasUsageEventKey; - (bool)hasUsageEventValue; - (unsigned long long)hash; - (bool)isEqual:(id)arg1; - (int)locationBucket; - (id)locationBucketAsString:(int)arg1; - (id)mapLaunchDetails; - (void)mergeFrom:(id)arg1; - (id)placeActionDetails; - (id)providerId; - (bool)readFrom:(id)arg1; - (id)routeDetails; - (id)serverMetadata; - (void)setHasLocationBucket:(bool)arg1; - (void)setHasUiTarget:(bool)arg1; - (void)setLocationBucket:(int)arg1; - (void)setMapLaunchDetails:(id)arg1; - (void)setPlaceActionDetails:(id)arg1; - (void)setProviderId:(id)arg1; - (void)setRouteDetails:(id)arg1; - (void)setServerMetadata:(id)arg1; - (void)setUiTarget:(int)arg1; - (void)setUsageEventKey:(id)arg1; - (void)setUsageEventValue:(id)arg1; - (int)uiTarget; - (id)uiTargetAsString:(int)arg1; - (id)usageEventKey; - (id)usageEventValue; - (void)writeTo:(id)arg1; @end
package typ

import "testing"

func TestCommon(t *testing.T) {
	tests := []struct {
		a, b Type
		want Type
	}{
		{Int, Int, Int},
		{Int, Opt(Int), Int},
		{Int, Real, Num},
		{Int, Span, Int},
		{Str, Int, Any},
		{List(Any), List(Int), List(Any)},
		{Cont(Any), List(Any), List(Any)},
		{List(Int), List(Int), List(Int)},
		{List(Int), List(Real), List(Num)},
		{List(Int), List(Real), List(Num)},
		{List(Int), List(Span), List(Int)},
		{Dict(Int), Dict(Span), Dict(Int)},
		{Str, Var(0), Str},
		{Sym, Expr, Expr},
		{Sym, Typ, Expr},
		{Alt(Int), Int, Alt(Int)},
		{Alt(Int), Alt(Int), Alt(Int)},
		{Alt(Int), Alt(Real), Alt(Int, Real)},
		{Alt(Int, Real), Str, Alt(Int, Real, Str)},
	}
	for _, test := range tests {
		got, _, _ := Common(test.a, test.b)
		if !test.want.Equal(got) {
			t.Errorf("for %s,%s want %s got %s", test.a, test.b, test.want, got)
		}
		got, _, _ = Common(test.b, test.a)
		if !test.want.Equal(got) {
			t.Errorf("for %s,%s want %s got %s", test.b, test.a, test.want, got)
		}
	}
}
import { Operation, Path } from '..'; /** * `Point` objects refer to a specific location in a text node in a Slate * document. Its path refers to the location of the node in the tree, and its * offset refers to the distance into the node's string of text. Points can * only refer to `Text` nodes. */ interface Point { path: Path; offset: number; [key: string]: any; } /** * `PointKey` is either an "anchor" or "focus" point string. */ declare type PointKey = 'anchor' | 'focus'; declare namespace Point { /** * Compare a point to another, returning an integer indicating whether the * point was before, at, or after the other. */ const compare: (point: Point, another: Point) => 0 | 1 | -1; /** * Check if a point is after another. */ const isAfter: (point: Point, another: Point) => boolean; /** * Check if a point is before another. */ const isBefore: (point: Point, another: Point) => boolean; /** * Check if a point is exactly equal to another. */ const equals: (point: Point, another: Point) => boolean; /** * Check if a value implements the `Point` interface. */ const isPoint: (value: any) => value is Point; /** * Transform a point by an operation. */ const transform: (point: Point, op: Operation, options?: { affinity?: "forward" | "backward" | null | undefined; }) => Point | null; } export { Point, PointKey }; //# sourceMappingURL=point.d.ts.map
def update_columns_list(columns_list, json_list, sep, int_to_float, remove_null, flatten_list):
    """Extend ``columns_list`` with every key present in ``json_list`` after the
    JSON objects are flattened by the (external) ``_transform_jsons`` helper,
    and return the de-duplicated list of column names."""
    data = _transform_jsons(json_list, sep, int_to_float, remove_null, flatten_list)
    cols = []
    for js in data:
        cols.extend(js.keys())
    columns_list = list(set(columns_list + cols))
    return columns_list
/** * triggerBumpyRide() completes all of the actions required for the Bumpy Ride event. * - Decides whether an injury occurs based on pilot's skill and a random number * - If yes, randomly selects a player and damages their health. * * @param player - the player who is serving as the pilot * @param ship - the ship being flown * @return A string detailing the events of the trip. */ private String triggerBumpyRide(Player player, Ship ship) { String path = "events.bumpyRide."; int damage = config.getInt(path + "damage"); boolean professionsEnabled = config.getBoolean("professionsEnabled"); String professionReq = config.getString("professionReqs.pilot.profession"); double randomDoub = 0; int injuryChance = 0; Random rand = new Random(); if (prof != null && professionsEnabled && professionReq != null) { injuryChance = config.getInt(path + "injuryChance." + prof.getTier(player.getUniqueId(), professionReq)); randomDoub = rand.nextDouble() * 100; } if (prof == null || professionReq == null || randomDoub > injuryChance) { return "The ship creaks and shudders, battered with whorls of wind. " + player.getDisplayName() + ChatColor.YELLOW + " expertly manoeuvres the ship through the atmosphere, and the tremors fade " + "away."; } else { Collection<? extends Player> onlinePlayers = Bukkit.getServer().getOnlinePlayers(); List<Player> playersOnShip = new ArrayList<>(); Location location = ship.getDock().getLocation(); for (Player p: onlinePlayers) if (p.getWorld().equals(location.getWorld()) && p.getLocation().getBlockX() >= location.getBlockX() && p.getLocation().getBlockX() <= location.getBlockX() + ship.getLength() && p.getLocation().getBlockY() >= location.getBlockY() && p.getLocation().getBlockY() <= location.getBlockY() + ship.getHeight() && p.getLocation().getBlockZ() >= location.getBlockZ() && p.getLocation().getBlockZ() <= location.getBlockZ() + ship.getWidth()) playersOnShip.add(p); if (playersOnShip.isEmpty()) throw new IllegalArgumentException("Error with bumpy ride event: no players detected on the ship!"); int randomInt = rand.nextInt(playersOnShip.size()); Player injuredPlayer = playersOnShip.get(randomInt); double newHealth = injuredPlayer.getHealth() - damage; if (newHealth < 2) injuredPlayer.setHealth(2); else injuredPlayer.setHealth(newHealth); return "The ship creaks and shudders, battered with whorls of wind as " + player.getDisplayName() + ChatColor.YELLOW + " struggles to manoeuvre the ship. Suddenly it lurches to one side, " + "throwing everyone to the floor. " + injuredPlayer.getDisplayName() + ChatColor.YELLOW + " is injured!"; } }
/**
 * Description : user provider Created by sunpengfei on 2017/8/4. Person in
 * charge : sunpengfei
 */
public class UserProvider {

    /**
     * Insert into the database
     *
     * @param userInfo user info
     * @return long
     */
    public static long addOrUpdateUserInfo(UserInfo userInfo) {
        return new UserModel().addOrUpdateUserInfo(userInfo);
    }

    /**
     * Batch insert into the database
     *
     * @param userInfos user info list
     */
    public static void addOrUpdateUserInfos(List<UserInfo> userInfos) {
        new UserModel().addOrUpdateUserInfos(userInfos);
    }

    /**
     * Get local userInfo from the database
     *
     * @param userId user id
     * @return UserInfo
     */
    public static UserInfo getUserInfoByDB(String userId) {
        if (TextUtils.isEmpty(userId)) {
            return null;
        }
        return new UserModel().getUserInfoByDB(userId);
    }

    /**
     * Batch get local userInfo from the database
     *
     * @param userIds user ids
     * @return List<UserInfo>
     */
    private static List<UserInfo> getUserInfosByDB(List<String> userIds) {
        if (userIds == null || userIds.size() == 0) {
            return null;
        }
        return new UserModel().getUserInfosByDB(userIds);
    }

    /**
     * Get user info from the server
     *
     * @param userId user id
     * @return Observable<UserInfo>
     */
    public static Observable<UserInfo> getUserInfoByServer(String userId) {
        if (TextUtils.isEmpty(userId)) {
            return Observable.just(null);
        }
        return UserServiceManager.selectUserInfo(userId).map(new Func1<UserInfo, UserInfo>() {
            @Override
            public UserInfo call(UserInfo userInfo) {
                addOrUpdateUserInfo(userInfo);
                return userInfo;
            }
        });
    }

    /**
     * Get user info (local database first, then the server)
     *
     * @param userId user id
     * @return Observable<UserInfo>
     */
    public static Observable<UserInfo> obtainUserInfo(String userId) {
        if (TextUtils.isEmpty(userId)) {
            return Observable.just(null);
        }
        UserInfo userInfo = new UserModel().getUserInfoByDB(userId);
        if (userInfo != null) {
            return Observable.just(userInfo);
        }
        return UserServiceManager.selectUserInfo(userId).map(new Func1<UserInfo, UserInfo>() {
            @Override
            public UserInfo call(UserInfo userInfo) {
                addOrUpdateUserInfo(userInfo);
                return userInfo;
            }
        });
    }

    /**
     * Batch get user info
     *
     * @param userIds user ids
     * @return Observable<UserInfo>
     */
    public static Observable<List<UserInfo>> obtainUserInfos(List<String> userIds) {
        if (userIds == null || userIds.size() == 0) {
            return Observable.just(null);
        }
        return UserServiceManager.selectUserInfos(userIds).map(new Func1<List<UserInfo>, List<UserInfo>>() {
            @Override
            public List<UserInfo> call(List<UserInfo> userInfos) {
                addOrUpdateUserInfos(userInfos);
                return userInfos;
            }
        });
    }

    /**
     * Update user info
     *
     * @param userInfo user info
     * @return Observable<Object>
     */
    public static Observable<Object> updateUserInfo(final UserInfo userInfo) {
        if (userInfo == null) {
            return Observable.just(null);
        }
        return UserServiceManager.updateUserInfo(userInfo).map(new Func1<Object, Object>() {
            @Override
            public Object call(Object o) {
                // update the database
                addOrUpdateUserInfo(userInfo);
                return o;
            }
        });
    }

    /**
     * Open frame
     *
     * @param activity context
     * @param userId user id
     */
    public static void openFrame(@NonNull Activity activity, String userId) {
        Intent intent = new Intent(activity, FrameActivity.class);
        intent.putExtra(FrameConfig.USER_ID, userId);
        activity.startActivity(intent);
    }

    /**
     * Open location
     *
     * @param activity context
     */
    public static void openLocation(@NonNull Activity activity, int requestCode) {
        Intent intent = new Intent(activity, LocationActivity.class);
        activity.startActivityForResult(intent, requestCode);
    }

    /**
     * Open location chooser
     *
     * @param activity context
     */
    public static void openChooseLocation(@NonNull Activity activity, int requestCode) {
        Intent it = new Intent(activity, MapFragmentLoadActivity.class);
        activity.startActivityForResult(it, requestCode);
    }

    /**
     * Open fans list
     *
     * @param activity context
     * @param userId user id
     */
    public static void openFans(@NotNull Activity activity, String userId) {
        Intent intent = new Intent(activity, TotalFansFocusActivity.class);
        intent.putExtra(FrameConfig.USER_ID, userId);
        intent.putExtra(UserConfig.USER_FANS_TYPE, UserConfig.UserFansType.FANS_TYPE);
        activity.startActivity(intent);
    }

    /**
     * Open follow list
     *
     * @param activity context
     * @param userId user id
     */
    public static void openFocus(@NotNull Activity activity, String userId) {
        Intent intent = new Intent(activity, TotalFansFocusActivity.class);
        intent.putExtra(FrameConfig.USER_ID, userId);
        intent.putExtra(UserConfig.USER_FANS_TYPE, UserConfig.UserFansType.FOCUS_TYPE);
        activity.startActivity(intent);
    }

    /**
     * Open my photo album
     *
     * @param context context
     * @param userId user id
     */
    public static void openMyPhoto(Activity context, String userId) {
        Intent intent = new Intent(context, MyPhotoActivity.class);
        intent.putExtra(FrameConfig.USER_ID, userId);
        context.startActivity(intent);
    }
}
class CanLoopback: """A class for capturing a mocked canbus and providing responses to messages. When built on a mocked CanMessenger, hooks the messenger's send mocked method and inserts provided responders. """ class LoopbackResponder(Protocol): """The provided callback function. Returned values will be sent.""" def __call__( self, node_id: NodeId, message: MessageDefinition, ) -> List[Tuple[NodeId, MessageDefinition, NodeId]]: """Main call method that will usually be a function. Args: node_id: Destination node id message: Message Return value: A tuple of (destination, message, source). """ ... def __init__( self, mock_messenger: AsyncMock, mock_notifier: MockCanMessageNotifier ) -> None: """Build the loopback. Args: mock_messenger: The messenger to wrap mock_notifier: A notifier to wrap """ self._mock_messenger = mock_messenger self._mock_notifier = mock_notifier self._responders: List[CanLoopback.LoopbackResponder] = [] self._mock_messenger.send.side_effect = self._listener def _listener(self, node_id: NodeId, message: MessageDefinition) -> None: for responder in self._responders: for response in responder(node_id, message): self._mock_notifier.notify( message=response[1], arbitration_id=ArbitrationId( parts=ArbitrationIdParts( message_id=response[1].message_id, originating_node_id=response[2], node_id=response[0], function_code=0, ) ), ) def add_responder(self, responder: "CanLoopback.LoopbackResponder") -> None: """Add a responder.""" self._responders.append(responder) def remove_responder(self, responder: "CanLoopback.LoopbackResponder") -> None: """Remove a responder.""" self._responders.remove(responder)
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package merge

import (
	"reflect"
	"testing"

	"github.com/coreos/ignition/v2/config/util"
	"github.com/coreos/ignition/v2/config/v3_0/types"

	"github.com/stretchr/testify/assert"
)

func TestMerge(t *testing.T) {
	type test struct {
		in1 types.Config
		in2 types.Config
		out types.Config
	}

	tests := []test{
		{
			// case 1: merging empty configs is empty
		},
		{
			in1: types.Config{
				Ignition: types.Ignition{Version: "1234"},
			},
			in2: types.Config{
				Ignition: types.Ignition{Version: "haha this isn't validated"},
			},
			out: types.Config{
				Ignition: types.Ignition{Version: "haha this isn't validated"},
			},
		},
		{
			in1: types.Config{
				Storage: types.Storage{
					Disks: []types.Disk{
						{
							Device:    "foo",
							WipeTable: util.BoolToPtr(true),
							Partitions: []types.Partition{
								{
									Number:   1,
									Label:    util.StrToPtr("label"),
									StartMiB: util.IntToPtr(4),
								},
							},
						},
						{
							Device:    "bar",
							WipeTable: util.BoolToPtr(true),
						},
					},
					Files: []types.File{
						{
							Node: types.Node{
								Path: "/foo",
							},
							FileEmbedded1: types.FileEmbedded1{
								Append: []types.FileContents{
									{
										Source: util.StrToPtr("source1"),
									},
								},
							},
						},
						{
							Node: types.Node{
								Path: "/bar",
							},
							FileEmbedded1: types.FileEmbedded1{
								Contents: types.FileContents{
									Compression: util.StrToPtr("gzip"),
								},
							},
						},
					},
				},
			},
			in2: types.Config{
				Storage: types.Storage{
					Disks: []types.Disk{
						{
							Device:    "foo",
							WipeTable: util.BoolToPtr(false),
							Partitions: []types.Partition{
								{
									Number: 1,
									Label:  util.StrToPtr("labelchanged"),
								},
								{
									Number: 2,
									Label:  util.StrToPtr("label2"),
								},
							},
						},
						{
							Device: "bar",
						},
						{
							Device:    "baz",
							WipeTable: util.BoolToPtr(true),
						},
					},
					Files: []types.File{
						{
							Node: types.Node{
								Path: "/foo",
							},
							FileEmbedded1: types.FileEmbedded1{
								Append: []types.FileContents{
									{
										Source: util.StrToPtr("source1"),
									},
									{
										Source: util.StrToPtr("source2"),
									},
								},
							},
						},
					},
					Directories: []types.Directory{
						{
							Node: types.Node{
								Path: "/bar",
							},
						},
					},
					Links: []types.Link{
						{
							Node: types.Node{
								Path: "/baz",
							},
						},
					},
				},
			},
			out: types.Config{
				Storage: types.Storage{
					Disks: []types.Disk{
						{
							Device:    "foo",
							WipeTable: util.BoolToPtr(false),
							Partitions: []types.Partition{
								{
									Number:   1,
									Label:    util.StrToPtr("labelchanged"),
									StartMiB: util.IntToPtr(4),
								},
								{
									Number: 2,
									Label:  util.StrToPtr("label2"),
								},
							},
						},
						{
							Device:    "bar",
							WipeTable: util.BoolToPtr(true),
						},
						{
							Device:    "baz",
							WipeTable: util.BoolToPtr(true),
						},
					},
					Files: []types.File{
						{
							Node: types.Node{
								Path: "/foo",
							},
							FileEmbedded1: types.FileEmbedded1{
								Append: []types.FileContents{
									{
										Source: util.StrToPtr("source1"),
									},
									{
										Source: util.StrToPtr("source1"),
									},
									{
										Source: util.StrToPtr("source2"),
									},
								},
							},
						},
					},
					Directories: []types.Directory{
						{
							Node: types.Node{
								Path: "/bar",
							},
						},
					},
					Links: []types.Link{
						{
							Node: types.Node{
								Path: "/baz",
							},
						},
					},
				},
			},
		},
	}

	for i, test := range tests {
		in1v := reflect.ValueOf(test.in1)
		in2v := reflect.ValueOf(test.in2)
		out := MergeStruct(in1v, in2v).Interface().(types.Config)
		assert.Equal(t, test.out, out, "#%d bad merge", i)
	}
}
//-- ControlPointEditor -------------------------------------------------------
//
// Copyright (C) 2006 <NAME>. All rights reserved.
//
// Simple GUI dialog used to modify transfer function control points (i.e.,
// color and opacity control points).
//
//----------------------------------------------------------------------------

#ifndef ControlPointEditor_h
#define ControlPointEditor_h

#include "ControlPointEditorBase.h"
#include <qdialog.h>

class MappingFrame;

namespace VAPoR {

class MapperFunction;
class OpacityMap;
class VColormap;

class ControlPointEditor : public QDialog, public Ui_ControlPointEditorBase
{
  Q_OBJECT

public:

  ControlPointEditor(MappingFrame* parent, OpacityMap *map, int cp);
  ControlPointEditor(MappingFrame* parent, VColormap *map, int cp);

  ~ControlPointEditor();

  void update() { initWidgets(); }

protected:
  QColor tempColor;
  void initWidgets();
  void initConnections();

  float dataValue();
  int   toIndex(float);
  float toData(int);

protected slots:

  void dataValueChanged();
  void indexValueChanged();
  void pickColor();
  void okHandler();
  void cancelHandler();

private:

  int _controlPoint;

  MapperFunction *_mapper;
  OpacityMap     *_omap;
  VColormap      *_cmap;
};

}; // VAPoR namespace

#endif
Acute and chronic effects of nitrendipine in patients with precapillary pulmonary hypertension due to pulmonary fibrosis Eleven patients with histologically confirmed fibrosis of the lung were investigated for the effects of the dihydropyridine calcium antagonist nitrendipine on pulmonary hemodynamics. After 5 mg of acute sublingual nitrendipine, mean pulmonary artery pressure was significantly lowered (p≤0.05) from 32 ± 3 to 29 ± 3 mmHg at rest, and significantly lowered (p≤0.05) during exercise from 55 ± 4 to 49 ± 4 mmHg. Short‐term oxygen application at rest significantly reduced this parameter to 28 ± 3 mmHg(p≤0.01). Nitrendipine lowered total pulmonary vascular resistance during both rest (from 412 ± 50 to 351 ± 49 dyn•S•cm‐5; p≤0.05) and exercise (from 433 ± 61 to 383 ± 54 dyn•s•cm‐5; p≤0.05), although it did not affect pulmonary arteriolar resistance. Also, oxygen treatment at rest influenced only total pulmonary vascular resistance (reduction from 412 ± 50 to 373 ± 48 dyn•s•cm‐5; p≤0.01), but not pulmonary arteriolar resistance. Pressure‐flow curves, which were derived from cardiac output at rest and during exercise and from the corresponding gradient between mean pulmonary artery pressure and pulmonary capillary wedge pressure, remained unchanged by acute medication. Since a change in arterial oxygen partial pressure was not noticed after nitrendipine, arteriovenous shunting or a worsening of ventilation perfusion relationships can be excluded. Long‐term (3 weeks) treatment (double‐blind parallel design) with 10 mg of nitrendipine (4 patients) once daily showed no advantage in comparison to placebo (6 patients). From these observations we conclude that the reduction of lower circulatory pressures after nitrendipine is not caused by pulmonary arteriolar vasodilation, but rather by indirect effects due to the reduction of total peripheral resistance and left ventricular filling pressure.
Here are five things you need to know about the mortgage market as the spring home-buying season gets going:

1. That 2.99 per cent Bank of Montreal five-year mortgage isn't quite as good as it sounds.

BMO's recent move to bring its rate below the psychologically significant 3-per-cent mark for fixed-rate five-year mortgages is being treated as a big deal because a similar move a year ago provoked then-finance minister Jim Flaherty to admonish the bank. Joe Oliver, Mr. Flaherty's successor, is taking a more laissez-faire attitude.

What BMO is offering until April 17 is a competitive rate in a mortgage with uncompetitive terms. Most importantly, you can't break this mortgage before it comes up for renewal in five years unless you sell the property, refinance with BMO or do an early renewal into another BMO product. All the usual prepayment penalties would apply in these situations. Veteran mortgage broker Vince Gaetano's summary: "You're handcuffed."

Other issues:

-BMO will hold the rate for 90 days, compared with 120 days at some other lenders.

-You can prepay 10 per cent of the mortgage annually without penalty and increase your payment by 10 per cent a year; 20 per cent is the usual standard for both types of payment increase.

-The skip-a-payment option – a bad idea, admittedly – is not available.

-The maximum amortization period is 25 years; you can typically go up to 30 years if you have a down payment of 20 per cent or more.

2. You can do better than 2.99 per cent.

Mr. Gaetano said late last week that he had a 2.84-per-cent rate on five-year fixed mortgages, but it only applied to clients who had down payments of less than 20 per cent and thus required mortgage default insurance. The RateSpy.com website confirmed this rate from Mr. Gaetano's firm, Monster Mortgage, while also showing competing brokers and credit unions with rates in the range of 2.83 per cent to 2.94 per cent. Some other rate comparison sites to try include RateSupermarket.ca, RateHub.ca and LowestRates.ca.

3. We will see wide open rate competition this spring.

"I think there will be a full-scale rate war with some mortgage brokers," said Bruce Joseph, a broker with Anthem Mortgage Group in Barrie, Ont. "We've got a huge amount of competition in the market. The market is quite saturated with realtors and brokers."

Mr. Joseph wonders whether we'll see more of a practice called "mortgage rate buydowns," where brokers sacrifice some of their compensation from selling a mortgage in order to get a lower rate for the client. He said some brokerage firms have been aggressive users of buydowns to build sales volume. Borrowers, there's nothing to stop you from asking for a rate buydown. You just have to recognize that less compensation for a broker may mean less advice and hand-holding.

4. Variable-rate mortgages are looking good.

Rates on variable-rate mortgages are based on the major banks' prime lending rate, which has been stuck at 3 per cent since September, 2010, minus a discount. Mr. Gaetano said discounts have widened out to 0.6 percentage points or more from roughly half that level about eight months ago, and that means a variable rate around 2.4 per cent. His preference for variable-rate mortgages over the fixed-rate alternative right now is based both on the discounts being offered, and his interest rate outlook.
"I don't think rates are going anywhere soon, and getting a variable in the prime minus 0.60 range give you a considerable advantage in hammering down a mortgage." That said, many of Mr. Gaetano's first-time home buyer clients are going with five-year fixed-rate mortgages, which is smart. In today's expensive housing market, it makes good sense to buy yourself a five-year period to find your financial equilibrium as a homeowner without the risk that your payments will rise. 5. The banks will crush you if you want to break your mortgage. The penalties that the big banks charge to break a mortgage before it comes up for renewal are abusive. They're a far more deserving target for the federal finance minister than lenders aggressively undercutting each other on mortgage rates. Story continues below advertisement Get the lowdown on bank mortgage penalties in this column I wrote not too long ago. If there's any chance you might have to break your mortgage – brokers say this is by no means unusual – then consider using a non-big bank lender with a lighter touch on penalties. These same lenders are often good on rates, too. Follow me on Twitter: @rcarrick ------ Why low mortgage rates matter Even small differences in payments can add up. Assumptions Story continues below advertisement -you're buying a house at the average national price in February of $406,372 -you have a 5 per cent down payment -CMHC mortgage insurance costs are added to your principal (table source: RateSpy.com, Canequity.com)
package ru.resql; import ru.resql.orm.stream.OrmSqlStreamRecordSource; import java.sql.ResultSet; import java.util.function.Supplier; @FunctionalInterface public interface StreamMapper<ReturnType> { OrmSqlStreamRecordSource<ReturnType> process(Supplier<ReturnType> factory, ResultSet resultSet); }
// NewMatcher creates a new Matcher. func NewMatcher(rule *jwtauthnv3.RequirementRule) Matcher { switch rule.GetMatch().PathSpecifier.(type) { case *routev3.RouteMatch_Prefix: return newPrefixMatcher(rule) default: return newPathMatcher(rule) } }
#include <iostream>
#include <vector>
#include <algorithm>
#include <cstdio>
using namespace std;
#define rep(i,n) for(int i = 0; i < n; ++i)

class Node{
public:
    int sum;
    vector<Node> ch;
    void calc(){
        if(ch.empty()) return;  // leaf: sum was already set while parsing
        if(ch[0].sum==0){
            rep(i,ch.size()){
                ch[i].calc();
            }
        }
        sort(ch.begin(),ch.end());
        // a majority of the child districts decides this level
        rep(i,(ch.size()+1)/2){
            sum+=ch[i].sum;
        }
    }
    Node(){sum=0;ch=vector<Node>();}
};

void paser2(Node &n);

// Parse one bracketed element: either a nested list or a voter count.
// Note: the original mixed getchar() with cin.putback()/cin>>, which pushes
// characters back into a buffer getchar() never sees; using ungetc()/scanf()
// keeps all input on the stdio side.
Node paser(){
    Node n;
    getchar();              // consume '['
    char c=getchar();
    if(c=='['){
        ungetc(c,stdin);
        paser2(n);
    }
    else if(c>='0'&&c<='9') {
        int d;
        ungetc(c,stdin);
        scanf("%d",&d);
        n.sum=(d+1)/2;      // minimum votes for a majority of d voters
    }
    getchar();              // consume ']'
    return n;
}

void paser2(Node &n){
    while(1){
        char c=getchar();
        if(c!='['){ungetc(c,stdin);break;}
        ungetc(c,stdin);
        n.ch.push_back(paser());
    }
}

bool operator<(const Node &a,const Node &b){
    return a.sum<b.sum;
}

int main(){
    int N;scanf("%d",&N);
    rep(i,N){
        getchar();          // consume the newline before each dataset
        Node n=paser();
        n.calc();
        cout<<n.sum<<endl;
    }
}
/** * Authenticates a user. * * @param credentials the user credentials * @return the controller's response. */ @PostMapping(value = "/api/v1/auth/login") public RestResponse login(@RequestBody Credentials credentials) { try { AuthenticatedUser user = authenticate(credentials, EnumRole.ROLE_USER); Profile profile = profileRepository.getProfileByUserKey(user.getKey(), EnumApplication.MOBILE); profileRepository.updateMobileVersion(user.getKey(), credentials.getVersion()); return new AuthenticationResponse(getRuntime(), profile, user.roleToStringArray()); } catch (Exception ex) { logger.error(ex.getMessage(), ex); return new RestResponse(getError(ex)); } }
So far, we’ve heard a lot of bluster coming from Barack Obama and his allies on Capitol Hill about how the voters will punish Republicans for opposing a series of gun-control measures that didn’t even keep all the Democrats in the fold. The real problem with “the audacity of mope,” as National Hotline’s Josh Kraushaar writes today, is that voters may end up punishing Democrats in key 2014 Senate contests. Obama and the Democrats just learned the wrong lesson over their spectacular and embarrassing failure, and may lose the Senate as a result: If this doesn’t demonstrate the limitations of the president’s political muscle and the influence of his newly minted Organizing for Action lobbying group, I don’t know what does. Yet, despite the embarrassing setback, Obama nonetheless argued that he still held the upper hand, politically: “If this Congress refuses to listen to the American people and pass commonsense gun legislation, then the real impact is going to have to come from the voters.” That couldn’t misread the political environment heading into 2014 anymore. That’s the audacity of mope. Put simply, the 2014 Senate elections will be fought predominantly on the very turf that is most inhospitable to gun control–Southern and Mountain West conservative states. It’s no coincidence that three of the four Democrats who opposed the Toomey-Manchin bill are facing difficult reelections in 2014 and presumably are attuned to the sentiments of their constituents. Blame the National Rifle Association for the bill’s failure, but the lobby is feeding into already deeply held opposition to gun regulations and a broader sense of anxiety about the president’s and New York City Mayor Michael Bloomberg’s intentions–particularly given the president’s past publicized remark about “bitter” rural voters who “cling to their guns and religion.” It doesn’t take much for the gun-rights crowd, significant in these states, to jump to inaccurate conclusions given that history. And how do the White House or allied groups plan on punishing gun-control opponents? The notion of challenging the Second Amendment is as fanciful as it is self-defeating. Democratic primary voters in the deep South have significantly different views on gun rights than their coastal counterparts. Even if they support expanded background checks, the chance of landing a candidate running a one-issue campaign against brand-name Democrats like Mark Pryor and Mark Begich defies common sense. Three years ago in Arkansas, liberals poured their money and manpower in to defeat former Sen. Blanche Lincoln in a primary with the state’s lieutenant governor. Even though Lincoln was unpopular in the state–later losing reelection to Republican Sen. John Boozman by 21 points–she fended off the challenge. In fact, Kraushaar wonders if Obama is signaling to Democrats who abandoned him that he won’t be doing much to stave off Republican challengers next year: Surely the Democratic Senatorial Campaign Committee, which has its hands full with the competing interests of its incumbents, doesn’t want to see the type of internal conflict that’s riven their Republican counterparts over the last four years. They’ve encouraged their vulnerable Southern members up for reelection to cultivate independent brands, to show they don’t follow the president blindly. That’s what Pryor, Begich, and Sen. Max Baucus of Montana did in opposing the background-check compromise. 
Obama didn’t say it outright, but he came awfully close to suggesting he won’t be supporting members of his own party who deserted him at a key moment of his presidency. Kraushaar later concludes that Obama’s impotence on gun control will damage the chances for a White House win on immigration reform. Perhaps, but the two issues don’t line up all that closely; there are different constituencies in gun rights and immigration reform, and the problem in the latter issue is clearly a lack of government action for people on both sides of the issue. It might lessen the influence that Obama has on the outcome (if any) for immigration reform, but Obama hasn’t really been part of that effort in any significant degree anyway. That wasn’t the case on gun control, where Obama pulled his party into a fight it had long avoided, and for good reasons. Kraushaar’s colleague Jill Lawrence claims that Democrats will continue fighting for gun control, blaming an “intensity gap” for Obama’s loss. But is that what really happened? Even her own analysis seems to argue otherwise: Many blame the intensity gap for what Giffords describes as a Senate in thrall to the gun lobby. How intense is the NRA? Here’s an example from former Sen. Ted Kaufman, who was Vice President Joe Biden’s chief of staff in 1994,when Biden was the lead senator on a crime bill that included a 10-year assault-weapons ban. During Biden’s 1996 campaign, Kaufman told me, a fellow from Biden’s office was going fishing in rural southern Delaware. He drove down a dirt road, got out, and walked another mile, to a stream, “and some guy comes by and hands him an anti-Joe Biden leaflet from the NRA,” Kaufman said. “These are incredibly dedicated folks.” But … Bloomberg was the first to play hardball in campaigns through his super PAC, Independence USA. The other group he founded, the bipartisan Mayors Against Illegal Guns, announced this week it is scoring senators on their gun votes–just like the National Rifle Association. Obama is urging Americans to “sustain some passion about this” and tell members of Congress that if they don’t support expanded background checks, “you will remember come election time.” There’s no doubt that Organizing for Action, the political group dedicated to his agenda, will remember. So will Giffords, the former congresswoman who was shot in the head two years ago in Tucson, Ariz., and now leads gun-safety efforts through her group Americans for Responsible Solutions. “Mark my words: If we cannot make our communities safer with the Congress we have now, we will use every means available to make sure we have a different Congress, one that puts communities’ interests ahead of the gun lobby’s,” she wrote Wednesday night in a gut-wrenching New York Times op-ed. A day before the Senate vote, Mark Kelly–Giffords’s husband–said they will try to oust Sen. Jeff Flake, R-Ariz., a longtime Giffords friend, if he opposed sensible gun-safety measures. His vote Wednesday helped kill the bipartisan compromise that would have expanded background checks to online and gun-show sales. Yet Flake is not up for reelection until 2018, and therein lies the challenge: Will anyone remember this issue, and the impact of these votes, by then? 
Combine that with the massive grassroots efforts from Obama’s OFA and the media blitz that overwhelmingly favored gun-control legislation, and you have to wonder how anyone could believe that the NRA was winning an “intensity gap.” Morning Joe, the most moderate of MSNBC’s shows, and CNN’s primetime host Piers Morgan went on full-time campaigns for the assault-weapons ban and universal background checks, for instance. All of this took place in the immediate aftermath of a horrific massacre of the most innocent of victims. And yet, Gallup showed that only 4% thought gun control was the most important issue facing the nation, even after all the hysterical coverage and rhetoric. Obama lost because he miscalculated the mood of the nation, and the danger to Democrats in pushing gun control. And that may make him a very lonely Democrat in DC by 2015. Update: Matt Lewis sums up the “90% support” issue nicely:
def normalized_query_data(self):
    # Normalize this runner's query data against every other active runner in
    # the same race, so feature values are comparable across the field.
    with self.get_normalizer_lock(self.runner.race):
        if self['normalized_query_data'] is None:
            # Build a matrix with this runner's imputed data in row 0, followed
            # by the imputed data of all other active runners in the race
            all_query_data = numpy.asarray(
                [self.imputed_query_data]
                + [runner.sample.imputed_query_data
                   for runner in self.runner.race.active_runners
                   if runner['_id'] != self.runner['_id']])
            # Normalize each feature column across runners, then keep row 0
            # (this runner) and cache the result
            self['normalized_query_data'] = sklearn.preprocessing.normalize(
                all_query_data, axis=0).tolist()[0]
            self.provider.save(self)
        return self['normalized_query_data']
def insert_chain(self, chain):
    """Walk `chain` through the tree rooted at ``self.data``, descending while
    each node already exists, and add only the first missing node.

    Note that at most one new node is added per call; repeated calls extend
    the stored chain one node at a time.
    """
    ref = self.data
    for i, node in enumerate(chain):
        if i == 0:
            # the head of the chain corresponds to self.data itself
            continue
        ref_node = ref.node(node)
        if ref_node:
            # node already present: descend and keep matching
            ref = ref_node
        else:
            # first missing node: attach it here and stop
            ref.add(node)
            break
Bluetooth Network-Based Misuse Detection Bluetooth, a protocol designed to replace peripheral cables, has grown steadily over the last five years and includes a variety of applications. The Bluetooth protocol operates on a wide variety of mobile and wireless devices and is nearly ubiquitous. Several attacks exist that successfully target and exploit Bluetooth enabled devices. This paper describes the implementation of a network intrusion detection system for discovering malicious Bluetooth traffic. The work improves upon existing techniques, which only detect a limited set of attacks (based on measuring anomalies in the power levels of the Bluetooth device). The new method identifies reconnaissance, denial of service, and information theft attacks on Bluetooth enabled devices, using signatures of the attacks. Furthermore, this system includes an intrusion response component to detect attacks in progress, based on the attack classification. This paper presents the implementation of the Bluetooth intrusion detection system and demonstrates its detection, analysis, and response capabilities. The tool includes a visualization interface to facilitate the understanding of Bluetooth enabled attacks. The experimental results show that the system can significantly improve the overall security of an organization by identifying and responding to threats posed to the Bluetooth protocol.
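The abstract describes signature-based classification of reconnaissance, denial-of-service, and information-theft attacks, with a response keyed to the attack class. The sketch below illustrates that general idea only; the signature strings, class names, and responses are all hypothetical and are not taken from the paper.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class BluetoothSignatureMatcher {
    enum AttackClass { RECONNAISSANCE, DENIAL_OF_SERVICE, INFORMATION_THEFT }

    // Hypothetical payload signatures mapped to the three attack classes
    private final Map<String, AttackClass> signatures = new LinkedHashMap<>();

    public BluetoothSignatureMatcher() {
        signatures.put("SDP_SERVICE_SEARCH_FLOOD", AttackClass.RECONNAISSANCE);
        signatures.put("L2CAP_ECHO_OVERSIZED", AttackClass.DENIAL_OF_SERVICE);
        signatures.put("OBEX_UNAUTH_GET", AttackClass.INFORMATION_THEFT);
    }

    /** Return the first matching attack class, or null for benign traffic. */
    public AttackClass classify(String decodedPacket) {
        for (Map.Entry<String, AttackClass> e : signatures.entrySet()) {
            if (decodedPacket.contains(e.getKey())) {
                return e.getValue();
            }
        }
        return null;
    }

    /** Pick a response based on the (non-null) classification. */
    public String respond(AttackClass attack) {
        switch (attack) {
            case DENIAL_OF_SERVICE: return "drop connection and blacklist device";
            case INFORMATION_THEFT: return "terminate OBEX session and alert";
            default:                return "log and continue monitoring";
        }
    }
}
```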
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

/**
 * a set that can contain duplicates.
 *
 * Backed by an ArrayList so that duplicate elements are retained.
 *
 * @author erelsgl
 */
public class Multiset<T> implements Collection<T> {

	private final List<T> elements = new ArrayList<T>();

	@Override
	public boolean add(T arg0) {
		return elements.add(arg0);
	}

	@Override
	public boolean addAll(Collection<? extends T> arg0) {
		return elements.addAll(arg0);
	}

	@Override
	public void clear() {
		elements.clear();
	}

	@Override
	public boolean contains(Object arg0) {
		return elements.contains(arg0);
	}

	@Override
	public boolean containsAll(Collection<?> arg0) {
		return elements.containsAll(arg0);
	}

	@Override
	public boolean isEmpty() {
		return elements.isEmpty();
	}

	@Override
	public Iterator<T> iterator() {
		return elements.iterator();
	}

	@Override
	public boolean remove(Object arg0) {
		// removes a single occurrence only
		return elements.remove(arg0);
	}

	@Override
	public boolean removeAll(Collection<?> arg0) {
		return elements.removeAll(arg0);
	}

	@Override
	public boolean retainAll(Collection<?> arg0) {
		return elements.retainAll(arg0);
	}

	@Override
	public int size() {
		return elements.size();
	}

	@Override
	public Object[] toArray() {
		return elements.toArray();
	}

	@Override
	public <T> T[] toArray(T[] arg0) {
		return elements.toArray(arg0);
	}
}
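A quick usage sketch of the list-backed implementation above, showing that duplicates are retained and that remove() takes out a single occurrence:

```java
Multiset<String> bag = new Multiset<String>();
bag.add("apple");
bag.add("apple");
bag.add("banana");
System.out.println(bag.size());            // 3 -- both "apple"s are retained
bag.remove("apple");
System.out.println(bag.contains("apple")); // true -- one occurrence remains
```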
/** * Contains Command Line Interface (CLI) syntax definitions common to multiple commands */ public class CliSyntax { /* Prefix definitions */ public static final Prefix PREFIX_NAME = new Prefix("n/"); public static final Prefix PREFIX_PHONE = new Prefix("p/"); public static final Prefix PREFIX_EMAIL = new Prefix("e/"); public static final Prefix PREFIX_ADDRESS = new Prefix("a/"); public static final Prefix PREFIX_TAG = new Prefix("t/"); /* Added prefix definitions */ public static final Prefix PREFIX_TASKNAME = new Prefix("tn/"); public static final Prefix PREFIX_DATETIME = new Prefix("dt/"); public static final Prefix PREFIX_PERSON = new Prefix("p/"); public static final Prefix PREFIX_GIT_USERNAME = new Prefix("u/"); public static final Prefix PREFIX_LINK = new Prefix("z/"); public static final Prefix PREFIX_RECURRING = new Prefix("r/"); public static final Prefix PREFIX_LIST_ALL_TASK = new Prefix("all/"); public static final Prefix PREFIX_LIST_INCOMPLETE_TASK = new Prefix("nc/"); public static final Prefix PREFIX_LIST_COMPLETE_TASK = new Prefix("c/"); public static final Prefix PREFIX_FILEPATH = new Prefix("fp/"); }
// Write parts of XSL file to output private static void printXslPrologue(PrintWriter out, String levelname) { out .print("<xsl:stylesheet xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" version=\"1.0\"\n" + "xmlns:mmax=\"org.eml.MMAX2.discourse.MMAX2DiscourseLoader\"\n" + "xmlns:" + levelname + "=\"www.eml.org/NameSpaces/" + levelname + "\">\n" + "<xsl:output method=\"text\" indent=\"yes\" omit-xml-declaration=\"yes\"/>\n" + "<xsl:strip-space elements=\"*\"/>\n" + "\n" + "<xsl:template match=\"words\">\n" + "<xsl:apply-templates/>\n" + "</xsl:template>\n" + "\n" + "<xsl:template match=\"word\">\n" + "<xsl:choose>\n" + ""); }
/** * Check object for null values * * @param v object to be checked * @param t class * @return checked object */ public SystemParameter check(SystemParameter v, Class<SystemParameter> t) { if (v == null) v = new SystemParameter(); v.setCode(this.check(v.getCode(), String.class)); v.setDescription(this.check(v.getDescription(), String.class)); return v; }
#include <stdio.h> #include <stdlib.h> #include <math.h> int zeros[200000], ones[200000], ns[20000], subs[20000], a[200000]; int main(){ int t,i,n,j,z,o,c,*ans=a; char q; scanf("%d",&t); for(i=0;i<t;i++){ scanf("%d\n", &ns[i]); z=0; o=0; c=0; for(j=0;j<ns[i];j++){ scanf("%c", &q); if(q=='1'){ if(z==0){ c++; ones[o]=c; o++; ans[j]=c; } else{ z--; ones[o]=zeros[z]; o++; ans[j]=zeros[z]; } } else{ if(o==0){ c++; zeros[z]=c; z++; ans[j]=c; } else{ o--; zeros[z]=ones[o]; z++; ans[j]=ones[o]; } } } subs[i]=c; ans+=ns[i]; } ans=a; for(i=0;i<t;i++){ printf("\n%d\n", subs[i]); for(j=0;j<ns[i];j++) printf("%d ", ans[j]); ans+=ns[i]; } return 0; }
If you saw Kate Upton or Jennifer Lawrence naked last week, there's a good chance you saw them on the social news site Reddit. The self-proclaimed "front page of the Internet" was one of the main outlets linking to the celebrity nude photographs hacked from Apple's iCloud accounts and leaked across the web. Over the weekend, Reddit cleaned up the portions of the site devoted to the stolen photos—but not before it had made a significant chunk of revenue from its role in the massive celebrity sext-spillage.1

In just six days, Reddit earned enough money from the nude pics scandal to power its servers for roughly a month, says John Menese, the 33-year-old creator of a Reddit sub-forum expressly launched to share the photos. That statistic, he says, is based on how many times members of the subreddit paid for so-called Reddit "gold," the $3.99-per-month premium accounts that users often gift to each other to bestow a few extra features and prestige. Each subreddit publicly displays the amount of server time paid for by its members' Reddit gold, and Menese tracked his forum's contribution until just before it went offline. His estimate of the site's take from the sext scandal doesn't include any advertising revenue the site may have made from the quarter billion pageviews Menese's subreddit created during its short time on the web.

"If Reddit had wanted to, they could have banned us on Sunday when our traffic broke their servers," says Menese, a 33-year-old salesman at a Las Vegas call center. "Instead, they chose to milk a week of publicity and a month of server time in Reddit gold before they stepped in."

Menese and another moderator of the subreddit, which they called TheFappening in a reference to Reddit's lingo for masturbation, say that Reddit credited their users for paying for at least 27 days' worth of site server time before the forum was banned Saturday. For comparison, that would mean it generated about half as much revenue from Reddit gold in six days as the site's "programming" subreddit, the oldest on the site, earned in the four years since Reddit's gold program was created.

Reddit staff didn't respond to WIRED's request for comment on its financial rewards from the TheFappening scandal. But one administrator admitted in a long note about the staff's ambivalence on the issue that it had "hit new traffic milestones, ones which I'd be ashamed to share publicly."

That immense traffic, however, already was waning when Reddit banned TheFappening. At its peak on September 1st, the site pulled in 141 million visitors in a day, according to numbers Menese accessed as a moderator of the subreddit. By September 2, it only attracted 45 million pageviews. By September 6, when Reddit finally pulled TheFappening from the site, the majority of the forum's visitors likely had moved on. "It's sad that Reddit already made their money and then made a show of banning the site," says Menese.

That belated filtering, long after Reddit had received the majority of the scandal's financial benefits, provides ammunition to critics; they accuse it of profiting from its anything-goes community at the expense of victims like the women whose photos were hacked from Apple's iCloud accounts and subjected to its users' horny feeding frenzy. The issue is particularly timely as the site seeks to raise a new round of investment at a valuation higher than $500 million. As T.C.
Sottek wrote at the Verge, "Reddit is a kleptocracy that speaks to lofty virtues while profiting from vice," and went on to compare TheFappening to "sexual assault, condoned by a state that earns revenue from it."

In a statement on the scandal, Reddit CEO Yishan Wong was sympathetic but unapologetic about Reddit's linking to the photos. "We understand the harm that misusing our site does to the victims of this theft, and we deeply sympathize," he wrote. "Having said that, we are unlikely to make changes to our existing site content policies in response to this specific event." (Reddit is owned by Advance Publications, the parent company of Condé Nast, which owns WIRED.)

Menese, for his part, is unrepentant about his involvement in violating the privacy of a dozen innocent women. He argues that TheFappening only linked to the images, and that he wasn't involved in their initial theft. He points to other existing corners of Reddit focused on stolen nudes, like the "celebs" subreddit or "candid fashion police," where users post creep shots of women under the guise of critiquing their fashion sense. "There are lots of other subreddits that have questionable content," he says. "But they're still up right now because people whose photos are on them don't have lawyers."

He's still not sure why those sites—along with far more hideous ones like WatchPeopleDie and SexyAbortions—are allowed to persist while his own forum was banned. "Reddit basically stands up for free speech until it becomes inconvenient for them to do so," he says. Or, he might have added, until it no longer helps them pay their server costs.

1. Correction 3:25pm 9/10/2014: An earlier version of this story stated that Reddit hosted the stolen images, when in fact they were merely linked from the site.
/**
 * PCHGChunk
 *
 * @author <a href="mailto:[email protected]">Harald Kuhr</a>
 * @author last modified by $Author: haraldk$
 * @version $Id: PCHGChunk.java,v 1.0 27.03.12 13:02 haraldk Exp$
 */
final class PCHGChunk extends AbstractMultiPaletteChunk {
    // NOTE: Values from ilbm2ppm.
    final static int PCHG_COMP_NONE = 0;
    final static int PCHG_COMP_HUFFMAN = 1;

    /** Use SmallLineChanges */
    final static int PCHGF_12BIT = 1; // NOTE: The beta spec refers to this as PHCGF_4BIT
    /** Use BigLineChanges */
    final static int PCHGF_32BIT = 2;
    /** meaningful only if PCHG_32BIT is on: use the Alpha channel info */
    final static int PCHGF_USE_ALPHA = 4;

    private int startLine;
    private int changedLines;
    private int lineCount;
    private int totalChanges;
    private int minReg;

    PCHGChunk(int chunkLength) {
        super(IFF.CHUNK_PCHG, chunkLength);
    }

    @Override
    void readChunk(final DataInput input) throws IOException {
        int compression = input.readUnsignedShort();
        int flags = input.readUnsignedShort();
        startLine = input.readShort();
        lineCount = input.readUnsignedShort();
        changedLines = input.readUnsignedShort();
        minReg = input.readUnsignedShort();
        int maxReg = input.readUnsignedShort();
        /*int maxChangesPerLine = */ input.readUnsignedShort(); // We don't really care, as we're not limited by the Amiga display hardware
        totalChanges = input.readInt();

        byte[] data;
        switch (compression) {
            case PCHG_COMP_NONE:
                data = new byte[chunkLength - 20];
                input.readFully(data);
                break;
            case PCHG_COMP_HUFFMAN:
                // NOTE: Huffman decompression is completely untested, due to lack of source data (read: Probably broken).
                int compInfoSize = input.readInt();
                int originalDataSize = input.readInt();

                short[] compTree = new short[compInfoSize / 2];
                for (int i = 0; i < compTree.length; i++) {
                    compTree[i] = input.readShort();
                }

                byte[] compData = new byte[chunkLength - 20 - 8 - compInfoSize];
                input.readFully(compData);

                data = new byte[originalDataSize];

                // decompress the change structure data
                decompressHuffman(compData, data, compTree, data.length);
                break; // without this break, a successfully decoded Huffman chunk fell through to the default case and always threw
            default:
                throw new IIOException("Unknown PCHG compression: " + compression);
        }

        changes = new MutableIndexColorModel.PaletteChange[startLine + lineCount][];

        if (startLine < 0) {
            int numChanges = maxReg - minReg + 1;

            initialChanges = new MutableIndexColorModel.PaletteChange[numChanges];
        }

        // TODO: Postpone conversion to when the data is actually needed
        parseChanges(data, flags);
    }

    static void decompressHuffman(byte[] src, byte[] dest, short[] tree, int origSize) {
        int i = 0;
        int bits = 0;
        int thisbyte = 0;

        int treeIdx = tree.length - 1;
        int srcIdx = 0;
        int destIdx = 0;

        while (i < origSize) {
            if (bits == 0) {
                thisbyte = src[srcIdx++];
                bits = 8;
            }

            if ((thisbyte & (1 << 7)) != 0) {
                if (tree[treeIdx] >= 0) {
                    dest[destIdx++] = (byte) tree[treeIdx];
                    i++;
                    treeIdx = tree.length - 1;
                }
                else {
                    treeIdx += tree[treeIdx] / 2;
                }
            }
            else {
                treeIdx--;

                if (tree[treeIdx] > 0 && (tree[treeIdx] & 0x100) != 0) {
                    dest[destIdx++] = (byte) tree[treeIdx];
                    i++;
                    treeIdx = tree.length - 1;
                }
            }

            thisbyte <<= 1;
            bits--;
        }
    }

    private void parseChanges(final byte[] data, int flags) throws IIOException {
        boolean small;

        if ((flags & PCHGF_12BIT) != 0) {
            small = true;
        }
        else if ((flags & PCHGF_32BIT) != 0) {
            if ((flags & PCHGF_USE_ALPHA) != 0) {
                // TODO: Warning, or actually implement
                new IIOException("Alpha currently not supported.").printStackTrace();
            }

            small = false;
        }
        else {
            throw new IIOException("Missing PCHG 12/32 bit flag.");
        }

        int thismask = 0;
        int changeCount;
        int totalchanges = 0;
        int changedlines = changedLines;

        int maskBytesLeft = 4 *
((lineCount + 31) / 32); int maskIdx = 0; int dataIdx = maskBytesLeft; int dataBytesLeft = data.length - maskBytesLeft; int bits = 0; for (int row = startLine; changedlines != 0 && row < 0; row++) { if (bits == 0) { if (maskBytesLeft == 0) { throw new IIOException("Insufficient data in line mask"); } thismask = data[maskIdx++]; --maskBytesLeft; bits = 8; } if ((thismask & (1 << 7)) != 0) { if (dataBytesLeft < 2) { throw new IIOException("Insufficient data in SmallLineChanges structures: " + dataBytesLeft); } int changeCount16 = 0; if (small) { changeCount16 = data[dataIdx++] & 0xff; changeCount = changeCount16 + (data[dataIdx++] & 0xff); } else { changeCount = toShort(data, dataIdx); dataIdx += 2; } dataBytesLeft -= 2; for (int i = 0; i < changeCount; i++) { if (totalchanges >= this.totalChanges) { throw new IIOException("Insufficient data in SmallLineChanges structures (changeCount): " + totalchanges); } if (dataBytesLeft < 2) { throw new IIOException("Insufficient data in SmallLineChanges structures: " + dataBytesLeft); } // TODO: Make PaletteChange immutable with constructor params, assign outside test? if (small) { int smallChange = toShort(data, dataIdx); dataIdx += 2; dataBytesLeft -= 2; int reg = ((smallChange & 0xf000) >> 12) + (i >= changeCount16 ? 16 : 0); initialChanges[reg - minReg] = new MutableIndexColorModel.PaletteChange(); initialChanges[reg - minReg].index = reg; initialChanges[reg - minReg].r = (byte) (((smallChange & 0x0f00) >> 8) * FACTOR_4BIT); initialChanges[reg - minReg].g = (byte) (((smallChange & 0x00f0) >> 4) * FACTOR_4BIT); initialChanges[reg - minReg].b = (byte) (((smallChange & 0x000f) ) * FACTOR_4BIT); } else { int reg = toShort(data, dataIdx); dataIdx += 2; initialChanges[reg - minReg] = new MutableIndexColorModel.PaletteChange(); initialChanges[reg - minReg].index = reg; dataIdx++; /* skip alpha */ initialChanges[reg - minReg].r = data[dataIdx++]; initialChanges[reg - minReg].b = data[dataIdx++]; /* yes, RBG */ initialChanges[reg - minReg].g = data[dataIdx++]; dataBytesLeft -= 6; } ++totalchanges; } --changedlines; } thismask <<= 1; bits--; } for (int row = startLine; changedlines != 0 && row < changes.length; row++) { if (bits == 0) { if (maskBytesLeft == 0) { throw new IIOException("Insufficient data in line mask"); } thismask = data[maskIdx++]; --maskBytesLeft; bits = 8; } if ((thismask & (1 << 7)) != 0) { if (dataBytesLeft < 2) { throw new IIOException("Insufficient data in SmallLineChanges structures: " + dataBytesLeft); } int changeCount16 = 0; if (small) { changeCount16 = data[dataIdx++] & 0xff; changeCount = changeCount16 + (data[dataIdx++] & 0xff); } else { changeCount = toShort(data, dataIdx); dataIdx += 2; } dataBytesLeft -= 2; changes[row] = new MutableIndexColorModel.PaletteChange[changeCount]; for (int i = 0; i < changeCount; i++) { if (totalchanges >= this.totalChanges) { throw new IIOException("Insufficient data in SmallLineChanges structures (changeCount): " + totalchanges); } if (dataBytesLeft < 2) { throw new IIOException("Insufficient data in SmallLineChanges structures: " + dataBytesLeft); } if (small) { int smallChange = toShort(data, dataIdx); dataIdx += 2; dataBytesLeft -= 2; int reg = ((smallChange & 0xf000) >> 12) + (i >= changeCount16 ? 
16 : 0); MutableIndexColorModel.PaletteChange paletteChange = new MutableIndexColorModel.PaletteChange(); paletteChange.index = reg; paletteChange.r = (byte) (((smallChange & 0x0f00) >> 8) * FACTOR_4BIT); paletteChange.g = (byte) (((smallChange & 0x00f0) >> 4) * FACTOR_4BIT); paletteChange.b = (byte) (((smallChange & 0x000f) ) * FACTOR_4BIT); changes[row][i] = paletteChange; } else { int reg = toShort(data, dataIdx); dataIdx += 2; MutableIndexColorModel.PaletteChange paletteChange = new MutableIndexColorModel.PaletteChange(); paletteChange.index = reg; dataIdx++; /* skip alpha */ paletteChange.r = data[dataIdx++]; paletteChange.b = data[dataIdx++]; /* yes, RBG */ paletteChange.g = data[dataIdx++]; changes[row][i] = paletteChange; dataBytesLeft -= 6; } ++totalchanges; } --changedlines; } thismask <<= 1; bits--; } if (totalchanges != this.totalChanges) { // TODO: Issue IIO warning new IIOException(String.format("Got %d change structures, chunk header reports %d", totalchanges, this.totalChanges)).printStackTrace(); } } // TODO: Util method private static short toShort(byte[] bytes, int idx) { return (short) ((bytes[idx] & 0xff) << 8 | (bytes[idx + 1] & 0xff)); } }
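The 12-bit path packs a palette register index and a 4-bit RGB triple into one 16-bit word. A worked example of the unpacking, assuming FACTOR_4BIT is the usual 4-to-8-bit scale factor of 17 (255/15) defined in the parent class; the input word is made up:

```java
// Decode the hypothetical SmallLineChange word 0x3A5C (with i < changeCount16):
int smallChange = 0x3A5C;
int reg = (smallChange & 0xf000) >> 12;      // 0x3 -> palette register 3
int r = ((smallChange & 0x0f00) >> 8) * 17;  // 0xA * 17 = 170
int g = ((smallChange & 0x00f0) >> 4) * 17;  // 0x5 * 17 = 85
int b = (smallChange & 0x000f) * 17;         // 0xC * 17 = 204
// For i >= changeCount16 the register index gets +16, addressing registers 16-31.
```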
def project_rect_to_velo2(self, rot, tr, sc, pts_3d_rect, RO, Tr_velo_to_cam):
    # Rectified camera frame -> reference camera frame -> velodyne frame
    pts_3d_ref = self.project_rect_to_ref(pts_3d_rect, RO)
    temp = self.project_ref_to_velo(pts_3d_ref, Tr_velo_to_cam)
    # Apply an extra translation, then rotation and scaling, in the velodyne frame
    temp = temp.transpose() + tr[:3, :1]
    temp = np.dot(sc[:3, :3], np.dot(rot[:3, :3], temp)).transpose()
    return temp
package com.vladsch.flexmark.docx.converter; import org.junit.runner.RunWith; import org.junit.runners.Suite; @RunWith(Suite.class) @Suite.SuiteClasses({ ComboDocxConverterSpecTest.class, ComboDocxConverterAltStylesSpecTest.class, ComboEnDocxConverterSpecTest.class, ComboDeDocxConverterSpecTest.class, ComboDocxConverterSpec2Test.class, ComboDocxConverterSpec3Test.class, ComboEnDocxConverterSpec2Test.class, ComboDeDocxConverterSpec2Test.class, ComboDocxConverterIssuesSpecTest.class, ComboDocxConverterAttributeSpecTest.class, ComboDocxConverterFormSpecTest.class, }) public class DocxConverterTestSuite { }
// NewDeliverSMResp creates and initializes a new DeliverSMResp PDU. func NewDeliverSMResp() Body { b := newDeliverSMResp(&Header{ID: DeliverSMRespID}) b.init() return b }
/* -----------------------------------------------------------------------------
 * This file is a part of the NVCM project: https://github.com/nvitya/nvcm
 * Copyright (c) 2018 <NAME>, nvitya
 *
 * This software is provided 'as-is', without any express or implied warranty.
 * In no event will the authors be held liable for any damages arising from
 * the use of this software. Permission is granted to anyone to use this
 * software for any purpose, including commercial applications, and to alter
 * it and redistribute it freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software in
 *    a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 *
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 *
 * 3. This notice may not be removed or altered from any source distribution.
 * --------------------------------------------------------------------------- */
/*
 * file:     hwuart_atsam_v2.cpp
 * brief:    ATSAM_V2 UART
 * version:  1.00
 * date:     2018-02-10
 * authors:  nvitya
 * notes:
 *   the SERCOM pads are used as follows:
 *     pad 0 = TX (ATSAM output)
 *     pad 1 = RX (ATSAM input)
 */

#include <stdio.h>
#include <stdarg.h>

#include "hwuart.h"
#include "atsam_v2_utils.h"

bool THwUart_atsam_v2::Init(int adevnum)  // devnum: 0 - 7 = SERCOM ID
{
	unsigned code;
	unsigned perid;

	devnum = adevnum;
	initialized = false;
	regs = nullptr;

	if (!atsam2_sercom_enable(devnum, 0))
	{
		return false;
	}

	regs = (HW_UART_REGS *)sercom_inst_list[devnum];

	regs->CTRLA.bit.ENABLE = 0; // disable

	//regs->CTRLA.bit.SWRST = 1; // reset
	//while (regs->SYNCBUSY.bit.SWRST) { } // wait for reset
	//regs->CTRLA.bit.SWRST = 0; // reset

	// baud rate calculation
	//   fbaud = fref / oversampling * (1 - baudvalue / 65536)
	//   baudvalue = 65536 * (1 - oversampling * fbaud / fref)

	unsigned oversampling = 16;
	unsigned brdiv = SystemCoreClock / ((oversampling / 8) * baudrate); // the lower 3 bits are the fractional part
	unsigned baudvalue = (((brdiv >> 3) & 0x1FFF) | ((brdiv & 7) << 13));
	regs->BAUD.reg = baudvalue;

	// CTRLB
	code = 0
		| (1 << 17)  // RXEN
		| (1 << 16)  // TXEN
		| ((((halfstopbits-2) / 2) & 1) << 6)  // SBMODE
		| (0 <<  0)  // CHSIZE(3): 0 = 8 bit characters
	;
	if (parity and oddparity)  { code |= (1 << 13); }
	// 0x30000
	while (regs->SYNCBUSY.bit.CTRLB) { } // wait for sync
	regs->CTRLB.reg = code;
	while (regs->SYNCBUSY.bit.CTRLB) { } // wait for sync

	// CTRLC
#ifdef REG_SERCOM0_USART_CTRLC
	regs->CTRLC.reg = 0x700002; //((7 << 20) | (2 << 0));
#endif

	// CTRLA
	code = 0
		| (1 << 30)  // DORD: 1 = LSB first
		| (0 << 28)  // CMODE: async mode
		| (0 << 24)  // FORM(4): frame format, 0 = USART without parity
		| (0 << 22)  // SAMPA(2): sample adjustment
		| (1 << 20)  // RXPO(2): RX pad select, 1 = PAD[1] for RX
		| (0 << 16)  // TXPO(2): TX pad select, 2 = PAD[0] for TX, RTS=PAD[2], CTS=PAD[3]
		| (1 << 13)  // SAMPR(3): Sample rate, 16x oversampling, fractional b.r.g.
| (1 << 2) // MODE(3): Mode, 1 = USART with internal clock | (0 << 1) // ENABLE ; if (parity) { code |= (1 << 24); } regs->CTRLA.reg = code; regs->RXPL.reg = 0; regs->DBGCTRL.reg = 0; while (regs->SYNCBUSY.bit.ENABLE) { } // wait for enable regs->CTRLA.reg = (code | (1 << 1)); // enable it while (regs->SYNCBUSY.bit.ENABLE) { } // wait for enable initialized = true; return true; } bool THwUart_atsam_v2::TrySendChar(char ach) { if (regs->INTFLAG.bit.DRE) { regs->DATA.reg = ach; return true; } else { return false; } } bool THwUart_atsam_v2::TryRecvChar(char * ach) { if (regs->INTFLAG.bit.RXC) { *ach = regs->DATA.reg; return true; } else { return false; } } void THwUart_atsam_v2::DmaAssign(bool istx, THwDmaChannel * admach) { if (istx) { txdma = admach; } else { rxdma = admach; } admach->Prepare(istx, (void *)&regs->DATA.reg, 0); } bool THwUart_atsam_v2::DmaStartSend(THwDmaTransfer * axfer) { if (!txdma) { return false; } txdma->StartTransfer(axfer); return true; } bool THwUart_atsam_v2::DmaStartRecv(THwDmaTransfer * axfer) { if (!rxdma) { return false; } rxdma->StartTransfer(axfer); return true; }
ENDOGENOUS AND PHOTOPERIODIC DIURNAL RHYTHMS OF IN VIVO LIGHT ABSORPTION AND SCATTERING IN THE GREEN ALGA ULVA LACTUCA L. Techniques of in vivo spectroscopy were employed to demonstrate the presence of rhythms of light absorption and scattering in the green thalloid alga Ulva lactuca L. maintained in artificial nutrient medium under constant photoperiod. The absorbance during photophase at 682 nm, the chlorophyll a in vivo absorption maximum, was typically two to three times that during scotophase. Prephased endogenous rhythms (in continued darkness) were observed for a time period equal to three photoperiods. The absorbance rhythm did not correlate with changes in extractable chlorophyll a or b concentration. No changes in linear dichroism were observed, indicating the rhythm of light absorption could not be explained on the basis of orientation of individual absorber molecules. In vivo light microscopy did not reveal differences in chloroplast orientation. Alternative mechanisms are discussed.
package LogUtilTests;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import shared.domain.cards.Card;
import shared.domain.cards.kingdoms.Mine;
import shared.domain.cards.treasures.CopperCard;
import shared.domain.cards.treasures.GoldCard;
import shared.domain.cards.treasures.SilverCard;
import shared.domain.cards.victories.EstateCard;
import shared.domain.engine.Player;
import shared.domain.exceptions.InvalidIDException;
import shared.util.LogUtil;

public class PlayLogUtilTest {

    private Player player;

    @Before
    public void createPlayerWithHand() {
        this.player = new Player();
        player.getHand().clear();
        player.getHand().add(new Mine());
        player.getHand().add(new CopperCard());
        player.getHand().add(new SilverCard());
        player.getHand().add(new EstateCard());
    }

    @Test
    public void listHandCardsWorks() {
        int[] indices = {0, 3};
        try {
            String listedCards = LogUtil.listHandCards(player, indices);
            Assert.assertEquals(" 1 Estate,\n 1 Mine", listedCards);
        } catch (InvalidIDException e) {
            Assert.fail("Did not expect this exception to be thrown: " + e.getMessage());
        }
    }

    @Test
    public void tooLargeIndexInListCardsThrowsException() {
        int[] indices = {2, 4};
        String listedCards;
        try {
            listedCards = LogUtil.listHandCards(player, indices);
        } catch (InvalidIDException e) {
            Assert.assertEquals("#4 is not a valid card id; this player is only holding 4 cards", e.getMessage());
            return;
        }
        Assert.fail("Did not expect call to listHandCards to succeed with index 4/ "+player.getHandSize() +" and list: " + listedCards);
    }

    @Test
    public void listHandCardsWorksOnMultipleCards() {
        player.getHand().add(new GoldCard());
        player.getHand().add(new GoldCard());
        int[] indices = {0, 1, 3};
        try {
            String listedCards = LogUtil.listHandCards(player, indices);
            Assert.assertEquals(" 2 Golds,\n 1 Silver", listedCards);
        } catch (InvalidIDException e) {
            Assert.fail("Did not expect exception with message: " + e.getMessage());
        }
    }

    @Test
    public void cardWithArticleReturnsAnForVowel() {
        Card vowelCard = new EstateCard();
        String result = LogUtil.cardNameWithArticle(vowelCard);
        Assert.assertEquals("an Estate", result);
    }

    @Test
    public void cardWithArticleReturnsAForNonVowel() {
        Card vowelCard = new Mine();
        String result = LogUtil.cardNameWithArticle(vowelCard);
        Assert.assertEquals("a Mine", result);
    }

    @After
    public void removeHand() {
        player.getHand().clear();
        player = null;
    }
}
// Simplified matrix element so we can send less data via JSON. private static class SimpleMatrixElement { @Expose final String status; @Expose final String cpuTime; @Expose final String memUsage; @Expose final String wallclock; public SimpleMatrixElement(String status, String cpuTime, String memUsage, String wallclock) { this.status = status; this.cpuTime = cpuTime; this.memUsage = memUsage; this.wallclock = wallclock; } }
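The @Expose annotations only take effect when Gson is built with excludeFieldsWithoutExposeAnnotation(). A minimal sketch of that wiring, assuming SimpleMatrixElement is accessible from the demo's location; the field values are made up:

```java
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

public class MatrixElementJsonDemo {
    public static void main(String[] args) {
        Gson gson = new GsonBuilder()
                .excludeFieldsWithoutExposeAnnotation() // only @Expose fields are serialized
                .create();
        // Constructor args: status, cpuTime, memUsage, wallclock (illustrative values)
        SimpleMatrixElement element =
                new SimpleMatrixElement("COMPLETE", "12.3s", "256MB", "14.1s");
        System.out.println(gson.toJson(element));
        // {"status":"COMPLETE","cpuTime":"12.3s","memUsage":"256MB","wallclock":"14.1s"}
    }
}
```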
Disch is the third defensive coach to be removed from head coach Larry Fedora’s staff since the end of the 2014 regular season. Former associate head coach for defense Vic Koenning left the program in early December and promptly accepted the defensive coordinator position at Troy. Former linebackers coach Ron West was let go in January. UNC ranked fifth in the ACC in scoring defense in 2012 (25.7 ppg) and sixth in 2013 (24.5 ppg) before allowing a school-record 39.0 points per game in 2014. Disch’s position group intercepted eight passes for 157 return yards last season. Senior cornerback Tim Scott led the team with 78 tackles. Disch was initially hired by Fedora in 2011 as defensive coordinator and secondary coach at Southern Miss, where he installed his version of the 4-2-5. Under Disch’s direction, the Golden Eagles ranked seventh nationally in pass efficiency defense and 22nd in rushing defense in helping the program deliver a school-record 12 victories. His Southern Miss defense set an NCAA record with eight interceptions returned for touchdowns. UNC recently announced the hiring of defensive coordinator Gene Chizik and defensive assistant John Papuchis. Disch's job has yet to be filled.
Factors Affecting the Choice of Irrigation Systems for Florida Tomato Production Several economic factors should be considered in selecting an agricultural irrigation system. This 7-page fact sheet compares two widely used irrigation systems for tomato production: seepage and sub-surface drip irrigation. Written by Jenna Rogers, Tatiana Borisova, Jeffrey Ullman, Kelly Morgan, Lincoln Zotarelli, and Kelly Grogan, and published by the UF Department of Food and Resource Economics, October 2014. (UF/IFAS Photo: Tyler Jones)
use crate::amazingly_lost_data::AmazinglyLostData;
use crate::game_state::{ChangeGameStateEvent, GameState};
use crate::maze_generator::GameTile;
use crate::tile_factory::GameTileHandlers;
use crate::{maze_generator, tile_factory};
use bevy::render::camera::Camera;
use bevy::{asset::HandleId, prelude::*};

pub struct GameFlowPlugin;

impl Plugin for GameFlowPlugin {
    fn build(&self, app: &mut AppBuilder) {
        app.add_system(init_game_flow.system());
    }
}

fn init_game_flow(
    mut commands: Commands,
    mut amazing_data: ResMut<AmazinglyLostData>,
    mut camera_query: Query<(&mut Transform, &Camera)>,
    mut game_tile_query: Query<(Entity, (With<GameTile>, Without<Camera>))>,
    mut asset_server: Res<AssetServer>,
    mut materials: ResMut<Assets<ColorMaterial>>,
    mut game_state: ResMut<State<GameState>>,
    mut change_game_state: EventWriter<ChangeGameStateEvent>,
) {
    match game_state.current() {
        GameState::StartMenu => {}
        GameState::PlayingGame => {
            amazing_data.is_generating_maze = false;
            // TODO:RG
            // Do we need another State or some sub states for what is happening
            // during the game?
        }
        GameState::GenerateNewGame => {
            amazing_data.is_loading_assets = false;

            // Check if we are already generating.
            // Just in case there is a delay with Bevy and GenerateNewGame is triggered twice
            // This seems to happen often
            if !amazing_data.is_generating_maze {
                amazing_data.is_generating_maze = true;
                println!("GeneratingGame");

                // First clear the game field
                maze_generator::clear_maze_tiles(&mut commands, &mut game_tile_query);

                // Now create a new maze
                maze_generator::create_new_maze(
                    &mut commands,
                    &mut amazing_data,
                    &mut camera_query,
                );

                change_game_state.send(ChangeGameStateEvent(GameState::PlayingGame));
            } else {
                // println!("Tried to generate the maze twice");
            }
        }
        GameState::Settings => {
            println!("Settings");
        }
        GameState::Save => {
            println!("save");
        }
        GameState::LoadingAssets => {
            // Just in case there is a delay with Bevy and LoadingAssets is triggered twice
            // This seems to happen often and we can counter this by using boolean checks
            if !amazing_data.is_loading_assets {
                amazing_data.is_loading_assets = true;
                tile_factory::load_all_assets(&mut amazing_data, &mut asset_server, &mut materials);
                change_game_state.send(ChangeGameStateEvent(GameState::GenerateNewGame));
            }
        }
    }
}
/**
 * extractPropertyNamesFromSearchProperties
 *
 * This method extracts a list of the (short) property names referred to in a search properties
 * parameter, as passed to findEntities or findRelationships. This information can be used to
 * pre-screen a query to determine the preferred execution strategy.
 *
 * @param searchProperties - the SearchProperties type parameter to be analysed
 * @return a list of (String) type property names
 */
List<String> extractPropertyNamesFromSearchProperties(SearchProperties searchProperties, String repositoryName)
        throws InvalidParameterException
{
    List<String> propertyNames = null;

    if (searchProperties != null)
    {
        propertyNames = new ArrayList<>();

        List<PropertyCondition> conditions = searchProperties.getConditions();

        for (PropertyCondition condition : conditions)
        {
            boolean localCondition = validatePropertyCondition(condition, repositoryName);

            /*
             * Condition is valid, and localCondition indicates whether to process property, value, operator or to
             * recurse into nestedConditions.
             */
            if (localCondition)
            {
                /* Construct a traversal for the property name, operator and value */
                String propertyName = condition.getProperty();
                if (propertyName != null)
                {
                    propertyNames.add(propertyName);
                }
            }
            else
            {
                /*
                 * Recursively process nested conditions....
                 */
                SearchProperties nestedConditions = condition.getNestedConditions();

                List<String> moreProperties = extractPropertyNamesFromSearchProperties(nestedConditions, repositoryName);
                if (moreProperties != null && !moreProperties.isEmpty())
                {
                    propertyNames.addAll(moreProperties);
                }
            }
        }
        if (propertyNames.isEmpty())
        {
            propertyNames = null;
        }
    }
    return propertyNames;
}
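A usage sketch of the extraction, assuming the SearchProperties and PropertyCondition beans expose setters matching the getters used above (setConditions, setProperty, setNestedConditions); only the getters appear in the method, so the setter names are an assumption, and java.util.Arrays is used for brevity:

```java
// Build: (displayName = ...) OR (nested: (qualifiedName = ...))
PropertyCondition byDisplayName = new PropertyCondition();
byDisplayName.setProperty("displayName");

PropertyCondition byQualifiedName = new PropertyCondition();
byQualifiedName.setProperty("qualifiedName");

SearchProperties nested = new SearchProperties();
nested.setConditions(Arrays.asList(byQualifiedName));

PropertyCondition nestedCondition = new PropertyCondition();
nestedCondition.setNestedConditions(nested);

SearchProperties searchProperties = new SearchProperties();
searchProperties.setConditions(Arrays.asList(byDisplayName, nestedCondition));

List<String> names =
        extractPropertyNamesFromSearchProperties(searchProperties, "myRepository");
// names now contains "displayName" and, via the recursive branch, "qualifiedName"
```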
import org.joml.Matrix4f;
import org.joml.Vector3f;

/**
 * MatrixUtil is a collection of useful matrix operations.
 */
public class MatrixUtil {
	/**
	 * Generate a transform (model) matrix
	 *
	 * @param translate
	 *            point to translate to
	 * @param rotate
	 *            amounts to rotate about the x, y and z axes, in degrees
	 * @param scale
	 *            amount to scale by
	 *
	 * @return the matrix
	 */
	public static Matrix4f transform(float[] translate, float[] rotate, float scale) {
		Matrix4f raw = new Matrix4f();
		raw.identity();
		raw.translate(translate[0], translate[1], translate[2]);
		raw.rotate((float)Math.toRadians(rotate[0]), 1, 0, 0);
		raw.rotate((float)Math.toRadians(rotate[1]), 0, 1, 0);
		raw.rotate((float)Math.toRadians(rotate[2]), 0, 0, 1);
		raw.scale(scale);
		return raw;
	}

	/**
	 * Generate a perspective projection matrix
	 *
	 * @param fov
	 *            the field-of-view, in degrees
	 * @param near
	 *            the near plane
	 * @param far
	 *            the far plane
	 * @param width
	 *            window width
	 * @param height
	 *            window height
	 *
	 * @return the matrix
	 */
	public static Matrix4f project(float fov, float near, float far, int width, int height) {
		float aspect = (float)width / (float)height;
		Matrix4f raw = new Matrix4f();
		raw.perspective((float)Math.toRadians(fov), aspect, near, far);
		return raw;
	}

	/**
	 * Generate a view (look-at) matrix
	 *
	 * @param position
	 *            the camera position
	 * @param front
	 *            the direction the camera is facing
	 * @param up
	 *            the camera's up vector
	 *
	 * @return the matrix
	 */
	public static Matrix4f view(Vector3f position, Vector3f front, Vector3f up) {
		Vector3f center = new Vector3f();
		position.add(front, center);
		Matrix4f raw = new Matrix4f();
		raw.lookAt(position, center, up, raw);
		return raw;
	}
}
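// Hedged usage sketch (a fragment; values are illustrative and assume JOML on the classpath):
Matrix4f model = MatrixUtil.transform(
		new float[] { 0f, 1f, -5f },   // translation
		new float[] { 0f, 90f, 0f },   // rotation about x, y, z in degrees
		2.0f);                         // uniform scale
Matrix4f projection = MatrixUtil.project(70f, 0.1f, 1000f, 1280, 720);
Matrix4f view = MatrixUtil.view(
		new Vector3f(0f, 0f, 3f),      // camera position
		new Vector3f(0f, 0f, -1f),     // facing direction
		new Vector3f(0f, 1f, 0f));     // up vector
// A typical MVP product for a shader upload would be projection.mul(view).mul(model),
// computed on fresh copies since JOML's mul mutates the receiver in place.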
import { odd$ } from "../../lib/math";
import { iff } from "../../lib/conditional";
import { range } from "lodash";
import { keep } from "../../lib/list/keep";

describe("keep", function () {
  // Helpers that map odd numbers to themselves and even numbers to a "missing" value.
  function returnWithBool(val: number) {
    return iff(odd$(val), val, false);
  }

  function returnWithNull(val: number) {
    return iff(odd$(val), val, null);
  }

  function returnWithUndefined(val: number) {
    return iff(odd$(val), val, undefined);
  }

  // Curried single-argument form of keep; the @ts-ignore works around the missing
  // overload in keep's type signature.
  // @ts-ignore
  const keepFn = (arg) => keep(arg);

  it("should return a lazy sequence of the non-nil results of (f item)", function () {
    // As in Clojure's keep, only null/undefined results are dropped; false is kept.
    expect(keep(returnWithBool, range(5))).toEqual([false, 1, false, 3, false]);
    expect(keep(returnWithNull, range(5))).toEqual([1, 3]);
    expect(keep(returnWithUndefined, range(5))).toEqual([1, 3]);

    expect(keepFn(returnWithBool)(range(5))).toEqual([false, 1, false, 3, false]);
    expect(keepFn(returnWithNull)(range(5))).toEqual([1, 3]);
    expect(keepFn(returnWithUndefined)(range(5))).toEqual([1, 3]);
  });
});
def append_text(self, text):
    """Append a line of text at the insertion cursor and scroll to the end."""
    self.txtbox.insert(tkinter.INSERT, text)
    self.txtbox.insert(tkinter.INSERT, '\n')
    self.txtbox.see(tkinter.END)
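# Hedged usage sketch: the enclosing class is not shown in the source, so this is a
# minimal assumed host for append_text, with txtbox as a tkinter.Text widget. The
# LogWindow name is illustrative.
import tkinter


class LogWindow:
    def __init__(self, root):
        self.txtbox = tkinter.Text(root)
        self.txtbox.pack()

    def append_text(self, text):
        self.txtbox.insert(tkinter.INSERT, text)
        self.txtbox.insert(tkinter.INSERT, '\n')
        self.txtbox.see(tkinter.END)


root = tkinter.Tk()
window = LogWindow(root)
window.append_text("first line")
window.append_text("second line")
root.mainloop()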
import { ChangeDetectionStrategy, Component, Self } from '@angular/core';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { AccountService } from '../../auth/state';
import { Router } from '@angular/router';
import { NgOnDestroy } from '@core/destroy.service';
import { switchMap, takeUntil } from 'rxjs/operators';
import { from } from 'rxjs';
import { NotificationService } from '@shared/notification.service';
import { environment } from '../../../environments/environment';

@Component({
  selector: 'app-login',
  templateUrl: './login.component.html',
  styleUrls: ['./login.component.scss'],
  providers: [NgOnDestroy],
  changeDetection: ChangeDetectionStrategy.OnPush,
})
export class LoginComponent {
  authForm: FormGroup;
  officeName = environment.officeName;

  constructor(private fb: FormBuilder,
              private authService: AccountService,
              private router: Router,
              private notificationService: NotificationService,
              @Self() private ngOnDestroy$: NgOnDestroy) {
    this.authForm = fb.group({
      login: [null, Validators.required],
      password: [null, Validators.required],
    });
  }

  login() {
    this.authService.login(this.authForm.value).pipe(
      switchMap(() => from(this.router.navigate(['']))),
      takeUntil(this.ngOnDestroy$),
    ).subscribe();
  }
}
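<!-- Hedged template sketch: login.component.html is not in the source, so this is a
     minimal assumed template. Control names match the authForm built above; it assumes
     ReactiveFormsModule is imported by the owning module. -->
<form [formGroup]="authForm" (ngSubmit)="login()">
  <h1>{{ officeName }}</h1>
  <input formControlName="login" placeholder="Login" />
  <input formControlName="password" type="password" placeholder="Password" />
  <button type="submit" [disabled]="authForm.invalid">Sign in</button>
</form>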
I love a story with a happy ending.

Photo by Kidnurse

I write about vaccinations every now and again, usually when there's bad news: an outbreak, for example, or when some talking head utters some unconscionably fallacious piece of dupery about medical health. It's not too often I get to write about something good, so I cherish this: Dani McBurnett Stringer is a nurse practitioner who has taken care of thousands of children in the past few years. She's charged with caring for them and their health … but she has a secret. She was anti-vaccine.

Stress the "was." Even though she was vaccinated as a child, her parents became more anti-vax with time, and by the time Stringer entered the nursing field, she shared her parents' beliefs.

But then something happened. She started seeing what happens when you don't vaccinate:

Over the course of my education, I went on to care for a 1 month old fighting for her life against pertussis who was too young to be vaccinated. Her horrific cough and her helpless parents will stay with me forever. I learned from a case study of a child born in my county who later died because her mom developed chickenpox a few days before delivery. I was alerted by the health department when measles was found in my area. My own great uncle is in a wheelchair to this day from a childhood case of polio. I studied a child whose legs and arms were amputated after meningitis nearly cost him his life. I watched a toddler almost die from dehydration due to rotavirus. I've cared for many children with cancer and immunodeficiencies who I worry about due to daily declining herd immunity. But most of all, I learned that this suffering is preventable.

(Emphasis hers.)

That's wonderfully put, although it makes my heart heavy that she had to see what she did to change her mind. On the other hand, it makes my heart sing that she was able to make this journey at all. So many people don't.

Of course, this wasn't an easy path. In a completely predictable manner, she was attacked by anti-vaxxers. But then, to my delight, she took those attacks and turned them into a list of reasons why we must vaccinate our children. She pretty much nails it.

I'm thrilled Stringer broke through her own personal biases and accepted reality for what it is. We all have some of those biases weighing us down, and every time someone sheds them it's cause to celebrate, especially when it's on an issue that is, quite literally, life or death.

Stringer is on Twitter and Facebook, so send her some support. I expect she could use it.

Tip o' the syringe to my friend Tim Farley.
import tempfile

import apache_beam as beam
import tensorflow as tf

# EvaluateAndWriteResults, load_eval_result and _assert_tensorflow_version are
# assumed to be defined elsewhere in this module.


def run_model_analysis(model_location,
                       data_location,
                       file_format='tfrecords',
                       slice_spec=None,
                       example_weight_key=None,
                       add_metrics_callbacks=None,
                       output_path=None):
  """Runs model analysis over a dataset and returns the evaluation result.

  Args:
    model_location: Path to the exported eval saved model.
    data_location: Path to the data to evaluate on.
    file_format: Either 'tfrecords' or 'text'.
    slice_spec: Optional list of slice specifications.
    example_weight_key: Optional feature key to use as the example weight.
    add_metrics_callbacks: Optional callbacks that add extra metrics.
    output_path: Directory to write results to; a temporary directory is
      created if None.

  Returns:
    The evaluation result loaded from output_path.

  Raises:
    ValueError: if file_format is not one of the supported formats.
  """
  _assert_tensorflow_version()

  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.gfile.Exists(output_path):
    tf.gfile.MakeDirs(output_path)

  with beam.Pipeline() as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.UNCOMPRESSED)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: %s' % file_format)

    _ = (
        data
        | 'EvaluateAndWriteResults' >> EvaluateAndWriteResults(
            eval_saved_model_path=model_location,
            output_path=output_path,
            display_only_data_location=data_location,
            example_weight_key=example_weight_key,
            add_metrics_callbacks=add_metrics_callbacks,
            slice_spec=slice_spec))

  eval_result = load_eval_result(output_path=output_path)
  return eval_result
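# Hedged usage sketch: paths and the weight key are illustrative; it assumes an eval
# saved model export and TFRecord-serialized tf.Examples at these locations.
eval_result = run_model_analysis(
    model_location='/tmp/eval_saved_model/1552241234',
    data_location='/tmp/eval_data/examples.tfrecord',
    file_format='tfrecords',
    example_weight_key='example_weight')
# eval_result now holds the metrics loaded from output_path.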
/**
 * Convert a camel case string to kebab case.
 *
 * Inspired by the KebabCaseStrategy class of com.fasterxml.jackson.databind, with an
 * additional condition to handle digits as well. Using QName would have been a more
 * foolproof solution, but it can lead to performance problems due to its use of Java
 * reflection.
 *
 * @param input string in camel case
 * @return string in kebab case
 */
private String convertCamelToKebabCase(String input) {
    if (input == null) return input;
    int length = input.length();
    if (length == 0) {
        return input;
    }
    StringBuilder result = new StringBuilder(length + (length >> 1));
    int upperCount = 0;
    for (int i = 0; i < length; ++i) {
        char ch = input.charAt(i);
        char lc = Character.toLowerCase(ch);
        if (lc == ch) {
            // Lower-case letter, digit or other non-upper-case character.
            if (upperCount > 1) {
                // End of an upper-case run (e.g. "XMLDoc"): split before its last letter.
                result.insert(result.length() - 1, '-');
            } else if (upperCount == 1 && Character.isDigit(ch) && i != length - 1) {
                // A digit directly after a single upper-case letter starts a new word,
                // unless it is the final character.
                result.append('-');
            }
            upperCount = 0;
        } else {
            if (upperCount == 0 && i > 0) {
                // Start of an upper-case run: begin a new word.
                result.append('-');
            }
            ++upperCount;
        }
        result.append(lc);
    }
    return result.toString();
}
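// Hedged usage sketch (a fragment): expected outputs, traced by hand from the
// algorithm above.
convertCamelToKebabCase("camelCaseInput");   // "camel-case-input"
convertCamelToKebabCase("parseXMLDocument"); // "parse-xml-document"
convertCamelToKebabCase("ipV4");             // "ip-v4" (no dash before a trailing digit)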
def solve_screen_name_clashes(self):
    """Resolve a case-insensitive screen name clash with another stored user.

    If a different user already holds this screen name, refresh that user's
    info from Twitter; delete the stale record if Twitter reports the account
    gone (404).
    """
    try:
        u = User.objects.exclude(pk=self.pk).get(screen_name__iexact=self.screen_name)
    except User.DoesNotExist:
        # No clash: nothing to do.
        return
    else:
        try:
            u.fill_info_from_twitter()
        except TwitterHTTPError as e:
            if e.e.getcode() == 404:
                # The clashing account no longer exists on Twitter: drop it.
                u.delete()
            else:
                # Other Twitter error: keep the clashing record as-is.
                u.save()
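# Hedged usage sketch: one plausible call site after a rename observed via the
# Twitter API. The surrounding Django model code is assumed, not shown in the
# source, and the handle and primary key are illustrative.
user = User.objects.get(pk=1)
user.screen_name = 'NewHandle'    # handle that may still be held by a stale row
user.solve_screen_name_clashes()  # refresh or delete the clashing record
user.save()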