body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
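Each row below pairs a function `body` with a `body_without_docstring` variant of the same code. The extraction pipeline behind this dump is not documented here, but the relationship between the two columns can be approximated with Python's `ast` module. A minimal sketch (an assumption about how such a column could be derived, not the actual pipeline; requires Python 3.9+ for `ast.unparse`):

```python
import ast

def strip_docstring(body: str) -> str:
    # Parse the function, blank out a leading docstring expression if
    # present, and unparse the result (Python 3.9+). Some rows below
    # retain the docstring verbatim instead, so this is only a sketch.
    tree = ast.parse(body)
    func = tree.body[0]
    first = func.body[0] if func.body else None
    if (isinstance(first, ast.Expr)
            and isinstance(first.value, ast.Constant)
            and isinstance(first.value.value, str)):
        first.value.value = ""
    return ast.unparse(tree)
```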
def test_socket_overrides_memory(self):
"\n Test that `OpenSSL.SSL.bio_read` and `OpenSSL.SSL.bio_write` don't\n work on `OpenSSL.SSL.Connection`() that use sockets.\n "
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write('foo')
with pytest.raises(TypeError):
clientSSL.bio_shutdown() | -3,350,819,653,941,006,300 | Test that `OpenSSL.SSL.bio_read` and `OpenSSL.SSL.bio_write` don't
work on `OpenSSL.SSL.Connection` objects that use sockets. | tests/test_ssl.py | test_socket_overrides_memory | dholth/pyopenssl | python | def test_socket_overrides_memory(self):
"\n Test that `OpenSSL.SSL.bio_read` and `OpenSSL.SSL.bio_write` don't\n work on `OpenSSL.SSL.Connection`() that use sockets.\n "
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write('foo')
with pytest.raises(TypeError):
clientSSL.bio_shutdown() |
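For contrast with the row above: when a `Connection` is constructed without a socket, the BIO methods are the supported I/O path. A minimal sketch using the same pyOpenSSL API the tests exercise (wiring assumed, not taken from the test suite):

```python
from OpenSSL.SSL import Context, Connection, TLSv1_METHOD, WantReadError

ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)  # no socket, so an in-memory BIO pair is used
conn.set_connect_state()
try:
    conn.do_handshake()  # cannot complete yet; the peer's bytes are pending
except WantReadError:
    pass
client_hello = conn.bio_read(4096)  # TLS records to forward to the peer
```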
def test_outgoing_overflow(self):
'\n If more bytes than can be written to the memory BIO are passed to\n `Connection.send` at once, the number of bytes which were written is\n returned and that many bytes from the beginning of the input can be\n read from the other end of the connection.\n '
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = (2 ** 15)
sent = client.send((b'x' * size))
assert (sent < size)
(receiver, received) = interact_in_memory(client, server)
assert (receiver is server)
assert (len(received) == sent) | -24,608,563,463,420,264 | If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection. | tests/test_ssl.py | test_outgoing_overflow | dholth/pyopenssl | python | def test_outgoing_overflow(self):
'\n If more bytes than can be written to the memory BIO are passed to\n `Connection.send` at once, the number of bytes which were written is\n returned and that many bytes from the beginning of the input can be\n read from the other end of the connection.\n '
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = (2 ** 15)
sent = client.send((b'x' * size))
assert (sent < size)
(receiver, received) = interact_in_memory(client, server)
assert (receiver is server)
assert (len(received) == sent) |
def test_shutdown(self):
'\n `Connection.bio_shutdown` signals the end of the data stream\n from which the `Connection` reads.\n '
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
assert (type(err.value) in [Error, SysCallError]) | 6,889,483,336,208,162,000 | `Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads. | tests/test_ssl.py | test_shutdown | dholth/pyopenssl | python | def test_shutdown(self):
'\n `Connection.bio_shutdown` signals the end of the data stream\n from which the `Connection` reads.\n '
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
assert (type(err.value) in [Error, SysCallError]) |
def test_unexpected_EOF(self):
'\n If the connection is lost before an orderly SSL shutdown occurs,\n `OpenSSL.SSL.SysCallError` is raised with a message of\n "Unexpected EOF".\n '
(server_conn, client_conn) = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert (err.value.args == ((- 1), 'Unexpected EOF')) | 2,354,537,306,989,160,400 | If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF". | tests/test_ssl.py | test_unexpected_EOF | dholth/pyopenssl | python | def test_unexpected_EOF(self):
'\n If the connection is lost before an orderly SSL shutdown occurs,\n `OpenSSL.SSL.SysCallError` is raised with a message of\n "Unexpected EOF".\n '
(server_conn, client_conn) = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert (err.value.args == ((- 1), 'Unexpected EOF')) |
def _check_client_ca_list(self, func):
'\n Verify the return value of the `get_client_ca_list` method for\n server and client connections.\n\n :param func: A function which will be called with the server context\n before the client and server are connected to each other. This\n function should specify a list of CAs for the server to send to the\n client and return that same list. The list will be used to verify\n that `get_client_ca_list` returns the proper value at\n various times.\n '
server = self._server(None)
client = self._client(None)
assert (client.get_client_ca_list() == [])
assert (server.get_client_ca_list() == [])
ctx = server.get_context()
expected = func(ctx)
assert (client.get_client_ca_list() == [])
assert (server.get_client_ca_list() == expected)
interact_in_memory(client, server)
assert (client.get_client_ca_list() == expected)
assert (server.get_client_ca_list() == expected) | -8,730,736,075,572,612,000 | Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times. | tests/test_ssl.py | _check_client_ca_list | dholth/pyopenssl | python | def _check_client_ca_list(self, func):
'\n Verify the return value of the `get_client_ca_list` method for\n server and client connections.\n\n :param func: A function which will be called with the server context\n before the client and server are connected to each other. This\n function should specify a list of CAs for the server to send to the\n client and return that same list. The list will be used to verify\n that `get_client_ca_list` returns the proper value at\n various times.\n '
server = self._server(None)
client = self._client(None)
assert (client.get_client_ca_list() == [])
assert (server.get_client_ca_list() == [])
ctx = server.get_context()
expected = func(ctx)
assert (client.get_client_ca_list() == [])
assert (server.get_client_ca_list() == expected)
interact_in_memory(client, server)
assert (client.get_client_ca_list() == expected)
assert (server.get_client_ca_list() == expected) |
def test_set_client_ca_list_errors(self):
'\n `Context.set_client_ca_list` raises a `TypeError` if called with a\n non-list or a list that contains objects other than X509Names.\n '
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list('spam')
with pytest.raises(TypeError):
ctx.set_client_ca_list(['spam']) | 8,985,719,186,592,469,000 | `Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names. | tests/test_ssl.py | test_set_client_ca_list_errors | dholth/pyopenssl | python | def test_set_client_ca_list_errors(self):
'\n `Context.set_client_ca_list` raises a `TypeError` if called with a\n non-list or a list that contains objects other than X509Names.\n '
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list('spam')
with pytest.raises(TypeError):
ctx.set_client_ca_list(['spam']) |
def test_set_empty_ca_list(self):
'\n If passed an empty list, `Context.set_client_ca_list` configures the\n context to send no CA names to the client and, on both the server and\n client sides, `Connection.get_client_ca_list` returns an empty list\n after the connection is set up.\n '
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca) | 6,187,173,914,336,512,000 | If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up. | tests/test_ssl.py | test_set_empty_ca_list | dholth/pyopenssl | python | def test_set_empty_ca_list(self):
'\n If passed an empty list, `Context.set_client_ca_list` configures the\n context to send no CA names to the client and, on both the server and\n client sides, `Connection.get_client_ca_list` returns an empty list\n after the connection is set up.\n '
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca) |
def test_set_one_ca_list(self):
'\n If passed a list containing a single X509Name,\n `Context.set_client_ca_list` configures the context to send\n that CA name to the client and, on both the server and client sides,\n `Connection.get_client_ca_list` returns a list containing that\n X509Name after the connection is set up.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca) | -2,357,789,607,760,486,400 | If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up. | tests/test_ssl.py | test_set_one_ca_list | dholth/pyopenssl | python | def test_set_one_ca_list(self):
'\n If passed a list containing a single X509Name,\n `Context.set_client_ca_list` configures the context to send\n that CA name to the client and, on both the server and client sides,\n `Connection.get_client_ca_list` returns a list containing that\n X509Name after the connection is set up.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca) |
def test_set_multiple_ca_list(self):
'\n If passed a list containing multiple X509Name objects,\n `Context.set_client_ca_list` configures the context to send\n those CA names to the client and, on both the server and client sides,\n `Connection.get_client_ca_list` returns a list containing those\n X509Names after the connection is set up.\n '
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca) | -7,102,183,694,918,540,000 | If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up. | tests/test_ssl.py | test_set_multiple_ca_list | dholth/pyopenssl | python | def test_set_multiple_ca_list(self):
'\n If passed a list containing multiple X509Name objects,\n `Context.set_client_ca_list` configures the context to send\n those CA names to the client and, on both the server and client sides,\n `Connection.get_client_ca_list` returns a list containing those\n X509Names after the connection is set up.\n '
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca) |
def test_reset_ca_list(self):
'\n If called multiple times, only the X509Names passed to the final call\n of `Context.set_client_ca_list` are used to configure the CA\n names sent to the client.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca) | 4,391,791,017,981,610,500 | If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client. | tests/test_ssl.py | test_reset_ca_list | dholth/pyopenssl | python | def test_reset_ca_list(self):
'\n If called multiple times, only the X509Names passed to the final call\n of `Context.set_client_ca_list` are used to configure the CA\n names sent to the client.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca) |
def test_mutated_ca_list(self):
'\n If the list passed to `Context.set_client_ca_list` is mutated\n afterwards, this does not affect the list of CA names sent to the\n client.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca) | -2,296,114,413,460,073,000 | If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client. | tests/test_ssl.py | test_mutated_ca_list | dholth/pyopenssl | python | def test_mutated_ca_list(self):
'\n If the list passed to `Context.set_client_ca_list` is mutated\n afterwards, this does not affect the list of CA names sent to the\n client.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca) |
def test_add_client_ca_wrong_args(self):
'\n `Context.add_client_ca` raises `TypeError` if called with\n a non-X509 object.\n '
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca('spam') | 1,486,215,216,726,044,200 | `Context.add_client_ca` raises `TypeError` if called with
a non-X509 object. | tests/test_ssl.py | test_add_client_ca_wrong_args | dholth/pyopenssl | python | def test_add_client_ca_wrong_args(self):
'\n `Context.add_client_ca` raises `TypeError` if called with\n a non-X509 object.\n '
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca('spam') |
def test_one_add_client_ca(self):
"\n A certificate's subject can be added as a CA to be sent to the client\n with `Context.add_client_ca`.\n "
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca) | 6,252,198,235,848,312,000 | A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`. | tests/test_ssl.py | test_one_add_client_ca | dholth/pyopenssl | python | def test_one_add_client_ca(self):
"\n A certificate's subject can be added as a CA to be sent to the client\n with `Context.add_client_ca`.\n "
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca) |
def test_multiple_add_client_ca(self):
'\n Multiple CA names can be sent to the client by calling\n `Context.add_client_ca` with multiple X509 objects.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca) | 949,775,207,607,759,500 | Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects. | tests/test_ssl.py | test_multiple_add_client_ca | dholth/pyopenssl | python | def test_multiple_add_client_ca(self):
'\n Multiple CA names can be sent to the client by calling\n `Context.add_client_ca` with multiple X509 objects.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca) |
def test_set_and_add_client_ca(self):
'\n A call to `Context.set_client_ca_list` followed by a call to\n `Context.add_client_ca` results in using the CA names from the\n first call and the CA name from the second call.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca) | -7,623,362,178,323,251,000 | A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call. | tests/test_ssl.py | test_set_and_add_client_ca | dholth/pyopenssl | python | def test_set_and_add_client_ca(self):
'\n A call to `Context.set_client_ca_list` followed by a call to\n `Context.add_client_ca` results in using the CA names from the\n first call and the CA name from the second call.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca) |
def test_set_after_add_client_ca(self):
'\n A call to `Context.set_client_ca_list` after a call to\n `Context.add_client_ca` replaces the CA name specified by the\n former call with the names specified by the latter call.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca) | 3,161,915,503,385,484,000 | A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call. | tests/test_ssl.py | test_set_after_add_client_ca | dholth/pyopenssl | python | def test_set_after_add_client_ca(self):
'\n A call to `Context.set_client_ca_list` after a call to\n `Context.add_client_ca` replaces the CA name specified by the\n former call with the names specified by the latter call.\n '
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca) |
def test_integers(self):
'\n All of the info constants are integers.\n\n This is a very weak test. It would be nice to have one that actually\n verifies that as certain info events happen, the value passed to the\n info callback matches up with the constant exposed by OpenSSL.SSL.\n '
for const in [SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK, SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT, SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP, SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT, SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE]:
assert isinstance(const, int)
for const in [SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE]:
assert ((const is None) or isinstance(const, int)) | -8,794,536,108,357,829,000 | All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL. | tests/test_ssl.py | test_integers | dholth/pyopenssl | python | def test_integers(self):
'\n All of the info constants are integers.\n\n This is a very weak test. It would be nice to have one that actually\n verifies that as certain info events happen, the value passed to the\n info callback matches up with the constant exposed by OpenSSL.SSL.\n '
for const in [SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK, SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT, SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP, SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT, SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE]:
assert isinstance(const, int)
for const in [SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE]:
assert ((const is None) or isinstance(const, int)) |
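For context, these constants are typically consumed in an info callback registered on a context; a hedged sketch of that usage (the callback body is illustrative):

```python
from OpenSSL.SSL import (
    Context, TLSv1_METHOD, SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE,
)

def info_callback(conn, where, ret):
    # `where` is a bitmask assembled from the SSL_CB_* constants above.
    if where & SSL_CB_HANDSHAKE_START:
        print("handshake started")
    if where & SSL_CB_HANDSHAKE_DONE:
        print("handshake finished")

ctx = Context(TLSv1_METHOD)
ctx.set_info_callback(info_callback)
```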
def test_available(self):
'\n When the OpenSSL functionality is available the decorated functions\n work appropriately.\n '
feature_guard = _make_requires(True, 'Error text')
results = []
@feature_guard
def inner():
results.append(True)
return True
assert (inner() is True)
assert ([True] == results) | 2,875,773,025,657,722,400 | When the OpenSSL functionality is available the decorated functions
work appropriately. | tests/test_ssl.py | test_available | dholth/pyopenssl | python | def test_available(self):
'\n When the OpenSSL functionality is available the decorated functions\n work appropriately.\n '
feature_guard = _make_requires(True, 'Error text')
results = []
@feature_guard
def inner():
results.append(True)
return True
assert (inner() is True)
assert ([True] == results) |
def test_unavailable(self):
'\n When the OpenSSL functionality is not available the decorated function\n does not execute and NotImplementedError is raised.\n '
feature_guard = _make_requires(False, 'Error text')
@feature_guard
def inner():
pytest.fail('Should not be called')
with pytest.raises(NotImplementedError) as e:
inner()
assert ('Error text' in str(e.value)) | -7,286,798,191,054,335,000 | When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised. | tests/test_ssl.py | test_unavailable | dholth/pyopenssl | python | def test_unavailable(self):
'\n When the OpenSSL functionality is not available the decorated function\n does not execute and NotImplementedError is raised.\n '
feature_guard = _make_requires(False, 'Error text')
@feature_guard
def inner():
pytest.fail('Should not be called')
with pytest.raises(NotImplementedError) as e:
inner()
assert ('Error text' in str(e.value)) |
def _client_connection(self, callback, data, request_ocsp=True):
'\n Builds a client connection suitable for using OCSP.\n\n :param callback: The callback to register for OCSP.\n :param data: The opaque data object that will be handed to the\n OCSP callback.\n :param request_ocsp: Whether the client will actually ask for OCSP\n stapling. Useful for testing only.\n '
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client | -2,450,263,854,002,226,000 | Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only. | tests/test_ssl.py | _client_connection | dholth/pyopenssl | python | def _client_connection(self, callback, data, request_ocsp=True):
'\n Builds a client connection suitable for using OCSP.\n\n :param callback: The callback to register for OCSP.\n :param data: The opaque data object that will be handed to the\n OCSP callback.\n :param request_ocsp: Whether the client will actually ask for OCSP\n stapling. Useful for testing only.\n '
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client |
def _server_connection(self, callback, data):
'\n Builds a server connection suitable for using OCSP.\n\n :param callback: The callback to register for OCSP.\n :param data: The opaque data object that will be handed to the\n OCSP callback.\n '
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server | 7,783,377,464,125,420,000 | Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback. | tests/test_ssl.py | _server_connection | dholth/pyopenssl | python | def _server_connection(self, callback, data):
'\n Builds a server connection suitable for using OCSP.\n\n :param callback: The callback to register for OCSP.\n :param data: The opaque data object that will be handed to the\n OCSP callback.\n '
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server |
def test_callbacks_arent_called_by_default(self):
'\n If both the client and the server have registered OCSP callbacks, but\n the client does not send the OCSP request, neither callback gets\n called.\n '
def ocsp_callback(*args, **kwargs):
pytest.fail('Should not be called')
client = self._client_connection(callback=ocsp_callback, data=None, request_ocsp=False)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server) | 8,567,903,337,387,487,000 | If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called. | tests/test_ssl.py | test_callbacks_arent_called_by_default | dholth/pyopenssl | python | def test_callbacks_arent_called_by_default(self):
'\n If both the client and the server have registered OCSP callbacks, but\n the client does not send the OCSP request, neither callback gets\n called.\n '
def ocsp_callback(*args, **kwargs):
pytest.fail('Should not be called')
client = self._client_connection(callback=ocsp_callback, data=None, request_ocsp=False)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server) |
def test_client_negotiates_without_server(self):
'\n If the client wants to do OCSP but the server does not, the handshake\n succeeds, and the client callback fires with an empty byte string.\n '
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert (len(called) == 1)
assert (called[0] == b'') | 4,349,626,982,019,916,000 | If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string. | tests/test_ssl.py | test_client_negotiates_without_server | dholth/pyopenssl | python | def test_client_negotiates_without_server(self):
'\n If the client wants to do OCSP but the server does not, the handshake\n succeeds, and the client callback fires with an empty byte string.\n '
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert (len(called) == 1)
assert (called[0] == b'')
def test_client_receives_servers_data(self):
'\n The data the server sends in its callback is received by the client.\n '
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert (len(calls) == 1)
assert (calls[0] == self.sample_ocsp_data) | 4,777,403,329,407,207,000 | The data the server sends in its callback is received by the client. | tests/test_ssl.py | test_client_receives_servers_data | dholth/pyopenssl | python | def test_client_receives_servers_data(self):
'\n \n '
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert (len(calls) == 1)
assert (calls[0] == self.sample_ocsp_data) |
def test_callbacks_are_invoked_with_connections(self):
'\n The first arguments to both callbacks are their respective connections.\n '
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert (len(client_calls) == 1)
assert (len(server_calls) == 1)
assert (client_calls[0] is client)
assert (server_calls[0] is server) | 551,831,454,631,807,200 | The first arguments to both callbacks are their respective connections. | tests/test_ssl.py | test_callbacks_are_invoked_with_connections | dholth/pyopenssl | python | def test_callbacks_are_invoked_with_connections(self):
'\n \n '
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert (len(client_calls) == 1)
assert (len(server_calls) == 1)
assert (client_calls[0] is client)
assert (server_calls[0] is server) |
def test_opaque_data_is_passed_through(self):
'\n Both callbacks receive an opaque, user-provided piece of data in their\n callbacks as the final argument.\n '
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(callback=client_callback, data=sentinel)
server = self._server_connection(callback=server_callback, data=sentinel)
handshake_in_memory(client, server)
assert (len(calls) == 2)
assert (calls[0][(- 1)] is sentinel)
assert (calls[1][(- 1)] is sentinel) | 4,149,982,713,494,448,000 | Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument. | tests/test_ssl.py | test_opaque_data_is_passed_through | dholth/pyopenssl | python | def test_opaque_data_is_passed_through(self):
'\n Both callbacks receive an opaque, user-provided piece of data in their\n callbacks as the final argument.\n '
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(callback=client_callback, data=sentinel)
server = self._server_connection(callback=server_callback, data=sentinel)
handshake_in_memory(client, server)
assert (len(calls) == 2)
assert (calls[0][(- 1)] is sentinel)
assert (calls[1][(- 1)] is sentinel) |
def test_server_returns_empty_string(self):
'\n If the server returns an empty bytestring from its callback, the\n client callback is called with the empty bytestring.\n '
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert (len(client_calls) == 1)
assert (client_calls[0] == b'') | 7,501,926,478,803,016,000 | If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring. | tests/test_ssl.py | test_server_returns_empty_string | dholth/pyopenssl | python | def test_server_returns_empty_string(self):
'\n If the server returns an empty bytestring from its callback, the\n client callback is called with the empty bytestring.\n '
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert (len(client_calls) == 1)
assert (client_calls[0] == b'')
def test_client_returns_false_terminates_handshake(self):
'\n If the client returns False from its callback, the handshake fails.\n '
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server) | 3,973,897,642,014,427,600 | If the client returns False from its callback, the handshake fails. | tests/test_ssl.py | test_client_returns_false_terminates_handshake | dholth/pyopenssl | python | def test_client_returns_false_terminates_handshake(self):
'\n \n '
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server) |
def test_exceptions_in_client_bubble_up(self):
'\n Exceptions raised in the client callback bubble up to the caller.\n '
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server) | 8,659,388,420,352,561,000 | Exceptions raised in the client callback bubble up to the caller. | tests/test_ssl.py | test_exceptions_in_client_bubble_up | dholth/pyopenssl | python | def test_exceptions_in_client_bubble_up(self):
'\n \n '
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server) |
def test_exceptions_in_server_bubble_up(self):
'\n Exceptions raised in the server callback bubble up to the caller.\n '
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args):
pytest.fail('Should not be called')
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server) | 1,846,659,666,523,737,300 | Exceptions raised in the server callback bubble up to the caller. | tests/test_ssl.py | test_exceptions_in_server_bubble_up | dholth/pyopenssl | python | def test_exceptions_in_server_bubble_up(self):
'\n \n '
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args):
pytest.fail('Should not be called')
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server) |
def test_server_must_return_bytes(self):
'\n The server callback must return a bytestring, or a TypeError is raised.\n '
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args):
pytest.fail('Should not be called')
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server) | -5,247,461,585,641,548,000 | The server callback must return a bytestring, or a TypeError is raised. | tests/test_ssl.py | test_server_must_return_bytes | dholth/pyopenssl | python | def test_server_must_return_bytes(self):
'\n \n '
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args):
pytest.fail('Should not be called')
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server) |
def select(conn, options):
'\n Assert later that no args are actually appended.\n '
select_args.append((conn, options))
return b'' | -9,000,996,100,594,796,000 | Assert later that no args are actually appended. | tests/test_ssl.py | select | dholth/pyopenssl | python | def select(conn, options):
'\n \n '
select_args.append((conn, options))
return b''
def test_alpn_success(self):
'\n Clients and servers that agree on the negotiated ALPN protocol can\n correctly establish a connection, and the agreed protocol is reported\n by the connections.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])])
assert (server.get_alpn_proto_negotiated() == b'spdy/2')
assert (client.get_alpn_proto_negotiated() == b'spdy/2') | -1,615,314,544,418,148,400 | Clients and servers that agree on the negotiated ALPN protocol can
correctly establish a connection, and the agreed protocol is reported
by the connections. | tests/test_ssl.py | test_alpn_success | dholth/pyopenssl | python | def test_alpn_success(self):
'\n Clients and servers that agree on the negotiated ALPN protocol can\n correct establish a connection, and the agreed protocol is reported\n by the connections.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])])
assert (server.get_alpn_proto_negotiated() == b'spdy/2')
assert (client.get_alpn_proto_negotiated() == b'spdy/2') |
def test_alpn_set_on_connection(self):
'\n The same as test_alpn_success, but setting the ALPN protocols on\n the connection rather than the context.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])])
assert (server.get_alpn_proto_negotiated() == b'spdy/2')
assert (client.get_alpn_proto_negotiated() == b'spdy/2') | 5,401,062,428,428,302,000 | The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context. | tests/test_ssl.py | test_alpn_set_on_connection | dholth/pyopenssl | python | def test_alpn_set_on_connection(self):
'\n The same as test_alpn_success, but setting the ALPN protocols on\n the connection rather than the context.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])])
assert (server.get_alpn_proto_negotiated() == b'spdy/2')
assert (client.get_alpn_proto_negotiated() == b'spdy/2') |
def test_alpn_server_fail(self):
'\n When clients and servers cannot agree on what protocol to use next\n the TLS connection does not get established.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(Error):
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])]) | -3,258,457,692,606,931,000 | When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established. | tests/test_ssl.py | test_alpn_server_fail | dholth/pyopenssl | python | def test_alpn_server_fail(self):
'\n When clients and servers cannot agree on what protocol to use next\n the TLS connection does not get established.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(Error):
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])]) |
def test_alpn_no_server(self):
"\n When clients and servers cannot agree on what protocol to use next\n because the server doesn't offer ALPN, no protocol is negotiated.\n "
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert (client.get_alpn_proto_negotiated() == b'') | 3,146,339,162,060,569,600 | When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated. | tests/test_ssl.py | test_alpn_no_server | dholth/pyopenssl | python | def test_alpn_no_server(self):
"\n When clients and servers cannot agree on what protocol to use next\n because the server doesn't offer ALPN, no protocol is negotiated.\n "
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert (client.get_alpn_proto_negotiated() == b'')
def test_alpn_callback_exception(self):
'\n We can handle exceptions in the ALPN select callback.\n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])]) | -1,442,336,582,692,366,800 | We can handle exceptions in the ALPN select callback. | tests/test_ssl.py | test_alpn_callback_exception | dholth/pyopenssl | python | def test_alpn_callback_exception(self):
'\n \n '
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
server_context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert (select_args == [(server, [b'http/1.1', b'spdy/2'])]) |
def test_alpn_not_implemented(self):
'\n If ALPN is not in OpenSSL, we should raise NotImplementedError.\n '
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None) | -4,271,522,351,178,993,700 | If ALPN is not in OpenSSL, we should raise NotImplementedError. | tests/test_ssl.py | test_alpn_not_implemented | dholth/pyopenssl | python | def test_alpn_not_implemented(self):
'\n \n '
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None) |
def colorize(x):
' Converts a one-channel grayscale image to a color heatmap image '
if (x.dim() == 2):
torch.unsqueeze(x, 0, out=x)
if (x.dim() == 3):
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = (gauss(x, 0.5, 0.6, 0.2) + gauss(x, 1, 0.8, 0.3))
cl[1] = gauss(x, 1, 0.5, 0.3)
cl[2] = gauss(x, 1, 0.2, 0.3)
cl[cl.gt(1)] = 1
elif (x.dim() == 4):
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:, 0, :, :] = (gauss(x, 0.5, 0.6, 0.2) + gauss(x, 1, 0.8, 0.3))
cl[:, 1, :, :] = gauss(x, 1, 0.5, 0.3)
cl[:, 2, :, :] = gauss(x, 1, 0.2, 0.3)
return cl | 4,817,324,830,790,308,000 | Converts a one-channel grayscale image to a color heatmap image | rethinking-network-pruning/cifar/weight-level/utils/visualize.py | colorize | 1337Eddy/BirdRecognitionPruning | python | def colorize(x):
' '
if (x.dim() == 2):
torch.unsqueeze(x, 0, out=x)
if (x.dim() == 3):
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = (gauss(x, 0.5, 0.6, 0.2) + gauss(x, 1, 0.8, 0.3))
cl[1] = gauss(x, 1, 0.5, 0.3)
cl[2] = gauss(x, 1, 0.2, 0.3)
cl[cl.gt(1)] = 1
elif (x.dim() == 4):
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:, 0, :, :] = (gauss(x, 0.5, 0.6, 0.2) + gauss(x, 1, 0.8, 0.3))
cl[:, 1, :, :] = gauss(x, 1, 0.5, 0.3)
cl[:, 2, :, :] = gauss(x, 1, 0.2, 0.3)
return cl |
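`colorize` calls a `gauss` helper that is not included in this row. A plausible definition, assuming the conventional (amplitude, center, width) signature implied by calls like `gauss(x, 0.5, 0.6, 0.2)`:

```python
import torch

def gauss(x, a, b, c):
    # Gaussian bump over tensor x: amplitude a, center b, width c.
    # Assumed signature; the original helper is not part of this row.
    return a * torch.exp(-torch.pow(x - b, 2) / (2 * c * c))
```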
@addressable(Exactly(int))
def age(self):
"Return the person's age in years.\n\n :rtype int\n " | 5,267,023,490,484,108,000 | Return the person's age in years.
:rtype int | tests/python/pants_test/engine/test_addressable.py | age | AHassanSOS/pants | python | @addressable(Exactly(int))
def age(self):
"Return the person's age in years.\n\n :rtype int\n " |
@addressable_list(Exactly(int, float))
def values(self):
"Return this series' values.\n\n :rtype list of int or float\n " | -8,348,155,537,852,040,000 | Return this series' values.
:rtype list of int or float | tests/python/pants_test/engine/test_addressable.py | values | AHassanSOS/pants | python | @addressable_list(Exactly(int, float))
def values(self):
"Return this series' values.\n\n :rtype list of int or float\n " |
@addressable_dict(Exactly(int, float))
def varz(self):
'Return a snapshot of the current /varz.\n\n :rtype dict of string -> int or float\n ' | 6,021,855,875,471,639,000 | Return a snapshot of the current /varz.
:rtype dict of string -> int or float | tests/python/pants_test/engine/test_addressable.py | varz | AHassanSOS/pants | python | @addressable_dict(Exactly(int, float))
def varz(self):
'Return a snapshot of the current /varz.\n\n :rtype dict of string -> int or float\n ' |
def read_uint32(fp, pos):
'Read 4 little-endian bytes into an unsigned 32-bit integer. Return value, position + 4.'
fp.seek(pos)
val = struct.unpack('<I', fp.read(4))[0]
return (val, (pos + 4)) | 4,483,001,118,760,967,000 | Read 4 little-endian bytes into an unsigned 32-bit integer. Return value, position + 4. | zw1_pack.py | read_uint32 | nmbook/zw1-pack | python | def read_uint32(fp, pos):
fp.seek(pos)
val = struct.unpack('<I', fp.read(4))[0]
return (val, (pos + 4)) |
def read_strn(fp, pos, size):
'Read N null-padded bytes into an ascii encoded string. Return value, position + N.'
fp.seek(pos)
val = struct.unpack((('<' + str(size)) + 's'), fp.read(size))[0]
return (val.decode('ascii').strip('\x00'), (pos + size)) | -3,470,060,733,731,498,500 | Read N null-padded bytes into an ascii encoded string. Return value, position + N. | zw1_pack.py | read_strn | nmbook/zw1-pack | python | def read_strn(fp, pos, size):
fp.seek(pos)
val = struct.unpack((('<' + str(size)) + 's'), fp.read(size))[0]
return (val.decode('ascii').strip('\x00'), (pos + size)) |
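A short usage sketch: together these helpers can walk the archive header that `pack` (next row) writes, i.e. a uint32 magic (12345678), a uint32 table count, then one 12-byte record per extension table. The function name is illustrative:

```python
def read_tables(fp):
    # Layout mirrors the writer in pack(): magic, table count, then
    # (4-byte extension name, table offset, entry count) per table.
    pos = 0
    (magic, pos) = read_uint32(fp, pos)
    assert magic == 12345678
    (ft_count, pos) = read_uint32(fp, pos)
    tables = []
    for _ in range(ft_count):
        (ext, pos) = read_strn(fp, pos, 4)
        (table_pos, pos) = read_uint32(fp, pos)
        (count, pos) = read_uint32(fp, pos)
        tables.append((ext, table_pos, count))
    return tables
```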
def pack(file_array, args):
'Packs files or folders given into the first argument: a target file name or a directory (archive will be named the same as directory).\n \n If archive name exists, appends number and tries again.'
output_target = file_array[0]
input_set = file_array.copy()
if os.path.isdir(output_target):
output_target = os.path.basename(os.path.realpath(output_target.rstrip('\\/')))
if (len(output_target) == 0):
print('Error: Archive name invalid.', file=sys.stderr)
return
elif ((len(output_target) > 4) and (output_target[(- 4):].upper() == '.DAT')):
input_set = file_array[1:]
else:
print('Error: Unknown file(s). Please provide a .DAT file or existing folder to pack it.', file=sys.stderr)
return
input_files = []
for f in input_set:
for (dirpath, dirnames, filenames) in os.walk(f):
for dirname in dirnames:
if (dirname.startswith('.') or dirname.startswith('__')):
dirnames.remove(dirname)
input_files += [os.path.join(dirpath, name) for name in filenames if ((not name.startswith('.')) and (not name.startswith('__')))]
try_count = 0
alt = ''
while os.path.exists('{}{}.dat'.format(output_target, alt)):
try_count += 1
alt = '-{}'.format(try_count)
if (try_count >= 100):
print('Error: Archive output file exists and no alternative.', file=sys.stderr)
return
output_target = '{}{}.dat'.format(output_target, alt)
if (not args.quiet):
print('Packing {} files into {}...'.format(len(input_files), output_target))
file_tables = []
for file_path in input_files:
try:
(name, ext) = os.path.splitext(os.path.basename(file_path).lower())
try:
name = name.encode('ascii')
ext = ext.encode('ascii')
except UnicodeError:
print('Error: Input file names must be valid ASCII. {} skipped.'.format(file_path), file=sys.stderr)
continue
if (len(ext) != 4):
print('Error: Input file names must have 3 character extensions. {} skipped.'.format(file_path), file=sys.stderr)
continue
ext = ext[1:]
if ((len(name) < 1) or (len(name) > 8)):
print('Error: Input file names must be <= 8 characters in length. {} skipped.'.format(file_path), file=sys.stderr)
continue
if (b'.' in name):
print('Error: Input file names cannot have multiple extensions or additional dots. {} skipped.'.format(file_path), file=sys.stderr)
continue
this_name_obj = {'name': name, 'size': os.path.getsize(file_path), 'pos': None, 'full_name': file_path}
this_ext_table = None
for table in file_tables:
if (table['name'] == ext):
this_ext_table = table
break
if (this_ext_table is None):
this_ext_table = {'name': ext, 'count': None, 'pos': None, 'files': [this_name_obj]}
file_tables.append(this_ext_table)
else:
this_ext_table['files'].append(this_name_obj)
except Exception as err:
print(('Error: Uncaught exception locating file: ' + file_path), file=sys.stderr)
print('{}'.format(err), file=sys.stderr)
if (not args.quiet):
traceback.print_exc()
return
try:
pos = 8
ft_count = len(file_tables)
pos += (ft_count * 12)
for ft in file_tables:
fd_count = len(ft['files'])
ft['count'] = fd_count
ft['pos'] = pos
pos += (fd_count * 16)
for ft in file_tables:
for fd in ft['files']:
fd['pos'] = pos
pos += fd['size']
with open(output_target, 'wb') as f:
pos = 0
pos += write_into(f, '<II', 12345678, ft_count)
for ft in file_tables:
f.write(ft['name'].ljust(4, b'\x00'))
pos += 4
pos += write_into(f, '<II', ft['pos'], ft['count'])
for ft in file_tables:
for fd in ft['files']:
f.write(fd['name'].ljust(8, b'\x00'))
pos += 8
pos += write_into(f, '<II', fd['size'], fd['pos'])
for ft in file_tables:
for fd in ft['files']:
file_path = fd['full_name']
if (not args.quiet):
print(file_path)
try:
with open(file_path, 'rb') as fi:
f.write(fi.read(fd['size']))
except Exception as err:
print(((('Error: Uncaught exception writing file to archive: ' + output_target) + ' <- ') + file_path), file=sys.stderr)
print('{}'.format(err), file=sys.stderr)
if (not args.quiet):
traceback.print_exc()
return
except Exception as err2:
print(('Error: Uncaught exception writing archive: ' + output_target), file=sys.stderr)
print('{}'.format(err2), file=sys.stderr)
if (not args.quiet):
traceback.print_exc() | -4,834,152,850,736,557,000 | Packs files or folders given into the first argument: a target file name or a directory (archive will be named the same as directory).
If archive name exists, appends number and tries again. | zw1_pack.py | pack | nmbook/zw1-pack | python | def pack(file_array, args):
'Packs files or folders given into the first argument: a target file name or a directory (archive will be named the same as directory).\n \n If archive name exists, appends number and tries again.'
output_target = file_array[0]
input_set = file_array.copy()
if os.path.isdir(output_target):
output_target = os.path.basename(os.path.realpath(output_target.rstrip('\\/')))
if (len(output_target) == 0):
print('Error: Archive name invalid.', file=sys.stderr)
return
elif ((len(output_target) > 4) and (output_target[(- 4):].upper() == '.DAT')):
input_set = file_array[1:]
else:
print('Error: Unknown file(s). Please provide a .DAT file or existing folder to pack it.', file=sys.stderr)
return
input_files = []
for f in input_set:
for (dirpath, dirnames, filenames) in os.walk(f):
for dirname in dirnames:
if (dirname.startswith('.') or dirname.startswith('__')):
dirnames.remove(dirname)
input_files += [os.path.join(dirpath, name) for name in filenames if ((not name.startswith('.')) and (not name.startswith('__')))]
try_count = 0
alt = ''
while os.path.exists('{}{}.dat'.format(output_target, alt)):
try_count += 1
alt = '-{}'.format(try_count)
if (try_count >= 100):
print('Error: Archive output file exists and no alternative.', file=sys.stderr)
return
output_target = '{}{}.dat'.format(output_target, alt)
if (not args.quiet):
print('Packing {} files into {}...'.format(len(input_files), output_target))
file_tables = []
for file_path in input_files:
try:
(name, ext) = os.path.splitext(os.path.basename(file_path).lower())
try:
name = name.encode('ascii')
ext = ext.encode('ascii')
except UnicodeError:
print('Error: Input file names must be valid ASCII. {} skipped.'.format(file_path), file=sys.stderr)
continue
if (len(ext) != 4):
print('Error: Input file names must have 3 character extensions. {} skipped.'.format(file_path), file=sys.stderr)
continue
ext = ext[1:]
if ((len(name) < 1) or (len(name) > 8)):
print('Error: Input file names must be <= 8 characters in length. {} skipped.'.format(file_path), file=sys.stderr)
continue
if (b'.' in name):
print('Error: Input file names cannot have multiple extensions or additional dots. {} skipped.'.format(file_path), file=sys.stderr)
continue
this_name_obj = {'name': name, 'size': os.path.getsize(file_path), 'pos': None, 'full_name': file_path}
this_ext_table = None
for table in file_tables:
if (table['name'] == ext):
this_ext_table = table
break
if (this_ext_table is None):
this_ext_table = {'name': ext, 'count': None, 'pos': None, 'files': [this_name_obj]}
file_tables.append(this_ext_table)
else:
this_ext_table['files'].append(this_name_obj)
except Exception as err:
print(('Error: Uncaught exception locating file: ' + file_path), file=sys.stderr)
print('{}'.format(err), file=sys.stderr)
if (not args.quiet):
traceback.print_exc()
return
try:
pos = 8
ft_count = len(file_tables)
pos += (ft_count * 12)
for ft in file_tables:
fd_count = len(ft['files'])
ft['count'] = fd_count
ft['pos'] = pos
pos += (fd_count * 16)
for ft in file_tables:
for fd in ft['files']:
fd['pos'] = pos
pos += fd['size']
with open(output_target, 'wb') as f:
pos = 0
pos += write_into(f, '<II', 12345678, ft_count)
for ft in file_tables:
f.write(ft['name'].ljust(4, b'\x00'))
pos += 4
pos += write_into(f, '<II', ft['pos'], ft['count'])
for ft in file_tables:
for fd in ft['files']:
f.write(fd['name'].ljust(8, b'\x00'))
pos += 8
pos += write_into(f, '<II', fd['size'], fd['pos'])
for ft in file_tables:
for fd in ft['files']:
file_path = fd['full_name']
if (not args.quiet):
print(file_path)
try:
with open(file_path, 'rb') as fi:
f.write(fi.read(fd['size']))
except Exception as err:
print(((('Error: Uncaught exception writing file to archive: ' + output_target) + ' <- ') + file_path), file=sys.stderr)
print('{}'.format(err), file=sys.stderr)
if (not args.quiet):
traceback.print_exc()
return
except Exception as err2:
print(('Error: Uncaught exception writing archive: ' + output_target), file=sys.stderr)
print('{}'.format(err2), file=sys.stderr)
if (not args.quiet):
traceback.print_exc() |
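The byte layout pack() emits — an 8-byte header (magic, extension-table count), 12 bytes per extension table, 16 bytes per file entry, then raw file data — built by hand for a single-file archive (the names and payload are illustrative):

import struct

payload = b'hello'
header = struct.pack('<II', 12345678, 1)                 # magic + extension-table count
ext_entry = b'txt\x00' + struct.pack('<II', 8 + 12, 1)   # file table starts after header + tables
file_entry = b'readme\x00\x00' + struct.pack('<II', len(payload), 8 + 12 + 16)  # size, data offset
archive = header + ext_entry + file_entry + payload
assert len(archive) == 8 + 12 + 16 + len(payload)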
def unpack(file_array, args):
'Unpacks one or more files given the provided arguments.\n \n If contents exist in the target output folder, they will be overwritten.'
if (not args.quiet):
print('Unpacking {} files...'.format(len(file_array)))
is_multiple = (len(file_array) != 1)
for file_path in file_array:
try:
if (not os.path.isfile(file_path)):
print(('Error: File not found: ' + file_path), file=sys.stderr)
continue
basename = os.path.basename(os.path.realpath(file_path))
if ((len(basename) <= 4) or (basename[(- 4):].upper() != '.DAT')):
if (not is_multiple):
print(('Error: Not an archive of the correct format [file name error]: ' + file_path), file=sys.stderr)
continue
dirname = (basename[:(- 4)] + '/')
if ((not args.quiet) or args.test):
print(basename)
with open(file_path, 'rb') as f:
pos = 0
(magic, pos) = read_uint32(f, pos)
if (magic != 12345678):
if (not is_multiple):
print(('Error: Not an archive of the correct format [magic number error]: ' + file_path), file=sys.stderr)
continue
(ft_count, pos) = read_uint32(f, pos)
file_tables = []
for x in range(0, ft_count):
(ftext, pos) = read_strn(f, pos, 4)
(ftpos, pos) = read_uint32(f, pos)
(ftnum, pos) = read_uint32(f, pos)
ft = {'name': ftext, 'count': ftnum, 'pos': ftpos, 'files': []}
for y in range(0, ftnum):
(fdnam, ftpos) = read_strn(f, ftpos, 8)
(fdsiz, ftpos) = read_uint32(f, ftpos)
(fdpos, ftpos) = read_uint32(f, ftpos)
fd = {'name': fdnam, 'size': fdsiz, 'pos': fdpos}
ft['files'].append(fd)
file_tables.append(ft)
if args.test:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(file_tables)
else:
if (not os.path.isdir(dirname)):
os.mkdir(dirname)
for ft in file_tables:
for fd in ft['files']:
out_path = os.path.join(dirname, ((fd['name'] + '.') + ft['name']))
if (not args.quiet):
print(out_path)
with open(out_path, 'wb') as fo:
f.seek(fd['pos'])
fo.write(f.read(fd['size']))
except Exception as err:
print(('Error: Uncaught exception parsing file: ' + file_path), file=sys.stderr)
print('{}'.format(err), file=sys.stderr)
if (not args.quiet):
traceback.print_exc()
continue | -8,862,921,214,724,358,000 | Unpacks one or more files given the provided arguments.
If contents exist in the target output folder, they will be overwritten. | zw1_pack.py | unpack | nmbook/zw1-pack | python | def unpack(file_array, args):
'Unpacks one or more files given the provided arguments.\n \n If contents exist in the target output folder, they will be overwritten.'
if (not args.quiet):
print('Unpacking {} files...'.format(len(file_array)))
is_multiple = (len(file_array) != 1)
for file_path in file_array:
try:
if (not os.path.isfile(file_path)):
print(('Error: File not found: ' + file_path), file=sys.stderr)
continue
basename = os.path.basename(os.path.realpath(file_path))
if ((len(basename) <= 4) or (basename[(- 4):].upper() != '.DAT')):
if (not is_multiple):
print(('Error: Not an archive of the correct format [file name error]: ' + file_path), file=sys.stderr)
continue
dirname = (basename[:(- 4)] + '/')
if ((not args.quiet) or args.test):
print(basename)
with open(file_path, 'rb') as f:
pos = 0
(magic, pos) = read_uint32(f, pos)
if (magic != 12345678):
if (not is_multiple):
print(('Error: Not an archive of the correct format [magic number error]: ' + file_path), file=sys.stderr)
continue
(ft_count, pos) = read_uint32(f, pos)
file_tables = []
for x in range(0, ft_count):
(ftext, pos) = read_strn(f, pos, 4)
(ftpos, pos) = read_uint32(f, pos)
(ftnum, pos) = read_uint32(f, pos)
ft = {'name': ftext, 'count': ftnum, 'pos': ftpos, 'files': []}
for y in range(0, ftnum):
(fdnam, ftpos) = read_strn(f, ftpos, 8)
(fdsiz, ftpos) = read_uint32(f, ftpos)
(fdpos, ftpos) = read_uint32(f, ftpos)
fd = {'name': fdnam, 'size': fdsiz, 'pos': fdpos}
ft['files'].append(fd)
file_tables.append(ft)
if args.test:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(file_tables)
else:
if (not os.path.isdir(dirname)):
os.mkdir(dirname)
for ft in file_tables:
for fd in ft['files']:
out_path = os.path.join(dirname, ((fd['name'] + '.') + ft['name']))
if (not args.quiet):
print(out_path)
with open(out_path, 'wb') as fo:
f.seek(fd['pos'])
fo.write(f.read(fd['size']))
except Exception as err:
print(('Error: Uncaught exception parsing file: ' + file_path), file=sys.stderr)
print('{}'.format(err), file=sys.stderr)
if (not args.quiet):
traceback.print_exc()
continue |
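unpack() leans on a read_uint32 helper defined earlier in zw1_pack.py (outside this excerpt); a self-contained sketch of the header check, re-creating that helper from its call sites:

import io
import struct

def read_uint32(fp, pos):
    # Re-created from its usage above: little-endian uint32 at pos; return value and new pos.
    fp.seek(pos)
    return struct.unpack('<I', fp.read(4))[0], pos + 4

f = io.BytesIO(struct.pack('<II', 12345678, 0))  # magic number + zero file tables
magic, pos = read_uint32(f, 0)
ft_count, pos = read_uint32(f, pos)
assert magic == 12345678 and ft_count == 0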
def _CreateLegalIdentifier(input_string):
'Converts input_string to a legal identifier for ADMX/ADML files.\n\n Changes some characters that do not necessarily cause problems and may not\n handle all cases.\n\n Args:\n input_string: Text to convert to a legal identifier.\n\n Returns:\n String containing a legal identifier based on input_string.\n '
return re.sub('[\\W_]', '', input_string) | 290,922,038,349,501,200 | Converts input_string to a legal identifier for ADMX/ADML files.
Changes some characters that do not necessarily cause problems and may not
handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string. | omaha/enterprise/generate_group_policy_template_admx.py | _CreateLegalIdentifier | huhisoftware/omaha | python | def _CreateLegalIdentifier(input_string):
'Converts input_string to a legal identifier for ADMX/ADML files.\n\n Changes some characters that do not necessarily cause problems and may not\n handle all cases.\n\n Args:\n input_string: Text to convert to a legal identifier.\n\n Returns:\n String containing a legal identifier based on input_string.\n '
return re.sub('[\\W_]', '', input_string)
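The regex drops everything outside [A-Za-z0-9], including underscores; a quick standalone check (the app name is illustrative):

import re

def _CreateLegalIdentifier(input_string):
    # Re-stated here so the snippet runs standalone.
    return re.sub(r'[\W_]', '', input_string)

assert _CreateLegalIdentifier('Google Chrome (Beta)') == 'GoogleChromeBeta'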
def GenerateGroupPolicyTemplateAdmx(apps):
'Generates a Group Policy template (ADMX format)for the specified apps.\n\n Replaces LF in strings above with CRLF as required by gpedit.msc.\n When writing the resulting contents to a file, use binary mode to ensure the\n CRLFs are preserved.\n\n Args:\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n\n Returns:\n String containing the contents of the .ADMX file.\n '
def _GenerateCategories(apps):
'Generates category string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the category string for each app\n in apps, each populated with the appropriate app-specific strings.\n '
admx_app_category_template = ' <category name="Cat_%(AppLegalId)s"\n displayName="$(string.Cat_%(AppLegalId)s)">\n <parentCategory ref="Cat_Applications" />\n </category>'
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append((admx_app_category_template % {'AppLegalId': _CreateLegalIdentifier(app_name)}))
return (ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)})
def _GeneratePolicies(apps):
'Generates policy string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the policy template for each app\n in apps, each populated with the appropriate app-specific strings.\n '
app_policy_list = []
for app in apps:
(app_name, app_guid, _, _) = app
app_policy_list.append((ADMX_APP_POLICY_TEMPLATE % {'AppLegalId': _CreateLegalIdentifier(app_name), 'AppGuid': app_guid, 'RootPolicyKey': MAIN_POLICY_KEY}))
return (ADMX_POLICIES % {'AppPolicyList': '\n'.join(app_policy_list), 'RootPolicyKey': MAIN_POLICY_KEY})
target_contents = [ADMX_HEADER, ADMX_ENVIRONMENT, _GenerateCategories(apps), _GeneratePolicies(apps), ADMX_FOOTER]
return ''.join(target_contents) | -1,079,573,196,691,132,000 | Generates a Group Policy template (ADMX format) for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADMX file. | omaha/enterprise/generate_group_policy_template_admx.py | GenerateGroupPolicyTemplateAdmx | huhisoftware/omaha | python | def GenerateGroupPolicyTemplateAdmx(apps):
'Generates a Group Policy template (ADMX format)for the specified apps.\n\n Replaces LF in strings above with CRLF as required by gpedit.msc.\n When writing the resulting contents to a file, use binary mode to ensure the\n CRLFs are preserved.\n\n Args:\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n\n Returns:\n String containing the contents of the .ADMX file.\n '
def _GenerateCategories(apps):
'Generates category string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the category string for each app\n in apps, each populated with the appropriate app-specific strings.\n '
admx_app_category_template = ' <category name="Cat_%(AppLegalId)s"\n displayName="$(string.Cat_%(AppLegalId)s)">\n <parentCategory ref="Cat_Applications" />\n </category>'
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append((admx_app_category_template % {'AppLegalId': _CreateLegalIdentifier(app_name)}))
return (ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)})
def _GeneratePolicies(apps):
'Generates policy string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the policy template for each app\n in apps, each populated with the appropriate app-specific strings.\n '
app_policy_list = []
for app in apps:
(app_name, app_guid, _, _) = app
app_policy_list.append((ADMX_APP_POLICY_TEMPLATE % {'AppLegalId': _CreateLegalIdentifier(app_name), 'AppGuid': app_guid, 'RootPolicyKey': MAIN_POLICY_KEY}))
return (ADMX_POLICIES % {'AppPolicyList': '\n'.join(app_policy_list), 'RootPolicyKey': MAIN_POLICY_KEY})
target_contents = [ADMX_HEADER, ADMX_ENVIRONMENT, _GenerateCategories(apps), _GeneratePolicies(apps), ADMX_FOOTER]
return ''.join(target_contents)
def GenerateGroupPolicyTemplateAdml(apps):
'Generates a Group Policy template (ADML format)for the specified apps.\n\n Replaces LF in strings above with CRLF as required by gpedit.msc.\n When writing the resulting contents to a file, use binary mode to ensure the\n CRLFs are preserved.\n\n Args:\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n\n Returns:\n String containing the contents of the .ADML file.\n '
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if (not rollback_disclaimer):
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = (('Cat_' + app_legal_id), app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (('Explain_Install' + app_legal_id), ('Specifies whether %s can be installed using Google Update/Google Installer.\n\nIf this policy is not configured, %s can be installed as specified by "Allow installation default".\n\n%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (('Explain_AutoUpdate' + app_legal_id), ('Specifies how Google Update handles available %s updates from Google.\n\nIf this policy is not configured, Google Update handles available updates as specified by "Update policy override default".\n\nOptions:\n - Always allow updates: Updates are always applied when found, either by periodic update check or by a manual update check.\n - Manual updates only: Updates are only applied when the user does a manual update check. (Not all apps provide an interface for this.)\n - Automatic silent updates only: Updates are only applied when they are found via the periodic update check.\n - Updates disabled: Never apply updates.\n\nIf you select manual updates, you should periodically check for updates using the application\'s manual update mechanism if available. If you disable updates, you should periodically check for updates and distribute them to users.%s\n\n%s' % (app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (('Explain_TargetVersionPrefix' + app_legal_id), ('Specifies which version %s should be updated to.\n\nWhen this policy is enabled, the app will be updated to the version prefixed with this policy value.\n\nSome examples:\n1) Not configured: app will be updated to the latest version available.\n2) Policy value is set to "55.": the app will be updated to any minor version of 55 (e.g., 55.24.34 or 55.60.2).\n3) Policy value is "55.2.": the app will be updated to any minor version of 55.2 (e.g., 55.2.34 or 55.2.2).\n4) Policy value is "55.24.34": the app will be updated to this specific version only.\n\n%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (('Explain_RollbackToTargetVersion' + app_legal_id), ('Specifies that Google Update should roll installations of %s back to the version indicated by "Target version prefix override".\n\nThis policy setting has no effect unless "Target version prefix override" is set.\n\nIf this policy is not configured or is disabled, installs that have a version higher than that specified by "Target version prefix override" will be left as-is.\n\nIf this policy is enabled, installs that have a version higher than that specified by "Target version prefix override" will be downgraded to the highest available version that matches the target version.\n\n%s\n\n%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append((' <string id="%s">%s</string>' % (entry[0], entry[1])))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE % ('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [ADML_HEADER, ADML_ENVIRONMENT, app_resource_tables, ADML_FOOTER]
return ''.join(target_contents) | -2,271,222,399,420,323,600 | Generates a Group Policy template (ADML format) for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADML file. | omaha/enterprise/generate_group_policy_template_admx.py | GenerateGroupPolicyTemplateAdml | huhisoftware/omaha | python | def GenerateGroupPolicyTemplateAdml(apps):
'Generates a Group Policy template (ADML format)for the specified apps.\n\n Replaces LF in strings above with CRLF as required by gpedit.msc.\n When writing the resulting contents to a file, use binary mode to ensure the\n CRLFs are preserved.\n\n Args:\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n\n Returns:\n String containing the contents of the .ADML file.\n '
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if (not rollback_disclaimer):
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = (('Cat_' + app_legal_id), app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (('Explain_Install' + app_legal_id), ('Specifies whether %s can be installed using Google Update/Google Installer.\n\nIf this policy is not configured, %s can be installed as specified by "Allow installation default".\n\n%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (('Explain_AutoUpdate' + app_legal_id), ('Specifies how Google Update handles available %s updates from Google.\n\nIf this policy is not configured, Google Update handles available updates as specified by "Update policy override default".\n\nOptions:\n - Always allow updates: Updates are always applied when found, either by periodic update check or by a manual update check.\n - Manual updates only: Updates are only applied when the user does a manual update check. (Not all apps provide an interface for this.)\n - Automatic silent updates only: Updates are only applied when they are found via the periodic update check.\n - Updates disabled: Never apply updates.\n\nIf you select manual updates, you should periodically check for updates using the application\'s manual update mechanism if available. If you disable updates, you should periodically check for updates and distribute them to users.%s\n\n%s' % (app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (('Explain_TargetVersionPrefix' + app_legal_id), ('Specifies which version %s should be updated to.\n\nWhen this policy is enabled, the app will be updated to the version prefixed with this policy value.\n\nSome examples:\n1) Not configured: app will be updated to the latest version available.\n2) Policy value is set to "55.": the app will be updated to any minor version of 55 (e.g., 55.24.34 or 55.60.2).\n3) Policy value is "55.2.": the app will be updated to any minor version of 55.2 (e.g., 55.2.34 or 55.2.2).\n4) Policy value is "55.24.34": the app will be updated to this specific version only.\n\n%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (('Explain_RollbackToTargetVersion' + app_legal_id), ('Specifies that Google Update should roll installations of %s back to the version indicated by "Target version prefix override".\n\nThis policy setting has no effect unless "Target version prefix override" is set.\n\nIf this policy is not configured or is disabled, installs that have a version higher than that specified by "Target version prefix override" will be left as-is.\n\nIf this policy is enabled, installs that have a version higher than that specified by "Target version prefix override" will be downgraded to the highest available version that matches the target version.\n\n%s\n\n%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN)))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append((' <string id="%s">%s</string>' % (entry[0], entry[1])))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE % ('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [ADML_HEADER, ADML_ENVIRONMENT, app_resource_tables, ADML_FOOTER]
return ''.join(target_contents)
def WriteGroupPolicyTemplateAdmx(target_path, apps):
'Writes a Group Policy template (ADM format)for the specified apps.\n\n The file is UTF-16 and contains CRLF on all platforms.\n\n Args:\n target_path: Output path of the .ADM template file.\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n '
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close() | 1,054,630,995,328,795,000 | Writes a Group Policy template (ADMX format) for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line. | omaha/enterprise/generate_group_policy_template_admx.py | WriteGroupPolicyTemplateAdmx | huhisoftware/omaha | python | def WriteGroupPolicyTemplateAdmx(target_path, apps):
'Writes a Group Policy template (ADM format)for the specified apps.\n\n The file is UTF-16 and contains CRLF on all platforms.\n\n Args:\n target_path: Output path of the .ADM template file.\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n '
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close() |
def WriteGroupPolicyTemplateAdml(target_path, apps):
'Writes a Group Policy template (ADM format)for the specified apps.\n\n The file is UTF-16 and contains CRLF on all platforms.\n\n Args:\n target_path: Output path of the .ADM template file.\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n '
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close() | 5,698,310,291,979,134,000 | Writes a Group Policy template (ADML format) for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line. | omaha/enterprise/generate_group_policy_template_admx.py | WriteGroupPolicyTemplateAdml | huhisoftware/omaha | python | def WriteGroupPolicyTemplateAdml(target_path, apps):
'Writes a Group Policy template (ADM format)for the specified apps.\n\n The file is UTF-16 and contains CRLF on all platforms.\n\n Args:\n target_path: Output path of the .ADM template file.\n apps: A list of tuples containing information about each app.\n Each element of the list is a tuple of:\n * app name\n * app ID\n * optional string to append to the auto-update explanation\n - Should start with a space or double new line.\n '
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close() |
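Both Write helpers persist the generated template as UTF-16 text via codecs.open; a minimal sketch of that I/O pattern (the file name and contents are illustrative):

import codecs

contents = 'example template text\r\n'                    # gpedit.msc expects CRLF line endings
with codecs.open('example.adml', 'wb', 'utf-16') as f:    # file name is illustrative
    f.write(contents)                                     # codecs adds the UTF-16 BOM and encoding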
def _GenerateCategories(apps):
'Generates category string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the category string for each app\n in apps, each populated with the appropriate app-specific strings.\n '
admx_app_category_template = ' <category name="Cat_%(AppLegalId)s"\n displayName="$(string.Cat_%(AppLegalId)s)">\n <parentCategory ref="Cat_Applications" />\n </category>'
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append((admx_app_category_template % {'AppLegalId': _CreateLegalIdentifier(app_name)}))
return (ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}) | 8,737,190,411,212,836,000 | Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings. | omaha/enterprise/generate_group_policy_template_admx.py | _GenerateCategories | huhisoftware/omaha | python | def _GenerateCategories(apps):
'Generates category string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the category string for each app\n in apps, each populated with the appropriate app-specific strings.\n '
admx_app_category_template = ' <category name="Cat_%(AppLegalId)s"\n displayName="$(string.Cat_%(AppLegalId)s)">\n <parentCategory ref="Cat_Applications" />\n </category>'
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append((admx_app_category_template % {'AppLegalId': _CreateLegalIdentifier(app_name)}))
return (ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}) |
def _GeneratePolicies(apps):
'Generates policy string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the policy template for each app\n in apps, each populated with the appropriate app-specific strings.\n '
app_policy_list = []
for app in apps:
(app_name, app_guid, _, _) = app
app_policy_list.append((ADMX_APP_POLICY_TEMPLATE % {'AppLegalId': _CreateLegalIdentifier(app_name), 'AppGuid': app_guid, 'RootPolicyKey': MAIN_POLICY_KEY}))
return (ADMX_POLICIES % {'AppPolicyList': '\n'.join(app_policy_list), 'RootPolicyKey': MAIN_POLICY_KEY}) | 1,275,777,392,437,355,800 | Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings. | omaha/enterprise/generate_group_policy_template_admx.py | _GeneratePolicies | huhisoftware/omaha | python | def _GeneratePolicies(apps):
'Generates policy string for each of the specified apps.\n\n Args:\n apps: list of tuples containing information about the apps.\n\n Returns:\n String containing concatenated copies of the policy template for each app\n in apps, each populated with the appropriate app-specific strings.\n '
app_policy_list = []
for app in apps:
(app_name, app_guid, _, _) = app
app_policy_list.append((ADMX_APP_POLICY_TEMPLATE % {'AppLegalId': _CreateLegalIdentifier(app_name), 'AppGuid': app_guid, 'RootPolicyKey': MAIN_POLICY_KEY}))
return (ADMX_POLICIES % {'AppPolicyList': '\n'.join(app_policy_list), 'RootPolicyKey': MAIN_POLICY_KEY}) |
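Both generator helpers rely on Python's dict-based %-substitution; a standalone illustration (the template line is a simplified stand-in for the real ADMX snippets):

template = '<category name="Cat_%(AppLegalId)s" displayName="$(string.Cat_%(AppLegalId)s)" />'
print(template % {'AppLegalId': 'GoogleChrome'})
# -> <category name="Cat_GoogleChrome" displayName="$(string.Cat_GoogleChrome)" />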
def ComputeDeriv(self):
'Compute derivative w.r.t input given derivative w.r.t output.'
self.deriv.apply_logistic_deriv(self.state) | -3,191,914,412,202,420,700 | Compute derivative w.r.t input given derivative w.r.t output. | package/deepnet/logistic_layer.py | ComputeDeriv | Corvalius/deepnet | python | def ComputeDeriv(self):
self.deriv.apply_logistic_deriv(self.state) |
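apply_logistic_deriv comes from cudamat and multiplies the stored gradient by the logistic derivative in place on the GPU; a numpy analogue of the same math (values are illustrative):

import numpy as np

state = np.array([0.2, 0.5, 0.9])    # layer outputs s = sigmoid(a)
deriv = np.array([0.1, -0.3, 0.05])  # incoming dLoss/d(output)
deriv *= state * (1.0 - state)       # chain rule: dLoss/d(input) = dLoss/d(output) * s * (1 - s)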
def GetLoss(self, get_deriv=False, acc_deriv=False, **kwargs):
'Compute loss and also deriv w.r.t to it if asked for.\n\n Compute the loss function. Targets should be in self.data, predictions\n should be in self.state.\n Args:\n get_deriv: If True, compute the derivative w.r.t the loss function and put\n it in self.deriv.\n '
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
if (self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY):
data = self.data
state = self.state
temp1 = self.statesize
cm.cross_entropy_bernoulli(data, state, target=temp1, tiny=self.tiny)
perf.cross_entropy = temp1.sum()
cm.correct_preds(data, state, target=temp1, cutoff=0.5)
perf.correct_preds = temp1.sum()
if get_deriv:
self.state.subtract(self.data, target=self.deriv)
elif (self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS):
target = self.statesize
self.state.subtract(self.data, target=target)
error = (target.euclid_norm() ** 2)
perf.error = error
if acc_deriv:
self.deriv.add_mult(target, alpha=self.loss_weight)
else:
self.deriv.assign(target)
if get_deriv:
self.ComputeDeriv()
else:
raise Exception('Unknown loss function for logistic units.')
return perf | 6,746,405,064,523,228,000 | Compute loss and also deriv w.r.t. it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv. | package/deepnet/logistic_layer.py | GetLoss | Corvalius/deepnet | python | def GetLoss(self, get_deriv=False, acc_deriv=False, **kwargs):
'Compute loss and also deriv w.r.t to it if asked for.\n\n Compute the loss function. Targets should be in self.data, predictions\n should be in self.state.\n Args:\n get_deriv: If True, compute the derivative w.r.t the loss function and put\n it in self.deriv.\n '
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
if (self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY):
data = self.data
state = self.state
temp1 = self.statesize
cm.cross_entropy_bernoulli(data, state, target=temp1, tiny=self.tiny)
perf.cross_entropy = temp1.sum()
cm.correct_preds(data, state, target=temp1, cutoff=0.5)
perf.correct_preds = temp1.sum()
if get_deriv:
self.state.subtract(self.data, target=self.deriv)
elif (self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS):
target = self.statesize
self.state.subtract(self.data, target=target)
error = (target.euclid_norm() ** 2)
perf.error = error
if acc_deriv:
self.deriv.add_mult(target, alpha=self.loss_weight)
else:
self.deriv.assign(target)
if get_deriv:
self.ComputeDeriv()
else:
raise Exception('Unknown loss function for logistic units.')
return perf |
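A numpy analogue of the CROSS_ENTROPY branch (the cudamat calls above operate on GPU matrices; the values below are illustrative):

import numpy as np

data = np.array([1.0, 0.0, 1.0])    # targets
state = np.array([0.9, 0.2, 0.6])   # sigmoid predictions
tiny = 1e-10                        # numerical floor, mirroring self.tiny
cross_entropy = -(data * np.log(state + tiny)
                  + (1 - data) * np.log(1 - state + tiny)).sum()
deriv = state - data                # gradient w.r.t. the pre-sigmoid input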
def _CreateServiceRestriction(restriction_message_type, mask_prefix, enable_restriction, allowed_services):
'Returns a service restriction message and its update mask.'
if ((allowed_services is None) and (enable_restriction is None)):
return (None, [])
message = restriction_message_type()
update_mask = []
if (allowed_services is not None):
message.allowedServices = allowed_services
update_mask.append('allowedServices')
if (enable_restriction is not None):
message.enableRestriction = enable_restriction
update_mask.append('enableRestriction')
return (message, ['{}.{}'.format(mask_prefix, item) for item in update_mask]) | -358,618,633,509,207,100 | Returns a service restriction message and its update mask. | gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/accesscontextmanager/zones.py | _CreateServiceRestriction | bopopescu/JobSniperRails | python | def _CreateServiceRestriction(restriction_message_type, mask_prefix, enable_restriction, allowed_services):
if ((allowed_services is None) and (enable_restriction is None)):
return (None, [])
message = restriction_message_type()
update_mask = []
if (allowed_services is not None):
message.allowedServices = allowed_services
update_mask.append('allowedServices')
if (enable_restriction is not None):
message.enableRestriction = enable_restriction
update_mask.append('enableRestriction')
return (message, ['{}.{}'.format(mask_prefix, item) for item in update_mask]) |
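The helper's return value pairs a message with prefixed mask entries; the prefixing step in isolation (values are illustrative):

mask_prefix = 'ingressServiceRestriction'
update_mask = ['allowedServices', 'enableRestriction']
print(['{}.{}'.format(mask_prefix, item) for item in update_mask])
# -> ['ingressServiceRestriction.allowedServices', 'ingressServiceRestriction.enableRestriction']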
def _CreateServicePerimeterConfig(messages, mask_prefix, include_unrestricted_services, resources, restricted_services, unrestricted_services, levels, ingress_allowed_services, vpc_allowed_services, bridge_allowed_services, enable_ingress_service_restriction, enable_vpc_service_restriction, enable_bridge_service_restriction):
'Returns a ServicePerimeterConfig and its update mask.'
config = messages.ServicePerimeterConfig()
mask = []
if (resources is not None):
mask.append('resources')
config.resources = resources
if (include_unrestricted_services and (unrestricted_services is not None)):
mask.append('unrestrictedServices')
config.unrestrictedServices = unrestricted_services
if (restricted_services is not None):
mask.append('restrictedServices')
config.restrictedServices = restricted_services
if (levels is not None):
mask.append('accessLevels')
config.accessLevels = [l.RelativeName() for l in levels]
if ((enable_ingress_service_restriction is not None) or (ingress_allowed_services is not None)):
(config.ingressServiceRestriction, mask_updates) = _CreateServiceRestriction(messages.IngressServiceRestriction, 'ingressServiceRestriction', enable_restriction=enable_ingress_service_restriction, allowed_services=ingress_allowed_services)
mask += mask_updates
if ((enable_vpc_service_restriction is not None) or (vpc_allowed_services is not None)):
(config.vpcServiceRestriction, mask_updates) = _CreateServiceRestriction(messages.VpcServiceRestriction, 'vpcServiceRestriction', enable_restriction=enable_vpc_service_restriction, allowed_services=vpc_allowed_services)
mask += mask_updates
if ((enable_bridge_service_restriction is not None) or (bridge_allowed_services is not None)):
(config.bridgeServiceRestriction, mask_updates) = _CreateServiceRestriction(messages.BridgeServiceRestriction, 'bridgeServiceRestriction', enable_restriction=enable_bridge_service_restriction, allowed_services=bridge_allowed_services)
mask += mask_updates
if (not mask):
return (None, [])
return (config, ['{}.{}'.format(mask_prefix, item) for item in mask]) | -1,922,617,703,878,170,400 | Returns a ServicePerimeterConfig and its update mask. | gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/accesscontextmanager/zones.py | _CreateServicePerimeterConfig | bopopescu/JobSniperRails | python | def _CreateServicePerimeterConfig(messages, mask_prefix, include_unrestricted_services, resources, restricted_services, unrestricted_services, levels, ingress_allowed_services, vpc_allowed_services, bridge_allowed_services, enable_ingress_service_restriction, enable_vpc_service_restriction, enable_bridge_service_restriction):
config = messages.ServicePerimeterConfig()
mask = []
if (resources is not None):
mask.append('resources')
config.resources = resources
if (include_unrestricted_services and (unrestricted_services is not None)):
mask.append('unrestrictedServices')
config.unrestrictedServices = unrestricted_services
if (restricted_services is not None):
mask.append('restrictedServices')
config.restrictedServices = restricted_services
if (levels is not None):
mask.append('accessLevels')
config.accessLevels = [l.RelativeName() for l in levels]
if ((enable_ingress_service_restriction is not None) or (ingress_allowed_services is not None)):
(config.ingressServiceRestriction, mask_updates) = _CreateServiceRestriction(messages.IngressServiceRestriction, 'ingressServiceRestriction', enable_restriction=enable_ingress_service_restriction, allowed_services=ingress_allowed_services)
mask += mask_updates
if ((enable_vpc_service_restriction is not None) or (vpc_allowed_services is not None)):
(config.vpcServiceRestriction, mask_updates) = _CreateServiceRestriction(messages.VpcServiceRestriction, 'vpcServiceRestriction', enable_restriction=enable_vpc_service_restriction, allowed_services=vpc_allowed_services)
mask += mask_updates
if ((enable_bridge_service_restriction is not None) or (bridge_allowed_services is not None)):
(config.bridgeServiceRestriction, mask_updates) = _CreateServiceRestriction(messages.BridgeServiceRestriction, 'bridgeServiceRestriction', enable_restriction=enable_bridge_service_restriction, allowed_services=bridge_allowed_services)
mask += mask_updates
if (not mask):
return (None, [])
return (config, ['{}.{}'.format(mask_prefix, item) for item in mask]) |
def Patch(self, perimeter_ref, description=None, title=None, perimeter_type=None, resources=None, restricted_services=None, unrestricted_services=None, levels=None, ingress_allowed_services=None, vpc_allowed_services=None, bridge_allowed_services=None, enable_ingress_service_restriction=None, enable_vpc_service_restriction=None, enable_bridge_service_restriction=None, apply_to_dry_run_config=False, clear_dry_run=False):
"Patch a service perimeter.\n\n Args:\n perimeter_ref: resources.Resource, reference to the perimeter to patch\n description: str, description of the zone or None if not updating\n title: str, title of the zone or None if not updating\n perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level\n or None if not updating\n resources: list of str, the names of resources (for now, just\n 'projects/...') in the zone or None if not updating.\n restricted_services: list of str, the names of services\n ('example.googleapis.com') that *are* restricted by the access zone or\n None if not updating.\n unrestricted_services: list of str, the names of services\n ('example.googleapis.com') that *are not* restricted by the access zone\n or None if not updating.\n levels: list of Resource, the access levels (in the same policy) that must\n be satisfied for calls into this zone or None if not updating.\n ingress_allowed_services: list of str, the names of services\n ('example.googleapis.com') that *are* allowed to use Access Levels to\n make a cross access zone boundary call, or None if not updating.\n vpc_allowed_services: list of str, the names of services\n ('example.googleapis.com') that *are* allowed to be made within the\n access zone, or None if not updating.\n bridge_allowed_services: list of str, the names of services\n ('example.googleapis.com') that *are* allowed to use the bridge access\n zone, or None if not updating.\n enable_ingress_service_restriction: bool, whether to restrict the set of\n APIs callable outside the access zone via Access Levels, or None if not\n updating.\n enable_vpc_service_restriction: bool, whether to restrict the set of APIs\n callable within the access zone, or None if not updating.\n enable_bridge_service_restriction: bool, whether to restrict the set of\n APIs callable using the bridge access zone, or None if not updating.\n apply_to_dry_run_config: When true, the configuration will be place in the\n 'spec' field instead of the 'status' field of the Service Perimeter.\n clear_dry_run: When true, the ServicePerimeterConfig field for dry-run\n (i.e. 'spec') will be cleared and dryRun will be set to False.\n\n Returns:\n ServicePerimeter, the updated Service Perimeter.\n "
m = self.messages
perimeter = m.ServicePerimeter()
update_mask = []
if (description is not None):
update_mask.append('description')
perimeter.description = description
if (title is not None):
update_mask.append('title')
perimeter.title = title
if (perimeter_type is not None):
update_mask.append('perimeterType')
perimeter.perimeterType = perimeter_type
if (not clear_dry_run):
mask_prefix = ('status' if (not apply_to_dry_run_config) else 'spec')
(config, config_mask_additions) = _CreateServicePerimeterConfig(m, mask_prefix, self.include_unrestricted_services, resources, restricted_services, unrestricted_services, levels, ingress_allowed_services, vpc_allowed_services, bridge_allowed_services, enable_ingress_service_restriction, enable_vpc_service_restriction, enable_bridge_service_restriction)
if (not apply_to_dry_run_config):
perimeter.status = config
else:
perimeter.dryRun = True
perimeter.spec = config
update_mask += config_mask_additions
if (apply_to_dry_run_config and config_mask_additions):
update_mask.append('dryRun')
else:
update_mask.append('spec')
update_mask.append('dryRun')
perimeter.spec = None
perimeter.dryRun = False
update_mask.sort()
if (not update_mask):
log.warning('The update specified results in an identical resource. Skipping request.')
return perimeter
request_type = m.AccesscontextmanagerAccessPoliciesServicePerimetersPatchRequest
request = request_type(servicePerimeter=perimeter, name=perimeter_ref.RelativeName(), updateMask=','.join(update_mask))
operation = self.client.accessPolicies_servicePerimeters.Patch(request)
poller = util.OperationPoller(self.client.accessPolicies_servicePerimeters, self.client.operations, perimeter_ref)
operation_ref = core_resources.REGISTRY.Parse(operation.name, collection='accesscontextmanager.operations')
return waiter.WaitFor(poller, operation_ref, 'Waiting for PATCH operation [{}]'.format(operation_ref.Name())) | 7,694,572,383,652,828,000 | Patch a service perimeter.
Args:
perimeter_ref: resources.Resource, reference to the perimeter to patch
description: str, description of the zone or None if not updating
title: str, title of the zone or None if not updating
perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level
or None if not updating
resources: list of str, the names of resources (for now, just
'projects/...') in the zone or None if not updating.
restricted_services: list of str, the names of services
('example.googleapis.com') that *are* restricted by the access zone or
None if not updating.
unrestricted_services: list of str, the names of services
('example.googleapis.com') that *are not* restricted by the access zone
or None if not updating.
levels: list of Resource, the access levels (in the same policy) that must
be satisfied for calls into this zone or None if not updating.
ingress_allowed_services: list of str, the names of services
('example.googleapis.com') that *are* allowed to use Access Levels to
make a cross access zone boundary call, or None if not updating.
vpc_allowed_services: list of str, the names of services
('example.googleapis.com') that *are* allowed to be made within the
access zone, or None if not updating.
bridge_allowed_services: list of str, the names of services
('example.googleapis.com') that *are* allowed to use the bridge access
zone, or None if not updating.
enable_ingress_service_restriction: bool, whether to restrict the set of
APIs callable outside the access zone via Access Levels, or None if not
updating.
enable_vpc_service_restriction: bool, whether to restrict the set of APIs
callable within the access zone, or None if not updating.
enable_bridge_service_restriction: bool, whether to restrict the set of
APIs callable using the bridge access zone, or None if not updating.
apply_to_dry_run_config: When true, the configuration will be placed in the
'spec' field instead of the 'status' field of the Service Perimeter.
clear_dry_run: When true, the ServicePerimeterConfig field for dry-run
(i.e. 'spec') will be cleared and dryRun will be set to False.
Returns:
ServicePerimeter, the updated Service Perimeter. | gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/accesscontextmanager/zones.py | Patch | bopopescu/JobSniperRails | python | def Patch(self, perimeter_ref, description=None, title=None, perimeter_type=None, resources=None, restricted_services=None, unrestricted_services=None, levels=None, ingress_allowed_services=None, vpc_allowed_services=None, bridge_allowed_services=None, enable_ingress_service_restriction=None, enable_vpc_service_restriction=None, enable_bridge_service_restriction=None, apply_to_dry_run_config=False, clear_dry_run=False):
"Patch a service perimeter.\n\n Args:\n perimeter_ref: resources.Resource, reference to the perimeter to patch\n description: str, description of the zone or None if not updating\n title: str, title of the zone or None if not updating\n perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level\n or None if not updating\n resources: list of str, the names of resources (for now, just\n 'projects/...') in the zone or None if not updating.\n restricted_services: list of str, the names of services\n ('example.googleapis.com') that *are* restricted by the access zone or\n None if not updating.\n unrestricted_services: list of str, the names of services\n ('example.googleapis.com') that *are not* restricted by the access zone\n or None if not updating.\n levels: list of Resource, the access levels (in the same policy) that must\n be satisfied for calls into this zone or None if not updating.\n ingress_allowed_services: list of str, the names of services\n ('example.googleapis.com') that *are* allowed to use Access Levels to\n make a cross access zone boundary call, or None if not updating.\n vpc_allowed_services: list of str, the names of services\n ('example.googleapis.com') that *are* allowed to be made within the\n access zone, or None if not updating.\n bridge_allowed_services: list of str, the names of services\n ('example.googleapis.com') that *are* allowed to use the bridge access\n zone, or None if not updating.\n enable_ingress_service_restriction: bool, whether to restrict the set of\n APIs callable outside the access zone via Access Levels, or None if not\n updating.\n enable_vpc_service_restriction: bool, whether to restrict the set of APIs\n callable within the access zone, or None if not updating.\n enable_bridge_service_restriction: bool, whether to restrict the set of\n APIs callable using the bridge access zone, or None if not updating.\n apply_to_dry_run_config: When true, the configuration will be place in the\n 'spec' field instead of the 'status' field of the Service Perimeter.\n clear_dry_run: When true, the ServicePerimeterConfig field for dry-run\n (i.e. 'spec') will be cleared and dryRun will be set to False.\n\n Returns:\n ServicePerimeter, the updated Service Perimeter.\n "
m = self.messages
perimeter = m.ServicePerimeter()
update_mask = []
if (description is not None):
update_mask.append('description')
perimeter.description = description
if (title is not None):
update_mask.append('title')
perimeter.title = title
if (perimeter_type is not None):
update_mask.append('perimeterType')
perimeter.perimeterType = perimeter_type
if (not clear_dry_run):
mask_prefix = ('status' if (not apply_to_dry_run_config) else 'spec')
(config, config_mask_additions) = _CreateServicePerimeterConfig(m, mask_prefix, self.include_unrestricted_services, resources, restricted_services, unrestricted_services, levels, ingress_allowed_services, vpc_allowed_services, bridge_allowed_services, enable_ingress_service_restriction, enable_vpc_service_restriction, enable_bridge_service_restriction)
if (not apply_to_dry_run_config):
perimeter.status = config
else:
perimeter.dryRun = True
perimeter.spec = config
update_mask += config_mask_additions
if (apply_to_dry_run_config and config_mask_additions):
update_mask.append('dryRun')
else:
update_mask.append('spec')
update_mask.append('dryRun')
perimeter.spec = None
perimeter.dryRun = False
update_mask.sort()
if (not update_mask):
log.warning('The update specified results in an identical resource. Skipping request.')
return perimeter
request_type = m.AccesscontextmanagerAccessPoliciesServicePerimetersPatchRequest
request = request_type(servicePerimeter=perimeter, name=perimeter_ref.RelativeName(), updateMask=','.join(update_mask))
operation = self.client.accessPolicies_servicePerimeters.Patch(request)
poller = util.OperationPoller(self.client.accessPolicies_servicePerimeters, self.client.operations, perimeter_ref)
operation_ref = core_resources.REGISTRY.Parse(operation.name, collection='accesscontextmanager.operations')
return waiter.WaitFor(poller, operation_ref, 'Waiting for PATCH operation [{}]'.format(operation_ref.Name())) |
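Patch ends by sorting the accumulated mask entries and joining them into the request's updateMask string; the serialization step in isolation (entries are illustrative):

update_mask = ['title', 'status.resources', 'status.restrictedServices', 'dryRun']
update_mask.sort()
assert ','.join(update_mask) == 'dryRun,status.resources,status.restrictedServices,title'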
def create(self, validated_data):
'Create a new user with validated password and return it'
return get_user_model().objects.create_user(**validated_data) | 8,558,868,365,978,276,000 | Create a new user with validated password and return it | xojbackend/app/user/serializers.py | create | mazharkafi004/XOJ | python | def create(self, validated_data):
return get_user_model().objects.create_user(**validated_data) |
def update(self, instance, validated_data):
'update user data with encrypted password'
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user | 3,703,274,701,840,843,000 | update user data with encrypted password | xojbackend/app/user/serializers.py | update | mazharkafi004/XOJ | python | def update(self, instance, validated_data):
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user |
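update() pops the password out of validated_data before delegating, so the parent class never writes the raw value to the model; the pop-then-delegate step in isolation (field values are illustrative):

validated_data = {'name': 'alice', 'password': 's3cret'}
password = validated_data.pop('password', None)  # remove before the generic field update
assert 'password' not in validated_data and password == 's3cret'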
def validate(self, attrs):
'validate and authenticate the user'
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(request=self.context.get('request'), username=email, password=password)
if (not user):
msg = _('Unable to authenticate with the provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs | -7,865,784,348,675,763,000 | validate and authenticate the user | xojbackend/app/user/serializers.py | validate | mazharkafi004/XOJ | python | def validate(self, attrs):
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(request=self.context.get('request'), username=email, password=password)
if (not user):
msg = _('Unable to authenticate with the provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs |
def create_or_update(self, resource_group_name, availability_set_name, parameters, **kwargs):
'Create or update an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :param parameters: Parameters supplied to the Create Availability Set operation.\n :type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AvailabilitySet, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
content_type = kwargs.pop('content_type', 'application/json')
url = self.create_or_update.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'AvailabilitySet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | -1,283,394,762,818,203,600 | Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | create_or_update | 00Kai0/azure-sdk-for-python | python | def create_or_update(self, resource_group_name, availability_set_name, parameters, **kwargs):
'Create or update an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :param parameters: Parameters supplied to the Create Availability Set operation.\n :type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AvailabilitySet, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
content_type = kwargs.pop('content_type', 'application/json')
url = self.create_or_update.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'AvailabilitySet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
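A hedged usage sketch for the operation above through the versioned management client; the credential type, resource names, and parameter values are assumptions:

.. code-block:: python

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient

    client = ComputeManagementClient(DefaultAzureCredential(), subscription_id='<subscription-id>')
    avset = client.availability_sets.create_or_update(
        resource_group_name='my-rg',       # assumed resource group
        availability_set_name='my-avset',
        parameters={'location': 'eastus', 'platform_fault_domain_count': 2},
    )
    print(avset.id)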
def update(self, resource_group_name, availability_set_name, parameters, **kwargs):
'Update an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :param parameters: Parameters supplied to the Update Availability Set operation.\n :type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetUpdate\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AvailabilitySet, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
content_type = kwargs.pop('content_type', 'application/json')
url = self.update.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'AvailabilitySetUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | 459,799,859,112,720,300 | Update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | update | 00Kai0/azure-sdk-for-python | python | def update(self, resource_group_name, availability_set_name, parameters, **kwargs):
'Update an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :param parameters: Parameters supplied to the Update Availability Set operation.\n :type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetUpdate\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AvailabilitySet, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
content_type = kwargs.pop('content_type', 'application/json')
url = self.update.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'AvailabilitySetUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
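The `update` operation issues a PATCH, so only the fields set on the update model change; a hedged sketch reusing the assumed `client` from the previous example:

.. code-block:: python

    from azure.mgmt.compute.v2018_10_01.models import AvailabilitySetUpdate

    updated = client.availability_sets.update(
        'my-rg', 'my-avset',
        AvailabilitySetUpdate(tags={'env': 'test'}),  # only tags are patched
    )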
def delete(self, resource_group_name, availability_set_name, **kwargs):
'Delete an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
url = self.delete.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {}) | 923,248,023,635,578,900 | Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | delete | 00Kai0/azure-sdk-for-python | python | def delete(self, resource_group_name, availability_set_name, **kwargs):
'Delete an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
url = self.delete.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {}) |
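A one-line sketch of the delete call with the same assumed names; the operation returns `None` on a 200 or 204:

.. code-block:: python

    client.availability_sets.delete('my-rg', 'my-avset')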
def get(self, resource_group_name, availability_set_name, **kwargs):
'Retrieves information about an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AvailabilitySet, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | 3,359,067,744,655,367,700 | Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | get | 00Kai0/azure-sdk-for-python | python | def get(self, resource_group_name, availability_set_name, **kwargs):
'Retrieves information about an availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AvailabilitySet, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
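A hedged sketch of the `get` call; the deserialized model exposes the availability set's properties as attributes:

.. code-block:: python

    avset = client.availability_sets.get('my-rg', 'my-avset')
    print(avset.location, avset.platform_fault_domain_count)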
def list_by_subscription(self, expand=None, **kwargs):
'Lists all availability sets in a subscription.\n\n :param expand: The expand expression to apply to the operation.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
def prepare_request(next_link=None):
if (not next_link):
url = self.list_by_subscription.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailabilitySetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | -4,662,806,663,185,924,000 | Lists all availability sets in a subscription.
:param expand: The expand expression to apply to the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | list_by_subscription | 00Kai0/azure-sdk-for-python | python | def list_by_subscription(self, expand=None, **kwargs):
'Lists all availability sets in a subscription.\n\n :param expand: The expand expression to apply to the operation.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
def prepare_request(next_link=None):
if (not next_link):
url = self.list_by_subscription.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailabilitySetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) |
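Because the operation returns an `ItemPaged`, plain iteration follows `next_link` and fetches subsequent pages transparently; a hedged sketch with the assumed `client`:

.. code-block:: python

    for avset in client.availability_sets.list_by_subscription():
        print(avset.name, avset.location)  # paging handled inside ItemPaged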
def list(self, resource_group_name, **kwargs):
'Lists all availability sets in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
def prepare_request(next_link=None):
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailabilitySetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | 8,732,512,862,638,445,000 | Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | list | 00Kai0/azure-sdk-for-python | python | def list(self, resource_group_name, **kwargs):
'Lists all availability sets in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
def prepare_request(next_link=None):
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailabilitySetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) |
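The resource-group-scoped listing pages the same way; a hedged sketch:

.. code-block:: python

    names = [avset.name for avset in client.availability_sets.list('my-rg')]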
def list_available_sizes(self, resource_group_name, availability_set_name, **kwargs):
'Lists all available virtual machine sizes that can be used to create a new virtual machine in\n an existing availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineSizeListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
def prepare_request(next_link=None):
if (not next_link):
url = self.list_available_sizes.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineSizeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return (None, iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | 2,787,570,141,898,369,000 | Lists all available virtual machine sizes that can be used to create a new virtual machine in
an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | list_available_sizes | 00Kai0/azure-sdk-for-python | python | def list_available_sizes(self, resource_group_name, availability_set_name, **kwargs):
'Lists all available virtual machine sizes that can be used to create a new virtual machine in\n an existing availability set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param availability_set_name: The name of the availability set.\n :type availability_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineSizeListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-10-01'
def prepare_request(next_link=None):
if (not next_link):
url = self.list_available_sizes.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'availabilitySetName': self._serialize.url('availability_set_name', availability_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineSizeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return (None, iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) |
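A hedged sketch of the size listing; `VirtualMachineSize` items carry fields such as `name` and `number_of_cores`:

.. code-block:: python

    for size in client.availability_sets.list_available_sizes('my-rg', 'my-avset'):
        print(size.name, size.number_of_cores)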
@oneflow_export('tensor_buffer_to_tensor')
@stable_api
def tensor_buffer_to_tensor(x: oneflow._oneflow_internal.BlobDesc, dtype: flow.dtype, instance_shape: Sequence[int], name: Optional[str]=None) -> oneflow._oneflow_internal.BlobDesc:
"This operator converts the Blob's type from TensorBuffer to Tensor.\n Some operator's output data type is `TensorBuffer`, you can use this operator to convert back\n to `Tensor`.\n\n Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_\n for more about TensorBuffer.\n\n\n Args:\n x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.\n dtype (flow.dtype): The data dtype.\n instance_shape (Sequence[int]): The shape of each TensorBuffer instance.\n name (Optional[str], optional): The name for the operation. Defaults to None.\n\n Returns:\n oneflow._oneflow_internal.BlobDesc: A `Blob`.\n\n For example:\n\n .. code-block:: python\n\n import oneflow.compatible.single_client as flow\n import numpy as np\n import oneflow.compatible.single_client.typing as tp\n\n\n @flow.global_function()\n def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),\n ) -> tp.Numpy:\n x = flow.tensor_to_tensor_buffer(x,\n instance_dims=2)\n return flow.tensor_buffer_to_tensor(x,\n instance_shape=(64, 64),\n dtype=flow.float)\n\n x = np.random.randn(4, 16, 64, 64).astype(np.float32)\n out = tensor_buffer_to_tensor_Job(x)\n\n # out.shape (4, 16, 64, 64)\n\n "
if (name is None):
name = id_util.UniqueStr('TensorBufferToTensor_')
return flow.user_op_builder(name).Op('tensor_buffer_to_tensor').Input('in', [x]).Output('out').Attr('dtype', dtype).Attr('instance_shape', instance_shape).Build().InferAndTryRun().RemoteBlobList()[0] | 8,285,458,281,549,214,000 | This operator converts the Blob's type from TensorBuffer to Tensor.
Some operators' output data type is `TensorBuffer`, so you can use this operator to convert back
to `Tensor`.
Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
for more about TensorBuffer.
Args:
x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
dtype (flow.dtype): The data dtype.
instance_shape (Sequence[int]): The shape of each TensorBuffer instance.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob`.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
) -> tp.Numpy:
x = flow.tensor_to_tensor_buffer(x,
instance_dims=2)
return flow.tensor_buffer_to_tensor(x,
instance_shape=(64, 64),
dtype=flow.float)
x = np.random.randn(4, 16, 64, 64).astype(np.float32)
out = tensor_buffer_to_tensor_Job(x)
# out.shape (4, 16, 64, 64) | oneflow/compatible_single_client_python/ops/tensor_buffer_ops.py | tensor_buffer_to_tensor | xcnick/oneflow | python | @oneflow_export('tensor_buffer_to_tensor')
@stable_api
def tensor_buffer_to_tensor(x: oneflow._oneflow_internal.BlobDesc, dtype: flow.dtype, instance_shape: Sequence[int], name: Optional[str]=None) -> oneflow._oneflow_internal.BlobDesc:
"This operator converts the Blob's type from TensorBuffer to Tensor.\n Some operator's output data type is `TensorBuffer`, you can use this operator to convert back\n to `Tensor`.\n\n Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_\n for more about TensorBuffer.\n\n\n Args:\n x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.\n dtype (flow.dtype): The data dtype.\n instance_shape (Sequence[int]): The shape of each TensorBuffer instance.\n name (Optional[str], optional): The name for the operation. Defaults to None.\n\n Returns:\n oneflow._oneflow_internal.BlobDesc: A `Blob`.\n\n For example:\n\n .. code-block:: python\n\n import oneflow.compatible.single_client as flow\n import numpy as np\n import oneflow.compatible.single_client.typing as tp\n\n\n @flow.global_function()\n def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),\n ) -> tp.Numpy:\n x = flow.tensor_to_tensor_buffer(x,\n instance_dims=2)\n return flow.tensor_buffer_to_tensor(x,\n instance_shape=(64, 64),\n dtype=flow.float)\n\n x = np.random.randn(4, 16, 64, 64).astype(np.float32)\n out = tensor_buffer_to_tensor_Job(x)\n\n # out.shape (4, 16, 64, 64)\n\n "
if (name is None):
name = id_util.UniqueStr('TensorBufferToTensor_')
return flow.user_op_builder(name).Op('tensor_buffer_to_tensor').Input('in', [x]).Output('out').Attr('dtype', dtype).Attr('instance_shape', instance_shape).Build().InferAndTryRun().RemoteBlobList()[0] |
@oneflow_export('tensor_to_tensor_buffer')
@stable_api
def tensor_to_tensor_buffer(x: oneflow._oneflow_internal.BlobDesc, instance_dims: int, name: Optional[str]=None) -> oneflow._oneflow_internal.BlobDesc:
"This operator converts the Blob's type from Tensor to TensorBuffer.\n\n Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_\n for more about TensorBuffer.\n\n\n Args:\n x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.\n instance_dims (int): The dimensions of dynamic tensor instance.\n name (Optional[str], optional): The name for the operation. Defaults to None.\n\n Returns:\n oneflow._oneflow_internal.BlobDesc: The result Blob.\n\n For example:\n\n .. code-block:: python\n\n import oneflow.compatible.single_client as flow\n import numpy as np\n import oneflow.compatible.single_client.typing as tp\n\n\n @flow.global_function()\n def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),\n ) -> tp.Numpy:\n x = flow.tensor_to_tensor_buffer(x,\n instance_dims=2)\n return flow.tensor_buffer_to_tensor(x,\n instance_shape=(64, 64),\n dtype=flow.float)\n\n x = np.random.randn(4, 16, 64, 64).astype(np.float32)\n out = tensor_buffer_to_tensor_Job(x)\n\n # out.shape (4, 16, 64, 64)\n\n "
if (name is None):
name = id_util.UniqueStr('TensorToTensorBuffer_')
return flow.user_op_builder(name).Op('tensor_to_tensor_buffer').Input('in', [x]).Output('out').Attr('instance_dims', instance_dims).Build().InferAndTryRun().RemoteBlobList()[0] | 2,090,306,197,877,062,400 | This operator converts the Blob's type from Tensor to TensorBuffer.
Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
for more about TensorBuffer.
Args:
x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
instance_dims (int): The dimensions of dynamic tensor instance.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
) -> tp.Numpy:
x = flow.tensor_to_tensor_buffer(x,
instance_dims=2)
return flow.tensor_buffer_to_tensor(x,
instance_shape=(64, 64),
dtype=flow.float)
x = np.random.randn(4, 16, 64, 64).astype(np.float32)
out = tensor_buffer_to_tensor_Job(x)
# out.shape (4, 16, 64, 64) | oneflow/compatible_single_client_python/ops/tensor_buffer_ops.py | tensor_to_tensor_buffer | xcnick/oneflow | python | @oneflow_export('tensor_to_tensor_buffer')
@stable_api
def tensor_to_tensor_buffer(x: oneflow._oneflow_internal.BlobDesc, instance_dims: int, name: Optional[str]=None) -> oneflow._oneflow_internal.BlobDesc:
"This operator converts the Blob's type from Tensor to TensorBuffer.\n\n Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_\n for more about TensorBuffer.\n\n\n Args:\n x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.\n instance_dims (int): The dimensions of dynamic tensor instance.\n name (Optional[str], optional): The name for the operation. Defaults to None.\n\n Returns:\n oneflow._oneflow_internal.BlobDesc: The result Blob.\n\n For example:\n\n .. code-block:: python\n\n import oneflow.compatible.single_client as flow\n import numpy as np\n import oneflow.compatible.single_client.typing as tp\n\n\n @flow.global_function()\n def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),\n ) -> tp.Numpy:\n x = flow.tensor_to_tensor_buffer(x,\n instance_dims=2)\n return flow.tensor_buffer_to_tensor(x,\n instance_shape=(64, 64),\n dtype=flow.float)\n\n x = np.random.randn(4, 16, 64, 64).astype(np.float32)\n out = tensor_buffer_to_tensor_Job(x)\n\n # out.shape (4, 16, 64, 64)\n\n "
if (name is None):
name = id_util.UniqueStr('TensorToTensorBuffer_')
return flow.user_op_builder(name).Op('tensor_to_tensor_buffer').Input('in', [x]).Output('out').Attr('instance_dims', instance_dims).Build().InferAndTryRun().RemoteBlobList()[0] |
@oneflow_export('gen_tensor_buffer')
@stable_api
def gen_tensor_buffer(shape: Sequence[int], shape_list: Sequence[Sequence[int]], value_list: Sequence[float], data_type: Optional[flow.dtype]=flow.float32, dynamic_out: Optional[bool]=False, name: Optional[str]=None) -> oneflow._oneflow_internal.BlobDesc:
'This operator generates a tensor buffer blob.\n\n Args:\n shape (Sequence[int]): shape of output blob\n shape_list ( Sequence[Sequence[int]]): shapes for tensor buffer in output blob\n value_list (Sequence[float]): values for tensor buffer in output blob\n data_type (Optional[flow.dtype]): data type for tensor buffer in output blob\n dynamic_out (Optional[bool]): if output is a dynamic blob\n name (Optional[str]): The name for the operation. Defaults to None.\n\n Returns:\n BlobDesc: The result Blob.\n\n For example:\n\n .. code-block:: python\n\n import oneflow.compatible.single_client as flow\n\n @flow.global_function(function_config=func_config)\n def GenTensorBufferJob():\n with flow.scope.placement("cpu", "0:0"):\n x = flow.gen_tensor_buffer([(2,)], [(2, 1), (1, 2)], [0.0, 1.0])\n y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)\n return y\n\n # y_0.shape (2, 1), y_1.shape (1. 2)\n\n '
return flow.user_op_builder((name if (name is not None) else id_util.UniqueStr('GenTensorBuffer_'))).Op('gen_tensor_buffer').Output('out').Attr('shape', shape).Attr('shape_list', shape_list).Attr('value_list', value_list).Attr('data_type', data_type).Attr('dynamic_out', dynamic_out).Build().InferAndTryRun().RemoteBlobList()[0] | -6,369,745,597,871,519,000 | This operator generates a tensor buffer blob.
Args:
shape (Sequence[int]): shape of output blob
shape_list ( Sequence[Sequence[int]]): shapes for tensor buffer in output blob
value_list (Sequence[float]): values for tensor buffer in output blob
data_type (Optional[flow.dtype]): data type for tensor buffer in output blob
dynamic_out (Optional[bool]): if output is a dynamic blob
name (Optional[str]): The name for the operation. Defaults to None.
Returns:
BlobDesc: The result Blob.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
@flow.global_function(function_config=func_config)
def GenTensorBufferJob():
with flow.scope.placement("cpu", "0:0"):
x = flow.gen_tensor_buffer([(2,)], [(2, 1), (1, 2)], [0.0, 1.0])
y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)
return y
# y_0.shape (2, 1), y_1.shape (1, 2) | oneflow/compatible_single_client_python/ops/tensor_buffer_ops.py | gen_tensor_buffer | xcnick/oneflow | python | @oneflow_export('gen_tensor_buffer')
@stable_api
def gen_tensor_buffer(shape: Sequence[int], shape_list: Sequence[Sequence[int]], value_list: Sequence[float], data_type: Optional[flow.dtype]=flow.float32, dynamic_out: Optional[bool]=False, name: Optional[str]=None) -> oneflow._oneflow_internal.BlobDesc:
'This operator generates a tensor buffer blob.\n\n Args:\n shape (Sequence[int]): shape of output blob\n shape_list ( Sequence[Sequence[int]]): shapes for tensor buffer in output blob\n value_list (Sequence[float]): values for tensor buffer in output blob\n data_type (Optional[flow.dtype]): data type for tensor buffer in output blob\n dynamic_out (Optional[bool]): if output is a dynamic blob\n name (Optional[str]): The name for the operation. Defaults to None.\n\n Returns:\n BlobDesc: The result Blob.\n\n For example:\n\n .. code-block:: python\n\n import oneflow.compatible.single_client as flow\n\n @flow.global_function(function_config=func_config)\n def GenTensorBufferJob():\n with flow.scope.placement("cpu", "0:0"):\n x = flow.gen_tensor_buffer([(2,)], [(2, 1), (1, 2)], [0.0, 1.0])\n y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)\n return y\n\n # y_0.shape (2, 1), y_1.shape (1. 2)\n\n '
return flow.user_op_builder((name if (name is not None) else id_util.UniqueStr('GenTensorBuffer_'))).Op('gen_tensor_buffer').Output('out').Attr('shape', shape).Attr('shape_list', shape_list).Attr('value_list', value_list).Attr('data_type', data_type).Attr('dynamic_out', dynamic_out).Build().InferAndTryRun().RemoteBlobList()[0] |
@oneflow_export('tensor_buffer_to_list_of_tensors')
@stable_api
def tensor_buffer_to_list_of_tensors(x: oneflow._oneflow_internal.BlobDesc, out_shape: Sequence[int], out_dtype: flow.dtype, dynamic_out: Optional[bool]=False, name: Optional[str]=None) -> List[oneflow._oneflow_internal.BlobDesc]:
'This operator converts the Blob of TensorBuffer to list of Tensors. Every element in x will be converted\n to a Tensor and output will be flatten to a list.\n\n Args:\n x (BlobDesc): Input `Blob`, data type must be tensor buffer.\n out_shape (Sequence[int]): max shape for a tensor buffer in x\n out_dtype (flow.dtype,): output data type\n dynamic_out (Optional[bool]): if output is dynamic blob. Default to False.\n name (Optional[str]): The name for the operation. Default to None.\n\n Returns:\n List[BlobDesc]: result blobs\n\n For example:\n\n .. code-block:: python\n\n # the same with `gen_tensor_buffer` op\n\n '
return flow.user_op_builder((name if (name is not None) else id_util.UniqueStr('TensorBufferToListOfTensors_'))).Op('tensor_buffer_to_list_of_tensors').Input('in', [x]).Output('out', functools.reduce(operator.mul, x.shape, 1)).Attr('out_dtype', out_dtype).Attr('out_shape', out_shape).Attr('dynamic_out', dynamic_out).Build().InferAndTryRun().RemoteBlobList() | 1,525,385,519,540,934,400 | This operator converts the Blob of TensorBuffer to list of Tensors. Every element in x will be converted
to a Tensor and the output will be flattened to a list.
Args:
x (BlobDesc): Input `Blob`, data type must be tensor buffer.
out_shape (Sequence[int]): max shape for a tensor buffer in x
out_dtype (flow.dtype,): output data type
dynamic_out (Optional[bool]): if output is dynamic blob. Default to False.
name (Optional[str]): The name for the operation. Default to None.
Returns:
List[BlobDesc]: result blobs
For example:
.. code-block:: python
# the same with `gen_tensor_buffer` op | oneflow/compatible_single_client_python/ops/tensor_buffer_ops.py | tensor_buffer_to_list_of_tensors | xcnick/oneflow | python | @oneflow_export('tensor_buffer_to_list_of_tensors')
@stable_api
def tensor_buffer_to_list_of_tensors(x: oneflow._oneflow_internal.BlobDesc, out_shape: Sequence[int], out_dtype: flow.dtype, dynamic_out: Optional[bool]=False, name: Optional[str]=None) -> List[oneflow._oneflow_internal.BlobDesc]:
'This operator converts the Blob of TensorBuffer to list of Tensors. Every element in x will be converted\n to a Tensor and output will be flatten to a list.\n\n Args:\n x (BlobDesc): Input `Blob`, data type must be tensor buffer.\n out_shape (Sequence[int]): max shape for a tensor buffer in x\n out_dtype (flow.dtype,): output data type\n dynamic_out (Optional[bool]): if output is dynamic blob. Default to False.\n name (Optional[str]): The name for the operation. Default to None.\n\n Returns:\n List[BlobDesc]: result blobs\n\n For example:\n\n .. code-block:: python\n\n # the same with `gen_tensor_buffer` op\n\n '
return flow.user_op_builder((name if (name is not None) else id_util.UniqueStr('TensorBufferToListOfTensors_'))).Op('tensor_buffer_to_list_of_tensors').Input('in', [x]).Output('out', functools.reduce(operator.mul, x.shape, 1)).Attr('out_dtype', out_dtype).Attr('out_shape', out_shape).Attr('dynamic_out', dynamic_out).Build().InferAndTryRun().RemoteBlobList() |
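Since the docstring above defers to the `gen_tensor_buffer` example, here is a hedged sketch pairing the two ops; the shapes, dtypes, and launch details are illustrative and may vary across oneflow versions:

.. code-block:: python

    from oneflow.compatible import single_client as flow

    @flow.global_function()
    def gen_and_split_job():
        with flow.scope.placement("cpu", "0:0"):
            x = flow.gen_tensor_buffer((2,), [(2, 1), (1, 2)], [0.0, 1.0])
            # one output blob per element of x, flattened into a list
            return flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)

    # launching the job yields the two split tensors; the retrieval API differs by version:
    # y_0, y_1 = gen_and_split_job().get()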
def __init__(self, args):
"[summary]Initialization of Box's parameters\n\n Args:\n args ([numpy array list]): [this argument represents the bounds of the box]\n "
self.bounds = args | 5,110,870,955,732,952,000 | [summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box] | src/lab2/box_window.py | __init__ | AmineAitLemqeddem/sdia-python | python | def __init__(self, args):
"[summary]Initialization of Box's parameters\n\n Args:\n args ([numpy array list]): [this argument represents the bounds of the box]\n "
self.bounds = args |
def __str__(self):
"[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \\cdots`\n\n Returns:\n [str]: [description of the Box's bounds]\n "
shape = self.bounds.shape
representation = 'BoxWindow: '
for i in range((shape[0] - 1)):
representation = ((((((representation + '[') + str(self.bounds[i][0])) + ', ') + str(self.bounds[i][1])) + ']') + ' x ')
representation = (((((representation + '[') + str(self.bounds[(shape[0] - 1)][0])) + ', ') + str(self.bounds[(shape[0] - 1)][1])) + ']')
return representation | -1,319,349,516,868,488,000 | [summary] BoxWindow: :math:`[a_1, b_1] imes [a_2, b_2] imes \cdots`
Returns:
[str]: [description of the Box's bounds] | src/lab2/box_window.py | __str__ | AmineAitLemqeddem/sdia-python | python | def __str__(self):
"[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \\cdots`\n\n Returns:\n [str]: [description of the Box's bounds]\n "
shape = self.bounds.shape
representation = 'BoxWindow: '
for i in range((shape[0] - 1)):
representation = ((((((representation + '[') + str(self.bounds[i][0])) + ', ') + str(self.bounds[i][1])) + ']') + ' x ')
representation = (((((representation + '[') + str(self.bounds[(shape[0] - 1)][0])) + ', ') + str(self.bounds[(shape[0] - 1)][1])) + ']')
return representation |
def __len__(self):
'[summary]\n\n Returns:\n [int]: [the dimension of the box]\n '
return self.bounds.shape[0] | -1,972,206,058,471,564,000 | [summary]
Returns:
[int]: [the dimension of the box] | src/lab2/box_window.py | __len__ | AmineAitLemqeddem/sdia-python | python | def __len__(self):
'[summary]\n\n Returns:\n [int]: [the dimension of the box]\n '
return self.bounds.shape[0] |
def __contains__(self, args):
'[summary]This method tests if an element (args) is inside the box\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the box , False if not]\n '
# the point is inside iff every coordinate lies within its interval
return all(((self.bounds[i][0] <= args[i] <= self.bounds[i][1]) for i in range(self.__len__()))) | 5,534,255,717,289,143,000 | [summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not] | src/lab2/box_window.py | __contains__ | AmineAitLemqeddem/sdia-python | python | def __contains__(self, args):
'[summary]This method tests if an element (args) is inside the box\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the box , False if not]\n '
# the point is inside iff every coordinate lies within its interval
return all(((self.bounds[i][0] <= args[i] <= self.bounds[i][1]) for i in range(self.__len__())))
def dimension(self):
'[summary]\n This method is similar to the method __len__ described above\n '
return self.__len__() | -8,704,261,820,415,596,000 | [summary]
This method is similar to the method __len__ described above | src/lab2/box_window.py | dimension | AmineAitLemqeddem/sdia-python | python | def dimension(self):
'[summary]\n This method is similar to the method __len__ described above\n '
return self.__len__() |
def volume(self):
'[summary]\n This method calculates the volume of the Box\n '
v = 1
for i in range(self.dimension()):
v = (v * abs((self.bounds[i][1] - self.bounds[i][0])))
return v | 7,349,744,758,689,294,000 | [summary]
This method calculates the volume of the Box | src/lab2/box_window.py | volume | AmineAitLemqeddem/sdia-python | python | def volume(self):
'[summary]\n This method calculates the volume of the Box\n '
v = 1
for i in range(self.dimension()):
v = (v * abs((self.bounds[i][1] - self.bounds[i][0])))
return v |
def indicator_function(self, args):
'[summary]\n This method is similar to the method __contains__ described above\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the box , False if not]\n '
return self.__contains__(args) | 2,137,572,310,699,039,200 | [summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not] | src/lab2/box_window.py | indicator_function | AmineAitLemqeddem/sdia-python | python | def indicator_function(self, args):
'[summary]\n This method is similar to the method __contains__ described above\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the box , False if not]\n '
return self.__contains__(args)
def center(self):
'[summary] determine the center of the box\n\n Returns:\n [numpy array list]: [the center of the box]\n '
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c | -2,764,501,514,137,764,400 | [summary] determine the center of the box
Returns:
[numpy array list]: [the center of the box] | src/lab2/box_window.py | center | AmineAitLemqeddem/sdia-python | python | def center(self):
'[summary] determine the center of the box\n\n Returns:\n [numpy array list]: [the center of the box]\n '
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c |
def rand(self, n=1, rng=None):
'[summary]\n Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.\n\n Args:\n n (int, optional): [description]. Defaults to 1.\n rng ([type], optional): [description]. Defaults to None.\n\n Returns:\n Randomly n elements that belong to the box\n '
rng = get_random_number_generator(rng)
L = np.ones((n, self.__len__()))
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (((1 - x) * self.bounds[j][0]) + (x * self.bounds[j][1]))
return L | -2,167,868,873,684,034,000 | [summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
n points drawn uniformly at random inside the box | src/lab2/box_window.py | rand | AmineAitLemqeddem/sdia-python | python | def rand(self, n=1, rng=None):
'[summary]\n Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.\n\n Args:\n n (int, optional): [description]. Defaults to 1.\n rng ([type], optional): [description]. Defaults to None.\n\n Returns:\n Randomly n elements that belong to the box\n '
rng = get_random_number_generator(rng)
L = np.ones((n, self.__len__()))
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (((1 - x) * self.bounds[j][0]) + (x * self.bounds[j][1]))
return L |
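A consolidated usage sketch for the `BoxWindow` records above; the bounds are illustrative:

.. code-block:: python

    import numpy as np

    box = BoxWindow(np.array([[0.0, 2.0], [-1.0, 1.0]]))
    print(box)                          # BoxWindow: [0.0, 2.0] x [-1.0, 1.0]
    print(len(box), box.volume())       # 2 4.0
    print(np.array([1.0, 0.0]) in box)  # True
    points = box.rand(n=5)              # array of shape (5, 2), uniform in the box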
def __init__(self, center, dimension):
'[summary]a subclass of BoxWindow, represents the notion of "unit square box"\n\n Args:\n dimension ([int]): [dimension of the Unit Box]\n center ([numpy array list], optional): [center of the Box].\n '
self.bounds = np.array([[(center[i] - 0.5), (center[i] + 0.5)] for i in range(dimension)])
super().__init__(self.bounds) | -2,923,598,614,726,070,000 | [summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box]. | src/lab2/box_window.py | __init__ | AmineAitLemqeddem/sdia-python | python | def __init__(self, center, dimension):
'[summary]a subclass of BoxWindow, represents the notion of "unit square box"\n\n Args:\n dimension ([int]): [dimension of the Unit Box]\n center ([numpy array list], optional): [center of the Box].\n '
self.bounds = np.array([[(center[i] - 0.5), (center[i] + 0.5)] for i in range(dimension)])
super().__init__(self.bounds) |
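A short sketch of the unit box construction, under the same assumed import path:

import numpy as np
from src.lab2.box_window import UnitBoxWindow  # assumed import path

ubox = UnitBoxWindow(center=np.array([1.0, 2.0]), dimension=2)
print(ubox.bounds)    # [[0.5 1.5], [1.5 2.5]] -- each side has length 1
print(ubox.volume())  # 1.0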
def __init__(self, center, radius, dimension):
"[summary]Initialization of Box's parameters\n\n Args:\n args ([numpy array list]): [this argument represents the bounds of the box]\n "
self.dim = dimension
self.rad = radius
self.cent = center | -8,616,126,886,397,049,000 | [summary]Initialization of the Ball's parameters
Args:
center ([numpy array list]): [the center of the ball]
radius ([float]): [the radius of the ball]
dimension ([int]): [the dimension of the ball] | src/lab2/box_window.py | __init__ | AmineAitLemqeddem/sdia-python | python | def __init__(self, center, radius, dimension):
"[summary]Initialization of Box's parameters\n\n Args:\n args ([numpy array list]): [this argument represents the bounds of the box]\n "
self.dim = dimension
self.rad = radius
self.cent = center |
def __contains__(self, args):
'[summary]This method tests if an element (args) is inside the ball\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the ball , False if not]\n '
flag = False
if (len(args) != self.dim):
return False
elif (np.linalg.norm((args - self.cent)) <= self.rad):
flag = True
return flag | 2,666,685,982,397,654,500 | [summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not] | src/lab2/box_window.py | __contains__ | AmineAitLemqeddem/sdia-python | python | def __contains__(self, args):
'[summary]This method tests if an element (args) is inside the ball\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the ball , False if not]\n '
flag = False
if (len(args) != self.dim):
return False
elif (np.linalg.norm((args - self.cent)) <= self.rad):
flag = True
return flag |
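With the flag initialized to False, membership reduces to a Euclidean-norm test against the stored radius. A minimal sketch, assuming BallWindow is importable from the same module:

import numpy as np
from src.lab2.box_window import BallWindow  # assumed import path

ball = BallWindow(center=np.array([0.0, 0.0]), radius=1.0, dimension=2)
print(np.array([0.5, 0.5]) in ball)  # True: norm ~0.71 <= 1.0
print(np.array([1.0, 1.0]) in ball)  # False: norm ~1.41 > 1.0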
def dimension(self):
'[summary]\n This method gives the dimension of the ball\n '
return self.dim | 3,239,927,561,274,666,500 | [summary]
This method gives the dimension of the ball | src/lab2/box_window.py | dimension | AmineAitLemqeddem/sdia-python | python | def dimension(self):
'[summary]\n This method gives the dimension of the ball\n '
return self.dim |
def volume(self):
'[summary]\n This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\\int_{-r}^{r}V_{n}(\\sqrt{r^2 -x^2})dx`\n '
def vol(n, r):
if (n == 0):
return 1.0
integ = (lambda x: vol((n - 1), np.sqrt(((r ** 2) - (x ** 2)))))
(value, _) = integrate.quad(integ, (- r), r)
return value
return vol(self.dimension(), self.rad) | 8,720,640,683,625,807,000 | [summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx` | src/lab2/box_window.py | volume | AmineAitLemqeddem/sdia-python | python | def volume(self):
'[summary]\n This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\\int_{-r}^{r}V_{n}(\\sqrt{r^2 -x^2})dx`\n '
def vol(n, r):
if (n == 0):
return 1.0
integ = (lambda x: vol((n - 1), np.sqrt(((r ** 2) - (x ** 2)))))
(value, _) = integrate.quad(integ, (- r), r)
return value
return vol(self.dimension(), self.rad)
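The original loop multiplied a constant v instead of applying the (n-1)-dimensional volume as a function of the reduced radius, and assigned the (value, abserr) tuple that integrate.quad returns; the recursive form above implements the stated recurrence. A standalone check against the closed forms V_2 = pi * r**2 and V_3 = (4/3) * pi * r**3, using only numpy and scipy:

import numpy as np
from scipy import integrate

def ball_volume(n, r):
    # V_0 = 1; V_n(r) = integral over [-r, r] of V_{n-1}(sqrt(r**2 - x**2)) dx
    if n == 0:
        return 1.0
    integrand = lambda x: ball_volume(n - 1, np.sqrt(max(r ** 2 - x ** 2, 0.0)))
    value, _ = integrate.quad(integrand, -r, r)
    return value

print(ball_volume(2, 1.0), np.pi)           # both ~3.14159
print(ball_volume(3, 1.0), 4 / 3 * np.pi)   # both ~4.18879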
def indicator_function(self, args):
'[summary]\n This method is similar to the method __contains__ described above\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the ball , False if not]\n '
return self.__contains__(args) | 4,032,711,684,902,406,700 | [summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not] | src/lab2/box_window.py | indicator_function | AmineAitLemqeddem/sdia-python | python | def indicator_function(self, args):
'[summary]\n This method is similar to the method __contains__ described above\n\n Args:\n args ([numpy array list]): [the element to test]\n\n Returns:\n [bool]: [True if the element is inside the ball , False if not]\n '
return self.__contains__(args)
def center(self):
'[summary] determine the center of the ball\n\n Returns:\n [numpy array list]: [the center of the ball]\n '
return np.array(self.cent) | -3,314,628,522,506,350,000 | [summary] determine the center of the ball
Returns:
[numpy array list]: [the center of the ball] | src/lab2/box_window.py | center | AmineAitLemqeddem/sdia-python | python | def center(self):
'[summary] determine the center of the ball\n\n Returns:\n [numpy array list]: [the center of the ball]\n '
return np.array(self.cent)
def get_minibatch(roidb, num_classes):
'Given a mini batch of roidb, construct a data blob from it.'
num_images = len(roidb)
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
assert ((cfg.TRAIN.BATCH_SIZE % num_images) == 0), 'num_images ({}) must divide BATCH_SIZE ({})'.format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = (cfg.TRAIN.BATCH_SIZE / num_images)
fg_rois_per_image = np.round((cfg.TRAIN.FG_FRACTION * rois_per_image))
im_timer = Timer()
im_timer.tic()
(im_blob, im_scales) = _get_image_blob(roidb, random_scale_inds)
im_timer.toc()
blobs = {'ims': im_blob}
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros(0, dtype=np.float32)
rels_blob = np.zeros((0, 3), dtype=np.int32)
bbox_targets_blob = np.zeros((0, (4 * num_classes)), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
box_idx_offset = 0
d_timer = Timer()
d_timer.tic()
for im_i in xrange(num_images):
(roi_inds, rels) = _sample_graph(roidb[im_i], fg_rois_per_image, rois_per_image, num_neg_rels=cfg.TRAIN.NUM_NEG_RELS)
if (rels.size == 0):
print('batch skipped')
return None
(rels, labels, overlaps, im_rois, bbox_targets, bbox_inside_weights) = _gather_samples(roidb[im_i], roi_inds, rels, num_classes)
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = (im_i * np.ones((rois.shape[0], 1)))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
all_overlaps = np.hstack((all_overlaps, overlaps))
rels_offset = rels.copy()
rels_offset[:, :2] += box_idx_offset
rels_blob = np.vstack([rels_blob, rels_offset])
box_idx_offset += rois.shape[0]
blobs['rois'] = rois_blob.copy()
blobs['labels'] = labels_blob.copy().astype(np.int32)
blobs['relations'] = rels_blob[:, :2].copy().astype(np.int32)
blobs['predicates'] = rels_blob[:, 2].copy().astype(np.int32)
blobs['bbox_targets'] = bbox_targets_blob.copy()
blobs['bbox_inside_weights'] = bbox_inside_blob.copy()
blobs['bbox_outside_weights'] = np.array((bbox_inside_blob > 0)).astype(np.float32).copy()
num_roi = rois_blob.shape[0]
num_rel = rels_blob.shape[0]
blobs['rel_rois'] = data_utils.compute_rel_rois(num_rel, rois_blob, rels_blob)
d_timer.toc()
graph_dict = data_utils.create_graph_data(num_roi, num_rel, rels_blob[:, :2])
for k in graph_dict:
blobs[k] = graph_dict[k]
return blobs | -4,010,111,006,179,366,000 | Given a mini batch of roidb, construct a data blob from it. | lib/roi_data_layer/minibatch.py | get_minibatch | Alex-Sol/scene-graph-TF-release | python | def get_minibatch(roidb, num_classes):
num_images = len(roidb)
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
assert ((cfg.TRAIN.BATCH_SIZE % num_images) == 0), 'num_images ({}) must divide BATCH_SIZE ({})'.format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = (cfg.TRAIN.BATCH_SIZE / num_images)
fg_rois_per_image = np.round((cfg.TRAIN.FG_FRACTION * rois_per_image))
im_timer = Timer()
im_timer.tic()
(im_blob, im_scales) = _get_image_blob(roidb, random_scale_inds)
im_timer.toc()
blobs = {'ims': im_blob}
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros(0, dtype=np.float32)
rels_blob = np.zeros((0, 3), dtype=np.int32)
bbox_targets_blob = np.zeros((0, (4 * num_classes)), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
box_idx_offset = 0
d_timer = Timer()
d_timer.tic()
for im_i in xrange(num_images):
(roi_inds, rels) = _sample_graph(roidb[im_i], fg_rois_per_image, rois_per_image, num_neg_rels=cfg.TRAIN.NUM_NEG_RELS)
if (rels.size == 0):
print('batch skipped')
return None
(rels, labels, overlaps, im_rois, bbox_targets, bbox_inside_weights) = _gather_samples(roidb[im_i], roi_inds, rels, num_classes)
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = (im_i * np.ones((rois.shape[0], 1)))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
all_overlaps = np.hstack((all_overlaps, overlaps))
rels_offset = rels.copy()
rels_offset[:, :2] += box_idx_offset
rels_blob = np.vstack([rels_blob, rels_offset])
box_idx_offset += rois.shape[0]
blobs['rois'] = rois_blob.copy()
blobs['labels'] = labels_blob.copy().astype(np.int32)
blobs['relations'] = rels_blob[:, :2].copy().astype(np.int32)
blobs['predicates'] = rels_blob[:, 2].copy().astype(np.int32)
blobs['bbox_targets'] = bbox_targets_blob.copy()
blobs['bbox_inside_weights'] = bbox_inside_blob.copy()
blobs['bbox_outside_weights'] = np.array((bbox_inside_blob > 0)).astype(np.float32).copy()
num_roi = rois_blob.shape[0]
num_rel = rels_blob.shape[0]
blobs['rel_rois'] = data_utils.compute_rel_rois(num_rel, rois_blob, rels_blob)
d_timer.toc()
graph_dict = data_utils.create_graph_data(num_roi, num_rel, rels_blob[:, :2])
for k in graph_dict:
blobs[k] = graph_dict[k]
return blobs |
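The box_idx_offset bookkeeping shifts each image's relation endpoints into the concatenated RoI blob. A tiny illustration with made-up numbers (not data from any real roidb):

import numpy as np

rels_im0 = np.array([[0, 1, 5], [1, 2, 3]])  # image 0 contributed 3 RoIs
rels_im1 = np.array([[0, 2, 7]])             # image 1's indices must start at offset 3

rels_blob = np.vstack([rels_im0, rels_im1 + np.array([3, 3, 0])])
print(rels_blob)
# [[0 1 5]
#  [1 2 3]
#  [3 5 7]]  <- image-1 pair now indexes the concatenated blob; predicates untouched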
def _gather_samples(roidb, roi_inds, rels, num_classes):
'\n join all samples and produce sampled items\n '
rois = roidb['boxes']
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where((overlaps < cfg.TRAIN.FG_THRESH))[0]
labels = labels.copy()
labels[bg_inds] = 0
labels = labels[roi_inds]
overlaps = overlaps[roi_inds]
rois = rois[roi_inds]
roi_ind_map = {}
for (i, roi_i) in enumerate(roi_inds):
roi_ind_map[roi_i] = i
for (i, rel) in enumerate(rels):
rels[i] = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]]
(bbox_targets, bbox_inside_weights) = _get_bbox_regression_labels(roidb['bbox_targets'][roi_inds, :], num_classes)
return (rels, labels, overlaps, rois, bbox_targets, bbox_inside_weights) | -488,402,704,682,701,800 | join all samples and produce sampled items | lib/roi_data_layer/minibatch.py | _gather_samples | Alex-Sol/scene-graph-TF-release | python | def _gather_samples(roidb, roi_inds, rels, num_classes):
'\n \n '
rois = roidb['boxes']
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where((overlaps < cfg.TRAIN.FG_THRESH))[0]
labels = labels.copy()
labels[bg_inds] = 0
labels = labels[roi_inds]
overlaps = overlaps[roi_inds]
rois = rois[roi_inds]
roi_ind_map = {}
for (i, roi_i) in enumerate(roi_inds):
roi_ind_map[roi_i] = i
for (i, rel) in enumerate(rels):
rels[i] = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]]
(bbox_targets, bbox_inside_weights) = _get_bbox_regression_labels(roidb['bbox_targets'][roi_inds, :], num_classes)
return (rels, labels, overlaps, rois, bbox_targets, bbox_inside_weights) |
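The roi_ind_map remaps indices into the full roidb onto positions within the sampled subset, so relation endpoints stay valid after slicing. A minimal illustration with hypothetical indices:

roi_inds = [4, 9, 17]  # hypothetical sampled RoI indices from the full roidb
roi_ind_map = {roi_i: i for i, roi_i in enumerate(roi_inds)}  # {4: 0, 9: 1, 17: 2}

rel = [9, 17, 2]  # (subject, object, predicate) in roidb indexing
rel_local = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]]
print(rel_local)  # [1, 2, 2] -- endpoints now index the sampled subset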
def _sample_graph(roidb, num_fg_rois, num_rois, num_neg_rels=128):
'\n Sample a graph from the foreground rois of an image\n\n roidb: roidb of an image\n rois_per_image: maximum number of rois per image\n '
gt_rels = roidb['gt_relations']
fg_gt_ind_assignments = roidb['fg_gt_ind_assignments']
gt_to_fg_roi_inds = {}
all_fg_roi_inds = []
for (ind, gt_ind) in fg_gt_ind_assignments.items():
if (gt_ind not in gt_to_fg_roi_inds):
gt_to_fg_roi_inds[gt_ind] = []
gt_to_fg_roi_inds[gt_ind].append(ind)
all_fg_roi_inds.append(ind)
all_fg_roi_inds = np.array(list(set(all_fg_roi_inds)))
pos_rels = []
for rel in gt_rels:
for sub_i in gt_to_fg_roi_inds[rel[0]]:
for obj_i in gt_to_fg_roi_inds[rel[1]]:
pos_rels.append([sub_i, obj_i, rel[2]])
rels = []
rels_inds = []
roi_inds = []
if (len(pos_rels) > 0):
(_, indices) = np.unique(['{} {}'.format(i, j) for (i, j, k) in pos_rels], return_index=True)
pos_rels = np.array(pos_rels)[indices, :]
for rel in pos_rels:
roi_inds += rel[:2].tolist()
roi_inds = list(set(roi_inds))
rels.append(rel)
rels_inds.append(rel[:2].tolist())
if (len(roi_inds) >= num_fg_rois):
break
roi_candidates = np.setdiff1d(all_fg_roi_inds, roi_inds)
num_rois_to_sample = min((num_fg_rois - len(roi_inds)), len(roi_candidates))
if (num_rois_to_sample > 0):
roi_sample = npr.choice(roi_candidates, size=num_rois_to_sample, replace=False)
roi_inds = np.hstack([roi_inds, roi_sample])
sample_rels = []
sample_rels_inds = []
for i in roi_inds:
for j in roi_inds:
if ((i != j) and ([i, j] not in rels_inds)):
sample_rels.append([i, j, 0])
sample_rels_inds.append([i, j])
if (len(sample_rels) > 0):
num_neg_rels = np.minimum(len(sample_rels), num_neg_rels)
inds = npr.choice(np.arange(len(sample_rels)), size=num_neg_rels, replace=False)
rels += [sample_rels[i] for i in inds]
rels_inds += [sample_rels_inds[i] for i in inds]
num_rois_to_sample = (num_rois - len(roi_inds))
if (num_rois_to_sample > 0):
bg_roi_inds = _sample_bg_rois(roidb, num_rois_to_sample)
roi_inds = np.hstack([roi_inds, bg_roi_inds])
roi_inds = np.array(roi_inds).astype(np.int64)
return (roi_inds, np.array(rels).astype(np.int64)) | -4,104,330,219,477,050,400 | Sample a graph from the foreground rois of an image
roidb: roidb of an image
rois_per_image: maximum number of rois per image | lib/roi_data_layer/minibatch.py | _sample_graph | Alex-Sol/scene-graph-TF-release | python | def _sample_graph(roidb, num_fg_rois, num_rois, num_neg_rels=128):
'\n Sample a graph from the foreground rois of an image\n\n roidb: roidb of an image\n rois_per_image: maximum number of rois per image\n '
gt_rels = roidb['gt_relations']
fg_gt_ind_assignments = roidb['fg_gt_ind_assignments']
gt_to_fg_roi_inds = {}
all_fg_roi_inds = []
for (ind, gt_ind) in fg_gt_ind_assignments.items():
if (gt_ind not in gt_to_fg_roi_inds):
gt_to_fg_roi_inds[gt_ind] = []
gt_to_fg_roi_inds[gt_ind].append(ind)
all_fg_roi_inds.append(ind)
all_fg_roi_inds = np.array(list(set(all_fg_roi_inds)))
pos_rels = []
for rel in gt_rels:
for sub_i in gt_to_fg_roi_inds[rel[0]]:
for obj_i in gt_to_fg_roi_inds[rel[1]]:
pos_rels.append([sub_i, obj_i, rel[2]])
rels = []
rels_inds = []
roi_inds = []
if (len(pos_rels) > 0):
(_, indices) = np.unique(['{} {}'.format(i, j) for (i, j, k) in pos_rels], return_index=True)
pos_rels = np.array(pos_rels)[indices, :]
for rel in pos_rels:
roi_inds += rel[:2].tolist()
roi_inds = list(set(roi_inds))
rels.append(rel)
rels_inds.append(rel[:2].tolist())
if (len(roi_inds) >= num_fg_rois):
break
roi_candidates = np.setdiff1d(all_fg_roi_inds, roi_inds)
num_rois_to_sample = min((num_fg_rois - len(roi_inds)), len(roi_candidates))
if (num_rois_to_sample > 0):
roi_sample = npr.choice(roi_candidates, size=num_rois_to_sample, replace=False)
roi_inds = np.hstack([roi_inds, roi_sample])
sample_rels = []
sample_rels_inds = []
for i in roi_inds:
for j in roi_inds:
if ((i != j) and ([i, j] not in rels_inds)):
sample_rels.append([i, j, 0])
sample_rels_inds.append([i, j])
if (len(sample_rels) > 0):
num_neg_rels = np.minimum(len(sample_rels), num_neg_rels)
inds = npr.choice(np.arange(len(sample_rels)), size=num_neg_rels, replace=False)
rels += [sample_rels[i] for i in inds]
rels_inds += [sample_rels_inds[i] for i in inds]
num_rois_to_sample = (num_rois - len(roi_inds))
if (num_rois_to_sample > 0):
bg_roi_inds = _sample_bg_rois(roidb, num_rois_to_sample)
roi_inds = np.hstack([roi_inds, bg_roi_inds])
roi_inds = np.array(roi_inds).astype(np.int64)
return (roi_inds, np.array(rels).astype(np.int64))
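Negative relations are every ordered pair of sampled RoIs not already covered by a positive relation, labeled with predicate 0 and then subsampled. A compact sketch of that enumeration with hypothetical indices:

roi_inds = [0, 1, 2]
pos_pairs = {(0, 1)}  # hypothetical positive relation between RoIs 0 and 1
negatives = [[i, j, 0] for i in roi_inds for j in roi_inds
             if i != j and (i, j) not in pos_pairs]
print(negatives)  # [[0, 2, 0], [1, 0, 0], [1, 2, 0], [2, 0, 0], [2, 1, 0]]
# _sample_graph then keeps at most num_neg_rels of these via npr.choice.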
def _sample_bg_rois(roidb, num_bg_rois):
'\n Sample rois from background\n '
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where((((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO)) | (labels == 0)))[0]
bg_rois_per_this_image = np.minimum(num_bg_rois, bg_inds.size)
if (bg_inds.size > 0):
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
return bg_inds | 3,924,593,662,606,184,400 | Sample rois from background | lib/roi_data_layer/minibatch.py | _sample_bg_rois | Alex-Sol/scene-graph-TF-release | python | def _sample_bg_rois(roidb, num_bg_rois):
'\n \n '
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where((((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO)) | (labels == 0)))[0]
bg_rois_per_this_image = np.minimum(num_bg_rois, bg_inds.size)
if (bg_inds.size > 0):
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
return bg_inds |
def _get_image_blob(roidb, scale_inds):
'Builds an input blob from the images in the roidb at the specified\n scales.\n '
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = roidb[i]['image']()
if roidb[i]['flipped']:
im = im[:, ::(- 1), :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
(im, im_scale) = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
blob = im_list_to_blob(processed_ims)
return (blob, im_scales) | 4,838,210,901,424,313,000 | Builds an input blob from the images in the roidb at the specified
scales. | lib/roi_data_layer/minibatch.py | _get_image_blob | Alex-Sol/scene-graph-TF-release | python | def _get_image_blob(roidb, scale_inds):
'Builds an input blob from the images in the roidb at the specified\n scales.\n '
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = roidb[i]['image']()
if roidb[i]['flipped']:
im = im[:, ::(- 1), :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
(im, im_scale) = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
blob = im_list_to_blob(processed_ims)
return (blob, im_scales) |
def _project_im_rois(im_rois, im_scale_factor):
'Project image RoIs into the rescaled training image.'
rois = (im_rois * im_scale_factor)
return rois | -1,758,947,012,524,473,000 | Project image RoIs into the rescaled training image. | lib/roi_data_layer/minibatch.py | _project_im_rois | Alex-Sol/scene-graph-TF-release | python | def _project_im_rois(im_rois, im_scale_factor):
rois = (im_rois * im_scale_factor)
return rois |
def _get_bbox_regression_labels(bbox_target_data, num_classes):
'Bounding-box regression targets are stored in a compact form in the\n roidb.\n\n This function expands those targets into the 4-of-4*K representation used\n by the network (i.e. only one class has non-zero targets). The loss weights\n are similarly expanded.\n\n Returns:\n bbox_target_data (ndarray): N x 4K blob of regression targets\n bbox_inside_weights (ndarray): N x 4K blob of loss weights\n '
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, (4 * num_classes)), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where((clss > 0))[0]
for ind in inds:
cls = clss[ind].astype(np.int64)
start = (4 * cls)
end = (start + 4)
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return (bbox_targets, bbox_inside_weights) | -5,585,201,523,274,370,000 | Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights | lib/roi_data_layer/minibatch.py | _get_bbox_regression_labels | Alex-Sol/scene-graph-TF-release | python | def _get_bbox_regression_labels(bbox_target_data, num_classes):
'Bounding-box regression targets are stored in a compact form in the\n roidb.\n\n This function expands those targets into the 4-of-4*K representation used\n by the network (i.e. only one class has non-zero targets). The loss weights\n are similarly expanded.\n\n Returns:\n bbox_target_data (ndarray): N x 4K blob of regression targets\n bbox_inside_weights (ndarray): N x 4K blob of loss weights\n '
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, (4 * num_classes)), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where((clss > 0))[0]
for ind in inds:
cls = clss[ind].astype(np.int64)
start = (4 * cls)
end = (start + 4)
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return (bbox_targets, bbox_inside_weights) |
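The expansion turns each compact (class, tx, ty, tw, th) row into a 4K-wide row where only the target class's 4-slot window is non-zero. A self-contained numpy demonstration with made-up targets:

import numpy as np

num_classes = 3
compact = np.array([[2, 0.1, 0.2, 0.3, 0.4]], dtype=np.float32)  # a class-2 target

expanded = np.zeros((1, 4 * num_classes), dtype=np.float32)
cls = int(compact[0, 0])
expanded[0, 4 * cls:4 * cls + 4] = compact[0, 1:]  # only columns 8..11 are filled
print(expanded)  # [[0. 0. 0. 0. 0. 0. 0. 0. 0.1 0.2 0.3 0.4]]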
@staticmethod
def random_choice(args: List[Any], n: int=1):
'\n pick a random element from a set.\n \n Example:\n >> sampler = RandomSearch.random_choice([1, 2, 3])\n >> sampler()\n 2\n '
choices = []
for arg in args:
choices.append(arg)
if (n == 1):
return (lambda : np.random.choice(choices, replace=False))
else:
return (lambda : np.random.choice(choices, n, replace=False)) | -7,495,781,368,315,496,000 | pick a random element from a set.
Example:
>> sampler = RandomSearch.random_choice([1, 2, 3])
>> sampler()
2 | lr/hyperparameters.py | random_choice | kernelmachine/quality-filter | python | @staticmethod
def random_choice(args: List[Any], n: int=1):
'\n pick a random element from a set.\n \n Example:\n >> sampler = RandomSearch.random_choice([1, 2, 3])\n >> sampler()\n 2\n '
choices = []
for arg in args:
choices.append(arg)
if (n == 1):
return (lambda : np.random.choice(choices, replace=False))
else:
return (lambda : np.random.choice(choices, n, replace=False)) |
@staticmethod
def random_integer(low: Union[(int, float)], high: Union[(int, float)]):
'\n pick a random integer between two bounds\n \n Example:\n >> sampler = RandomSearch.random_integer(1, 10)\n >> sampler()\n 9\n '
return (lambda : int(np.random.randint(low, high))) | 2,527,066,972,184,185,300 | pick a random integer between two bounds
Example:
>> sampler = RandomSearch.random_integer(1, 10)
>> sampler()
9 | lr/hyperparameters.py | random_integer | kernelmachine/quality-filter | python | @staticmethod
def random_integer(low: Union[(int, float)], high: Union[(int, float)]):
'\n pick a random integer between two bounds\n \n Example:\n >> sampler = RandomSearch.random_integer(1, 10)\n >> sampler()\n 9\n '
return (lambda : int(np.random.randint(low, high))) |
@staticmethod
def random_loguniform(low: Union[(float, int)], high: Union[(float, int)]):
'\n pick a random float between two bounds, using loguniform distribution\n \n Example:\n >> sampler = RandomSearch.random_loguniform(1e-5, 1e-2)\n >> sampler()\n 0.0004\n '
return (lambda : np.exp(np.random.uniform(np.log(low), np.log(high)))) | -1,493,134,483,308,338,200 | pick a random float between two bounds, using loguniform distribution
Example:
>> sampler = RandomSearch.random_loguniform(1e-5, 1e-2)
>> sampler()
0.0004 | lr/hyperparameters.py | random_loguniform | kernelmachine/quality-filter | python | @staticmethod
def random_loguniform(low: Union[(float, int)], high: Union[(float, int)]):
'\n pick a random float between two bounds, using loguniform distribution\n \n Example:\n >> sampler = RandomSearch.random_loguniform(1e-5, 1e-2)\n >> sampler()\n 0.0004\n '
return (lambda : np.exp(np.random.uniform(np.log(low), np.log(high)))) |
@staticmethod
def random_uniform(low: Union[(float, int)], high: Union[(float, int)]):
'\n pick a random float between two bounds, using uniform distribution\n \n Example:\n >> sampler = RandomSearch.random_uniform(0, 1)\n >> sampler()\n 0.01\n '
return (lambda : np.random.uniform(low, high)) | 5,574,592,011,469,580,000 | pick a random float between two bounds, using uniform distribution
Example:
>> sampler = RandomSearch.random_uniform(0, 1)
>> sampler()
0.01 | lr/hyperparameters.py | random_uniform | kernelmachine/quality-filter | python | @staticmethod
def random_uniform(low: Union[(float, int)], high: Union[(float, int)]):
'\n pick a random float between two bounds, using uniform distribution\n \n Example:\n >> sampler = RandomSearch.random_uniform(0, 1)\n >> sampler()\n 0.01\n '
return (lambda : np.random.uniform(low, high)) |
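Since every sampler is a zero-argument callable, a hyperparameter search space is just a dict of samplers. A usage sketch, assuming RandomSearch is importable from lr.hyperparameters as the records indicate:

from lr.hyperparameters import RandomSearch  # assumed import path

search_space = {
    'lr': RandomSearch.random_loguniform(1e-5, 1e-2),
    'batch_size': RandomSearch.random_choice([16, 32, 64]),
    'num_layers': RandomSearch.random_integer(1, 4),
}
config = {name: sampler() for name, sampler in search_space.items()}
print(config)  # e.g. {'lr': 0.00041, 'batch_size': 32, 'num_layers': 2}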
def callback(func: CALLABLE_T) -> CALLABLE_T:
'Annotation to mark method as safe to call from within the event loop.'
setattr(func, '_edge_callback', True)
return func | 2,672,397,233,697,125,000 | Annotation to mark method as safe to call from within the event loop. | merceedge/util/async_util.py | callback | hobo0cn/MerceEdge | python | def callback(func: CALLABLE_T) -> CALLABLE_T:
setattr(func, '_edge_callback', True)
return func |
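The decorator only attaches a marker attribute; a dispatcher can inspect it to decide whether a handler is safe to invoke inside the event loop. A hypothetical dispatch sketch (the executor branch is illustrative, not MerceEdge's actual API):

@callback
def handle_event(event):
    print('handled', event)

if getattr(handle_event, '_edge_callback', False):
    handle_event('wire-updated')  # marked safe: call directly inside the loop
else:
    pass  # otherwise hand it off, e.g. loop.run_in_executor(None, handle_event, ...)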