hexsha
stringlengths 40
40
| size
int64 7
1.04M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
247
| max_stars_repo_name
stringlengths 4
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
247
| max_issues_repo_name
stringlengths 4
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
247
| max_forks_repo_name
stringlengths 4
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.04M
| avg_line_length
float64 1.77
618k
| max_line_length
int64 1
1.02M
| alphanum_fraction
float64 0
1
| original_content
stringlengths 7
1.04M
| filtered:remove_function_no_docstring
int64 -102
942k
| filtered:remove_class_no_docstring
int64 -354
977k
| filtered:remove_delete_markers
int64 0
60.1k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e68f063286831b1e435dc23fedc1ab0c986fc844
| 8,311
|
py
|
Python
|
pyaib/linesocket.py
|
loljoho-old/ainu
|
70c15e59f387817c271c9b282b16423cbba7b6a2
|
[
"Apache-2.0"
] | null | null | null |
pyaib/linesocket.py
|
loljoho-old/ainu
|
70c15e59f387817c271c9b282b16423cbba7b6a2
|
[
"Apache-2.0"
] | null | null | null |
pyaib/linesocket.py
|
loljoho-old/ainu
|
70c15e59f387817c271c9b282b16423cbba7b6a2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Line based socket using gevent
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import errno
import gevent
from gevent import socket
from gevent import queue, select
from OpenSSL import SSL
from .util.decorator import utf8Encode, utf8Decode, raise_exceptions
#We use this to end lines we send to the server its in the RFC
#Buffers don't support unicode just yet so 'encode'
LINEENDING = b'\r\n'
class LineSocket(object):
"""Line based socket impl takes a host and port"""
#Exceptions for LineSockets
# Connect to remote host
#Start up the read and write threads
#Read from the socket, split out lines into a queue for readline
#Read Operation (Block)
@utf8Decode.returnValue
#Write Operation
#writeline Operation [Blocking]
@utf8Encode
| 35.216102
| 79
| 0.545783
|
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Line based socket using gevent
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import errno
import gevent
from gevent import socket
from gevent import queue, select
from OpenSSL import SSL
from .util.decorator import utf8Encode, utf8Decode, raise_exceptions
class LineSocketBuffers(object):
    """Paired inbound/outbound byte buffers for a line-oriented socket."""

    def __init__(self):
        # Mutable byte storage for received and to-be-sent data.
        self.readbuffer = bytearray()
        self.writebuffer = bytearray()

    def clear(self):
        """Empty both buffers in place (the bytearray objects are kept)."""
        self.readbuffer[:] = b''
        self.writebuffer[:] = b''

    def readbuffer_mv(self):
        """Return a zero-copy memoryview over the read buffer."""
        return memoryview(self.readbuffer)

    def writebuffer_mv(self):
        """Return a zero-copy memoryview over the write buffer."""
        return memoryview(self.writebuffer)
#We use this to end lines we send to the server its in the RFC
#Buffers don't support unicode just yet so 'encode'
LINEENDING = b'\r\n'
class LineSocket(object):
    """Line based socket impl takes a host and port"""

    def __init__(self, host, port, SSL):
        # `SSL` here is a truthy flag asking for an SSL-wrapped connection
        # (it shadows the OpenSSL module name only inside this scope).
        self.host, self.port, self.SSL = (host, port, SSL)
        self._socket = None
        self._buffer = LineSocketBuffers()
        # Thread Safe Queues: _IN holds parsed inbound lines for readline(),
        # _OUT holds outbound lines awaiting the writer loop.
        self._IN = queue.Queue()
        self._OUT = queue.Queue()

    # Exceptions for LineSockets
    class SocketError(Exception):
        """Fatal socket condition (EOF, broken pipe, select exception)."""

        def __init__(self, value):
            self.value = value

        def __str__(self):
            return repr(self.value)

    # Connect to remote host
    def connect(self):
        """Open a (possibly SSL-wrapped) connection to self.host:self.port.

        Tries every address returned by getaddrinfo (ipv6 ready) until one
        connects. Returns True on success, False if every address failed.
        """
        host, port = (self.host, self.port)
        # Clean out the buffers
        self._buffer.clear()
        # If the existing socket is not None close it
        if self._socket is not None:
            self.close()
        # Resolve the hostname and connect (ipv6 ready)
        sock = None
        try:
            for info in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                           socket.SOCK_STREAM):
                family, socktype, proto, canonname, sockaddr = info
                # Validate the socket will make
                try:
                    sock = socket.socket(family, socktype, proto)
                    # Set Keepalives
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                except socket.error as msg:  # fix: py3-compatible except
                    print('Socket Error: %s' % msg)
                    sock = None
                    continue
                # Wrap in ssl if asked
                if self.SSL:
                    print('Starting SSL')
                    try:
                        ctx = SSL.Context(SSL.SSLv23_METHOD)
                        sock = SSL.Connection(ctx, sock)
                    except SSL.Error as err:  # fix: py3-compatible except
                        print('Could not Initiate SSL: %s' % err)
                        sock = None
                        continue
                # Try to establish the connection
                try:
                    print('Trying Connect(%s)' % repr(sockaddr))
                    sock.settimeout(10)
                    sock.connect(sockaddr)
                except socket.error as msg:  # fix: py3-compatible except
                    print('Socket Error: %s' % msg)
                    if self.SSL:
                        sock.shutdown()
                    sock.close()
                    sock = None
                    continue
                break
        except Exception as e:
            print('Some unknown exception: %s' % e)
        # After all the connection attempts and sock is still none lets bomb out
        if sock is None:
            print('Could not open connection')
            return False
        # Set the socket to non_blocking
        sock.setblocking(0)
        print("Connection Open.")
        self._socket = sock
        return True

    # Start up the read and write threads
    def run(self):
        """Spawn the read/write greenlets, block until a socket exception."""
        tasks = []  # fix: bind before try so the finally cannot NameError
        # Fire off some greenlets to handle reading and writing
        try:
            print("Starting Read/Write Loops")
            tasks = [gevent.spawn(raise_exceptions(self._read)),
                     gevent.spawn(raise_exceptions(self._write))]
            # Wait for a socket exception and raise the flag
            select.select([], [], [self._socket])  # Yield
            raise self.SocketError('Socket Exception')
        finally:  # Make sure we kill the tasks
            print("Killing read and write loops")
            gevent.killall(tasks)

    def close(self):
        """Best-effort shutdown/close of the underlying socket."""
        if self.SSL:
            try:
                self._socket.shutdown()
            except Exception:  # fix: no bare except; still best-effort
                pass
        self._socket.close()
        self._socket = None

    # Read from the socket, split out lines into a queue for readline
    def _read(self):
        """Reader loop: recv into the buffer, queue each complete line."""
        eof = False
        while True:
            try:
                # Wait for when the socket is ready for read
                select.select([self._socket], [], [])  # Yield
                data = self._socket.recv(4096)
                if not data:  # Disconnected Remote
                    eof = True
                self._buffer.readbuffer.extend(data)
            except SSL.WantReadError:
                pass  # Nonblocking ssl yo
            except (SSL.ZeroReturnError, SSL.SysCallError):
                eof = True
            except socket.error as e:
                if e.errno == errno.EAGAIN:
                    pass  # Don't Care
                else:
                    raise
            # If there are lines to process do so
            while LINEENDING in self._buffer.readbuffer:
                # Find the buffer offset
                size = self._buffer.readbuffer.find(LINEENDING)
                # Get the string from the buffer
                line = self._buffer.readbuffer_mv()[0:size].tobytes()
                # Place the string in the queue for safe handling
                # (decoding to unicode happens in readline's decorator)
                self._IN.put(line)
                # Delete the line from the buffer + 2 for line endings
                del self._buffer.readbuffer[0:size + 2]
            # Make sure we parse our readbuffer before we return
            if eof:  # You would think reading from a disconnected socket would
                # raise an exception
                raise self.SocketError('EOF')

    # Read Operation (Block)
    @utf8Decode.returnValue
    def readline(self):
        """Block until a full line is available and return it (utf8-decoded
        by the decorator)."""
        return self._IN.get()

    # Write Operation
    def _write(self):
        """Writer loop: drain the OUT queue into the socket in 4096b slabs."""
        while True:
            line = self._OUT.get()  # Yield Operation
            self._buffer.writebuffer.extend(line + LINEENDING)
            # If we have buffers to write lets write them all
            while self._buffer.writebuffer:
                try:
                    gevent.sleep(0)  # This gets tight sometimes
                    # Try to dump 4096 bytes to the socket
                    count = self._socket.send(
                        self._buffer.writebuffer_mv()[0:4096])
                    # Remove sent len from buffer
                    del self._buffer.writebuffer[0:count]
                except SSL.WantReadError:
                    gevent.sleep(0)  # Yield so this is not tight
                except socket.error as e:
                    if e.errno == errno.EPIPE:
                        raise self.SocketError('Broken Pipe')
                    else:
                        # fix: original did 'str' + int -> TypeError
                        raise self.SocketError(
                            'Err Socket Code: %s' % e.errno)
                except SSL.SysCallError as e:
                    # fix: `except E as (a, b)` tuple unpacking is py2-only;
                    # SysCallError carries (errno, message) in e.args
                    errnum, errstr = e.args[0], e.args[1]
                    if errnum == errno.EPIPE:
                        raise self.SocketError(errstr)
                    else:
                        raise self.SocketError('SSL Syscall (%d) Error: %s'
                                               % (errnum, errstr))

    # writeline Operation [Blocking]
    @utf8Encode
    def writeline(self, data):
        """Queue *data* for sending (utf8-encoded by the decorator)."""
        self._OUT.put(data)
| 6,417
| 19
| 426
|
364c359b4af83293ca8e35c2b54053600928c890
| 1,174
|
py
|
Python
|
2nd_100/problem108.py
|
takekoputa/project-euler
|
6f434be429bd26f5d0f84f5ab0f5fa2bd677c790
|
[
"MIT"
] | null | null | null |
2nd_100/problem108.py
|
takekoputa/project-euler
|
6f434be429bd26f5d0f84f5ab0f5fa2bd677c790
|
[
"MIT"
] | null | null | null |
2nd_100/problem108.py
|
takekoputa/project-euler
|
6f434be429bd26f5d0f84f5ab0f5fa2bd677c790
|
[
"MIT"
] | 1
|
2021-11-02T12:08:46.000Z
|
2021-11-02T12:08:46.000Z
|
# Question: https://projecteuler.net/problem=108
"""
1/x + 1/y = 1/n
Since n > 0, we have x > n and y > n. Assume x >= y.
Let x = n + a, y = n + b.
1/x + 1/y = 1/n
-> xn + yn = xy
-> (n+a)n + (n+b)n = (n+a)(n+b)
-> 2n^2 + an + bn = n^2 + an + bn + ab
-> n^2 = ab
The number of solutions of (x,y) for each n is the number of solutions of (a,b) for each n.
The number of positive integers (a,b) where a >= b that ab = n^2 is (m-1)/2+1, where m is the number of divisors of n^2.
Why?
| Let the number of divisors be 'm'.
| For each divisor d where d != n, let (a, b) = (d, n^2/d).
| Note that, (d, n^2/d) is the same as (n^2/d, d), where n^2/d is also a divisor but only one of them have a > b.
| There are (m-1) such a case, therefore there are (m-1)/2 pair of (a,b) that a > b.
| For the case (a, b) = (n, n); there is only one such a case.
| So, in total, there are (m-1)/2+1 pairs of (a,b).
In this problem, we want to find the smallest n such that (number_of_divisors of n**2) >= 1999.
"""
from sage.all import number_of_divisors
ans = 0
while True:
ans = ans + 1
if number_of_divisors(ans**2) >= 1999:
break
print(ans)
| 34.529412
| 120
| 0.584327
|
# Question: https://projecteuler.net/problem=108
"""
1/x + 1/y = 1/n
Since n > 0, we have x > n and y > n. Assume x >= y.
Let x = n + a, y = n + b.
1/x + 1/y = 1/n
-> xn + yn = xy
-> (n+a)n + (n+b)n = (n+a)(n+b)
-> 2n^2 + an + bn = n^2 + an + bn + ab
-> n^2 = ab
The number of solutions of (x,y) for each n is the number of solutions of (a,b) for each n.
The number of positive integers (a,b) where a >= b that ab = n^2 is (m-1)/2+1, where m is the number of divisors of n^2.
Why?
| Let the number of divisors be 'm'.
| For each divisor d where d != n, let (a, b) = (d, n^2/d).
| Note that, (d, n^2/d) is the same as (n^2/d, d), where n^2/d is also a divisor but only one of them have a > b.
| There are (m-1) such a case, therefore there are (m-1)/2 pair of (a,b) that a > b.
| For the case (a, b) = (n, n); there is only one such a case.
| So, in total, there are (m-1)/2+1 pairs of (a,b).
In this problem, we want to find the smallest n such that (number_of_divisors of n**2) >= 1999.
"""
from sage.all import number_of_divisors
ans = 0
while True:
ans = ans + 1
if number_of_divisors(ans**2) >= 1999:
break
print(ans)
| 0
| 0
| 0
|
4ad1e402d4b20f23f6ba3b10ad2eb51fccb091c3
| 10,031
|
py
|
Python
|
src/virtual-wan/azext_vwan/commands.py
|
ibabedal/azure-cli-extensions
|
ae86b635e5669e7088108506b8964c6daedb7742
|
[
"MIT"
] | 2
|
2021-06-05T17:51:26.000Z
|
2021-11-17T11:17:56.000Z
|
src/virtual-wan/azext_vwan/commands.py
|
ibabedal/azure-cli-extensions
|
ae86b635e5669e7088108506b8964c6daedb7742
|
[
"MIT"
] | 1
|
2020-06-12T01:39:40.000Z
|
2020-06-12T01:39:40.000Z
|
src/virtual-wan/azext_vwan/commands.py
|
ibabedal/azure-cli-extensions
|
ae86b635e5669e7088108506b8964c6daedb7742
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from .profiles import CUSTOM_VWAN
from ._client_factory import (
cf_virtual_wans, cf_virtual_hubs, cf_vpn_sites, cf_vpn_site_configs,
cf_vpn_gateways, cf_vpn_gateway_connection, cf_virtual_hub_route_table_v2s, cf_vpn_server_config,
cf_p2s_vpn_gateways, cf_virtual_hub_connection)
from ._util import (
list_network_resource_property,
get_network_resource_property_entry
)
# pylint: disable=too-many-locals, too-many-statements
| 48.458937
| 167
| 0.718572
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from .profiles import CUSTOM_VWAN
from ._client_factory import (
cf_virtual_wans, cf_virtual_hubs, cf_vpn_sites, cf_vpn_site_configs,
cf_vpn_gateways, cf_vpn_gateway_connection, cf_virtual_hub_route_table_v2s, cf_vpn_server_config,
cf_p2s_vpn_gateways, cf_virtual_hub_connection)
from ._util import (
list_network_resource_property,
get_network_resource_property_entry
)
# pylint: disable=too-many-locals, too-many-statements
def load_command_table(self, _):
    """Register every virtual-wan CLI command group on *self* (the loader).

    First builds one CliCommandType per vendored-SDK operations class
    (operations_tmpl points into azext_vwan.vendored_sdks.v2020_05_01),
    then wires CLI command names to SDK/custom operations, grouped by
    resource region.  The unused second parameter is the CLI args object
    required by the loader protocol.
    """
    network_vhub_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VirtualHubsOperations.{}',
        client_factory=cf_virtual_hubs,
        resource_type=CUSTOM_VWAN,
        min_api='2018-08-01'
    )
    network_vhub_connection_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#HubVirtualNetworkConnectionsOperations.{}',
        client_factory=cf_virtual_hub_connection,
        resource_type=CUSTOM_VWAN,
        min_api='2020-05-01'
    )
    network_vhub_route_table_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VirtualHubRouteTableV2sOperations.{}',
        client_factory=cf_virtual_hub_route_table_v2s,
        resource_type=CUSTOM_VWAN,
        min_api='2019-09-01'
    )
    network_vwan_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VirtualWansOperations.{}',
        client_factory=cf_virtual_wans,
        resource_type=CUSTOM_VWAN,
        min_api='2018-08-01'
    )
    network_vpn_gateway_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VpnGatewaysOperations.{}',
        client_factory=cf_vpn_gateways,
        min_api='2018-08-01'
    )
    network_vpn_gateway_connection_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VpnConnectionsOperations.{}',
        client_factory=cf_vpn_gateway_connection,
        min_api='2020-05-01'
    )
    network_vpn_site_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VpnSitesOperations.{}',
        client_factory=cf_vpn_sites,
        min_api='2018-08-01'
    )
    network_vpn_site_config_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VpnSitesConfigurationOperations.{}',
        client_factory=cf_vpn_site_configs,
        min_api='2018-08-01'
    )
    network_vpn_server_config_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#VpnServerConfigurationsOperations.{}',
        client_factory=cf_vpn_server_config,
        resource_type=CUSTOM_VWAN,
        min_api='2020-03-01'
    )
    network_p2s_vpn_gateway_sdk = CliCommandType(
        operations_tmpl='azext_vwan.vendored_sdks.v2020_05_01.operations#P2sVpnGatewaysOperations.{}',
        client_factory=cf_p2s_vpn_gateways,
        resource_type=CUSTOM_VWAN,
        min_api='2020-03-01'
    )
    # Command type for plain helpers in _util (no SDK client behind it).
    network_util = CliCommandType(
        operations_tmpl='azext_vwan._util#{}',
        client_factory=None
    )
    # region VirtualWANs
    with self.command_group('network vwan', network_vwan_sdk) as g:
        g.custom_command('create', 'create_virtual_wan')
        g.command('delete', 'delete')
        g.show_command('show')
        g.custom_command('list', 'list_virtual_wans')
        g.generic_update_command('update', custom_func_name='update_virtual_wan', setter_arg_name='wan_parameters')
    # endregion
    # region VirtualHubs
    with self.command_group('network vhub', network_vhub_sdk) as g:
        g.custom_command('create', 'create_virtual_hub', supports_no_wait=True)
        g.command('delete', 'delete')
        g.show_command('show')
        g.custom_command('list', 'list_virtual_hubs')
        g.generic_update_command('update', custom_func_name='update_virtual_hub', setter_arg_name='virtual_hub_parameters', supports_no_wait=True)
        g.command('get-effective-routes', 'get_effective_virtual_hub_routes', supports_no_wait=True)
    with self.command_group('network vhub connection', network_vhub_connection_sdk) as g:
        g.custom_command('create', 'create_hub_vnet_connection', supports_no_wait=True)
        g.command('delete', 'delete', supports_no_wait=True, confirmation=True)
        g.show_command('show')
        g.command('list', 'list')
        g.wait_command('wait')
    with self.command_group('network vhub route', network_vhub_sdk) as g:
        g.custom_command('add', 'add_hub_route', supports_no_wait=True)
        g.custom_command('list', 'list_hub_routes')
        g.custom_command('remove', 'remove_hub_route', supports_no_wait=True)
        g.custom_command('reset', 'reset_hub_routes',
                         supports_no_wait=True)
    with self.command_group('network vhub route-table', network_vhub_route_table_sdk) as g:
        g.custom_command('create', 'create_vhub_route_table', supports_no_wait=True)
        g.custom_command('update', 'update_vhub_route_table', supports_no_wait=True)
        g.custom_show_command('show', 'get_vhub_route_table')
        g.custom_command('list', 'list_vhub_route_tables')
        g.custom_command('delete', 'delete_vhub_route_table')
        g.wait_command('wait')
    with self.command_group('network vhub route-table route', network_vhub_route_table_sdk) as g:
        g.custom_command('add', 'add_hub_routetable_route', supports_no_wait=True)
        g.custom_command('list', 'list_hub_routetable_route')
        g.custom_command('remove', 'remove_hub_routetable_route', supports_no_wait=True)
    # endregion
    # region VpnGateways
    with self.command_group('network vpn-gateway', network_vpn_gateway_sdk) as g:
        g.custom_command('create', 'create_vpn_gateway', supports_no_wait=True)
        g.command('delete', 'delete')
        g.custom_command('list', 'list_vpn_gateways')
        g.show_command('show')
        g.generic_update_command('update', custom_func_name='update_vpn_gateway', supports_no_wait=True, setter_arg_name='vpn_gateway_parameters')
    with self.command_group('network vpn-gateway connection', network_vpn_gateway_connection_sdk) as g:
        g.custom_command('create', 'create_vpn_gateway_connection', supports_no_wait=True)
        g.command('list', 'list_by_vpn_gateway')
        g.show_command('show', 'get')
        g.command('delete', 'delete')
        g.wait_command('wait')
    with self.command_group('network vpn-gateway connection ipsec-policy', network_vpn_gateway_sdk) as g:
        g.custom_command('add', 'add_vpn_gateway_connection_ipsec_policy', supports_no_wait=True)
        g.custom_command('list', 'list_vpn_conn_ipsec_policies')
        g.custom_command('remove', 'remove_vpn_conn_ipsec_policy', supports_no_wait=True)
    # endregion
    # region VpnSites
    with self.command_group('network vpn-site', network_vpn_site_sdk) as g:
        g.custom_command('create', 'create_vpn_site', supports_no_wait=True)
        g.command('delete', 'delete')
        g.custom_command('list', 'list_vpn_sites')
        g.show_command('show')
        g.generic_update_command('update', custom_func_name='update_vpn_site', setter_arg_name='vpn_site_parameters', supports_no_wait=True)
    # Same CLI group name, different SDK: 'download' comes from the site
    # configuration operations class.
    with self.command_group('network vpn-site', network_vpn_site_config_sdk) as g:
        g.command('download', 'download')
    # endregion
    # region VpnServer
    with self.command_group('network vpn-server-config', network_vpn_server_config_sdk) as g:
        g.custom_command('create', 'create_vpn_server_config', supports_no_wait=True)
        # 'set' deliberately reuses the create implementation (upsert).
        g.custom_command('set', 'create_vpn_server_config', supports_no_wait=True)
        # due to service limitation, we cannot support update command right now.
        # g.generic_update_command('update', custom_func_name='update_vpn_server_config', supports_no_wait=True, setter_arg_name='vpn_server_configuration_parameters')
        g.show_command('show')
        g.command('delete', 'delete', confirmation=True)
        g.custom_command('list', 'list_vpn_server_config')
        g.wait_command('wait')
    with self.command_group('network vpn-server-config ipsec-policy', network_vpn_server_config_sdk) as g:
        g.custom_command('add', 'add_vpn_server_config_ipsec_policy', supports_no_wait=True)
        g.custom_command('list', 'list_vpn_server_config_ipsec_policies')
        g.custom_command('remove', 'remove_vpn_server_config_ipsec_policy', supports_no_wait=True)
        g.wait_command('wait')
    with self.command_group('network p2s-vpn-gateway', network_p2s_vpn_gateway_sdk) as g:
        g.custom_command('create', 'create_p2s_vpn_gateway', supports_no_wait=True)
        g.command('delete', 'delete', confirmation=True)
        g.custom_command('list', 'list_p2s_vpn_gateways')
        g.show_command('show')
        g.generic_update_command('update', custom_func_name='update_p2s_vpn_gateway', supports_no_wait=True, setter_arg_name='p2_svpn_gateway_parameters')
        g.wait_command('wait')
    # Generic property-listing helpers from _util for connection configs.
    resource = 'p2s_vpn_gateways'
    prop = 'p2_sconnection_configurations'
    with self.command_group('network p2s-vpn-gateway connection', network_util, min_api='2020-04-01', is_preview=True) as g:
        g.command('list', list_network_resource_property(resource, prop))
        g.show_command('show', get_network_resource_property_entry(resource, prop))
    with self.command_group('network p2s-vpn-gateway vpn-client', network_p2s_vpn_gateway_sdk, min_api='2020-05-01') as g:
        g.command('generate', 'generate_vpn_profile')
    # endregion
| 9,131
| 0
| 22
|
fccdc2372d03085d61290955d27b6d16997c2aac
| 3,925
|
py
|
Python
|
train_cnn_model.py
|
jacksonwalters/text-binary-classifier
|
667ec9c899735d897a2fc954b9d1d9ad89d42140
|
[
"MIT"
] | null | null | null |
train_cnn_model.py
|
jacksonwalters/text-binary-classifier
|
667ec9c899735d897a2fc954b9d1d9ad89d42140
|
[
"MIT"
] | null | null | null |
train_cnn_model.py
|
jacksonwalters/text-binary-classifier
|
667ec9c899735d897a2fc954b9d1d9ad89d42140
|
[
"MIT"
] | null | null | null |
#DATA: 6,000 labled tweets before 2020 election from Biden=0, Trump=1.
#VECTORIZER: tokenizer. vocab size 19,415.
#MODEL: 1-d CNN with a global max-pool layer.
if __name__ == "__main__":
#load the data
import os
import pandas as pd
os.chdir('/Users/jacksonwalters/tensorflow_datasets')
filepath_dict = {'tweet': 'labeled_tweets/all_labeled_tweets.txt'}
df_list = []
for source, filepath in filepath_dict.items():
df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\t')
df['source'] = source # Add another column filled with the source name
df_list.append(df)
df = pd.concat(df_list)
print(df.iloc[0])
#vectorize the data and split into train/test
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
tokenizer = Tokenizer(num_words=5000)
#slice the df to get yelp sentences
df_tweet = df[df['source'] == 'tweet']
sentences = df_tweet['sentence'].values
#tweet sentence sentiment labels. 0 = negative, 1 = positive
y = df_tweet['label'].values
#split the sentences into training data and test data
sentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)
#vectorize the sentences
tokenizer.fit_on_texts(sentences_train)
X_train = tokenizer.texts_to_sequences(sentences_train)
X_test = tokenizer.texts_to_sequences(sentences_test)
# Adding 1 because of reserved 0 index
vocab_size = len(tokenizer.word_index) + 1
#pad_sequences
maxlen = 100
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
print(vocab_size)
#plotting function
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#construct the model
from keras import Sequential
from keras import layers
embedding_dim = 100
model = Sequential()
model.add(layers.Embedding(vocab_size, embedding_dim, input_length=maxlen))
model.add(layers.Conv1D(128, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.summary()
#train the model
history = model.fit(X_train, y_train,
epochs=2,
verbose=True,
validation_data=(X_test, y_test),
batch_size=10)
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
plot_history(history)
#test examples
ex_sent = "fake news"
X_ex_sent = tokenizer.texts_to_sequences([ex_sent])
X_ex_sent = pad_sequences(X_ex_sent, padding='post', maxlen=maxlen)
print(model.predict(X_ex_sent))
#save the model
model.save('/Users/jacksonwalters/Desktop/cnn_model')
| 37.028302
| 120
| 0.672102
|
#DATA: 6,000 labled tweets before 2020 election from Biden=0, Trump=1.
#VECTORIZER: tokenizer. vocab size 19,415.
#MODEL: 1-d CNN with a global max-pool layer.
if __name__ == "__main__":
    #load the data
    import os
    import pandas as pd
    # NOTE(review): hard-coded absolute paths -- this script only runs
    # as-is on the author's machine.
    os.chdir('/Users/jacksonwalters/tensorflow_datasets')
    filepath_dict = {'tweet': 'labeled_tweets/all_labeled_tweets.txt'}
    df_list = []
    for source, filepath in filepath_dict.items():
        # Tab-separated rows of: sentence<TAB>label
        df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\t')
        df['source'] = source  # Add another column filled with the source name
        df_list.append(df)
    df = pd.concat(df_list)
    print(df.iloc[0])
    #vectorize the data and split into train/test
    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences
    from sklearn.model_selection import train_test_split
    tokenizer = Tokenizer(num_words=5000)
    #slice the df to get tweet sentences (original comment said "yelp" --
    #stale copy/paste from a tutorial)
    df_tweet = df[df['source'] == 'tweet']
    sentences = df_tweet['sentence'].values
    #labels: per the header, 0 = Biden, 1 = Trump (the original
    #"negative/positive" comment looks like stale copy/paste)
    y = df_tweet['label'].values
    #split the sentences into training data and test data
    sentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)
    #vectorize the sentences
    tokenizer.fit_on_texts(sentences_train)
    X_train = tokenizer.texts_to_sequences(sentences_train)
    X_test = tokenizer.texts_to_sequences(sentences_test)
    # Adding 1 because of reserved 0 index
    vocab_size = len(tokenizer.word_index) + 1
    # NOTE(review): vocab_size counts the FULL word index (~19,415) even
    # though Tokenizer(num_words=5000) caps sequence ids at 5000; the
    # embedding layer is therefore larger than strictly needed.
    #pad_sequences: truncate/zero-pad every tweet to a fixed length
    maxlen = 100
    X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
    X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
    print(vocab_size)
    #plotting function
    import matplotlib.pyplot as plt
    plt.style.use('ggplot')
    def plot_history(history):
        # Draw side-by-side accuracy and loss curves for train/validation.
        # NOTE(review): nothing calls plt.show() or savefig afterwards, so
        # in a non-interactive run the figure is silently discarded.
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        x = range(1, len(acc) + 1)
        plt.figure(figsize=(12, 5))
        plt.subplot(1, 2, 1)
        plt.plot(x, acc, 'b', label='Training acc')
        plt.plot(x, val_acc, 'r', label='Validation acc')
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.subplot(1, 2, 2)
        plt.plot(x, loss, 'b', label='Training loss')
        plt.plot(x, val_loss, 'r', label='Validation loss')
        plt.title('Training and validation loss')
        plt.legend()
    #construct the model: embedding -> 1-d conv -> global max-pool -> MLP head
    from keras import Sequential
    from keras import layers
    embedding_dim = 100
    model = Sequential()
    model.add(layers.Embedding(vocab_size, embedding_dim, input_length=maxlen))
    model.add(layers.Conv1D(128, 5, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(10, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
    model.summary()
    #train the model
    history = model.fit(X_train, y_train,
                        epochs=2,
                        verbose=True,
                        validation_data=(X_test, y_test),
                        batch_size=10)
    loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
    print("Training Accuracy: {:.4f}".format(accuracy))
    loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
    print("Testing Accuracy: {:.4f}".format(accuracy))
    plot_history(history)
    #test examples: predict on a single hand-written phrase
    ex_sent = "fake news"
    X_ex_sent = tokenizer.texts_to_sequences([ex_sent])
    X_ex_sent = pad_sequences(X_ex_sent, padding='post', maxlen=maxlen)
    print(model.predict(X_ex_sent))
    #save the model
    model.save('/Users/jacksonwalters/Desktop/cnn_model')
| 683
| 0
| 27
|
25f94c8f84cb34ad71d414206a53ba24e0277a80
| 1,971
|
py
|
Python
|
bot.py
|
Stashchen/bot-top-ranking-slack
|
9fe3a673a9a023b9329b23229fa619d12da93b4b
|
[
"MIT"
] | null | null | null |
bot.py
|
Stashchen/bot-top-ranking-slack
|
9fe3a673a9a023b9329b23229fa619d12da93b4b
|
[
"MIT"
] | null | null | null |
bot.py
|
Stashchen/bot-top-ranking-slack
|
9fe3a673a9a023b9329b23229fa619d12da93b4b
|
[
"MIT"
] | null | null | null |
from flask import Flask, Response, request
import json
import os
from threading import Thread
from poll import Poll
from slack import WebClient
from slackeventsapi import SlackEventAdapter
from handlers.handlers import handle_commands, handle_interactivity
# Start Flask app that will be produce all the requests
app = Flask(__name__)
# Get venv variables
SLACK_BOT_SIGNIN_TOKEN = os.environ.get('SLACK_BOT_SIGNIN_TOKEN')
SLACK_BOT_ACCESS_TOKEN = os.environ.get('SLACK_BOT_ACCESS_TOKEN')
SLACK_BOT_VERIFICATION_TOKEN = os.environ.get('SLACK_BOT_VERIFICATION_TOKEN')
# Get slack api client and poll object
slack_client = WebClient(SLACK_BOT_ACCESS_TOKEN)
music_poll = Poll()
# Enable several routes to the server
@app.route("/")
def event_hook(request):
"""
Main hook that checks all the request with Slack token.
"""
json_dict = json.loads(request.body.decode("utf-8"))
if json_dict["token"] != SLACK_BOT_VERIFICATION_TOKEN:
return {"status": 403}
if "type" in json_dict:
if json_dict["type"] == "url_verification":
response_dict = {"challenge": json_dict["challenge"]}
return response_dict
return {"status": 500}
@app.route('/slack/commands', methods=['POST'])
def command_hook():
"""
Function to handle all the bots commands.
"""
handle_commands(client=slack_client, poll=music_poll, request_form=request.form)
return Response(status=200)
@app.route('/slack/interactivity', methods=['POST'])
def interactivity_hook():
"""
Function, that handles all the interactivity (buttons, checkboxes, slack shortcuts, etc.).
But in that bot, it will handle only poll selection interactivity.
"""
handle_interactivity(client=slack_client, request=request, poll=music_poll)
return Response(status=200)
slack_events_adapter = SlackEventAdapter(
SLACK_BOT_SIGNIN_TOKEN, "/slack/events", app
)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=4000)
| 31.285714
| 94
| 0.734652
|
from flask import Flask, Response, request
import json
import os
from threading import Thread
from poll import Poll
from slack import WebClient
from slackeventsapi import SlackEventAdapter
from handlers.handlers import handle_commands, handle_interactivity
# Start Flask app that will be produce all the requests
app = Flask(__name__)
# Get venv variables
SLACK_BOT_SIGNIN_TOKEN = os.environ.get('SLACK_BOT_SIGNIN_TOKEN')
SLACK_BOT_ACCESS_TOKEN = os.environ.get('SLACK_BOT_ACCESS_TOKEN')
SLACK_BOT_VERIFICATION_TOKEN = os.environ.get('SLACK_BOT_VERIFICATION_TOKEN')
# Get slack api client and poll object
slack_client = WebClient(SLACK_BOT_ACCESS_TOKEN)
music_poll = Poll()
# Enable several routes to the server
@app.route("/", methods=["POST"])
def event_hook():
    """
    Main hook that validates incoming Slack requests.

    Compares the request's token with SLACK_BOT_VERIFICATION_TOKEN and
    answers Slack's one-time "url_verification" challenge handshake.
    Returns a 403-style dict on token mismatch and 500-style otherwise.
    """
    # NOTE(review): Flask never passes a positional `request` to a view with
    # no URL parameters, and flask.request has no `.body` attribute, so the
    # original signature and attribute access failed on every call. Use the
    # module-level flask `request` proxy and get_data() instead; Slack sends
    # events via POST, hence methods=["POST"].
    json_dict = json.loads(request.get_data().decode("utf-8"))
    if json_dict["token"] != SLACK_BOT_VERIFICATION_TOKEN:
        return {"status": 403}
    if "type" in json_dict:
        if json_dict["type"] == "url_verification":
            response_dict = {"challenge": json_dict["challenge"]}
            return response_dict
    return {"status": 500}
@app.route('/slack/commands', methods=['POST'])
def command_hook():
    """Receive a Slack slash-command payload and dispatch it to the handler."""
    form_data = request.form
    handle_commands(client=slack_client, poll=music_poll, request_form=form_data)
    return Response(status=200)
@app.route('/slack/interactivity', methods=['POST'])
def interactivity_hook():
    """Receive Slack interactivity callbacks (buttons, checkboxes, shortcuts).

    In this bot it is only exercised for poll option selection.
    """
    handle_interactivity(client=slack_client, poll=music_poll, request=request)
    return Response(status=200)
slack_events_adapter = SlackEventAdapter(
SLACK_BOT_SIGNIN_TOKEN, "/slack/events", app
)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=4000)
| 0
| 0
| 0
|
3078806d0483db47b47c2e1a70d09713610e7d96
| 427
|
py
|
Python
|
my_lambda/test.py
|
Terrencebosco/lambdata-dspt7-tb
|
9a5be4e6e0fea1801393253221fcca0511ded83c
|
[
"MIT"
] | null | null | null |
my_lambda/test.py
|
Terrencebosco/lambdata-dspt7-tb
|
9a5be4e6e0fea1801393253221fcca0511ded83c
|
[
"MIT"
] | null | null | null |
my_lambda/test.py
|
Terrencebosco/lambdata-dspt7-tb
|
9a5be4e6e0fea1801393253221fcca0511ded83c
|
[
"MIT"
] | null | null | null |
from my_lambda.my_classes import MyUtilities
import numpy as np
import pandas as pd
if __name__ == "__main__":
    # Smoke-test MyUtilities helpers on a tiny two-column frame
    # (col2 contains one NaN so the NaN helpers have something to find).
    d = {'col1': [1, 5, 3, 100, 8], 'col2': [3, 4, np.nan, 5, 6]}
    dt = MyUtilities(d)
    print(dt.num_nans())
    X_train, X_val, X_test, y_train, y_val, y_test = dt.train_validation_test_split('col1','col2',random_state=42)
    print(X_val)
    print(dt.shape)
    print(dt.drop_high_nan(1))
| 23.722222
| 114
| 0.639344
|
from my_lambda.my_classes import MyUtilities
import numpy as np
import pandas as pd
if __name__ == "__main__":
    # Smoke-test MyUtilities helpers on a tiny two-column frame
    # (col2 contains one NaN so the NaN helpers have something to find).
    d = {'col1': [1, 5, 3, 100, 8], 'col2': [3, 4, np.nan, 5, 6]}
    dt = MyUtilities(d)
    print(dt.num_nans())
    X_train, X_val, X_test, y_train, y_val, y_test = dt.train_validation_test_split('col1','col2',random_state=42)
    print(X_val)
    print(dt.shape)
    print(dt.drop_high_nan(1))
| 0
| 0
| 0
|
eae15b49724fd67ea584c4beb8c7b5c0c80b1714
| 4,468
|
py
|
Python
|
kuryr_kubernetes/tests/unit/controller/drivers/test_base.py
|
MaysaMacedo/kuryr-kubernetes-1
|
e4ba3896974e98dc46cb1afd9cbec42646250d72
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/tests/unit/controller/drivers/test_base.py
|
MaysaMacedo/kuryr-kubernetes-1
|
e4ba3896974e98dc46cb1afd9cbec42646250d72
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/tests/unit/controller/drivers/test_base.py
|
MaysaMacedo/kuryr-kubernetes-1
|
e4ba3896974e98dc46cb1afd9cbec42646250d72
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from kuryr_kubernetes.controller.drivers import base as d_base
from kuryr_kubernetes.tests import base as test_base
| 38.852174
| 79
| 0.705685
|
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from kuryr_kubernetes.controller.drivers import base as d_base
from kuryr_kubernetes.tests import base as test_base
class _TestDriver(d_base.DriverBase, metaclass=abc.ABCMeta):
    """Minimal abstract driver fixture for exercising DriverBase loading."""
    # Stevedore alias used by get_instance() to locate the config option
    # and the driver namespace.
    ALIAS = 'test_alias'
    @abc.abstractmethod
    def test(self):
        """Abstract placeholder; fixture is never meant to be invoked."""
        raise NotImplementedError()
class TestDriverBase(test_base.TestCase):
    """Unit tests for DriverBase.get_instance() caching and loading."""
    @mock.patch.object(d_base, '_DRIVER_MANAGERS')
    @mock.patch('kuryr_kubernetes.config.CONF')
    @mock.patch('stevedore.driver.DriverManager')
    def test_get_instance(self, m_stv_mgr, m_cfg, m_mgrs):
        """A cached manager is returned without touching config/stevedore."""
        m_drv = mock.MagicMock(spec=_TestDriver)
        m_mgr = mock.MagicMock()
        m_mgr.driver = m_drv
        m_mgrs.__getitem__.return_value = m_mgr
        self.assertEqual(m_drv, _TestDriver.get_instance())
        m_cfg.assert_not_called()
        m_stv_mgr.assert_not_called()
    @mock.patch.object(d_base, '_DRIVER_MANAGERS')
    @mock.patch('kuryr_kubernetes.config.CONF')
    @mock.patch('stevedore.driver.DriverManager')
    def test_get_instance_not_loaded(self, m_stv_mgr, m_cfg, m_mgrs):
        """On cache miss the driver is loaded via stevedore and cached."""
        alias = _TestDriver.ALIAS
        cfg_name = '%s_driver' % (alias)
        mgr_key = '%s:_from_cfg:default' % (alias)
        drv_name = 'driver_impl'
        namespace = '%s.%s' % (d_base._DRIVER_NAMESPACE_BASE, alias)
        m_cfg.kubernetes.__getitem__.return_value = drv_name
        m_drv = mock.MagicMock(spec=_TestDriver)
        m_mgr = mock.MagicMock()
        m_mgr.driver = m_drv
        m_stv_mgr.return_value = m_mgr
        # Simulate "not cached yet" so the stevedore path is exercised.
        m_mgrs.__getitem__.side_effect = KeyError
        self.assertEqual(m_drv, _TestDriver.get_instance())
        m_cfg.kubernetes.__getitem__.assert_called_with(cfg_name)
        m_stv_mgr.assert_called_with(namespace=namespace, name=drv_name,
                                     invoke_on_load=True)
        m_mgrs.__setitem__.assert_called_once_with(mgr_key, m_mgr)
    @mock.patch.object(d_base, '_DRIVER_MANAGERS')
    @mock.patch('kuryr_kubernetes.config.CONF')
    @mock.patch('stevedore.driver.DriverManager')
    def test_get_instance_invalid_type(self, m_stv_mgr, m_cfg, m_mgrs):
        """A loaded driver of the wrong type raises TypeError."""
        class _InvalidDriver(object):
            pass
        m_drv = mock.MagicMock(spec=_InvalidDriver)
        m_mgr = mock.MagicMock()
        m_mgr.driver = m_drv
        m_mgrs.__getitem__.return_value = m_mgr
        self.assertRaises(TypeError, _TestDriver.get_instance)
        m_cfg.assert_not_called()
        m_stv_mgr.assert_not_called()
class TestMultiVIFDriver(test_base.TestCase):
    """Unit tests for MultiVIFDriver.get_enabled_drivers()."""
    @mock.patch.object(d_base, '_MULTI_VIF_DRIVERS', [])
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_get_enabled_drivers(self, m_cfg):
        """A single configured driver name yields one loaded driver."""
        cfg_name = 'multi_vif_drivers'
        drv_name = 'driver_impl'
        m_cfg.kubernetes.__getitem__.return_value = [drv_name]
        m_drv = mock.MagicMock()
        d_base.MultiVIFDriver.get_instance = mock.MagicMock(return_value=m_drv)
        self.assertIn(m_drv, d_base.MultiVIFDriver.get_enabled_drivers())
        m_cfg.kubernetes.__getitem__.assert_called_once_with(cfg_name)
    @mock.patch.object(d_base, '_MULTI_VIF_DRIVERS', [])
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_get_enabled_drivers_multiple(self, m_cfg):
        """Multiple configured driver names all end up enabled."""
        cfg_name = 'multi_vif_drivers'
        drv1_name = 'driver_impl_1'
        drv2_name = 'driver_impl_2'
        m_cfg.kubernetes.__getitem__.return_value = [drv1_name, drv2_name]
        m_drv1 = mock.MagicMock()
        m_drv2 = mock.MagicMock()
        d_base.MultiVIFDriver.get_instance = mock.MagicMock()
        d_base.MultiVIFDriver.get_instance.side_effect = [m_drv1, m_drv2]
        self.assertIn(m_drv1, d_base.MultiVIFDriver.get_enabled_drivers())
        self.assertIn(m_drv2, d_base.MultiVIFDriver.get_enabled_drivers())
        m_cfg.kubernetes.__getitem__.assert_called_once_with(cfg_name)
| 2,670
| 951
| 69
|
c3cc2f62b1c83e90106c355852be900796b650f2
| 716
|
py
|
Python
|
Leetcode/week_1/p0867_transpose_matrix.py
|
SamSamhuns/wallbreakers_projekts
|
c07b555127ee89d6f461cea7cd87811c382086ff
|
[
"MIT"
] | 1
|
2021-07-07T00:55:23.000Z
|
2021-07-07T00:55:23.000Z
|
Leetcode/week_1/p0867_transpose_matrix.py
|
SamSamhuns/wallbreakers_projekts
|
c07b555127ee89d6f461cea7cd87811c382086ff
|
[
"MIT"
] | null | null | null |
Leetcode/week_1/p0867_transpose_matrix.py
|
SamSamhuns/wallbreakers_projekts
|
c07b555127ee89d6f461cea7cd87811c382086ff
|
[
"MIT"
] | null | null | null |
from typing import List
"""
Runtime: O(mn)
Space: O(mn)
where m = no of rows in A and n = no of cols in A
Runtime: 72 ms, faster than 80.91% of Python3 online submissions for Transpose Matrix.
Memory Usage: 13.4 MB, less than 100.00% of Python3 online submissions for Transpose Matrix.
"""
| 29.833333
| 92
| 0.620112
|
from typing import List
class Solution:
    """LeetCode 867: produce the transpose of a rectangular matrix."""

    def transpose(self, A: List[List[int]]) -> List[List[int]]:
        """Return the transpose of A (delegates to the index-based version)."""
        return self.manual_transpose(A)

    def manual_transpose(self, A: List[List[int]]) -> List[List[int]]:
        """Build the transposed matrix cell by cell.

        O(m*n) time and space for an m x n input.
        """
        rows, cols = len(A), len(A[0])
        out = [[0] * rows for _ in range(cols)]
        for r, row in enumerate(A):
            for c, value in enumerate(row):
                out[c][r] = value
        return out
"""
Runtime: O(mn)
Space: O(mn)
where m = no of rows in A and n = no of cols in A
Runtime: 72 ms, faster than 80.91% of Python3 online submissions for Transpose Matrix.
Memory Usage: 13.4 MB, less than 100.00% of Python3 online submissions for Transpose Matrix.
"""
| 352
| -6
| 76
|
d72e01e482deb9e10813b6f69d66e5c7f1cd4601
| 323
|
py
|
Python
|
application/model/layout/option/OptionPopup.py
|
Kzulfazriawan/Stigma-game-demo
|
971ee90a908784dfe1c9e87733b0394fa2212299
|
[
"MIT"
] | 2
|
2016-08-09T05:33:21.000Z
|
2016-10-05T06:34:04.000Z
|
application/model/layout/option/OptionPopup.py
|
Kzulfazriawan/stigma-game-demo
|
971ee90a908784dfe1c9e87733b0394fa2212299
|
[
"MIT"
] | null | null | null |
application/model/layout/option/OptionPopup.py
|
Kzulfazriawan/stigma-game-demo
|
971ee90a908784dfe1c9e87733b0394fa2212299
|
[
"MIT"
] | null | null | null |
from core import Files
from library.stigma.helper import kivyBuilder
from library.stigma.application import Popup
kivyBuilder(Files.apppath, 'model', 'builder', 'option', 'optionpopup.kv')
| 32.3
| 74
| 0.736842
|
from core import Files
from library.stigma.helper import kivyBuilder
from library.stigma.application import Popup
kivyBuilder(Files.apppath, 'model', 'builder', 'option', 'optionpopup.kv')
class OptionPopup(Popup):
    """Popup for choosing one option; layout loaded from optionpopup.kv."""
    def __init__(self):
        super(OptionPopup, self).__init__()
        # Title shown in the popup header.
        self.title = "Pick one Option"
| 81
| 4
| 49
|
ad98b9b64db2a280ab328b62dbe4ff2dd803bdfd
| 14,838
|
py
|
Python
|
orquesta/tests/unit/conducting/test_workflow_conductor_branching.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
orquesta/tests/unit/conducting/test_workflow_conductor_branching.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
orquesta/tests/unit/conducting/test_workflow_conductor_branching.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orquesta import conducting
from orquesta import events
from orquesta.specs import native as specs
from orquesta import states
from orquesta.tests.unit import base
| 43.769912
| 100
| 0.657771
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orquesta import conducting
from orquesta import events
from orquesta.specs import native as specs
from orquesta import states
from orquesta.tests.unit import base
class WorkflowConductorExtendedTest(base.WorkflowConductorTest):
    """Branching-workflow scenarios: join-all merging and parallel branches."""
    def test_join(self):
        """Two branches publish variables; a join-all task merges contexts."""
        wf_def = """
        version: 1.0
        description: A basic branching workflow.
        tasks:
        # branch 1
        task1:
          action: core.noop
          next:
            - when: <% succeeded() %>
              publish:
                - var1: 'xyz'
              do: task3
        # branch 2
        task2:
          action: core.noop
          next:
            - when: <% succeeded() %>
              publish:
                - var2: 123
              do: task3
        # adjoining branch
        task3:
          join: all
          action: core.noop
          next:
            - when: <% succeeded() %>
              publish:
                - var3: True
              do: task4
        task4:
          action: core.noop
        """
        spec = specs.WorkflowSpec(wf_def)
        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)
        # Conduct task1 and check context and that there is no next tasks yet.
        task_name = 'task1'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        expected_txsn_ctx = {'task3__0': {'srcs': [0], 'value': {'var1': 'xyz'}}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        # Conduct task2 and check next tasks and context.
        task_name = 'task2'
        next_task_name = 'task3'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_txsn_ctx = {'task3__0': {'srcs': [0, 1], 'value': {'var1': 'xyz', 'var2': 123}}}
        self.assertDictEqual(conductor.get_task_transition_contexts('task2'), expected_txsn_ctx)
        next_task_spec = conductor.spec.tasks.get_task(next_task_name)
        expected_ctx_value = {'var1': 'xyz', 'var2': 123}
        expected_tasks = [self.format_task_item(next_task_name, expected_ctx_value, next_task_spec)]
        self.assert_task_list(conductor.get_next_tasks(task_name), expected_tasks)
        # Conduct task3 and check merged context.
        task_name = 'task3'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_init_ctx = {'srcs': [0, 1], 'value': expected_ctx_value}
        self.assertDictEqual(conductor.get_task_initial_context(task_name), expected_init_ctx)
        expected_ctx_val = {'var1': 'xyz', 'var2': 123, 'var3': True}
        expected_txsn_ctx = {'task4__0': {'srcs': [2], 'value': expected_ctx_val}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        # Conduct task4 and check final workflow state.
        task_name = 'task4'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        # Check workflow state and context.
        expected_ctx_value = {'var1': 'xyz', 'var2': 123, 'var3': True}
        expected_ctx_entry = {'src': [3], 'term': True, 'value': expected_ctx_value}
        self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
        self.assertDictEqual(conductor.get_workflow_terminal_context(), expected_ctx_entry)
    def test_join_with_no_input_and_no_context_changes(self):
        """Join-all with no published variables keeps contexts empty."""
        wf_def = """
        version: 1.0
        description: A basic branching workflow.
        tasks:
        # branch 1
        task1:
          action: core.noop
          next:
            - when: <% succeeded() %>
              do: task3
        # branch 2
        task2:
          action: core.noop
          next:
            - when: <% succeeded() %>
              do: task3
        # adjoining branch
        task3:
          join: all
          action: core.noop
          next:
            - when: <% succeeded() %>
              do: task4
        task4:
          action: core.noop
        """
        spec = specs.WorkflowSpec(wf_def)
        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)
        # Conduct task2 and check context and that there is no next tasks yet.
        task_name = 'task1'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        expected_txsn_ctx = {'task3__0': {'srcs': [], 'value': {}}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        # Conduct task2 and check next tasks and context.
        task_name = 'task2'
        next_task_name = 'task3'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_txsn_ctx = {'task3__0': {'srcs': [], 'value': {}}}
        self.assertDictEqual(conductor.get_task_transition_contexts('task2'), expected_txsn_ctx)
        next_task_spec = conductor.spec.tasks.get_task(next_task_name)
        expected_ctx_value = {}
        expected_tasks = [self.format_task_item(next_task_name, expected_ctx_value, next_task_spec)]
        self.assert_task_list(conductor.get_next_tasks(task_name), expected_tasks)
        # Conduct task3 and check merged context.
        task_name = 'task3'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_init_ctx = {'srcs': [], 'value': {}}
        self.assertDictEqual(conductor.get_task_initial_context(task_name), expected_init_ctx)
        expected_txsn_ctx = {'task4__0': {'srcs': [], 'value': {}}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        # Conduct task4 and check final workflow state.
        task_name = 'task4'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        # Check workflow state and context.
        expected_ctx_entry = {'src': [3], 'term': True, 'value': {}}
        self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
        self.assertDictEqual(conductor.get_workflow_terminal_context(), expected_ctx_entry)
    def test_join_with_input_and_no_context_changes(self):
        """Workflow input flows through a join unchanged when nothing publishes."""
        wf_def = """
        version: 1.0
        description: A basic branching workflow.
        input:
          - var1
        tasks:
        # branch 1
        task1:
          action: core.noop
          next:
            - when: <% succeeded() %>
              do: task3
        # branch 2
        task2:
          action: core.noop
          next:
            - when: <% succeeded() %>
              do: task3
        # adjoining branch
        task3:
          join: all
          action: core.noop
          next:
            - when: <% succeeded() %>
              do: task4
        task4:
          action: core.noop
        """
        inputs = {'var1': 'xyz'}
        spec = specs.WorkflowSpec(wf_def)
        conductor = conducting.WorkflowConductor(spec, inputs=inputs)
        conductor.request_workflow_state(states.RUNNING)
        # Conduct task2 and check context and that there is no next tasks yet.
        task_name = 'task1'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        expected_txsn_ctx = {'task3__0': {'srcs': [], 'value': inputs}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        # Conduct task2 and check next tasks and context.
        task_name = 'task2'
        next_task_name = 'task3'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_txsn_ctx = {'task3__0': {'srcs': [], 'value': inputs}}
        self.assertDictEqual(conductor.get_task_transition_contexts('task2'), expected_txsn_ctx)
        next_task_spec = conductor.spec.tasks.get_task(next_task_name)
        expected_ctx_value = inputs
        expected_tasks = [self.format_task_item(next_task_name, expected_ctx_value, next_task_spec)]
        self.assert_task_list(conductor.get_next_tasks(task_name), expected_tasks)
        # Conduct task3 and check merged context.
        task_name = 'task3'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_init_ctx = {'srcs': [], 'value': inputs}
        self.assertDictEqual(conductor.get_task_initial_context(task_name), expected_init_ctx)
        expected_txsn_ctx = {'task4__0': {'srcs': [], 'value': inputs}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        # Conduct task4 and check final workflow state.
        task_name = 'task4'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        # Check workflow state and context.
        expected_ctx_entry = {'src': [3], 'term': True, 'value': inputs}
        self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
        self.assertDictEqual(conductor.get_workflow_terminal_context(), expected_ctx_entry)
    def test_parallel(self):
        """Two independent branches run in parallel; terminal context merges both."""
        wf_def = """
        version: 1.0
        description: A basic branching workflow.
        tasks:
        # branch 1
        task1:
          action: core.noop
          next:
            - when: <% succeeded() %>
              publish:
                - var1: 'xyz'
              do: task2
        task2:
          action: core.noop
        # branch 2
        task3:
          action: core.noop
          next:
            - when: <% succeeded() %>
              publish:
                - var2: 123
              do: task4
        task4:
          action: core.noop
        """
        spec = specs.WorkflowSpec(wf_def)
        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)
        # Conduct task1 and check context.
        task_name = 'task1'
        next_task_name = 'task2'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_ctx_value = {'var1': 'xyz'}
        expected_txsn_ctx = {'task2__0': {'srcs': [0], 'value': expected_ctx_value}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        next_task_spec = conductor.spec.tasks.get_task(next_task_name)
        expected_tasks = [self.format_task_item(next_task_name, expected_ctx_value, next_task_spec)]
        self.assert_task_list(conductor.get_next_tasks(task_name), expected_tasks)
        # Conduct task3 and check context.
        task_name = 'task3'
        next_task_name = 'task4'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        expected_ctx_value = {'var2': 123}
        expected_txsn_ctx = {'task4__0': {'srcs': [1], 'value': expected_ctx_value}}
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), expected_txsn_ctx)
        next_task_spec = conductor.spec.tasks.get_task(next_task_name)
        expected_tasks = [self.format_task_item(next_task_name, expected_ctx_value, next_task_spec)]
        self.assert_task_list(conductor.get_next_tasks(task_name), expected_tasks)
        # Conduct task2 and check context.
        task_name = 'task2'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), {})
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        # Conduct task4 and check context.
        task_name = 'task4'
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.RUNNING))
        conductor.update_task_flow(task_name, events.ActionExecutionEvent(states.SUCCEEDED))
        self.assertDictEqual(conductor.get_task_transition_contexts(task_name), {})
        self.assertListEqual(conductor.get_next_tasks(task_name), [])
        # Check workflow state and context.
        expected_ctx_entry = {'src': [2, 3], 'term': True, 'value': {'var1': 'xyz', 'var2': 123}}
        self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
        self.assertDictEqual(conductor.get_workflow_terminal_context(), expected_ctx_entry)
| 13,949
| 43
| 131
|
3b3140ae07533c0cab7c07ae4475dbd5b63949e5
| 140
|
py
|
Python
|
pyroute2/netlink/rtnl/ifinfmsg/plugins/vrf.py
|
li-ma/pyroute2
|
48b85e39d675c18c05eb209229db082316aa760a
|
[
"Apache-2.0"
] | null | null | null |
pyroute2/netlink/rtnl/ifinfmsg/plugins/vrf.py
|
li-ma/pyroute2
|
48b85e39d675c18c05eb209229db082316aa760a
|
[
"Apache-2.0"
] | null | null | null |
pyroute2/netlink/rtnl/ifinfmsg/plugins/vrf.py
|
li-ma/pyroute2
|
48b85e39d675c18c05eb209229db082316aa760a
|
[
"Apache-2.0"
] | null | null | null |
from pyroute2.netlink import nla
| 20
| 44
| 0.607143
|
from pyroute2.netlink import nla
class vrf(nla):
    """Netlink attribute map for VRF link info (IFLA_VRF_* attributes)."""
    nla_map = (('IFLA_VRF_UNSPEC', 'none'),
               ('IFLA_VRF_TABLE', 'uint32'))
| 0
| 83
| 23
|
40523554accfed21ca85773e4e4bb4e36bdf5ef7
| 3,988
|
py
|
Python
|
train.py
|
Tandon-A/Tolkien-Bot
|
b19c9311ae4b33b30f0722e73bd2f981cff6beeb
|
[
"MIT"
] | null | null | null |
train.py
|
Tandon-A/Tolkien-Bot
|
b19c9311ae4b33b30f0722e73bd2f981cff6beeb
|
[
"MIT"
] | null | null | null |
train.py
|
Tandon-A/Tolkien-Bot
|
b19c9311ae4b33b30f0722e73bd2f981cff6beeb
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import math
import pickle
import os
"""
Import sbot class definition.
"""
from sbot import sbot
"""
Function to load data and prepare the metadata variables
"""
"""
Function to build training sequences.
"""
"""
Training Function.
"""
flags = tf.app.flags
flags.DEFINE_string("file_path",None,"Path to file containing training texts")
flags.DEFINE_string("meta_dir","meta_dir/","Directory to store training file metadata produced by application")
flags.DEFINE_string("model_dir","sbot_model/","Directory name to save checkpoints")
FLAGS = flags.FLAGS
if __name__ == '__main__':
tf.app.run()
| 30.442748
| 114
| 0.647693
|
import tensorflow as tf
import numpy as np
import math
import pickle
import os
"""
Import sbot class definition.
"""
from sbot import sbot
"""
Function to load data and prepare the metadata variables
"""
def load_data(file_path, meta_dir):
    """Read a training corpus and build character-level vocabulary metadata.

    Args:
        file_path: path to the plain-text training file.
        meta_dir: directory prefix (including trailing separator) where the
            pickled metadata file ``tr_file_meta`` is written.

    Returns:
        Tuple of (data, char_size, char_to_ind) where ``data`` is the raw
        text, ``char_size`` the vocabulary size, and ``char_to_ind`` the
        char -> index mapping.
    """
    with open(file_path, "r") as f:
        data = f.read()
    chars = list(set(data))
    char_size = len(chars)
    ind_to_char = {ind: char for ind, char in enumerate(chars)}
    char_to_ind = {char: ind for ind, char in enumerate(chars)}
    # Persist vocabulary metadata so sampling/inference can run without the
    # raw corpus. Context manager replaces the manual open/close pair (the
    # original also called f.close() redundantly after the `with` block).
    with open(meta_dir + "tr_file_meta", "wb") as meta_file:
        pickle.dump([char_size, ind_to_char, char_to_ind], meta_file)
    return data, char_size, char_to_ind
"""
Function to build training sequences.
"""
def get_data(data, char_to_ind, seq_length, char_size):
    """Build one-hot encoded input/target sequences for language-model training.

    Targets are the inputs shifted by one character. Only windows whose
    shifted target fits entirely inside ``data`` are produced: the original
    used ``int(len(data)/seq_length)`` windows and raised IndexError whenever
    len(data) was an exact multiple of seq_length (the last target slice was
    one character short). Using ``(len(data) - 1) // seq_length`` yields the
    same windows for every non-crashing input and simply drops the
    out-of-range final window otherwise.

    Args:
        data: raw training text.
        char_to_ind: char -> vocabulary index mapping.
        seq_length: characters per training sequence.
        char_size: vocabulary size (one-hot width).

    Returns:
        Tuple (x, y) of numpy arrays shaped (num_seqs, seq_length, char_size).
    """
    xs = []
    ys = []
    # The shifted target needs index i*seq_length + seq_length, hence the -1.
    num_seqs = (len(data) - 1) // seq_length
    for i in range(num_seqs):
        start = i * seq_length
        x_ind = [char_to_ind[ch] for ch in data[start:start + seq_length]]
        y_ind = [char_to_ind[ch] for ch in data[start + 1:start + seq_length + 1]]
        x_seq = np.zeros((seq_length, char_size))
        y_seq = np.zeros((seq_length, char_size))
        for j in range(seq_length):
            x_seq[j][x_ind[j]] = 1
            y_seq[j][y_ind[j]] = 1
        xs.append(x_seq)
        ys.append(y_seq)
    return np.array(xs), np.array(ys)
"""
Training Function.
"""
def train(net, tr_data, tr_labels, batch_size, max_iter, lr_rate, model_dir):
    """Run minibatch training of the character model and checkpoint each epoch.

    Args:
        net: model object exposing ``opt``, ``loss``, ``inputs``, ``targets``
            and ``lr_rate`` graph nodes (sbot instance).
        tr_data: one-hot training inputs, shape (num_seqs, seq_len, vocab).
        tr_labels: one-hot shifted targets, same shape as tr_data.
        batch_size: sequences per optimizer step.
        max_iter: number of epochs to run.
        lr_rate: initial learning rate (decayed 3% every 5 epochs from 25).
        model_dir: checkpoint path prefix passed to tf.train.Saver.save.
    """
    saver = tf.train.Saver(max_to_keep=None)
    epoch = 0
    print("starting training")
    max_len = tr_data.shape[0] - batch_size
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        while epoch < max_iter:
            bs = 0
            print(epoch)
            if epoch >= 25 and epoch % 5 == 0:
                # Decrease learning rate every 5 epochs, starting at epoch 25.
                lr_rate = lr_rate * 0.97
            while bs < max_len:
                batch_data = tr_data[bs:(bs + batch_size)]
                batch_labels = tr_labels[bs:(bs + batch_size)]
                sess.run(net.opt, feed_dict={net.inputs: batch_data,
                                             net.targets: batch_labels,
                                             net.lr_rate: lr_rate})
                if epoch % 5 == 0 and bs % 5000 == 0:
                    cost = sess.run(net.loss, feed_dict={net.inputs: batch_data,
                                                         net.targets: batch_labels,
                                                         net.lr_rate: lr_rate})
                    print("epoch = %r step = %r cost = %r" % (epoch, bs, cost))
                bs = bs + batch_size
                # NOTE(review): the original incremented an uninitialized
                # `counter` and called `train_writer.add_summary(summary, ...)`
                # with neither name defined anywhere — a guaranteed NameError
                # on the first batch. That dead summary-writer remnant is
                # removed here; reintroduce it properly if TensorBoard
                # logging is wanted.
            epoch = epoch + 1
            saver.save(sess, model_dir, write_meta_graph=True)
            print("### Model Saved ### epoch = %r" % (epoch))
def main(_):
    """Entry point for tf.app.run: validate paths, build data, train the model.

    Reads paths from FLAGS, creates output directories if needed, builds the
    one-hot training set from the corpus, then constructs and trains an sbot
    network. The unused positional arg is the argv list tf.app.run passes.
    """
    if not os.path.exists(FLAGS.file_path):
        print ("Training text file doesn't exist.")
    else:
        if not os.path.exists(FLAGS.model_dir):
            os.makedirs(FLAGS.model_dir)
        if not os.path.exists(FLAGS.meta_dir):
            os.makedirs(FLAGS.meta_dir)
        # Characters per training sequence.
        seq_length = 100
        data,char_size,char_to_ind = load_data(FLAGS.file_path,FLAGS.meta_dir)
        tr_data,tr_label = get_data(data,char_to_ind,seq_length,char_size)
        print (tr_data.shape,tr_label.shape)
        # Hyperparameters for the sbot character RNN.
        batch_size = 50
        input_shape = batch_size,seq_length,char_size
        lr_rate = 0.002
        beta1= 0.5
        num_layers = 4
        num_neurons = 512
        is_train = True
        max_iter = 50
        temperature = 1.0
        tf.reset_default_graph()
        net = sbot(input_shape,num_neurons,num_layers,beta1,temperature,is_train)
        train(net,tr_data,tr_label,batch_size,max_iter,lr_rate,FLAGS.model_dir)
flags = tf.app.flags
flags.DEFINE_string("file_path",None,"Path to file containing training texts")
flags.DEFINE_string("meta_dir","meta_dir/","Directory to store training file metadata produced by application")
flags.DEFINE_string("model_dir","sbot_model/","Directory name to save checkpoints")
FLAGS = flags.FLAGS
if __name__ == '__main__':
tf.app.run()
| 3,231
| 0
| 92
|
552df972f2beb9f46a8f27ae713cff50805f25a7
| 2,840
|
py
|
Python
|
src/lib/mechanic/ui/lists/installation.py
|
JefferyQ/Mechanic
|
2c5f08484fbcc0bed23c077fe7c55b1c67baeba1
|
[
"MIT"
] | 1
|
2019-04-07T23:44:14.000Z
|
2019-04-07T23:44:14.000Z
|
src/lib/mechanic/ui/lists/installation.py
|
JefferyQ/Mechanic
|
2c5f08484fbcc0bed23c077fe7c55b1c67baeba1
|
[
"MIT"
] | null | null | null |
src/lib/mechanic/ui/lists/installation.py
|
JefferyQ/Mechanic
|
2c5f08484fbcc0bed23c077fe7c55b1c67baeba1
|
[
"MIT"
] | null | null | null |
import webbrowser
from AppKit import NSPredicate
from mechanic.extension import Extension
from mechanic.ui.lists.base import BaseList
from mechanic.ui.formatters.description import DescriptionFormatter
from mechanic.ui.cells.circle import CircleCell
class InstallationList(BaseList):
"""Return an ExtensionList for installation window."""
columns = [{"title": "Installed",
"key": "is_installed",
"width": 25,
"editable": False,
"cell": CircleCell.alloc().init()},
{"title": "Extension",
"key": "extension",
"width": 400,
"editable": False,
"formatter": DescriptionFormatter.alloc().init()}]
@property
| 32.643678
| 72
| 0.597887
|
import webbrowser
from AppKit import NSPredicate
from mechanic.extension import Extension
from mechanic.ui.lists.base import BaseList
from mechanic.ui.formatters.description import DescriptionFormatter
from mechanic.ui.cells.circle import CircleCell
class InstallationList(BaseList):
    """Return an ExtensionList for installation window."""
    # Two columns: an install-status dot and a formatted extension description.
    columns = [{"title": "Installed",
                "key": "is_installed",
                "width": 25,
                "editable": False,
                "cell": CircleCell.alloc().init()},
               {"title": "Extension",
                "key": "extension",
                "width": 400,
                "editable": False,
                "formatter": DescriptionFormatter.alloc().init()}]
    def __init__(self, posSize, **kwargs):
        """Create the list with fixed row styling and double-click -> open repo."""
        kwargs.update({
            'rowHeight': 39.0,
            'showColumnTitles': False,
            'allowsMultipleSelection': True,
            'doubleClickCallback': self.open_repo
        })
        super(InstallationList, self).__init__(posSize, [], **kwargs)
    def _wrapItem(self, extension):
        """Wrap an extension dict in a row dict with install flag and search text.

        Assumes `extension` carries 'filename', 'author' and 'description'
        keys — TODO confirm against the registry payload shape.
        """
        name = extension[u'filename'].split("/")[-1]
        search = []
        if name:
            search.append(name.lower())
        if extension[u'author']:
            search.append(extension[u'author'].lower())
        if extension[u'description']:
            search.append(extension[u'description'].lower())
        item = {'is_installed': Extension(name=name).is_installed,
                'extension': extension,
                'search': ' '.join(search)}
        return super(InstallationList, self)._wrapItem(item)
    def _unwrapListItems(self, items=None):
        """Extract the raw extension dicts from wrapped row items."""
        if items is None:
            items = super(InstallationList, self).get()
        extensions = [d["extension"] for d in items]
        return extensions
    def set(self, items):
        """Sort extensions case-insensitively by name before displaying."""
        items = sorted(items, key=lambda e: e['name'].lower())
        super(InstallationList, self).set(items)
    def get(self):
        """Return the currently displayed extension dicts (unwrapped)."""
        return self._unwrapListItems()
    def refresh(self):
        """Re-set the current items to re-wrap rows (e.g. install state changed)."""
        self.set(self.get())
    def open_repo(self, sender):
        """Open the GitHub page of every selected extension in the browser."""
        for item in self.selected:
            # presumably each extension dict has a 'repository' slug — verify.
            webbrowser.open('http://github.com/%s' % item['repository'])
    def filter(self, search):
        """Filter visible rows by substring match against the 'search' field."""
        arrayController = self.getNSTableView().dataSource()
        if not search:
            arrayController.setFilterPredicate_(None)
        else:
            search = 'search CONTAINS "%s"' % search.lower()
            predicate = NSPredicate.predicateWithFormat_(search)
            arrayController.setFilterPredicate_(predicate)
    @property
    def selected(self):
        """Extension dicts for the rows currently selected in the table view."""
        items = self.getNSTableView().dataSource().selectedObjects()
        if not self._itemsWereDict:
            items = [item["item"] for item in items]
        return self._unwrapListItems(items)
| 1,836
| 0
| 242
|
3d439768499921060939de124f36b2c3ee23e26c
| 3,305
|
py
|
Python
|
tests/boxes/util/test_filters.py
|
WildbookOrg/wbia-deprecate-tpl-brambox
|
9aa6a69f706d0653a65520c696a7cd66715b6a37
|
[
"MIT"
] | 2
|
2019-03-23T03:14:11.000Z
|
2019-11-21T07:16:13.000Z
|
tests/boxes/util/test_filters.py
|
WildbookOrg/wbia-deprecate-tpl-brambox
|
9aa6a69f706d0653a65520c696a7cd66715b6a37
|
[
"MIT"
] | null | null | null |
tests/boxes/util/test_filters.py
|
WildbookOrg/wbia-deprecate-tpl-brambox
|
9aa6a69f706d0653a65520c696a7cd66715b6a37
|
[
"MIT"
] | 1
|
2021-12-01T03:04:53.000Z
|
2021-12-01T03:04:53.000Z
|
# -*- coding: utf-8 -*-
import unittest
import brambox.boxes as bbb
if __name__ == '__main__':
unittest.main()
| 33.383838
| 81
| 0.632375
|
# -*- coding: utf-8 -*-
import unittest
import brambox.boxes as bbb
class TestImageBoundsFilter(unittest.TestCase):
    def setUp(self):
        # A 5x10 box at (2, 2): fully inside the (1, 1, 9, 13) bounds.
        self.box = bbb.Box()
        self.box.x_top_left = 2.0
        self.box.y_top_left = 2.0
        self.box.width = 5.0
        self.box.height = 10.0
        self.bounds = (1.0, 1.0, 9.0, 13.0)
        self.f = bbb.ImageBoundsFilter(self.bounds)

    def test_in_bounds(self):
        """A box strictly inside the bounds passes the filter."""
        self.assertTrue(self.f(self.box))

    def test_in_bound_edges(self):
        """A box that coincides exactly with the bounds still passes."""
        left, top, right, bottom = self.bounds
        self.box.x_top_left = left
        self.box.y_top_left = top
        self.box.width = right - left
        self.box.height = bottom - top
        self.assertTrue(self.f(self.box))

    def test_out_bounds_left(self):
        """A box protruding past the left edge is rejected."""
        self.box.x_top_left = self.bounds[0] - 1.0
        self.assertFalse(self.f(self.box))

    def test_out_bounds_right(self):
        """A box protruding past the right edge is rejected."""
        self.box.width = self.bounds[2]
        self.assertFalse(self.f(self.box))

    def test_out_bounds_top(self):
        """A box protruding past the top edge is rejected."""
        self.box.y_top_left = self.bounds[1] - 1.0
        self.assertFalse(self.f(self.box))

    def test_out_bounds_bottom(self):
        """A box protruding past the bottom edge is rejected."""
        self.box.height = self.bounds[3]
        self.assertFalse(self.f(self.box))
class TestOcclusionAreaFilter(unittest.TestCase):
    def setUp(self):
        # Filter keeps annotations whose visible fraction lies in [0.5, 0.7].
        self.anno = bbb.Annotation()
        self.visible_range = (0.5, 0.7)
        self.f = bbb.OcclusionAreaFilter(self.visible_range)

    def test_not_occluded(self):
        """Annotation is not occluded
        """
        # FIX: the old code did `self.occluded = False`, which set a stray
        # attribute on the TestCase and left the annotation untouched, so the
        # filter was only ever exercised against a default annotation.
        # The flag belongs on the annotation itself.
        # NOTE(review): assumes bbb.Annotation exposes a settable `occluded`
        # flag -- confirm against the brambox Annotation API.
        self.anno.occluded = False
        self.assertTrue(self.f(self.anno))

    def test_occlusion_in_range(self):
        """Annotation is occluded but in range of the allowed visible area
        """
        visible_fraction = 0.6
        self.anno.occluded_fraction = 1 - visible_fraction
        self.assertTrue(self.f(self.anno))

    def test_occlusion_in_range_upper_bound(self):
        """Annotation is occluded and the visible fraction equals the upper bound
        """
        visible_fraction = self.visible_range[1]
        self.anno.occluded_fraction = 1 - visible_fraction
        self.assertTrue(self.f(self.anno))

    def test_occlusion_in_range_lower_bound(self):
        """Annotation is occluded and the visible fraction equals the lower bound
        """
        visible_fraction = self.visible_range[0]
        self.anno.occluded_fraction = 1 - visible_fraction
        self.assertTrue(self.f(self.anno))

    def test_occlusion_outside_range_upper_bound(self):
        """Annotation is occluded and the visible fraction > the upper bound
        """
        visible_fraction = self.visible_range[1] + 0.1
        self.anno.occluded_fraction = 1 - visible_fraction
        self.assertFalse(self.f(self.anno))
# Allow running this test module directly: `python test_filters.py`.
if __name__ == '__main__':
    unittest.main()
| 423
| 2,717
| 46
|
853a6d2f24a1015080819d399104966c122d17e8
| 761
|
py
|
Python
|
snippets/minimal_quick_gui.py
|
GLmontanari/pdEFFE
|
71764540dcf837708723a051c6c0f475a8e95d06
|
[
"MIT"
] | null | null | null |
snippets/minimal_quick_gui.py
|
GLmontanari/pdEFFE
|
71764540dcf837708723a051c6c0f475a8e95d06
|
[
"MIT"
] | null | null | null |
snippets/minimal_quick_gui.py
|
GLmontanari/pdEFFE
|
71764540dcf837708723a051c6c0f475a8e95d06
|
[
"MIT"
] | null | null | null |
import sys
from os.path import abspath, dirname, join
from PyQt5.QtCore import QObject
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine
if __name__ == '__main__':
app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()
# Instance of the Python object
bridge = Bridge()
# Expose the Python object to QML
context = engine.rootContext()
context.setContextProperty("con", bridge)
# Get the path of the current directory, and then add the name
# of the QML file, to load it.
qmlFile = join(dirname(__file__), 'minimal.qml')
engine.load(abspath(qmlFile))
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec_())
| 23.060606
| 66
| 0.700394
|
import sys
from os.path import abspath, dirname, join
from PyQt5.QtCore import QObject
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine
class Bridge(QObject):
    """Minimal QObject that is exposed to QML as the `con` context property."""
    pass
if __name__ == '__main__':
    # Qt application and QML engine must exist before any QML is loaded.
    app = QGuiApplication(sys.argv)
    engine = QQmlApplicationEngine()

    # Instance of the Python object
    bridge = Bridge()

    # Expose the Python object to QML (must happen before engine.load so the
    # QML file can reference `con` at creation time).
    context = engine.rootContext()
    context.setContextProperty("con", bridge)

    # Get the path of the current directory, and then add the name
    # of the QML file, to load it.
    qmlFile = join(dirname(__file__), 'minimal.qml')
    engine.load(abspath(qmlFile))

    # No root objects means the QML failed to load/parse -- abort.
    if not engine.rootObjects():
        sys.exit(-1)

    sys.exit(app.exec_())
| 0
| 10
| 23
|
090c72d1ba8b02581515e14ab922e45df840c23a
| 6,375
|
py
|
Python
|
bot/main.py
|
Alohomora-team/2019.2-AlohomoraBot
|
7a4f0a7f12c3aa438a523f67200e4d1e59415cca
|
[
"MIT"
] | null | null | null |
bot/main.py
|
Alohomora-team/2019.2-AlohomoraBot
|
7a4f0a7f12c3aa438a523f67200e4d1e59415cca
|
[
"MIT"
] | 41
|
2019-10-11T05:22:14.000Z
|
2019-11-28T13:32:33.000Z
|
bot/main.py
|
Alohomora-team/2019.2-AlohomoraBot
|
7a4f0a7f12c3aa438a523f67200e4d1e59415cca
|
[
"MIT"
] | null | null | null |
"""
Start program
"""
import logging
import os
from commands import Commands
from admin.admin_auth import AdminAuth
from admin.notify_admin import NotifyAdmin
from admin.register_admin import RegisterAdmin
from feedback import Feedback
from resident.notify_resident import NotifyResident
from resident.register_resident import RegisterResident
from resident.resident_auth import ResidentAuth
from settings import *
from telegram.ext import CallbackQueryHandler
from telegram.ext import Updater, CommandHandler, MessageHandler, ConversationHandler, Filters
from visitor.register_visitor import RegisterVisitor
from visitor.visit import Visit
# Remove logs from APIs
logging.getLogger("telegram").setLevel(API_LOG_LEVEL)
logging.getLogger("JobQueue").setLevel(API_LOG_LEVEL)
# Logger
logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
logger = logging.getLogger(LOG_NAME)
logger.setLevel(LOG_LEVEL)
# FileHandler
file_handler = logging.FileHandler(FILE_NAME)
file_handler.setLevel(LOG_LEVEL)
f_format = logging.Formatter(FORMAT, datefmt=DATEFMT)
file_handler.setFormatter(f_format)
logger.addHandler(file_handler)
def start(update, context):
"""
Start interaction
"""
logger.info("Introducing the bot")
chat_id = update.message.chat_id
context.bot.send_message(
chat_id=chat_id,
parse_mode='Markdown',
text=
"""
Olá, bem vindo(a) ao bot do *Alohomora*!
*Comandos*
/morador - interações para moradores
/visitante - interações para visitante
/admin - interações para administradores
Para dar um _feedback_ pro nosso serviço, digite /feedback
"""
)
if __name__ == '__main__':
token = TOKEN
port = int(os.environ.get('PORT', '8443'))
updater = Updater(token, use_context=True)
logger.info("Starting Bot")
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
# Resident register
dp.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(RegisterResident.index, pattern='r1')],
states={
NAME:[MessageHandler(Filters.text, RegisterResident.name)],
PHONE:[MessageHandler(Filters.text | Filters.contact, RegisterResident.phone)],
EMAIL:[MessageHandler(Filters.text, RegisterResident.email)],
CPF:[MessageHandler(Filters.text, RegisterResident.cpf)],
BLOCK:[MessageHandler(Filters.text, RegisterResident.block)],
APARTMENT:[MessageHandler(Filters.text, RegisterResident.apartment)],
VOICE_REGISTER:[MessageHandler(Filters.voice, RegisterResident.voice_register)],
REPEAT_VOICE:[MessageHandler(Filters.text, RegisterResident.repeat_voice)],
PASSWORD: [MessageHandler(Filters.text, RegisterResident.password)]
},
fallbacks=[CommandHandler('cancelar', RegisterResident.end)]
))
# Resident authentication
dp.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(ResidentAuth.index, pattern='r2')],
states={
CHOOSE_AUTH:[MessageHandler(Filters.text, ResidentAuth.choose_auth)],
VOICE_AUTH:[MessageHandler(Filters.voice, ResidentAuth.voice)],
PASSWORD_AUTH:[MessageHandler(Filters.text, ResidentAuth.password)],
},
fallbacks=[CommandHandler('cancelar', ResidentAuth.end)]
))
# Visitor register
dp.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(RegisterVisitor.index, pattern='v1')],
states={
VISITOR_REGISTER_NAME:[MessageHandler(Filters.text, RegisterVisitor.name)],
VISITOR_REGISTER_CPF:[MessageHandler(Filters.text, RegisterVisitor.cpf)],
},
fallbacks=[CommandHandler('cancelar', RegisterVisitor.end)]
))
# Visit
dp.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(Visit.index, pattern='v2')],
states={
VISIT_BLOCK:[MessageHandler(Filters.text, Visit.block)],
VISIT_APARTMENT:[MessageHandler(Filters.text, Visit.apartment)],
},
fallbacks=[CommandHandler('cancelar', Visit.end)]
))
# Admin register
dp.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(RegisterAdmin.index, pattern='a1')],
states={
ADMIN_REGISTER_EMAIL: [MessageHandler(Filters.text, RegisterAdmin.email)],
ADMIN_REGISTER_PWD: [MessageHandler(Filters.text, RegisterAdmin.password)],
},
fallbacks=[CommandHandler('cancelar', RegisterAdmin.end)]
))
# Admin authentication
dp.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(AdminAuth.index, pattern='a2')],
states={
ADMIN_AUTH_EMAIL: [MessageHandler(Filters.text, AdminAuth.email)],
ADMIN_AUTH_PWD: [MessageHandler(Filters.text, AdminAuth.password)],
ADMIN_AUTH_REPEAT: [MessageHandler(Filters.text, AdminAuth.repeat)],
},
fallbacks=[CommandHandler('cancelar', AdminAuth.end)]
))
# Admin notification
dp.add_handler(CallbackQueryHandler(NotifyAdmin.approved, pattern='app'))
dp.add_handler(CallbackQueryHandler(NotifyAdmin.rejected, pattern='rej'))
# Resident notification
dp.add_handler(CallbackQueryHandler(NotifyResident.authorized, pattern='aut'))
dp.add_handler(CallbackQueryHandler(NotifyResident.refused, pattern='ref'))
# Feedback
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler('feedback', Feedback.index)],
states={
FEEDBACK: [MessageHandler(Filters.text, Feedback.store)],
},
fallbacks=[CommandHandler('cancelar', Feedback.end)]
))
# Listing resident commands
dp.add_handler(CommandHandler('morador', Commands.resident))
# Listing visitor commands
dp.add_handler(CommandHandler('visitante', Commands.visitor))
# Listing admin commands
dp.add_handler(CommandHandler('admin', Commands.admin))
if os.environ['DEPLOY'] == 'True':
updater.start_webhook(listen="0.0.0.0",
port=port,
url_path=token)
updater.bot.set_webhook(os.environ['URL'] + token)
elif os.environ['DEPLOY'] == 'False':
updater.start_polling()
updater.idle()
| 32.860825
| 94
| 0.696314
|
"""
Start program
"""
import logging
import os
from commands import Commands
from admin.admin_auth import AdminAuth
from admin.notify_admin import NotifyAdmin
from admin.register_admin import RegisterAdmin
from feedback import Feedback
from resident.notify_resident import NotifyResident
from resident.register_resident import RegisterResident
from resident.resident_auth import ResidentAuth
from settings import *
from telegram.ext import CallbackQueryHandler
from telegram.ext import Updater, CommandHandler, MessageHandler, ConversationHandler, Filters
from visitor.register_visitor import RegisterVisitor
from visitor.visit import Visit
# Remove logs from APIs
logging.getLogger("telegram").setLevel(API_LOG_LEVEL)
logging.getLogger("JobQueue").setLevel(API_LOG_LEVEL)
# Logger
logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
logger = logging.getLogger(LOG_NAME)
logger.setLevel(LOG_LEVEL)
# FileHandler
file_handler = logging.FileHandler(FILE_NAME)
file_handler.setLevel(LOG_LEVEL)
f_format = logging.Formatter(FORMAT, datefmt=DATEFMT)
file_handler.setFormatter(f_format)
logger.addHandler(file_handler)
def start(update, context):
    """
    Handle the /start command: greet the user and list the bot's
    top-level commands (resident, visitor, admin, feedback).
    """
    logger.info("Introducing the bot")
    chat_id = update.message.chat_id
    # Reply in the originating chat; the menu text is user-facing Portuguese
    # and is intentionally left untranslated.
    context.bot.send_message(
        chat_id=chat_id,
        parse_mode='Markdown',
        text=
        """
        Olá, bem vindo(a) ao bot do *Alohomora*!
        *Comandos*
        /morador - interações para moradores
        /visitante - interações para visitante
        /admin - interações para administradores
        Para dar um _feedback_ pro nosso serviço, digite /feedback
        """
        )
if __name__ == '__main__':
    # Entry point: wire every conversation flow into the dispatcher, then
    # serve updates -- webhook when deployed, long polling for local dev.
    token = TOKEN
    port = int(os.environ.get('PORT', '8443'))
    updater = Updater(token, use_context=True)
    logger.info("Starting Bot")
    dp = updater.dispatcher
    dp.add_handler(CommandHandler("start", start))
    # Resident register: multi-step form driven by conversation states
    # (state constants come from settings via the star import above).
    dp.add_handler(ConversationHandler(
        entry_points=[CallbackQueryHandler(RegisterResident.index, pattern='r1')],
        states={
            NAME:[MessageHandler(Filters.text, RegisterResident.name)],
            PHONE:[MessageHandler(Filters.text | Filters.contact, RegisterResident.phone)],
            EMAIL:[MessageHandler(Filters.text, RegisterResident.email)],
            CPF:[MessageHandler(Filters.text, RegisterResident.cpf)],
            BLOCK:[MessageHandler(Filters.text, RegisterResident.block)],
            APARTMENT:[MessageHandler(Filters.text, RegisterResident.apartment)],
            VOICE_REGISTER:[MessageHandler(Filters.voice, RegisterResident.voice_register)],
            REPEAT_VOICE:[MessageHandler(Filters.text, RegisterResident.repeat_voice)],
            PASSWORD: [MessageHandler(Filters.text, RegisterResident.password)]
        },
        fallbacks=[CommandHandler('cancelar', RegisterResident.end)]
    ))
    # Resident authentication (voice or password)
    dp.add_handler(ConversationHandler(
        entry_points=[CallbackQueryHandler(ResidentAuth.index, pattern='r2')],
        states={
            CHOOSE_AUTH:[MessageHandler(Filters.text, ResidentAuth.choose_auth)],
            VOICE_AUTH:[MessageHandler(Filters.voice, ResidentAuth.voice)],
            PASSWORD_AUTH:[MessageHandler(Filters.text, ResidentAuth.password)],
        },
        fallbacks=[CommandHandler('cancelar', ResidentAuth.end)]
    ))
    # Visitor register
    dp.add_handler(ConversationHandler(
        entry_points=[CallbackQueryHandler(RegisterVisitor.index, pattern='v1')],
        states={
            VISITOR_REGISTER_NAME:[MessageHandler(Filters.text, RegisterVisitor.name)],
            VISITOR_REGISTER_CPF:[MessageHandler(Filters.text, RegisterVisitor.cpf)],
        },
        fallbacks=[CommandHandler('cancelar', RegisterVisitor.end)]
    ))
    # Visit
    dp.add_handler(ConversationHandler(
        entry_points=[CallbackQueryHandler(Visit.index, pattern='v2')],
        states={
            VISIT_BLOCK:[MessageHandler(Filters.text, Visit.block)],
            VISIT_APARTMENT:[MessageHandler(Filters.text, Visit.apartment)],
        },
        fallbacks=[CommandHandler('cancelar', Visit.end)]
    ))
    # Admin register
    dp.add_handler(ConversationHandler(
        entry_points=[CallbackQueryHandler(RegisterAdmin.index, pattern='a1')],
        states={
            ADMIN_REGISTER_EMAIL: [MessageHandler(Filters.text, RegisterAdmin.email)],
            ADMIN_REGISTER_PWD: [MessageHandler(Filters.text, RegisterAdmin.password)],
        },
        fallbacks=[CommandHandler('cancelar', RegisterAdmin.end)]
    ))
    # Admin authentication
    dp.add_handler(ConversationHandler(
        entry_points=[CallbackQueryHandler(AdminAuth.index, pattern='a2')],
        states={
            ADMIN_AUTH_EMAIL: [MessageHandler(Filters.text, AdminAuth.email)],
            ADMIN_AUTH_PWD: [MessageHandler(Filters.text, AdminAuth.password)],
            ADMIN_AUTH_REPEAT: [MessageHandler(Filters.text, AdminAuth.repeat)],
        },
        fallbacks=[CommandHandler('cancelar', AdminAuth.end)]
    ))
    # Admin notification: inline-button callbacks for approve/reject
    dp.add_handler(CallbackQueryHandler(NotifyAdmin.approved, pattern='app'))
    dp.add_handler(CallbackQueryHandler(NotifyAdmin.rejected, pattern='rej'))
    # Resident notification: inline-button callbacks for authorize/refuse
    dp.add_handler(CallbackQueryHandler(NotifyResident.authorized, pattern='aut'))
    dp.add_handler(CallbackQueryHandler(NotifyResident.refused, pattern='ref'))
    # Feedback
    dp.add_handler(ConversationHandler(
        entry_points=[CommandHandler('feedback', Feedback.index)],
        states={
            FEEDBACK: [MessageHandler(Filters.text, Feedback.store)],
        },
        fallbacks=[CommandHandler('cancelar', Feedback.end)]
    ))
    # Listing resident commands
    dp.add_handler(CommandHandler('morador', Commands.resident))
    # Listing visitor commands
    dp.add_handler(CommandHandler('visitante', Commands.visitor))
    # Listing admin commands
    dp.add_handler(CommandHandler('admin', Commands.admin))
    # DEPLOY env var selects webhook mode (hosted) vs. long polling (local).
    if os.environ['DEPLOY'] == 'True':
        updater.start_webhook(listen="0.0.0.0",
                              port=port,
                              url_path=token)
        updater.bot.set_webhook(os.environ['URL'] + token)
    elif os.environ['DEPLOY'] == 'False':
        updater.start_polling()
    # Block until interrupted, handling shutdown signals gracefully.
    updater.idle()
| 0
| 0
| 0
|
48cf55ac21dae1be0d14091ee631c6e3e8168d90
| 6,829
|
py
|
Python
|
sparkly/functions.py
|
Tubular/sparkly
|
015dbc830648e20ffeb6bf95a94a760c1fb94b6d
|
[
"Apache-2.0"
] | 31
|
2016-12-03T06:54:54.000Z
|
2018-09-08T01:03:57.000Z
|
sparkly/functions.py
|
Tubular/sparkly
|
015dbc830648e20ffeb6bf95a94a760c1fb94b6d
|
[
"Apache-2.0"
] | 23
|
2016-12-02T18:41:10.000Z
|
2018-06-08T23:54:34.000Z
|
sparkly/functions.py
|
Tubular/sparkly
|
015dbc830648e20ffeb6bf95a94a760c1fb94b6d
|
[
"Apache-2.0"
] | 4
|
2016-12-31T19:15:21.000Z
|
2018-02-12T04:44:59.000Z
|
#
# Copyright 2017 Tubular Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from functools import reduce
import operator
from six import string_types
from pyspark.sql import Column
from pyspark.sql import functions as F
def multijoin(dfs, on=None, how=None, coalesce=None):
"""Join multiple dataframes.
Args:
dfs (list[pyspark.sql.DataFrame]).
on: same as ``pyspark.sql.DataFrame.join``.
how: same as ``pyspark.sql.DataFrame.join``.
coalesce (list[str]): column names to disambiguate by coalescing
across the input dataframes. A column must be of the same type
across all dataframes that define it; if different types appear
coalesce will do a best-effort attempt in merging them. The
selected value is the first non-null one in order of appearance
of the dataframes in the input list. Default is None - don't
coalesce any ambiguous columns.
Returns:
pyspark.sql.DataFrame or None if provided dataframe list is empty.
Example:
Assume we have two DataFrames, the first is
``first = [{'id': 1, 'value': None}, {'id': 2, 'value': 2}]``
and the second is
``second = [{'id': 1, 'value': 1}, {'id': 2, 'value': 22}]``
Then collecting the ``DataFrame`` produced by
``multijoin([first, second], on='id', how='inner', coalesce=['value'])``
yields ``[{'id': 1, 'value': 1}, {'id': 2, 'value': 2}]``.
"""
if not dfs:
return None
# Go over the input dataframes and rename each to-be-resolved
# column to ensure name uniqueness
coalesce = set(coalesce or [])
renamed_columns = defaultdict(list)
for idx, df in enumerate(dfs):
for col in df.columns:
if col in coalesce:
disambiguation = '__{}_{}'.format(idx, col)
df = df.withColumnRenamed(col, disambiguation)
renamed_columns[col].append(disambiguation)
dfs[idx] = df
# Join the dataframes
joined_df = reduce(lambda x, y: x.join(y, on=on, how=how), dfs)
# And coalesce the would-have-been-ambiguities
for col, disambiguations in renamed_columns.items():
joined_df = joined_df.withColumn(col, F.coalesce(*disambiguations))
for disambiguation in disambiguations:
joined_df = joined_df.drop(disambiguation)
return joined_df
def switch_case(switch, case=None, default=None, operand=operator.eq, **additional_cases):
"""Switch/case style column generation.
Args:
switch (str, pyspark.sql.Column): column to "switch" on;
its values are going to be compared against defined cases.
case (dict): case statements. When a key matches the value of
the column in a specific row, the respective value will be
assigned to the new column for that row. This is useful when
your case condition constants are not strings.
default: default value to be used when the value of the switch
column doesn't match any keys.
operand: function to compare the value of the switch column to the
value of each case. Default is Column's eq. If user-provided,
first argument will always be the switch Column; it's the
user's responsibility to transform the case value to a column
if they need to.
additional_cases: additional "case" statements, kwargs style.
Same semantics with cases above. If both are provided,
cases takes precedence.
Returns:
pyspark.sql.Column
Example:
``switch_case('state', CA='California', NY='New York', default='Other')``
is equivalent to
>>> F.when(
... F.col('state') == 'CA', 'California'
).when(
... F.col('state') == 'NY', 'New York'
).otherwise('Other')
If you need to "bucketize" a value
``switch_case('age', {(13, 17): 1, (18, 24): 2, ...}, operand=lambda c, v: c.between(*v))``
is equivalent to
>>> F.when(
... F.col('age').between(13, 17), F.lit(1)
).when(
... F.col('age').between(18, 24), F.lit(2)
)
"""
if not isinstance(switch, Column):
switch = F.col(switch)
cases = case or {}
for conflict in set(cases.keys()) & set(additional_cases.keys()):
del additional_cases[conflict]
cases = list(cases.items()) + list(additional_cases.items())
default = _column_or_lit(default)
if not cases:
return default
result = reduce(_execute_case, cases, F).otherwise(default)
return result
def argmax(field, by, condition=None):
"""Select a value from the row that maximizes other column(s)
Args:
field (string, pyspark.sql.Column): the field to return that maximizes the "by" columns
by (*string, *pyspark.sql.Column): field or list of fields to maximize. In reality, this
will usually be only one field. But you may use multiple for tiebreakers
condition (optional): Only consider the entities that pass this condition
Returns:
pyspark.sql.Column
Example:
df = (
df
.groupBy('id')
.agg(argmax('field1', 'by_field'))
)
argmax('field1', ['by_field1', 'by_field2'], condition=F.col('col') == 1)
argmax(F.col('field1'), [F.col('by_field1'), F.col('by_field2')], condition=F.lit(True))
"""
if not isinstance(by, list):
by = [by]
if isinstance(field, string_types):
field = F.col(field)
by.append(field.alias('__tmp_argmax__'))
result = F.struct(*by)
if condition is not None:
result = F.when(condition, result)
result = F.max(result).getField('__tmp_argmax__')
return result
| 35.201031
| 99
| 0.633768
|
#
# Copyright 2017 Tubular Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from functools import reduce
import operator
from six import string_types
from pyspark.sql import Column
from pyspark.sql import functions as F
def multijoin(dfs, on=None, how=None, coalesce=None):
    """Join multiple dataframes.

    Args:
        dfs (list[pyspark.sql.DataFrame]).
        on: same as ``pyspark.sql.DataFrame.join``.
        how: same as ``pyspark.sql.DataFrame.join``.
        coalesce (list[str]): column names to disambiguate by coalescing
            across the input dataframes. A column must be of the same type
            across all dataframes that define it; if different types appear
            coalesce will do a best-effort attempt in merging them. The
            selected value is the first non-null one in order of appearance
            of the dataframes in the input list. Default is None - don't
            coalesce any ambiguous columns.

    Returns:
        pyspark.sql.DataFrame or None if provided dataframe list is empty.

    Example:
        Assume we have two DataFrames, the first is
        ``first = [{'id': 1, 'value': None}, {'id': 2, 'value': 2}]``
        and the second is
        ``second = [{'id': 1, 'value': 1}, {'id': 2, 'value': 22}]``
        Then collecting the ``DataFrame`` produced by
        ``multijoin([first, second], on='id', how='inner', coalesce=['value'])``
        yields ``[{'id': 1, 'value': 1}, {'id': 2, 'value': 2}]``.
    """
    if not dfs:
        return None

    # FIX: work on a copy -- the renaming pass below rewrites entries in
    # place (``dfs[idx] = df``), and the original code mutated the caller's
    # list as a side effect.
    dfs = list(dfs)

    # Go over the input dataframes and rename each to-be-resolved
    # column to ensure name uniqueness
    coalesce = set(coalesce or [])
    renamed_columns = defaultdict(list)
    for idx, df in enumerate(dfs):
        for col in df.columns:
            if col in coalesce:
                disambiguation = '__{}_{}'.format(idx, col)
                df = df.withColumnRenamed(col, disambiguation)
                renamed_columns[col].append(disambiguation)
        dfs[idx] = df

    # Join the dataframes
    joined_df = reduce(lambda x, y: x.join(y, on=on, how=how), dfs)

    # And coalesce the would-have-been-ambiguities: take the first non-null
    # value in dataframe order, then drop the temporary renamed columns.
    for col, disambiguations in renamed_columns.items():
        joined_df = joined_df.withColumn(col, F.coalesce(*disambiguations))
        for disambiguation in disambiguations:
            joined_df = joined_df.drop(disambiguation)

    return joined_df
def switch_case(switch, case=None, default=None, operand=operator.eq, **additional_cases):
    """Switch/case style column generation.

    Args:
        switch (str, pyspark.sql.Column): column to "switch" on;
            its values are going to be compared against defined cases.
        case (dict): case statements. When a key matches the value of
            the column in a specific row, the respective value will be
            assigned to the new column for that row. This is useful when
            your case condition constants are not strings.
        default: default value to be used when the value of the switch
            column doesn't match any keys.
        operand: function to compare the value of the switch column to the
            value of each case. Default is Column's eq. If user-provided,
            first argument will always be the switch Column; it's the
            user's responsibility to transform the case value to a column
            if they need to.
        additional_cases: additional "case" statements, kwargs style.
            Same semantics with cases above. If both are provided,
            cases takes precedence.

    Returns:
        pyspark.sql.Column

    Example:
        ``switch_case('state', CA='California', NY='New York', default='Other')``
        builds ``F.when(F.col('state') == 'CA', 'California')
        .when(F.col('state') == 'NY', 'New York').otherwise('Other')``.

        To "bucketize" a value, supply a custom operand:
        ``switch_case('age', {(13, 17): 1, (18, 24): 2},
        operand=lambda c, v: c.between(*v))``.
    """
    if not isinstance(switch, Column):
        switch = F.col(switch)

    def _as_column(value):
        # Wrap plain values in a literal Column; Columns pass through as-is.
        return value if isinstance(value, Column) else F.lit(value)

    primary = case or {}
    # On key conflicts, entries from `case` win over kwargs-style ones.
    for key in set(primary.keys()) & set(additional_cases.keys()):
        del additional_cases[key]
    all_cases = list(primary.items()) + list(additional_cases.items())

    default = _as_column(default)
    if not all_cases:
        return default

    # Chain F.when(...).when(...)... and close with .otherwise(default).
    chained = F
    for constant, outcome in all_cases:
        chained = chained.when(operand(switch, constant), _as_column(outcome))
    return chained.otherwise(default)
def argmax(field, by, condition=None):
    """Select a value from the row that maximizes other column(s)

    Args:
        field (string, pyspark.sql.Column): the field to return that maximizes the "by" columns
        by (*string, *pyspark.sql.Column): field or list of fields to maximize. In reality, this
            will usually be only one field. But you may use multiple for tiebreakers
        condition (optional): Only consider the entities that pass this condition

    Returns:
        pyspark.sql.Column

    Example:
        df = (
            df
            .groupBy('id')
            .agg(argmax('field1', 'by_field'))
        )

        argmax('field1', ['by_field1', 'by_field2'], condition=F.col('col') == 1)
        argmax(F.col('field1'), [F.col('by_field1'), F.col('by_field2')], condition=F.lit(True))
    """
    # FIX: always build a fresh list -- the original appended the aliased
    # field straight onto the caller's list, mutating their argument.
    if not isinstance(by, list):
        by = [by]
    else:
        by = list(by)
    if isinstance(field, string_types):
        field = F.col(field)
    # Tuck the target field into the struct so max() carries it along;
    # struct comparison orders by the `by` fields first.
    by.append(field.alias('__tmp_argmax__'))
    result = F.struct(*by)
    if condition is not None:
        # Rows failing the condition become null and are ignored by max().
        result = F.when(condition, result)
    result = F.max(result).getField('__tmp_argmax__')
    return result
| 385
| 0
| 54
|
1a1d04e177e1156b4c571a0df1d6af5d60cdb792
| 281
|
py
|
Python
|
dev/circuitpython/examples/tc74_simpletest.py
|
scripsi/picodeebee
|
0ec77e92f09fa8711705623482e57a5e0b702696
|
[
"MIT"
] | null | null | null |
dev/circuitpython/examples/tc74_simpletest.py
|
scripsi/picodeebee
|
0ec77e92f09fa8711705623482e57a5e0b702696
|
[
"MIT"
] | null | null | null |
dev/circuitpython/examples/tc74_simpletest.py
|
scripsi/picodeebee
|
0ec77e92f09fa8711705623482e57a5e0b702696
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import adafruit_tc74
i2c = board.I2C()
tc = adafruit_tc74.TC74(i2c)
while True:
print(f"Temperature: {tc.temperature} C")
time.sleep(0.5)
| 20.071429
| 63
| 0.715302
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT

# Poll a TC74 temperature sensor over I2C and print a reading twice a second.
import time
import board
import adafruit_tc74

i2c = board.I2C()  # board's default I2C bus
tc = adafruit_tc74.TC74(i2c)

# Runs forever; stop with Ctrl-C (typical CircuitPython example pattern).
while True:
    print(f"Temperature: {tc.temperature} C")
    time.sleep(0.5)
| 0
| 0
| 0
|
19ce18ffdc412e7bb0a56f2e80ba483d5e2f76c1
| 2,819
|
py
|
Python
|
scripts/choosesubject.py
|
bhrt96/e-Learning-Portal
|
5ca4d7cc355999ddfcb2cfff18d9832d555cc081
|
[
"MIT"
] | null | null | null |
scripts/choosesubject.py
|
bhrt96/e-Learning-Portal
|
5ca4d7cc355999ddfcb2cfff18d9832d555cc081
|
[
"MIT"
] | null | null | null |
scripts/choosesubject.py
|
bhrt96/e-Learning-Portal
|
5ca4d7cc355999ddfcb2cfff18d9832d555cc081
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import MySQLdb
import cgi
import Cookie
import os
if 'HTTP_COOKIE' in os.environ :
cookie = Cookie.SimpleCookie(os.environ["HTTP_COOKIE"])
else :
print "error"
print "Content-type:text/html\r\n\r\n"
db = MySQLdb.connect('localhost','root','1315','quiz', unix_socket="/opt/lampp/var/mysql/mysql.sock")
cursor = db.cursor()
cursor.execute("""select username from session where sessionId= '%s'"""% cookie["session"].value)
username = cursor.fetchone()
if username == None:
print "<script>window.alert('Please login to continue');</script>"
print "<script>window.location.assign('/cgi-bin/scripts/signin.py')</script>"
sql = "SELECT DISTINCT subject FROM teacher ORDER BY subject"
cursor.execute(sql)
var = cursor.fetchall()
print """
<!doctype html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
<link rel = "stylesheet" href = "/css/materialize.css" />
<script src = "/js/jquery-1.12.2.js"></script>
<script src = "/js/materialize.js"></script>
</head>
<body background="/elearning/books2.jpg" class ="">
<div class="row">
<div class="white col s12" style="margin:auto">
<div class="row">
<div class="col s8">"""
cursor.execute("""select studentId from studentRegister where username ='%s'"""%username)
stId=cursor.fetchone()
cursor.execute("""select name from student where studentId='%d'"""% stId[0])
name= cursor.fetchone()
print""" <h5 class="teal-text">%s</h5>"""% name[0]
print"""
</div>
<div class = " col s2" id = "page-controls">
<p class = "right-align" style="margin-right:-200px">
<a href = "/cgi-bin/scripts/signin.py" class = " tooltipped " data-position="bottom" data-delay="50" data-tooltip="Logout" style="cursor:pointer">
<i class = "material-icons">power_settings_new</i>
</a>
</p>
</div>
</div>
</div>
<div class="card-panel col s4 offset-s1" style="margin-top:10%">
<div class = "row">
<form class = "col s12" action = "/cgi-bin/scripts/questions.py" method = "post">
<h4 class="center-align">Subject</h4>
<div class="input-field col s12">
<select name = "subject" required>
<option value="" disabled selected>Choose your option</option>"""
for i in var:
print """<option value=%s>%s</option>""" %(i[0],i[0])
print"""
</select>
</div>
<div class = "row center">
<button class="btn waves-effect waves-light" type="submit" name="docregister">
Submit
</button>
</div>
</form>
</div>
</div>
</div>
</body>
<script>
$(document).ready(function() {
$('select').material_select();
});
</script>
</html>"""
db.close()
| 28.765306
| 151
| 0.628592
|
#!/usr/bin/python
import MySQLdb
import cgi
import Cookie
import os
if 'HTTP_COOKIE' in os.environ :
cookie = Cookie.SimpleCookie(os.environ["HTTP_COOKIE"])
else :
print "error"
print "Content-type:text/html\r\n\r\n"
db = MySQLdb.connect('localhost','root','1315','quiz', unix_socket="/opt/lampp/var/mysql/mysql.sock")
cursor = db.cursor()
cursor.execute("""select username from session where sessionId= '%s'"""% cookie["session"].value)
username = cursor.fetchone()
if username == None:
print "<script>window.alert('Please login to continue');</script>"
print "<script>window.location.assign('/cgi-bin/scripts/signin.py')</script>"
sql = "SELECT DISTINCT subject FROM teacher ORDER BY subject"
cursor.execute(sql)
var = cursor.fetchall()
print """
<!doctype html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
<link rel = "stylesheet" href = "/css/materialize.css" />
<script src = "/js/jquery-1.12.2.js"></script>
<script src = "/js/materialize.js"></script>
</head>
<body background="/elearning/books2.jpg" class ="">
<div class="row">
<div class="white col s12" style="margin:auto">
<div class="row">
<div class="col s8">"""
cursor.execute("""select studentId from studentRegister where username ='%s'"""%username)
stId=cursor.fetchone()
cursor.execute("""select name from student where studentId='%d'"""% stId[0])
name= cursor.fetchone()
print""" <h5 class="teal-text">%s</h5>"""% name[0]
print"""
</div>
<div class = " col s2" id = "page-controls">
<p class = "right-align" style="margin-right:-200px">
<a href = "/cgi-bin/scripts/signin.py" class = " tooltipped " data-position="bottom" data-delay="50" data-tooltip="Logout" style="cursor:pointer">
<i class = "material-icons">power_settings_new</i>
</a>
</p>
</div>
</div>
</div>
<div class="card-panel col s4 offset-s1" style="margin-top:10%">
<div class = "row">
<form class = "col s12" action = "/cgi-bin/scripts/questions.py" method = "post">
<h4 class="center-align">Subject</h4>
<div class="input-field col s12">
<select name = "subject" required>
<option value="" disabled selected>Choose your option</option>"""
for i in var:
print """<option value=%s>%s</option>""" %(i[0],i[0])
print"""
</select>
</div>
<div class = "row center">
<button class="btn waves-effect waves-light" type="submit" name="docregister">
Submit
</button>
</div>
</form>
</div>
</div>
</div>
</body>
<script>
$(document).ready(function() {
$('select').material_select();
});
</script>
</html>"""
db.close()
| 0
| 0
| 0
|
b1770432e35eabd76a00e16b0c841f95caab2259
| 97
|
py
|
Python
|
common/__init__.py
|
BravoHaruki/api_auto_test
|
31a61609a7d4d39b361169c5b1aeedc604abb8af
|
[
"MIT"
] | null | null | null |
common/__init__.py
|
BravoHaruki/api_auto_test
|
31a61609a7d4d39b361169c5b1aeedc604abb8af
|
[
"MIT"
] | null | null | null |
common/__init__.py
|
BravoHaruki/api_auto_test
|
31a61609a7d4d39b361169c5b1aeedc604abb8af
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2022/1/13 10:13 下午
# @Author : Haruki
# @File : __init__.py.py
| 19.4
| 28
| 0.56701
|
# -*- coding: utf-8 -*-
# @Time : 2022/1/13 10:13 下午
# @Author : Haruki
# @File : __init__.py.py
| 0
| 0
| 0
|
c3dc24353e71d3809f99fba2bd61617a0d602135
| 2,570
|
py
|
Python
|
hw_diag/tests/diagnostics/test_serial_number_diagnostic.py
|
KevinWassermann94/hm-diag
|
593a53f741b64c9fa029f5643b50abece9ce117b
|
[
"MIT"
] | 14
|
2021-04-15T02:20:44.000Z
|
2022-02-23T19:46:34.000Z
|
hw_diag/tests/diagnostics/test_serial_number_diagnostic.py
|
KevinWassermann94/hm-diag
|
593a53f741b64c9fa029f5643b50abece9ce117b
|
[
"MIT"
] | 245
|
2021-02-05T12:35:59.000Z
|
2022-03-31T14:50:53.000Z
|
hw_diag/tests/diagnostics/test_serial_number_diagnostic.py
|
KevinWassermann94/hm-diag
|
593a53f741b64c9fa029f5643b50abece9ce117b
|
[
"MIT"
] | 16
|
2021-02-15T11:25:33.000Z
|
2022-03-14T23:49:34.000Z
|
import unittest
from unittest.mock import patch, mock_open
from hm_pyhelper.diagnostics.diagnostics_report import \
DIAGNOSTICS_PASSED_KEY, DIAGNOSTICS_ERRORS_KEY, DiagnosticsReport
from hw_diag.diagnostics.serial_number_diagnostic import SerialNumberDiagnostic
VALID_CPU_PROC = """00000000ddd1a4c2"""
PADDED_CPU_PROC = "%s\x00" % VALID_CPU_PROC
| 38.358209
| 79
| 0.695331
|
import unittest
from unittest.mock import patch, mock_open
from hm_pyhelper.diagnostics.diagnostics_report import \
DIAGNOSTICS_PASSED_KEY, DIAGNOSTICS_ERRORS_KEY, DiagnosticsReport
from hw_diag.diagnostics.serial_number_diagnostic import SerialNumberDiagnostic
VALID_CPU_PROC = """00000000ddd1a4c2"""
PADDED_CPU_PROC = "%s\x00" % VALID_CPU_PROC
class TestSerialNumberDiagnostic(unittest.TestCase):
@patch("builtins.open", new_callable=mock_open, read_data=VALID_CPU_PROC)
def test_success(self, mock):
diagnostic = SerialNumberDiagnostic()
diagnostics_report = DiagnosticsReport([diagnostic])
diagnostics_report.perform_diagnostics()
self.assertDictEqual(diagnostics_report, {
DIAGNOSTICS_PASSED_KEY: True,
DIAGNOSTICS_ERRORS_KEY: [],
'serial_number': '00000000ddd1a4c2',
'serial_number': '00000000ddd1a4c2'
})
@patch("builtins.open", new_callable=mock_open, read_data=PADDED_CPU_PROC)
def test_success_strip(self, mock):
diagnostic = SerialNumberDiagnostic()
diagnostics_report = DiagnosticsReport([diagnostic])
diagnostics_report.perform_diagnostics()
self.assertDictEqual(diagnostics_report, {
DIAGNOSTICS_PASSED_KEY: True,
DIAGNOSTICS_ERRORS_KEY: [],
'serial_number': '00000000ddd1a4c2',
'serial_number': '00000000ddd1a4c2'
})
@patch("builtins.open", new_callable=mock_open)
def test_filenotfound(self, mock):
mock.side_effect = FileNotFoundError("File not found")
diagnostic = SerialNumberDiagnostic()
diagnostics_report = DiagnosticsReport([diagnostic])
diagnostics_report.perform_diagnostics()
self.assertDictEqual(diagnostics_report, {
DIAGNOSTICS_PASSED_KEY: False,
DIAGNOSTICS_ERRORS_KEY: ['serial_number'],
'serial_number': 'File not found',
'serial_number': 'File not found'
})
@patch("builtins.open", new_callable=mock_open)
def test_permissionerror(self, mock):
mock.side_effect = PermissionError("Bad permissions")
diagnostic = SerialNumberDiagnostic()
diagnostics_report = DiagnosticsReport([diagnostic])
diagnostics_report.perform_diagnostics()
self.assertDictEqual(diagnostics_report, {
DIAGNOSTICS_PASSED_KEY: False,
DIAGNOSTICS_ERRORS_KEY: ['serial_number'],
'serial_number': 'Bad permissions',
'serial_number': 'Bad permissions'
})
| 1,794
| 400
| 23
|
54fc72b7e05eee4bcb468a02e6bab0f8218a0fa6
| 2,136
|
py
|
Python
|
src/main/script/simple.py
|
stefano-bragaglia/PyFoil
|
eb558659f2d0e1298ebd7b854c3ecdeb1a007bda
|
[
"BSD-2-Clause"
] | null | null | null |
src/main/script/simple.py
|
stefano-bragaglia/PyFoil
|
eb558659f2d0e1298ebd7b854c3ecdeb1a007bda
|
[
"BSD-2-Clause"
] | null | null | null |
src/main/script/simple.py
|
stefano-bragaglia/PyFoil
|
eb558659f2d0e1298ebd7b854c3ecdeb1a007bda
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
from foil.learning import foil
from foil.learning import get_closure
from foil.learning import get_constants
from foil.learning import get_masks
from foil.models import Clause
from foil.models import Example
from foil.models import Literal
from foil.models import Program
if __name__ == '__main__':
target = Literal.parse('path(X,Y)')
examples = [
Example({'X': 0, 'Y': 1}), Example({'X': 0, 'Y': 2}), Example({'X': 0, 'Y': 3}),
Example({'X': 0, 'Y': 4}), Example({'X': 0, 'Y': 5}), Example({'X': 0, 'Y': 6}),
Example({'X': 0, 'Y': 8}), Example({'X': 1, 'Y': 2}), Example({'X': 3, 'Y': 2}),
Example({'X': 3, 'Y': 4}), Example({'X': 3, 'Y': 5}), Example({'X': 3, 'Y': 6}),
Example({'X': 3, 'Y': 8}), Example({'X': 4, 'Y': 5}), Example({'X': 4, 'Y': 6}),
Example({'X': 4, 'Y': 8}), Example({'X': 6, 'Y': 8}), Example({'X': 7, 'Y': 6}),
Example({'X': 7, 'Y': 8}),
]
background = [
Clause.parse('edge(0,1).'), Clause.parse('edge(0,3).'), Clause.parse('edge(1,2).'),
Clause.parse('edge(3,2).'), Clause.parse('edge(3,4).'), Clause.parse('edge(4,5).'),
Clause.parse('edge(4,6).'), Clause.parse('edge(6,8).'), Clause.parse('edge(7,6).'),
Clause.parse('edge(7,8).'),
]
for i in range(10):
with Measure():
print()
constants = get_constants([target, *{l for c in background for l in c.literals}])
print()
world = Program(background).ground()
print()
positives, negatives = get_closure(target, constants, world, examples)
print()
masks = get_masks([target, *{l for c in background for l in c.literals}])
print()
for clause in foil(target, background, masks, constants, positives, negatives):
print(clause)
| 38.836364
| 93
| 0.546348
|
import datetime
from foil.learning import foil
from foil.learning import get_closure
from foil.learning import get_constants
from foil.learning import get_masks
from foil.models import Clause
from foil.models import Example
from foil.models import Literal
from foil.models import Program
class Measure:
def __init__(self):
self.elapsed = None
def __enter__(self):
self.elapsed = datetime.datetime.now()
def __exit__(self, ty, val, tb):
print('\nElapsed time: %s sec.\n' % (datetime.datetime.now() - self.elapsed))
if __name__ == '__main__':
target = Literal.parse('path(X,Y)')
examples = [
Example({'X': 0, 'Y': 1}), Example({'X': 0, 'Y': 2}), Example({'X': 0, 'Y': 3}),
Example({'X': 0, 'Y': 4}), Example({'X': 0, 'Y': 5}), Example({'X': 0, 'Y': 6}),
Example({'X': 0, 'Y': 8}), Example({'X': 1, 'Y': 2}), Example({'X': 3, 'Y': 2}),
Example({'X': 3, 'Y': 4}), Example({'X': 3, 'Y': 5}), Example({'X': 3, 'Y': 6}),
Example({'X': 3, 'Y': 8}), Example({'X': 4, 'Y': 5}), Example({'X': 4, 'Y': 6}),
Example({'X': 4, 'Y': 8}), Example({'X': 6, 'Y': 8}), Example({'X': 7, 'Y': 6}),
Example({'X': 7, 'Y': 8}),
]
background = [
Clause.parse('edge(0,1).'), Clause.parse('edge(0,3).'), Clause.parse('edge(1,2).'),
Clause.parse('edge(3,2).'), Clause.parse('edge(3,4).'), Clause.parse('edge(4,5).'),
Clause.parse('edge(4,6).'), Clause.parse('edge(6,8).'), Clause.parse('edge(7,6).'),
Clause.parse('edge(7,8).'),
]
for i in range(10):
with Measure():
print()
constants = get_constants([target, *{l for c in background for l in c.literals}])
print()
world = Program(background).ground()
print()
positives, negatives = get_closure(target, constants, world, examples)
print()
masks = get_masks([target, *{l for c in background for l in c.literals}])
print()
for clause in foil(target, background, masks, constants, positives, negatives):
print(clause)
| 169
| -7
| 103
|
5ac31a8b6e6c6c659d738d0a0694fa092ebb8ca6
| 118
|
py
|
Python
|
tests/test_alternate.py
|
gypsyx/pytools
|
cf2b450881ea6a2c147be3a833f411b199a67e63
|
[
"MIT"
] | 1
|
2020-01-22T07:58:23.000Z
|
2020-01-22T07:58:23.000Z
|
tests/test_alternate.py
|
gypsyx/pytools
|
cf2b450881ea6a2c147be3a833f411b199a67e63
|
[
"MIT"
] | null | null | null |
tests/test_alternate.py
|
gypsyx/pytools
|
cf2b450881ea6a2c147be3a833f411b199a67e63
|
[
"MIT"
] | null | null | null |
from pytools.alternate import is_alternate
| 19.666667
| 42
| 0.737288
|
from pytools.alternate import is_alternate
def test_alternate():
s = 'ababab'
assert is_alternate(s) is True
| 52
| 0
| 23
|
91517275978a93eac0e935ebcbcd444074086821
| 1,018
|
py
|
Python
|
summarize/nnsum/nnsum/module/attention/bilinear_softmax_attention.py
|
AIPHES/live-blog-summarization
|
a5f899ea07a098e1e0b3ab92cd3d430776e6412a
|
[
"Apache-2.0"
] | 2
|
2019-01-17T17:43:09.000Z
|
2019-01-17T17:50:38.000Z
|
summarize/nnsum/nnsum/module/attention/bilinear_softmax_attention.py
|
AIPHES/live-blog-summarization
|
a5f899ea07a098e1e0b3ab92cd3d430776e6412a
|
[
"Apache-2.0"
] | null | null | null |
summarize/nnsum/nnsum/module/attention/bilinear_softmax_attention.py
|
AIPHES/live-blog-summarization
|
a5f899ea07a098e1e0b3ab92cd3d430776e6412a
|
[
"Apache-2.0"
] | 1
|
2021-09-19T08:29:08.000Z
|
2021-09-19T08:29:08.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
| 31.8125
| 76
| 0.609037
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BiLinearSoftmaxAttention(nn.Module):
def __init__(self):
super(BiLinearSoftmaxAttention, self).__init__()
def forward(self, context, query, length):
raw_scores = torch.bmm(query, context.permute(0, 2, 1))
for b, l in enumerate(length.data.tolist()):
if l < raw_scores.size(2):
raw_scores.data[b,:,l:].fill_(float("-inf"))
#bs = length.size(0)
#diag_mask = torch.diag(length.data.new(length.data.max()).fill_(1))
#mask = diag_mask.unsqueeze(0).byte().repeat(bs, 1, 1)
#raw_scores.data.masked_fill_(mask, float("-inf"))
scores = F.softmax(raw_scores, 2)
for b, l in enumerate(length.data.tolist()):
if l < raw_scores.size(1):
scores.data[b,l:].fill_(0)
attended_context = torch.bmm(scores, context)
output = torch.cat([query, attended_context], 2)
return output, scores
| 853
| 21
| 76
|
d29025376156cdce509093d3eab61747ffd84fb5
| 1,517
|
py
|
Python
|
vendor/bundle/ruby/2.6.0/gems/nokogiri-1.10.4/ext/nokogiri/tmp/x86_64-apple-darwin19/ports/libxml2/2.9.9/libxml2-2.9.9/python/tests/compareNodes.py
|
Burton-David/Jalpc
|
5f22fbc245b0bb18220d3a61c3a5d20557e64288
|
[
"MIT"
] | null | null | null |
vendor/bundle/ruby/2.6.0/gems/nokogiri-1.10.4/ext/nokogiri/tmp/x86_64-apple-darwin19/ports/libxml2/2.9.9/libxml2-2.9.9/python/tests/compareNodes.py
|
Burton-David/Jalpc
|
5f22fbc245b0bb18220d3a61c3a5d20557e64288
|
[
"MIT"
] | null | null | null |
vendor/bundle/ruby/2.6.0/gems/nokogiri-1.10.4/ext/nokogiri/tmp/x86_64-apple-darwin19/ports/libxml2/2.9.9/libxml2-2.9.9/python/tests/compareNodes.py
|
Burton-David/Jalpc
|
5f22fbc245b0bb18220d3a61c3a5d20557e64288
|
[
"MIT"
] | 1
|
2019-03-27T16:24:43.000Z
|
2019-03-27T16:24:43.000Z
|
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
#
# Testing XML Node comparison and Node hash-value
#
doc = libxml2.parseDoc("""<root><foo/></root>""")
root = doc.getRootElement()
# Create two different objects which point to foo
foonode1 = root.children
foonode2 = root.children
# Now check that [in]equality tests work ok
if not ( foonode1 == foonode2 ):
print("Error comparing nodes with ==, nodes should be equal but are unequal")
sys.exit(1)
if not ( foonode1 != root ):
print("Error comparing nodes with ==, nodes should not be equal but are equal")
sys.exit(1)
if not ( foonode1 != root ):
print("Error comparing nodes with !=, nodes should not be equal but are equal")
if ( foonode1 != foonode2 ):
print("Error comparing nodes with !=, nodes should be equal but are unequal")
# Next check that the hash function for the objects also works ok
if not (hash(foonode1) == hash(foonode2)):
print("Error hash values for two equal nodes are different")
sys.exit(1)
if not (hash(foonode1) != hash(root)):
print("Error hash values for two unequal nodes are not different")
sys.exit(1)
if hash(foonode1) == hash(root):
print("Error hash values for two unequal nodes are equal")
sys.exit(1)
# Basic tests successful
doc.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| 29.173077
| 83
| 0.699407
|
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
#
# Testing XML Node comparison and Node hash-value
#
doc = libxml2.parseDoc("""<root><foo/></root>""")
root = doc.getRootElement()
# Create two different objects which point to foo
foonode1 = root.children
foonode2 = root.children
# Now check that [in]equality tests work ok
if not ( foonode1 == foonode2 ):
print("Error comparing nodes with ==, nodes should be equal but are unequal")
sys.exit(1)
if not ( foonode1 != root ):
print("Error comparing nodes with ==, nodes should not be equal but are equal")
sys.exit(1)
if not ( foonode1 != root ):
print("Error comparing nodes with !=, nodes should not be equal but are equal")
if ( foonode1 != foonode2 ):
print("Error comparing nodes with !=, nodes should be equal but are unequal")
# Next check that the hash function for the objects also works ok
if not (hash(foonode1) == hash(foonode2)):
print("Error hash values for two equal nodes are different")
sys.exit(1)
if not (hash(foonode1) != hash(root)):
print("Error hash values for two unequal nodes are not different")
sys.exit(1)
if hash(foonode1) == hash(root):
print("Error hash values for two unequal nodes are equal")
sys.exit(1)
# Basic tests successful
doc.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| 0
| 0
| 0
|
03826264e1311ce093b25205b8efba2aafaadf06
| 271
|
py
|
Python
|
spectroscope/model/notification.py
|
neukind/ethmonitor
|
42828a9b2811d97b3c134244b905353a7bf42660
|
[
"Apache-2.0"
] | 6
|
2021-01-29T02:28:44.000Z
|
2021-02-17T14:08:14.000Z
|
spectroscope/model/notification.py
|
neukind/ethmonitor
|
42828a9b2811d97b3c134244b905353a7bf42660
|
[
"Apache-2.0"
] | 3
|
2021-07-26T10:03:12.000Z
|
2021-07-26T11:48:47.000Z
|
spectroscope/model/notification.py
|
neukind/ethmonitor
|
42828a9b2811d97b3c134244b905353a7bf42660
|
[
"Apache-2.0"
] | 3
|
2021-04-19T09:34:11.000Z
|
2021-04-22T06:23:39.000Z
|
from pydantic import BaseModel
from spectroscope.model import Action
| 19.357143
| 51
| 0.763838
|
from pydantic import BaseModel
from spectroscope.model import Action
class Notification(BaseModel):
pass
class Notify(Action):
notification: Notification
def __init__(self, notification: Notification):
super().__init__(notification=notification)
| 78
| 76
| 46
|
c46e670199762ddc0146f1fc9b8fc422a5cc8f38
| 780
|
py
|
Python
|
Random Problems/Encryption/solution.py
|
matheuscordeiro/HackerRank
|
003ab87fdfa9e7c0535972abd06caebb1165423f
|
[
"MIT"
] | null | null | null |
Random Problems/Encryption/solution.py
|
matheuscordeiro/HackerRank
|
003ab87fdfa9e7c0535972abd06caebb1165423f
|
[
"MIT"
] | null | null | null |
Random Problems/Encryption/solution.py
|
matheuscordeiro/HackerRank
|
003ab87fdfa9e7c0535972abd06caebb1165423f
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
import math
# Complete the encryption function below.
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = encryption(s)
fptr.write(result + '\n')
fptr.close()
| 20
| 67
| 0.620513
|
#!/usr/local/bin/python3
import math
# Complete the encryption function below.
def encryption(s):
s_no_spaces = ''
size_s = 0
# Instead the code below, we can use len(s) and s.replace(' ', '')
for value in s:
if s:
s_no_spaces = f'{s_no_spaces}{s}'
size_s += 1
sqrt = math.sqrt(size_s)
rows = int(sqrt)
columns = math.ceil(sqrt)
if rows * columns < size_s:
rows = columns
result = ''
for j in range(0, columns):
jump = j
if result:
result = f'{result} '
for i in range(0, rows):
if jump < size_s:
result = f'{result}{s_no_spaces[jump]}'
jump += columns
return result
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = encryption(s)
fptr.write(result + '\n')
fptr.close()
| 511
| 0
| 22
|
308a75447dc6e71a90797a9724a052f9c802dddf
| 2,477
|
py
|
Python
|
server/coronachat/storage/tests/test_api.py
|
dballesteros7/coronachat
|
9dc94976a763fdcafb4c83171b1dbdeb39b7b103
|
[
"MIT"
] | 2
|
2020-03-30T21:58:21.000Z
|
2022-01-09T16:30:52.000Z
|
server/coronachat/storage/tests/test_api.py
|
dballesteros7/coronachat
|
9dc94976a763fdcafb4c83171b1dbdeb39b7b103
|
[
"MIT"
] | 41
|
2020-03-27T23:44:36.000Z
|
2022-02-27T02:00:48.000Z
|
server/coronachat/storage/tests/test_api.py
|
dballesteros7/coronachat
|
9dc94976a763fdcafb4c83171b1dbdeb39b7b103
|
[
"MIT"
] | null | null | null |
import unittest
from coronachat.storage.api import GENERIC_ERROR_MSG, NO_OPTION_FOUND_MSG, MessageReader
from coronachat.storage.schema import TopLevelMessage, TopLevelOption
_TEST_HEADER_CONTENT = """\
Welcome to your local government bot
Get information and guidance from use regarding the current
outbreak of coronavirus disease (COVID-19).
Reply with a number at any time to get the latest information
on any topic."""
_EXPECTED_TEST_DATA_TOP_LEVEL_MESSAGE = """\
%s
1. *Latest Update on Coronavirus in Switzerland*
2. What is Coronavirus and what are its symptoms
3. How does Coronavirus spread?
""" % _TEST_HEADER_CONTENT
| 29.843373
| 88
| 0.683084
|
import unittest
from coronachat.storage.api import GENERIC_ERROR_MSG, NO_OPTION_FOUND_MSG, MessageReader
from coronachat.storage.schema import TopLevelMessage, TopLevelOption
_TEST_HEADER_CONTENT = """\
Welcome to your local government bot
Get information and guidance from use regarding the current
outbreak of coronavirus disease (COVID-19).
Reply with a number at any time to get the latest information
on any topic."""
_EXPECTED_TEST_DATA_TOP_LEVEL_MESSAGE = """\
%s
1. *Latest Update on Coronavirus in Switzerland*
2. What is Coronavirus and what are its symptoms
3. How does Coronavirus spread?
""" % _TEST_HEADER_CONTENT
def populate_with_test_data(session):
session.add(
TopLevelMessage(
header_content=_TEST_HEADER_CONTENT,
top_level_options=[
TopLevelOption(
title='What is Coronavirus and what are its symptoms',
content='A bad thing',
position=1,
),
TopLevelOption(
title='*Latest Update on Coronavirus in Switzerland*',
content='Things are going up, not the good things',
position=0,
),
TopLevelOption(
title='How does Coronavirus spread?',
content='People',
position=2,
)
]
)
)
session.commit()
def test_get_formatted_top_level_message(session):
populate_with_test_data(session)
reader = MessageReader()
top_level_message = reader.get_formatted_top_level_message()
assert top_level_message == _EXPECTED_TEST_DATA_TOP_LEVEL_MESSAGE
def test_get_formatted_top_level_message_on_empty_table(session):
reader = MessageReader()
top_level_message = reader.get_formatted_top_level_message()
assert top_level_message == GENERIC_ERROR_MSG
def test_get_option_message(session):
populate_with_test_data(session)
reader = MessageReader()
option_message_1 = reader.get_option_message(1)
option_message_3 = reader.get_option_message(3)
assert option_message_1 == 'Things are going up, not the good things'
assert option_message_3 == 'People'
def test_get_option_message_inexistent(session):
populate_with_test_data(session)
reader = MessageReader()
option_message = reader.get_option_message(100)
assert option_message == NO_OPTION_FOUND_MSG % 100
| 1,724
| 0
| 115
|
3bbbff1f66959ae72a9f42b523681a3b6732edd6
| 544
|
py
|
Python
|
FastApi/sima/src/utils/passwd.py
|
parker-pu/SiMa
|
58353d79c99c0ac05d27960b57705ac3b94f528d
|
[
"MIT"
] | 2
|
2020-12-11T05:46:36.000Z
|
2020-12-15T07:47:59.000Z
|
FastApi/sima/src/utils/passwd.py
|
parker-pu/SiMa
|
58353d79c99c0ac05d27960b57705ac3b94f528d
|
[
"MIT"
] | null | null | null |
FastApi/sima/src/utils/passwd.py
|
parker-pu/SiMa
|
58353d79c99c0ac05d27960b57705ac3b94f528d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from passlib.context import CryptContext
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password, hashed_password):
""" Is the hash of the two values equal
:param plain_password: plaintext
:param hashed_password: hash password
:return:
"""
return pwd_context.verify(plain_password, hashed_password)
def gen_password_hash(data):
""" Generate password hash
:param data: plaintext data
:return:
"""
return pwd_context.hash(data)
| 24.727273
| 65
| 0.709559
|
# -*- coding: utf-8 -*-
from passlib.context import CryptContext
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password, hashed_password):
""" Is the hash of the two values equal
:param plain_password: plaintext
:param hashed_password: hash password
:return:
"""
return pwd_context.verify(plain_password, hashed_password)
def gen_password_hash(data):
""" Generate password hash
:param data: plaintext data
:return:
"""
return pwd_context.hash(data)
| 0
| 0
| 0
|
2fe627536463a1be182b8336c14c473483131472
| 4,925
|
py
|
Python
|
tests/provider/dwd/radar/test_api_current.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 155
|
2020-07-03T05:09:22.000Z
|
2022-03-28T06:57:39.000Z
|
tests/provider/dwd/radar/test_api_current.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 453
|
2020-07-02T21:21:52.000Z
|
2022-03-31T21:35:36.000Z
|
tests/provider/dwd/radar/test_api_current.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 21
|
2020-09-07T12:13:27.000Z
|
2022-03-26T16:26:09.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import h5py
import pytest
from wetterdienst.provider.dwd.radar import (
DwdRadarDataFormat,
DwdRadarDataSubset,
DwdRadarDate,
DwdRadarParameter,
DwdRadarPeriod,
DwdRadarResolution,
DwdRadarValues,
)
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
@pytest.mark.remote
def test_radar_request_site_current_sweep_pcp_v_hdf5():
"""
Example for testing radar sites full current SWEEP_PCP,
this time in OPERA HDF5 (ODIM_H5) format.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
start_date=DwdRadarDate.CURRENT,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
payload = buffer.getvalue()
# Verify data.
assert payload.startswith(b"\x89HDF\r\n")
# Verify more details.
# wddump ras07-stqual-pcpng01_sweeph5onem_vradh_00-2020093000403400-boo-10132-hd5 # noqa:E501,B950
hdf = h5py.File(buffer, "r")
assert hdf["/how/radar_system"] is not None
assert hdf["/how"].attrs.get("task") == b"Sc_Pcp-NG-01_BOO"
assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
assert hdf["/how"].attrs.get("scan_count") == 1
assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
shape = hdf["/dataset1/data1/data"].shape
assert shape == (360, 600) or shape == (361, 600)
@pytest.mark.remote
def test_radar_request_site_current_sweep_vol_v_hdf5_full():
"""
Example for testing radar sites full current SWEEP_VOL,
this time in OPERA HDF5 (ODIM_H5) format.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
start_date=DwdRadarDate.CURRENT,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
payload = buffer.getvalue()
# Verify data.
assert payload.startswith(b"\x89HDF\r\n")
# Verify more details.
# wddump ras07-stqual-vol5minng01_sweeph5onem_vradh_00-2020092917055800-boo-10132-hd5 # noqa:E501,B950
hdf = h5py.File(buffer, "r")
assert hdf["/how/radar_system"] is not None
assert hdf["/how"].attrs.get("task") == b"Sc_Vol-5Min-NG-01_BOO"
assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
assert hdf["/how"].attrs.get("scan_count") == 10
assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
shape = hdf["/dataset1/data1/data"].shape
assert shape == (360, 180) or shape == (360, 720) or shape == (361, 720)
@pytest.mark.remote
def test_radar_request_site_current_sweep_vol_v_hdf5_single():
"""
Example for testing radar sites single current SWEEP_VOL,
this time in OPERA HDF5 (ODIM_H5) format.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
start_date=DwdRadarDate.CURRENT,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
elevation=1,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) <= 1
assert "vradh_01" in results[0].url
buffer = results[0].data
hdf = h5py.File(buffer, "r")
assert hdf["/how"].attrs.get("scan_count") == 10
assert hdf["/dataset1/how"].attrs.get("scan_index") == 2
@pytest.mark.remote
@pytest.mark.parametrize(
"resolution",
[
DwdRadarResolution.DAILY,
DwdRadarResolution.HOURLY,
],
)
def test_radar_request_radolan_cdc_current(resolution):
"""
Verify data acquisition for current RADOLAN_CDC/daily+hourly.
Remark: More often than not, this data is not
available when looking at CURRENT.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
start_date=DwdRadarDate.CURRENT,
resolution=resolution,
period=DwdRadarPeriod.RECENT,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 1
@pytest.mark.remote
def test_radar_request_radolan_cdc_current_5min():
"""
Verify failure for RADOLAN_CDC/5 minutes.
"""
with pytest.raises(ValueError):
DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.MINUTE_5,
start_date=DwdRadarDate.CURRENT,
)
| 27.361111
| 107
| 0.674721
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import h5py
import pytest
from wetterdienst.provider.dwd.radar import (
DwdRadarDataFormat,
DwdRadarDataSubset,
DwdRadarDate,
DwdRadarParameter,
DwdRadarPeriod,
DwdRadarResolution,
DwdRadarValues,
)
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
@pytest.mark.remote
def test_radar_request_site_current_sweep_pcp_v_hdf5():
"""
Example for testing radar sites full current SWEEP_PCP,
this time in OPERA HDF5 (ODIM_H5) format.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
start_date=DwdRadarDate.CURRENT,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
payload = buffer.getvalue()
# Verify data.
assert payload.startswith(b"\x89HDF\r\n")
# Verify more details.
# wddump ras07-stqual-pcpng01_sweeph5onem_vradh_00-2020093000403400-boo-10132-hd5 # noqa:E501,B950
hdf = h5py.File(buffer, "r")
assert hdf["/how/radar_system"] is not None
assert hdf["/how"].attrs.get("task") == b"Sc_Pcp-NG-01_BOO"
assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
assert hdf["/how"].attrs.get("scan_count") == 1
assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
shape = hdf["/dataset1/data1/data"].shape
assert shape == (360, 600) or shape == (361, 600)
@pytest.mark.remote
def test_radar_request_site_current_sweep_vol_v_hdf5_full():
    """
    Example for testing radar sites full current SWEEP_VOL,
    this time in OPERA HDF5 (ODIM_H5) format.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
        start_date=DwdRadarDate.CURRENT,
        site=DwdRadarSite.BOO,
        fmt=DwdRadarDataFormat.HDF5,
        subset=DwdRadarDataSubset.SIMPLE,
    )
    results = list(request.query())

    if len(results) == 0:
        # FIX: pytest.skip() raises Skipped itself; the former
        # ``raise pytest.skip(...)`` never reached its ``raise`` keyword.
        pytest.skip("Data currently not available")

    buffer = results[0].data
    payload = buffer.getvalue()

    # Verify data: payload must carry the HDF5 magic signature.
    assert payload.startswith(b"\x89HDF\r\n")

    # Verify more details.
    # wddump ras07-stqual-vol5minng01_sweeph5onem_vradh_00-2020092917055800-boo-10132-hd5  # noqa:E501,B950
    hdf = h5py.File(buffer, "r")

    assert hdf["/how/radar_system"] is not None
    assert hdf["/how"].attrs.get("task") == b"Sc_Vol-5Min-NG-01_BOO"
    assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"

    assert hdf["/how"].attrs.get("scan_count") == 10
    assert hdf["/dataset1/how"].attrs.get("scan_index") == 1

    # Observed grid sizes differ between sweeps/products.
    shape = hdf["/dataset1/data1/data"].shape

    assert shape == (360, 180) or shape == (360, 720) or shape == (361, 720)
@pytest.mark.remote
def test_radar_request_site_current_sweep_vol_v_hdf5_single():
    """
    Example for testing radar sites single current SWEEP_VOL,
    this time in OPERA HDF5 (ODIM_H5) format.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
        start_date=DwdRadarDate.CURRENT,
        site=DwdRadarSite.BOO,
        fmt=DwdRadarDataFormat.HDF5,
        subset=DwdRadarDataSubset.SIMPLE,
        elevation=1,
    )
    results = list(request.query())

    if len(results) == 0:
        # FIX: pytest.skip() raises Skipped itself; the former
        # ``raise pytest.skip(...)`` never reached its ``raise`` keyword.
        pytest.skip("Data currently not available")

    # Selecting a single elevation yields at most one result.
    assert len(results) <= 1
    assert "vradh_01" in results[0].url

    buffer = results[0].data
    hdf = h5py.File(buffer, "r")

    assert hdf["/how"].attrs.get("scan_count") == 10
    assert hdf["/dataset1/how"].attrs.get("scan_index") == 2
@pytest.mark.remote
@pytest.mark.parametrize(
    "resolution",
    [
        DwdRadarResolution.DAILY,
        DwdRadarResolution.HOURLY,
    ],
)
def test_radar_request_radolan_cdc_current(resolution):
    """
    Verify data acquisition for current RADOLAN_CDC/daily+hourly.

    Remark: More often than not, this data is not
    available when looking at CURRENT.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.RADOLAN_CDC,
        start_date=DwdRadarDate.CURRENT,
        resolution=resolution,
        period=DwdRadarPeriod.RECENT,
    )
    results = list(request.query())

    if len(results) == 0:
        # FIX: call pytest.skip() directly; it raises Skipped on its own,
        # so the former ``raise pytest.skip(...)`` had a dead ``raise``.
        pytest.skip("Data currently not available")

    assert len(results) == 1
@pytest.mark.remote
def test_radar_request_radolan_cdc_current_5min():
    """
    Verify failure for RADOLAN_CDC/5 minutes.
    """
    # Constructing a RADOLAN_CDC request with 5-minute resolution must
    # be rejected with ValueError (only daily/hourly are requested above).
    with pytest.raises(ValueError):
        DwdRadarValues(
            parameter=DwdRadarParameter.RADOLAN_CDC,
            resolution=DwdRadarResolution.MINUTE_5,
            start_date=DwdRadarDate.CURRENT,
        )
| 0
| 0
| 0
|
4376084a1d2e1f6edcd3bfc78edf432ca44e4aed
| 1,116
|
py
|
Python
|
validate.py
|
abrox/ruuvi
|
28e5f7bcce52f4a9541f0b15e11215e7b7526d59
|
[
"MIT"
] | null | null | null |
validate.py
|
abrox/ruuvi
|
28e5f7bcce52f4a9541f0b15e11215e7b7526d59
|
[
"MIT"
] | null | null | null |
validate.py
|
abrox/ruuvi
|
28e5f7bcce52f4a9541f0b15e11215e7b7526d59
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import jsonschema
from jsonschema import validate
from os.path import join, dirname, abspath
import jsonref
def _load_json_schema(filename):
    """Load the given schema file and resolve JSON references.

    The path is resolved relative to this module's directory, so the
    function works regardless of the process' current working directory.
    (BUG FIX: the previous ``absolute_path = abspath(relative_path)``
    overwrote the module-relative path with a cwd-relative one.)

    :param filename: Schema file name/path relative to this module.
    :return: The parsed schema with ``$ref`` entries resolved by jsonref.
    """
    absolute_path = abspath(join(dirname(__file__), filename))
    base_path = dirname(absolute_path)
    # Base URI lets jsonref resolve relative "$ref" targets next to the schema.
    base_uri = 'file://{}/'.format(base_path)
    with open(absolute_path) as schema_file:
        return jsonref.loads(schema_file.read(),
                             base_uri=base_uri,
                             jsonschema=True)
def validate_json(json_data, schema):
    """Validate *json_data* against *schema*; return ``(is_valid, message)``.

    REF: https://json-schema.org/
    """
    # Describe what kind of json you expect.
    loaded_schema = _load_json_schema(schema)
    try:
        validate(instance=json_data, schema=loaded_schema)
    except jsonschema.exceptions.ValidationError as validation_error:
        # Print the detailed validator error, return a short message.
        print(validation_error)
        return False, "Given JSON data is InValid"
    return True, "Given JSON data is Valid"
| 29.368421
| 63
| 0.670251
|
#!/usr/bin/env python3
import jsonschema
from jsonschema import validate
from os.path import join, dirname, abspath
import jsonref
def _load_json_schema(filename):
    """ Loads the given schema file """
    # relative_path = join('', filename)
    relative_path = filename
    absolute_path = join(dirname(__file__), relative_path)
    # NOTE(review): the next line discards the module-relative path computed
    # above and re-resolves against the current working directory instead --
    # looks unintentional; confirm which resolution is wanted.
    absolute_path = abspath(relative_path)
    base_path = dirname(absolute_path)
    # Base URI lets jsonref resolve relative "$ref" targets next to the schema.
    base_uri = 'file://{}/'.format(base_path)
    with open(absolute_path) as schema_file:
        return jsonref.loads(schema_file.read(),
                             base_uri=base_uri,
                             jsonschema=True)
def validate_json(json_data, schema):
    """REF: https://json-schema.org/

    Validate *json_data* against the schema file named by *schema*.
    Returns a ``(is_valid, message)`` tuple.
    """
    # Describe what kind of json you expect.
    execute_api_schema = _load_json_schema(schema)
    try:
        validate(instance=json_data, schema=execute_api_schema)
    except jsonschema.exceptions.ValidationError as err:
        # Print the full validator error, then rebind ``err`` to the short
        # user-facing message returned to the caller.
        print(err)
        err = "Given JSON data is InValid"
        return False, err
    message = "Given JSON data is Valid"
    return True, message
| 0
| 0
| 0
|
c69659e8844f3dec21dcc232e1d0f3f8bba2efc1
| 263
|
py
|
Python
|
nmigen_boards/ice40_up5k_b_evn.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 11
|
2021-12-10T12:23:29.000Z
|
2022-03-13T08:40:20.000Z
|
nmigen_boards/ice40_up5k_b_evn.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 12
|
2021-12-11T18:51:29.000Z
|
2022-03-12T05:08:52.000Z
|
nmigen_boards/ice40_up5k_b_evn.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 7
|
2021-12-12T07:20:21.000Z
|
2022-03-06T06:20:55.000Z
|
# Backwards-compatibility shim: re-export the board definition from its new
# home in ``amaranth_boards`` and warn importers about the renamed package.
from amaranth_boards.ice40_up5k_b_evn import *
from amaranth_boards.ice40_up5k_b_evn import __all__

import warnings

warnings.warn(
    "instead of nmigen_boards.ice40_up5k_b_evn, use amaranth_boards.ice40_up5k_b_evn",
    DeprecationWarning,
    stacklevel=2,
)
| 32.875
| 96
| 0.821293
|
# Compatibility shim: the package was renamed from nmigen_boards to
# amaranth_boards; re-export everything (including __all__) and emit a
# DeprecationWarning pointing importers at the new module path.
from amaranth_boards.ice40_up5k_b_evn import *
from amaranth_boards.ice40_up5k_b_evn import __all__

import warnings
warnings.warn("instead of nmigen_boards.ice40_up5k_b_evn, use amaranth_boards.ice40_up5k_b_evn",
              DeprecationWarning, stacklevel=2)
| 0
| 0
| 0
|
2f8f2b68775ed3bcea2c82fb905ab1dd726fa9c7
| 24,979
|
py
|
Python
|
python/libs/virtual_classes.py
|
neilberard/AutoRig
|
cdd8717483fae418a9dab2b530573f4b804632fe
|
[
"MIT"
] | null | null | null |
python/libs/virtual_classes.py
|
neilberard/AutoRig
|
cdd8717483fae418a9dab2b530573f4b804632fe
|
[
"MIT"
] | null | null | null |
python/libs/virtual_classes.py
|
neilberard/AutoRig
|
cdd8717483fae418a9dab2b530573f4b804632fe
|
[
"MIT"
] | null | null | null |
import pymel.all as pymel
import maya.cmds as cmds
from python.libs import naming_utils
from python.libs import joint_utils
from python.libs import shapes
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def attach_class(node, net):
"""
Adds a string attribute to a PyNode set to the virtual class identifier. Example node._class = '_TransformNode'
:param node: PyNode to add attribute to.
:param net: Network to associate the PyNode with IE: 'L_Leg_Net'
:return: PyNode as a virtual class
"""
if node.hasAttr('_class'):
node.deleteAttr('_class')
if node.hasAttr('Network'):
node.deleteAttr('Network')
#Ensuring that node is a vanilla pynode
node = pymel.PyNode(node)
node.addAttr('Network', dataType='string')
node.Network.set(net.name())
if isinstance(node, pymel.nodetypes.Joint):
node.addAttr('_class', dataType='string')
node._class.set('_JointNode')
return pymel.PyNode(node)
if isinstance(node, pymel.nodetypes.Transform):
node.addAttr('_class', dataType='string')
node._class.set('_TransformNode')
new_node = pymel.PyNode(node)
assert isinstance(new_node, TransformNode)
return new_node
if isinstance(node, pymel.nodetypes.Network):
node.addAttr('_class', dataType='string')
node._class.set('_LimbNode')
return pymel.PyNode(node)
log.warning('Could not find class for: '.format(node))
class BaseNode():
"""
Subclass must also inherit leaf class with pymel.nodetype.dagnode as it's hierarchy. IE: 'pymel.nodetypes.Joint'
This class contains some basic properties that are used for accessing other nodes
"""
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
def getCtrlRig(self):
"""Return all control rig nodes, ignore skinning joints"""
nodes = []
for obj in cmds.ls(type='transform'):
if cmds.attributeQuery('Network', node=obj, exists=True) and\
cmds.getAttr('{}.Network'.format(obj)) == self.network.name() and obj not in self.jnts:
nodes.append(pymel.PyNode(obj))
return nodes
def getMirroredCtrl(self):
"""
Find the Limb network connection to Main Net and traverse the mirrored network
which is connected to another index, either 0 or 1.
For example: L_Elbow_FK_CTRL >>> L_ARM_Net.FK_CTLS[2] >>> MAIN_NET.ARMS[ idx ]
Traverse: MAIN_NET.ARMS[ !idx ] >>> R_ARM_Net.FK_CTLS[2] >>> R_Elbow_FK_CTRL
:return: Mirrored CTRL or None if failed to find ctrl.
"""
net_attr = self.networkAttr # Storing the network attr'
limb = self.mainAttr # Storing the main limb attr
if not limb: # If the network is not a limb, like 'Main', There is mo mirrored ctrl.
return None
for idx, element in enumerate(limb.array().elements()):
if idx != limb.index(): # Traverse through the other idx connection limb network
mirror_net = limb.array().elementByLogicalIndex(idx).connections()[0]
mirror_array = mirror_net.getAttr(net_attr.array().attrName())
return mirror_array[net_attr.index()]
# DAG CLASSES
class JointNode(pymel.nodetypes.Joint, BaseNode):
""" this is an example of how to create your own subdivisions of existing nodes. """
NODE_TYPE = 'JointNode'
@classmethod
def list(cls, *args, **kwargs):
""" Returns all instances the node in the scene """
kwargs['type'] = cls.__melnode__
return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]
@classmethod
def _isVirtual(cls, obj, name):
"""PyMEL code should not be used inside the callback, only API and maya.cmds. """
fn = pymel.api.MFnDependencyNode(obj)
try:
if fn.hasAttribute('_class'):
plug = fn.findPlug('_class')
if plug.asString() == '_JointNode':
return True
return False
except:
pass
return False
@classmethod
def _preCreateVirtual(cls, **kwargs):
"""This is called before creation. python allowed."""
return kwargs
@classmethod
def _postCreateVirtual(cls, newNode):
""" This is called before creation, pymel/cmds allowed."""
pymel.addAttr(newNode, longName='_class', dataType='string')
newNode._class.set('_JointNode')
class TransformNode(BaseNode, pymel.nodetypes.Transform):
""" this is an example of how to create your own subdivisions of existing nodes. """
@classmethod
def list(cls, *args, **kwargs):
""" Returns all instances the node in the scene """
kwargs['type'] = cls.__melnode__
return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]
@classmethod
def _isVirtual(cls, obj, name):
"""PyMEL code should not be used inside the callback, only API and maya.cmds. """
fn = pymel.api.MFnDependencyNode(obj)
try:
if fn.hasAttribute('_class'):
plug = fn.findPlug('_class')
if plug.asString() == '_TransformNode':
return True
return False
except:
pass
return False
@classmethod
def _preCreateVirtual(cls, **kwargs):
"""This is called before creation. python allowed."""
return kwargs
@classmethod
def _postCreateVirtual(cls, newNode):
""" This is called before creation, pymel/cmds allowed."""
newNode.addAttr('_class', dataType='string')
newNode._class.set('_TransformNode')
# NETWORK CLASSES
class LimbNode(pymel.nt.Network, BaseNode):
""" this is an example of how to create your own subdivisions of existing nodes. """
@classmethod
def list(cls, *args, **kwargs):
""" Returns all instances the node in the scene """
kwargs['type'] = cls.__melnode__
return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]
@classmethod
def _isVirtual(cls, obj, name):
"""PyMEL code should not be used inside the callback, only API and maya.cmds. """
fn = pymel.api.MFnDependencyNode(obj)
try:
if fn.hasAttribute('_class'):
plug = fn.findPlug('_class')
if plug.asString() == '_LimbNode':
return True
return False
except:
pass
return False
@classmethod
def _preCreateVirtual(cls, **kwargs):
"""This is called before creation. python allowed."""
return kwargs
@classmethod
def _postCreateVirtual(cls, newNode):
""" This is called before creation, pymel/cmds allowed."""
newNode.addAttr('_class', dt='string')
newNode._class.set('_LimbNode')
newNode.addAttr('JOINTS', attributeType='message', multi=True)
newNode.addAttr('IK_JOINTS', attributeType='message', multi=True)
newNode.addAttr('FK_JOINTS', attributeType='message', multi=True)
newNode.addAttr('IK_CTRLS', attributeType='message', multi=True)
newNode.addAttr('FK_CTRLS', attributeType='message', multi=True)
newNode.addAttr('CTRLS', attributeType='message', multi=True)
newNode.addAttr('POLE', attributeType='message', multi=True)
newNode.addAttr('SWITCH', attributeType='message', multi=True)
newNode.addAttr('ORIENTCONSTRAINT', attributeType='message', multi=True)
newNode.addAttr('POINTCONSTRAINT', attributeType='message', multi=True)
newNode.addAttr('IK_HANDLE', attributeType='message', multi=True)
newNode.addAttr('IK_SNAP_LOC', attributeType='message', multi=True)
@property
class SplineIKNet(LimbNode):
""" this is an example of how to create your own subdivisions of existing nodes. """
@classmethod
def list(cls, *args, **kwargs):
""" Returns all instances the node in the scene """
kwargs['type'] = cls.__melnode__
return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]
@classmethod
def _isVirtual(cls, obj, name):
"""PyMEL code should not be used inside the callback, only API and maya.cmds. """
fn = pymel.api.MFnDependencyNode(obj)
try:
if fn.hasAttribute('_class'):
plug = fn.findPlug('_class')
if plug.asString() == '_SplineIKNet':
return True
return False
except:
pass
return False
@classmethod
def _preCreateVirtual(cls, **kwargs):
"""This is called before creation. python allowed."""
return kwargs
@classmethod
def _postCreateVirtual(cls, newNode):
""" This is called before creation, pymel/cmds allowed."""
newNode.addAttr('_class', dataType='string')
newNode._class.set('_SplineIKNet')
newNode.addAttr('JOINTS', attributeType='message', multi=True)
newNode.addAttr('IK_HANDLE', attributeType='message', multi=True)
newNode.addAttr('IK_CTRLS', attributeType='message', multi=True)
newNode.addAttr('CLUSTER_HANDLE', attributeType='message', multi=True)
newNode.addAttr('COG', attributeType='message', multi=True)
@property
@property
@property
@property
class MainNode(LimbNode):
""" this is an example of how to create your own subdivisions of existing nodes. """
@classmethod
def list(cls, *args, **kwargs):
""" Returns all instances the node in the scene """
kwargs['type'] = cls.__melnode__
return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]
@classmethod
def _isVirtual(cls, obj, name):
"""PyMEL code should not be used inside the callback, only API and maya.cmds. """
fn = pymel.api.MFnDependencyNode(obj)
try:
if fn.hasAttribute('_class'):
plug = fn.findPlug('_class')
if plug.asString() == '_MainNode':
return True
return False
except:
pass
return False
@classmethod
def _preCreateVirtual(cls, **kwargs):
"""This is called before creation. python allowed."""
return kwargs
@classmethod
def _postCreateVirtual(cls, newNode):
""" This is called before creation, pymel/cmds allowed."""
newNode.addAttr('_class', dataType='string')
newNode._class.set('_MainNode')
newNode.addAttr('MAIN_CTRL', attributeType='message', multi=True)
newNode.addAttr('ARMS', attributeType='message', multi=True)
newNode.addAttr('CLAVICLES', attributeType='message', multi=True)
newNode.addAttr('LEGS', attributeType='message', multi=True)
newNode.addAttr('SPINE', attributeType='message', multi=True)
newNode.addAttr('HEAD', attributeType='message', multi=True)
newNode.addAttr('HANDS', attributeType='message', multi=True)
newNode.addAttr('ROOT', attributeType='message', multi=True)
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
# Classes need to be registered to exist in the scene.
pymel.factories.registerVirtualClass(JointNode, nameRequired=False)
pymel.factories.registerVirtualClass(CtrlNode, nameRequired=False)
pymel.factories.registerVirtualClass(TransformNode, nameRequired=False)
pymel.factories.registerVirtualClass(SplineIKNet, nameRequired=False)
pymel.factories.registerVirtualClass(MainNode, nameRequired=False)
pymel.factories.registerVirtualClass(LimbNode, nameRequired=False)
pymel.factories.registerVirtualClass(ClavicleNode, nameRequired=False)
| 32.910408
| 154
| 0.576724
|
import pymel.all as pymel
import maya.cmds as cmds
from python.libs import naming_utils
from python.libs import joint_utils
from python.libs import shapes
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def attach_class(node, net):
    """
    Adds a string attribute to a PyNode set to the virtual class identifier. Example node._class = '_TransformNode'
    :param node: PyNode to add attribute to.
    :param net: Network to associate the PyNode with IE: 'L_Leg_Net'
    :return: PyNode as a virtual class
    """
    # Remove stale tags first so re-tagging an already-tagged node works.
    if node.hasAttr('_class'):
        node.deleteAttr('_class')
    if node.hasAttr('Network'):
        node.deleteAttr('Network')

    # Ensuring that node is a vanilla pynode
    node = pymel.PyNode(node)

    node.addAttr('Network', dataType='string')
    node.Network.set(net.name())

    # Joint is checked before Transform (a Joint node is also a Transform).
    if isinstance(node, pymel.nodetypes.Joint):
        node.addAttr('_class', dataType='string')
        node._class.set('_JointNode')
        return pymel.PyNode(node)

    if isinstance(node, pymel.nodetypes.Transform):
        node.addAttr('_class', dataType='string')
        node._class.set('_TransformNode')
        new_node = pymel.PyNode(node)
        assert isinstance(new_node, TransformNode)
        return new_node

    if isinstance(node, pymel.nodetypes.Network):
        node.addAttr('_class', dataType='string')
        node._class.set('_LimbNode')
        return pymel.PyNode(node)

    # BUG FIX: the format string was missing its '{}' placeholder, so
    # ''.format(node) silently dropped the node name from the log message.
    log.warning('Could not find class for: {}'.format(node))
class BaseNode():
    """
    Subclass must also inherit leaf class with pymel.nodetype.dagnode as it's hierarchy. IE: 'pymel.nodetypes.Joint'
    This class contains some basic properties that are used for accessing other nodes
    """

    @property
    def network(self):
        # Owning network node: first '_class'-tagged node connected to our
        # message plug, else the node named by our 'Network' string attribute.
        if self.message.connections():
            for obj in self.message.connections():
                if obj.hasAttr('_class'):
                    return obj
        elif self.hasAttr('Network'):
            return pymel.PyNode(self.Network.get())

    @property
    def networkAttr(self):
        # Same lookup as ``network`` but returns the connected plug.
        for obj in self.message.connections(plugs=True):
            if obj.node().hasAttr('_class'):
                return obj

    @property
    def main(self):
        # First '_class'-tagged node connected to the network's message plug.
        for obj in self.network.message.connections():
            if obj.node().hasAttr('_class'):
                return obj

    @property
    def mainAttr(self):
        # Plug on the main network that this limb network connects to,
        # falling back to the first connected plug; None on any failure.
        try:
            for obj in self.network.message.connections(plugs=True):
                if obj.node().hasAttr('_class'):
                    return obj
            return self.network.message.connections(plugs=True)[0]
        except:
            return None

    # --- thin pass-throughs to the network's multi-message plugs ---

    @property
    def jnts(self):
        return self.network.JOINTS.connections()

    @property
    def jntsAttr(self):
        return self.network.JOINTS

    @property
    def fk_jnts(self):
        return self.network.FK_JOINTS.connections()

    @property
    def fkJntsAttr(self):
        return self.network.FK_JOINTS

    @property
    def ik_jnts(self):
        return self.network.IK_JOINTS.connections()

    @property
    def ikJntsAttr(self):
        return self.network.IK_JOINTS

    @property
    def ik_ctrls(self):
        return self.network.IK_CTRLS.connections()

    @property
    def ikCtrlsAttr(self):
        return self.network.IK_CTRLS

    @property
    def fk_ctrls(self):
        return self.network.FK_CTRLS.connections()

    @property
    def pole_ctrls(self):
        return self.network.POLE.connections()

    @property
    def fkCtrlsAttr(self):
        return self.network.FK_CTRLS

    @property
    def ik_handles(self):
        return self.network.IK_HANDLE.connections()

    @property
    def switch(self):
        if self.network.SWITCH.connections():
            return self.network.SWITCH.connections()[0]  # todo: This is busted since clavicle is overriding switch method, use self.network.switch for now
        else:
            return None

    @property
    def ikHandlesAttr(self):
        return self.network.IK_HANDLE

    @property
    def name_info(self):
        return naming_utils.ItemInfo(self)

    @property
    def _class(self):
        # NOTE(review): ``self._class`` resolves to this very property, so
        # this reads like infinite recursion rather than a read of the
        # '_class' string attribute -- confirm against pymel attribute
        # lookup before relying on it.
        return self._class.get()

    @property
    def side(self):
        return self.network.Side.get()

    @property
    def region(self):
        return self.network.Region.get()

    @property
    def utility(self):
        # Best-effort read of the 'Utility' attribute; None when absent.
        try:
            return self.Utility.get()
        except:
            return None

    @property
    def joint_name(self):
        info = naming_utils.ItemInfo(self.name())
        return info.joint_name

    @property
    def base_name(self):
        info = naming_utils.ItemInfo(self.name())
        return info.base_name

    @property
    def info_index(self):
        info = naming_utils.ItemInfo(self.name())
        return info.index

    @property
    def limb_grp(self):
        # Using mel to speed up the code
        # First transform tagged Utility == 'LimbGrp' for this network.
        for obj in cmds.ls(type='transform'):
            if cmds.attributeQuery('Utility', node=obj, exists=True) and\
                    cmds.getAttr('{}.Utility'.format(obj)) == 'LimbGrp' and\
                    cmds.attributeQuery('Network', node=obj, exists=True) and\
                    cmds.getAttr('{}.Network'.format(obj)) == self.network.name():
                return pymel.PyNode(obj)

    def add_network_tag(self):
        # Stamp this node with the name of its owning network.
        self.add_tags({'Network': self.network.name()})

    def add_tags(self, tags):
        # Best-effort tagging; failures are logged, not raised.
        try:
            naming_utils.add_tags(self, tags)
        except Exception as ex:
            log.warning('Failed to add tags: {}, {}, {}'.format(self, tags, ex))

    def getRoot(self):
        return joint_utils.get_root(self)

    def getCtrlRig(self):
        """Return all control rig nodes, ignore skinning joints"""
        nodes = []
        for obj in cmds.ls(type='transform'):
            if cmds.attributeQuery('Network', node=obj, exists=True) and\
                    cmds.getAttr('{}.Network'.format(obj)) == self.network.name() and obj not in self.jnts:
                nodes.append(pymel.PyNode(obj))
        return nodes

    def getMirroredCtrl(self):
        """
        Find the Limb network connection to Main Net and traverse the mirrored network
        which is connected to another index, either 0 or 1.
        For example: L_Elbow_FK_CTRL >>> L_ARM_Net.FK_CTLS[2] >>> MAIN_NET.ARMS[ idx ]
        Traverse: MAIN_NET.ARMS[ !idx ] >>> R_ARM_Net.FK_CTLS[2] >>> R_Elbow_FK_CTRL
        :return: Mirrored CTRL or None if failed to find ctrl.
        """
        net_attr = self.networkAttr  # Storing the network attr'
        limb = self.mainAttr  # Storing the main limb attr
        if not limb:  # If the network is not a limb, like 'Main', There is mo mirrored ctrl.
            return None
        for idx, element in enumerate(limb.array().elements()):
            if idx != limb.index():  # Traverse through the other idx connection limb network
                mirror_net = limb.array().elementByLogicalIndex(idx).connections()[0]
                mirror_array = mirror_net.getAttr(net_attr.array().attrName())
                return mirror_array[net_attr.index()]

    def getLimbCtrls(self):
        # Delegates to the network node's implementation.
        return self.network.getLimbCtrls()
# DAG CLASSES
class JointNode(pymel.nodetypes.Joint, BaseNode):
    """ this is an example of how to create your own subdivisions of existing nodes. """
    NODE_TYPE = 'JointNode'

    @classmethod
    def list(cls, *args, **kwargs):
        """ Returns all instances the node in the scene """
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds. """
        # A node is a JointNode when its '_class' string plug reads '_JointNode'.
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_JointNode':
                    return True
            return False
        except:
            # API calls can fail on partially-initialized nodes; treat any
            # failure as "not this virtual class".
            pass
        return False

    @classmethod
    def _preCreateVirtual(cls, **kwargs):
        """This is called before creation. python allowed."""
        return kwargs

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """ This is called before creation, pymel/cmds allowed."""
        # Tag freshly-created nodes so _isVirtual recognizes them later.
        pymel.addAttr(newNode, longName='_class', dataType='string')
        newNode._class.set('_JointNode')
class TransformNode(BaseNode, pymel.nodetypes.Transform):
    """ this is an example of how to create your own subdivisions of existing nodes. """

    @classmethod
    def list(cls, *args, **kwargs):
        """ Returns all instances the node in the scene """
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds. """
        # A node is a TransformNode when '_class' reads '_TransformNode'.
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_TransformNode':
                    return True
            return False
        except:
            # Any API failure means "not this virtual class".
            pass
        return False

    @classmethod
    def _preCreateVirtual(cls, **kwargs):
        """This is called before creation. python allowed."""
        return kwargs

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """ This is called before creation, pymel/cmds allowed."""
        # Tag freshly-created nodes so _isVirtual recognizes them later.
        newNode.addAttr('_class', dataType='string')
        newNode._class.set('_TransformNode')
class CtrlNode(TransformNode):
    # Animation-control transform: TransformNode plus helpers for managing
    # the ctrl's curve shapes (swap, scale, recolor, reorient).

    @classmethod
    def list(cls, *args, **kwargs):
        """ Returns all instances the node in the scene """
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds. """
        # A node is a CtrlNode when '_class' reads '_CtrlNode'.
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_CtrlNode':
                    return True
            return False
        except:
            # Any API failure means "not this virtual class".
            pass
        return False

    @classmethod
    def _preCreateVirtual(cls, **kwargs):
        """This is called before creation. python allowed."""
        return kwargs

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """ This is called before creation, pymel/cmds allowed."""
        newNode.addAttr('_class', dataType='string')
        newNode._class.set('_CtrlNode')
        newNode.addAttr('SHAPE', attributeType='message', multi=True)

    def freeze_transform(self):
        # Bake translate/rotate/scale (makeIdentity) on this ctrl.
        pymel.makeIdentity(self, a=True, t=1, r=1, s=1, n=0, pn=1)

    def set_shape(self, shape):
        # Replace the existing curve shapes with a library shape of the
        # given type.
        pymel.delete(self.getShapes())
        shapes.make_shape(shape_type=shape, transform=self, name=shape)

    def set_shape_size(self, size):
        # Uniformly scale every shape's CVs.
        for shape in self.getShapes():
            pymel.scale(shape.cv[:], (size, size, size))

    def set_shape_color(self, color):
        # Enable per-shape RGB draw overrides and set the color.
        shapes = self.getShapes()
        for shape in shapes:
            shape.overrideEnabled.set(1)
            shape.overrideRGBColors.set(1)
            shape.overrideColorRGB.set(color)

    def reset_axis(self):
        # Re-apply whatever axis is stored on the 'shapeAxis' attribute.
        if self.shapeAxis.get() == 'X':
            self.set_axis('X')
        if self.shapeAxis.get() == 'Z':
            self.set_axis('Z')
        if self.shapeAxis.get() == 'Y':
            self.set_axis('Y')
        if self.shapeAxis.get() == '-X':
            self.set_axis('-X')
        if self.shapeAxis.get() == '-Z':
            self.set_axis('-Z')
        if self.shapeAxis.get() == '-Y':
            self.set_axis('-Y')

    def set_axis(self, axis):
        # Transform every shape CV by a fixed matrix so the ctrl curve
        # faces the requested axis, then record the axis on 'shapeAxis'.
        with pymel.UndoChunk():
            x_matrix = pymel.datatypes.Matrix([0.0, -1.0, 0.0, 0.0],
                                              [1.0, 0.0, 0.0, 0.0],
                                              [0.0, 0.0, 1.0, 0.0],
                                              [0.0, 0.0, 0.0, 1.0])
            neg_x_matrix = pymel.datatypes.Matrix([1.0, 0.0, 0.0, 0.0],
                                                  [0.0, 0.0, -1.0, 0.0],
                                                  [0.0, 1.0, 0.0, 0.0],
                                                  [0.0, 0.0, 0.0, 1.0])
            y_matrix = pymel.datatypes.Matrix([0.0, 0.0, -1.0, 0.0],
                                              [0.0, 1.0, 0.0, 0.0],
                                              [1.0, 0.0, 0.0, 0.0],
                                              [0.0, 0.0, 0.0, 1.0])
            neg_y_matrix = pymel.datatypes.Matrix([0.0, 0.0, 1.0, 0.0],
                                                  [0.0, 1.0, 0.0, 0.0],
                                                  [-1.0, 0.0, 0.0, 0.0],
                                                  [0.0, 0.0, 0.0, 1.0])
            z_matrix = pymel.datatypes.Matrix([1.0, 0.0, 0.0, 0.0],
                                              [0.0, 0.0, 1.0, 0.0],
                                              [0.0, -1.0, 0.0, 0.0],
                                              [0.0, 0.0, 0.0, 1.0])
            # NOTE(review): row 2 of neg_z_matrix ([1.0, 0.0, 1.0, 0.0]) is
            # not a pure axis-permutation row like the other matrices --
            # possible typo; confirm the intended -Z orientation.
            neg_z_matrix = pymel.datatypes.Matrix([0.0, -1.0, 0.0, 0.0],
                                                  [1.0, 0.0, 1.0, 0.0],
                                                  [0.0, 0.0, 1.0, 0.0],
                                                  [0.0, 0.0, 0.0, 1.0])
            if axis == 'X':
                for shape in self.getShapes():
                    for cv in shape.cv[:]:
                        cv.setPosition(cv.getPosition() * x_matrix)
            if axis == '-X':
                for shape in self.getShapes():
                    for cv in shape.cv[:]:
                        cv.setPosition(cv.getPosition() * neg_x_matrix)
            if axis == 'Y':  # Default Y up
                for shape in self.getShapes():
                    for cv in shape.cv[:]:
                        cv.setPosition(cv.getPosition() * y_matrix)
            if axis == '-Y':  # Default Y up
                for shape in self.getShapes():
                    for cv in shape.cv[:]:
                        cv.setPosition(cv.getPosition() * neg_y_matrix)
            if axis == 'Z':
                for shape in self.getShapes():
                    for cv in shape.cv[:]:
                        cv.setPosition(cv.getPosition() * z_matrix)
            if axis == '-Z':
                for shape in self.getShapes():
                    for cv in shape.cv[:]:
                        cv.setPosition(cv.getPosition() * neg_z_matrix)
            self.shapeAxis.set(axis)
        # Reset the viewport evaluation cache so the CV edits draw correctly.
        pymel.ogs(reset=True)

    def create_offset(self):
        # Insert an empty group above this ctrl (zeroes its channels).
        grp = pymel.group(empty=True)
        self.setParent(grp)
        return grp
# NETWORK CLASSES
class LimbNode(pymel.nt.Network, BaseNode):
    """ this is an example of how to create your own subdivisions of existing nodes. """

    @classmethod
    def list(cls, *args, **kwargs):
        """ Returns all instances the node in the scene """
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds. """
        # A node is a LimbNode when '_class' reads '_LimbNode'.
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_LimbNode':
                    return True
            return False
        except:
            # Any API failure means "not this virtual class".
            pass
        return False

    @classmethod
    def _preCreateVirtual(cls, **kwargs):
        """This is called before creation. python allowed."""
        return kwargs

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """ This is called before creation, pymel/cmds allowed."""
        # Tag the node and build the multi-message plugs that BaseNode's
        # properties (jnts, ik_ctrls, switch, ...) read from.
        newNode.addAttr('_class', dt='string')
        newNode._class.set('_LimbNode')
        newNode.addAttr('JOINTS', attributeType='message', multi=True)
        newNode.addAttr('IK_JOINTS', attributeType='message', multi=True)
        newNode.addAttr('FK_JOINTS', attributeType='message', multi=True)
        newNode.addAttr('IK_CTRLS', attributeType='message', multi=True)
        newNode.addAttr('FK_CTRLS', attributeType='message', multi=True)
        newNode.addAttr('CTRLS', attributeType='message', multi=True)
        newNode.addAttr('POLE', attributeType='message', multi=True)
        newNode.addAttr('SWITCH', attributeType='message', multi=True)
        newNode.addAttr('ORIENTCONSTRAINT', attributeType='message', multi=True)
        newNode.addAttr('POINTCONSTRAINT', attributeType='message', multi=True)
        newNode.addAttr('IK_HANDLE', attributeType='message', multi=True)
        newNode.addAttr('IK_SNAP_LOC', attributeType='message', multi=True)

    @property
    def network(self):
        # A LimbNode is its own network node.
        return self

    def getLimbCtrls(self):
        # All transforms tagged Type == 'CTRL' that reference this network.
        nodes = []
        for obj in cmds.ls(type='transform'):
            if cmds.attributeQuery('Network', node=obj, exists=True) and\
                    cmds.getAttr('{}.Network'.format(obj)) == self.network.name() and \
                    cmds.attributeQuery('Type', node=obj, exists=True) and \
                    cmds.getAttr('{}.Type'.format(obj)) == 'CTRL':
                nodes.append(pymel.PyNode(obj))
        return nodes
class SplineIKNet(LimbNode):
    """ this is an example of how to create your own subdivisions of existing nodes. """

    @classmethod
    def list(cls, *args, **kwargs):
        """ Returns all instances the node in the scene """
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds. """
        # A node is a SplineIKNet when '_class' reads '_SplineIKNet'.
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_SplineIKNet':
                    return True
            return False
        except:
            # Any API failure means "not this virtual class".
            pass
        return False

    @classmethod
    def _preCreateVirtual(cls, **kwargs):
        """This is called before creation. python allowed."""
        return kwargs

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """ This is called before creation, pymel/cmds allowed."""
        # NOTE(review): unlike LimbNode, this does not create the FK/IK ctrl
        # plugs from LimbNode._postCreateVirtual (no super() call) -- confirm
        # that is intentional for spline-IK networks.
        newNode.addAttr('_class', dataType='string')
        newNode._class.set('_SplineIKNet')
        newNode.addAttr('JOINTS', attributeType='message', multi=True)
        newNode.addAttr('IK_HANDLE', attributeType='message', multi=True)
        newNode.addAttr('IK_CTRLS', attributeType='message', multi=True)
        newNode.addAttr('CLUSTER_HANDLE', attributeType='message', multi=True)
        newNode.addAttr('COG', attributeType='message', multi=True)

    @property
    def network(self):
        # A SplineIKNet is its own network node.
        return self

    @property
    def clusters(self):
        return self.CLUSTER_HANDLE.connections()

    @property
    def cog(self):
        return self.COG.connections()

    @property
    def clustersAttr(self):
        return self.CLUSTER_HANDLE
class MainNode(LimbNode):
    """Top-level rig network node.

    Tagged with ``_class == '_MainNode'`` and exposes message arrays linking
    to every major rig section (arms, legs, spine, head, hands, root).
    """

    @classmethod
    def list(cls, *args, **kwargs):
        """Return all instances of this node type in the scene."""
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds."""
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_MainNode':
                    return True
            return False
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # never swallowed; any attribute/plug failure means "not this class".
        except Exception:
            pass
        return False

    @classmethod
    def _preCreateVirtual(cls, **kwargs):
        """This is called before creation. python allowed."""
        return kwargs

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """Tag the new node and add the message attributes it exposes."""
        newNode.addAttr('_class', dataType='string')
        newNode._class.set('_MainNode')
        newNode.addAttr('MAIN_CTRL', attributeType='message', multi=True)
        newNode.addAttr('ARMS', attributeType='message', multi=True)
        newNode.addAttr('CLAVICLES', attributeType='message', multi=True)
        newNode.addAttr('LEGS', attributeType='message', multi=True)
        newNode.addAttr('SPINE', attributeType='message', multi=True)
        newNode.addAttr('HEAD', attributeType='message', multi=True)
        newNode.addAttr('HANDS', attributeType='message', multi=True)
        newNode.addAttr('ROOT', attributeType='message', multi=True)

    @property
    def network(self):
        """The node doubles as its own network."""
        return self

    @property
    def main(self):
        """The main node is itself."""
        return self

    @property
    def jnts(self):
        """Nodes connected to the ROOT message array."""
        return self.ROOT.connections()

    @property
    def main_ctrl(self):
        """Nodes connected to the MAIN_CTRL message array."""
        return self.MAIN_CTRL.connections()

    @property
    def arms(self):
        """Nodes connected to the ARMS message array."""
        return self.ARMS.connections()

    @property
    def legs(self):
        """Nodes connected to the LEGS message array."""
        return self.LEGS.connections()

    @property
    def clavicles(self):
        """Nodes connected to the CLAVICLES message array."""
        return self.CLAVICLES.connections()

    @property
    def spine(self):
        """Nodes connected to the SPINE message array."""
        return self.SPINE.connections()

    @property
    def head(self):
        """Nodes connected to the HEAD message array."""
        return self.HEAD.connections()

    @property
    def hands(self):
        """Nodes connected to the HANDS message array."""
        return self.HANDS.connections()

    def getAllCtrls(self):  # todo: Add support for multiple rigs in a scene.
        """Return every transform in the scene tagged as a CTRL."""
        nodes = []
        for obj in cmds.ls(type='transform'):
            if cmds.attributeQuery('Type', node=obj, exists=True) and cmds.getAttr('{}.Type'.format(obj)) == 'CTRL':
                nodes.append(pymel.PyNode(obj))
        return nodes
class ClavicleNode(LimbNode):
    """Clavicle limb network node.

    Shares the ``_class == '_LimbNode'`` tag with its base class and is
    distinguished by a ``_subClass`` attribute equal to SUBNODE_TYPE.
    """

    SUBNODE_TYPE = '_Clavicle'

    @classmethod
    def list(cls, *args, **kwargs):
        """ Returns all instances of all characters in the scene """
        kwargs['type'] = cls.__melnode__
        return [node for node in pymel.ls(*args, **kwargs) if isinstance(node, cls)]

    @classmethod
    def _isVirtual(cls, obj, name):
        """PyMEL code should not be used inside the callback, only API and maya.cmds. """
        fn = pymel.api.MFnDependencyNode(obj)
        try:
            if fn.hasAttribute('_class'):
                plug = fn.findPlug('_class')
                if plug.asString() == '_LimbNode':
                    if fn.hasAttribute('_subClass'):
                        plug = fn.findPlug('_subClass')
                        if plug.asString() == cls.SUBNODE_TYPE:
                            return True
            return False
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # never swallowed; any attribute/plug failure means "not this class".
        except Exception:
            pass
        return False

    @classmethod
    def _postCreateVirtual(cls, newNode):
        """Run base-class setup, then tag the node with its sub-class marker."""
        LimbNode._postCreateVirtual(newNode)
        newNode.addAttr('_subClass', dt='string')
        # Use the class constant instead of repeating the '_Clavicle' literal,
        # so _isVirtual and _postCreateVirtual cannot drift apart.
        newNode._subClass.set(cls.SUBNODE_TYPE)

    @property
    def network(self):
        """The node doubles as its own network."""
        return self

    @property
    def switch(self):
        """IKFK switch of the arm this clavicle is hooked up to, or None."""
        try:
            attr = self.mainAttr
            net = self.main.arms[attr.index()]
            return net.switch
        # Narrowed from a bare ``except:``; a missing hookup is an expected,
        # recoverable condition, so only warn.
        except Exception:
            log.warning('Failed to find IKFK, is this node hooked up to main?')

    def getLimbCtrls(self):
        """Collect the clavicle's own CTRLs plus the attached arm's CTRLs."""
        ctrl_list = set()
        attr = self.mainAttr
        net = self.main.arms[attr.index()]
        for obj in pymel.listTransforms():
            # Clavicle CTRLS
            if obj.hasAttr('Type') and obj.Type.get() == 'CTRL' and obj.hasAttr('Network') and obj.Network.get() == self.network.name():
                ctrl_list.add(obj)
            # Get Arm CTRLS
            if obj.hasAttr('Type') and obj.Type.get() == 'CTRL' and obj.hasAttr('Network') and obj.Network.get() == net.name():
                ctrl_list.add(obj)
        return list(ctrl_list)
# Classes need to be registered to exist in the scene.
# registerVirtualClass hooks each subclass into PyMEL's node factory so that
# PyNode(...) automatically returns these virtual classes; nameRequired=False
# means matching is driven by each class's _isVirtual callback (the '_class'
# attribute) rather than by node name.
pymel.factories.registerVirtualClass(JointNode, nameRequired=False)
pymel.factories.registerVirtualClass(CtrlNode, nameRequired=False)
pymel.factories.registerVirtualClass(TransformNode, nameRequired=False)
pymel.factories.registerVirtualClass(SplineIKNet, nameRequired=False)
pymel.factories.registerVirtualClass(MainNode, nameRequired=False)
pymel.factories.registerVirtualClass(LimbNode, nameRequired=False)
pymel.factories.registerVirtualClass(ClavicleNode, nameRequired=False)
| 8,793
| 2,635
| 1,300
|
88949878ea8222d8cce9f9895c96324ac6bd06c7
| 1,320
|
py
|
Python
|
newbeginning/network/test-analysis/medianvariance.py
|
arnavkapoor/fsmresults
|
96daf7e86ed58fea2d7cbbe9364a866c7a548a3e
|
[
"Apache-2.0"
] | null | null | null |
newbeginning/network/test-analysis/medianvariance.py
|
arnavkapoor/fsmresults
|
96daf7e86ed58fea2d7cbbe9364a866c7a548a3e
|
[
"Apache-2.0"
] | null | null | null |
newbeginning/network/test-analysis/medianvariance.py
|
arnavkapoor/fsmresults
|
96daf7e86ed58fea2d7cbbe9364a866c7a548a3e
|
[
"Apache-2.0"
] | null | null | null |
"""Summarize per-FSM length statistics into fsmstats.csv.

For each benchmark, expands the (Length, Number) histogram CSVs under
./individual-stats and ./individual-stats-2pow20 into flat samples and
writes median / std-dev / variance / averages / totals.
"""
import csv
import math
import numpy as np
import pandas as pd
import matplotlib as mplt
import itertools
import matplotlib.pyplot as plt

# Benchmark FSM files whose statistics are summarized below.
neededfiles = ['aim.fsm', 'battlefield2.fsm', 'counterstrike-source.fsm',
               'halflife2-deathmatch.fsm', 'dns.fsm', 'h323.fsm', 'hotline.fsm',
               'ntp.fsm', 'rtp.fsm', 'ssl.fsm', 'tsp.fsm', 'yahoo.fsm']

# Context manager guarantees the output handle is flushed and closed (the
# original leaked it), `writer` avoids shadowing the `file` builtin, and
# newline='' stops csv.writer emitting blank rows on Windows.
with open("fsmstats.csv", 'w', newline='') as out_handle:
    writer = csv.writer(out_handle)
    writer.writerow(['FSM', 'Median', 'Standard Deviation', 'Variance',
                     'Average all', 'Total all', 'Average 2**20', 'Total 2**20'])
    for fsm_file in neededfiles:
        stem = fsm_file.split('.')[0]
        df = pd.read_csv('./individual-stats/' + stem + '.csv')
        df2 = pd.read_csv('./individual-stats-2pow20/' + stem + '.csv')
        # Expand the (Length, Number) histograms into flat sample arrays.
        data = np.repeat(df['Length'].values.tolist(), df['Number'].values.tolist())
        data2 = np.repeat(df2['Length'].values.tolist(), df2['Number'].values.tolist())
        # Vectorized sums replace the original O(n) Python accumulation loops.
        writer.writerow([stem, np.median(data), np.std(data), np.var(data),
                         np.average(data), data.sum(), np.average(data2), data2.sum()])
| 31.428571
| 185
| 0.655303
|
"""Summarize per-FSM length statistics into fsmstats.csv.

For each benchmark, expands the (Length, Number) histogram CSVs under
./individual-stats and ./individual-stats-2pow20 into flat samples and
writes median / std-dev / variance / averages / totals.
"""
import csv
import math
import numpy as np
import pandas as pd
import matplotlib as mplt
import itertools
import matplotlib.pyplot as plt

# Benchmark FSM files whose statistics are summarized below.
neededfiles = ['aim.fsm', 'battlefield2.fsm', 'counterstrike-source.fsm',
               'halflife2-deathmatch.fsm', 'dns.fsm', 'h323.fsm', 'hotline.fsm',
               'ntp.fsm', 'rtp.fsm', 'ssl.fsm', 'tsp.fsm', 'yahoo.fsm']

# Context manager guarantees the output handle is flushed and closed (the
# original leaked it), `writer` avoids shadowing the `file` builtin, and
# newline='' stops csv.writer emitting blank rows on Windows.
with open("fsmstats.csv", 'w', newline='') as out_handle:
    writer = csv.writer(out_handle)
    writer.writerow(['FSM', 'Median', 'Standard Deviation', 'Variance',
                     'Average all', 'Total all', 'Average 2**20', 'Total 2**20'])
    for fsm_file in neededfiles:
        stem = fsm_file.split('.')[0]
        df = pd.read_csv('./individual-stats/' + stem + '.csv')
        df2 = pd.read_csv('./individual-stats-2pow20/' + stem + '.csv')
        # Expand the (Length, Number) histograms into flat sample arrays.
        data = np.repeat(df['Length'].values.tolist(), df['Number'].values.tolist())
        data2 = np.repeat(df2['Length'].values.tolist(), df2['Number'].values.tolist())
        # Vectorized sums replace the original O(n) Python accumulation loops.
        writer.writerow([stem, np.median(data), np.std(data), np.var(data),
                         np.average(data), data.sum(), np.average(data2), data2.sum()])
| 0
| 0
| 0
|
fc7b59b60500fbdc8842b844fa39aad4bbf51587
| 6,561
|
py
|
Python
|
scripts/3d_pointing_detection.py
|
Tacha-S/ros_3d_pointing_detection
|
6fdaf663a7da90c9e17227584fb244455b746606
|
[
"MIT"
] | null | null | null |
scripts/3d_pointing_detection.py
|
Tacha-S/ros_3d_pointing_detection
|
6fdaf663a7da90c9e17227584fb244455b746606
|
[
"MIT"
] | null | null | null |
scripts/3d_pointing_detection.py
|
Tacha-S/ros_3d_pointing_detection
|
6fdaf663a7da90c9e17227584fb244455b746606
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from geometry_msgs.msg import Pose
from ros_3d_pointing_detection.calc_3d_dist import point_3d_line_distance, point_plane_distance
from tfpose_ros.msg import Persons
from sensor_msgs.msg import PointCloud2, CameraInfo
from sensor_msgs import point_cloud2
import numpy as np
from darknet_ros_msgs.msg import BoundingBoxes
from jsk_recognition_msgs.msg import BoundingBoxArray, ClusterPointIndices
import message_filters
import rospy
from ros_3d_pointing_detection.msg import DetectedObject
if __name__ == '__main__':
main()
| 40.006098
| 125
| 0.624295
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from geometry_msgs.msg import Pose
from ros_3d_pointing_detection.calc_3d_dist import point_3d_line_distance, point_plane_distance
from tfpose_ros.msg import Persons
from sensor_msgs.msg import PointCloud2, CameraInfo
from sensor_msgs import point_cloud2
import numpy as np
from darknet_ros_msgs.msg import BoundingBoxes
from jsk_recognition_msgs.msg import BoundingBoxArray, ClusterPointIndices
import message_filters
import rospy
from ros_3d_pointing_detection.msg import DetectedObject
class PointingDetector3D(object):
    """ROS node that detects which 3D object a person's right arm points at.

    Synchronizes pose estimates, object bounding boxes, and an organized
    point cloud; when the right arm is stretched, casts a ray from shoulder
    through wrist into the cloud and publishes the nearest matching box.
    """

    def __init__(self):
        # type: () -> None
        """Set up the node, the synchronized subscribers, and the publisher."""
        rospy.init_node('3d_pointing_detector')
        self._persons_sub = message_filters.Subscriber("~persons", Persons)
        self._objects_sub = message_filters.Subscriber("~objects", BoundingBoxArray)
        self._points_sub = message_filters.Subscriber("~points", PointCloud2)
        # Approximate sync: queue of 10 messages, 1-second slop between topics.
        self._sub = message_filters.ApproximateTimeSynchronizer(
            [self._persons_sub, self._objects_sub, self._points_sub], 10, 1)
        self._sub.registerCallback(self._callback)
        self.__pub = rospy.Publisher('~detect_object', DetectedObject, queue_size=10)

    def _callback(self, persons_msg, objects_msg, points_msg):
        """Process one synchronized (persons, boxes, cloud) triple.

        Only the first detected person is considered. Publishes a
        DetectedObject if the pointing ray hits the cloud within 0.5 m of
        some box's center; otherwise logs why detection stopped.
        """
        if not persons_msg.persons:
            return
        points_list = list(point_cloud2.read_points(points_msg, field_names=("x", "y", "z")))
        points = np.array(points_list)
        # Reshape the flat cloud to (height, width, 3) so body-part pixel
        # coordinates can index it directly — requires an organized cloud.
        points_mat = points.reshape([points_msg.height, points_msg.width, 3])
        right_arm_joints = self.right_arm_joints(persons_msg.persons[0], points_mat)
        if right_arm_joints is None:
            rospy.loginfo("not found right arm")
            return
        if not self.is_arm_stretched(right_arm_joints):
            rospy.loginfo("not stretched")
            return
        is_hit, hit_point = self.get_3d_ray_hit_point(right_arm_joints, points)
        if not is_hit:
            rospy.loginfo("not hit")
            return
        # Pick the box whose center is nearest the hit point, within 0.5 m.
        min_dist = 0.5
        min_box = None
        for box in objects_msg.boxes:
            origin = np.array([box.pose.position.x, box.pose.position.y, box.pose.position.z])
            dist = np.linalg.norm(origin - hit_point)
            if dist < min_dist:
                min_dist = dist
                min_box = box
        if min_box is not None:
            self.__pub.publish(
                DetectedObject(
                    header=min_box.header,
                    id="",
                    pose=min_box.pose,
                    dimensions=min_box.dimensions))
        # Legacy 2D-projection matching against darknet boxes, kept for reference:
        # hit_point_2d = self.cam2pixel(hit_point, np.array(camera_info_msg.K).reshape([3, 3]))
        # for bbox in darknet_msg.bounding_boxes:
        #     xmin = bbox.x
        #     ymin = bbox.y
        #     xmax = xmin + bbox.w
        #     ymax = ymin + bbox.h
        #     if hit_point_2d[0] >= xmin and hit_point_2d[0] <= xmax and hit_point_2d[1] >= ymin and hit_point_2d[1] <= ymax:
        #         pose = Pose()
        #         pose.position.x = hit_point[0]
        #         pose.position.y = hit_point[1]
        #         pose.position.z = hit_point[2]
        #         self.__pub.publish(DetectedObject(header=points_msg.header, id=bbox.Class, pose=pose))

    def right_arm_joints(self, person, points):
        """Return the 3D positions of the right arm's three joints, or None.

        Looks up part_id 2, 3, 4 — presumably right shoulder/elbow/wrist in
        the OpenPose COCA/COCO layout (TODO confirm against tfpose_ros) —
        converting normalized (x, y) to pixel indices into the organized cloud.
        """
        p0 = p1 = p2 = None
        image_h, image_w = points.shape[:2]
        for part in person.body_part:
            if part.part_id == 2:
                p0 = points[int(part.y * image_h + 0.5), int(part.x * image_w + 0.5)]
            elif part.part_id == 3:
                p1 = points[int(part.y * image_h + 0.5), int(part.x * image_w + 0.5)]
            elif part.part_id == 4:
                p2 = points[int(part.y * image_h + 0.5), int(part.x * image_w + 0.5)]
        if p0 is None or p1 is None or p2 is None:
            return None
        return (p0, p1, p2)

    def is_arm_stretched(self, right_arm_joints, angle_thresh=30.0):
        """Return True if the upper- and fore-arm vectors are within
        `angle_thresh` degrees of collinear, i.e. the arm is stretched."""
        vec1 = np.array(right_arm_joints[1] - right_arm_joints[0])
        vec2 = np.array(right_arm_joints[2] - right_arm_joints[1])
        angle = np.arccos(vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))
        angle = angle / np.pi * 180.0
        return np.abs(angle) <= angle_thresh

    def get_3d_ray_hit_point(self, right_arm_joints, points, thresh_in_front_of_wrist=0.50, thresh_close_to_line=0.1):
        ''' Get the hit point between the pointing ray and the point cloud.
        A point in the point cloud that is
        (1) in front of the wrist for `thresh_in_front_of_wrist`,
        (2) and is close to the ray within `thresh_close_to_line`
        is considered as the hit point.
        Arguments:
            right_arm_joints {np.ndarray}: shape=(3, 3). Three joints' xyz positions.
            points {np.ndarray}: shape=(N, 3). N points of xyz positions.
        Return:
            ret {bool}: Is there a valid hit point.
            xyz {np.ndarray}: shape=(3, ). The hit point's position.
        '''
        # Ray runs from the shoulder (p1) through the wrist (p2).
        p1, p2 = right_arm_joints[0], right_arm_joints[2]
        # Select points that are in front of the wrist.
        dists_plane = point_plane_distance(points, p1, p2 - p1)
        thresh = thresh_in_front_of_wrist + np.linalg.norm(p2 - p1)
        valid_idx = dists_plane >= thresh
        valid_pts = points[valid_idx]
        dists_plane = dists_plane[valid_idx]
        if valid_pts.size == 0:
            return False, None
        # Select points that are close to the pointing direction.
        dists_3d_line = point_3d_line_distance(valid_pts, p1, p2)
        valid_idx = dists_3d_line <= thresh_close_to_line
        valid_pts = valid_pts[valid_idx]
        if valid_pts.size == 0:
            return False, None
        dists_plane = dists_plane[valid_idx]
        # Get hit point: the candidate nearest the shoulder plane.
        closest_point_idx = np.argmin(dists_plane)
        hit_point = valid_pts[closest_point_idx]
        return True, hit_point

    def cam2pixel(self, xyz_in_camera, camera_intrinsics):
        ''' Project a point represented in camera coordinate onto the image plane.
        Arguments:
            xyz_in_camera {np.ndarray}: (3, ).
            camera_intrinsics {np.ndarray}: 3x3.
        Return:
            xy {np.ndarray, np.float32}: (2, ). Column and row index.
        '''
        pt_3d_on_cam_plane = xyz_in_camera / xyz_in_camera[2]  # z=1
        xy = camera_intrinsics.dot(pt_3d_on_cam_plane)[0:2]
        xy = tuple(int(v) for v in xy)
        return xy
def main():
    """Create the detector node and hand control to the ROS event loop."""
    PointingDetector3D()
    rospy.spin()


if __name__ == '__main__':
    main()
| 3,594
| 2,342
| 46
|
3143c4a4d4be088c0dc8c9b335bc7e4b0d656fee
| 2,528
|
py
|
Python
|
zenora/impl/factory.py
|
notandrewdev/zenora
|
b88d2db2fc03c4475ca3f0cbbd70338cecf5a1da
|
[
"MIT"
] | 1
|
2021-09-24T22:48:33.000Z
|
2021-09-24T22:48:33.000Z
|
zenora/impl/factory.py
|
notandrewdev/zenora
|
b88d2db2fc03c4475ca3f0cbbd70338cecf5a1da
|
[
"MIT"
] | null | null | null |
zenora/impl/factory.py
|
notandrewdev/zenora
|
b88d2db2fc03c4475ca3f0cbbd70338cecf5a1da
|
[
"MIT"
] | null | null | null |
# Zenora, a modern Python API wrapper for the Discord REST API
#
# Copyright (c) 2020 K.M Ahnaf Zamil
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import typing
import datetime
import zenora
from zenora.base.factory import Factory as BaseFactory
from zenora.impl.mapper import ChannelMapper
from zenora.users import PartialUser
| 33.706667
| 80
| 0.686709
|
# Zenora, a modern Python API wrapper for the Discord REST API
#
# Copyright (c) 2020 K.M Ahnaf Zamil
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import typing
import datetime
import zenora
from zenora.base.factory import Factory as BaseFactory
from zenora.impl.mapper import ChannelMapper
from zenora.users import PartialUser
class Factory(BaseFactory):
    """Concrete factory mapping raw Discord API payloads to Zenora objects."""

    # These were defined without a `self` parameter but left undecorated, so
    # calling them on an instance would bind the instance to `response`.
    # @staticmethod preserves the `Factory.parse_channel(...)` call style
    # while making instance calls safe too.
    @staticmethod
    def parse_channel(response: typing.Dict) -> typing.Any:
        """Parses response data from Discord API into channel objects

        Parameters
        ----------
        response: typing.Dict
                Discord API response as dictionary/JSON

        Returns
        -------
        zenora.channels.GuildTextChannel
                Zenora guild text channel object

        zenora.channels.GuildVoiceChannel
                Zenora guild voice channel object

        zenora.channels.DMTextChannel
                Zenora DM text channel object
        """
        return ChannelMapper.map(response)

    @staticmethod
    def parse_user(response: typing.Dict, snowflake: int) -> typing.Any:
        """Interface of data parser for user object

        Parameters
        ----------
        response: typing.Dict
                Discord API response as dictionary/JSON
        snowflake: int
                Snowflake ID of the user; accepted for interface
                compatibility but not used here.

        Returns
        -------
        zenora.users.PartialUser
                Zenora partial user object
        """
        return PartialUser(response)
| 0
| 1,162
| 23
|
c1bd92bb03d22a5801d1fa69ec73029da13585e5
| 6,952
|
py
|
Python
|
spidermon/contrib/scrapy/extensions.py
|
tcurvelo/spidermon
|
4190273461f0ad9a97f1de3ea75d331f0c4d2b36
|
[
"BSD-3-Clause"
] | null | null | null |
spidermon/contrib/scrapy/extensions.py
|
tcurvelo/spidermon
|
4190273461f0ad9a97f1de3ea75d331f0c4d2b36
|
[
"BSD-3-Clause"
] | null | null | null |
spidermon/contrib/scrapy/extensions.py
|
tcurvelo/spidermon
|
4190273461f0ad9a97f1de3ea75d331f0c4d2b36
|
[
"BSD-3-Clause"
] | null | null | null |
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.utils.misc import load_object
from twisted.internet.task import LoopingCall
from spidermon import MonitorSuite
from spidermon.contrib.scrapy.runners import SpiderMonitorRunner
from spidermon.python import factory
from spidermon.python.monitors import ExpressionsMonitor
from spidermon.utils.field_coverage import calculate_field_coverage
| 37.578378
| 85
| 0.661392
|
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.utils.misc import load_object
from twisted.internet.task import LoopingCall
from spidermon import MonitorSuite
from spidermon.contrib.scrapy.runners import SpiderMonitorRunner
from spidermon.python import factory
from spidermon.python.monitors import ExpressionsMonitor
from spidermon.utils.field_coverage import calculate_field_coverage
class Spidermon:
    """Scrapy extension wiring Spidermon monitor suites to crawler signals.

    Suites may be given as dotted paths to MonitorSuite subclasses or as
    expression dictionaries compiled into monitors at load time. Periodic
    suites run on a twisted LoopingCall per spider.
    """

    def __init__(
        self,
        crawler,
        spider_opened_suites=None,
        spider_closed_suites=None,
        engine_stopped_suites=None,
        spider_opened_expression_suites=None,
        spider_closed_expression_suites=None,
        engine_stopped_expression_suites=None,
        expressions_monitor_class=None,
        periodic_suites=None,
    ):
        """Load all configured suites; raise NotConfigured when disabled."""
        if not crawler.settings.getbool("SPIDERMON_ENABLED"):
            raise NotConfigured
        self.crawler = crawler
        self.spider_opened_suites = [
            self.load_suite(s) for s in spider_opened_suites or []
        ]
        self.spider_opened_suites += [
            self.load_expression_suite(s, expressions_monitor_class)
            for s in spider_opened_expression_suites or []
        ]
        self.spider_closed_suites = [
            self.load_suite(s) for s in spider_closed_suites or []
        ]
        self.spider_closed_suites += [
            self.load_expression_suite(s, expressions_monitor_class)
            for s in spider_closed_expression_suites or []
        ]
        self.engine_stopped_suites = [
            self.load_suite(s) for s in engine_stopped_suites or []
        ]
        self.engine_stopped_suites += [
            self.load_expression_suite(s, expressions_monitor_class)
            for s in engine_stopped_expression_suites or []
        ]
        self.periodic_suites = periodic_suites or {}
        self.periodic_tasks = {}

    def load_suite(self, suite_to_load):
        """Resolve a dotted path to a MonitorSuite subclass and instantiate it.

        Raises whatever load_object raises for an unresolvable path, and
        TypeError if the resolved object is not a MonitorSuite subclass.
        """
        # The original wrapped load_object in `except Exception as e: raise e`,
        # which is a no-op, and raised a bare `Exception` on type mismatch.
        suite_class = load_object(suite_to_load)
        if not issubclass(suite_class, MonitorSuite):
            raise TypeError(
                "{!r} is not a MonitorSuite subclass".format(suite_to_load)
            )
        return suite_class(crawler=self.crawler)

    def load_expression_suite(self, suite_to_load, monitor_class=None):
        """Compile an expression dict into a monitor wrapped in a suite."""
        if monitor_class:
            monitor_class = load_object(monitor_class)
        else:
            monitor_class = ExpressionsMonitor
        monitor = factory.create_monitor_class_from_dict(
            monitor_dict=suite_to_load, monitor_class=monitor_class
        )
        suite = MonitorSuite(crawler=self.crawler)
        suite.add_monitor(monitor)
        return suite

    @classmethod
    def from_crawler(cls, crawler):
        """Build the extension from settings and connect crawler signals."""
        ext = cls(
            crawler=crawler,
            spider_opened_suites=crawler.settings.getlist(
                "SPIDERMON_SPIDER_OPEN_MONITORS"
            ),
            spider_closed_suites=crawler.settings.getlist(
                "SPIDERMON_SPIDER_CLOSE_MONITORS"
            ),
            engine_stopped_suites=crawler.settings.getlist(
                "SPIDERMON_ENGINE_STOP_MONITORS"
            ),
            spider_opened_expression_suites=crawler.settings.getlist(
                "SPIDERMON_SPIDER_OPEN_EXPRESSION_MONITORS"
            ),
            spider_closed_expression_suites=crawler.settings.getlist(
                "SPIDERMON_SPIDER_CLOSE_EXPRESSION_MONITORS"
            ),
            engine_stopped_expression_suites=crawler.settings.getlist(
                "SPIDERMON_ENGINE_STOP_EXPRESSION_MONITORS"
            ),
            expressions_monitor_class=crawler.settings.get(
                "SPIDERMON_EXPRESSIONS_MONITOR_CLASS"
            ),
            periodic_suites=crawler.settings.getdict("SPIDERMON_PERIODIC_MONITORS"),
        )
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(ext.engine_stopped, signal=signals.engine_stopped)
        has_field_coverage = crawler.settings.getbool("SPIDERMON_ADD_FIELD_COVERAGE")
        if has_field_coverage:
            crawler.signals.connect(ext.item_scraped, signal=signals.item_scraped)
        return ext

    def spider_opened(self, spider):
        """Run open-suites and start one LoopingCall per periodic suite."""
        self._run_suites(spider, self.spider_opened_suites)
        self.periodic_tasks[spider] = []
        for suite, time in self.periodic_suites.items():
            task = LoopingCall(self._run_periodic_suites, spider, [suite])
            self.periodic_tasks[spider].append(task)
            task.start(time, now=False)

    def spider_closed(self, spider):
        """Finalize coverage stats, run close-suites, stop periodic tasks."""
        self._add_field_coverage_to_stats()
        self._run_suites(spider, self.spider_closed_suites)
        for task in self.periodic_tasks[spider]:
            task.stop()

    def engine_stopped(self):
        """Run the engine-stopped suites against the crawler's spider."""
        spider = self.crawler.spider
        self._run_suites(spider, self.engine_stopped_suites)

    def _count_item(self, item, skip_none_values, item_count_stat=None):
        """Recursively bump scraped-count stats for an item and its fields."""
        if item_count_stat is None:
            item_type = type(item).__name__
            item_count_stat = f"spidermon_item_scraped_count/{item_type}"
            self.crawler.stats.inc_value(item_count_stat)
        for field_name, value in item.items():
            if skip_none_values and value is None:
                continue
            field_item_count_stat = f"{item_count_stat}/{field_name}"
            self.crawler.stats.inc_value(field_item_count_stat)
            if isinstance(value, dict):
                # Nested dicts get their own per-field counters.
                self._count_item(value, skip_none_values, field_item_count_stat)
                continue

    def _add_field_coverage_to_stats(self):
        """Merge computed field-coverage ratios into the crawler stats."""
        stats = self.crawler.stats.get_stats()
        coverage_stats = calculate_field_coverage(stats)
        stats.update(coverage_stats)

    def item_scraped(self, item, response, spider):
        """Signal handler counting every scraped item for field coverage."""
        skip_none_values = spider.crawler.settings.getbool(
            "SPIDERMON_FIELD_COVERAGE_SKIP_NONE", False
        )
        self.crawler.stats.inc_value("spidermon_item_scraped_count")
        self._count_item(item, skip_none_values)

    def _run_periodic_suites(self, spider, suites):
        """Load the given suite paths fresh and run them."""
        suites = [self.load_suite(s) for s in suites]
        self._run_suites(spider, suites)

    def _run_suites(self, spider, suites):
        """Run each suite through a SpiderMonitorRunner with shared data."""
        data = self._generate_data_for_spider(spider)
        for suite in suites:
            runner = SpiderMonitorRunner(spider=spider)
            runner.run(suite, **data)

    def _generate_data_for_spider(self, spider):
        """Assemble the keyword data every monitor run receives."""
        from spidermon.utils.hubstorage import hs

        return {
            "stats": self.crawler.stats.get_stats(spider),
            "stats_history": spider.stats_history
            if hasattr(spider, "stats_history")
            else [],
            "crawler": self.crawler,
            "spider": spider,
            "job": hs.job if hs.available else None,
        }
| 6,144
| 362
| 23
|
ed4cfd079378bcce20665eaa6c2773019151090a
| 3,409
|
py
|
Python
|
tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_session_image_uri.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 1,690
|
2017-11-29T20:13:37.000Z
|
2022-03-31T12:58:11.000Z
|
tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_session_image_uri.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 2,762
|
2017-12-04T05:18:03.000Z
|
2022-03-31T23:40:11.000Z
|
tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_session_image_uri.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 961
|
2017-11-30T16:44:03.000Z
|
2022-03-30T23:12:09.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pasta
from sagemaker.cli.compatibility.v2.modifiers import renamed_params
from tests.unit.sagemaker.cli.compatibility.v2.modifiers.ast_converter import ast_call
CREATE_MODEL_TEMPLATES = (
"sagemaker_session.create_model_from_job({})",
"sess.create_model_from_job({})",
)
CREATE_ENDPOINT_TEMPLATES = (
"sagemaker_session.endpoint_from_job({})",
"sagemaker_session.endpoint_from_model_data({})",
"sess.endpoint_from_job({})",
"sess.endpoint_from_model_data({})",
)
| 35.884211
| 86
| 0.775887
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pasta
from sagemaker.cli.compatibility.v2.modifiers import renamed_params
from tests.unit.sagemaker.cli.compatibility.v2.modifiers.ast_converter import ast_call
# Call templates for Session.create_model_from_job; "{}" is filled with the
# keyword arguments under test.
CREATE_MODEL_TEMPLATES = (
    "sagemaker_session.create_model_from_job({})",
    "sess.create_model_from_job({})",
)

# Call templates for the endpoint-creation Session methods exercised by the
# SessionCreateEndpointImageURIRenamer tests.
CREATE_ENDPOINT_TEMPLATES = (
    "sagemaker_session.endpoint_from_job({})",
    "sagemaker_session.endpoint_from_model_data({})",
    "sess.endpoint_from_job({})",
    "sess.endpoint_from_model_data({})",
)
def test_create_model_node_should_be_modified():
    """Every create_model_from_job call with the legacy kwarg is flagged."""
    renamer = renamed_params.SessionCreateModelImageURIRenamer()
    assert all(
        renamer.node_should_be_modified(
            ast_call(tmpl.format("primary_container_image=my_image"))
        )
        for tmpl in CREATE_MODEL_TEMPLATES
    )
def test_create_model_node_should_be_modified_no_image():
    """Calls without the legacy image kwarg are left alone."""
    renamer = renamed_params.SessionCreateModelImageURIRenamer()
    assert not any(
        renamer.node_should_be_modified(ast_call(tmpl.format("")))
        for tmpl in CREATE_MODEL_TEMPLATES
    )
def test_create_model_node_should_be_modified_random_function_call():
    """Unrelated function calls are never flagged."""
    renamer = renamed_params.SessionCreateModelImageURIRenamer()
    unrelated_call = ast_call("create_model()")
    assert not renamer.node_should_be_modified(unrelated_call)
def test_create_model_modify_node():
    """The legacy kwarg is rewritten to image_uri in place."""
    renamer = renamed_params.SessionCreateModelImageURIRenamer()
    for tmpl in CREATE_MODEL_TEMPLATES:
        node = ast_call(tmpl.format("primary_container_image=my_image"))
        renamer.modify_node(node)
        assert pasta.dump(node) == tmpl.format("image_uri=my_image")
def test_create_endpoint_node_should_be_modified():
    """Every endpoint-creation call with the legacy kwarg is flagged."""
    renamer = renamed_params.SessionCreateEndpointImageURIRenamer()
    assert all(
        renamer.node_should_be_modified(
            ast_call(tmpl.format("deployment_image=my_image"))
        )
        for tmpl in CREATE_ENDPOINT_TEMPLATES
    )
def test_create_endpoint_node_should_be_modified_no_image():
    """Endpoint calls without the legacy image kwarg are left alone."""
    renamer = renamed_params.SessionCreateEndpointImageURIRenamer()
    assert not any(
        renamer.node_should_be_modified(ast_call(tmpl.format("")))
        for tmpl in CREATE_ENDPOINT_TEMPLATES
    )
def test_create_endpoint_node_should_be_modified_random_function_call():
    """Unrelated function calls are never flagged."""
    renamer = renamed_params.SessionCreateEndpointImageURIRenamer()
    unrelated_call = ast_call("create_endpoint()")
    assert not renamer.node_should_be_modified(unrelated_call)
def test_create_endpoint_modify_node():
    """The legacy kwarg is rewritten to image_uri in place."""
    renamer = renamed_params.SessionCreateEndpointImageURIRenamer()
    for tmpl in CREATE_ENDPOINT_TEMPLATES:
        node = ast_call(tmpl.format("deployment_image=my_image"))
        renamer.modify_node(node)
        assert pasta.dump(node) == tmpl.format("image_uri=my_image")
| 2,120
| 0
| 184
|
1c6764d99bda65c0edd8edcc41f93418f856848f
| 1,145
|
py
|
Python
|
backend/appengine/routes/students/rest.py
|
SamaraCardoso27/eMakeup
|
02c3099aca85b5f54214c3a32590e80eb61621e7
|
[
"MIT"
] | null | null | null |
backend/appengine/routes/students/rest.py
|
SamaraCardoso27/eMakeup
|
02c3099aca85b5f54214c3a32590e80eb61621e7
|
[
"MIT"
] | null | null | null |
backend/appengine/routes/students/rest.py
|
SamaraCardoso27/eMakeup
|
02c3099aca85b5f54214c3a32590e80eb61621e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from student.student_model import StudentForm, Course, Student
from distutils import log
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from tekton.gae.middleware.json_middleware import JsonUnsecureResponse
from google.appengine.ext import ndb
@login_not_required
@no_csrf
@login_not_required
@no_csrf
@login_not_required
@no_csrf
| 29.358974
| 70
| 0.755459
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from student.student_model import StudentForm, Course, Student
from distutils import log
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from tekton.gae.middleware.json_middleware import JsonUnsecureResponse
from google.appengine.ext import ndb
@login_not_required
@no_csrf
def deletar(student_id):
    """Delete the Student entity identified by *student_id*."""
    ndb.Key(Student, int(student_id)).delete()
@login_not_required
@no_csrf
def listar():
    """Return every student, ordered by name, as a JSON response."""
    form = StudentForm()
    dtos = [form.fill_with_model(student)
            for student in Student.query_order_by_name().fetch()]
    return JsonUnsecureResponse(dtos)
@login_not_required
@no_csrf
def salvar(_resp, **propriedades):
    """Validate and persist a Student; respond 400 with errors on failure.

    The 'course' property arrives as a Course id and is converted to an
    ndb.Key before form validation.
    """
    propriedades['course']=ndb.Key(Course,int(propriedades['course']))
    form = StudentForm(**propriedades)
    erros = form.validate()
    if erros:
        _resp.set_status(400)
        return JsonUnsecureResponse(erros)
    student = form.fill_model()
    student.put()
    dct = form.fill_with_model(student)
    # NOTE(review): `log` here is distutils.log (see imports) — presumably the
    # standard `logging` module was intended; confirm.
    log.info(dct)
    return JsonUnsecureResponse(dct)
| 615
| 0
| 66
|
23bf72dc1052caf9090ab8ec52fd5b254a1f6fcf
| 2,511
|
py
|
Python
|
shared/custom_layers/residual_unit.py
|
dbulhosa/ILSVRC
|
c87aaa99680b5d57617c365f45552c94fe34c120
|
[
"MIT"
] | 1
|
2020-04-28T00:41:55.000Z
|
2020-04-28T00:41:55.000Z
|
shared/custom_layers/residual_unit.py
|
dbulhosa/ILSVRC
|
c87aaa99680b5d57617c365f45552c94fe34c120
|
[
"MIT"
] | 2
|
2020-04-27T07:53:35.000Z
|
2020-05-19T05:49:57.000Z
|
shared/custom_layers/residual_unit.py
|
dbulhosa/atari-rl
|
8e91b553300cc72136cf890f0a568b373f061104
|
[
"MIT"
] | 1
|
2020-04-28T00:41:56.000Z
|
2020-04-28T00:41:56.000Z
|
import keras.layers as lyr
def residual_unit(input, size, filters, downsample, kernel_initializer, bias_initializer,
kernel_regularizer, bias_regularizer):
"""
Residual unit using pre-activation as described in:
https://arxiv.org/pdf/1603.05027v2.pdf
Note that we use 1x1 convolutions to transform the
dimension of residuals so we can increase filters
and downsample spatial dimensions.
Ideally we would do zero-padding along filter axis
and downsample through stride 2 or some type of pooling.
However, this would require more code complexity (while
reducing parameter complexity). We may implement a lambda
layer doing exactly this later on.
:param input: The input tensor.
:param siez: The size of the convolutional filters.
:param filters: The number of filters in each convolutional layer of the residual unit.
:param downsample: Whether to downsample at the beginning of the layer. If so we downsample by 2
and we use 1x1 convolutions to resize the residual.
:param kernel_initializer: Kernel initializer for all layers in module.
:param bias_initializer: Bias initializer for all layers in module.
:param kernel_regularizer: Kernel regularizer for all layers in module.
:param bias_regularizer: Bias regularizer for all layers in module.
:return: The output of the residual unit, which consists of the sum of the output of the
previous layer and the output of the layers in the residual unit.
"""
strides = 2 if downsample else 1
int1 = lyr.BatchNormalization()(input)
int2 = lyr.ReLU()(int1)
int3 = get_convolution(filters, strides)(int2)
int4 = lyr.BatchNormalization()(int3)
int5 = lyr.ReLU()(int4)
int6 = get_convolution(filters, 1)(int5)
# If downsampling we use convolutional filters to increase filters
# and reduce the size of the image. This gets dimensions to match.
if downsample:
res = get_convolution(filters, 2)(input)
else:
res = input
out = lyr.Add()([res, int6])
return out
| 41.85
| 100
| 0.687376
|
import keras.layers as lyr
def residual_unit(input, size, filters, downsample, kernel_initializer, bias_initializer,
kernel_regularizer, bias_regularizer):
"""
Residual unit using pre-activation as described in:
https://arxiv.org/pdf/1603.05027v2.pdf
Note that we use 1x1 convolutions to transform the
dimension of residuals so we can increase filters
and downsample spatial dimensions.
Ideally we would do zero-padding along filter axis
and downsample through stride 2 or some type of pooling.
However, this would require more code complexity (while
reducing parameter complexity). We may implement a lambda
layer doing exactly this later on.
:param input: The input tensor.
:param siez: The size of the convolutional filters.
:param filters: The number of filters in each convolutional layer of the residual unit.
:param downsample: Whether to downsample at the beginning of the layer. If so we downsample by 2
and we use 1x1 convolutions to resize the residual.
:param kernel_initializer: Kernel initializer for all layers in module.
:param bias_initializer: Bias initializer for all layers in module.
:param kernel_regularizer: Kernel regularizer for all layers in module.
:param bias_regularizer: Bias regularizer for all layers in module.
:return: The output of the residual unit, which consists of the sum of the output of the
previous layer and the output of the layers in the residual unit.
"""
strides = 2 if downsample else 1
def get_convolution(filters, strides):
return lyr.Conv2D(filters, size, strides=strides, padding='same',
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
int1 = lyr.BatchNormalization()(input)
int2 = lyr.ReLU()(int1)
int3 = get_convolution(filters, strides)(int2)
int4 = lyr.BatchNormalization()(int3)
int5 = lyr.ReLU()(int4)
int6 = get_convolution(filters, 1)(int5)
# If downsampling we use convolutional filters to increase filters
# and reduce the size of the image. This gets dimensions to match.
if downsample:
res = get_convolution(filters, 2)(input)
else:
res = input
out = lyr.Add()([res, int6])
return out
| 370
| 0
| 27
|
0a79ef4fc630bafbc85f4d0253758f76aa8e8d8a
| 16,200
|
py
|
Python
|
test/models/test_pytorch.py
|
adriaat/quantnn
|
5248c240b931fa113120e3564605638095e5278f
|
[
"MIT"
] | null | null | null |
test/models/test_pytorch.py
|
adriaat/quantnn
|
5248c240b931fa113120e3564605638095e5278f
|
[
"MIT"
] | 3
|
2022-01-11T08:41:03.000Z
|
2022-02-11T14:25:09.000Z
|
test/models/test_pytorch.py
|
adriaat/quantnn
|
5248c240b931fa113120e3564605638095e5278f
|
[
"MIT"
] | 5
|
2020-12-11T03:18:32.000Z
|
2022-02-14T10:32:09.000Z
|
"""
Tests for the PyTorch NN backend.
"""
import torch
from torch import nn
import numpy as np
from quantnn import set_default_backend
from quantnn.qrnn import QRNN
from quantnn.drnn import DRNN, _to_categorical
from quantnn.mrnn import Quantiles, Density, Mean, MRNN
from quantnn.models.pytorch import QuantileLoss, CrossEntropyLoss, MSELoss
from quantnn.transformations import Log10
def test_quantile_loss():
"""
Ensure that quantile loss corresponds to half of absolute error
loss and that masking works as expected.
"""
set_default_backend("pytorch")
loss = QuantileLoss([0.5], mask=-1e3)
y_pred = torch.rand(10, 1, 10)
y = torch.rand(10, 1, 10)
l = loss(y_pred, y).detach().numpy()
dy = (y_pred - y).detach().numpy()
l_ref = 0.5 * np.mean(np.abs(dy))
assert np.isclose(l, l_ref)
y_pred = torch.rand(20, 1, 10)
y_pred[10:] = -2e3
y = torch.rand(20, 1, 10)
y[10:] = -2e3
loss = QuantileLoss([0.5], mask=-1e3)
l = loss(y_pred, y).detach().numpy()
l_ref = loss(y_pred[:10], y[:10]).detach().numpy()
assert np.isclose(l, l_ref)
def test_cross_entropy_loss():
"""
Test masking for cross entropy loss.
"""
set_default_backend("pytorch")
y_pred = torch.rand(10, 10, 10)
y = torch.ones(10, 1, 10)
bins = np.linspace(0, 1, 11)
y[:, 0, :] = 0.55
loss = CrossEntropyLoss(bins, mask=-1.0)
ref = -y_pred[:, 5, :] + torch.log(torch.exp(y_pred).sum(1))
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
y[5:, :, :] = -1.0
y[:, :, 5:] = -1.0
ref = -y_pred[:5, 5, :5] + torch.log(torch.exp(y_pred[:5, :, :5]).sum(1))
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
def test_mse_loss():
"""
Test masking for cross entropy loss.
"""
set_default_backend("pytorch")
y_pred = torch.rand(10, 10, 10)
y = torch.ones(10, 10, 10)
y[:, 0, :] = 0.55
loss = MSELoss(mask=-1.0)
ref = ((y_pred - y) ** 2).mean()
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
y[5:, :, :] = -1.0
y[:, :, 5:] = -1.0
ref = ((y_pred[:5, :, :5] - y[:5, :, :5]) ** 2).mean()
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
def test_qrnn_training_with_dataloader():
"""
Ensure that training with a pytorch dataloader works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
training_data = torch.utils.data.TensorDataset(torch.tensor(x),
torch.tensor(y))
training_loader = torch.utils.data.DataLoader(training_data, batch_size=128)
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
qrnn.train(training_loader, n_epochs=1)
def test_qrnn_training_with_dict():
"""
Ensure that training with batch objects as dicts works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
qrnn.train(batched_data, n_epochs=1)
def test_qrnn_training_with_dict_and_keys():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"x_2": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
qrnn.train(batched_data, n_epochs=1, keys=("x", "y"))
def test_qrnn_training_metrics():
"""
Ensure that training with a single target and metrics works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"x_2": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
metrics = ["Bias", "MeanSquaredError", "CRPS"]
qrnn.train(batched_data, n_epochs=1, keys=("x", "y"), metrics=metrics)
def test_drnn_training_metrics():
"""
Ensure that training with a single target and metrics works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
bins = np.arange(128 * 8)
y = _to_categorical(np.random.rand(1024), bins)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"x_2": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
drnn = DRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
metrics = ["Bias", "MeanSquaredError", "CRPS"]
drnn.train(batched_data, n_epochs=1, keys=("x", "y"), metrics=metrics)
def test_training_multiple_outputs():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128]),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128])
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
qrnn = QRNN(np.linspace(0.05, 0.95, 11), model=model)
qrnn.train(batched_data, n_epochs=5, keys=("x", "y"))
def test_training_metrics_multi():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128]),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
bins = np.linspace(0, 1, 12)
bins = {"y_1": bins, "y_2": bins}
qrnn = DRNN(bins=bins, model=model)
metrics = ["Bias", "MeanSquaredError", "CRPS", "ScatterPlot", "QuantileFunction"]
qrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_training_multi_mrnn():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]).to(torch.float32),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32),
"y_3": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
bins = np.linspace(0, 1, 12)
bins = {"y_1": bins, "y_2": bins}
losses = {
"y_1": Quantiles(np.linspace(0.05, 0.95, 10)),
"y_2": Mean(),
"y_3": Density(np.linspace(-2, 2, 21))
}
mrnn = MRNN(losses=losses, model=model)
mrnn.train(batched_data, n_epochs=1)
def test_training_transformation():
"""
Ensure that training in transformed space works.
"""
set_default_backend("pytorch")
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128]),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
transformations = {
"y_1": Log10(),
"y_2": None
}
qrnn = QRNN(np.linspace(0.05, 0.95, 11), model=model,
transformation=transformations)
metrics = ["Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"]
qrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_training_transformation_mrnn_quantiles():
"""
Ensure that training in transformed space works.
"""
set_default_backend("pytorch")
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32),
"y_3": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
transformations = {
"y_1": Log10(),
"y_2": Log10()
}
losses = {
"y_1": Quantiles(np.linspace(0.05, 0.95, 10)),
"y_2": Mean(),
"y_3": Density(np.linspace(-2, 2, 11))
}
mrnn = MRNN(losses=losses, model=model)
metrics = ["Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"]
mrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_training_transformation_mrnn_density():
"""
Ensure that training in transformed space works.
"""
set_default_backend("pytorch")
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
transformations = {
"y_1": Log10(),
"y_2": Log10()
}
losses = {
"y_1": Density(np.linspace(0.05, 0.95, 11)),
"y_2": Mean()
}
mrnn = MRNN(losses=losses, model=model)
metrics = ["Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"]
mrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_qrnn_training_metrics_conv():
"""
E
"""
set_default_backend("pytorch")
x_train = np.random.rand(1024, 16, 32, 32,)
y_train = np.random.rand(1024, 1, 32, 32)
x_val = np.random.rand(32, 16, 32, 32,)
y_val = np.random.rand(32, 1, 32, 32)
training_data = torch.utils.data.TensorDataset(torch.tensor(x_train),
torch.tensor(y_train))
training_loader = torch.utils.data.DataLoader(training_data, batch_size=128)
validation_data = torch.utils.data.TensorDataset(torch.tensor(x_val),
torch.tensor(y_val))
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=1)
model = nn.Sequential(
nn.Conv2d(16, 10, 1)
)
qrnn = QRNN(np.linspace(0.05, 0.95, 10), model=model)
metrics = ["Bias", "MeanSquaredError", "CRPS", "CalibrationPlot", "ScatterPlot"]
qrnn.train(training_loader,
validation_data=validation_loader,
n_epochs=2,
metrics=metrics,
batch_size=1,
mask=-1)
| 29.032258
| 85
| 0.514321
|
"""
Tests for the PyTorch NN backend.
"""
import torch
from torch import nn
import numpy as np
from quantnn import set_default_backend
from quantnn.qrnn import QRNN
from quantnn.drnn import DRNN, _to_categorical
from quantnn.mrnn import Quantiles, Density, Mean, MRNN
from quantnn.models.pytorch import QuantileLoss, CrossEntropyLoss, MSELoss
from quantnn.transformations import Log10
def test_quantile_loss():
"""
Ensure that quantile loss corresponds to half of absolute error
loss and that masking works as expected.
"""
set_default_backend("pytorch")
loss = QuantileLoss([0.5], mask=-1e3)
y_pred = torch.rand(10, 1, 10)
y = torch.rand(10, 1, 10)
l = loss(y_pred, y).detach().numpy()
dy = (y_pred - y).detach().numpy()
l_ref = 0.5 * np.mean(np.abs(dy))
assert np.isclose(l, l_ref)
y_pred = torch.rand(20, 1, 10)
y_pred[10:] = -2e3
y = torch.rand(20, 1, 10)
y[10:] = -2e3
loss = QuantileLoss([0.5], mask=-1e3)
l = loss(y_pred, y).detach().numpy()
l_ref = loss(y_pred[:10], y[:10]).detach().numpy()
assert np.isclose(l, l_ref)
def test_cross_entropy_loss():
"""
Test masking for cross entropy loss.
"""
set_default_backend("pytorch")
y_pred = torch.rand(10, 10, 10)
y = torch.ones(10, 1, 10)
bins = np.linspace(0, 1, 11)
y[:, 0, :] = 0.55
loss = CrossEntropyLoss(bins, mask=-1.0)
ref = -y_pred[:, 5, :] + torch.log(torch.exp(y_pred).sum(1))
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
y[5:, :, :] = -1.0
y[:, :, 5:] = -1.0
ref = -y_pred[:5, 5, :5] + torch.log(torch.exp(y_pred[:5, :, :5]).sum(1))
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
def test_mse_loss():
"""
Test masking for cross entropy loss.
"""
set_default_backend("pytorch")
y_pred = torch.rand(10, 10, 10)
y = torch.ones(10, 10, 10)
y[:, 0, :] = 0.55
loss = MSELoss(mask=-1.0)
ref = ((y_pred - y) ** 2).mean()
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
y[5:, :, :] = -1.0
y[:, :, 5:] = -1.0
ref = ((y_pred[:5, :, :5] - y[:5, :, :5]) ** 2).mean()
assert np.all(np.isclose(loss(y_pred, y).detach().numpy(),
ref.mean().detach().numpy()))
def test_qrnn_training_with_dataloader():
"""
Ensure that training with a pytorch dataloader works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
training_data = torch.utils.data.TensorDataset(torch.tensor(x),
torch.tensor(y))
training_loader = torch.utils.data.DataLoader(training_data, batch_size=128)
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
qrnn.train(training_loader, n_epochs=1)
def test_qrnn_training_with_dict():
"""
Ensure that training with batch objects as dicts works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
qrnn.train(batched_data, n_epochs=1)
def test_qrnn_training_with_dict_and_keys():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"x_2": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
qrnn.train(batched_data, n_epochs=1, keys=("x", "y"))
def test_qrnn_training_metrics():
"""
Ensure that training with a single target and metrics works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"x_2": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
metrics = ["Bias", "MeanSquaredError", "CRPS"]
qrnn.train(batched_data, n_epochs=1, keys=("x", "y"), metrics=metrics)
def test_drnn_training_metrics():
"""
Ensure that training with a single target and metrics works.
"""
set_default_backend("pytorch")
x = np.random.rand(1024, 16)
bins = np.arange(128 * 8)
y = _to_categorical(np.random.rand(1024), bins)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"x_2": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": torch.tensor(y[i * 128: (i + 1) * 128]),
}
for i in range(1024 // 128)
]
drnn = DRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
metrics = ["Bias", "MeanSquaredError", "CRPS"]
drnn.train(batched_data, n_epochs=1, keys=("x", "y"), metrics=metrics)
def test_training_multiple_outputs():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
class MultipleOutputModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(16, 128)
self.head_1 = nn.Linear(128, 11)
self.head_2 = nn.Linear(128, 11)
def forward(self, x):
x = torch.relu(self.hidden(x))
y_1 = self.head_1(x)
y_2 = self.head_2(x)
return {
"y_1": y_1,
"y_2": y_2
}
x = np.random.rand(1024, 16)
y = np.random.rand(1024)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128]),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128])
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
qrnn = QRNN(np.linspace(0.05, 0.95, 11), model=model)
qrnn.train(batched_data, n_epochs=5, keys=("x", "y"))
def test_training_metrics_multi():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
class MultipleOutputModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(16, 128)
self.head_1 = nn.Linear(128, 11)
self.head_2 = nn.Linear(128, 11)
def forward(self, x):
x = torch.relu(self.hidden(x))
y_1 = self.head_1(x)
y_2 = self.head_2(x)
return {
"y_1": y_1,
"y_2": y_2
}
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128]),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
bins = np.linspace(0, 1, 12)
bins = {"y_1": bins, "y_2": bins}
qrnn = DRNN(bins=bins, model=model)
metrics = ["Bias", "MeanSquaredError", "CRPS", "ScatterPlot", "QuantileFunction"]
qrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_training_multi_mrnn():
"""
Ensure that training with batch objects as dicts and provided keys
argument works.
"""
set_default_backend("pytorch")
class MultipleOutputModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(16, 128)
self.head_1 = nn.Linear(128, 10)
self.head_2 = nn.Linear(128, 1)
self.head_3 = nn.Linear(128, 20)
def forward(self, x):
x = torch.relu(self.hidden(x))
y_1 = self.head_1(x)
y_2 = self.head_2(x)
y_3 = self.head_3(x)
return {
"y_1": y_1,
"y_2": y_2,
"y_3": y_3
}
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]).to(torch.float32),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32),
"y_3": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
bins = np.linspace(0, 1, 12)
bins = {"y_1": bins, "y_2": bins}
losses = {
"y_1": Quantiles(np.linspace(0.05, 0.95, 10)),
"y_2": Mean(),
"y_3": Density(np.linspace(-2, 2, 21))
}
mrnn = MRNN(losses=losses, model=model)
mrnn.train(batched_data, n_epochs=1)
def test_training_transformation():
"""
Ensure that training in transformed space works.
"""
set_default_backend("pytorch")
class MultipleOutputModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(16, 128)
self.head_1 = nn.Linear(128, 11)
self.head_2 = nn.Linear(128, 11)
def forward(self, x):
x = torch.relu(self.hidden(x))
y_1 = self.head_1(x)
y_2 = self.head_2(x)
return {
"y_1": y_1,
"y_2": y_2
}
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128]),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128]),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
transformations = {
"y_1": Log10(),
"y_2": None
}
qrnn = QRNN(np.linspace(0.05, 0.95, 11), model=model,
transformation=transformations)
metrics = ["Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"]
qrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_training_transformation_mrnn_quantiles():
"""
Ensure that training in transformed space works.
"""
set_default_backend("pytorch")
class MultipleOutputModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(16, 128)
self.head_1 = nn.Linear(128, 10)
self.head_2 = nn.Linear(128, 1)
self.head_3 = nn.Linear(128, 10)
def forward(self, x):
x = torch.relu(self.hidden(x))
y_1 = self.head_1(x)
y_2 = self.head_2(x)
y_3 = self.head_3(x)
return {
"y_1": y_1,
"y_2": y_2,
"y_3": y_3
}
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32),
"y_3": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
transformations = {
"y_1": Log10(),
"y_2": Log10()
}
losses = {
"y_1": Quantiles(np.linspace(0.05, 0.95, 10)),
"y_2": Mean(),
"y_3": Density(np.linspace(-2, 2, 11))
}
mrnn = MRNN(losses=losses, model=model)
metrics = ["Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"]
mrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_training_transformation_mrnn_density():
"""
Ensure that training in transformed space works.
"""
set_default_backend("pytorch")
class MultipleOutputModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(16, 128)
self.head_1 = nn.Linear(128, 10)
self.head_2 = nn.Linear(128, 1)
def forward(self, x):
x = torch.relu(self.hidden(x))
y_1 = self.head_1(x)
y_2 = self.head_2(x)
return {
"y_1": y_1,
"y_2": y_2
}
x = np.random.rand(2024, 16) + 1.0
y = np.sum(x, axis=-1)
y += np.random.normal(size=y.size)
batched_data = [
{
"x": torch.tensor(x[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y": {
"y_1": torch.tensor(y[i * 128: (i + 1) * 128],
dtype=torch.float32),
"y_2": torch.tensor(y[i * 128: (i + 1) * 128] ** 2,
dtype=torch.float32)
}
}
for i in range(1024 // 128)
]
model = MultipleOutputModel()
transformations = {
"y_1": Log10(),
"y_2": Log10()
}
losses = {
"y_1": Density(np.linspace(0.05, 0.95, 11)),
"y_2": Mean()
}
mrnn = MRNN(losses=losses, model=model)
metrics = ["Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"]
mrnn.train(batched_data,
validation_data=batched_data,
n_epochs=5, keys=("x", "y"),
metrics=metrics)
def test_qrnn_training_metrics_conv():
"""
E
"""
set_default_backend("pytorch")
x_train = np.random.rand(1024, 16, 32, 32,)
y_train = np.random.rand(1024, 1, 32, 32)
x_val = np.random.rand(32, 16, 32, 32,)
y_val = np.random.rand(32, 1, 32, 32)
training_data = torch.utils.data.TensorDataset(torch.tensor(x_train),
torch.tensor(y_train))
training_loader = torch.utils.data.DataLoader(training_data, batch_size=128)
validation_data = torch.utils.data.TensorDataset(torch.tensor(x_val),
torch.tensor(y_val))
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=1)
model = nn.Sequential(
nn.Conv2d(16, 10, 1)
)
qrnn = QRNN(np.linspace(0.05, 0.95, 10), model=model)
metrics = ["Bias", "MeanSquaredError", "CRPS", "CalibrationPlot", "ScatterPlot"]
qrnn.train(training_loader,
validation_data=validation_loader,
n_epochs=2,
metrics=metrics,
batch_size=1,
mask=-1)
| 2,387
| 96
| 528
|
32821109522c40bd1a02449a60d21db007e94187
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_internal/commands/wheel.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_internal/commands/wheel.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_internal/commands/wheel.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c0/a1/92/92cb988e38603986bf8c3eae95a2a93d7694ccf8b2cdf44d353e033a2a
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/c0/a1/92/92cb988e38603986bf8c3eae95a2a93d7694ccf8b2cdf44d353e033a2a
| 0
| 0
| 0
|
c2f0e9d7331b991b80a07c7ef1b8d1683d218eff
| 3,383
|
py
|
Python
|
src/core/context_processors.py
|
compressore/moc
|
8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b
|
[
"MIT"
] | null | null | null |
src/core/context_processors.py
|
compressore/moc
|
8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b
|
[
"MIT"
] | null | null | null |
src/core/context_processors.py
|
compressore/moc
|
8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b
|
[
"MIT"
] | null | null | null |
from django.utils import timezone
import pytz
from core.models import RecordRelationship, Project, ProjectDesign, Work, WorkSprint, Notification
from django.conf import settings
| 39.8
| 134
| 0.651493
|
from django.utils import timezone
import pytz
from core.models import RecordRelationship, Project, ProjectDesign, Work, WorkSprint, Notification
from django.conf import settings
def site(request):
permissions = None
open = None
sprints = None
notifications = None
system_name_singular = "city"
system_name_plural = "cities"
urls = {}
is_data_portal = False
if hasattr(request, "project"):
project = Project.objects_unfiltered.get(pk=request.project)
else:
project = Project.objects.get(pk=1)
# So here is the dealio... we have these URLs that are available on all subsites
# because we load them through these urls_xxxxxxx_baseline files. It's very handy.
# However, it means that the url named say 'profile' can be available through
# core:profile, staf:profile, data:profile, etc etc. In order to have this url
# available anywhere without having to concatenate this out of thin air, we create
# these context variables with commonly used urls.
slug = project.get_slug()
urls = {
"PROFILE": slug + ":" + "user",
"LIBRARY_ITEM": slug + ":" + "library_item",
"LIBRARY": slug + ":" + "library",
"FORUM": slug + ":" + "volunteer_forum",
}
if slug == "data" or slug == "cityloops" or slug == "stocks":
is_data_portal = True
elif slug == "islands":
system_name_singular = "island"
system_name_plural = "islands"
is_data_portal = True
if is_data_portal:
urls["LAYER_OVERVIEW"] = slug + ":" + "layer_overview"
urls["LIBRARY_OVERVIEW"] = slug + ":" + "library_overview"
urls["DASHBOARD"] = slug + ":" + "dashboard"
urls["HUB_HARVESTING"] = slug + ":" + "hub_harvesting_space"
urls["DATA_ARTICLE"] = slug + ":" + "article"
urls["SPACE"] = slug + ":" + "referencespace"
urls["GEOJSON"] = slug + ":" + "geojson"
urls["MAP_ITEM"] = slug + ":" + "map_item"
if request.user.is_authenticated and request.user.people:
people = request.user.people
permissions = RecordRelationship.objects.filter(
record_parent_id = request.user.people.id,
record_child = project,
relationship__is_permission = True
)
open = Work.objects.filter(part_of_project=project, status=1, assigned_to__isnull=True).count()
sprints = WorkSprint.objects.filter(projects=project, start_date__lte=timezone.now(), end_date__gte=timezone.now())
notifications = Notification.objects.filter(people=request.user.people, is_read=False)
design = ProjectDesign.objects.select_related("project").get(pk=project)
return {
"MAPBOX_API_KEY": "pk.eyJ1IjoibWV0YWJvbGlzbW9mY2l0aWVzIiwiYSI6ImNqcHA5YXh6aTAxcmY0Mm8yMGF3MGZjdGcifQ.lVZaiSy76Om31uXLP3hw-Q",
"DEBUG": settings.DEBUG,
"CURRENT_PAGE": request.get_full_path(),
"PERMISSIONS": permissions,
"PROJECT": project,
"HEADER_STYLE": design.header,
"DESIGN": design,
"LOGO": design.logo.url if design.logo else None,
"OPEN_TASKS": open,
"SPRINTS": sprints,
"NOTIFICATIONS": notifications,
"SYSTEM_NAME_SINGULAR": system_name_singular,
"SYSTEM_NAME_PLURAL": system_name_plural,
"URLS": urls,
"IS_DATA_PORTAL": is_data_portal,
}
| 3,181
| 0
| 23
|
a5bc516d0f360ab9ea6a27e217851ad013b853e5
| 1,142
|
py
|
Python
|
extras/simple.py
|
bhavinthakar/Doc2Web
|
c8e772bab9ee9158728548f87daa4433847d1941
|
[
"MIT"
] | null | null | null |
extras/simple.py
|
bhavinthakar/Doc2Web
|
c8e772bab9ee9158728548f87daa4433847d1941
|
[
"MIT"
] | null | null | null |
extras/simple.py
|
bhavinthakar/Doc2Web
|
c8e772bab9ee9158728548f87daa4433847d1941
|
[
"MIT"
] | null | null | null |
# gem5 configuration script: minimal single-CPU system running a statically
# linked RISC-V 'hello' binary in syscall-emulation (SE) mode.
import m5
from m5.objects import *
# Create a system (the top-level SimObject container)
system = System()
print(system)
# Setup a clock domain: one 1 GHz source clock with its own voltage domain
system.clk_domain=SrcClockDomain()
system.clk_domain.clock = '1GHz'
system.clk_domain.voltage_domain=VoltageDomain()
# Setup memory mode: 'timing' is required by TimingSimpleCPU; one 512MB range
system.mem_mode='timing'
system.mem_ranges= [AddrRange('512MB')]
# Create a CPU (blocking, one memory request at a time)
system.cpu= TimingSimpleCPU()
# Create a memory bus (system crossbar)
system.membus = SystemXBar()
# Connect CPU to membus — no caches: both ports go straight to the bus.
# NOTE(review): 'slave'/'master' are the legacy gem5 port names; newer gem5
# (>= v21) renames them cpu_side_ports/mem_side_ports — confirm gem5 version.
system.cpu.icache_port=system.membus.slave
system.cpu.dcache_port=system.membus.slave
# Create Interrupt Controller (required even in SE mode)
system.cpu.createInterruptController()
system.system_port=system.membus.slave
# create a memory controller and map it over the whole memory range
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range=system.mem_ranges[0]
system.mem_ctrl.port=system.membus.master
# create a process: the SE-mode workload binary to execute
process =Process()
process.cmd=['tests/test-progs/hello/bin/riscv/linux/hello']
system.cpu.workload= process
system.cpu.createThreads()
# Instantiate the configured object tree and run until the workload exits
root=Root(full_system=False, system=system)
m5.instantiate()
print("Beginning Simulation...")
exit_event=m5.simulate()
print("Exiting @ tick {} because {}".format(m5.curTick(),exit_event.getCause()))
| 23.306122
| 80
| 0.781086
|
# gem5 configuration script: minimal single-CPU system running a statically
# linked RISC-V 'hello' binary in syscall-emulation (SE) mode.
import m5
from m5.objects import *
# Create a system (the top-level SimObject container)
system = System()
print(system)
# Setup a clock domain: one 1 GHz source clock with its own voltage domain
system.clk_domain=SrcClockDomain()
system.clk_domain.clock = '1GHz'
system.clk_domain.voltage_domain=VoltageDomain()
# Setup memory mode: 'timing' is required by TimingSimpleCPU; one 512MB range
system.mem_mode='timing'
system.mem_ranges= [AddrRange('512MB')]
# Create a CPU (blocking, one memory request at a time)
system.cpu= TimingSimpleCPU()
# Create a memory bus (system crossbar)
system.membus = SystemXBar()
# Connect CPU to membus — no caches: both ports go straight to the bus.
# NOTE(review): 'slave'/'master' are the legacy gem5 port names; newer gem5
# (>= v21) renames them cpu_side_ports/mem_side_ports — confirm gem5 version.
system.cpu.icache_port=system.membus.slave
system.cpu.dcache_port=system.membus.slave
# Create Interrupt Controller (required even in SE mode)
system.cpu.createInterruptController()
system.system_port=system.membus.slave
# create a memory controller and map it over the whole memory range
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range=system.mem_ranges[0]
system.mem_ctrl.port=system.membus.master
# create a process: the SE-mode workload binary to execute
process =Process()
process.cmd=['tests/test-progs/hello/bin/riscv/linux/hello']
system.cpu.workload= process
system.cpu.createThreads()
# Instantiate the configured object tree and run until the workload exits
root=Root(full_system=False, system=system)
m5.instantiate()
print("Beginning Simulation...")
exit_event=m5.simulate()
print("Exiting @ tick {} because {}".format(m5.curTick(),exit_event.getCause()))
| 0
| 0
| 0
|
098f582579cf6ca68568c151f9800f4996a2b0ac
| 2,720
|
py
|
Python
|
app/models.py
|
lpe234/bms
|
20a2242c9c5e52b099c436eac7addfede57c3a6e
|
[
"MIT"
] | 1
|
2019-05-01T08:11:59.000Z
|
2019-05-01T08:11:59.000Z
|
app/models.py
|
lpe234/bms
|
20a2242c9c5e52b099c436eac7addfede57c3a6e
|
[
"MIT"
] | null | null | null |
app/models.py
|
lpe234/bms
|
20a2242c9c5e52b099c436eac7addfede57c3a6e
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy
from sqlalchemy.ext.declarative import declared_attr
from app import bms_app
from app.utils import password_encrypt, password_check
__author__ = 'lpe234'
db = SQLAlchemy(bms_app)
| 28.041237
| 117
| 0.673897
|
# -*- coding: UTF-8 -*-
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy
from sqlalchemy.ext.declarative import declared_attr
from app import bms_app
from app.utils import password_encrypt, password_check
__author__ = 'lpe234'
db = SQLAlchemy(bms_app)
class IDModelMixin(object):
    """Mixin providing an auto-increment integer primary key, a derived
    table name, and repr/str helpers for declarative models."""
    id = db.Column(db.Integer, comment='ID', primary_key=True, autoincrement=True)
    @declared_attr
    def __tablename__(cls):
        # table name = configured TABLE_PREFIX + lower-cased class name
        return '{}{}'.format(bms_app.config.get('TABLE_PREFIX'), cls.__name__.lower())
    def __repr__(self):
        # e.g. '<User: 3>'
        return '<{}: {}>'.format(self.__class__.__name__, self.id)
    def __str__(self):
        return self.__repr__()
class BaseModel(IDModelMixin):
    """Common audit columns: creation/update timestamps plus a free-form
    remarks field, on top of IDModelMixin's primary key."""
    # set once at INSERT time via func.now()
    create_at = db.Column(db.DateTime, comment='创建时间', default=sqlalchemy.func.now())
    # refreshed on every UPDATE via the onupdate hook
    update_at = db.Column(db.DateTime, comment='更新时间', default=sqlalchemy.func.now(), onupdate=sqlalchemy.func.now())
    remarks = db.Column(db.String(length=64), comment='备注')
class User(db.Model, BaseModel):
    """User account model.

    Stores only a password hash (never the plaintext); writing goes through
    the `password` property setter. The is_authenticated / is_active /
    is_anonymous / get_id members match the interface flask-login expects —
    presumably this app uses flask-login; confirm against the login setup.
    """
    username = db.Column(db.String(length=32), comment='用户名', nullable=False, unique=True)
    email = db.Column(db.String(length=32), comment='邮箱', nullable=False, unique=True)
    # hashed password; set via the `password` property below
    password_hash = db.Column(db.String(length=64), comment='密码', nullable=False)
    active = db.Column(db.Boolean, comment='是否有效')
    admin = db.Column(db.Boolean, comment='是否为管理员', default=False)
    def __init__(self, username, email, password):
        # new accounts start inactive until explicitly activated
        self.username = username
        self.email = email
        self.password = password
        self.active = False
    @property
    def password(self):
        # NOTE(review): returns the stored *hash*, not the plaintext —
        # callers must not treat this value as the raw password.
        return self.password_hash
    @password.setter
    def password(self, password):
        # falsy passwords are silently ignored (hash left unchanged)
        if password:
            self.password_hash = password_encrypt(password.encode('utf-8'))
    def check_password(self, password):
        """Return True if *password* matches the stored hash; False for empty input."""
        if not password:
            return False
        return password_check(password, self.password_hash.encode('utf-8'))
    @property
    def is_authenticated(self):
        # any instantiated (logged-in) user counts as authenticated
        return True
    @property
    def is_active(self):
        return self.active
    @property
    def is_anonymous(self):
        return False
    def get_id(self):
        # unique identifier used by the session layer
        return self.id
class Book(db.Model, BaseModel):
    """Book record: title, cover image path, author, publisher, ISBN,
    brand, English title and first-publication date."""
    name = db.Column(db.String(length=64), comment='名称', nullable=False, index=True)
    # path/URL of the cover image — presumably a static-file reference; verify against upload code
    cover_img = db.Column(db.String(length=128), comment='封面图')
    author = db.Column(db.String(length=64), comment='作者')
    publish = db.Column(db.String(length=64), comment='出版社')
    isbn = db.Column(db.String(length=64), comment='ISBN')
    brand = db.Column(db.String(length=64), comment='品牌')
    ename = db.Column(db.String(length=64), comment='英文名称')
    pubdate = db.Column(db.Date, comment='初版时间')
| 699
| 1,744
| 92
|
234a4a20bf9e718a162a6d6036e897a3e1733925
| 147
|
py
|
Python
|
tardis/rest/app/database.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 4
|
2018-05-22T13:22:06.000Z
|
2019-03-26T15:32:57.000Z
|
tardis/rest/app/database.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 50
|
2018-05-18T11:46:39.000Z
|
2019-04-26T07:29:45.000Z
|
tardis/rest/app/database.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 2
|
2018-12-12T13:15:59.000Z
|
2018-12-17T08:18:15.000Z
|
from ...plugins.sqliteregistry import SqliteRegistry
| 21
| 52
| 0.77551
|
from ...plugins.sqliteregistry import SqliteRegistry
def get_sql_registry():
    """Build one SqliteRegistry and return a zero-argument provider for it.

    The registry is created eagerly here; the returned callable simply hands
    back that same instance on every call.
    """
    registry = SqliteRegistry()

    def _provide():
        return registry

    return _provide
| 70
| 0
| 23
|
bd731915d7f0dee07d213b30dfcc9e9bb1ace80a
| 130
|
py
|
Python
|
intro-neural-networks/gradient-descent/guille.py
|
endersonO/IA-python-nano-degree
|
b156293f15b212596ccaa381aa8d254c6895f458
|
[
"MIT"
] | null | null | null |
intro-neural-networks/gradient-descent/guille.py
|
endersonO/IA-python-nano-degree
|
b156293f15b212596ccaa381aa8d254c6895f458
|
[
"MIT"
] | 3
|
2021-05-21T16:02:16.000Z
|
2022-02-10T00:25:48.000Z
|
intro-neural-networks/gradient-descent/guille.py
|
endersonC/IA-python-nano-degree
|
b156293f15b212596ccaa381aa8d254c6895f458
|
[
"MIT"
] | null | null | null |
import numpy as np

# Read the two weights and the bias as numbers. In Python 3 input() returns
# a string, so the original arithmetic (w1*0.4 + ...) raised a TypeError.
w1 = float(input("W1 :"))
w2 = float(input("W2 :"))
b = float(input("b :"))

# Linear combination of the fixed inputs (0.4, 0.6) plus bias,
# followed by a sigmoid activation.
y = w1 * 0.4 + w2 * 0.6 + b
yy = 1 / (1 + np.exp(-y))
print(y)
print(yy)
| 16.25
| 19
| 0.561538
|
import numpy as np

# Read the two weights and the bias as numbers. In Python 3 input() returns
# a string, so the original arithmetic (w1*0.4 + ...) raised a TypeError.
w1 = float(input("W1 :"))
w2 = float(input("W2 :"))
b = float(input("b :"))

# Linear combination of the fixed inputs (0.4, 0.6) plus bias,
# followed by a sigmoid activation.
y = w1 * 0.4 + w2 * 0.6 + b
yy = 1 / (1 + np.exp(-y))
print(y)
print(yy)
| 0
| 0
| 0
|
a42482c44d1d7aeb5df6a64ed954fb34b471ab90
| 13,561
|
py
|
Python
|
AMI/NRM_simulator/pyami/simcode/utils.py
|
AntoineDarveau/jwst-mtl
|
a849910e14194804a660b3897dd8bdb25b0daf46
|
[
"MIT"
] | 1
|
2022-02-04T13:59:18.000Z
|
2022-02-04T13:59:18.000Z
|
AMI/NRM_simulator/pyami/simcode/utils.py
|
AntoineDarveau/jwst-mtl
|
a849910e14194804a660b3897dd8bdb25b0daf46
|
[
"MIT"
] | 12
|
2020-09-17T20:14:03.000Z
|
2022-03-21T21:16:43.000Z
|
AMI/NRM_simulator/pyami/simcode/utils.py
|
AntoineDarveau/jwst-mtl
|
a849910e14194804a660b3897dd8bdb25b0daf46
|
[
"MIT"
] | 1
|
2020-09-18T15:25:52.000Z
|
2020-09-18T15:25:52.000Z
|
#!/usr/bin/env python
# original code by Thatte, Anand, Sahlmann, Greenbaum
# utility routines and constants for AMI image simulations reorganized by Sahlmann, Anand 3/2016
# anand@stsci.edu 18 Mar 2016
"""
"""
import numpy as np
import sys, time
from astropy.io import fits
import os
# global variables
global readnoise,background,darkcurrent
cdsreadnoise = 21.0 # CDS read noise (e-)
readnoise = cdsreadnoise/np.sqrt(2) # read noise for one frame
# 0.012 e-/sec value used until 09 2016
darkcurrent = 0.04 # ~0.12 e-/sec 09/2016, 10x earlier, still 6e- in max 800 frames
# Kevin Volk via Deepashri Thatte
background = 0.462*0.15*1.75 # 0.125 e-/sec
ips_size = 256 # holdover from before AMISUB became 80x80
flat_sigma = 0.001 # flat field error
pixscl = 0.0656 # arcsec/pixel WebbPSF 0.064 - DL 0.065
tframe = 0.0745 # frame time for NISRAPID on AMI SUB80
amisubfov = 80
SAT_E = 72.0e3 # Fullerton December 20, 2016 e-mail. Also consistent with STScI JWST ETC
#ither_stddev_as = 0.0015 # 15 mas placement error one-axis
#itter_stddev_as = 0.007 # 7 mas level 2 reqt on JWST, arcsec,
#ither_stddev_as = 0.0015 # Anand Alex detectionlimits
#itter_stddev_as = 0.0001 # Anand Alex centering Also for Stefenie test reductiondetectionlimits
dither_stddev_as = 0.005 # Goudfrooij Sep 2 2016 email to anand@ - good to SAMs of 30 arcsec
jitter_stddev_as = 0.004 # NEA ~1mas jitter FGS, plus other slower error, Kevin 2016.09.16
# Post-flight determination required for more realism in simulations...
# In practise expert reduction should do rapid centroiding
# (as in Holfeltz et al. TRs) through all integrations to
# determine the level of jitter, and calculate CPs in reasonable
# subsets of these integrations.
# Anand's email 2016-02-10 orginally from Volk
F277W, F380M, F430M, F480M = ("F277W", "F380M", "F430M", "F480M")
ZP = {F277W: 26.14,
F380M: 23.75,
F430M: 23.32,
F480M: 23.19} # replace w/Neil R.'s values consistent w/ STScI
debug_utils = False
# debug_utils = True
def get_flatfield(detshape, pyamiDataDir, uniform=False, random_seed=None, overwrite=0):
    """
    Read in a flat field that possesses the requested flat field error standard deviation,
    or if the file does not exist, create, write, and return it.

    Parameters:
        detshape    : (rows, cols) detector shape
        pyamiDataDir: directory holding (or receiving) the cached FITS flat
        uniform     : if True, build a perfectly flat (all-ones) field
        random_seed : seed for the normally distributed pixel-to-pixel errors
        overwrite   : nonzero forces regeneration even if the cache file exists
    Returns:
        pflat : 2-D ndarray flat field (mean 1.0, stddev flat_sigma unless uniform)
    """
    ffe_file = os.path.join(pyamiDataDir, 'flat_%dx%d_sigma_%.4f.fits' % (detshape[0], detshape[1], flat_sigma))
    # use the cached file unless regeneration was requested
    if os.access(ffe_file, os.F_OK) and overwrite == 0:
        pflat = fits.getdata(ffe_file)
    else:
        if uniform:
            pflat = np.ones(detshape)
        else:
            if random_seed is not None:
                np.random.seed(random_seed)
            pflat = np.random.normal(1.0, flat_sigma, size=detshape)
        print("creating flat field and saving it to file %s" % ffe_file)
        fitsobj = fits.HDUList()
        hdu = fits.PrimaryHDU()
        hdu.header['DATE'] = (time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime()),
                              'Date of calculation')
        hdu.header['AUTHOR'] = ('%s@%s' % (os.getenv('USER'), os.getenv('HOST')),
                                'username@host for calculation')
        hdu.data = pflat
        fitsobj.append(hdu)
        # astropy renamed 'clobber' to 'overwrite' (clobber removed in astropy 2.0)
        fitsobj.writeto(ffe_file, overwrite=True)
        fitsobj.close()
    return pflat
# fast rebin Klaus Pontooppidan found on the web
# legacy slow rebin rewritten to use fast rebin
def jitter(no_of_jitters, osample, random_seed=None):
    """Draw integer pointing-jitter offsets, in oversampled pixel units.

    no_of_jitters is known as nint in STScI terminology.
    """
    # convert the one-axis jitter stddev from arcsec to oversampled pixels
    sigma = jitter_stddev_as * osample / pixscl
    if random_seed is not None:
        np.random.seed(random_seed)
    x_offsets = np.random.normal(0, sigma, no_of_jitters)
    if random_seed is not None:
        # shift the seed so X and Y draws stay independent yet reproducible
        np.random.seed(random_seed + 1)
    y_offsets = np.random.normal(0, sigma, no_of_jitters)
    x_rounded = [int(round(v, 0)) for v in x_offsets]
    y_rounded = [int(round(v, 0)) for v in y_offsets]
    return x_rounded, y_rounded
def create_ramp(countspersec, _fov, ngroups, utr_,verbose=0, include_noise=1,random_seed=None):
"""
input counts per second
output: ramp has ngroups+1 slices, units are detected e- + noise
create_ramp() called nint number of times to provide nint ramps
Noise contributions can be switched off by setting include_noise = 0
"""
global readnoise,background,darkcurrent
# JSA 2017-02-22: investigate effects of various noise contributions
if include_noise == -1:
# zero all noises except photon noise
background = 0.
readnoise = 1.e-16
darkcurrent = 0.
include_noise = 1
if utr_ :
nreadouts = ngroups + 1
timestep = tframe
else:
if ngroups > 1:
nreadouts = 3
timestep = (ngroups-1) * tframe
else:
nreadouts = 2
readnoise_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
background_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
dark_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
poisson_noise_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
cumulative_poisson_noise_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
ramp = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
if (debug_utils) | (verbose):
print("\tcreate_ramp(): ngroups", ngroups, end=' ')
print(" countspersec.sum() = %.2e"%countspersec.sum(), end=' ')
print(" countsperframe = %.2e"%(countspersec.sum()*tframe))
#calculate poisson noise for single reads, then calculate poisson noise for reads up-the-ramp
for iread in range(nreadouts):
if iread == 0:
if include_noise == 0:
ramp[iread,:,:] = np.zeros( (int(_fov),int(_fov)) )
else:
if random_seed is not None:
np.random.seed(random_seed+111)
readnoise_cube[iread,:,:] = np.random.normal(0, readnoise, (int(_fov),int(_fov)))
ramp[iread,:,:] = readnoise_cube[iread,:,:].mean()
if (debug_utils) | (verbose):
print("\t\tpoissoncube slice %2d: %.2e"%(iread, poisson_noise_cube[iread,:,:].sum()), end=' ')
print("poissoncube total %.2e"%poisson_noise_cube.sum())
elif iread == 1:
photonexpectation = countspersec * tframe
photonexpectation[photonexpectation <0.0] = 0.0 # catch roundoff to e-13
if include_noise == 0:
ramp[iread,:,:] = photonexpectation
else:
if random_seed is not None:
# the noise in different frames should be independent, therefore modify random_seed between frames and between poisson and gaussian noise
np.random.seed(random_seed+iread+111)
poisson_noise_cube[iread,:,:] = np.random.poisson(photonexpectation) # expose for tframe
background_cube[iread,:,:] = background * tframe
dark_cube[iread,:,:] = darkcurrent * tframe
if random_seed is not None:
np.random.seed(random_seed+iread+111+10)
readnoise_cube[iread,:,:] = np.random.normal(0, readnoise, (int(_fov),int(_fov)))
ramp[iread,:,:] = ramp[iread-1,:,:] + \
poisson_noise_cube[iread,:,:] + \
dark_cube[iread,:,:] + \
readnoise_cube[iread,:,:]
if (debug_utils) | (verbose):
print("\t\tpoissoncube slice %2d: %.2e"%(iread, poisson_noise_cube[iread,:,:].sum()), end=' ')
print("poissoncube total %.2e"%poisson_noise_cube.sum())
else:
photonexpectation = countspersec * timestep
photonexpectation[photonexpectation <0.0] = 0.0
if include_noise == 0:
ramp[iread,:,:] = photonexpectation
else:
if random_seed is not None:
np.random.seed(random_seed + iread+111)
poisson_noise_cube[iread,:,:] = np.random.poisson(photonexpectation) # expose for tframe or (ng-1)*tframe
background_cube[iread,:,:] = background * timestep
dark_cube[iread,:,:] = darkcurrent * timestep
if random_seed is not None:
np.random.seed(random_seed + iread+111+10)
readnoise_cube[iread,:,:] = np.random.normal(0, readnoise, (int(_fov),int(_fov)))
ramp[iread,:,:] = ramp[iread-1,:,:] + \
poisson_noise_cube[iread,:,:] + \
dark_cube[iread,:,:] + \
readnoise_cube[iread,:,:]
if (debug_utils) | (verbose):
print("\t\tpoissoncube slice %2d: %.2e"%(iread, poisson_noise_cube[iread,:,:].sum()), end=' ')
print("poissoncube total %.2e"%poisson_noise_cube.sum())
if (debug_utils) | (verbose):
s = "%.1e"
print("\tpoissoncube total = %.1e" % poisson_noise_cube.sum()) # requested nphot / nint
print("\tramp last slice total = %.1e" % ramp[-1,:,:].sum()) # approx same as above
#print "\tramp last slice peak = %.1e" % ramp[-1,:,:].max() #should be ~sat_e typically
for i in range(ramp.shape[0]):
print("\t", s%ramp[i,:,:].sum(), ":", s%ramp[i,:,:].max(), end=' ')
print("\n\tcreate_ramp: end")
return ramp
def create_integration(ramp):
    """
    input: ramp in e-, including 'zero read', ngroups+1 2D slices
    output: data in detected e-
    """
    nslices = ramp.shape[0]
    if debug_utils:
        fmt = "%.1e"
        for k in range(nslices):
            print(" ", fmt % ramp[k, :, :].sum(), end=' ')
        print("\n\tcreate_integration: end")
    if nslices == 2:
        # two reads: take the last read as-is (zero-read readnoise/DC not subtracted)
        data = ramp[1, :, :]
    if nslices > 2:
        # subtract the first post-reset read from the last read
        data = ramp[-1, :, :] - ramp[1, :, :]
    return data
# old, now ...unused.. 09/2016
"""
def find_slope(utr, ngroups, fov):
xval = np.zeros((ngroups+1,int(fov),int(fov)), np.float64)
slope = np.zeros((int(fov),int(fov)), np.float64)
for i in range(ngroups+1):
xval[i]=i
xm=float(ngroups)/2.0
slope = (np.sum(xval*utr,axis=0)-xm*np.sum(utr,axis=0))/(np.sum(xval**2,axis=0)-ngroups*xm**2)
return slope
"""
#origin is at bottom left of the image. (ds9?)
#padding of 1 for IPS to avoid division by 0 when divided by IPS flat.
| 42.510972
| 157
| 0.570091
|
#!/usr/bin/env python
# original code by Thatte, Anand, Sahlmann, Greenbaum
# utility routines and constants for AMI image simulations reorganized by Sahlmann, Anand 3/2016
# anand@stsci.edu 18 Mar 2016
"""
"""
import numpy as np
import sys, time
from astropy.io import fits
import os
# global variables
global readnoise,background,darkcurrent
cdsreadnoise = 21.0 # CDS read noise (e-)
readnoise = cdsreadnoise/np.sqrt(2) # read noise for one frame
# 0.012 e-/sec value used until 09 2016
darkcurrent = 0.04 # ~0.12 e-/sec 09/2016, 10x earlier, still 6e- in max 800 frames
# Kevin Volk via Deepashri Thatte
background = 0.462*0.15*1.75 # 0.125 e-/sec
ips_size = 256 # holdover from before AMISUB became 80x80
flat_sigma = 0.001 # flat field error
pixscl = 0.0656 # arcsec/pixel WebbPSF 0.064 - DL 0.065
tframe = 0.0745 # frame time for NISRAPID on AMI SUB80
amisubfov = 80
SAT_E = 72.0e3 # Fullerton December 20, 2016 e-mail. Also consistent with STScI JWST ETC
#ither_stddev_as = 0.0015 # 15 mas placement error one-axis
#itter_stddev_as = 0.007 # 7 mas level 2 reqt on JWST, arcsec,
#ither_stddev_as = 0.0015 # Anand Alex detectionlimits
#itter_stddev_as = 0.0001 # Anand Alex centering Also for Stefenie test reductiondetectionlimits
dither_stddev_as = 0.005 # Goudfrooij Sep 2 2016 email to anand@ - good to SAMs of 30 arcsec
jitter_stddev_as = 0.004 # NEA ~1mas jitter FGS, plus other slower error, Kevin 2016.09.16
# Post-flight determination required for more realism in simulations...
# In practise expert reduction should do rapid centroiding
# (as in Holfeltz et al. TRs) through all integrations to
# determine the level of jitter, and calculate CPs in reasonable
# subsets of these integrations.
# Anand's email 2016-02-10 orginally from Volk
F277W, F380M, F430M, F480M = ("F277W", "F380M", "F430M", "F480M")
ZP = {F277W: 26.14,
F380M: 23.75,
F430M: 23.32,
F480M: 23.19} # replace w/Neil R.'s values consistent w/ STScI
debug_utils = False
# debug_utils = True
def get_flatfield(detshape, pyamiDataDir, uniform=False, random_seed=None, overwrite=0):
    """
    Read in a flat field that possesses the requested flat field error standard deviation,
    or if the file does not exist, create, write, and return it.

    Parameters:
        detshape    : (rows, cols) detector shape
        pyamiDataDir: directory holding (or receiving) the cached FITS flat
        uniform     : if True, build a perfectly flat (all-ones) field
        random_seed : seed for the normally distributed pixel-to-pixel errors
        overwrite   : nonzero forces regeneration even if the cache file exists
    Returns:
        pflat : 2-D ndarray flat field (mean 1.0, stddev flat_sigma unless uniform)
    """
    ffe_file = os.path.join(pyamiDataDir, 'flat_%dx%d_sigma_%.4f.fits' % (detshape[0], detshape[1], flat_sigma))
    # use the cached file unless regeneration was requested
    if os.access(ffe_file, os.F_OK) and overwrite == 0:
        pflat = fits.getdata(ffe_file)
    else:
        if uniform:
            pflat = np.ones(detshape)
        else:
            if random_seed is not None:
                np.random.seed(random_seed)
            pflat = np.random.normal(1.0, flat_sigma, size=detshape)
        print("creating flat field and saving it to file %s" % ffe_file)
        fitsobj = fits.HDUList()
        hdu = fits.PrimaryHDU()
        hdu.header['DATE'] = (time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime()),
                              'Date of calculation')
        hdu.header['AUTHOR'] = ('%s@%s' % (os.getenv('USER'), os.getenv('HOST')),
                                'username@host for calculation')
        hdu.data = pflat
        fitsobj.append(hdu)
        # astropy renamed 'clobber' to 'overwrite' (clobber removed in astropy 2.0)
        fitsobj.writeto(ffe_file, overwrite=True)
        fitsobj.close()
    return pflat
# fast rebin Klaus Pontooppidan found on the web
def krebin(a, shape):
    """Fast block-sum rebin of 2-D array *a* down to *shape* = (rows, cols).

    Each output pixel is the sum of an (in_rows//rows) x (in_cols//cols)
    block of input pixels.
    """
    rows, cols = shape
    row_factor = a.shape[0] // rows
    col_factor = a.shape[1] // cols
    blocked = a.reshape(rows, row_factor, cols, col_factor)
    return blocked.sum(axis=(1, 3))
# legacy slow rebin rewritten to use fast rebin
def rebin(a = None, rc=(2,2), verbose=None):
    """Block-sum rebin of *a* by integer factors rc=(row_factor, col_factor).

    Thin wrapper around krebin(); *verbose* is accepted for backward
    compatibility but unused.
    """
    row_factor, col_factor = rc
    nrows, ncols = a.shape
    target = (int(nrows // row_factor), int(ncols // col_factor))
    return krebin(a, target)
def jitter(no_of_jitters, osample, random_seed=None):
    """Draw integer pointing-jitter offsets, in oversampled pixel units.

    no_of_jitters is known as nint in STScI terminology.
    """
    # convert the one-axis jitter stddev from arcsec to oversampled pixels
    sigma = jitter_stddev_as * osample / pixscl
    if random_seed is not None:
        np.random.seed(random_seed)
    x_offsets = np.random.normal(0, sigma, no_of_jitters)
    if random_seed is not None:
        # shift the seed so X and Y draws stay independent yet reproducible
        np.random.seed(random_seed + 1)
    y_offsets = np.random.normal(0, sigma, no_of_jitters)
    x_rounded = [int(round(v, 0)) for v in x_offsets]
    y_rounded = [int(round(v, 0)) for v in y_offsets]
    return x_rounded, y_rounded
def create_ramp(countspersec, _fov, ngroups, utr_,verbose=0, include_noise=1,random_seed=None):
    """
    Build one up-the-ramp integration cube of detected electrons.

    input counts per second (2-D array, _fov x _fov pixels)
    output: ramp has ngroups+1 slices, units are detected e- + noise
    create_ramp() called nint number of times to provide nint ramps
    Noise contributions can be switched off by setting include_noise = 0;
    include_noise = -1 keeps only photon noise (zeroes the module-level
    readnoise/background/darkcurrent globals for the rest of the process).
    utr_ True  -> simulate every frame (ngroups+1 reads);
    utr_ False -> simulate only zero read, first read, and last read.
    """
    global readnoise,background,darkcurrent
    # JSA 2017-02-22: investigate effects of various noise contributions
    if include_noise == -1:
        # zero all noises except photon noise
        background = 0.
        readnoise = 1.e-16
        darkcurrent = 0.
        include_noise = 1
    if utr_ :
        nreadouts = ngroups + 1
        timestep = tframe
    else:
        if ngroups > 1:
            nreadouts = 3
            timestep = (ngroups-1) * tframe
        else:
            # only 2 readouts here, so the iread>=2 branch (which uses
            # timestep) is never reached and timestep can stay unset
            nreadouts = 2
    # per-read noise cubes; NOTE(review): background_cube and
    # cumulative_poisson_noise_cube are filled/allocated but never added
    # into 'ramp' — presumably background is folded into countspersec
    # upstream; confirm with the caller.
    readnoise_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
    background_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
    dark_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
    poisson_noise_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
    cumulative_poisson_noise_cube = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
    ramp = np.zeros((nreadouts,int(_fov),int(_fov)), np.float64)
    if (debug_utils) | (verbose):
        print("\tcreate_ramp(): ngroups", ngroups, end=' ')
        print(" countspersec.sum() = %.2e"%countspersec.sum(), end=' ')
        print(" countsperframe = %.2e"%(countspersec.sum()*tframe))
    #calculate poisson noise for single reads, then calculate poisson noise for reads up-the-ramp
    for iread in range(nreadouts):
        if iread == 0:
            # zero read: read noise only (a scalar mean spread over the frame)
            if include_noise == 0:
                ramp[iread,:,:] = np.zeros( (int(_fov),int(_fov)) )
            else:
                if random_seed is not None:
                    np.random.seed(random_seed+111)
                readnoise_cube[iread,:,:] = np.random.normal(0, readnoise, (int(_fov),int(_fov)))
                ramp[iread,:,:] = readnoise_cube[iread,:,:].mean()
            if (debug_utils) | (verbose):
                print("\t\tpoissoncube slice %2d: %.2e"%(iread, poisson_noise_cube[iread,:,:].sum()), end=' ')
                print("poissoncube total %.2e"%poisson_noise_cube.sum())
        elif iread == 1:
            # first read after reset: one frame time of exposure
            photonexpectation = countspersec * tframe
            photonexpectation[photonexpectation <0.0] = 0.0 # catch roundoff to e-13
            if include_noise == 0:
                ramp[iread,:,:] = photonexpectation
            else:
                if random_seed is not None:
                    # the noise in different frames should be independent, therefore modify random_seed between frames and between poisson and gaussian noise
                    np.random.seed(random_seed+iread+111)
                poisson_noise_cube[iread,:,:] = np.random.poisson(photonexpectation) # expose for tframe
                background_cube[iread,:,:] = background * tframe
                dark_cube[iread,:,:] = darkcurrent * tframe
                if random_seed is not None:
                    np.random.seed(random_seed+iread+111+10)
                readnoise_cube[iread,:,:] = np.random.normal(0, readnoise, (int(_fov),int(_fov)))
                # accumulate onto the previous read (up-the-ramp)
                ramp[iread,:,:] = ramp[iread-1,:,:] + \
                                  poisson_noise_cube[iread,:,:] + \
                                  dark_cube[iread,:,:] + \
                                  readnoise_cube[iread,:,:]
            if (debug_utils) | (verbose):
                print("\t\tpoissoncube slice %2d: %.2e"%(iread, poisson_noise_cube[iread,:,:].sum()), end=' ')
                print("poissoncube total %.2e"%poisson_noise_cube.sum())
        else:
            # later reads: expose for timestep (tframe if utr_, else the
            # remaining (ngroups-1)*tframe in a single jump)
            photonexpectation = countspersec * timestep
            photonexpectation[photonexpectation <0.0] = 0.0
            if include_noise == 0:
                ramp[iread,:,:] = photonexpectation
            else:
                if random_seed is not None:
                    np.random.seed(random_seed + iread+111)
                poisson_noise_cube[iread,:,:] = np.random.poisson(photonexpectation) # expose for tframe or (ng-1)*tframe
                background_cube[iread,:,:] = background * timestep
                dark_cube[iread,:,:] = darkcurrent * timestep
                if random_seed is not None:
                    np.random.seed(random_seed + iread+111+10)
                readnoise_cube[iread,:,:] = np.random.normal(0, readnoise, (int(_fov),int(_fov)))
                ramp[iread,:,:] = ramp[iread-1,:,:] + \
                                  poisson_noise_cube[iread,:,:] + \
                                  dark_cube[iread,:,:] + \
                                  readnoise_cube[iread,:,:]
            if (debug_utils) | (verbose):
                print("\t\tpoissoncube slice %2d: %.2e"%(iread, poisson_noise_cube[iread,:,:].sum()), end=' ')
                print("poissoncube total %.2e"%poisson_noise_cube.sum())
    if (debug_utils) | (verbose):
        s = "%.1e"
        print("\tpoissoncube total = %.1e" % poisson_noise_cube.sum()) # requested nphot / nint
        print("\tramp last slice total = %.1e" % ramp[-1,:,:].sum()) # approx same as above
        #print "\tramp last slice peak = %.1e" % ramp[-1,:,:].max() #should be ~sat_e typically
        for i in range(ramp.shape[0]):
            print("\t", s%ramp[i,:,:].sum(), ":", s%ramp[i,:,:].max(), end=' ')
        print("\n\tcreate_ramp: end")
    return ramp
def create_integration(ramp):
    """
    input: ramp in e-, including 'zero read', ngroups+1 2D slices
    output: data in detected e-
    """
    nslices = ramp.shape[0]
    if debug_utils:
        fmt = "%.1e"
        for k in range(nslices):
            print(" ", fmt % ramp[k, :, :].sum(), end=' ')
        print("\n\tcreate_integration: end")
    if nslices == 2:
        # two reads: take the last read as-is (zero-read readnoise/DC not subtracted)
        data = ramp[1, :, :]
    if nslices > 2:
        # subtract the first post-reset read from the last read
        data = ramp[-1, :, :] - ramp[1, :, :]
    return data
# old, now ...unused.. 09/2016
"""
def find_slope(utr, ngroups, fov):
xval = np.zeros((ngroups+1,int(fov),int(fov)), np.float64)
slope = np.zeros((int(fov),int(fov)), np.float64)
for i in range(ngroups+1):
xval[i]=i
xm=float(ngroups)/2.0
slope = (np.sum(xval*utr,axis=0)-xm*np.sum(utr,axis=0))/(np.sum(xval**2,axis=0)-ngroups*xm**2)
return slope
"""
#origin is at bottom left of the image. (ds9?)
def apply_padding_image(a, e_x, e_y, fov, osample):
    """Shift a 2-D oversampled image by integer offsets (e_x, e_y), zero-filling.

    Positive offsets shift toward higher row/column indices; the result is
    cropped back to at most fov*osample pixels on each shifted-positive axis.
    Origin is at the bottom left of the image.
    """
    dx = int(e_x)
    dy = int(e_y)
    span = fov * osample
    # pad before the axis for a positive shift, after it for a negative one
    pad_rows = (dy, 0) if dy > 0 else (0, -dy)
    pad_cols = (dx, 0) if dx > 0 else (0, -dx)
    padded = np.pad(a, [pad_rows, pad_cols], mode='constant')
    # crop: drop the leading |offset| for negative shifts, keep the first
    # span pixels for positive shifts
    rows = slice(-dy, None) if dy <= 0 else slice(None, span)
    cols = slice(-dx, None) if dx <= 0 else slice(None, span)
    return padded[rows, cols]
#padding of 1 for IPS to avoid division by 0 when divided by IPS flat.
def apply_padding_ips(a, e_x, e_y, fov, osample):
    """Shift a 2-D IPS array by integer offsets (e_x, e_y), padding with ones.

    Same geometry as apply_padding_image(), but the fill value is 1 so that
    a later division by this IPS flat cannot divide by zero.
    """
    dx = int(e_x)
    dy = int(e_y)
    span = fov * osample
    # pad before the axis for a positive shift, after it for a negative one
    pad_rows = (dy, 0) if dy > 0 else (0, -dy)
    pad_cols = (dx, 0) if dx > 0 else (0, -dx)
    padded = np.pad(a, [pad_rows, pad_cols], mode='constant', constant_values=(1, 1))
    # crop: drop the leading |offset| for negative shifts, keep the first
    # span pixels for positive shifts
    rows = slice(-dy, None) if dy <= 0 else slice(None, span)
    cols = slice(-dx, None) if dx <= 0 else slice(None, span)
    return padded[rows, cols]
| 1,699
| 0
| 88
|
3015c1572611e0e4ec2a94a9c9a1ceaa5347af89
| 429
|
py
|
Python
|
Mundo 3/ex097.py
|
lucasjurado/Curso-em-Video
|
1bf6a0a17ecec6be8e06c128f1c1b4aea88164c4
|
[
"MIT"
] | 3
|
2020-05-19T01:22:35.000Z
|
2021-02-11T18:59:12.000Z
|
Mundo 3/ex097.py
|
lucasjurado/Python
|
1bf6a0a17ecec6be8e06c128f1c1b4aea88164c4
|
[
"MIT"
] | null | null | null |
Mundo 3/ex097.py
|
lucasjurado/Python
|
1bf6a0a17ecec6be8e06c128f1c1b4aea88164c4
|
[
"MIT"
] | null | null | null |
# Collect integers from the user, splitting them into even and odd lists.
lista = []
lista_par = []
lista_impar = []
while True:
    n = int(input('Digite um valor: '))
    lista.append(n)
    # classify the value as even or odd
    if n % 2 == 0:
        lista_par.append(n)
    else:
        lista_impar.append(n)
    # Robustness fix: the original indexed [0] on the stripped answer,
    # which raised IndexError when the user just pressed Enter.
    again = input('Quer continuar? [S/N] ').strip().upper()
    if again.startswith('N'):
        break
print(f'Lista completa: {lista}')
print(f'Lista de itens pares: {lista_par}')
print(f'Lista de itens impares: {lista_impar}')
| 25.235294
| 67
| 0.599068
|
# Collect integers from the user, splitting them into even and odd lists.
lista = []
lista_par = []
lista_impar = []
while True:
    n = int(input('Digite um valor: '))
    lista.append(n)
    # classify the value as even or odd
    if n % 2 == 0:
        lista_par.append(n)
    else:
        lista_impar.append(n)
    # Robustness fix: the original indexed [0] on the stripped answer,
    # which raised IndexError when the user just pressed Enter.
    again = input('Quer continuar? [S/N] ').strip().upper()
    if again.startswith('N'):
        break
print(f'Lista completa: {lista}')
print(f'Lista de itens pares: {lista_par}')
print(f'Lista de itens impares: {lista_impar}')
| 0
| 0
| 0
|
443f32108582f6f879690d6ef57c08094c3452d3
| 868
|
py
|
Python
|
BackEnd/todo/models.py
|
rickyrose/LasttoDo
|
05f3f5cf930cb31278bf458bb01220266180bfe8
|
[
"MIT"
] | null | null | null |
BackEnd/todo/models.py
|
rickyrose/LasttoDo
|
05f3f5cf930cb31278bf458bb01220266180bfe8
|
[
"MIT"
] | null | null | null |
BackEnd/todo/models.py
|
rickyrose/LasttoDo
|
05f3f5cf930cb31278bf458bb01220266180bfe8
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class AbstractTimeStamp(models.Model):
    """Abstract TimeStamp Model
    Inherit:
        Model
    Fields:
        created_at : DateTimeField (UnEditable)
        updated_at : DateTimeField (Editable)
    """
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        # Without abstract=True Django would create a concrete table for
        # this base model; it exists only to be inherited.
        abstract = True
class Todo(AbstractTimeStamp):
    """Todo Model
    Inherit:
        AbstractTimeStamp
    Fields:
        user : ForeignKey (User)
        text : CharField
        is_completed : BooleanField
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    text = models.CharField(max_length=100)
    is_completed = models.BooleanField(default=False)

    def __str__(self):
        # human-readable representation (admin, shell, logs)
        return self.text
| 23.459459
| 60
| 0.660138
|
from django.db import models
from django.contrib.auth.models import User
class AbstractTimeStamp(models.Model):
    """Abstract TimeStamp Model
    Inherit:
        Model
    Fields:
        created_at : DateTimeField (UnEditable, set once on insert)
        updated_at : DateTimeField (Editable, refreshed on every save)
    """
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        # no table of its own; exists only to be inherited
        abstract = True
class Todo(AbstractTimeStamp):
    """Todo Model
    Inherit:
        AbstractTimeStamp
    Fields:
        user : ForeignKey (User, CASCADE — deleting a user deletes their todos)
        text : CharField
        is_completed : BooleanField
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    text = models.CharField(max_length=100)
    is_completed = models.BooleanField(default=False)
    def __str__(self):
        # human-readable representation (admin, shell, logs)
        return self.text
| 22
| 14
| 54
|
99e1901c951083f897e7d698fdca9ce52895b6c4
| 4,279
|
py
|
Python
|
steps/regions.py
|
MaximKsh/Sound-of-Pixels
|
d36ad4ba22b7148269647af0298948969db0b981
|
[
"MIT"
] | 3
|
2021-09-03T06:30:09.000Z
|
2021-09-05T09:02:26.000Z
|
steps/regions.py
|
MaximKsh/Sound-of-Pixels
|
d36ad4ba22b7148269647af0298948969db0b981
|
[
"MIT"
] | 2
|
2020-08-01T13:52:31.000Z
|
2021-03-07T11:01:46.000Z
|
steps/regions.py
|
MaximKsh/Sound-of-Pixels
|
d36ad4ba22b7148269647af0298948969db0b981
|
[
"MIT"
] | null | null | null |
import os
import torch
from imageio import imwrite
from scipy.io import wavfile
from tqdm import tqdm
import numpy as np
from dataset.music import MUSICMixDataset
from helpers.utils import get_ctx, recover_rgb, makedirs, istft_reconstruction, magnitude2heatmap
from steps.common import build_model, unwarp_log_scale, detach_mask
from steps.regions_report_template import sbr_html
| 39.256881
| 115
| 0.619771
|
import os
import torch
from imageio import imwrite
from scipy.io import wavfile
from tqdm import tqdm
import numpy as np
from dataset.music import MUSICMixDataset
from helpers.utils import get_ctx, recover_rgb, makedirs, istft_reconstruction, magnitude2heatmap
from steps.common import build_model, unwarp_log_scale, detach_mask
from steps.regions_report_template import sbr_html
def output_predictions(ctx, data, outputs):
mag_mix = data['mag_mix']
phase_mix = data['phase_mix']
frames = data['frames']
infos = data['infos']
pred_masks_ = outputs['pred_masks']
bs, im_h, im_w, _, _ = pred_masks_.shape
# unwarp
pred_masks_linear = torch.zeros((im_h, bs, im_w, 512, 256)).to(get_ctx(ctx, 'device'))
for h in range(im_h):
pred_masks_linear_h = unwarp_log_scale(ctx, [pred_masks_[:, h, :, :, :]])
pred_masks_linear[h] = pred_masks_linear_h[0]
pred_masks_linear = pred_masks_linear.permute(1, 0, 2, 3, 4)
# to cpu
pred_masks_linear = detach_mask(ctx, [pred_masks_linear], get_ctx(ctx, 'binary_mask'))[0]
pred_masks_ = detach_mask(ctx, [pred_masks_], get_ctx(ctx, 'binary_mask'))[0]
mag_mix = mag_mix.numpy()
phase_mix = phase_mix.numpy()
frames = frames[0]
for i in range(bs):
frames_tensor = np.asarray([recover_rgb(frames[i, :, t].cpu()) for t in range(get_ctx(ctx, 'num_frames'))])
pth, id_ = os.path.split(infos[0][1][i])
_, group = os.path.split(pth)
prefix = group + '-' + id_
folder = os.path.join(get_ctx(ctx, 'vis_regions'), prefix)
sbr_folder = os.path.join(folder, 'sbr')
grid_folder = os.path.join(sbr_folder, 'grid')
makedirs(folder)
makedirs(sbr_folder)
makedirs(grid_folder)
grid_pred_mask = np.zeros((14 * 256, 14 * 256))
for j in range(get_ctx(ctx, 'num_frames')):
imwrite(os.path.join(folder, f'frame{j}.jpg'), frames_tensor[j])
mix_wav = istft_reconstruction(mag_mix[i, 0], phase_mix[i, 0], hop_length=get_ctx(ctx, 'stft_hop'))
wavfile.write(os.path.join(folder, 'mix.wav'), get_ctx(ctx, 'aud_rate'), mix_wav)
# SBR
for h in range(im_h):
for w in range(im_w):
name = f'{h}x{w}'
# output audio
pred_mag = mag_mix[i, 0] * pred_masks_linear[i, h, w]
preds_wav = istft_reconstruction(pred_mag, phase_mix[i, 0], hop_length=get_ctx(ctx, 'stft_hop'))
wavfile.write(os.path.join(grid_folder, f'{name}-pred.wav'), get_ctx(ctx, 'aud_rate'), preds_wav)
# output masks
pred_mask = (np.clip(pred_masks_[i, h, w], 0, 1) * 255).astype(np.uint8)
imwrite(os.path.join(grid_folder, f'{name}-predmask.jpg'), pred_mask[::-1, :])
grid_pred_mask[h * 256:(h + 1) * 256, w * 256:(w + 1) * 256] = pred_mask[::-1, :]
# ouput spectrogram (log of magnitude, show colormap)
pred_mag = magnitude2heatmap(pred_mag)
imwrite(os.path.join(grid_folder, f'{name}-predamp.jpg'), pred_mag[::-1, :, :])
imwrite(os.path.join(sbr_folder, f'masks-grid.jpg'), grid_pred_mask)
grid_frame = frames_tensor[0]
grid_frame[:, np.arange(16, 224, 16)] = 255
grid_frame[np.arange(16, 224, 16), :] = 255
imwrite(os.path.join(sbr_folder, f'frame.jpg'), grid_frame)
with open(os.path.join(sbr_folder, 'sbr.html'), 'w') as text_file:
text_file.write(sbr_html)
def regions(ctx: dict):
ctx['load_best_model'] = True
ctx['net_wrapper'] = build_model(ctx)
ctx['num_mix'] = 1
dataset = MUSICMixDataset(get_ctx(ctx, 'list_regions'), ctx, max_sample=get_ctx(ctx, 'num_regions'),
split='regions')
loader = torch.utils.data.DataLoader(dataset, batch_size=get_ctx(ctx, 'batch_size'), shuffle=True,
num_workers=1, drop_last=False)
makedirs(get_ctx(ctx, 'vis_regions'), remove=True)
cnt = 0
with torch.no_grad():
for data in tqdm(loader):
output = ctx['net_wrapper'].forward(data, ctx, pixelwise=True)
output_predictions(ctx, data, output)
cnt += len(data['audios'][0])
| 3,848
| 0
| 46
|
b8d029f5bc6161440b8026698d334293fd413a64
| 237
|
py
|
Python
|
function/recursion/pattern_01_01n.py
|
nayanapardhekar/Python
|
55ea0cc1dd69192b25cb71358cd03cc2ce13be0a
|
[
"MIT"
] | 37
|
2019-04-03T07:19:57.000Z
|
2022-01-09T06:18:41.000Z
|
function/recursion/pattern_01_01n.py
|
nayanapardhekar/Python
|
55ea0cc1dd69192b25cb71358cd03cc2ce13be0a
|
[
"MIT"
] | 16
|
2020-08-11T08:09:42.000Z
|
2021-10-30T17:40:48.000Z
|
function/recursion/pattern_01_01n.py
|
nayanapardhekar/Python
|
55ea0cc1dd69192b25cb71358cd03cc2ce13be0a
|
[
"MIT"
] | 130
|
2019-10-02T14:40:20.000Z
|
2022-01-26T17:38:26.000Z
|
n=int(input('enter a number: '))
f(n)
'''
output:
enter a number: 6
1 2 3 4 5 6
1 2 3 4 5
1 2 3 4
1 2 3
1 2
1
'''
| 9.875
| 32
| 0.485232
|
def f(n):
if n==0:
return
def g(t):
if t==0:
return
g(t-1)
print(t,end=' ')
g(n)
print()
f(n-1)
n=int(input('enter a number: '))
f(n)
'''
output:
enter a number: 6
1 2 3 4 5 6
1 2 3 4 5
1 2 3 4
1 2 3
1 2
1
'''
| 95
| 0
| 22
|
0546f3edaa9a5f95688e2a8586c42014cf316be2
| 4,085
|
py
|
Python
|
py/s3_intelligent_tiering.py
|
bluecloudreddot/s3-intelligent-tiering
|
08b7b49114522ac055f388fe14588f82df2ccedf
|
[
"MIT"
] | 2
|
2019-02-13T17:07:09.000Z
|
2020-05-03T03:18:13.000Z
|
py/s3_intelligent_tiering.py
|
bluecloudreddot/s3-intelligent-tiering
|
08b7b49114522ac055f388fe14588f82df2ccedf
|
[
"MIT"
] | null | null | null |
py/s3_intelligent_tiering.py
|
bluecloudreddot/s3-intelligent-tiering
|
08b7b49114522ac055f388fe14588f82df2ccedf
|
[
"MIT"
] | 1
|
2019-03-12T09:09:24.000Z
|
2019-03-12T09:09:24.000Z
|
"""
MIT License
Copyright (c) 2019 Blue Cloud Red Dot
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import boto3
import botocore
PARSER = argparse.ArgumentParser()
PARSER.add_argument("bucket", help="Please enter the name of the S3 bucket", type=str)
PARSER.add_argument("-k" "--key", help="Please enter the key of the object", default='', dest='key', type=str)
PARSER.add_argument("-p" "--prefix", help="Please enter the prefix of the objects", default='', dest='prefix', type=str)
PARSER.add_argument("-s" "--suffix", help="Please enter the suffix of the objects", default='', dest='suffix', type=str)
PARSER.add_argument("-r" "--recursive", help="Please use if all objects with the prefix/suffix should be included", dest='recursive', action="store_true")
PARSER.add_argument("-t" "--token", help="Continuation token to list the next set of objects", dest='', type=str)
ARGS = PARSER.parse_args()
RECURSIVE = ARGS.recursive
CLIENT = boto3.client('s3')
S3 = boto3.resource('s3')
if RECURSIVE:
while True:
RESPONSE = CLIENT.list_objects_v2(Bucket=ARGS.bucket, Prefix=ARGS.prefix)
try:
CONTENTS = RESPONSE['Contents']
except KeyError:
print("Key error.")
for CONTENT in CONTENTS:
KEY = CONTENT['Key']
if KEY.startswith(ARGS.prefix) and KEY.endswith(ARGS.suffix):
COPY_SOURCE = {
'Bucket': ARGS.bucket,
'Key': KEY
}
try:
if CLIENT.get_object(Bucket=COPY_SOURCE['Bucket'], Key=COPY_SOURCE['Key'])['StorageClass'] == 'INTELLIGENT_TIERING':
print("Storage Class for '{}/{}' is already Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except KeyError:
S3.meta.client.copy(COPY_SOURCE, COPY_SOURCE['Bucket'], COPY_SOURCE['Key'], ExtraArgs={'StorageClass':'INTELLIGENT_TIERING'})
print("Changed Storage Class for '{}/{}' to Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except AttributeError:
print("Attribute error")
try:
ARGS.continuation_token = RESPONSE['NextContinuationToken']
except KeyError:
break
else:
try:
COPY_SOURCE = {
'Bucket': ARGS.bucket,
'Key': ARGS.key
}
try:
if CLIENT.get_object(Bucket=COPY_SOURCE['Bucket'], Key=COPY_SOURCE['Key'])['StorageClass'] == 'INTELLIGENT_TIERING':
print("Storage Class for '{}/{}' is already Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except KeyError:
S3.meta.client.copy(COPY_SOURCE, COPY_SOURCE['Bucket'], COPY_SOURCE['Key'], ExtraArgs={'StorageClass':'INTELLIGENT_TIERING'})
print("Changed Storage Class for '{}/{}' to Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except botocore.exceptions.ClientError:
print("404 - Sorry, the object you tried cannot be found.")
| 51.708861
| 154
| 0.67246
|
"""
MIT License
Copyright (c) 2019 Blue Cloud Red Dot
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import boto3
import botocore
PARSER = argparse.ArgumentParser()
PARSER.add_argument("bucket", help="Please enter the name of the S3 bucket", type=str)
PARSER.add_argument("-k" "--key", help="Please enter the key of the object", default='', dest='key', type=str)
PARSER.add_argument("-p" "--prefix", help="Please enter the prefix of the objects", default='', dest='prefix', type=str)
PARSER.add_argument("-s" "--suffix", help="Please enter the suffix of the objects", default='', dest='suffix', type=str)
PARSER.add_argument("-r" "--recursive", help="Please use if all objects with the prefix/suffix should be included", dest='recursive', action="store_true")
PARSER.add_argument("-t" "--token", help="Continuation token to list the next set of objects", dest='', type=str)
ARGS = PARSER.parse_args()
RECURSIVE = ARGS.recursive
CLIENT = boto3.client('s3')
S3 = boto3.resource('s3')
if RECURSIVE:
while True:
RESPONSE = CLIENT.list_objects_v2(Bucket=ARGS.bucket, Prefix=ARGS.prefix)
try:
CONTENTS = RESPONSE['Contents']
except KeyError:
print("Key error.")
for CONTENT in CONTENTS:
KEY = CONTENT['Key']
if KEY.startswith(ARGS.prefix) and KEY.endswith(ARGS.suffix):
COPY_SOURCE = {
'Bucket': ARGS.bucket,
'Key': KEY
}
try:
if CLIENT.get_object(Bucket=COPY_SOURCE['Bucket'], Key=COPY_SOURCE['Key'])['StorageClass'] == 'INTELLIGENT_TIERING':
print("Storage Class for '{}/{}' is already Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except KeyError:
S3.meta.client.copy(COPY_SOURCE, COPY_SOURCE['Bucket'], COPY_SOURCE['Key'], ExtraArgs={'StorageClass':'INTELLIGENT_TIERING'})
print("Changed Storage Class for '{}/{}' to Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except AttributeError:
print("Attribute error")
try:
ARGS.continuation_token = RESPONSE['NextContinuationToken']
except KeyError:
break
else:
try:
COPY_SOURCE = {
'Bucket': ARGS.bucket,
'Key': ARGS.key
}
try:
if CLIENT.get_object(Bucket=COPY_SOURCE['Bucket'], Key=COPY_SOURCE['Key'])['StorageClass'] == 'INTELLIGENT_TIERING':
print("Storage Class for '{}/{}' is already Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except KeyError:
S3.meta.client.copy(COPY_SOURCE, COPY_SOURCE['Bucket'], COPY_SOURCE['Key'], ExtraArgs={'StorageClass':'INTELLIGENT_TIERING'})
print("Changed Storage Class for '{}/{}' to Intelligent Tiering".format(COPY_SOURCE['Bucket'], COPY_SOURCE['Key']))
except botocore.exceptions.ClientError:
print("404 - Sorry, the object you tried cannot be found.")
| 0
| 0
| 0
|
a73bbb4e86f50607aba59110d5fab72de671285b
| 7,665
|
py
|
Python
|
featureRec/movielens/data/preprocess.py
|
parth-couture-ai/RecommenderSystems
|
fbf2a748099fc38f1422d121990eb8e62940f25c
|
[
"MIT"
] | 975
|
2019-03-28T19:47:38.000Z
|
2022-03-30T06:25:22.000Z
|
featureRec/movielens/data/preprocess.py
|
ShunLu91/RecommenderSystems
|
0a585139de1b49d72511ce5a4a642bd427c1349a
|
[
"MIT"
] | 20
|
2019-05-06T12:14:31.000Z
|
2022-03-24T12:31:43.000Z
|
featureRec/movielens/data/preprocess.py
|
ShunLu91/RecommenderSystems
|
0a585139de1b49d72511ce5a4a642bd427c1349a
|
[
"MIT"
] | 274
|
2019-04-13T11:06:06.000Z
|
2022-03-30T06:25:21.000Z
|
dict = {}
user_count = 6040
gender = {}
gender['M'] = 1
gender['F'] = 2
dict[1] = "Gender-male"
dict[2] = "Gender-female"
age = {}
age['1'] = 3
age['18'] = 4
age['25'] = 5
age['35'] = 6
age['45'] = 7
age['50'] = 8
age['56'] = 9
dict[3] = "Age-under 18"
dict[4] = "Age-18-24"
dict[5] = "Age-25-34"
dict[6] = "Age-35-44"
dict[7] = "Age-45-49"
dict[8] = "Age-50-55"
dict[9] = "Age-56+"
feature_size = 9
occ = {}
for i in range(21):
feature_size += 1
occ[str(i)] = feature_size
dict[10] = "Occupation-other"
dict[11] = "Occupation-academic/educator"
dict[12] = "Occupation-artist"
dict[13] = "Occupation-clerical/admin"
dict[14] = "Occupation-college/grad student"
dict[15] = "Occupation-customer service"
dict[16] = "Occupation-doctor/health care"
dict[17] = "Occupation-executive/managerial"
dict[18] = "Occupation-farmer"
dict[19] = "Occupation-homemaker"
dict[20] = "Occupation-K-12 student"
dict[21] = "Occupation-lawyer"
dict[22] = "Occupation-programmer"
dict[23] = "Occupation-retired"
dict[24] = "Occupation-sales/marketing"
dict[25] = "Occupation-scientist"
dict[26] = "Occupation-self-employed"
dict[27] = "Occupation-technician/engineer"
dict[28] = "Occupation-tradesman/craftsman"
dict[29] = "Occupation-unemployed"
dict[30] = "Occupation-writer"
f = open('users.dat', 'r')
zipcode = {}
for i in range(1, user_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
if zipcode.get(l[-1]) == None:
feature_size += 1
zipcode[l[-1]] = feature_size
dict[feature_size] = "Zipcode-" + str(l[-1])
f.close()
f = open('users.dat', 'r')
user_i = [[]]
user_v = [[]]
for i in range(1, user_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
user_i.append([gender[l[1]], age[l[2]], occ[l[3]], zipcode[l[4]]])
user_v.append([1, 1, 1, 1])
f.close()
print("The number of user's feature is:", len(user_i))
movie_count = 3883
max_gen = 0
min_gen = 10
year_dict = {}
for i in range(1919, 1930):
year_dict[i] = 1
for i in range(1930, 1940):
year_dict[i] = 2
for i in range(1940, 1950):
year_dict[i] = 3
for i in range(1950, 1960):
year_dict[i] = 4
for i in range(1960, 1970):
year_dict[i] = 5
for i in range(1970, 2001):
year_dict[i] = 6 + i - 1970
f = open('movies.dat', 'r', encoding="ISO-8859-1")
genres = {}
for i in range(1, movie_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
s = l[-1]
l = s.split('|')
if len(l) > max_gen:
max_gen = len(l)
if len(l) < min_gen:
min_gen = len(l)
if len(l) == 0:
print('error')
for _ in l:
if genres.get(_) == None:
feature_size += 1
genres[_] = feature_size
dict[feature_size] = "Genre-" + _
f.close()
print("2222", feature_size)
print(len(dict))
print('The max number is :', max_gen)
#feature_size += 1 # for year of release
f = open('movies.dat', 'r', encoding="ISO-8859-1")
movie_i = {}
movie_v = {}
for i in range(1, movie_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
MovieID = int(l[0])
Year = int(l[1][-5:-1])
l = l[-1].split('|')
new_i = []
new_v = []
for _ in l:
new_i.append(genres[_])
new_v.append(1)
t = 6 - len(l) # 0 ~ 5 remain
for _ in range(feature_size + 1, feature_size + t + 1):
new_i.append(_)
new_v.append(0)
#new_i.append(feature_size + 6)
#new_v.append(Year)
new_i.append(feature_size + 5 + year_dict[Year])
new_v.append(1)
movie_i[MovieID] = new_i
movie_v[MovieID] = new_v
f.close()
print(feature_size + 1, feature_size + 5)
#feature_size += 6
dict[feature_size + 1] = "Genre-NULL"
dict[feature_size + 2] = "Genre-NULL"
dict[feature_size + 3] = "Genre-NULL"
dict[feature_size + 4] = "Genre-NULL"
dict[feature_size + 5] = "Genre-NULL"
feature_size += 5
feature_size += 1
dict[feature_size] = "Release-1919-1929"
feature_size += 1
dict[feature_size] = "Release-1930-1939"
feature_size += 1
dict[feature_size] = "Release-1940-1949"
feature_size += 1
dict[feature_size] = "Release-1950-1959"
feature_size += 1
dict[feature_size] = "Release-1960-1969"
for y in range(1970, 2001):
feature_size += 1
dict[feature_size] = "Release-" + str(y)
print("####: ", feature_size)
print(len(dict))
print("The number of movie's feature is:", len(movie_i))
feature_size += 1 # for timestamp
dict[feature_size] = "Timestamp"
f = open('ratings.dat', 'r')
data_i = []
data_v = []
Y = []
#U = []
#I = []
all_count = 1000209
ratings_count = 0
for i in range(1, all_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
y = int(l[2])
new_i = user_i[int(l[0])].copy()
new_v = user_v[int(l[0])].copy()
new_i.extend(movie_i[int(l[1])])
new_v.extend(movie_v[int(l[1])])
new_i.append(feature_size)
new_v.append(int(l[3]))
if y > 3:
y = 1
elif y < 3:
y = 0
else:
y = -1
if y != -1:
data_i.append(new_i)
data_v.append(new_v)
# U.append(int(l[0]))
# I.append(int(l[1]))
Y.append(y)
ratings_count += 1
f.close()
print('valid number of ratings:', len(data_v))
print('Positive number =', sum(Y))
print(feature_size)
print("Dict: ", len(dict))
print('All =', len(data_i))
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
R = []
for i in range(ratings_count):
R.append([data_v[i][-1]])
#print(R)
#print(np.max(R))
#print(np.min(R))
R = scaler.fit_transform(R)
#print(R)
for i in range(ratings_count):
data_v[i].pop()
data_v[i].append(R[i][0])
# data_v[i].append(U[i])
# data_v[i].append(I[i])
print(data_v[0])
perm = []
for i in range(ratings_count):
perm.append(i)
random.seed(2019)
random.shuffle(perm)
train_count = int(ratings_count * 0.8)
valid_count = int(ratings_count * 0.9)
X_i_tr = []
X_v_tr = []
Y_tr = []
for i in range(train_count):
X_i_tr.append(data_i[perm[i]])
X_v_tr.append(data_v[perm[i]])
Y_tr.append(Y[perm[i]])
X_i_tr = np.array(X_i_tr)
X_v_tr = np.array(X_v_tr)
Y_tr = np.array(Y_tr)
i1 = X_i_tr[:, 0:4]
i2 = X_i_tr[:, 4:10]
i3 = X_i_tr[:, 10:]
x1 = X_v_tr[:, 0:4]
x2 = X_v_tr[:, 4:10]
x3 = X_v_tr[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("train_i_genre.npy", i2)
np.save("train_i_other.npy", i4)
np.save("train_x_genre.npy", x2)
np.save("train_x_other.npy", x4)
np.save("train_y.npy", np.array(Y_tr))
#np.save("train_ui.npy", np.array(ui_tr))
X_i_va = []
X_v_va = []
Y_va = []
for i in range(train_count, valid_count):
X_i_va.append(data_i[perm[i]])
X_v_va.append(data_v[perm[i]])
Y_va.append(Y[perm[i]])
# ui_va.append([U[perm[i]], I[perm[i]])
X_i_va = np.array(X_i_va)
X_v_va = np.array(X_v_va)
Y_va = np.array(Y_va)
i1 = X_i_va[:, 0:4]
i2 = X_i_va[:, 4:10]
i3 = X_i_va[:, 10:]
x1 = X_v_va[:, 0:4]
x2 = X_v_va[:, 4:10]
x3 = X_v_va[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("valid_i_genre.npy", i2)
np.save("valid_i_other.npy", i4)
np.save("valid_x_genre.npy", x2)
np.save("valid_x_other.npy", x4)
np.save("valid_y.npy", np.array(Y_va))
X_i_te = []
X_v_te = []
Y_te = []
for i in range(valid_count, ratings_count):
X_i_te.append(data_i[perm[i]])
X_v_te.append(data_v[perm[i]])
Y_te.append(Y[perm[i]])
# ui_te.append(U[perm[i]]], I[perm[i]])
X_i_te = np.array(X_i_te)
X_v_te = np.array(X_v_te)
Y_te = np.array(Y_te)
i1 = X_i_te[:, 0:4]
i2 = X_i_te[:, 4:10]
i3 = X_i_te[:, 10:]
x1 = X_v_te[:, 0:4]
x2 = X_v_te[:, 4:10]
x3 = X_v_te[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("test_i_genre.npy", i2)
np.save("test_i_other.npy", i4)
np.save("test_x_genre.npy", x2)
np.save("test_x_other.npy", x4)
np.save("test_y.npy", np.array(Y_te))
print(len(X_i_tr))
print(len(X_i_va))
print(len(X_i_te))
print(len(Y))
f = open("feature.txt", 'w')
f.write(str(dict))
| 21.837607
| 67
| 0.641748
|
dict = {}
user_count = 6040
gender = {}
gender['M'] = 1
gender['F'] = 2
dict[1] = "Gender-male"
dict[2] = "Gender-female"
age = {}
age['1'] = 3
age['18'] = 4
age['25'] = 5
age['35'] = 6
age['45'] = 7
age['50'] = 8
age['56'] = 9
dict[3] = "Age-under 18"
dict[4] = "Age-18-24"
dict[5] = "Age-25-34"
dict[6] = "Age-35-44"
dict[7] = "Age-45-49"
dict[8] = "Age-50-55"
dict[9] = "Age-56+"
feature_size = 9
occ = {}
for i in range(21):
feature_size += 1
occ[str(i)] = feature_size
dict[10] = "Occupation-other"
dict[11] = "Occupation-academic/educator"
dict[12] = "Occupation-artist"
dict[13] = "Occupation-clerical/admin"
dict[14] = "Occupation-college/grad student"
dict[15] = "Occupation-customer service"
dict[16] = "Occupation-doctor/health care"
dict[17] = "Occupation-executive/managerial"
dict[18] = "Occupation-farmer"
dict[19] = "Occupation-homemaker"
dict[20] = "Occupation-K-12 student"
dict[21] = "Occupation-lawyer"
dict[22] = "Occupation-programmer"
dict[23] = "Occupation-retired"
dict[24] = "Occupation-sales/marketing"
dict[25] = "Occupation-scientist"
dict[26] = "Occupation-self-employed"
dict[27] = "Occupation-technician/engineer"
dict[28] = "Occupation-tradesman/craftsman"
dict[29] = "Occupation-unemployed"
dict[30] = "Occupation-writer"
f = open('users.dat', 'r')
zipcode = {}
for i in range(1, user_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
if zipcode.get(l[-1]) == None:
feature_size += 1
zipcode[l[-1]] = feature_size
dict[feature_size] = "Zipcode-" + str(l[-1])
f.close()
f = open('users.dat', 'r')
user_i = [[]]
user_v = [[]]
for i in range(1, user_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
user_i.append([gender[l[1]], age[l[2]], occ[l[3]], zipcode[l[4]]])
user_v.append([1, 1, 1, 1])
f.close()
print("The number of user's feature is:", len(user_i))
movie_count = 3883
max_gen = 0
min_gen = 10
year_dict = {}
for i in range(1919, 1930):
year_dict[i] = 1
for i in range(1930, 1940):
year_dict[i] = 2
for i in range(1940, 1950):
year_dict[i] = 3
for i in range(1950, 1960):
year_dict[i] = 4
for i in range(1960, 1970):
year_dict[i] = 5
for i in range(1970, 2001):
year_dict[i] = 6 + i - 1970
f = open('movies.dat', 'r', encoding="ISO-8859-1")
genres = {}
for i in range(1, movie_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
s = l[-1]
l = s.split('|')
if len(l) > max_gen:
max_gen = len(l)
if len(l) < min_gen:
min_gen = len(l)
if len(l) == 0:
print('error')
for _ in l:
if genres.get(_) == None:
feature_size += 1
genres[_] = feature_size
dict[feature_size] = "Genre-" + _
f.close()
print("2222", feature_size)
print(len(dict))
print('The max number is :', max_gen)
#feature_size += 1 # for year of release
f = open('movies.dat', 'r', encoding="ISO-8859-1")
movie_i = {}
movie_v = {}
for i in range(1, movie_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
MovieID = int(l[0])
Year = int(l[1][-5:-1])
l = l[-1].split('|')
new_i = []
new_v = []
for _ in l:
new_i.append(genres[_])
new_v.append(1)
t = 6 - len(l) # 0 ~ 5 remain
for _ in range(feature_size + 1, feature_size + t + 1):
new_i.append(_)
new_v.append(0)
#new_i.append(feature_size + 6)
#new_v.append(Year)
new_i.append(feature_size + 5 + year_dict[Year])
new_v.append(1)
movie_i[MovieID] = new_i
movie_v[MovieID] = new_v
f.close()
print(feature_size + 1, feature_size + 5)
#feature_size += 6
dict[feature_size + 1] = "Genre-NULL"
dict[feature_size + 2] = "Genre-NULL"
dict[feature_size + 3] = "Genre-NULL"
dict[feature_size + 4] = "Genre-NULL"
dict[feature_size + 5] = "Genre-NULL"
feature_size += 5
feature_size += 1
dict[feature_size] = "Release-1919-1929"
feature_size += 1
dict[feature_size] = "Release-1930-1939"
feature_size += 1
dict[feature_size] = "Release-1940-1949"
feature_size += 1
dict[feature_size] = "Release-1950-1959"
feature_size += 1
dict[feature_size] = "Release-1960-1969"
for y in range(1970, 2001):
feature_size += 1
dict[feature_size] = "Release-" + str(y)
print("####: ", feature_size)
print(len(dict))
print("The number of movie's feature is:", len(movie_i))
feature_size += 1 # for timestamp
dict[feature_size] = "Timestamp"
f = open('ratings.dat', 'r')
data_i = []
data_v = []
Y = []
#U = []
#I = []
all_count = 1000209
ratings_count = 0
for i in range(1, all_count + 1):
line = f.readline()
line = line[:-1]
l = line.split('::')
y = int(l[2])
new_i = user_i[int(l[0])].copy()
new_v = user_v[int(l[0])].copy()
new_i.extend(movie_i[int(l[1])])
new_v.extend(movie_v[int(l[1])])
new_i.append(feature_size)
new_v.append(int(l[3]))
if y > 3:
y = 1
elif y < 3:
y = 0
else:
y = -1
if y != -1:
data_i.append(new_i)
data_v.append(new_v)
# U.append(int(l[0]))
# I.append(int(l[1]))
Y.append(y)
ratings_count += 1
f.close()
print('valid number of ratings:', len(data_v))
print('Positive number =', sum(Y))
print(feature_size)
print("Dict: ", len(dict))
print('All =', len(data_i))
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
R = []
for i in range(ratings_count):
R.append([data_v[i][-1]])
#print(R)
#print(np.max(R))
#print(np.min(R))
R = scaler.fit_transform(R)
#print(R)
for i in range(ratings_count):
data_v[i].pop()
data_v[i].append(R[i][0])
# data_v[i].append(U[i])
# data_v[i].append(I[i])
print(data_v[0])
perm = []
for i in range(ratings_count):
perm.append(i)
random.seed(2019)
random.shuffle(perm)
train_count = int(ratings_count * 0.8)
valid_count = int(ratings_count * 0.9)
X_i_tr = []
X_v_tr = []
Y_tr = []
for i in range(train_count):
X_i_tr.append(data_i[perm[i]])
X_v_tr.append(data_v[perm[i]])
Y_tr.append(Y[perm[i]])
X_i_tr = np.array(X_i_tr)
X_v_tr = np.array(X_v_tr)
Y_tr = np.array(Y_tr)
i1 = X_i_tr[:, 0:4]
i2 = X_i_tr[:, 4:10]
i3 = X_i_tr[:, 10:]
x1 = X_v_tr[:, 0:4]
x2 = X_v_tr[:, 4:10]
x3 = X_v_tr[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("train_i_genre.npy", i2)
np.save("train_i_other.npy", i4)
np.save("train_x_genre.npy", x2)
np.save("train_x_other.npy", x4)
np.save("train_y.npy", np.array(Y_tr))
#np.save("train_ui.npy", np.array(ui_tr))
X_i_va = []
X_v_va = []
Y_va = []
for i in range(train_count, valid_count):
X_i_va.append(data_i[perm[i]])
X_v_va.append(data_v[perm[i]])
Y_va.append(Y[perm[i]])
# ui_va.append([U[perm[i]], I[perm[i]])
X_i_va = np.array(X_i_va)
X_v_va = np.array(X_v_va)
Y_va = np.array(Y_va)
i1 = X_i_va[:, 0:4]
i2 = X_i_va[:, 4:10]
i3 = X_i_va[:, 10:]
x1 = X_v_va[:, 0:4]
x2 = X_v_va[:, 4:10]
x3 = X_v_va[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("valid_i_genre.npy", i2)
np.save("valid_i_other.npy", i4)
np.save("valid_x_genre.npy", x2)
np.save("valid_x_other.npy", x4)
np.save("valid_y.npy", np.array(Y_va))
X_i_te = []
X_v_te = []
Y_te = []
for i in range(valid_count, ratings_count):
X_i_te.append(data_i[perm[i]])
X_v_te.append(data_v[perm[i]])
Y_te.append(Y[perm[i]])
# ui_te.append(U[perm[i]]], I[perm[i]])
X_i_te = np.array(X_i_te)
X_v_te = np.array(X_v_te)
Y_te = np.array(Y_te)
i1 = X_i_te[:, 0:4]
i2 = X_i_te[:, 4:10]
i3 = X_i_te[:, 10:]
x1 = X_v_te[:, 0:4]
x2 = X_v_te[:, 4:10]
x3 = X_v_te[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("test_i_genre.npy", i2)
np.save("test_i_other.npy", i4)
np.save("test_x_genre.npy", x2)
np.save("test_x_other.npy", x4)
np.save("test_y.npy", np.array(Y_te))
print(len(X_i_tr))
print(len(X_i_va))
print(len(X_i_te))
print(len(Y))
f = open("feature.txt", 'w')
f.write(str(dict))
| 0
| 0
| 0
|
f2244993e211d561bad86dbcef92fadb134ea788
| 2,158
|
py
|
Python
|
syphon/schema/resolvepath.py
|
ethall/syphon
|
dd75fd33f3f9164653f24b33c875615dc1d04182
|
[
"MIT"
] | null | null | null |
syphon/schema/resolvepath.py
|
ethall/syphon
|
dd75fd33f3f9164653f24b33c875615dc1d04182
|
[
"MIT"
] | 23
|
2018-01-06T17:59:58.000Z
|
2019-02-27T15:52:20.000Z
|
syphon/schema/resolvepath.py
|
ethall/syphon
|
dd75fd33f3f9164653f24b33c875615dc1d04182
|
[
"MIT"
] | 1
|
2019-02-20T17:17:40.000Z
|
2019-02-20T17:17:40.000Z
|
"""syphon.schema.resolvepath.py
Copyright (c) 2017-2018 Keithley Instruments, LLC.
Licensed under MIT (https://github.com/ehall/syphon/blob/master/LICENSE)
"""
from pandas import DataFrame
from sortedcontainers import SortedDict
def _normalize(directory: str) -> str:
"""Make lowercase and replace spaces with underscores."""
directory = directory.lower()
directory = directory.replace(' ', '_')
if directory[-1] == '.':
directory = directory[:-1]
return directory
def resolve_path(
archive: str, schema: SortedDict, datapool: DataFrame) -> str:
"""Use the given schema and dataset to make a path.
The base path is `archive`. Additional directories are appended
for the value of each `SortedDict` entry in the given `DataFrame`.
It is important that columns corresponding to a `SortedDict` entry
contain a single value. A `ValueError` will be raised if more than
one value exists in a target column.
Args:
archive (str): Directory where data is stored.
schema (SortedDict): Archive directory storage schema.
datapool (DataFrame): Data to use during path resolution.
Return:
str: The resolved path.
Raises:
IndexError: Schema value is not a column header of the
given DataFrame.
ValueError: When a column corresponding to a SortedDict
entry contains more than one value.
"""
from os.path import join
from numpy import nan
result = archive
for key in schema:
header = schema[key]
if header not in list(datapool.columns):
raise IndexError(
'Schema value {} is not a column in the current DataFrame.'
.format(header))
row_values = list(datapool.get(header).drop_duplicates().values)
if nan in row_values:
row_values.remove(nan)
if len(row_values) > 1:
raise ValueError(
'More than one value exists under the {} column.'
.format(header))
value = row_values.pop()
result = join(result, _normalize(value))
return result
| 31.735294
| 75
| 0.646432
|
"""syphon.schema.resolvepath.py
Copyright (c) 2017-2018 Keithley Instruments, LLC.
Licensed under MIT (https://github.com/ehall/syphon/blob/master/LICENSE)
"""
from pandas import DataFrame
from sortedcontainers import SortedDict
def _normalize(directory: str) -> str:
"""Make lowercase and replace spaces with underscores."""
directory = directory.lower()
directory = directory.replace(' ', '_')
if directory[-1] == '.':
directory = directory[:-1]
return directory
def resolve_path(
archive: str, schema: SortedDict, datapool: DataFrame) -> str:
"""Use the given schema and dataset to make a path.
The base path is `archive`. Additional directories are appended
for the value of each `SortedDict` entry in the given `DataFrame`.
It is important that columns corresponding to a `SortedDict` entry
contain a single value. A `ValueError` will be raised if more than
one value exists in a target column.
Args:
archive (str): Directory where data is stored.
schema (SortedDict): Archive directory storage schema.
datapool (DataFrame): Data to use during path resolution.
Return:
str: The resolved path.
Raises:
IndexError: Schema value is not a column header of the
given DataFrame.
ValueError: When a column corresponding to a SortedDict
entry contains more than one value.
"""
from os.path import join
from numpy import nan
result = archive
for key in schema:
header = schema[key]
if header not in list(datapool.columns):
raise IndexError(
'Schema value {} is not a column in the current DataFrame.'
.format(header))
row_values = list(datapool.get(header).drop_duplicates().values)
if nan in row_values:
row_values.remove(nan)
if len(row_values) > 1:
raise ValueError(
'More than one value exists under the {} column.'
.format(header))
value = row_values.pop()
result = join(result, _normalize(value))
return result
| 0
| 0
| 0
|
51e613ddfda196d79045aa4d8515248aa5d166f3
| 2,330
|
py
|
Python
|
project/api/migrations/0057_auto_20210714_2322.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | null | null | null |
project/api/migrations/0057_auto_20210714_2322.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-06-07T14:06:05.000Z
|
2021-06-18T16:27:29.000Z
|
project/api/migrations/0057_auto_20210714_2322.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-07-27T20:40:18.000Z
|
2021-09-12T16:48:19.000Z
|
# Generated by Django 3.2.3 on 2021-07-14 16:22
from django.db import migrations, models
import django.db.models.deletion
| 38.196721
| 183
| 0.590558
|
# Generated by Django 3.2.3 on 2021-07-14 16:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0056_auto_20210714_2045'),
]
operations = [
migrations.RemoveField(
model_name='catalog',
name='content',
),
migrations.AddField(
model_name='catalog',
name='raw_html',
field=models.TextField(default='<br>', max_length=4000000, verbose_name='HTML'),
preserve_default=False,
),
migrations.AddField(
model_name='history',
name='raw_html',
field=models.TextField(default='<br>', max_length=4000000, verbose_name='HTML'),
preserve_default=False,
),
migrations.AlterField(
model_name='event',
name='tags',
field=models.ForeignKey(limit_choices_to={'category': 'События'}, on_delete=django.db.models.deletion.PROTECT, related_name='events', to='api.tag', verbose_name='Тег(и)'),
),
migrations.AlterField(
model_name='movie',
name='tags',
field=models.ManyToManyField(limit_choices_to={'category': 'Фильмы'}, related_name='movies', to='api.Tag', verbose_name='Теги'),
),
migrations.AlterField(
model_name='place',
name='tags',
field=models.ManyToManyField(limit_choices_to={'category': 'Места'}, related_name='places', to='api.Tag', verbose_name='Тег(и)'),
),
migrations.AlterField(
model_name='question',
name='tags',
field=models.ManyToManyField(limit_choices_to={'category': 'Вопросы'}, related_name='questions', to='api.Tag', verbose_name='Тег(и)'),
),
migrations.AlterField(
model_name='right',
name='tags',
field=models.ManyToManyField(limit_choices_to={'category': 'Права'}, related_name='rights', to='api.Tag', verbose_name='Тег(и)'),
),
migrations.AlterField(
model_name='video',
name='tags',
field=models.ManyToManyField(limit_choices_to={'category': 'Видеоролики'}, related_name='videos', to='api.Tag', verbose_name='Теги'),
),
]
| 0
| 2,248
| 23
|
5d8e3e36a2258f0b2a6784e236142f8901d10da0
| 350
|
py
|
Python
|
Flask_Server_API/db.py
|
SwayamDash/News-Feedback-Management-System1
|
94f191af0771ae4252eb747619a5a7930ccdbbeb
|
[
"MIT"
] | 1
|
2021-11-16T19:37:22.000Z
|
2021-11-16T19:37:22.000Z
|
Flask_Server_API/db.py
|
SwayamDash/News-Feedback-Management-System
|
94f191af0771ae4252eb747619a5a7930ccdbbeb
|
[
"MIT"
] | null | null | null |
Flask_Server_API/db.py
|
SwayamDash/News-Feedback-Management-System
|
94f191af0771ae4252eb747619a5a7930ccdbbeb
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_pymongo import pymongo
from app import app
CONNECTION_STRING = "mongodb+srv://swayam:swayam123@cluster0.wiqoj.mongodb.net/newsfeedbacksystem?retryWrites=true&w=majority"
client = pymongo.MongoClient(CONNECTION_STRING)
db = client.get_database('newsfeedbacksystem')
store = pymongo.collection.Collection(db, 'store')
| 50
| 126
| 0.831429
|
from flask import Flask
from flask_pymongo import pymongo
from app import app
CONNECTION_STRING = "mongodb+srv://swayam:swayam123@cluster0.wiqoj.mongodb.net/newsfeedbacksystem?retryWrites=true&w=majority"
client = pymongo.MongoClient(CONNECTION_STRING)
db = client.get_database('newsfeedbacksystem')
store = pymongo.collection.Collection(db, 'store')
| 0
| 0
| 0
|
4c2a761b765bb318e2b8a214514bc6b78be62b31
| 21,375
|
py
|
Python
|
twext/enterprise/dal/test/test_record.py
|
troglodyne/ccs-twistedextensions
|
1b43cb081ba68ae310140a9e853e041cd6362625
|
[
"Apache-2.0"
] | 23
|
2016-08-14T07:20:27.000Z
|
2021-11-08T09:47:45.000Z
|
twext/enterprise/dal/test/test_record.py
|
DalavanCloud/ccs-twistedextensions
|
2c4046df88873dcf33fba7840ed90e4238dcbec7
|
[
"Apache-2.0"
] | 2
|
2016-12-15T17:51:49.000Z
|
2019-05-12T15:59:03.000Z
|
twext/enterprise/dal/test/test_record.py
|
DalavanCloud/ccs-twistedextensions
|
2c4046df88873dcf33fba7840ed90e4238dcbec7
|
[
"Apache-2.0"
] | 20
|
2016-08-17T06:51:00.000Z
|
2022-03-26T11:55:56.000Z
|
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Test cases for L{twext.enterprise.dal.record}.
"""
import datetime
from twisted.internet.defer import inlineCallbacks, gatherResults, returnValue
from twisted.trial.unittest import TestCase, SkipTest
from twext.enterprise.dal.record import (
Record, fromTable, ReadOnly, NoSuchRecord,
SerializableRecord)
from twext.enterprise.dal.test.test_parseschema import SchemaTestHelper
from twext.enterprise.dal.syntax import SchemaSyntax
from twext.enterprise.fixtures import buildConnectionPool
# from twext.enterprise.dal.syntax import
sth = SchemaTestHelper()
sth.id = lambda: __name__
schemaString = """
create table ALPHA (BETA integer primary key, GAMMA text);
create table DELTA (PHI integer primary key default (nextval('myseq')),
EPSILON text not null,
ZETA timestamp not null default '2012-12-12 12:12:12' );
"""
# sqlite can be made to support nextval() as a function, but 'create sequence'
# is syntax and can't.
parseableSchemaString = """
create sequence myseq;
""" + schemaString
try:
testSchema = SchemaSyntax(sth.schemaFromString(parseableSchemaString))
except SkipTest as e:
Alpha = Delta = object
skip = e
else:
Alpha = fromTable(testSchema.ALPHA)
Delta = fromTable(testSchema.DELTA)
skip = False
class TestRecord(Record, Alpha):
"""
A sample test record.
"""
class TestSerializeRecord(SerializableRecord, Alpha):
"""
A sample test serializable record with default values specified.
"""
class TestAutoRecord(Record, Delta):
"""
A sample test record with default values specified.
"""
class TestCRUD(TestCase):
"""
Tests for creation, mutation, and deletion operations.
"""
@inlineCallbacks
def test_simpleLoad(self):
"""
Loading an existing row from the database by its primary key will
populate its attributes from columns of the corresponding row in the
database.
"""
txn = self.pool.connection()
yield txn.execSQL("insert into ALPHA values (:1, :2)", [234, "one"])
yield txn.execSQL("insert into ALPHA values (:1, :2)", [456, "two"])
rec = yield TestRecord.load(txn, 456)
self.assertIsInstance(rec, TestRecord)
self.assertEquals(rec.beta, 456)
self.assertEquals(rec.gamma, "two")
rec2 = yield TestRecord.load(txn, 234)
self.assertIsInstance(rec2, TestRecord)
self.assertEqual(rec2.beta, 234)
self.assertEqual(rec2.gamma, "one")
@inlineCallbacks
def test_missingLoad(self):
"""
Try loading an row which doesn't exist
"""
txn = self.pool.connection()
yield txn.execSQL("insert into ALPHA values (:1, :2)", [234, "one"])
yield self.assertFailure(TestRecord.load(txn, 456), NoSuchRecord)
@inlineCallbacks
def test_simpleCreate(self):
"""
When a record object is created, a row with matching column values will
be created in the database.
"""
txn = self.pool.connection()
rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
self.assertEquals(rec.beta, 3)
self.assertEqual(rec.gamma, u'epsilon')
rows = yield txn.execSQL("select BETA, GAMMA from ALPHA")
self.assertEqual(rows, [tuple([3, u'epsilon'])])
@inlineCallbacks
def test_simpleDelete(self):
"""
When a record object is deleted, a row with a matching primary key will
be deleted in the database.
"""
txn = self.pool.connection()
yield gatherResults(
[mkrow(123, u"one"), mkrow(234, u"two"), mkrow(345, u"three")]
)
tr = yield TestRecord.load(txn, 234)
yield tr.delete()
rows = yield txn.execSQL("select BETA, GAMMA from ALPHA order by BETA")
self.assertEqual(rows, [(123, u"one"), (345, u"three")])
@inlineCallbacks
def oneRowCommitted(self, beta=123, gamma=u'456'):
"""
Create, commit, and return one L{TestRecord}.
"""
txn = self.pool.connection(self.id())
row = yield TestRecord.create(txn, beta=beta, gamma=gamma)
yield txn.commit()
returnValue(row)
@inlineCallbacks
def test_deleteWhenDeleted(self):
"""
When a record object is deleted, if it's already been deleted, it will
raise L{NoSuchRecord}.
"""
row = yield self.oneRowCommitted()
txn = self.pool.connection(self.id())
newRow = yield TestRecord.load(txn, row.beta)
yield newRow.delete()
yield self.assertFailure(newRow.delete(), NoSuchRecord)
@inlineCallbacks
def test_cantCreateWithoutRequiredValues(self):
"""
When a L{Record} object is created without required values, it raises a
L{TypeError}.
"""
txn = self.pool.connection()
te = yield self.assertFailure(TestAutoRecord.create(txn), TypeError)
self.assertIn("required attribute 'epsilon' not passed", str(te))
@inlineCallbacks
def test_datetimeType(self):
"""
When a L{Record} references a timestamp column, it retrieves the date
as UTC.
"""
txn = self.pool.connection()
# Create ...
rec = yield TestAutoRecord.create(txn, epsilon=1)
self.assertEquals(
rec.zeta,
datetime.datetime(2012, 12, 12, 12, 12, 12)
)
yield txn.commit()
# ... should have the same effect as loading.
txn = self.pool.connection()
rec = (yield TestAutoRecord.all(txn))[0]
self.assertEquals(
rec.zeta,
datetime.datetime(2012, 12, 12, 12, 12, 12)
)
@inlineCallbacks
def test_tooManyAttributes(self):
"""
When a L{Record} object is created with unknown attributes (those which
don't map to any column), it raises a L{TypeError}.
"""
txn = self.pool.connection()
te = yield self.assertFailure(
TestRecord.create(
txn, beta=3, gamma=u'three',
extraBonusAttribute=u'nope',
otherBonusAttribute=4321,
),
TypeError
)
self.assertIn("extraBonusAttribute, otherBonusAttribute", str(te))
@inlineCallbacks
def test_createFillsInPKey(self):
"""
If L{Record.create} is called without an auto-generated primary key
value for its row, that value will be generated and set on the returned
object.
"""
txn = self.pool.connection()
tr = yield TestAutoRecord.create(txn, epsilon=u'specified')
tr2 = yield TestAutoRecord.create(txn, epsilon=u'also specified')
self.assertEquals(tr.phi, 1)
self.assertEquals(tr2.phi, 2)
@inlineCallbacks
def test_attributesArentMutableYet(self):
"""
Changing attributes on a database object is not supported yet, because
it's not entirely clear when to flush the SQL to the database.
Instead, for the time being, use C{.update}. When you attempt to set
an attribute, an error will be raised informing you of this fact, so
that the error is clear.
"""
txn = self.pool.connection()
rec = yield TestRecord.create(txn, beta=7, gamma=u'what')
ro = self.assertRaises(ReadOnly, setit)
self.assertEqual(rec.beta, 7)
self.assertIn("SQL-backed attribute 'TestRecord.beta' is read-only. "
"Use '.update(...)' to modify attributes.", str(ro))
@inlineCallbacks
def test_simpleUpdate(self):
"""
L{Record.update} will change the values on the record and in te
database.
"""
txn = self.pool.connection()
rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
yield rec.update(gamma=u'otherwise')
self.assertEqual(rec.gamma, u'otherwise')
yield txn.commit()
# Make sure that it persists.
txn = self.pool.connection()
rec = yield TestRecord.load(txn, 3)
self.assertEqual(rec.gamma, u'otherwise')
@inlineCallbacks
def test_simpleQuery(self):
"""
L{Record.query} will allow you to query for a record by its class
attributes as columns.
"""
txn = self.pool.connection()
for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
(356, u"three"), (456, u"four")]:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
records = yield TestRecord.query(txn, TestRecord.gamma == u"three")
self.assertEqual(len(records), 2)
records.sort(key=lambda x: x.beta)
self.assertEqual(records[0].beta, 345)
self.assertEqual(records[1].beta, 356)
@inlineCallbacks
def test_querySimple(self):
"""
L{Record.querysimple} will allow you to query for a record by its class
attributes as columns.
"""
txn = self.pool.connection()
for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
(356, u"three"), (456, u"four")]:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
records = yield TestRecord.querysimple(txn, gamma=u"three")
self.assertEqual(len(records), 2)
records.sort(key=lambda x: x.beta)
self.assertEqual(records[0].beta, 345)
self.assertEqual(records[1].beta, 356)
@inlineCallbacks
def test_eq(self):
"""
L{Record.__eq__} works.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
one = yield TestRecord.load(txn, 123)
one_copy = yield TestRecord.load(txn, 123)
two = yield TestRecord.load(txn, 234)
self.assertTrue(one == one_copy)
self.assertFalse(one == two)
@inlineCallbacks
def test_all(self):
"""
L{Record.all} will return all instances of the record, sorted by
primary key.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
self.assertEqual(
[(x.beta, x.gamma) for x in (yield TestRecord.all(txn))],
sorted(data)
)
@inlineCallbacks
def test_count(self):
"""
L{Record.count} will return count of the records
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
self.assertEqual(
(yield TestRecord.count(txn)),
len(data)
)
@inlineCallbacks
def test_updatesome(self):
"""
L{Record.updatesome} will update all instances of the matching records.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.updatesome(txn, where=(TestRecord.beta == 123), gamma=u"changed")
yield txn.commit()
txn = self.pool.connection()
records = yield TestRecord.all(txn)
self.assertEqual(
set([(record.beta, record.gamma,) for record in records]),
set([
(123, u"changed"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")
])
)
yield TestRecord.updatesome(txn, where=(TestRecord.beta.In((234, 345,))), gamma=u"changed-2")
yield txn.commit()
txn = self.pool.connection()
records = yield TestRecord.all(txn)
self.assertEqual(
set([(record.beta, record.gamma,) for record in records]),
set([
(123, u"changed"), (456, u"four"), (345, u"changed-2"),
(234, u"changed-2"), (356, u"three")
])
)
@inlineCallbacks
def test_deleteall(self):
"""
L{Record.deleteall} will delete all instances of the record.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.deleteall(txn)
all = yield TestRecord.all(txn)
self.assertEqual(len(all), 0)
@inlineCallbacks
def test_deletesome(self):
"""
L{Record.deletesome} will delete all instances of the matching records.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.deletesome(txn, TestRecord.gamma == u"three")
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((123, 456, 234,)))
yield TestRecord.deletesome(txn, (TestRecord.gamma == u"one").Or(TestRecord.gamma == u"two"))
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((456,)))
@inlineCallbacks
def test_deletesimple(self):
"""
L{Record.deletesimple} will delete all instances of the matching records.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.deletesimple(txn, gamma=u"three")
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((123, 456, 234,)))
yield TestRecord.deletesimple(txn, beta=123, gamma=u"one")
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((456, 234)))
@inlineCallbacks
def test_repr(self):
"""
The C{repr} of a L{Record} presents all its values.
"""
txn = self.pool.connection()
yield txn.execSQL("insert into ALPHA values (:1, :2)", [789, u'nine'])
rec = list((yield TestRecord.all(txn)))[0]
self.assertIn(" beta=789", repr(rec))
self.assertIn(" gamma=u'nine'", repr(rec))
@inlineCallbacks
def test_orderedQuery(self):
"""
L{Record.query} takes an 'order' argument which will allow the objects
returned to be ordered.
"""
txn = self.pool.connection()
for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
(356, u"three"), (456, u"four")]:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
records = yield TestRecord.query(
txn, TestRecord.gamma == u"three", TestRecord.beta
)
self.assertEqual([record.beta for record in records], [345, 356])
records = yield TestRecord.query(
txn, TestRecord.gamma == u"three", TestRecord.beta, ascending=False
)
self.assertEqual([record.beta for record in records], [356, 345])
@inlineCallbacks
def test_pop(self):
"""
A L{Record} may be loaded and deleted atomically, with L{Record.pop}.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestRecord.pop(txn, 234)
self.assertEqual(rec.gamma, u'two')
self.assertEqual(
(yield txn.execSQL(
"select count(*) from ALPHA where BETA = :1", [234]
)),
[tuple([0])]
)
yield self.assertFailure(TestRecord.pop(txn, 234), NoSuchRecord)
def test_columnNamingConvention(self):
"""
The naming convention maps columns C{LIKE_THIS} to be attributes
C{likeThis}.
"""
self.assertEqual(
Record.namingConvention(u"like_this"),
"likeThis"
)
self.assertEqual(
Record.namingConvention(u"LIKE_THIS"),
"likeThis"
)
self.assertEqual(
Record.namingConvention(u"LIKE_THIS_ID"),
"likeThisID"
)
@inlineCallbacks
def test_lock(self):
"""
A L{Record} may be locked, with L{Record.lock}.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestRecord.load(txn, 234)
yield rec.lock()
self.assertEqual(rec.gamma, u'two')
@inlineCallbacks
def test_trylock(self):
"""
A L{Record} may be locked, with L{Record.trylock}.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestRecord.load(txn, 234)
result = yield rec.trylock()
self.assertTrue(result)
@inlineCallbacks
def test_serialize(self):
"""
A L{SerializableRecord} may be serialized.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestSerializeRecord.load(txn, 234)
result = rec.serialize()
self.assertEqual(result, {"beta": 234, "gamma": u"two"})
@inlineCallbacks
def test_deserialize(self):
"""
A L{SerializableRecord} may be deserialized.
"""
txn = self.pool.connection()
rec = yield TestSerializeRecord.deserialize({"beta": 234, "gamma": u"two"})
yield rec.insert(txn)
yield txn.commit()
txn = self.pool.connection()
rec = yield TestSerializeRecord.query(txn, TestSerializeRecord.beta == 234)
self.assertEqual(len(rec), 1)
self.assertEqual(rec[0].gamma, u"two")
yield txn.commit()
# Check that attributes can be changed prior to insert, and not after
txn = self.pool.connection()
rec = yield TestSerializeRecord.deserialize({"beta": 456, "gamma": u"one"})
rec.gamma = u"four"
yield rec.insert(txn)
self.assertRaises(ReadOnly, _raise)
yield txn.commit()
txn = self.pool.connection()
rec = yield TestSerializeRecord.query(txn, TestSerializeRecord.beta == 456)
self.assertEqual(len(rec), 1)
self.assertEqual(rec[0].gamma, u"four")
yield txn.commit()
| 34.036624
| 101
| 0.568561
|
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Test cases for L{twext.enterprise.dal.record}.
"""
import datetime
from twisted.internet.defer import inlineCallbacks, gatherResults, returnValue
from twisted.trial.unittest import TestCase, SkipTest
from twext.enterprise.dal.record import (
Record, fromTable, ReadOnly, NoSuchRecord,
SerializableRecord)
from twext.enterprise.dal.test.test_parseschema import SchemaTestHelper
from twext.enterprise.dal.syntax import SchemaSyntax
from twext.enterprise.fixtures import buildConnectionPool
# from twext.enterprise.dal.syntax import
sth = SchemaTestHelper()
sth.id = lambda: __name__
schemaString = """
create table ALPHA (BETA integer primary key, GAMMA text);
create table DELTA (PHI integer primary key default (nextval('myseq')),
EPSILON text not null,
ZETA timestamp not null default '2012-12-12 12:12:12' );
"""
# sqlite can be made to support nextval() as a function, but 'create sequence'
# is syntax and can't.
parseableSchemaString = """
create sequence myseq;
""" + schemaString
try:
testSchema = SchemaSyntax(sth.schemaFromString(parseableSchemaString))
except SkipTest as e:
Alpha = Delta = object
skip = e
else:
Alpha = fromTable(testSchema.ALPHA)
Delta = fromTable(testSchema.DELTA)
skip = False
class TestRecord(Record, Alpha):
"""
A sample test record.
"""
class TestSerializeRecord(SerializableRecord, Alpha):
"""
A sample test serializable record with default values specified.
"""
class TestAutoRecord(Record, Delta):
"""
A sample test record with default values specified.
"""
class TestCRUD(TestCase):
"""
Tests for creation, mutation, and deletion operations.
"""
def setUp(self):
self.pool = buildConnectionPool(self, schemaString)
@inlineCallbacks
def test_simpleLoad(self):
"""
Loading an existing row from the database by its primary key will
populate its attributes from columns of the corresponding row in the
database.
"""
txn = self.pool.connection()
yield txn.execSQL("insert into ALPHA values (:1, :2)", [234, "one"])
yield txn.execSQL("insert into ALPHA values (:1, :2)", [456, "two"])
rec = yield TestRecord.load(txn, 456)
self.assertIsInstance(rec, TestRecord)
self.assertEquals(rec.beta, 456)
self.assertEquals(rec.gamma, "two")
rec2 = yield TestRecord.load(txn, 234)
self.assertIsInstance(rec2, TestRecord)
self.assertEqual(rec2.beta, 234)
self.assertEqual(rec2.gamma, "one")
@inlineCallbacks
def test_missingLoad(self):
"""
Try loading an row which doesn't exist
"""
txn = self.pool.connection()
yield txn.execSQL("insert into ALPHA values (:1, :2)", [234, "one"])
yield self.assertFailure(TestRecord.load(txn, 456), NoSuchRecord)
@inlineCallbacks
def test_simpleCreate(self):
"""
When a record object is created, a row with matching column values will
be created in the database.
"""
txn = self.pool.connection()
rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
self.assertEquals(rec.beta, 3)
self.assertEqual(rec.gamma, u'epsilon')
rows = yield txn.execSQL("select BETA, GAMMA from ALPHA")
self.assertEqual(rows, [tuple([3, u'epsilon'])])
@inlineCallbacks
def test_simpleDelete(self):
"""
When a record object is deleted, a row with a matching primary key will
be deleted in the database.
"""
txn = self.pool.connection()
def mkrow(beta, gamma):
return txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield gatherResults(
[mkrow(123, u"one"), mkrow(234, u"two"), mkrow(345, u"three")]
)
tr = yield TestRecord.load(txn, 234)
yield tr.delete()
rows = yield txn.execSQL("select BETA, GAMMA from ALPHA order by BETA")
self.assertEqual(rows, [(123, u"one"), (345, u"three")])
@inlineCallbacks
def oneRowCommitted(self, beta=123, gamma=u'456'):
"""
Create, commit, and return one L{TestRecord}.
"""
txn = self.pool.connection(self.id())
row = yield TestRecord.create(txn, beta=beta, gamma=gamma)
yield txn.commit()
returnValue(row)
@inlineCallbacks
def test_deleteWhenDeleted(self):
"""
When a record object is deleted, if it's already been deleted, it will
raise L{NoSuchRecord}.
"""
row = yield self.oneRowCommitted()
txn = self.pool.connection(self.id())
newRow = yield TestRecord.load(txn, row.beta)
yield newRow.delete()
yield self.assertFailure(newRow.delete(), NoSuchRecord)
@inlineCallbacks
def test_cantCreateWithoutRequiredValues(self):
"""
When a L{Record} object is created without required values, it raises a
L{TypeError}.
"""
txn = self.pool.connection()
te = yield self.assertFailure(TestAutoRecord.create(txn), TypeError)
self.assertIn("required attribute 'epsilon' not passed", str(te))
@inlineCallbacks
def test_datetimeType(self):
"""
When a L{Record} references a timestamp column, it retrieves the date
as UTC.
"""
txn = self.pool.connection()
# Create ...
rec = yield TestAutoRecord.create(txn, epsilon=1)
self.assertEquals(
rec.zeta,
datetime.datetime(2012, 12, 12, 12, 12, 12)
)
yield txn.commit()
# ... should have the same effect as loading.
txn = self.pool.connection()
rec = (yield TestAutoRecord.all(txn))[0]
self.assertEquals(
rec.zeta,
datetime.datetime(2012, 12, 12, 12, 12, 12)
)
@inlineCallbacks
def test_tooManyAttributes(self):
"""
When a L{Record} object is created with unknown attributes (those which
don't map to any column), it raises a L{TypeError}.
"""
txn = self.pool.connection()
te = yield self.assertFailure(
TestRecord.create(
txn, beta=3, gamma=u'three',
extraBonusAttribute=u'nope',
otherBonusAttribute=4321,
),
TypeError
)
self.assertIn("extraBonusAttribute, otherBonusAttribute", str(te))
@inlineCallbacks
def test_createFillsInPKey(self):
"""
If L{Record.create} is called without an auto-generated primary key
value for its row, that value will be generated and set on the returned
object.
"""
txn = self.pool.connection()
tr = yield TestAutoRecord.create(txn, epsilon=u'specified')
tr2 = yield TestAutoRecord.create(txn, epsilon=u'also specified')
self.assertEquals(tr.phi, 1)
self.assertEquals(tr2.phi, 2)
@inlineCallbacks
def test_attributesArentMutableYet(self):
"""
Changing attributes on a database object is not supported yet, because
it's not entirely clear when to flush the SQL to the database.
Instead, for the time being, use C{.update}. When you attempt to set
an attribute, an error will be raised informing you of this fact, so
that the error is clear.
"""
txn = self.pool.connection()
rec = yield TestRecord.create(txn, beta=7, gamma=u'what')
def setit():
rec.beta = 12
ro = self.assertRaises(ReadOnly, setit)
self.assertEqual(rec.beta, 7)
self.assertIn("SQL-backed attribute 'TestRecord.beta' is read-only. "
"Use '.update(...)' to modify attributes.", str(ro))
@inlineCallbacks
def test_simpleUpdate(self):
"""
L{Record.update} will change the values on the record and in te
database.
"""
txn = self.pool.connection()
rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
yield rec.update(gamma=u'otherwise')
self.assertEqual(rec.gamma, u'otherwise')
yield txn.commit()
# Make sure that it persists.
txn = self.pool.connection()
rec = yield TestRecord.load(txn, 3)
self.assertEqual(rec.gamma, u'otherwise')
@inlineCallbacks
def test_simpleQuery(self):
"""
L{Record.query} will allow you to query for a record by its class
attributes as columns.
"""
txn = self.pool.connection()
for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
(356, u"three"), (456, u"four")]:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
records = yield TestRecord.query(txn, TestRecord.gamma == u"three")
self.assertEqual(len(records), 2)
records.sort(key=lambda x: x.beta)
self.assertEqual(records[0].beta, 345)
self.assertEqual(records[1].beta, 356)
@inlineCallbacks
def test_querySimple(self):
"""
L{Record.querysimple} will allow you to query for a record by its class
attributes as columns.
"""
txn = self.pool.connection()
for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
(356, u"three"), (456, u"four")]:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
records = yield TestRecord.querysimple(txn, gamma=u"three")
self.assertEqual(len(records), 2)
records.sort(key=lambda x: x.beta)
self.assertEqual(records[0].beta, 345)
self.assertEqual(records[1].beta, 356)
@inlineCallbacks
def test_eq(self):
"""
L{Record.__eq__} works.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
one = yield TestRecord.load(txn, 123)
one_copy = yield TestRecord.load(txn, 123)
two = yield TestRecord.load(txn, 234)
self.assertTrue(one == one_copy)
self.assertFalse(one == two)
@inlineCallbacks
def test_all(self):
"""
L{Record.all} will return all instances of the record, sorted by
primary key.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
self.assertEqual(
[(x.beta, x.gamma) for x in (yield TestRecord.all(txn))],
sorted(data)
)
@inlineCallbacks
def test_count(self):
"""
L{Record.count} will return count of the records
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
self.assertEqual(
(yield TestRecord.count(txn)),
len(data)
)
@inlineCallbacks
def test_updatesome(self):
"""
L{Record.updatesome} will update all instances of the matching records.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.updatesome(txn, where=(TestRecord.beta == 123), gamma=u"changed")
yield txn.commit()
txn = self.pool.connection()
records = yield TestRecord.all(txn)
self.assertEqual(
set([(record.beta, record.gamma,) for record in records]),
set([
(123, u"changed"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")
])
)
yield TestRecord.updatesome(txn, where=(TestRecord.beta.In((234, 345,))), gamma=u"changed-2")
yield txn.commit()
txn = self.pool.connection()
records = yield TestRecord.all(txn)
self.assertEqual(
set([(record.beta, record.gamma,) for record in records]),
set([
(123, u"changed"), (456, u"four"), (345, u"changed-2"),
(234, u"changed-2"), (356, u"three")
])
)
@inlineCallbacks
def test_deleteall(self):
"""
L{Record.deleteall} will delete all instances of the record.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.deleteall(txn)
all = yield TestRecord.all(txn)
self.assertEqual(len(all), 0)
@inlineCallbacks
def test_deletesome(self):
"""
L{Record.deletesome} will delete all instances of the matching records.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.deletesome(txn, TestRecord.gamma == u"three")
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((123, 456, 234,)))
yield TestRecord.deletesome(txn, (TestRecord.gamma == u"one").Or(TestRecord.gamma == u"two"))
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((456,)))
@inlineCallbacks
def test_deletesimple(self):
"""
L{Record.deletesimple} will delete all instances of the matching records.
"""
txn = self.pool.connection()
data = [(123, u"one"), (456, u"four"), (345, u"three"),
(234, u"two"), (356, u"three")]
for beta, gamma in data:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
yield TestRecord.deletesimple(txn, gamma=u"three")
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((123, 456, 234,)))
yield TestRecord.deletesimple(txn, beta=123, gamma=u"one")
all = yield TestRecord.all(txn)
self.assertEqual(set([record.beta for record in all]), set((456, 234)))
@inlineCallbacks
def test_repr(self):
"""
The C{repr} of a L{Record} presents all its values.
"""
txn = self.pool.connection()
yield txn.execSQL("insert into ALPHA values (:1, :2)", [789, u'nine'])
rec = list((yield TestRecord.all(txn)))[0]
self.assertIn(" beta=789", repr(rec))
self.assertIn(" gamma=u'nine'", repr(rec))
@inlineCallbacks
def test_orderedQuery(self):
"""
L{Record.query} takes an 'order' argument which will allow the objects
returned to be ordered.
"""
txn = self.pool.connection()
for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
(356, u"three"), (456, u"four")]:
yield txn.execSQL("insert into ALPHA values (:1, :2)",
[beta, gamma])
records = yield TestRecord.query(
txn, TestRecord.gamma == u"three", TestRecord.beta
)
self.assertEqual([record.beta for record in records], [345, 356])
records = yield TestRecord.query(
txn, TestRecord.gamma == u"three", TestRecord.beta, ascending=False
)
self.assertEqual([record.beta for record in records], [356, 345])
@inlineCallbacks
def test_pop(self):
"""
A L{Record} may be loaded and deleted atomically, with L{Record.pop}.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestRecord.pop(txn, 234)
self.assertEqual(rec.gamma, u'two')
self.assertEqual(
(yield txn.execSQL(
"select count(*) from ALPHA where BETA = :1", [234]
)),
[tuple([0])]
)
yield self.assertFailure(TestRecord.pop(txn, 234), NoSuchRecord)
def test_columnNamingConvention(self):
"""
The naming convention maps columns C{LIKE_THIS} to be attributes
C{likeThis}.
"""
self.assertEqual(
Record.namingConvention(u"like_this"),
"likeThis"
)
self.assertEqual(
Record.namingConvention(u"LIKE_THIS"),
"likeThis"
)
self.assertEqual(
Record.namingConvention(u"LIKE_THIS_ID"),
"likeThisID"
)
@inlineCallbacks
def test_lock(self):
"""
A L{Record} may be locked, with L{Record.lock}.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestRecord.load(txn, 234)
yield rec.lock()
self.assertEqual(rec.gamma, u'two')
@inlineCallbacks
def test_trylock(self):
"""
A L{Record} may be locked, with L{Record.trylock}.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestRecord.load(txn, 234)
result = yield rec.trylock()
self.assertTrue(result)
@inlineCallbacks
def test_serialize(self):
"""
A L{SerializableRecord} may be serialized.
"""
txn = self.pool.connection()
for beta, gamma in [
(123, u"one"),
(234, u"two"),
(345, u"three"),
(356, u"three"),
(456, u"four"),
]:
yield txn.execSQL(
"insert into ALPHA values (:1, :2)", [beta, gamma]
)
rec = yield TestSerializeRecord.load(txn, 234)
result = rec.serialize()
self.assertEqual(result, {"beta": 234, "gamma": u"two"})
@inlineCallbacks
def test_deserialize(self):
"""
A L{SerializableRecord} may be deserialized.
"""
txn = self.pool.connection()
rec = yield TestSerializeRecord.deserialize({"beta": 234, "gamma": u"two"})
yield rec.insert(txn)
yield txn.commit()
txn = self.pool.connection()
rec = yield TestSerializeRecord.query(txn, TestSerializeRecord.beta == 234)
self.assertEqual(len(rec), 1)
self.assertEqual(rec[0].gamma, u"two")
yield txn.commit()
# Check that attributes can be changed prior to insert, and not after
txn = self.pool.connection()
rec = yield TestSerializeRecord.deserialize({"beta": 456, "gamma": u"one"})
rec.gamma = u"four"
yield rec.insert(txn)
def _raise():
rec.gamma = u"five"
self.assertRaises(ReadOnly, _raise)
yield txn.commit()
txn = self.pool.connection()
rec = yield TestSerializeRecord.query(txn, TestSerializeRecord.beta == 456)
self.assertEqual(len(rec), 1)
self.assertEqual(rec[0].gamma, u"four")
yield txn.commit()
| 212
| 0
| 120
|
1b2c4d7cf83e49c4e21c7430538669298e5db5a5
| 3,518
|
py
|
Python
|
cloudify_gcp/compute/tests/test_security_group.py
|
cloudify-cosmo/cloudify-gcp-plugin
|
c70faee0555070f7fc67f0001395eaafb681b23c
|
[
"Apache-2.0"
] | 4
|
2016-10-24T17:42:07.000Z
|
2020-05-31T00:34:07.000Z
|
cloudify_gcp/compute/tests/test_security_group.py
|
cloudify-cosmo/cloudify-gcp-plugin
|
c70faee0555070f7fc67f0001395eaafb681b23c
|
[
"Apache-2.0"
] | 35
|
2015-04-30T20:14:01.000Z
|
2022-02-03T21:35:54.000Z
|
cloudify_gcp/compute/tests/test_security_group.py
|
cloudify-cosmo/cloudify-gcp-plugin
|
c70faee0555070f7fc67f0001395eaafb681b23c
|
[
"Apache-2.0"
] | 13
|
2015-04-17T16:42:03.000Z
|
2021-06-24T04:12:14.000Z
|
# -*- coding: utf-8 -*-
########
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch
from cloudify_gcp.compute import security_group
from ...tests import TestGCP
@patch('cloudify_gcp.utils.assure_resource_id_correct', return_value=True)
@patch('cloudify_gcp.gcp.ServiceAccountCredentials.from_json_keyfile_dict')
@patch('cloudify_gcp.gcp.build')
| 37.031579
| 75
| 0.517339
|
# -*- coding: utf-8 -*-
########
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch
from cloudify_gcp.compute import security_group
from ...tests import TestGCP
@patch('cloudify_gcp.utils.assure_resource_id_correct', return_value=True)
@patch('cloudify_gcp.gcp.ServiceAccountCredentials.from_json_keyfile_dict')
@patch('cloudify_gcp.gcp.build')
class TestGCPSecurityGroup(TestGCP):
def setUp(self):
super(TestGCPSecurityGroup, self).setUp()
self.ctxmock.instance.relationships = []
def test_create(self, mock_build, *args):
self.ctxmock.node.properties['rules'] = rules = [
{
'allowed': {'NOTHING!': ''},
'sources': ['bob', 'jane'],
},
{
'allowed': {'tcp': ['40', 41]},
'sources': ['jane'],
},
]
security_group.create(
'name',
rules,
)
self.assertEqual(2, mock_build.call_count)
for body in [
{
'network': 'projects/not really a project/'
'global/networks/not a real network',
'sourceTags': ['bob', 'jane'],
'description': 'Cloudify generated SG part',
'sourceRanges': [],
'targetTags': ['ctx-sg-name'],
'allowed': [{'IPProtocol': 'NOTHING!'}],
'name': 'ctx-sg-name-from-bobjane-to-nothing',
},
{
'network': 'projects/not really a project/'
'global/networks/not a real network',
'sourceTags': ['jane'],
'description': 'Cloudify generated SG part',
'sourceRanges': [],
'targetTags': ['ctx-sg-name'],
'allowed': [{
'IPProtocol': 'tcp',
'ports': ['40', 41]}],
'name': 'ctx-sg-name-from-jane-to-tcp4041',
},
]:
mock_build().firewalls().insert.assert_any_call(
body=body,
project='not really a project'
)
def test_delete(self, mock_build, *args):
props = self.ctxmock.instance.runtime_properties
props['gcp_name'] = 'delete_name'
props['rules'] = [
{'name': 'You do not talk about Fight Club'},
{'name': 'You DO NOT talk about Fight Club'},
]
security_group.delete()
self.assertEqual(2, mock_build.call_count)
mock_build().firewalls().delete.assert_called_with(
firewall='youdonottalkaboutfightclub',
project='not really a project',
)
| 2,461
| 15
| 103
|
d24942e508c1186e62ae2a1e3d1a60df2f0dca2e
| 12,218
|
py
|
Python
|
tola/test/test_pipeline.py
|
mercycorps/toladata
|
4d5f9b45905a81af9981b586690e020d5b3bfc60
|
[
"Apache-2.0"
] | null | null | null |
tola/test/test_pipeline.py
|
mercycorps/toladata
|
4d5f9b45905a81af9981b586690e020d5b3bfc60
|
[
"Apache-2.0"
] | 268
|
2020-03-31T15:46:59.000Z
|
2022-03-31T18:01:08.000Z
|
tola/test/test_pipeline.py
|
mercycorps/toladata
|
4d5f9b45905a81af9981b586690e020d5b3bfc60
|
[
"Apache-2.0"
] | 1
|
2021-01-05T01:58:24.000Z
|
2021-01-05T01:58:24.000Z
|
from unittest import mock
from django.test import TestCase
from factories.workflow_models import CountryFactory, OrganizationFactory, TolaUserFactory
from tola_management.models import UserManagementAuditLog as UM
from workflow.models import TolaUser
from tola.pipeline import create_user_okta
SPECIAL_CHARS = "ßpécîäl_chars"
| 46.280303
| 119
| 0.569815
|
from unittest import mock
from django.test import TestCase
from factories.workflow_models import CountryFactory, OrganizationFactory, TolaUserFactory
from tola_management.models import UserManagementAuditLog as UM
from workflow.models import TolaUser
from tola.pipeline import create_user_okta
SPECIAL_CHARS = "ßpécîäl_chars"
class MockBackend(object):
def __init__(self, backend_name):
self.name = backend_name
class ImportIndicatorTests(TestCase):
def setUp(self):
self.country = CountryFactory()
self.backend = MockBackend('saml')
self.details = None
self.organization = OrganizationFactory(id=1)
def test_good_login(self):
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': ['Pat', 0], 'lastName': ['Smith', 0],
'mcCountryCode': ['AF', 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': [None, 0], 'lastName': [None, 0],
'mcCountryCode': ['AF', 0],
},
'idp_name': 'okta',
}
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
def test_bad_country(self):
# Test a country that doesn't exist - UPDATE: does not redirect because bad countries are ok
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': ['Pat', 0], 'lastName': ['Smith', 0],
'mcCountryCode': ['ZZ', 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
# self.assertEqual(okta_result.status_code, 302)
self.assertIsNone(okta_result)
# Test no country for old men
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': ['Pat', 0], 'lastName': ['Smith', 0],
'mcCountryCode': [None, 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
# UPDATE: does not redirect because no countries are ok
# self.assertEqual(okta_result.status_code, 302)
self.assertIsNone(okta_result)
def test_bad_names(self):
# First test a new user but with no names comeing from Okta
with mock.patch('tola.pipeline.logger') as log_mock:
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': [None, 0], 'lastName': [None, 0],
'mcCountryCode': ['AF', 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertEqual(okta_result.status_code, 302, msg="Failed to error on blank name")
# First create user and tola_user
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': ['Pat', 0], 'lastName': ['Smith', 0],
'mcCountryCode': ['AF', 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertEqual(okta_result, None, msg="Failed to pass on normal name")
# Now simulate lack of names
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': [None, 0], 'lastName': [None, 0],
'mcCountryCode': ['AF', 0],
},
'idp_name': 'okta',
}
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertEqual(okta_result, None, msg="Failed to pass on blank name with good name in DB")
# It should work even when the names are very long.
okta_response = {
'attributes': {
'email': ['test@example.com', 0], 'firstName': ['abcdefabcdefabcdefabcdefabcdefabcdefabcdefab', 0],
'lastName': ['abcdefabcdefabcdefabcdefabcdefabcdefabcdefab', 0],
'mcCountryCode': ['AF', 0],
},
'idp_name': 'okta',
}
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertEqual(okta_result, None, msg="Failed to pass on long name")
def test_updates_audit_log_appropriately(self):
banana_country = CountryFactory(country="BananaTown", code="BT")
cat_country = CountryFactory(country="CatLand", code="XT")
tola_user = TolaUserFactory(country=self.country)
count = UM.objects.count()
# doesn't update audit log with no changes:
with mock.patch('tola.pipeline.logger') as log_mock:
okta_response = {
'attributes': {
'email': [tola_user.user.email, 0],
'firstName': [tola_user.user.first_name, 0],
'lastName': [tola_user.user.last_name, 0],
'mcCountryCode': [self.country.code, 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
tu_reload = TolaUser.objects.get(pk=tola_user.pk)
self.assertEqual(tu_reload.country, self.country)
self.assertEqual(count, UM.objects.count(), "No audit log required")
del tu_reload
count = UM.objects.count()
# does update audit log with country change:
with mock.patch('tola.pipeline.logger') as log_mock:
okta_response = {
'attributes': {
'email': [tola_user.user.email, 0],
'firstName': [tola_user.user.first_name, 0],
'lastName': [tola_user.user.last_name, 0],
'mcCountryCode': ["BT", 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
tu_reload = TolaUser.objects.get(pk=tola_user.pk)
self.assertEqual(tu_reload.country, banana_country)
self.assertEqual(UM.objects.count(), count + 1, "Permissions audit log required")
um = UM.objects.order_by('-pk').first()
self.assertEqual(um.change_type, "user_programs_updated")
self.assertTrue(um.system_generated_update)
self.assertEqual(len(um.diff_list['programs']), 0)
self.assertEqual(len(um.diff_list['countries']), 2)
new_country = [x for x in um.diff_list['countries'] if x['name'] == str(banana_country.pk)][0]
old_country = [x for x in um.diff_list['countries'] if x['name'] == str(self.country.pk)][0]
self.assertEqual(new_country['prev']['country'], None)
self.assertEqual(old_country['new']['country'], None)
self.assertEqual(um.diff_list['base_country']['new'], "BananaTown")
self.assertEqual(um.diff_list['base_country']['prev'], self.country.country)
del tu_reload
count = UM.objects.count()
# does update audit log with name change:
with mock.patch('tola.pipeline.logger') as log_mock:
okta_response = {
'attributes': {
'email': [tola_user.user.email, 0],
'firstName': ["NewFirstName", 0],
'lastName': [tola_user.user.last_name, 0],
'mcCountryCode': ["BT", 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
tu_reload = TolaUser.objects.get(pk=tola_user.pk)
self.assertEqual(tu_reload.country, banana_country)
self.assertEqual(tu_reload.user.first_name, "NewFirstName")
self.assertEqual(tu_reload.user.last_name, tola_user.user.last_name)
self.assertEqual(tu_reload.name, f"NewFirstName {tola_user.user.last_name}")
self.assertEqual(UM.objects.count(), count + 1, "Profile audit log required")
um = UM.objects.order_by('-pk').first()
self.assertEqual(um.change_type, "user_profile_updated")
self.assertTrue(um.system_generated_update)
self.assertEqual(len(um.diff_list), 1)
self.assertEqual(um.diff_list[0]['name'], 'first_name')
self.assertEqual(um.diff_list[0]['new'], 'NewFirstName')
del tu_reload
count = UM.objects.count()
with mock.patch('tola.pipeline.logger') as log_mock:
okta_response = {
'attributes': {
'email': [tola_user.user.email, 0],
'firstName': ["NewFirstName", 0],
'lastName': ["NewLastName", 0],
'mcCountryCode': ["XT", 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
tu_reload = TolaUser.objects.get(pk=tola_user.pk)
self.assertEqual(tu_reload.country, cat_country)
self.assertEqual(tu_reload.user.first_name, "NewFirstName")
self.assertEqual(tu_reload.user.last_name, "NewLastName")
self.assertEqual(tu_reload.name, f"NewFirstName NewLastName")
self.assertEqual(UM.objects.count(), count + 2, "Profile and Permissions audit log required")
um = UM.objects.order_by('-pk')[0]
um2 = UM.objects.order_by('-pk')[1]
self.assertEqual(um.change_type, "user_programs_updated")
self.assertTrue(um.system_generated_update)
self.assertEqual(um2.change_type, "user_profile_updated")
self.assertTrue(um2.system_generated_update)
del tu_reload
count = UM.objects.count()
with mock.patch('tola.pipeline.logger') as log_mock:
okta_response = {
'attributes': {
'email': ["newemail@example.com", 0],
'firstName': [SPECIAL_CHARS, 0],
'lastName': ["LastName", 0],
'mcCountryCode': ["BT", 0],
},
'idp_name': 'okta',
}
user = None
okta_result = create_user_okta(self.backend, self.details, user, okta_response)
self.assertIsNone(okta_result)
tu_reload = TolaUser.objects.get(user__email="newemail@example.com")
self.assertEqual(tu_reload.country, banana_country)
self.assertEqual(tu_reload.user.first_name, SPECIAL_CHARS)
self.assertEqual(tu_reload.user.last_name, "LastName")
self.assertEqual(tu_reload.name, f"{SPECIAL_CHARS} LastName")
self.assertEqual(UM.objects.count(), count + 2, "Profile and Permissions audit log required")
um = UM.objects.order_by('-pk')[0]
um2 = UM.objects.order_by('-pk')[1]
self.assertEqual(um.change_type, "user_programs_updated")
self.assertTrue(um.system_generated_update)
self.assertEqual(um2.change_type, "user_created")
self.assertTrue(um2.system_generated_update)
del tu_reload
count = UM.objects.count()
| 11,659
| 21
| 207
|
7667f234b1d7bf88f3908ea5cd6180604aaa5929
| 933
|
py
|
Python
|
tools/idlstub.py
|
yashrajkakkad/resea
|
985ddb887b460f422b06a081a878598c79e8d8e0
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
tools/idlstub.py
|
yashrajkakkad/resea
|
985ddb887b460f422b06a081a878598c79e8d8e0
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
tools/idlstub.py
|
yashrajkakkad/resea
|
985ddb887b460f422b06a081a878598c79e8d8e0
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
from lark import Lark
if __name__ == "__main__":
main()
| 23.923077
| 75
| 0.599143
|
#!/usr/bin/env python3
import argparse
from lark import Lark
def parse(source):
parser = Lark("""
""")
ast = parser.parse(source)
idl = {}
return idl
def c_generator(idl):
text = ""
text += f"#ifndef __IDL_{idl['name']}_H__\n"
text += f"#define __IDL_{idl['name']}_H__\n"
text += "#endif\n"
with open(args.out, "w") as f:
f.write(generate(text))
def main():
parser = argparse.ArgumentParser(description="The IDL stub generator.")
parser.add_argument("--idl", required=True, help="The IDL file.")
parser.add_argument("--lang", choices=["c"], default="c",
help="The output language.")
parser.add_argument("-o", dest="out", required=True,
help="The output directory.")
args = parser.parse_args()
with open(args.idl) as f:
idl = parse(f.read())
if args.lang == "c":
c_generator(idl)
if __name__ == "__main__":
main()
| 764
| 0
| 69
|
82f889092288f46a74d16aabc0c4571dad4852ce
| 1,757
|
py
|
Python
|
RRT.py
|
garymullen/python-visualization-of-the-RRT-algorithm-with-pygame
|
deeb7122ffc247982463e1358004a51c1886307a
|
[
"MIT"
] | 4
|
2021-05-24T00:56:53.000Z
|
2022-01-23T19:17:09.000Z
|
RRT.py
|
garymullen/python-visualization-of-the-RRT-algorithm-with-pygame
|
deeb7122ffc247982463e1358004a51c1886307a
|
[
"MIT"
] | 1
|
2021-08-07T11:20:26.000Z
|
2021-08-07T11:20:26.000Z
|
RRT.py
|
garymullen/python-visualization-of-the-RRT-algorithm-with-pygame
|
deeb7122ffc247982463e1358004a51c1886307a
|
[
"MIT"
] | 4
|
2021-07-24T15:19:32.000Z
|
2022-02-19T12:57:10.000Z
|
import pygame
from RRTbasePy import RRTGraph
from RRTbasePy import RRTMap
import time
if __name__ == '__main__':
result=False
while not result:
try:
main()
result=True
except:
result=False
| 19.741573
| 96
| 0.515083
|
import pygame
from RRTbasePy import RRTGraph
from RRTbasePy import RRTMap
import time
def main():
dimensions =(512,512)
start=(50,50)
goal=(300,300)
obsdim=30
obsnum=50
iteration=0
t1=0
pygame.init()
map=RRTMap(start,goal,dimensions,obsdim,obsnum)
graph=RRTGraph(start,goal,dimensions,obsdim,obsnum)
obstacles=graph.makeobs()
map.drawMap(obstacles)
t1=time.time()
while (not graph.path_to_goal()):
time.sleep(0.005)
elapsed=time.time()-t1
t1=time.time()
#raise exception if timeout
if elapsed > 10:
print('timeout re-initiating the calculations')
raise
if iteration % 10 == 0:
X, Y, Parent = graph.bias(goal)
pygame.draw.circle(map.map, map.grey, (X[-1], Y[-1]), map.nodeRad*2, 0)
pygame.draw.line(map.map, map.Blue, (X[-1], Y[-1]), (X[Parent[-1]], Y[Parent[-1]]),
map.edgeThickness)
else:
X, Y, Parent = graph.expand()
pygame.draw.circle(map.map, map.grey, (X[-1], Y[-1]), map.nodeRad*2, 0)
pygame.draw.line(map.map, map.Blue, (X[-1], Y[-1]), (X[Parent[-1]], Y[Parent[-1]]),
map.edgeThickness)
if iteration % 5 == 0:
pygame.display.update()
iteration += 1
map.drawPath(graph.getPathCoords())
pygame.display.update()
pygame.event.clear()
pygame.event.wait(0)
if __name__ == '__main__':
result=False
while not result:
try:
main()
result=True
except:
result=False
| 1,411
| 0
| 25
|
20a085f350adb968c43f78b8f69f54f92daca374
| 1,940
|
py
|
Python
|
gendatafiles.py
|
incolumepy/analise_bonus_viagem
|
84dd60477ea03cccd0b4c56591f017ca9b123442
|
[
"MIT"
] | null | null | null |
gendatafiles.py
|
incolumepy/analise_bonus_viagem
|
84dd60477ea03cccd0b4c56591f017ca9b123442
|
[
"MIT"
] | null | null | null |
gendatafiles.py
|
incolumepy/analise_bonus_viagem
|
84dd60477ea03cccd0b4c56591f017ca9b123442
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '@britodfbr'
import pandas as pd
from faker import Faker
import locale
import random
import toml
from pathlib import Path
dados = Path('dados/dados.toml')
dados.parent.mkdir(exist_ok=True)
locale.setlocale(locale.LC_TIME, 'pt_BR.utf8')
fake = Faker('pt_BR')
def gen_names():
""" Gera os 1000 nomes aleatórios utilizando a biblioteca Faker """
# Semente para garantir que sempre serão os mesmos nomes
fake.seed_instance(0)
# dict com os 1000 nomes completos
names = {'VENDEDOR': (f"{fake.first_name()} {fake.last_name()}" for x in range(1000))}
# grava o resultado em um arquivo toml para recuperação posterior
with open('dados/dados.toml', 'w') as f:
toml.dump(names, f)
def gendata():
""" Gera massa de dados com VENDEDOR/VENDAS """
# Se não existir cria arquivo toml com nomes de entrada
if not dados.is_file():
gen_names()
# Carrega nomes dos vendedores
names = toml.load(dados)['VENDEDOR']
# Semente aleatória para gerar valor de vendas diferentes
fake.seed_instance(random.randint(1, 13))
# gerador com 1000 entradas aleatórias para vendas
vendas = (
fake.pyfloat(left_digits=None, right_digits=2, positive=True, min_value=1000, max_value=55010)
for _ in range(1000)
)
# Retorna uma lista de tuplas com VENDEDOR/VENDA
return zip(names, vendas)
def run():
""""""
# Lista com nomes dos meses gerado em pandas
meses = set(pd.date_range(start='2020-01-01', end='2020-6-1', periods=30).strftime('%B'))
for mes in meses:
# DataFrame com conjunto de dados gerados
df = pd.DataFrame(gendata(), columns=['VENDEDOR', 'VENDAS'])
# Grava arquivos com nomes dos meses em portugues com vendedores e suas respectivas vendas mensais
df.to_excel(dados.with_name(f"{mes}.xlsx"), index=False)
if __name__ == '__main__':
run()
| 32.881356
| 106
| 0.680412
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '@britodfbr'
import pandas as pd
from faker import Faker
import locale
import random
import toml
from pathlib import Path
dados = Path('dados/dados.toml')
dados.parent.mkdir(exist_ok=True)
locale.setlocale(locale.LC_TIME, 'pt_BR.utf8')
fake = Faker('pt_BR')
def gen_names():
""" Gera os 1000 nomes aleatórios utilizando a biblioteca Faker """
# Semente para garantir que sempre serão os mesmos nomes
fake.seed_instance(0)
# dict com os 1000 nomes completos
names = {'VENDEDOR': (f"{fake.first_name()} {fake.last_name()}" for x in range(1000))}
# grava o resultado em um arquivo toml para recuperação posterior
with open('dados/dados.toml', 'w') as f:
toml.dump(names, f)
def gendata():
""" Gera massa de dados com VENDEDOR/VENDAS """
# Se não existir cria arquivo toml com nomes de entrada
if not dados.is_file():
gen_names()
# Carrega nomes dos vendedores
names = toml.load(dados)['VENDEDOR']
# Semente aleatória para gerar valor de vendas diferentes
fake.seed_instance(random.randint(1, 13))
# gerador com 1000 entradas aleatórias para vendas
vendas = (
fake.pyfloat(left_digits=None, right_digits=2, positive=True, min_value=1000, max_value=55010)
for _ in range(1000)
)
# Retorna uma lista de tuplas com VENDEDOR/VENDA
return zip(names, vendas)
def run():
""""""
# Lista com nomes dos meses gerado em pandas
meses = set(pd.date_range(start='2020-01-01', end='2020-6-1', periods=30).strftime('%B'))
for mes in meses:
# DataFrame com conjunto de dados gerados
df = pd.DataFrame(gendata(), columns=['VENDEDOR', 'VENDAS'])
# Grava arquivos com nomes dos meses em portugues com vendedores e suas respectivas vendas mensais
df.to_excel(dados.with_name(f"{mes}.xlsx"), index=False)
if __name__ == '__main__':
run()
| 0
| 0
| 0
|
2d792888b823d61762ecd6f4dca134980b6301ba
| 3,948
|
py
|
Python
|
case/conftest.py
|
lzpsgh/AscTrio
|
f969beece5dc93d29063da03793521bc54b814dd
|
[
"MIT"
] | 5
|
2021-07-21T06:50:51.000Z
|
2022-03-31T04:18:28.000Z
|
case/conftest.py
|
lzpsgh/AscTrio
|
f969beece5dc93d29063da03793521bc54b814dd
|
[
"MIT"
] | null | null | null |
case/conftest.py
|
lzpsgh/AscTrio
|
f969beece5dc93d29063da03793521bc54b814dd
|
[
"MIT"
] | 1
|
2022-03-28T01:50:03.000Z
|
2022-03-28T01:50:03.000Z
|
import allure
import pytest
from api.account import account
from api.user import user
from util.log_util import logger
from util.mysql_util import mysqler
# 在crm后台登录,获取cookies
@pytest.fixture(scope="session")
# 在h5后台登录,获取cookies
@pytest.fixture(scope="session")
# 注册1个leads
# 注册1个正课用户
# 已老用户身份登录并拿到cookie
# 获取1个leads
# 获取n个正课学员
# BASE_PATH = common_kit.env('PROJECT_ROOT')
# def get_data(yaml_file_name):
# try:
# # data_file_path = os.path.join(BASE_PATH, "data", yaml_file_name)
# yaml_file_path = f"{BASE_PATH}/data/{yaml_file_name}"
# yaml_data = data_pool.load_yml(yaml_file_path)
# except Exception as ex:
# pytest.skip(str(ex))
# else:
# return yaml_data
# base_data = get_data("base_data.yml")
# api_data = get_data("api_test_data.yml")
# scenario_data = get_data("scenario_test_data.yml")
# leadsapi_data = get_data("leadsapi_test_data.yml")
# get_add_leads_data = get_data("test_add_leads.yml")
# bbc_data = get_data("bbc_signup_data.yml")
#
# @pytest.fixture(scope="session")
# def login_fixture():
# username = base_data["init_admin_user"]["username"]
# password = base_data["init_admin_user"]["password"]
# header = {
# "Content-Type": "application/x-www-form-urlencoded"
# }
# payload = {
# "username": username,
# "password": password
# }
# loginInfo = user.login(data=payload, headers=header)
# step_login(username, password)
# yield loginInfo.json()
#
# @pytest.fixture(scope="function")
# def insert_delete_user():
# """删除用户前,先在数据库插入一条用户数据"""
# insert_sql = base_data["init_sql"]["insert_delete_user"][0]
# mysqler.execute_db(insert_sql)
# step_first()
# logger.info("删除用户操作:插入新用户--准备用于删除用户")
# logger.info("执行前置SQL:{}".format(insert_sql))
# yield
# # 因为有些情况是不给删除管理员用户的,这种情况需要手动清理上面插入的数据
# del_sql = base_data["init_sql"]["insert_delete_user"][1]
# mysqler.execute_db(del_sql)
# step_last()
# logger.info("删除用户操作:手工清理处理失败的数据")
# logger.info("执行后置SQL:{}".format(del_sql))
#
# @pytest.fixture(scope="function")
# def delete_register_user():
# """注册用户前,先删除数据,用例执行之后,再次删除以清理数据"""
# del_sql = base_data["init_sql"]["delete_register_user"]
# mysqler.execute_db(del_sql)
# step_first()
# logger.info("注册用户操作:清理用户--准备注册新用户")
# logger.info("执行前置SQL:{}".format(del_sql))
# yield
# mysqler.execute_db(del_sql)
# step_last()
# logger.info("注册用户操作:删除注册的用户")
# logger.info("执行后置SQL:{}".format(del_sql))
#
# @pytest.fixture(scope="function")
# def update_user_telephone():
# """修改用户前,因为手机号唯一,为了使用例重复执行,每次需要先修改手机号,再执行用例"""
# update_sql = base_data["init_sql"]["update_user_telephone"]
# mysqler.execute_db(update_sql)
# step_first()
# logger.info("修改用户操作:手工修改用户的手机号,以便用例重复执行")
# logger.info("执行SQL:{}".format(update_sql))
@allure.step("前置步骤 ==>> 清理数据")
@allure.step("后置步骤 ==>> 清理数据")
@allure.step("前置步骤 ==>> 管理员用户登录")
if __name__ == '__main__':
pass
| 27.227586
| 79
| 0.669959
|
import allure
import pytest
from api.account import account
from api.user import user
from util.log_util import logger
from util.mysql_util import mysqler
# 在crm后台登录,获取cookies
@pytest.fixture(scope="session")
def crm_login_with_mm():
login_info = account.crm_login()
# yield login_info.json()
# 在h5后台登录,获取cookies
@pytest.fixture(scope="session")
def h5_login():
login_info = user.get_current_user_nocookie()
# yield login_info.json()
# 注册1个leads
def reg_leads(number):
user.phone_exist()
user.send_sms2()
user.register()
# 注册1个正课用户
def reg_student():
user.phone_exist()
user.send_sms2()
user.register()
user.modify_users_owner()
# 已老用户身份登录并拿到cookie
def get_user_cookie():
user.get_current_user()
user.login()
# 获取1个leads
def get_old_leads(phone):
mysqler.query("SELECT * FROM activityuser AS au WHERE au.phone = " + phone)
# 执行数据库,从leads表里取n个leads
# 获取n个正课学员
def get_old_student(number):
pass
# 执行数据库,从student_schedule表里取n个正课学员
# BASE_PATH = common_kit.env('PROJECT_ROOT')
# def get_data(yaml_file_name):
# try:
# # data_file_path = os.path.join(BASE_PATH, "data", yaml_file_name)
# yaml_file_path = f"{BASE_PATH}/data/{yaml_file_name}"
# yaml_data = data_pool.load_yml(yaml_file_path)
# except Exception as ex:
# pytest.skip(str(ex))
# else:
# return yaml_data
# base_data = get_data("base_data.yml")
# api_data = get_data("api_test_data.yml")
# scenario_data = get_data("scenario_test_data.yml")
# leadsapi_data = get_data("leadsapi_test_data.yml")
# get_add_leads_data = get_data("test_add_leads.yml")
# bbc_data = get_data("bbc_signup_data.yml")
#
# @pytest.fixture(scope="session")
# def login_fixture():
# username = base_data["init_admin_user"]["username"]
# password = base_data["init_admin_user"]["password"]
# header = {
# "Content-Type": "application/x-www-form-urlencoded"
# }
# payload = {
# "username": username,
# "password": password
# }
# loginInfo = user.login(data=payload, headers=header)
# step_login(username, password)
# yield loginInfo.json()
#
# @pytest.fixture(scope="function")
# def insert_delete_user():
# """删除用户前,先在数据库插入一条用户数据"""
# insert_sql = base_data["init_sql"]["insert_delete_user"][0]
# mysqler.execute_db(insert_sql)
# step_first()
# logger.info("删除用户操作:插入新用户--准备用于删除用户")
# logger.info("执行前置SQL:{}".format(insert_sql))
# yield
# # 因为有些情况是不给删除管理员用户的,这种情况需要手动清理上面插入的数据
# del_sql = base_data["init_sql"]["insert_delete_user"][1]
# mysqler.execute_db(del_sql)
# step_last()
# logger.info("删除用户操作:手工清理处理失败的数据")
# logger.info("执行后置SQL:{}".format(del_sql))
#
# @pytest.fixture(scope="function")
# def delete_register_user():
# """注册用户前,先删除数据,用例执行之后,再次删除以清理数据"""
# del_sql = base_data["init_sql"]["delete_register_user"]
# mysqler.execute_db(del_sql)
# step_first()
# logger.info("注册用户操作:清理用户--准备注册新用户")
# logger.info("执行前置SQL:{}".format(del_sql))
# yield
# mysqler.execute_db(del_sql)
# step_last()
# logger.info("注册用户操作:删除注册的用户")
# logger.info("执行后置SQL:{}".format(del_sql))
#
# @pytest.fixture(scope="function")
# def update_user_telephone():
# """修改用户前,因为手机号唯一,为了使用例重复执行,每次需要先修改手机号,再执行用例"""
# update_sql = base_data["init_sql"]["update_user_telephone"]
# mysqler.execute_db(update_sql)
# step_first()
# logger.info("修改用户操作:手工修改用户的手机号,以便用例重复执行")
# logger.info("执行SQL:{}".format(update_sql))
@allure.step("前置步骤 ==>> 清理数据")
def step_first():
logger.info("******************************")
logger.info("前置步骤开始 ==>> 清理数据")
@allure.step("后置步骤 ==>> 清理数据")
def step_last():
logger.info("后置步骤开始 ==>> 清理数据")
@allure.step("前置步骤 ==>> 管理员用户登录")
def step_login(username, password):
logger.info("前置步骤 ==>> 管理员 {} 登录,返回信息 为:{}".format(username, password))
if __name__ == '__main__':
pass
| 841
| 0
| 220
|
3644b2d7c8d6d952b40c533be05d846fe1519fee
| 520
|
py
|
Python
|
gdscript/editor/views.py
|
triptych/gdscript-online
|
542ea58a3b69251a2a44bf64b9f008165390dad6
|
[
"MIT"
] | 1
|
2019-06-23T12:52:26.000Z
|
2019-06-23T12:52:26.000Z
|
gdscript/editor/views.py
|
triptych/gdscript-online
|
542ea58a3b69251a2a44bf64b9f008165390dad6
|
[
"MIT"
] | null | null | null |
gdscript/editor/views.py
|
triptych/gdscript-online
|
542ea58a3b69251a2a44bf64b9f008165390dad6
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .gdscript import GDSCriptCLI, GODOT_BINARY
| 22.608696
| 62
| 0.696154
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .gdscript import GDSCriptCLI, GODOT_BINARY
def root(request):
return redirect('index')
def index(request):
return render(request, 'editor.html')
def script(request):
code = request.POST.get('script')
if code is not None:
print(code)
gds = GDSCriptCLI(GODOT_BINARY)
output = gds.block(code, timeout=0, sys_exit=False)[0]
return HttpResponse(output)
return redirect('index')
| 316
| 0
| 69
|
fe191273dc849bf8555a1a90ce02f8756127bfa6
| 289
|
py
|
Python
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_light-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_light-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_light-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
# Create a plotter that we initialize with no lights, and add a
# cube and a single headlight to it.
#
import pyvista as pv
plotter = pv.Plotter(lighting='none')
_ = plotter.add_mesh(pv.Cube())
light = pv.Light(color='cyan', light_type='headlight')
plotter.add_light(light)
plotter.show()
| 28.9
| 63
| 0.743945
|
# Create a plotter that we initialize with no lights, and add a
# cube and a single headlight to it.
#
import pyvista as pv
plotter = pv.Plotter(lighting='none')
_ = plotter.add_mesh(pv.Cube())
light = pv.Light(color='cyan', light_type='headlight')
plotter.add_light(light)
plotter.show()
| 0
| 0
| 0
|
6f927b8409974a28e2fed78ae93b333d976e1cc1
| 7,100
|
py
|
Python
|
DictionaryBot/DictionaryBot.py
|
darrenwjones/SlackBots
|
6a81fa7b2cd2eb51361bad2f1db3027656f69760
|
[
"MIT"
] | null | null | null |
DictionaryBot/DictionaryBot.py
|
darrenwjones/SlackBots
|
6a81fa7b2cd2eb51361bad2f1db3027656f69760
|
[
"MIT"
] | null | null | null |
DictionaryBot/DictionaryBot.py
|
darrenwjones/SlackBots
|
6a81fa7b2cd2eb51361bad2f1db3027656f69760
|
[
"MIT"
] | null | null | null |
# THE MOST INTERESTING MAN IN THE WORLD
# SLACK CHATBOT IN PYTHON
#
# Author: Zachary Gillis
#--------------------------------------
import os
import time
import re
import random
import urllib
import requests
import json
import sqlite3
from slackclient import SlackClient
from config import SLACK_BOT_TOKEN
from database import DictionaryBotDatabase
from urllib.parse import quote
# Instantiate Slack Client
slack_client = SlackClient(SLACK_BOT_TOKEN)
starterbot_id = None
# Database access
nameDB = None
# Constants
found = False
RTM_READ_DELAY = 1 # 1-second delay RTM read
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
print("Starter bot connected and running!")
# Read bot's user ID by calling Web API method `auth.test`
starterbot_id = slack_client.api_call("auth.test")["user_id"]
try:
nameDB = DictionaryBotDatabase()
except Exception as e:
print(e)
while True:
try:
command, user_id, channel, text = parse_bot_commands(slack_client.rtm_read())
if command:
handle_command(command, channel, user_id, text)
time.sleep(RTM_READ_DELAY)
except Exception as e:
print(e)
print("\nRESTARTING BOT LOGIC")
if slack_client.rtm_connect(with_team_state=False):
starterbot_id = slack_client.api_call("auth.test")["user_id"]
continue
else:
exit(5)
else:
print("Connection failed.")
| 34.134615
| 159
| 0.584366
|
# THE MOST INTERESTING MAN IN THE WORLD
# SLACK CHATBOT IN PYTHON
#
# Author: Zachary Gillis
#--------------------------------------
import os
import time
import re
import random
import urllib
import requests
import json
import sqlite3
from slackclient import SlackClient
from config import SLACK_BOT_TOKEN
from database import DictionaryBotDatabase
from urllib.parse import quote
# Instantiate Slack Client
slack_client = SlackClient(SLACK_BOT_TOKEN)
starterbot_id = None
# Database access
nameDB = None
# Constants
found = False
RTM_READ_DELAY = 1 # 1-second delay RTM read
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
def parse_bot_commands(slack_events):
for event in slack_events:
if event['type'] == "message" and not "subtype" in event:
user_id, message, text = parse_direct_mention(event['text'])
if user_id == starterbot_id:
sender_id = event['user']
return message, sender_id, event['channel'], text
return None, None, None, None
def parse_direct_mention(message_text):
matches = re.search(MENTION_REGEX, message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip(), message_text) if matches else (None, None, None)
def handle_command(command, channel, sender_id, text):
# Finds and executes given command, filling in response
command = command.lower()
response = None
global found
attachments = None
split = text.split(" ")[1:]
msg = str(" ".join(text.lower().split(" ")[1:]))
print(text)
if msg == 'help':
message(channel, "Send me a word, bish... orrrrrrrr \n• Create a new definition using ' *DEFINE* _{word/phrase}_ *AS* _{definition}_ '" +
" \n• Change a definition using ' *CHANGE* _{word/phrase}_ *TO* _{definition}_ '")
elif split[0] == 'DEFINE' and 'AS' in split:
define(channel, sender_id, " ".join(split[1:(split.index('AS'))]).lower(), " ".join(split[(split.index('AS')+1):]).lower())
elif split[0] == 'CHANGE' and 'TO' in split:
change(channel, sender_id, " ".join(split[1:(split.index('TO'))]).lower(), " ".join(split[(split.index('TO')+1):]).lower())
else:
# COMMAND HANDLING
display(channel, msg)
webster(channel, msg)
oxford(channel, msg)
if not found:
message(channel, 'huh?')
found = False
def define(channel, sender_id, phrase, definition):
db = sqlite3.connect('/home/darren/SlackBots/DictionaryBot/DictionaryBot.db')
sql = ''' INSERT OR IGNORE INTO definitions(phrase, definition, name) VALUES(?,?,?) '''
cur = db.cursor()
cur.execute(sql, (phrase, definition, sender_id))
db.commit()
db.close()
message(channel, "The definition has been set")
def change(channel, sender_id, phrase, definition):
db = sqlite3.connect('/home/darren/SlackBots/DictionaryBot/DictionaryBot.db')
sql = ''' UPDATE definitions SET definition=? WHERE phrase=? AND name=?'''
cur = db.cursor()
cur.execute(sql, (definition, phrase, sender_id))
db.commit()
db.close()
message(channel, "The definition has been changed")
def display(channel, text):
global found
db = sqlite3.connect('/home/darren/SlackBots/DictionaryBot/DictionaryBot.db')
sql = ''' SELECT * FROM definitions WHERE phrase=? '''
cur = db.cursor()
cur.execute(sql, (text,))
for row in cur:
message(channel, "According to *" + nameDB.getName(row[2]) + "*, _" + row[0] + "_ is defined as: \n• " + row[1])
found = True
db.close()
def webster(channel, text):
global found
spelling = True
try:
msg = None
app_key = '0143d4cb-e83b-4c32-88c5-fb7665e9bee7'
word = urllib.parse.quote(text.encode('utf-8'))
url = 'https://dictionaryapi.com/api/v3/references/collegiate/json/' + word + '?key=' + app_key
r = requests.get(url)
msg = r.json()
definitions = []
except:
return
try:
if msg is not None:
for result in msg:
for defs in result['shortdef']:
found = True
spelling = False
definitions.append(defs)
length = len(definitions)
if length > 5:
length = 5
if not spelling:
message(channel, "According to the *Webster* dictionary, _" + text + "_ is defined as: \n• " + " \n• ".join(definitions[0:length]))
spelling = True
except:
return
def oxford(channel, text):
global found
try:
msg = None
app_id = '8749e6b9'
app_key = '69a7a0ae687d283ad4e125382036b61d'
language = 'en'
word = urllib.parse.quote(text.encode('utf-8'))
url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word
r = requests.get(url, headers = {'app_id' : app_id, 'app_key' : app_key})
msg = r.json()
definitions = []
except:
return
try:
if msg is not None:
for results in msg['results']:
for lexical in results['lexicalEntries']:
for entry in lexical['entries']:
for senses in entry['senses']:
for defs in senses['definitions']:
found = True
definitions.append(defs)
length = len(definitions)
if length > 5:
length = 5
message(channel, " \n \nAccording to the *Oxford* dictionary, _" + text + "_ is defined as: \n• " + " \n• ".join(definitions[0:length]))
except:
return
def message(channel, response):
# Sends response back to channel.
slack_client.api_call(
"chat.postMessage",
channel=channel,
as_user=True,
text=response,
attachments=None
)
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
print("Starter bot connected and running!")
# Read bot's user ID by calling Web API method `auth.test`
starterbot_id = slack_client.api_call("auth.test")["user_id"]
try:
nameDB = DictionaryBotDatabase()
except Exception as e:
print(e)
while True:
try:
command, user_id, channel, text = parse_bot_commands(slack_client.rtm_read())
if command:
handle_command(command, channel, user_id, text)
time.sleep(RTM_READ_DELAY)
except Exception as e:
print(e)
print("\nRESTARTING BOT LOGIC")
if slack_client.rtm_connect(with_team_state=False):
starterbot_id = slack_client.api_call("auth.test")["user_id"]
continue
else:
exit(5)
else:
print("Connection failed.")
| 5,233
| 0
| 207
|
880fbcfbd0aea4d12b6ca7297e8e327c30aee7b9
| 3,697
|
py
|
Python
|
osbrain/tests/test_agent_async_requests_handlers.py
|
nicoddemus/osbrain
|
26ba98e1d2dc3389e4ae1047d625f8d3180fe68b
|
[
"Apache-2.0"
] | null | null | null |
osbrain/tests/test_agent_async_requests_handlers.py
|
nicoddemus/osbrain
|
26ba98e1d2dc3389e4ae1047d625f8d3180fe68b
|
[
"Apache-2.0"
] | null | null | null |
osbrain/tests/test_agent_async_requests_handlers.py
|
nicoddemus/osbrain
|
26ba98e1d2dc3389e4ae1047d625f8d3180fe68b
|
[
"Apache-2.0"
] | null | null | null |
"""
Test file for asynchronous requests handlers.
"""
import pytest
from osbrain import Agent
from osbrain import run_agent
from osbrain.helper import wait_agent_attr
from common import nsproxy # noqa: F401
from common import append_received
def test_async_rep_handler_exists(nsproxy):
"""
When binding an ASYNC_REP socket without a handler, an exception must be
thrown, letting the user know that a handler must be specified.
"""
server = run_agent('server', base=Agent)
with pytest.raises(ValueError) as error:
server.bind('ASYNC_REP', alias='should_crash')
assert 'This socket requires a handler!' in str(error.value)
@pytest.mark.parametrize(
'handler',
['reply', append_received, lambda a, x: a.received.append(x)]
)
def test_async_rep_handler_types(nsproxy, handler):
"""
When binding an ASYNC_REP socket, we must accept different types of
handlers: methods, functions, lambda expressions...
"""
server = run_agent('server', base=ServerAsyncRep)
assert server.bind('ASYNC_REP', alias='should_not_crash',
handler=handler)
@pytest.mark.parametrize(
'handler, check_function',
[('receive_method', False),
(append_received, True),
(lambda a, x: a.received.append(x), False)])
def test_async_rep_connect_handler_types(nsproxy, handler, check_function):
"""
We should be able to specify the handler in the `connect` call in
different ways: method, functions, lambda expressions...
Note that this handler will be used if not overridden by the `handler`
parameter in the `send` call. However, that is specifically checked in
other test.
"""
server = run_agent('server', base=ServerAsyncRep)
client = run_agent('client', base=ClientWithHandler)
addr = server.addr('publish')
client.connect(addr, alias='sub', handler=handler)
client.send('sub', 'request!')
assert wait_agent_attr(client, length=1)
if check_function:
# Check that the function was not stored as a method for the object
with pytest.raises(AttributeError) as error:
assert client.get_attr('append_received')
assert 'object has no attribute' in str(error.value)
@pytest.mark.parametrize(
'handler, check_function',
[('receive_method', False),
(append_received, True),
(lambda a, x: a.received.append(x), False)])
def test_async_rep_send_handler_types(nsproxy, handler, check_function):
"""
We should be able to make requests even if we do not specify a handler
on the `connect` call, as long as we specify it on the `send` call.
"""
server = run_agent('server', base=ServerAsyncRep)
client = run_agent('client', base=ClientWithHandler)
addr = server.addr('publish')
# Connect without a handler
client.connect(addr, alias='sub')
client.send('sub', 'request!', handler=handler)
assert wait_agent_attr(client, length=1)
if check_function:
# Check that the function was not stored as a method for the object
with pytest.raises(AttributeError) as error:
assert client.get_attr('append_received')
assert 'object has no attribute' in str(error.value)
| 31.330508
| 76
| 0.689748
|
"""
Test file for asynchronous requests handlers.
"""
import pytest
from osbrain import Agent
from osbrain import run_agent
from osbrain.helper import wait_agent_attr
from common import nsproxy # noqa: F401
from common import append_received
class ServerAsyncRep(Agent):
def on_init(self):
self.received = []
self.bind('ASYNC_REP', alias='publish', handler='reply')
def reply(self, request):
self.received.append(request)
return 'reply!'
class ClientWithHandler(Agent):
def on_init(self):
self.received = []
def crash_handler(self, response):
raise Exception()
def receive_method(self, response):
self.received.append(response)
def test_async_rep_handler_exists(nsproxy):
"""
When binding an ASYNC_REP socket without a handler, an exception must be
thrown, letting the user know that a handler must be specified.
"""
server = run_agent('server', base=Agent)
with pytest.raises(ValueError) as error:
server.bind('ASYNC_REP', alias='should_crash')
assert 'This socket requires a handler!' in str(error.value)
@pytest.mark.parametrize(
'handler',
['reply', append_received, lambda a, x: a.received.append(x)]
)
def test_async_rep_handler_types(nsproxy, handler):
"""
When binding an ASYNC_REP socket, we must accept different types of
handlers: methods, functions, lambda expressions...
"""
server = run_agent('server', base=ServerAsyncRep)
assert server.bind('ASYNC_REP', alias='should_not_crash',
handler=handler)
@pytest.mark.parametrize(
'handler, check_function',
[('receive_method', False),
(append_received, True),
(lambda a, x: a.received.append(x), False)])
def test_async_rep_connect_handler_types(nsproxy, handler, check_function):
"""
We should be able to specify the handler in the `connect` call in
different ways: method, functions, lambda expressions...
Note that this handler will be used if not overridden by the `handler`
parameter in the `send` call. However, that is specifically checked in
other test.
"""
server = run_agent('server', base=ServerAsyncRep)
client = run_agent('client', base=ClientWithHandler)
addr = server.addr('publish')
client.connect(addr, alias='sub', handler=handler)
client.send('sub', 'request!')
assert wait_agent_attr(client, length=1)
if check_function:
# Check that the function was not stored as a method for the object
with pytest.raises(AttributeError) as error:
assert client.get_attr('append_received')
assert 'object has no attribute' in str(error.value)
@pytest.mark.parametrize(
'handler, check_function',
[('receive_method', False),
(append_received, True),
(lambda a, x: a.received.append(x), False)])
def test_async_rep_send_handler_types(nsproxy, handler, check_function):
"""
We should be able to make requests even if we do not specify a handler
on the `connect` call, as long as we specify it on the `send` call.
"""
server = run_agent('server', base=ServerAsyncRep)
client = run_agent('client', base=ClientWithHandler)
addr = server.addr('publish')
# Connect without a handler
client.connect(addr, alias='sub')
client.send('sub', 'request!', handler=handler)
assert wait_agent_attr(client, length=1)
if check_function:
# Check that the function was not stored as a method for the object
with pytest.raises(AttributeError) as error:
assert client.get_attr('append_received')
assert 'object has no attribute' in str(error.value)
| 271
| 17
| 179
|
7fb5ad404820adebd92f407705653e62e6fe4972
| 3,090
|
py
|
Python
|
revscoring/languages/tests/test_romanian.py
|
mariushoch/revscoring
|
5ecd54d31c4088b6f142c0ef54116cc5bdce0ff2
|
[
"MIT"
] | null | null | null |
revscoring/languages/tests/test_romanian.py
|
mariushoch/revscoring
|
5ecd54d31c4088b6f142c0ef54116cc5bdce0ff2
|
[
"MIT"
] | null | null | null |
revscoring/languages/tests/test_romanian.py
|
mariushoch/revscoring
|
5ecd54d31c4088b6f142c0ef54116cc5bdce0ff2
|
[
"MIT"
] | null | null | null |
import pickle
from .. import romanian
from ...datasources import revision_oriented
from ...dependencies import solve
from .util import compare_extraction
BAD = [
"bou",
"caca", "cacat",
"cur", "curu", "curva", "curve",
"dracu",
"fraier", "fraieri", "fraierilor",
"fut", "fute", "futut",
"kkt",
"laba",
"mata",
"muie", "muist",
"pidar",
"pizda",
"plm",
"porcarie",
"pula", "pule", "puli", "pulii",
"suge", "sugeti", "sugi",
"supt"
]
INFORMAL = [
"aia", "asa",
"asta", "astea",
"ati", "aveti",
"bag", "bagat",
"bla",
"naspa",
"prost", "prosti", "prostie", "prostii", "prostilor",
"rahat",
"smecher",
"tigani"
]
OTHER = [
"""
Perioada Dinastiei Song (în chineză 宋朝, Sòng Cháo; sʊŋ tʂʰɑʊ̯)
reprezintă denumirea unei epoci istorice din istoria Chinei,
care a a început în anul 960 și a durat până în anul 1279. Ea a a
fost precedată de „Perioada Celor Cinci Dinastii și a Celor Zece Regate”
și a fost urmată de „Perioada Dinastiei Yuan”. În timpul acestei perioade
au fost emiși primii bani adevărați de hârtie din istoria lumii - bancnote
- de către un guvern național. Tot în această perioadă a fost înființată
prima flotă maritimă militară permanentă a Chinei, s-a folosit pentru prima
dată praful de pușcă și s-a determinat, tot pentru prima dată, nordului
geografic cu ajutorul busolei.
""",
]
r_text = revision_oriented.revision.text
| 29.711538
| 84
| 0.645955
|
import pickle
from .. import romanian
from ...datasources import revision_oriented
from ...dependencies import solve
from .util import compare_extraction
BAD = [
"bou",
"caca", "cacat",
"cur", "curu", "curva", "curve",
"dracu",
"fraier", "fraieri", "fraierilor",
"fut", "fute", "futut",
"kkt",
"laba",
"mata",
"muie", "muist",
"pidar",
"pizda",
"plm",
"porcarie",
"pula", "pule", "puli", "pulii",
"suge", "sugeti", "sugi",
"supt"
]
INFORMAL = [
"aia", "asa",
"asta", "astea",
"ati", "aveti",
"bag", "bagat",
"bla",
"naspa",
"prost", "prosti", "prostie", "prostii", "prostilor",
"rahat",
"smecher",
"tigani"
]
OTHER = [
"""
Perioada Dinastiei Song (în chineză 宋朝, Sòng Cháo; sʊŋ tʂʰɑʊ̯)
reprezintă denumirea unei epoci istorice din istoria Chinei,
care a a început în anul 960 și a durat până în anul 1279. Ea a a
fost precedată de „Perioada Celor Cinci Dinastii și a Celor Zece Regate”
și a fost urmată de „Perioada Dinastiei Yuan”. În timpul acestei perioade
au fost emiși primii bani adevărați de hârtie din istoria lumii - bancnote
- de către un guvern național. Tot în această perioadă a fost înființată
prima flotă maritimă militară permanentă a Chinei, s-a folosit pentru prima
dată praful de pușcă și s-a determinat, tot pentru prima dată, nordului
geografic cu ajutorul busolei.
""",
]
r_text = revision_oriented.revision.text
def test_badwords():
compare_extraction(romanian.badwords.revision.datasources.matches,
BAD, OTHER)
assert romanian.badwords == pickle.loads(pickle.dumps(romanian.badwords))
def test_informals():
compare_extraction(romanian.informals.revision.datasources.matches,
INFORMAL, OTHER)
assert romanian.informals == pickle.loads(pickle.dumps(romanian.informals))
def test_dictionary():
cache = {r_text: 'În timpul acestei perioade worngly. <td>'}
assert (solve(romanian.dictionary.revision.datasources.dict_words,
cache=cache) ==
['În', 'timpul', 'acestei', 'perioade'])
assert (solve(romanian.dictionary.revision.datasources.non_dict_words,
cache=cache) ==
["worngly"])
assert romanian.dictionary == pickle.loads(
pickle.dumps(romanian.dictionary))
def test_stopwords():
cache = {r_text: "În timpul acestei perioade"}
assert (solve(romanian.stopwords.revision.datasources.stopwords, cache=cache) ==
['În', 'acestei'])
assert (solve(romanian.stopwords.revision.datasources.non_stopwords,
cache=cache) ==
['timpul', 'perioade'])
assert romanian.stopwords == pickle.loads(pickle.dumps(romanian.stopwords))
def test_stemmmed():
cache = {r_text: "În timpul acestei perioade"}
assert (solve(romanian.stemmed.revision.datasources.stems, cache=cache) ==
['în', 'timp', 'aceste', 'perioad'])
assert romanian.stemmed == pickle.loads(pickle.dumps(romanian.stemmed))
| 1,479
| 0
| 115
|
8663390282d738969fd591ac46f3c32c680cf5b5
| 977
|
py
|
Python
|
app/models/problem.py
|
just806me/StudyingPPBot
|
d958bfe7e634f988ca98a82afa83584c6e28f71c
|
[
"MIT"
] | null | null | null |
app/models/problem.py
|
just806me/StudyingPPBot
|
d958bfe7e634f988ca98a82afa83584c6e28f71c
|
[
"MIT"
] | 6
|
2019-01-19T11:50:00.000Z
|
2019-07-30T09:46:08.000Z
|
app/models/problem.py
|
just806me/StudyingPPBot
|
d958bfe7e634f988ca98a82afa83584c6e28f71c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, List
from ..database import Database
@dataclass
| 30.53125
| 91
| 0.627431
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, List
from ..database import Database
@dataclass
class Problem:
id: int
group: int
@staticmethod
def create(db: Database, id: int, group: int) -> Problem:
cursor = db.cursor()
cursor.execute('INSERT INTO "problems" ("id", "group") VALUES (?, ?)', (id, group))
db.commit()
return Problem(id, group)
@staticmethod
def find(db: Database, id: int) -> Optional[Problem]:
cursor = db.cursor()
cursor.execute('SELECT "id", "group" FROM "problems" WHERE "id" = ?', (id,))
values = cursor.fetchone()
return None if values is None else Problem(*values)
@staticmethod
def all(db: Database) -> List[Problem]:
cursor = db.cursor()
cursor.execute('SELECT "id", "group" FROM "problems" ORDER BY "group", "id"')
return [Problem(*values) for values in cursor.fetchall()]
| 651
| 155
| 22
|
43c5c0c0c39bbb309a3391a5aca7a64083a235be
| 1,350
|
py
|
Python
|
leetcode/1.Array_String/15.3Sum.py
|
aenon/online_judge
|
bff3991519cd4f2d80dea9b17680dbc5d4c44b9b
|
[
"MIT"
] | null | null | null |
leetcode/1.Array_String/15.3Sum.py
|
aenon/online_judge
|
bff3991519cd4f2d80dea9b17680dbc5d4c44b9b
|
[
"MIT"
] | null | null | null |
leetcode/1.Array_String/15.3Sum.py
|
aenon/online_judge
|
bff3991519cd4f2d80dea9b17680dbc5d4c44b9b
|
[
"MIT"
] | 1
|
2015-01-10T16:02:43.000Z
|
2015-01-10T16:02:43.000Z
|
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
# Note: The solution set must not contain duplicate triplets.
# For example, given array S = [-1, 0, 1, 2, -1, -4],
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
| 32.926829
| 157
| 0.425185
|
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
# Note: The solution set must not contain duplicate triplets.
# For example, given array S = [-1, 0, 1, 2, -1, -4],
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
Sort the list; for each unique number, find two other numbers with
two-pointers method.
"""
nums, result, i = sorted(nums), [], 0
while i < len(nums) - 2:
if i > 0 and nums[i] == nums[i - 1]:
i += 1
continue
j, k = i + 1, len(nums) -1
while j < k:
sum_ijk = nums[i] + nums[j] + nums[k]
if sum_ijk == 0:
result.append([nums[i], nums[j], nums[k]])
j, k = j + 1, k - 1
while j < k and nums[j] == nums[j - 1]:
j += 1
while j < k and nums[k] == nums[k + 1]:
k -= 1
elif sum_ijk < 0:
j += 1
else:
k -= 1
i += 1
return result
| 0
| 986
| 23
|
f345e3d4640258bd791593fb57530d594dee8bb4
| 681
|
py
|
Python
|
ParaViewCore/ServerManager/Default/Testing/Python/ProxyBackwardsCompatibilityTest.py
|
trickyMan/paraview_view
|
3b38670e8259b688093e0d7ba2fe2edd7c5d57a7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ParaViewCore/ServerManager/Default/Testing/Python/ProxyBackwardsCompatibilityTest.py
|
trickyMan/paraview_view
|
3b38670e8259b688093e0d7ba2fe2edd7c5d57a7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ParaViewCore/ServerManager/Default/Testing/Python/ProxyBackwardsCompatibilityTest.py
|
trickyMan/paraview_view
|
3b38670e8259b688093e0d7ba2fe2edd7c5d57a7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
from paraview.simple import *
import paraview
sourceDs = Wavelet()
inputDs = Sphere()
# Test 'Resample With Dataset` proxies
p1 = ResampleWithDataset(Input=inputDs, Source=sourceDs)
print("Proxy Name before: %s" % p1.GetXMLName())
assert p1.GetXMLName() == "ResampleWithDataset", "The default proxy name must be ResampleWithDataset"
print("")
print("Setting compatibility version to 5.0...")
paraview.compatibility.major = 5
paraview.compatibility.minor = 0
p2 = paraview.simple.ResampleWithDataset(Input=inputDs, Source=sourceDs)
print("Proxy Name for compatibility version 5.0: %s" % p2.GetXMLName())
assert p2.GetXMLName() == "Probe", "The default proxy name must be Probe"
| 34.05
| 101
| 0.760646
|
from paraview.simple import *
import paraview
sourceDs = Wavelet()
inputDs = Sphere()
# Test 'Resample With Dataset` proxies
p1 = ResampleWithDataset(Input=inputDs, Source=sourceDs)
print("Proxy Name before: %s" % p1.GetXMLName())
assert p1.GetXMLName() == "ResampleWithDataset", "The default proxy name must be ResampleWithDataset"
print("")
print("Setting compatibility version to 5.0...")
paraview.compatibility.major = 5
paraview.compatibility.minor = 0
p2 = paraview.simple.ResampleWithDataset(Input=inputDs, Source=sourceDs)
print("Proxy Name for compatibility version 5.0: %s" % p2.GetXMLName())
assert p2.GetXMLName() == "Probe", "The default proxy name must be Probe"
| 0
| 0
| 0
|
808da29a79df41623dd584c2d77377f50775018c
| 533
|
py
|
Python
|
apps/tinyosGW/cgi/simple.py
|
jeonghoonkang/BerePi
|
e04283a94a6a0487ab0049dc3e514d6c5dda39cc
|
[
"BSD-2-Clause"
] | 22
|
2015-06-03T06:28:27.000Z
|
2022-03-18T08:02:45.000Z
|
apps/tinyosGW/cgi/simple.py
|
jeonghoonkang/BerePi
|
e04283a94a6a0487ab0049dc3e514d6c5dda39cc
|
[
"BSD-2-Clause"
] | 14
|
2015-06-08T01:31:53.000Z
|
2020-08-30T02:19:15.000Z
|
apps/tinyosGW/cgi/simple.py
|
jeonghoonkang/BerePi
|
e04283a94a6a0487ab0049dc3e514d6c5dda39cc
|
[
"BSD-2-Clause"
] | 26
|
2015-05-12T09:33:55.000Z
|
2021-08-30T05:41:00.000Z
|
#!/usr/bin/python
#-*- coding: UTF-8-*-
import cgi
import cgitb
import os
import sys
sys.path.append("/home/pi/devel/BerePi/apps/tinyosGW/cgi")
#import simple
if __name__ == "__main__":
cgitb.enable()
args = cgi.FieldStorage()
#print os.environ.items()
print "Content-type: text/html\n"
print "Simple CGI, Python ! <br>"
print args
# sample
# http://10.0.2.4/cgi-bin/simple.py?Id=4&time=2017/01/01
# FieldStorage(None, None, [MiniFieldStorage('Id', '4'), MiniFieldStorage('time', '2017/01/01')])
| 20.5
| 97
| 0.65666
|
#!/usr/bin/python
#-*- coding: UTF-8-*-
import cgi
import cgitb
import os
import sys
sys.path.append("/home/pi/devel/BerePi/apps/tinyosGW/cgi")
#import simple
if __name__ == "__main__":
cgitb.enable()
args = cgi.FieldStorage()
#print os.environ.items()
print "Content-type: text/html\n"
print "Simple CGI, Python ! <br>"
print args
# sample
# http://10.0.2.4/cgi-bin/simple.py?Id=4&time=2017/01/01
# FieldStorage(None, None, [MiniFieldStorage('Id', '4'), MiniFieldStorage('time', '2017/01/01')])
| 0
| 0
| 0
|
a6564b153fb2ffdc4f517dc5d57e7c5c16af1f18
| 2,619
|
py
|
Python
|
param_handle.py
|
mauricioAyllon/NASA-gamma
|
14f53a626096d2bf0fce811608c9e59a8d6b5287
|
[
"MIT"
] | 5
|
2020-12-03T21:45:43.000Z
|
2021-12-04T16:14:33.000Z
|
param_handle.py
|
mauricioAyllon/NASA-gamma
|
14f53a626096d2bf0fce811608c9e59a8d6b5287
|
[
"MIT"
] | null | null | null |
param_handle.py
|
mauricioAyllon/NASA-gamma
|
14f53a626096d2bf0fce811608c9e59a8d6b5287
|
[
"MIT"
] | 3
|
2020-12-03T20:58:44.000Z
|
2022-03-24T04:52:57.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 18:24:32 2021
@author: mauricio
parameter handle for GUI
"""
import pandas as pd
from nasagamma import spectrum as sp
from nasagamma import peaksearch as ps
from nasagamma import read_cnf
import re
| 29.761364
| 82
| 0.588774
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 18:24:32 2021
@author: mauricio
parameter handle for GUI
"""
import pandas as pd
from nasagamma import spectrum as sp
from nasagamma import peaksearch as ps
from nasagamma import read_cnf
import re
def get_spect_search(commands):
if commands["-o"]:
return None
file_name = commands["<file_name>"]
# The detector types below are accurate only for the example files.
# Add a similar command for your own detector or modify the values below.
if commands["--cebr"]:
fwhm_at_0 = 1.0
ref_x = 1317
ref_fwhm = 41 # 41
elif commands["--labr"]:
fwhm_at_0 = 1.0
ref_x = 427
ref_fwhm = 10
elif commands["--hpge"]:
fwhm_at_0 = 0.1
ref_x = 948
ref_fwhm = 4.4
else:
fwhm_at_0 = float(commands["--fwhm_at_0"])
ref_x = float(commands["--ref_x"])
ref_fwhm = float(commands["--ref_fwhm"])
if commands["--min_snr"] is None:
min_snr = 1.0
else:
min_snr = float(commands["--min_snr"])
file_name = file_name.lower()
if file_name[-4:] == ".csv":
e_units, spect, x = read_csv_file(file_name)
elif file_name[-4:] == ".cnf":
e_units, spect, x = read_cnf.read_cnf_to_spect(file_name)
# peaksearch class
search = ps.PeakSearch(spect, ref_x, ref_fwhm, fwhm_at_0, min_snr=min_snr)
return spect, search, e_units, x, ref_x, fwhm_at_0, ref_fwhm
def read_csv_file(file_name):
df = pd.read_csv(file_name)
###
name_lst = ["count", "counts", "cts", "data"]
e_lst = ["energy", "energies", "erg"]
u_lst = ["eV", "keV", "MeV", "GeV"]
col_lst = list(df.columns)
# cts_col = [s for s in col_lst if "counts" in s.lower()][0]
cts_col = 0
erg = 0
for s in col_lst:
s2 = re.split("[^a-zA-Z]", s) # split by non alphabetic character
if s.lower() in name_lst:
cts_col = s
next
for st in s2:
if st.lower() in e_lst:
erg = s
if st in u_lst:
unit = st
if cts_col == 0:
print("ERROR: no column named with counts keyword e.g counts, data, cts")
elif erg == 0:
print("working with channel numbers")
e_units = "channels"
spect = sp.Spectrum(counts=df[cts_col], e_units=e_units)
x = spect.channels
elif erg != 0:
print("working with energy values")
e_units = unit
spect = sp.Spectrum(counts=df[cts_col], energies=df[erg], e_units=e_units)
x = spect.energies
return e_units, spect, x
| 2,321
| 0
| 46
|
6a0407340f0f8e682733fd9d9686098ae9ce5d03
| 5,827
|
py
|
Python
|
demo/app.py
|
ignacioct/Temis
|
cf237470e728b45cfa6203eb81d4868720309c26
|
[
"CC0-1.0"
] | 4
|
2021-06-13T10:19:11.000Z
|
2021-06-29T16:45:50.000Z
|
demo/app.py
|
ignacioct/Temis
|
cf237470e728b45cfa6203eb81d4868720309c26
|
[
"CC0-1.0"
] | null | null | null |
demo/app.py
|
ignacioct/Temis
|
cf237470e728b45cfa6203eb81d4868720309c26
|
[
"CC0-1.0"
] | 1
|
2022-02-08T19:00:00.000Z
|
2022-02-08T19:00:00.000Z
|
import os
import streamlit as st
from text import (
spanish_text,
english_text,
spanish_labels_dictionary,
english_labels_dictionary,
)
import pandas as pd
import altair as alt
import biome.text
st.set_page_config(page_title="Temis Demo App", layout="centered")
os.environ["TOKENIZERS_PARALLELISM"] = "False" # To avoid warnings
@st.cache(suppress_st_warning=True, allow_output_mutation=True, show_spinner=False)
def loading_model():
"""Loading of the model classifier. Passed to a function to include cache decorator"""
return biome.text.Pipeline.from_pretrained("../models/temis_model.tar.gz")
def populating_predictions(input_df, threshold):
"""Method for getting which categories surpassed the threshold.
Parameters
----------
input_df: Pandas Dataframe
Dataframe with predictions and score (in %)
threshold: int
Value from which predictions are considered valid
Return
----------
prediction_output: List[str]
Predicted classes in descending order.
"""
predictions_output = []
df_sorted = input_df.sort_values(by="score")
for index, row in df_sorted.iterrows():
if row["score"] >= threshold * 100:
predictions_output.append(index)
return predictions_output
def bar_chart_generator(df, confidence_threshold):
"""Creating the bar chart, decluttering of code from main function"""
bars = (
alt.Chart(df.reset_index())
.mark_bar()
.encode(
x=alt.X("labels", sort="-y", title=None),
y=alt.Y("score", title=None),
# The highlight is set based on the result
# of the conditional statement
color=alt.condition(
alt.datum.score
>= confidence_threshold, # If the rating is >= threshold it returns True,
alt.value("green"), # and the matching bars are set as green.
# and if it does not satisfy the condition
# the color is set to steelblue.
alt.value("steelblue"),
),
)
.mark_bar(size=20)
)
return bars
def predict_labels(df, confidence_threshold, mode):
"""Returning predicted labels from a dataframe given a treshold.
mode=0 returns all labels, mode=1 returns only categories, not passive nor active"""
predicted_labels = []
if mode == 0:
for i in range(len(df)):
if df["score"][i] >= confidence_threshold:
predicted_labels.append(df.index[i])
elif mode == 1:
for i in range(len(df)):
if (
df["score"][i] >= confidence_threshold
and df.index[i] != "passive"
and df.index[i] != "active"
and df.index[i] != "pasiva"
and df.index[i] != "activa"
):
predicted_labels.append(df.index[i])
return predicted_labels
if __name__ == "__main__":
main()
| 26.852535
| 105
| 0.596019
|
import os
import streamlit as st
from text import (
spanish_text,
english_text,
spanish_labels_dictionary,
english_labels_dictionary,
)
import pandas as pd
import altair as alt
import biome.text
st.set_page_config(page_title="Temis Demo App", layout="centered")
os.environ["TOKENIZERS_PARALLELISM"] = "False" # To avoid warnings
def main():
text = english_text
# Header
title, _, subtitle = st.beta_columns((2.3, 0.25, 0.6))
title.title(text[0])
subtitle.subheader(text[1])
st.markdown("""---""")
title, subtitle = st.beta_columns((0.4, 1))
with title:
language = st.select_slider(
text[11],
options=["English", "Spanish"],
)
if language == "English":
text = english_text
dic = english_labels_dictionary
elif language == "Spanish":
text = spanish_text
dic = spanish_labels_dictionary
st.markdown(text[2])
st.markdown("") # empty space
st.header(text[3])
st.markdown(text[4])
st.header(text[5])
st.markdown(f"**{text[6]}**")
st.markdown(text[7])
st.markdown("") # empty space
st.header(text[8])
text_input = st.text_area(text[9])
confidence_threshold = (
0.5 # Starting value of the treshold, may be changed with the slider
)
pl = loading_model() # cached function
if text_input:
# Making model predictions and storing them into a dataframe
prediction = pl.predict(text_input)
# Confidence threshold slider, changes the green categories in the graph and the categories shown
confidence_threshold = st.slider(
text[10],
0,
100,
50,
1,
)
df = pd.DataFrame(
{
"labels": [dic.get(pred) for pred in prediction["labels"]],
"confidence": [s for s in prediction["probabilities"]],
"score": [s * 100 for s in prediction["probabilities"]],
}
).set_index("labels")
# Predictions according to the threshold
predictions = populating_predictions(df, confidence_threshold)
df_table, _, bar_chart = st.beta_columns((1.2, 0.1, 2))
# Class-Probabilities table
with df_table:
# Probabilities field
st.dataframe(df[["score"]])
# Class-Probabilities Chart with Confidence
with bar_chart:
bar_chart = bar_chart_generator(df, confidence_threshold)
st.altair_chart(bar_chart, use_container_width=True)
predicted_labels = predict_labels(df, confidence_threshold, 0)
predicted_categories = predict_labels(df, confidence_threshold, 1)
if len(predicted_categories) == 0:
st.markdown(text[12])
else:
st.markdown(text[13] + ", ".join([i for i in predicted_categories]))
if "active" or "activa" in predicted_labels:
st.markdown(text[14])
elif "passive" or "pasiva" in predicted_labels:
st.markdown(text[15])
st.header(text[16])
st.markdown(text[17])
@st.cache(suppress_st_warning=True, allow_output_mutation=True, show_spinner=False)
def loading_model():
"""Loading of the model classifier. Passed to a function to include cache decorator"""
return biome.text.Pipeline.from_pretrained("../models/temis_model.tar.gz")
def populating_predictions(input_df, threshold):
"""Method for getting which categories surpassed the threshold.
Parameters
----------
input_df: Pandas Dataframe
Dataframe with predictions and score (in %)
threshold: int
Value from which predictions are considered valid
Return
----------
prediction_output: List[str]
Predicted classes in descending order.
"""
predictions_output = []
df_sorted = input_df.sort_values(by="score")
for index, row in df_sorted.iterrows():
if row["score"] >= threshold * 100:
predictions_output.append(index)
return predictions_output
def bar_chart_generator(df, confidence_threshold):
"""Creating the bar chart, decluttering of code from main function"""
bars = (
alt.Chart(df.reset_index())
.mark_bar()
.encode(
x=alt.X("labels", sort="-y", title=None),
y=alt.Y("score", title=None),
# The highlight is set based on the result
# of the conditional statement
color=alt.condition(
alt.datum.score
>= confidence_threshold, # If the rating is >= threshold it returns True,
alt.value("green"), # and the matching bars are set as green.
# and if it does not satisfy the condition
# the color is set to steelblue.
alt.value("steelblue"),
),
)
.mark_bar(size=20)
)
return bars
def predict_labels(df, confidence_threshold, mode):
"""Returning predicted labels from a dataframe given a treshold.
mode=0 returns all labels, mode=1 returns only categories, not passive nor active"""
predicted_labels = []
if mode == 0:
for i in range(len(df)):
if df["score"][i] >= confidence_threshold:
predicted_labels.append(df.index[i])
elif mode == 1:
for i in range(len(df)):
if (
df["score"][i] >= confidence_threshold
and df.index[i] != "passive"
and df.index[i] != "active"
and df.index[i] != "pasiva"
and df.index[i] != "activa"
):
predicted_labels.append(df.index[i])
return predicted_labels
if __name__ == "__main__":
main()
| 2,802
| 0
| 23
|
180271a8b45178f5e56c2cb102be6b2943567928
| 19,214
|
py
|
Python
|
contsub_dirty_image.py
|
jmoldon/hcg_hi_pipeline
|
c06128c6f8acb0dcae9e45efc164e3d282f3ac8d
|
[
"MIT"
] | 1
|
2020-02-20T08:56:48.000Z
|
2020-02-20T08:56:48.000Z
|
contsub_dirty_image.py
|
jmoldon/hcg_hi_pipeline
|
c06128c6f8acb0dcae9e45efc164e3d282f3ac8d
|
[
"MIT"
] | 5
|
2019-12-11T12:02:22.000Z
|
2022-03-29T08:46:05.000Z
|
contsub_dirty_image.py
|
jmoldon/hcg_hi_pipeline
|
c06128c6f8acb0dcae9e45efc164e3d282f3ac8d
|
[
"MIT"
] | 1
|
2022-03-23T13:11:01.000Z
|
2022-03-23T13:11:01.000Z
|
import imp, glob, shutil, numpy
imp.load_source('common_functions','common_functions.py')
import common_functions as cf
def contsub(msfile,config,config_raw,config_file,logger):
"""
Subtracts the continuum from each of the science target MSs.
If the no line free range is set then the user is queried (in interactive mode) and the configuration file updated.
Input:
msfile = Path to the MS. (String)
config = The parameters read from the configuration file. (Ordered dictionary)
config_raw = The instance of the parser.
config_file = Path to configuration file. (String)
"""
logger.info('Starting continuum subtraction.')
contsub = config['continuum_subtraction']
calib = config['calibration']
src_dir = config['global']['src_dir']+'/'
logger.info('Checking for line free channel ranges in parameters.')
targets = calib['target_names'][:]
fields = calib['targets'][:]
for i in range(len(targets)):
target = targets[i]
if 'spw' in target:
inx = target.index('.spw')
target_name = target[:inx]
if target_name in calib['target_names'][i-1]:
fields.insert(i,fields[i-1])
if calib['mosaic']:
logger.info('The parameters file indicates that this data set is a mosaic.')
logger.info('All fields in the mosaic will have the same continuum channels.')
targets = list(set(targets))
reset_ch = False
if len(contsub['linefree_ch']) == 0 or len(contsub['linefree_ch']) != len(targets):
reset_ch = True
if len(contsub['linefree_ch']) < len(targets):
logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
while len(contsub['linefree_ch']) < len(targets):
contsub['linefree_ch'].append('')
elif len(contsub['linefree_ch']) > len(targets):
logger.warning('There are more channel ranges than target fields.')
logger.info('Current channel ranges: {}'.format(contsub['linefree_ch']))
logger.warning('The channel range list will now be truncated to match the number of targets.')
contsub['linefree_ch'] = contsub['linefree_ch'][:len(targets)]
elif interactive:
print('Current line free channels set as:')
for i in range(len(contsub['linefree_ch'])):
print('{0}: {1}'.format(targets[i],contsub['linefree_ch'][i]))
resp = str(raw_input('Do you want revise the line free channels (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_ch = True
if reset_ch:
if not interactive:
logger.critical('The number of line free channel ranges provided does not match the number of targets.')
logger.info('Line free change ranges: {}'.format(contsub['linefree_ch']))
logger.info('Targets: {}'.format(targets))
sys.exit(-1)
else:
print('For each target enter the line free channels in the following format:\nspwID1:min_ch1~max_ch1;min_ch2~max_ch2,spwID2:min_ch3~max_ch3;min_ch4~max_ch4')
for i in range(len(targets)):
contsub['linefree_ch'][i] = cf.uinput('Line free channels for {}: '.format(targets[i]), contsub['linefree_ch'][i])
logger.info('Setting line free channels for {0} as: {1}.'.format(targets[i], contsub['linefree_ch'][i]))
if type(contsub['fitorder']) == type(1):
order_set = False
while not order_set:
try:
order = int(cf.uinput('Set the fit order for {}: '.format(targets[i]), contsub['fitorder']))
if order >= 0:
order_set = True
except ValueError:
print 'Fit order must be an integer.'
if order != contsub['fitorder'] and len(targets) > 1:
order_list = list(numpy.zeros(len(targets),dtype='int')+contsub['fitorder'])
order_list[i] = order
order = order_list
contsub['fitorder'] = order
else:
order_set = False
while not order_set:
try:
order = int(cf.uinput('Set the fit order for {}: '.format(targets[i]), contsub['fitorder'][i]))
if order >= 0:
order_set = True
contsub['fitorder'] = order
except ValueError:
print 'Fit order must be an integer.'
logger.info('Updating config file to set line free channels and fit orders.')
config_raw.set('continuum_subtraction','linefree_ch',contsub['linefree_ch'])
config_raw.set('continuum_subtraction','fitorder',contsub['fitorder'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Line free channels set as: {}.'.format(contsub['linefree_ch']))
logger.info('Fit order(s) set as: {}.'.format(contsub['fitorder']))
logger.info('For the targets: {}.'.format(targets))
for i in range(len(targets)):
target = targets[i]
field = fields[i]
if calib['mosaic']:
for target_name in targets:
inx = [j for j in range(len(calib['target_names'])) if target_name in calib['target_names'][j]]
fields = numpy.array(calib['targets'],dtype='str')[inx]
field = ','.join(fields)
chans = contsub['linefree_ch'][i]
spws = chans.split(',')
for j in range(len(spws)):
spw = spws[j].strip()
inx = spw.index(':')
spw = spw[0:inx]
spws[j] = spw
logger.info('Subtracting the continuum from field: {}'.format(target))
if type(contsub['fitorder']) == type(1):
order = int(contsub['fitorder'])
else:
order = int(contsub['fitorder'][i])
command = "uvcontsub(vis='{0}{1}'+'.split', field='{2}', fitspw='{3}', spw='{4}', excludechans=False, combine='spw', solint='int', fitorder={5}, want_cont={6})".format(src_dir,target,field,chans,','.join(spws),order,contsub['save_cont'])
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('Completed continuum subtraction.')
def plot_spec(config,logger,contsub=False):
"""
For each SPW and each science target amplitude vs channel and amplitude vs velocity are plotted.
Input:
config = The parameters read from the configuration file. (Ordered dictionary)
"""
logger.info('Starting plotting amplitude spectrum.')
plots_obs_dir = './plots/'
cf.makedir(plots_obs_dir,logger)
calib = config['calibration']
targets = calib['target_names'][:]
if calib['mosaic']:
targets = list(set(calib['target_names']))
src_dir = config['global']['src_dir']+'/'
for target in targets:
if contsub:
MS_list = glob.glob('{0}{1}.split.contsub'.format(src_dir,target))
else:
MS_list = glob.glob('{0}{1}.split'.format(src_dir,target))
for MS in MS_list:
if contsub:
plot_file = plots_obs_dir+'{0}_contsub_amp_chn.png'.format(target)
else:
plot_file = plots_obs_dir+'{0}_amp_chn.png'.format(target)
logger.info('Plotting amplitude vs channel to {}'.format(plot_file))
plotms(vis=MS, xaxis='chan', yaxis='amp',
ydatacolumn='corrected', plotfile=plot_file,
expformat='png', overwrite=True, showgui=False)
if not contsub:
plot_file = plots_obs_dir+'{0}_amp_vel.png'.format(target)
logger.info('Plotting amplitude vs velocity to {}'.format(plot_file))
plotms(vis=MS, xaxis='velocity', yaxis='amp',
ydatacolumn='corrected', plotfile=plot_file,
expformat='png', overwrite=True, showgui=False,
freqframe='BARY', restfreq=str(config['global']['rest_freq']), veldef='OPTICAL')
logger.info('Completed plotting amplitude spectrum.')
def dirty_image(config,config_raw,config_file,logger):
"""
Generates a dirty (continuum subtracted) image of each science target.
Checks that the pixel size, image size, and line emission channels are set (will prompt user if in interactive mode).
Input:
config = The parameters read from the configuration file. (Ordered dictionary)
config_raw = The instance of the parser.
config_file = Path to configuration file. (String)
"""
logger.info('Starting making dirty image.')
calib = config['calibration']
contsub = config['continuum_subtraction']
rest_freq = config['global']['rest_freq']
targets = calib['target_names'][:]
fields = calib['targets'][:]
for i in range(len(targets)):
target = targets[i]
if 'spw' in target:
inx = target.index('.spw')
target_name = target[:inx]
if target_name in calib['target_names'][i-1]:
fields.insert(i,fields[i-1])
if calib['mosaic']:
targets = list(set(calib['target_names']))
cln_param = config['clean']
src_dir = config['global']['src_dir']+'/'
img_dir = config['global']['img_dir']+'/'
cf.makedir('./'+img_dir,logger)
logger.info('Removing any existing dirty images.')
for target in targets:
del_list = glob.glob(img_dir+'{}.dirty*'.format(target))
for file_path in del_list:
logger.info('Deleting: '+file_path)
shutil.rmtree(file_path)
logger.info('Checking clean parameters for dirty image.')
reset_cln = False
if len(cln_param['pix_size']) == 0 or len(cln_param['pix_size']) != len(targets):
if not interactive:
logger.critical('The number of pixel sizes provided does not match the number of targets.')
logger.info('Pixel sizes: {}'.format(cln_param['pix_size']))
logger.info('Targets: {}'.format(targets))
sys.exit(-1)
reset_cln = True
if len(cln_param['pix_size']) < len(targets):
logger.warning('There are more target fields than pixel sizes. Appending blanks.')
while len(cln_param['pix_size']) < len(targets):
cln_param['pix_size'].append('')
elif len(cln_param['pix_size']) > len(targets):
logger.warning('There are more pixel sizes than target fields.')
logger.info('Current pixel sizes: {}'.format(cln_param['pix_size']))
logger.warning('The pixel size list will now be truncated to match the number of targets.')
cln_param['pix_size'] = cln_param['pix_size'][:len(targets)]
elif interactive:
print('Current pixel sizes set as:')
for i in range(len(cln_param['pix_size'])):
print('{0}: {1}'.format(targets[i],cln_param['pix_size'][i]))
resp = str(raw_input('Do you want revise the pixel sizes (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_cln = True
if reset_cln and interactive:
print('For each target enter the desired pixel size:')
for i in range(len(targets)):
cln_param['pix_size'][i] = cf.uinput('Pixel size for {}: '.format(targets[i]), cln_param['pix_size'][i])
logger.info('Setting pixel size for {0} as: {1}.'.format(targets[i], cln_param['pix_size'][i]))
logger.info('Updating config file to set pixel sizes.')
config_raw.set('clean','pix_size',cln_param['pix_size'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Pixel sizes set as: {}.'.format(cln_param['pix_size']))
logger.info('For the targets: {}.'.format(targets))
reset_cln = False
if len(cln_param['im_size']) == 0 or len(cln_param['im_size']) != len(targets):
if not interactive:
logger.critical('The number of image sizes provided does not match the number of targets.')
logger.info('Image sizes: {}'.format(cln_param['im_size']))
logger.info('Targets: {}'.format(targets))
sys.exit(-1)
reset_cln = True
if len(cln_param['im_size']) < len(targets):
logger.warning('There are more target fields than image sizes. Appending blanks.')
while len(cln_param['im_size']) < len(targets):
cln_param['im_size'].append('')
elif len(cln_param['im_size']) > len(targets):
logger.warning('There are more image sizes than target fields.')
logger.info('Current image sizes: {} pixels.'.format(cln_param['im_size']))
logger.warning('The image size list will now be truncated to match the number of targets.')
cln_param['im_size'] = cln_param['im_size'][:len(targets)]
elif interactive:
print('Current images sizes set as:')
for i in range(len(cln_param['im_size'])):
print('{0}: {1}'.format(targets[i],cln_param['im_size'][i]))
resp = str(raw_input('Do you want revise the image sizes (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_cln = True
if reset_cln and interactive:
print('For each target enter the desired image size:')
for i in range(len(targets)):
print('Note: The pixel size for this target was set to: {}'.format(cln_param['pix_size'][i]))
cln_param['im_size'][i] = cf.uinput('Image size for {}: '.format(targets[i]), cln_param['im_size'][i])
logger.info('Setting image size for {0} as: {1} x {2}.'.format(targets[i], cln_param['im_size'][i],cln_param['pix_size'][i]))
logger.info('Updating config file to set image sizes.')
config_raw.set('clean','im_size',cln_param['im_size'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Image sizes set as: {} pixels.'.format(cln_param['im_size']))
logger.info('For the targets: {}.'.format(targets))
reset_cln = False
if len(cln_param['line_ch']) == 0 or len(cln_param['line_ch']) != len(targets):
if not interactive:
logger.critical('The number of line channel ranges provided does not match the number of targets.')
logger.info('Pixel sizes: {}'.format(cln_param['line_ch']))
logger.info('Targets: {}'.format(targets))
sys.exit(-1)
reset_cln = True
if len(cln_param['line_ch']) < len(targets):
logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
while len(cln_param['line_ch']) < len(targets):
cln_param['line_ch'].append('')
elif len(cln_param['line_ch']) > len(targets):
logger.warning('There are more channel ranges than target fields.')
logger.info('Current channel ranges: {}'.format(cln_param['line_ch']))
logger.warning('The channel range list will now be truncated to match the number of targets.')
cln_param['line_ch'] = cln_param['line_ch'][:len(targets)]
elif interactive:
print('Current image channels set as:')
for i in range(len(cln_param['line_ch'])):
print('{0}: {1}'.format(targets[i],cln_param['line_ch'][i]))
resp = str(raw_input('Do you want revise the channels that will be imaged (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_cln = True
if reset_cln and interactive:
print('For each target enter the channels you want to image in the following format:\nspwID:min_ch~max_ch')
for i in range(len(targets)):
print('Note: The continuum channels for this target were set to: {}'.format(contsub['linefree_ch'][i]))
cln_param['line_ch'][i] = cf.uinput('Channels to image for {}: '.format(targets[i]), cln_param['line_ch'][i])
logger.info('Setting image channels for {0} as: {1}.'.format(targets[i], cln_param['line_ch'][i]))
logger.info('Updating config file to set channels to be imaged.')
config_raw.set('clean','line_ch',cln_param['line_ch'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Line emission channels set as: {}.'.format(cln_param['line_ch']))
logger.info('For the targets: {}.'.format(targets))
for i in range(len(targets)):
target = targets[i]
field = fields[i]
gridder = 'wproject'
if calib['mosaic']:
for target_name in targets:
inx = [j for j in range(len(calib['target_names'])) if target_name in calib['target_names'][j]]
fields = numpy.array(calib['targets'],dtype='str')[inx]
field = ','.join(fields)
gridder = 'mosaic'
logger.info('Making dirty image of {} (line only).'.format(target))
command = "tclean(vis='{0}{1}'+'.split.contsub', field='{2}', imagename='{3}{1}'+'.dirty', cell='{4}', imsize=[{5},{5}], specmode='cube', outframe='bary', veltype='radio', restfreq='{6}', gridder='{7}', wprojplanes=-1, pblimit=0.1, normtype='flatnoise', deconvolver='hogbom', weighting='briggs', robust={8}, restoringbeam='common', niter=0, phasecenter='{9}', interactive=False)".format(src_dir,target,field,img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,gridder,cln_param['robust'],cln_param['phasecenter'])
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('Completed making dirty image.')
# Read configuration file with parameters
config_file = sys.argv[-1]
config,config_raw = cf.read_config(config_file)
interactive = config['global']['interactive']
# Set up your logger
logger = cf.get_logger(LOG_FILE_INFO = '{}.log'.format(config['global']['project_name']),
LOG_FILE_ERROR = '{}_errors.log'.format(config['global']['project_name'])) # Set up your logger
# Define MS file name
msfile = '{0}.ms'.format(config['global']['project_name'])
#Contsub
cf.check_casaversion(logger)
plot_spec(config,logger)
contsub(msfile,config,config_raw,config_file,logger)
plot_spec(config,logger,contsub=True)
#Remove previous dirty images
targets = config['calibration']['target_names']
for target in targets:
del_list = glob.glob(config['global']['img_dir']+'/'+'{}.dirty.*'.format(target))
if len(del_list) > 0:
logger.info('Deleting existing dirty image(s): {}'.format(del_list))
for file_path in del_list:
shutil.rmtree(file_path)
#Make dirty image
dirty_image(config,config_raw,config_file,logger)
#Review and backup parameters file
cf.diff_pipeline_params(config_file,logger)
cf.backup_pipeline_params(config_file,logger)
| 53.372222
| 536
| 0.610284
|
import imp, glob, shutil, numpy
imp.load_source('common_functions','common_functions.py')
import common_functions as cf
def contsub(msfile,config,config_raw,config_file,logger):
"""
Subtracts the continuum from each of the science target MSs.
If the no line free range is set then the user is queried (in interactive mode) and the configuration file updated.
Input:
msfile = Path to the MS. (String)
config = The parameters read from the configuration file. (Ordered dictionary)
config_raw = The instance of the parser.
config_file = Path to configuration file. (String)
"""
logger.info('Starting continuum subtraction.')
contsub = config['continuum_subtraction']
calib = config['calibration']
src_dir = config['global']['src_dir']+'/'
logger.info('Checking for line free channel ranges in parameters.')
targets = calib['target_names'][:]
fields = calib['targets'][:]
for i in range(len(targets)):
target = targets[i]
if 'spw' in target:
inx = target.index('.spw')
target_name = target[:inx]
if target_name in calib['target_names'][i-1]:
fields.insert(i,fields[i-1])
if calib['mosaic']:
logger.info('The parameters file indicates that this data set is a mosaic.')
logger.info('All fields in the mosaic will have the same continuum channels.')
targets = list(set(targets))
reset_ch = False
if len(contsub['linefree_ch']) == 0 or len(contsub['linefree_ch']) != len(targets):
reset_ch = True
if len(contsub['linefree_ch']) < len(targets):
logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
while len(contsub['linefree_ch']) < len(targets):
contsub['linefree_ch'].append('')
elif len(contsub['linefree_ch']) > len(targets):
logger.warning('There are more channel ranges than target fields.')
logger.info('Current channel ranges: {}'.format(contsub['linefree_ch']))
logger.warning('The channel range list will now be truncated to match the number of targets.')
contsub['linefree_ch'] = contsub['linefree_ch'][:len(targets)]
elif interactive:
print('Current line free channels set as:')
for i in range(len(contsub['linefree_ch'])):
print('{0}: {1}'.format(targets[i],contsub['linefree_ch'][i]))
resp = str(raw_input('Do you want revise the line free channels (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_ch = True
if reset_ch:
if not interactive:
logger.critical('The number of line free channel ranges provided does not match the number of targets.')
logger.info('Line free change ranges: {}'.format(contsub['linefree_ch']))
logger.info('Targets: {}'.format(targets))
sys.exit(-1)
else:
print('For each target enter the line free channels in the following format:\nspwID1:min_ch1~max_ch1;min_ch2~max_ch2,spwID2:min_ch3~max_ch3;min_ch4~max_ch4')
for i in range(len(targets)):
contsub['linefree_ch'][i] = cf.uinput('Line free channels for {}: '.format(targets[i]), contsub['linefree_ch'][i])
logger.info('Setting line free channels for {0} as: {1}.'.format(targets[i], contsub['linefree_ch'][i]))
if type(contsub['fitorder']) == type(1):
order_set = False
while not order_set:
try:
order = int(cf.uinput('Set the fit order for {}: '.format(targets[i]), contsub['fitorder']))
if order >= 0:
order_set = True
except ValueError:
print 'Fit order must be an integer.'
if order != contsub['fitorder'] and len(targets) > 1:
order_list = list(numpy.zeros(len(targets),dtype='int')+contsub['fitorder'])
order_list[i] = order
order = order_list
contsub['fitorder'] = order
else:
order_set = False
while not order_set:
try:
order = int(cf.uinput('Set the fit order for {}: '.format(targets[i]), contsub['fitorder'][i]))
if order >= 0:
order_set = True
contsub['fitorder'] = order
except ValueError:
print 'Fit order must be an integer.'
logger.info('Updating config file to set line free channels and fit orders.')
config_raw.set('continuum_subtraction','linefree_ch',contsub['linefree_ch'])
config_raw.set('continuum_subtraction','fitorder',contsub['fitorder'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Line free channels set as: {}.'.format(contsub['linefree_ch']))
logger.info('Fit order(s) set as: {}.'.format(contsub['fitorder']))
logger.info('For the targets: {}.'.format(targets))
for i in range(len(targets)):
target = targets[i]
field = fields[i]
if calib['mosaic']:
for target_name in targets:
inx = [j for j in range(len(calib['target_names'])) if target_name in calib['target_names'][j]]
fields = numpy.array(calib['targets'],dtype='str')[inx]
field = ','.join(fields)
chans = contsub['linefree_ch'][i]
spws = chans.split(',')
for j in range(len(spws)):
spw = spws[j].strip()
inx = spw.index(':')
spw = spw[0:inx]
spws[j] = spw
logger.info('Subtracting the continuum from field: {}'.format(target))
if type(contsub['fitorder']) == type(1):
order = int(contsub['fitorder'])
else:
order = int(contsub['fitorder'][i])
command = "uvcontsub(vis='{0}{1}'+'.split', field='{2}', fitspw='{3}', spw='{4}', excludechans=False, combine='spw', solint='int', fitorder={5}, want_cont={6})".format(src_dir,target,field,chans,','.join(spws),order,contsub['save_cont'])
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('Completed continuum subtraction.')
def plot_spec(config,logger,contsub=False):
    """
    For each SPW and each science target amplitude vs channel and amplitude vs velocity are plotted.
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    contsub = If True, plot the continuum-subtracted MSs instead. (Boolean)
    """
    logger.info('Starting plotting amplitude spectrum.')
    plots_obs_dir = './plots/'
    cf.makedir(plots_obs_dir,logger)
    calib = config['calibration']
    # For a mosaic the per-pointing names collapse to one entry per target.
    if calib['mosaic']:
        targets = list(set(calib['target_names']))
    else:
        targets = calib['target_names'][:]
    src_dir = config['global']['src_dir']+'/'
    # The continuum-subtracted MS carries an extra '.contsub' suffix.
    ms_suffix = '.split.contsub' if contsub else '.split'
    for target in targets:
        ms_paths = glob.glob('{0}{1}{2}'.format(src_dir,target,ms_suffix))
        for ms_path in ms_paths:
            if contsub:
                plot_file = plots_obs_dir+'{0}_contsub_amp_chn.png'.format(target)
            else:
                plot_file = plots_obs_dir+'{0}_amp_chn.png'.format(target)
            logger.info('Plotting amplitude vs channel to {}'.format(plot_file))
            plotms(vis=ms_path, xaxis='chan', yaxis='amp',
                   ydatacolumn='corrected', plotfile=plot_file,
                   expformat='png', overwrite=True, showgui=False)
            # The velocity plot is only made for the non-subtracted data.
            if not contsub:
                plot_file = plots_obs_dir+'{0}_amp_vel.png'.format(target)
                logger.info('Plotting amplitude vs velocity to {}'.format(plot_file))
                plotms(vis=ms_path, xaxis='velocity', yaxis='amp',
                       ydatacolumn='corrected', plotfile=plot_file,
                       expformat='png', overwrite=True, showgui=False,
                       freqframe='BARY', restfreq=str(config['global']['rest_freq']), veldef='OPTICAL')
    logger.info('Completed plotting amplitude spectrum.')
def dirty_image(config,config_raw,config_file,logger):
    """
    Generates a dirty (continuum subtracted) image of each science target.
    Checks that the pixel size, image size, and line emission channels are set (will prompt user if in interactive mode).
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    config_raw = The instance of the parser.
    config_file = Path to configuration file. (String)
    """
    logger.info('Starting making dirty image.')
    calib = config['calibration']
    contsub = config['continuum_subtraction']
    rest_freq = config['global']['rest_freq']
    targets = calib['target_names'][:]
    fields = calib['targets'][:]
    # Targets that were split per SPW are named '<target>.spw<N>'; duplicate the
    # previous entry's field so the fields list stays index-aligned with targets.
    for i in range(len(targets)):
        target = targets[i]
        if 'spw' in target:
            inx = target.index('.spw')
            target_name = target[:inx]
            if target_name in calib['target_names'][i-1]:
                fields.insert(i,fields[i-1])
    if calib['mosaic']:
        targets = list(set(calib['target_names']))
    cln_param = config['clean']
    src_dir = config['global']['src_dir']+'/'
    img_dir = config['global']['img_dir']+'/'
    cf.makedir('./'+img_dir,logger)
    logger.info('Removing any existing dirty images.')
    for target in targets:
        del_list = glob.glob(img_dir+'{}.dirty*'.format(target))
        for file_path in del_list:
            logger.info('Deleting: '+file_path)
            shutil.rmtree(file_path)
    logger.info('Checking clean parameters for dirty image.')
    # --- Pixel sizes: require exactly one value per target.
    # Non-interactive runs abort on a mismatch; interactive runs pad/truncate
    # the list and let the user revise each entry.
    reset_cln = False
    if len(cln_param['pix_size']) == 0 or len(cln_param['pix_size']) != len(targets):
        if not interactive:
            logger.critical('The number of pixel sizes provided does not match the number of targets.')
            logger.info('Pixel sizes: {}'.format(cln_param['pix_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['pix_size']) < len(targets):
            logger.warning('There are more target fields than pixel sizes. Appending blanks.')
            while len(cln_param['pix_size']) < len(targets):
                cln_param['pix_size'].append('')
        elif len(cln_param['pix_size']) > len(targets):
            logger.warning('There are more pixel sizes than target fields.')
            logger.info('Current pixel sizes: {}'.format(cln_param['pix_size']))
            logger.warning('The pixel size list will now be truncated to match the number of targets.')
            cln_param['pix_size'] = cln_param['pix_size'][:len(targets)]
    elif interactive:
        print('Current pixel sizes set as:')
        for i in range(len(cln_param['pix_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['pix_size'][i]))
        resp = str(raw_input('Do you want revise the pixel sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the desired pixel size:')
        for i in range(len(targets)):
            cln_param['pix_size'][i] = cf.uinput('Pixel size for {}: '.format(targets[i]), cln_param['pix_size'][i])
            logger.info('Setting pixel size for {0} as: {1}.'.format(targets[i], cln_param['pix_size'][i]))
    # Persist the (possibly revised) pixel sizes back into the config file.
    logger.info('Updating config file to set pixel sizes.')
    config_raw.set('clean','pix_size',cln_param['pix_size'])
    configfile = open(config_file,'w')
    config_raw.write(configfile)
    configfile.close()
    logger.info('Pixel sizes set as: {}.'.format(cln_param['pix_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Image sizes: same per-target validation as for pixel sizes.
    reset_cln = False
    if len(cln_param['im_size']) == 0 or len(cln_param['im_size']) != len(targets):
        if not interactive:
            logger.critical('The number of image sizes provided does not match the number of targets.')
            logger.info('Image sizes: {}'.format(cln_param['im_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['im_size']) < len(targets):
            logger.warning('There are more target fields than image sizes. Appending blanks.')
            while len(cln_param['im_size']) < len(targets):
                cln_param['im_size'].append('')
        elif len(cln_param['im_size']) > len(targets):
            logger.warning('There are more image sizes than target fields.')
            logger.info('Current image sizes: {} pixels.'.format(cln_param['im_size']))
            logger.warning('The image size list will now be truncated to match the number of targets.')
            cln_param['im_size'] = cln_param['im_size'][:len(targets)]
    elif interactive:
        print('Current images sizes set as:')
        for i in range(len(cln_param['im_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['im_size'][i]))
        resp = str(raw_input('Do you want revise the image sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the desired image size:')
        for i in range(len(targets)):
            print('Note: The pixel size for this target was set to: {}'.format(cln_param['pix_size'][i]))
            cln_param['im_size'][i] = cf.uinput('Image size for {}: '.format(targets[i]), cln_param['im_size'][i])
            logger.info('Setting image size for {0} as: {1} x {2}.'.format(targets[i], cln_param['im_size'][i],cln_param['pix_size'][i]))
    logger.info('Updating config file to set image sizes.')
    config_raw.set('clean','im_size',cln_param['im_size'])
    configfile = open(config_file,'w')
    config_raw.write(configfile)
    configfile.close()
    logger.info('Image sizes set as: {} pixels.'.format(cln_param['im_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Line emission channels: same per-target validation again.
    reset_cln = False
    if len(cln_param['line_ch']) == 0 or len(cln_param['line_ch']) != len(targets):
        if not interactive:
            logger.critical('The number of line channel ranges provided does not match the number of targets.')
            logger.info('Pixel sizes: {}'.format(cln_param['line_ch']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['line_ch']) < len(targets):
            logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
            while len(cln_param['line_ch']) < len(targets):
                cln_param['line_ch'].append('')
        elif len(cln_param['line_ch']) > len(targets):
            logger.warning('There are more channel ranges than target fields.')
            logger.info('Current channel ranges: {}'.format(cln_param['line_ch']))
            logger.warning('The channel range list will now be truncated to match the number of targets.')
            cln_param['line_ch'] = cln_param['line_ch'][:len(targets)]
    elif interactive:
        print('Current image channels set as:')
        for i in range(len(cln_param['line_ch'])):
            print('{0}: {1}'.format(targets[i],cln_param['line_ch'][i]))
        resp = str(raw_input('Do you want revise the channels that will be imaged (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the channels you want to image in the following format:\nspwID:min_ch~max_ch')
        for i in range(len(targets)):
            print('Note: The continuum channels for this target were set to: {}'.format(contsub['linefree_ch'][i]))
            cln_param['line_ch'][i] = cf.uinput('Channels to image for {}: '.format(targets[i]), cln_param['line_ch'][i])
            logger.info('Setting image channels for {0} as: {1}.'.format(targets[i], cln_param['line_ch'][i]))
    logger.info('Updating config file to set channels to be imaged.')
    config_raw.set('clean','line_ch',cln_param['line_ch'])
    configfile = open(config_file,'w')
    config_raw.write(configfile)
    configfile.close()
    logger.info('Line emission channels set as: {}.'.format(cln_param['line_ch']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Make the dirty cube for each target with tclean (niter=0 => no cleaning).
    for i in range(len(targets)):
        target = targets[i]
        field = fields[i]
        gridder = 'wproject'
        # Mosaics image all pointings of the target together with the mosaic gridder.
        if calib['mosaic']:
            for target_name in targets:
                inx = [j for j in range(len(calib['target_names'])) if target_name in calib['target_names'][j]]
                fields = numpy.array(calib['targets'],dtype='str')[inx]
                field = ','.join(fields)
            gridder = 'mosaic'
        logger.info('Making dirty image of {} (line only).'.format(target))
        command = "tclean(vis='{0}{1}'+'.split.contsub', field='{2}', imagename='{3}{1}'+'.dirty', cell='{4}', imsize=[{5},{5}], specmode='cube', outframe='bary', veltype='radio', restfreq='{6}', gridder='{7}', wprojplanes=-1, pblimit=0.1, normtype='flatnoise', deconvolver='hogbom', weighting='briggs', robust={8}, restoringbeam='common', niter=0, phasecenter='{9}', interactive=False)".format(src_dir,target,field,img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,gridder,cln_param['robust'],cln_param['phasecenter'])
        logger.info('Executing command: '+command)
        exec(command)
        cf.check_casalog(config,config_raw,logger,casalog)
    logger.info('Completed making dirty image.')
# Read configuration file with parameters
# (the config file path is taken from the last command-line argument)
config_file = sys.argv[-1]
config,config_raw = cf.read_config(config_file)
interactive = config['global']['interactive']
# Set up your logger
logger = cf.get_logger(LOG_FILE_INFO = '{}.log'.format(config['global']['project_name']),
                       LOG_FILE_ERROR = '{}_errors.log'.format(config['global']['project_name'])) # Set up your logger
# Define MS file name
msfile = '{0}.ms'.format(config['global']['project_name'])
#Contsub
# Plot spectra before and after continuum subtraction for comparison.
cf.check_casaversion(logger)
plot_spec(config,logger)
contsub(msfile,config,config_raw,config_file,logger)
plot_spec(config,logger,contsub=True)
#Remove previous dirty images
targets = config['calibration']['target_names']
for target in targets:
    del_list = glob.glob(config['global']['img_dir']+'/'+'{}.dirty.*'.format(target))
    if len(del_list) > 0:
        logger.info('Deleting existing dirty image(s): {}'.format(del_list))
        for file_path in del_list:
            shutil.rmtree(file_path)
#Make dirty image
dirty_image(config,config_raw,config_file,logger)
#Review and backup parameters file
cf.diff_pipeline_params(config_file,logger)
cf.backup_pipeline_params(config_file,logger)
| 0
| 0
| 0
|
3e848fde2ee28e85df566a703650c0fe2549e86b
| 6,556
|
py
|
Python
|
LiveData.py
|
jcedmiston/METRA-Rail-Delay-Study
|
c595ca060949da00f7dd841df5297d66606fc8f7
|
[
"MIT"
] | null | null | null |
LiveData.py
|
jcedmiston/METRA-Rail-Delay-Study
|
c595ca060949da00f7dd841df5297d66606fc8f7
|
[
"MIT"
] | null | null | null |
LiveData.py
|
jcedmiston/METRA-Rail-Delay-Study
|
c595ca060949da00f7dd841df5297d66606fc8f7
|
[
"MIT"
] | null | null | null |
from threading import Thread
import time
import datetime
import json
import requests
import csv
# Create keys.py in same directory and include API keys
from keys import METRA_USER, METRA_PASS, WEATHER_KEY
import logging as log # Setup logging
log.basicConfig(filename='data/data_collection.log',
    filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',
    level=log.INFO)
# Global API URLs
TRIPUPDATES_URL = "https://gtfsapi.metrarail.com/gtfs/tripUpdates"
ALERTS_URL = "https://gtfsapi.metrarail.com/gtfs/alerts"
# BUG FIX: the previous version used a backslash continuation *inside* the
# string literal, which embedded the next line's leading whitespace into the
# URL ("...weather    ?lat=..."). Adjacent literals keep the URL clean.
WEATHER_URL = lambda lat, lon: ("https://api.openweathermap.org/data/2.5/weather"
                                "?lat=" + str(lat) + "&lon=" + str(lon)
                                + "&appid=" + WEATHER_KEY)
if __name__ == "__main__":
    # Run both collectors concurrently; daemon threads are killed when the
    # main thread exits. (Thread.setDaemon() is deprecated; use daemon=True.)
    mainDataThread = Thread(name='Main Data Collection', target=CollectData, daemon=True)
    alertsDataThread = Thread(name='Alert Data Collection', target=CollectAlerts, daemon=True)
    mainDataThread.start()
    alertsDataThread.start()
    while True:
        # BUG FIX: 'while True: pass' busy-waited and pinned a CPU core;
        # sleeping keeps the process alive at ~zero cost.
        time.sleep(60)
| 38.792899
| 100
| 0.567877
|
from threading import Thread
import time
import datetime
import json
import requests
import csv
# Create keys.py in same directory and include API keys
from keys import METRA_USER, METRA_PASS, WEATHER_KEY
import logging as log # Setup logging
log.basicConfig(filename='data/data_collection.log',
    filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',
    level=log.INFO)
# Global API URLs
TRIPUPDATES_URL = "https://gtfsapi.metrarail.com/gtfs/tripUpdates"
ALERTS_URL = "https://gtfsapi.metrarail.com/gtfs/alerts"
# BUG FIX: the previous version used a backslash continuation *inside* the
# string literal, which embedded the next line's leading whitespace into the
# URL ("...weather    ?lat=..."). Adjacent literals keep the URL clean.
WEATHER_URL = lambda lat, lon: ("https://api.openweathermap.org/data/2.5/weather"
                                "?lat=" + str(lat) + "&lon=" + str(lon)
                                + "&appid=" + WEATHER_KEY)
def RequestData(URL, User=None, Pass=None, timeout=30):
    """GET `URL` and decode the JSON response body.

    User/Pass enable HTTP basic auth when both are supplied (the Metra GTFS
    endpoints require it; the weather endpoint carries its key in the URL).
    `timeout` (seconds) bounds the request so the polling loops can never
    hang forever; a timeout raises requests.exceptions.Timeout, a subclass
    of RequestException, which the callers already catch.
    Raises json.JSONDecodeError (a ValueError) on a non-JSON body.
    """
    response = requests.get(URL,
                            auth=(User, Pass) if User and Pass else None,
                            timeout=timeout)
    return json.loads(response.text)
def CollectAlerts():
    """Poll the Metra alerts feed every 30 minutes and append one CSV row
    per active alert to data/alert.csv.

    Runs forever (intended as a daemon thread). Re-raises IOError if the
    output file cannot be opened/written.
    """
    try:
        with open("data/alert.csv","a", newline='') as alerts: # open/close file
            alertsWriter = csv.writer(alerts) # setup writer
            while True:
                # setup variables
                alertsRawData = None
                attempts = 0
                while attempts < 10: # try to connect
                    try:
                        alertsRawData = RequestData(ALERTS_URL, METRA_USER, METRA_PASS)
                    except requests.exceptions.RequestException as e:
                        # if requests fail after 10 trys skip, try in 30 min
                        attempts += 1
                        log.error('Operation failed: %s with %d attempts remaining'
                                  % (e, 10-attempts))
                    else:
                        break # data collected, break request loop
                try:
                    if not alertsRawData:
                        raise ValueError("Data collection from the server failed")
                    for idx in range(len(alertsRawData)):
                        entry = alertsRawData[idx]
                        # BUG FIX: reset per alert; previously the list was
                        # initialised once, so each row accumulated the routes
                        # of every preceding alert in the polling cycle.
                        affectedRoutes = []
                        try:
                            # Inside the try so a malformed alert (missing
                            # 'informed_entity' etc.) is skipped, not fatal.
                            for routes in entry['alert']['informed_entity']:
                                affectedRoutes.append(routes['route_id'])
                            alertsData = [time.time(),
                                          datetime.datetime.now(),
                                          entry['id'],
                                          affectedRoutes,
                                          entry['alert']['active_period'][0]['start']['low'],
                                          entry['alert']['active_period'][0]['end']['low'],
                                          entry['alert']['cause'],
                                          entry['alert']['effect'],
                                          entry['alert']['description_text']['translation'][0]['text']]
                        except KeyError as e:
                            # BUG FIX: previously set 'tripData = None' here, a
                            # copy-paste from CollectData; no such variable is
                            # used in this function.
                            log.error('Operation failed: %s , Alert data missing key' % e)
                        else:
                            alertsWriter.writerow(alertsData)
                            # Flush so rows are not lost if the daemon thread
                            # is killed during the 30-minute sleep.
                            alerts.flush()
                            log.info("Alert Data: " + "".join(str(data) for data in alertsData))
                except ValueError as e:
                    log.error('Operation failed: %s' % e)
                finally:
                    time.sleep(1800) # check for new alerts after 30 mins
    except IOError as e:
        log.error('Operation failed: %s' % e)
        raise IOError(e)
def CollectData():
    """Poll the Metra trip-updates feed every 30 seconds, join each trip with
    the current weather at the vehicle's position, and append one CSV row per
    trip to data/trip.csv.

    Runs forever (intended as a daemon thread). Re-raises IOError if the
    output file cannot be opened/written.
    """
    try:
        # BUG FIX: the file handle used to be named 'trip', which was then
        # shadowed by the inner loop index of the same name.
        with open("data/trip.csv","a", newline='') as trip_file: # open/close file
            tripWriter = csv.writer(trip_file) # setup writer
            while True:
                # setup variables
                tripData = []
                weatherData = []
                try:
                    tripRawData = RequestData(TRIPUPDATES_URL,
                                              METRA_USER,
                                              METRA_PASS) # request data
                    # keep collected time the same for each route
                    collectedTimeFormatted = datetime.datetime.now()
                    collectedTime = time.time()
                    for i in range(len(tripRawData)): # filter/write data to csv
                        try:
                            update = tripRawData[i]['trip_update']
                            tripData = [collectedTime,
                                        collectedTimeFormatted,
                                        tripRawData[i]['id'],
                                        update['trip']['route_id'],
                                        update['position']['vehicle']['vehicle']['id'],
                                        update['position']['vehicle']['vehicle']['label'],
                                        update['stop_time_update'][0]['arrival']['delay'],
                                        update['position']['vehicle']['position']['latitude'],
                                        update['position']['vehicle']['position']['longitude']]
                        except KeyError as e:
                            log.error('Operation failed: %s , Trip data missing key' % e)
                            tripData = None
                        else:
                            log.info("Trip Data: " + "".join(str(data) for data in tripData))
                        try:
                            position = tripRawData[i]['trip_update']['position']['vehicle']['position']
                            weatherRawData = RequestData(WEATHER_URL(position['latitude'],
                                                                     position['longitude']))
                            weatherData = [weatherRawData['weather'],
                                           weatherRawData['main']['temp'],
                                           weatherRawData['main']['temp_min'],
                                           weatherRawData['main']['temp_max'],
                                           weatherRawData['visibility'],
                                           weatherRawData['wind']['speed']]
                        except KeyError as e:
                            log.error('Operation failed: %s , Weather data missing key' % e)
                            weatherData = []
                        else:
                            log.info("Weather Data: " + "".join(str(data) for data in weatherData))
                        # Only write a row when the trip fields were complete;
                        # weatherData may legitimately be empty.
                        if tripData is not None:
                            tripWriter.writerow(tripData + weatherData)
                            # Flush so rows survive if the daemon thread dies.
                            trip_file.flush()
                except requests.exceptions.RequestException as e:
                    log.error('Operation failed: %s' % e)
                finally:
                    time.sleep(30)
    except IOError as e:
        log.error('Operation failed: %s' % e)
        raise IOError(e)
if __name__ == "__main__":
    # Run both collectors concurrently; daemon threads are killed when the
    # main thread exits. (Thread.setDaemon() is deprecated; use daemon=True.)
    mainDataThread = Thread(name='Main Data Collection', target=CollectData, daemon=True)
    alertsDataThread = Thread(name='Alert Data Collection', target=CollectAlerts, daemon=True)
    mainDataThread.start()
    alertsDataThread.start()
    while True:
        # BUG FIX: 'while True: pass' busy-waited and pinned a CPU core;
        # sleeping keeps the process alive at ~zero cost.
        time.sleep(60)
| 5,409
| 0
| 69
|
eb4155bb1e3d09a092a5353cc7fb49e067753001
| 22,109
|
py
|
Python
|
src/clustar/core.py
|
clustar/Clustar
|
83e155feffc10c4bf172f8ec769fb3c5ffe1d579
|
[
"MIT"
] | 4
|
2021-02-24T17:27:25.000Z
|
2021-06-28T04:45:32.000Z
|
src/clustar/core.py
|
clustar/Clustar
|
83e155feffc10c4bf172f8ec769fb3c5ffe1d579
|
[
"MIT"
] | 3
|
2021-04-05T14:53:26.000Z
|
2021-06-27T20:17:14.000Z
|
src/clustar/core.py
|
clustar/Clustar
|
83e155feffc10c4bf172f8ec769fb3c5ffe1d579
|
[
"MIT"
] | 1
|
2021-02-15T16:13:05.000Z
|
2021-02-15T16:13:05.000Z
|
"""
Contains the 'ClustarData' class, which is responsible for executing the
entire project pipeline for detecting groups in a single FITS image; this
class also collects and stores all relevant data, statistics, and variables in
this pipeline.
Visit <https://clustar.github.io/> for additional information.
"""
from clustar import denoise, group, graph, fit
import astropy.io.fits
import numpy as np
class ClustarData(object):
    """
    A class for executing the entire pipline for detecting groups in a FITS
    image and for storing all relevant data associated with each group.
    Attributes
    ----------
    path : str
        Path to FITS file.
    image : Image
        Internal class for storing FITS image variables.
    params : Params
        Internal class for specifying the ClustarData parameters.
    groups : list
        List of 'Group' objects extracted from the given FITS image.
    flag : bool
        True if any detected group in the FITS image is flagged for manual
        review, otherwise false.
    Methods
    -------
    update(**kwargs)
        Updates 'Params' object with the specified arguments and executes the
        entire pipeline.
    reset(**kwargs)
        Resets 'Params' object to the default values, then updates 'Params'
        object with the specified arguments and executes the entire pipeline.
    identify(vmin=None, vmax=None, show=True, dpi=180)
        Displays the FITS image and identifies the groups in green, orange,
        or red rectangles, which are defined as:
        1. 'Green' denotes that the group is not flagged for manual review
        2. 'Orange' denotes that the group is not flagged for manual review,
        but the group is smaller than the beam size.
        3. 'Red' denotes that the group is flagged for manual review.
        Beam size is the white oval shown on the bottom right corner of
        the FITS image.
    Examples
    --------
    Create the 'ClustarData' object by specifying the path to FITS file.
    >>> cd = ClustarData(path='~/data/example.fits', threshold=0.025)
    Visualize the detected groups.
    >>> cd.identify()
    Access individual 'Group' objects.
    >>> cd.groups
    Notes
    -----
    Visit <https://clustar.github.io/> for additional information.
    """
    class Image(object):
        """
        An internal class for storing FITS image variables.
        Attributes
        ----------
        clean : ndarray
            Data from the FITS image after denoising process.
        x : ndarray
            Index values of the 'x' position from the data.
        y : ndarray
            Index values of the 'y' position from the data.
        pos : ndarray
            Index values of the data, given as (x, y).
        nonzero : ndarray
            Index values of nonzero points in the data.
        std : ndarray
            Standard deviation values from each block in the grid composed
            in the denoise process; used to calculate the noise statistic.
        rms : ndarray
            Root mean squared values from each block in the grid composed
            in the denoise process; used to calculate the noise statistic.
        noise : float
            Noise statistic generated for the denoise process; values less
            than "noise" times "sigma" are set to zero.
        major : float
            Length of the major axis for the beam.
        minor : float
            Length of the minor axis for the beam.
        degrees : float
            Degrees of rotation for the beam.
        area : float
            Number of points inside the beam; used to identify groups smaller
            than the beam size.
        """
        def __init__(self, data, header):
            """
            Parameters
            ----------
            data : ndarray
                Raw data from the FITS image; must be 2-D.
            header : dict
                Header dictionary stored in FITS file.
            Raises
            ------
            KeyError
                If the following keys are missing from the FITS header:
                'BMAJ', 'BMIN', 'BPA', 'CDELT1', 'CDELT2', and 'OBJECT'.
            """
            self.data = data
            self.header = header
            # Derived attributes (beam geometry, noise, index grids) are
            # computed by the private _setup() helper.
            self._setup()
    class Group(object):
        """
        An internal class for storing variables associated to a detection.
        Attributes
        ----------
        image : _Image
            Internal subclass for storing image variables.
        res : _Res
            Internal subclass for storing residual variables.
        fit : _Fit
            Internal subclass for storing fit variables.
        stats : _Stats
            Internal subclass for storing statistics.
        metrics : _Metrics
            Internal subclass for storing the evaluated metrics.
        flag : bool
            Determines whether this group is marked for manual review.
        """
        class _Image(object):
            """
            An internal subclass for storing image variables associated to a
            detection.
            Attributes
            ----------
            data : ndarray
                Subset of raw data from the FITS image identifying the group.
            clean : ndarray
                Data of the group after the denoising process.
            x : ndarray
                Index values of the 'x' position from the group data.
            y : ndarray
                Index values of the 'y' position from the group data.
            pos : ndarray
                Index values of the group data, given as (x, y).
            nonzero : ndarray
                Index values of nonzero points in the group data.
            ref : list
                List containing the minimum row value and minimum column value
                of the group data.
            limit : list
                List containing the maximum row value and maximum column value
                of the overall FITS image.
            """
            def __init__(self, bounds):
                """
                Parameters
                ----------
                bounds : list
                    List of four integers corresponding to minimum row value,
                    maximum row value, minimum column value, and maximum column
                    value in this order.
                """
                self.bounds = bounds
                # Only the bounding box is known at construction; all other
                # attributes are populated by later pipeline stages.
                self.data = None
                self.clean = None
                self.x = None
                self.y = None
                self.ref = None
                self.limit = None
                self.pos = None
                self.nonzero = None
        class _Residuals(object):
            """
            An internal subclass for storing residual variables associated to
            a detection.
            Attributes
            ----------
            data : ndarray
                Residuals computed in the fitting process. Precisely, they
                are [1 - ("bivariate Gaussian model" / "group data")].
            clean : ndarray
                Residuals computed in the fitting process, where points
                outside of the ellipse are set to zero.
            pos : ndarray
                Index values of the residual data, given as (x, y).
            inside : ndarray
                Subset of index values that lie inside of the ellipse.
            outside : ndarray
                Subset of index values that lie outside of the ellipse.
            output : array_like
                List of residuals that lie inside of the ellipse; the result
                of the evaluation metric that is computed on this list is
                compared to the specified threshold; this determines which
                groups are flagged for manual review.
            """
        class _Fit(object):
            """
            An internal subclass for storing fit variables associated to a
            detection.
            Attributes
            ----------
            rv : multivariate_normal_frozen
                Frozen multivariable normal distribution generated from the
                group statistics.
            bvg : ndarray
                Results of the multivariate normal probability density
                function evaluated at the points specified by the group data.
            ellipse : Polygon
                Polygon object containing the points that generate an ellipse
                corresponding to the multivariate normal distribution.
            major_peaks : int
                Number of local maximas along the major axis of the ellipse.
            minor_peaks : int
                Number of local maximas along the minor axis of the ellipse.
            """
        class _Stats(object):
            """
            An internal subclass for storing statistics associated to a
            detection.
            Attributes
            ----------
            x_bar : float
                Average of index values in the 'x' position weighted by the
                corresponding group data.
            y_bar : float
                Average of index values in the 'y' position weighted by the
                corresponding group data.
            x_var : float
                Variance of index values in the 'x' position weighted by the
                corresponding group data.
            y_var : float
                Variance of index values in the 'y' position weighted by the
                corresponding group data.
            covariance : float
                Covariance of the index values weighted by the corresponding
                group data.
            covariance_matrix : array_like
                Covariance matrix for the multivariate normal that is used in
                the fitting process.
            rho : float
                Correlation coefficient computed from the covariance matrix.
            eigen_values : array_like
                Eigenvalues obtained from the eigendecomposition of the
                covariance matrix.
            eigen_vectors : array_like
                Eigenvectors obtained from the eigendecomposition of the
                covariance matrix.
            x_len : float
                Length of the major axis of the ellipse in pixels.
            y_len : float
                Length of the minor axis of the ellipse in pixels.
            radians : float
                Rotation of ellipse denoted in radians.
            degrees : float
                Rotation of ellipse denoted in degrees.
            """
        class _Metrics(object):
            """
            An internal subclass for storing the evaluated metrics associated
            to a detection.
            Attributes
            ----------
            standard_deviation : float
                Standard deviation of the output residuals for the group.
            variance : float
                Variance of the output residuals for the group.
            average : float
                Mean of the output residuals for the group.
            weighted_average : float
                Mean of the output residuals weighted by the group data.
            """
        def __init__(self, bounds):
            """
            Parameters
            ----------
            bounds : list
                List of four integers corresponding to minimum row value,
                maximum row value, minimum column value, and maximum column
                value in this order.
            """
            # Each facet of a detection lives in its own container object.
            self.image = self._Image(bounds)
            self.res = self._Residuals()
            self.fit = self._Fit()
            self.stats = self._Stats()
            self.metrics = self._Metrics()
            self.flag = False
    class Params(object):
        """
        An internal class for specifying the ClustarData parameters.
        Attributes
        ----------
        radius_factor : float
            Factor mulitplied to radius to determine cropping circle in the
            denoising process; must be within the range [0, 1].
        chunks : int
            Number of chunks to use in a grid; must be an odd number.
        quantile : float
            Quantile of RMS to determine the noise level; must be within the
            range [0, 1].
        apply_gradient : bool
            Determine if the FITS image should be multiplied by a gradient
            in order to elevate central points; similar to multiplying the
            FITS image by the associated 'pb' data.
        sigma : float
            Factor multiplied to noise level to determine the cutoff point,
            where values less than this threshold are set to zero.
        alpha : float
            Determines the size of the ellipse in relation to the chi-squared
            distribution; must be within the range (0, 1).
        buffer_size : int
            Number of points considered outside of the group range. For in-
            stance, given a 1-d group range of [10, 20], the algorithm checks
            for nonzero points within the range [5, 25] when the 'buffer_size'
            is 5.
        group_size : int
            Minimum number of nonzero points that determines a group.
        group_factor : float
            Ratio between [0, 1] that specifies the minimum number of
            nonzero points that determines a group in relation to the number
            of nonzero points in the largest group.
        metric : str
            Method used for evaluating the groups; must be one of the
            following: "standard_deviation", "variance", "average", or
            "weighted_average".
        threshold : float
            Cutoff point that determines which groups are flagged for manual
            review, given the specified metric.
        split_binary : bool
            Experimental; determine whether binary subgroups identified
            within a group should be split into individual groups.
        subgroup_factor : float
            Experimental; ratio between [0, 1] that specifies the subgroup
            range in terms of the absolute maximum intensity.
        evaluate_peaks : bool
            Experimental; determine whether the peaks of the output residuals
            should be taken into consideration in the flagging process.
        smoothing : int
            Experimental; size of window used in the moving average smoothing
            process for peak evaluation.
        clip : float
            Experimental; determines the percentage of tail values that are
            trimmed for peak evaluation.
        """
        def __init__(self, args):
            """
            Parameters
            ----------
            args : dict
                Dictionary of keyword arguments; see 'Attributes' for keys.
            Raises
            ------
            KeyError
                If specified key in 'args' does not match the label of the
                specified attributes.
            """
            # Defaults (see the class docstring for each parameter's meaning).
            self.radius_factor = 1
            self.chunks = 3
            self.quantile = 0.5
            self.apply_gradient = True
            self.sigma = 5
            self.alpha = 0.2
            self.buffer_size = 10
            self.group_size = 50
            self.group_factor = 0
            self.split_binary = False
            self.subgroup_factor = 0.5
            self.metric = "variance"
            self.threshold = 0.01
            self.evaluate_peaks = False
            self.smoothing = 5
            self.clip = 0.75
            # Overwrite defaults with the user-supplied keyword arguments.
            self._extract(args)
    def __init__(self, path, **kwargs):
        """
        Parameters
        ----------
        path : str
            Path to FITS file.
        **kwargs : optional
            See '~clustar.core.ClustarData.params' for other possible
            arguments.
        """
        self.path = path
        self.params = self.Params(kwargs)
        self.groups = []
        self.flag = False
        # Load the FITS file, then run the full detection pipeline.
        self._load_file()
        self._setup()
    def update(self, **kwargs):
        """
        Updates 'Params' object with the specified arguments and executes the
        entire pipeline.
        Parameters
        ----------
        **kwargs : optional
            See '~clustar.core.ClustarData.params' for other possible
            arguments.
        """
        # NOTE(review): Params.__init__ calls the private _extract(); confirm
        # Params also defines a public extract(), otherwise this line raises
        # AttributeError.
        self.params.extract(kwargs)
        self._setup()
    def reset(self, **kwargs):
        """
        Resets 'Params' object to the default values, then updates 'Params'
        object with the specified arguments and executes the entire pipeline.
        Parameters
        ----------
        **kwargs : optional
            See '~clustar.core.ClustarData.params' for other possible
            arguments.
        """
        # Rebuilding Params discards any previous overrides before applying
        # the new keyword arguments.
        self.params = self.Params(kwargs)
        self._setup()
    def identify(self, vmin=None, vmax=None, show=True, dpi=180):
        """
        Displays the FITS image and identifies the groups in green, orange, or
        red rectangles, which are defined as:
        1. Green denotes that the group is not flagged for manual review
        2. Orange denotes that the group is not flagged for manual review, but
        the group is smaller than the beam size.
        3. Red denotes that the group is flagged for manual review.
        Beam size is the white oval shown on the bottom right corner of
        the FITS image.
        Parameters
        ----------
        vmin : float, optional
            Lower bound for the shown intensities.
        vmax : float, optional
            Upper bound for the shown intensities.
        show : bool, optional
            Determines whether the groups should be identified. If false, the
            rectangles identifying the groups are not drawn.
        dpi : int, optional
            Dots per inch.
        """
        # Plotting is delegated to the graph module.
        graph.identify_groups(self, vmin, vmax, show, dpi)
| 34.708006
| 79
| 0.529829
|
"""
Contains the 'ClustarData' class, which is responsible for executing the
entire project pipeline for detecting groups in a single FITS image; this
class also collects and stores all relevant data, statistics, and variables in
this pipeline.
Visit <https://clustar.github.io/> for additional information.
"""
from clustar import denoise, group, graph, fit
import astropy.io.fits
import numpy as np
class ClustarData(object):
"""
A class for executing the entire pipline for detecting groups in a FITS
image and for storing all relevant data associated with each group.
Attributes
----------
path : str
Path to FITS file.
image : Image
Internal class for storing FITS image variables.
params : Params
Internal class for specifying the ClustarData parameters.
groups : list
List of 'Group' objects extracted from the given FITS image.
flag : bool
True if any detected group in the FITS image is flagged for manual
review, otherwise false.
Methods
-------
update(**kwargs)
Updates 'Params' object with the specified arguments and executes the
entire pipeline.
reset(**kwargs)
Resets 'Params' object to the default values, then updates 'Params'
object with the specified arguments and executes the entire pipeline.
identify(vmin=None, vmax=None, show=True, dpi=180)
Displays the FITS image and identifies the groups in green, orange,
or red rectangles, which are defined as:
1. 'Green' denotes that the group is not flagged for manual review
2. 'Orange' denotes that the group is not flagged for manual review,
but the group is smaller than the beam size.
3. 'Red' denotes that the group is flagged for manual review.
Beam size is the white oval shown on the bottom right corner of
the FITS image.
Examples
--------
Create the 'ClustarData' object by specifying the path to FITS file.
>>> cd = ClustarData(path='~/data/example.fits', threshold=0.025)
Visualize the detected groups.
>>> cd.identify()
Access individual 'Group' objects.
>>> cd.groups
Notes
-----
Visit <https://clustar.github.io/> for additional information.
"""
class Image(object):
"""
An internal class for storing FITS image variables.
Attributes
----------
clean : ndarray
Data from the FITS image after denoising process.
x : ndarray
Index values of the 'x' position from the data.
y : ndarray
Index values of the 'y' position from the data.
pos : ndarray
Index values of the data, given as (x, y).
nonzero : ndarray
Index values of nonzero points in the data.
std : ndarray
Standard deviation values from each block in the grid composed
in the denoise process; used to calculate the noise statistic.
rms : ndarray
Root mean squared values from each block in the grid composed
in the denoise process; used to calculate the noise statistic.
noise : float
Noise statistic generated for the denoise process; values less
than "noise" times "sigma" are set to zero.
major : float
Length of the major axis for the beam.
minor : float
Length of the minor axis for the beam.
degrees : float
Degrees of rotation for the beam.
area : float
Number of points inside the beam; used to identify groups smaller
than the beam size.
"""
def __init__(self, data, header):
"""
Parameters
----------
data : ndarray
Raw data from the FITS image; must be 2-D.
header : dict
Header dictionary stored in FITS file.
Raises
------
KeyError
If the following keys are missing from the FITS header:
'BMAJ', 'BMIN', 'BPA', 'CDELT1', 'CDELT2', and 'OBJECT'.
"""
self.data = data
self.header = header
self._setup()
def _setup(self):
x = range(self.data.shape[1])
y = range(self.data.shape[0])
self.x, self.y = np.meshgrid(x, y)
self.pos = np.dstack((self.x, self.y))
header = dict(self.header)
keys = ['BMAJ', 'BMIN', 'BPA', 'CDELT1', 'CDELT2', 'OBJECT']
for key in keys:
if key not in header.keys():
raise KeyError("FITS header is missing the " +
f"keyword '{key}'; double check " +
"the file type specification.")
# Specify beam parameters.
self.major = header['BMAJ']/abs(header['CDELT1'])
self.minor = header['BMIN']/abs(header['CDELT2'])
self.degrees = header['BPA']
self.area = np.pi * self.major/2 * self.minor/2
class Group(object):
"""
An internal class for storing variables associated to a detection.
Attributes
----------
image : _Image
Internal subclass for storing image variables.
res : _Res
Internal subclass for storing residual variables.
fit : _Fit
Internal subclass for storing fit variables.
stats : _Stats
Internal subclass for storing statistics.
metrics : _Metrics
Internal subclass for storing the evaluated metrics.
flag : bool
Determines whether this group is marked for manual review.
"""
class _Image(object):
"""
An internal subclass for storing image variables associated to a
detection.
Attributes
----------
data : ndarray
Subset of raw data from the FITS image identifying the group.
clean : ndarray
Data of the group after the denoising process.
x : ndarray
Index values of the 'x' position from the group data.
y : ndarray
Index values of the 'y' position from the group data.
pos : ndarray
Index values of the group data, given as (x, y).
nonzero : ndarray
Index values of nonzero points in the group data.
ref : list
List containing the minimum row value and minimum column value
of the group data.
limit : list
List containing the maximum row value and maximum column value
of the overall FITS image.
"""
def __init__(self, bounds):
"""
Parameters
----------
bounds : list
List of four integers corresponding to minimum row value,
maximum row value, minimum column value, and maximum column
value in this order.
"""
self.bounds = bounds
self.data = None
self.clean = None
self.x = None
self.y = None
self.ref = None
self.limit = None
self.pos = None
self.nonzero = None
class _Residuals(object):
"""
An internal subclass for storing residual variables associated to
a detection.
Attributes
----------
data : ndarray
Residuals computed in the fitting process. Precisely, they
are [1 - ("bivariate Gaussian model" / "group data")].
clean : ndarray
Residuals computed in the fitting process, where points
outside of the ellipse are set to zero.
pos : ndarray
Index values of the residual data, given as (x, y).
inside : ndarray
Subset of index values that lie inside of the ellipse.
outside : ndarray
Subset of index values that lie outside of the ellipse.
output : array_like
List of residuals that lie inside of the ellipse; the result
of the evaluation metric that is computed on this list is
compared to the specified threshold; this determines which
groups are flagged for manual review.
"""
def __init__(self):
self.data = None
self.clean = None
self.pos = None
self.inside = None
self.outside = None
self.output = None
class _Fit(object):
"""
An internal subclass for storing fit variables associated to a
detection.
Attributes
----------
rv : multivariate_normal_frozen
Frozen multivariable normal distribution generated from the
group statistics.
bvg : ndarray
Results of the multivariate normal probability density
function evaluated at the points specified by the group data.
ellipse : Polygon
Polygon object containing the points that generate an ellipse
corresponding to the multivariate normal distribution.
major_peaks : int
Number of local maximas along the major axis of the ellipse.
minor_peaks : int
Number of local maximas along the minor axis of the ellipse.
"""
def __init__(self):
self.rv = None
self.bvg = None
self.ellipse = None
self.major_peaks = None
self.minor_peaks = None
class _Stats(object):
"""
An internal subclass for storing statistics associated to a
detection.
Attributes
----------
x_bar : float
Average of index values in the 'x' position weighted by the
corresponding group data.
y_bar : float
Average of index values in the 'y' position weighted by the
corresponding group data.
x_var : float
Variance of index values in the 'x' position weighted by the
corresponding group data.
y_var : float
Variance of index values in the 'y' position weighted by the
corresponding group data.
covariance : float
Covariance of the index values weighted by the corresponding
group data.
covariance_matrix : array_like
Covariance matrix for the multivariate normal that is used in
the fitting process.
rho : float
Correlation coefficient computed from the covariance matrix.
eigen_values : array_like
Eigenvalues obtained from the eigendecomposition of the
covariance matrix.
eigen_vectors : array_like
Eigenvectors obtained from the eigendecomposition of the
covariance matrix.
x_len : float
Length of the major axis of the ellipse in pixels.
y_len : float
Length of the minor axis of the ellipse in pixels.
radians : float
Rotation of ellipse denoted in radians.
degrees : float
Rotation of ellipse denoted in degrees.
"""
def __init__(self):
self.x_bar = None
self.y_bar = None
self.x_var = None
self.y_var = None
self.covariance = None
self.covariance_matrix = None
self.rho = None
self.eigen_values = None
self.eigen_vectors = None
self.x_len = None
self.y_len = None
self.radians = None
self.degrees = None
class _Metrics(object):
"""
An internal subclass for storing the evaluated metrics associated
to a detection.
Attributes
----------
standard_deviation : float
Standard deviation of the output residuals for the group.
variance : float
Variance of the output residuals for the group.
average : float
Mean of the output residuals for the group.
weighted_average : float
Mean of the output residuals weighted by the group data.
"""
def __init__(self):
self.standard_deviation = None
self.variance = None
self.average = None
self.weighted_average = None
def __init__(self, bounds):
"""
Parameters
----------
bounds : list
List of four integers corresponding to minimum row value,
maximum row value, minimum column value, and maximum column
value in this order.
"""
self.image = self._Image(bounds)
self.res = self._Residuals()
self.fit = self._Fit()
self.stats = self._Stats()
self.metrics = self._Metrics()
self.flag = False
class Params(object):
"""
An internal class for specifying the ClustarData parameters.
Attributes
----------
radius_factor : float
Factor mulitplied to radius to determine cropping circle in the
denoising process; must be within the range [0, 1].
chunks : int
Number of chunks to use in a grid; must be an odd number.
quantile : float
Quantile of RMS to determine the noise level; must be within the
range [0, 1].
apply_gradient : bool
Determine if the FITS image should be multiplied by a gradient
in order to elevate central points; similar to multiplying the
FITS image by the associated 'pb' data.
sigma : float
Factor multiplied to noise level to determine the cutoff point,
where values less than this threshold are set to zero.
alpha : float
Determines the size of the ellipse in relation to the chi-squared
distribution; must be within the range (0, 1).
buffer_size : int
Number of points considered outside of the group range. For in-
stance, given a 1-d group range of [10, 20], the algorithm checks
for nonzero points within the range [5, 25] when the 'buffer_size'
is 5.
group_size : int
Minimum number of nonzero points that determines a group.
group_factor : float
Ratio between [0, 1] that specifies the minimum number of
nonzero points that determines a group in relation to the number
of nonzero points in the largest group.
metric : str
Method used for evaluating the groups; must be one of the
following: "standard_deviation", "variance", "average", or
"weighted_average".
threshold : float
Cutoff point that determines which groups are flagged for manual
review, given the specified metric.
split_binary : bool
Experimental; determine whether binary subgroups identified
within a group should be split into individual groups.
subgroup_factor : float
Experimental; ratio between [0, 1] that specifies the subgroup
range in terms of the absolute maximum intensity.
evaluate_peaks : bool
Experimental; determine whether the peaks of the output residuals
should be taken into consideration in the flagging process.
smoothing : int
Experimental; size of window used in the moving average smoothing
process for peak evaluation.
clip : float
Experimental; determines the percentage of tail values that are
trimmed for peak evaluation.
"""
def __init__(self, args):
"""
Parameters
----------
args : dict
Dictionary of keyword arguments; see 'Attributes' for keys.
Raises
------
KeyError
If specified key in 'args' does not match the label of the
specified attributes.
"""
self.radius_factor = 1
self.chunks = 3
self.quantile = 0.5
self.apply_gradient = True
self.sigma = 5
self.alpha = 0.2
self.buffer_size = 10
self.group_size = 50
self.group_factor = 0
self.split_binary = False
self.subgroup_factor = 0.5
self.metric = "variance"
self.threshold = 0.01
self.evaluate_peaks = False
self.smoothing = 5
self.clip = 0.75
self._extract(args)
def _extract(self, args):
for key in args:
if key not in vars(self).keys():
raise KeyError(f"Invalid keyword '{key}' has been " +
"passed into the ClustarData object.")
setattr(self, key, args[key])
def __init__(self, path, **kwargs):
"""
Parameters
----------
path : str
Path to FITS file.
**kwargs : optional
See '~clustar.core.ClustarData.params' for other possible
arguments.
"""
self.path = path
self.params = self.Params(kwargs)
self.groups = []
self.flag = False
self._load_file()
self._setup()
def _load_file(self):
file = astropy.io.fits.open(self.path)
data = file[0].data[0, 0, :, :]
header = file[0].header
self.image = self.Image(data, header)
def _setup(self):
self = denoise.resolve(self)
self = group.arrange(self)
self._build()
if self.params.split_binary:
self = group.detect(self)
self._build()
self._evaluate()
def _build(self):
self = group.rectify(self)
self = group.merge(self)
self = group.refine(self)
self = group.extract(self)
self = group.screen(self)
self = group.calculate(self)
def _evaluate(self):
self = fit.compute_fit(self)
self = fit.compute_ellipse(self)
self = fit.compute_metrics(self)
self = fit.compute_peaks(self)
self = fit.validate(self)
def update(self, **kwargs):
"""
Updates 'Params' object with the specified arguments and executes the
entire pipeline.
Parameters
----------
**kwargs : optional
See '~clustar.core.ClustarData.params' for other possible
arguments.
"""
self.params.extract(kwargs)
self._setup()
def reset(self, **kwargs):
"""
Resets 'Params' object to the default values, then updates 'Params'
object with the specified arguments and executes the entire pipeline.
Parameters
----------
**kwargs : optional
See '~clustar.core.ClustarData.params' for other possible
arguments.
"""
self.params = self.Params(kwargs)
self._setup()
def identify(self, vmin=None, vmax=None, show=True, dpi=180):
"""
Displays the FITS image and identifies the groups in green, orange, or
red rectangles, which are defined as:
1. Green denotes that the group is not flagged for manual review
2. Orange denotes that the group is not flagged for manual review, but
the group is smaller than the beam size.
3. Red denotes that the group is flagged for manual review.
Beam size is the white oval shown on the bottom right corner of
the FITS image.
Parameters
----------
vmin : float, optional
Lower bound for the shown intensities.
vmax : float, optional
Upper bound for the shown intensities.
show : bool, optional
Determines whether the groups should be identified. If false, the
rectangles identifying the groups are not drawn.
dpi : int, optional
Dots per inch.
"""
graph.identify_groups(self, vmin, vmax, show, dpi)
| 2,902
| 0
| 322
|
028b9e4cadf11f8e513a8d672a8eded618c72dec
| 1,017
|
py
|
Python
|
q2/agents/supervised/supervised_agent.py
|
tdb-alcorn/q2
|
ca03e419b1c62660ca65981ff790b70fe979c51f
|
[
"MIT"
] | 3
|
2018-07-03T06:14:58.000Z
|
2018-07-10T22:56:21.000Z
|
q2/agents/supervised/supervised_agent.py
|
tdb-alcorn/q2
|
ca03e419b1c62660ca65981ff790b70fe979c51f
|
[
"MIT"
] | 10
|
2018-07-02T09:02:44.000Z
|
2022-02-09T23:45:31.000Z
|
q2/agents/supervised/supervised_agent.py
|
tdb-alcorn/q2
|
ca03e419b1c62660ca65981ff790b70fe979c51f
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from ..agent import Agent
from abc import ABC, abstractmethod
| 20.755102
| 35
| 0.554572
|
import tensorflow as tf
import numpy as np
from ..agent import Agent
from abc import ABC, abstractmethod
class Supervised(Agent, ABC):
# Not implemented
def act(self,
sess:tf.Session,
state:np.array,
train:bool,
) -> np.array:
raise NotImplementedError()
# Not implemented
def step(self,
sess:tf.Session,
state:np.array,
action:np.array,
reward:float,
next_state:np.array,
done:bool
):
raise NotImplementedError()
@abstractmethod
def learn(self,
sess:tf.Session,
states:np.array,
actions:np.array,
rewards:np.array,
next_states:np.array,
episode_ends:np.array
) -> float:
pass
@abstractmethod
def load(self,
sess:tf.Session,
# saver:tf.train.Saver,
):
pass
@abstractmethod
def save(self,
sess:tf.Session,
# saver:tf.train.Saver,
):
pass
| 640
| 250
| 23
|
a17a4b709c3e8c5ef2969f1908748b99c2a32a43
| 1,171
|
py
|
Python
|
scripts/download/download_from_sra_by_ids_by_fastq_dump.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 10
|
2015-04-28T14:15:04.000Z
|
2021-03-15T00:07:38.000Z
|
scripts/download/download_from_sra_by_ids_by_fastq_dump.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | null | null | null |
scripts/download/download_from_sra_by_ids_by_fastq_dump.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 6
|
2017-03-16T22:38:41.000Z
|
2021-08-11T00:22:52.000Z
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Collections.General import IdList
from RouToolPa.Tools.NCBIToolkit import FastqDump
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ids", action="store", dest="ids",
type=lambda s: s.split(","),
help="Comma-separated list of SRA ids to download")
parser.add_argument("-f", "--id_file", action="store", dest="id_file",
help="File with SRA ids(one per line) to download")
parser.add_argument("-t", "--threads", action="store", dest="threads", type=int, default=1,
help="Number of simultaneous downloads")
parser.add_argument("-o", "--out_dir", action="store", dest="out_dir", default="./",
help="Output directory. Default: current directory")
args = parser.parse_args()
if (not args.ids) and (not args.id_file):
raise ValueError("Both ids and id file were not set")
id_list = IdList(filename=args.id_file) if args.id_file else args.ids
FastqDump.threads = args.threads
FastqDump.parallel_download(id_list, args.out_dir, split_pe=True, retain_original_ids=True)
| 40.37931
| 91
| 0.678907
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Collections.General import IdList
from RouToolPa.Tools.NCBIToolkit import FastqDump
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ids", action="store", dest="ids",
type=lambda s: s.split(","),
help="Comma-separated list of SRA ids to download")
parser.add_argument("-f", "--id_file", action="store", dest="id_file",
help="File with SRA ids(one per line) to download")
parser.add_argument("-t", "--threads", action="store", dest="threads", type=int, default=1,
help="Number of simultaneous downloads")
parser.add_argument("-o", "--out_dir", action="store", dest="out_dir", default="./",
help="Output directory. Default: current directory")
args = parser.parse_args()
if (not args.ids) and (not args.id_file):
raise ValueError("Both ids and id file were not set")
id_list = IdList(filename=args.id_file) if args.id_file else args.ids
FastqDump.threads = args.threads
FastqDump.parallel_download(id_list, args.out_dir, split_pe=True, retain_original_ids=True)
| 0
| 0
| 0
|
b85cf814833c5a45d46362a5401bbc9b0d0199ae
| 8,597
|
py
|
Python
|
problems/intrusion_bulk.py
|
gautelinga/-BERNAISE
|
aa45ae5ccc323d9c61c93542cc327889cae4d0b2
|
[
"MIT"
] | 15
|
2017-07-19T18:33:26.000Z
|
2021-03-25T18:36:47.000Z
|
problems/intrusion_bulk.py
|
gautelinga/-BERNAISE
|
aa45ae5ccc323d9c61c93542cc327889cae4d0b2
|
[
"MIT"
] | 9
|
2017-05-30T16:13:08.000Z
|
2017-08-25T09:09:05.000Z
|
problems/intrusion_bulk.py
|
gautelinga/-BERNAISE
|
aa45ae5ccc323d9c61c93542cc327889cae4d0b2
|
[
"MIT"
] | 7
|
2018-05-08T22:50:15.000Z
|
2020-06-25T13:50:37.000Z
|
import dolfin as df
import os
from . import *
from common.io import mpi_is_root
from common.bcs import Fixed, Pressure
# from ufl import max_value
__author__ = "Asger Bolet; Gaute Linga"
# Left boundary is target domain
def initialize(Lx, Ly, rad_init,
interface_thickness, solutes, restart_folder,
field_to_subspace,
inlet_velocity, front_position_init, concentration_left,
enable_NS, enable_PF, enable_EC, initial_interface, **namespace):
""" Create the initial state.
The initial states are specified in a dict indexed by field. The format
should be
w_init_field[field] = 'df.Function(...)'.
The work dicts w_ and w_1 are automatically initialized from these
functions elsewhere in the code.
Note: You only need to specify the initial states that are nonzero.
"""
w_init_field = dict()
if not restart_folder:
if enable_NS:
try:
subspace = field_to_subspace["u"].collapse()
except:
subspace = field_to_subspace["u"]
w_init_field["u"] = initial_velocity(inlet_velocity,
subspace)
# Phase field
if enable_PF:
w_init_field["phi"] = initial_phasefield(
front_position_init, Lx/2, rad_init, interface_thickness,
field_to_subspace["phi"].collapse(), shape=initial_interface)
if enable_EC:
for solute in solutes:
w_init_field[solute[0]] = initial_phasefield(
front_position_init, Lx/2, rad_init, interface_thickness,
field_to_subspace[solute[0]].collapse(),
shape=initial_interface)
w_init_field[solute[0]].vector()[:] = \
concentration_left*(
- w_init_field[solute[0]].vector()[:]
+ 1.0)/2.0
return w_init_field
def create_bcs(Lx, Ly, inlet_velocity, V_0, solutes,
concentration_left,
enable_NS, enable_PF, enable_EC, **namespace):
""" The boundaries and boundary conditions are defined here. """
boundaries = dict(
right=[Right(Lx)],
left=[Left(0)]
)
# Alocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
bcs["left"] = dict()
bcs["right"] = dict()
inlet_velocity = Fixed((inlet_velocity, 0.))
#pressurein_out = Pressure(0.0)
phi_inlet = Fixed(-1.0)
phi_outlet = Fixed(1.0)
V_left = Fixed(V_0)
V_right = Fixed(0.)
if enable_NS:
bcs["left"]["u"] = inlet_velocity
bcs["right"]["u"] = inlet_velocity
# bcs["left"]["p"] = pressurein_out
bcs_pointwise["p"] = (0., "x[0] < DOLFIN_EPS && x[1] < DOLFIN_EPS")
if enable_PF:
bcs["left"]["phi"] = phi_inlet
bcs["right"]["phi"] = phi_outlet
if enable_EC:
bcs["left"]["V"] = V_left
bcs["right"]["V"] = V_right
for solute in solutes:
bcs["left"][solute[0]] = Fixed(concentration_left)
return boundaries, bcs, bcs_pointwise
def pf_mobility(phi, gamma):
""" Phase field mobility function. """
# return gamma * (phi**2-1.)**2
# func = 1.-phi**2
# return 0.75 * gamma * max_value(func, 0.)
return gamma
def reference(t_0, front_position_init, inlet_velocity, interface_thickness,
**namespace):
""" This contains the analytical reference for convergence analysis. """
expr_str_phi = "tanh((x[0]-x0-u0x*t)/(sqrt(2)*eps))"
expr = dict()
expr["phi"] = df.Expression(expr_str_phi, t=t_0,
x0=front_position_init, u0x=inlet_velocity,
eps=interface_thickness, degree=2)
expr["u"] = df.Expression(("u0x", "0."), u0x=inlet_velocity, degree=2)
expr["p"] = df.Expression("0.", degree=2)
expr["g"] = df.Expression("0.", degree=2) # ?
return expr
| 33.321705
| 80
| 0.552402
|
import dolfin as df
import os
from . import *
from common.io import mpi_is_root
from common.bcs import Fixed, Pressure
# from ufl import max_value
__author__ = "Asger Bolet; Gaute Linga"
class PeriodicBoundary(df.SubDomain):
# Left boundary is target domain
def __init__(self, Ly):
self.Ly = Ly
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[1], 0.) and on_boundary)
def map(self, x, y):
y[0] = x[0]
y[1] = x[1] - self.Ly
class Left(df.SubDomain):
def inside(self, x, on_boundary):
return bool(df.near(x[0], 0.0) and on_boundary)
class Right(df.SubDomain):
def __init__(self, Lx):
self.Lx = Lx
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[0], self.Lx) and on_boundary)
def problem():
info_cyan("Bulk intrusion of a front of one fluid into another.")
# 2, beta in phase 1, beta in phase 2
solutes = [["c_p", 1, 1e-4, 1e-2, 4., 1.],
["c_m", -1, 1e-4, 1e-2, 4., 1.]]
# Format: name : (family, degree, is_vector)
base_elements = dict(u=["Lagrange", 2, True],
p=["Lagrange", 1, False],
phi=["Lagrange", 1, False],
g=["Lagrange", 1, False],
c=["Lagrange", 1, False],
V=["Lagrange", 1, False])
factor = 1.
# Default parameters to be loaded unless starting from checkpoint.
parameters = dict(
solver="basic",
folder="results_intrusion_bulk",
restart_folder=False,
enable_NS=True,
enable_PF=True,
enable_EC=True,
save_intv=5,
stats_intv=5,
checkpoint_intv=50,
tstep=0,
dt=factor*0.08,
t_0=0.,
T=20.,
grid_spacing=factor*1./16,
interface_thickness=factor*0.030,
solutes=solutes,
base_elements=base_elements,
Lx=5.,
Ly=2.,
rad_init=0.25,
front_position_init=2.4,
concentration_left=1.,
#
surface_tension=2.45, # 24.5,
grav_const=0.0,
inlet_velocity=0.1,
comoving_velocity=[0.0, 0.0],
V_0=10.,
friction_coeff=0*100.,
#
pf_mobility_coeff=factor*0.000040,
density=[1., 1.],
viscosity=[1000., .1],
permittivity=[1., 10.],
#
initial_interface="random",
#
use_iterative_solvers=True,
use_pressure_stabilization=False,
)
return parameters
def constrained_domain(Ly, **namespace):
return PeriodicBoundary(Ly)
def mesh(Lx=1, Ly=5, grid_spacing=1./16, **namespace):
return df.RectangleMesh(df.Point(0., 0.), df.Point(Lx, Ly),
int(Lx/grid_spacing), int(Ly/grid_spacing))
def initialize(Lx, Ly, rad_init,
interface_thickness, solutes, restart_folder,
field_to_subspace,
inlet_velocity, front_position_init, concentration_left,
enable_NS, enable_PF, enable_EC, initial_interface, **namespace):
""" Create the initial state.
The initial states are specified in a dict indexed by field. The format
should be
w_init_field[field] = 'df.Function(...)'.
The work dicts w_ and w_1 are automatically initialized from these
functions elsewhere in the code.
Note: You only need to specify the initial states that are nonzero.
"""
w_init_field = dict()
if not restart_folder:
if enable_NS:
try:
subspace = field_to_subspace["u"].collapse()
except:
subspace = field_to_subspace["u"]
w_init_field["u"] = initial_velocity(inlet_velocity,
subspace)
# Phase field
if enable_PF:
w_init_field["phi"] = initial_phasefield(
front_position_init, Lx/2, rad_init, interface_thickness,
field_to_subspace["phi"].collapse(), shape=initial_interface)
if enable_EC:
for solute in solutes:
w_init_field[solute[0]] = initial_phasefield(
front_position_init, Lx/2, rad_init, interface_thickness,
field_to_subspace[solute[0]].collapse(),
shape=initial_interface)
w_init_field[solute[0]].vector()[:] = \
concentration_left*(
- w_init_field[solute[0]].vector()[:]
+ 1.0)/2.0
return w_init_field
def create_bcs(Lx, Ly, inlet_velocity, V_0, solutes,
concentration_left,
enable_NS, enable_PF, enable_EC, **namespace):
""" The boundaries and boundary conditions are defined here. """
boundaries = dict(
right=[Right(Lx)],
left=[Left(0)]
)
# Alocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
bcs["left"] = dict()
bcs["right"] = dict()
inlet_velocity = Fixed((inlet_velocity, 0.))
#pressurein_out = Pressure(0.0)
phi_inlet = Fixed(-1.0)
phi_outlet = Fixed(1.0)
V_left = Fixed(V_0)
V_right = Fixed(0.)
if enable_NS:
bcs["left"]["u"] = inlet_velocity
bcs["right"]["u"] = inlet_velocity
# bcs["left"]["p"] = pressurein_out
bcs_pointwise["p"] = (0., "x[0] < DOLFIN_EPS && x[1] < DOLFIN_EPS")
if enable_PF:
bcs["left"]["phi"] = phi_inlet
bcs["right"]["phi"] = phi_outlet
if enable_EC:
bcs["left"]["V"] = V_left
bcs["right"]["V"] = V_right
for solute in solutes:
bcs["left"][solute[0]] = Fixed(concentration_left)
return boundaries, bcs, bcs_pointwise
def initial_phasefield(x0, y0, rad, eps, function_space, shape="flat"):
if shape == "flat":
expr_str = "tanh((x[0]-x0)/(sqrt(2)*eps))"
elif shape == "sine":
expr_str = "tanh((x[0]-x0-eps*sin(4*x[1]*pi))/(sqrt(2)*eps))"
elif shape == "random":
expr_str = ("tanh((x[0]-x0-"
"0.01*sin(1*x[1]*pi+12)+"
"0.01*sin(2*x[1]*pi+1235)+"
"0.01*sin(3*x[1]*pi+1233)+"
"0.01*sin(4*x[1]*pi+623)+"
"0.01*sin(5*x[1]*pi+234)+"
"0.01*sin(6*x[1]*pi+23445)+"
"0.01*sin(7*x[1]*pi+4234)+"
"0.01*sin(8*x[1]*pi+2346)+"
"0.01*sin(9*x[1]*pi+6544)+"
"0.01*sin(10*x[1]*pi+67)+"
"0.01*sin(11*x[1]*pi+234)+"
"0.01*sin(12*x[1]*pi+4525)+"
"0.01*sin(13*x[1]*pi+756)+"
"0.01*sin(14*x[1]*pi+24)"
")/(sqrt(2)*eps))")
elif shape == "circle":
expr_str = ("tanh(sqrt(2)*(sqrt(pow(x[0]-x0,2)" +
"+pow(x[1]-y0,2))-rad)/eps)")
else:
info_red("Unrecognized shape: " + shape)
exit()
phi_init_expr = df.Expression(expr_str, x0=x0, y0=y0, rad=rad,
eps=eps, degree=2)
phi_init = df.interpolate(phi_init_expr, function_space)
return phi_init
def initial_velocity(inlet_velocity, function_space):
u_init_expr = df.Constant((inlet_velocity, 0.))
u_init = df.interpolate(u_init_expr, function_space)
return u_init
def tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,
field_to_subproblem, subproblems, w_, **namespace):
info_blue("Timestep = {}".format(tstep))
def pf_mobility(phi, gamma):
""" Phase field mobility function. """
# return gamma * (phi**2-1.)**2
# func = 1.-phi**2
# return 0.75 * gamma * max_value(func, 0.)
return gamma
def start_hook(newfolder, **namespace):
statsfile = os.path.join(newfolder, "Statistics/stats.dat")
return dict(statsfile=statsfile)
def reference(t_0, front_position_init, inlet_velocity, interface_thickness,
**namespace):
""" This contains the analytical reference for convergence analysis. """
expr_str_phi = "tanh((x[0]-x0-u0x*t)/(sqrt(2)*eps))"
expr = dict()
expr["phi"] = df.Expression(expr_str_phi, t=t_0,
x0=front_position_init, u0x=inlet_velocity,
eps=interface_thickness, degree=2)
expr["u"] = df.Expression(("u0x", "0."), u0x=inlet_velocity, degree=2)
expr["p"] = df.Expression("0.", degree=2)
expr["g"] = df.Expression("0.", degree=2) # ?
return expr
| 4,168
| 25
| 389
|
48e20ebefdbcecf12574ebbe717f08292ee1fd42
| 5,022
|
py
|
Python
|
test/test_cnvaeon_.py
|
peter88213/aeon3odt
|
f20572177e1e10e2dcc6a130cf3b2b05ed11eed9
|
[
"MIT"
] | null | null | null |
test/test_cnvaeon_.py
|
peter88213/aeon3odt
|
f20572177e1e10e2dcc6a130cf3b2b05ed11eed9
|
[
"MIT"
] | 1
|
2021-10-14T16:00:11.000Z
|
2021-12-22T11:51:05.000Z
|
test/test_cnvaeon_.py
|
peter88213/aeon3odt
|
f20572177e1e10e2dcc6a130cf3b2b05ed11eed9
|
[
"MIT"
] | null | null | null |
""" Python unit tests for the aeon3odt project.
Test suite for aeon3yw.pyw.
For further information see https://github.com/peter88213/aeon3yw
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
from shutil import copyfile
import zipfile
import os
import unittest
import cnvaeon_stub_
# Test environment
# The paths are relative to the "test" directory,
# where this script is placed and executed
TEST_PATH = os.getcwd() + '/../test'
TEST_DATA_PATH = TEST_PATH + '/data/'
TEST_EXEC_PATH = TEST_PATH + '/yw7/'
# To be placed in TEST_DATA_PATH:
NORMAL_CSV = TEST_DATA_PATH + 'normal.csv'
PARTS_CONTENT = TEST_DATA_PATH + 'parts.xml'
CHAPTERS_CONTENT = TEST_DATA_PATH + 'chapters.xml'
SCENES_CONTENT = TEST_DATA_PATH + 'scenes.xml'
CHARACTERS_CONTENT = TEST_DATA_PATH + 'characters.xml'
LOCATIONS_CONTENT = TEST_DATA_PATH + 'locations.xml'
REPORT_CONTENT = TEST_DATA_PATH + 'report.xml'
# Test data
TEST_CSV = TEST_EXEC_PATH + 'yw7 Sample Project.csv'
TEST_PARTS = TEST_EXEC_PATH + 'yw7 Sample Project_chapter_overview.odt'
TEST_CHAPTERS = TEST_EXEC_PATH + 'yw7 Sample Project_brief_synopsis.odt'
TEST_SCENES = TEST_EXEC_PATH + 'yw7 Sample Project_full_synopsis.odt'
TEST_CHARACTERS = TEST_EXEC_PATH + 'yw7 Sample Project_character_sheets.odt'
TEST_LOCATIONS = TEST_EXEC_PATH + 'yw7 Sample Project_location_sheets.odt'
TEST_REPORT = TEST_EXEC_PATH + 'yw7 Sample Project_report.odt'
ODF_CONTENT = 'content.xml'
class NormalOperation(unittest.TestCase):
"""Test case: Normal operation."""
@unittest.skip('No example available')
if __name__ == '__main__':
main()
| 26.712766
| 81
| 0.660295
|
""" Python unit tests for the aeon3odt project.
Test suite for aeon3yw.pyw.
For further information see https://github.com/peter88213/aeon3yw
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
from shutil import copyfile
import zipfile
import os
import unittest
import cnvaeon_stub_
# Test environment
# The paths are relative to the "test" directory,
# where this script is placed and executed
TEST_PATH = os.getcwd() + '/../test'
TEST_DATA_PATH = TEST_PATH + '/data/'
TEST_EXEC_PATH = TEST_PATH + '/yw7/'
# To be placed in TEST_DATA_PATH:
NORMAL_CSV = TEST_DATA_PATH + 'normal.csv'
PARTS_CONTENT = TEST_DATA_PATH + 'parts.xml'
CHAPTERS_CONTENT = TEST_DATA_PATH + 'chapters.xml'
SCENES_CONTENT = TEST_DATA_PATH + 'scenes.xml'
CHARACTERS_CONTENT = TEST_DATA_PATH + 'characters.xml'
LOCATIONS_CONTENT = TEST_DATA_PATH + 'locations.xml'
REPORT_CONTENT = TEST_DATA_PATH + 'report.xml'
# Test data
TEST_CSV = TEST_EXEC_PATH + 'yw7 Sample Project.csv'
TEST_PARTS = TEST_EXEC_PATH + 'yw7 Sample Project_chapter_overview.odt'
TEST_CHAPTERS = TEST_EXEC_PATH + 'yw7 Sample Project_brief_synopsis.odt'
TEST_SCENES = TEST_EXEC_PATH + 'yw7 Sample Project_full_synopsis.odt'
TEST_CHARACTERS = TEST_EXEC_PATH + 'yw7 Sample Project_character_sheets.odt'
TEST_LOCATIONS = TEST_EXEC_PATH + 'yw7 Sample Project_location_sheets.odt'
TEST_REPORT = TEST_EXEC_PATH + 'yw7 Sample Project_report.odt'
ODF_CONTENT = 'content.xml'
def read_file(inputFile):
    """Read a text file and return its content as a string.

    Try UTF-8 first; fall back to the platform default encoding,
    because HTML files exported by a word processor may be ANSI encoded.

    :param inputFile: Path of the file to read.
    :return: The file content as a string.
    :raises OSError: If the file cannot be opened at all.
    """
    try:
        with open(inputFile, 'r', encoding='utf-8') as f:
            return f.read()
    except UnicodeError:
        # Retry only on decoding problems; the original bare "except:"
        # also swallowed FileNotFoundError and masked real errors.
        with open(inputFile, 'r') as f:
            return f.read()
def remove_all_testfiles():
    """Delete all files generated by a previous test run.

    Files that do not exist are silently skipped, so the function is
    safe to call both before and after a test.
    """
    testFiles = [
        TEST_EXEC_PATH + ODF_CONTENT,
        TEST_CSV,
        TEST_PARTS,
        TEST_CHAPTERS,
        TEST_SCENES,
        TEST_CHARACTERS,
        TEST_LOCATIONS,
        TEST_REPORT,
    ]
    for filePath in testFiles:
        try:
            os.remove(filePath)
        except OSError:
            # The file was not created by the test run; nothing to do.
            pass
class NormalOperation(unittest.TestCase):
    """Test case: Normal operation.

    Each test runs the csv-to-odt converter stub on the sample project
    and compares the generated ODF content.xml with a reference file.
    """

    def setUp(self):
        """Create the execution directory and remove leftover test files."""
        try:
            os.mkdir(TEST_EXEC_PATH)
        except FileExistsError:
            # The directory already exists from a previous run.
            pass
        remove_all_testfiles()

    def _convert_and_compare(self, suffix, generatedFile, referenceContent):
        """Run the converter and compare generated with reference content.

        :param suffix: Document type suffix passed to the converter.
        :param generatedFile: Path of the ODT file the converter creates.
        :param referenceContent: Path of the reference content.xml.
        """
        copyfile(NORMAL_CSV, TEST_CSV)
        cnvaeon_stub_.run(TEST_CSV, suffix)
        # An ODT file is a zip archive; extract content.xml for comparison.
        # The "with" statement closes the archive -- the original
        # "myzip.close" (no parentheses) was a no-op.
        with zipfile.ZipFile(generatedFile, 'r') as myzip:
            myzip.extract(ODF_CONTENT, TEST_EXEC_PATH)
        self.assertEqual(read_file(TEST_EXEC_PATH + ODF_CONTENT),
                         read_file(referenceContent))

    def test_chapter_overview(self):
        self._convert_and_compare(
            '_chapter_overview', TEST_PARTS, PARTS_CONTENT)

    def test_brief_synopsis(self):
        self._convert_and_compare(
            '_brief_synopsis', TEST_CHAPTERS, CHAPTERS_CONTENT)

    def test_full_synopsis(self):
        self._convert_and_compare(
            '_full_synopsis', TEST_SCENES, SCENES_CONTENT)

    def test_character_sheets(self):
        self._convert_and_compare(
            '_character_sheets', TEST_CHARACTERS, CHARACTERS_CONTENT)

    @unittest.skip('No example available')
    def test_location_sheets(self):
        self._convert_and_compare(
            '_location_sheets', TEST_LOCATIONS, LOCATIONS_CONTENT)

    def test_report(self):
        self._convert_and_compare(
            '_report', TEST_REPORT, REPORT_CONTENT)

    def tearDown(self):
        """Remove all generated test files."""
        remove_all_testfiles()
def main():
    """Entry point: discover and run all unit tests in this module."""
    unittest.main()
if __name__ == '__main__':
main()
| 3,120
| 0
| 284
|
81743b15ebc1431123671402435d75028b031bce
| 973
|
py
|
Python
|
Code/extraction/tensor.py
|
chrislcs/linear-vegetation-elements
|
8e45a40dca472ca9d5cbb58593d9f5b5bc855bf4
|
[
"Apache-2.0"
] | 3
|
2020-06-16T09:05:54.000Z
|
2020-11-02T06:48:26.000Z
|
Code/extraction/tensor.py
|
chrislcs/linear-vegetation-elements
|
8e45a40dca472ca9d5cbb58593d9f5b5bc855bf4
|
[
"Apache-2.0"
] | null | null | null |
Code/extraction/tensor.py
|
chrislcs/linear-vegetation-elements
|
8e45a40dca472ca9d5cbb58593d9f5b5bc855bf4
|
[
"Apache-2.0"
] | 5
|
2019-01-07T18:03:32.000Z
|
2021-10-07T12:56:39.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Chris Lucas
"""
import numpy as np
def structure_tensor(points):
    """
    Computes the structure tensor of points by computing the eigenvalues
    and eigenvectors of the covariance matrix of a point cloud.

    Parameters
    ----------
    points : (Mx3) array
        X, Y and Z coordinates of points.

    Returns
    -------
    eigenvalues : (1x3) array
        The eigenvalues corresponding to the eigenvectors of the
        covariance matrix, in descending order.
    eigenvectors : (3,3) array
        The eigenvectors of the covariance matrix (as columns, matching
        the eigenvalue order).

    Raises
    ------
    ValueError
        If fewer than four points are given.
    """
    if len(points) > 3:
        cov_mat = np.cov(points, rowvar=False)
        # The covariance matrix is symmetric, so use eigh instead of the
        # general eig: it is faster and guarantees real eigenvalues and
        # orthonormal eigenvectors.
        eigenvalues, eigenvectors = np.linalg.eigh(cov_mat)
        # eigh returns ascending order; reorder to descending.
        order = np.argsort(-eigenvalues)
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]
        return eigenvalues, eigenvectors
    else:
        raise ValueError('Not enough points to compute eigenvalues/vectors.')
| 26.297297
| 77
| 0.63926
|
# -*- coding: utf-8 -*-
"""
@author: Chris Lucas
"""
import numpy as np
def structure_tensor(points):
    """
    Compute the structure tensor of a point cloud.

    The structure tensor is obtained from the eigendecomposition of the
    covariance matrix of the point coordinates.

    Parameters
    ----------
    points : (Mx3) array
        X, Y and Z coordinates of points.

    Returns
    -------
    eigenvalues : (1x3) array
        Eigenvalues of the covariance matrix, largest first.
    eigenvectors : (3,3) array
        The matching eigenvectors (as columns).

    Raises
    ------
    ValueError
        If fewer than four points are given.
    """
    # Guard clause: too few points to form a meaningful covariance.
    if len(points) <= 3:
        raise ValueError('Not enough points to compute eigenvalues/vectors.')
    covariance = np.cov(points, rowvar=False)
    values, vectors = np.linalg.eig(covariance)
    descending = np.argsort(-values)
    return values[descending], vectors[:, descending]
| 0
| 0
| 0
|
5df36d17a510263aaced24f3c376b33c45ba1593
| 17,425
|
py
|
Python
|
sdk/lusid/models/equity_swap_all_of.py
|
bogdanLicaFinbourne/lusid-sdk-python-preview
|
f0f91f992e0417733c4c8abd2674d080a52b6890
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/equity_swap_all_of.py
|
bogdanLicaFinbourne/lusid-sdk-python-preview
|
f0f91f992e0417733c4c8abd2674d080a52b6890
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/equity_swap_all_of.py
|
bogdanLicaFinbourne/lusid-sdk-python-preview
|
f0f91f992e0417733c4c8abd2674d080a52b6890
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2863
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class EquitySwapAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Attribute name -> OpenAPI type (all attributes of this model are
    # required, see required_map below).
    openapi_types = {
        'start_date': 'datetime',
        'maturity_date': 'datetime',
        'code': 'str',
        'equity_flow_conventions': 'FlowConventions',
        'funding_leg': 'InstrumentLeg',
        'include_dividends': 'bool',
        'initial_price': 'float',
        'notional_reset': 'bool',
        'quantity': 'float',
        'underlying_identifier': 'str',
        'instrument_type': 'str'
    }

    # Attribute name -> JSON key used on the wire (camelCase).
    attribute_map = {
        'start_date': 'startDate',
        'maturity_date': 'maturityDate',
        'code': 'code',
        'equity_flow_conventions': 'equityFlowConventions',
        'funding_leg': 'fundingLeg',
        'include_dividends': 'includeDividends',
        'initial_price': 'initialPrice',
        'notional_reset': 'notionalReset',
        'quantity': 'quantity',
        'underlying_identifier': 'underlyingIdentifier',
        'instrument_type': 'instrumentType'
    }

    # Attribute name -> 'required' or 'optional' in the API definition.
    required_map = {
        'start_date': 'required',
        'maturity_date': 'required',
        'code': 'required',
        'equity_flow_conventions': 'required',
        'funding_leg': 'required',
        'include_dividends': 'required',
        'initial_price': 'required',
        'notional_reset': 'required',
        'quantity': 'required',
        'underlying_identifier': 'required',
        'instrument_type': 'required'
    }

    def __init__(self, start_date=None, maturity_date=None, code=None, equity_flow_conventions=None, funding_leg=None, include_dividends=None, initial_price=None, notional_reset=None, quantity=None, underlying_identifier=None, instrument_type=None):  # noqa: E501
        """
        EquitySwapAllOf - a model defined in OpenAPI

        :param start_date: The start date of the EquitySwap (required)
        :type start_date: datetime
        :param maturity_date: The maturity date of the EquitySwap. (required)
        :type maturity_date: datetime
        :param code: The code of the underlying. (required)
        :type code: str
        :param equity_flow_conventions: (required)
        :type equity_flow_conventions: lusid.FlowConventions
        :param funding_leg: (required)
        :type funding_leg: lusid.InstrumentLeg
        :param include_dividends: Dividend inclusion flag, if true dividends are included in the equity leg (total return). (required)
        :type include_dividends: bool
        :param initial_price: The initial equity price of the Equity Swap. (required)
        :type initial_price: float
        :param notional_reset: Notional reset flag, if true the notional of the funding leg is reset at the start of every coupon to match the value of the equity leg (equity price at start of coupon times quantity) (required)
        :type notional_reset: bool
        :param quantity: The quantity or number of shares in the Equity Swap. (required)
        :type quantity: float
        :param underlying_identifier: external market codes and identifiers for the EquitySwap, e.g. RIC. Supported string (enumeration) values are: [LusidInstrumentId, Isin, Sedol, Cusip, ClientInternal, Figi, RIC, QuotePermId]. (required)
        :type underlying_identifier: str
        :param instrument_type: The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket (required)
        :type instrument_type: str

        """  # noqa: E501
        self._start_date = None
        self._maturity_date = None
        self._code = None
        self._equity_flow_conventions = None
        self._funding_leg = None
        self._include_dividends = None
        self._initial_price = None
        self._notional_reset = None
        self._quantity = None
        self._underlying_identifier = None
        self._instrument_type = None
        # No polymorphic discriminator for this model.
        self.discriminator = None

        # Assign through the property setters so that None values are
        # rejected (all attributes are required).
        self.start_date = start_date
        self.maturity_date = maturity_date
        self.code = code
        self.equity_flow_conventions = equity_flow_conventions
        self.funding_leg = funding_leg
        self.include_dividends = include_dividends
        self.initial_price = initial_price
        self.notional_reset = notional_reset
        self.quantity = quantity
        self.underlying_identifier = underlying_identifier
        self.instrument_type = instrument_type

    @property
    def start_date(self):
        """Gets the start_date of this EquitySwapAllOf.  # noqa: E501

        The start date of the EquitySwap  # noqa: E501

        :return: The start_date of this EquitySwapAllOf.  # noqa: E501
        :rtype: datetime
        """
        return self._start_date

    @start_date.setter
    def start_date(self, start_date):
        """Sets the start_date of this EquitySwapAllOf.

        The start date of the EquitySwap  # noqa: E501

        :param start_date: The start_date of this EquitySwapAllOf.  # noqa: E501
        :type: datetime
        :raises ValueError: If start_date is None (required attribute).
        """
        if start_date is None:
            raise ValueError("Invalid value for `start_date`, must not be `None`")  # noqa: E501
        self._start_date = start_date

    @property
    def maturity_date(self):
        """Gets the maturity_date of this EquitySwapAllOf.  # noqa: E501

        The maturity date of the EquitySwap.  # noqa: E501

        :return: The maturity_date of this EquitySwapAllOf.  # noqa: E501
        :rtype: datetime
        """
        return self._maturity_date

    @maturity_date.setter
    def maturity_date(self, maturity_date):
        """Sets the maturity_date of this EquitySwapAllOf.

        The maturity date of the EquitySwap.  # noqa: E501

        :param maturity_date: The maturity_date of this EquitySwapAllOf.  # noqa: E501
        :type: datetime
        :raises ValueError: If maturity_date is None (required attribute).
        """
        if maturity_date is None:
            raise ValueError("Invalid value for `maturity_date`, must not be `None`")  # noqa: E501
        self._maturity_date = maturity_date

    @property
    def code(self):
        """Gets the code of this EquitySwapAllOf.  # noqa: E501

        The code of the underlying.  # noqa: E501

        :return: The code of this EquitySwapAllOf.  # noqa: E501
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this EquitySwapAllOf.

        The code of the underlying.  # noqa: E501

        :param code: The code of this EquitySwapAllOf.  # noqa: E501
        :type: str
        :raises ValueError: If code is None (required attribute).
        """
        if code is None:
            raise ValueError("Invalid value for `code`, must not be `None`")  # noqa: E501
        self._code = code

    @property
    def equity_flow_conventions(self):
        """Gets the equity_flow_conventions of this EquitySwapAllOf.  # noqa: E501


        :return: The equity_flow_conventions of this EquitySwapAllOf.  # noqa: E501
        :rtype: FlowConventions
        """
        return self._equity_flow_conventions

    @equity_flow_conventions.setter
    def equity_flow_conventions(self, equity_flow_conventions):
        """Sets the equity_flow_conventions of this EquitySwapAllOf.


        :param equity_flow_conventions: The equity_flow_conventions of this EquitySwapAllOf.  # noqa: E501
        :type: FlowConventions
        :raises ValueError: If equity_flow_conventions is None (required attribute).
        """
        if equity_flow_conventions is None:
            raise ValueError("Invalid value for `equity_flow_conventions`, must not be `None`")  # noqa: E501
        self._equity_flow_conventions = equity_flow_conventions

    @property
    def funding_leg(self):
        """Gets the funding_leg of this EquitySwapAllOf.  # noqa: E501


        :return: The funding_leg of this EquitySwapAllOf.  # noqa: E501
        :rtype: InstrumentLeg
        """
        return self._funding_leg

    @funding_leg.setter
    def funding_leg(self, funding_leg):
        """Sets the funding_leg of this EquitySwapAllOf.


        :param funding_leg: The funding_leg of this EquitySwapAllOf.  # noqa: E501
        :type: InstrumentLeg
        :raises ValueError: If funding_leg is None (required attribute).
        """
        if funding_leg is None:
            raise ValueError("Invalid value for `funding_leg`, must not be `None`")  # noqa: E501
        self._funding_leg = funding_leg

    @property
    def include_dividends(self):
        """Gets the include_dividends of this EquitySwapAllOf.  # noqa: E501

        Dividend inclusion flag, if true dividends are included in the equity leg (total return).  # noqa: E501

        :return: The include_dividends of this EquitySwapAllOf.  # noqa: E501
        :rtype: bool
        """
        return self._include_dividends

    @include_dividends.setter
    def include_dividends(self, include_dividends):
        """Sets the include_dividends of this EquitySwapAllOf.

        Dividend inclusion flag, if true dividends are included in the equity leg (total return).  # noqa: E501

        :param include_dividends: The include_dividends of this EquitySwapAllOf.  # noqa: E501
        :type: bool
        :raises ValueError: If include_dividends is None (required attribute).
        """
        if include_dividends is None:
            raise ValueError("Invalid value for `include_dividends`, must not be `None`")  # noqa: E501
        self._include_dividends = include_dividends

    @property
    def initial_price(self):
        """Gets the initial_price of this EquitySwapAllOf.  # noqa: E501

        The initial equity price of the Equity Swap.  # noqa: E501

        :return: The initial_price of this EquitySwapAllOf.  # noqa: E501
        :rtype: float
        """
        return self._initial_price

    @initial_price.setter
    def initial_price(self, initial_price):
        """Sets the initial_price of this EquitySwapAllOf.

        The initial equity price of the Equity Swap.  # noqa: E501

        :param initial_price: The initial_price of this EquitySwapAllOf.  # noqa: E501
        :type: float
        :raises ValueError: If initial_price is None (required attribute).
        """
        if initial_price is None:
            raise ValueError("Invalid value for `initial_price`, must not be `None`")  # noqa: E501
        self._initial_price = initial_price

    @property
    def notional_reset(self):
        """Gets the notional_reset of this EquitySwapAllOf.  # noqa: E501

        Notional reset flag, if true the notional of the funding leg is reset at the start of every coupon to match the value of the equity leg (equity price at start of coupon times quantity)  # noqa: E501

        :return: The notional_reset of this EquitySwapAllOf.  # noqa: E501
        :rtype: bool
        """
        return self._notional_reset

    @notional_reset.setter
    def notional_reset(self, notional_reset):
        """Sets the notional_reset of this EquitySwapAllOf.

        Notional reset flag, if true the notional of the funding leg is reset at the start of every coupon to match the value of the equity leg (equity price at start of coupon times quantity)  # noqa: E501

        :param notional_reset: The notional_reset of this EquitySwapAllOf.  # noqa: E501
        :type: bool
        :raises ValueError: If notional_reset is None (required attribute).
        """
        if notional_reset is None:
            raise ValueError("Invalid value for `notional_reset`, must not be `None`")  # noqa: E501
        self._notional_reset = notional_reset

    @property
    def quantity(self):
        """Gets the quantity of this EquitySwapAllOf.  # noqa: E501

        The quantity or number of shares in the Equity Swap.  # noqa: E501

        :return: The quantity of this EquitySwapAllOf.  # noqa: E501
        :rtype: float
        """
        return self._quantity

    @quantity.setter
    def quantity(self, quantity):
        """Sets the quantity of this EquitySwapAllOf.

        The quantity or number of shares in the Equity Swap.  # noqa: E501

        :param quantity: The quantity of this EquitySwapAllOf.  # noqa: E501
        :type: float
        :raises ValueError: If quantity is None (required attribute).
        """
        if quantity is None:
            raise ValueError("Invalid value for `quantity`, must not be `None`")  # noqa: E501
        self._quantity = quantity

    @property
    def underlying_identifier(self):
        """Gets the underlying_identifier of this EquitySwapAllOf.  # noqa: E501

        external market codes and identifiers for the EquitySwap, e.g. RIC. Supported string (enumeration) values are: [LusidInstrumentId, Isin, Sedol, Cusip, ClientInternal, Figi, RIC, QuotePermId].  # noqa: E501

        :return: The underlying_identifier of this EquitySwapAllOf.  # noqa: E501
        :rtype: str
        """
        return self._underlying_identifier

    @underlying_identifier.setter
    def underlying_identifier(self, underlying_identifier):
        """Sets the underlying_identifier of this EquitySwapAllOf.

        external market codes and identifiers for the EquitySwap, e.g. RIC. Supported string (enumeration) values are: [LusidInstrumentId, Isin, Sedol, Cusip, ClientInternal, Figi, RIC, QuotePermId].  # noqa: E501

        :param underlying_identifier: The underlying_identifier of this EquitySwapAllOf.  # noqa: E501
        :type: str
        :raises ValueError: If underlying_identifier is None (required attribute).
        """
        if underlying_identifier is None:
            raise ValueError("Invalid value for `underlying_identifier`, must not be `None`")  # noqa: E501
        self._underlying_identifier = underlying_identifier

    @property
    def instrument_type(self):
        """Gets the instrument_type of this EquitySwapAllOf.  # noqa: E501

        The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket  # noqa: E501

        :return: The instrument_type of this EquitySwapAllOf.  # noqa: E501
        :rtype: str
        """
        return self._instrument_type

    @instrument_type.setter
    def instrument_type(self, instrument_type):
        """Sets the instrument_type of this EquitySwapAllOf.

        The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket  # noqa: E501

        :param instrument_type: The instrument_type of this EquitySwapAllOf.  # noqa: E501
        :type: str
        :raises ValueError: If instrument_type is None or not one of the
            allowed enumeration values.
        """
        if instrument_type is None:
            raise ValueError("Invalid value for `instrument_type`, must not be `None`")  # noqa: E501
        # Closed enumeration of instrument types accepted by the API.
        allowed_values = ["QuotedSecurity", "InterestRateSwap", "FxForward", "Future", "ExoticInstrument", "FxOption", "CreditDefaultSwap", "InterestRateSwaption", "Bond", "EquityOption", "FixedLeg", "FloatingLeg", "BespokeCashflowLeg", "Unknown", "TermDeposit", "ContractForDifference", "EquitySwap", "CashPerpetual", "CashSettled", "CdsIndex", "Basket"]  # noqa: E501
        if instrument_type not in allowed_values:
            raise ValueError(
                "Invalid value for `instrument_type` ({0}), must be one of {1}"  # noqa: E501
                .format(instrument_type, allowed_values)
            )
        self._instrument_type = instrument_type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # Recursively serialize nested models (anything exposing
            # to_dict), including those inside lists and dicts.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EquitySwapAllOf):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 38.722222
| 369
| 0.652626
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2863
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class EquitySwapAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'start_date': 'datetime',
'maturity_date': 'datetime',
'code': 'str',
'equity_flow_conventions': 'FlowConventions',
'funding_leg': 'InstrumentLeg',
'include_dividends': 'bool',
'initial_price': 'float',
'notional_reset': 'bool',
'quantity': 'float',
'underlying_identifier': 'str',
'instrument_type': 'str'
}
attribute_map = {
'start_date': 'startDate',
'maturity_date': 'maturityDate',
'code': 'code',
'equity_flow_conventions': 'equityFlowConventions',
'funding_leg': 'fundingLeg',
'include_dividends': 'includeDividends',
'initial_price': 'initialPrice',
'notional_reset': 'notionalReset',
'quantity': 'quantity',
'underlying_identifier': 'underlyingIdentifier',
'instrument_type': 'instrumentType'
}
required_map = {
'start_date': 'required',
'maturity_date': 'required',
'code': 'required',
'equity_flow_conventions': 'required',
'funding_leg': 'required',
'include_dividends': 'required',
'initial_price': 'required',
'notional_reset': 'required',
'quantity': 'required',
'underlying_identifier': 'required',
'instrument_type': 'required'
}
def __init__(self, start_date=None, maturity_date=None, code=None, equity_flow_conventions=None, funding_leg=None, include_dividends=None, initial_price=None, notional_reset=None, quantity=None, underlying_identifier=None, instrument_type=None): # noqa: E501
"""
EquitySwapAllOf - a model defined in OpenAPI
:param start_date: The start date of the EquitySwap (required)
:type start_date: datetime
:param maturity_date: The maturity date of the EquitySwap. (required)
:type maturity_date: datetime
:param code: The code of the underlying. (required)
:type code: str
:param equity_flow_conventions: (required)
:type equity_flow_conventions: lusid.FlowConventions
:param funding_leg: (required)
:type funding_leg: lusid.InstrumentLeg
:param include_dividends: Dividend inclusion flag, if true dividends are included in the equity leg (total return). (required)
:type include_dividends: bool
:param initial_price: The initial equity price of the Equity Swap. (required)
:type initial_price: float
:param notional_reset: Notional reset flag, if true the notional of the funding leg is reset at the start of every coupon to match the value of the equity leg (equity price at start of coupon times quantity) (required)
:type notional_reset: bool
:param quantity: The quantity or number of shares in the Equity Swap. (required)
:type quantity: float
:param underlying_identifier: external market codes and identifiers for the EquitySwap, e.g. RIC. Supported string (enumeration) values are: [LusidInstrumentId, Isin, Sedol, Cusip, ClientInternal, Figi, RIC, QuotePermId]. (required)
:type underlying_identifier: str
:param instrument_type: The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket (required)
:type instrument_type: str
""" # noqa: E501
self._start_date = None
self._maturity_date = None
self._code = None
self._equity_flow_conventions = None
self._funding_leg = None
self._include_dividends = None
self._initial_price = None
self._notional_reset = None
self._quantity = None
self._underlying_identifier = None
self._instrument_type = None
self.discriminator = None
self.start_date = start_date
self.maturity_date = maturity_date
self.code = code
self.equity_flow_conventions = equity_flow_conventions
self.funding_leg = funding_leg
self.include_dividends = include_dividends
self.initial_price = initial_price
self.notional_reset = notional_reset
self.quantity = quantity
self.underlying_identifier = underlying_identifier
self.instrument_type = instrument_type
@property
def start_date(self):
"""Gets the start_date of this EquitySwapAllOf. # noqa: E501
The start date of the EquitySwap # noqa: E501
:return: The start_date of this EquitySwapAllOf. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this EquitySwapAllOf.
The start date of the EquitySwap # noqa: E501
:param start_date: The start_date of this EquitySwapAllOf. # noqa: E501
:type: datetime
"""
if start_date is None:
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def maturity_date(self):
"""Gets the maturity_date of this EquitySwapAllOf. # noqa: E501
The maturity date of the EquitySwap. # noqa: E501
:return: The maturity_date of this EquitySwapAllOf. # noqa: E501
:rtype: datetime
"""
return self._maturity_date
@maturity_date.setter
def maturity_date(self, maturity_date):
"""Sets the maturity_date of this EquitySwapAllOf.
The maturity date of the EquitySwap. # noqa: E501
:param maturity_date: The maturity_date of this EquitySwapAllOf. # noqa: E501
:type: datetime
"""
if maturity_date is None:
raise ValueError("Invalid value for `maturity_date`, must not be `None`") # noqa: E501
self._maturity_date = maturity_date
@property
def code(self):
"""Gets the code of this EquitySwapAllOf. # noqa: E501
The code of the underlying. # noqa: E501
:return: The code of this EquitySwapAllOf. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this EquitySwapAllOf.
The code of the underlying. # noqa: E501
:param code: The code of this EquitySwapAllOf. # noqa: E501
:type: str
"""
if code is None:
raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501
self._code = code
@property
def equity_flow_conventions(self):
"""Gets the equity_flow_conventions of this EquitySwapAllOf. # noqa: E501
:return: The equity_flow_conventions of this EquitySwapAllOf. # noqa: E501
:rtype: FlowConventions
"""
return self._equity_flow_conventions
@equity_flow_conventions.setter
def equity_flow_conventions(self, equity_flow_conventions):
"""Sets the equity_flow_conventions of this EquitySwapAllOf.
:param equity_flow_conventions: The equity_flow_conventions of this EquitySwapAllOf. # noqa: E501
:type: FlowConventions
"""
if equity_flow_conventions is None:
raise ValueError("Invalid value for `equity_flow_conventions`, must not be `None`") # noqa: E501
self._equity_flow_conventions = equity_flow_conventions
@property
def funding_leg(self):
"""Gets the funding_leg of this EquitySwapAllOf. # noqa: E501
:return: The funding_leg of this EquitySwapAllOf. # noqa: E501
:rtype: InstrumentLeg
"""
return self._funding_leg
@funding_leg.setter
def funding_leg(self, funding_leg):
"""Sets the funding_leg of this EquitySwapAllOf.
:param funding_leg: The funding_leg of this EquitySwapAllOf. # noqa: E501
:type: InstrumentLeg
"""
if funding_leg is None:
raise ValueError("Invalid value for `funding_leg`, must not be `None`") # noqa: E501
self._funding_leg = funding_leg
@property
def include_dividends(self):
"""Gets the include_dividends of this EquitySwapAllOf. # noqa: E501
Dividend inclusion flag, if true dividends are included in the equity leg (total return). # noqa: E501
:return: The include_dividends of this EquitySwapAllOf. # noqa: E501
:rtype: bool
"""
return self._include_dividends
@include_dividends.setter
def include_dividends(self, include_dividends):
"""Sets the include_dividends of this EquitySwapAllOf.
Dividend inclusion flag, if true dividends are included in the equity leg (total return). # noqa: E501
:param include_dividends: The include_dividends of this EquitySwapAllOf. # noqa: E501
:type: bool
"""
if include_dividends is None:
raise ValueError("Invalid value for `include_dividends`, must not be `None`") # noqa: E501
self._include_dividends = include_dividends
@property
def initial_price(self):
"""Gets the initial_price of this EquitySwapAllOf. # noqa: E501
The initial equity price of the Equity Swap. # noqa: E501
:return: The initial_price of this EquitySwapAllOf. # noqa: E501
:rtype: float
"""
return self._initial_price
@initial_price.setter
def initial_price(self, initial_price):
"""Sets the initial_price of this EquitySwapAllOf.
The initial equity price of the Equity Swap. # noqa: E501
:param initial_price: The initial_price of this EquitySwapAllOf. # noqa: E501
:type: float
"""
if initial_price is None:
raise ValueError("Invalid value for `initial_price`, must not be `None`") # noqa: E501
self._initial_price = initial_price
@property
def notional_reset(self):
"""Gets the notional_reset of this EquitySwapAllOf. # noqa: E501
Notional reset flag, if true the notional of the funding leg is reset at the start of every coupon to match the value of the equity leg (equity price at start of coupon times quantity) # noqa: E501
:return: The notional_reset of this EquitySwapAllOf. # noqa: E501
:rtype: bool
"""
return self._notional_reset
@notional_reset.setter
def notional_reset(self, notional_reset):
"""Sets the notional_reset of this EquitySwapAllOf.
Notional reset flag, if true the notional of the funding leg is reset at the start of every coupon to match the value of the equity leg (equity price at start of coupon times quantity) # noqa: E501
:param notional_reset: The notional_reset of this EquitySwapAllOf. # noqa: E501
:type: bool
"""
if notional_reset is None:
raise ValueError("Invalid value for `notional_reset`, must not be `None`") # noqa: E501
self._notional_reset = notional_reset
@property
def quantity(self):
"""Gets the quantity of this EquitySwapAllOf. # noqa: E501
The quantity or number of shares in the Equity Swap. # noqa: E501
:return: The quantity of this EquitySwapAllOf. # noqa: E501
:rtype: float
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this EquitySwapAllOf.
The quantity or number of shares in the Equity Swap. # noqa: E501
:param quantity: The quantity of this EquitySwapAllOf. # noqa: E501
:type: float
"""
if quantity is None:
raise ValueError("Invalid value for `quantity`, must not be `None`") # noqa: E501
self._quantity = quantity
    @property
    def underlying_identifier(self):
        """Gets the underlying_identifier of this EquitySwapAllOf.  # noqa: E501

        external market codes and identifiers for the EquitySwap, e.g. RIC. Supported string (enumeration) values are: [LusidInstrumentId, Isin, Sedol, Cusip, ClientInternal, Figi, RIC, QuotePermId].  # noqa: E501

        :return: The underlying_identifier of this EquitySwapAllOf.  # noqa: E501
        :rtype: str
        """
        # Backing field; guaranteed non-None because the setter rejects None.
        return self._underlying_identifier
@underlying_identifier.setter
def underlying_identifier(self, underlying_identifier):
"""Sets the underlying_identifier of this EquitySwapAllOf.
external market codes and identifiers for the EquitySwap, e.g. RIC. Supported string (enumeration) values are: [LusidInstrumentId, Isin, Sedol, Cusip, ClientInternal, Figi, RIC, QuotePermId]. # noqa: E501
:param underlying_identifier: The underlying_identifier of this EquitySwapAllOf. # noqa: E501
:type: str
"""
if underlying_identifier is None:
raise ValueError("Invalid value for `underlying_identifier`, must not be `None`") # noqa: E501
self._underlying_identifier = underlying_identifier
    @property
    def instrument_type(self):
        """Gets the instrument_type of this EquitySwapAllOf.  # noqa: E501

        The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket  # noqa: E501

        :return: The instrument_type of this EquitySwapAllOf.  # noqa: E501
        :rtype: str
        """
        # Backing field; membership in the allowed set is enforced by the setter.
        return self._instrument_type
@instrument_type.setter
def instrument_type(self, instrument_type):
"""Sets the instrument_type of this EquitySwapAllOf.
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket # noqa: E501
:param instrument_type: The instrument_type of this EquitySwapAllOf. # noqa: E501
:type: str
"""
if instrument_type is None:
raise ValueError("Invalid value for `instrument_type`, must not be `None`") # noqa: E501
allowed_values = ["QuotedSecurity", "InterestRateSwap", "FxForward", "Future", "ExoticInstrument", "FxOption", "CreditDefaultSwap", "InterestRateSwaption", "Bond", "EquityOption", "FixedLeg", "FloatingLeg", "BespokeCashflowLeg", "Unknown", "TermDeposit", "ContractForDifference", "EquitySwap", "CashPerpetual", "CashSettled", "CdsIndex", "Basket"] # noqa: E501
if instrument_type not in allowed_values:
raise ValueError(
"Invalid value for `instrument_type` ({0}), must be one of {1}" # noqa: E501
.format(instrument_type, allowed_values)
)
self._instrument_type = instrument_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        # pprint gives a stable, readable rendering of the dict form.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegate to to_str() so repr and str output stay consistent.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EquitySwapAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 0
| 0
| 0
|
48cb6a6aa2374a2429827425a598c6f0045d652a
| 4,008
|
py
|
Python
|
Choruslib/blat.py
|
kramundson/Chorus2
|
43be15e082f346dc45b94a7fadb141b3a6f8c37c
|
[
"MIT"
] | 11
|
2019-05-23T13:28:39.000Z
|
2022-02-10T00:24:54.000Z
|
Choruslib/blat.py
|
kramundson/Chorus2
|
43be15e082f346dc45b94a7fadb141b3a6f8c37c
|
[
"MIT"
] | 3
|
2019-03-15T01:47:53.000Z
|
2021-12-16T07:41:08.000Z
|
Choruslib/blat.py
|
kramundson/Chorus2
|
43be15e082f346dc45b94a7fadb141b3a6f8c37c
|
[
"MIT"
] | 11
|
2019-02-28T08:17:02.000Z
|
2021-12-13T11:44:02.000Z
|
from __future__ import print_function
import sys
import subprocess
import signal
import time
import os
import shlex
from Choruslib.Blatres import Blatres
def start_gfServer(file2bit, gfspath, stepsize=7, blatport=10010):
    '''
    Launch a BLAT gfServer on 127.0.0.1 serving the given 2bit genome.

    :param file2bit: path to the genome .2bit file to serve
    :param gfspath: path to the gfServer executable
    :param stepsize: tile step size passed as gfServer -stepSize
    :param blatport: TCP port for the server to listen on
    :return: blat gfserver pid
    '''
    # Quote the executable path so paths containing spaces survive shlex.split.
    gfserverpath = '"' + os.path.realpath(gfspath) + '"'
    genomefile = os.path.realpath(file2bit)
    genomefilename = os.path.basename(genomefile)
    genomefilepath = os.path.dirname(genomefile)
    blatcmd = gfserverpath + " start 127.0.0.1 "\
        +str(blatport) + " -maxDnaHits=20 -stepSize=" \
        + str(stepsize) + ' ' + genomefilename
    blatcmd = shlex.split(blatcmd)
    print("start gfServer: ", blatcmd)
    # Run from the genome's directory so gfServer can resolve the bare filename.
    p = subprocess.Popen(blatcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         cwd=genomefilepath)
    print(p.pid)
    return p.pid
def blat_searchpb(gfcpath, sequence, blatport=10010, minIdentity=75, file2bit='/'):
    '''
    Query a running gfServer with a single probe sequence via gfClient.

    :param gfcpath: path to the gfClient executable
    :param sequence: probe sequence (also used as its own FASTA header)
    :param blatport: TCP port of the gfServer on 127.0.0.1
    :param minIdentity: minimum percent identity passed to gfClient
    :param file2bit: genome 2bit file; its directory is used as gfClient's cwd
    :return: Blatres instance wrapping the headerless psl output lines
    '''
    # Quote the executable path so paths containing spaces survive shlex.split.
    gfclientpath = '"' + os.path.realpath(gfcpath) + '"'
    genomepath = os.path.dirname(file2bit)
    # The probe doubles as its own FASTA header so hits can be matched back.
    queryseq = ">%s\n%s" % (sequence, sequence)
    searchcmd = gfclientpath + " -minIdentity=" + str(minIdentity) \
        + " -nohead 127.0.0.1 " + str(blatport) + " . /dev/stdin /dev/stdout"
    searchcmd = shlex.split(searchcmd)
    p = subprocess.Popen(searchcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         cwd=genomepath)
    p.stdin.write(queryseq.encode('ascii'))
    p.stdin.close()
    blatlines = list()
    for line in p.stdout:
        # gfClient echoes this banner when told to write to /dev/stdout; skip it.
        if line == b"Output is in /dev/stdout\n":
            continue
        blatlines.append(line.decode("utf-8"))
    # Removed unused local `num` kept over from blat_search_sequence.
    return Blatres(seq=sequence, blatlines=blatlines)
| 21.901639
| 138
| 0.621507
|
from __future__ import print_function
import sys
import subprocess
import signal
import time
import os
import shlex
from Choruslib.Blatres import Blatres
def start_gfServer(file2bit, gfspath, stepsize=7, blatport=10010):
    '''
    Launch a BLAT gfServer on 127.0.0.1 serving the given 2bit genome.

    :param file2bit: path to the genome .2bit file to serve
    :param gfspath: path to the gfServer executable
    :param stepsize: tile step size passed as gfServer -stepSize
    :param blatport: TCP port for the server to listen on
    :return: blat gfserver pid
    '''
    # Quote the executable path so paths containing spaces survive shlex.split.
    gfserverpath = '"' + os.path.realpath(gfspath) + '"'
    genomefile = os.path.realpath(file2bit)
    genomefilename = os.path.basename(genomefile)
    genomefilepath = os.path.dirname(genomefile)
    blatcmd = gfserverpath + " start 127.0.0.1 "\
        +str(blatport) + " -maxDnaHits=20 -stepSize=" \
        + str(stepsize) + ' ' + genomefilename
    blatcmd = shlex.split(blatcmd)
    print("start gfServer: ", blatcmd)
    # Run from the genome's directory so gfServer can resolve the bare filename.
    p = subprocess.Popen(blatcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         cwd=genomefilepath)
    print(p.pid)
    return p.pid
def check_gfServer_running():
    """Return True if a gfServer process appears in the `ps -A` listing.

    :return: True when any line of ``ps -A`` output contains ``gfServer``,
        False otherwise (including when ``ps`` produces no output).
    """
    p = subprocess.Popen('ps -A', shell=True, stdout=subprocess.PIPE)
    lines = p.stdout.readlines()
    for line in lines:
        # p.stdout yields bytes on Python 3, so compare against a bytes
        # literal; the original `'gfServer' in line` raised TypeError.
        # (stop_gfServer below already uses b'gfServer' correctly.)
        if b'gfServer' in line:
            return True
    return False
def stop_gfServer(p=None):
    """
    Terminate gfServer processes with SIGTERM.

    :param p: object exposing a ``.pid`` attribute (e.g. a Popen handle);
        when None, every process whose ``ps -A`` line contains ``gfServer``
        is killed instead.
    """
    # NOTE(review): start_gfServer returns a bare pid (int) but this branch
    # reads p.pid, which would fail for an int — confirm what callers pass.
    if p is not None:
        os.kill(p.pid, signal.SIGTERM)
        time.sleep(5)
    else:
        pids = []
        p = subprocess.Popen('ps -A', shell=True, stdout=subprocess.PIPE)
        lines = p.stdout.readlines()
        for line in lines:
            if b'gfServer' in line:
                pids.append(int(line.split()[0]))
        for pid in pids:
            os.kill(pid,signal.SIGTERM)
        # Give the servers time to shut down before returning.
        time.sleep(10)
def blat_search_sequence(gfcpath, sequence, blatport=10010, minIdentity=75, file2bit='/'):
    """
    Query a running gfServer with one sequence and count the psl hit lines.

    :param gfcpath: path to the gfClient executable
    :param sequence: query sequence (also used as its own FASTA header)
    :param blatport: TCP port of the gfServer on 127.0.0.1
    :param minIdentity: minimum percent identity passed to gfClient
    :param file2bit: genome 2bit file; its directory is used as gfClient's cwd
    :return: number of alignment lines reported by gfClient
    """
    gfclientpath = '"'+os.path.realpath(gfcpath)+'"'
    genomepath = os.path.dirname(file2bit)
    # searchcmd = gfclientpath + " -minIdentity="+str(75)+" -nohead 127.0.0.1 "+str(10010)+" " + "." + " /dev/stdin /dev/stdout"
    queryseq = ">%s\n%s" % (sequence, sequence)
    searchcmd = gfclientpath + " -minIdentity="+str(minIdentity)+" -nohead 127.0.0.1 "+str(blatport)+" " + "." + " /dev/stdin /dev/stdout"
    num = 0
    searchcmd = shlex.split(searchcmd)
    # print(searchcmd)
    p = subprocess.Popen(searchcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=genomepath)
    p.stdin.write(queryseq.encode('ascii'))
    p.stdin.close()
    for line in p.stdout:
        # gfClient echoes this banner when told to write to /dev/stdout; skip it.
        if line == b"Output is in /dev/stdout\n":
            continue
        num += 1
    return num
def blat_search(blatpath, sequence, minIdentity, samplename, file2bit='/'):
    """
    Run standalone blat of ``sequence`` against ``file2bit``, writing
    ``<samplename>.psl``.

    :param blatpath: path to the blat executable
    :param sequence: query sequence file passed to blat
    :param minIdentity: minimum percent identity passed to blat
    :param samplename: basename for the output psl file
    :param file2bit: genome database file given to blat
    :return: blat's process exit code
    """
    pslfile = samplename+'.psl'
    # HACK: command is assembled by string concatenation and executed with
    # shell=True — unsafe if any argument contains shell metacharacters.
    blatcmd = blatpath + ' ' +file2bit + ' ' + sequence + ' ' + " -minIdentity="+str(minIdentity) + ' ' + pslfile
    print("blat", blatcmd)
    p = subprocess.call(blatcmd, shell=True)
    return p
def blat_searchpb(gfcpath, sequence, blatport=10010, minIdentity=75, file2bit='/'):
    '''
    Query a running gfServer with a single probe sequence via gfClient.

    :param gfcpath: path to the gfClient executable
    :param sequence: probe sequence (also used as its own FASTA header)
    :param blatport: TCP port of the gfServer on 127.0.0.1
    :param minIdentity: minimum percent identity passed to gfClient
    :param file2bit: genome 2bit file; its directory is used as gfClient's cwd
    :return: Blatres instance wrapping the headerless psl output lines
    '''
    # Quote the executable path so paths containing spaces survive shlex.split.
    gfclientpath = '"' + os.path.realpath(gfcpath) + '"'
    genomepath = os.path.dirname(file2bit)
    # The probe doubles as its own FASTA header so hits can be matched back.
    queryseq = ">%s\n%s" % (sequence, sequence)
    searchcmd = gfclientpath + " -minIdentity=" + str(minIdentity) \
        + " -nohead 127.0.0.1 " + str(blatport) + " . /dev/stdin /dev/stdout"
    searchcmd = shlex.split(searchcmd)
    p = subprocess.Popen(searchcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         cwd=genomepath)
    p.stdin.write(queryseq.encode('ascii'))
    p.stdin.close()
    blatlines = list()
    for line in p.stdout:
        # gfClient echoes this banner when told to write to /dev/stdout; skip it.
        if line == b"Output is in /dev/stdout\n":
            continue
        blatlines.append(line.decode("utf-8"))
    # Removed unused local `num` kept over from blat_search_sequence.
    return Blatres(seq=sequence, blatlines=blatlines)
def build2bit(fato2bitpath, genomefile):
    """Placeholder for building a .2bit genome file — not implemented."""
    pass
| 1,837
| 0
| 115
|
c459274929ba35904b278fdbc79e4f6bc3bee160
| 19,800
|
py
|
Python
|
api/migrations/versions/af9c317d2c92_.py
|
NikolaSiplakova/Baobab
|
180cd3cb492ed47d38ca0b473572fad0ac6f604b
|
[
"Apache-2.0"
] | 52
|
2019-01-10T16:04:26.000Z
|
2022-02-10T00:55:59.000Z
|
api/migrations/versions/af9c317d2c92_.py
|
NikolaSiplakova/Baobab
|
180cd3cb492ed47d38ca0b473572fad0ac6f604b
|
[
"Apache-2.0"
] | 535
|
2019-01-08T21:24:01.000Z
|
2022-02-27T15:24:06.000Z
|
api/migrations/versions/af9c317d2c92_.py
|
NikolaSiplakova/Baobab
|
180cd3cb492ed47d38ca0b473572fad0ac6f604b
|
[
"Apache-2.0"
] | 36
|
2019-01-10T16:09:15.000Z
|
2021-06-28T21:02:47.000Z
|
"""empty message
Revision ID: af9c317d2c92
Revises: 245d12695c69
Create Date: 2020-03-12 08:49:36.009020
"""
# revision identifiers, used by Alembic.
revision = 'af9c317d2c92'
down_revision = '245d12695c69'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm
from app import db
from enum import Enum
import datetime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
| 38.076923
| 400
| 0.677222
|
"""empty message
Revision ID: af9c317d2c92
Revises: 245d12695c69
Create Date: 2020-03-12 08:49:36.009020
"""
# revision identifiers, used by Alembic.
revision = 'af9c317d2c92'
down_revision = '245d12695c69'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm
from app import db
from enum import Enum
import datetime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Organisation(Base):
    """Migration-local snapshot of the `organisation` table (decoupled from app models)."""
    __tablename__ = "organisation"
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    system_name = db.Column(db.String(50), nullable=False)
    small_logo = db.Column(db.String(100), nullable=False)
    large_logo = db.Column(db.String(100), nullable=False)
    domain = db.Column(db.String(100), nullable=False)
    url = db.Column(db.String(100), nullable=False)
    email_from = db.Column(db.String(100), nullable=True)
    system_url = db.Column(db.String(100), nullable=False)
    privacy_policy = db.Column(db.String(100), nullable=False)

    def __init__(self, name, system_name, small_logo, large_logo, domain, url, email_from, system_url, privacy_policy):
        self.name = name
        self.small_logo = small_logo
        self.large_logo = large_logo
        self.domain = domain
        self.system_name = system_name
        self.url = url
        self.email_from = email_from
        self.system_url = system_url
        self.privacy_policy = privacy_policy
class Country(Base):
    """Migration-local snapshot of the `country` table (decoupled from app models)."""
    __tablename__ = "country"
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(100), nullable=False)

    def __init__(self, name):
        self.name = name
class EventType(Enum):
    """Kind of event an `Event` row represents (stored via db.Enum)."""
    EVENT = 'event'
    AWARD = 'award'
class Event(Base):
    """Migration-local snapshot of the `event` table (decoupled from app models)."""
    __tablename__ = "event"
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    description = db.Column(db.String(255), nullable=False)
    start_date = db.Column(db.DateTime(), nullable=False)
    end_date = db.Column(db.DateTime(), nullable=False)
    key = db.Column(db.String(255), nullable=False, unique=True)
    organisation_id = db.Column(db.Integer(), db.ForeignKey(
        'organisation.id'), nullable=False)
    email_from = db.Column(db.String(255), nullable=False)
    url = db.Column(db.String(255), nullable=False)
    # Lifecycle windows for the event's application/review/selection phases.
    application_open = db.Column(db.DateTime(), nullable=False)
    application_close = db.Column(db.DateTime(), nullable=False)
    review_open = db.Column(db.DateTime(), nullable=False)
    review_close = db.Column(db.DateTime(), nullable=False)
    selection_open = db.Column(db.DateTime(), nullable=False)
    selection_close = db.Column(db.DateTime(), nullable=False)
    offer_open = db.Column(db.DateTime(), nullable=False)
    offer_close = db.Column(db.DateTime(), nullable=False)
    registration_open = db.Column(db.DateTime(), nullable=False)
    registration_close = db.Column(db.DateTime(), nullable=False)
    event_type = db.Column(db.Enum(EventType), nullable=False)

    def __init__(self,
                 name,
                 description,
                 start_date,
                 end_date,
                 key,
                 organisation_id,
                 email_from,
                 url,
                 application_open,
                 application_close,
                 review_open,
                 review_close,
                 selection_open,
                 selection_close,
                 offer_open,
                 offer_close,
                 registration_open,
                 registration_close,
                 event_type
                 ):
        self.name = name
        self.description = description
        self.start_date = start_date
        self.end_date = end_date
        self.key = key
        self.organisation_id = organisation_id
        self.email_from = email_from
        self.url = url
        self.application_open = application_open
        self.application_close = application_close
        self.review_open = review_open
        self.review_close = review_close
        self.selection_open = selection_open
        self.selection_close = selection_close
        self.offer_open = offer_open
        self.offer_close = offer_close
        self.registration_open = registration_open
        self.registration_close = registration_close
        # event_roles is not a mapped column here; initialised for app parity.
        self.event_roles = []
        self.event_type = event_type
class ApplicationForm(Base):
    """Migration-local snapshot of the `application_form` table."""
    __tablename__ = 'application_form'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    event_id = db.Column(db.Integer(), db.ForeignKey('event.id'), nullable=False)
    is_open = db.Column(db.Boolean(), nullable=False)
    event = db.relationship('Event', foreign_keys=[event_id])
    nominations = db.Column(db.Boolean(), nullable=False)

    def __init__(self, event_id, is_open, nominations):
        self.event_id = event_id
        self.is_open = is_open
        self.nominations = nominations
class Question(Base):
    """Migration-local snapshot of the `question` table.

    Note: depends_on_question_id and show_for_values are not constructor
    arguments; upgrade() sets them directly on instances after creation.
    """
    __tablename__ = 'question'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    application_form_id = db.Column(db.Integer(), db.ForeignKey('application_form.id'), nullable=False)
    section_id = db.Column(db.Integer(), db.ForeignKey('section.id'), nullable=False)
    type = db.Column(db.String(), nullable=False)
    description = db.Column(db.String(), nullable=True)
    headline = db.Column(db.String(), nullable=False)
    placeholder = db.Column(db.String(), nullable=True)
    validation_regex = db.Column(db.String(), nullable=True)
    validation_text = db.Column(db.String(), nullable=True)
    order = db.Column(db.Integer(), nullable=False)
    options = db.Column(db.JSON(), nullable=True)
    is_required = db.Column(db.Boolean(), nullable=False)
    depends_on_question_id = db.Column(db.Integer(), db.ForeignKey('question.id'), nullable=True)
    show_for_values = db.Column(db.JSON(), nullable=True)

    def __init__(self, application_form_id, section_id, headline, placeholder, order, questionType, validation_regex, validation_text=None, is_required = True, description = None, options = None):
        self.application_form_id = application_form_id
        self.section_id = section_id
        self.headline = headline
        self.placeholder = placeholder
        self.order = order
        self.type = questionType
        self.description = description
        self.options = options
        self.is_required = is_required
        self.validation_regex = validation_regex
        self.validation_text = validation_text
class Section(Base):
    """Migration-local snapshot of the `section` table.

    Note: depends_on_question_id and show_for_values are not constructor
    arguments; upgrade() sets them directly on instances after creation.
    """
    __tablename__ = 'section'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    application_form_id = db.Column(db.Integer(), db.ForeignKey('application_form.id'), nullable=False)
    name = db.Column(db.String(255), nullable=False)
    description = db.Column(db.String(255), nullable=False)
    order = db.Column(db.Integer(), nullable=False)
    # use_alter breaks the section<->question foreign-key cycle at create time.
    depends_on_question_id = db.Column(db.Integer(), db.ForeignKey('question.id', use_alter=True), nullable=True)
    show_for_values = db.Column(db.JSON(), nullable=True)

    def __init__(self, application_form_id, name, description, order):
        self.application_form_id = application_form_id
        self.name = name
        self.description = description
        self.order = order
def get_country_list(session):
    """Return every Country row as a {'label': name, 'value': name} option dict."""
    return [{'label': country.name, 'value': country.name}
            for country in session.query(Country).all()]
def upgrade():
    """Seed the Wangari Maathai Impact Award 2020 event, its application form,
    and all sections/questions. Data-only migration: no schema changes."""
    # ### commands auto generated by Alembic - please adjust! ###
    # pass
    # ### end Alembic commands ###
    Base.metadata.bind = op.get_bind()
    session = orm.Session(bind=Base.metadata.bind)
    # Create the event itself (organisation_id=1 is assumed to exist).
    maathaiimpact2020 = Event('Wangari Maathai Impact Award 2020',
                              'Wangari Maathai Impact Award 2020',
                              datetime.date(2020, 8, 23), datetime.date(2020, 8, 28), 'maathai2020',
                              1, 'baobab@deeplearningindaba.com', 'http://www.deeplearningindaba.com',
                              datetime.date(2020,3,1), datetime.date(2020,4,17), datetime.date(2020,4,25),
                              datetime.date(2020,5,15),datetime.date(2020,1,1), datetime.date(2020,1,1),
                              datetime.date(2020,1,1),datetime.date(2020,1,1), datetime.date(2020,1,1),
                              datetime.date(2020,1,1), EventType.AWARD)
    session.add(maathaiimpact2020)
    # Commit early so autogenerated ids are available for foreign keys below.
    session.commit()
    event_id = maathaiimpact2020.id
    application_form = ApplicationForm(event_id, True, True)
    session.add(application_form)
    session.commit()
    app_form_id = application_form.id
    main_section = Section(app_form_id, 'Wangari Maathai Impact Award 2020', """
This is the official application form for the Wangari Maathai Impact Award 2020, an award to encourage and recognise work by African innovators that shows impactful application of machine learning and artificial intelligence. This award will be made at the Deep Learning Indaba in Tunis, Tunisia in August 2020.
This application will require:
- Personal details about the nominee,
- Details about the impactful work, including why it is impactful, who it impacts and why is it innovative,
- Details of 2 people other than the nominator to provide supporting letters for the nominee
For eligibility criteria for the Maathai Award, please see www.deeplearningindaba.com/maathai-2020
For any queries, please email awards@deeplearningindaba.com.
    """, 1)
    session.add(main_section)
    session.commit()
    q1_nomination_capacity = Question(
        application_form_id=app_form_id,
        section_id=main_section.id,
        headline='Nominating Capacity',
        placeholder='',
        order=1,
        questionType='multi-choice',
        validation_regex=None,
        is_required=True,
        options=[
            {'label':'Self-nomination', 'value':'self'},
            {'label':'Nomination on behalf of a candidate','value':'other'}
        ]
    )
    session.add(q1_nomination_capacity)
    session.commit()
    # Nominator section is shown only for third-party nominations ('other').
    nominator_information = Section(app_form_id, 'Nominator Information',"""
Details of the person nominating an individual, team or organisation
    """,2)
    nominator_information.depends_on_question_id = q1_nomination_capacity.id
    nominator_information.show_for_values = ['other']
    session.add(nominator_information)
    session.commit()
    nominator_q1 = Question(
        application_form_id=app_form_id,
        section_id=nominator_information.id,
        headline='Affiliation',
        placeholder='Affiliation',
        order=1,
        questionType='short-text',
        validation_regex=None,
        is_required=True,
        description='(university, institute, company, etc)'
    )
    nominator_q2 = Question(
        application_form_id=app_form_id,
        section_id=nominator_information.id,
        headline='Department',
        placeholder='Department',
        order=2,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominator_q3 = Question(
        application_form_id=app_form_id,
        section_id=nominator_information.id,
        headline='Describe your relationship to the nominee',
        placeholder='',
        order=3,
        questionType='long-text',
        validation_regex=None,
        is_required=True
    )
    session.add_all([nominator_q1, nominator_q2, nominator_q3])
    session.commit()
    nominee_information = Section(app_form_id, 'Nominee Information',"""
Details of the nominated individual, team or organisation to be considered for the award. For any teams/organisations, details of the principal contact should be entered below.
    """,3)
    session.add(nominee_information)
    session.commit()
    # Personal-detail questions are hidden for self-nominations (shown for 'other').
    nominee_q1 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Title',
        placeholder='Title',
        order=1,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q1.depends_on_question_id = q1_nomination_capacity.id
    nominee_q1.show_for_values = ['other']
    nominee_q2 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Firstname',
        placeholder='Firstname',
        order=2,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q2.depends_on_question_id = q1_nomination_capacity.id
    nominee_q2.show_for_values = ['other']
    nominee_q3 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Lastname',
        placeholder='Lastname',
        order=3,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q3.depends_on_question_id = q1_nomination_capacity.id
    nominee_q3.show_for_values = ['other']
    nominee_q4 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Email Address',
        placeholder='Email Address',
        order=4,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q4.depends_on_question_id = q1_nomination_capacity.id
    nominee_q4.show_for_values = ['other']
    nominee_q5 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Affiliation',
        placeholder='Affiliation',
        order=5,
        questionType='short-text',
        validation_regex=None,
        is_required=True,
        description='(university, institute, company, etc)'
    )
    nominee_q6 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='If a team/organisation, names of team members',
        placeholder='Names of team members',
        order=6,
        questionType='short-text',
        validation_regex=None,
        is_required=False
    )
    nominee_q7 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Country of Residence',
        placeholder='Choose an option',
        order=7,
        questionType='multi-choice',
        validation_regex=None,
        is_required=True,
        options=get_country_list(session)
    )
    nominee_q8 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Nationality',
        placeholder='Choose an option',
        order=8,
        questionType='multi-choice',
        validation_regex=None,
        is_required=True,
        options=get_country_list(session)
    )
    nominee_q9 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Website (or other online presence)',
        placeholder='Enter a URL',
        order=9,
        questionType='short-text',
        validation_regex=None,
        is_required=False
    )
    session.add_all([nominee_q1,nominee_q2,nominee_q3,nominee_q4,nominee_q5,
                     nominee_q6,nominee_q7,nominee_q8,nominee_q9])
    session.commit()
    impact_info = Section(app_form_id, 'Information about impactful work','',3)
    session.add(impact_info)
    session.commit()
    # The word-count regexes below bound answers to N-M whitespace-separated words.
    impact_q1 = Question(
        application_form_id=app_form_id,
        section_id=impact_info.id,
        headline='What impactful work or project is the team/individual doing?',
        placeholder='Enter 300-500 words',
        order=1,
        questionType='long-text',
        validation_regex=r'^\s*(\S+(\s+|$)){300,500}$',
        is_required=True,
        description='Describe the work/project. In particular, describe the role of machine learning and/or artificial intelligence (300-500 words)'
    )
    impact_q2 = Question(
        application_form_id=app_form_id,
        section_id=impact_info.id,
        headline='Who does this work impact? Say how.',
        placeholder='Enter 150-200 words',
        order=2,
        questionType='long-text',
        validation_regex=r'^\s*(\S+(\s+|$)){150,200}$',
        is_required=True,
        description='Describe who is benefitting from this work (location, how many people etc). Describe how this work is positively affecting this group (150-200 words)'
    )
    impact_q3 = Question(
        application_form_id=app_form_id,
        section_id=impact_info.id,
        headline='Why is this work innovative?',
        placeholder='Enter 150-200 words',
        order=3,
        questionType='long-text',
        validation_regex=r'^\s*(\S+(\s+|$)){150,200}$',
        is_required=True,
        description='Describe the novel parts of the work, what difference it is making, or how it is moving Africa forwards (150-200 words)'
    )
    session.add_all([impact_q1,impact_q2,impact_q3])
    session.commit()
    supporting_docs = Section(app_form_id, 'Supporting Documentation', """
If this is a self-nomination, two supporting letters are required, otherwise one supporting letter is sufficient. The supporting letters should describe the nature of the impactful work, why it is considered to be impactful, and in what way the candidate strengthens African machine learning, and any other relevant information. Letter writers can be from anyone familiar with the impactful work.
Supporting letters should be 600 words at most, written in English, and submitted electronically in PDF by the closing date through Baobab
    """,4)
    session.add(supporting_docs)
    session.commit()
    supporting_docs_q1 = Question(
        application_form_id=app_form_id,
        section_id=supporting_docs.id,
        headline='Add the details of the 1 or 2 people who will provide supporting letters.',
        placeholder='',
        order=1,
        questionType='reference',
        validation_regex=None,
        is_required=True,
        description='Add at least two people if this is a self nomination and at least one if you are nominating someone else.',
        options={'min_num_referral': 1, 'max_num_referral': 3}
    )
    supporting_docs_q2 = Question(
        application_form_id=app_form_id,
        section_id=supporting_docs.id,
        headline='Additional comments',
        placeholder='',
        order=2,
        questionType='long-text',
        validation_regex=None,
        is_required=False,
        description='Use this space to provide any additional details which you feel are relevant to this nomination and have not been captured by this form.'
    )
    session.add_all([supporting_docs_q1, supporting_docs_q2])
    session.commit()
def downgrade():
    """Remove the Maathai 2020 event and everything upgrade() created."""
    # ### commands auto generated by Alembic - please adjust! ###
    # pass
    # ### end Alembic commands ###
    Base.metadata.bind = op.get_bind()
    session = orm.Session(bind=Base.metadata.bind)
    event = session.query(Event).filter_by(key='maathai2020').first()
    app_form = session.query(ApplicationForm).filter_by(event_id=event.id).first()
    # Break the section->question FK before bulk-deleting questions.
    nominator = session.query(Section).filter_by(name='Nominator Information').first()
    nominator.depends_on_question_id = None
    # Delete children before parents to respect foreign-key constraints.
    session.query(Question).filter_by(application_form_id=app_form.id).delete()
    session.query(Section).filter_by(application_form_id=app_form.id).delete()
    session.query(ApplicationForm).filter_by(event_id=event.id).delete()
    session.query(Event).filter_by(key='maathai2020').delete()
    session.commit()
| 14,842
| 4,301
| 230
|
ece5c4da754e79fc13f9c5db7f22ff6ccf734a97
| 892
|
py
|
Python
|
textovni.py
|
timenbob/3-v-vrsto-4x4
|
bdb1dcdeec7ea3c977f739643d08762f5e6a38ee
|
[
"MIT"
] | null | null | null |
textovni.py
|
timenbob/3-v-vrsto-4x4
|
bdb1dcdeec7ea3c977f739643d08762f5e6a38ee
|
[
"MIT"
] | 1
|
2021-05-11T07:58:22.000Z
|
2021-05-11T07:58:22.000Z
|
textovni.py
|
timenbob/3-v-vrsto-4x4
|
bdb1dcdeec7ea3c977f739643d08762f5e6a38ee
|
[
"MIT"
] | null | null | null |
# Text-mode console front end for the Igrica game model: keeps starting
# new games while the player answers "n" at the end-of-game prompt.
from model import Igrica
nova_igra = 'n'
while nova_igra == "n":
    trenutna_igra = Igrica()
    # print(trenutna_igra)
    while True:
        trenutna_igra.izpis_igre()
        print(f"na vrsti je igralec {trenutna_igra.igralec}")
        # Re-prompt until the model accepts the move as valid.
        while True:
            print("Napiši št: ")
            polje = input()
            poteza = trenutna_igra.shrani_potezo(polje)
            if poteza:
                break
        if trenutna_igra.zmaga() != False:
            print(f"Zmagovalec je {trenutna_igra.zmaga()}")
            print("!!!!!!!!!!!!!!")
            trenutna_igra.izpis_igre()
            print("!!!!!!!!!!!!!!")
            break
        if trenutna_igra.neodloceno():
            print("Neodloceno")
            break
        trenutna_igra.menjava_igralcev()
    print("Igre je konec")
    # NOTE(review): prompt advertises "N" but the loop tests for lower-case
    # "n"; typing "N" ends the program — confirm this is intended.
    nova_igra = input("Pritisnite N za novo igro: ")
print("KONEC PROGRAMA")
| 24.108108
| 61
| 0.539238
|
# Text-mode console front end for the Igrica game model: keeps starting
# new games while the player answers "n" at the end-of-game prompt.
from model import Igrica
nova_igra = 'n'
while nova_igra == "n":
    trenutna_igra = Igrica()
    # print(trenutna_igra)
    while True:
        trenutna_igra.izpis_igre()
        print(f"na vrsti je igralec {trenutna_igra.igralec}")
        # Re-prompt until the model accepts the move as valid.
        while True:
            print("Napiši št: ")
            polje = input()
            poteza = trenutna_igra.shrani_potezo(polje)
            if poteza:
                break
        if trenutna_igra.zmaga() != False:
            print(f"Zmagovalec je {trenutna_igra.zmaga()}")
            print("!!!!!!!!!!!!!!")
            trenutna_igra.izpis_igre()
            print("!!!!!!!!!!!!!!")
            break
        if trenutna_igra.neodloceno():
            print("Neodloceno")
            break
        trenutna_igra.menjava_igralcev()
    print("Igre je konec")
    # NOTE(review): prompt advertises "N" but the loop tests for lower-case
    # "n"; typing "N" ends the program — confirm this is intended.
    nova_igra = input("Pritisnite N za novo igro: ")
print("KONEC PROGRAMA")
| 0
| 0
| 0
|
db3e4d24dfba95e51f64f911028d197a6aed38a5
| 214,999
|
py
|
Python
|
demo/ESI_tranquility_latest.py
|
nicoscha/PESI
|
4ce85a51b05bca891cc7e191fca67e93788011bd
|
[
"Apache-2.0"
] | null | null | null |
demo/ESI_tranquility_latest.py
|
nicoscha/PESI
|
4ce85a51b05bca891cc7e191fca67e93788011bd
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:15:39.000Z
|
2021-06-02T00:15:39.000Z
|
demo/ESI_tranquility_latest.py
|
nicoscha/PESI
|
4ce85a51b05bca891cc7e191fca67e93788011bd
|
[
"Apache-2.0"
] | null | null | null |
# Python EVE Swagger Interface
# https://github.com/nicoscha/PESI
# ESI version 0.8.9
import ESI_request
def get_alliances(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    List all active player alliances
    ---
    Alternate route: `/dev/alliances/`
    Alternate route: `/legacy/alliances/`
    Alternate route: `/v1/alliances/`
    ---
    This route is cached for up to 3600 seconds
    """
    # NOTE(review): the value returned by ESI_request.request is discarded;
    # confirm whether callers expect this wrapper to return the response.
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        path=f'/alliances/')
def get_alliances_alliance_id(*, alliance_id, if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Public information about an alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/`
    Alternate route: `/v3/alliances/{alliance_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # NOTE(review): the value returned by ESI_request.request is discarded;
    # confirm whether callers expect this wrapper to return the response.
    ESI_request.request(alliance_id=alliance_id,
                        if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        path=f'/alliances/{alliance_id}/')
def get_alliances_alliance_id_contacts(*,
                                       alliance_id,
                                       token,
                                       if_none_match=None,
                                       page='1'):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return contacts of an alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/contacts/`
    Alternate route: `/v2/alliances/{alliance_id}/contacts/`
    ---
    This route is cached for up to 300 seconds
    """
    # NOTE(review): the value returned by ESI_request.request is discarded;
    # confirm whether callers expect this wrapper to return the response.
    ESI_request.request(alliance_id=alliance_id,
                        if_none_match=if_none_match,
                        page=page,
                        token=token,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        path=f'/alliances/{alliance_id}/contacts/')
def get_alliances_alliance_id_contacts_labels(*,
                                              alliance_id,
                                              token,
                                              if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return custom labels for an alliance's contacts
    ---
    Alternate route: `/dev/alliances/{alliance_id}/contacts/labels/`
    Alternate route: `/legacy/alliances/{alliance_id}/contacts/labels/`
    Alternate route: `/v1/alliances/{alliance_id}/contacts/labels/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(alliance_id=alliance_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/alliances/{alliance_id}/contacts/labels/')
def get_alliances_alliance_id_corporations(*, alliance_id, if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    List all current member corporations of an alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/corporations/`
    Alternate route: `/legacy/alliances/{alliance_id}/corporations/`
    Alternate route: `/v1/alliances/{alliance_id}/corporations/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(alliance_id=alliance_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/alliances/{alliance_id}/corporations/')
def get_alliances_alliance_id_icons(*, alliance_id, if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the icon urls for a alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/icons/`
    Alternate route: `/legacy/alliances/{alliance_id}/icons/`
    Alternate route: `/v1/alliances/{alliance_id}/icons/`
    ---
    This route expires daily at 11:05
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(alliance_id=alliance_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/alliances/{alliance_id}/icons/')
def post_characters_affiliation(*, characters):
    """
    :param characters: The character IDs to fetch affiliations for. All characters must exist, or none will be returned
    Bulk lookup of character IDs to corporation, alliance and faction
    ---
    Alternate route: `/dev/characters/affiliation/`
    Alternate route: `/legacy/characters/affiliation/`
    Alternate route: `/v1/characters/affiliation/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(characters=characters,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/affiliation/')
def get_characters_character_id(*, character_id, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Public information about a character
    ---
    Alternate route: `/dev/characters/{character_id}/`
    Alternate route: `/v4/characters/{character_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/')
def get_characters_character_id_agents_research(*,
                                                character_id,
                                                token,
                                                if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of agents research information for a character. The formula for finding the current research points with an agent is: currentPoints = remainderPoints + pointsPerDay * days(currentTime - researchStartDate)
    ---
    Alternate route: `/dev/characters/{character_id}/agents_research/`
    Alternate route: `/legacy/characters/{character_id}/agents_research/`
    Alternate route: `/v1/characters/{character_id}/agents_research/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/agents_research/')
def get_characters_character_id_assets(*,
                                       character_id,
                                       token,
                                       if_none_match=None,
                                       page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of the characters assets
    ---
    Alternate route: `/dev/characters/{character_id}/assets/`
    Alternate route: `/v3/characters/{character_id}/assets/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/assets/')
def post_characters_character_id_assets_locations(*, character_id, item_ids,
                                                  token):
    """
    :param character_id: An EVE character ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    Return locations for a set of item ids, which you can get from character assets endpoint. Coordinates for items in hangars or stations are set to (0,0,0)
    ---
    Alternate route: `/dev/characters/{character_id}/assets/locations/`
    Alternate route: `/v2/characters/{character_id}/assets/locations/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               item_ids=item_ids,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/assets/locations/')
def post_characters_character_id_assets_names(*, character_id, item_ids,
                                              token):
    """
    :param character_id: An EVE character ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    Return names for a set of item ids, which you can get from character assets endpoint. Typically used for items that can customize names, like containers or ships.
    ---
    Alternate route: `/dev/characters/{character_id}/assets/names/`
    Alternate route: `/legacy/characters/{character_id}/assets/names/`
    Alternate route: `/v1/characters/{character_id}/assets/names/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               item_ids=item_ids,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/assets/names/')
def get_characters_character_id_attributes(*,
                                           character_id,
                                           token,
                                           if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return attributes of a character
    ---
    Alternate route: `/dev/characters/{character_id}/attributes/`
    Alternate route: `/legacy/characters/{character_id}/attributes/`
    Alternate route: `/v1/characters/{character_id}/attributes/`
    ---
    This route is cached for up to 120 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/attributes/')
def get_characters_character_id_blueprints(*,
                                           character_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of blueprints the character owns
    ---
    Alternate route: `/dev/characters/{character_id}/blueprints/`
    Alternate route: `/v2/characters/{character_id}/blueprints/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/blueprints/')
def get_characters_character_id_bookmarks(*,
                                          character_id,
                                          token,
                                          if_none_match=None,
                                          page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    A list of your character's personal bookmarks
    ---
    Alternate route: `/dev/characters/{character_id}/bookmarks/`
    Alternate route: `/v2/characters/{character_id}/bookmarks/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/bookmarks/')
def get_characters_character_id_bookmarks_folders(*,
                                                  character_id,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    A list of your character's personal bookmark folders
    ---
    Alternate route: `/dev/characters/{character_id}/bookmarks/folders/`
    Alternate route: `/v2/characters/{character_id}/bookmarks/folders/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/bookmarks/folders/')
def get_characters_character_id_calendar(*,
                                         character_id,
                                         from_event,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param from_event: The event ID to retrieve events from
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get 50 event summaries from the calendar. If no from_event ID is given, the resource will return the next 50 chronological event summaries from now. If a from_event ID is specified, it will return the next 50 chronological event summaries from after that event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/`
    Alternate route: `/legacy/characters/{character_id}/calendar/`
    Alternate route: `/v1/characters/{character_id}/calendar/`
    ---
    This route is cached for up to 5 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               from_event=from_event,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/calendar/')
def get_characters_character_id_calendar_event_id(*,
                                                  character_id,
                                                  event_id,
                                                  token,
                                                  if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param event_id: The id of the event requested
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get all the information for a specific event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/{event_id}/`
    Alternate route: `/v3/characters/{character_id}/calendar/{event_id}/`
    ---
    This route is cached for up to 5 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(
        character_id=character_id,
        event_id=event_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/calendar/{event_id}/')
def put_characters_character_id_calendar_event_id(*, character_id, event_id,
                                                  response, token):
    """
    :param character_id: An EVE character ID
    :param event_id: The ID of the event requested
    :param response: The response value to set, overriding current value
    :param token: Access token to use if unable to set a header
    Set your response status to an event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/{event_id}/`
    Alternate route: `/v3/characters/{character_id}/calendar/{event_id}/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(
        character_id=character_id,
        event_id=event_id,
        response=response,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/characters/{character_id}/calendar/{event_id}/')
def get_characters_character_id_calendar_event_id_attendees(
        *, character_id, event_id, token, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param event_id: The id of the event requested
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get all invited attendees for a given event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/{event_id}/attendees/`
    Alternate route: `/legacy/characters/{character_id}/calendar/{event_id}/attendees/`
    Alternate route: `/v1/characters/{character_id}/calendar/{event_id}/attendees/`
    ---
    This route is cached for up to 600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(
        character_id=character_id,
        event_id=event_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/calendar/{event_id}/attendees/')
def get_characters_character_id_clones(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    A list of the character's clones
    ---
    Alternate route: `/dev/characters/{character_id}/clones/`
    Alternate route: `/v3/characters/{character_id}/clones/`
    ---
    This route is cached for up to 120 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/clones/')
def delete_characters_character_id_contacts(*, character_id, contact_ids,
                                            token):
    """
    :param character_id: An EVE character ID
    :param contact_ids: A list of contacts to delete
    :param token: Access token to use if unable to set a header
    Bulk delete contacts
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               contact_ids=contact_ids,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='DELETE',
                               path=f'/characters/{character_id}/contacts/')
def get_characters_character_id_contacts(*,
                                         character_id,
                                         token,
                                         if_none_match=None,
                                         page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return contacts of a character
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/contacts/')
def post_characters_character_id_contacts(*, character_id, contact_ids,
                                          label_ids, standing, token, watched):
    """
    :param character_id: An EVE character ID
    :param contact_ids: A list of contacts
    :param label_ids: Add custom labels to the new contact
    :param standing: Standing for the contact
    :param token: Access token to use if unable to set a header
    :param watched: Whether the contact should be watched, note this is only effective on characters
    Bulk add contacts with same settings
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               contact_ids=contact_ids,
                               label_ids=label_ids,
                               standing=standing,
                               token=token,
                               watched=watched,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/contacts/')
def put_characters_character_id_contacts(*, character_id, contact_ids,
                                         label_ids, standing, token, watched):
    """
    :param character_id: An EVE character ID
    :param contact_ids: A list of contacts
    :param label_ids: Add custom labels to the contact
    :param standing: Standing for the contact
    :param token: Access token to use if unable to set a header
    :param watched: Whether the contact should be watched, note this is only effective on characters
    Bulk edit contacts with same settings
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               contact_ids=contact_ids,
                               label_ids=label_ids,
                               standing=standing,
                               token=token,
                               watched=watched,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='PUT',
                               path=f'/characters/{character_id}/contacts/')
def get_characters_character_id_contacts_labels(*,
                                                character_id,
                                                token,
                                                if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return custom labels for a character's contacts
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/labels/`
    Alternate route: `/legacy/characters/{character_id}/contacts/labels/`
    Alternate route: `/v1/characters/{character_id}/contacts/labels/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/contacts/labels/')
def get_characters_character_id_contracts(*,
                                          character_id,
                                          token,
                                          if_none_match=None,
                                          page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns contracts available to a character, only if the character is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is "in_progress".
    ---
    Alternate route: `/dev/characters/{character_id}/contracts/`
    Alternate route: `/legacy/characters/{character_id}/contracts/`
    Alternate route: `/v1/characters/{character_id}/contracts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/contracts/')
def get_characters_character_id_contracts_contract_id_bids(
        *, character_id, contract_id, token, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Lists bids on a particular auction contract
    ---
    Alternate route: `/dev/characters/{character_id}/contracts/{contract_id}/bids/`
    Alternate route: `/legacy/characters/{character_id}/contracts/{contract_id}/bids/`
    Alternate route: `/v1/characters/{character_id}/contracts/{contract_id}/bids/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(
        character_id=character_id,
        contract_id=contract_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/contracts/{contract_id}/bids/')
def get_characters_character_id_contracts_contract_id_items(
        *, character_id, contract_id, token, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Lists items of a particular contract
    ---
    Alternate route: `/dev/characters/{character_id}/contracts/{contract_id}/items/`
    Alternate route: `/legacy/characters/{character_id}/contracts/{contract_id}/items/`
    Alternate route: `/v1/characters/{character_id}/contracts/{contract_id}/items/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(
        character_id=character_id,
        contract_id=contract_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/contracts/{contract_id}/items/')
def get_characters_character_id_corporationhistory(*,
                                                   character_id,
                                                   if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of all the corporations a character has been a member of
    ---
    Alternate route: `/dev/characters/{character_id}/corporationhistory/`
    Alternate route: `/legacy/characters/{character_id}/corporationhistory/`
    Alternate route: `/v1/characters/{character_id}/corporationhistory/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/corporationhistory/')
def post_characters_character_id_cspa(*, character_id, characters, token):
    """
    :param character_id: An EVE character ID
    :param characters: The target characters to calculate the charge for
    :param token: Access token to use if unable to set a header
    Takes a source character ID in the url and a set of target character ID's in the body, returns a CSPA charge cost
    ---
    Alternate route: `/dev/characters/{character_id}/cspa/`
    Alternate route: `/v4/characters/{character_id}/cspa/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               characters=characters,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/cspa/')
def get_characters_character_id_fatigue(*,
                                        character_id,
                                        token,
                                        if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a character's jump activation and fatigue information
    ---
    Alternate route: `/dev/characters/{character_id}/fatigue/`
    Alternate route: `/legacy/characters/{character_id}/fatigue/`
    Alternate route: `/v1/characters/{character_id}/fatigue/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fatigue/')
def get_characters_character_id_fittings(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return fittings of a character
    ---
    Alternate route: `/dev/characters/{character_id}/fittings/`
    Alternate route: `/v2/characters/{character_id}/fittings/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fittings/')
def post_characters_character_id_fittings(*, character_id, fitting, token):
    """
    :param character_id: An EVE character ID
    :param fitting: Details about the new fitting
    :param token: Access token to use if unable to set a header
    Save a new fitting for a character
    ---
    Alternate route: `/dev/characters/{character_id}/fittings/`
    Alternate route: `/v2/characters/{character_id}/fittings/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               fitting=fitting,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/fittings/')
def delete_characters_character_id_fittings_fitting_id(*, character_id,
                                                       fitting_id, token):
    """
    :param character_id: An EVE character ID
    :param fitting_id: ID for a fitting of this character
    :param token: Access token to use if unable to set a header
    Delete a fitting from a character
    ---
    Alternate route: `/dev/characters/{character_id}/fittings/{fitting_id}/`
    Alternate route: `/legacy/characters/{character_id}/fittings/{fitting_id}/`
    Alternate route: `/v1/characters/{character_id}/fittings/{fitting_id}/`
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(
        character_id=character_id,
        fitting_id=fitting_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/characters/{character_id}/fittings/{fitting_id}/')
def get_characters_character_id_fleet(*,
                                      character_id,
                                      token,
                                      if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return the fleet ID the character is in, if any.
    ---
    Alternate route: `/legacy/characters/{character_id}/fleet/`
    Alternate route: `/v1/characters/{character_id}/fleet/`
    ---
    This route is cached for up to 60 seconds
    ---
    Warning: This route has an upgrade available
    ---
    [Diff of the upcoming changes](https://esi.evetech.net/diff/latest/dev/#GET-/characters/{character_id}/fleet/)
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fleet/')
def get_characters_character_id_fw_stats(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Statistical overview of a character involved in faction warfare
    ---
    Alternate route: `/dev/characters/{character_id}/fw/stats/`
    Alternate route: `/legacy/characters/{character_id}/fw/stats/`
    Alternate route: `/v1/characters/{character_id}/fw/stats/`
    ---
    This route expires daily at 11:05
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fw/stats/')
def get_characters_character_id_implants(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return implants on the active clone of a character
    ---
    Alternate route: `/dev/characters/{character_id}/implants/`
    Alternate route: `/legacy/characters/{character_id}/implants/`
    Alternate route: `/v1/characters/{character_id}/implants/`
    ---
    This route is cached for up to 120 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/implants/')
def get_characters_character_id_industry_jobs(*,
                                              character_id,
                                              include_completed,
                                              token,
                                              if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param include_completed: Whether to retrieve completed character industry jobs. Only includes jobs from the past 90 days
    :param token: Access token to use if unable to set a header
    List industry jobs placed by a character
    ---
    Alternate route: `/dev/characters/{character_id}/industry/jobs/`
    Alternate route: `/legacy/characters/{character_id}/industry/jobs/`
    Alternate route: `/v1/characters/{character_id}/industry/jobs/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               include_completed=include_completed,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/industry/jobs/')
def get_characters_character_id_killmails_recent(*,
                                                 character_id,
                                                 token,
                                                 if_none_match=None,
                                                 page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of a character's kills and losses going back 90 days
    ---
    Alternate route: `/dev/characters/{character_id}/killmails/recent/`
    Alternate route: `/legacy/characters/{character_id}/killmails/recent/`
    Alternate route: `/v1/characters/{character_id}/killmails/recent/`
    ---
    This route is cached for up to 300 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/killmails/recent/')
def get_characters_character_id_location(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Information about the characters current location. Returns the current solar system id, and also the current station or structure ID if applicable
    ---
    Alternate route: `/legacy/characters/{character_id}/location/`
    Alternate route: `/v1/characters/{character_id}/location/`
    ---
    This route is cached for up to 5 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/location/')
def get_characters_character_id_loyalty_points(*,
                                               character_id,
                                               token,
                                               if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of loyalty points for all corporations the character has worked for
    ---
    Alternate route: `/dev/characters/{character_id}/loyalty/points/`
    Alternate route: `/legacy/characters/{character_id}/loyalty/points/`
    Alternate route: `/v1/characters/{character_id}/loyalty/points/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/loyalty/points/')
def get_characters_character_id_mail(*,
                                     character_id,
                                     labels,
                                     last_mail_id,
                                     token,
                                     if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param labels: Fetch only mails that match one or more of the given labels
    :param last_mail_id: List only mail with an ID lower than the given ID, if present
    :param token: Access token to use if unable to set a header
    Return the 50 most recent mail headers belonging to the character that match the query criteria. Queries can be filtered by label, and last_mail_id can be used to paginate backwards
    ---
    Alternate route: `/dev/characters/{character_id}/mail/`
    Alternate route: `/legacy/characters/{character_id}/mail/`
    Alternate route: `/v1/characters/{character_id}/mail/`
    ---
    This route is cached for up to 30 seconds
    """
    # Fix: propagate the ESI response to the caller instead of discarding it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               labels=labels,
                               last_mail_id=last_mail_id,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/mail/')
def post_characters_character_id_mail(*, character_id, mail, token):
    """
    :param character_id: An EVE character ID
    :param mail: The mail to send
    :param token: Access token to use if unable to set a header
    Create and send a new mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/`
    Alternate route: `/legacy/characters/{character_id}/mail/`
    Alternate route: `/v1/characters/{character_id}/mail/`
    """
    # Return the response (contains the new mail's id) instead of
    # discarding ESI_request.request's return value.
    return ESI_request.request(character_id=character_id,
                               mail=mail,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/mail/')
def get_characters_character_id_mail_labels(*,
                                            character_id,
                                            token,
                                            if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of the users mail labels, unread counts for each label and a total unread count.
    ---
    Alternate route: `/dev/characters/{character_id}/mail/labels/`
    Alternate route: `/v3/characters/{character_id}/mail/labels/`
    ---
    This route is cached for up to 30 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mail/labels/')
def post_characters_character_id_mail_labels(*, character_id, label, token):
    """
    :param character_id: An EVE character ID
    :param label: Label to create
    :param token: Access token to use if unable to set a header
    Create a mail label
    ---
    Alternate route: `/dev/characters/{character_id}/mail/labels/`
    Alternate route: `/legacy/characters/{character_id}/mail/labels/`
    Alternate route: `/v2/characters/{character_id}/mail/labels/`
    """
    # Return the response (contains the new label's id) instead of
    # discarding ESI_request.request's return value.
    return ESI_request.request(
        character_id=character_id,
        label=label,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/characters/{character_id}/mail/labels/')
def delete_characters_character_id_mail_labels_label_id(
        *, character_id, label_id, token):
    """
    :param character_id: An EVE character ID
    :param label_id: An EVE label id
    :param token: Access token to use if unable to set a header
    Delete a mail label
    ---
    Alternate route: `/dev/characters/{character_id}/mail/labels/{label_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/labels/{label_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/labels/{label_id}/`
    """
    # Return the response so callers can check the outcome
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        label_id=label_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/characters/{character_id}/mail/labels/{label_id}/')
def get_characters_character_id_mail_lists(*,
                                           character_id,
                                           token,
                                           if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return all mailing lists that the character is subscribed to
    ---
    Alternate route: `/dev/characters/{character_id}/mail/lists/`
    Alternate route: `/legacy/characters/{character_id}/mail/lists/`
    Alternate route: `/v1/characters/{character_id}/mail/lists/`
    ---
    This route is cached for up to 120 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mail/lists/')
def delete_characters_character_id_mail_mail_id(*, character_id, mail_id,
                                                token):
    """
    :param character_id: An EVE character ID
    :param mail_id: An EVE mail ID
    :param token: Access token to use if unable to set a header
    Delete a mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/{mail_id}/`
    """
    # Return the response so callers can check the outcome
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        mail_id=mail_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/characters/{character_id}/mail/{mail_id}/')
def get_characters_character_id_mail_mail_id(*,
                                             character_id,
                                             mail_id,
                                             token,
                                             if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param mail_id: An EVE mail ID
    :param token: Access token to use if unable to set a header
    Return the contents of an EVE mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/{mail_id}/`
    ---
    This route is cached for up to 30 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        mail_id=mail_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mail/{mail_id}/')
def put_characters_character_id_mail_mail_id(*, character_id, contents,
                                             mail_id, token):
    """
    :param character_id: An EVE character ID
    :param contents: Data used to update the mail
    :param mail_id: An EVE mail ID
    :param token: Access token to use if unable to set a header
    Update metadata about a mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/{mail_id}/`
    """
    # Return the response so callers can check the outcome
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        contents=contents,
        mail_id=mail_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/characters/{character_id}/mail/{mail_id}/')
def get_characters_character_id_medals(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of medals the character has
    ---
    Alternate route: `/dev/characters/{character_id}/medals/`
    Alternate route: `/legacy/characters/{character_id}/medals/`
    Alternate route: `/v1/characters/{character_id}/medals/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/medals/')
def get_characters_character_id_mining(*,
                                       character_id,
                                       token,
                                       if_none_match=None,
                                       page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Paginated record of all mining done by a character for the past 30 days
    ---
    Alternate route: `/dev/characters/{character_id}/mining/`
    Alternate route: `/legacy/characters/{character_id}/mining/`
    Alternate route: `/v1/characters/{character_id}/mining/`
    ---
    This route is cached for up to 600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/mining/')
def get_characters_character_id_notifications(*,
                                              character_id,
                                              token,
                                              if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return character notifications
    ---
    Alternate route: `/dev/characters/{character_id}/notifications/`
    Alternate route: `/v5/characters/{character_id}/notifications/`
    ---
    This route is cached for up to 600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/notifications/')
def get_characters_character_id_notifications_contacts(*,
                                                       character_id,
                                                       token,
                                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return notifications about having been added to someone's contact list
    ---
    Alternate route: `/dev/characters/{character_id}/notifications/contacts/`
    Alternate route: `/legacy/characters/{character_id}/notifications/contacts/`
    Alternate route: `/v1/characters/{character_id}/notifications/contacts/`
    ---
    This route is cached for up to 600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/notifications/contacts/')
def get_characters_character_id_online(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Checks if the character is currently online
    ---
    Alternate route: `/v2/characters/{character_id}/online/`
    ---
    This route is cached for up to 60 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/online/')
def get_characters_character_id_opportunities(*,
                                              character_id,
                                              token,
                                              if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of tasks finished by a character
    ---
    Alternate route: `/dev/characters/{character_id}/opportunities/`
    Alternate route: `/legacy/characters/{character_id}/opportunities/`
    Alternate route: `/v1/characters/{character_id}/opportunities/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/opportunities/')
def get_characters_character_id_orders(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    List open market orders placed by a character
    ---
    Alternate route: `/dev/characters/{character_id}/orders/`
    Alternate route: `/v2/characters/{character_id}/orders/`
    ---
    This route is cached for up to 1200 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/orders/')
def get_characters_character_id_orders_history(*,
                                               character_id,
                                               token,
                                               if_none_match=None,
                                               page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    List cancelled and expired market orders placed by a character up to 90 days in the past.
    ---
    Alternate route: `/dev/characters/{character_id}/orders/history/`
    Alternate route: `/legacy/characters/{character_id}/orders/history/`
    Alternate route: `/v1/characters/{character_id}/orders/history/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/orders/history/')
def get_characters_character_id_planets(*,
                                        character_id,
                                        token,
                                        if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a list of all planetary colonies owned by a character.
    ---
    Alternate route: `/dev/characters/{character_id}/planets/`
    Alternate route: `/legacy/characters/{character_id}/planets/`
    Alternate route: `/v1/characters/{character_id}/planets/`
    ---
    This route is cached for up to 600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/planets/')
def get_characters_character_id_planets_planet_id(*,
                                                  character_id,
                                                  planet_id,
                                                  token,
                                                  if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param planet_id: Planet id of the target planet
    :param token: Access token to use if unable to set a header
    Returns full details on the layout of a single planetary colony, including links, pins and routes. Note: Planetary information is only recalculated when the colony is viewed through the client. Information will not update until this criteria is met.
    ---
    Alternate route: `/dev/characters/{character_id}/planets/{planet_id}/`
    Alternate route: `/v3/characters/{character_id}/planets/{planet_id}/`
    ---
    This route is cached for up to 600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        planet_id=planet_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/planets/{planet_id}/')
def get_characters_character_id_portrait(*, character_id, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get portrait urls for a character
    ---
    Alternate route: `/dev/characters/{character_id}/portrait/`
    Alternate route: `/v2/characters/{character_id}/portrait/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/portrait/')
def get_characters_character_id_roles(*,
                                      character_id,
                                      token,
                                      if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a character's corporation roles
    ---
    Alternate route: `/dev/characters/{character_id}/roles/`
    Alternate route: `/v2/characters/{character_id}/roles/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/roles/')
def get_characters_character_id_search(*,
                                       categories,
                                       character_id,
                                       language,
                                       search,
                                       strict,
                                       token,
                                       accept_language='en-us',
                                       if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param categories: Type of entities to search for
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param search: The string to search on
    :param strict: Whether the search should be a strict match
    :param token: Access token to use if unable to set a header
    Search for entities that match a given sub-string.
    ---
    Alternate route: `/dev/characters/{character_id}/search/`
    Alternate route: `/legacy/characters/{character_id}/search/`
    Alternate route: `/v3/characters/{character_id}/search/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(accept_language=accept_language,
                               categories=categories,
                               character_id=character_id,
                               if_none_match=if_none_match,
                               language=language,
                               search=search,
                               strict=strict,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/search/')
def get_characters_character_id_ship(*,
                                     character_id,
                                     token,
                                     if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get the current ship type, name and id
    ---
    Alternate route: `/legacy/characters/{character_id}/ship/`
    Alternate route: `/v1/characters/{character_id}/ship/`
    ---
    This route is cached for up to 5 seconds
    ---
    [Diff of the upcoming changes](https://esi.evetech.net/diff/latest/dev/#GET-/characters/{character_id}/ship/)
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/ship/')
def get_characters_character_id_skillqueue(*,
                                           character_id,
                                           token,
                                           if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    List the configured skill queue for the given character
    ---
    Alternate route: `/dev/characters/{character_id}/skillqueue/`
    Alternate route: `/legacy/characters/{character_id}/skillqueue/`
    Alternate route: `/v2/characters/{character_id}/skillqueue/`
    ---
    This route is cached for up to 120 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/skillqueue/')
def get_characters_character_id_skills(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    List all trained skills for the given character
    ---
    Alternate route: `/dev/characters/{character_id}/skills/`
    Alternate route: `/v4/characters/{character_id}/skills/`
    ---
    This route is cached for up to 120 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/skills/')
def get_characters_character_id_standings(*,
                                          character_id,
                                          token,
                                          if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return character standings from agents, NPC corporations, and factions
    ---
    Alternate route: `/dev/characters/{character_id}/standings/`
    Alternate route: `/legacy/characters/{character_id}/standings/`
    Alternate route: `/v1/characters/{character_id}/standings/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/standings/')
def get_characters_character_id_stats(*,
                                      character_id,
                                      token,
                                      if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns aggregate yearly stats for a character
    ---
    Alternate route: `/dev/characters/{character_id}/stats/`
    Alternate route: `/v2/characters/{character_id}/stats/`
    ---
    This route is cached for up to 86400 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/stats/')
def get_characters_character_id_titles(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a character's titles
    ---
    Alternate route: `/dev/characters/{character_id}/titles/`
    Alternate route: `/legacy/characters/{character_id}/titles/`
    Alternate route: `/v1/characters/{character_id}/titles/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/titles/')
def get_characters_character_id_wallet(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a character's wallet balance
    ---
    Alternate route: `/dev/characters/{character_id}/wallet/`
    Alternate route: `/legacy/characters/{character_id}/wallet/`
    Alternate route: `/v1/characters/{character_id}/wallet/`
    ---
    This route is cached for up to 120 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/wallet/')
def get_characters_character_id_wallet_journal(*,
                                               character_id,
                                               token,
                                               if_none_match=None,
                                               page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Retrieve the given character's wallet journal going 30 days back
    ---
    Alternate route: `/dev/characters/{character_id}/wallet/journal/`
    Alternate route: `/v6/characters/{character_id}/wallet/journal/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/wallet/journal/')
def get_characters_character_id_wallet_transactions(*,
                                                    character_id,
                                                    from_id,
                                                    token,
                                                    if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param from_id: Only show transactions happened before the one referenced by this id
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get wallet transactions of a character
    ---
    Alternate route: `/dev/characters/{character_id}/wallet/transactions/`
    Alternate route: `/legacy/characters/{character_id}/wallet/transactions/`
    Alternate route: `/v1/characters/{character_id}/wallet/transactions/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        character_id=character_id,
        from_id=from_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/wallet/transactions/')
def get_contracts_public_bids_contract_id(*,
                                          contract_id,
                                          if_none_match=None,
                                          page='1'):
    """
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Lists bids on a public auction contract
    ---
    Alternate route: `/dev/contracts/public/bids/{contract_id}/`
    Alternate route: `/legacy/contracts/public/bids/{contract_id}/`
    Alternate route: `/v1/contracts/public/bids/{contract_id}/`
    ---
    This route is cached for up to 300 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(contract_id=contract_id,
                               if_none_match=if_none_match,
                               page=page,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/contracts/public/bids/{contract_id}/')
def get_contracts_public_items_contract_id(*,
                                           contract_id,
                                           if_none_match=None,
                                           page='1'):
    """
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Lists items of a public contract
    ---
    Alternate route: `/dev/contracts/public/items/{contract_id}/`
    Alternate route: `/legacy/contracts/public/items/{contract_id}/`
    Alternate route: `/v1/contracts/public/items/{contract_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(contract_id=contract_id,
                               if_none_match=if_none_match,
                               page=page,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/contracts/public/items/{contract_id}/')
def get_contracts_public_region_id(*, region_id, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param region_id: An EVE region id
    Returns a paginated list of all public contracts in the given region
    ---
    Alternate route: `/dev/contracts/public/{region_id}/`
    Alternate route: `/legacy/contracts/public/{region_id}/`
    Alternate route: `/v1/contracts/public/{region_id}/`
    ---
    This route is cached for up to 1800 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(if_none_match=if_none_match,
                               page=page,
                               region_id=region_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/contracts/public/{region_id}/')
def get_corporation_corporation_id_mining_extractions(*,
                                                      corporation_id,
                                                      token,
                                                      if_none_match=None,
                                                      page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Extraction timers for all moon chunks being extracted by refineries belonging to a corporation.
    ---
    Alternate route: `/dev/corporation/{corporation_id}/mining/extractions/`
    Alternate route: `/legacy/corporation/{corporation_id}/mining/extractions/`
    Alternate route: `/v1/corporation/{corporation_id}/mining/extractions/`
    ---
    This route is cached for up to 1800 seconds
    ---
    Requires one of the following EVE corporation role(s): Station_Manager
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporation/{corporation_id}/mining/extractions/')
def get_corporation_corporation_id_mining_observers(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None,
                                                    page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Paginated list of all entities capable of observing and recording mining for a corporation
    ---
    Alternate route: `/dev/corporation/{corporation_id}/mining/observers/`
    Alternate route: `/legacy/corporation/{corporation_id}/mining/observers/`
    Alternate route: `/v1/corporation/{corporation_id}/mining/observers/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporation/{corporation_id}/mining/observers/')
def get_corporation_corporation_id_mining_observers_observer_id(
        *, corporation_id, observer_id, token, if_none_match=None, page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param observer_id: A mining observer id
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Paginated record of all mining seen by an observer
    ---
    Alternate route: `/dev/corporation/{corporation_id}/mining/observers/{observer_id}/`
    Alternate route: `/legacy/corporation/{corporation_id}/mining/observers/{observer_id}/`
    Alternate route: `/v1/corporation/{corporation_id}/mining/observers/{observer_id}/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        observer_id=observer_id,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporation/{corporation_id}/mining/observers/{observer_id}/')
def get_corporations_npccorps(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of npc corporations
    ---
    Alternate route: `/dev/corporations/npccorps/`
    Alternate route: `/legacy/corporations/npccorps/`
    Alternate route: `/v1/corporations/npccorps/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    # Also: the path has no placeholders, so a plain string replaces the
    # pointless f-string.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/corporations/npccorps/')
def get_corporations_corporation_id(*, corporation_id, if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Public information about a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/`
    Alternate route: `/v4/corporations/{corporation_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(corporation_id=corporation_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/corporations/{corporation_id}/')
def get_corporations_corporation_id_alliancehistory(*,
                                                    corporation_id,
                                                    if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of all the alliances a corporation has been a member of
    ---
    Alternate route: `/dev/corporations/{corporation_id}/alliancehistory/`
    Alternate route: `/v2/corporations/{corporation_id}/alliancehistory/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/alliancehistory/')
def get_corporations_corporation_id_assets(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of the corporation assets
    ---
    Alternate route: `/dev/corporations/{corporation_id}/assets/`
    Alternate route: `/v3/corporations/{corporation_id}/assets/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the API data
    # (the original call discarded ESI_request.request's return value).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/assets/')
def post_corporations_corporation_id_assets_locations(*, corporation_id,
                                                      item_ids, token):
    """
    :param corporation_id: An EVE corporation ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return locations for a set of item ids, which you can get from corporation assets endpoint. Coordinates for items in hangars or stations are set to (0,0,0)
    ---
    Alternate route: `/dev/corporations/{corporation_id}/assets/locations/`
    Alternate route: `/v2/corporations/{corporation_id}/assets/locations/`
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        item_ids=item_ids,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/corporations/{corporation_id}/assets/locations/')
def post_corporations_corporation_id_assets_names(*, corporation_id, item_ids,
                                                  token):
    """
    :param corporation_id: An EVE corporation ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return names for a set of item ids, which you can get from corporation assets endpoint. Only valid for items that can customize names, like containers or ships
    ---
    Alternate route: `/dev/corporations/{corporation_id}/assets/names/`
    Alternate route: `/legacy/corporations/{corporation_id}/assets/names/`
    Alternate route: `/v1/corporations/{corporation_id}/assets/names/`
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        item_ids=item_ids,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/corporations/{corporation_id}/assets/names/')
def get_corporations_corporation_id_blueprints(*,
                                               corporation_id,
                                               token,
                                               if_none_match=None,
                                               page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns a list of blueprints the corporation owns
    ---
    Alternate route: `/dev/corporations/{corporation_id}/blueprints/`
    Alternate route: `/v2/corporations/{corporation_id}/blueprints/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/blueprints/')
def get_corporations_corporation_id_bookmarks(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    A list of your corporation's bookmarks
    ---
    Alternate route: `/dev/corporations/{corporation_id}/bookmarks/`
    Alternate route: `/legacy/corporations/{corporation_id}/bookmarks/`
    Alternate route: `/v1/corporations/{corporation_id}/bookmarks/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/bookmarks/')
def get_corporations_corporation_id_bookmarks_folders(*,
                                                      corporation_id,
                                                      token,
                                                      if_none_match=None,
                                                      page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    A list of your corporation's bookmark folders
    ---
    Alternate route: `/dev/corporations/{corporation_id}/bookmarks/folders/`
    Alternate route: `/legacy/corporations/{corporation_id}/bookmarks/folders/`
    Alternate route: `/v1/corporations/{corporation_id}/bookmarks/folders/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/bookmarks/folders/')
def get_corporations_corporation_id_contacts(*,
                                             corporation_id,
                                             token,
                                             if_none_match=None,
                                             page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return contacts of a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contacts/`
    Alternate route: `/v2/corporations/{corporation_id}/contacts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contacts/')
def get_corporations_corporation_id_contacts_labels(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return custom labels for a corporation's contacts
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contacts/labels/`
    Alternate route: `/legacy/corporations/{corporation_id}/contacts/labels/`
    Alternate route: `/v1/corporations/{corporation_id}/contacts/labels/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contacts/labels/')
def get_corporations_corporation_id_containers_logs(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None,
                                                    page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/containers/logs/`
    Alternate route: `/v2/corporations/{corporation_id}/containers/logs/`
    ---
    This route is cached for up to 600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/containers/logs/')
def get_corporations_corporation_id_contracts(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns contracts available to a corporation, only if the corporation is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is "in_progress".
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contracts/`
    Alternate route: `/legacy/corporations/{corporation_id}/contracts/`
    Alternate route: `/v1/corporations/{corporation_id}/contracts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contracts/')
def get_corporations_corporation_id_contracts_contract_id_bids(
        *, contract_id, corporation_id, token, if_none_match=None, page='1'):
    """
    :param contract_id: ID of a contract
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Lists bids on a particular auction contract
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contracts/{contract_id}/bids/`
    Alternate route: `/legacy/corporations/{corporation_id}/contracts/{contract_id}/bids/`
    Alternate route: `/v1/corporations/{corporation_id}/contracts/{contract_id}/bids/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        contract_id=contract_id,
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contracts/{contract_id}/bids/')
def get_corporations_corporation_id_contracts_contract_id_items(
        *, contract_id, corporation_id, token, if_none_match=None):
    """
    :param contract_id: ID of a contract
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Lists items of a particular contract
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contracts/{contract_id}/items/`
    Alternate route: `/legacy/corporations/{corporation_id}/contracts/{contract_id}/items/`
    Alternate route: `/v1/corporations/{corporation_id}/contracts/{contract_id}/items/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        contract_id=contract_id,
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contracts/{contract_id}/items/')
def get_corporations_corporation_id_customs_offices(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None,
                                                    page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    List customs offices owned by a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/customs_offices/`
    Alternate route: `/legacy/corporations/{corporation_id}/customs_offices/`
    Alternate route: `/v1/corporations/{corporation_id}/customs_offices/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/customs_offices/')
def get_corporations_corporation_id_divisions(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return corporation hangar and wallet division names, only show if a division is not using the default name
    ---
    Alternate route: `/dev/corporations/{corporation_id}/divisions/`
    Alternate route: `/legacy/corporations/{corporation_id}/divisions/`
    Alternate route: `/v1/corporations/{corporation_id}/divisions/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/divisions/')
def get_corporations_corporation_id_facilities(*,
                                               corporation_id,
                                               token,
                                               if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return a corporation's facilities
    ---
    Alternate route: `/dev/corporations/{corporation_id}/facilities/`
    Alternate route: `/legacy/corporations/{corporation_id}/facilities/`
    Alternate route: `/v1/corporations/{corporation_id}/facilities/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Factory_Manager
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/facilities/')
def get_corporations_corporation_id_fw_stats(*,
                                             corporation_id,
                                             token,
                                             if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Statistics about a corporation involved in faction warfare
    ---
    Alternate route: `/dev/corporations/{corporation_id}/fw/stats/`
    Alternate route: `/legacy/corporations/{corporation_id}/fw/stats/`
    Alternate route: `/v1/corporations/{corporation_id}/fw/stats/`
    ---
    This route expires daily at 11:05
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/fw/stats/')
def get_corporations_corporation_id_icons(*,
                                          corporation_id,
                                          if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :returns: the ESI response payload
    Get the icon urls for a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/icons/`
    Alternate route: `/legacy/corporations/{corporation_id}/icons/`
    Alternate route: `/v1/corporations/{corporation_id}/icons/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/icons/')
def get_corporations_corporation_id_industry_jobs(*,
                                                  corporation_id,
                                                  include_completed,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param include_completed: Whether to retrieve completed corporation industry jobs. Only includes jobs from the past 90 days
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    List industry jobs run by a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/industry/jobs/`
    Alternate route: `/legacy/corporations/{corporation_id}/industry/jobs/`
    Alternate route: `/v1/corporations/{corporation_id}/industry/jobs/`
    ---
    This route is cached for up to 300 seconds
    ---
    Requires one of the following EVE corporation role(s): Factory_Manager
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        include_completed=include_completed,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/industry/jobs/')
def get_corporations_corporation_id_killmails_recent(*,
                                                     corporation_id,
                                                     token,
                                                     if_none_match=None,
                                                     page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Get a list of a corporation's kills and losses going back 90 days
    ---
    Alternate route: `/dev/corporations/{corporation_id}/killmails/recent/`
    Alternate route: `/legacy/corporations/{corporation_id}/killmails/recent/`
    Alternate route: `/v1/corporations/{corporation_id}/killmails/recent/`
    ---
    This route is cached for up to 300 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/killmails/recent/')
def get_corporations_corporation_id_medals(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns a corporation's medals
    ---
    Alternate route: `/dev/corporations/{corporation_id}/medals/`
    Alternate route: `/legacy/corporations/{corporation_id}/medals/`
    Alternate route: `/v1/corporations/{corporation_id}/medals/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/medals/')
def get_corporations_corporation_id_medals_issued(*,
                                                  corporation_id,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns medals issued by a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/medals/issued/`
    Alternate route: `/legacy/corporations/{corporation_id}/medals/issued/`
    Alternate route: `/v1/corporations/{corporation_id}/medals/issued/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/medals/issued/')
def get_corporations_corporation_id_members(*,
                                            corporation_id,
                                            token,
                                            if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return the current member list of a corporation, the token's character need to be a member of the corporation.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/members/`
    Alternate route: `/v3/corporations/{corporation_id}/members/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/members/')
def get_corporations_corporation_id_members_limit(*,
                                                  corporation_id,
                                                  token,
                                                  if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return a corporation's member limit, not including CEO himself
    ---
    Alternate route: `/dev/corporations/{corporation_id}/members/limit/`
    Alternate route: `/legacy/corporations/{corporation_id}/members/limit/`
    Alternate route: `/v1/corporations/{corporation_id}/members/limit/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/members/limit/')
def get_corporations_corporation_id_members_titles(*,
                                                   corporation_id,
                                                   token,
                                                   if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns a corporation's members' titles
    ---
    Alternate route: `/dev/corporations/{corporation_id}/members/titles/`
    Alternate route: `/legacy/corporations/{corporation_id}/members/titles/`
    Alternate route: `/v1/corporations/{corporation_id}/members/titles/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/members/titles/')
def get_corporations_corporation_id_membertracking(*,
                                                   corporation_id,
                                                   token,
                                                   if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns additional information about a corporation's members which helps tracking their activities
    ---
    Alternate route: `/dev/corporations/{corporation_id}/membertracking/`
    Alternate route: `/legacy/corporations/{corporation_id}/membertracking/`
    Alternate route: `/v1/corporations/{corporation_id}/membertracking/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/membertracking/')
def get_corporations_corporation_id_orders(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    List open market orders placed on behalf of a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/orders/`
    Alternate route: `/v3/corporations/{corporation_id}/orders/`
    ---
    This route is cached for up to 1200 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant, Trader
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/orders/')
def get_corporations_corporation_id_orders_history(*,
                                                   corporation_id,
                                                   token,
                                                   if_none_match=None,
                                                   page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    List cancelled and expired market orders placed on behalf of a corporation up to 90 days in the past.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/orders/history/`
    Alternate route: `/v2/corporations/{corporation_id}/orders/history/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant, Trader
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/orders/history/')
def get_corporations_corporation_id_roles(*,
                                          corporation_id,
                                          token,
                                          if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return the roles of all members if the character has the personnel manager role or any grantable role.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/roles/`
    Alternate route: `/legacy/corporations/{corporation_id}/roles/`
    Alternate route: `/v1/corporations/{corporation_id}/roles/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/roles/')
def get_corporations_corporation_id_roles_history(*,
                                                  corporation_id,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return how roles have changed for a coporation's members, up to a month
    ---
    Alternate route: `/dev/corporations/{corporation_id}/roles/history/`
    Alternate route: `/legacy/corporations/{corporation_id}/roles/history/`
    Alternate route: `/v1/corporations/{corporation_id}/roles/history/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/roles/history/')
def get_corporations_corporation_id_shareholders(*,
                                                 corporation_id,
                                                 token,
                                                 if_none_match=None,
                                                 page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return the current shareholders of a corporation.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/shareholders/`
    Alternate route: `/legacy/corporations/{corporation_id}/shareholders/`
    Alternate route: `/v1/corporations/{corporation_id}/shareholders/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/shareholders/')
def get_corporations_corporation_id_standings(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Return corporation standings from agents, NPC corporations, and factions
    ---
    Alternate route: `/dev/corporations/{corporation_id}/standings/`
    Alternate route: `/legacy/corporations/{corporation_id}/standings/`
    Alternate route: `/v1/corporations/{corporation_id}/standings/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/standings/')
def get_corporations_corporation_id_starbases(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns list of corporation starbases (POSes)
    ---
    Alternate route: `/dev/corporations/{corporation_id}/starbases/`
    Alternate route: `/legacy/corporations/{corporation_id}/starbases/`
    Alternate route: `/v1/corporations/{corporation_id}/starbases/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/starbases/')
def get_corporations_corporation_id_starbases_starbase_id(
        *, corporation_id, starbase_id, system_id, token, if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param starbase_id: An EVE starbase (POS) ID
    :param system_id: The solar system this starbase (POS) is located in,
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns various settings and fuels of a starbase (POS)
    ---
    Alternate route: `/dev/corporations/{corporation_id}/starbases/{starbase_id}/`
    Alternate route: `/legacy/corporations/{corporation_id}/starbases/{starbase_id}/`
    Alternate route: `/v1/corporations/{corporation_id}/starbases/{starbase_id}/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        starbase_id=starbase_id,
        system_id=system_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/starbases/{starbase_id}/')
def get_corporations_corporation_id_structures(*,
                                               corporation_id,
                                               language,
                                               token,
                                               accept_language='en-us',
                                               if_none_match=None,
                                               page='1'):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Get a list of corporation structures. This route's version includes the changes to structures detailed in this blog: https://www.eveonline.com/article/upwell-2.0-structures-changes-coming-on-february-13th
    ---
    Alternate route: `/dev/corporations/{corporation_id}/structures/`
    Alternate route: `/v3/corporations/{corporation_id}/structures/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Station_Manager
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        accept_language=accept_language,
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        language=language,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/structures/')
def get_corporations_corporation_id_titles(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    :returns: the ESI response payload
    Returns a corporation's titles
    ---
    Alternate route: `/dev/corporations/{corporation_id}/titles/`
    Alternate route: `/legacy/corporations/{corporation_id}/titles/`
    Alternate route: `/v1/corporations/{corporation_id}/titles/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Bug fix: return the response instead of silently discarding it.
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/titles/')
def get_corporations_corporation_id_wallets(*, corporation_id, token, if_none_match=None):
    """Get a corporation's wallets.

    Routes: /dev/, /legacy/ and /v1/ corporations/{corporation_id}/wallets/;
    cached for up to 300 seconds.
    Requires one of the following EVE corporation role(s): Accountant,
    Junior_Accountant.

    :param corporation_id: an EVE corporation ID
    :param token: access token to use if unable to set a header
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/wallets/',
    )
    ESI_request.request(**call_args)
def get_corporations_corporation_id_wallets_division_journal(
        *, corporation_id, division, token, if_none_match=None, page='1'):
    """Retrieve a corporation's wallet journal for one division, 30 days back.

    Routes: /dev/ and /v4/ corporations/{corporation_id}/wallets/{division}/journal/;
    cached for up to 3600 seconds.
    Requires one of the following EVE corporation role(s): Accountant,
    Junior_Accountant.

    :param corporation_id: an EVE corporation ID
    :param division: wallet key of the division to fetch journals from
    :param token: access token to use if unable to set a header
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: which page of results to return
    """
    call_args = dict(
        corporation_id=corporation_id,
        division=division,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/wallets/{division}/journal/',
    )
    ESI_request.request(**call_args)
def get_corporations_corporation_id_wallets_division_transactions(
        *, corporation_id, division, from_id, token, if_none_match=None):
    """Get wallet transactions of a corporation.

    Routes: /dev/, /legacy/ and /v1/
    corporations/{corporation_id}/wallets/{division}/transactions/;
    cached for up to 3600 seconds.
    Requires one of the following EVE corporation role(s): Accountant,
    Junior_Accountant.

    :param corporation_id: an EVE corporation ID
    :param division: wallet key of the division to fetch journals from
    :param from_id: only show entries before the transaction referenced by this id
    :param token: access token to use if unable to set a header
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        corporation_id=corporation_id,
        division=division,
        from_id=from_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/wallets/{division}/transactions/',
    )
    ESI_request.request(**call_args)
def get_dogma_attributes(*, if_none_match=None):
    """Get a list of dogma attribute ids.

    Routes: /dev/, /legacy/ and /v1/ dogma/attributes/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/dogma/attributes/',
    )
    ESI_request.request(**call_args)
def get_dogma_attributes_attribute_id(*, attribute_id, if_none_match=None):
    """Get information on a dogma attribute.

    Routes: /dev/, /legacy/ and /v1/ dogma/attributes/{attribute_id}/;
    expires daily at 11:05.

    :param attribute_id: a dogma attribute ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        attribute_id=attribute_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/dogma/attributes/{attribute_id}/',
    )
    ESI_request.request(**call_args)
def get_dogma_dynamic_items_type_id_item_id(*, item_id, type_id, if_none_match=None):
    """Return info about a dynamic item resulting from mutation with a mutaplasmid.

    Routes: /dev/, /legacy/ and /v1/ dogma/dynamic/items/{type_id}/{item_id}/;
    expires daily at 11:05.

    :param item_id: item_id integer
    :param type_id: type_id integer
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        item_id=item_id,
        type_id=type_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/dogma/dynamic/items/{type_id}/{item_id}/',
    )
    ESI_request.request(**call_args)
def get_dogma_effects(*, if_none_match=None):
    """Get a list of dogma effect ids.

    Routes: /dev/, /legacy/ and /v1/ dogma/effects/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/dogma/effects/',
    )
    ESI_request.request(**call_args)
def get_dogma_effects_effect_id(*, effect_id, if_none_match=None):
    """Get information on a dogma effect.

    Routes: /dev/ and /v2/ dogma/effects/{effect_id}/;
    expires daily at 11:05.

    :param effect_id: a dogma effect ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        effect_id=effect_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/dogma/effects/{effect_id}/',
    )
    ESI_request.request(**call_args)
def get_fleets_fleet_id(*, fleet_id, token, if_none_match=None):
    """Return details about a fleet.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/;
    cached for up to 5 seconds.

    :param fleet_id: ID for a fleet
    :param token: access token to use if unable to set a header
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        fleet_id=fleet_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/fleets/{fleet_id}/',
    )
    ESI_request.request(**call_args)
def put_fleets_fleet_id(*, fleet_id, new_settings, token):
    """Update settings about a fleet.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/.

    :param fleet_id: ID for a fleet
    :param new_settings: what to update for this fleet
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        new_settings=new_settings,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/fleets/{fleet_id}/',
    )
    ESI_request.request(**call_args)
def get_fleets_fleet_id_members(*, fleet_id, language, token,
                                accept_language='en-us', if_none_match=None):
    """Return information about fleet members.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/members/;
    cached for up to 5 seconds.

    :param fleet_id: ID for a fleet
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language;
        takes precedence over Accept-Language
    :param token: access token to use if unable to set a header
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        accept_language=accept_language,
        fleet_id=fleet_id,
        if_none_match=if_none_match,
        language=language,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/fleets/{fleet_id}/members/',
    )
    ESI_request.request(**call_args)
def post_fleets_fleet_id_members(*, fleet_id, invitation, token):
    """Invite a character into the fleet.

    If a character has a CSPA charge set it is not possible to invite them to
    the fleet using ESI.
    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/members/.

    :param fleet_id: ID for a fleet
    :param invitation: details of the invitation
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        invitation=invitation,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/fleets/{fleet_id}/members/',
    )
    ESI_request.request(**call_args)
def delete_fleets_fleet_id_members_member_id(*, fleet_id, member_id, token):
    """Kick a fleet member.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/members/{member_id}/.

    :param fleet_id: ID for a fleet
    :param member_id: the character ID of a member in this fleet
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        member_id=member_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/fleets/{fleet_id}/members/{member_id}/',
    )
    ESI_request.request(**call_args)
def put_fleets_fleet_id_members_member_id(*, fleet_id, member_id, movement, token):
    """Move a fleet member around.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/members/{member_id}/.

    :param fleet_id: ID for a fleet
    :param member_id: the character ID of a member in this fleet
    :param movement: details of the invitation
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        member_id=member_id,
        movement=movement,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/fleets/{fleet_id}/members/{member_id}/',
    )
    ESI_request.request(**call_args)
def delete_fleets_fleet_id_squads_squad_id(*, fleet_id, squad_id, token):
    """Delete a fleet squad; only empty squads can be deleted.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/squads/{squad_id}/.

    :param fleet_id: ID for a fleet
    :param squad_id: the squad to delete
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        squad_id=squad_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/fleets/{fleet_id}/squads/{squad_id}/',
    )
    ESI_request.request(**call_args)
def put_fleets_fleet_id_squads_squad_id(*, fleet_id, naming, squad_id, token):
    """Rename a fleet squad.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/squads/{squad_id}/.

    :param fleet_id: ID for a fleet
    :param naming: new name of the squad
    :param squad_id: the squad to rename
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        naming=naming,
        squad_id=squad_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/fleets/{fleet_id}/squads/{squad_id}/',
    )
    ESI_request.request(**call_args)
def get_fleets_fleet_id_wings(*, fleet_id, language, token,
                              accept_language='en-us', if_none_match=None):
    """Return information about wings in a fleet.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/wings/;
    cached for up to 5 seconds.

    :param fleet_id: ID for a fleet
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language;
        takes precedence over Accept-Language
    :param token: access token to use if unable to set a header
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        accept_language=accept_language,
        fleet_id=fleet_id,
        if_none_match=if_none_match,
        language=language,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/fleets/{fleet_id}/wings/',
    )
    ESI_request.request(**call_args)
def post_fleets_fleet_id_wings(*, fleet_id, token):
    """Create a new wing in a fleet.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/wings/.

    :param fleet_id: ID for a fleet
    :param token: access token to use if unable to set a header
    """
    call_args = dict(
        fleet_id=fleet_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/fleets/{fleet_id}/wings/',
    )
    ESI_request.request(**call_args)
def delete_fleets_fleet_id_wings_wing_id(*, fleet_id, token, wing_id):
    """Delete a fleet wing; only empty wings can be deleted.

    The wing may contain squads, but the squads must be empty.
    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/wings/{wing_id}/.

    :param fleet_id: ID for a fleet
    :param token: access token to use if unable to set a header
    :param wing_id: the wing to delete
    """
    call_args = dict(
        fleet_id=fleet_id,
        token=token,
        wing_id=wing_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/fleets/{fleet_id}/wings/{wing_id}/',
    )
    ESI_request.request(**call_args)
def put_fleets_fleet_id_wings_wing_id(*, fleet_id, naming, token, wing_id):
    """Rename a fleet wing.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/wings/{wing_id}/.

    :param fleet_id: ID for a fleet
    :param naming: new name of the wing
    :param token: access token to use if unable to set a header
    :param wing_id: the wing to rename
    """
    call_args = dict(
        fleet_id=fleet_id,
        naming=naming,
        token=token,
        wing_id=wing_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/fleets/{fleet_id}/wings/{wing_id}/',
    )
    ESI_request.request(**call_args)
def post_fleets_fleet_id_wings_wing_id_squads(*, fleet_id, token, wing_id):
    """Create a new squad in a fleet.

    Routes: /dev/, /legacy/ and /v1/ fleets/{fleet_id}/wings/{wing_id}/squads/.

    :param fleet_id: ID for a fleet
    :param token: access token to use if unable to set a header
    :param wing_id: the wing_id to create squad in
    """
    call_args = dict(
        fleet_id=fleet_id,
        token=token,
        wing_id=wing_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/fleets/{fleet_id}/wings/{wing_id}/squads/',
    )
    ESI_request.request(**call_args)
def get_fw_leaderboards(*, if_none_match=None):
    """Top 4 leaderboard of factions for kills and victory points.

    Separated by total, last week and yesterday.
    Routes: /dev/, /legacy/ and /v1/ fw/leaderboards/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/fw/leaderboards/',
    )
    ESI_request.request(**call_args)
def get_fw_leaderboards_characters(*, if_none_match=None):
    """Top 100 leaderboard of pilots for kills and victory points.

    Separated by total, last week and yesterday.
    Routes: /dev/, /legacy/ and /v1/ fw/leaderboards/characters/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/fw/leaderboards/characters/',
    )
    ESI_request.request(**call_args)
def get_fw_leaderboards_corporations(*, if_none_match=None):
    """Top 10 leaderboard of corporations for kills and victory points.

    Separated by total, last week and yesterday.
    Routes: /dev/, /legacy/ and /v1/ fw/leaderboards/corporations/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/fw/leaderboards/corporations/',
    )
    ESI_request.request(**call_args)
def get_fw_stats(*, if_none_match=None):
    """Statistical overviews of factions involved in faction warfare.

    Routes: /dev/, /legacy/ and /v1/ fw/stats/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/fw/stats/',
    )
    ESI_request.request(**call_args)
def get_fw_systems(*, if_none_match=None):
    """An overview of the current ownership of faction warfare solar systems.

    Routes: /dev/ and /v2/ fw/systems/;
    cached for up to 1800 seconds.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/fw/systems/',
    )
    ESI_request.request(**call_args)
def get_fw_wars(*, if_none_match=None):
    """Data about which NPC factions are at war.

    Routes: /dev/, /legacy/ and /v1/ fw/wars/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/fw/wars/',
    )
    ESI_request.request(**call_args)
def get_incursions(*, if_none_match=None):
    """Return a list of current incursions.

    Routes: /dev/, /legacy/ and /v1/ incursions/;
    cached for up to 300 seconds.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/incursions/',
    )
    ESI_request.request(**call_args)
def get_industry_facilities(*, if_none_match=None):
    """Return a list of industry facilities.

    Routes: /dev/, /legacy/ and /v1/ industry/facilities/;
    cached for up to 3600 seconds.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/industry/facilities/',
    )
    ESI_request.request(**call_args)
def get_industry_systems(*, if_none_match=None):
    """Return cost indices for solar systems.

    Routes: /dev/, /legacy/ and /v1/ industry/systems/;
    cached for up to 3600 seconds.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/industry/systems/',
    )
    ESI_request.request(**call_args)
def get_insurance_prices(*, language, accept_language='en-us', if_none_match=None):
    """Return available insurance levels for all ship types.

    Routes: /dev/, /legacy/ and /v1/ insurance/prices/;
    cached for up to 3600 seconds.

    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language;
        takes precedence over Accept-Language
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/insurance/prices/',
    )
    ESI_request.request(**call_args)
def get_killmails_killmail_id_killmail_hash(*, killmail_hash, killmail_id,
                                            if_none_match=None):
    """Return a single killmail from its ID and hash.

    Routes: /dev/, /legacy/ and /v1/ killmails/{killmail_id}/{killmail_hash}/;
    cached for up to 1209600 seconds.

    :param killmail_hash: the killmail hash for verification
    :param killmail_id: the killmail ID to be queried
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        killmail_hash=killmail_hash,
        killmail_id=killmail_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/killmails/{killmail_id}/{killmail_hash}/',
    )
    ESI_request.request(**call_args)
def get_loyalty_stores_corporation_id_offers(*, corporation_id, if_none_match=None):
    """Return a list of offers from a specific corporation's loyalty store.

    Routes: /dev/, /legacy/ and /v1/ loyalty/stores/{corporation_id}/offers/;
    expires daily at 11:05.

    :param corporation_id: an EVE corporation ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/loyalty/stores/{corporation_id}/offers/',
    )
    ESI_request.request(**call_args)
def get_markets_groups(*, if_none_match=None):
    """Get a list of item groups.

    Routes: /dev/, /legacy/ and /v1/ markets/groups/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/markets/groups/',
    )
    ESI_request.request(**call_args)
def get_markets_groups_market_group_id(*, language, market_group_id,
                                       accept_language='en-us', if_none_match=None):
    """Get information on an item group.

    Routes: /dev/, /legacy/ and /v1/ markets/groups/{market_group_id}/;
    expires daily at 11:05.

    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language;
        takes precedence over Accept-Language
    :param market_group_id: an Eve item group ID
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        market_group_id=market_group_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/markets/groups/{market_group_id}/',
    )
    ESI_request.request(**call_args)
def get_markets_prices(*, if_none_match=None):
    """Return a list of prices.

    Routes: /dev/, /legacy/ and /v1/ markets/prices/;
    cached for up to 3600 seconds.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/markets/prices/',
    )
    ESI_request.request(**call_args)
def get_markets_structures_structure_id(*, structure_id, token,
                                        if_none_match=None, page='1'):
    """Return all orders in a structure.

    Routes: /dev/, /legacy/ and /v1/ markets/structures/{structure_id}/;
    cached for up to 300 seconds.

    :param structure_id: return orders in this structure
    :param token: access token to use if unable to set a header
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: which page of results to return
    """
    call_args = dict(
        if_none_match=if_none_match,
        page=page,
        structure_id=structure_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/markets/structures/{structure_id}/',
    )
    ESI_request.request(**call_args)
def get_markets_region_id_history(*, region_id, type_id, if_none_match=None):
    """Return historical market statistics for the specified type in a region.

    Routes: /dev/, /legacy/ and /v1/ markets/{region_id}/history/;
    expires daily at 11:05.

    :param region_id: return statistics in this region
    :param type_id: return statistics for this type
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        region_id=region_id,
        type_id=type_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/markets/{region_id}/history/',
    )
    ESI_request.request(**call_args)
def get_markets_region_id_orders(*, order_type, region_id, type_id,
                                 if_none_match=None, page='1'):
    """Return a list of orders in a region.

    Routes: /dev/, /legacy/ and /v1/ markets/{region_id}/orders/;
    cached for up to 300 seconds.

    :param order_type: ['buy', 'sell', 'all'] filter buy/sell orders; all by
        default. A query without type_id always returns both buy and sell orders
    :param region_id: return orders in this region
    :param type_id: return orders only for this type
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: which page of results to return
    """
    call_args = dict(
        if_none_match=if_none_match,
        order_type=order_type,
        page=page,
        region_id=region_id,
        type_id=type_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/markets/{region_id}/orders/',
    )
    ESI_request.request(**call_args)
def get_markets_region_id_types(*, region_id, if_none_match=None, page='1'):
    """Return type IDs that have active orders in the region.

    Intended for efficient market indexing.
    Routes: /dev/, /legacy/ and /v1/ markets/{region_id}/types/;
    cached for up to 600 seconds.

    :param region_id: return statistics in this region
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: which page of results to return
    """
    call_args = dict(
        if_none_match=if_none_match,
        page=page,
        region_id=region_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/markets/{region_id}/types/',
    )
    ESI_request.request(**call_args)
def get_opportunities_groups(*, if_none_match=None):
    """Return a list of opportunities groups.

    Routes: /dev/, /legacy/ and /v1/ opportunities/groups/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/opportunities/groups/',
    )
    ESI_request.request(**call_args)
def get_opportunities_groups_group_id(*, group_id, language,
                                      accept_language='en-us', if_none_match=None):
    """Return information of an opportunities group.

    Routes: /dev/, /legacy/ and /v1/ opportunities/groups/{group_id}/;
    expires daily at 11:05.

    :param group_id: ID of an opportunities group
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language;
        takes precedence over Accept-Language
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] response language
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        accept_language=accept_language,
        group_id=group_id,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/opportunities/groups/{group_id}/',
    )
    ESI_request.request(**call_args)
def get_opportunities_tasks(*, if_none_match=None):
    """Return a list of opportunities tasks.

    Routes: /dev/, /legacy/ and /v1/ opportunities/tasks/;
    expires daily at 11:05.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/opportunities/tasks/',
    )
    ESI_request.request(**call_args)
def get_opportunities_tasks_task_id(*, task_id, if_none_match=None):
    """Return information of an opportunities task.

    Routes: /dev/, /legacy/ and /v1/ opportunities/tasks/{task_id}/;
    expires daily at 11:05.

    :param task_id: ID of an opportunities task
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        if_none_match=if_none_match,
        task_id=task_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/opportunities/tasks/{task_id}/',
    )
    ESI_request.request(**call_args)
def get_route_origin_destination(*, avoid, connections, destination, flag,
                                 origin, if_none_match=None):
    """Get the systems between origin and destination.

    Routes: /dev/, /legacy/ and /v1/ route/{origin}/{destination}/;
    cached for up to 86400 seconds.

    :param avoid: avoid solar system ID(s)
    :param connections: connected solar system pairs
    :param destination: destination solar system ID
    :param flag: ['shortest', 'secure', 'insecure'] route security preference
    :param origin: origin solar system ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    """
    call_args = dict(
        avoid=avoid,
        connections=connections,
        destination=destination,
        flag=flag,
        if_none_match=if_none_match,
        origin=origin,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/route/{origin}/{destination}/',
    )
    ESI_request.request(**call_args)
def get_search(*,
               categories,
               language,
               search,
               strict,
               accept_language='en-us',
               if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param categories: Type of entities to search for
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param search: The string to search on
    :param strict: Whether the search should be a strict match
    Search for entities that match a given sub-string.
    ---
    Alternate route: `/dev/search/`
    Alternate route: `/legacy/search/`
    Alternate route: `/v2/search/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    # Path has no placeholders, so the f-string prefix was dropped.
    return ESI_request.request(
        accept_language=accept_language,
        categories=categories,
        if_none_match=if_none_match,
        language=language,
        search=search,
        strict=strict,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/search/')
def get_sovereignty_campaigns(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Shows sovereignty data for campaigns.
    ---
    Alternate route: `/dev/sovereignty/campaigns/`
    Alternate route: `/legacy/sovereignty/campaigns/`
    Alternate route: `/v1/sovereignty/campaigns/`
    ---
    This route is cached for up to 5 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/sovereignty/campaigns/')
def get_sovereignty_map(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Shows sovereignty information for solar systems
    ---
    Alternate route: `/dev/sovereignty/map/`
    Alternate route: `/legacy/sovereignty/map/`
    Alternate route: `/v1/sovereignty/map/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/sovereignty/map/')
def get_sovereignty_structures(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Shows sovereignty data for structures.
    ---
    Alternate route: `/dev/sovereignty/structures/`
    Alternate route: `/legacy/sovereignty/structures/`
    Alternate route: `/v1/sovereignty/structures/`
    ---
    This route is cached for up to 120 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/sovereignty/structures/')
def get_status(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    EVE Server status
    ---
    Alternate route: `/dev/status/`
    Alternate route: `/legacy/status/`
    Alternate route: `/v1/status/`
    ---
    This route is cached for up to 30 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/status/')
def post_ui_autopilot_waypoint(*, add_to_beginning, clear_other_waypoints,
                               destination_id, token):
    """
    :param add_to_beginning: Whether this solar system should be added to the beginning of all waypoints
    :param clear_other_waypoints: Whether clean other waypoints beforing adding this one
    :param destination_id: The destination to travel to, can be solar system, station or structure's id
    :param token: Access token to use if unable to set a header
    Set a solar system as autopilot waypoint
    ---
    Alternate route: `/dev/ui/autopilot/waypoint/`
    Alternate route: `/legacy/ui/autopilot/waypoint/`
    Alternate route: `/v2/ui/autopilot/waypoint/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        add_to_beginning=add_to_beginning,
        clear_other_waypoints=clear_other_waypoints,
        destination_id=destination_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/ui/autopilot/waypoint/')
def post_ui_openwindow_contract(*, contract_id, token):
    """
    :param contract_id: The contract to open
    :param token: Access token to use if unable to set a header
    Open the contract window inside the client
    ---
    Alternate route: `/dev/ui/openwindow/contract/`
    Alternate route: `/legacy/ui/openwindow/contract/`
    Alternate route: `/v1/ui/openwindow/contract/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        contract_id=contract_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/ui/openwindow/contract/')
def post_ui_openwindow_information(*, target_id, token):
    """
    :param target_id: The target to open
    :param token: Access token to use if unable to set a header
    Open the information window for a character, corporation or alliance inside the client
    ---
    Alternate route: `/dev/ui/openwindow/information/`
    Alternate route: `/legacy/ui/openwindow/information/`
    Alternate route: `/v1/ui/openwindow/information/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        target_id=target_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/ui/openwindow/information/')
def post_ui_openwindow_marketdetails(*, token, type_id):
    """
    :param token: Access token to use if unable to set a header
    :param type_id: The item type to open in market window
    Open the market details window for a specific typeID inside the client
    ---
    Alternate route: `/dev/ui/openwindow/marketdetails/`
    Alternate route: `/legacy/ui/openwindow/marketdetails/`
    Alternate route: `/v1/ui/openwindow/marketdetails/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        token=token,
        type_id=type_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/ui/openwindow/marketdetails/')
def post_ui_openwindow_newmail(*, new_mail, token):
    """
    :param new_mail: The details of mail to create
    :param token: Access token to use if unable to set a header
    Open the New Mail window, according to settings from the request if applicable
    ---
    Alternate route: `/dev/ui/openwindow/newmail/`
    Alternate route: `/legacy/ui/openwindow/newmail/`
    Alternate route: `/v1/ui/openwindow/newmail/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        new_mail=new_mail,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/ui/openwindow/newmail/')
def get_universe_ancestries(*,
                            language,
                            accept_language='en-us',
                            if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get all character ancestries
    ---
    Alternate route: `/dev/universe/ancestries/`
    Alternate route: `/legacy/universe/ancestries/`
    Alternate route: `/v1/universe/ancestries/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/ancestries/')
def get_universe_asteroid_belts_asteroid_belt_id(*,
                                                 asteroid_belt_id,
                                                 if_none_match=None):
    """
    :param asteroid_belt_id: asteroid_belt_id integer
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get information on an asteroid belt
    ---
    Alternate route: `/dev/universe/asteroid_belts/{asteroid_belt_id}/`
    Alternate route: `/legacy/universe/asteroid_belts/{asteroid_belt_id}/`
    Alternate route: `/v1/universe/asteroid_belts/{asteroid_belt_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        asteroid_belt_id=asteroid_belt_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/asteroid_belts/{asteroid_belt_id}/')
def get_universe_bloodlines(*,
                            language,
                            accept_language='en-us',
                            if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get a list of bloodlines
    ---
    Alternate route: `/dev/universe/bloodlines/`
    Alternate route: `/legacy/universe/bloodlines/`
    Alternate route: `/v1/universe/bloodlines/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/bloodlines/')
def get_universe_categories(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of item categories
    ---
    Alternate route: `/dev/universe/categories/`
    Alternate route: `/legacy/universe/categories/`
    Alternate route: `/v1/universe/categories/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/categories/')
def get_universe_categories_category_id(*,
                                        category_id,
                                        language,
                                        accept_language='en-us',
                                        if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param category_id: An Eve item category ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get information of an item category
    ---
    Alternate route: `/dev/universe/categories/{category_id}/`
    Alternate route: `/legacy/universe/categories/{category_id}/`
    Alternate route: `/v1/universe/categories/{category_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        category_id=category_id,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/categories/{category_id}/')
def get_universe_constellations(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of constellations
    ---
    Alternate route: `/dev/universe/constellations/`
    Alternate route: `/legacy/universe/constellations/`
    Alternate route: `/v1/universe/constellations/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/constellations/')
def get_universe_constellations_constellation_id(*,
                                                 constellation_id,
                                                 language,
                                                 accept_language='en-us',
                                                 if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param constellation_id: constellation_id integer
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get information on a constellation
    ---
    Alternate route: `/dev/universe/constellations/{constellation_id}/`
    Alternate route: `/legacy/universe/constellations/{constellation_id}/`
    Alternate route: `/v1/universe/constellations/{constellation_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        constellation_id=constellation_id,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/constellations/{constellation_id}/')
def get_universe_factions(*,
                          language,
                          accept_language='en-us',
                          if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get a list of factions
    ---
    Alternate route: `/dev/universe/factions/`
    Alternate route: `/v2/universe/factions/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/factions/')
def get_universe_graphics(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of graphics
    ---
    Alternate route: `/dev/universe/graphics/`
    Alternate route: `/legacy/universe/graphics/`
    Alternate route: `/v1/universe/graphics/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/graphics/')
def get_universe_graphics_graphic_id(*, graphic_id, if_none_match=None):
    """
    :param graphic_id: graphic_id integer
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get information on a graphic
    ---
    Alternate route: `/dev/universe/graphics/{graphic_id}/`
    Alternate route: `/legacy/universe/graphics/{graphic_id}/`
    Alternate route: `/v1/universe/graphics/{graphic_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        graphic_id=graphic_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/graphics/{graphic_id}/')
def get_universe_groups(*, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Get a list of item groups
    ---
    Alternate route: `/dev/universe/groups/`
    Alternate route: `/legacy/universe/groups/`
    Alternate route: `/v1/universe/groups/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        page=page,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/groups/')
def get_universe_groups_group_id(*,
                                 group_id,
                                 language,
                                 accept_language='en-us',
                                 if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param group_id: An Eve item group ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get information on an item group
    ---
    Alternate route: `/dev/universe/groups/{group_id}/`
    Alternate route: `/legacy/universe/groups/{group_id}/`
    Alternate route: `/v1/universe/groups/{group_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        group_id=group_id,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/groups/{group_id}/')
def post_universe_ids(*, language, names, accept_language='en-us'):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param names: The names to resolve
    Resolve a set of names to IDs in the following categories: agents, alliances, characters, constellations, corporations factions, inventory_types, regions, stations, and systems. Only exact matches will be returned. All names searched for are cached for 12 hours
    ---
    Alternate route: `/dev/universe/ids/`
    Alternate route: `/legacy/universe/ids/`
    Alternate route: `/v1/universe/ids/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        language=language,
        names=names,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/universe/ids/')
def get_universe_moons_moon_id(*, moon_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param moon_id: moon_id integer
    Get information on a moon
    ---
    Alternate route: `/dev/universe/moons/{moon_id}/`
    Alternate route: `/legacy/universe/moons/{moon_id}/`
    Alternate route: `/v1/universe/moons/{moon_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        moon_id=moon_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/moons/{moon_id}/')
def post_universe_names(*, ids):
    """
    :param ids: The ids to resolve
    Resolve a set of IDs to names and categories. Supported ID's for resolving are: Characters, Corporations, Alliances, Stations, Solar Systems, Constellations, Regions, Types, Factions
    ---
    Alternate route: `/dev/universe/names/`
    Alternate route: `/v3/universe/names/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        ids=ids,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path='/universe/names/')
def get_universe_planets_planet_id(*, planet_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param planet_id: planet_id integer
    Get information on a planet
    ---
    Alternate route: `/dev/universe/planets/{planet_id}/`
    Alternate route: `/legacy/universe/planets/{planet_id}/`
    Alternate route: `/v1/universe/planets/{planet_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        planet_id=planet_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/planets/{planet_id}/')
def get_universe_races(*,
                       language,
                       accept_language='en-us',
                       if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get a list of character races
    ---
    Alternate route: `/dev/universe/races/`
    Alternate route: `/legacy/universe/races/`
    Alternate route: `/v1/universe/races/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/races/')
def get_universe_regions(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of regions
    ---
    Alternate route: `/dev/universe/regions/`
    Alternate route: `/legacy/universe/regions/`
    Alternate route: `/v1/universe/regions/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/regions/')
def get_universe_regions_region_id(*,
                                   language,
                                   region_id,
                                   accept_language='en-us',
                                   if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param region_id: region_id integer
    Get information on a region
    ---
    Alternate route: `/dev/universe/regions/{region_id}/`
    Alternate route: `/legacy/universe/regions/{region_id}/`
    Alternate route: `/v1/universe/regions/{region_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        region_id=region_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/regions/{region_id}/')
def get_universe_schematics_schematic_id(*, schematic_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param schematic_id: A PI schematic ID
    Get information on a planetary factory schematic
    ---
    Alternate route: `/dev/universe/schematics/{schematic_id}/`
    Alternate route: `/legacy/universe/schematics/{schematic_id}/`
    Alternate route: `/v1/universe/schematics/{schematic_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        schematic_id=schematic_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/schematics/{schematic_id}/')
def get_universe_stargates_stargate_id(*, stargate_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param stargate_id: stargate_id integer
    Get information on a stargate
    ---
    Alternate route: `/dev/universe/stargates/{stargate_id}/`
    Alternate route: `/legacy/universe/stargates/{stargate_id}/`
    Alternate route: `/v1/universe/stargates/{stargate_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        stargate_id=stargate_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/stargates/{stargate_id}/')
def get_universe_stars_star_id(*, star_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param star_id: star_id integer
    Get information on a star
    ---
    Alternate route: `/dev/universe/stars/{star_id}/`
    Alternate route: `/legacy/universe/stars/{star_id}/`
    Alternate route: `/v1/universe/stars/{star_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        star_id=star_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/stars/{star_id}/')
def get_universe_stations_station_id(*, station_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param station_id: station_id integer
    Get information on a station
    ---
    Alternate route: `/dev/universe/stations/{station_id}/`
    Alternate route: `/v2/universe/stations/{station_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        station_id=station_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/stations/{station_id}/')
def get_universe_structures(*, filter, if_none_match=None):
    """
    :param filter: ['market', 'manufacturing_basic'] Only list public structures that have this service online
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    List all public structures
    ---
    Alternate route: `/dev/universe/structures/`
    Alternate route: `/legacy/universe/structures/`
    Alternate route: `/v1/universe/structures/`
    ---
    This route is cached for up to 3600 seconds
    """
    # NOTE: `filter` shadows the builtin; kept because the keyword name is
    # part of the generated public interface.
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        filter=filter,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/structures/')
def get_universe_structures_structure_id(*,
                                         structure_id,
                                         token,
                                         if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param structure_id: An Eve structure ID
    :param token: Access token to use if unable to set a header
    Returns information on requested structure if you are on the ACL. Otherwise, returns "Forbidden" for all inputs.
    ---
    Alternate route: `/dev/universe/structures/{structure_id}/`
    Alternate route: `/v2/universe/structures/{structure_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        structure_id=structure_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/structures/{structure_id}/')
def get_universe_system_jumps(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the number of jumps in solar systems within the last hour ending at the timestamp of the Last-Modified header, excluding wormhole space. Only systems with jumps will be listed
    ---
    Alternate route: `/dev/universe/system_jumps/`
    Alternate route: `/legacy/universe/system_jumps/`
    Alternate route: `/v1/universe/system_jumps/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/system_jumps/')
def get_universe_system_kills(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the number of ship, pod and NPC kills per solar system within the last hour ending at the timestamp of the Last-Modified header, excluding wormhole space. Only systems with kills will be listed
    ---
    Alternate route: `/dev/universe/system_kills/`
    Alternate route: `/v2/universe/system_kills/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/system_kills/')
def get_universe_systems(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of solar systems
    ---
    Alternate route: `/dev/universe/systems/`
    Alternate route: `/legacy/universe/systems/`
    Alternate route: `/v1/universe/systems/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/systems/')
def get_universe_systems_system_id(*,
                                   language,
                                   system_id,
                                   accept_language='en-us',
                                   if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param system_id: system_id integer
    Get information on a solar system.
    ---
    Alternate route: `/dev/universe/systems/{system_id}/`
    Alternate route: `/v4/universe/systems/{system_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        system_id=system_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/systems/{system_id}/')
def get_universe_types(*, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Get a list of type ids
    ---
    Alternate route: `/dev/universe/types/`
    Alternate route: `/legacy/universe/types/`
    Alternate route: `/v1/universe/types/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        page=page,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/universe/types/')
def get_universe_types_type_id(*,
                               language,
                               type_id,
                               accept_language='en-us',
                               if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param type_id: An Eve item type ID
    Get information on a type
    ---
    Alternate route: `/dev/universe/types/{type_id}/`
    Alternate route: `/v3/universe/types/{type_id}/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        if_none_match=if_none_match,
        language=language,
        type_id=type_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/universe/types/{type_id}/')
def get_wars(*, max_war_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param max_war_id: Only return wars with ID smaller than this
    Return a list of wars
    ---
    Alternate route: `/dev/wars/`
    Alternate route: `/legacy/wars/`
    Alternate route: `/v1/wars/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        max_war_id=max_war_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/wars/')
def get_wars_war_id(*, war_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param war_id: ID for a war
    Return details about a war
    ---
    Alternate route: `/dev/wars/{war_id}/`
    Alternate route: `/legacy/wars/{war_id}/`
    Alternate route: `/v1/wars/{war_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        war_id=war_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/wars/{war_id}/')
def get_wars_war_id_killmails(*, war_id, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param war_id: A valid war ID
    Return a list of kills related to a war
    ---
    Alternate route: `/dev/wars/{war_id}/killmails/`
    Alternate route: `/legacy/wars/{war_id}/killmails/`
    Alternate route: `/v1/wars/{war_id}/killmails/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        page=page,
        war_id=war_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/wars/{war_id}/killmails/')
| 43.060084
| 265
| 0.579654
|
# Python EVE Swagger Interface
# https://github.com/nicoscha/PESI
# ESI version 0.8.9
import ESI_request
def get_alliances(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    List all active player alliances
    ---
    Alternate route: `/dev/alliances/`
    Alternate route: `/legacy/alliances/`
    Alternate route: `/v1/alliances/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/alliances/')
def get_alliances_alliance_id(*, alliance_id, if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Public information about an alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/`
    Alternate route: `/v3/alliances/{alliance_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        alliance_id=alliance_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/alliances/{alliance_id}/')
def get_alliances_alliance_id_contacts(*,
                                       alliance_id,
                                       token,
                                       if_none_match=None,
                                       page='1'):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return contacts of an alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/contacts/`
    Alternate route: `/v2/alliances/{alliance_id}/contacts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        alliance_id=alliance_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/alliances/{alliance_id}/contacts/')
def get_alliances_alliance_id_contacts_labels(*,
                                              alliance_id,
                                              token,
                                              if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return custom labels for an alliance's contacts
    ---
    Alternate route: `/dev/alliances/{alliance_id}/contacts/labels/`
    Alternate route: `/legacy/alliances/{alliance_id}/contacts/labels/`
    Alternate route: `/v1/alliances/{alliance_id}/contacts/labels/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(alliance_id=alliance_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/alliances/{alliance_id}/contacts/labels/')
def get_alliances_alliance_id_corporations(*, alliance_id, if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    List all current member corporations of an alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/corporations/`
    Alternate route: `/legacy/alliances/{alliance_id}/corporations/`
    Alternate route: `/v1/alliances/{alliance_id}/corporations/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(alliance_id=alliance_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/alliances/{alliance_id}/corporations/')
def get_alliances_alliance_id_icons(*, alliance_id, if_none_match=None):
    """
    :param alliance_id: An EVE alliance ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the icon urls for a alliance
    ---
    Alternate route: `/dev/alliances/{alliance_id}/icons/`
    Alternate route: `/legacy/alliances/{alliance_id}/icons/`
    Alternate route: `/v1/alliances/{alliance_id}/icons/`
    ---
    This route expires daily at 11:05
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(alliance_id=alliance_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/alliances/{alliance_id}/icons/')
def post_characters_affiliation(*, characters):
    """
    :param characters: The character IDs to fetch affiliations for. All characters must exist, or none will be returned
    Bulk lookup of character IDs to corporation, alliance and faction
    ---
    Alternate route: `/dev/characters/affiliation/`
    Alternate route: `/legacy/characters/affiliation/`
    Alternate route: `/v1/characters/affiliation/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(characters=characters,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/characters/affiliation/')
def get_characters_character_id(*, character_id, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Public information about a character
    ---
    Alternate route: `/dev/characters/{character_id}/`
    Alternate route: `/v4/characters/{character_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/')
def get_characters_character_id_agents_research(*,
                                                character_id,
                                                token,
                                                if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of agents research information for a character. The formula for finding the current research points with an agent is: currentPoints = remainderPoints + pointsPerDay * days(currentTime - researchStartDate)
    ---
    Alternate route: `/dev/characters/{character_id}/agents_research/`
    Alternate route: `/legacy/characters/{character_id}/agents_research/`
    Alternate route: `/v1/characters/{character_id}/agents_research/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/agents_research/')
def get_characters_character_id_assets(*,
                                       character_id,
                                       token,
                                       if_none_match=None,
                                       page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of the characters assets
    ---
    Alternate route: `/dev/characters/{character_id}/assets/`
    Alternate route: `/v3/characters/{character_id}/assets/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/assets/')
def post_characters_character_id_assets_locations(*, character_id, item_ids,
                                                  token):
    """
    :param character_id: An EVE character ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    Return locations for a set of item ids, which you can get from character assets endpoint. Coordinates for items in hangars or stations are set to (0,0,0)
    ---
    Alternate route: `/dev/characters/{character_id}/assets/locations/`
    Alternate route: `/v2/characters/{character_id}/assets/locations/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               item_ids=item_ids,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/assets/locations/')
def post_characters_character_id_assets_names(*, character_id, item_ids,
                                              token):
    """
    :param character_id: An EVE character ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    Return names for a set of item ids, which you can get from character assets endpoint. Typically used for items that can customize names, like containers or ships.
    ---
    Alternate route: `/dev/characters/{character_id}/assets/names/`
    Alternate route: `/legacy/characters/{character_id}/assets/names/`
    Alternate route: `/v1/characters/{character_id}/assets/names/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               item_ids=item_ids,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/assets/names/')
def get_characters_character_id_attributes(*,
                                           character_id,
                                           token,
                                           if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return attributes of a character
    ---
    Alternate route: `/dev/characters/{character_id}/attributes/`
    Alternate route: `/legacy/characters/{character_id}/attributes/`
    Alternate route: `/v1/characters/{character_id}/attributes/`
    ---
    This route is cached for up to 120 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/attributes/')
def get_characters_character_id_blueprints(*,
                                           character_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of blueprints the character owns
    ---
    Alternate route: `/dev/characters/{character_id}/blueprints/`
    Alternate route: `/v2/characters/{character_id}/blueprints/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/blueprints/')
def get_characters_character_id_bookmarks(*,
                                          character_id,
                                          token,
                                          if_none_match=None,
                                          page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    A list of your character's personal bookmarks
    ---
    Alternate route: `/dev/characters/{character_id}/bookmarks/`
    Alternate route: `/v2/characters/{character_id}/bookmarks/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/bookmarks/')
def get_characters_character_id_bookmarks_folders(*,
                                                  character_id,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    A list of your character's personal bookmark folders
    ---
    Alternate route: `/dev/characters/{character_id}/bookmarks/folders/`
    Alternate route: `/v2/characters/{character_id}/bookmarks/folders/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/bookmarks/folders/')
def get_characters_character_id_calendar(*,
                                         character_id,
                                         from_event,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param from_event: The event ID to retrieve events from
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get 50 event summaries from the calendar. If no from_event ID is given, the resource will return the next 50 chronological event summaries from now. If a from_event ID is specified, it will return the next 50 chronological event summaries from after that event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/`
    Alternate route: `/legacy/characters/{character_id}/calendar/`
    Alternate route: `/v1/characters/{character_id}/calendar/`
    ---
    This route is cached for up to 5 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               from_event=from_event,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/calendar/')
def get_characters_character_id_calendar_event_id(*,
                                                  character_id,
                                                  event_id,
                                                  token,
                                                  if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param event_id: The id of the event requested
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get all the information for a specific event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/{event_id}/`
    Alternate route: `/v3/characters/{character_id}/calendar/{event_id}/`
    ---
    This route is cached for up to 5 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(
        character_id=character_id,
        event_id=event_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/calendar/{event_id}/')
def put_characters_character_id_calendar_event_id(*, character_id, event_id,
                                                  response, token):
    """
    :param character_id: An EVE character ID
    :param event_id: The ID of the event requested
    :param response: The response value to set, overriding current value
    :param token: Access token to use if unable to set a header
    Set your response status to an event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/{event_id}/`
    Alternate route: `/v3/characters/{character_id}/calendar/{event_id}/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(
        character_id=character_id,
        event_id=event_id,
        response=response,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/characters/{character_id}/calendar/{event_id}/')
def get_characters_character_id_calendar_event_id_attendees(
        *, character_id, event_id, token, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param event_id: The id of the event requested
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get all invited attendees for a given event
    ---
    Alternate route: `/dev/characters/{character_id}/calendar/{event_id}/attendees/`
    Alternate route: `/legacy/characters/{character_id}/calendar/{event_id}/attendees/`
    Alternate route: `/v1/characters/{character_id}/calendar/{event_id}/attendees/`
    ---
    This route is cached for up to 600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(
        character_id=character_id,
        event_id=event_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/calendar/{event_id}/attendees/')
def get_characters_character_id_clones(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    A list of the character's clones
    ---
    Alternate route: `/dev/characters/{character_id}/clones/`
    Alternate route: `/v3/characters/{character_id}/clones/`
    ---
    This route is cached for up to 120 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/clones/')
def delete_characters_character_id_contacts(*, character_id, contact_ids,
                                            token):
    """
    :param character_id: An EVE character ID
    :param contact_ids: A list of contacts to delete
    :param token: Access token to use if unable to set a header
    Bulk delete contacts
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               contact_ids=contact_ids,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='DELETE',
                               path=f'/characters/{character_id}/contacts/')
def get_characters_character_id_contacts(*,
                                         character_id,
                                         token,
                                         if_none_match=None,
                                         page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return contacts of a character
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/contacts/')
def post_characters_character_id_contacts(*, character_id, contact_ids,
                                          label_ids, standing, token, watched):
    """
    :param character_id: An EVE character ID
    :param contact_ids: A list of contacts
    :param label_ids: Add custom labels to the new contact
    :param standing: Standing for the contact
    :param token: Access token to use if unable to set a header
    :param watched: Whether the contact should be watched, note this is only effective on characters
    Bulk add contacts with same settings
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               contact_ids=contact_ids,
                               label_ids=label_ids,
                               standing=standing,
                               token=token,
                               watched=watched,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/contacts/')
def put_characters_character_id_contacts(*, character_id, contact_ids,
                                         label_ids, standing, token, watched):
    """
    :param character_id: An EVE character ID
    :param contact_ids: A list of contacts
    :param label_ids: Add custom labels to the contact
    :param standing: Standing for the contact
    :param token: Access token to use if unable to set a header
    :param watched: Whether the contact should be watched, note this is only effective on characters
    Bulk edit contacts with same settings
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/`
    Alternate route: `/v2/characters/{character_id}/contacts/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               contact_ids=contact_ids,
                               label_ids=label_ids,
                               standing=standing,
                               token=token,
                               watched=watched,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='PUT',
                               path=f'/characters/{character_id}/contacts/')
def get_characters_character_id_contacts_labels(*,
                                                character_id,
                                                token,
                                                if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return custom labels for a character's contacts
    ---
    Alternate route: `/dev/characters/{character_id}/contacts/labels/`
    Alternate route: `/legacy/characters/{character_id}/contacts/labels/`
    Alternate route: `/v1/characters/{character_id}/contacts/labels/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/contacts/labels/')
def get_characters_character_id_contracts(*,
                                          character_id,
                                          token,
                                          if_none_match=None,
                                          page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns contracts available to a character, only if the character is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is "in_progress".
    ---
    Alternate route: `/dev/characters/{character_id}/contracts/`
    Alternate route: `/legacy/characters/{character_id}/contracts/`
    Alternate route: `/v1/characters/{character_id}/contracts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/contracts/')
def get_characters_character_id_contracts_contract_id_bids(
        *, character_id, contract_id, token, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Lists bids on a particular auction contract
    ---
    Alternate route: `/dev/characters/{character_id}/contracts/{contract_id}/bids/`
    Alternate route: `/legacy/characters/{character_id}/contracts/{contract_id}/bids/`
    Alternate route: `/v1/characters/{character_id}/contracts/{contract_id}/bids/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(
        character_id=character_id,
        contract_id=contract_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/contracts/{contract_id}/bids/')
def get_characters_character_id_contracts_contract_id_items(
        *, character_id, contract_id, token, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Lists items of a particular contract
    ---
    Alternate route: `/dev/characters/{character_id}/contracts/{contract_id}/items/`
    Alternate route: `/legacy/characters/{character_id}/contracts/{contract_id}/items/`
    Alternate route: `/v1/characters/{character_id}/contracts/{contract_id}/items/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(
        character_id=character_id,
        contract_id=contract_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/contracts/{contract_id}/items/')
def get_characters_character_id_corporationhistory(*,
                                                   character_id,
                                                   if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of all the corporations a character has been a member of
    ---
    Alternate route: `/dev/characters/{character_id}/corporationhistory/`
    Alternate route: `/legacy/characters/{character_id}/corporationhistory/`
    Alternate route: `/v1/characters/{character_id}/corporationhistory/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/corporationhistory/')
def post_characters_character_id_cspa(*, character_id, characters, token):
    """
    :param character_id: An EVE character ID
    :param characters: The target characters to calculate the charge for
    :param token: Access token to use if unable to set a header
    Takes a source character ID in the url and a set of target character ID's in the body, returns a CSPA charge cost
    ---
    Alternate route: `/dev/characters/{character_id}/cspa/`
    Alternate route: `/v4/characters/{character_id}/cspa/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               characters=characters,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/cspa/')
def get_characters_character_id_fatigue(*,
                                        character_id,
                                        token,
                                        if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a character's jump activation and fatigue information
    ---
    Alternate route: `/dev/characters/{character_id}/fatigue/`
    Alternate route: `/legacy/characters/{character_id}/fatigue/`
    Alternate route: `/v1/characters/{character_id}/fatigue/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fatigue/')
def get_characters_character_id_fittings(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return fittings of a character
    ---
    Alternate route: `/dev/characters/{character_id}/fittings/`
    Alternate route: `/v2/characters/{character_id}/fittings/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fittings/')
def post_characters_character_id_fittings(*, character_id, fitting, token):
    """
    :param character_id: An EVE character ID
    :param fitting: Details about the new fitting
    :param token: Access token to use if unable to set a header
    Save a new fitting for a character
    ---
    Alternate route: `/dev/characters/{character_id}/fittings/`
    Alternate route: `/v2/characters/{character_id}/fittings/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               fitting=fitting,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path=f'/characters/{character_id}/fittings/')
def delete_characters_character_id_fittings_fitting_id(*, character_id,
                                                       fitting_id, token):
    """
    :param character_id: An EVE character ID
    :param fitting_id: ID for a fitting of this character
    :param token: Access token to use if unable to set a header
    Delete a fitting from a character
    ---
    Alternate route: `/dev/characters/{character_id}/fittings/{fitting_id}/`
    Alternate route: `/legacy/characters/{character_id}/fittings/{fitting_id}/`
    Alternate route: `/v1/characters/{character_id}/fittings/{fitting_id}/`
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(
        character_id=character_id,
        fitting_id=fitting_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/characters/{character_id}/fittings/{fitting_id}/')
def get_characters_character_id_fleet(*,
                                      character_id,
                                      token,
                                      if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return the fleet ID the character is in, if any.
    ---
    Alternate route: `/legacy/characters/{character_id}/fleet/`
    Alternate route: `/v1/characters/{character_id}/fleet/`
    ---
    This route is cached for up to 60 seconds
    ---
    Warning: This route has an upgrade available
    ---
    [Diff of the upcoming changes](https://esi.evetech.net/diff/latest/dev/#GET-/characters/{character_id}/fleet/)
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fleet/')
def get_characters_character_id_fw_stats(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Statistical overview of a character involved in faction warfare
    ---
    Alternate route: `/dev/characters/{character_id}/fw/stats/`
    Alternate route: `/legacy/characters/{character_id}/fw/stats/`
    Alternate route: `/v1/characters/{character_id}/fw/stats/`
    ---
    This route expires daily at 11:05
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/fw/stats/')
def get_characters_character_id_implants(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return implants on the active clone of a character
    ---
    Alternate route: `/dev/characters/{character_id}/implants/`
    Alternate route: `/legacy/characters/{character_id}/implants/`
    Alternate route: `/v1/characters/{character_id}/implants/`
    ---
    This route is cached for up to 120 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/implants/')
def get_characters_character_id_industry_jobs(*,
                                              character_id,
                                              include_completed,
                                              token,
                                              if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param include_completed: Whether to retrieve completed character industry jobs. Only includes jobs from the past 90 days
    :param token: Access token to use if unable to set a header
    List industry jobs placed by a character
    ---
    Alternate route: `/dev/characters/{character_id}/industry/jobs/`
    Alternate route: `/legacy/characters/{character_id}/industry/jobs/`
    Alternate route: `/v1/characters/{character_id}/industry/jobs/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               include_completed=include_completed,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/industry/jobs/')
def get_characters_character_id_killmails_recent(*,
                                                 character_id,
                                                 token,
                                                 if_none_match=None,
                                                 page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of a character's kills and losses going back 90 days
    ---
    Alternate route: `/dev/characters/{character_id}/killmails/recent/`
    Alternate route: `/legacy/characters/{character_id}/killmails/recent/`
    Alternate route: `/v1/characters/{character_id}/killmails/recent/`
    ---
    This route is cached for up to 300 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               page=page,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/killmails/recent/')
def get_characters_character_id_location(*,
                                         character_id,
                                         token,
                                         if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Information about the characters current location. Returns the current solar system id, and also the current station or structure ID if applicable
    ---
    Alternate route: `/legacy/characters/{character_id}/location/`
    Alternate route: `/v1/characters/{character_id}/location/`
    ---
    This route is cached for up to 5 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/location/')
def get_characters_character_id_loyalty_points(*,
                                               character_id,
                                               token,
                                               if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of loyalty points for all corporations the character has worked for
    ---
    Alternate route: `/dev/characters/{character_id}/loyalty/points/`
    Alternate route: `/legacy/characters/{character_id}/loyalty/points/`
    Alternate route: `/v1/characters/{character_id}/loyalty/points/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/loyalty/points/')
def get_characters_character_id_mail(*,
                                     character_id,
                                     labels,
                                     last_mail_id,
                                     token,
                                     if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param labels: Fetch only mails that match one or more of the given labels
    :param last_mail_id: List only mail with an ID lower than the given ID, if present
    :param token: Access token to use if unable to set a header
    Return the 50 most recent mail headers belonging to the character that match the query criteria. Queries can be filtered by label, and last_mail_id can be used to paginate backwards
    ---
    Alternate route: `/dev/characters/{character_id}/mail/`
    Alternate route: `/legacy/characters/{character_id}/mail/`
    Alternate route: `/v1/characters/{character_id}/mail/`
    ---
    This route is cached for up to 30 seconds
    """
    # Bug fix: return the response instead of silently dropping it.
    return ESI_request.request(character_id=character_id,
                               if_none_match=if_none_match,
                               labels=labels,
                               last_mail_id=last_mail_id,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/characters/{character_id}/mail/')
def post_characters_character_id_mail(*, character_id, mail, token):
    """
    :param character_id: An EVE character ID
    :param mail: The mail to send
    :param token: Access token to use if unable to set a header
    Create and send a new mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/`
    Alternate route: `/legacy/characters/{character_id}/mail/`
    Alternate route: `/v1/characters/{character_id}/mail/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        mail=mail,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/characters/{character_id}/mail/')
def get_characters_character_id_mail_labels(*,
                                            character_id,
                                            token,
                                            if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of the users mail labels, unread counts for each label and a total unread count.
    ---
    Alternate route: `/dev/characters/{character_id}/mail/labels/`
    Alternate route: `/v3/characters/{character_id}/mail/labels/`
    ---
    This route is cached for up to 30 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mail/labels/')
def post_characters_character_id_mail_labels(*, character_id, label, token):
    """
    :param character_id: An EVE character ID
    :param label: Label to create
    :param token: Access token to use if unable to set a header
    Create a mail label
    ---
    Alternate route: `/dev/characters/{character_id}/mail/labels/`
    Alternate route: `/legacy/characters/{character_id}/mail/labels/`
    Alternate route: `/v2/characters/{character_id}/mail/labels/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        label=label,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/characters/{character_id}/mail/labels/')
def delete_characters_character_id_mail_labels_label_id(
        *, character_id, label_id, token):
    """
    :param character_id: An EVE character ID
    :param label_id: An EVE label id
    :param token: Access token to use if unable to set a header
    Delete a mail label
    ---
    Alternate route: `/dev/characters/{character_id}/mail/labels/{label_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/labels/{label_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/labels/{label_id}/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        label_id=label_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/characters/{character_id}/mail/labels/{label_id}/')
def get_characters_character_id_mail_lists(*,
                                           character_id,
                                           token,
                                           if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return all mailing lists that the character is subscribed to
    ---
    Alternate route: `/dev/characters/{character_id}/mail/lists/`
    Alternate route: `/legacy/characters/{character_id}/mail/lists/`
    Alternate route: `/v1/characters/{character_id}/mail/lists/`
    ---
    This route is cached for up to 120 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mail/lists/')
def delete_characters_character_id_mail_mail_id(*, character_id, mail_id,
                                                token):
    """
    :param character_id: An EVE character ID
    :param mail_id: An EVE mail ID
    :param token: Access token to use if unable to set a header
    Delete a mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/{mail_id}/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        mail_id=mail_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='DELETE',
        path=f'/characters/{character_id}/mail/{mail_id}/')
def get_characters_character_id_mail_mail_id(*,
                                             character_id,
                                             mail_id,
                                             token,
                                             if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param mail_id: An EVE mail ID
    :param token: Access token to use if unable to set a header
    Return the contents of an EVE mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/{mail_id}/`
    ---
    This route is cached for up to 30 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        mail_id=mail_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mail/{mail_id}/')
def put_characters_character_id_mail_mail_id(*, character_id, contents,
                                             mail_id, token):
    """
    :param character_id: An EVE character ID
    :param contents: Data used to update the mail
    :param mail_id: An EVE mail ID
    :param token: Access token to use if unable to set a header
    Update metadata about a mail
    ---
    Alternate route: `/dev/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/legacy/characters/{character_id}/mail/{mail_id}/`
    Alternate route: `/v1/characters/{character_id}/mail/{mail_id}/`
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        contents=contents,
        mail_id=mail_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='PUT',
        path=f'/characters/{character_id}/mail/{mail_id}/')
def get_characters_character_id_medals(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of medals the character has
    ---
    Alternate route: `/dev/characters/{character_id}/medals/`
    Alternate route: `/legacy/characters/{character_id}/medals/`
    Alternate route: `/v1/characters/{character_id}/medals/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/medals/')
def get_characters_character_id_mining(*,
                                       character_id,
                                       token,
                                       if_none_match=None,
                                       page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Paginated record of all mining done by a character for the past 30 days
    ---
    Alternate route: `/dev/characters/{character_id}/mining/`
    Alternate route: `/legacy/characters/{character_id}/mining/`
    Alternate route: `/v1/characters/{character_id}/mining/`
    ---
    This route is cached for up to 600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/mining/')
def get_characters_character_id_notifications(*,
                                              character_id,
                                              token,
                                              if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return character notifications
    ---
    Alternate route: `/dev/characters/{character_id}/notifications/`
    Alternate route: `/v5/characters/{character_id}/notifications/`
    ---
    This route is cached for up to 600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/notifications/')
def get_characters_character_id_notifications_contacts(*,
                                                       character_id,
                                                       token,
                                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return notifications about having been added to someone's contact list
    ---
    Alternate route: `/dev/characters/{character_id}/notifications/contacts/`
    Alternate route: `/legacy/characters/{character_id}/notifications/contacts/`
    Alternate route: `/v1/characters/{character_id}/notifications/contacts/`
    ---
    This route is cached for up to 600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/notifications/contacts/')
def get_characters_character_id_online(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Checks if the character is currently online
    ---
    Alternate route: `/v2/characters/{character_id}/online/`
    ---
    This route is cached for up to 60 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/online/')
def get_characters_character_id_opportunities(*,
                                              character_id,
                                              token,
                                              if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a list of tasks finished by a character
    ---
    Alternate route: `/dev/characters/{character_id}/opportunities/`
    Alternate route: `/legacy/characters/{character_id}/opportunities/`
    Alternate route: `/v1/characters/{character_id}/opportunities/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/opportunities/')
def get_characters_character_id_orders(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    List open market orders placed by a character
    ---
    Alternate route: `/dev/characters/{character_id}/orders/`
    Alternate route: `/v2/characters/{character_id}/orders/`
    ---
    This route is cached for up to 1200 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/orders/')
def get_characters_character_id_orders_history(*,
                                               character_id,
                                               token,
                                               if_none_match=None,
                                               page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    List cancelled and expired market orders placed by a character up to 90 days in the past.
    ---
    Alternate route: `/dev/characters/{character_id}/orders/history/`
    Alternate route: `/legacy/characters/{character_id}/orders/history/`
    Alternate route: `/v1/characters/{character_id}/orders/history/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/orders/history/')
def get_characters_character_id_planets(*,
                                        character_id,
                                        token,
                                        if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a list of all planetary colonies owned by a character.
    ---
    Alternate route: `/dev/characters/{character_id}/planets/`
    Alternate route: `/legacy/characters/{character_id}/planets/`
    Alternate route: `/v1/characters/{character_id}/planets/`
    ---
    This route is cached for up to 600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/planets/')
def get_characters_character_id_planets_planet_id(*,
                                                  character_id,
                                                  planet_id,
                                                  token,
                                                  if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param planet_id: Planet id of the target planet
    :param token: Access token to use if unable to set a header
    Returns full details on the layout of a single planetary colony, including links, pins and routes. Note: Planetary information is only recalculated when the colony is viewed through the client. Information will not update until this criteria is met.
    ---
    Alternate route: `/dev/characters/{character_id}/planets/{planet_id}/`
    Alternate route: `/v3/characters/{character_id}/planets/{planet_id}/`
    ---
    This route is cached for up to 600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        planet_id=planet_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/planets/{planet_id}/')
def get_characters_character_id_portrait(*, character_id, if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get portrait urls for a character
    ---
    Alternate route: `/dev/characters/{character_id}/portrait/`
    Alternate route: `/v2/characters/{character_id}/portrait/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/portrait/')
def get_characters_character_id_roles(*,
                                      character_id,
                                      token,
                                      if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a character's corporation roles
    ---
    Alternate route: `/dev/characters/{character_id}/roles/`
    Alternate route: `/v2/characters/{character_id}/roles/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/roles/')
def get_characters_character_id_search(*,
                                       categories,
                                       character_id,
                                       language,
                                       search,
                                       strict,
                                       token,
                                       accept_language='en-us',
                                       if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param categories: Type of entities to search for
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param search: The string to search on
    :param strict: Whether the search should be a strict match
    :param token: Access token to use if unable to set a header
    Search for entities that match a given sub-string.
    ---
    Alternate route: `/dev/characters/{character_id}/search/`
    Alternate route: `/legacy/characters/{character_id}/search/`
    Alternate route: `/v3/characters/{character_id}/search/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        accept_language=accept_language,
        categories=categories,
        character_id=character_id,
        if_none_match=if_none_match,
        language=language,
        search=search,
        strict=strict,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/search/')
def get_characters_character_id_ship(*,
                                     character_id,
                                     token,
                                     if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get the current ship type, name and id
    ---
    Alternate route: `/legacy/characters/{character_id}/ship/`
    Alternate route: `/v1/characters/{character_id}/ship/`
    ---
    This route is cached for up to 5 seconds
    ---
    [Diff of the upcoming changes](https://esi.evetech.net/diff/latest/dev/#GET-/characters/{character_id}/ship/)
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/ship/')
def get_characters_character_id_skillqueue(*,
                                           character_id,
                                           token,
                                           if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    List the configured skill queue for the given character
    ---
    Alternate route: `/dev/characters/{character_id}/skillqueue/`
    Alternate route: `/legacy/characters/{character_id}/skillqueue/`
    Alternate route: `/v2/characters/{character_id}/skillqueue/`
    ---
    This route is cached for up to 120 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/skillqueue/')
def get_characters_character_id_skills(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    List all trained skills for the given character
    ---
    Alternate route: `/dev/characters/{character_id}/skills/`
    Alternate route: `/v4/characters/{character_id}/skills/`
    ---
    This route is cached for up to 120 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/skills/')
def get_characters_character_id_standings(*,
                                          character_id,
                                          token,
                                          if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return character standings from agents, NPC corporations, and factions
    ---
    Alternate route: `/dev/characters/{character_id}/standings/`
    Alternate route: `/legacy/characters/{character_id}/standings/`
    Alternate route: `/v1/characters/{character_id}/standings/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/standings/')
def get_characters_character_id_stats(*,
                                      character_id,
                                      token,
                                      if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns aggregate yearly stats for a character
    ---
    Alternate route: `/dev/characters/{character_id}/stats/`
    Alternate route: `/v2/characters/{character_id}/stats/`
    ---
    This route is cached for up to 86400 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/stats/')
def get_characters_character_id_titles(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a character's titles
    ---
    Alternate route: `/dev/characters/{character_id}/titles/`
    Alternate route: `/legacy/characters/{character_id}/titles/`
    Alternate route: `/v1/characters/{character_id}/titles/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/titles/')
def get_characters_character_id_wallet(*,
                                       character_id,
                                       token,
                                       if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a character's wallet balance
    ---
    Alternate route: `/dev/characters/{character_id}/wallet/`
    Alternate route: `/legacy/characters/{character_id}/wallet/`
    Alternate route: `/v1/characters/{character_id}/wallet/`
    ---
    This route is cached for up to 120 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/wallet/')
def get_characters_character_id_wallet_journal(*,
                                               character_id,
                                               token,
                                               if_none_match=None,
                                               page='1'):
    """
    :param character_id: An EVE character ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Retrieve the given character's wallet journal going 30 days back
    ---
    Alternate route: `/dev/characters/{character_id}/wallet/journal/`
    Alternate route: `/v6/characters/{character_id}/wallet/journal/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/wallet/journal/')
def get_characters_character_id_wallet_transactions(*,
                                                    character_id,
                                                    from_id,
                                                    token,
                                                    if_none_match=None):
    """
    :param character_id: An EVE character ID
    :param from_id: Only show transactions happened before the one referenced by this id
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Get wallet transactions of a character
    ---
    Alternate route: `/dev/characters/{character_id}/wallet/transactions/`
    Alternate route: `/legacy/characters/{character_id}/wallet/transactions/`
    Alternate route: `/v1/characters/{character_id}/wallet/transactions/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        character_id=character_id,
        from_id=from_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/characters/{character_id}/wallet/transactions/')
def get_contracts_public_bids_contract_id(*,
                                          contract_id,
                                          if_none_match=None,
                                          page='1'):
    """
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Lists bids on a public auction contract
    ---
    Alternate route: `/dev/contracts/public/bids/{contract_id}/`
    Alternate route: `/legacy/contracts/public/bids/{contract_id}/`
    Alternate route: `/v1/contracts/public/bids/{contract_id}/`
    ---
    This route is cached for up to 300 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        contract_id=contract_id,
        if_none_match=if_none_match,
        page=page,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/contracts/public/bids/{contract_id}/')
def get_contracts_public_items_contract_id(*,
                                           contract_id,
                                           if_none_match=None,
                                           page='1'):
    """
    :param contract_id: ID of a contract
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Lists items of a public contract
    ---
    Alternate route: `/dev/contracts/public/items/{contract_id}/`
    Alternate route: `/legacy/contracts/public/items/{contract_id}/`
    Alternate route: `/v1/contracts/public/items/{contract_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        contract_id=contract_id,
        if_none_match=if_none_match,
        page=page,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/contracts/public/items/{contract_id}/')
def get_contracts_public_region_id(*, region_id, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param region_id: An EVE region id
    Returns a paginated list of all public contracts in the given region
    ---
    Alternate route: `/dev/contracts/public/{region_id}/`
    Alternate route: `/legacy/contracts/public/{region_id}/`
    Alternate route: `/v1/contracts/public/{region_id}/`
    ---
    This route is cached for up to 1800 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        if_none_match=if_none_match,
        page=page,
        region_id=region_id,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/contracts/public/{region_id}/')
def get_corporation_corporation_id_mining_extractions(*,
                                                      corporation_id,
                                                      token,
                                                      if_none_match=None,
                                                      page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Extraction timers for all moon chunks being extracted by refineries belonging to a corporation.
    ---
    Alternate route: `/dev/corporation/{corporation_id}/mining/extractions/`
    Alternate route: `/legacy/corporation/{corporation_id}/mining/extractions/`
    Alternate route: `/v1/corporation/{corporation_id}/mining/extractions/`
    ---
    This route is cached for up to 1800 seconds
    ---
    Requires one of the following EVE corporation role(s): Station_Manager
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporation/{corporation_id}/mining/extractions/')
def get_corporation_corporation_id_mining_observers(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None,
                                                    page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Paginated list of all entities capable of observing and recording mining for a corporation
    ---
    Alternate route: `/dev/corporation/{corporation_id}/mining/observers/`
    Alternate route: `/legacy/corporation/{corporation_id}/mining/observers/`
    Alternate route: `/v1/corporation/{corporation_id}/mining/observers/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporation/{corporation_id}/mining/observers/')
def get_corporation_corporation_id_mining_observers_observer_id(
        *, corporation_id, observer_id, token, if_none_match=None, page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param observer_id: A mining observer id
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Paginated record of all mining seen by an observer
    ---
    Alternate route: `/dev/corporation/{corporation_id}/mining/observers/{observer_id}/`
    Alternate route: `/legacy/corporation/{corporation_id}/mining/observers/{observer_id}/`
    Alternate route: `/v1/corporation/{corporation_id}/mining/observers/{observer_id}/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        observer_id=observer_id,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporation/{corporation_id}/mining/observers/{observer_id}/')
def get_corporations_npccorps(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of npc corporations
    ---
    Alternate route: `/dev/corporations/npccorps/`
    Alternate route: `/legacy/corporations/npccorps/`
    Alternate route: `/v1/corporations/npccorps/`
    ---
    This route expires daily at 11:05
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    # Plain string literal: the original f-string had no placeholders (F541).
    return ESI_request.request(
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path='/corporations/npccorps/')
def get_corporations_corporation_id(*, corporation_id, if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Public information about a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/`
    Alternate route: `/v4/corporations/{corporation_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/')
def get_corporations_corporation_id_alliancehistory(*,
                                                    corporation_id,
                                                    if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of all the alliances a corporation has been a member of
    ---
    Alternate route: `/dev/corporations/{corporation_id}/alliancehistory/`
    Alternate route: `/v2/corporations/{corporation_id}/alliancehistory/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/alliancehistory/')
def get_corporations_corporation_id_assets(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return a list of the corporation assets
    ---
    Alternate route: `/dev/corporations/{corporation_id}/assets/`
    Alternate route: `/v3/corporations/{corporation_id}/assets/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Propagate the ESI response to the caller (it was previously discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/assets/')
def post_corporations_corporation_id_assets_locations(*, corporation_id,
                                                      item_ids, token):
    """
    :param corporation_id: An EVE corporation ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    Return locations for a set of item ids, which you can get from corporation assets endpoint. Coordinates for items in hangars or stations are set to (0,0,0)
    ---
    Alternate route: `/dev/corporations/{corporation_id}/assets/locations/`
    Alternate route: `/v2/corporations/{corporation_id}/assets/locations/`
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        item_ids=item_ids,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/corporations/{corporation_id}/assets/locations/')
def post_corporations_corporation_id_assets_names(*, corporation_id, item_ids,
                                                  token):
    """
    :param corporation_id: An EVE corporation ID
    :param item_ids: A list of item ids
    :param token: Access token to use if unable to set a header
    Return names for a set of item ids, which you can get from corporation assets endpoint. Only valid for items that can customize names, like containers or ships
    ---
    Alternate route: `/dev/corporations/{corporation_id}/assets/names/`
    Alternate route: `/legacy/corporations/{corporation_id}/assets/names/`
    Alternate route: `/v1/corporations/{corporation_id}/assets/names/`
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        item_ids=item_ids,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='POST',
        path=f'/corporations/{corporation_id}/assets/names/')
def get_corporations_corporation_id_blueprints(*,
                                               corporation_id,
                                               token,
                                               if_none_match=None,
                                               page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns a list of blueprints the corporation owns
    ---
    Alternate route: `/dev/corporations/{corporation_id}/blueprints/`
    Alternate route: `/v2/corporations/{corporation_id}/blueprints/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/blueprints/')
def get_corporations_corporation_id_bookmarks(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    A list of your corporation's bookmarks
    ---
    Alternate route: `/dev/corporations/{corporation_id}/bookmarks/`
    Alternate route: `/legacy/corporations/{corporation_id}/bookmarks/`
    Alternate route: `/v1/corporations/{corporation_id}/bookmarks/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/bookmarks/')
def get_corporations_corporation_id_bookmarks_folders(*,
                                                      corporation_id,
                                                      token,
                                                      if_none_match=None,
                                                      page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    A list of your corporation's bookmark folders
    ---
    Alternate route: `/dev/corporations/{corporation_id}/bookmarks/folders/`
    Alternate route: `/legacy/corporations/{corporation_id}/bookmarks/folders/`
    Alternate route: `/v1/corporations/{corporation_id}/bookmarks/folders/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/bookmarks/folders/')
def get_corporations_corporation_id_contacts(*,
                                             corporation_id,
                                             token,
                                             if_none_match=None,
                                             page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return contacts of a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contacts/`
    Alternate route: `/v2/corporations/{corporation_id}/contacts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contacts/')
def get_corporations_corporation_id_contacts_labels(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return custom labels for a corporation's contacts
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contacts/labels/`
    Alternate route: `/legacy/corporations/{corporation_id}/contacts/labels/`
    Alternate route: `/v1/corporations/{corporation_id}/contacts/labels/`
    ---
    This route is cached for up to 300 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contacts/labels/')
def get_corporations_corporation_id_containers_logs(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None,
                                                    page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/containers/logs/`
    Alternate route: `/v2/corporations/{corporation_id}/containers/logs/`
    ---
    This route is cached for up to 600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/containers/logs/')
def get_corporations_corporation_id_contracts(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns contracts available to a corporation, only if the corporation is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is "in_progress".
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contracts/`
    Alternate route: `/legacy/corporations/{corporation_id}/contracts/`
    Alternate route: `/v1/corporations/{corporation_id}/contracts/`
    ---
    This route is cached for up to 300 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contracts/')
def get_corporations_corporation_id_contracts_contract_id_bids(
        *, contract_id, corporation_id, token, if_none_match=None, page='1'):
    """
    :param contract_id: ID of a contract
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Lists bids on a particular auction contract
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contracts/{contract_id}/bids/`
    Alternate route: `/legacy/corporations/{corporation_id}/contracts/{contract_id}/bids/`
    Alternate route: `/v1/corporations/{corporation_id}/contracts/{contract_id}/bids/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        contract_id=contract_id,
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contracts/{contract_id}/bids/')
def get_corporations_corporation_id_contracts_contract_id_items(
        *, contract_id, corporation_id, token, if_none_match=None):
    """
    :param contract_id: ID of a contract
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Lists items of a particular contract
    ---
    Alternate route: `/dev/corporations/{corporation_id}/contracts/{contract_id}/items/`
    Alternate route: `/legacy/corporations/{corporation_id}/contracts/{contract_id}/items/`
    Alternate route: `/v1/corporations/{corporation_id}/contracts/{contract_id}/items/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        contract_id=contract_id,
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/contracts/{contract_id}/items/')
def get_corporations_corporation_id_customs_offices(*,
                                                    corporation_id,
                                                    token,
                                                    if_none_match=None,
                                                    page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    List customs offices owned by a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/customs_offices/`
    Alternate route: `/legacy/corporations/{corporation_id}/customs_offices/`
    Alternate route: `/v1/corporations/{corporation_id}/customs_offices/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/customs_offices/')
def get_corporations_corporation_id_divisions(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return corporation hangar and wallet division names, only show if a division is not using the default name
    ---
    Alternate route: `/dev/corporations/{corporation_id}/divisions/`
    Alternate route: `/legacy/corporations/{corporation_id}/divisions/`
    Alternate route: `/v1/corporations/{corporation_id}/divisions/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/divisions/')
def get_corporations_corporation_id_facilities(*,
                                               corporation_id,
                                               token,
                                               if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a corporation's facilities
    ---
    Alternate route: `/dev/corporations/{corporation_id}/facilities/`
    Alternate route: `/legacy/corporations/{corporation_id}/facilities/`
    Alternate route: `/v1/corporations/{corporation_id}/facilities/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Factory_Manager
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/facilities/')
def get_corporations_corporation_id_fw_stats(*,
                                             corporation_id,
                                             token,
                                             if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Statistics about a corporation involved in faction warfare
    ---
    Alternate route: `/dev/corporations/{corporation_id}/fw/stats/`
    Alternate route: `/legacy/corporations/{corporation_id}/fw/stats/`
    Alternate route: `/v1/corporations/{corporation_id}/fw/stats/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/fw/stats/')
def get_corporations_corporation_id_icons(*,
                                          corporation_id,
                                          if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the icon urls for a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/icons/`
    Alternate route: `/legacy/corporations/{corporation_id}/icons/`
    Alternate route: `/v1/corporations/{corporation_id}/icons/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/icons/')
def get_corporations_corporation_id_industry_jobs(*,
                                                  corporation_id,
                                                  include_completed,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param include_completed: Whether to retrieve completed corporation industry jobs. Only includes jobs from the past 90 days
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    List industry jobs run by a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/industry/jobs/`
    Alternate route: `/legacy/corporations/{corporation_id}/industry/jobs/`
    Alternate route: `/v1/corporations/{corporation_id}/industry/jobs/`
    ---
    This route is cached for up to 300 seconds
    ---
    Requires one of the following EVE corporation role(s): Factory_Manager
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        include_completed=include_completed,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/industry/jobs/')
def get_corporations_corporation_id_killmails_recent(*,
                                                     corporation_id,
                                                     token,
                                                     if_none_match=None,
                                                     page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Get a list of a corporation's kills and losses going back 90 days
    ---
    Alternate route: `/dev/corporations/{corporation_id}/killmails/recent/`
    Alternate route: `/legacy/corporations/{corporation_id}/killmails/recent/`
    Alternate route: `/v1/corporations/{corporation_id}/killmails/recent/`
    ---
    This route is cached for up to 300 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/killmails/recent/')
def get_corporations_corporation_id_medals(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns a corporation's medals
    ---
    Alternate route: `/dev/corporations/{corporation_id}/medals/`
    Alternate route: `/legacy/corporations/{corporation_id}/medals/`
    Alternate route: `/v1/corporations/{corporation_id}/medals/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/medals/')
def get_corporations_corporation_id_medals_issued(*,
                                                  corporation_id,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns medals issued by a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/medals/issued/`
    Alternate route: `/legacy/corporations/{corporation_id}/medals/issued/`
    Alternate route: `/v1/corporations/{corporation_id}/medals/issued/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/medals/issued/')
def get_corporations_corporation_id_members(*,
                                            corporation_id,
                                            token,
                                            if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return the current member list of a corporation, the token's character need to be a member of the corporation.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/members/`
    Alternate route: `/v3/corporations/{corporation_id}/members/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/members/')
def get_corporations_corporation_id_members_limit(*,
                                                  corporation_id,
                                                  token,
                                                  if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return a corporation's member limit, not including CEO himself
    ---
    Alternate route: `/dev/corporations/{corporation_id}/members/limit/`
    Alternate route: `/legacy/corporations/{corporation_id}/members/limit/`
    Alternate route: `/v1/corporations/{corporation_id}/members/limit/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/members/limit/')
def get_corporations_corporation_id_members_titles(*,
                                                   corporation_id,
                                                   token,
                                                   if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a corporation's members' titles
    ---
    Alternate route: `/dev/corporations/{corporation_id}/members/titles/`
    Alternate route: `/legacy/corporations/{corporation_id}/members/titles/`
    Alternate route: `/v1/corporations/{corporation_id}/members/titles/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/members/titles/')
def get_corporations_corporation_id_membertracking(*,
                                                   corporation_id,
                                                   token,
                                                   if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns additional information about a corporation's members which helps tracking their activities
    ---
    Alternate route: `/dev/corporations/{corporation_id}/membertracking/`
    Alternate route: `/legacy/corporations/{corporation_id}/membertracking/`
    Alternate route: `/v1/corporations/{corporation_id}/membertracking/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/membertracking/')
def get_corporations_corporation_id_orders(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None,
                                           page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    List open market orders placed on behalf of a corporation
    ---
    Alternate route: `/dev/corporations/{corporation_id}/orders/`
    Alternate route: `/v3/corporations/{corporation_id}/orders/`
    ---
    This route is cached for up to 1200 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant, Trader
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/orders/')
def get_corporations_corporation_id_orders_history(*,
                                                   corporation_id,
                                                   token,
                                                   if_none_match=None,
                                                   page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    List cancelled and expired market orders placed on behalf of a corporation up to 90 days in the past.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/orders/history/`
    Alternate route: `/v2/corporations/{corporation_id}/orders/history/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Accountant, Trader
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/orders/history/')
def get_corporations_corporation_id_roles(*,
                                          corporation_id,
                                          token,
                                          if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Return the roles of all members if the character has the personnel manager role or any grantable role.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/roles/`
    Alternate route: `/legacy/corporations/{corporation_id}/roles/`
    Alternate route: `/v1/corporations/{corporation_id}/roles/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/roles/')
def get_corporations_corporation_id_roles_history(*,
                                                  corporation_id,
                                                  token,
                                                  if_none_match=None,
                                                  page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return how roles have changed for a coporation's members, up to a month
    ---
    Alternate route: `/dev/corporations/{corporation_id}/roles/history/`
    Alternate route: `/legacy/corporations/{corporation_id}/roles/history/`
    Alternate route: `/v1/corporations/{corporation_id}/roles/history/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/roles/history/')
def get_corporations_corporation_id_shareholders(*,
                                                 corporation_id,
                                                 token,
                                                 if_none_match=None,
                                                 page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return the current shareholders of a corporation.
    ---
    Alternate route: `/dev/corporations/{corporation_id}/shareholders/`
    Alternate route: `/legacy/corporations/{corporation_id}/shareholders/`
    Alternate route: `/v1/corporations/{corporation_id}/shareholders/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/shareholders/')
def get_corporations_corporation_id_standings(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Return corporation standings from agents, NPC corporations, and factions
    ---
    Alternate route: `/dev/corporations/{corporation_id}/standings/`
    Alternate route: `/legacy/corporations/{corporation_id}/standings/`
    Alternate route: `/v1/corporations/{corporation_id}/standings/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/standings/')
def get_corporations_corporation_id_starbases(*,
                                              corporation_id,
                                              token,
                                              if_none_match=None,
                                              page='1'):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Returns list of corporation starbases (POSes)
    ---
    Alternate route: `/dev/corporations/{corporation_id}/starbases/`
    Alternate route: `/legacy/corporations/{corporation_id}/starbases/`
    Alternate route: `/v1/corporations/{corporation_id}/starbases/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/starbases/')
def get_corporations_corporation_id_starbases_starbase_id(
        *, corporation_id, starbase_id, system_id, token, if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param starbase_id: An EVE starbase (POS) ID
    :param system_id: The solar system this starbase (POS) is located in,
    :param token: Access token to use if unable to set a header
    Returns various settings and fuels of a starbase (POS)
    ---
    Alternate route: `/dev/corporations/{corporation_id}/starbases/{starbase_id}/`
    Alternate route: `/legacy/corporations/{corporation_id}/starbases/{starbase_id}/`
    Alternate route: `/v1/corporations/{corporation_id}/starbases/{starbase_id}/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        starbase_id=starbase_id,
        system_id=system_id,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/starbases/{starbase_id}/')
def get_corporations_corporation_id_structures(*,
                                               corporation_id,
                                               language,
                                               token,
                                               accept_language='en-us',
                                               if_none_match=None,
                                               page='1'):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Get a list of corporation structures. This route's version includes the changes to structures detailed in this blog: https://www.eveonline.com/article/upwell-2.0-structures-changes-coming-on-february-13th
    ---
    Alternate route: `/dev/corporations/{corporation_id}/structures/`
    Alternate route: `/v3/corporations/{corporation_id}/structures/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Station_Manager
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        accept_language=accept_language,
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        language=language,
        page=page,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/structures/')
def get_corporations_corporation_id_titles(*,
                                           corporation_id,
                                           token,
                                           if_none_match=None):
    """
    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param token: Access token to use if unable to set a header
    Returns a corporation's titles
    ---
    Alternate route: `/dev/corporations/{corporation_id}/titles/`
    Alternate route: `/legacy/corporations/{corporation_id}/titles/`
    Alternate route: `/v1/corporations/{corporation_id}/titles/`
    ---
    This route is cached for up to 3600 seconds
    ---
    Requires one of the following EVE corporation role(s): Director
    """
    # Return the response so callers can consume the fetched data
    # (previously the result of the request was silently discarded).
    return ESI_request.request(
        corporation_id=corporation_id,
        if_none_match=if_none_match,
        token=token,
        data_source='tranquility',
        version='latest',
        HTTP_method='GET',
        path=f'/corporations/{corporation_id}/titles/')
def get_corporations_corporation_id_wallets(*, corporation_id, token, if_none_match=None):
    """Get a corporation's wallets.

    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/corporations/{corporation_id}/wallets/`
    Cached for up to 300 seconds.
    Requires one of the EVE corporation roles: Accountant, Junior_Accountant
    """
    params = dict(corporation_id=corporation_id,
                  if_none_match=if_none_match,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/corporations/{corporation_id}/wallets/',
                        **params)
def get_corporations_corporation_id_wallets_division_journal(
        *, corporation_id, division, token, if_none_match=None, page='1'):
    """Retrieve the corporation's wallet journal for the given division, 30 days back.

    :param corporation_id: An EVE corporation ID
    :param division: Wallet key of the division to fetch journals from
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: Which page of results to return
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/v4` of `/corporations/{corporation_id}/wallets/{division}/journal/`
    Cached for up to 3600 seconds.
    Requires one of the EVE corporation roles: Accountant, Junior_Accountant
    """
    params = dict(corporation_id=corporation_id,
                  division=division,
                  if_none_match=if_none_match,
                  page=page,
                  token=token)
    ESI_request.request(
        data_source='tranquility', version='latest', HTTP_method='GET',
        path=f'/corporations/{corporation_id}/wallets/{division}/journal/',
        **params)
def get_corporations_corporation_id_wallets_division_transactions(
        *, corporation_id, division, from_id, token, if_none_match=None):
    """Get wallet transactions of a corporation.

    :param corporation_id: An EVE corporation ID
    :param division: Wallet key of the division to fetch journals from
    :param from_id: Only show journal entries that happened before this transaction id
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of
    `/corporations/{corporation_id}/wallets/{division}/transactions/`
    Cached for up to 3600 seconds.
    Requires one of the EVE corporation roles: Accountant, Junior_Accountant
    """
    params = dict(corporation_id=corporation_id,
                  division=division,
                  from_id=from_id,
                  if_none_match=if_none_match,
                  token=token)
    ESI_request.request(
        data_source='tranquility', version='latest', HTTP_method='GET',
        path=f'/corporations/{corporation_id}/wallets/{division}/transactions/',
        **params)
def get_dogma_attributes(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of dogma attribute ids
    ---
    Alternate route: `/dev/dogma/attributes/`
    Alternate route: `/legacy/dogma/attributes/`
    Alternate route: `/v1/dogma/attributes/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/dogma/attributes/')
def get_dogma_attributes_attribute_id(*, attribute_id, if_none_match=None):
    """Get information on a dogma attribute.

    :param attribute_id: A dogma attribute ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/dogma/attributes/{attribute_id}/`
    Expires daily at 11:05.
    """
    params = dict(attribute_id=attribute_id, if_none_match=if_none_match)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/dogma/attributes/{attribute_id}/',
                        **params)
def get_dogma_dynamic_items_type_id_item_id(*, item_id, type_id, if_none_match=None):
    """Return info about a dynamic item resulting from mutation with a mutaplasmid.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param item_id: item_id integer
    :param type_id: type_id integer
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/dogma/dynamic/items/{type_id}/{item_id}/`
    Expires daily at 11:05.
    """
    params = dict(if_none_match=if_none_match,
                  item_id=item_id,
                  type_id=type_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/dogma/dynamic/items/{type_id}/{item_id}/',
                        **params)
def get_dogma_effects(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of dogma effect ids
    ---
    Alternate route: `/dev/dogma/effects/`
    Alternate route: `/legacy/dogma/effects/`
    Alternate route: `/v1/dogma/effects/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/dogma/effects/')
def get_dogma_effects_effect_id(*, effect_id, if_none_match=None):
    """Get information on a dogma effect.

    :param effect_id: A dogma effect ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    Alternate routes: `/dev`, `/v2` of `/dogma/effects/{effect_id}/`
    Expires daily at 11:05.
    """
    params = dict(effect_id=effect_id, if_none_match=if_none_match)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/dogma/effects/{effect_id}/',
                        **params)
def get_fleets_fleet_id(*, fleet_id, token, if_none_match=None):
    """Return details about a fleet.

    :param fleet_id: ID for a fleet
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/`
    Cached for up to 5 seconds.
    """
    params = dict(fleet_id=fleet_id,
                  if_none_match=if_none_match,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/fleets/{fleet_id}/',
                        **params)
def put_fleets_fleet_id(*, fleet_id, new_settings, token):
    """Update settings about a fleet.

    :param fleet_id: ID for a fleet
    :param new_settings: What to update for this fleet
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  new_settings=new_settings,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='PUT',
                        path=f'/fleets/{fleet_id}/',
                        **params)
def get_fleets_fleet_id_members(*, fleet_id, language, token,
                                accept_language='en-us', if_none_match=None):
    """Return information about fleet members.

    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param fleet_id: ID for a fleet
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Response language; takes
        precedence over Accept-Language
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/members/`
    Cached for up to 5 seconds.
    """
    params = dict(accept_language=accept_language,
                  fleet_id=fleet_id,
                  if_none_match=if_none_match,
                  language=language,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/fleets/{fleet_id}/members/',
                        **params)
def post_fleets_fleet_id_members(*, fleet_id, invitation, token):
    """Invite a character into the fleet.

    If a character has a CSPA charge set it is not possible to invite them
    to the fleet using ESI.

    :param fleet_id: ID for a fleet
    :param invitation: Details of the invitation
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/members/`
    """
    params = dict(fleet_id=fleet_id,
                  invitation=invitation,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='POST',
                        path=f'/fleets/{fleet_id}/members/',
                        **params)
def delete_fleets_fleet_id_members_member_id(*, fleet_id, member_id, token):
    """Kick a fleet member.

    :param fleet_id: ID for a fleet
    :param member_id: The character ID of a member in this fleet
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/members/{member_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  member_id=member_id,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='DELETE',
                        path=f'/fleets/{fleet_id}/members/{member_id}/',
                        **params)
def put_fleets_fleet_id_members_member_id(*, fleet_id, member_id, movement, token):
    """Move a fleet member around.

    :param fleet_id: ID for a fleet
    :param member_id: The character ID of a member in this fleet
    :param movement: Details of the invitation
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/members/{member_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  member_id=member_id,
                  movement=movement,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='PUT',
                        path=f'/fleets/{fleet_id}/members/{member_id}/',
                        **params)
def delete_fleets_fleet_id_squads_squad_id(*, fleet_id, squad_id, token):
    """Delete a fleet squad; only empty squads can be deleted.

    :param fleet_id: ID for a fleet
    :param squad_id: The squad to delete
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/squads/{squad_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  squad_id=squad_id,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='DELETE',
                        path=f'/fleets/{fleet_id}/squads/{squad_id}/',
                        **params)
def put_fleets_fleet_id_squads_squad_id(*, fleet_id, naming, squad_id, token):
    """Rename a fleet squad.

    :param fleet_id: ID for a fleet
    :param naming: New name of the squad
    :param squad_id: The squad to rename
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/squads/{squad_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  naming=naming,
                  squad_id=squad_id,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='PUT',
                        path=f'/fleets/{fleet_id}/squads/{squad_id}/',
                        **params)
def get_fleets_fleet_id_wings(*, fleet_id, language, token,
                              accept_language='en-us', if_none_match=None):
    """Return information about wings in a fleet.

    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param fleet_id: ID for a fleet
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Response language; takes
        precedence over Accept-Language
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/wings/`
    Cached for up to 5 seconds.
    """
    params = dict(accept_language=accept_language,
                  fleet_id=fleet_id,
                  if_none_match=if_none_match,
                  language=language,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/fleets/{fleet_id}/wings/',
                        **params)
def post_fleets_fleet_id_wings(*, fleet_id, token):
    """Create a new wing in a fleet.

    :param fleet_id: ID for a fleet
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/wings/`
    """
    params = dict(fleet_id=fleet_id, token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='POST',
                        path=f'/fleets/{fleet_id}/wings/',
                        **params)
def delete_fleets_fleet_id_wings_wing_id(*, fleet_id, token, wing_id):
    """Delete a fleet wing; only empty wings can be deleted.

    The wing may contain squads, but the squads must be empty.

    :param fleet_id: ID for a fleet
    :param token: Access token to use if unable to set a header
    :param wing_id: The wing to delete
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/wings/{wing_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  token=token,
                  wing_id=wing_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='DELETE',
                        path=f'/fleets/{fleet_id}/wings/{wing_id}/',
                        **params)
def put_fleets_fleet_id_wings_wing_id(*, fleet_id, naming, token, wing_id):
    """Rename a fleet wing.

    :param fleet_id: ID for a fleet
    :param naming: New name of the wing
    :param token: Access token to use if unable to set a header
    :param wing_id: The wing to rename
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/wings/{wing_id}/`
    """
    params = dict(fleet_id=fleet_id,
                  naming=naming,
                  token=token,
                  wing_id=wing_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='PUT',
                        path=f'/fleets/{fleet_id}/wings/{wing_id}/',
                        **params)
def post_fleets_fleet_id_wings_wing_id_squads(*, fleet_id, token, wing_id):
    """Create a new squad in a fleet.

    :param fleet_id: ID for a fleet
    :param token: Access token to use if unable to set a header
    :param wing_id: The wing_id to create squad in
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/fleets/{fleet_id}/wings/{wing_id}/squads/`
    """
    params = dict(fleet_id=fleet_id,
                  token=token,
                  wing_id=wing_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='POST',
                        path=f'/fleets/{fleet_id}/wings/{wing_id}/squads/',
                        **params)
def get_fw_leaderboards(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Top 4 leaderboard of factions for kills and victory points separated by total, last week and yesterday
    ---
    Alternate route: `/dev/fw/leaderboards/`
    Alternate route: `/legacy/fw/leaderboards/`
    Alternate route: `/v1/fw/leaderboards/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/fw/leaderboards/')
def get_fw_leaderboards_characters(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Top 100 leaderboard of pilots for kills and victory points separated by total, last week and yesterday
    ---
    Alternate route: `/dev/fw/leaderboards/characters/`
    Alternate route: `/legacy/fw/leaderboards/characters/`
    Alternate route: `/v1/fw/leaderboards/characters/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/fw/leaderboards/characters/')
def get_fw_leaderboards_corporations(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Top 10 leaderboard of corporations for kills and victory points separated by total, last week and yesterday
    ---
    Alternate route: `/dev/fw/leaderboards/corporations/`
    Alternate route: `/legacy/fw/leaderboards/corporations/`
    Alternate route: `/v1/fw/leaderboards/corporations/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/fw/leaderboards/corporations/')
def get_fw_stats(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Statistical overviews of factions involved in faction warfare
    ---
    Alternate route: `/dev/fw/stats/`
    Alternate route: `/legacy/fw/stats/`
    Alternate route: `/v1/fw/stats/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/fw/stats/')
def get_fw_systems(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    An overview of the current ownership of faction warfare solar systems
    ---
    Alternate route: `/dev/fw/systems/`
    Alternate route: `/v2/fw/systems/`
    ---
    This route is cached for up to 1800 seconds
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/fw/systems/')
def get_fw_wars(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Data about which NPC factions are at war
    ---
    Alternate route: `/dev/fw/wars/`
    Alternate route: `/legacy/fw/wars/`
    Alternate route: `/v1/fw/wars/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/fw/wars/')
def get_incursions(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Return a list of current incursions
    ---
    Alternate route: `/dev/incursions/`
    Alternate route: `/legacy/incursions/`
    Alternate route: `/v1/incursions/`
    ---
    This route is cached for up to 300 seconds
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/incursions/')
def get_industry_facilities(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Return a list of industry facilities
    ---
    Alternate route: `/dev/industry/facilities/`
    Alternate route: `/legacy/industry/facilities/`
    Alternate route: `/v1/industry/facilities/`
    ---
    This route is cached for up to 3600 seconds
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/industry/facilities/')
def get_industry_systems(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Return cost indices for solar systems
    ---
    Alternate route: `/dev/industry/systems/`
    Alternate route: `/legacy/industry/systems/`
    Alternate route: `/v1/industry/systems/`
    ---
    This route is cached for up to 3600 seconds
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/industry/systems/')
def get_insurance_prices(*,
                         language,
                         accept_language='en-us',
                         if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Return available insurance levels for all ship types
    ---
    Alternate route: `/dev/insurance/prices/`
    Alternate route: `/legacy/insurance/prices/`
    Alternate route: `/v1/insurance/prices/`
    ---
    This route is cached for up to 3600 seconds
    """
    ESI_request.request(accept_language=accept_language,
                        if_none_match=if_none_match,
                        language=language,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/insurance/prices/')
def get_killmails_killmail_id_killmail_hash(*, killmail_hash, killmail_id,
                                            if_none_match=None):
    """Return a single killmail from its ID and hash.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param killmail_hash: The killmail hash for verification
    :param killmail_id: The killmail ID to be queried
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/killmails/{killmail_id}/{killmail_hash}/`
    Cached for up to 1209600 seconds.
    """
    params = dict(if_none_match=if_none_match,
                  killmail_hash=killmail_hash,
                  killmail_id=killmail_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/killmails/{killmail_id}/{killmail_hash}/',
                        **params)
def get_loyalty_stores_corporation_id_offers(*, corporation_id, if_none_match=None):
    """Return a list of offers from a specific corporation's loyalty store.

    :param corporation_id: An EVE corporation ID
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/loyalty/stores/{corporation_id}/offers/`
    Expires daily at 11:05.
    """
    params = dict(corporation_id=corporation_id, if_none_match=if_none_match)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/loyalty/stores/{corporation_id}/offers/',
                        **params)
def get_markets_groups(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of item groups
    ---
    Alternate route: `/dev/markets/groups/`
    Alternate route: `/legacy/markets/groups/`
    Alternate route: `/v1/markets/groups/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/markets/groups/')
def get_markets_groups_market_group_id(*, language, market_group_id,
                                       accept_language='en-us', if_none_match=None):
    """Get information on an item group.

    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Response language; takes
        precedence over Accept-Language
    :param market_group_id: An Eve item group ID
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/markets/groups/{market_group_id}/`
    Expires daily at 11:05.
    """
    params = dict(accept_language=accept_language,
                  if_none_match=if_none_match,
                  language=language,
                  market_group_id=market_group_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/markets/groups/{market_group_id}/',
                        **params)
def get_markets_prices(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Return a list of prices
    ---
    Alternate route: `/dev/markets/prices/`
    Alternate route: `/legacy/markets/prices/`
    Alternate route: `/v1/markets/prices/`
    ---
    This route is cached for up to 3600 seconds
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/markets/prices/')
def get_markets_structures_structure_id(*, structure_id, token,
                                        if_none_match=None, page='1'):
    """Return all orders in a structure.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: Which page of results to return
    :param structure_id: Return orders in this structure
    :param token: Access token to use if unable to set a header
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/markets/structures/{structure_id}/`
    Cached for up to 300 seconds.
    """
    params = dict(if_none_match=if_none_match,
                  page=page,
                  structure_id=structure_id,
                  token=token)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/markets/structures/{structure_id}/',
                        **params)
def get_markets_region_id_history(*, region_id, type_id, if_none_match=None):
    """Return a list of historical market statistics for the specified type in a region.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param region_id: Return statistics in this region
    :param type_id: Return statistics for this type
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/markets/{region_id}/history/`
    Expires daily at 11:05.
    """
    params = dict(if_none_match=if_none_match,
                  region_id=region_id,
                  type_id=type_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/markets/{region_id}/history/',
                        **params)
def get_markets_region_id_orders(*, order_type, region_id, type_id,
                                 if_none_match=None, page='1'):
    """Return a list of orders in a region.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param order_type: ['buy', 'sell', 'all'] Filter buy/sell orders, return all
        orders by default. Without type_id, both buy and sell orders are returned
    :param page: Which page of results to return
    :param region_id: Return orders in this region
    :param type_id: Return orders only for this type
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/markets/{region_id}/orders/`
    Cached for up to 300 seconds.
    """
    params = dict(if_none_match=if_none_match,
                  order_type=order_type,
                  page=page,
                  region_id=region_id,
                  type_id=type_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/markets/{region_id}/orders/',
                        **params)
def get_markets_region_id_types(*, region_id, if_none_match=None, page='1'):
    """Return type IDs that have active orders in the region, for efficient market indexing.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param page: Which page of results to return
    :param region_id: Return statistics in this region
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/markets/{region_id}/types/`
    Cached for up to 600 seconds.
    """
    params = dict(if_none_match=if_none_match,
                  page=page,
                  region_id=region_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/markets/{region_id}/types/',
                        **params)
def get_opportunities_groups(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Return a list of opportunities groups
    ---
    Alternate route: `/dev/opportunities/groups/`
    Alternate route: `/legacy/opportunities/groups/`
    Alternate route: `/v1/opportunities/groups/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/opportunities/groups/')
def get_opportunities_groups_group_id(*, group_id, language,
                                      accept_language='en-us', if_none_match=None):
    """Return information of an opportunities group.

    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param group_id: ID of an opportunities group
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Response language; takes
        precedence over Accept-Language
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/opportunities/groups/{group_id}/`
    Expires daily at 11:05.
    """
    params = dict(accept_language=accept_language,
                  group_id=group_id,
                  if_none_match=if_none_match,
                  language=language)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/opportunities/groups/{group_id}/',
                        **params)
def get_opportunities_tasks(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Return a list of opportunities tasks
    ---
    Alternate route: `/dev/opportunities/tasks/`
    Alternate route: `/legacy/opportunities/tasks/`
    Alternate route: `/v1/opportunities/tasks/`
    ---
    This route expires daily at 11:05
    """
    ESI_request.request(if_none_match=if_none_match,
                        data_source='tranquility',
                        version='latest',
                        HTTP_method='GET',
                        # plain string: no placeholders, so the f-prefix was extraneous (F541)
                        path='/opportunities/tasks/')
def get_opportunities_tasks_task_id(*, task_id, if_none_match=None):
    """Return information of an opportunities task.

    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param task_id: ID of an opportunities task
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/opportunities/tasks/{task_id}/`
    Expires daily at 11:05.
    """
    params = dict(if_none_match=if_none_match, task_id=task_id)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/opportunities/tasks/{task_id}/',
                        **params)
def get_route_origin_destination(*, avoid, connections, destination, flag,
                                 origin, if_none_match=None):
    """Get the systems between origin and destination.

    :param avoid: avoid solar system ID(s)
    :param connections: connected solar system pairs
    :param destination: destination solar system ID
    :param flag: ['shortest', 'secure', 'insecure'] route security preference
    :param if_none_match: ETag from a previous request; a 304 is returned on match
    :param origin: origin solar system ID
    Alternate routes: `/dev`, `/legacy`, `/v1` of `/route/{origin}/{destination}/`
    Cached for up to 86400 seconds.
    """
    params = dict(avoid=avoid,
                  connections=connections,
                  destination=destination,
                  flag=flag,
                  if_none_match=if_none_match,
                  origin=origin)
    ESI_request.request(data_source='tranquility', version='latest',
                        HTTP_method='GET',
                        path=f'/route/{origin}/{destination}/',
                        **params)
def get_search(*,
               categories,
               language,
               search,
               strict,
               accept_language='en-us',
               if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param categories: Type of entities to search for
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param search: The string to search on
    :param strict: Whether the search should be a strict match
    Search for entities that match a given sub-string.
    ---
    Alternate route: `/dev/search/`
    Alternate route: `/legacy/search/`
    Alternate route: `/v2/search/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response; the path has no placeholders, so no f-string needed.
    return ESI_request.request(accept_language=accept_language,
                               categories=categories,
                               if_none_match=if_none_match,
                               language=language,
                               search=search,
                               strict=strict,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/search/')
def get_sovereignty_campaigns(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Shows sovereignty data for campaigns.
    ---
    Alternate route: `/dev/sovereignty/campaigns/`
    Alternate route: `/legacy/sovereignty/campaigns/`
    Alternate route: `/v1/sovereignty/campaigns/`
    ---
    This route is cached for up to 5 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/sovereignty/campaigns/')
def get_sovereignty_map(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Shows sovereignty information for solar systems
    ---
    Alternate route: `/dev/sovereignty/map/`
    Alternate route: `/legacy/sovereignty/map/`
    Alternate route: `/v1/sovereignty/map/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/sovereignty/map/')
def get_sovereignty_structures(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Shows sovereignty data for structures.
    ---
    Alternate route: `/dev/sovereignty/structures/`
    Alternate route: `/legacy/sovereignty/structures/`
    Alternate route: `/v1/sovereignty/structures/`
    ---
    This route is cached for up to 120 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/sovereignty/structures/')
def get_status(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    EVE Server status
    ---
    Alternate route: `/dev/status/`
    Alternate route: `/legacy/status/`
    Alternate route: `/v1/status/`
    ---
    This route is cached for up to 30 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/status/')
def post_ui_autopilot_waypoint(*, add_to_beginning, clear_other_waypoints,
                               destination_id, token):
    """
    :param add_to_beginning: Whether this solar system should be added to the beginning of all waypoints
    :param clear_other_waypoints: Whether clean other waypoints beforing adding this one
    :param destination_id: The destination to travel to, can be solar system, station or structure's id
    :param token: Access token to use if unable to set a header
    Set a solar system as autopilot waypoint
    ---
    Alternate route: `/dev/ui/autopilot/waypoint/`
    Alternate route: `/legacy/ui/autopilot/waypoint/`
    Alternate route: `/v2/ui/autopilot/waypoint/`
    """
    # Return the response so callers can inspect the POST result.
    return ESI_request.request(add_to_beginning=add_to_beginning,
                               clear_other_waypoints=clear_other_waypoints,
                               destination_id=destination_id,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/ui/autopilot/waypoint/')
def post_ui_openwindow_contract(*, contract_id, token):
    """
    :param contract_id: The contract to open
    :param token: Access token to use if unable to set a header
    Open the contract window inside the client
    ---
    Alternate route: `/dev/ui/openwindow/contract/`
    Alternate route: `/legacy/ui/openwindow/contract/`
    Alternate route: `/v1/ui/openwindow/contract/`
    """
    # Return the response so callers can inspect the POST result.
    return ESI_request.request(contract_id=contract_id,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/ui/openwindow/contract/')
def post_ui_openwindow_information(*, target_id, token):
    """
    :param target_id: The target to open
    :param token: Access token to use if unable to set a header
    Open the information window for a character, corporation or alliance inside the client
    ---
    Alternate route: `/dev/ui/openwindow/information/`
    Alternate route: `/legacy/ui/openwindow/information/`
    Alternate route: `/v1/ui/openwindow/information/`
    """
    # Return the response so callers can inspect the POST result.
    return ESI_request.request(target_id=target_id,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/ui/openwindow/information/')
def post_ui_openwindow_marketdetails(*, token, type_id):
    """
    :param token: Access token to use if unable to set a header
    :param type_id: The item type to open in market window
    Open the market details window for a specific typeID inside the client
    ---
    Alternate route: `/dev/ui/openwindow/marketdetails/`
    Alternate route: `/legacy/ui/openwindow/marketdetails/`
    Alternate route: `/v1/ui/openwindow/marketdetails/`
    """
    # Return the response so callers can inspect the POST result.
    return ESI_request.request(token=token,
                               type_id=type_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/ui/openwindow/marketdetails/')
def post_ui_openwindow_newmail(*, new_mail, token):
    """
    :param new_mail: The details of mail to create
    :param token: Access token to use if unable to set a header
    Open the New Mail window, according to settings from the request if applicable
    ---
    Alternate route: `/dev/ui/openwindow/newmail/`
    Alternate route: `/legacy/ui/openwindow/newmail/`
    Alternate route: `/v1/ui/openwindow/newmail/`
    """
    # Return the response so callers can inspect the POST result.
    return ESI_request.request(new_mail=new_mail,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/ui/openwindow/newmail/')
def get_universe_ancestries(*,
                            language,
                            accept_language='en-us',
                            if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get all character ancestries
    ---
    Alternate route: `/dev/universe/ancestries/`
    Alternate route: `/legacy/universe/ancestries/`
    Alternate route: `/v1/universe/ancestries/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/ancestries/')
def get_universe_asteroid_belts_asteroid_belt_id(*,
                                                 asteroid_belt_id,
                                                 if_none_match=None):
    """
    :param asteroid_belt_id: asteroid_belt_id integer
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get information on an asteroid belt
    ---
    Alternate route: `/dev/universe/asteroid_belts/{asteroid_belt_id}/`
    Alternate route: `/legacy/universe/asteroid_belts/{asteroid_belt_id}/`
    Alternate route: `/v1/universe/asteroid_belts/{asteroid_belt_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(asteroid_belt_id=asteroid_belt_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/asteroid_belts/{asteroid_belt_id}/')
def get_universe_bloodlines(*,
                            language,
                            accept_language='en-us',
                            if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get a list of bloodlines
    ---
    Alternate route: `/dev/universe/bloodlines/`
    Alternate route: `/legacy/universe/bloodlines/`
    Alternate route: `/v1/universe/bloodlines/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/bloodlines/')
def get_universe_categories(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of item categories
    ---
    Alternate route: `/dev/universe/categories/`
    Alternate route: `/legacy/universe/categories/`
    Alternate route: `/v1/universe/categories/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/categories/')
def get_universe_categories_category_id(*,
                                        category_id,
                                        language,
                                        accept_language='en-us',
                                        if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param category_id: An Eve item category ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get information of an item category
    ---
    Alternate route: `/dev/universe/categories/{category_id}/`
    Alternate route: `/legacy/universe/categories/{category_id}/`
    Alternate route: `/v1/universe/categories/{category_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               category_id=category_id,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/categories/{category_id}/')
def get_universe_constellations(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of constellations
    ---
    Alternate route: `/dev/universe/constellations/`
    Alternate route: `/legacy/universe/constellations/`
    Alternate route: `/v1/universe/constellations/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/constellations/')
def get_universe_constellations_constellation_id(*,
                                                 constellation_id,
                                                 language,
                                                 accept_language='en-us',
                                                 if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param constellation_id: constellation_id integer
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get information on a constellation
    ---
    Alternate route: `/dev/universe/constellations/{constellation_id}/`
    Alternate route: `/legacy/universe/constellations/{constellation_id}/`
    Alternate route: `/v1/universe/constellations/{constellation_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               constellation_id=constellation_id,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/constellations/{constellation_id}/')
def get_universe_factions(*,
                          language,
                          accept_language='en-us',
                          if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get a list of factions
    ---
    Alternate route: `/dev/universe/factions/`
    Alternate route: `/v2/universe/factions/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/factions/')
def get_universe_graphics(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of graphics
    ---
    Alternate route: `/dev/universe/graphics/`
    Alternate route: `/legacy/universe/graphics/`
    Alternate route: `/v1/universe/graphics/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/graphics/')
def get_universe_graphics_graphic_id(*, graphic_id, if_none_match=None):
    """
    :param graphic_id: graphic_id integer
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get information on a graphic
    ---
    Alternate route: `/dev/universe/graphics/{graphic_id}/`
    Alternate route: `/legacy/universe/graphics/{graphic_id}/`
    Alternate route: `/v1/universe/graphics/{graphic_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(graphic_id=graphic_id,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/graphics/{graphic_id}/')
def get_universe_groups(*, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Get a list of item groups
    ---
    Alternate route: `/dev/universe/groups/`
    Alternate route: `/legacy/universe/groups/`
    Alternate route: `/v1/universe/groups/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               page=page,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/groups/')
def get_universe_groups_group_id(*,
                                 group_id,
                                 language,
                                 accept_language='en-us',
                                 if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param group_id: An Eve item group ID
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get information on an item group
    ---
    Alternate route: `/dev/universe/groups/{group_id}/`
    Alternate route: `/legacy/universe/groups/{group_id}/`
    Alternate route: `/v1/universe/groups/{group_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               group_id=group_id,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/groups/{group_id}/')
def post_universe_ids(*, language, names, accept_language='en-us'):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param names: The names to resolve
    Resolve a set of names to IDs in the following categories: agents, alliances, characters, constellations, corporations factions, inventory_types, regions, stations, and systems. Only exact matches will be returned. All names searched for are cached for 12 hours
    ---
    Alternate route: `/dev/universe/ids/`
    Alternate route: `/legacy/universe/ids/`
    Alternate route: `/v1/universe/ids/`
    """
    # Return the response so callers actually receive the resolved IDs.
    return ESI_request.request(accept_language=accept_language,
                               language=language,
                               names=names,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/universe/ids/')
def get_universe_moons_moon_id(*, moon_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param moon_id: moon_id integer
    Get information on a moon
    ---
    Alternate route: `/dev/universe/moons/{moon_id}/`
    Alternate route: `/legacy/universe/moons/{moon_id}/`
    Alternate route: `/v1/universe/moons/{moon_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               moon_id=moon_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/moons/{moon_id}/')
def post_universe_names(*, ids):
    """
    :param ids: The ids to resolve
    Resolve a set of IDs to names and categories. Supported ID's for resolving are: Characters, Corporations, Alliances, Stations, Solar Systems, Constellations, Regions, Types, Factions
    ---
    Alternate route: `/dev/universe/names/`
    Alternate route: `/v3/universe/names/`
    """
    # Return the response so callers actually receive the resolved names.
    return ESI_request.request(ids=ids,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='POST',
                               path='/universe/names/')
def get_universe_planets_planet_id(*, planet_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param planet_id: planet_id integer
    Get information on a planet
    ---
    Alternate route: `/dev/universe/planets/{planet_id}/`
    Alternate route: `/legacy/universe/planets/{planet_id}/`
    Alternate route: `/v1/universe/planets/{planet_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               planet_id=planet_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/planets/{planet_id}/')
def get_universe_races(*,
                       language,
                       accept_language='en-us',
                       if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    Get a list of character races
    ---
    Alternate route: `/dev/universe/races/`
    Alternate route: `/legacy/universe/races/`
    Alternate route: `/v1/universe/races/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/races/')
def get_universe_regions(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of regions
    ---
    Alternate route: `/dev/universe/regions/`
    Alternate route: `/legacy/universe/regions/`
    Alternate route: `/v1/universe/regions/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/regions/')
def get_universe_regions_region_id(*,
                                   language,
                                   region_id,
                                   accept_language='en-us',
                                   if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param region_id: region_id integer
    Get information on a region
    ---
    Alternate route: `/dev/universe/regions/{region_id}/`
    Alternate route: `/legacy/universe/regions/{region_id}/`
    Alternate route: `/v1/universe/regions/{region_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               region_id=region_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/regions/{region_id}/')
def get_universe_schematics_schematic_id(*, schematic_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param schematic_id: A PI schematic ID
    Get information on a planetary factory schematic
    ---
    Alternate route: `/dev/universe/schematics/{schematic_id}/`
    Alternate route: `/legacy/universe/schematics/{schematic_id}/`
    Alternate route: `/v1/universe/schematics/{schematic_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               schematic_id=schematic_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/schematics/{schematic_id}/')
def get_universe_stargates_stargate_id(*, stargate_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param stargate_id: stargate_id integer
    Get information on a stargate
    ---
    Alternate route: `/dev/universe/stargates/{stargate_id}/`
    Alternate route: `/legacy/universe/stargates/{stargate_id}/`
    Alternate route: `/v1/universe/stargates/{stargate_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               stargate_id=stargate_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/stargates/{stargate_id}/')
def get_universe_stars_star_id(*, star_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param star_id: star_id integer
    Get information on a star
    ---
    Alternate route: `/dev/universe/stars/{star_id}/`
    Alternate route: `/legacy/universe/stars/{star_id}/`
    Alternate route: `/v1/universe/stars/{star_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               star_id=star_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/stars/{star_id}/')
def get_universe_stations_station_id(*, station_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param station_id: station_id integer
    Get information on a station
    ---
    Alternate route: `/dev/universe/stations/{station_id}/`
    Alternate route: `/v2/universe/stations/{station_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               station_id=station_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/stations/{station_id}/')
def get_universe_structures(*, filter, if_none_match=None):
    """
    :param filter: ['market', 'manufacturing_basic'] Only list public structures that have this service online
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    List all public structures
    ---
    Alternate route: `/dev/universe/structures/`
    Alternate route: `/legacy/universe/structures/`
    Alternate route: `/v1/universe/structures/`
    ---
    This route is cached for up to 3600 seconds
    """
    # NOTE: `filter` shadows the builtin, but renaming it would break
    # keyword callers, so the public name is kept.
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(filter=filter,
                               if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/structures/')
def get_universe_structures_structure_id(*,
                                         structure_id,
                                         token,
                                         if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param structure_id: An Eve structure ID
    :param token: Access token to use if unable to set a header
    Returns information on requested structure if you are on the ACL. Otherwise, returns "Forbidden" for all inputs.
    ---
    Alternate route: `/dev/universe/structures/{structure_id}/`
    Alternate route: `/v2/universe/structures/{structure_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               structure_id=structure_id,
                               token=token,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/structures/{structure_id}/')
def get_universe_system_jumps(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the number of jumps in solar systems within the last hour ending at the timestamp of the Last-Modified header, excluding wormhole space. Only systems with jumps will be listed
    ---
    Alternate route: `/dev/universe/system_jumps/`
    Alternate route: `/legacy/universe/system_jumps/`
    Alternate route: `/v1/universe/system_jumps/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/system_jumps/')
def get_universe_system_kills(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get the number of ship, pod and NPC kills per solar system within the last hour ending at the timestamp of the Last-Modified header, excluding wormhole space. Only systems with kills will be listed
    ---
    Alternate route: `/dev/universe/system_kills/`
    Alternate route: `/v2/universe/system_kills/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/system_kills/')
def get_universe_systems(*, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    Get a list of solar systems
    ---
    Alternate route: `/dev/universe/systems/`
    Alternate route: `/legacy/universe/systems/`
    Alternate route: `/v1/universe/systems/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/systems/')
def get_universe_systems_system_id(*,
                                   language,
                                   system_id,
                                   accept_language='en-us',
                                   if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param system_id: system_id integer
    Get information on a solar system.
    ---
    Alternate route: `/dev/universe/systems/{system_id}/`
    Alternate route: `/v4/universe/systems/{system_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               system_id=system_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/systems/{system_id}/')
def get_universe_types(*, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    Get a list of type ids
    ---
    Alternate route: `/dev/universe/types/`
    Alternate route: `/legacy/universe/types/`
    Alternate route: `/v1/universe/types/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               page=page,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/universe/types/')
def get_universe_types_type_id(*,
                               language,
                               type_id,
                               accept_language='en-us',
                               if_none_match=None):
    """
    :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language
    :param type_id: An Eve item type ID
    Get information on a type
    ---
    Alternate route: `/dev/universe/types/{type_id}/`
    Alternate route: `/v3/universe/types/{type_id}/`
    ---
    This route expires daily at 11:05
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(accept_language=accept_language,
                               if_none_match=if_none_match,
                               language=language,
                               type_id=type_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/types/{type_id}/')
def get_wars(*, max_war_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param max_war_id: Only return wars with ID smaller than this
    Return a list of wars
    ---
    Alternate route: `/dev/wars/`
    Alternate route: `/legacy/wars/`
    Alternate route: `/v1/wars/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               max_war_id=max_war_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path='/wars/')
def get_wars_war_id(*, war_id, if_none_match=None):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param war_id: ID for a war
    Return details about a war
    ---
    Alternate route: `/dev/wars/{war_id}/`
    Alternate route: `/legacy/wars/{war_id}/`
    Alternate route: `/v1/wars/{war_id}/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               war_id=war_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/wars/{war_id}/')
def get_wars_war_id_killmails(*, war_id, if_none_match=None, page='1'):
    """
    :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
    :param page: Which page of results to return
    :param war_id: A valid war ID
    Return a list of kills related to a war
    ---
    Alternate route: `/dev/wars/{war_id}/killmails/`
    Alternate route: `/legacy/wars/{war_id}/killmails/`
    Alternate route: `/v1/wars/{war_id}/killmails/`
    ---
    This route is cached for up to 3600 seconds
    """
    # Return the response so callers actually receive the ESI data.
    return ESI_request.request(if_none_match=if_none_match,
                               page=page,
                               war_id=war_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/wars/{war_id}/killmails/')
| 0
| 0
| 0
|
07e63312aff52436333be7a1e3761ff63ade86f0
| 522
|
py
|
Python
|
CCC/ccc18s2.py
|
devAdhiraj/coding-problems
|
e15d0eeb025e1ad503c73eb9280e6226fd979164
|
[
"MIT"
] | null | null | null |
CCC/ccc18s2.py
|
devAdhiraj/coding-problems
|
e15d0eeb025e1ad503c73eb9280e6226fd979164
|
[
"MIT"
] | null | null | null |
CCC/ccc18s2.py
|
devAdhiraj/coding-problems
|
e15d0eeb025e1ad503c73eb9280e6226fd979164
|
[
"MIT"
] | null | null | null |
# CCC 2018 S2: rotate an n x n grid so the smallest corner value ends up
# at the top-left, then print the grid.
n = int(input())
a = [list(map(int, input().split())) for i in range(n)]
# min() takes multiple args directly; no need to build a list first.
c = min(a[0][0], a[0][n - 1], a[n - 1][0], a[n - 1][n - 1])
if a[0][0] == c:
    # Already oriented correctly (removed a stray `pass` after this loop).
    for row in a:
        print(*row)
elif a[0][n - 1] == c:
    # Smallest at top-right: rotate 90 degrees so columns (last to first)
    # become rows. print(*...) avoids the trailing space the original
    # emitted with end=' '.
    for i in range(n - 1, -1, -1):
        print(*(a[j][i] for j in range(n)))
elif a[n - 1][0] == c:
    # Smallest at bottom-left: rotate the other way; columns become rows,
    # read bottom to top.
    for i in range(n):
        print(*(a[j][i] for j in range(n - 1, -1, -1)))
else:
    # Smallest at bottom-right: rotate 180 degrees (reverse row order and
    # each row).
    for row in a[::-1]:
        print(*row[::-1])
| 21.75
| 59
| 0.398467
|
# CCC 2018 S2: rotate an n x n grid so the smallest corner value ends up
# at the top-left, then print the grid.
n = int(input())
a = [list(map(int, input().split())) for i in range(n)]
# min() takes multiple args directly; no need to build a list first.
c = min(a[0][0], a[0][n - 1], a[n - 1][0], a[n - 1][n - 1])
if a[0][0] == c:
    # Already oriented correctly (removed a stray `pass` after this loop).
    for row in a:
        print(*row)
elif a[0][n - 1] == c:
    # Smallest at top-right: rotate 90 degrees so columns (last to first)
    # become rows. print(*...) avoids the trailing space the original
    # emitted with end=' '.
    for i in range(n - 1, -1, -1):
        print(*(a[j][i] for j in range(n)))
elif a[n - 1][0] == c:
    # Smallest at bottom-left: rotate the other way; columns become rows,
    # read bottom to top.
    for i in range(n):
        print(*(a[j][i] for j in range(n - 1, -1, -1)))
else:
    # Smallest at bottom-right: rotate 180 degrees (reverse row order and
    # each row).
    for row in a[::-1]:
        print(*row[::-1])
| 0
| 0
| 0
|
d9295e12456f31b422c2f1a459ef43e826ffca66
| 596
|
py
|
Python
|
tests/test_util.py
|
infolab-csail/lispify
|
f0156b2ccf5edb3cc77de3cd7ffc63f2883fdd30
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
infolab-csail/lispify
|
f0156b2ccf5edb3cc77de3cd7ffc63f2883fdd30
|
[
"MIT"
] | 11
|
2016-07-25T01:23:07.000Z
|
2020-11-25T18:35:00.000Z
|
tests/test_util.py
|
infolab-csail/lispify
|
f0156b2ccf5edb3cc77de3cd7ffc63f2883fdd30
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_util
----------------------------------
Tests for `util` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from lispify import util
if __name__ == '__main__':
unittest.main()
| 19.225806
| 63
| 0.654362
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_util
----------------------------------
Tests for `util` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from lispify import util
class TestUtil(unittest.TestCase):
def test_camel_case_to_lisp_name(self):
name = util.camel_case_to_lisp_name("UnknownAttribute")
self.assertEqual(name, "unknown-attribute")
name = util.camel_case_to_lisp_name("ResourceNotFound")
self.assertEqual(name, "resource-not-found")
if __name__ == '__main__':
unittest.main()
| 251
| 13
| 50
|
2d47349b76d89028925916b033d8dcdc74aebae3
| 937
|
py
|
Python
|
PYTHON/LAB/TW7/TW7a.py
|
flick-23/SEM-4
|
fe64c19e7df64390e94ba560006d32a561fd3579
|
[
"MIT"
] | 24
|
2021-03-24T08:52:41.000Z
|
2021-11-13T11:52:54.000Z
|
PYTHON/LAB/TW7/TW7a.py
|
flick-23/SEM-4
|
fe64c19e7df64390e94ba560006d32a561fd3579
|
[
"MIT"
] | 1
|
2021-04-28T06:16:44.000Z
|
2021-04-28T06:16:44.000Z
|
PYTHON/LAB/TW7/TW7a.py
|
flick-23/SEM-4
|
fe64c19e7df64390e94ba560006d32a561fd3579
|
[
"MIT"
] | 8
|
2021-04-16T10:31:50.000Z
|
2021-05-31T03:48:52.000Z
|
# Three IA’s are conducted for a class of 10 students for the subject Maths.
# The name, marks and USN are read from a file in.txt. Find the average of
# the IA for each student and write the USN and average to a file out.txt.
# Display the highest average of the class on the console
with open("in.txt", "r") as input_file:
lines = input_file.readlines()
# print(lines)
usns = []
mks = []
avgs = []
for line in lines:
# print(line)
name, m1, m2, m3, usn = map(str, line.split())
# print(name,m1,m2,m3,usn)
usns.append(usn)
mks.append([int(m1), int(m2), int(m3)])
avg = (int(m1)+int(m2)+int(m3))/3
avgs.append(avg)
with open("out.txt", "w") as output_file:
for i in range(len(usns)):
str = f"USN : {usns[i]} Avg : {avgs[i]}\n"
output_file.writelines(str)
print("Higest Average of the class is :: ", max(avgs))
| 36.038462
| 77
| 0.590181
|
# Three IA’s are conducted for a class of 10 students for the subject Maths.
# The name, marks and USN are read from a file in.txt. Find the average of
# the IA for each student and write the USN and average to a file out.txt.
# Display the highest average of the class on the console
with open("in.txt", "r") as input_file:
lines = input_file.readlines()
# print(lines)
usns = []
mks = []
avgs = []
for line in lines:
# print(line)
name, m1, m2, m3, usn = map(str, line.split())
# print(name,m1,m2,m3,usn)
usns.append(usn)
mks.append([int(m1), int(m2), int(m3)])
avg = (int(m1)+int(m2)+int(m3))/3
avgs.append(avg)
with open("out.txt", "w") as output_file:
for i in range(len(usns)):
str = f"USN : {usns[i]} Avg : {avgs[i]}\n"
output_file.writelines(str)
print("Higest Average of the class is :: ", max(avgs))
| 0
| 0
| 0
|
efb8348e75f424b5ce684725233888c553e87260
| 8,930
|
py
|
Python
|
ashic/commands/simulateyeast.py
|
wmalab/ASHIC
|
f9dbee64ef13c7f10c25bc266209fb7fc430d39e
|
[
"MIT"
] | 5
|
2021-01-28T21:51:55.000Z
|
2022-03-04T17:35:59.000Z
|
ashic/commands/simulateyeast.py
|
wmalab/ASHIC
|
f9dbee64ef13c7f10c25bc266209fb7fc430d39e
|
[
"MIT"
] | 4
|
2020-09-01T06:23:49.000Z
|
2022-01-11T03:58:04.000Z
|
ashic/commands/simulateyeast.py
|
wmalab/ASHIC
|
f9dbee64ef13c7f10c25bc266209fb7fc430d39e
|
[
"MIT"
] | 1
|
2020-12-06T07:03:50.000Z
|
2020-12-06T07:03:50.000Z
|
import os
import json
import numpy as np
from ashic import simulation
from ashic import structure
from ashic.utils import find_closestlength_chrom, encodejson
from ashic.utils import get_localinds, get_rdis, parse_localinds
from ashic.utils import centroid_distance
from ashic.commands.fit import initialx, create_model
from ashic.em import emfit
from ashic.progresscb import SimulationProgress
from ashic.model.zipoisson import ZeroInflatedPoisson
from ashic.model.poisson import Poisson
| 38.995633
| 99
| 0.560022
|
import os
import json
import numpy as np
from ashic import simulation
from ashic import structure
from ashic.utils import find_closestlength_chrom, encodejson
from ashic.utils import get_localinds, get_rdis, parse_localinds
from ashic.utils import centroid_distance
from ashic.commands.fit import initialx, create_model
from ashic.em import emfit
from ashic.progresscb import SimulationProgress
from ashic.model.zipoisson import ZeroInflatedPoisson
from ashic.model.poisson import Poisson
def load_yeaststructures():
datadir = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../examples/yeast'))
# load yeast structures
structures = np.loadtxt(os.path.join(datadir, 'yeast.pdb.txt'))
structures = structures.reshape((len(structures) / 3, 3))
lengths = np.loadtxt(os.path.join(datadir, "yeast_lengths.txt"), dtype='int32')
start = 0
chroms = {}
for i, length in enumerate(lengths, start=1):
end = start + length
chroms['chr'+str(i)] = structures[start:end, :]
start = end
return chroms
def simulate_yeastdata(params, seed=0):
sim = simulation.Simulation(params, seed=seed)
sim.simulate_data()
return sim
def sample_simulation_params(chroms, chrom1=None, simtype=None,
alpha=-3.0, beta=1.0,
p_a=2.0, p_b=2.0,
gamma_a=0.9, gamma_b=-0.2, gamma_inter=0.05,
seed=0, **kwargs):
settings = {
'chrom1': chrom1,
'simtype': simtype,
'alpha': alpha,
'beta': beta,
'p_a': p_a,
'p_b': p_b,
'gamma_a': gamma_a,
'gamma_b': gamma_b,
'gamma_inter': gamma_inter,
'seed': seed
}
if simtype == 'diff':
assert kwargs.get('chrom2') is not None, "chrom2 must be provided."
settings['chrom2'] = kwargs['chrom2']
x = simulation.sample_diff_structure(chroms[chrom1], chroms[kwargs['chrom2']])
n = int(x.shape[0] / 2)
elif simtype == 'same' or simtype == 'local':
# center distance is estiamted from chrom1 and the chrom with closest length
# cut at the same length
if kwargs.get('chrom2') is not None:
minchrom = kwargs['chrom2']
else:
minchrom = find_closestlength_chrom(chroms, chrom1)
settings['chrom2'] = minchrom
n = min(chroms[chrom1].shape[0], chroms[minchrom].shape[0])
# TODO change center
# if kwargs.get('centerdis') is None:
# cd = structure.center_distance(chroms[chrom1], chroms[minchrom])
# else:
# cd = kwargs['centerdis']
# settings['centerdis'] = cd
if simtype == 'same':
x = simulation.sample_same_structure(chroms[chrom1], chroms[minchrom])
cd = centroid_distance(x[:n, :], x[n:, :])
settings['centerdis'] = cd
else:
if kwargs.get('localinds') is None:
localinds = get_localinds(n=n, percentile=kwargs.get('percentile', 0.2),
fragment_size=kwargs.get('fragmentsize', 5))
else:
localinds = parse_localinds(kwargs['localinds'])
settings['localinds'] = localinds
if kwargs.get('diffd') is None:
# TODO multiply 2?
radius = kwargs.get('radius', 2)
diffd = get_rdis(chroms[chrom1]) * radius
settings['radius'] = radius
else:
diffd = kwargs['diffd']
settings['diffd'] = diffd
x = simulation.sample_localdiff_structure(chroms[chrom1], chroms[minchrom],
localinds, diffd, randstate=seed)
cd = centroid_distance(x[:n, :], x[n:, :])
settings['centerdis'] = cd
else:
raise ValueError("Simulation type should be diff, same or local.")
p = simulation.sample_p(a=p_a, b=p_b, n=n, randstate=seed)
gamma = simulation.sample_gamma(a=gamma_a, b=gamma_b, inter=gamma_inter, n=n)
params = {
'alpha': alpha,
'beta': beta,
'p': p,
'gamma': gamma,
'x': x,
'n': n
}
return params, settings
def cmd_sample_params(chrom1, simtype, outdir, **kwargs):
yeastchroms = load_yeaststructures()
simparams, simsettings = sample_simulation_params(yeastchroms,
chrom1=chrom1, simtype=simtype, **kwargs)
if not os.path.exists(outdir):
os.makedirs(outdir)
encodejson(simparams)
encodejson(simsettings)
with open(os.path.join(outdir, 'params.json'), 'w') as fh:
json.dump(simparams, fh, indent=4, sort_keys=True)
with open(os.path.join(outdir, 'settings.json'), 'w') as fh:
json.dump(simsettings, fh, indent=4, sort_keys=True)
def cmd_simulate_fromparams(paramsfile, outdir, modeltype,
numruns=5, maxiter=20, tol=1e-2,
alpha=-3.0, beta=1.0, seed=0, tail=None, **kwargs):
if not os.path.exists(outdir):
os.makedirs(outdir)
with open(paramsfile, 'r') as fh:
params = json.load(fh)
sim = simulate_yeastdata(params, seed=seed)
# np.savetxt(os.path.join(outdir, 'z.txt'), sim.hidden['z'])
# np.savetxt(os.path.join(outdir, 't.txt'), sim.hidden['t'])
# for var in ('aa', 'ab', 'bb', 'ax', 'bx', 'xx'):
# np.savetxt(os.path.join(outdir, var+'.txt'), sim.obs[var])
# TODO change as a function
best_loglikelihood = -np.inf
best_model = None
best_converge = False
best_expected = None
best_sim = None
best_rseed = None
for rseed in range(numruns):
init = {
'n': params['n'],
'x': initialx(sim.obs, alpha=alpha, beta=beta, seed=rseed, **kwargs),
'alpha': alpha,
'beta': beta
}
if tail is None:
merge = None
elif 1 <= tail < params['n']:
merge = params['n'] - tail
else:
raise ValueError("tail should between 1 and {}.".format(params['n'] - 1))
model = create_model(init, modeltype=modeltype, seed=rseed, merge=merge)
simprogress = SimulationProgress(model, outdir=os.path.join(outdir, 'em_seed_'+str(rseed)),
simobj=sim, seed=rseed, maxiter=maxiter, tol=tol)
model, converge, loglikelihood, expected = emfit(model, sim.obs, maxiter=maxiter, tol=tol,
callback=simprogress.callback)
with open(simprogress.logfile, 'a') as fh:
fh.write("# converge={}\n".format(converge))
# choose the model with maximum loglikelihood in all runs
if loglikelihood > best_loglikelihood:
best_loglikelihood = loglikelihood
best_model = model
best_converge = converge
best_expected = expected
best_sim = simprogress
best_rseed = rseed
# save best result
with open(os.path.join(outdir, 'result.json'), 'w') as fh:
retdict = {
'loglikelihood': best_loglikelihood,
'converge': best_converge,
'em_seed': best_rseed,
'simulation_seed': seed,
'params_filepath': os.path.relpath(paramsfile, outdir),
'relative_error': best_sim.errors
}
json.dump(retdict, fh, indent=4, sort_keys=True)
best_model.dumpjson(os.path.join(outdir, 'result_model.json'),
indent=4, sort_keys=True)
with open(os.path.join(outdir, 'result_expected.json'), 'w') as fh:
row, col = np.where(best_model.mask)
values = {}
if isinstance(best_model, Poisson):
values['T'] = {
'aa': best_expected[0],
'ab': best_expected[1],
'ba': best_expected[2],
'bb': best_expected[3]
}
elif isinstance(best_model, ZeroInflatedPoisson):
values['Z'] = {
'aa': best_expected[0][0],
'ab': best_expected[0][1],
'ba': best_expected[0][2],
'bb': best_expected[0][3],
}
values['T'] = {
'aa': best_expected[1][0],
'ab': best_expected[1][1],
'ba': best_expected[1][2],
'bb': best_expected[1][3],
}
else:
raise ValueError("model should be zip or poisson.")
encodejson(values)
expectdict = {
'n': params['n'],
'row': row.flatten().tolist(),
'col': col.flatten().tolist(),
'values': values
}
json.dump(expectdict, fh, indent=4, sort_keys=True)
def cmd_simulate_fromsettings():
pass
| 8,297
| 0
| 138
|
05f2ea61cd36ac167513516a2307a89ee3de2985
| 1,473
|
py
|
Python
|
solutions/0217-contains-duplicate/contains-duplicate.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
solutions/0217-contains-duplicate/contains-duplicate.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
solutions/0217-contains-duplicate/contains-duplicate.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
# Given an array of integers, find if the array contains any duplicates.
#
# Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
#
# Example 1:
#
#
# Input: [1,2,3,1]
# Output: true
#
# Example 2:
#
#
# Input: [1,2,3,4]
# Output: false
#
# Example 3:
#
#
# Input: [1,1,1,3,3,4,3,2,4,2]
# Output: true
#
#
# @lc app=leetcode id=217 lang=python3
#
# [217] Contains Duplicate
#
# https://leetcode.com/problems/contains-duplicate/description/
#
# algorithms
# Easy (52.40%)
# Likes: 422
# Dislikes: 529
# Total Accepted: 352.1K
# Total Submissions: 672K
# Testcase Example: '[1,2,3,1]'
#
# Given an array of integers, find if the array contains any duplicates.
#
# Your function should return true if any value appears at least twice in the
# array, and it should return false if every element is distinct.
#
# Example 1:
#
#
# Input: [1,2,3,1]
# Output: true
#
# Example 2:
#
#
# Input: [1,2,3,4]
# Output: false
#
# Example 3:
#
#
# Input: [1,1,1,3,3,4,3,2,4,2]
# Output: true
#
#
| 17.535714
| 142
| 0.606246
|
# Given an array of integers, find if the array contains any duplicates.
#
# Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
#
# Example 1:
#
#
# Input: [1,2,3,1]
# Output: true
#
# Example 2:
#
#
# Input: [1,2,3,4]
# Output: false
#
# Example 3:
#
#
# Input: [1,1,1,3,3,4,3,2,4,2]
# Output: true
#
#
# @lc app=leetcode id=217 lang=python3
#
# [217] Contains Duplicate
#
# https://leetcode.com/problems/contains-duplicate/description/
#
# algorithms
# Easy (52.40%)
# Likes: 422
# Dislikes: 529
# Total Accepted: 352.1K
# Total Submissions: 672K
# Testcase Example: '[1,2,3,1]'
#
# Given an array of integers, find if the array contains any duplicates.
#
# Your function should return true if any value appears at least twice in the
# array, and it should return false if every element is distinct.
#
# Example 1:
#
#
# Input: [1,2,3,1]
# Output: true
#
# Example 2:
#
#
# Input: [1,2,3,4]
# Output: false
#
# Example 3:
#
#
# Input: [1,1,1,3,3,4,3,2,4,2]
# Output: true
#
#
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
if len(nums) < 2:
return False
if len(nums) is 2:
return nums[0] == nums[1]
hash_table = {}
for num in nums:
if num in hash_table:
return True
else:
hash_table[num] = True
return False
| 341
| -6
| 49
|
bc3e21f237dd600b4b9a15b1a26af2981ee29aad
| 2,128
|
py
|
Python
|
app/metrics/client.py
|
TouchPal/guldan
|
74cc0bf687109d16c3eb94010b4cc25bd5c5bcc0
|
[
"BSD-3-Clause"
] | 43
|
2017-12-27T13:20:15.000Z
|
2021-04-15T03:02:03.000Z
|
app/metrics/client.py
|
TouchPal/guldan
|
74cc0bf687109d16c3eb94010b4cc25bd5c5bcc0
|
[
"BSD-3-Clause"
] | null | null | null |
app/metrics/client.py
|
TouchPal/guldan
|
74cc0bf687109d16c3eb94010b4cc25bd5c5bcc0
|
[
"BSD-3-Clause"
] | 4
|
2018-03-28T08:46:07.000Z
|
2018-10-12T09:33:38.000Z
|
# -*- coding: utf-8 -*-
import time
import functools
import socket
import logging
from .metricspy.groupmetrics import GroupMetrics
from app import load_app_config
logger = logging.getLogger(__name__)
metrics_clients = {}
HOSTNAME = socket.getfqdn()
| 30.84058
| 117
| 0.584586
|
# -*- coding: utf-8 -*-
import time
import functools
import socket
import logging
from .metricspy.groupmetrics import GroupMetrics
from app import load_app_config
logger = logging.getLogger(__name__)
metrics_clients = {}
HOSTNAME = socket.getfqdn()
class FakeGroupMetrics(object):
def write_exc(self, name, value, tags=None):
pass
def write_count(self, name, value, tags=None):
pass
def get_metrics_clent(measurement):
try:
metrics_client = metrics_clients.get(measurement, None)
if not metrics_client:
host_and_port = load_app_config().METRICS_URL.split(":")
if len(host_and_port) != 2:
metrics_client = FakeGroupMetrics()
else:
tags = {"hostname": HOSTNAME}
metrics_client = GroupMetrics(
(host_and_port[0], int(host_and_port[1])),
measurement,
aggregate_count=1000,
ring_buffer_capacity=10000,
predefined_tags=tags
)
metrics_clients[measurement] = metrics_client
return metrics_client
except:
logger.exception("exc when get metrics client")
return FakeGroupMetrics()
def metrics(measurement, name, request=None):
def metrics_deco(func):
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = 0
new_tags = {
"remote_addr": request.remote_addr if request else "None"
}
try:
start_time = time.time() * 1000
result = func(*args, **kwargs)
return result
except:
get_metrics_clent(measurement).write_exc(name, 1, tags=new_tags)
raise
finally:
end_time = time.time() * 1000
get_metrics_clent(measurement).write_count(name, 1, tags=new_tags)
get_metrics_clent(measurement).write_count(name + ".cost", int(end_time - start_time), tags=new_tags)
return inner
return metrics_deco
| 1,743
| 10
| 122
|
76da7517e74e4fe0d0a7dea147637beb93fde4b1
| 6,970
|
py
|
Python
|
jina/__init__.py
|
mohamed--abdel-maksoud/jina
|
f55a2fb89cae1f5c5d03db682ebd7add0e7d58a4
|
[
"Apache-2.0"
] | null | null | null |
jina/__init__.py
|
mohamed--abdel-maksoud/jina
|
f55a2fb89cae1f5c5d03db682ebd7add0e7d58a4
|
[
"Apache-2.0"
] | null | null | null |
jina/__init__.py
|
mohamed--abdel-maksoud/jina
|
f55a2fb89cae1f5c5d03db682ebd7add0e7d58a4
|
[
"Apache-2.0"
] | null | null | null |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
# DO SOME OS-WISE PATCHES
import datetime as _datetime
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import types as _types
from google.protobuf.internal import api_implementation as _api_implementation
if _api_implementation._default_implementation_type != 'cpp':
import warnings as _warnings
_warnings.warn(
'''
You are using Python protobuf backend, not the C++ version, which is much faster.
This is often due to C++ implementation failed to compile while installing Protobuf
- You are using in Python 3.9 (https://github.com/jina-ai/jina/issues/1801)
- You are using on architecture other than x86_64/armv6/armv7
- You installation is broken, try `pip install --force protobuf`
- You have C++ backend but you shut it down, try `export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`
''',
RuntimeWarning,
)
if _sys.version_info < (3, 7, 0) or _sys.version_info >= (3, 10, 0):
raise OSError(f'Jina requires Python 3.7/3.8/3.9, but yours is {_sys.version_info}')
if _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# Underscore variables shared globally
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '1.1.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.80'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS
# 1. clean this tuple,
# 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = (
'JINA_ARRAY_QUANT',
'JINA_BINARY_DELIMITER',
'JINA_CONTRIB_MODULE',
'JINA_CONTRIB_MODULE_IS_LOADING',
'JINA_CONTROL_PORT',
'JINA_DEFAULT_HOST',
'JINA_DISABLE_UVLOOP',
'JINA_EXECUTOR_WORKDIR',
'JINA_FULL_CLI',
'JINA_IPC_SOCK_TMP',
'JINA_LOG_CONFIG',
'JINA_LOG_ID',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_LOG_WORKSPACE',
'JINA_POD_NAME',
'JINA_RAISE_ERROR_EARLY',
'JINA_RANDOM_PORTS',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_SOCKET_HWM',
'JINA_VCS_VERSION',
'JINA_WARN_UNNAMED',
'JINA_WORKSPACE',
)
__default_host__ = _os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__binary_delimiter__ = _os.environ.get(
'JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57'
).encode()
__root_dir__ = _os.path.dirname(_os.path.abspath(__file__))
_names_with_underscore = [
'__version__',
'__copyright__',
'__license__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__binary_delimiter__',
'__jina_env__',
'__uptime__',
'__root_dir__',
]
# Primitive data type,
# note, they must be loaded BEFORE all executors/drivers/... to avoid cyclic imports
from jina.types.ndarray.generic import NdArray
from jina.types.request import Request, Response
from jina.types.message import Message
from jina.types.querylang import QueryLang
from jina.types.document import Document
from jina.types.document.multimodal import MultimodalDocument
from jina.types.sets import DocumentSet, QueryLangSet
# ADD GLOBAL NAMESPACE VARIABLES
JINA_GLOBAL = _types.SimpleNamespace()
JINA_GLOBAL.scipy_installed = None
JINA_GLOBAL.tensorflow_installed = None
JINA_GLOBAL.torch_installed = None
import jina.importer as _ji
# driver first, as executor may contain driver
_ji.import_classes('jina.drivers', show_import_table=False, import_once=True)
_ji.import_classes('jina.executors', show_import_table=False, import_once=True)
_ji.import_classes('jina.hub', show_import_table=False, import_once=True)
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
from .logging import default_logger
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(
f'trouble with max limit, retrying with soft,hard {soft},{hard}'
)
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return soft, hard
_set_nofile()
# Flow
from jina.flow import Flow
from jina.flow.asyncio import AsyncFlow
# Client
from jina.clients import Client
from jina.clients.asyncio import AsyncClient
# Executor
from jina.executors import BaseExecutor as Executor
from jina.executors.classifiers import BaseClassifier as Classifier
from jina.executors.crafters import BaseCrafter as Crafter
from jina.executors.encoders import BaseEncoder as Encoder
from jina.executors.evaluators import BaseEvaluator as Evaluator
from jina.executors.indexers import BaseIndexer as Indexer
from jina.executors.rankers import BaseRanker as Ranker
from jina.executors.segmenters import BaseSegmenter as Segmenter
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend([_s for _s in _names_with_underscore])
| 32.570093
| 122
| 0.726973
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
# DO SOME OS-WISE PATCHES
import datetime as _datetime
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import types as _types
from google.protobuf.internal import api_implementation as _api_implementation
if _api_implementation._default_implementation_type != 'cpp':
import warnings as _warnings
_warnings.warn(
'''
You are using Python protobuf backend, not the C++ version, which is much faster.
This is often due to C++ implementation failed to compile while installing Protobuf
- You are using in Python 3.9 (https://github.com/jina-ai/jina/issues/1801)
- You are using on architecture other than x86_64/armv6/armv7
- You installation is broken, try `pip install --force protobuf`
- You have C++ backend but you shut it down, try `export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`
''',
RuntimeWarning,
)
if _sys.version_info < (3, 7, 0) or _sys.version_info >= (3, 10, 0):
raise OSError(f'Jina requires Python 3.7/3.8/3.9, but yours is {_sys.version_info}')
if _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# Underscore variables shared globally
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '1.1.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.80'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS
# 1. clean this tuple,
# 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = (
'JINA_ARRAY_QUANT',
'JINA_BINARY_DELIMITER',
'JINA_CONTRIB_MODULE',
'JINA_CONTRIB_MODULE_IS_LOADING',
'JINA_CONTROL_PORT',
'JINA_DEFAULT_HOST',
'JINA_DISABLE_UVLOOP',
'JINA_EXECUTOR_WORKDIR',
'JINA_FULL_CLI',
'JINA_IPC_SOCK_TMP',
'JINA_LOG_CONFIG',
'JINA_LOG_ID',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_LOG_WORKSPACE',
'JINA_POD_NAME',
'JINA_RAISE_ERROR_EARLY',
'JINA_RANDOM_PORTS',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_SOCKET_HWM',
'JINA_VCS_VERSION',
'JINA_WARN_UNNAMED',
'JINA_WORKSPACE',
)
__default_host__ = _os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__binary_delimiter__ = _os.environ.get(
'JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57'
).encode()
__root_dir__ = _os.path.dirname(_os.path.abspath(__file__))
_names_with_underscore = [
'__version__',
'__copyright__',
'__license__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__binary_delimiter__',
'__jina_env__',
'__uptime__',
'__root_dir__',
]
# Primitive data type,
# note, they must be loaded BEFORE all executors/drivers/... to avoid cyclic imports
from jina.types.ndarray.generic import NdArray
from jina.types.request import Request, Response
from jina.types.message import Message
from jina.types.querylang import QueryLang
from jina.types.document import Document
from jina.types.document.multimodal import MultimodalDocument
from jina.types.sets import DocumentSet, QueryLangSet
# ADD GLOBAL NAMESPACE VARIABLES
JINA_GLOBAL = _types.SimpleNamespace()
JINA_GLOBAL.scipy_installed = None
JINA_GLOBAL.tensorflow_installed = None
JINA_GLOBAL.torch_installed = None
import jina.importer as _ji
# driver first, as executor may contain driver
_ji.import_classes('jina.drivers', show_import_table=False, import_once=True)
_ji.import_classes('jina.executors', show_import_table=False, import_once=True)
_ji.import_classes('jina.hub', show_import_table=False, import_once=True)
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
from .logging import default_logger
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(
f'trouble with max limit, retrying with soft,hard {soft},{hard}'
)
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return soft, hard
_set_nofile()
# Flow
from jina.flow import Flow
from jina.flow.asyncio import AsyncFlow
# Client
from jina.clients import Client
from jina.clients.asyncio import AsyncClient
# Executor
from jina.executors import BaseExecutor as Executor
from jina.executors.classifiers import BaseClassifier as Classifier
from jina.executors.crafters import BaseCrafter as Crafter
from jina.executors.encoders import BaseEncoder as Encoder
from jina.executors.evaluators import BaseEvaluator as Evaluator
from jina.executors.indexers import BaseIndexer as Indexer
from jina.executors.rankers import BaseRanker as Ranker
from jina.executors.segmenters import BaseSegmenter as Segmenter
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend([_s for _s in _names_with_underscore])
| 0
| 0
| 0
|
e7961c5f43823cb180276e63bee8cf767d3ea8b5
| 75,260
|
py
|
Python
|
src/genie/libs/parser/iosxe/show_wlan.py
|
danielgraziano/genieparser
|
74d5e1ded9794561af1ac3284307c58365617673
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/show_wlan.py
|
danielgraziano/genieparser
|
74d5e1ded9794561af1ac3284307c58365617673
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/show_wlan.py
|
danielgraziano/genieparser
|
74d5e1ded9794561af1ac3284307c58365617673
|
[
"Apache-2.0"
] | null | null | null |
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Optional
# ======================
# Schema for:
# * 'show wlan summary'
# ======================
class ShowWlanSummarySchema(MetaParser):
"""Schema for show wlan summary."""
schema = {
"wlan_summary": {
"wlan_count": int,
"wlan_id": {
int: {
"profile_name": str,
"ssid": str,
"status": str,
"security": str
}
}
}
}
# ======================
# Parser for:
# * 'show wlan summary'
# ======================
class ShowWlanSummary(ShowWlanSummarySchema):
"""Parser for show wlan summary"""
cli_command = 'show ap cdp neighbor'
# ==================
# Schema for:
# * 'show wlan all'
# ==================
class ShowWlanAllSchema(MetaParser):
    """Schema for show wlan all."""
    # One entry per WLAN profile name; the nested dicts mirror the indented
    # sections of the 'show wlan all' output.
    # NOTE: misspelled keys ("neighbbor_list", "traffic_filtering_servce",
    # "web_based_authenticaion", "wmn_sleep_mode") are kept as-is because the
    # parser output and existing consumers depend on these exact key names.
    schema = {
        "wlan_names" : {
            Optional(str): {
                "identifier": int,
                Optional("description"): str,
                "ssid": str,
                "status": str,
                "broadcast_ssid": str,
                "advertise_ap_name": str,
                "universal_ap_admin": str,
                "max_clients_wlan": int,
                "max_clients_ap": int,
                "max_clients_ap_radio": int,
                "okc": str,
                "active_clients": int,
                "chd_per_wlan": str,
                "wmm": str,
                "channel_scan_defer_priority": {
                    "priority": list
                },
                "scan_defer_time_msecs": int,
                "media_stream_multicast_direct": str,
                "ccx_aironet_support": str,
                "p2p_blocking_action": str,
                "radio_policy": str,
                Optional("dtim_period_dot11a"): str,
                Optional("dtim_period_dot11b"): str,
                "local_eap_authentication": str,
                "mac_filter_authorization_list_name": str,
                "mac_filter_override_authorization_list_name": str,
                "dot1x_authentication_list_name": str,
                "dot1x_authorization_list_name": str,
                "security": {
                    "dot11_authentication": str,
                    "static_wep_keys": str,
                    "wifi_protected_access_wpa1_wpa2_wpa3": str,
                    Optional("wifi_protected_access_wpa"): {
                        Optional("wpa_ssn_ie"): str
                    },
                    Optional("wifi_protected_access_wpa2"): {
                        Optional("wpa2_rsn_ie"): str,
                        Optional("wpa2_attributes"): {
                            Optional("mpsk"): str,
                            Optional("aes"): str,
                            Optional("ccmp256"): str,
                            Optional("gcmp128"): str,
                            Optional("gcmp256"): str,
                            Optional("randomized_gtk"): str
                        }
                    },
                    Optional("wifi_protected_access_wpa3"): {
                        Optional("wpa3_ie"): str
                    },
                    Optional("auth_key_mgmt"): {
                        Optional("dot1x"): str,
                        Optional("psk"): str,
                        Optional("cckm"): str,
                        Optional("ft_dot1x"): str,
                        Optional("ft_psk"): str,
                        Optional("dot1x_sha256"): str,
                        Optional("psk_sha256"): str,
                        Optional("sae"): str,
                        Optional("owe"): str,
                        Optional("suiteb_1x"): str,
                        Optional("suiteb192_1x"): str
                    },
                    Optional("cckm_tsf_tolerance_msecs"): int,
                    "owe_transition_mode": str,
                    "osen": str,
                    # BUGFIX: removed a duplicate '"ft_support": str' entry that
                    # preceded this dict; in a dict literal the later key wins,
                    # so the str entry was dead and only confusing. Runtime
                    # schema is unchanged.
                    "ft_support": {
                        "ft_support_status": str,
                        "ft_reassociation_timer_secs": int,
                        "ft_over_the_ds_mode": str
                    },
                    "pmf_support": {
                        "pmf_support_status": str,
                        "pmf_association_comeback_timeout_secs": int,
                        "pmf_sa_query_time_msecs": int
                    },
                    "web_based_authenticaion": str,
                    "conditional_web_redirect": str,
                    "splash_page_web_redirect": str,
                    "webauth_on_mac_filter_failure": str,
                    "webauth_authentication_list_name": str,
                    "webauth_authorization_list_name": str,
                    "webauth_parameter_map": str
                },
                "band_select": str,
                "load_balancing": str,
                "multicast_buffer": str,
                "multicast_buffer_size": int,
                "ip_source_guard": str,
                "assisted_roaming": {
                    "neighbbor_list": str,
                    "prediction_list": str,
                    "dual_band_support": str
                },
                "ieee_dot11v_parameters": {
                    "directed_multicast_service": str,
                    "bss_max_idle": {
                        "bss_max_idle_status": str,
                        "protected_mode": str
                    },
                    "traffic_filtering_servce": str,
                    "bss_transition": {
                        "bss_transition_status": str,
                        "disassociation_imminent": {
                            "disassociation_imminent_status": str,
                            "optimised_roaming_timer": int,
                            "timer": int,
                        }
                    },
                    "wmn_sleep_mode": str
                },
                "dot11ac_mu_mimo": str,
                "dot11ax_parameters": {
                    "ofdma_downlink": str,
                    "ofdma_uplink": str,
                    "mu_mimo_downlink": str,
                    "mu_mimo_uplink": str,
                    "bss_target_wake_up_time": str,
                    "bss_target_wake_up_time_broadcast_support": str
                },
                "mdns_gateway_status": str,
                "wifi_alliance_agile_multiband": str,
                "device_analytics": {
                    "advertise_support": str,
                    "share_data_with_client": str
                },
                Optional("wifi_to_cellular_steering"): str
            }
        }
    }
# ==================
# Parser for:
# * 'show wlan all'
# ==================
class ShowWlanAll(ShowWlanAllSchema):
    """Parser for show wlan all"""
    # Command executed on the device when cli() is called without output.
    cli_command = 'show wlan all'
| 55.872309
| 196
| 0.498379
|
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Optional
# ======================
# Schema for:
# * 'show wlan summary'
# ======================
class ShowWlanSummarySchema(MetaParser):
    """Schema for show wlan summary."""
    # Top-level "wlan_summary" holds the total WLAN count plus one entry
    # per WLAN, keyed by the integer WLAN id from the summary table.
    schema = {
        "wlan_summary": {
            "wlan_count": int,          # from "Number of WLANs: <n>"
            "wlan_id": {
                int: {                  # WLAN numeric identifier (table "ID" column)
                    "profile_name": str,
                    "ssid": str,
                    "status": str,      # e.g. "UP"
                    "security": str     # e.g. "[WPA2][802.1x][AES]"
                }
            }
        }
    }
# ======================
# Parser for:
# * 'show wlan summary'
# ======================
class ShowWlanSummary(ShowWlanSummarySchema):
    """Parser for show wlan summary"""
    # BUGFIX: cli_command previously read 'show ap cdp neighbor', which is an
    # unrelated show command; this parser handles 'show wlan summary'.
    cli_command = 'show wlan summary'

    def cli(self, output=None):
        """Parse 'show wlan summary' into a dict matching ShowWlanSummarySchema.

        Args:
            output: pre-collected command output. When None the command is
                executed on the connected device.

        Returns:
            dict: parsed data, e.g.
                {'wlan_summary': {'wlan_count': 4,
                                  'wlan_id': {17: {'profile_name': ...,
                                                   'ssid': ..., 'status': ...,
                                                   'security': ...}}}}
            Empty dict when no line matched.
        """
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        # Sample output:
        # Number of WLANs: 4
        #
        # ID   Profile Name        SSID            Status    Security
        # --------------------------------------------------------------------
        # 17   lizzard_Global      lizzard         UP        [WPA2][802.1x][FT + 802.1x][AES],[FT Enabled]
        # 18   wip_Global          wip             UP        [WPA2][802.1x + CCKM][AES]

        # Number of WLANs: 4
        wlan_count_capture = re.compile(r"^Number\s+of\s+WLANs:\s+(?P<wlan_count>\d+)$")
        # ID Profile Name SSID Status Security
        wlan_info_header_capture = re.compile(r"^ID\s+Profile\s+Name\s+SSID\s+Status\s+Security$")
        # Dashed separator row printed under the header.
        delimiter_capture = re.compile(
            r"^----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------$")
        # 17    lizzard_Global    lizzard    UP    [WPA2][802.1x][FT + 802.1x][AES],[FT Enabled]
        wlan_info_capture = re.compile(
            r"^(?P<wlan_id>\d+)\s+(?P<profile_name>\S+)\s+(?P<ssid>\S+)\s+(?P<wlan_status>\S+)\s+(?P<status_security>.*$)")

        wlan_summary_dict = {}

        for line in out.splitlines():
            line = line.strip()

            count_match = wlan_count_capture.match(line)
            if count_match:
                # "Number of WLANs: 4"
                wlan_summary_dict.setdefault('wlan_summary', {})
                wlan_summary_dict['wlan_summary']['wlan_count'] = \
                    int(count_match.group('wlan_count'))
                continue

            # Header and delimiter rows carry no data; skip them.
            if wlan_info_header_capture.match(line) or delimiter_capture.match(line):
                continue

            info_match = wlan_info_capture.match(line)
            if info_match:
                # One table row per WLAN, keyed by its integer id.
                groups = info_match.groupdict()
                wlan_ids = wlan_summary_dict['wlan_summary'].setdefault('wlan_id', {})
                wlan_ids[int(groups['wlan_id'])] = {
                    'profile_name': groups['profile_name'],
                    'ssid': groups['ssid'],
                    'status': groups['wlan_status'],
                    'security': groups['status_security'],
                }
                continue

        return wlan_summary_dict
# ==================
# Schema for:
# * 'show wlan all'
# ==================
class ShowWlanAllSchema(MetaParser):
    """Schema for show wlan all."""
    # One entry per WLAN profile name; the nested dicts mirror the indented
    # sections of the 'show wlan all' output.
    # NOTE: misspelled keys ("neighbbor_list", "traffic_filtering_servce",
    # "web_based_authenticaion", "wmn_sleep_mode") are kept as-is because the
    # parser output and existing consumers depend on these exact key names.
    schema = {
        "wlan_names" : {
            Optional(str): {
                "identifier": int,
                Optional("description"): str,
                "ssid": str,
                "status": str,
                "broadcast_ssid": str,
                "advertise_ap_name": str,
                "universal_ap_admin": str,
                "max_clients_wlan": int,
                "max_clients_ap": int,
                "max_clients_ap_radio": int,
                "okc": str,
                "active_clients": int,
                "chd_per_wlan": str,
                "wmm": str,
                "channel_scan_defer_priority": {
                    "priority": list
                },
                "scan_defer_time_msecs": int,
                "media_stream_multicast_direct": str,
                "ccx_aironet_support": str,
                "p2p_blocking_action": str,
                "radio_policy": str,
                Optional("dtim_period_dot11a"): str,
                Optional("dtim_period_dot11b"): str,
                "local_eap_authentication": str,
                "mac_filter_authorization_list_name": str,
                "mac_filter_override_authorization_list_name": str,
                "dot1x_authentication_list_name": str,
                "dot1x_authorization_list_name": str,
                "security": {
                    "dot11_authentication": str,
                    "static_wep_keys": str,
                    "wifi_protected_access_wpa1_wpa2_wpa3": str,
                    Optional("wifi_protected_access_wpa"): {
                        Optional("wpa_ssn_ie"): str
                    },
                    Optional("wifi_protected_access_wpa2"): {
                        Optional("wpa2_rsn_ie"): str,
                        Optional("wpa2_attributes"): {
                            Optional("mpsk"): str,
                            Optional("aes"): str,
                            Optional("ccmp256"): str,
                            Optional("gcmp128"): str,
                            Optional("gcmp256"): str,
                            Optional("randomized_gtk"): str
                        }
                    },
                    Optional("wifi_protected_access_wpa3"): {
                        Optional("wpa3_ie"): str
                    },
                    Optional("auth_key_mgmt"): {
                        Optional("dot1x"): str,
                        Optional("psk"): str,
                        Optional("cckm"): str,
                        Optional("ft_dot1x"): str,
                        Optional("ft_psk"): str,
                        Optional("dot1x_sha256"): str,
                        Optional("psk_sha256"): str,
                        Optional("sae"): str,
                        Optional("owe"): str,
                        Optional("suiteb_1x"): str,
                        Optional("suiteb192_1x"): str
                    },
                    Optional("cckm_tsf_tolerance_msecs"): int,
                    "owe_transition_mode": str,
                    "osen": str,
                    # BUGFIX: removed a duplicate '"ft_support": str' entry that
                    # preceded this dict; in a dict literal the later key wins,
                    # so the str entry was dead and only confusing. Runtime
                    # schema is unchanged.
                    "ft_support": {
                        "ft_support_status": str,
                        "ft_reassociation_timer_secs": int,
                        "ft_over_the_ds_mode": str
                    },
                    "pmf_support": {
                        "pmf_support_status": str,
                        "pmf_association_comeback_timeout_secs": int,
                        "pmf_sa_query_time_msecs": int
                    },
                    "web_based_authenticaion": str,
                    "conditional_web_redirect": str,
                    "splash_page_web_redirect": str,
                    "webauth_on_mac_filter_failure": str,
                    "webauth_authentication_list_name": str,
                    "webauth_authorization_list_name": str,
                    "webauth_parameter_map": str
                },
                "band_select": str,
                "load_balancing": str,
                "multicast_buffer": str,
                "multicast_buffer_size": int,
                "ip_source_guard": str,
                "assisted_roaming": {
                    "neighbbor_list": str,
                    "prediction_list": str,
                    "dual_band_support": str
                },
                "ieee_dot11v_parameters": {
                    "directed_multicast_service": str,
                    "bss_max_idle": {
                        "bss_max_idle_status": str,
                        "protected_mode": str
                    },
                    "traffic_filtering_servce": str,
                    "bss_transition": {
                        "bss_transition_status": str,
                        "disassociation_imminent": {
                            "disassociation_imminent_status": str,
                            "optimised_roaming_timer": int,
                            "timer": int,
                        }
                    },
                    "wmn_sleep_mode": str
                },
                "dot11ac_mu_mimo": str,
                "dot11ax_parameters": {
                    "ofdma_downlink": str,
                    "ofdma_uplink": str,
                    "mu_mimo_downlink": str,
                    "mu_mimo_uplink": str,
                    "bss_target_wake_up_time": str,
                    "bss_target_wake_up_time_broadcast_support": str
                },
                "mdns_gateway_status": str,
                "wifi_alliance_agile_multiband": str,
                "device_analytics": {
                    "advertise_support": str,
                    "share_data_with_client": str
                },
                Optional("wifi_to_cellular_steering"): str
            }
        }
    }
# ==================
# Parser for:
# * 'show wlan all'
# ==================
class ShowWlanAll(ShowWlanAllSchema):
"""Parser for show wlan all"""
cli_command = 'show wlan all'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
else:
output=output
# WLAN Profile Name : lizzard_Fabric_F_cf6efda4
# ================================================
# Identifier : 17
# Description :
# Network Name (SSID) : lizzard
# Status : Enabled
# Broadcast SSID : Enabled
# Advertise-Apname : Disabled
# Universal AP Admin : Disabled
# Max Associated Clients per WLAN : 0
# Max Associated Clients per AP per WLAN : 0
# Max Associated Clients per AP Radio per WLAN : 200
# OKC : Enabled
# Number of Active Clients : 4
# CHD per WLAN : Enabled
# WMM : Allowed
# Channel Scan Defer Priority:
# Priority (default) : 5
# Priority (default) : 6
# Scan Defer Time (msecs) : 100
# Media Stream Multicast-direct : Disabled
# CCX - AironetIe Support : Disabled
# Peer-to-Peer Blocking Action : Disabled
# Radio Policy : 802.11a only
# DTIM period for 802.11a radio :
# DTIM period for 802.11b radio :
# Local EAP Authentication : Disabled
# Mac Filter Authorization list name : Disabled
# Mac Filter Override Authorization list name : Disabled
# Accounting list name :
# 802.1x authentication list name : dnac-list
# 802.1x authorization list name : Disabled
# Security
# 802.11 Authentication : Open System
# Static WEP Keys : Disabled
# Wi-Fi Protected Access (WPA/WPA2/WPA3) : Enabled
# WPA (SSN IE) : Disabled
# WPA2 (RSN IE) : Enabled
# MPSK : Disabled
# AES Cipher : Enabled
# CCMP256 Cipher : Disabled
# GCMP128 Cipher : Disabled
# GCMP256 Cipher : Disabled
# Randomized GTK : Disabled
# WPA3 (WPA3 IE) : Disabled
# Auth Key Management
# 802.1x : Enabled
# PSK : Disabled
# CCKM : Disabled
# FT dot1x : Enabled
# FT PSK : Disabled
# Dot1x-SHA256 : Disabled
# PSK-SHA256 : Disabled
# SAE : Disabled
# OWE : Disabled
# SUITEB-1X : Disabled
# SUITEB192-1X : Disabled
# CCKM TSF Tolerance : 1000
# OWE Transition Mode : Disabled
# OSEN : Disabled
# FT Support : Enabled
# FT Reassociation Timeout : 20
# FT Over-The-DS mode : Disabled
# PMF Support : Disabled
# PMF Association Comeback Timeout : 1
# PMF SA Query Time : 200
# Web Based Authentication : Disabled
# Conditional Web Redirect : Disabled
# Splash-Page Web Redirect : Disabled
# Webauth On-mac-filter Failure : Disabled
# Webauth Authentication List Name : Disabled
# Webauth Authorization List Name : Disabled
# Webauth Parameter Map : Disabled
# Band Select : Disabled
# Load Balancing : Disabled
# Multicast Buffer : Disabled
# Multicast Buffer Size : 0
# IP Source Guard : Disabled
# Assisted-Roaming
# Neighbor List : Enabled
# Prediction List : Disabled
# Dual Band Support : Disabled
# IEEE 802.11v parameters
# Directed Multicast Service : Enabled
# BSS Max Idle : Enabled
# Protected Mode : Disabled
# Traffic Filtering Service : Disabled
# BSS Transition : Enabled
# Disassociation Imminent : Disabled
# Optimised Roaming Timer : 40
# Timer : 200
# WNM Sleep Mode : Disabled
# 802.11ac MU-MIMO : Enabled
# 802.11ax parameters
# OFDMA Downlink : Enabled
# OFDMA Uplink : Enabled
# MU-MIMO Downlink : Enabled
# MU-MIMO Uplink : Enabled
# BSS Target Wake Up Time : Enabled
# BSS Target Wake Up Time Broadcast Support : Enabled
# mDNS Gateway Status : Bridge
# WIFI Alliance Agile Multiband : Disabled
# Device Analytics
# Advertise Support : Enabled
# Share Data with Client : Disabled
# WLAN Profile Name : north
p_name = re.compile(r"^WLAN\s+Profile\s+Name\s+:\s+(?P<value>\S+)$")
# ================================================
p_delimeter = re.compile(r"^=+$")
# Identifier : 1
p_identifier = re.compile(r"^Identifier\s+:\s+(?P<value>\d+)$")
# Description :
p_description = re.compile(r"^Description\s+:\s+(?P<value>.*)$")
# Network Name (SSID) : north
p_name_ssid = re.compile(r"^Network\s+Name\s+\(SSID\)\s+:\s+(?P<value>\S+)$")
# Status : Enabled
p_status = re.compile(r"^Status\s+:\s+(?P<value>\S+)$")
# Broadcast SSID : Enabled
p_broadcast = re.compile(r"^Broadcast\s+SSID\s+:\s+(?P<value>\S+)$")
# Advertise-Apname : Disabled
p_advertise = re.compile(r"^Advertise-Apname\s+:\s+(?P<value>\S+)$")
# Universal AP Admin : Disabled
p_universal_admin = re.compile(r"^Universal\s+AP\s+Admin\s+:\s+(?P<value>\S+)$")
# Max Associated Clients per WLAN : 0
p_max_clients_wlan = re.compile(r"^Max\s+Associated\s+Clients\s+per\s+WLAN\s+:\s+(?P<value>\d+)$")
# Max Associated Clients per AP per WLAN : 0
p_max_clients_ap = re.compile(r"^Max\s+Associated\s+Clients\s+per\s+AP\s+per\s+WLAN\s+:\s+(?P<value>\d+)$")
# Max Associated Clients per AP Radio per WLAN : 200
p_max_clients_radio = re.compile(r"^Max\s+Associated\s+Clients\s+per\s+AP\s+Radio\s+per\s+WLAN\s+:\s+(?P<value>\d+)$")
# OKC : Enabled
p_okc = re.compile(r"^OKC\s+:\s+(?P<value>\S+)$")
# Number of Active Clients : 4
p_active_clients = re.compile(r"^Number\s+of\s+Active\s+Clients\s+:\s+(?P<value>\d+)$")
# CHD per WLAN : Enabled
p_chd = re.compile(r"^CHD\s+per\s+WLAN\s+:\s+(?P<value>\S+)$")
# WMM : Allowed
p_wmm = re.compile(r"^WMM\s+:\s+(?P<value>\S+)$")
# WiFi Direct Policy : Disabled
p_direct_policy = re.compile(r"^WiFi\s+Direct\s+Policy\s+:\s+(?P<value>\S+)$")
# Channel Scan Defer Priority:
p_channel_scan = re.compile(r"Channel\s+Scan\s+Defer\s+Priority:$")
# Priority (default) : 5
# Priority (default) : 6
p_priority = re.compile(r"^Priority\s+\(default\)\s+:\s+(?P<value>\d+)$")
# Scan Defer Time (msecs) : 100
p_scan_defer = re.compile(r"^Scan\s+Defer\s+Time\s+\(msecs\)\s+:\s+(?P<value>\d+)$")
# Media Stream Multicast-direct : Disabled
p_media_multi = re.compile(r"^Media\s+Stream\s+Multicast-direct\s+:\s+(?P<value>\S+)$")
# CCX - AironetIe Support : Disabled
p_ccx_aironet = re.compile(r"^CCX\s+-\s+AironetIe\s+Support\s+:\s+(?P<value>\S+)$")
# Peer-to-Peer Blocking Action : Disabled
p_p2p_block = re.compile(r"^Peer-to-Peer\s+Blocking\s+Action\s+:\s+(?P<value>\S+)$")
# Radio Policy : 802.11a only
p_radio_policy = re.compile(r"^Radio\s+Policy\s+:\s+(?P<value>.*)$")
# DTIM period for 802.11a radio :
p_dtim_a = re.compile(r"^DTIM\s+period\s+for\s+802.11a\s+radio\s+:\s+(?P<value>.*)$")
# DTIM period for 802.11b radio :
p_dtim_b = re.compile(r"^DTIM\s+period\s+for\s+802.11b\s+radio\s+:\s+(?P<value>.*)$")
# Local EAP Authentication : Disabled
p_local_eap = re.compile(r"^Local\s+EAP\s+Authentication\s+:\s+(?P<value>\S+)$")
# Mac Filter Authorization list name : Disabled
p_mac_filter_auth = re.compile(r"^Mac\s+Filter\s+Authorization\s+list\s+name\s+:\s+(?P<value>\S+)$")
# Mac Filter Override Authorization list name : Disabled
p_mac_filter_override = re.compile(r"^Mac\s+Filter\s+Override\s+Authorization\s+list\s+name\s+:\s+(?P<value>\S+)$")
# Accounting list name :
p_accounting_list = re.compile(r"^Accounting\s+list\s+name\s+:\s+(?P<value>.*)$")
# 802.1x authentication list name : default
p_dot1x_authen = re.compile(r"^802.1x\s+authentication\s+list\s+name\s+:\s+(?P<value>\S+)$")
# 802.1x authorization list name : Disabled
p_dot1x_author = re.compile(r"^802.1x\s+authorization\s+list\s+name\s+:\s+(?P<value>\S+)$")
# Security
p_security = re.compile(r"^Security$")
# 802.11 Authentication : Open System
p_dot11_authen = re.compile(r"^802.11\s+Authentication\s+:\s+(?P<value>.*)$")
# Static WEP Keys : Disabled
p_static_wep = re.compile(r"^Static\s+WEP\s+Keys\s+:\s+(?P<value>.*)$")
# Wi-Fi Protected Access (WPA/WPA2/WPA3) : Enabled
p_pro_access = re.compile(r"^Wi-Fi\s+Protected\s+Access\s+\(WPA\/WPA2\/WPA3\)\s+:\s+(?P<value>\S+)$")
# WPA (SSN IE) : Disabled
p_wpa_ssn = re.compile(r"^WPA\s+\(SSN\s+IE\)\s+:\s+(?P<value>\S+)$")
# WPA2 (RSN IE) : Enabled
p_wpa2_rsn = re.compile(r"^WPA2\s+\(RSN\s+IE\)\s+:\s+(?P<value>\S+)$")
# MPSK : Disabled
p_mpsk = re.compile(r"^MPSK\s+:\s+(?P<value>\S+)$")
# AES Cipher : Enabled
p_aes = re.compile(r"^AES\s+Cipher\s+:\s+(?P<value>\S+)$")
# CCMP256 Cipher : Disabled
p_ccmp256 = re.compile(r"^CCMP256\s+Cipher\s+:\s+(?P<value>\S+)$")
# GCMP128 Cipher : Disabled
p_gcmp128 = re.compile(r"^GCMP128\s+Cipher\s+:\s+(?P<value>\S+)$")
# GCMP256 Cipher : Disabled
p_gcmp256 = re.compile(r"^GCMP256\s+Cipher\s+:\s+(?P<value>\S+)$")
# Randomized GTK : Disabled
p_gtk = re.compile(r"^Randomized\s+GTK\s+:\s+(?P<value>\S+)$")
# WPA3 (WPA3 IE) : Disabled
p_wpa3 = re.compile(r"^WPA3\s+\(WPA3\s+IE\)\s+:\s+(?P<value>\S+)$")
# Auth Key Management
p_auth_key_mgmt = re.compile(r"^Auth\s+Key\s+Management$")
# 802.1x : Enabled
p_key_dot1x = re.compile(r"^802.1x\s+:\s+(?P<value>\S+)$")
# PSK : Disabled
p_key_psk = re.compile(r"^PSK\s+:\s+(?P<value>\S+)$")
# CCKM : Disabled
p_key_cckm = re.compile(r"^CCKM\s+:\s+(?P<value>\S+)$")
# FT dot1x : Disabled
p_key_ftdot1x = re.compile(r"^FT\s+dot1x\s+:\s+(?P<value>\S+)$")
# FT PSK : Disabled
p_key_ftpsk = re.compile(r"^FT\s+PSK\s+:\s+(?P<value>\S+)$")
# Dot1x-SHA256 : Disabled
p_key_dot1xsha = re.compile(r"^Dot1x-SHA256\s+:\s+(?P<value>\S+)$")
# PSK-SHA256 : Disabled
p_key_psksha = re.compile(r"^PSK-SHA256\s+:\s+(?P<value>\S+)$")
# SAE : Disabled
p_key_sae = re.compile(r"^SAE\s+:\s+(?P<value>\S+)$")
# OWE : Disabled
p_key_owe = re.compile(r"^OWE\s+:\s+(?P<value>\S+)$")
# SUITEB-1X : Disabled
p_key_suiteb = re.compile(r"^SUITEB-1X\s+:\s+(?P<value>\S+)$")
# SUITEB192-1X : Disabled
p_key_suiteb192 = re.compile(r"^SUITEB192-1X\s+:\s+(?P<value>\S+)$")
# CCKM TSF Tolerance (msecs) : 1000
p_cckm_tsf_msec = re.compile(r"^CCKM\s+TSF\s+Tolerance\s+\(msecs\)\s+:\s+(?P<value>\d+)$")
# CCKM TSF Tolerance : 1000
p_cckm_tsf = re.compile(r"^CCKM\s+TSF\s+Tolerance\s+:\s+(?P<value>\d+)$")
# OWE Transition Mode : Disabled
p_owe_transition = re.compile(r"^OWE\s+Transition\s+Mode\s+:\s+(?P<value>\S+)$")
# OSEN : Disabled
p_osen = re.compile(r"^OSEN\s+:\s+(?P<value>\S+)$")
# FT Support : Adaptive
p_ftsupport = re.compile(r"^FT\s+Support\s+:\s+(?P<value>\S+)$")
# FT Reassociation Timeout (secs) : 20
p_ft_re_timeout_secs = re.compile(r"^FT\s+Reassociation\s+Timeout\s+\(secs\)\s+:\s+(?P<value>\d+)$")
# FT Reassociation Timeout : 20
p_ft_re_timeout = re.compile(r"^FT\s+Reassociation\s+Timeout\s+:\s+(?P<value>\d+)$")
# FT Over-The-DS mode : Disabled
p_ft_dst = re.compile(r"^FT\s+Over-The-DS\s+mode\s+:\s+(?P<value>\S+)$")
# PMF Support : Disabled
p_pmf = re.compile(r"^PMF\s+Support\s+:\s+(?P<value>\S+)$")
# PMF Association Comeback Timeout (secs): 1
p_association_comeback_secs = re.compile(r"^PMF\s+Association\s+Comeback\s+Timeout\s+\(secs\):\s+(?P<value>\d+)$")
# PMF Association Comeback Timeout : 1
p_association_comeback = re.compile(r"^PMF\s+Association\s+Comeback\s+Timeout\s+:\s+(?P<value>\d+)$")
# PMF SA Query Time (msecs) : 200
p_pmf_sa_msecs = re.compile(r"^PMF\s+SA\s+Query\s+Time\s+\(msecs\)\s+:\s+(?P<value>\d+)$")
# PMF SA Query Time : 200
p_pmf_sa = re.compile(r"^PMF\s+SA\s+Query\s+Time\s+:\s+(?P<value>\d+)$")
# Web Based Authentication : Disabled
p_web_authen = re.compile(r"^Web\s+Based\s+Authentication\s+:\s+(?P<value>\S+)$")
# Conditional Web Redirect : Disabled
p_web_redirect = re.compile(r"^Conditional\s+Web\s+Redirect\s+:\s+(?P<value>\S+)$")
# Splash-Page Web Redirect : Disabled
p_splash_page = re.compile(r"^Splash-Page\s+Web\s+Redirect\s+:\s+(?P<value>\S+)$")
# Webauth On-mac-filter Failure : Disabled
p_webauth_mac = re.compile(r"^Webauth\s+On-mac-filter\s+Failure\s+:\s+(?P<value>\S+)$")
# Webauth Authentication List Name : Disabled
p_webauthen_list = re.compile(r"^Webauth\s+Authentication\s+List\s+Name\s+:\s+(?P<value>\S+)$")
# Webauth Authorization List Name : Disabled
p_webauthor_list = re.compile(r"^Webauth\s+Authorization\s+List\s+Name\s+:\s+(?P<value>\S+)$")
# Webauth Parameter Map : Disabled
p_webauthen_map = re.compile(r"^Webauth\s+Parameter\s+Map\s+:\s+(?P<value>\S+)$")
# Band Select : Disabled
p_band_select = re.compile(r"^Band\s+Select\s+:\s+(?P<value>\S+)$")
# Load Balancing : Disabled
p_load_balancing = re.compile(r"^Load\s+Balancing\s+:\s+(?P<value>\S+)$")
# Multicast Buffer : Disabled
p_multi_buffer = re.compile(r"^Multicast\s+Buffer\s+:\s+(?P<value>\S+)$")
# Multicast Buffers (frames) : 0
p_multi_buffer_frames = re.compile(r"^Multicast\s+Buffer\s+\(frames\)\s+:\s+(?P<value>\d+)$")
# Multicast Buffer Size : 0
p_multi_buffer_size = re.compile(r"^Multicast\s+Buffer\s+Size\s+:\s+(?P<value>\d+)$")
# IP Source Guard : Disabled
p_ip_sourceguard = re.compile(r"^IP\s+Source\s+Guard\s+:\s+(?P<value>\S+)$")
# Assisted-Roaming
p_assisted_roaming = re.compile(r"^Assisted-Roaming$")
# Neighbor List : Enabled
p_ar_neighbor_list = re.compile(r"^Neighbor\s+List\s+:\s+(?P<value>\S+)$")
# Prediction List : Disabled
p_ar_prediction = re.compile(r"^Prediction\s+List\s+:\s+(?P<value>\S+)$")
# Dual Band Support : Disabled
p_ar_db = re.compile(r"^Dual\s+Band\s+Support\s+:\s+(?P<value>\S+)$")
# IEEE 802.11v parameters
p_ieee_11v = re.compile(r"^IEEE\s+802.11v\s+parameters$")
# Directed Multicast Service : Enabled
p_11v_multicast = re.compile(r"^Directed\s+Multicast\s+Service\s+:\s+(?P<value>\S+)$")
# BSS Max Idle : Enabled
p_11v_bss = re.compile(r"^BSS\s+Max\s+Idle\s+:\s+(?P<value>\S+)$")
# Protected Mode : Disabled
p_11v_bss_protected = re.compile(r"^Protected\s+Mode\s+:\s+(?P<value>\S+)$")
# Traffic Filtering Service : Disabled
p_11v_filtering = re.compile(r"^Traffic\s+Filtering\s+Service\s+:\s+(?P<value>\S+)$")
# BSS Transition : Enabled
p_11v_bss_trans = re.compile(r"^BSS\s+Transition\s+:\s+(?P<value>\S+)$")
# Disassociation Imminent : Disabled
p_11v_bss_trans_disassoc = re.compile(r"^Disassociation\s+Imminent\s+:\s+(?P<value>\S+)$")
# Optimised Roaming Timer (TBTTS) : 40
p_11v_bss_trans_disassoc_tbtts = re.compile(r"^Optimised\s+Roaming\s+Timer\s+\(TBTTS\)\s+:\s+(?P<value>\d+)$")
# Timer (TBTTS) : 200
p_11v_bss_trans_disassoc_timer = re.compile(r"^Timer\s+\(TBTTS\)\s+:\s+(?P<value>\d+)$")
# Optimised Roaming Timer (TBTTS) : 40
p_11v_bss_trans_disassoc_tbtts_extra = re.compile(r"^Optimised\s+Roaming\s+Timer\s+\(TBTTS\)\s+:\s+(?P<value>\d+)$")
# Timer (TBTTS) : 200
p_11v_bss_trans_disassoc_timer_extra = re.compile(r"^Timer\s+\(TBTTS\)\s+:\s+(?P<value>\d+)$")
# Optimised Roaming Timer : 40
p_11v_bss_trans_disassoc_tbtts = re.compile(r"^Optimised\s+Roaming\s+Timer\s+:\s+(?P<value>\d+)$")
# Timer : 200
p_11v_bss_trans_disassoc_timer = re.compile(r"^Timer\s+:\s+(?P<value>\d+)$")
# Dual Neighbor List : Disabled
p_11v_dual_neighbor = re.compile(r"^Dual\s+Neighbor\s+List\s+:\s+(?P<value>\S+)$")
# WNM Sleep Mode : Disabled
p_11v_wnm = re.compile(r"^WNM\s+Sleep\s+Mode\s+:\s+(?P<value>\S+)$")
# 802.11ac MU-MIMO : Enabled
p_11ac_mimo = re.compile(r"^802.11ac\s+MU-MIMO\s+:\s+(?P<value>\S+)$")
# 802.11ax parameters
p_11ax_params = re.compile(r"^802.11ax\s+parameters$")
# OFDMA Downlink : Enabled
p_11ax_ofdma_down = re.compile(r"^OFDMA\s+Downlink\s+:\s+(?P<value>\S+)$")
# OFDMA Uplink : Enabled
p_11ax_ofdma_up = re.compile(r"^OFDMA\s+Uplink\s+:\s+(?P<value>\S+)$")
# MU-MIMO Downlink : Enabled
p_11ax_mimo_down = re.compile(r"^MU-MIMO\s+Downlink\s+:\s+(?P<value>\S+)$")
# MU-MIMO Uplink : Enabled
p_11ax_mimo_up = re.compile(r"^MU-MIMO\s+Uplink\s+:\s+(?P<value>\S+)$")
# BSS Target Wake Up Time : Enabled
p_11ax_bss = re.compile(r"^BSS\s+Target\s+Wake\s+Up\s+Time\s+:\s+(?P<value>\S+)$")
# BSS Target Wake Up Time Broadcast Support : Enabled
p_11ax_bss_broad = re.compile(r"^BSS\s+Target\s+Wake\s+Up\s+Time\s+Broadcast\s+Support\s+:\s+(?P<value>\S+)$")
# mDNS Gateway Status : Bridge
p_mdns = re.compile(r"^mDNS\s+Gateway\s+Status\s+:\s+(?P<value>\S+)$")
# WIFI Alliance Agile Multiband : Disabled
p_wifi_alliance = re.compile(r"^WIFI\s+Alliance\s+Agile\s+Multiband\s+:\s+(?P<value>\S+)$")
# Device Analytics
p_device_analytics = re.compile(r"^Device\s+Analytics$")
# Advertise Support : Enabled
p_da_advetise = re.compile(r"^Advertise\s+Support\s+:\s+(?P<value>\S+)$")
# Share Data with Client : Disabled
p_da_share = re.compile(r"^Share\s+Data\s+with\s+Client\s+:\s+(?P<value>\S+)$")
# Client Scan Report (11k Beacon Radio Measurement)
p_client_11k = re.compile(r"^Client\s+Scan\s+Report\s+\(11k\s+Beacon\s+Radio\s+Measurement\)$")
# Request on Association : Disabled
p_client_11k_assoc = re.compile(r"^Request\s+on\s+Association\s+:\s+(?P<value>\S+)$")
# Request on Roam : Disabled
p_client_11k_roam = re.compile(r"^Request\s+on\s+Roam\s+:\s+(?P<value>\S+)$")
# WiFi to Cellular Steering : Disabled
P_wifi_steering = re.compile(r"^WiFi\s+to\s+Cellular\s+Steering\s+:\s+(?P<value>\S+)$")
wlan_dict = {}
current_wlan = ""
for line in output.splitlines():
line = line.strip()
if p_name.match(line):
# WLAN Profile Name : north
match = p_name.match(line)
if not wlan_dict.get("wlan_names"):
wlan_dict.update({ "wlan_names": {} })
wlan_dict["wlan_names"].update({ match.group("value") : {} })
current_wlan = match.group("value")
continue
elif p_delimeter.match(line):
continue
elif p_identifier.match(line):
# Identifier : 1
match = p_identifier.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "identifier": int(match.group("value")) })
continue
elif p_description.match(line):
# Description :
match = p_description.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "description": match.group("value") })
continue
elif p_name_ssid.match(line):
# Description :
match = p_name_ssid.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "ssid": match.group("value") })
continue
elif p_status.match(line):
# Status : Enabled
match = p_status.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "status": match.group("value") })
continue
elif p_broadcast.match(line):
# Broadcast SSID : Enabled
match = p_broadcast.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "broadcast_ssid": match.group("value") })
continue
elif p_advertise.match(line):
# Advertise-Apname : Disabled
match = p_advertise.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "advertise_ap_name": match.group("value") })
continue
elif p_universal_admin.match(line):
# Universal AP Admin : Disabled
match = p_universal_admin.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "universal_ap_admin": match.group("value") })
continue
elif p_max_clients_wlan.match(line):
# Max Associated Clients per WLAN : 0
match = p_max_clients_wlan.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "max_clients_wlan": int(match.group("value")) })
continue
elif p_max_clients_ap.match(line):
# Max Associated Clients per AP per WLAN : 0
match = p_max_clients_ap.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "max_clients_ap": int(match.group("value")) })
continue
elif p_max_clients_radio.match(line):
# Max Associated Clients per AP Radio per WLAN : 200
match = p_max_clients_radio.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "max_clients_ap_radio": int(match.group("value")) })
continue
elif p_max_clients_radio.match(line):
# Max Associated Clients per AP Radio per WLAN : 200
match = p_max_clients_radio.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "max_clients_ap_radio": int(match.group("value")) })
continue
elif p_okc.match(line):
# OKC : Enabled
match = p_okc.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "okc": match.group("value") })
continue
elif p_active_clients.match(line):
# Number of Active Clients : 4
match = p_active_clients.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "active_clients": int(match.group("value")) })
continue
elif p_chd.match(line):
# CHD per WLAN : Enabled
match = p_chd.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "chd_per_wlan": match.group("value") })
continue
elif p_wmm.match(line):
# WMM : Allowed
match = p_wmm.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "wmm": match.group("value") })
continue
elif p_direct_policy.match(line):
# WiFi Direct Policy : Disabled
match = p_direct_policy.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "wifi_direct_policy": match.group("value") })
continue
elif p_channel_scan.match(line):
# Channel Scan Defer Priority:
match = p_direct_policy.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "channel_scan_defer_priority": {} })
continue
elif p_priority.match(line):
# Priority (default) : 5
match = p_priority.match(line)
if not wlan_dict["wlan_names"][current_wlan]["channel_scan_defer_priority"].get("priority"):
wlan_dict["wlan_names"][current_wlan]["channel_scan_defer_priority"].update({"priority": [] })
wlan_dict["wlan_names"][current_wlan]["channel_scan_defer_priority"]["priority"].append(int(match.group("value")))
continue
elif p_scan_defer.match(line):
# Scan Defer Time (msecs) : 100
match = p_scan_defer.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "scan_defer_time_msecs": int(match.group("value")) })
continue
elif p_media_multi.match(line):
# Media Stream Multicast-direct : Disabled
match = p_media_multi.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "media_stream_multicast_direct": match.group("value") })
continue
elif p_ccx_aironet.match(line):
# CCX - AironetIe Support : Disabled
match = p_ccx_aironet.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "ccx_aironet_support": match.group("value") })
continue
elif p_p2p_block.match(line):
# Peer-to-Peer Blocking Action : Disabled
match = p_p2p_block.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "p2p_blocking_action": match.group("value") })
continue
elif p_radio_policy.match(line):
# Radio Policy : 802.11a only
match = p_radio_policy.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "radio_policy": match.group("value") })
continue
elif p_dtim_a.match(line):
# DTIM period for 802.11a radio :
match = p_dtim_a.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "dtim_period_dot11a": match.group("value") })
continue
elif p_dtim_b.match(line):
# DTIM period for 802.11b radio :
match = p_dtim_b.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "dtim_period_dot11b": match.group("value") })
continue
elif p_local_eap.match(line):
# Local EAP Authentication : Disabled
match = p_local_eap.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "local_eap_authentication": match.group("value") })
continue
elif p_mac_filter_auth.match(line):
# Mac Filter Authorization list name : Disabled
match = p_mac_filter_auth.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "mac_filter_authorization_list_name": match.group("value") })
continue
elif p_mac_filter_override.match(line):
# Mac Filter Override Authorization list name : Disabled
match = p_mac_filter_override.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "mac_filter_override_authorization_list_name": match.group("value") })
continue
elif p_accounting_list.match(line):
# Accounting list name :
match = p_accounting_list.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "accounting_list_name": match.group("value") })
continue
elif p_dot1x_authen.match(line):
# 802.1x authentication list name : default
match = p_dot1x_authen.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "dot1x_authentication_list_name": match.group("value") })
continue
elif p_dot1x_author.match(line):
# 802.1x authorization list name : Disabled
match = p_dot1x_author.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "dot1x_authorization_list_name": match.group("value") })
continue
elif p_security.match(line):
# Security
if not wlan_dict["wlan_names"][current_wlan].get("security"):
wlan_dict["wlan_names"][current_wlan].update({ "security": {} })
continue
elif p_dot11_authen.match(line):
# 802.11 Authentication : Open System
match = p_dot11_authen.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "dot11_authentication": match.group("value") })
continue
elif p_static_wep.match(line):
# Static WEP Keys : Disabled
match = p_static_wep.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "static_wep_keys": match.group("value") })
continue
elif p_pro_access.match(line):
# Wi-Fi Protected Access (WPA/WPA2/WPA3) : Enabled
match = p_pro_access.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "wifi_protected_access_wpa1_wpa2_wpa3": match.group("value") })
continue
elif p_wpa_ssn.match(line):
# WPA (SSN IE) : Disabled
match = p_wpa_ssn.match(line)
if not wlan_dict["wlan_names"][current_wlan]["security"].get("wifi_protected_access_wpa"):
wlan_dict["wlan_names"][current_wlan]["security"].update({ "wifi_protected_access_wpa": {} })
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa"].update({ "wpa_ssn_ie" : match.group("value") })
continue
elif p_wpa2_rsn.match(line):
# WPA2 (RSN IE) : Enabled
match = p_wpa2_rsn.match(line)
if not wlan_dict["wlan_names"][current_wlan]["security"].get("wifi_protected_access_wpa2"):
wlan_dict["wlan_names"][current_wlan]["security"].update({ "wifi_protected_access_wpa2": {} })
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"].update({ "wpa2_rsn_ie" : match.group("value") })
continue
elif p_mpsk.match(line):
# MPSK : Disabled
match = p_mpsk.match(line)
if not wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"].get("wpa2_attributes"):
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"].update({ "wpa2_attributes": {} })
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"]["wpa2_attributes"].update({ "mpsk" : match.group("value") })
continue
elif p_aes.match(line):
# AES Cipher : Enabled
match = p_aes.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"]["wpa2_attributes"].update({ "aes" : match.group("value") })
continue
elif p_ccmp256.match(line):
# CCMP256 Cipher : Disabled
match = p_ccmp256.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"]["wpa2_attributes"].update({ "ccmp256" : match.group("value") })
continue
elif p_gcmp128.match(line):
# GCMP128 Cipher : Disabled
match = p_gcmp128.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"]["wpa2_attributes"].update({ "gcmp128" : match.group("value") })
continue
elif p_gcmp256.match(line):
# GCMP128 Cipher : Disabled
match = p_gcmp256.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"]["wpa2_attributes"].update({ "gcmp256" : match.group("value") })
continue
elif p_gtk.match(line):
# Randomized GTK : Disabled
match = p_gtk.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa2"]["wpa2_attributes"].update({ "randomized_gtk" : match.group("value") })
continue
elif p_wpa3.match(line):
# WPA3 (WPA3 IE) : Disabled
match = p_wpa3.match(line)
if not wlan_dict["wlan_names"][current_wlan]["security"].get("wifi_protected_access_wpa3"):
wlan_dict["wlan_names"][current_wlan]["security"].update({ "wifi_protected_access_wpa3": {} })
wlan_dict["wlan_names"][current_wlan]["security"]["wifi_protected_access_wpa3"].update({ "wpa3_ie" : match.group("value") })
continue
elif p_auth_key_mgmt.match(line):
# Auth Key Management
if not wlan_dict["wlan_names"][current_wlan]["security"].get("auth_key_mgmt"):
wlan_dict["wlan_names"][current_wlan]["security"].update({ "auth_key_mgmt": {} })
continue
elif p_key_dot1x.match(line):
# 802.1x : Enabled
match = p_key_dot1x.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "dot1x": match.group("value")})
continue
elif p_key_psk.match(line):
# PSK : Disabled
match = p_key_psk.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "psk": match.group("value")})
continue
elif p_key_cckm.match(line):
# CCKM : Disabled
match = p_key_cckm.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "cckm": match.group("value")})
continue
elif p_key_ftdot1x.match(line):
# FT dot1x : Disabled
match = p_key_ftdot1x.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "ft_dot1x": match.group("value")})
continue
elif p_key_ftpsk.match(line):
# FT PSK : Disabled
match = p_key_ftpsk.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "ft_psk": match.group("value")})
continue
elif p_key_dot1xsha.match(line):
# Dot1x-SHA256 : Disabled
match = p_key_dot1xsha.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "dot1x_sha256": match.group("value")})
continue
elif p_key_psksha.match(line):
# PSK-SHA256 : Disabled
match = p_key_psksha.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "psk_sha256": match.group("value")})
continue
elif p_key_sae.match(line):
# SAE : Disabled
match = p_key_sae.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "sae": match.group("value")})
continue
elif p_key_owe.match(line):
# OWE : Disabled
match = p_key_owe.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "owe": match.group("value")})
continue
elif p_key_suiteb.match(line):
# SUITEB-1X : Disabled
match = p_key_suiteb.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "suiteb_1x": match.group("value")})
continue
elif p_key_suiteb192.match(line):
# SUITEB192-1X : Disabled
match = p_key_suiteb192.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["auth_key_mgmt"].update({ "suiteb192_1x": match.group("value")})
continue
elif p_cckm_tsf.match(line):
# CCKM TSF Tolerance : 1000
match = p_cckm_tsf.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "cckm_tsf_tolerance_msecs": int(match.group("value")) })
continue
elif p_cckm_tsf_msec.match(line):
# CCKM TSF Tolerance (msecs) : 1000
match = p_cckm_tsf_msec.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "cckm_tsf_tolerance_msecs": int(match.group("value")) })
continue
elif p_owe_transition.match(line):
# OWE Transition Mode : Disabled
match = p_owe_transition.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "owe_transition_mode": match.group("value") })
continue
elif p_osen.match(line):
# OSEN : Disabled
match = p_osen.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "osen": match.group("value") })
continue
elif p_ftsupport.match(line):
# FT Support : Adaptive
match = p_ftsupport.match(line)
if not wlan_dict["wlan_names"][current_wlan]["security"].get("ft_support"):
wlan_dict["wlan_names"][current_wlan]["security"].update({ "ft_support": {} })
wlan_dict["wlan_names"][current_wlan]["security"]["ft_support"].update({ "ft_support_status": match.group("value")})
elif p_ft_re_timeout_secs.match(line):
# FT Reassociation Timeout (secs) : 20
match = p_ft_re_timeout_secs.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["ft_support"].update({ "ft_reassociation_timer_secs": int(match.group("value")) })
continue
elif p_ft_re_timeout.match(line):
# FT Reassociation Timeout (secs) : 20
match = p_ft_re_timeout.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["ft_support"].update({ "ft_reassociation_timer_secs": int(match.group("value")) })
continue
elif p_ft_dst.match(line):
# FT Over-The-DS mode : Disabled
match = p_ft_dst.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["ft_support"].update({ "ft_over_the_ds_mode": match.group("value") })
continue
elif p_pmf.match(line):
# PMF Support : Disabled
match = p_pmf.match(line)
if not wlan_dict["wlan_names"][current_wlan]["security"].get("pmf_support"):
wlan_dict["wlan_names"][current_wlan]["security"].update({ "pmf_support": {} })
wlan_dict["wlan_names"][current_wlan]["security"]["pmf_support"].update({ "pmf_support_status": match.group("value") })
continue
elif p_association_comeback_secs.match(line):
# PMF Association Comeback Timeout (secs): 1
match = p_association_comeback_secs.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["pmf_support"].update({ "pmf_association_comeback_timeout_secs": int(match.group("value")) })
continue
elif p_association_comeback.match(line):
match = p_association_comeback.match(line)
# PMF Association Comeback Timeout (secs): 1
wlan_dict["wlan_names"][current_wlan]["security"]["pmf_support"].update({ "pmf_association_comeback_timeout_secs": int(match.group("value")) })
continue
elif p_pmf_sa_msecs.match(line):
# PMF SA Query Time (msecs) : 200
match = p_pmf_sa_msecs.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["pmf_support"].update({ "pmf_sa_query_time_msecs": int(match.group("value")) })
continue
elif p_pmf_sa.match(line):
# PMF SA Query Time (msecs) : 200
match = p_pmf_sa.match(line)
wlan_dict["wlan_names"][current_wlan]["security"]["pmf_support"].update({ "pmf_sa_query_time_msecs": int(match.group("value")) })
continue
elif p_web_authen.match(line):
# Web Based Authentication : Disabled
match = p_web_authen.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "web_based_authenticaion": match.group("value") })
continue
elif p_web_redirect.match(line):
# Conditional Web Redirect : Disabled
match = p_web_redirect.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "conditional_web_redirect": match.group("value") })
continue
elif p_splash_page.match(line):
# Splash-Page Web Redirect : Disabled
match = p_splash_page.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "splash_page_web_redirect": match.group("value") })
continue
elif p_webauth_mac.match(line):
# Webauth On-mac-filter Failure : Disabled
match = p_webauth_mac.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "webauth_on_mac_filter_failure": match.group("value") })
continue
elif p_webauthen_list.match(line):
# Webauth Authentication List Name : Disabled
match = p_webauthen_list.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "webauth_authentication_list_name": match.group("value") })
continue
elif p_webauthor_list.match(line):
# Webauth Authorization List Name : Disabled
match = p_webauthor_list.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "webauth_authorization_list_name": match.group("value") })
continue
elif p_webauthen_map.match(line):
# Webauth Parameter Map : Disabled
match = p_webauthen_map.match(line)
wlan_dict["wlan_names"][current_wlan]["security"].update({ "webauth_parameter_map": match.group("value") })
continue
elif p_band_select.match(line):
# Band Select : Disabled
match = p_band_select.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "band_select": match.group("value") })
continue
elif p_load_balancing.match(line):
# Load Balancing : Disabled
match = p_load_balancing.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "load_balancing": match.group("value") })
continue
elif p_multi_buffer.match(line):
# Multicast Buffer : Disabled
match = p_multi_buffer.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "multicast_buffer": match.group("value") })
continue
elif p_multi_buffer_frames.match(line):
# Multicast Buffers (frames) : 0
match = p_multi_buffer_frames.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "multicast_buffer_frames": int(match.group("value")) })
continue
elif p_multi_buffer_size.match(line):
# Multicast Buffer Size : 0
match = p_multi_buffer_size.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "multicast_buffer_size": int(match.group("value")) })
continue
elif p_ip_sourceguard.match(line):
# IP Source Guard : Disabled
match = p_ip_sourceguard.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "ip_source_guard": match.group("value") })
continue
elif p_assisted_roaming.match(line):
# Assisted-Roaming
if not wlan_dict["wlan_names"][current_wlan].get("assisted_roaming"):
wlan_dict["wlan_names"][current_wlan].update({ "assisted_roaming": {} })
continue
elif p_ar_neighbor_list.match(line):
# Neighbor List : Enabled
match = p_ar_neighbor_list.match(line)
wlan_dict["wlan_names"][current_wlan]["assisted_roaming"].update({ "neighbbor_list": match.group("value") })
continue
elif p_ar_prediction.match(line):
# Prediction List : Disabled
match = p_ar_prediction.match(line)
wlan_dict["wlan_names"][current_wlan]["assisted_roaming"].update({ "prediction_list": match.group("value") })
continue
elif p_ar_db.match(line):
# Dual Band Support : Disabled
match = p_ar_db.match(line)
wlan_dict["wlan_names"][current_wlan]["assisted_roaming"].update({ "dual_band_support": match.group("value") })
continue
elif p_ieee_11v.match(line):
# IEEE 802.11v parameters
if not wlan_dict["wlan_names"][current_wlan].get("ieee_dot11v_parameters"):
wlan_dict["wlan_names"][current_wlan].update({ "ieee_dot11v_parameters": {} })
continue
elif p_11v_multicast.match(line):
# Directed Multicast Service : Enabled
match = p_11v_multicast.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"].update({ "directed_multicast_service": match.group("value") })
continue
elif p_11v_bss.match(line):
# BSS Max Idle : Enabled
match = p_11v_bss.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"].update({ "bss_max_idle": {} })
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_max_idle"].update({ "bss_max_idle_status": match.group("value") })
continue
elif p_11v_bss_protected.match(line):
# Protected Mode : Disabled
match = p_11v_bss_protected.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_max_idle"].update({ "protected_mode": match.group("value") })
continue
elif p_11v_filtering.match(line):
# Traffic Filtering Service : Disabled
match = p_11v_filtering.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"].update({ "traffic_filtering_servce": match.group("value") })
continue
elif p_11v_bss_trans.match(line):
# BSS Transition : Enabled
match = p_11v_bss_trans.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"].update({ "bss_transition": {}})
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"].update({ "bss_transition_status": match.group("value") })
continue
elif p_11v_bss_trans_disassoc.match(line):
# Disassociation Imminent : Disabled
match = p_11v_bss_trans_disassoc.match(line)
if not wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"].get("disassociation_imminent"):
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"].update({ "disassociation_imminent": {} })
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"]["disassociation_imminent"].update({ "disassociation_imminent_status": match.group("value") })
continue
elif p_11v_bss_trans_disassoc_tbtts_extra.match(line):
# Optimised Roaming Timer (TBTTS) : 40
match = p_11v_bss_trans_disassoc_tbtts_extra.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"]["disassociation_imminent"].update({ "optimised_roaming_timer": int(match.group("value")) })
continue
elif p_11v_bss_trans_disassoc_timer_extra.match(line):
# Timer (TBTTS) : 200
match = p_11v_bss_trans_disassoc_timer_extra.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"]["disassociation_imminent"].update({ "timer": int(match.group("value")) })
continue
elif p_11v_bss_trans_disassoc_tbtts.match(line):
# Optimised Roaming Timer (TBTTS) : 40
match = p_11v_bss_trans_disassoc_tbtts.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"]["disassociation_imminent"].update({ "optimised_roaming_timer": int(match.group("value")) })
continue
elif p_11v_bss_trans_disassoc_timer.match(line):
# Timer (TBTTS) : 200
match = p_11v_bss_trans_disassoc_timer.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"]["disassociation_imminent"].update({ "timer": int(match.group("value")) })
continue
elif p_11v_dual_neighbor.match(line):
# Dual Neighbor List : Disabled
match = p_11v_dual_neighbor.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"]["bss_transition"].update({ "dual_neighbor_list": match.group("value") })
continue
elif p_11v_wnm.match(line):
# BSS Max Idle : Enabled
match = p_11v_wnm.match(line)
wlan_dict["wlan_names"][current_wlan]["ieee_dot11v_parameters"].update({ "wmn_sleep_mode": match.group("value") })
continue
elif p_11ac_mimo.match(line):
# 802.11ac MU-MIMO : Enabled
match = p_11ac_mimo.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "dot11ac_mu_mimo": match.group("value") })
continue
elif p_11ax_params.match(line):
# 802.11ax parameters
if not wlan_dict["wlan_names"][current_wlan].get("dot11ax_parameters"):
wlan_dict["wlan_names"][current_wlan].update({ "dot11ax_parameters": {} })
continue
elif p_11ax_ofdma_down.match(line):
# OFDMA Downlink : Enabled
match = p_11ax_ofdma_down.match(line)
wlan_dict["wlan_names"][current_wlan]["dot11ax_parameters"].update({ "ofdma_downlink": match.group("value") })
continue
elif p_11ax_ofdma_up.match(line):
# OFDMA Uplink : Enabled
match = p_11ax_ofdma_up.match(line)
wlan_dict["wlan_names"][current_wlan]["dot11ax_parameters"].update({ "ofdma_uplink": match.group("value") })
continue
elif p_11ax_mimo_down.match(line):
# MU-MIMO Downlink : Enabled
match = p_11ax_mimo_down.match(line)
wlan_dict["wlan_names"][current_wlan]["dot11ax_parameters"].update({ "mu_mimo_downlink": match.group("value") })
continue
elif p_11ax_mimo_up.match(line):
# MU-MIMO Downlink : Enabled
match = p_11ax_mimo_up.match(line)
wlan_dict["wlan_names"][current_wlan]["dot11ax_parameters"].update({ "mu_mimo_uplink": match.group("value") })
continue
elif p_11ax_bss.match(line):
# BSS Target Wake Up Time : Enabled
match = p_11ax_bss.match(line)
wlan_dict["wlan_names"][current_wlan]["dot11ax_parameters"].update({ "bss_target_wake_up_time": match.group("value") })
continue
elif p_11ax_bss_broad.match(line):
# BSS Target Wake Up Time : Enabled
match = p_11ax_bss_broad.match(line)
wlan_dict["wlan_names"][current_wlan]["dot11ax_parameters"].update({ "bss_target_wake_up_time_broadcast_support": match.group("value") })
continue
elif p_mdns.match(line):
# mDNS Gateway Status : Bridge
match = p_mdns.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "mdns_gateway_status": match.group("value") })
continue
elif p_wifi_alliance.match(line):
# WIFI Alliance Agile Multiband : Disabled
match = p_wifi_alliance.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "wifi_alliance_agile_multiband": match.group("value") })
continue
elif p_device_analytics.match(line):
# Device Analytics
if not wlan_dict["wlan_names"][current_wlan].get("device_analytics"):
wlan_dict["wlan_names"][current_wlan].update({ "device_analytics": {} })
continue
elif p_da_advetise.match(line):
match = p_da_advetise.match(line)
# Advertise Support : Enabled
wlan_dict["wlan_names"][current_wlan]["device_analytics"].update({ "advertise_support": match.group("value")})
continue
elif p_da_share.match(line):
match = p_da_share.match(line)
# Share Data with Client : Disabled
wlan_dict["wlan_names"][current_wlan]["device_analytics"].update({ "share_data_with_client": match.group("value")})
continue
elif p_client_11k.match(line):
# Client Scan Report (11k Beacon Radio Measurement)
if not wlan_dict["wlan_names"][current_wlan].get("client_scan_report_11k_beacon_radio_measurement"):
wlan_dict["wlan_names"][current_wlan].update({ "client_scan_report_11k_beacon_radio_measurement": {} })
continue
elif p_client_11k_assoc.match(line):
# Request on Association : Disabled
match = p_client_11k_assoc.match(line)
wlan_dict["wlan_names"][current_wlan]["client_scan_report_11k_beacon_radio_measurement"].update({ "request_on_association": match.group("value") })
continue
elif p_client_11k_roam.match(line):
# Request on Roam : Disabled
match = p_client_11k_roam.match(line)
wlan_dict["wlan_names"][current_wlan]["client_scan_report_11k_beacon_radio_measurement"].update({ "request_on_roam": match.group("value") })
continue
elif P_wifi_steering.match(line):
# WiFi to Cellular Steering : Disabled
match = P_wifi_steering.match(line)
wlan_dict["wlan_names"][current_wlan].update({ "wifi_to_cellular_steering": match.group("value") })
continue
return wlan_dict
| 68,220
| 0
| 54
|
b3afb93ed72248dc0eecddf59cfb296696006867
| 3,314
|
py
|
Python
|
example_app.py
|
garethr/mnml
|
30876c0a26ddee3e736c3ccca094ecb2250f9011
|
[
"MIT"
] | 2
|
2016-05-09T13:58:38.000Z
|
2016-07-27T11:53:36.000Z
|
example_app.py
|
garethr/mnml
|
30876c0a26ddee3e736c3ccca094ecb2250f9011
|
[
"MIT"
] | null | null | null |
example_app.py
|
garethr/mnml
|
30876c0a26ddee3e736c3ccca094ecb2250f9011
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from mnml import TokenBasedApplication, RegexBasedApplication, \
RequestHandler, HttpResponse, HttpResponseRedirect, \
development_server
# a MNML application consists of a series of RequestHandlers which return
# HttpResponse objects when methods are called. The method called should
# match the HTTP method of the request.
class Foo(RequestHandler):
    """Handler showing multiple HTTP verbs and optional URL arguments."""

    def GET(self, number=1, number2=1):
        """Render a greeting built from the two (optional) path arguments.

        Any templating engine could be used here instead, as long as it
        yields a string for the HttpResponse body.
        """
        body = "<h1>Hello World of %s and %s</h1>" % (number, number2)
        return HttpResponse(body)

    def PUT(self):
        """Minimal PUT handler returning a static page."""
        return HttpResponse("<h1>Hello World of Put</h1>")
class Form(RequestHandler):
    """Handler that renders an HTML form and echoes back posted data."""

    def GET(self, number=1, number2=1):
        """Serve the static HTML form (arguments accepted but unused).

        A template engine could produce this markup instead; anything that
        renders to a string works as an HttpResponse body.
        """
        form_html = """<form action="/form" method="post">
        <input type="text" name="data" id="data"/>
        <input type="submit" value="submit"/>
        </form>"""
        return HttpResponse(form_html)

    def POST(self):
        """Echo the submitted 'data' form field back in the response body."""
        payload = self.request.POST['data'].value
        return HttpResponse("<h1>Hello World of %s</h1>" % payload)
class Bar(RequestHandler):
    """Handler demonstrating both permanent and temporary redirects."""

    def GET(self):
        """Send a permanent (301) redirect back to the site root."""
        return HttpResponseRedirect("/")

    def DELETE(self):
        """Send a temporary (302) redirect back to the site root."""
        return HttpResponseRedirect("/", False)
class NotFoundPageHandler(RequestHandler):
    """Catch-all handler that turns unmatched paths into a 404 response.

    The framework would report page-not-found on its own, but routing to an
    explicit handler gives the application control over what happens next.
    """

    def GET(self):
        """Answer every GET with the framework's built-in 404 error page.

        Overload ``error`` for finer control over the rendered response.
        """
        return self.error(404)
# MNML supports two different routing mechanisms: one using regular
# expressions and another using tokens. This is done predominantly to
# highlight the pluggable nature of MNML, but also because the two
# authors both prefer different approaches. Use whichever suits you best.
# Patterns are matched top to bottom and captured groups are handed to the
# handler method as positional arguments, so the '/.*' catch-all must stay
# last for the real routes to win first.
routes = (
    (r'^/$', Foo),
    (r'^/form$', Form),
    (r'^/foo/([0-9]+)/([0-9]+)', Foo),
    (r'^/bar$', Bar),
    ('/.*', NotFoundPageHandler),
)
application = RegexBasedApplication(routes)
# The string literal below is a disabled example of the alternative
# token-based routing style; swap it in to use TokenBasedApplication.
"""
routes = (
    ('/', Foo),
    ('/myview/:stuff/', Bar)
)
application = TokenBasedApplication(routes)
"""
if __name__ == '__main__':
    # Run MNML's built-in development server when executed as a script.
    development_server(application)
| 34.164948
| 74
| 0.637296
|
#!/usr/bin/env python
from mnml import TokenBasedApplication, RegexBasedApplication, \
RequestHandler, HttpResponse, HttpResponseRedirect, \
development_server
# a MNML application consists of a series of RequestHandlers which return
# HttpResponse objects when methods are called. The method called should
# match the HTTP method of the request.
class Foo(RequestHandler):
    """Handler showing multiple HTTP verbs and optional URL arguments."""

    def GET(self, number=1, number2=1):
        """Render a greeting built from the two (optional) path arguments.

        Any templating engine could be used here instead, as long as it
        yields a string for the HttpResponse body.
        """
        body = "<h1>Hello World of %s and %s</h1>" % (number, number2)
        return HttpResponse(body)

    def PUT(self):
        """Minimal PUT handler returning a static page."""
        return HttpResponse("<h1>Hello World of Put</h1>")
class Form(RequestHandler):
    """Handler that renders an HTML form and echoes back posted data."""

    def GET(self, number=1, number2=1):
        """Serve the static HTML form (arguments accepted but unused).

        A template engine could produce this markup instead; anything that
        renders to a string works as an HttpResponse body.
        """
        form_html = """<form action="/form" method="post">
        <input type="text" name="data" id="data"/>
        <input type="submit" value="submit"/>
        </form>"""
        return HttpResponse(form_html)

    def POST(self):
        """Echo the submitted 'data' form field back in the response body."""
        payload = self.request.POST['data'].value
        return HttpResponse("<h1>Hello World of %s</h1>" % payload)
class Bar(RequestHandler):
    """Handler demonstrating both permanent and temporary redirects."""

    def GET(self):
        """Send a permanent (301) redirect back to the site root."""
        return HttpResponseRedirect("/")

    def DELETE(self):
        """Send a temporary (302) redirect back to the site root."""
        return HttpResponseRedirect("/", False)
class NotFoundPageHandler(RequestHandler):
    """Catch-all handler that turns unmatched paths into a 404 response.

    The framework would report page-not-found on its own, but routing to an
    explicit handler gives the application control over what happens next.
    """

    def GET(self):
        """Answer every GET with the framework's built-in 404 error page.

        Overload ``error`` for finer control over the rendered response.
        """
        return self.error(404)
# MNML supports two different routing mechanisms. One using regular
# expressions and another using tokens. This is done predominantly to
# highlight the pluggable nature of MNML, but also because the two
# authors both prefer different approaches. Use whichever suites you best.
# Regex-based routing table: patterns are matched top to bottom and
# captured groups are handed to the handler method as positional
# arguments, so the '/.*' catch-all must stay last.
routes = (
    (r'^/$', Foo),
    (r'^/form$', Form),
    (r'^/foo/([0-9]+)/([0-9]+)', Foo),
    (r'^/bar$', Bar),
    ('/.*', NotFoundPageHandler),
)
application = RegexBasedApplication(routes)
# The string literal below is a disabled example of the alternative
# token-based routing style; swap it in to use TokenBasedApplication.
"""
routes = (
    ('/', Foo),
    ('/myview/:stuff/', Bar)
)
application = TokenBasedApplication(routes)
"""
if __name__ == '__main__':
    # Run MNML's built-in development server when executed as a script.
    development_server(application)
| 0
| 0
| 0
|
6b267beae5c010d77c5213de3663205fb00b003f
| 2,235
|
py
|
Python
|
urls.py
|
NexellCorp/infrastructure_server_linaro-license-protection
|
c328a1e023c60b443b2fca6349179104f7637077
|
[
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null |
urls.py
|
NexellCorp/infrastructure_server_linaro-license-protection
|
c328a1e023c60b443b2fca6349179104f7637077
|
[
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null |
urls.py
|
NexellCorp/infrastructure_server_linaro-license-protection
|
c328a1e023c60b443b2fca6349179104f7637077
|
[
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null |
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing for the license-protected downloads site. Entries are tried
# in order, so the final catch-all file-server pattern must stay last.
urlpatterns = patterns(
    '',
    # Django admin interface.
    url(r'^admin/', include(admin.site.urls)),
    # Use "linaro-openid" to allow peaceful coexistence of both
    # python-apache-openid and django-openid authentication on the
    # same server. When we get rid of apache openid protection,
    # we can go back to using just "openid" here.
    url(r'^linaro-openid/', include('django_openid_auth.urls')),
    url(r'^logout/$', 'django.contrib.auth.views.logout'),
    # Handle JS libs and CSS directly from the configured static paths.
    url(r'^js/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.JS_PATH}),
    url(r'^css/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.CSS_PATH}),
    url(r'^get-remote-static',
        'license_protected_downloads.views.get_remote_static',
        name='get_remote_static'),
    # The license page...
    url(r'^license$',
        'license_protected_downloads.views.show_license',
        name='show_license'),
    # Exceptions redirected to root...
    url(r'^license',
        'license_protected_downloads.views.redirect_to_root',
        name='redirect_to_root'),
    # Accept the license
    url(r'^accept-license',
        'license_protected_downloads.views.accept_license',
        name='accept_license'),
    # Recursively get files for rendering (async calls accepted only).
    url(r'^get-textile-files',
        'license_protected_downloads.views.get_textile_files',
        name='get_textile_files'),
    # v1 JSON API: directory listings and license lookups by path.
    url(r'^api/ls/(?P<path>.*)$',
        'license_protected_downloads.api.v1.list_files_api'),
    url(r'^api/license/(?P<path>.*)$',
        'license_protected_downloads.api.v1.get_license_api'),
    # v2 API: token management and artifact publishing.
    url(r'^api/v2/token/(?P<token>.*)$',
        'license_protected_downloads.api.v2.token'),
    url(r'^api/v2/publish/(?P<path>.*)$',
        'license_protected_downloads.api.v2.publish'),
    # Catch-all. We always return a file (or try to) if it exists.
    # This handler does that.
    url(r'(?P<path>.*)', 'license_protected_downloads.views.file_server'),
)
| 33.863636
| 74
| 0.666219
|
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
# Use "linaro-openid" to allow peaceful coexistence of both
# python-apache-openid and django-openid authentication on the
# same server. When we get rid of apache openid protection,
# we can go back to using just "openid" here.
url(r'^linaro-openid/', include('django_openid_auth.urls')),
url(r'^logout/$', 'django.contrib.auth.views.logout'),
# Handle JS libs and CSS.
url(r'^js/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.JS_PATH}),
url(r'^css/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.CSS_PATH}),
url(r'^get-remote-static',
'license_protected_downloads.views.get_remote_static',
name='get_remote_static'),
# The license page...
url(r'^license$',
'license_protected_downloads.views.show_license',
name='show_license'),
# Exceptions redirected to root...
url(r'^license',
'license_protected_downloads.views.redirect_to_root',
name='redirect_to_root'),
# Accept the license
url(r'^accept-license',
'license_protected_downloads.views.accept_license',
name='accept_license'),
# Recursively get files for rendering (async calls accepted only).
url(r'^get-textile-files',
'license_protected_downloads.views.get_textile_files',
name='get_textile_files'),
url(r'^api/ls/(?P<path>.*)$',
'license_protected_downloads.api.v1.list_files_api'),
url(r'^api/license/(?P<path>.*)$',
'license_protected_downloads.api.v1.get_license_api'),
url(r'^api/v2/token/(?P<token>.*)$',
'license_protected_downloads.api.v2.token'),
url(r'^api/v2/publish/(?P<path>.*)$',
'license_protected_downloads.api.v2.publish'),
# Catch-all. We always return a file (or try to) if it exists.
# This handler does that.
url(r'(?P<path>.*)', 'license_protected_downloads.views.file_server'),
)
| 0
| 0
| 0
|
f8e96160c49a25e10f1d1aa9fdbf711670774933
| 7,285
|
py
|
Python
|
src/gather/api.py
|
pygather/gather
|
dbe5a2109b66e40817f805f07aeb4d98a765d1b2
|
[
"MIT"
] | 7
|
2017-06-02T03:56:44.000Z
|
2017-11-19T23:15:06.000Z
|
src/gather/api.py
|
pygather/gather
|
dbe5a2109b66e40817f805f07aeb4d98a765d1b2
|
[
"MIT"
] | 3
|
2017-09-28T16:41:50.000Z
|
2017-11-14T02:53:03.000Z
|
src/gather/api.py
|
elcaminoreal/gather
|
dbe5a2109b66e40817f805f07aeb4d98a765d1b2
|
[
"MIT"
] | null | null | null |
"""Gather -- Collect all your plugins
Gather allows a way to register plugins.
It features the ability to register the plugins from any module,
in any package, in any distribution.
A given module can register plugins of multiple types.
In order to have anything registered from a package,
it needs to declare that it supports :code:`gather` in its `setup.py`:
.. code::
entry_points={
'gather': [
"dummy=ROOT_PACKAGE:dummy",
]
The :code:`ROOT_PACKAGE` should point to the Python name of the package:
i.e., what users are expected to :code:`import` at the top-level.
Note that while having special facilities to run functions as subcommands,
Gather can be used to collect anything.
"""
import importlib
import sys
import pkg_resources
import attr
import venusian
class GatherCollisionError(ValueError):
"""Two or more plugins registered for the same name."""
def _one_of(_registry, _effective_name, objct):
"""
Assign one of the possible options.
When given as a collection strategy to :code:`collect`,
this will assign one of the options to a name in case more
than one item is registered to the same name.
This is the default.
"""
return objct
def _all(registry, effective_name, objct):
"""
Assign all of the possible options.
Collect all registered items into a set,
and assign that set to a name. Note that
even if only one item is assigned to a name,
that name will be assigned to a set of length 1.
"""
myset = registry.get(effective_name, set())
myset.add(objct)
return myset
def _exactly_one(registry, effective_name, objct):
"""
Raise an error on colliding registration.
If more than one item is registered to the
same name, raise a :code:`GatherCollisionError`.
"""
if effective_name in registry:
raise GatherCollisionError("Attempt to double register",
registry, effective_name, objct)
return objct
@attr.s(frozen=True)
class Collector(object):
"""
A plugin collector.
A collector allows to *register* functions or classes by modules,
and *collect*-ing them when they need to be used.
"""
name = attr.ib(default=None)
depth = attr.ib(default=1)
one_of = staticmethod(_one_of)
all = staticmethod(_all)
exactly_one = staticmethod(_exactly_one)
def register(self, name=None, transform=lambda x: x):
"""
Register a class or function
Args:
name (str): optional. Name to register the class or function as.
(default is name of object)
transform (callable): optional. A one-argument function. Will be called,
and the return value used in collection.
Default is identity function
This is meant to be used as a decoator:
.. code::
@COLLECTOR.register()
def specific_subcommand(args):
pass
@COLLECTOR.register(name='another_specific_name')
def main(args):
pass
"""
def attach(func):
"""Attach callback to be called when object is scanned"""
venusian.attach(func, callback, depth=self.depth)
return func
return attach
def collect(self, strategy=one_of.__func__):
"""
Collect all registered functions or classes.
Returns a dictionary mapping names to registered elements.
"""
def ignore_import_error(_unused):
"""
Ignore ImportError during collection.
Some modules raise import errors for various reasons,
and should be just treated as missing.
"""
if not issubclass(sys.exc_info()[0], ImportError):
raise # pragma: no cover
params = _ScannerParameters(strategy=strategy)
scanner = venusian.Scanner(update=params.update, tag=self)
for module in _get_modules():
scanner.scan(module, onerror=ignore_import_error)
params.raise_if_needed()
return params.registry
@attr.s
class _ScannerParameters(object):
"""
Parameters for scanner
Update the registry respecting the strategy,
and raise errors at the end.
"""
_please_raise = attr.ib(init=False, default=None)
_strategy = attr.ib()
registry = attr.ib(init=False, default=attr.Factory(dict))
def update(self, name, objct):
"""Update registry with name->objct"""
try:
res = self._strategy(self.registry, name, objct)
self.registry[name] = res
except GatherCollisionError as exc:
self._please_raise = exc
def raise_if_needed(self):
"""Raise exception if any of the updates failed."""
if self._please_raise is not None:
raise self._please_raise
def run(argv, commands, version, output):
"""
Run the specified subcommand.
Args:
argv (list of str): Arguments to be processed
commands (mapping of str to callables): Commands (usually collected by a :code:`Collector`)
version (str): Version to display if :code:`--version` is asked
output (file): Where to write output to
"""
if len(argv) < 1:
argv = argv + ['help']
if argv[0] in ('version', '--version'):
output.write("Version {}\n".format(version))
return
if argv[0] in ('help', '--help') or argv[0] not in commands:
output.write("Available subcommands:\n")
for command in commands.keys():
output.write("\t{}\n".format(command))
output.write("Run subcommand with '--help' for more information\n")
return
commands[argv[0]](argv)
@attr.s(frozen=True)
class Wrapper(object):
"""Add extra data to an object"""
original = attr.ib()
extra = attr.ib()
@classmethod
def glue(cls, extra):
"""
Glue extra data to an object
Args:
extra: what to add
Returns:
callable: function of one argument that returns a :code:`Wrapped`
This method is useful mainly as the :code:`transform` parameter
of a :code:`register` call.
"""
def ret(original):
"""Return a :code:`Wrapper` with the original and extra"""
return cls(original=original, extra=extra)
return ret
__all__ = ['Collector', 'run', 'Wrapper']
| 28.568627
| 99
| 0.619355
|
"""Gather -- Collect all your plugins
Gather allows a way to register plugins.
It features the ability to register the plugins from any module,
in any package, in any distribution.
A given module can register plugins of multiple types.
In order to have anything registered from a package,
it needs to declare that it supports :code:`gather` in its `setup.py`:
.. code::
entry_points={
'gather': [
"dummy=ROOT_PACKAGE:dummy",
]
The :code:`ROOT_PACKAGE` should point to the Python name of the package:
i.e., what users are expected to :code:`import` at the top-level.
Note that while having special facilities to run functions as subcommands,
Gather can be used to collect anything.
"""
import importlib
import sys
import pkg_resources
import attr
import venusian
def _get_modules():
for entry_point in pkg_resources.iter_entry_points(group='gather'):
module = importlib.import_module(entry_point.module_name)
yield module
class GatherCollisionError(ValueError):
"""Two or more plugins registered for the same name."""
def _one_of(_registry, _effective_name, objct):
"""
Assign one of the possible options.
When given as a collection strategy to :code:`collect`,
this will assign one of the options to a name in case more
than one item is registered to the same name.
This is the default.
"""
return objct
def _all(registry, effective_name, objct):
"""
Assign all of the possible options.
Collect all registered items into a set,
and assign that set to a name. Note that
even if only one item is assigned to a name,
that name will be assigned to a set of length 1.
"""
myset = registry.get(effective_name, set())
myset.add(objct)
return myset
def _exactly_one(registry, effective_name, objct):
"""
Raise an error on colliding registration.
If more than one item is registered to the
same name, raise a :code:`GatherCollisionError`.
"""
if effective_name in registry:
raise GatherCollisionError("Attempt to double register",
registry, effective_name, objct)
return objct
@attr.s(frozen=True)
class Collector(object):
"""
A plugin collector.
A collector allows to *register* functions or classes by modules,
and *collect*-ing them when they need to be used.
"""
name = attr.ib(default=None)
depth = attr.ib(default=1)
one_of = staticmethod(_one_of)
all = staticmethod(_all)
exactly_one = staticmethod(_exactly_one)
def register(self, name=None, transform=lambda x: x):
"""
Register a class or function
Args:
name (str): optional. Name to register the class or function as.
(default is name of object)
transform (callable): optional. A one-argument function. Will be called,
and the return value used in collection.
Default is identity function
This is meant to be used as a decoator:
.. code::
@COLLECTOR.register()
def specific_subcommand(args):
pass
@COLLECTOR.register(name='another_specific_name')
def main(args):
pass
"""
def callback(scanner, inner_name, objct):
("""
Venusian_ callback, called from scan
.. _Venusian: http://docs.pylonsproject.org/projects/"""
"""venusian/en/latest/api.html#venusian.attach
""")
tag = getattr(scanner, 'tag', None)
if tag is not self:
return
if name is None:
effective_name = inner_name
else:
effective_name = name
objct = transform(objct)
scanner.update(effective_name, objct)
def attach(func):
"""Attach callback to be called when object is scanned"""
venusian.attach(func, callback, depth=self.depth)
return func
return attach
def collect(self, strategy=one_of.__func__):
"""
Collect all registered functions or classes.
Returns a dictionary mapping names to registered elements.
"""
def ignore_import_error(_unused):
"""
Ignore ImportError during collection.
Some modules raise import errors for various reasons,
and should be just treated as missing.
"""
if not issubclass(sys.exc_info()[0], ImportError):
raise # pragma: no cover
params = _ScannerParameters(strategy=strategy)
scanner = venusian.Scanner(update=params.update, tag=self)
for module in _get_modules():
scanner.scan(module, onerror=ignore_import_error)
params.raise_if_needed()
return params.registry
@attr.s
class _ScannerParameters(object):
"""
Parameters for scanner
Update the registry respecting the strategy,
and raise errors at the end.
"""
_please_raise = attr.ib(init=False, default=None)
_strategy = attr.ib()
registry = attr.ib(init=False, default=attr.Factory(dict))
def update(self, name, objct):
"""Update registry with name->objct"""
try:
res = self._strategy(self.registry, name, objct)
self.registry[name] = res
except GatherCollisionError as exc:
self._please_raise = exc
def raise_if_needed(self):
"""Raise exception if any of the updates failed."""
if self._please_raise is not None:
raise self._please_raise
def run(argv, commands, version, output):
"""
Run the specified subcommand.
Args:
argv (list of str): Arguments to be processed
commands (mapping of str to callables): Commands (usually collected by a :code:`Collector`)
version (str): Version to display if :code:`--version` is asked
output (file): Where to write output to
"""
if len(argv) < 1:
argv = argv + ['help']
if argv[0] in ('version', '--version'):
output.write("Version {}\n".format(version))
return
if argv[0] in ('help', '--help') or argv[0] not in commands:
output.write("Available subcommands:\n")
for command in commands.keys():
output.write("\t{}\n".format(command))
output.write("Run subcommand with '--help' for more information\n")
return
commands[argv[0]](argv)
@attr.s(frozen=True)
class Wrapper(object):
"""Add extra data to an object"""
original = attr.ib()
extra = attr.ib()
@classmethod
def glue(cls, extra):
"""
Glue extra data to an object
Args:
extra: what to add
Returns:
callable: function of one argument that returns a :code:`Wrapped`
This method is useful mainly as the :code:`transform` parameter
of a :code:`register` call.
"""
def ret(original):
"""Return a :code:`Wrapper` with the original and extra"""
return cls(original=original, extra=extra)
return ret
__all__ = ['Collector', 'run', 'Wrapper']
| 709
| 0
| 53
|
8481c7b9a2b60e392c3a9fd15bf0609835a1835a
| 1,117
|
py
|
Python
|
gmsh-4.2.2/demos/api/crack.py
|
Poofee/fastFEM
|
14eb626df973e2123604041451912c867ab7188c
|
[
"MIT"
] | 4
|
2019-05-06T09:35:08.000Z
|
2021-05-14T16:26:45.000Z
|
onelab/share/doc/gmsh/demos/api/crack.py
|
Christophe-Foyer/pyFEA
|
344996d6b075ee4b2214283f0af8159d86d154fd
|
[
"MIT"
] | null | null | null |
onelab/share/doc/gmsh/demos/api/crack.py
|
Christophe-Foyer/pyFEA
|
344996d6b075ee4b2214283f0af8159d86d154fd
|
[
"MIT"
] | 1
|
2020-12-15T13:47:23.000Z
|
2020-12-15T13:47:23.000Z
|
import gmsh
import sys
gmsh.initialize(sys.argv)
gmsh.fltk.initialize()
gmsh.option.setNumber("General.Terminal", 1)
gmsh.model.add("square with cracks")
surf1 = 1
gmsh.model.occ.addRectangle(0, 0, 0, 1, 1, surf1)
pt1 = gmsh.model.occ.addPoint(0.2, 0.2, 0)
pt2 = gmsh.model.occ.addPoint(0.4, 0.4, 0)
line1 = gmsh.model.occ.addLine(pt1, pt2)
pt3 = gmsh.model.occ.addPoint(0.6, 0.1, 0)
pt4 = gmsh.model.occ.addPoint(0.1, 0.3, 0)
line2 = gmsh.model.occ.addLine(pt3, pt4)
o, m = gmsh.model.occ.fragment([(2,surf1)], [(1,line1), (1,line2)])
gmsh.model.occ.synchronize()
# m contains, for each input entity (surf1, line1 and line2), the child entities
# (if any) after the fragmentation, as lists of tuples. To apply the crack
# plugin we group all the intersecting lines in a physical group
new_surf = m[0][0][1]
new_lines = [item[1] for sublist in m[1:] for item in sublist]
gmsh.model.addPhysicalGroup(2, [new_surf], 100)
gmsh.model.addPhysicalGroup(1, new_lines, 101)
gmsh.model.mesh.generate(2)
gmsh.plugin.setNumber("Crack", "PhysicalGroup", 101)
gmsh.plugin.run("Crack")
gmsh.fltk.run()
gmsh.finalize()
| 26.595238
| 80
| 0.715309
|
import gmsh
import sys
gmsh.initialize(sys.argv)
gmsh.fltk.initialize()
gmsh.option.setNumber("General.Terminal", 1)
gmsh.model.add("square with cracks")
surf1 = 1
gmsh.model.occ.addRectangle(0, 0, 0, 1, 1, surf1)
pt1 = gmsh.model.occ.addPoint(0.2, 0.2, 0)
pt2 = gmsh.model.occ.addPoint(0.4, 0.4, 0)
line1 = gmsh.model.occ.addLine(pt1, pt2)
pt3 = gmsh.model.occ.addPoint(0.6, 0.1, 0)
pt4 = gmsh.model.occ.addPoint(0.1, 0.3, 0)
line2 = gmsh.model.occ.addLine(pt3, pt4)
o, m = gmsh.model.occ.fragment([(2,surf1)], [(1,line1), (1,line2)])
gmsh.model.occ.synchronize()
# m contains, for each input entity (surf1, line1 and line2), the child entities
# (if any) after the fragmentation, as lists of tuples. To apply the crack
# plugin we group all the intersecting lines in a physical group
new_surf = m[0][0][1]
new_lines = [item[1] for sublist in m[1:] for item in sublist]
gmsh.model.addPhysicalGroup(2, [new_surf], 100)
gmsh.model.addPhysicalGroup(1, new_lines, 101)
gmsh.model.mesh.generate(2)
gmsh.plugin.setNumber("Crack", "PhysicalGroup", 101)
gmsh.plugin.run("Crack")
gmsh.fltk.run()
gmsh.finalize()
| 0
| 0
| 0
|