blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d9f43d1c561b4a410c8366be55dde291f4661542 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/operations/_usage_operations.py | ce8b45dc92c31a4dae09fdf96881fa1357edd348 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,392 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsageOperations(object):
    """UsageOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        location,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListUsagesResult"]
        """Gets, for the specified location, the current compute resource usage information as well as the
        limits for compute resources under the subscription.
        :param location: The location for which resource usage is queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListUsagesResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.ListUsagesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListUsagesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the HTTP request for either the first page (next_link is
            # None) or a continuation page.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service-returned next_link is already a complete URL,
                # so no additional query parameters are attached here.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('ListUsagesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, mapping known error status codes to typed errors.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'}  # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
86df727426d76011726712116223b2d60c2e785b | 83616d726820d90951b4dfb413ad1dd883d84eea | /TriAquae/models/Ubuntu_12/paramiko/client.py | c5a2d1ac727b36fda9f44c4ae1eee7bc604cb53a | [] | no_license | goser1158/triaquae | 9d67dc2ec4ba3aa1afc5d4ce7ab22a4ef6c3e4b1 | e7594825e73e1e116b5f15e6c3b02ec7bd98b2ef | refs/heads/master | 2021-01-19T21:16:18.914895 | 2018-12-05T08:46:10 | 2018-12-05T08:46:10 | 101,248,034 | 0 | 0 | null | 2017-08-24T03:02:55 | 2017-08-24T03:02:55 | null | UTF-8 | Python | false | false | 21,636 | py | # Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{SSHClient}.
"""
from binascii import hexlify
import getpass
import os
import socket
import warnings
from paramiko.agent import Agent
from paramiko.common import *
from paramiko.config import SSH_PORT
from paramiko.dsskey import DSSKey
from paramiko.hostkeys import HostKeys
from paramiko.resource import ResourceManager
from paramiko.rsakey import RSAKey
from paramiko.ssh_exception import SSHException, BadHostKeyException
from paramiko.transport import Transport
from paramiko.util import retry_on_signal
class MissingHostKeyPolicy (object):
    """
    Base interface for the policies an L{SSHClient} consults when an SSH
    server presents a host key found in neither the system host keys nor
    the application's own L{HostKeys} store.  Ready-made subclasses accept
    the key and remember it (L{AutoAddPolicy}) or refuse it
    (L{RejectPolicy}); an implementation may also prompt the user to
    verify the key interactively.
    """
    def missing_host_key(self, client, hostname, key):
        """
        Invoked by an L{SSHClient} when a server key is absent from both
        the system and local L{HostKeys} objects.  Returning normally
        accepts the key; raising an exception rejects it (the exception
        propagates to the calling application).
        """
        # Default behavior: accept silently.
        return None
class AutoAddPolicy (MissingHostKeyPolicy):
    """
    Policy for automatically adding the hostname and new host key to the
    local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
    """
    def missing_host_key(self, client, hostname, key):
        # Trust the unknown key: record it in the client's in-memory store.
        client._host_keys.add(hostname, key.get_name(), key)
        # Persist immediately, but only if a writable known-hosts file was
        # registered via load_host_keys().
        if client._host_keys_filename is not None:
            client.save_host_keys(client._host_keys_filename)
        client._log(DEBUG, 'Adding %s host key for %s: %s' %
                    (key.get_name(), hostname, hexlify(key.get_fingerprint())))
class RejectPolicy (MissingHostKeyPolicy):
    """
    Policy for automatically rejecting the unknown hostname & key. This is
    used by L{SSHClient}.
    """
    def missing_host_key(self, client, hostname, key):
        # Log the rejection, then refuse the connection by raising; the
        # SSHException propagates out of SSHClient.connect().
        client._log(DEBUG, 'Rejecting %s host key for %s: %s' %
                    (key.get_name(), hostname, hexlify(key.get_fingerprint())))
        raise SSHException('Server %r not found in known_hosts' % hostname)
class WarningPolicy (MissingHostKeyPolicy):
    """
    Policy that accepts an unknown host key, but emits a python-style
    warning about it first.  Used by L{SSHClient}.
    """
    def missing_host_key(self, client, hostname, key):
        # Build the warning text, then accept the key by returning normally.
        message = 'Unknown %s host key for %s: %s' % \
            (key.get_name(), hostname, hexlify(key.get_fingerprint()))
        warnings.warn(message)
class SSHClient (object):
    """
    A high-level representation of a session with an SSH server. This class
    wraps L{Transport}, L{Channel}, and L{SFTPClient} to take care of most
    aspects of authenticating and opening channels. A typical use case is::
        client = SSHClient()
        client.load_system_host_keys()
        client.connect('ssh.example.com')
        stdin, stdout, stderr = client.exec_command('ls -l')
    You may pass in explicit overrides for authentication and server host key
    checking. The default mechanism is to try to use local key files or an
    SSH agent (if one is running).
    @since: 1.6
    """
    def __init__(self):
        """
        Create a new SSHClient.
        """
        self._system_host_keys = HostKeys()  # read-only keys (e.g. OpenSSH known_hosts)
        self._host_keys = HostKeys()         # application-local keys; saveable
        self._host_keys_filename = None
        self._log_channel = None
        self._policy = RejectPolicy()        # default: refuse unknown servers
        self._transport = None
        self._agent = None
    def load_system_host_keys(self, filename=None):
        """
        Load host keys from a system (read-only) file. Host keys read with
        this method will not be saved back by L{save_host_keys}.
        This method can be called multiple times. Each new set of host keys
        will be merged with the existing set (new replacing old if there are
        conflicts).
        If C(unknown) is left as C{None}, an attempt will be made to read
        keys from the user's local "known hosts" file, as used by OpenSSH,
        and no exception will be raised if the file can't be read. This is
        probably only useful on posix.
        @param filename: the filename to read, or C{None}
        @type filename: str
        @raise IOError: if a filename was provided and the file could not be
            read
        """
        if filename is None:
            # try the user's .ssh key file, and mask exceptions
            filename = os.path.expanduser('~/.ssh/known_hosts')
            try:
                self._system_host_keys.load(filename)
            except IOError:
                pass
            return
        self._system_host_keys.load(filename)
    def load_host_keys(self, filename):
        """
        Load host keys from a local host-key file. Host keys read with this
        method will be checked I{after} keys loaded via L{load_system_host_keys},
        but will be saved back by L{save_host_keys} (so they can be modified).
        The missing host key policy L{AutoAddPolicy} adds keys to this set and
        saves them, when connecting to a previously-unknown server.
        This method can be called multiple times. Each new set of host keys
        will be merged with the existing set (new replacing old if there are
        conflicts). When automatically saving, the last hostname is used.
        @param filename: the filename to read
        @type filename: str
        @raise IOError: if the filename could not be read
        """
        self._host_keys_filename = filename
        self._host_keys.load(filename)
    def save_host_keys(self, filename):
        """
        Save the host keys back to a file. Only the host keys loaded with
        L{load_host_keys} (plus any added directly) will be saved -- not any
        host keys loaded with L{load_system_host_keys}.
        @param filename: the filename to save to
        @type filename: str
        @raise IOError: if the file could not be written
        """
        # update local host keys from file (in case other SSH clients
        # have written to the known_hosts file meanwhile.
        # NOTE(review): self.known_hosts is never assigned anywhere in this
        # class -- confirm callers set it before relying on this reload.
        if self.known_hosts is not None:
            self.load_host_keys(self.known_hosts)
        f = open(filename, 'w')
        for hostname, keys in self._host_keys.iteritems():
            for keytype, key in keys.iteritems():
                f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
        f.close()
    def get_host_keys(self):
        """
        Get the local L{HostKeys} object. This can be used to examine the
        local host keys or change them.
        @return: the local host keys
        @rtype: L{HostKeys}
        """
        return self._host_keys
    def set_log_channel(self, name):
        """
        Set the channel for logging. The default is C{"paramiko.transport"}
        but it can be set to anything you want.
        @param name: new channel name for logging
        @type name: str
        """
        self._log_channel = name
    def set_missing_host_key_policy(self, policy):
        """
        Set the policy to use when connecting to a server that doesn't have a
        host key in either the system or local L{HostKeys} objects. The
        default policy is to reject all unknown servers (using L{RejectPolicy}).
        You may substitute L{AutoAddPolicy} or write your own policy class.
        @param policy: the policy to use when receiving a host key from a
            previously-unknown server
        @type policy: L{MissingHostKeyPolicy}
        """
        self._policy = policy
    def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None,
                key_filename=None, timeout=None, allow_agent=True, look_for_keys=True,
                compress=False, sock=None):
        """
        Connect to an SSH server and authenticate to it. The server's host key
        is checked against the system host keys (see L{load_system_host_keys})
        and any local host keys (L{load_host_keys}). If the server's hostname
        is not found in either set of host keys, the missing host key policy
        is used (see L{set_missing_host_key_policy}). The default policy is
        to reject the key and raise an L{SSHException}.
        Authentication is attempted in the following order of priority:
            - The C{pkey} or C{key_filename} passed in (if any)
            - Any key we can find through an SSH agent
            - Any "id_rsa" or "id_dsa" key discoverable in C{~/.ssh/}
            - Plain username/password auth, if a password was given
        If a private key requires a password to unlock it, and a password is
        passed in, that password will be used to attempt to unlock the key.
        @param hostname: the server to connect to
        @type hostname: str
        @param port: the server port to connect to
        @type port: int
        @param username: the username to authenticate as (defaults to the
            current local username)
        @type username: str
        @param password: a password to use for authentication or for unlocking
            a private key
        @type password: str
        @param pkey: an optional private key to use for authentication
        @type pkey: L{PKey}
        @param key_filename: the filename, or list of filenames, of optional
            private key(s) to try for authentication
        @type key_filename: str or list(str)
        @param timeout: an optional timeout (in seconds) for the TCP connect
        @type timeout: float
        @param allow_agent: set to False to disable connecting to the SSH agent
        @type allow_agent: bool
        @param look_for_keys: set to False to disable searching for discoverable
            private key files in C{~/.ssh/}
        @type look_for_keys: bool
        @param compress: set to True to turn on compression
        @type compress: bool
        @param sock: an open socket or socket-like object (such as a
            L{Channel}) to use for communication to the target host
        @type sock: socket
        @raise BadHostKeyException: if the server's host key could not be
            verified
        @raise AuthenticationException: if authentication failed
        @raise SSHException: if there was any other error connecting or
            establishing an SSH session
        @raise socket.error: if a socket error occurred while connecting
        """
        if not sock:
            # Pick the first address family offering a stream socket.
            for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
                if socktype == socket.SOCK_STREAM:
                    af = family
                    addr = sockaddr
                    break
            else:
                # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
                af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
            sock = socket.socket(af, socket.SOCK_STREAM)
            if timeout is not None:
                # best-effort: errors from settimeout() are deliberately
                # swallowed here.
                try:
                    sock.settimeout(timeout)
                except:
                    pass
            # Retry the connect() if it is interrupted by a signal.
            retry_on_signal(lambda: sock.connect(addr))
        t = self._transport = Transport(sock)
        t.use_compression(compress=compress)
        if self._log_channel is not None:
            t.set_log_channel(self._log_channel)
        t.start_client()
        ResourceManager.register(self, t)
        server_key = t.get_remote_server_key()
        keytype = server_key.get_name()
        if port == SSH_PORT:
            server_hostkey_name = hostname
        else:
            # Non-default ports use OpenSSH's "[host]:port" lookup form.
            server_hostkey_name = "[%s]:%d" % (hostname, port)
        our_server_key = self._system_host_keys.get(server_hostkey_name, {}).get(keytype, None)
        if our_server_key is None:
            our_server_key = self._host_keys.get(server_hostkey_name, {}).get(keytype, None)
        if our_server_key is None:
            # will raise exception if the key is rejected; let that fall out
            self._policy.missing_host_key(self, server_hostkey_name, server_key)
            # if the callback returns, assume the key is ok
            our_server_key = server_key
        if server_key != our_server_key:
            raise BadHostKeyException(hostname, server_key, our_server_key)
        if username is None:
            username = getpass.getuser()
        # Normalize key_filename to a list of candidate key files.
        if key_filename is None:
            key_filenames = []
        elif isinstance(key_filename, (str, unicode)):
            key_filenames = [ key_filename ]
        else:
            key_filenames = key_filename
        self._auth(username, password, pkey, key_filenames, allow_agent, look_for_keys)
    def close(self):
        """
        Close this SSHClient and its underlying L{Transport}.
        """
        if self._transport is None:
            return
        self._transport.close()
        self._transport = None
        if self._agent != None:
            self._agent.close()
            self._agent = None
    def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False):
        """
        Execute a command on the SSH server. A new L{Channel} is opened and
        the requested command is executed. The command's input and output
        streams are returned as python C{file}-like objects representing
        stdin, stdout, and stderr.
        @param command: the command to execute
        @type command: str
        @param bufsize: interpreted the same way as by the built-in C{file()} function in python
        @type bufsize: int
        @param timeout: set command's channel timeout. See L{Channel.settimeout}.settimeout
        @type timeout: int
        @return: the stdin, stdout, and stderr of the executing command
        @rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile})
        @raise SSHException: if the server fails to execute the command
        """
        chan = self._transport.open_session()
        if(get_pty):
            chan.get_pty()
        chan.settimeout(timeout)
        chan.exec_command(command)
        stdin = chan.makefile('wb', bufsize)
        stdout = chan.makefile('rb', bufsize)
        stderr = chan.makefile_stderr('rb', bufsize)
        return stdin, stdout, stderr
    def invoke_shell(self, term='vt100', width=80, height=24, width_pixels=0,
                     height_pixels=0):
        """
        Start an interactive shell session on the SSH server. A new L{Channel}
        is opened and connected to a pseudo-terminal using the requested
        terminal type and size.
        @param term: the terminal type to emulate (for example, C{"vt100"})
        @type term: str
        @param width: the width (in characters) of the terminal window
        @type width: int
        @param height: the height (in characters) of the terminal window
        @type height: int
        @param width_pixels: the width (in pixels) of the terminal window
        @type width_pixels: int
        @param height_pixels: the height (in pixels) of the terminal window
        @type height_pixels: int
        @return: a new channel connected to the remote shell
        @rtype: L{Channel}
        @raise SSHException: if the server fails to invoke a shell
        """
        chan = self._transport.open_session()
        chan.get_pty(term, width, height, width_pixels, height_pixels)
        chan.invoke_shell()
        return chan
    def open_sftp(self):
        """
        Open an SFTP session on the SSH server.
        @return: a new SFTP session object
        @rtype: L{SFTPClient}
        """
        return self._transport.open_sftp_client()
    def get_transport(self):
        """
        Return the underlying L{Transport} object for this SSH connection.
        This can be used to perform lower-level tasks, like opening specific
        kinds of channels.
        @return: the Transport for this connection
        @rtype: L{Transport}
        """
        return self._transport
    def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys):
        """
        Try, in order:
            - The key passed in, if one was passed in.
            - Any key we can find through an SSH agent (if allowed).
            - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
            - Plain username/password auth, if a password was given.
        (The password might be needed to unlock a private key, or for
        two-factor authentication [for which it is required].)
        """
        saved_exception = None
        two_factor = False
        allowed_types = []
        # (1) an explicit key object passed by the caller
        if pkey is not None:
            try:
                self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
                allowed_types = self._transport.auth_publickey(username, pkey)
                two_factor = (allowed_types == ['password'])
                if not two_factor:
                    return
            except SSHException, e:
                saved_exception = e
        # (2) explicit key files passed by the caller
        if not two_factor:
            for key_filename in key_filenames:
                for pkey_class in (RSAKey, DSSKey):
                    try:
                        key = pkey_class.from_private_key_file(key_filename, password)
                        self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
                        self._transport.auth_publickey(username, key)
                        two_factor = (allowed_types == ['password'])
                        if not two_factor:
                            return
                        break
                    except SSHException, e:
                        saved_exception = e
        # (3) keys held by a running SSH agent
        if not two_factor and allow_agent:
            if self._agent == None:
                self._agent = Agent()
            for key in self._agent.get_keys():
                try:
                    self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
                    # for 2-factor auth a successfully auth'd key will result in ['password']
                    allowed_types = self._transport.auth_publickey(username, key)
                    two_factor = (allowed_types == ['password'])
                    if not two_factor:
                        return
                    break
                except SSHException, e:
                    saved_exception = e
        # (4) keys discovered in the user's ~/.ssh (or ~/ssh on windows)
        if not two_factor:
            keyfiles = []
            rsa_key = os.path.expanduser('~/.ssh/id_rsa')
            dsa_key = os.path.expanduser('~/.ssh/id_dsa')
            if os.path.isfile(rsa_key):
                keyfiles.append((RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((DSSKey, dsa_key))
            # look in ~/ssh/ for windows users:
            rsa_key = os.path.expanduser('~/ssh/id_rsa')
            dsa_key = os.path.expanduser('~/ssh/id_dsa')
            if os.path.isfile(rsa_key):
                keyfiles.append((RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((DSSKey, dsa_key))
            if not look_for_keys:
                keyfiles = []
            for pkey_class, filename in keyfiles:
                try:
                    key = pkey_class.from_private_key_file(filename, password)
                    self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
                    # for 2-factor auth a successfully auth'd key will result in ['password']
                    allowed_types = self._transport.auth_publickey(username, key)
                    two_factor = (allowed_types == ['password'])
                    if not two_factor:
                        return
                    break
                except SSHException, e:
                    saved_exception = e
                except IOError, e:
                    saved_exception = e
        # (5) plain password auth (also required to finish 2-factor auth)
        if password is not None:
            try:
                self._transport.auth_password(username, password)
                return
            except SSHException, e:
                saved_exception = e
        elif two_factor:
            raise SSHException('Two-factor authentication requires a password')
        # if we got an auth-failed exception earlier, re-raise it
        if saved_exception is not None:
            raise saved_exception
        raise SSHException('No authentication methods available')
    def _log(self, level, msg):
        # Delegate logging to the underlying transport's logger.
        self._transport._log(level, msg)
| [
"lijie3721@126.com"
] | lijie3721@126.com |
13cbf185e243bfd91725740f6832e60a9a782369 | 148ba38e84919b7a44fa193e2899993051fdb542 | /tests/requester_session.py | e637a86122e6c2896ab3ef483741bd0d3abf7332 | [
"MIT"
] | permissive | igor-kupczynski/Requester | e8e76f8f421558638439083d4863445d3b6ad5db | 93fd2673cf69a97b0e0d49a2411abe6148326f41 | refs/heads/master | 2022-03-05T21:10:59.099429 | 2019-11-12T14:06:58 | 2019-11-12T14:06:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # flake8: noqa
###env
import requests
s = requests.Session()
# Seed a cookie and a default header on the session so later requests
# inherit them; also prepare a POST to be sent outside the env block.
# NOTE(review): the '###env' markers presumably delimit the env block the
# Requester plugin executes -- confirm the test-suite is not sensitive to
# added comment lines in this fixture.
s.get('http://127.0.0.1:8000/cookies/set?k0=v0', timeout=5)
s.headers.update({'X-Test': 'true'})
prepped = s.prepare_request(requests.Request('POST', 'http://127.0.0.1:8000/post', json={'a': 'b'}))
###env
# Requests below reuse the session state and prepared request from above.
s.get('http://127.0.0.1:8000/cookies/set?k1=v1', allow_redirects=False)
s.send(prepped)
| [
"kylebebak@gmail.com"
] | kylebebak@gmail.com |
8024ba5b4330783ded7dd067ea1cf890af49dc58 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractNightFallTranslations.py | 594af0962bcbd229a5adb300cb23106ebecb7668 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 222 | py | def extractNightFallTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
| [
"something@fake-url.com"
] | something@fake-url.com |
e69a89c9aca5d7a747ce35c9a3614f0749e9b9c6 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/class_of_use_ref_structure.py | acfe741dd3c81420428778d7f91ac88418740eea | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 225 | py | from dataclasses import dataclass
from .type_of_value_ref_structure import TypeOfValueRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class ClassOfUseRefStructure(TypeOfValueRefStructure):
    """Reference to a CLASS OF USE; a generated binding that inherits all
    fields from TypeOfValueRefStructure and adds nothing of its own."""
    pass
| [
"chris@komposta.net"
] | chris@komposta.net |
cce790149c0236665d2879cd5d9796223e903f6d | 32824b902ff0d97b4f7fdea03251ee8e8fd8b859 | /backend/ytb_21198/urls.py | 703d1fd95d132c870eace5738f46d9dbac582349 | [] | no_license | crowdbotics-apps/ytb-21198 | 761241bfc2fdbcb9b16c4fd21cb60b5d477c532a | c30beec282dabcfcfff09e478311b819af3eaa0e | refs/heads/master | 2022-12-28T14:49:18.847967 | 2020-10-06T22:17:47 | 2020-10-06T22:17:47 | 301,867,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | """ytb_21198 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding strings shown in the Django admin UI.
admin.site.site_header = "YTB"
admin.site.site_title = "YTB Admin Portal"
admin.site.index_title = "YTB Admin"
# swagger
api_info = openapi.Info(
    title="YTB API",
    default_version="v1",
    description="API documentation for YTB App",
)
# public=True exposes the schema endpoint, but permission_classes still
# restricts it to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
# Mount the Swagger UI for the schema defined above.
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4914f091478c59559e9a61fa777ab6870e7ab96b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4347/codes/1596_2894.py | 372d41623df33594fd6a8e6a027a5fb1410be9b1 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | valor_do_jogo1= float(input("Qual o valor unitario do jogo? "))
valor_do_frete= 45.0
total=valor_do_jogo1 * 8 + valor_do_frete
print(total) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
9c0c4fb179834a7ca36de9a9189ada09e37ceef0 | bfb35e35c2e44b55ac77c66b483d737e0a947e16 | /20210201/生成pyd文件/To_pyd.py | 959cdac1739c66aa26cbc736972887f959be7102 | [] | no_license | Aimee888/Python-20210127 | 8c68230650f4ebb9b06c0facffc2976bd653d8ab | c683d440e21821f2a9a0e52d39d82071ae0b3405 | refs/heads/main | 2023-03-17T04:32:58.714464 | 2021-02-27T01:18:29 | 2021-02-27T01:18:29 | 333,348,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | #!/usr/bin/env python
# _*_ coding: UTF-8 _*_
"""=================================================
@Project -> File : Python-20210127 -> To_pyd.py
@IDE : PyCharm
@Author : Aimee
@Date : 2021/2/1 10:51
@Desc :
================================================="""
def test():
    """Smoke-test entry point: print a marker line so a caller can verify
    that the compiled module was imported and is callable."""
    message = "Test .pyd function"
    print(message)
| [
"961745931@qq.com"
] | 961745931@qq.com |
2f30f57136d05751f501d186279ee37d5dc703c3 | 38edf3974ac953fc916858651ccea167470e5ec8 | /Hackfest/Hackfest-Resources/Level 7/flip.py | 2f75915a8a24f7612d0451685ad3e69d68291724 | [] | no_license | duaraghav8/Duncheon | 597d49417aec9784c7428c740a2cfc5c5469079b | 69f7a775cb7df4d7ae99a3766260c3a098c52978 | refs/heads/master | 2016-09-06T07:19:34.153274 | 2015-08-14T17:31:37 | 2015-08-14T17:31:37 | 39,578,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | password = "136ilovebunnies247"
enc = ""
for c in password:
enc += chr(255-ord(c))
f = open('password.txt', 'w')
f.write(enc)
| [
"duaraghav8@gmail.com"
] | duaraghav8@gmail.com |
d8f44e410c0a7f63d40415236b0febfddc26566e | 754da0ff4d08621511e905c088284e8680edc0df | /0x03-python-data_structures/2-replace_in_list.py | b1e91dc09721778cdf7a954c04007114b87fe111 | [] | no_license | AngieCastano1634/holbertonschool-higher_level_programming-1 | 0e77289f6761f2057bed5b2163df43dea4d6908a | 67d5ed8181c51f5ff67a905fff39dcc605fb32ce | refs/heads/master | 2022-12-22T13:20:36.889380 | 2020-09-25T03:31:25 | 2020-09-25T03:31:25 | 299,758,479 | 0 | 1 | null | 2020-09-29T23:10:59 | 2020-09-29T23:10:58 | null | UTF-8 | Python | false | false | 248 | py | #!/usr/bin/python3
def replace_in_list(my_list, idx, element):
    """Replace the item at position ``idx`` of ``my_list`` with ``element``.

    The list is modified in place and returned.  If ``idx`` is negative or
    out of range, the list is returned unchanged (no exception is raised).
    """
    # Reject negative and out-of-range indices instead of raising.
    if idx < 0 or idx >= len(my_list):
        return my_list
    # Direct O(1) assignment replaces the original O(n) search loop.
    my_list[idx] = element
    return my_list
| [
"andergcp@hotmail.com"
] | andergcp@hotmail.com |
e6f2da70126b5f9645097ed27b380bfdb7169bfe | ea43a4ca9ad544fc3107aff1e5a4dd8c9a3898b1 | /biliob_analyzer/coin.py | e8d7027b84952516743eee5660fc09d0ea323b86 | [
"MIT"
] | permissive | z464244404/biliob-spider | 9447459cb722f71b6419ecaa53f80fdcbae11b22 | dc6880ab8457e469677575d1961899bd966bc1b0 | refs/heads/master | 2020-08-10T13:58:10.332858 | 2019-10-01T01:24:43 | 2019-10-01T01:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | from db import settings
from db import db
import datetime
coll = db['video'] # 获得collection的句柄
start_date = datetime.datetime(2018,11,22)
end_date = datetime.datetime(2018,12,22)
value = 'view'
d = {}
for each in coll.find():
author_name = each['author']
d[author_name] = []
each['data'].reverse()
s_value = None
s_date = None
for each_data in each['data']:
if each_data['datetime'] < start_date:
continue
if each_data['datetime'] > end_date:
continue
if s_value == None:
s_value = each_data[value]
s_date = each_data['datetime']
d[author_name] = [{'value':0,'date':s_date.date()}]
continue
c_value = each_data[value] - s_value
c_date = each_data['datetime']
d[author_name].append({'value':c_value,'date':c_date.date()})
pass
pass | [
"jannchie@gmail.com"
] | jannchie@gmail.com |
c35b734f8c3c106552cfa8fe5adbf206f6d0c97d | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/DocumentTemplate-2.13.4-py2.7-linux-x86_64.egg/TreeDisplay/TreeTag.py | d1190e833a3c24e777df2a73d11e74b7d68f7f9d | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,009 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Rendering object hierarchies as Trees
"""
from binascii import a2b_base64
from binascii import b2a_base64
import json
import sys
import zlib
from DocumentTemplate.DT_Util import add_with_prefix
from DocumentTemplate.DT_Util import Eval
from DocumentTemplate.DT_Util import InstanceDict
from DocumentTemplate.DT_Util import name_param
from DocumentTemplate.DT_Util import parse_params
from DocumentTemplate.DT_Util import ParseError
from DocumentTemplate.DT_Util import render_blocks
from DocumentTemplate.DT_Util import simple_name
from DocumentTemplate.DT_String import String
# 256-character identity table plus two variants that swap '+' and '-':
# base64 output is passed through `tplus` so it is safe in URLs/cookies,
# and through `tminus` to undo the swap before decoding.
# NOTE(review): concatenating str and bytes (`... + b'-' + ...`) only works
# under Python 2, where the two types are the same.
tbl = ''.join(map(chr, range(256)))
tplus = tbl[:ord('+')] + b'-' + tbl[ord('+') + 1:]
tminus = tbl[:ord('-')] + b'+' + tbl[ord('-') + 1:]
class Tree:
    """DTML ``<dtml-tree>`` tag: renders an object hierarchy as a tree.

    Parses the tag's attributes once at template-compile time (``__init__``)
    and delegates the actual rendering of each request to ``tpRender``.
    NOTE: this is legacy Python 2 code (``raise Exc, arg`` syntax,
    ``dict.has_key``).
    """
    name = 'tree'
    blockContinuations = ()
    expand = None

    def __init__(self, blocks):
        tname, args, section = blocks[0]
        # Declare every attribute the tag accepts; parse_params raises on
        # anything else.  Values here are the defaults.
        args = parse_params(args,
                            name=None,
                            expr=None,
                            nowrap=1,
                            expand=None,
                            leaves=None,
                            header=None,
                            footer=None,
                            branches=None,
                            branches_expr=None,
                            sort=None,
                            reverse=1,
                            skip_unauthorized=1,
                            id=None,
                            single=1,
                            url=None,
                            # opened_decoration=None,
                            # closed_decoration=None,
                            # childless_decoration=None,
                            assume_children=1,
                            urlparam=None, prefix=None)
        has_key = args.has_key

        # Resolve the tree root: explicit name, an expression, or (absent
        # both) the current object at render time.
        if has_key('') or has_key('name') or has_key('expr'):
            name, expr = name_param(args,'tree',1)
            if expr is not None:
                args['expr'] = expr
            elif has_key(''):
                args['name'] = name
        else:
            name='a tree tag'

        # 'branches' (method name) and 'branches_expr' (expression) are
        # mutually exclusive ways of enumerating children.
        # NOTE(review): `_tm` is not among this module's visible imports —
        # confirm it resolves at runtime.
        if has_key('branches_expr'):
            if has_key('branches'):
                raise ParseError, _tm(
                    'branches and and branches_expr given', 'tree')
            args['branches_expr'] = Eval(args['branches_expr']).eval
        elif not has_key('branches'):
            args['branches']='tpValues'

        # Defaults for per-node id/url accessors.
        if not has_key('id'):
            args['id'] = 'tpId'
        if not has_key('url'):
            args['url'] = 'tpURL'
        if not has_key('childless_decoration'):
            args['childless_decoration']=''

        prefix = args.get('prefix')
        if prefix and not simple_name(prefix):
            raise ParseError, _tm(
                'prefix is not a simple name', 'tree')

        self.__name__ = name
        self.section = section.blocks
        self.args = args

    def render(self, md):
        """Render the tree for the namespace ``md`` (called per request)."""
        args = self.args
        have = args.has_key

        # Locate the root object the same three ways as in __init__.
        if have('name'):
            v = md[args['name']]
        elif have('expr'):
            v = args['expr'].eval(md)
        else:
            v = md.this

        return tpRender(v, md, self.section, self.args)

    __call__ = render
# Register the tag with the DTML String class so `<dtml-tree ...>` works.
String.commands['tree']=Tree

pyid=id # Copy builtin

# Types treated as plain data by try_call_attr (returned without calling).
# NOTE(review): `unicode` and `long` exist only under Python 2.
simple_types = {str: 1, unicode: 1, int: 1, float: 1, long: 1,
                tuple: 1, list: 1, dict: 1}
def try_call_attr(ob, attrname, simple_types=simple_types):
    """Fetch attribute ``attrname`` from ``ob``, calling it when possible.

    Plain data values (types listed in ``simple_types``) are returned as-is;
    anything else is invoked with no arguments, falling back to the raw
    attribute if the call raises TypeError.
    """
    value = getattr(ob, attrname)
    if type(value) not in simple_types:
        try:
            value = value()
        except TypeError:
            pass
    return value
def tpRender(self, md, section, args,
             try_call_attr=try_call_attr):
    """Render data organized as a tree.

    We keep track of open nodes using a cookie.  The cookie stored the
    tree state. State should be a tree represented like:

      []  # all closed
      ['eagle'], # eagle is open
      ['eagle'], ['jeep', [1983, 1985]] # eagle, jeep, 1983 jeep and 1985 jeep

    where the items are object ids. The state will be serialized to a
    compressed and base64ed string that gets unencoded, uncompressed,
    and deserialized on the other side.

    Note that ids used in state need not be connected to urls, since
    state manipulation is internal to rendering logic.
    """
    data=[]

    # Determine this node's id: its id accessor, its ZODB oid, or the
    # Python object id as a last resort.
    idattr=args['id']
    if hasattr(self, idattr):
        id = try_call_attr(self, idattr)
    elif hasattr(self, '_p_oid'): id=oid(self)
    else: id=pyid(self)

    try:
        # see if we are being run as a sub-document
        root=md['tree-root-url']
        url=md['tree-item-url']
        state=md['tree-state']
        diff=md['tree-diff']
        substate=md['-tree-substate-']
        colspan=md['tree-colspan']
        level=md['tree-level']

    except KeyError:
        # OK, we are a top-level invocation
        level=-1

        if md.has_key('collapse_all'):
            # Trailing comma: state is a 1-tuple containing [id, []].
            state=[id,[]],
        elif md.has_key('expand_all'):
            # Pre-compute the full id tree so every branch renders open.
            have_arg=args.has_key
            if have_arg('branches'):
                def get_items(node, branches=args['branches'], md=md):
                    get = md.guarded_getattr
                    if get is None:
                        get = getattr
                    items = get(node, branches)
                    return items()
            elif have_arg('branches_expr'):
                def get_items(node, branches_expr=args['branches_expr'], md=md):
                    md._push(InstanceDict(node, md))
                    items=branches_expr(md)
                    md._pop()
                    return items
            state=[id, tpValuesIds(self, get_items, args)],
        else:
            # Restore state from the 'tree-s' cookie, resetting it if it
            # belongs to a different root object.
            if md.has_key('tree-s'):
                state=md['tree-s']
                state=decode_seq(state)
                try:
                    if state[0][0] != id: state=[id,[]],
                except IndexError: state=[id,[]],
            else: state=[id,[]],

            # 'tree-e'/'tree-c' request params expand/collapse one branch.
            if md.has_key('tree-e'):
                diff=decode_seq(md['tree-e'])
                apply_diff(state, diff, 1)

            if md.has_key('tree-c'):
                diff=decode_seq(md['tree-c'])
                apply_diff(state, diff, 0)

        colspan=tpStateLevel(state)
        substate=state
        diff=[]

        url=''
        # Root URL is the last path segment of the current URL.
        root=md['URL']
        l=root.rfind('/')
        if l >= 0: root=root[l+1:]

    treeData={'tree-root-url': root,
              'tree-colspan': colspan,
              'tree-state': state }

    # Mirror tree-* keys under the user-chosen prefix (dashes become
    # underscores so they are valid DTML names).
    prefix = args.get('prefix')
    if prefix:
        for k, v in treeData.items():
            treeData[prefix + k[4:].replace('-', '_')] = v

    md._push(InstanceDict(self, md))
    md._push(treeData)

    try: tpRenderTABLE(self,id,root,url,state,substate,diff,data,colspan,
                       section,md,treeData, level, args)
    finally: md._pop(2)

    # Persist the (possibly modified) state back into the cookie unless
    # rendering a single branch.
    if state is substate and not (args.has_key('single') and args['single']):
        state=state or ([id],)
        state=encode_seq(state)
        md['RESPONSE'].setCookie('tree-s',state)

    return ''.join(data)
def tpRenderTABLE(self, id, root_url, url, state, substate, diff, data,
                  colspan, section, md, treeData, level=0, args=None,
                  try_call_attr=try_call_attr,
                  ):
    "Render a tree as a table"
    # Recursively emits HTML table rows for this node and its open
    # descendants into `data` (via data.append).  `diff` accumulates the
    # path of ids from the root to the current node so the +/- links can
    # encode which branch to toggle.  Legacy Python 2 code.
    have_arg=args.has_key
    exp=0

    # Extend the accumulated URL with this node's url segment.
    if level >= 0:
        urlattr=args['url']
        if urlattr and hasattr(self, urlattr):
            tpUrl = try_call_attr(self, urlattr)
            url = (url and ('%s/%s' % (url, tpUrl))) or tpUrl
            root_url = root_url or tpUrl

    ptreeData = add_with_prefix(treeData, 'tree', args.get('prefix'))
    ptreeData['tree-item-url']=url
    ptreeData['tree-level']=level
    ptreeData['tree-item-expanded']=0
    idattr=args['id']

    output=data.append

    items=None
    if (have_arg('assume_children') and args['assume_children']
        and substate is not state):
        # We should not compute children unless we have to.
        # See if we've been asked to expand our children.
        for i in range(len(substate)):
            sub=substate[i]
            if sub[0]==id:
                exp=i+1
                break
        if not exp: items=1

    get=md.guarded_getattr
    if get is None:
        get = getattr

    # Enumerate children via the 'branches' method or 'branches_expr'.
    # items==1 is a sentinel meaning "assume children exist / leaf doc".
    if items is None:
        if have_arg('branches') and hasattr(self, args['branches']):
            items = get(self, args['branches'])
            items = items()
        elif have_arg('branches_expr'):
            items=args['branches_expr'](md)

        if not items and have_arg('leaves'): items=1

    if items and items != 1:

        # Drop (or refuse to render) children the user may not access.
        getitem = getattr(md, 'guarded_getitem', None)
        if getitem is not None:
            unauth=[]
            for index in range(len(items)):
                try:
                    getitem(items, index)
                except ValidationError:
                    unauth.append(index)
            if unauth:
                if have_arg('skip_unauthorized') and args['skip_unauthorized']:
                    items=list(items)
                    unauth.reverse()
                    for index in unauth: del items[index]
                else:
                    raise ValidationError, unauth

        if have_arg('sort'):
            # Faster/less mem in-place sort (decorate-sort-undecorate).
            if type(items)==type(()):
                items=list(items)
            sort=args['sort']
            size=range(len(items))
            for i in size:
                v=items[i]
                k=getattr(v,sort)
                try: k=k()
                except: pass
                items[i]=(k,v)
            items.sort()
            for i in size:
                items[i]=items[i][1]

        if have_arg('reverse'):
            items=list(items)           # Copy the list
            items.reverse()

    diff.append(id)

    _td_colspan='<td colspan="%s" style="white-space: nowrap"></td>'
    _td_single ='<td width="16" style="white-space: nowrap"></td>'

    sub=None
    if substate is state:
        # Top-level call: open the table; the root row itself is not drawn.
        output('<table cellspacing="0">\n')
        sub=substate[0]
        exp=items
    else:
        # Add prefix
        output('<tr>\n')

        # Add +/- icon
        if items:
            if level:
                if level > 3: output(_td_colspan % (level-1))
                elif level > 1: output(_td_single * (level-1))
                output(_td_single)
                output('\n')
            output('<td width="16" valign="top" style="white-space: nowrap">')
            # Find whether this node is recorded as expanded in substate.
            for i in range(len(substate)):
                sub=substate[i]
                if sub[0]==id:
                    exp=i+1
                    break

            ####################################
            # Mostly inline encode_seq for speed
            s = compress(json.dumps(diff))
            if len(s) > 57:
                s = encode_str(s)
            else:
                s = b2a_base64(s)[:-1]
                l = s.find('=')
                if l >= 0:
                    s = s[:l]
                s = s.translate(tplus)
            ####################################

            script=md['BASEPATH1']
            # Propagate extra args through tree.
            if args.has_key( 'urlparam' ):
                param = args['urlparam']
                param = "%s&" % param
            else:
                param = ""

            # Emit the expand (+) or collapse (-) anchor for this branch.
            if exp:
                ptreeData['tree-item-expanded']=1
                output('<a name="%s" href="%s?%stree-c=%s#%s">'
                       '<img src="%s/p_/mi" alt="-" border="0" /></a>' %
                       (id, root_url, param, s, id, script))
            else:
                output('<a name="%s" href="%s?%stree-e=%s#%s">'
                       '<img src="%s/p_/pl" alt="+" border="0" /></a>' %
                       (id, root_url, param, s, id, script))
            output('</td>\n')
        else:
            # Childless node: just indentation, no toggle icon.
            if level > 2: output(_td_colspan % level)
            elif level > 0: output(_td_single * level)
            output(_td_single)
            output('\n')

        # add item text
        dataspan=colspan-level
        output('<td%s%s valign="top" align="left">' %
               ((dataspan > 1 and (' colspan="%s"' % dataspan) or ''),
                (have_arg('nowrap') and
                 args['nowrap'] and ' style="white-space: nowrap"' or ''))
               )
        output(render_blocks(section, md))
        output('</td>\n</tr>\n')

    if exp:

        level=level+1
        dataspan=colspan-level
        if level > 2: h=_td_colspan % level
        elif level > 0: h=_td_single * level
        else: h=''

        # Optional 'header' document rendered before the children.
        if have_arg('header'):
            doc=args['header']
            if md.has_key(doc): doc=md.getitem(doc,0)
            else: doc=None
            if doc is not None:
                output(doc(
                    None, md,
                    standard_html_header=(
                        '<tr>%s'
                        '<td width="16" style="white-space: nowrap"></td>'
                        '<td%s valign="top">'
                        % (h,
                           (dataspan > 1 and (' colspan="%s"' % dataspan)
                            or ''))),
                    standard_html_footer='</td></tr>',
                    ))

        if items==1:
            # leaves
            if have_arg('leaves'):
                doc=args['leaves']
                if md.has_key(doc): doc=md.getitem(doc,0)
                else: doc=None
                if doc is not None:
                    treeData['-tree-substate-']=sub
                    ptreeData['tree-level']=level
                    md._push(treeData)
                    try: output(doc(
                        None,md,
                        standard_html_header=(
                            '<tr>%s'
                            '<td width="16" style="white-space: nowrap"></td>'
                            '<td%s valign="top">'
                            % (h,
                               (dataspan > 1 and
                                (' colspan="%s"' % dataspan) or ''))),
                        standard_html_footer='</td></tr>',
                        ))
                    finally: md._pop(1)
        elif have_arg('expand'):
            # 'expand' document takes over rendering of the open branch.
            doc=args['expand']
            if md.has_key(doc): doc=md.getitem(doc,0)
            else: doc=None
            if doc is not None:
                treeData['-tree-substate-']=sub
                ptreeData['tree-level']=level
                md._push(treeData)
                try: output(doc(
                    None,md,
                    standard_html_header=(
                        '<tr>%s<td width="16" style="white-space: nowrap"></td>'
                        '<td%s valign="top">'
                        % (h,
                           (dataspan > 1 and
                            (' colspan="%s"' % dataspan) or ''))),
                    standard_html_footer='</td></tr>',
                    ))
                finally: md._pop(1)
        else:
            # Default: recurse into each child item.
            __traceback_info__=sub, args, state, substate

            ids={}
            for item in items:
                if hasattr(item, idattr):
                    id = try_call_attr(item, idattr)
                elif hasattr(item, '_p_oid'): id=oid(item)
                else: id=pyid(item)
                if len(sub)==1: sub.append([])
                substate=sub[1]
                ids[id]=1
                md._push(InstanceDict(item,md))
                try: data=tpRenderTABLE(
                    item,id,root_url,url,state,substate,diff,data,
                    colspan, section, md, treeData, level, args)
                finally: md._pop()
                if not sub[1]: del sub[1]

            # Prune state entries whose objects no longer exist.
            # NOTE(review): range(len(substate)-1,-1) with the default step
            # of +1 is always empty, so this loop never runs — confirm
            # whether step -1 was intended.
            ids=ids.has_key
            for i in range(len(substate)-1,-1):
                if not ids(substate[i][0]): del substate[i]

        # Optional 'footer' document rendered after the children.
        if have_arg('footer'):
            doc=args['footer']
            if md.has_key(doc): doc=md.getitem(doc,0)
            else: doc=None
            if doc is not None:
                output(doc(
                    None, md,
                    standard_html_header=(
                        '<tr>%s<td width="16" style="white-space: nowrap"></td>'
                        '<td%s valign="top">'
                        % (h,
                           (dataspan > 1 and (' colspan="%s"' % dataspan)
                            or ''))),
                    standard_html_footer='</td></tr>',
                    ))

    del diff[-1]
    # Back at the root (diff empty again): close the table.
    if not diff: output('</table>\n')

    return data
def apply_diff(state, diff, expand):
    """Expand (expand=1) or collapse (expand=0) one branch of ``state``.

    ``diff`` is the list of node ids from the root down to the branch being
    toggled; it is consumed (emptied) in the process.  ``state`` is the
    nested [id, [children...]] structure described in tpRender and is
    modified in place.
    """
    if not diff: return
    # Wrap state so the walk below can treat the root uniformly: s[1] is
    # always "the children list of the current node".
    s=[None, state]
    diff.reverse()
    __traceback_info__=s, diff
    while diff:
        id=diff[-1]
        del diff[-1]
        if len(s)==1: s.append([])
        s=s[1]
        if type(s)==type(()):
            s=list(s)
        # Look for this id among the currently-open children.
        loc=-1
        for i in range(len(s)):
            if s[i][0]==id:
                loc=i
                break
        if loc >= 0:
            if not diff and not expand:
                # Collapsing the final path element: remove its entry.
                del s[loc]
            else:
                s=s[loc]
        elif diff or expand:
            # Path element not recorded yet: create it, then append the
            # remainder of the path as a fresh chain of entries.
            s.append([id,[]])
            s=s[-1][1]
            while diff:
                id=diff[-1]
                del diff[-1]
                if diff or expand:
                    s.append([id,[]])
                    s=s[-1][1]
def encode_seq(state):
    """Convert a sequence to an encoded string.

    The sequence is serialized to JSON, zlib-compressed, and passed through
    ``encode_str``, which yields newline- and padding-free base64 with '+'
    mapped to '-' so the result is safe in URLs and cookies.  ``decode_seq``
    reverses every step.

    This previously duplicated the chunked-base64 logic of ``encode_str``
    inline; delegating keeps the two encoders in sync.
    """
    return encode_str(compress(json.dumps(state)))
def encode_str(state):
    """Base64-encode ``state`` without newlines or padding.

    Long inputs are encoded in 57-byte slices (the amount that produces one
    76-character base64 line), dropping the trailing newline of each slice.
    Trailing '=' padding is stripped and '+' is swapped for '-' via ``tplus``
    so the result can travel safely in URLs and cookies.
    """
    size = len(state)
    if size > 57:
        chunks = [b2a_base64(state[pos:pos + 57])[:-1]
                  for pos in range(0, size, 57)]
        encoded = ''.join(chunks)
    else:
        encoded = b2a_base64(state)[:-1]
    pad = encoded.find('=')
    if pad >= 0:
        encoded = encoded[:pad]
    return encoded.translate(tplus)
def decode_seq(state):
    """Convert an encoded string (see encode_seq) back to a sequence.

    Undoes the '-'/'+' swap, re-adds base64 padding, decodes in 76-char
    lines, decompresses, and JSON-parses.  Returns [] on any parse error
    so a corrupt cookie never breaks rendering.
    """
    state = state.translate(tminus)
    l = len(state)

    if l > 76:
        # Decode full 76-character lines first...
        states = []
        j = 0
        for i in range(l // 76):
            k = j + 76
            states.append(a2b_base64(state[j:k]))
            j=k

        # ...then the leftover tail, padded back to a multiple of 4.
        if j < l:
            state=state[j:]
            l=len(state)
            k=l%4
            if k: state=state+'='*(4-k)
            states.append(a2b_base64(state))
        state=''.join(states)
    else:
        l=len(state)
        k=l%4
        if k: state=state+'='*(4-k)
        state=a2b_base64(state)
    state = decompress(state)
    try:
        return json.loads(state)
    except Exception:
        return []
def compress(input):
    """zlib-compress a text string, returning the compressed bytes."""
    encoded = input.encode('utf-8')
    return zlib.compress(encoded)
def decompress(input):
    """Inverse of compress(): inflate the bytes and decode back to text."""
    raw = zlib.decompress(input)
    return raw.decode('utf-8')
def tpStateLevel(state, level=0):
    """Return the depth of the deepest open branch recorded in ``state``.

    ``state`` is a list of [id] or [id, children] entries; entries with a
    children list contribute one level plus their subtree's depth.
    """
    for entry in state:
        if len(entry) == 2:
            depth = 1 + tpStateLevel(entry[1])
        else:
            depth = 1
        level = max(level, depth)
    return level
def tpValuesIds(self, get_items, args,
                try_call_attr=try_call_attr,
                ):
    """Build the nested id structure for every expandable descendant.

    get_items(node) is a function that returns the subitems of node.
    This should build the ids of subitems which are expandable
    (non-empty). Leaves should never be in the state - it will screw
    the colspan counting.

    The bare excepts are deliberate: enumeration is best-effort, and any
    node that fails to enumerate or identify is simply skipped.
    """
    r=[]
    idattr=args['id']
    try:
        try: items=get_items(self)
        except AttributeError: items=()
        for item in items:
            try:
                # Only record items that themselves have children.
                if get_items(item):
                    # Same id resolution as tpRender: accessor, oid, pyid.
                    if hasattr(item, idattr):
                        id = try_call_attr(item, idattr)
                    elif hasattr(item, '_p_oid'): id=oid(item)
                    else: id=pyid(item)
                    e=tpValuesIds(item, get_items, args)
                    if e: id=[id,e]
                    else: id=[id]
                    r.append(id)
            except: pass
    except: pass
    return r
def oid(self):
    """Return the object's ZODB oid as newline-free base64 text."""
    encoded = b2a_base64(str(self._p_oid))
    return encoded[:-1]
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
2c8f22898642d623f2dc9b9b5efdc099726242f5 | 3122ac39f1ce0a882b48293a77195476299c2a3b | /clients/python/generated/swaggyjenkins/models/github_respository_container.py | 48dee5604c6f39d2d948716e12bb4e6c2e073ec9 | [
"MIT"
] | permissive | miao1007/swaggy-jenkins | 4e6fe28470eda2428cbc584dcd365a21caa606ef | af79438c120dd47702b50d51c42548b4db7fd109 | refs/heads/master | 2020-08-30T16:50:27.474383 | 2019-04-10T13:47:17 | 2019-04-10T13:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,610 | py | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class GithubRespositoryContainer(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        '_class': 'str',
        'links': 'GithubRespositoryContainerlinks',
        'repositories': 'GithubRepositories'
    }

    attribute_map = {
        '_class': '_class',
        'links': '_links',
        'repositories': 'repositories'
    }

    def __init__(self, _class=None, links=None, repositories=None):  # noqa: E501
        """GithubRespositoryContainer - a model defined in OpenAPI"""  # noqa: E501

        # NOTE: `self.__class` is name-mangled to
        # `_GithubRespositoryContainer__class` — this is the private store
        # behind the `_class` property, not the built-in `__class__`.
        self.__class = None
        self._links = None
        self._repositories = None
        self.discriminator = None

        # Only assign provided values so unset fields stay None.
        if _class is not None:
            self._class = _class
        if links is not None:
            self.links = links
        if repositories is not None:
            self.repositories = repositories

    @property
    def _class(self):
        """Gets the _class of this GithubRespositoryContainer.  # noqa: E501


        :return: The _class of this GithubRespositoryContainer.  # noqa: E501
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class):
        """Sets the _class of this GithubRespositoryContainer.


        :param _class: The _class of this GithubRespositoryContainer.  # noqa: E501
        :type: str
        """

        self.__class = _class

    @property
    def links(self):
        """Gets the links of this GithubRespositoryContainer.  # noqa: E501


        :return: The links of this GithubRespositoryContainer.  # noqa: E501
        :rtype: GithubRespositoryContainerlinks
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this GithubRespositoryContainer.


        :param links: The links of this GithubRespositoryContainer.  # noqa: E501
        :type: GithubRespositoryContainerlinks
        """

        self._links = links

    @property
    def repositories(self):
        """Gets the repositories of this GithubRespositoryContainer.  # noqa: E501


        :return: The repositories of this GithubRespositoryContainer.  # noqa: E501
        :rtype: GithubRepositories
        """
        return self._repositories

    @repositories.setter
    def repositories(self, repositories):
        """Sets the repositories of this GithubRespositoryContainer.


        :param repositories: The repositories of this GithubRespositoryContainer.  # noqa: E501
        :type: GithubRepositories
        """

        self._repositories = repositories

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GithubRespositoryContainer):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
caefdb750f7451f754136362f793e60d708369c7 | 85a27847c7bab4dd49c48ea19d16ad4c9de963f8 | /examples/2d/hubbard_honeycomb/main.py | 452ad469b3225d4987ab0a5b1cbe46ef4bc1b894 | [] | no_license | zx-sdu/pygra | edcf3a2648f8413cff5ed753120d414999e3cc76 | bd7f930ccac414892d18dc240b88ee95def1e50b | refs/heads/master | 2020-09-27T19:55:34.719955 | 2018-06-30T09:17:04 | 2018-06-30T09:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | # zigzag ribbon
import sys
import os
sys.path.append("../../../pygra") # add pygra library
import numpy as np
import geometry
import scftypes
import operators
from scipy.sparse import csc_matrix
# Mean-field Hubbard model on a honeycomb lattice: run a self-consistent
# calculation for a range of interaction strengths U and record the gap.
g = geometry.honeycomb_lattice()
g.write()
Us = np.linspace(0.,4.,10) # different Us
#Us = [2.,4.]
#Us = [2.]
f = open("EVOLUTION.OUT","w") # file with the results
for U in Us: # loop over Us
#  import scftypes
    h = g.get_hamiltonian() # create hamiltonian of the system
    h = h.get_multicell()
    h.shift_fermi(0.0)
    # Antiferromagnetic starting guess for the mean field.
    mf = scftypes.guess(h,mode="antiferro")
    scf = scftypes.selfconsistency(h,nkp=10,filling=0.5,g=U,
              mix=0.9,mf=mf,mode="U")
    h = scf.hamiltonian # get the Hamiltonian
#  h.get_bands() # calculate band structure
#  import groundstate
    # One line per U: interaction strength and resulting gap.
    f.write(str(U)+" "+str(scf.gap)+"\n")
#  groundstate.swave(h)
#groundstate.hopping(h)
f.close()
| [
"jose.luis.lado@gmail.com"
] | jose.luis.lado@gmail.com |
e032f064b6c222a89f88751f05f5b40f17fc697f | bd55c7d73a95caed5f47b0031264ec05fd6ff60a | /apps/core/migrations/0060_auto_20180714_2320.py | a8550d55f1c2ad5b649d3cc9977d3d155fe3b2b4 | [] | no_license | phonehtetpaing/ebdjango | 3c8610e2d96318aff3b1db89480b2f298ad91b57 | 1b77d7662ec2bce9a6377690082a656c8e46608c | refs/heads/main | 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | # Generated by Django 2.0.5 on 2018-07-14 14:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds Vendor.oem_service_name and re-declares several existing
    ForeignKey fields (nullable, CASCADE delete, explicit related_name).
    """

    dependencies = [
        ('core', '0059_merge_20180713_2203'),
    ]

    operations = [
        # New nullable display-name column on Vendor.
        migrations.AddField(
            model_name='vendor',
            name='oem_service_name',
            field=models.CharField(max_length=256, null=True, verbose_name='OEM Service Name'),
        ),
        migrations.AlterField(
            model_name='automessagecontroller',
            name='auto_message_trigger',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagecontroller_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
        ),
        migrations.AlterField(
            model_name='automessagehistory',
            name='auto_message_condition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
        ),
        migrations.AlterField(
            model_name='automessagehistory',
            name='auto_message_trigger',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
        ),
        migrations.AlterField(
            model_name='automessagetrigger',
            name='auto_message_condition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagetrigger_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
        ),
    ]
| [
"phonehtetpaing1221@gmail.com"
] | phonehtetpaing1221@gmail.com |
653405bc92fc15389f4fab309c31b070d3b23323 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_24/models/resource_space_get_response.py | 309ad35ae95011fe8cf2eadb3a0de09b694fed58 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,065 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class ResourceSpaceGetResponse(object):
    """Auto-generated swagger model (do not hand-edit the generated parts).

    Attribute access is guarded: __setattr__/__getitem__/__setitem__/
    __delitem__ reject any key not listed in `attribute_map`, and
    __getattribute__ hides unset `Property` placeholders by raising
    AttributeError so `hasattr` works as expected.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ResourceSpace]',
        'total': 'list[ResourceSpace]'
    }

    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items',
        'total': 'total'
    }

    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ResourceSpace]
        total=None,  # type: List[models.ResourceSpace]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ResourceSpace]): Returns a list of all items after filtering. The values are displayed for each name, if applicable. If `total_only=true`, the `items` list will be empty.
            total (list[ResourceSpace]): The aggregate value of all items after filtering. If applicable, the average value is displayed instead. If applicable, the values are displayed for each field.
        """
        # Only assign fields that were actually supplied, so unset fields
        # remain hidden by the Property guard in __getattribute__.
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
        if total is not None:
            self.total = total

    def __setattr__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset field: behave as if the attribute does not exist.
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ResourceSpaceGetResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourceSpaceGetResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
05f44b6902d671803609ac1d4c1dfd0e4525986d | 4cd35b22c2bbe3658c5582132a07a18764db47d5 | /tests/test_github.py | d53ae2bb02b2b22526738c1dbcb0d99cae28f729 | [] | no_license | crouchred/img2url | ec7e9274ba2f5b15021965b3494ce96da3b80f79 | d317d27d8927624f0fdcf73ce6c5c16d3cfd7d21 | refs/heads/master | 2020-04-10T03:06:22.931641 | 2018-12-07T03:17:41 | 2018-12-07T03:17:41 | 160,760,849 | 0 | 0 | null | 2018-12-07T02:39:22 | 2018-12-07T02:39:22 | null | UTF-8 | Python | false | false | 2,391 | py | # -*- coding: utf-8 -*-
from __future__ import (
division, absolute_import, print_function, unicode_literals,
)
from builtins import * # noqa
from future.builtins.disabled import * # noqa
from tempfile import NamedTemporaryFile
import random
import string
import base64
import pytest
from img2url.config import load_config
from img2url.remotes.github import (
GitHubConfig, GitHubOperation,
)
def random_str(n):
    """Return ``n`` cryptographically random uppercase ASCII letters."""
    rng = random.SystemRandom()
    letters = [rng.choice(string.ascii_uppercase) for _ in range(n)]
    return ''.join(letters)
def tmpfile(content, _disable_gc=[]):
    """Write ``content`` to a fresh temp file and return its path.

    NOTE: the mutable default ``_disable_gc`` is intentional — the
    NamedTemporaryFile object is appended to it so it is never garbage
    collected, which would delete the file while tests still use it.
    """
    f = NamedTemporaryFile(prefix='tmp-img2url-' + random_str(10))
    _disable_gc.append(f)

    # Re-open by name to write text with an explicit encoding.
    with open(f.name, 'w', encoding='utf-8') as _f:
        _f.write(content)
    return f.name
def token():
    """Decode and return the GitHub API token (kept base64-obscured in source)."""
    encoded = 'OTBkZGE1MGQyZjBjNTViMGFhYzIwMzE1YmEwYjU2ZmZhMGEyMWY4Mw=='
    return base64.b64decode(encoded).decode('ascii')
# Shared config file used by the tests below: a complete, valid img2url
# configuration pointing at the throwaway GitHub test account/repository.
CONFIG_PATH = tmpfile('''
github_token: {0}
github_user: img2url-testing
github_repo: img2url-testing-travisci
'''.format(token()))
def test_config():
    """A complete config parses; one missing github_token is rejected."""
    # The full token/user/repo config must construct without error.
    GitHubConfig(load_config(CONFIG_PATH))

    # A config lacking github_token must raise RuntimeError.
    incomplete = tmpfile('''
github_user: img2url-testing
github_repo: img2url-testing-travisci
''')
    with pytest.raises(RuntimeError):
        GitHubConfig(load_config(incomplete))
def test_create_and_update():
    """Creating a remote file and then updating it by hash both succeed."""
    local_file = tmpfile(random_str(10))
    cfg = GitHubConfig(load_config(CONFIG_PATH))
    op = GitHubOperation(cfg, local_file)

    # Upload, then overwrite referencing the hash of the first upload.
    assert op.create_file()
    assert op.update_file(old_fhash=op.fhash)
def test_branch():
    """Uploading works when the config targets a non-default branch."""
    branch_config = tmpfile('''
github_token: {0}
github_user: img2url-testing
github_repo: img2url-testing-travisci
github_branch: branch-test
'''.format(token()))

    source = tmpfile(random_str(10))
    cfg = GitHubConfig(load_config(branch_config))
    op = GitHubOperation(cfg, source)
    assert op.create_file()
def test_path():
    """Remote listing works when github_path names a nested directory."""
    nested_config_path = tmpfile('''
github_token: {0}
github_user: img2url-testing
github_repo: img2url-testing-travisci
github_path: this-is/random-nested-path-{1}/
'''.format(token(), random_str(10)))
    local_path = tmpfile(random_str(10))
    cfg = GitHubConfig(load_config(nested_config_path))
    op = GitHubOperation(cfg, local_path)
    # Listing a directory that does not exist yet must still succeed.
    assert op.list_remote()
| [
"programmer.zhx@gmail.com"
] | programmer.zhx@gmail.com |
00c18797659e9dd2846a468f4a91c204477c5b34 | 5ae3bc1920fafc33693cdfa3928a48158aa6f725 | /563/563.py | 7a36bafd4463155409d5f74749b8c25c86689ec7 | [] | no_license | sjzyjc/leetcode | 2d0764aec6681d567bffd8ff9a8cc482c44336c2 | 5e09a5d36ac55d782628a888ad57d48e234b61ac | refs/heads/master | 2021-04-03T08:26:38.232218 | 2019-08-15T21:54:59 | 2019-08-15T21:54:59 | 124,685,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 563: sum of tilts over every node of a binary tree."""

    def findTilt(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        _, total_tilt = self.computeTilt(root)
        return total_tilt

    def computeTilt(self, node):
        """Return (subtree_sum, subtree_tilt) for the tree rooted at `node`."""
        if node is None:
            return 0, 0
        lsum, ltilt = self.computeTilt(node.left)
        rsum, rtilt = self.computeTilt(node.right)
        subtree_sum = lsum + rsum + node.val
        subtree_tilt = abs(lsum - rsum) + ltilt + rtilt
        return subtree_sum, subtree_tilt
| [
"jcyang@MacBook-Air.local"
] | jcyang@MacBook-Air.local |
619e9c696c4a361adf9c8907d8eac77d710902ea | b41f1ea0b23e90115a3cf983508fe75f2023d6ef | /Tally/opencount/opencount/specify_voting_targets/select_targets.py | 691a830cb17796d77e93edd816aade1f964ddcea | [] | no_license | FreeAndFair/evoting-systems | 3250a653d13016e8aeacbdc8fab6d8536a3f0ea3 | 84988fa53813b2ed846930e90ae3f953d01cf030 | refs/heads/master | 2018-10-28T20:11:11.574619 | 2018-08-22T21:00:16 | 2018-08-22T21:00:16 | 8,267,026 | 9 | 5 | null | null | null | null | UTF-8 | Python | false | false | 141,752 | py | import sys, os, time, math, pdb, traceback, threading, Queue, copy, textwrap
import multiprocessing, csv
try:
import cPickle as pickle
except ImportError:
import pickle
from os.path import join as pathjoin
sys.path.append('..')
import wx, cv, numpy as np, Image, scipy, scipy.misc
from wx.lib.pubsub import Publisher
from wx.lib.scrolledpanel import ScrolledPanel
from panel_opencount import OpenCountPanel
import util_gui, util, graphcolour, config
import grouping.tempmatch as tempmatch
import labelcontest.group_contests as group_contests
import pixel_reg.shared as shared
import pixel_reg.imagesAlign as imagesAlign
import global_align.global_align as global_align
JOBID_TEMPMATCH_TARGETS = util.GaugeID("TemplateMatchTargets")
class SelectTargetsMainPanel(OpenCountPanel):
GLOBALALIGN_JOBID = util.GaugeID("GlobalAlignJobId")
def __init__(self, parent, *args, **kwargs):
OpenCountPanel.__init__(self, parent, *args, **kwargs)
self.proj = None
_sanitycheck_callbacks = ((ID_FLAG_ONLYONE_TARGET,
self._sanitycheck_handle_onlyone_target),
(ID_FLAG_LONELY_TARGETS,
self._sanitycheck_handle_lonelytargets),
(ID_FLAG_CONTEST_ONE_TARGET,
self._sanitycheck_handle_contest_one_target),
(ID_FLAG_EMPTY_CONTESTS,
self._sanitycheck_handle_empty_contest))
for (id_flag, fn) in _sanitycheck_callbacks:
self.add_sanity_check_callback(id_flag, fn)
self.init_ui()
def init_ui(self):
self.seltargets_panel = SelectTargetsPanel(self)
btn_getimgpath = wx.Button(self, label="Get Image Path...")
btn_getimgpath.Bind(wx.EVT_BUTTON, self.onButton_getimgpath)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.seltargets_panel, proportion=1, flag=wx.EXPAND)
self.sizer.Add(btn_getimgpath)
self.SetSizer(self.sizer)
self.Layout()
    def start(self, proj, stateP, ocrtmpdir):
        """Load group/ballot metadata for PROJ, then either restore a prior
        session or launch the background global-alignment job whose result
        feeds the target-selection UI (see on_align_done).

        Input:
            obj PROJ: the OpenCount project object.
            str STATEP: path of this widget's state file.
            str OCRTMPDIR: scratch directory handed to the seltargets panel
                (used downstream by contest inference).
        """
        self.proj = proj
        self.stateP = stateP
        # Maps group to reference image. Used if a different image
        # is needed to be used for alignment purposes.
        self.group_to_Iref = {}
        # GROUP2BALLOT: dict {int groupID: [int ballotID_i, ...]}
        group2ballot = pickle.load(open(pathjoin(proj.projdir_path,
                                                 proj.group_to_ballots), 'rb'))
        group_exmpls = pickle.load(open(pathjoin(proj.projdir_path,
                                                 proj.group_exmpls), 'rb'))
        b2imgs = pickle.load(open(proj.ballot_to_images, 'rb'))
        img2page = pickle.load(open(pathjoin(proj.projdir_path,
                                             proj.image_to_page), 'rb'))
        self.img2flip = pickle.load(open(pathjoin(proj.projdir_path,
                                                  proj.image_to_flip), 'rb'))
        # 0.) Munge GROUP2BALLOT to list of lists of lists
        groups = []
        numtasks = 0
        for groupID, ballotids in sorted(group_exmpls.iteritems(), key=lambda t: t[0]):
            group = []
            for ballotid in ballotids:
                # Cap at 5 exemplar ballots per group.
                if len(group) >= 5:
                    break
                imgpaths = b2imgs[ballotid]
                # Order each ballot's images by page number.
                imgpaths_ordered = sorted(imgpaths, key=lambda imP: img2page[imP])
                group.append(imgpaths_ordered)
            numtasks += 1
            groups.append(group)
        self.displayed_imgpaths = groups
        self.proj.addCloseEvent(self.save_session)
        self.proj.addCloseEvent(self.seltargets_panel.save_session)
        align_outdir = pathjoin(proj.projdir_path, 'groupsAlign_seltargs')
        class GlobalAlignThread(threading.Thread):
            # Worker thread: runs global alignment off the UI thread, then
            # schedules CALLBACK on the main thread with the result.
            def __init__(self, groups, img2flip, align_outdir, ocrtmpdir,
                         manager, queue, callback, jobid, tlisten, *args, **kwargs):
                threading.Thread.__init__(self, *args, **kwargs)
                self.groups = groups
                self.img2flip = img2flip
                self.align_outdir = align_outdir
                self.ocrtmpdir = ocrtmpdir
                self.manager = manager
                self.queue = queue
                self.callback = callback
                self.jobid = jobid
                self.tlisten = tlisten
            def run(self):
                print '...Globally-aligning a subset of each partition...'
                t = time.time()
                groups_align_map = do_align_partitions(self.groups, self.img2flip,
                                                       self.align_outdir, self.manager, self.queue,
                                                       N=None)
                dur = time.time() - t
                print '...Finished globally-aligning a subset of each partition ({0:.4f} s)'.format(dur)
                wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.done", (self.jobid,))
                wx.CallAfter(self.callback, groups_align_map, self.ocrtmpdir)
                self.tlisten.stop()
        class ListenThread(threading.Thread):
            # Polls the worker's queue and ticks the progress gauge for
            # every True it receives, until stop() is called.
            def __init__(self, queue, jobid, *args, **kwargs):
                threading.Thread.__init__(self, *args, **kwargs)
                self.queue = queue
                self.jobid = jobid
                self._stop = threading.Event()
            def stop(self):
                self._stop.set()
            def is_stopped(self):
                return self._stop.isSet()
            def run(self):
                while True:
                    if self.is_stopped():
                        return
                    try:
                        val = self.queue.get(block=True, timeout=1)
                        if val == True:
                            wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.tick", (self.jobid,))
                    except Queue.Empty:
                        pass
        #if not os.path.exists(align_outdir):
        if not self.restore_session():
            # No prior session: spawn alignment worker + gauge listener.
            manager = multiprocessing.Manager()
            queue = manager.Queue()
            if config.TIMER:
                config.TIMER.start_task("SelectTargets_GlobalAlign_CPU")
            tlisten = ListenThread(queue, self.GLOBALALIGN_JOBID)
            workthread = GlobalAlignThread(groups, self.img2flip, align_outdir, ocrtmpdir,
                                           manager, queue, self.on_align_done,
                                           self.GLOBALALIGN_JOBID, tlisten)
            workthread.start()
            tlisten.start()
            gauge = util.MyGauge(self, 1, thread=workthread, msg="Running Global Alignment...",
                                 job_id=self.GLOBALALIGN_JOBID)
            gauge.Show()
            wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.nextjob", (numtasks, self.GLOBALALIGN_JOBID))
        else:
            # SelectTargets restores its self.partitions from stateP.
            seltargets_stateP = pathjoin(self.proj.projdir_path, '_state_selecttargets.p')
            self.seltargets_panel.start(None, self.img2flip, seltargets_stateP, ocrtmpdir)
def on_align_done(self, groups_align_map, ocrtmpdir):
if config.TIMER:
config.TIMER.stop_task("SelectTargets_GlobalAlign_CPU")
groups_align = []
for groupid in sorted(groups_align_map.keys()):
ballots = groups_align_map[groupid]
groups_align.append(ballots)
# Order the displayed groups by size (smallest to largest)
groups_sizes = map(lambda g: -len(g), groups_align)
groups_sizes_argsort = np.argsort(groups_sizes)
groups_align_bysize = [groups_align[i] for i in groups_sizes_argsort]
self.i2groupid = groups_sizes_argsort
seltargets_stateP = pathjoin(self.proj.projdir_path, '_state_selecttargets.p')
self.seltargets_panel.start(groups_align_bysize, self.img2flip, seltargets_stateP, ocrtmpdir)
    def stop(self):
        """Tear down this step: deregister the project close-handlers,
        persist both this widget's and the panel's state, then export
        the selected target locations (see export_results)."""
        self.proj.removeCloseEvent(self.save_session)
        self.proj.removeCloseEvent(self.seltargets_panel.save_session)
        self.save_session()
        self.seltargets_panel.save_session()
        self.export_results()
def restore_session(self):
try:
state = pickle.load(open(self.stateP, 'rb'))
i2groupid = state['i2groupid']
displayed_imgpaths = state['displayed_imgpaths']
#self.group_to_Iref = state['group_to_Iref']
self.i2groupid = i2groupid
self.displayed_imgpaths = displayed_imgpaths
except:
return False
return True
def save_session(self):
state = {'i2groupid': self.i2groupid,
'displayed_imgpaths': self.displayed_imgpaths,
#'group_to_Iref': self.group_to_Iref
}
pickle.dump(state, open(self.stateP, 'wb'), pickle.HIGHEST_PROTOCOL)
    def export_results(self):
        """ For each group, export the locations of the voting
        targets to two locations:
            1.) A proj.target_locs pickle'd data structure
            2.) A dir of .csv files (for integration with LabelContests+
                InferContests).
            3.) A file 'target_roi' that SetThreshold/TargetExtraction will
                use when determining the target sort criterion.
            4.) A set of quarantined ballotids ('flagged' partitions)
        """
        try:
            os.makedirs(self.proj.target_locs_dir)
        except:
            # Directory already exists -- best-effort creation.
            pass
        # NOTE(review): pickle.HIGHEST_PROTOCOL is being passed to open()
        # as its third (buffering) argument here, NOT to pickle.dump --
        # so this dump uses the default protocol. Likely a bug; confirm
        # before changing, since readers may rely on the current protocol.
        pickle.dump(self.group_to_Iref, open(pathjoin(self.proj.projdir_path,'group_to_Iref.p'),'wb', pickle.HIGHEST_PROTOCOL))
        # Output the flagged groups at a ballotid-granularity
        groups_quar = sorted([self.i2groupid[idx] for idx in self.seltargets_panel.flagged_idxs])
        grp2bals = pickle.load(open(pathjoin(self.proj.projdir_path, 'group_to_ballots.p')))
        balids_quar = set()
        for grp_bad in groups_quar:
            balids = grp2bals[grp_bad]
            for balid in balids:
                balids_quar.add(balid)
        print "(SelectTargets) Quarantining {0} flagged groups ({1} ballots total)".format(len(groups_quar), len(balids_quar))
        pickle.dump(balids_quar, open(pathjoin(self.proj.projdir_path, 'quarantinedbals_seltargets.p'), 'wb'))
        # Also, temporarily export the quarantined groups.
        print "(SelectTargets) Exporting 'quarantinedgroups_seltarets.p' as well, just in case."
        pickle.dump(groups_quar, open(pathjoin(self.proj.projdir_path, 'quarantinedgroups_seltargets.p'), 'wb'))
        del grp2bals
        group_targets_map = {} # maps {int groupID: [csvpath_side0, ...]}
        # TARGET_LOCS_MAP: maps {int groupID: {int page: [CONTEST_i, ...]}}, where each
        # CONTEST_i is: [contestbox, targetbox_i, ...], where each
        # box := [x1, y1, width, height, id, contest_id]
        target_locs_map = {}
        lonely_targets_map = {} # maps {int i: {int side: [TargetBox_i, ...]}}
        fields = ('imgpath', 'id', 'x', 'y', 'width', 'height', 'label', 'is_contest', 'contest_id')
        imgsize = None # Assumes all voted ballots are the same dimensions
        for i, boxes_sides in self.seltargets_panel.boxes.iteritems():
            # Flagged ('quarantined') groups are excluded from the export.
            if i in self.seltargets_panel.flagged_idxs:
                continue
            group_idx = self.i2groupid[i]
            csvpaths = []
            for side, boxes in enumerate(boxes_sides):
                outpath = pathjoin(self.proj.target_locs_dir,
                                   "group_{0}_side_{1}.csv".format(group_idx, side))
                csvpaths.append(outpath)
                writer = csv.DictWriter(open(outpath, 'wb'), fields)
                # Make sure that TARGET_LOCS_MAP at least has something for this
                # (To help out target extraction)
                target_locs_map.setdefault(group_idx, {}).setdefault(side, [])
                # BOX_ASSOCS: dict {int contest_id: [ContestBox, [TargetBox_i, ...]]}
                # LONELY_TARGETS: list [TargetBox_i, ...]
                box_assocs, lonely_targets = compute_box_ids(boxes)
                lonely_targets_map.setdefault(i, {}).setdefault(side, []).extend(lonely_targets)
                # For now, just grab one exemplar image from this group
                imgpath = self.seltargets_panel.partitions[i][0][side]
                if imgsize == None:
                    imgsize = cv.GetSize(cv.LoadImage(imgpath))
                rows_contests = []
                rows_targets = []
                id_c, id_t = 0, 0
                for contest_id, (contestbox, targetboxes) in box_assocs.iteritems():
                    x1_out, y1_out = contestbox.x1, contestbox.y1
                    w_out, h_out = contestbox.width, contestbox.height
                    # Make sure contest doesn't extend outside image.
                    x1_out = max(x1_out, 0)
                    y1_out = max(y1_out, 0)
                    if (x1_out + w_out) >= imgsize[0]:
                        w_out = imgsize[0] - x1_out - 1
                    if (y1_out + h_out) >= imgsize[1]:
                        h_out = imgsize[1] - y1_out - 1
                    rowC = {'imgpath': imgpath, 'id': id_c,
                            'x': x1_out, 'y': y1_out,
                            'width': w_out,
                            'height': h_out,
                            'label': '', 'is_contest': 1,
                            'contest_id': contest_id}
                    rows_contests.append(rowC)
                    cbox = [x1_out, y1_out, w_out, h_out, id_c, contest_id]
                    curcontest = [] # list [contestbox, targetbox_i, ...]
                    curcontest.append(cbox)
                    id_c += 1
                    for box in targetboxes:
                        # Note: Ensure that all exported targets have the same dimensions,
                        # or risk breaking SetThreshold!
                        w, h = self.seltargets_panel.boxsize
                        x1_out, y1_out = box.x1, box.y1
                        # Don't let target extend outside the image
                        if (x1_out + w) >= imgsize[0]:
                            x1_out -= ((x1_out + w) - imgsize[0] + 1)
                        if (y1_out + h) >= imgsize[1]:
                            y1_out -= ((y1_out + h) - imgsize[1] + 1)
                        x1_out = max(x1_out, 0)
                        y1_out = max(y1_out, 0)
                        # Note: This doesn't necessarily guarantee that T
                        # is inside img bbox - however, since targets are
                        # small w.r.t image, this will always work.
                        rowT = {'imgpath': imgpath, 'id': id_t,
                                'x': x1_out, 'y': y1_out,
                                'width': w, 'height': h,
                                'label': '', 'is_contest': 0,
                                'contest_id': contest_id}
                        rows_targets.append(rowT)
                        tbox = [x1_out, y1_out, w, h, id_t, contest_id]
                        curcontest.append(tbox)
                        id_t += 1
                    target_locs_map.setdefault(group_idx, {}).setdefault(side, []).append(curcontest)
                writer.writerows(rows_contests + rows_targets)
            group_targets_map[group_idx] = csvpaths
        pickle.dump(group_targets_map, open(pathjoin(self.proj.projdir_path,
                                                     self.proj.group_targets_map), 'wb'),
                    pickle.HIGHEST_PROTOCOL)
        pickle.dump(target_locs_map, open(pathjoin(self.proj.projdir_path,
                                                   self.proj.target_locs_map), 'wb'),
                    pickle.HIGHEST_PROTOCOL)
        # target_roi := (int x1, y1, x2, y2). If self.target_roi is None, then
        # the string None will be written to the output file.
        # Otherwise, four comma-delimited ints will be written, like:
        #     40,40,100,100
        f_target_roi = open(pathjoin(self.proj.projdir_path, 'target_roi'), 'w')
        if self.seltargets_panel.target_roi:
            outstr = "{0},{1},{2},{3}".format(*self.seltargets_panel.target_roi)
        else:
            outstr = "None"
        print "(SelectTargets) Wrote '{0}' to: {1}".format(outstr, pathjoin(self.proj.projdir_path, 'target_roi'))
        print >>f_target_roi, outstr
        f_target_roi.close()
        # Warn User about lonely targets.
        # Note: This is currently handled in the UI during sanitychecks,
        # but no harm in leaving this here too.
        _lst = []
        cnt = 0
        for i, targs_sidesMap in lonely_targets_map.iteritems():
            for side, targets in targs_sidesMap.iteritems():
                if targets:
                    print "...On Partition {0}, side {1}, there were {2} \
Lonely Targets - please check them out, or else they'll get ignored by \
LabelContests.".format(i, side, len(targets))
                    _lst.append("Partition={0} Side={1}".format(i, side))
                    cnt += len(targets)
        if _lst:
            # NOTE(review): this modal dialog is never Destroy()'d.
            dlg = wx.MessageDialog(self, message="Warning - there were {0} \
targets that were not enclosed in a contest. Please check them out, otherwise \
they'll get ignored by LabelContests. They are: {1}".format(cnt, str(_lst)),
                                   style=wx.OK)
            dlg.ShowModal()
def invoke_sanity_checks(self, *args, **kwargs):
""" Code that actually calls each sanity-check with application
specific arguments. Outputs a list of statuses.
"""
lst_statuses = check_all_images_have_targets(self.seltargets_panel.boxes, self.seltargets_panel.flagged_idxs)
#lst_statuses = check_all_images_have_targets(self.seltargets_panel.boxes, set())
return lst_statuses
def onButton_getimgpath(self, evt):
S = self.seltargets_panel
cur_groupid = self.i2groupid[S.cur_i]
imgpath = self.displayed_imgpaths[cur_groupid][S.cur_j][S.cur_page]
dlg = wx.MessageDialog(self, message="Displayed Imagepath: {0}".format(imgpath),
style=wx.OK)
dlg.ShowModal()
"""
===============================================
==== SanityCheck callback handling methods ====
===============================================
"""
    def rails_show_images(self, grps, grps_data=None,
                          btn_labels=("Stop Here", "Next Image"),
                          btn_fns=(lambda i, grps, grps_data: False,
                                   lambda i, grps, grps_data: 'next'),
                          retval_end=False,
                          title_fn=lambda i, grps, grps_data: "Generic Dialog Title",
                          msg_fn=lambda i, grps, grps_data: "Generic Description"):
        """ A simple framework that takes control of the UI, and directs
        the user to the images indicated by BTN_LABELS. In other words,
        this is a 'rail-guided' UI flow.
        Input:
            list GRPS: [(int grp_idx, int side), ...]
            dict GRPS_DATA: {(int grp_idx, int side): obj DATA}
                (Optional) Allows you to pass additional data associated
                with each (grp_idx, side) to relevant btn_fns or dialog
                message fns.
            tuple BTN_LABELS: (str LABEL_0, ..., str LABEL_N)
                At each image in GRPS, a dialog window with N buttons
                will be displayed to the user. You can control what each
                button says via this argument.
            tuple BTN_FNS: (func FN_0, ..., func FN_N)
                The function to invoke when the user clicks button N
                after image i is shown. Each function accepts to
                arguments: int i - the index we are at in GRPS
                           lst grps - the input GRPS.
                           dict grps_data - data for each group GRP.
                It should return one of three outputs:
                    bool True:
                        Stop the rail-guided tour, and signal that the
                        user /can/ move on
                    bool False:
                        Stop the rail-guided tour, and signal that the
                        user /can't/ move on
                    tuple 'next':
                        Display the next image.
            bool retval_end:
                Value to return if the user reaches the end of the list
                of images. True if the user can move on, False o.w.
            func TITLE_FN:
            func MSG_FN:
        """
        grps_data = grps_data if grps_data != None else {}
        panel = self.seltargets_panel
        # Button N in the dialog reports status N, which indexes BTN_FNS.
        btn_ids = range(len(btn_labels))
        for i, (grp_idx, side) in enumerate(grps):
            # Point the UI at the offending image before showing the dialog.
            panel.txt_totalballots.SetLabel(str(len(panel.partitions[grp_idx])))
            panel.txt_totalpages.SetLabel(str(len(panel.partitions[grp_idx][0])))
            panel.display_image(grp_idx, 0, side)
            dlg_title = title_fn(i, grps, grps_data)
            dlg_msg = msg_fn(i, grps, grps_data)
            status = util.WarningDialog(self, dlg_msg,
                                        btn_labels,
                                        btn_ids,
                                        title=dlg_title).ShowModal()
            btn_fn = btn_fns[status]
            result = btn_fn(i, grps, grps_data)
            # True/False end the tour immediately; anything else ('next')
            # advances to the next image.
            if result == True:
                return True
            elif result == False:
                return False
        return retval_end
    def _sanitycheck_handle_onlyone_target(self, grps):
        """ Guides the user to each image that only has one voting target.
        Input:
            list GRPS: [(int grp_idx, int side), ...]
        Output:
            True if the user can move on, False o.w.
        """
        # Button callbacks for rails_show_images: stop (can't move on),
        # advance to the next image, or accept everything as fine.
        fn_stop = lambda i, grps, grps_data: False
        fn_nextimg = lambda i, grps, grps_data: 'next'
        fn_skip = lambda i, grps, grps_data: True
        btn_labels = ("I can fix this.", "Next Image", "Everything is Fine")
        btn_fns = (fn_stop, fn_nextimg, fn_skip)
        dlg_titlefn = lambda i,grps,grps_data: "OpenCount Warning: Only one voting target"
        dlg_msgfn = lambda i,grps, grps_data: "This is an image with only \
one voting target.\n\
Image {0} out of {1}".format(i+1, len(grps))
        # retval_end=True: reaching the end of the tour lets the user move on.
        return self.rails_show_images(grps, btn_labels=btn_labels,
                                      btn_fns=btn_fns,
                                      retval_end=True,
                                      title_fn=dlg_titlefn,
                                      msg_fn=dlg_msgfn)
    def _sanitycheck_handle_lonelytargets(self, lonely_targets_map):
        """ Guide the user to each image that has a voting target(s) not
        enclosed within a contest.
        Input:
            dict LONELY_TARGETS_MAP: {(int grp_idx, int side): [TargetBox_i, ...]}
        Output:
            False (this is a fatal error.)
        """
        def make_dlg_msg(i, grps, grps_data):
            # Per-image dialog text: how many lonely targets, and progress.
            msgbase = "This is an image with {0} voting targets that are \
not enclosed within a contest."
            stats = "Image {0} out of {1}."
            return msgbase.format(len(grps_data[grps[i]])) + "\n" + stats.format(i+1, total_imgs)
        total_imgs = len(lonely_targets_map)
        total_targets = sum(map(len, lonely_targets_map.values()))
        # Sort GRPS by first grpidx, then by side
        # NOTE(review): key=t[0]+t[1] *sums* the pair, which is not a true
        # (grp_idx, side) lexicographic order as the comment above implies
        # -- verify whether the exact ordering matters here.
        grps = sorted(lonely_targets_map.keys(), key=lambda t: t[0]+t[1])
        return self.rails_show_images(grps, grps_data=lonely_targets_map,
                                      retval_end=False,
                                      btn_labels=("I can fix this.", "Next Image"),
                                      btn_fns=(lambda i,grps,grpdata: False,
                                               lambda i,grps,grpdata: 'next'),
                                      title_fn=lambda i,grps,grpdata: "OpenCount Fatal Warning: Lonely Targets",
                                      msg_fn=make_dlg_msg)
    def _sanitycheck_handle_contest_one_target(self, contests_one_target):
        """ Guides the user to each offending image.
        Input:
            dict CONTESTS_ONE_TARGET: {(int grp_idx, int side): [ContestBox_i, ...]}
        Output:
            False (this is a fatal error).
        """
        # TODO: Refactor to use self.rails_show_images
        panel = self.seltargets_panel
        total_imgs = len(contests_one_target)
        total_contests = sum(map(len, contests_one_target.values()))
        _title = "OpenCount Fatal Warning: Bad Contests Detected"
        _msgbase = "This is an image with {0} contests that only have \
one voting target."
        _msgfooter = "Image {0} out of {1}"
        ID_RESUME = 0
        ID_NEXT_IMAGE = 1
        for i, ((grp_idx, side), contestboxes) in enumerate(contests_one_target.iteritems()):
            # Point the UI at the offending image, then ask the user.
            panel.txt_totalballots.SetLabel(str(len(panel.partitions[grp_idx])))
            panel.txt_totalpages.SetLabel(str(len(panel.partitions[grp_idx][0])))
            panel.display_image(grp_idx, 0, side)
            msgout = _msgbase.format(len(contestboxes)) + "\n" + _msgfooter.format(i+1, total_imgs)
            status = util.WarningDialog(self, msgout, ("I can fix this.", "Next Image"),
                                        (ID_RESUME, ID_NEXT_IMAGE), title=_title).ShowModal()
            if status == ID_RESUME:
                return False
        # Fatal: the user may not move on even after viewing every image.
        return False
    def _sanitycheck_handle_empty_contest(self, empty_contests):
        """ Guides the user to each empty contest box.
        Input:
            dict EMPTY_CONTESTS: {(int grp_idx, int side): [ContestBox_i, ...]
        Output:
            False (this is a fatal error).
        """
        # TODO: Refactor to use self.rails_show_images
        panel = self.seltargets_panel
        total_imgs = len(empty_contests)
        _title = "OpenCount Fatal Warning: Empty Contests Detected"
        _msgbase = "This is an image with {0} contests that have no \
voting targets enclosed."
        _msgfooter = "Image {0} out of {1}"
        ID_RESUME = 0
        ID_NEXT_IMAGE = 1
        ID_SKIPALL = 2
        for i, ((grp_idx, side), contestboxes) in enumerate(empty_contests.iteritems()):
            # Point the UI at the offending image, then ask the user.
            panel.txt_totalballots.SetLabel(str(len(panel.partitions[grp_idx])))
            panel.txt_totalpages.SetLabel(str(len(panel.partitions[grp_idx][0])))
            panel.display_image(grp_idx, 0, side)
            msgout = _msgbase.format(len(contestboxes)) + "\n" + _msgfooter.format(i+1, total_imgs)
            status = util.WarningDialog(self, msgout, ("I can fix this.", "Next Image", "- Ignore the empty contests -"),
                                        (ID_RESUME, ID_NEXT_IMAGE, ID_SKIPALL), title=_title).ShowModal()
            if status == ID_RESUME:
                return False
            elif status == ID_SKIPALL:
                return True
        # TODO: Temp. make this a non-fatal error
        return True
class SelectTargetsPanel(ScrolledPanel):
""" A widget that allows you to find voting targets on N ballot
partitions
"""
# TM_MODE_ALL: Run template matching on all images
TM_MODE_ALL = 901
# TM_MODE_POST: Run template matching only on images after (post) the
# currently-displayed group.
TM_MODE_POST = 902
    def __init__(self, parent, *args, **kwargs):
        """Initialize all navigation/selection state and lay out the UI:
        toolbar, the target-drawing image panel, navigation buttons
        (image/style/ballot/page), jump buttons, and the status text row."""
        ScrolledPanel.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # self.partitions: [[[imgpath_i0front, ...], ...], [[imgpath_i1front, ...], ...], ...]
        self.partitions = None
        # self.inv_map: {str imgpath: (int i, int j, int page)}
        self.inv_map = None
        # self.cur_i: Index of currently-displayed partition
        self.cur_i = None
        # self.cur_j: Index of currently-displayed image within partition CUR_I.
        self.cur_j = None
        # self.cur_page: Currently-displayed page
        self.cur_page = None
        # self.boxes: {int i: [[Box_iFront, ...], ...]}
        self.boxes = {}
        # BOXSIZE: (int w, int h), used to enforce that all voting targets
        # are the same size.
        self.boxsize = None
        # Sensitivity for Template Matching
        self.tm_param = 0.93
        # Window sizes for Smoothing
        self.win_ballot = (13, 13)
        self.win_target = (15, 15)
        self.tm_mode = self.TM_MODE_POST
        # STATEP: Path for state file.
        self.stateP = None
        # tuple TARGET_ROI: (int x1, y1, x2, y2). Is set when the user
        # draws their first target box.
        self.target_roi = None
        # set FLAGGED_IDXS: set([int i0, int i1, ...])
        # Stores any flagged 'partitions'.
        self.flagged_idxs = None
        self.toolbar = Toolbar(self)
        # Image panel invokes do_tempmatch when the user finishes a box.
        self.imagepanel = TargetFindPanel(self, self.do_tempmatch)
        txt = wx.StaticText(self, label="Draw a rectangle around each \
voting target on this ballot.")
        # --- Navigation buttons: image, style (partition), ballot, page ---
        btn_next = wx.Button(self, label="Next Image...")
        btn_next.Bind(wx.EVT_BUTTON, self.onButton_nextimage)
        btn_prev = wx.Button(self, label="Prev Image...")
        btn_prev.Bind(wx.EVT_BUTTON, self.onButton_previmage)
        sizer_btnmove = wx.BoxSizer(wx.VERTICAL)
        sizer_btnmove.AddMany([(btn_next,), (btn_prev,)])
        btn_nextpartition = wx.Button(self, label="Next Style...")
        btn_prevpartition = wx.Button(self, label="Previous Style...")
        sizer_partitionbtns = wx.BoxSizer(wx.VERTICAL)
        sizer_partitionbtns.AddMany([(btn_nextpartition,), (btn_prevpartition,)])
        btn_nextimg = wx.Button(self, label="Next Ballot")
        btn_previmg = wx.Button(self, label="Previous Ballot")
        sizer_ballotbtns = wx.BoxSizer(wx.VERTICAL)
        sizer_ballotbtns.AddMany([(btn_nextimg,), (btn_previmg,)])
        btn_nextpage = wx.Button(self, label="Next Page")
        btn_prevpage = wx.Button(self, label="Previous Page")
        sizer_pagebtns = wx.BoxSizer(wx.VERTICAL)
        sizer_pagebtns.AddMany([(btn_nextpage,), (btn_prevpage,)])
        btn_nextpartition.Bind(wx.EVT_BUTTON, self.onButton_nextpartition)
        btn_prevpartition.Bind(wx.EVT_BUTTON, self.onButton_prevpartition)
        btn_nextimg.Bind(wx.EVT_BUTTON, self.onButton_nextimg)
        btn_previmg.Bind(wx.EVT_BUTTON, self.onButton_previmg)
        btn_nextpage.Bind(wx.EVT_BUTTON, self.onButton_nextpage)
        btn_prevpage.Bind(wx.EVT_BUTTON, self.onButton_prevpage)
        # --- Jump / quarantine / reference-image buttons ---
        sizer_jump_stylebal = wx.BoxSizer(wx.VERTICAL)
        btn_jump_partition = wx.Button(self, label="Jump to Style...")
        btn_jump_ballot = wx.Button(self, label="Jump to Ballot...")
        sizer_jump_stylebal.AddMany([(btn_jump_partition,), (btn_jump_ballot,)])
        btn_jump_error = wx.Button(self, label="Next Error...")
        btn_selectIref = wx.Button(self, label="Select as reference image.")
        btn_selectIref.Bind(wx.EVT_BUTTON, self.onButton_selectIref)
        btn_flag_partition = wx.Button(self, label="Quarantine this Style")
        btn_flag_partition.Bind(wx.EVT_BUTTON, self.onButton_flagpartition)
        sizer_btn_jump = wx.BoxSizer(wx.HORIZONTAL)
        #sizer_btn_jump.Add(btn_jump_partition, border=10, flag=wx.ALL)
        #sizer_btn_jump.Add(btn_jump_ballot, border=10, flag=wx.ALL)
        sizer_btn_jump.Add(btn_jump_error, border=10, flag=wx.ALL)
        sizer_btn_jump.Add(btn_selectIref, border=10, flag=wx.ALL)
        sizer_btn_jump.Add(btn_flag_partition, border=10, flag=wx.ALL)
        btn_jump_partition.Bind(wx.EVT_BUTTON, self.onButton_jump_partition)
        btn_jump_ballot.Bind(wx.EVT_BUTTON, self.onButton_jump_ballot)
        btn_jump_error.Bind(wx.EVT_BUTTON, self.onButton_jump_error)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        btn_sizer.Add(sizer_btnmove, border=10, flag=wx.ALL)
        btn_sizer.Add(sizer_partitionbtns, border=10, flag=wx.ALL)
        btn_sizer.Add((80,0))
        btn_sizer.Add(sizer_ballotbtns, border=10, flag=wx.ALL)
        btn_sizer.Add(sizer_pagebtns, border=10, flag=wx.ALL)
        btn_sizer.Add(sizer_jump_stylebal, border=10, flag=wx.ALL)
        btn_sizer.Add((60,0))
        btn_sizer.Add(sizer_btn_jump, border=10, flag=wx.ALL)
        # --- Status row: "Style x/N  Ballot y/M  Page z/P  <imgpath>" ---
        txt1 = wx.StaticText(self, label="Style: ")
        self.txt_curpartition = wx.StaticText(self, label="1")
        txt_slash0 = wx.StaticText(self, label=" / ")
        self.txt_totalpartitions = wx.StaticText(self, label="Foo")
        txt2 = wx.StaticText(self, label="Ballot (subset of full): ")
        self.txt_curballot = wx.StaticText(self, label="1")
        txt_slash1 = wx.StaticText(self, label=" / ")
        self.txt_totalballots = wx.StaticText(self, label="Bar")
        txt3 = wx.StaticText(self, label="Page: ")
        self.txt_curpage = wx.StaticText(self, label="1")
        txt_slash2 = wx.StaticText(self, label=" / ")
        self.txt_totalpages = wx.StaticText(self, label="Baz")
        self.txt_curimgpath = wx.StaticText(self, label="")
        self.txt_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.txt_sizer.AddMany([(txt1,),
                                (self.txt_curpartition,), (txt_slash0,), (self.txt_totalpartitions,),
                                (50,0), (txt2,),
                                (self.txt_curballot,), (txt_slash1,), (self.txt_totalballots,),
                                (50,0), (txt3,),
                                (self.txt_curpage,), (txt_slash2,), (self.txt_totalpages,),
                                (50,0), (self.txt_curimgpath)])
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(txt, flag=wx.ALIGN_CENTER)
        self.sizer.Add(self.toolbar, flag=wx.EXPAND)
        self.sizer.Add(self.imagepanel, proportion=1, flag=wx.EXPAND)
        self.sizer.Add(btn_sizer, flag=wx.ALIGN_CENTER)
        self.sizer.Add(self.txt_sizer)
        self.SetSizer(self.sizer)
    def start(self, partitions, img2flip, stateP, ocrtempdir):
        """
        Input:
            list PARTITIONS: A list of lists of lists, encoding partition+ballot+side(s):
                [[[imgpath_i0_front, ...], ...], [[imgpath_i1_front, ...], ...], ...]
            dict IMG2FLIP: maps {str imgpath: bool isflipped}
            str STATEP: Path of the statefile.
            str OCRTEMPDIR: Used for InferContestRegion.
        """
        self.img2flip = img2flip
        self.stateP = stateP
        self.ocrtempdir = ocrtempdir
        # Only rebuild state from scratch if no prior session exists.
        if not self.restore_session():
            # 0.) Populate my self.INV_MAP
            self.partitions = partitions
            self.inv_map = {}
            self.boxes = {}
            self.boxsize = None
            self.flagged_idxs = set()
            for i, imgpaths in enumerate(self.partitions):
                for j, ballot in enumerate(imgpaths):
                    for page, imgpath in enumerate(ballot):
                        self.inv_map[imgpath] = i, j, page
                # (allows for variable-num pages)
                self.boxes[i] = [[] for _ in xrange(len(ballot))]
        # 1.) Update any StaticTexts in the UI.
        self.txt_totalpartitions.SetLabel(str(len(self.partitions)))
        self.txt_totalballots.SetLabel(str(len(self.partitions[0])))
        self.txt_totalpages.SetLabel(str(len(self.partitions[0][0])))
        self.txt_sizer.Layout()
        self.display_image(0, 0, 0, autofit=True)
        # 2.) Start in Target-Create mode.
        self.imagepanel.set_mode_m(BoxDrawPanel.M_CREATE)
        self.imagepanel.boxtype = TargetBox
def restore_session(self):
try:
state = pickle.load(open(self.stateP, 'rb'))
inv_map = state['inv_map']
boxes = state['boxes']
boxsize = state['boxsize']
partitions = state['partitions']
target_roi = state.get('target_roi', None) # Handle legacy statefiles
flagged_idxs = state.get('flagged_idxs', set()) # Legacy
self.inv_map = inv_map
self.boxes = boxes
self.boxsize = boxsize
self.partitions = partitions
self.target_roi = target_roi
self.flagged_idxs = flagged_idxs
except:
return False
return True
def save_session(self):
state = {'inv_map': self.inv_map,
'boxes': self.boxes,
'boxsize': self.boxsize,
'partitions': self.partitions,
'target_roi': self.target_roi,
'flagged_idxs': self.flagged_idxs}
pickle.dump(state, open(self.stateP, 'wb'), pickle.HIGHEST_PROTOCOL)
    def do_tempmatch(self, box, img, patch=None):
        """ Runs template matching on all images within the current
        partition, using the BOX from IMG as the template.
        Input:
            Box BOX:
            PIL IMG:
            PIL PATCH:
                If given, then this PATCH directly as the template image,
                rather than using BOX to extract a patch from IMG. This
                will skip the auto-cropping.

        Side effects: disables this panel while the TM_Thread runs; the
        thread invokes self.on_tempmatch_done with the matches when done.
        """
        self.Disable()
        if config.TIMER:
            config.TIMER.start_task("SelectTargets_TempMatch_CPU")
        if patch == None:
            # 1.) Do an autofit.
            patch_prefit = img.crop((box.x1, box.y1, box.x2, box.y2))
            patch = util_gui.fit_image(patch_prefit, padx=2, pady=2)
            patch_cv = pil2iplimage(patch)
            # 2.) Apply a smooth on PATCH (first adding a white border, to
            # avoid the smooth darkening PATCH, but brightening IMG).
            BRD = 20    # Amt. of white border
            patchB = cv.CreateImage((patch_cv.width+BRD, patch_cv.height+BRD), patch_cv.depth, patch_cv.channels)
            # Pass '0' as bordertype due to undocumented OpenCV flag IPL_BORDER_CONSTANT
            # being 0. Wow!
            cv.CopyMakeBorder(patch_cv, patchB, (BRD/2, BRD/2), 0, 255)
            xwin, ywin = self.win_target
            cv.Smooth(patchB, patchB, cv.CV_GAUSSIAN, param1=xwin, param2=ywin)
            # 2.a.) Copy the smooth'd PATCHB back into PATCH
            #patch_cv = cv.GetSubRect(patchB, (BRD/2, BRD/2, patch_cv.width, patch_cv.height))
            cv.SetImageROI(patchB, (BRD/2, BRD/2, patch_cv.width, patch_cv.height))
            patch = patchB
            #patch = iplimage2pil(patchB)
            #patch.save("_patch.png")
        # Debug artifact: the template actually used for matching.
        cv.SaveImage("_patch.png", patch)
        # 3.) Run template matching across all images in self.IMGPATHS,
        # using PATCH as the template.
        if self.tm_mode == self.TM_MODE_ALL:
            # Template match on /all/ images across all partitions, all pages
            imgpaths = sum([t for t in sum(self.partitions, [])], [])
        elif self.tm_mode == self.TM_MODE_POST:
            # Template match only on images after this partition (including
            # this partition)
            imgpaths = sum([t for t in sum(self.partitions[self.cur_i:], [])], [])
            imgpaths = imgpaths[self.cur_page:]    # Don't run on prior pages
        print "...Running template matching on {0} images...".format(len(imgpaths))
        queue = Queue.Queue()
        # on_tempmatch_done is invoked (on the worker) with the results.
        thread = TM_Thread(queue, JOBID_TEMPMATCH_TARGETS, patch, img,
                           imgpaths, self.tm_param, self.win_ballot, self.win_target,
                           self.on_tempmatch_done)
        thread.start()
        gauge = util.MyGauge(self, 1, job_id=JOBID_TEMPMATCH_TARGETS,
                             msg="Finding Voting Targets...")
        gauge.Show()
        num_tasks = len(imgpaths)
        Publisher().sendMessage("signals.MyGauge.nextjob", (num_tasks, JOBID_TEMPMATCH_TARGETS))
    def on_tempmatch_done(self, results, w, h):
        """ Invoked after template matching computation is complete.
        Merges the newly-found matches into self.boxes, discarding any
        match that overlaps (or nearly coincides with) a previously-found
        TargetBox, then refreshes the currently-displayed image.
        Input:
            dict RESULTS: maps {str imgpath: [(x1,y1,x2,y2,score_i), ...}. The matches
                that template matching discovered.
            int w: width of the patch
            int h: height of the patch
        """
        def is_overlap(rect1, rect2):
            # True iff any corner of RECT1 lies strictly inside RECT2.
            def is_within_box(pt, box):
                return box.x1 < pt[0] < box.x2 and box.y1 < pt[1] < box.y2
            x1, y1, x2, y2 = rect1.x1, rect1.y1, rect1.x2, rect1.y2
            # NOTE: w, h here intentionally shadow the patch-size parameters
            # of the enclosing method -- they are RECT1's own dimensions.
            w, h = abs(x2-x1), abs(y2-y1)
            # Checks (in order): UL, UR, LR, LL corners
            return (is_within_box((x1,y1), rect2) or
                    is_within_box((x1+w,y1), rect2) or
                    is_within_box((x1+w,y1+h), rect2) or
                    is_within_box((x1,y1+h), rect2))
        def too_close(b1, b2):
            # True iff the boxes' upper-left corners are within half a box
            # of each other, or either box overlaps the other.
            w, h = abs(b1.x1-b1.x2), abs(b1.y1-b1.y2)
            return ((abs(b1.x1 - b2.x1) <= w / 2.0 and
                     abs(b1.y1 - b2.y1) <= h / 2.0) or
                    is_overlap(b1, b2) or
                    is_overlap(b2, b1))
        if config.TIMER:
            config.TIMER.stop_task("SelectTargets_TempMatch_CPU")
        # 1.) Add the new matches to self.BOXES, but also filter out
        # any matches in RESULTS that are too close to previously-found
        # matches.
        _cnt_added = 0
        # Sort by the 'j' index, e.g. prefer matches from the main-displayed
        # image first.
        for imgpath, matches in sorted(results.iteritems(), key=lambda (imP, mats): self.inv_map[imP][1]):
            partition_idx, j, page = self.inv_map[imgpath]
            for (x1, y1, x2, y2, score) in matches:
                boxB = TargetBox(x1, y1, x1+w, y1+h)
                # 1.a.) See if any already-existing TargetBox is too close
                do_add = True
                for boxA in [b for b in self.boxes[partition_idx][page] if isinstance(b, TargetBox)]:
                    if too_close(boxA, boxB):
                        do_add = False
                        break
                if do_add:
                    # 1.b.) Enforce constraint that all voting targets
                    # are the same size.
                    if self.boxsize == None:
                        self.boxsize = (w, h)
                    else:
                        boxB.x2 = boxB.x1 + self.boxsize[0]
                        boxB.y2 = boxB.y1 + self.boxsize[1]
                    # NOTE(review): if partition_idx is absent from self.boxes,
                    # setdefault yields [] and [page] would IndexError --
                    # presumably every partition is pre-populated; confirm.
                    self.boxes.setdefault(partition_idx, [])[page].append(boxB)
                    _cnt_added += 1
        print 'Added {0} new boxes from this tempmatch run.'.format(_cnt_added)
        print 'Num boxes in current partition:', len(self.boxes[self.cur_i][self.cur_page])
        self.imagepanel.set_boxes(self.boxes[self.cur_i][self.cur_page])
        self.Refresh()
        self.Enable()
        print "...Finished adding results from tempmatch run."
    def display_image(self, i, j, page, autofit=False):
        """ Displays the J-th image in partition I. Also handles
        reading/saving in the currently-created boxes for the old/new image.
        If AUTOFIT is True, then this will auto-scale the image such that
        if fits entirely in the current client size.
        Input:
            int I: Which partition to display
            int J: Which image in partition I to display.
            int PAGE: Which page to display.
        Output:
            Returns the (I,J,PAGE) we decided to display, if successful.
        """
        # NOTE(review): on out-of-range indices this drops into the debugger
        # but then falls through and still indexes with the bad value.
        if i < 0 or i >= len(self.partitions):
            print "Invalid partition idx:", i
            pdb.set_trace()
        elif j < 0 or j >= len(self.partitions[i]):
            print "Invalid image idx {0} into partition {1}".format(j, i)
            pdb.set_trace()
        # 0.) Save boxes of old image
        '''
        if self.cur_i != None:
            self.boxes.setdefault(self.cur_i, []).extend(self.imagepanel.boxes)
        '''
        self.cur_i, self.cur_j, self.cur_page = i, j, page
        imgpath = self.partitions[i][j][page]
        # 1.) Display New Image
        wximg = wx.Image(imgpath, wx.BITMAP_TYPE_ANY)
        if autofit:
            # Scale down along the dominant dimension so the image fits the
            # client area, preserving aspect ratio; never upscale.
            wP, hP = self.imagepanel.GetClientSize()
            w_img, h_img = wximg.GetWidth(), wximg.GetHeight()
            if w_img > h_img and w_img > wP:
                _c = w_img / float(wP)
                w_img_new = wP
                h_img_new = int(round(h_img / _c))
            elif w_img < h_img and h_img > hP:
                _c = h_img / float(hP)
                w_img_new = int(round(w_img / _c))
                h_img_new = hP
            else:
                w_img_new, h_img_new = w_img, h_img
            self.imagepanel.set_image(wximg, size=(w_img_new, h_img_new))
        else:
            self.imagepanel.set_image(wximg)
        # 2.) Read in previously-created boxes for I (if exists)
        # NOTE(review): if cur_i is missing, .get returns [] and [page]
        # raises IndexError -- presumably self.boxes is pre-populated.
        boxes = self.boxes.get(self.cur_i, [])[page]
        self.imagepanel.set_boxes(boxes)
        #self.SetupScrolling()
        # 3.) Finally, update relevant StaticText in the UI.
        self.txt_curimgpath.SetLabel(imgpath)
        self.txt_curpartition.SetLabel(str(self.cur_i+1))
        self.txt_curballot.SetLabel(str(self.cur_j+1))
        self.txt_curpage.SetLabel(str(self.cur_page+1))
        self.txt_sizer.Layout()
        self.Refresh()
        if self.cur_i in self.flagged_idxs:
            print "(SelectTargets) Idx {0} was flagged by user!".format(self.cur_i)
            dlg = wx.MessageDialog(self, style=wx.OK,
                                   message="This 'partition' was flagged by the user.")
            dlg.ShowModal()
        return (self.cur_i,self.cur_j,self.cur_page)
def resize_targets(self, x1_del, y1_del, x2_del, y2_del):
""" Resizes all voting targets by shifting each corner by input
X1_DEL, Y1_DEL, X2_DEL, Y2_DEL.
"""
if not self.boxsize:
print "Can't call resize_targets() when no targets exist."
return
w_new = self.boxsize[0] - x1_del + x2_del
h_new = self.boxsize[1] - y1_del + y2_del
if w_new <= 1 or h_new <= 1:
print "New dimensions are degenerate: w,h=({0},{1})".format(w_new, h_new)
return
self.boxsize = w_new, h_new
for partition_idx, pages_tpl in self.boxes.iteritems():
for page, boxes in enumerate(pages_tpl):
for target in [b for b in boxes if isinstance(b, TargetBox)]:
target.x1 += x1_del
target.y1 += y1_del
target.x2 += x2_del
target.y2 += y2_del
self.imagepanel.dirty_all_boxes()
self.Refresh()
def display_nextpartition(self):
next_idx = self.cur_i + 1
if next_idx >= len(self.partitions):
return None
self.txt_totalballots.SetLabel(str(len(self.partitions[next_idx])))
self.txt_totalpages.SetLabel(str(len(self.partitions[next_idx][0])))
return self.display_image(next_idx, 0, 0)
def display_prevpartition(self):
prev_idx = self.cur_i - 1
if prev_idx < 0:
return None
self.txt_totalballots.SetLabel(str(len(self.partitions[prev_idx])))
self.txt_totalpages.SetLabel(str(len(self.partitions[prev_idx][0])))
return self.display_image(prev_idx, 0, 0)
def display_nextimg(self):
""" Displays the next image in the current partition. If the end
of the list is reached, returns None, and does nothing. Else,
returns the new image index.
"""
next_idx = self.cur_j + 1
if next_idx >= len(self.partitions[self.cur_i]):
return None
self.txt_totalpages.SetLabel(str(len(self.partitions[self.cur_i][next_idx])))
return self.display_image(self.cur_i, next_idx, self.cur_page)
def display_previmg(self):
prev_idx = self.cur_j - 1
if prev_idx < 0:
return None
self.txt_totalpages.SetLabel(str(len(self.partitions[self.cur_i][prev_idx])))
return self.display_image(self.cur_i, prev_idx, self.cur_page)
def display_nextpage(self):
next_idx = self.cur_page + 1
if next_idx >= len(self.partitions[self.cur_i][self.cur_j]):
return None
return self.display_image(self.cur_i, self.cur_j, next_idx)
def display_prevpage(self):
prev_idx = self.cur_page - 1
if prev_idx < 0:
return None
return self.display_image(self.cur_i, self.cur_j, prev_idx)
def onButton_nextimage(self, evt):
""" Take the user to the next page or partition. """
if self.display_nextpage() == None:
self.display_nextpartition()
def onButton_previmage(self, evt):
""" Take the user to the previous page or partition. """
if self.display_prevpage() == None:
prev_i = self.cur_i - 1
if prev_i < 0:
return
if not self.partitions[prev_i]:
print "(Error) There appears to be an empty partition at i={0}".format(prev_i)
return
numpages = len(self.partitions[prev_i][0])
self.display_image(prev_i, 0, numpages - 1)
    def onButton_nextpartition(self, evt):
        # Button handler: jump to the next partition.
        self.display_nextpartition()
    def onButton_prevpartition(self, evt):
        # Button handler: jump to the previous partition.
        self.display_prevpartition()
    def onButton_nextimg(self, evt):
        # Button handler: show the next ballot in the current partition.
        self.display_nextimg()
    def onButton_previmg(self, evt):
        # Button handler: show the previous ballot in the current partition.
        self.display_previmg()
    def onButton_nextpage(self, evt):
        # Button handler: show the next page of the current ballot.
        self.display_nextpage()
    def onButton_prevpage(self, evt):
        # Button handler: show the previous page of the current ballot.
        self.display_prevpage()
    def onButton_flagpartition(self, evt):
        """ Marks the currently-displayed partition as quarantined; flagged
        indices trigger a warning dialog when re-displayed. """
        print "(SelectTargets) Flagging partition '{0}' as quarantined.".format(self.cur_i)
        self.flagged_idxs.add(self.cur_i)
    def zoomin(self, amt=0.1):
        # Zoom the displayed image in by AMT (a scale-factor delta).
        self.imagepanel.zoomin(amt=amt)
    def zoomout(self, amt=0.1):
        # Zoom the displayed image out by AMT (a scale-factor delta).
        self.imagepanel.zoomout(amt=amt)
def onButton_jump_partition(self, evt):
dlg = wx.TextEntryDialog(self, "Which Group Number?", "Enter group number")
status = dlg.ShowModal()
if status == wx.ID_CANCEL:
return
try:
idx = int(dlg.GetValue()) - 1
except:
print "Invalid index:", idx
return
if idx < 0 or idx >= len(self.partitions):
print "Invalid group index:", idx
return
self.txt_totalballots.SetLabel(str(len(self.partitions[idx])))
self.txt_totalpages.SetLabel(str(len(self.partitions[idx][0])))
self.display_image(idx, 0, 0)
def onButton_jump_ballot(self, evt):
dlg = wx.TextEntryDialog(self, "Which Ballot Number?", "Enter Ballot number")
status = dlg.ShowModal()
if status == wx.ID_CANCEL:
return
try:
idx = int(dlg.GetValue()) - 1
except:
print "Invalid index:", idx
return
if idx < 0 or idx >= len(self.partitions[self.cur_i]):
print "Invalid ballot index:", idx
return
self.txt_totalpages.SetLabel(str(len(self.partitions[self.cur_i][idx])))
self.display_image(self.cur_i, idx, 0)
def onButton_jump_error(self, evt):
if 'next_errors' in dir(self) and self.next_errors != []:
idx,v = self.next_errors.pop()
self.txt_totalballots.SetLabel(str(len(self.partitions[idx])))
self.txt_totalpages.SetLabel(str(len(self.partitions[idx][0])))
self.display_image(idx, 0, v)
    def onButton_selectIref(self, evt):
        """ Records the currently-displayed ballot index as the reference
        image (Iref) for the current group in the parent's group_to_Iref
        mapping. """
        print self.cur_j
        # NOTE(review): reads cur_i from the parent's seltargets_panel, but
        # cur_j from self -- presumably this handler lives on a panel that
        # mirrors the SelectTargets display; confirm against callers.
        S = self.parent.seltargets_panel
        cur_groupid = self.parent.i2groupid[S.cur_i]
        print cur_groupid
        self.parent.group_to_Iref[cur_groupid] = self.cur_j
def onButton_jump_page(self, evt):
dlg = wx.TextEntryDialog(self, "Which Page Number?", "Enter Page number")
status = dlg.ShowModal()
if status == wx.ID_CANCEL:
return
try:
idx = int(dlg.GetValue()) - 1
except:
print "Invalid index:", idx
return
if idx < 0 or idx >= len(self.partitions[self.cur_i][self.cur_j]):
print "Invalid page index:", idx
return
self.display_image(self.cur_i, self.cur_j, idx)
    def infercontests(self):
        """ Runs automatic contest-region detection (group_contests.find_contests)
        on one example ballot per partition, in a background thread with a
        progress gauge. On completion, replaces each example page's ContestBoxes
        with the inferred regions and sets proj.infer_bounding_boxes.
        """
        def are_there_boxes():
            # True iff at least one partition has a non-empty box list.
            for i, sides in self.boxes.iteritems():
                if sides:
                    return True
            return False
        if not self.boxsize or not are_there_boxes():
            dlg = wx.MessageDialog(self, style=wx.OK, caption="Can't detect \
contests right now",
                                   message="Please annotate voting targets \
before running the automatic contest detection routine.")
            dlg.ShowModal()
            return
        if config.TIMER:
            config.TIMER.start_task("SelectTargets_InferContestRegions_CPU")
        imgpaths_exs = [] # list of [imgpath_i, ...]
        # Arbitrarily choose the first one Ballot from each partition
        for partition_idx, imgpaths_sides in enumerate(self.partitions):
            for imgpaths in imgpaths_sides:
                for side, imgpath in enumerate(imgpaths):
                    # Only add imgpaths that have boxes
                    if self.boxes[partition_idx][side]:
                        imgpaths_exs.append(imgpath)
                break
        # Since the ordering of these dataStructs encode semantic meaning,
        # and since I don't want to pass in an empty contest to InferContests
        # (it crashes), I have to manually remove all empty-pages from IMGPATHS_EXS
        # and TARGETS
        # Let i=target #, j=ballot style, k=contest idx:
        targets = [] # list of [[[box_ijk, ...], [box_ijk+1, ...], ...], ...]
        for partition_idx, boxes_sides in self.boxes.iteritems():
            for side, boxes in enumerate(boxes_sides):
                style_boxes = [] # [[contest_i, ...], ...]
                for box in boxes:
                    # InferContests throws out the pre-determined contest
                    # grouping, so just stick each target in its own
                    # 'contest'
                    if type(box) == TargetBox:
                        style_boxes.append([(box.x1, box.y1, box.x2, box.y2)])
                if style_boxes:
                    targets.append(style_boxes)
        # CONTEST_RESULTS: [[box_i, ...], ...], each subtuple_i is for imgpath_i.
        def infercontest_finish(contest_results):
            # Completion callback, run on the UI thread via MyGauge's ondone.
            # 1.) Update my self.BOXES
            for i, contests in enumerate(contest_results):
                partition_idx, j, page = self.inv_map[imgpaths_exs[i]]
                # Remove previous contest boxes
                justtargets = [b for b in self.boxes[partition_idx][page] if not isinstance(b, ContestBox)]
                contest_boxes = []
                for (x1,y1,x2,y2) in contests:
                    contest_boxes.append(ContestBox(x1,y1,x2,y2))
                recolour_contests(contest_boxes)
                self.boxes[partition_idx][page] = justtargets+contest_boxes
            # 2.) Update self.IMAGEPANEL.BOXES (i.e. the UI)
            self.imagepanel.set_boxes(self.boxes[self.cur_i][self.cur_page])
            # 3.) Finally, update the self.proj.infer_bounding_boxes flag,
            # so that LabelContests does the right thing.
            self.GetParent().proj.infer_bounding_boxes = True
            self.Refresh()
            if config.TIMER:
                config.TIMER.stop_task("SelectTargets_InferContestRegions_CPU")
        ocrtempdir = self.ocrtempdir
        class RunThread(threading.Thread):
            # Worker thread: runs the (slow) contest finder, then signals
            # the gauge so infercontest_finish picks up self.tt.
            def __init__(self, *args, **kwargs):
                threading.Thread.__init__(self, *args, **kwargs)
            def run(self):
                self.tt = group_contests.find_contests(ocrtempdir, imgpaths_exs, targets)
                wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.done")
        tt = RunThread()
        tt.start()
        gauge = util.MyGauge(self, 1, ondone=lambda: infercontest_finish(tt.tt))
        gauge.Show()
        wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.nextjob", len(imgpaths_exs))
def set_target_roi(self, roi):
""" Updates/sets the target region-of-interest (ROI), which is the
region where the user is expected to 'draw' on. This information is
useful for the SetThreshold panel, so that it can better-sort the
voting targets.
Input:
tuple ROI: (int x1, y1, x2, y2)
"""
self.target_roi = tuple([int(round(coord)) for coord in roi])
return self.target_roi
class Toolbar(wx.Panel):
def __init__(self, parent, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.parent = parent
self._setup_ui()
self._setup_evts()
self.Layout()
def _setup_ui(self):
self.btn_addtarget = wx.Button(self, label="Add Target")
self.btn_forceaddtarget = wx.Button(self, label="Force Add Target")
self.btn_addcontest = wx.Button(self, label="Add Contest")
self.btn_modify = wx.Button(self, label="Modify")
self.btn_zoomin = wx.Button(self, label="Zoom In")
self.btn_zoomout = wx.Button(self, label="Zoom Out")
self.btn_infercontests = wx.Button(self, label="Infer Contest Regions...")
self.btn_opts = wx.Button(self, label="Advanced: Options")
self.btn_resize_targets = wx.Button(self, label="Resize Voting Targets")
self.btn_detect_errors = wx.Button(self, label="Detect Errors")
self.btn_set_target_roi = wx.Button(self, label="Set Mark Region")
self.sizer = wx.BoxSizer(wx.VERTICAL)
btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
btn_sizer.AddMany([(self.btn_addtarget,), (self.btn_forceaddtarget,),
(self.btn_addcontest), (self.btn_modify,),
(self.btn_zoomin,), (self.btn_zoomout,),
(self.btn_infercontests,), (self.btn_opts,),
(self.btn_resize_targets,), (self.btn_detect_errors,),
(self.btn_set_target_roi)])
self.sizer.Add(btn_sizer)
self.SetSizer(self.sizer)
def _setup_evts(self):
self.btn_addtarget.Bind(wx.EVT_BUTTON, self.onButton_addtarget)
self.btn_forceaddtarget.Bind(wx.EVT_BUTTON, self.onButton_forceaddtarget)
self.btn_addcontest.Bind(wx.EVT_BUTTON, self.onButton_addcontest)
self.btn_modify.Bind(wx.EVT_BUTTON, lambda evt: self.setmode(BoxDrawPanel.M_IDLE))
self.btn_zoomin.Bind(wx.EVT_BUTTON, lambda evt: self.parent.zoomin())
self.btn_zoomout.Bind(wx.EVT_BUTTON, lambda evt: self.parent.zoomout())
self.btn_infercontests.Bind(wx.EVT_BUTTON, lambda evt: self.parent.infercontests())
self.btn_opts.Bind(wx.EVT_BUTTON, self.onButton_opts)
self.btn_resize_targets.Bind(wx.EVT_BUTTON, self.onButton_resizetargets)
self.btn_detect_errors.Bind(wx.EVT_BUTTON, self.onButton_detecterrors)
self.btn_set_target_roi.Bind(wx.EVT_BUTTON, self.onButton_settargetroi)
def onButton_addtarget(self, evt):
self.setmode(BoxDrawPanel.M_CREATE)
self.parent.imagepanel.boxtype = TargetBox
def onButton_forceaddtarget(self, evt):
self.setmode(TargetFindPanel.M_FORCEADD_TARGET)
self.parent.imagepanel.boxtype = TargetBox
def onButton_addcontest(self, evt):
self.setmode(BoxDrawPanel.M_CREATE)
self.parent.imagepanel.boxtype = ContestBox
def setmode(self, mode_m):
self.parent.imagepanel.set_mode_m(mode_m)
def onButton_opts(self, evt):
dlg = OptionsDialog(self)
status = dlg.ShowModal()
if status == wx.ID_CANCEL:
return
self.parent.tm_param = dlg.tm_param
self.parent.win_ballot = dlg.win_ballot
self.parent.win_target = dlg.win_target
self.parent.tm_mode = dlg.tm_mode
def onButton_resizetargets(self, evt):
if not self.parent.boxsize:
wx.MessageDialog(self, style=wx.OK, caption="Must create voting targets first",
message="Please first create voting targets on the ballot. \
Then, you may resize the voting targets here.").ShowModal()
return
dlg = ResizeTargetsDialog(self, self.parent.boxsize)
status = dlg.ShowModal()
if status == wx.ID_CANCEL:
return
x1_del, y1_del, x2_del, y2_del = dlg.x1_del, dlg.y1_del, dlg.x2_del, dlg.y2_del
self.parent.resize_targets(x1_del, y1_del, x2_del, y2_del)
def onButton_detecterrors(self, evt):
events = {}
lookup = {}
kinds = {}
votes_for_errors = {}
stats_by_ballot = {}
def set_events(ee):
for e in ee: events[e[0]] = {}
for e in ee: lookup[e[0]] = {}
for e in ee: kinds[e[0]] = e[1]
def observe(e, obs, uid):
if obs not in events[e]:
events[e][obs] = 0
lookup[e][obs] = []
events[e][obs] += 1
lookup[e][obs].append(uid)
votes_for_errors[uid] = 0
if uid not in stats_by_ballot:
stats_by_ballot[uid] = {}
stats_by_ballot[uid][e] = obs
#self.parent.boxes = dict((k,[[z for z in y if z.y1 > 400 or z.y2 > 400] for y in v]) for k,v in self.parent.boxes.items())
#self.parent.boxes = dict((k,[[z for z in y] for y in v]) for k,v in self.parent.boxes.items())
set_events([("exists", "entropy"),
("target count", "entropy"),
("columns", "entropy"),
("targets by column", "entropy"),
("contest count", "entropy"),
("contests by column", "entropy"),
("contest width", "entropy"),
("colspread", "smaller")])
def intersect(line1, line2):
top = max(line1.y1, line2.y1)
bottom = min(line1.y2, line2.y2)
left = max(line1.x1, line2.x1)
right = min(line1.x2, line2.x2)
return bottom > top and right > left
for pid,partition in self.parent.boxes.items():
for i,page in enumerate(partition):
targets = [x for x in page if type(x) == TargetBox]
observe("exists", True, (pid,i))
observe("target count", len(targets), (pid,i))
if len(targets) == 0: continue
leftcoord = sorted([x.x1 for x in targets])
width = abs(targets[0].x1-targets[0].x2)
def group(leftcoord):
cols = [[]]
for ii,(x1,x2) in enumerate(zip(leftcoord,leftcoord[1:]+[-1<<30])):
cols[-1].append(x1)
if abs(x1-x2) > width/2:
cols.append([])
return cols[:-1]
cols = group(leftcoord)
observe("columns", len(cols), (pid,i))
for val in tuple(map(len,cols)):
observe("targets by column", val, (pid,i))
contests = [x for x in page if type(x) == ContestBox]
if len(contests) == 0: continue
#observe("exists", tuple([sum([intersect(x,y) for y in contests]) == 1 for x in targets]), (pid,i))
if not all([sum([intersect(x,y) for y in contests]) == 1 for x in targets]):
print "OH NO THIS IS BAD", pid+1, i, [x[1] for x in zip([sum([intersect(x,y) for y in contests]) == 1 for x in targets],targets) if x[0] == False]
observe("contest width", (max((x.x2-x.x1)/50 for x in contests),min((x.x2-x.x1)/50 for x in contests)), (pid,i))
observe("contest count", len(contests), (pid,i))
observe("contests by column", tuple(map(len,group(sorted([x.x1 for x in contests])))), (pid,i))
#spread = 10*sum([1-scipy.stats.linregress(range(len(col)),col)[2]**2 for col in cols[:-1]])
#print spread
#print scipy.stats.linregress(range(len(cols[0])),cols[0])
#observe("colspread", spread, (pid,i))
for evtname,obs in events.items():
evttype = kinds[evtname]
count = sum(obs.values())
if evttype == "entropy":
for what,c in obs.items():
for ballot in lookup[evtname][what]:
votes_for_errors[ballot] += math.log(float(c)/count)
elif evttype == "smaller":
expanded = [k for k,v in obs.items() for _ in range(v)]
average = np.mean(expanded)
std = np.std(expanded)
print average, std
for what,c in obs.items():
for ballot in lookup[evtname][what]:
if what > average:
votes_for_errors[ballot] += -(float(what)-average)/std*2
self.parent.next_errors = [x[0] for x in sorted(votes_for_errors.items(), key=lambda x: -x[1])]
for (ballot,side),votes in sorted(votes_for_errors.items(), key=lambda x: -x[1]):
print ballot+1, side, votes, stats_by_ballot[ballot,side]
print events
def onButton_settargetroi(self, evt):
if not self.GetParent().boxsize:
dlg = wx.MessageDialog(self, style=wx.OK,
message="Please select at least one voting \
target first.")
dlg.ShowModal()
return
imgpath = self.GetParent().partitions[0][0][0]
# boxes: {int i: [[Box_iFront, ...], ...]}
box = self.GetParent().boxes[0][0][0]
# targetroi: [int x1, int y1, int x2, int y2]
targetroi = self.GetParent().target_roi
I = scipy.misc.imread(imgpath, flatten=True)
Itarget = I[box.y1:box.y2, box.x1:box.x2]
dlg = DrawROIDialog(self, Itarget, roi=targetroi)
status = dlg.ShowModal()
if status == wx.CANCEL:
return
# tuple ROI: (int x1, y1, x2, y2)
roi = dlg.roi
print "(SelectTargets) Set target_roi from {0} to: {1}".format(self.GetParent().target_roi, roi)
self.GetParent().set_target_roi(roi)
class ResizeTargetsDialog(wx.Dialog):
    """ Modal dialog that asks the user how many pixels to shift each
    corner (x1,y1 = upper-left; x2,y2 = lower-right) of every voting
    target. On OK, the validated deltas are available as self.x1_del,
    self.y1_del, self.x2_del, self.y2_del; they stay None on Cancel.
    """
    def __init__(self, parent, boxsize, *args, **kwargs):
        wx.Dialog.__init__(self, parent, size=(475, 350), title="Resizing Voting Targets",
                           style=wx.CAPTION | wx.RESIZE_BORDER | wx.SYSTEM_MENU, *args, **kwargs)
        # boxsize: current (width, height) of all voting targets.
        self.boxsize = boxsize
        # Output deltas, populated by onButton_ok.
        self.x1_del, self.y1_del = None, None
        self.x2_del, self.y2_del = None, None
        msg_inst = (textwrap.fill("Please indicate (in pixels) \
how much to shift the x1, y1 (Upper-left corner) and x2, y2 (Lower-right corner) \
by.", 75) + "\n\n" + textwrap.fill("Positive X values shift to the Right, negative X values shift to the Left.", 75)
                    + "\n\n" + textwrap.fill("Positive Y values shift Downwards, negative Y values shift Upward.", 75))
        txt_inst = wx.StaticText(self, label=msg_inst)
        txt_boxsize_old = wx.StaticText(self, label="Current Target Size (width,height): ({0}, {1})".format(boxsize[0], boxsize[1]))
        szh_upperleft = wx.BoxSizer(wx.HORIZONTAL)
        txt_upperleft = wx.StaticText(self, label="UpperLeft Corner: ")
        szv_x1y1 = wx.BoxSizer(wx.VERTICAL)
        szh_x1 = wx.BoxSizer(wx.HORIZONTAL)
        txt_x1 = wx.StaticText(self, label="Shift X1 by: ")
        self.txtctrl_x1del = wx.TextCtrl(self, value='0')
        szh_x1.AddMany([(txt_x1,), (self.txtctrl_x1del,)])
        szh_y1 = wx.BoxSizer(wx.HORIZONTAL)
        txt_y1 = wx.StaticText(self, label="Shift Y1 by: ")
        self.txtctrl_y1del = wx.TextCtrl(self, value='0')
        szh_y1.AddMany([(txt_y1,), (self.txtctrl_y1del,)])
        szv_x1y1.AddMany([(szh_x1,), (szh_y1,)])
        szh_upperleft.AddMany([(txt_upperleft,), (szv_x1y1,)])
        szh_lowerright = wx.BoxSizer(wx.HORIZONTAL)
        txt_lowerright = wx.StaticText(self, label="LowerRight Corner: ")
        szv_x2y2 = wx.BoxSizer(wx.VERTICAL)
        szh_x2 = wx.BoxSizer(wx.HORIZONTAL)
        txt_x2 = wx.StaticText(self, label="Shift X2 by: ")
        self.txtctrl_x2del = wx.TextCtrl(self, value='0')
        szh_x2.AddMany([(txt_x2,), (self.txtctrl_x2del,)])
        szh_y2 = wx.BoxSizer(wx.HORIZONTAL)
        txt_y2 = wx.StaticText(self, label="Shift Y2 by: ")
        self.txtctrl_y2del = wx.TextCtrl(self, value='0')
        szh_y2.AddMany([(txt_y2,), (self.txtctrl_y2del,)])
        szv_x2y2.AddMany([(szh_x2,), (szh_y2,)])
        szh_lowerright.AddMany([(txt_lowerright,), (szv_x2y2,)])
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        btn_ok = wx.Button(self, label="Resize All Targets")
        btn_ok.Bind(wx.EVT_BUTTON, self.onButton_ok)
        btn_cancel = wx.Button(self, label="Cancel")
        btn_cancel.Bind(wx.EVT_BUTTON, lambda e: self.EndModal(wx.ID_CANCEL))
        btn_sizer.AddMany([(btn_ok,), ((30, 0),), (btn_cancel,)])
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(txt_inst)
        self.sizer.Add((0, 15))
        self.sizer.Add(txt_boxsize_old, flag=wx.ALIGN_CENTER)
        self.sizer.Add((0, 15))
        self.sizer.AddMany([(szh_upperleft,), (szh_lowerright,)])
        self.sizer.Add((0, 15))
        self.sizer.Add(btn_sizer, flag=wx.ALIGN_CENTER)
        self.SetSizer(self.sizer)
        self.Layout()
    def onButton_ok(self, evt):
        """ Validates the four delta fields; on success stores them and
        ends the dialog with wx.ID_OK, otherwise shows an alert and keeps
        the dialog open. """
        def alert_bad_number():
            # BUGFIX: this nested helper was declared as
            # alert_bad_number(self) but invoked with no arguments, raising
            # a TypeError instead of showing the alert on invalid input.
            # It now closes over the method's 'self'.
            wx.MessageDialog(self, style=wx.OK,
                             message="Must enter valid integer values for each field.").ShowModal()
        try:
            self.x1_del = int(self.txtctrl_x1del.GetValue())
            self.y1_del = int(self.txtctrl_y1del.GetValue())
            self.x2_del = int(self.txtctrl_x2del.GetValue())
            self.y2_del = int(self.txtctrl_y2del.GetValue())
        except ValueError:
            alert_bad_number()
            return
        self.EndModal(wx.ID_OK)
class OptionsDialog(wx.Dialog):
    """ Modal dialog for advanced template-matching options: match
    sensitivity, ballot/target smoothing window sizes, and the matching
    mode (all images vs. only images at/after the current group). On
    Apply, the validated values are exposed as self.tm_param (float),
    self.win_ballot / self.win_target ((int, int)) and self.tm_mode, and
    the dialog ends with OptionsDialog.ID_APPLY.
    """
    ID_APPLY = 42
    def __init__(self, parent, *args, **kwargs):
        wx.Dialog.__init__(self, parent, title="Options.", *args, **kwargs)
        self.parent = parent
        # Output values, populated by onButton_apply (None until applied).
        self.tm_param = None
        self.win_ballot = None
        self.win_target = None
        txt0 = wx.StaticText(self, label="Options for Template Matching.")
        tm_sizer = wx.BoxSizer(wx.HORIZONTAL)
        txt1 = wx.StaticText(self, label="Template Matching sensitivity: ")
        _val = str(self.parent.parent.tm_param)
        # FIX(maintainability): the TextCtrl was previously stored as
        # self.tm_param, which onButton_apply then clobbered with a float.
        # Give the widget its own name, matching ResizeTargetsDialog.
        self.txtctrl_tm_param = wx.TextCtrl(self, value=_val)
        tm_sizer.AddMany([(txt1,), (self.txtctrl_tm_param,)])
        txt00 = wx.StaticText(self, label="Ballot Smoothing parameters (must be odd-integers).")
        txt01 = wx.StaticText(self, label="X-window size: ")
        txt02 = wx.StaticText(self, label="Y-window size: ")
        _val = str(self.parent.parent.win_ballot[0])
        self.xwin_ballot = wx.TextCtrl(self, value=_val)
        _val = str(self.parent.parent.win_ballot[1])
        self.ywin_ballot = wx.TextCtrl(self, value=_val)
        sizer00 = wx.BoxSizer(wx.HORIZONTAL)
        sizer00.AddMany([(txt01,), (self.xwin_ballot,)])
        sizer01 = wx.BoxSizer(wx.HORIZONTAL)
        sizer01.AddMany([(txt02,), (self.ywin_ballot,)])
        sizer0 = wx.BoxSizer(wx.VERTICAL)
        sizer0.AddMany([(txt00,), (sizer00,), (sizer01,)])
        txt10 = wx.StaticText(self, label="Target Smoothing parameters (must be odd-integers)")
        txt11 = wx.StaticText(self, label="X-window size: ")
        txt12 = wx.StaticText(self, label="Y-window size: ")
        _val = str(self.parent.parent.win_target[0])
        self.xwin_target = wx.TextCtrl(self, value=_val)
        _val = str(self.parent.parent.win_target[1])
        self.ywin_target = wx.TextCtrl(self, value=_val)
        sizer10 = wx.BoxSizer(wx.HORIZONTAL)
        sizer10.AddMany([(txt11,), (self.xwin_target,)])
        sizer11 = wx.BoxSizer(wx.HORIZONTAL)
        sizer11.AddMany([(txt12,), (self.ywin_target,)])
        sizer1 = wx.BoxSizer(wx.VERTICAL)
        sizer1.AddMany([(txt10,), (sizer10,), (sizer11,)])
        txt_tm_mode = wx.StaticText(self, label="Template Matching Mode")
        self.radio_tm_mode_all = wx.RadioButton(self, label="Template match on all images",
                                                style=wx.RB_GROUP)
        self.radio_tm_mode_post = wx.RadioButton(self, label="Template match only on images \
after (and including) the currently-displayed group.")
        if self.GetParent().GetParent().tm_mode == SelectTargetsPanel.TM_MODE_ALL:
            self.radio_tm_mode_all.SetValue(True)
        elif self.GetParent().GetParent().tm_mode == SelectTargetsPanel.TM_MODE_POST:
            self.radio_tm_mode_post.SetValue(True)
        sizer_tm_mode = wx.BoxSizer(wx.VERTICAL)
        sizer_tm_mode.AddMany([(txt_tm_mode,), (self.radio_tm_mode_all,),
                               (self.radio_tm_mode_post,)])
        btn_apply = wx.Button(self, label="Apply")
        btn_apply.Bind(wx.EVT_BUTTON, self.onButton_apply)
        btn_cancel = wx.Button(self, label="Cancel")
        btn_cancel.Bind(wx.EVT_BUTTON, self.onButton_cancel)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        btn_sizer.AddMany([(btn_apply,), (btn_cancel,)])
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(txt0, flag=wx.ALIGN_CENTER)
        sizer.AddMany([(tm_sizer,), (sizer0,), (sizer1,), (sizer_tm_mode,), (btn_sizer, 0, wx.ALIGN_CENTER)])
        self.SetSizer(sizer)
        self.Fit()
    def onButton_apply(self, evt):
        """ Validates + stores the option values, then ends the dialog with
        ID_APPLY. ROBUSTNESS: malformed input previously raised an uncaught
        ValueError; now an alert is shown and the dialog stays open. """
        try:
            self.tm_param = float(self.txtctrl_tm_param.GetValue())
            self.win_ballot = (int(self.xwin_ballot.GetValue()), int(self.ywin_ballot.GetValue()))
            self.win_target = (int(self.xwin_target.GetValue()), int(self.ywin_target.GetValue()))
        except ValueError:
            wx.MessageDialog(self, style=wx.OK,
                             message="Must enter valid numeric values for each field.").ShowModal()
            return
        if self.radio_tm_mode_all.GetValue():
            self.tm_mode = SelectTargetsPanel.TM_MODE_ALL
        else:
            self.tm_mode = SelectTargetsPanel.TM_MODE_POST
        self.EndModal(OptionsDialog.ID_APPLY)
    def onButton_cancel(self, evt):
        """ Dismisses the dialog without storing anything. """
        self.EndModal(wx.ID_CANCEL)
class ImagePanel(ScrolledPanel):
    """ Basic widget class that display one image out of N image paths.
    Also comes with a 'Next' and 'Previous' button. Extend me to add
    more functionality (i.e. mouse-related events).
    """
    def __init__(self, parent, *args, **kwargs):
        ScrolledPanel.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # self.img := a WxImage
        self.img = None
        # self.imgbitmap := A WxBitmap
        self.imgbitmap = None
        # self.npimg := A Numpy-version of an untarnished-version of self.imgbitmap
        self.npimg = None
        # self.scale: Scaling factor used to display self.IMGBITMAP
        self.scale = 1.0
        # If True, a signal to completely-redraw the original image
        self._imgredraw = False
        self._setup_ui()
        self._setup_evts()
    def _setup_ui(self):
        # Single vertical sizer; set_scale() later adds a spacer matching
        # the scaled bitmap's size so the scrollbars track the image.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
    def _setup_evts(self):
        # Paint + mouse events; subclasses extend these bindings.
        self.Bind(wx.EVT_PAINT, self.onPaint)
        self.Bind(wx.EVT_LEFT_DOWN, self.onLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.onLeftUp)
        self.Bind(wx.EVT_MOTION, self.onMotion)
        self.Bind(wx.EVT_CHILD_FOCUS, self.onChildFocus)
    def set_image(self, img, size=None):
        """ Updates internal data-structures to allow viewing a new input
        IMG. If SIZE is given (width, height), then we will scale image
        to match SIZE, maintaining aspect ratio.
        """
        self.img = img
        # Derive the scale factor from the requested width; otherwise keep
        # the current zoom level.
        c = size[0] / float(self.img.GetWidth()) if size else self.scale
        self.set_scale(c)
    def set_scale(self, scale):
        """ Changes scale, i.e. to acommodate zoom in/out. Mutates the
        self.IMGBITMAP.
        Input:
            float scale: Smaller values -> zoomed out images.
        """
        self.scale = scale
        w, h = self.img.GetWidth(), self.img.GetHeight()
        new_w, new_h = int(round(w*scale)), int(round(h*scale))
        self.imgbitmap = img_to_wxbitmap(self.img, (new_w, new_h))
        self.npimg = wxBitmap2np_v2(self.imgbitmap, is_rgb=True)
        # Replace the size-tracking spacer with one matching the new bitmap
        # (Detach(0) is a harmless no-op on the very first call).
        self.sizer.Detach(0)
        self.sizer.Add(self.imgbitmap.GetSize())
        self.SetupScrolling()
        self.Refresh()
    def zoomin(self, amt=0.1):
        # Increase the display scale by AMT.
        self.set_scale(self.scale + amt)
    def zoomout(self, amt=0.1):
        # Decrease the display scale by AMT.
        self.set_scale(self.scale - amt)
    def client_to_imgcoord(self, x, y):
        """ Transforms client (widget) coordinates to the Image
        coordinate system -- i.e. accounts for image scaling.
        Input:
            int (x,y): Client (UI) Coordinates.
        Output:
            int (X,Y), image coordinates.
        """
        return (int(round(x/self.scale)), int(round(y/self.scale)))
    def c2img(self, x, y):
        """ Convenience method to self.CLIENT_TO_IMGCOORD. """
        return self.client_to_imgcoord(x,y)
    def img_to_clientcoord(self, x, y):
        """ Transforms Image coords to client (widget) coords -- i.e.
        accounts for image scaling.
        Input:
            int (X,Y): Image coordinates.
        Output:
            int (x,y): Client (UI) coordinates.
        """
        return (int(round(x*self.scale)), int(round(y*self.scale)))
    def img2c(self, x, y):
        # Convenience alias for self.img_to_clientcoord.
        return self.img_to_clientcoord(x,y)
    def force_new_img_redraw(self):
        """ Forces this widget to completely-redraw self.IMG, even if
        self.imgbitmap contains modifications.
        """
        self._imgredraw = True
    def draw_image(self, dc):
        # Draws self.imgbitmap onto DC, first regenerating the bitmap from
        # the pristine self.img if a full redraw was requested.
        if not self.imgbitmap:
            return
        if self._imgredraw:
            # Draw the 'virgin' self.img
            self._imgredraw = False
            w, h = self.img.GetWidth(), self.img.GetHeight()
            new_w, new_h = int(round(w*self.scale)), int(round(h*self.scale))
            self.imgbitmap = img_to_wxbitmap(self.img, (new_w, new_h))
        dc.DrawBitmap(self.imgbitmap, 0, 0)
        return dc
    def onPaint(self, evt):
        # Use a BufferedPaintDC only when the platform doesn't already
        # double-buffer, to avoid flicker without double work.
        if self.IsDoubleBuffered():
            dc = wx.PaintDC(self)
        else:
            dc = wx.BufferedPaintDC(self)
        self.PrepareDC(dc)
        self.draw_image(dc)
        return dc
    def onLeftDown(self, evt):
        # Base class computes the unscrolled position (unused here) and
        # lets subclasses handle the event.
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        evt.Skip()
    def onMotion(self, evt):
        # No-op in the base class; subclasses override.
        evt.Skip()
    def onChildFocus(self, evt):
        # If I don't override this child focus event, then wx will
        # reset the scrollbars at extremely annoying times. Weird.
        # For inspiration, see:
        #    http://wxpython-users.1045709.n5.nabble.com/ScrolledPanel-mouse-click-resets-scrollbars-td2335368.html
        pass
class BoxDrawPanel(ImagePanel):
""" A widget that allows a user to draw boxes on a displayed image,
and each image remembers its list of boxes.
"""
""" Mouse Mouse:
M_CREATE: Create a box on LeftDown.
M_IDLE: Allow user to resize/move/select(multiple) boxes.
"""
M_CREATE = 0
M_IDLE = 1
    def __init__(self, parent, *args, **kwargs):
        """ Initializes box-editing state; starts in M_CREATE mode with the
        generic Box type. """
        ImagePanel.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # self.boxes := [Box_i, ...]
        self.boxes = []
        # self.sel_boxes := [Box_i, ...]
        self.sel_boxes = []
        # Vars to keep track of box-being-created
        self.isCreate = False
        self.box_create = None
        # Vars for resizing behavior
        self.isResize = False
        self.box_resize = None
        self.resize_orient = None # 'N', 'NE', etc...
        # self.isDragging : Is the user moving-mouse while mouse-left-down
        # is held down?
        self.isDragging = False
        self.mode_m = BoxDrawPanel.M_CREATE
        # BOXTYPE: Class of the Box to create
        self.boxtype = Box
        # _x,_y keep track of last mouse position
        self._x, self._y = 0, 0
    def _setup_evts(self):
        # Extends the base bindings with keyboard handling.
        ImagePanel._setup_evts(self)
        self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown)
def set_mode_m(self, mode):
""" Sets my MouseMode. """
self.mode_m = mode
self.update_cursor()
def update_cursor(self, force_cursor=None):
""" Updates the mouse cursor depending on the current state.
Returns the wx.Cursor that it decides to set.
To force the mouse cursor, pass in a wx.Cursor as FORCE_CURSOR.
"""
if force_cursor != None:
self.SetCursor(force_cursor)
return force_cursor
if self.mode_m == BoxDrawPanel.M_CREATE:
cursor = wx.StockCursor(wx.CURSOR_CROSS)
elif self.mode_m == BoxDrawPanel.M_IDLE:
if self.isResize:
cursor = wx.StockCursor(wx.CURSOR_SIZING)
elif self.get_box_to_resize(self._x, self._y)[0]:
cursor = wx.StockCursor(wx.CURSOR_SIZING)
elif self.get_boxes_within(self._x, self._y, mode='any'):
cursor = wx.StockCursor(wx.CURSOR_HAND)
else:
cursor = wx.StockCursor(wx.CURSOR_ARROW)
else:
cursor = wx.StockCursor(wx.CURSOR_ARROW)
if self.GetCursor() != cursor:
self.SetCursor(cursor)
return cursor
def set_boxes(self, boxes):
self.boxes = boxes
self.dirty_all_boxes()
def startBox(self, x, y, boxtype=None):
""" Starts creating a box at (x,y). """
if boxtype == None:
boxtype = self.boxtype
print "...Creating Box: {0}, {1}".format((x,y), boxtype)
self.isCreate = True
self.box_create = boxtype(x, y, x+1, y+1)
# Map Box coords to Image coords, not UI coords.
self.box_create.scale(1 / self.scale)
def finishBox(self, x, y):
""" Finishes box creation at (x,y). """
print "...Finished Creating Box:", (x,y)
self.isCreate = False
# 0.) Canonicalize box coords s.t. order is: UpperLeft, LowerRight.
self.box_create.canonicalize()
toreturn = self.box_create
self.box_create = None
self.dirty_all_boxes()
return toreturn
def set_scale(self, scale, *args, **kwargs):
self.dirty_all_boxes()
return ImagePanel.set_scale(self, scale, *args, **kwargs)
def dirty_all_boxes(self):
""" Signal to unconditionally-redraw all boxes. """
for box in self.boxes:
box._dirty = True
def select_boxes(self, *boxes):
for box in boxes:
box.is_sel = True
self.sel_boxes.extend(boxes)
self.dirty_all_boxes()
def clear_selected(self):
""" Un-selects all currently-selected boxes, if any. """
for box in self.sel_boxes:
box.is_sel = False
self.sel_boxes = []
def delete_boxes(self, *boxes):
""" Deletes the boxes in BOXES. """
for box in boxes:
self.boxes.remove(box)
if box in self.sel_boxes:
self.sel_boxes.remove(box)
self.dirty_all_boxes()
if not self.boxes:
# Force a redraw of the image - otherwise, the last-removed
# boxes don't go away.
self.force_new_img_redraw()
self.Refresh()
def get_boxes_within(self, x, y, C=8.0, mode='any'):
""" Returns a list of Boxes that are at most C units within
the position (x,y), sorted by distance (increasing).
Input:
int (x,y):
Output:
list MATCHES, of the form:
[(obj Box_i, float dist_i), ...]
"""
results = []
for box in self.boxes:
if mode == 'N':
x1, y1 = self.img2c((box.x1 + (box.width/2)), box.y1)
elif mode == 'NE':
x1, y1 = self.img2c(box.x1 + box.width, box.y1)
elif mode == 'E':
x1, y1 = self.img2c(box.x1 + box.width, box.y1 + (box.height/2))
elif mode == 'SE':
x1, y1 = self.img2c(box.x1 + box.width, box.y1 + box.height)
elif mode == 'S':
x1, y1 = self.img2c(box.x1 + (box.width/2), box.y1 + box.height)
elif mode == 'SW':
x1, y1 = self.img2c(box.x1, box.y1 + box.height)
elif mode == 'W':
x1, y1 = self.img2c(box.x1, box.y1 + (box.heigth/2))
elif mode == 'NW':
x1, y1 = self.img2c(box.x1, box.y1)
else:
# Default to 'any'
x1, y1 = self.img2c(box.x1, box.y1)
x2, y2 = self.img2c(box.x2, box.y2)
if (x > x1 and x < x2 and
y > y1 and y < y2):
results.append((box, None))
continue
dist = distL2(x1, y1, x, y)
if dist <= C:
results.append((box, dist))
if mode == 'any':
return results
results = sorted(results, key=lambda t: t[1])
return results
def get_box_to_resize(self, x, y, C=5.0):
""" Returns a Box instance if the current mouse location is
close enough to a resize location, or None o.w.
Input:
int X, Y: Mouse location.
int C: How close the mouse has to be to a box corner.
Output:
(Box, str orientation) or (None, None).
"""
results = [] # [[orient, box, dist], ...]
for box in self.boxes:
locs = {'N': self.img2c(box.x1 + (box.width/2), box.y1),
'NE': self.img2c(box.x1 + box.width, box.y1),
'E': self.img2c(box.x1 + box.width, box.y1 + (box.height/2)),
'SE': self.img2c(box.x1 + box.width, box.y1 + box.height),
'S': self.img2c(box.x1 + (box.width/2),box.y1 + box.height),
'SW': self.img2c(box.x1, box.y1 + box.height),
'W': self.img2c(box.x1, box.y1 + (box.height/2)),
'NW': self.img2c(box.x1, box.y1)}
for (orient, (x1,y1)) in locs.iteritems():
dist = distL2(x1,y1,x,y)
if dist <= C:
results.append((orient, box, dist))
if not results:
return None, None
results = sorted(results, key=lambda t: t[2])
return results[0][1], results[0][0]
def onLeftDown(self, evt):
self.SetFocus()
x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
x_img, y_img = self.c2img(x,y)
w_img, h_img = self.img.GetSize()
if x_img >= (w_img-1) or y_img >= (h_img-1):
return
box_resize, orient = self.get_box_to_resize(x, y)
if self.mode_m == BoxDrawPanel.M_IDLE and box_resize:
self.isResize = True
self.box_resize = box_resize
self.resize_orient = orient
self.Refresh()
self.update_cursor()
return
if self.mode_m == BoxDrawPanel.M_CREATE:
print "...Creating Target box."
self.clear_selected()
self.startBox(x, y)
self.update_cursor()
elif self.mode_m == BoxDrawPanel.M_IDLE:
boxes = self.get_boxes_within(x, y, mode='any')
if boxes:
b = boxes[0][0]
if b not in self.sel_boxes:
self.clear_selected()
self.select_boxes(boxes[0][0])
else:
self.clear_selected()
self.startBox(x, y, SelectionBox)
self.update_cursor()
def onLeftUp(self, evt):
x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
self.isDragging = False
if self.isResize:
self.box_resize.canonicalize()
self.box_resize = None
self.isResize = False
self.dirty_all_boxes()
self.update_cursor()
if self.mode_m == BoxDrawPanel.M_CREATE and self.isCreate:
box = self.finishBox(x, y)
self.boxes.append(box)
self.update_cursor()
elif self.mode_m == BoxDrawPanel.M_IDLE and self.isCreate:
box = self.finishBox(x, y)
boxes = get_boxes_within(self.boxes, box)
print "...Selecting {0} boxes.".format(len(boxes))
self.select_boxes(*boxes)
self.update_cursor()
self.Refresh()
def onMotion(self, evt):
x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
xdel, ydel = x - self._x, y - self._y
self._x, self._y = x, y
if self.isResize and evt.Dragging():
xdel_img, ydel_img = self.c2img(xdel, ydel)
if 'N' in self.resize_orient:
self.box_resize.y1 += ydel_img
if 'E' in self.resize_orient:
self.box_resize.x2 += xdel_img
if 'S' in self.resize_orient:
self.box_resize.y2 += ydel_img
if 'W' in self.resize_orient:
self.box_resize.x1 += xdel_img
self.update_cursor()
self.Refresh()
return
if self.isCreate:
self.box_create.x2, self.box_create.y2 = self.c2img(x, y)
self.Refresh()
elif self.sel_boxes and evt.Dragging():
self.isDragging = True
xdel_img, ydel_img = self.c2img(xdel, ydel)
for box in self.sel_boxes:
box.x1 += xdel_img
box.y1 += ydel_img
box.x2 += xdel_img
box.y2 += ydel_img
# Surprisingly, forcing a redraw for each mouse mvmt. is
# a very fast operation! Very convenient.
self.dirty_all_boxes()
self.Refresh()
self.update_cursor()
def onKeyDown(self, evt):
keycode = evt.GetKeyCode()
if (keycode == wx.WXK_DELETE or keycode == wx.WXK_BACK):
self.delete_boxes(*self.sel_boxes)
self.Refresh()
elif ((keycode in (wx.WXK_UP, wx.WXK_DOWN, wx.WXK_LEFT, wx.WXK_RIGHT))
and self.sel_boxes):
xdel, ydel = 0, 0
if keycode == wx.WXK_UP:
ydel -= 1
elif keycode == wx.WXK_DOWN:
ydel += 1
elif keycode == wx.WXK_LEFT:
xdel -= 1
else:
xdel += 1
xdel_img, ydel_img = self.c2img(xdel, ydel)
for box in self.sel_boxes:
box.x1 += xdel_img
box.y1 += ydel_img
box.x2 += xdel_img
box.y2 += ydel_img
self.dirty_all_boxes()
self.Refresh()
def onPaint(self, evt):
total_t = time.time()
dc = ImagePanel.onPaint(self, evt)
if self.isResize:
dboxes = [b for b in self.boxes if b != self.box_resize]
else:
dboxes = self.boxes
t = time.time()
self.drawBoxes(dboxes, dc)
dur = time.time() - t
if self.isCreate:
# Draw Box-Being-Created
can_box = self.box_create.copy().canonicalize()
self.drawBox(can_box, dc)
if self.isResize:
resize_box_can = self.box_resize.copy().canonicalize()
self.drawBox(resize_box_can, dc)
total_dur = time.time() - total_t
#print "Total Time: {0:.5f}s (drawBoxes: {1:.5f}s, {2:.4f}%)".format(total_dur, dur, 100*float(dur / total_dur))
return dc
def drawBoxes(self, boxes, dc):
boxes_todo = [b for b in boxes if b._dirty]
if not boxes_todo:
return
# First draw contests, then targets on-top.
contest_boxes, target_boxes = [], []
for box in boxes_todo:
if isinstance(box, ContestBox):
contest_boxes.append(box)
else:
target_boxes.append(box)
npimg_cpy = self.npimg.copy()
def draw_border(npimg, box, thickness=2, color=(0, 0, 0)):
T = thickness
clr = np.array(color)
x1,y1,x2,y2 = box.x1, box.y1, box.x2, box.y2
x1,y1 = self.img2c(x1,y1)
x2,y2 = self.img2c(x2,y2)
x1 = max(x1, 0)
y1 = max(y1, 0)
# Top
npimg[y1:y1+T, x1:x2] *= 0.2
npimg[y1:y1+T, x1:x2] += clr*0.8
# Bottom
npimg[max(0, (y2-T)):y2, x1:x2] *= 0.2
npimg[max(0, (y2-T)):y2, x1:x2] += clr*0.8
# Left
npimg[y1:y2, x1:(x1+T)] *= 0.2
npimg[y1:y2, x1:(x1+T)] += clr*0.8
# Right
npimg[y1:y2, max(0, (x2-T)):x2] *= 0.2
npimg[y1:y2, max(0, (x2-T)):x2] += clr*0.8
return npimg
# Handle legacy-ContestBoxes that don't have the .colour property
# TODO: This can be eventually removed. This is more for internal
# purposes, to not crash-and-burn on projects created before
# this feature was pushed to the repo. Harmless to leave in.
if contest_boxes and not hasattr(contest_boxes[0], 'colour'):
recolour_contests(contest_boxes)
for i, contestbox in enumerate(contest_boxes):
clr, thickness = contestbox.get_draw_opts()
draw_border(npimg_cpy, contestbox, thickness=thickness, color=(0, 0, 0))
if contestbox.is_sel:
transparent_color = np.array(contestbox.shading_selected_clr) if contestbox.shading_selected_clr else None
else:
transparent_color = np.array(contestbox.colour) if contestbox.colour else None
if transparent_color != None:
t = time.time()
_x1, _y1 = self.img2c(contestbox.x1, contestbox.y1)
_x2, _y2 = self.img2c(contestbox.x2, contestbox.y2)
np_rect = npimg_cpy[max(0, _y1):_y2, max(0, _x1):_x2]
np_rect[:,:] *= 0.7
np_rect[:,:] += transparent_color*0.3
dur_wxbmp2np = time.time() - t
contestbox._dirty = False
for targetbox in target_boxes:
clr, thickness = targetbox.get_draw_opts()
draw_border(npimg_cpy, targetbox, thickness=thickness, color=(0, 0, 0))
if targetbox.is_sel:
transparent_color = np.array(targetbox.shading_selected_clr) if targetbox.shading_selected_clr else None
else:
transparent_color = np.array(targetbox.shading_clr) if targetbox.shading_clr else None
if transparent_color != None:
t = time.time()
_x1, _y1 = self.img2c(targetbox.x1, targetbox.y1)
_x2, _y2 = self.img2c(targetbox.x2, targetbox.y2)
np_rect = npimg_cpy[max(0, _y1):_y2, max(0, _x1):_x2]
np_rect[:,:] *= 0.7
np_rect[:,:] += transparent_color*0.3
dur_wxbmp2np = time.time() - t
targetbox._dirty = False
h, w = npimg_cpy.shape[:2]
t = time.time()
_image = wx.EmptyImage(w, h)
_image.SetData(npimg_cpy.tostring())
bitmap = _image.ConvertToBitmap()
dur_img2bmp = time.time() - t
self.imgbitmap = bitmap
self.Refresh()
def drawBox(self, box, dc):
""" Draws BOX onto DC.
Note: This draws directly to the PaintDC - this should only be done
for user-driven 'dynamic' behavior (such as resizing a box), as
drawing to the DC is much slower than just blitting everything to
the self.imgbitmap.
self.drawBoxes does all heavy-lifting box-related drawing in a single
step.
Input:
list box: (x1, y1, x2, y2)
wxDC DC:
"""
total_t = time.time()
t = time.time()
dc.SetBrush(wx.TRANSPARENT_BRUSH)
drawops = box.get_draw_opts()
dc.SetPen(wx.Pen(*drawops))
w = int(round(abs(box.x2 - box.x1) * self.scale))
h = int(round(abs(box.y2 - box.y1) * self.scale))
client_x, client_y = self.img2c(box.x1, box.y1)
dc.DrawRectangle(client_x, client_y, w, h)
dur_drawrect = time.time() - t
transparent_color = np.array([200, 0, 0]) if isinstance(box, TargetBox) else np.array([0, 0, 200])
if self.imgbitmap and type(box) in (TargetBox, ContestBox):
t = time.time()
_x1, _y1 = self.img2c(box.x1, box.y1)
_x2, _y2 = self.img2c(box.x2, box.y2)
_x1, _y1 = max(0, _x1), max(0, _y1)
_x2, _y2 = min(self.imgbitmap.Width-1, _x2), min(self.imgbitmap.Height-1, _y2)
if (_x2 - _x1) <= 1 or (_y2 - _y1) <= 1:
return
sub_bitmap = self.imgbitmap.GetSubBitmap((_x1, _y1, _x2-_x1, _y2-_y1))
# I don't think I need to do a .copy() here...
#np_rect = wxBitmap2np_v2(sub_bitmap, is_rgb=True).copy()
np_rect = wxBitmap2np_v2(sub_bitmap, is_rgb=True)
np_rect[:,:] *= 0.7
np_rect[:,:] += transparent_color*0.3
dur_wxbmp2np = time.time() - t
_h, _w, channels = np_rect.shape
t = time.time()
_image = wx.EmptyImage(_w, _h)
_image.SetData(np_rect.tostring())
bitmap = _image.ConvertToBitmap()
dur_img2bmp = time.time() - t
t = time.time()
memdc = wx.MemoryDC()
memdc.SelectObject(bitmap)
dc.Blit(client_x, client_y, _w, _h, memdc, 0, 0)
memdc.SelectObject(wx.NullBitmap)
dur_memdcBlit = time.time() - t
t = time.time()
if isinstance(box, TargetBox) or isinstance(box, ContestBox):
# Draw the 'grabber' circles
CIRCLE_RAD = 2
dc.SetPen(wx.Pen("Black", 1))
dc.SetBrush(wx.Brush("White"))
dc.DrawCircle(client_x, client_y, CIRCLE_RAD) # Upper-Left
dc.DrawCircle(client_x+(w/2), client_y, CIRCLE_RAD) # Top
dc.DrawCircle(client_x+w, client_y, CIRCLE_RAD) # Upper-Right
dc.DrawCircle(client_x, client_y+(h/2), CIRCLE_RAD) # Left
dc.DrawCircle(client_x+w, client_y+(h/2), CIRCLE_RAD) # Right
dc.DrawCircle(client_x, client_y+h, CIRCLE_RAD) # Lower-Left
dc.DrawCircle(client_x+(w/2), client_y+h, CIRCLE_RAD) # Bottom
dc.DrawCircle(client_x+w, client_y+h, CIRCLE_RAD) # Lower-Right
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dur_drawgrabbers = time.time() - t
total_dur = time.time() - total_t
'''
print "== drawBox Total {0:.6f}s (wxbmp2np: {1:.6f}s {2:.3f}%) \
(_img2bmp: {3:.6f}s {4:.3f}%) (memdc.blit {5:.3f}s {6:.3f}%) \
(drawrect: {7:.6f}s {8:.3f}%) (drawgrabbers {9:.6f} {10:.3f}%)".format(total_dur,
dur_wxbmp2np,
100*(dur_wxbmp2np / total_dur),
dur_img2bmp,
100*(dur_img2bmp / total_dur),
dur_memdcBlit,
100*(dur_memdcBlit / total_dur),
dur_drawrect,
100*(dur_drawrect / total_dur),
dur_drawgrabbers,
100*(dur_drawgrabbers / total_dur))
'''
class TemplateMatchDrawPanel(BoxDrawPanel):
    """ Like a BoxDrawPanel, but when you create a Target box, it runs
    Template Matching to try to find similar instances.
    """
    def __init__(self, parent, tempmatch_fn, *args, **kwargs):
        """
        Input:
            obj PARENT: Parent widget.
            fn TEMPMATCH_FN: Called as tempmatch_fn(box, imgpil, patch=...)
                to kick off template matching for a newly-drawn target.
        """
        BoxDrawPanel.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        self.tempmatch_fn = tempmatch_fn
    def onLeftUp(self, evt):
        """ Finishes an in-progress box. For TargetBoxes, crops out the
        drawn patch and runs template matching on it (prompting the user
        for a region-of-interest on the very first target). Warns about
        suspiciously-small boxes first.
        """
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        # Minimum box side length (image pixels) before warning the user.
        MIN_LEN = 13
        if self.mode_m == BoxDrawPanel.M_CREATE and self.isCreate:
            x_img, y_img = self.c2img(x,y)
            if (abs(self.box_create.x1 - x_img) <= MIN_LEN) or (abs(self.box_create.y1 - y_img) <= MIN_LEN):
                print "...User drew a too-small box..."
                dlg = wx.MessageDialog(self, style=wx.YES_NO | wx.NO_DEFAULT,
                                       message="You drew a box that \
was a bit small. \n\n\
Would you still like to use this box? If so, then choose the 'Yes' button. \n\n\
Otherwise, if this box was created by mistake, and you would like to \
create a better box around a voting target, then choose the 'No' button.")
                self.Disable()
                status = dlg.ShowModal()
                self.Enable()
                if status == wx.ID_NO:
                    # Abandon the in-progress box entirely.
                    self.isCreate = False
                    self.box_create = None
                    self.Refresh()
                    return
            box = self.finishBox(x, y)
            if isinstance(box, TargetBox):
                # Crop the drawn patch (grayscale), then tighten it around
                # the target before template matching.
                imgpil = util_gui.imageToPil(self.img)
                imgpil = imgpil.convert('L')
                targetimg_prefit = imgpil.crop((box.x1, box.y1, box.x2, box.y2))
                targetimg_crop = util_gui.fit_image(targetimg_prefit, padx=2, pady=2)
                if self.GetParent().boxsize == None:
                    # First time user drew a box
                    print "(SelectTargets) First target selected."
                    targetimg_crop_np = np.array(targetimg_crop)
                    dlg = DrawROIDialog(self, targetimg_crop_np)
                    status = dlg.ShowModal()
                    # NOTE(review): DrawROIDialog ends its modal loop with
                    # wx.CANCEL (not wx.ID_CANCEL), so this matches it.
                    if status == wx.CANCEL:
                        return
                    # tuple ROI: (int x1, y1, x2, y2)
                    roi = dlg.roi
                    print "(SelectTargets) Set target_roi from {0} to: {1}".format(self.GetParent().target_roi, roi)
                    self.GetParent().set_target_roi(roi)
                self.tempmatch_fn(box, imgpil, patch=targetimg_crop)
            elif isinstance(box, ContestBox):
                self.boxes.append(box)
            self.Refresh()
        else:
            BoxDrawPanel.onLeftUp(self, evt)
class TargetFindPanel(TemplateMatchDrawPanel):
    """ A TemplateMatchDrawPanel with an extra mouse mode that force-adds
    a target at a fixed size, bypassing template matching.
    """
    # Extra mouse mode (see BoxDrawPanel.M_CREATE / M_IDLE).
    M_FORCEADD_TARGET = 3
    def finishBox(self, *args, **kwargs):
        """ Finishes box creation; if a ContestBox was created, re-runs the
        contest colouring so adjacent contests stay visually distinct.
        """
        toret = TemplateMatchDrawPanel.finishBox(self, *args, **kwargs)
        if isinstance(toret, ContestBox):
            recolour_contests([b for b in self.boxes if isinstance(b, ContestBox)]+[toret])
            self.dirty_all_boxes()
            self.Refresh()
        return toret
    def update_cursor(self, *args, **kwargs):
        """ Force a crosshair cursor in force-add mode; defer otherwise. """
        if self.mode_m == TargetFindPanel.M_FORCEADD_TARGET:
            cursor = wx.StockCursor(wx.CURSOR_CROSS)
            self.SetCursor(cursor)
            return cursor
        return TemplateMatchDrawPanel.update_cursor(self, *args, **kwargs)
    def onLeftDown(self, evt):
        """ In force-add mode, immediately start drawing a new target box;
        otherwise delegate to the template-matching behavior.
        """
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        x_img, y_img = self.c2img(x,y)
        w_img, h_img = self.img.GetSize()
        # Ignore clicks outside of the image.
        if x_img >= (w_img-1) or y_img >= (h_img-1):
            return
        if self.mode_m == self.M_FORCEADD_TARGET:
            print "...Creating Forced Target."
            self.clear_selected()
            self.startBox(x, y)
            self.Refresh()
            self.update_cursor()
        else:
            TemplateMatchDrawPanel.onLeftDown(self, evt)
    def onLeftUp(self, evt):
        """ In force-add mode, finish the box, snapping its size to the
        project-wide box size (set by the first-ever drawn box).
        """
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        # Restrict (x,y) to lie within the image
        w_img, h_img = self.img.GetSize()
        w_c, h_c = self.img2c(w_img-1, h_img-1)
        x = min(w_c, x)
        y = min(h_c, y)
        if self.mode_m == self.M_FORCEADD_TARGET and self.isCreate:
            # If this is the first-created box B, then make sure that
            # subsequent-created boxes match the dimensions of B
            box = self.finishBox(x, y)
            if self.GetParent().boxsize == None:
                self.GetParent().boxsize = (box.width, box.height)
            else:
                w, h = self.GetParent().boxsize
                box.x2 = box.x1 + w
                box.y2 = box.y1 + h
            self.boxes.append(box)
            self.Refresh()
            self.update_cursor()
        else:
            TemplateMatchDrawPanel.onLeftUp(self, evt)
class DrawROIDialog(wx.Dialog):
    """ A simple dialog that displays an image, and the user either:
        a.) Draws a sub-box within the image
    or:
        b.) Cancels the action.
    """
    def __init__(self, parent, npimg, roi=None, *args, **kwargs):
        """
        Input:
            obj PARENT: Parent widget.
            nparray NPIMG: Image to display (grayscale or RGB; converted
                to uint8 RGB for display).
            tuple ROI: Optional initial (x1, y1, x2, y2) region.
        """
        wx.Dialog.__init__(self, parent, title="Draw Target Region-of-Interest", size=(600, 400),
                           style=wx.RESIZE_BORDER | wx.CAPTION | wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX, *args, **kwargs)
        if npimg.dtype != 'uint8':
            npimg = npimg.astype('uint8')
        self.npimg = gray2rgb_np(npimg)
        h, w = npimg.shape[:2]
        self.wximg = wx.ImageFromBuffer(w, h, self.npimg)
        # tuple ROI: (int x1, y1, x2, y2)
        self.roi = None
        txt_inst = (textwrap.fill("Displayed is the selected region.", 75) + "\n" +
                    textwrap.fill("Please select the subregion of \
the voting target where voter marks are expected to be made.", 75))
        stxt_inst = wx.StaticText(self, label=txt_inst)
        self.boxdrawpanel = SimpleImagePanel(self, self.wximg)
        self.boxdrawpanel.box = roi
        self.boxdrawpanel.rescale(250)
        btn_ok = wx.Button(self, label="Use this region")
        btn_ok.Bind(wx.EVT_BUTTON, self.onButton_ok)
        btn_ok.Disable() # Can only click if the user draws a box.
        self.btn_ok = btn_ok
        btn_cancel = wx.Button(self, label="Cancel")
        btn_cancel.Bind(wx.EVT_BUTTON, self.onButton_cancel)
        btn_zoomin = wx.Button(self, label="Zoom In")
        btn_zoomin.Bind(wx.EVT_BUTTON, self.onButton_zoomin)
        btn_zoomout = wx.Button(self, label="Zoom Out")
        btn_zoomout.Bind(wx.EVT_BUTTON, self.onButton_zoomout)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        btn_sizer.AddMany([(btn_ok,), ((20,0),), (btn_cancel,),
                           ((40,0,),),
                           (btn_zoomin,), ((20,0),), (btn_zoomout,)])
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(stxt_inst, flag=wx.CENTER)
        self.sizer.Add(self.boxdrawpanel, proportion=1, flag=wx.EXPAND | wx.CENTER)
        self.sizer.Add(btn_sizer, flag=wx.CENTER)
        self.SetSizer(self.sizer)
        self.Layout()
    def onButton_ok(self, evt):
        """ Maps the drawn box from display coords back to image coords,
        clamps it to the image, and ends the modal loop with wx.OK.
        """
        if not self.roi:
            wx.MessageDialog(self, message="Please select a region.").ShowModal()
            return
        # Undo the display zoom so ROI is in original-image coordinates.
        self.roi = list(self.roi)
        self.roi[0] = max(self.roi[0] / self.boxdrawpanel.scale, 0)
        self.roi[1] = max(self.roi[1] / self.boxdrawpanel.scale, 0)
        self.roi[2] = min(self.roi[2] / self.boxdrawpanel.scale, self.npimg.shape[1])
        self.roi[3] = min(self.roi[3] / self.boxdrawpanel.scale, self.npimg.shape[0])
        self.roi = [int(round(coord)) for coord in self.roi]
        self.EndModal(wx.OK)
    def onButton_cancel(self, evt):
        # Discard any drawn region and signal cancellation to the caller.
        self.roi = None
        self.EndModal(wx.CANCEL)
    def onButton_zoomin(self, evt):
        self.boxdrawpanel.zoomin()
    def onButton_zoomout(self, evt):
        self.boxdrawpanel.zoomout()
class SimpleImagePanel(ScrolledPanel):
    """ A minimal scrolled image panel that lets the user draw a single
    box on a displayed image (used by DrawROIDialog for ROI selection).
    """
    def __init__(self, parent, wximg, *args, **kwargs):
        ScrolledPanel.__init__(self, parent, *args, **kwargs)
        # wximg: the currently-displayed (possibly rescaled) image.
        self.wximg = wximg
        # wximg_orig: the pristine, unscaled image (rescales start here).
        self.wximg_orig = wximg
        self.isCreating = False
        self.isMoving = False
        # list BOX_IP: [int x1, y1, x2, y2], the 'in-progress' box.
        self.box_ip = None
        # list BOX: [int ul_x, ul_y, lr_x, lr_y]
        self.box = None
        # scale: current display scale relative to wximg_orig.
        self.scale = 1.0
        self.Bind(wx.EVT_PAINT, self.onPaint)
        self.Bind(wx.EVT_LEFT_DOWN, self.onLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.onLeftUp)
        self.Bind(wx.EVT_MOTION, self.onMotion)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add((wximg.GetWidth(), wximg.GetHeight()))
        self.SetSizer(self.sizer)
        self.Layout()
        self.SetupScrolling()
    def zoomin(self, amt=0.25):
        """ Increases the display scale by AMT (fraction of original). """
        scale = self.scale + amt
        w_new = int(round(self.wximg_orig.GetWidth() * scale))
        self.rescale(w_new)
    def zoomout(self, amt=0.25):
        """ Decreases the display scale by AMT (fraction of original). """
        scale = self.scale - amt
        w_new = int(round(self.wximg_orig.GetWidth() * scale))
        self.rescale(w_new)
    def rescale(self, w_new):
        """ Rescale displayed image s.t. the image has width W_NEW.
        Maintains aspect ratio.
        """
        if self.wximg.GetWidth() == w_new:
            return
        npimg = util.wxImage2np(self.wximg_orig)
        h_new = int(round(npimg.shape[0] / (npimg.shape[1] / float(w_new))))
        if w_new <= 5 or h_new <= 5: # Don't downsize too much
            return
        self.scale = w_new / float(self.wximg_orig.GetWidth())
        npimg_resize = scipy.misc.imresize(npimg, (h_new, w_new), interp='bicubic')
        wximg_resize = wx.ImageFromBuffer(w_new, h_new, npimg_resize)
        w_old, h_old = self.wximg.GetWidth(), self.wximg.GetHeight()
        self.wximg = wximg_resize
        if self.box:
            # Rescale box
            fac = w_old / float(w_new)
            self.box = [int(round(coord / fac)) for coord in self.box]
        # Swap in the new display size so scrolling/layout stay correct.
        self.sizer.Detach(0)
        self.sizer.Add((w_new, h_new))
        self.Layout()
        self.SetupScrolling()
        self.Refresh()
    def onPaint(self, evt):
        """ Draws the image, then the finalized box (blue) and/or the
        in-progress box (red) on top of it.
        """
        if self.IsDoubleBuffered():
            dc = wx.PaintDC(self)
        else:
            dc = wx.BufferedPaintDC(self)
        self.PrepareDC(dc)
        dc.DrawBitmap(wx.BitmapFromImage(self.wximg), 0, 0)
        def draw_box(box, color="BLACK", thick=3):
            # Outline BOX on the current dc.
            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(wx.Pen(color, thick))
            x1, y1, x2, y2 = canonicalize_box(box)
            dc.DrawRectangle(x1, y1, abs(x2-x1), abs(y2-y1))
        if self.box:
            draw_box(self.box, color="BLUE", thick=3)
        if self.box_ip:
            draw_box(self.box_ip, color="RED", thick=3)
    def onLeftDown(self, evt):
        """ Starts drawing a new box (discarding any previous one). """
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        if self.box:
            self.box = None
        self.box_ip = [x, y, x+1, y+1]
        self.isCreating = True
        self.Refresh()
    def onLeftUp(self, evt):
        """ Finalizes the in-progress box (clamped to the image), stores it
        as the dialog's ROI, and enables the OK button.
        """
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        if not self.isCreating:
            return
        self.box_ip[2], self.box_ip[3] = max(min(x, self.wximg.GetWidth()), 0), max(min(y, self.wximg.GetHeight()), 0)
        self.box = canonicalize_box(self.box_ip)
        self.box_ip = None
        self.isCreating = False
        self.Refresh()
        self.GetParent().roi = self.box
        self.GetParent().btn_ok.Enable()
    def onMotion(self, evt):
        """ Live-updates the in-progress box while the mouse is down. """
        x, y = self.CalcUnscrolledPosition(evt.GetPositionTuple())
        if not self.isCreating:
            return
        self.box_ip[2], self.box_ip[3] = max(min(x, self.wximg.GetWidth()), 0), max(min(y, self.wximg.GetHeight()), 0)
        self.Refresh()
def gray2rgb_np(npimg):
    """ Convert a grayscale nparray image to an RGB nparray image.
    A 3-channel input is returned unchanged; otherwise the single channel
    is replicated into all three output channels (dtype preserved).
    """
    if npimg.ndim == 3:
        return npimg
    return np.repeat(npimg[:, :, np.newaxis], 3, axis=2)
class TM_Thread(threading.Thread):
    """ Background thread that runs template matching of a target patch
    against a set of ballot images, then delivers the results to CALLBACK
    on the wx main thread.
    """
    def __init__(self, queue, job_id, patch, img, imgpaths, tm_param,
                 win_ballot, win_target,
                 callback, *args, **kwargs):
        """
        Input:
            PATCH: An IplImage.
            obj QUEUE: Work queue (stored; not consumed in run()).
            JOB_ID: Identifier passed to the template-matching job.
            IMG: Source image the patch was drawn on.
            list IMGPATHS: Ballot image paths to search.
            float TM_PARAM: Template-matching threshold T.
            tuple WIN_BALLOT, WIN_TARGET: (xwin, ywin) search windows.
            fn CALLBACK: Called as callback(results, w, h) on the UI thread.
        """
        threading.Thread.__init__(self, *args, **kwargs)
        self.queue = queue
        self.job_id = job_id
        self.patch = patch
        self.img = img
        self.imgpaths = imgpaths
        self.tm_param = tm_param
        self.win_ballot = win_ballot
        self.win_target = win_target
        self.callback = callback
    def run(self):
        # Runs on the worker thread; only the final callback touches the UI.
        print "...running template matching..."
        t = time.time()
        #patch_str = self.patch.tostring()
        w, h = cv.GetSize(self.patch)
        # results: {str imgpath: [(x,y,score_i), ...]}
        #results = partask.do_partask(do_find_matches, self.imgpaths,
        #                             _args=(patch_str, w, h, self.tm_param, self.win_ballot),
        #                             combfn='dict', singleproc=False)
        xwinB, ywinB = self.win_ballot
        xwinT, ywinT = self.win_target
        # results: {str imgpath: [(x1,y1,x2,y2,score_i), ...]}
        # Note: self.patch is already smooth'd.
        results = tempmatch.get_tempmatches_par(self.patch, self.imgpaths,
                                                do_smooth=tempmatch.SMOOTH_IMG_BRD,
                                                T=self.tm_param, xwinA=xwinT, ywinA=ywinT,
                                                xwinI=xwinB, ywinI=ywinB,
                                                jobid=self.job_id)
        dur = time.time() - t
        print "...finished running template matching ({0} s).".format(dur)
        # Marshal the callback onto the wx main thread.
        wx.CallAfter(self.callback, results, w, h)
class Box(object):
    """ An axis-aligned rectangular region stored as two corner points
    (x1,y1), (x2,y2). Canonical form is (upper-left, lower-right).
    """
    # SHADING: (int R, int G, int B)
    # (Optional) color of transparent shading for drawing
    shading_clr = None
    shading_selected_clr = None
    shading_clr_cycle = None

    def __init__(self, x1, y1, x2, y2):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
        # _dirty: signals drawing code that this box must be redrawn.
        self._dirty = True

    @property
    def width(self):
        return abs(self.x1-self.x2)

    @property
    def height(self):
        return abs(self.y1-self.y2)

    def __str__(self):
        return "Box({0},{1},{2},{3})".format(self.x1, self.y1, self.x2, self.y2)

    def __repr__(self):
        return "Box({0},{1},{2},{3})".format(self.x1, self.y1, self.x2, self.y2)

    def __eq__(self, o):
        # NOTE: only coordinates participate in equality; subclass state
        # (e.g. is_sel) is deliberately ignored.
        return (isinstance(o, Box) and self.x1 == o.x1 and self.x2 == o.x2
                and self.y1 == o.y1 and self.y2 == o.y2)

    def __ne__(self, o):
        # Bugfix: Python 2 does NOT derive '!=' from __eq__, so without
        # this, 'boxA != boxB' silently fell back to identity comparison.
        return not self.__eq__(o)

    def canonicalize(self):
        """ Re-arranges my points (x1,y1),(x2,y2) such that we get:
            (x_upperleft, y_upperleft, x_lowerright, y_lowerright)
        Returns self.
        """
        # min/max also handles degenerate (zero-width/height) boxes, which
        # the previous branch-based logic mis-ordered.
        self.x1, self.x2 = min(self.x1, self.x2), max(self.x1, self.x2)
        self.y1, self.y2 = min(self.y1, self.y2), max(self.y1, self.y2)
        return self

    def scale(self, scale):
        """ Multiplies all coordinates by SCALE in-place, rounding to int. """
        self.x1 = int(round(self.x1*scale))
        self.y1 = int(round(self.y1*scale))
        self.x2 = int(round(self.x2*scale))
        self.y2 = int(round(self.y2*scale))

    def copy(self):
        """ Returns a new Box with my coordinates (base-class copy). """
        return Box(self.x1, self.y1, self.x2, self.y2)

    def get_draw_opts(self):
        """ Given the state of me, return the color+line-width for the
        DC to use.
        """
        return ("Green", 2)

    def marshall(self):
        """ Returns a plain-dict representation (for serialization). """
        return {'x1': self.x1, 'y1': self.y1, 'x2': self.x2, 'y2': self.y2}
class TargetBox(Box):
    """ A Box marking a single voting target; tracks its selection state. """
    shading_clr = (0, 255, 0) # Green
    shading_selected_clr = (255, 0, 0) # Red
    shading_clr_cycle = None

    def __init__(self, x1, y1, x2, y2, is_sel=False):
        Box.__init__(self, x1, y1, x2, y2)
        self.is_sel = is_sel

    def __repr__(self):
        return "TargetBox({0},{1},{2},{3},is_sel={4})".format(
            self.x1, self.y1, self.x2, self.y2, self.is_sel)

    def __str__(self):
        return self.__repr__()

    def get_draw_opts(self):
        """ Pen colour+width for the DC: yellow when selected, else green. """
        return ("Yellow", 1) if self.is_sel else ("Green", 1)

    def copy(self):
        """ Returns a fresh TargetBox with my coords and selection state. """
        return TargetBox(self.x1, self.y1, self.x2, self.y2, is_sel=self.is_sel)
class ContestBox(Box):
    """ A Box enclosing one contest (a group of voting targets); carries a
    selection flag and a display colour assigned by recolour_contests.
    """
    shading_clr = (0, 0, 200) # Blue
    shading_selected_clr = (171, 0, 240) # Purple
    # shading_clr_cycle := A list of colors to alternate from
    shading_clr_cycle = ((0, 0, 200), (100, 0, 0), (0, 150, 245), (0, 230, 150), (100, 0, 190))

    def __init__(self, x1, y1, x2, y2, is_sel=False):
        Box.__init__(self, x1, y1, x2, y2)
        self.is_sel = is_sel
        # Assigned later by the contest-colouring pass; None until then.
        self.colour = None

    def __repr__(self):
        return "ContestBox({0},{1},{2},{3},is_sel={4})".format(
            self.x1, self.y1, self.x2, self.y2, self.is_sel)

    def __str__(self):
        return self.__repr__()

    def get_draw_opts(self):
        """ Pen colour+width for the DC: yellow when selected, else blue. """
        return ("Yellow", 1) if self.is_sel else ("Blue", 1)

    def copy(self):
        """ Returns a fresh ContestBox with my coords and selection state. """
        return ContestBox(self.x1, self.y1, self.x2, self.y2, is_sel=self.is_sel)
class SelectionBox(Box):
    """ The transient rubber-band box used to multi-select other boxes. """

    def __repr__(self):
        return "SelectionBox({0},{1},{2},{3})".format(
            self.x1, self.y1, self.x2, self.y2)

    def __str__(self):
        return self.__repr__()

    def get_draw_opts(self):
        """ Always drawn as a thin black outline. """
        return ("Black", 1)

    def copy(self):
        """ Returns a fresh SelectionBox with my coordinates. """
        return SelectionBox(self.x1, self.y1, self.x2, self.y2)
def canonicalize_box(box):
    """ Takes two arbitrary (x,y) points and re-arranges them
    such that we get:
        (x_upperleft, y_upperleft, x_lowerright, y_lowerright)
    Input:
        tuple BOX: (xa, ya, xb, yb), the two corners in any order.
    Output:
        tuple (x1, y1, x2, y2) in canonical (UL, LR) order.
    """
    xa, ya, xb, yb = box
    # min/max covers all four corner orderings, and also fixes the old
    # branch-based logic which mis-ordered degenerate boxes (xa == xb or
    # ya == yb fell into the wrong branch and came out y- or x-swapped).
    return (min(xa, xb), min(ya, yb), max(xa, xb), max(ya, yb))
def get_boxes_within(boxes, box):
    """ Returns all boxes in BOXES that lie within BOX.
    A box counts as 'within' if its central two-thirds is enclosed by BOX
    (i.e. a third of its width/height may hang over each edge).
    Input:
        list boxes: [Box_i, ...]
        Box box: Enclosing box.
    Output:
        list outboxes.
    """
    def is_enclosed(b):
        w_third = int(abs(b.x1 - b.x2)) / 3
        h_third = int(abs(b.y1 - b.y2)) / 3
        return ((b.x1 + w_third) >= box.x1 and
                (b.x2 - w_third) <= box.x2 and
                (b.y1 + h_third) >= box.y1 and
                (b.y2 - h_third) <= box.y2)
    return [b for b in boxes if is_enclosed(b)]
def expand_box(box, factor, bounds=None):
    """ Expands the Box BOX by FACTOR in each dimension. If BOUNDS is
    given, then this dictates the maximum (width,height) allowed.
    Input:
        Box BOX:
        float FACTOR: If 0.0, same size. 0.5 is 50% bigger, etc.
        list BOUNDS: (w,h)
    Output:
        Box OUTBOX.
    """
    grow_x = box.width * factor
    grow_y = box.height * factor
    out = box.copy()
    # Upper-left corner: never expand past the origin.
    out.x1 = int(round(max(0, box.x1 - grow_x)))
    out.y1 = int(round(max(0, box.y1 - grow_y)))
    # Lower-right corner: clip to BOUNDS when provided.
    x2_new = box.x2 + grow_x
    y2_new = box.y2 + grow_y
    if bounds is not None:
        x2_new = min(bounds[0]-1, x2_new)
        y2_new = min(bounds[1]-1, y2_new)
    out.x2 = int(round(x2_new))
    out.y2 = int(round(y2_new))
    return out
def compute_box_ids(boxes):
    """ Given a list of Boxes, some of which are Targets, others
    of which are Contests, geometrically compute the correct
    target->contest associations. Also outputs all voting targets
    which are not contained in a contest.
    Input:
        list BOXES:
    Output:
        (ASSOCS, LONELY_TARGETS)
    dict ASSOCS: {int contest_id, [ContestBox, [TargetBox_i, ...]]}
    list LONELY_TARGETS: [TargetBox_i, ...]
    """
    def find_enclosing(target, contestboxes):
        """ Returns (idx, contestbox) for the first contest containing
        TARGET (with ~10% slack on each edge), or (None, None).
        """
        slack_fact = 0.1
        eps_x = int(round(target.width * slack_fact))
        eps_y = int(round(target.height * slack_fact))
        for idx, cbox in enumerate(contestboxes):
            if ((target.x1+eps_x) >= cbox.x1 and (target.y1+eps_y) >= cbox.y1
                    and (target.x2-eps_x) <= cbox.x2 and (target.y2-eps_y) <= cbox.y2):
                return idx, cbox
        return None, None
    contests = [b for b in boxes if isinstance(b, ContestBox)]
    targets = [b for b in boxes if isinstance(b, TargetBox)]
    # Ensure that each contest is present in the output, even when it has
    # no contained voting targets. Output contest ids follow the ordering
    # of the CONTESTS list.
    assocs = dict((cid, (cbox, [])) for (cid, cbox) in enumerate(contests))
    lonely_targets = []
    for t in targets:
        cid, cbox = find_enclosing(t, contests)
        if cid is None:
            lonely_targets.append(t)
        elif cid in assocs:
            assocs[cid][1].append(t)
        else:
            assocs[cid] = [cbox, [t]]
    return assocs, lonely_targets
def recolour_contests(contests):
    """ Performs a five-colouring on CONTESTS to improve UI experience.
    Input:
        list CONTESTS: [ContestBox_i, ...]
    Output:
        None. Mutates the input contests (sets .colour on each ContestBox).
    """
    def contests2graph():
        # Build an adjacency graph: one Node per ContestBox, with edges
        # between geometrically-adjacent contests (see is_adjacent).
        contest2node = {} # maps {ContestBox: Node}
        node2contest = {} # maps {Node: ContestBox}
        for i, contest in enumerate(contests):
            # id(contest) disambiguates contests with identical coordinates.
            node = graphcolour.Node((contest.x1,contest.y1, contest.x2, contest.y2, id(contest)))
            contest2node[contest] = node
            node2contest[node] = contest
        # O(n^2) pairwise adjacency check over all contests.
        for i, contest0 in enumerate(contests):
            for j, contest1 in enumerate(contests):
                if i == j: continue
                if is_adjacent(contest0, contest1):
                    contest2node[contest0].add_neighbor(contest2node[contest1])
        return graphcolour.AdjListGraph(contest2node.values()), node2contest
    graph, node2contest = contests2graph()
    # Planar graphs are always 5-colourable; if the planar algorithm fails,
    # fall back to the general (slower) colouring algorithm.
    colouring = graphcolour.fivecolour_planar(graph, colours=ContestBox.shading_clr_cycle)
    if not colouring:
        print "Graph isn't planar, that's odd! Running general colouring algo..."
        colouring = graphcolour.graphcolour(graph, colours=ContestBox.shading_clr_cycle)
    for node, colour in colouring.iteritems():
        cbox = node2contest[node]
        cbox.colour = colour
def is_line_overlap_horiz(a, b):
    """ Returns True if the 1-D horizontal segments A=(x_start, x_end) and
    B=(x_start, x_end) overlap or touch, False otherwise. """
    first, second = (a, b) if a[0] <= b[0] else (b, a)
    # The only non-overlap case: the leftmost segment both starts and ends
    # strictly before the other segment begins.
    return not (first[0] < second[0] and first[1] < second[0])
def is_line_overlap_vert(a, b):
    """ Returns True if the 1-D vertical segments A=(y_start, y_end) and
    B=(y_start, y_end) overlap or touch, False otherwise. """
    upper, lower = (a, b) if a[0] <= b[0] else (b, a)
    # The only non-overlap case: the upper segment both starts and ends
    # strictly before the lower segment begins.
    return not (upper[0] < lower[0] and upper[1] < lower[0])
def is_adjacent(contest0, contest1, C=0.2):
    """ Returns True if the input ContestBoxes are adjacent: their facing
    edges lie within a relative threshold C of each other, and the boxes
    overlap along the direction of the shared edge. """
    thresh_w = C * min(contest0.width, contest1.width)
    thresh_h = C * min(contest0.height, contest1.height)
    if contest0.x1 <= contest1.x1:
        left, right = contest0, contest1
    else:
        left, right = contest1, contest0
    if left.y1 <= right.y1:
        top, bottom = left, right
    else:
        top, bottom = right, left
    # Vertically stacked: TOP's bottom edge near BOTTOM's top edge, with
    # horizontal overlap along the shared edge.
    if (abs(top.y2 - bottom.y1) < thresh_h and
            is_line_overlap_horiz((top.x1, top.x2), (bottom.x1, bottom.x2))):
        return True
    # Side by side: LEFT's right edge near RIGHT's left edge, with
    # vertical overlap along the shared edge.
    if (abs(left.x2 - right.x1) < thresh_w and
            is_line_overlap_vert((left.y1, left.y2), (right.y1, right.y2))):
        return True
    return False
def test_recolour_contests():
    """ Ad-hoc manual smoke test for recolour_contests. A and B are side by
    side, D sits below A, and C is far away on the right. """
    A = ContestBox(50, 50, 75, 100)
    B = ContestBox(77, 48, 117, 102)
    C = ContestBox(200, 50, 250, 100)
    D = ContestBox(51, 100, 76, 121)
    contests = [A, B, C, D]
    recolour_contests(contests)
    for contest in contests:
        print "Contest ({0},{1}) Colour: {2}".format(contest.x1, contest.y1, contest.colour)
    # Drop into the debugger so the resulting colouring can be inspected.
    pdb.set_trace()
"""
=======================
==== Sanity Checks ====
=======================
"""
# Sanity-check flag ids and the user-facing messages for the fatal checks.
# Each ID_FLAG_* identifies one check emitted by
# check_all_images_have_targets; UI code keys off these ids.

# There are absolutely /no/ voting targets
ID_FLAG_NO_TARGETS = 0
_MSG_NO_TARGETS = "Error: No voting targets have been created yet. You \
will not be able to proceed until you select the voting targets."
# There are images that have no voting targets
ID_FLAG_EMPTY_IMAGES = 1
# There are images with only one voting target
ID_FLAG_ONLYONE_TARGET = 2
# There are voting targets not within a contest
ID_FLAG_LONELY_TARGETS = 3
# There are no contests defined
ID_FLAG_NO_CONTESTS = 4
_MSG_NO_CONTESTS = "Error: No contests have been created. You must define \
contests to proceed. The easiest way to define the contests is to click \
the Infer Contest Regions button; this automatically tries to detect the \
contest boundaries. Alternatively, if for some reason you need to do it \
entirely manually (which is much more work), you can click the Add Contest \
button and draw a rectangle around each individual contest."
# There are contests with no voting targets contained
ID_FLAG_EMPTY_CONTESTS = 5
# There are contests with only one voting target
ID_FLAG_CONTEST_ONE_TARGET = 6
def check_all_images_have_targets(boxes_map, flagged_idxs):
    """ Runs all sanity checks over the user-created target/contest boxes.
    Input:
        dict BOXES_MAP: {int grp_idx: [[Box_i_side0, Box_i+1_side0, ...], [Box_i_side1, ...], ...]}
        set FLAGGED_IDXS: set([int idx, ...])
            Stores which grp_idxs in BOXES_MAP were flagged-to-be-quarantined
            by the user. These groups are skipped entirely.
    Output:
        [(bool isOk_i, bool isFatal_i, str msg_i, int ID_FLAG, obj data), ...]
    """
    PASS, NOTPASS = True, False
    FATAL, NOTFATAL = True, False
    out_lst = []
    grp_contestcnts = util.Counter() # maps {(grp_idx, side): int contest_cnt}
    grp_targetcnts = util.Counter() # maps {(grp_idx, side): int target_cnt}
    grp_notargs = [] # [(int grp_idx, int side), ...]
    grp_nocontests = [] # [(int grp_idx, int side), ...]
    grp_onlyone_targ = [] # [(int grp_idx, int side), ...]
    lonely_targets_map = {} # maps {(int grp_idx, int side): [TargetBox_i, ...]}}
    cnt_lonely_targets = 0
    grp_contests_one_target = {} # maps {(int grp_idx, int side): [ContestBox_i, ...]}
    grp_empty_contests = {} # maps {(int grp_idx, int side): [ContestBox_i, ...]}
    # 1.) Gather per-(group, side) statistics about targets and contests.
    for grp_idx, boxes_tups in boxes_map.iteritems():
        if grp_idx in flagged_idxs:
            continue
        for side, boxes in enumerate(boxes_tups):
            box_assocs, lonely_targets = compute_box_ids(boxes)
            cnt_lonely_targets += len(lonely_targets)
            if lonely_targets:
                lonely_targets_map.setdefault((grp_idx, side), []).extend(lonely_targets)
            targets = [b for b in boxes if isinstance(b, TargetBox)]
            contests = [b for b in boxes if isinstance(b, ContestBox)]
            grp_targetcnts[(grp_idx, side)] += len(targets)
            grp_contestcnts[(grp_idx, side)] += len(contests)
            if not targets:
                grp_notargs.append((grp_idx, side))
            if not contests:
                grp_nocontests.append((grp_idx, side))
            if len(targets) == 1:
                grp_onlyone_targ.append((grp_idx, side))
            for contestid, contest_tup in box_assocs.iteritems():
                contestbox, contest_targets = contest_tup[0], contest_tup[1]
                if len(contest_targets) == 0:
                    grp_empty_contests.setdefault((grp_idx, side), []).append(contestbox)
                elif len(contest_targets) == 1:
                    grp_contests_one_target.setdefault((grp_idx, side), []).append(contestbox)
    # 2.) Emit check results. The two "nothing exists at all" checks
    # short-circuit, since the remaining checks would be meaningless.
    isok_notargets = sum(grp_targetcnts.values()) > 0
    isok_nocontests = sum(grp_contestcnts.values()) > 0
    out_lst.append((isok_notargets, True, _MSG_NO_TARGETS, ID_FLAG_NO_TARGETS, None))
    if not isok_notargets:
        return out_lst
    out_lst.append((isok_nocontests, True, _MSG_NO_CONTESTS, ID_FLAG_NO_CONTESTS, None))
    if not isok_nocontests:
        return out_lst
    if grp_notargs:
        msg_empty_images = "Warning: {0} different ballot images did not have \
any voting targets detected. If this is a mistake, please go back and \
correct it. \n\
Otherwise, if these images in fact do not contain any voting \
targets (e.g. they are blank), you may continue.".format(len(grp_notargs))
        out_lst.append((NOTPASS, NOTFATAL, msg_empty_images, ID_FLAG_EMPTY_IMAGES, grp_notargs))
    else:
        out_lst.append((PASS, NOTFATAL, "Pass", ID_FLAG_EMPTY_IMAGES, None))
    if grp_onlyone_targ:
        # BUGFIX: message typo "please bo back" -> "please go back".
        msg_onlyone_targ = "Warning: {0} ballot images only had one \
voting target detected. If this is a mistake, please go back and correct \
the images.".format(len(grp_onlyone_targ))
        out_lst.append((NOTPASS, NOTFATAL, msg_onlyone_targ, ID_FLAG_ONLYONE_TARGET, grp_onlyone_targ))
    else:
        out_lst.append((PASS, NOTFATAL, "Pass", ID_FLAG_ONLYONE_TARGET, None))
    if cnt_lonely_targets > 0:
        msg_lonelytargets = "Warning: There were {0} targets that were \
not enclosed within a contest.".format(cnt_lonely_targets)
        out_lst.append((NOTPASS, FATAL, msg_lonelytargets, ID_FLAG_LONELY_TARGETS, lonely_targets_map))
    else:
        out_lst.append((PASS, FATAL, "Pass", ID_FLAG_LONELY_TARGETS, None))
    if grp_empty_contests:
        msg_emptycontests = "Warning: There were {0} contests that had \
no voting targets enclosed.".format(len(grp_empty_contests))
        out_lst.append((NOTPASS, NOTFATAL, msg_emptycontests, ID_FLAG_EMPTY_CONTESTS, grp_empty_contests))
    else:
        out_lst.append((PASS, NOTFATAL, "Pass", ID_FLAG_EMPTY_CONTESTS, None))
    if grp_contests_one_target:
        msg_contests_one_target = "Warning: There were {0} contests that \
had only one voting target.".format(len(grp_contests_one_target))
        out_lst.append((NOTPASS, FATAL, msg_contests_one_target, ID_FLAG_CONTEST_ONE_TARGET, grp_contests_one_target))
    else:
        out_lst.append((PASS, FATAL, "Pass", ID_FLAG_CONTEST_ONE_TARGET, None))
    return out_lst
"""
===========================
==== END Sanity Checks ====
===========================
"""
def img_to_wxbitmap(img, size=None):
    """ Converts the wxImage IMG to a wxBitmap, optionally scaling it to
    SIZE=(w, h) first. """
    # TODO: Assumes that IMG is a wxImage
    scaled = img.Scale(size[0], size[1], quality=wx.IMAGE_QUALITY_HIGH) if size else img
    return wx.BitmapFromImage(scaled)
def pil2iplimage(img):
    """ Converts a (grayscale) PIL image IMG to an OpenCV IplImage. """
    # Create the IplImage header first, then point it at the PIL pixel data.
    size = map(int, img.size)
    header = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 1)
    cv.SetData(header, img.tostring())
    return header
def iplimage2pil(img):
    """ Converts a (grayscale) OpenCV IplImage back to a PIL image. """
    size = cv.GetSize(img)
    return Image.fromstring("L", size, img.tostring())
def bestmatch(A, B):
    """ Tries to find the image A within the (larger) image B.
    For instance, A could be a voting target, and B could be a
    contest.
    Input:
        IplImage A: Patch to search for
        IplImage B: Image to search over
    Output:
        ((x,y), s_mat): location on B of the best match for A, plus the
        full normalized cross-correlation response matrix.
    """
    # One response entry per valid placement of A inside B.
    s_mat = cv.CreateMat(B.height - A.height + 1, B.width - A.width + 1, cv.CV_32F)
    cv.MatchTemplate(B, A, s_mat, cv.CV_TM_CCOEFF_NORMED)
    # The best match is the placement with the maximal correlation response.
    minResp, maxResp, minLoc, maxLoc = cv.MinMaxLoc(s_mat)
    return maxLoc, s_mat
def align_partitions(partitions, (outrootdir, img2flip), queue=None, result_queue=None):
    """
    Aligns every ballot in each partition to the first ballot of that
    partition, saving the aligned images to disk.
    Input:
        list PARTITIONS: [[partitionID, [Ballot_i, ...]], [partitionID, [Ballot_i, ...]], ...]
        str OUTROOTDIR: Rootdir to save aligned images to.
        dict IMG2FLIP: maps {str imgpath: bool isflipped}
        QUEUE: if given, one item is put per finished partition (progress).
        RESULT_QUEUE: if given, receives the final result dict ({} on error).
    Output:
        dict PARTITIONS_ALIGN: {int partitionID: [BALLOT_i, ...]}
        Returns None if an exception occurred.
    """
    # Global Alignment approach: Perform alignment on a smaller patch
    # near the center, then apply the discovered transformation H to
    # the entire image. Works better than working on the entire image.
    partitions_align = {} # maps {partitionID: [[imgpath_i, ...], ...]}
    t = time.time()
    print "...this process is aligning {0} ballots...".format(sum(map(lambda t: len(t[1]), partitions), 0))
    try:
        for idx, (partitionid, ballots) in enumerate(partitions):
            outdir = pathjoin(outrootdir, 'partition_{0}'.format(partitionid))
            # Output directory may already exist from a previous run; ignore.
            try: os.makedirs(outdir)
            except: pass
            # Load (and possibly flip) each side of the reference ballot.
            ballotRef = ballots[0]
            Irefs = []
            for side, imP in enumerate(ballotRef):
                I = shared.standardImread_v2(imP, flatten=True)
                if img2flip[imP]:
                    I = shared.fastFlip(I)
                Irefs.append((imP, I))
            # 0.) First, handle the reference Ballot
            curBallot = []
            for side, (Iref_imP, Iref) in enumerate(Irefs):
                outname = 'bal_{0}_side_{1}.png'.format(0, side)
                outpath = pathjoin(outdir, outname)
                scipy.misc.imsave(outpath, Iref)
                curBallot.append(outpath)
            partitions_align[partitionid] = [curBallot]
            # 1.) Now, align all other Ballots to BALLOTREF
            for i, ballot in enumerate(ballots[1:]):
                curBallot = []
                for side, imgpath in enumerate(ballot):
                    Iref_imgP, Iref = Irefs[side]
                    I = shared.standardImread_v2(imgpath, flatten=True)
                    if img2flip[imgpath]:
                        I = shared.fastFlip(I)
                    H, Ireg, err = global_align.align_image(I, Iref)
                    outname = 'bal_{0}_side_{1}.png'.format(i + 1, side)
                    outpath = pathjoin(outdir, outname)
                    scipy.misc.imsave(outpath, Ireg)
                    curBallot.append(outpath)
                partitions_align[partitionid].append(curBallot)
            # Signal per-partition progress to the caller, if requested.
            if queue:
                queue.put(True)
        # NOTE(review): `dur` is computed but never used.
        dur = time.time() - t
        if result_queue:
            result_queue.put(partitions_align)
        return partitions_align
    except:
        traceback.print_exc()
        if result_queue:
            # Put an empty dict so the consumer's result loop still terminates.
            result_queue.put({})
        return None
def do_align_partitions(partitions, img2flip, outrootdir, manager, queue, N=None):
"""
Input:
list PARTITIONS[i][j][k] := i-th partition, j-th ballot, k-th side.
dict IMG2FLIP: maps {str imgpath: bool isflipped}
Output:
dict PARTITIONS_ALIGN. maps {int partitionID: [[imgpath_i, ...], ...]}
"""
try:
if N == None:
N = min(multiprocessing.cpu_count(), len(partitions))
# Evenly-distribute partitions by partition size.
partitions_evenly = divy_lists(partitions, N)
pool = multiprocessing.Pool()
result_queue = manager.Queue()
if N == 1:
align_partitions(partitions_evenly[0], (outrootdir, img2flip), queue, result_queue)
else:
for i,task in enumerate(partitions_evenly):
# TASK := [[partitionID, [Ballot_i, ...]], [partitionID, [Ballot_i, ...]], ...]
pool.apply_async(align_partitions, args=(task, (outrootdir, img2flip),
queue, result_queue))
pool.close()
pool.join()
cnt = 0; num_tasks = len(partitions_evenly)
partitions_align = {}
while cnt < num_tasks:
subresult = result_queue.get()
print '...got result {0}...'.format(cnt)
partitions_align = dict(partitions_align.items() + subresult.items())
cnt += 1
return partitions_align
except:
traceback.print_exc()
return None
def divy_lists(lst, N):
    """ Given a list of sublists (where each sublist may be of unequal
    size), output N lists of list of sublists, where the size of each
    list of sublists is maximized (i.e. ensuring an equal distribution
    of sublist sizes).
    Input:
        list LST[i][j] := j-th element of i-th sublist
        int N: number of output buckets.
    Output:
        list OUT[i][j][k] := k-th element of j-th sublist within i-th list.
        Each sublist is tagged with its original index: OUT[i][j] == [orig_idx, sublist].
    """
    if len(lst) <= N:
        # Fewer sublists than buckets: one sublist per bucket.
        return [[[i, l]] for i, l in enumerate(lst)]
    outlst = [None]*N
    # Deal the sublists, largest first, round-robin into the N buckets so
    # the total sizes stay balanced.
    # BUGFIX: avoid np.array(lst) -- constructing an ndarray from ragged
    # sublists is rejected by modern numpy, and feeding map(...) into
    # np.argsort breaks under Python 3 (map returns an iterator there).
    lstlens = [-len(l) for l in lst]
    lstlens_argsort = np.argsort(lstlens)
    for i, lst_idx in enumerate(lstlens_argsort):
        sublist = lst[lst_idx]
        out_idx = i % N
        # BUGFIX: was `outlst[out_idx] == None` (identity compare with ==).
        if outlst[out_idx] is None:
            outlst[out_idx] = [[lst_idx, sublist]]
        else:
            outlst[out_idx].append([lst_idx, sublist])
    return outlst
# TODO: Reference the util.py versions of the following conversion methods
def wxImage2np(Iwx, is_rgb=True):
    """ Converts a wxImage to a numpy uint8 array of shape (h, w, 3) when
    IS_RGB is True, else (h, w). The array shares the wxImage's buffer
    (np.frombuffer does not copy). """
    w, h = Iwx.GetSize()
    flat = np.frombuffer(Iwx.GetDataBuffer(), dtype='uint8')
    if is_rgb:
        return flat.reshape(h, w, 3)
    return flat.reshape(h, w)
def wxBitmap2np(wxBmp, is_rgb=True):
""" Converts wxBitmap to numpy array """
total_t = time.time()
t = time.time()
Iwx = wxBmp.ConvertToImage()
dur_bmp2wximg = time.time() - t
t = time.time()
npimg = wxImage2np(Iwx, is_rgb=True)
dur_wximg2np = time.time() - t
total_dur = time.time() - total_t
print "==== wxBitmap2np: {0:.6f}s (bmp2wximg: {1:.5f}s {2:.3f}%) \
(wximg2np: {3:.5f}s {4:.3f}%)".format(total_dur,
dur_bmp2wximg,
100*(dur_bmp2wximg / total_dur),
dur_wximg2np,
100*(dur_wximg2np / total_dur))
return npimg
def wxBitmap2np_v2(wxBmp, is_rgb=True):
    """ Converts wxBitmap to numpy uint8 array of shape (h, w, 3),
    copying pixel data directly from the bitmap (faster than wxBitmap2np's
    bitmap->wxImage->numpy route).
    NOTE(review): IS_RGB is currently ignored -- the bitmap is always
    copied out as 3-channel RGB.
    """
    total_t = time.time()
    w, h = wxBmp.GetSize()
    npimg = np.zeros(h*w*3, dtype='uint8')
    wxBmp.CopyToBuffer(npimg, format=wx.BitmapBufferFormat_RGB)
    npimg = npimg.reshape(h,w,3)
    total_dur = time.time() - total_t
    #print "==== wxBitmap2np_v2: {0:.6f}s".format(total_dur)
    return npimg
def isimgext(f):
    """ Returns True if filepath F has a supported image extension
    (case-insensitive). """
    # BUGFIX: 'jpeg' was missing its leading dot -- os.path.splitext keeps
    # the dot, so '.jpeg' files were never recognized.
    return os.path.splitext(f)[1].lower() in ('.png', '.bmp', '.jpeg', '.jpg', '.tif')
def distL2(x1,y1,x2,y2):
    """ Euclidean (L2) distance between points (x1, y1) and (x2, y2). """
    dx2 = (float(x1) - x2) ** 2.0
    dy2 = (float(y1) - y2) ** 2.0
    return math.sqrt(dx2 + dy2)
def main():
    """ Ad-hoc driver: builds ballot partitions from an image directory
    and shows them in a SelectTargetsPanel.
    Usage: python <script> IMGSDIR [single|<anything-else-for-paired>]
    """
    class TestFrame(wx.Frame):
        def __init__(self, parent, partitions, *args, **kwargs):
            wx.Frame.__init__(self, parent, size=(800, 900), *args, **kwargs)
            self.parent = parent
            self.partitions = partitions
            self.st_panel = SelectTargetsPanel(self)
            self.st_panel.start(partitions)
    args = sys.argv[1:]
    imgsdir = args[0]
    try:
        mode = args[1]
    except IndexError:
        # No mode argument given: default to one image per ballot.
        mode = 'single'
    partitions = []
    for dirpath, _, filenames in os.walk(imgsdir):
        partition = []
        imgpaths = [f for f in filenames if isimgext(f)]
        if mode == 'single':
            for imgname in imgpaths:
                partition.append([os.path.join(dirpath, imgname)])
        else:
            # Paired mode: consecutive images form (front, back) pages.
            imgpaths = util.sorted_nicely(imgpaths)
            for i, imgname in enumerate(imgpaths[:-1:2]):
                page1 = os.path.join(dirpath, imgname)
                # BUGFIX: imgname is imgpaths[2*i], so its back side is
                # imgpaths[2*i + 1]. The old code used imgpaths[i+1],
                # which mis-paired every ballot after the first.
                page2 = os.path.join(dirpath, imgpaths[2*i + 1])
                partition.append([page1, page2])
        if partition:
            partitions.append(partition)
    app = wx.App(False)
    f = TestFrame(None, partitions)
    f.Show()
    app.MainLoop()
if __name__ == '__main__':
    # main() is temporarily disabled; run the colouring smoke test instead.
    #main()
    test_recolour_contests()
| [
"kiniry@acm.org"
] | kiniry@acm.org |
aad780c77bd68cbe3aca2ab016bb4aedf822e810 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/dn5/M-228.py | 28ec14812b0decedb82bfb5e86721302c836b662 | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,466 | py | # Obvezen del
def unikati(s):
    """Return the elements of s without duplicates, in order of first appearance."""
    videni = []
    for element in s:
        if element in videni:
            continue
        videni.append(element)
    return videni
def avtor(tvit):
    """Return the author of the tweet: the text before the first colon."""
    return tvit.partition(":")[0]
def vsi_avtorji(tviti):
    """Return the unique tweet authors, in order of first appearance."""
    return unikati([avtor(tvit) for tvit in tviti])
def izloci_besedo(beseda):
    """Strip leading and trailing non-alphanumeric characters from the word."""
    start = 0
    while start < len(beseda) and not beseda[start].isalnum():
        start += 1
    end = len(beseda)
    while end > 0 and not beseda[end - 1].isalnum():
        end -= 1
    # For an all-punctuation word, start passes end and the slice is empty.
    return beseda[start:end]
def se_zacne_z(tvit, c):
    """Return the cleaned-up words of the tweet that start with character c."""
    return [izloci_besedo(beseda) for beseda in tvit.split() if beseda[0] == c]
def zberi_se_zacne_z(tviti, c):
    """Return the unique words starting with c, collected over all tweets."""
    zadetki = []
    for tvit in tviti:
        zadetki.extend(se_zacne_z(tvit, c))
    return unikati(zadetki)
def vse_afne(tviti):
    # Mentions: unique words beginning with "@", across all tweets.
    return zberi_se_zacne_z(tviti, "@")
def vsi_hashtagi(tviti):
    # Hashtags: unique words beginning with "#", across all tweets.
    return zberi_se_zacne_z(tviti, "#")
def vse_osebe(tviti):
    """Return all persons (authors and mentions), sorted alphabetically."""
    return sorted(unikati(vsi_avtorji(tviti) + vse_afne(tviti)))
# Dodaten del
def custva(tviti, hashtagi):
    """Return the sorted unique authors of tweets containing any of the hashtags."""
    pisci = [avtor(tvit)
             for znacka in hashtagi
             for tvit in tviti
             if znacka in se_zacne_z(tvit, "#")]
    # vse_osebe de-duplicates and sorts; the bare author names contain no
    # colons or "@" words, so they pass through unchanged.
    return vse_osebe(pisci)
def se_poznata(tviti, oseba1, oseba2):
    """Return True if either person mentions the other in some tweet."""
    for tvit in tviti:
        pisec = avtor(tvit)
        omenjeni = se_zacne_z(tvit, "@")
        if pisec == oseba1 and oseba2 in omenjeni:
            return True
        if pisec == oseba2 and oseba1 in omenjeni:
            return True
    return False
# Testi
import unittest
class TestTviti(unittest.TestCase):
    """Unit tests for the mandatory part of the exercise."""
    # Shared fixture: a short conversation with authors, mentions and hashtags.
    tviti = [
        "sandra: Spet ta dež. #dougcajt",
        "berta: @sandra Delaj domačo za #programiranje1",
        "sandra: @berta Ne maram #programiranje1 #krneki",
        "ana: kdo so te @berta, @cilka, @dani? #krneki",
        "cilka: jst sm pa #luft",
        "benjamin: pogrešam ano #zalosten",
        "ema: @benjamin @ana #split? po dvopičju, za začetek?",
    ]
    def test_unikat(self):
        self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])
        self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])
        self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])
        self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])
        self.assertEqual(unikati([1]), [1])
        self.assertEqual(unikati([]), [])
        self.assertEqual(unikati(["Ana", "Berta", "Cilka", "Berta"]), ["Ana", "Berta", "Cilka"])
    def test_avtor(self):
        self.assertEqual(avtor("janez: pred dvopičjem avtor, potem besedilo"), "janez")
        self.assertEqual(avtor("ana: malo krajse ime"), "ana")
        self.assertEqual(avtor("benjamin: pomembne so tri stvari: prva, druga in tretja"), "benjamin")
    def test_vsi_avtorji(self):
        self.assertEqual(vsi_avtorji(self.tviti), ["sandra", "berta", "ana", "cilka", "benjamin", "ema"])
        self.assertEqual(vsi_avtorji(self.tviti[:3]), ["sandra", "berta"])
    def test_izloci_besedo(self):
        self.assertEqual(izloci_besedo("@ana"), "ana")
        self.assertEqual(izloci_besedo("@@ana!!!"), "ana")
        self.assertEqual(izloci_besedo("ana"), "ana")
        self.assertEqual(izloci_besedo("!#$%\"=%/%()/Ben-jamin'"), "Ben-jamin")
    def test_vse_na_crko(self):
        self.assertEqual(se_zacne_z("Benjamin $je $skocil! Visoko!", "$"), ["je", "skocil"])
        self.assertEqual(se_zacne_z("Benjamin $je $skocil! #Visoko!", "$"), ["je", "skocil"])
        self.assertEqual(se_zacne_z("ana: kdo so te @berta, @cilka, @dani? #krneki", "@"), ["berta", "cilka", "dani"])
    def test_zberi_na_crko(self):
        self.assertEqual(zberi_se_zacne_z(self.tviti, "@"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
        self.assertEqual(zberi_se_zacne_z(self.tviti, "#"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
    def test_vse_afne(self):
        self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
    def test_vsi_hashtagi(self):
        self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
    def test_vse_osebe(self):
        self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])
class TestDodatna(unittest.TestCase):
    """Unit tests for the optional part of the exercise (custva, se_poznata)."""
    # Same fixture as TestTviti.
    tviti = [
        "sandra: Spet ta dež. #dougcajt",
        "berta: @sandra Delaj domačo za #programiranje1",
        "sandra: @berta Ne maram #programiranje1 #krneki",
        "ana: kdo so te @berta, @cilka, @dani? #krneki",
        "cilka: jst sm pa #luft",
        "benjamin: pogrešam ano #zalosten",
        "ema: @benjamin @ana #split? po dvopičju, za začetek?",
    ]
    def test_custva(self):
        self.assertEqual(custva(self.tviti, ["dougcajt", "krneki"]), ["ana", "sandra"])
        self.assertEqual(custva(self.tviti, ["luft"]), ["cilka"])
        self.assertEqual(custva(self.tviti, ["meh"]), [])
    def test_se_poznata(self):
        self.assertTrue(se_poznata(self.tviti, "ana", "berta"))
        self.assertTrue(se_poznata(self.tviti, "ema", "ana"))
        self.assertFalse(se_poznata(self.tviti, "sandra", "ana"))
        self.assertFalse(se_poznata(self.tviti, "cilka", "luft"))
        self.assertFalse(se_poznata(self.tviti, "cilka", "balon"))
if __name__ == "__main__":
    # Run the test suite when executed as a script.
    unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
6ecca778ed6a34b13e7cd83ca131937ed98a014a | 2773bf2a53a64ab44ba47c100fbb73187d93354a | /test/test_lsb_release.py | 478380d8e8b184ae2c031d6d9fb1e5fc1968e7a7 | [] | no_license | balabit-deps/balabit-os-7-lsb | 73c0a6fda68c4d847d43d2b9d3307ad23e5c29bc | 6cda662bfdeeee0bef37b3c1e86821cbb2072bdc | refs/heads/master | 2020-04-07T09:21:58.195001 | 2017-12-05T22:54:13 | 2018-07-20T19:16:45 | 158,249,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,836 | py | #!/usr/bin/python3
# coding=utf-8
import unittest
import lsb_release as lr
import random
import string
import os
import sys
import warnings
def rnd_string(min_l, max_l):
    """Return a random ASCII-letter string whose length is uniform in [min_l, max_l]."""
    length = random.randint(min_l, max_l)
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def get_arch_distinfo():
    """Return the base distro info dict {'ID': 'Debian', 'OS': <GNU variant>}
    for the current kernel, mirroring lsb_release.guess_debian_release."""
    # Copied verbatim from guess_debian_release; sucks but unavoidable.
    kern = os.uname()[0]
    if kern in ('Linux', 'Hurd', 'NetBSD'):
        os_name = 'GNU/' + kern
    elif kern == 'FreeBSD':
        os_name = 'GNU/k' + kern
    elif kern in ('GNU/Linux', 'GNU/kFreeBSD'):
        os_name = kern
    else:
        os_name = 'GNU'
    return {'ID': 'Debian', 'OS': os_name}
class TestLSBRelease(unittest.TestCase):
    """Unit tests for the lsb_release module.

    Many tests drive lsb_release through LSB_ETC_* / TEST_* environment
    variables, which the module reads to redirect its file lookups and to
    fake apt-cache / dpkg-query output.
    """
    def test_lookup_codename(self):
        # Test all versions
        for rno in lr.RELEASE_CODENAME_LOOKUP:
            cdn = lr.RELEASE_CODENAME_LOOKUP[rno]
            # Test that 1.1, 1.1r0 and 1.1.8 lead to buzz. Default is picked randomly and is not supposed to go trough
            badDefault = rnd_string(0,9)
            # From Wheezy on, the codename is defined by the first number but a dot-revision is mandatory
            if float(rno) >= 7:
                rno = rno + '.' + str(random.randint(0,9))
            self.assertEqual(lr.lookup_codename(rno,badDefault),cdn,'Release name `' + rno + '` is not recognized.')
            self.assertEqual(lr.lookup_codename(rno + 'r' + str(random.randint(0,9)),badDefault),cdn,'Release name `' + rno + 'r*` is not recognized.')
            self.assertEqual(lr.lookup_codename(rno + '.' + str(random.randint(0,9)),badDefault),cdn,'Release name `' + rno + '.*` is not recognized.')
        # Unknown releases must fall back to the provided default.
        self.assertEqual(lr.lookup_codename('inexistent_release' + str(random.randint(0,9)),badDefault),badDefault,'Default release codename is not accepted.')
    def test_valid_lsb_versions(self):
        # List versions in which the modules are available
        lsb_modules = {
            'cxx' : ['3.0', '3.1', '3.2', '4.0', '4.1'],
            'desktop' : ['3.1', '3.2', '4.0', '4.1'],
            'languages' : ['3.2', '4.0', '4.1'],
            'multimedia' : ['3.2', '4.0', '4.1'],
            'printing' : ['3.2', '4.0', '4.1'],
            'qt4' : ['3.1'],
            'security' : ['4.0','4.1'],
        }
        lsb_known_versions = ['2.0', '3.0', '3.1', '3.2', '4.0', '4.1'];
        for lsb_module in lsb_modules:
            in_versions = lsb_modules[lsb_module]
            for test_v in lsb_known_versions:
                vlv_result = lr.valid_lsb_versions(test_v,lsb_module)
                assert_text = 'valid_lsb_versions(' + test_v + ',' + lsb_module + ')'
                # For 2.0, all output 2.0 only.
                if test_v == '2.0':
                    self.assertEqual(vlv_result,
                                     ['2.0'],
                                     assert_text)
                # For 3.0, all output 2.0 and 3.0.
                elif test_v == '3.0':
                    self.assertEqual(vlv_result,
                                     ['2.0', '3.0'],
                                     assert_text)
                # Before appearance, it outputs all past LSB versions
                elif int(float(test_v)*10) < int(float(in_versions[0])*10):
                    self.assertEqual(vlv_result,
                                     [elem for elem in lsb_known_versions if int(float(elem)*10) <= int(float(test_v)*10)],
                                     assert_text)
                # From appearence on, it outputs all lower versions from the in_versions
                else:
                    self.assertEqual(vlv_result,
                                     [elem for elem in in_versions if int(float(elem)*10) <= int(float(test_v)*10)],
                                     assert_text)
    def test_check_modules_installed(self):
        # Test that when no packages are available, then we get nothing out.
        os.environ['TEST_DPKG_QUERY_NONE'] = '1'
        self.assertEqual(lr.check_modules_installed(),[])
        os.environ.pop('TEST_DPKG_QUERY_NONE')
    def test_parse_policy_line(self):
        # Build a random shortname=value policy line and check the parse.
        release_line = ''
        shortnames = list(lr.longnames.keys())
        random.shuffle(shortnames)
        longnames = {}
        for shortname in shortnames:
            longnames[lr.longnames[shortname]] = rnd_string(1,9)
            release_line += shortname + '=' + longnames[lr.longnames[shortname]] + ','
        release_line = release_line[:-1]
        self.assertEqual(sorted(lr.parse_policy_line(release_line)),sorted(longnames),'parse_policy_line(' + release_line + ')')
    def test_compare_release(self):
        # Test that equal suite strings lead to 0
        fake_release_equal = rnd_string(1,25)
        x = [rnd_string(1,12), {'suite': fake_release_equal}]
        y = [rnd_string(1,12), {'suite': fake_release_equal}]
        self.assertEqual(lr.compare_release(x,y),0)
        # Test that sequences in RELEASES_ORDER lead to reliable output
        RO_min = 0
        RO_max = len(lr.RELEASES_ORDER) - 1
        x_suite_i = random.randint(RO_min,RO_max)
        y_suite_i = random.randint(RO_min,RO_max)
        x[1]['suite'] = lr.RELEASES_ORDER[x_suite_i]
        y[1]['suite'] = lr.RELEASES_ORDER[y_suite_i]
        supposed_output = y_suite_i - x_suite_i
        self.assertEqual(lr.compare_release(x,y),
                         supposed_output,
                         'compare_release(' + x[1]['suite'] + ',' + y[1]['suite'] + ') =? ' + str(supposed_output))
    def test_parse_apt_policy(self):
        # Test almost-empty apt-cache policy
        supposed_output = [(100, {'suite': 'now'})]
        self.assertEqual(lr.parse_apt_policy(),supposed_output)
        # Add one fake entry
        os.environ['TEST_APT_CACHE1'] = '932'
        supposed_output.append((932, {'origin': 'oRigIn', 'suite': 'SuiTe', 'component': 'C0mp0nent', 'label': 'lABel'}))
        self.assertEqual(lr.parse_apt_policy(),supposed_output)
        # Add a second fake entry, unordered
        os.environ['TEST_APT_CACHE2'] = '600'
        supposed_output.append((600, {'origin': '0RigIn', 'suite': '5uiTe', 'component': 'C03p0nent', 'label': '1ABel'}))
        self.assertEqual(lr.parse_apt_policy(),supposed_output)
        # Add a third fake entry, unordered, with non-ascii chars (#675618)
        os.environ['TEST_APT_CACHE3'] = '754'
        supposed_output.append((754, {'origin': u'Jérôme Helvète', 'suite': '5uiTe', 'component': 'C03p0nent', 'label': '1ABel'}))
        self.assertEqual(lr.parse_apt_policy(),supposed_output)
        os.environ.pop('TEST_APT_CACHE1')
        os.environ.pop('TEST_APT_CACHE2')
        os.environ.pop('TEST_APT_CACHE3')
    def test_guess_release_from_apt(self):
        os.environ['TEST_APT_CACHE1'] = '932'
        os.environ['TEST_APT_CACHE2'] = '600'
        os.environ['TEST_APT_CACHE3'] = '754'
        os.environ['TEST_APT_CACHE_RELEASE'] = '512'
        supposed_output = {'origin': 'or1g1n', 'suite': 'testing', 'component': 'c0mp0nent', 'label': 'l8bel'}
        self.assertEqual(
            lr.guess_release_from_apt(
                origin='or1g1n',
                label='l8bel',
                component='c0mp0nent',
                ignoresuites=('c0mp0nentIgn')),
            supposed_output)
        # Test with a special repository (for Ports)
        supposed_output = {'origin': 'P-or1g1n', 'suite': 'sid', 'component': 'OtherComp', 'label': 'P-l8bel'}
        self.assertEqual(
            lr.guess_release_from_apt(
                origin='or1g1n',
                label='l8bel',
                component='c0mp0nent',
                ignoresuites=('c0mp0nentIgn'),
                alternate_olabels={'P-or1g1n': ('P-l8bel', 'P-l9bel')}),
            supposed_output)
        os.environ.pop('TEST_APT_CACHE1')
        os.environ.pop('TEST_APT_CACHE2')
        os.environ.pop('TEST_APT_CACHE3')
        os.environ.pop('TEST_APT_CACHE_RELEASE')
    def test_guess_debian_release(self):
        distinfo = get_arch_distinfo()
        # Test different dpkg origin with an fake "unstable releases" that ends in /sid, and an invalid apt-cache policy
        distinfo['ID'] = rnd_string(5,12)
        fn = 'test/dpkg_origins_default_' + rnd_string(5,5)
        f = open(fn,'w')
        f.write('Vendor: ' + distinfo['ID'] + "\n")
        f.close()
        os.environ['LSB_ETC_DPKG_ORIGINS_DEFAULT'] = fn
        distinfo['RELEASE'] = 'testing/unstable'
        distinfo['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s' % distinfo
        # NOTE(review): fn2 is never removed after the test, and
        # LSB_ETC_DEBIAN_VERSION keeps pointing at it until reassigned below.
        fn2 = 'test/debian_version_' + rnd_string(5,12)
        f = open(fn2,'w')
        f.write(rnd_string(5,12) + '/sid')
        f.close()
        os.environ['LSB_ETC_DEBIAN_VERSION'] = fn2
        self.assertEqual(lr.guess_debian_release(),distinfo)
        os.remove(fn)
        # Make sure no existing /etc/dpkg/origins/default is used
        os.environ['LSB_ETC_DPKG_ORIGINS_DEFAULT'] = '/non-existant'
        distinfo['ID'] = 'Debian'
        # Test "stable releases" with numeric debian_versions
        for rno in lr.RELEASE_CODENAME_LOOKUP:
            # From Wheezy on, the codename is defined by the first number but a dot-revision is mandatory
            if float(rno) >= 7:
                distinfo['RELEASE'] = rno + '.' + str(random.randint(0,9))
            else:
                distinfo['RELEASE'] = rno + random.choice('.r') + str(random.randint(0,9))
            distinfo['CODENAME'] = lr.RELEASE_CODENAME_LOOKUP[rno]
            distinfo['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s (%(CODENAME)s)' % distinfo
            fn = 'test/debian_version_' + rnd_string(5,5)
            f = open(fn,'w')
            f.write(distinfo['RELEASE'])
            f.close()
            os.environ['LSB_ETC_DEBIAN_VERSION'] = fn
            self.assertEqual(lr.guess_debian_release(),distinfo)
            os.remove(fn)
            os.environ.pop('LSB_ETC_DEBIAN_VERSION')
        # Remove the CODENAME from the supposed output
        distinfo.pop('CODENAME')
        # Test "stable releases" with string debian_versions, go read invalid apt-cache policy
        for rno in lr.RELEASE_CODENAME_LOOKUP:
            distinfo['RELEASE'] = lr.RELEASE_CODENAME_LOOKUP[rno]
            distinfo['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s' % distinfo
            fn = 'test/debian_version_' + rnd_string(5,12)
            f = open(fn,'w')
            f.write(distinfo['RELEASE'])
            f.close()
            os.environ['LSB_ETC_DEBIAN_VERSION'] = fn
            self.assertEqual(lr.guess_debian_release(),distinfo)
            os.remove(fn)
            os.environ.pop('LSB_ETC_DEBIAN_VERSION')
        # Test "unstable releases" that end in /sid, go read invalid apt-cache policy
        distinfo['RELEASE'] = 'testing/unstable'
        distinfo['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s' % distinfo
        for rno in lr.RELEASE_CODENAME_LOOKUP:
            fn = 'test/debian_version_' + rnd_string(5,12)
            f = open(fn,'w')
            f.write(lr.RELEASE_CODENAME_LOOKUP[rno] + '/sid')
            f.close()
            os.environ['LSB_ETC_DEBIAN_VERSION'] = fn
            self.assertEqual(lr.guess_debian_release(),distinfo)
            os.remove(fn)
            os.environ.pop('LSB_ETC_DEBIAN_VERSION')
        # Test "unstable releases" that end in /sid, go read valid apt-cache policy
        os.environ['TEST_APT_CACHE_UNSTABLE'] = '500'
        distinfo['CODENAME'] = 'sid'
        distinfo['RELEASE'] = 'unstable'
        distinfo['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s (%(CODENAME)s)' % distinfo
        for rno in lr.RELEASE_CODENAME_LOOKUP:
            fn = 'test/debian_version_' + rnd_string(5,12)
            f = open(fn,'w')
            f.write(lr.RELEASE_CODENAME_LOOKUP[rno] + '/sid')
            f.close()
            os.environ['LSB_ETC_DEBIAN_VERSION'] = fn
            self.assertEqual(lr.guess_debian_release(),distinfo)
            os.remove(fn)
            os.environ.pop('LSB_ETC_DEBIAN_VERSION')
        distinfo['CODENAME'] = 'sid'
        distinfo['RELEASE'] = 'unstable'
        distinfo['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s (%(CODENAME)s)' % distinfo
        for CODE in ('PORTS', 'PORTS_OLD'):
            # Test "unstable releases with Debian Ports" that end in /sid, go read valid apt-cache policy
            os.environ['TEST_APT_CACHE_UNSTABLE_' + CODE] = '500'
            for rno in lr.RELEASE_CODENAME_LOOKUP:
                fn = 'test/debian_version_' + rnd_string(5,12)
                f = open(fn,'w')
                f.write(lr.RELEASE_CODENAME_LOOKUP[rno] + '/sid')
                f.close()
                os.environ['LSB_ETC_DEBIAN_VERSION'] = fn
                self.assertEqual(lr.guess_debian_release(),distinfo)
                os.remove(fn)
            os.environ.pop('TEST_APT_CACHE_UNSTABLE_' + CODE)
            os.environ.pop('LSB_ETC_DEBIAN_VERSION')
        os.environ.pop('TEST_APT_CACHE_UNSTABLE')
    def test_get_lsb_information(self):
        # Test that an inexistant /etc/lsb-release leads to empty output
        supposed_output = {}
        os.environ['LSB_ETC_LSB_RELEASE'] = 'test/inexistant_file_' + rnd_string(2,5)
        self.assertEqual(lr.get_lsb_information(),supposed_output)
        # Test that a fake /etc/lsb-release leads to output with only the content we want
        supposed_output = {'RELEASE': '(The release number)',
                           'CODENAME': '(The codename for the release)',
                           'ID': '(Distributor ID)',
                           'DESCRIPTION': '(A human-readable description of the release)'}
        os.environ['LSB_ETC_LSB_RELEASE'] = 'test/lsb-release'
        self.assertEqual(lr.get_lsb_information(),supposed_output)
        os.environ.pop('LSB_ETC_LSB_RELEASE')
    def test_get_distro_information_no_distinfo_file(self):
        # Test that a missing /usr/share/distro-info/{distro}.csv indeed falls
        # back on Debian's information
        debian_info = lr.get_distro_info()
        other_distro_info = lr.get_distro_info(origin='x-not-debian')
        self.assertEqual(debian_info, other_distro_info)
    def test_get_distro_information(self):
        # Test that an inexistant /etc/lsb-release leads to empty output
        supposed_output = get_arch_distinfo()
        supposed_output['RELEASE'] = 'testing/unstable';
        supposed_output['DESCRIPTION'] = '%(ID)s %(OS)s %(RELEASE)s' % supposed_output
        os.environ['LSB_ETC_LSB_RELEASE'] = 'test/inexistant_file_' + rnd_string(2,5)
        fn = 'test/debian_version_' + rnd_string(5,12)
        f = open(fn,'w')
        f.write('testing/sid')
        f.close()
        os.environ['LSB_ETC_DEBIAN_VERSION'] = fn
        os.environ['LSB_ETC_DPKG_ORIGINS_DEFAULT'] = ''
        self.assertEqual(lr.get_distro_information(),supposed_output)
        os.remove(fn)
        os.environ.pop('LSB_ETC_DPKG_ORIGINS_DEFAULT')
        os.environ.pop('LSB_ETC_DEBIAN_VERSION')
if __name__ == '__main__':
unittest.main()
| [
"testbot@balabit.com"
] | testbot@balabit.com |
9ab4e63e98acdc0ede42cb493cab7d0abdda6f41 | 1811ef650cff1dbf0e73bb7b90d6e671ada699d9 | /speach/__version__.py | 56294e44714053b09a6fa899ed435d89799d9dd7 | [
"MIT"
] | permissive | nickduran/speach | 8da8d20e5fb4795eccc99057080c8fffc220b9ed | 05e25138b419dfecc5f96b454b61d481a95e3345 | refs/heads/main | 2023-05-26T06:00:41.532391 | 2021-06-15T05:33:24 | 2021-06-15T05:33:24 | 381,417,392 | 1 | 0 | MIT | 2021-06-29T15:42:58 | 2021-06-29T15:42:58 | null | UTF-8 | Python | false | false | 900 | py | # -*- coding: utf-8 -*-
# This code is a part of speach library: https://github.com/neocl/speach/
# :copyright: (c) 2018 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
__author__ = "Le Tuan Anh"
__email__ = "tuananh.ke@gmail.com"
__copyright__ = "Copyright (c) 2018, Le Tuan Anh <tuananh.ke@gmail.com>"
__credits__ = []
__license__ = "MIT License"
__description__ = "a Python library for managing, annotating, and converting natural language corpuses using popular formats (CoNLL, ELAN, Praat, CSV, JSON, SQLite, VTT, Audacity, TTL, TIG, ISF)"
__url__ = "https://github.com/neocl/speach/"
__issue__ = "https://github.com/neocl/speach/issues/"
__maintainer__ = "Le Tuan Anh"
__version_major__ = "0.1" # follow PEP-0440
__version__ = "{}a9.post2".format(__version_major__)
__version_long__ = "{} - Alpha 9.post2".format(__version_major__)
__status__ = "3 - Alpha"
| [
"tuananh.ke@gmail.com"
] | tuananh.ke@gmail.com |
4b2c22ea97c6081de88619a4b1a0ede4c0ca1348 | cc8010890757670cc2933a6cc2477f3bc10f53b4 | /python/geneditidtools/get_data.py | 32b73a507fe872c74d702d4f62e6df2aad768129 | [
"MIT"
] | permissive | GenEditID/GenEditID | 209153fe16610afa4fe4ed38b34f82905a808dc1 | dd4f454bab1e9d1d64170a16eb7c247799afd436 | refs/heads/master | 2022-06-11T05:40:22.404699 | 2022-04-05T22:12:27 | 2022-04-05T22:12:27 | 183,217,466 | 0 | 2 | MIT | 2022-04-05T22:12:28 | 2019-04-24T11:44:44 | HTML | UTF-8 | Python | false | false | 2,512 | py | import os
import argparse
import pandas
import subprocess
import shutil
import gzip
from pathlib import Path
import geneditid.log as logger
from geneditid.config import cfg
def run_process(log, cmd, dry_run=True):
    """Execute *cmd* (an argv list) unless *dry_run* is set.

    Returns the combined stdout/stderr bytes when the command exits with
    code 0, raises :exc:`subprocess.CalledProcessError` on a non-zero
    exit code, and returns ``None`` without spawning anything when
    *dry_run* is True (only a "[dry-run]" line is logged).
    """
    printable = " ".join(cmd)
    if dry_run:
        # Announce what would have run, but execute nothing.
        log.info("[dry-run] command '{}' executed".format(printable))
        return None
    proc = subprocess.Popen(cmd, shell=False,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    captured = proc.communicate()[0]
    log.info("command '{}' executed".format(printable))
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd, captured)
    log.debug(captured)
    return captured
def main():
    """Download paired-end SRA runs listed in a metadata table and store
    them as gzipped FASTQ files inside per-study project folders.

    Command line:
      --meta    required tab-separated table; rows are expected to carry
                ``study``, ``accession``, ``filename`` and ``filename2``
                columns (read via attribute access below)
      --fqdump  path to the sratoolkit ``fastq-dump`` binary
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--meta", dest="meta", action="store", help="Metadata file listing accession codes for SRA", required=True)
    parser.add_argument("--fqdump", dest="fqdump", action="store", help="Path to fastq_dump tool", default='~/sratoolkit/bin/fastq-dump', required=False)
    options = parser.parse_args()
    log = logger.get_custom_logger(os.path.join(cfg['PROJECTS_FOLDER'], 'get_data.log'))
    # Expand a leading '~' manually so the literal default path works.
    fqdump_cmd = options.fqdump.replace('~', str(Path.home()))
    codes = pandas.read_csv(options.meta, sep='\t')
    for i, row in codes.iterrows():
        # create study folder
        study_folder = os.path.join(cfg['PROJECTS_FOLDER'], row.study)
        if not os.path.exists(study_folder):
            os.makedirs(study_folder)
            log.info('Project folder {} created'.format(study_folder))
        log.info('Downloading {}'.format(row.accession))
        # fastq-dump --split-files writes <accession>_1.fastq and
        # <accession>_2.fastq into the current working directory.
        run_process(log, [fqdump_cmd, '--split-files', row.accession], False)
        log.info('Compressing {}_1.fastq into {}'.format(row.accession, os.path.join(study_folder, row.filename)))
        with open("{}_1.fastq".format(row.accession), 'rb') as f_in:
            with gzip.open(os.path.join(study_folder, row.filename), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove("{}_1.fastq".format(row.accession))
        log.info('Compressing {}_2.fastq into {}'.format(row.accession, os.path.join(study_folder, row.filename2)))
        with open("{}_2.fastq".format(row.accession), 'rb') as f_in:
            with gzip.open(os.path.join(study_folder, row.filename2), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove("{}_2.fastq".format(row.accession))
main()
| [
"pajanne@gmail.com"
] | pajanne@gmail.com |
cdf1aa18f2637488b927f81e1b53c1b40d319b18 | 69096ca0d67b3d6809a2fe05af51341df62ebc60 | /tibiapy/models/event.py | 4b5f126e048074622e45f3f9bd773a3312b36e33 | [
"Apache-2.0"
] | permissive | Galarzaa90/tibia.py | 6f648aff8b6fbac7be4886435711f7ff08420402 | f8c145dd597c558398bac50e035711e34863b571 | refs/heads/main | 2023-08-17T15:50:31.354488 | 2023-08-17T14:00:07 | 2023-08-17T14:00:07 | 143,892,750 | 30 | 12 | Apache-2.0 | 2023-08-24T17:24:19 | 2018-08-07T15:25:23 | Python | UTF-8 | Python | false | false | 2,504 | py | import datetime
from typing import Optional, List
from tibiapy.models import BaseModel
from tibiapy.urls import get_event_schedule_url
class EventEntry(BaseModel):
    """Represents an event's entry in the calendar."""
    title: str
    """The title of the event."""
    description: str
    """The description of the event."""
    start_date: Optional[datetime.date] = None
    """The day the event starts.

    If the event is continuing from the previous month, this will be :obj:`None`."""
    end_date: Optional[datetime.date] = None
    """The day the event ends.

    If the event is continuing on the next month, this will be :obj:`None`."""
    color: Optional[str] = None
    """The displayed color of the event."""

    def __eq__(self, other):
        """Two entries are considered equal when they share the same title.

        Comparing against anything that is not an :class:`EventEntry`
        returns ``NotImplemented`` so Python can defer to the other
        operand, instead of raising :exc:`AttributeError` on the missing
        ``title`` attribute as the previous implementation did.
        """
        if isinstance(other, EventEntry):
            return self.title == other.title
        return NotImplemented

    @property
    def duration(self) -> Optional[int]:
        """The number of days this event will be active for.

        :obj:`None` when either boundary date is unknown (the event
        spills over from the previous month or into the next one).
        """
        if self.start_date and self.end_date:
            # +1 day because both boundary days count as active.
            return (self.end_date - self.start_date + datetime.timedelta(days=1)).days
        return None
class EventSchedule(BaseModel):
    """Represents the event's calendar in Tibia.com."""
    month: int
    """The month being displayed.

    Note that some days from the previous and next month may be included too."""
    year: int
    """The year being displayed."""
    events: List[EventEntry] = []
    """A list of events that happen during this month.

    It might include some events from the previous and next months as well."""

    @property
    def url(self) -> str:
        """Get the URL to the event calendar with the current parameters."""
        return get_event_schedule_url(self.month, self.year)

    def get_events_on(self, date: datetime.date) -> List[EventEntry]:
        """Get a list of events that are active during the specified desired_date.

        Parameters
        ----------
        date: :class:`datetime.date`
            The date to check.

        Returns
        -------
        :class:`list` of :class:`EventEntry`
            The events that are active during the desired_date, if any.

        Notes
        -----
        Dates outside the calendar's month and year may yield unexpected results.
        """
        def is_active(entry):
            # An open boundary (None) is treated as unbounded on that side.
            begins = entry.start_date or datetime.date.min
            ends = entry.end_date or datetime.date.max
            return begins <= date <= ends

        return [entry for entry in self.events if is_active(entry)]
| [
"allan.galarza@gmail.com"
] | allan.galarza@gmail.com |
8b83066b58c8832fd2ed754662b8d39d9f60975b | e14d3da0a697297fbbb75079395fc824e34cde72 | /ocaml/3.11.1/tasklets/backup.py | 96ce9c40effd22563aebf57650243adfdd38fa28 | [] | no_license | racktivity/ext-qp5-unstable-qpackages5 | 5be84d377e1d389e9b5922199e545abec206a1a7 | 369b9b2a8ad05d8bd6abedd7d3af07a9b329d8ef | refs/heads/master | 2021-01-10T04:50:55.759871 | 2012-06-04T16:11:33 | 2012-06-04T16:11:33 | 54,315,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
__author__ = 'amplidata'
__tags__ = 'backup',
def main(q, i, params, tags):
    """Dispatch stub for the backup tasklet.

    Pulls the qpackage and backup URL out of *params* and branches on
    ``params['action']``; every branch is currently a no-op placeholder.
    """
    qpackage = params['qpackage']
    # e.g. ftp://login:passwd@10.10.1.1/myroot/ @point to doc about cloudfilesystem
    backupurl = params['backupurl']
    action = params['action']
    if action == "backup":
        pass  # not implemented yet
    elif action == "restore":
        pass  # not implemented yet
    elif action == "backupconfig":
        pass  # not implemented yet
    elif action == "restoreconfig":
        pass  # not implemented yet
| [
"devnull@localhost"
] | devnull@localhost |
5a7154dbbe545bd1f27db62c3236e70bbb03a201 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /LdXYzf5d3xJgYZur8_16.py | abe941fa827b363ae2d9c71b9a701e21e5f48742 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | """
Create a function that takes three values:
* `h` hours
* `m` minutes
* `s` seconds
Return the value that's the **longest duration**.
### Examples
longest_time(1, 59, 3598) ➞ 1
longest_time(2, 300, 15000) ➞ 300
longest_time(15, 955, 59400) ➞ 59400
### Notes
No two durations will be the same.
"""
def longest_time(h, m, s):
    """Return whichever of h (hours), m (minutes) or s (seconds) is the
    longest duration. No two of the durations are ever equal.
    """
    # Normalise every value to seconds, then hand back the original number.
    candidates = [(h * 3600, h), (m * 60, m), (s, s)]
    return max(candidates)[1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
033f2a2f14af23b00c1af3155fa5681d1d181ef9 | 25978f857bcd50b425481168f30f025771a657ad | /src/test/test_tfrecords.py | 75e4d687046e8f76f9b41bcd44b23448dea34cce | [
"MIT"
] | permissive | DeepLearningInMedicine/BTClassification | 81de836c18ef15d2064a718f0d68ebd1f377781b | ec98a242d4f215505f2640a8bdf12f2015bfd5c7 | refs/heads/master | 2020-03-11T14:01:19.799529 | 2018-04-13T18:12:43 | 2018-04-13T18:12:43 | 130,041,649 | 1 | 0 | null | 2018-04-18T09:53:23 | 2018-04-18T09:53:23 | null | UTF-8 | Python | false | false | 8,910 | py | import os
import numpy as np
from tqdm import *
import pandas as pd
import tensorflow as tf
from btc_settings import *
# # Path settings
# parent_dir = os.path.dirname(os.getcwd())
# input_dir = os.path.join(parent_dir, DATA_FOLDER, AUGMENT_FOLDER)
# label_file = os.path.join(parent_dir, DATA_FOLDER, LABEL_FILE)
# # Load labels and cases' names
# labels = pd.read_csv(label_file)
# case_names = os.listdir(input_dir)
# #
# # # Plot one case
# # import matplotlib.pyplot as plt
# #
# # idx = 7
# # case_no = case_names[idx]
# # case_grade = labels[GRADE_LABEL][labels[CASE_NO] == case_no].values[0]
# # case_path = os.path.join(input_dir, case_no)
# # volume_names = os.listdir(case_path)
# # # Plot the middle slice of each volume
# # slice_no = int((PARTIAL_SIZE + 1) / 2)
# # channel = 3
# # volumes, vmins, vmaxs = [], [], []
# # for vn in volume_names:
# # volume = np.load(os.path.join(case_path, vn))
# # volumes.append(volume)
# # vmins.append(np.min(volume[..., channel]))
# # vmaxs.append(np.max(volume[..., channel]))
# # vmin = np.min(vmins)
# # vmax = np.max(vmaxs)
# # column_num = 10
# # volumes_num = len(volumes)
# # row_num = np.ceil(volumes_num / column_num)
# # plt.figure()
# # for i in range(volumes_num):
# # vc = volumes[i][..., channel]
# # plt.subplot(row_num, column_num, i + 1)
# # plt.axis("off")
# # plt.imshow(vc[:, :, slice_no], cmap="gray", vmin=vmin, vmax=vmax)
# # mng = plt.get_current_fig_manager()
# # mng.resize(*mng.window.maxsize())
# # plt.show()
# # Obtain amount of each grade type
# grade2_num = 0
# grade3_num = 0
# grade4_num = 0
# for cn in case_names:
# case_grade = labels[GRADE_LABEL][labels[CASE_NO] == cn].values[0]
# volumes_num = len(os.listdir(os.path.join(input_dir, cn)))
# if case_grade == GRADE_II:
# grade2_num += volumes_num
# elif case_grade == GRADE_III:
# grade3_num += volumes_num
# elif case_grade == GRADE_IV:
# grade4_num += volumes_num
# else:
# # print("The grade of " + cn + " is unknown.")
# continue
# print("Total number of patches: ", str(grade2_num + grade3_num + grade4_num))
# print("The number of Grade II patches: ", str(grade2_num))
# print("The number of Grade III patches: ", str(grade3_num))
# print("The number of Grade IV patches: ", str(grade4_num))
# if os.path.isfile("train_cases.txt"):
# os.remove("train_cases.txt")
# open("train_cases.txt", "a").close()
# if os.path.isfile("validate_cases.txt"):
# os.remove("validate_cases.txt")
# open("validate_cases.txt", "a").close()
# def generate_cases_set(labels, grade):
# # Set cases for training and validating
# cases = labels[CASE_NO][labels[GRADE_LABEL] == grade].values.tolist()
# # Generate cases' names for training set and validating set
# cases_num = len(cases)
# train_num = int(cases_num * PROPORTION)
# index = list(np.arange(cases_num))
# np.random.seed(RANDOM_SEED)
# train_index = list(np.random.choice(cases_num, train_num, replace=False))
# validate_index = [i for i in index if i not in train_index]
# train = [[cases[i], grade] for i in train_index]
# validate = [[cases[i], grade] for i in validate_index]
# return train, validate
# # Write cases' names into text files
# def save_cases_names(file, cases):
# txt = open(file, "a")
# for case in cases:
# txt.write("{}\n".format(case))
# grade2_train, grade2_validate = generate_cases_set(labels, GRADE_II)
# # save_cases_names("train_cases.txt", grade2_train)
# # save_cases_names("validate_cases.txt", grade2_validate)
# grade3_train, grade3_validate = generate_cases_set(labels, GRADE_III)
# # save_cases_names("train_cases.txt", grade3_train)
# # save_cases_names("validate_cases.txt", grade3_validate)
# grade4_train, grade4_validate = generate_cases_set(labels, GRADE_IV)
# # save_cases_names("train_cases.txt", grade4_train)
# # save_cases_names("validate_cases.txt", grade4_validate)
# # Count patches in training set of each grade group
# def count_patches(input_dir, dataset):
# num = 0
# for d in dataset:
# num += len(os.listdir(os.path.join(input_dir, d[0])))
# return num
# grade2_train_num = count_patches(input_dir, grade2_train)
# grade3_train_num = count_patches(input_dir, grade3_train)
# grade4_train_num = count_patches(input_dir, grade4_train)
# print("Total of training set: ", str(grade2_train_num + grade3_train_num + grade4_train_num))
# print("The number of Grade II patches: ", grade2_train_num)
# print("The number of Grade III patches: ", grade3_train_num)
# print("The number of Grade IV patches: ", grade4_train_num)
# grade2_validate_num = count_patches(input_dir, grade2_validate)
# grade3_validate_num = count_patches(input_dir, grade3_validate)
# grade4_validate_num = count_patches(input_dir, grade4_validate)
# print("Total of validating set: ", str(grade2_validate_num + grade3_validate_num + grade4_validate_num))
# print("The number of Grade II patches: ", grade2_validate_num)
# print("The number of Grade III patches: ", grade3_validate_num)
# print("The number of Grade IV patches: ", grade4_validate_num)
# train_cases = grade2_train + grade3_train + grade4_train
# validate_cases = grade2_validate + grade3_validate + grade4_validate
# save_cases_names("train_cases.txt", train_cases)
# save_cases_names("validate_cases.txt", validate_cases)
# print(train_cases)
# print(validate_cases)
# size49_num = 0
# case_num = len(case_names)
# for idx in tqdm(range(case_num)):
# # idx = 9
# case_no = case_names[idx]
# case_path = os.path.join(input_dir, case_no)
# volumes_path = os.listdir(case_path)
# for vp in volumes_path:
# volume_path = os.path.join(case_path, vp)
# # print(volume_path)
# volume = np.load(volume_path)
# # print(volume.shape, np.mean(volume), np.std(volume))
# if list(volume.shape)[0] == 49:
# size49_num += 1
# print(size49_num)
# partial_train_set = [["TCGA-CS-4944", 0], ["TCGA-CS-4942", 1], ["TCGA-02-0006", 2],
# ["TCGA-02-0009", 2], ["TCGA-02-0011", 2], ["TCGA-02-0027", 2]]
# partial_validate_set = [["TCGA-CS-6668", 0], ["TCGA-CS-5393", 1], ["TCGA-02-0033", 2],
# ["TCGA-02-0034", 2], ["TCGA-02-0037", 2], ["TCGA-02-0046", 2]]
# partial_input_dir = os.path.join(parent_dir, DATA_FOLDER, "Augmented_Partial")
# output_dir = os.path.join(parent_dir, DATA_FOLDER, TFRECORDS_FOLDER)
# partial_train_set_path = os.path.join(output_dir, "partial_train.tfrecord")
# partial_validate_set_path = os.path.join(output_dir, "partial_validate.tfrecord")
partial_train_set = [["TCGA-CS-4944", 0], ["TCGA-CS-4942", 1], ["TCGA-02-0006", 2]]
partial_validate_set = [["TCGA-CS-6668", 0], ["TCGA-CS-5393", 1], ["TCGA-02-0009", 2]]
parent_dir = os.path.dirname(os.getcwd())
partial_input_dir = os.path.join(parent_dir, DATA_FOLDER, "Volumes")
output_dir = os.path.join(parent_dir, DATA_FOLDER, TFRECORDS_FOLDER, VOLUMES_FOLDER)
partial_train_set_path = os.path.join(output_dir, "partial_train.tfrecord")
partial_validate_set_path = os.path.join(output_dir, "partial_validate.tfrecord")
def create_tfrecord(input_dir, tfrecord_path, cases, mode):
    '''_CREATE_TFRECORD

    Serialize brain volumes into a single TFRecord file.

    input_dir     : folder holding one sub-folder of .npy volumes per case
    tfrecord_path : output .tfrecord file path
    cases         : list of [case_name, integer_label] pairs
    mode          : label used only in the progress message ("train"/"validate")
    '''
    def normalize(volume):
        # Scale each channel of the 4-D volume to [0, 1] by its own maximum.
        temp = np.copy(volume)
        for c in range(CHANNELS):
            channel = volume[..., c]
            # Non-zero (foreground) voxels; only used by the commented-out
            # z-score variant below.
            channel_object = channel[np.where(channel > 0)]
            # if c == 0:
            #     print(np.min(channel_object), np.max(channel_object), np.mean(channel_object), np.std(channel_object))
            # temp[..., c] = (channel - np.mean(channel_object)) / np.std(channel_object)
            temp[..., c] = channel / np.max(channel)
        return temp
    print("Create TFRecord to " + mode)
    writer = tf.python_io.TFRecordWriter(tfrecord_path)
    for case in tqdm(cases):
        case_path = os.path.join(input_dir, case[0])
        volumes_path = [os.path.join(case_path, p) for p in os.listdir(case_path)]
        for vp in volumes_path:
            # Skip sub-directories or anything that is not a regular file.
            if not os.path.isfile(vp):
                continue
            volume = np.load(vp)
            volume = normalize(volume)
            # Keep only the first channel and restore an explicit
            # single-channel axis: (112, 112, 88, 1).
            volume = volume[..., 0]
            volume = np.reshape(volume, [112, 112, 88, 1])
            volume_raw = volume.tobytes()
            # One Example per volume: integer class label + raw voxel bytes.
            example = tf.train.Example(features=tf.train.Features(feature={
                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[case[1]])),
                "volume": tf.train.Feature(bytes_list=tf.train.BytesList(value=[volume_raw]))
            }))
            writer.write(example.SerializeToString())
    writer.close()
    return
create_tfrecord(partial_input_dir, partial_train_set_path, partial_train_set, "train")
create_tfrecord(partial_input_dir, partial_validate_set_path, partial_validate_set, "validate")
| [
"quqixun@gmail.com"
] | quqixun@gmail.com |
53397084a66e49b842443597d6d166e71c597ece | 89b6997b24e404c176358073626a8bfad7bcdb8e | /.history/courses/urls_20210412142600.py | 8b0b7efaad01dd31d2638b7887b651d48c78295f | [] | no_license | mohamedhawas123/Education-platform-django | 513e64ac112880385402ce609077796578b4e9ee | 7b83e66bba66b8b2b1a007f5818a534653e6abfb | refs/heads/main | 2023-07-18T16:19:52.177886 | 2021-09-24T12:04:09 | 2021-09-24T12:04:09 | 352,306,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from django.urls import path
from . import views
urlpatterns = [
path('mine/', views.ManageCourseListView.as_view(), name="manage_course_list"),
path('create/', views.CourseCreateView.as_view(), name='course_create'),
path('<pk>/edit/', views.CourseUpdateView.as_view(), name="course_edit"),
path('<pk>/delete', views.CourseDeleteView.as_view(), name="course_delete"),
path('<pk>/module/', views.CourseModuleUpdateView.as_view(), name="course_module_update"),
path('module/<int:module_id>/content/<model_name>/create/', views.ContentCreateUpdateView.as_view(), name="module_content_create"),
path('module/<int:module_id>/content/<model_name>/<id>/', views.ContentCreateUpdateView.as_view(), name="module_content_update"),
path('content/<int:id>/delete/', views.ContentDeleteView.as_view(), name="module_content_delete"),
path('module/<int:module_id>/', views.ModuleContentListView.as_view(), name="module_content_list")
]
| [
"mohamedhawas123@gmail.com"
] | mohamedhawas123@gmail.com |
13e482c59e12df4a48a9b402aa0c400716050055 | 7d91170c0c6b7abb0784a8521755aefee650f7e3 | /3WMvhCe5ZiVsEu8g/ARksNiS1EYfHhgbZ.py | fee809e1cc7629067ea4ae0588a79d57b5cea73f | [] | no_license | urlib/usi35g | e577ca090fc4a60b68662d251370a7cd7cc2afe3 | 46562fa29da32e4b75d8f8fd88496e95a3937099 | refs/heads/master | 2021-04-08T04:02:59.860711 | 2020-04-15T04:22:45 | 2020-04-15T04:22:45 | 248,737,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,336 | py | 𒌼㠤𨠎愼𪙆숆縉徍鍑㰐𭈯⠁ꔥ캳耟룉𭆃𗋗𠨲喳뙩㧽𐀛𘠝𩋴𣕶䛐螪Ὦ𤒗𫁥ᐦ𡾄𣯕𪕉㈉𩧻𬧩ꏀ즍𗢨𗿮펬𠰒𗔺𑠺ᔛ̫︧𫕢🎍ᨳ𦢹䱊鮸𢸓陞屽譕𥰹𧀘𦚕𢧇ቺ𘨳ª鎦ȿ𠪈𒈿𩡞𥱸𨐽𖢷𩈅𠓤𣈘𣀊㗧𐠘㧷𤩕𮠽鵩酔𨧊ᣆ䦁갃𘧣𨘞ᐂ𗿽𬽙𮠭𫿴𦤃霩𑄆㦴𩣗𤧲뛮🦕峁𫄌𢯴𥪭𢇼𮈣𡟁🎅𤎄𫇓骺𩚶᮹𬾊𐐽븅𡄡𤃩⎥𒋩𢵻𠳞쳅𦼵𗟮𢠿Ꮦ𬋉𗻩𮈅⦅𝐧𒌬숔𭢧鞷𩠈𮃋ℊﰋ𠄗𢡃𩔱𞤓𭃒樨𩃫𡹀𭲣𪋔君𡕑앖𩑅𫔃𭜻쩺깈𤠛킏𭵤𩳷ᅹ鄙𦁴𠪴敬𤘐𭴺𭇋𬠸𬋅槩豎但ᶓ𑈀𮪽𥍧𗎦𩐘𠕭𬇧𩦵𦃟𐆋𭧠𧗯𧥿𨔿𢫏𗈺𝞝莧쉄𗨐𧨪个𧡔禴𮕿𥏰彼𬃬🤏𡅟麆𡋣賝𨠄ᝦ➊𩈃𭦖ⴀ㢔𤰙䃗𫢟𒈯𦂥𬂪𦂽턚泲왵𗗿𢓊𐅲𓇪⛒𫉥𗙞𥃽𠲾ꒇ𑦵塋𣭢𠽵䈴䉳芻𭊂𥸢𥜕𦝝𝕝圑𣽝𧢵莑𧔈𥋆𔖡𘜮𧠳𡷯𐙒喫𦙼顙𭇍𬨟𬗽༯𠺃㘯𗵨ᓫ𢃍𩣊𥦳🦩㷿𧭉𤺄𩶼𦷈ሣ𫯪歅ꞿ𬅈𮩥𭪁𖧚䓰𡅈𛇺榻藶𤙉핎𬄍鯜𮍥𢎼𘐞顡᳔먕㙍𢝹𡿁驜쎜㹕𩺜𦙙𨥩𧒹🀣𗁇𭻝𗢧𬍲𤀟郠宸𤠋﹂𫽀𥒕爚𤙗鵻𦄺婢㽻𬯪🙋𫦀𪢽𢌄𢵉톬𡶅𪄉𡑈𪀼🠩𠆟𮝍냞𦩸ಡ𦴪㚰鼊𓊔ᄆ𥾓嘺𤛚𪃏䊔౷遁莙𤗓𣬑🕇𧲬𣌮𬻆瓝လ𠧡𦛪𭸱躁𗌏謎䌠𢯶먘鏡𡴄𐕀𩎮𥱆𬏤𪣗𦦯𬩢𠿃𧖂𫜃ᝅ뎭㊕𬪼𑒇㡾𐼕㾸𑱴𪅏𖢀𬞜𐠌톙󠇓숉𩅝閵𬔝槗瓨୮杺䣟𡨐𬤻𓆞龗뺶𮌽𣃏𣏐싟𘢛𧄰㓇桌퓖𪍚錸𪾱𮥲澏䅑𨗅𓀥𠆋偬𤳔𝁳𥊴굋𬚛𮍭䎫𩍩璝🔣𝁅㬒𨺕ⷴ𤣸𘨉狿䪍𦸈𬾆𥞆𖽸𬧇꿆𦠙𧚲栏𫊙𥥅䰈𠜼봆𬻛𩾒𘥡侵𧄥𧲎𧛑𬅑𭙈𨁂𗅻𝓀𥈎𬊸𦵤𬠞𣈎𦏋𢷺谺鯏朌𦨫𩈡𒇞升𧫸𥙔㧗𪗬𬲨㞂쐢겧↶늘㯆𦭏躿𥧖𪁚𭆎뽓ȗ䞼쒨𥞓ꯩ賸𦷒囨𖫥𪙪ꄄ𝡬𬨖(𢿟𢺍𐦙𩻦Ꙧ㒟𠙔┶𐓈鄿𐀶퓭𧋎𨁋ᅶ睴𨌙𝡣箿✃벽𦜼쁺𡫁爵輒찐蚃캂𖩌𨈞褌🠙𫘴𗖆𫊪𧵯𨠸茰큗碌𘪸𭜄侞𬫥﹒𓄧𨇝좘𝥆𐿣𠌹氀祖𭢸𥠨𤬒袒𧆃岯ꋾ😺𦭪毵𫧀ꆳ䯌𐍠㵊❬𩈁⋠𗠿󠆳Ϊ녓𮜽𨅧𠸹뀯𡚮蓕熐쨒𫜈𨯶螰𣮈𑩂绂ﺾ𬾪𨬺𩽪🆜𬩭𤝟䮖ᇺ𭙝𖠦툾ﶌ𤴝𭒼ᛘံ趤𩼺𦒘𗌼𬒈ꩿ猙ꛉ蠙ࢪ╓𭉮𣬣ꚷ𡼕𭆅𩷨𠗼㜔𐙁蝦𩐊𨫞颮𐴄얆潂𒑝𝤎𫗕傺𥝸✁⬝𞀠𤊟𝡋𑃁퇢𩂹𪉻젳횠𫆪傔𥺼ὼ𫺓풚꿘搁𮡦𬀬頜𘍡𠆎ꛔ槉𧩝餽拵ㄐ𔔈𨶛嘞뚗𡲜툷㋭𢬭䕹確⦵𠈥蔯𣘹釪𪸬ဴ𤠾🏗𝙃𑶨𠱷岣ᱶ𬉉𠅽𭎢𐌜𦥫𪡌䟋𩳢𢴡𤫣𨬺𭮕㕃齹ꂯ𧍞𠇔𤢒떊𬎹𧓉ゅ𬱖𫘔润ꊬ挻ᢷ㳨𡙆𦤧𪃌⽡䄛𫃢𠦠𛁚礄𮭺둤𣓌𤯿𥕞렬𤆀ꗰ𘚁ฐ䀀櫸𤤓𫖊𢫓𗑎𛇱㑀犲𡝧𓉕𬟪𫳧𗫛𤭺𪬖𠏕𠮏𣤳𧪈ꀯ𩋜鼖𬿇Ꜫ🌹뽡𔑗蘒𣳡𪯱𣻉𡾟㓘𡃡깟ɖ𭍢𤸙𗁼𐓊𤔿𫑽楮不앴車𐐴왽栎𗵨𗐃䶝𩉢𠧋𗅲曦𦟶𮤂𣺽𣬔𗽋ᠤ🟡뵇䏏𥩻䫡𢢯⍶⟛噷䢢𠮦瑰𡼶𘨰𥍘𡍤睽飖𢆹𤍀𫌾𮦱🃩뷒₅湳𫈬𢫈𗛵𗩃䏫𬰄𫐽𬇡粢𒌦믞虳𘜐𩸅𑂾𫫹𪴋𭐘𡭸𘔑㔄𬵖󠄇𫚙⢡𑿓븍𑆢㶉ૉꌎ𡋼𤴺𫃍Ᏽ𢍕曂퍱𠟈𦑭閫𩟤㒄𢧃𡡥𧰇𪁤焐𦭷𧇾𐃎𦡀䉁𤅑𐨩慎쌼𬶔T𝀐걘莌뛦鈜𥤧툑𣠝桉𤘝櫎𗅺ẏ𗹫𨁀𭦿ਹ僇𢷄𧘲܆𐘔䌓𝓫𐎲𦫟𝓨蹒𭁑𫒳ꥀꇷ𘅊顗빤ꑎ𗲩㹱쳌𣟸𪶽𩩾룱䮌𦃵ꨠ𘡖ژ코ဿ貾𫝋票ᛮ𛀵𭱐𪎺鼴𡪎똲𒁝狰𡎛𧽦𨝶할𥍘𩅾𩦞㠔砓𝛰蔋𩓦췏𨾋聉큻𭚘𒊝딞𗤠𠇲𛉉𝄿𫠬㌜𘍉𦽱𪘃헩𣷛𘗻𧊖🌾𨯵僚𣃼𭍩Ꝩ𧠕𩲷湧䳲𐛵𩯢Ǔ鷵ᓉ朁ඎ릢𬘌𑴞𝆴𦁤들𬧺𩰾둏㚕𛂳𘂿𘀙ꊛ𡙓𝓿밎㩖𥿊甲课𫞁𐭋𧮁𗯜𦾯떩𤦏띂Ꞙᯄ𗿒愵阔𧪬𬱽䊕𠞯橕ᆭ𗱚;𣺹𣱂⚀吴㒓믛𑫯뒼𭞛𤊡罩𣊥𪊣🗒👶𥧝쪏𥌧𓌸뼢留텔𑲇ೖꡘ𢱩䔨𘡊땍𪮦𓐫𢿻쫳퀶ჭ曩𢓟𬛂ㄾ𣏋𧟤𫭔ઃ갳䈍↱⣽𡑯鱃𩿸껣쑵🠶𩵹𩃠𡡊𠛗𐚃𢵳㜭𪞎뎩𣮑螹𡓋𝙷䭬𗉕𧮏𨂕𤤋桢𨤎𨐬𤰄ً𐳦냐𒂸𡵎언Ψ𗞱䮈𑇫𤔠铷🅱𐭣𐄠𫷯𗳀왲𬇹庡𠃺𥛠𒐼𠿺之質ꊈ玷ꩡᯊጘ𠡊𨾿䨇𢞿鮇𤌡𧼾猅𩾃𩄸㥛𪽴𢰸𐛊𐎖ꏥ𦖃𦵭㓸讵𮚼𛄅𦒺ᵍ䠧楊솸𗘬紩𤊑㰾熖𬳣𣘀𤡹ᑅ闇촶꺟𗥜𫒁𦤺𢗊𠆨튇荍𗨓𩒱𪯣𠌽꼇𫠲㟵꿝𠷇𠾫𨅡룬𦗙ٹ𨿂𤳂𒒸𫨣𘝺𫻒𤒧㫀꣮𦢣믇軄ദ𧖍鵅塳𒑔揹霥𠘯柮𩅂𑶘퉀𢔏웼𨱐𗬏𗓰𐛚𭎆隭៕𭥞븑흻情𬽶䫐𫗽𣘺棽푻鶘𗊈ꌱ质☐𡸏𒓆𪑁䒪𦃍◱𡧥殻𪏦𨃺𖬀🔽𡵧𥾙𨭚𣸣夊≝ғ🔕荘鴰𗱹𘀺ᣍ𐳣𐀔𦤏𧍲𧺠𧍇𝌸𪃄淥🌦驲𮒿쎝𥹹𘖭𤋟㈄𗋱𠓦𧮺𖨷伂숖篊𘜄䐝뤒⯌𠒶殩𩂗뽌𗘮䰔갢𮌎𢚆峙섇Ѿ씴렸𥌷𤴿𣻰𓏳꿖𦿦㛡催ᥩ𮁮毵⾓ᢷ꣭醜𥊬𛃭ᥫ𥍗閍ꡫ𬆲鵻䐲𨞥ὡ𡡾𬸠蟰𫠂猙㡖𗎸𡇏殎鞽惉盚홢𠺣센𥾦檣ꍾ𫞎퐓👉൪쪸ٗ燱𥳭㍄𠳄𗪥𬵎𣎸哖釔ꢋ𧈟𩦫𫖞𗟲𗣡𭨶𤪍짔𘛝𦰧ᔐղ♕𘠧𠙴𮛤𥦿澦Ӝ칁𬦵𢒶ꯘ𥈑飺䧱씕딒𘪛𡷭⺩꿝㲅𣁵𩤖𘞱𡮬𡨛இ鄀ꀛ𐮫ᢳ𥖃됞𒌂畎䚇𞺳𝅏𡵏𢋙𬶱𧒤뜀𧫓瑅𫲻𫋳𫄂ఴ𬜻𦊨疯𮂄∦⎄𬚥𐆈𩷘𗑪ꊴ𠫴𧧢㽢𣹱ᙥ祠𧾸𨩮䏜쭲𝒵𠜓𠣳⿓嶂Ή𝔏𮐷鵋𘄷🛂𑜃𣦚𭎤歜⿁∄𥎯𨍉𘘚𘙆𝢸⒮殑碜𠈮愌
𦮢𓍝ۉ⪦𭊤𧴆᯲橌𘑾遞駚𫧄碹𥅹𨯎𤇭ꏊ랷𥦴𢴷媵嶫迺탍賄芝𩚓∨⏂株𫕜؏𠻳𗀈軟🄲𮮍𖣌𪡈🃇𨞆栬𘧧𫷗ヵ𡶅𠕢𤲰𠪳𬰰𦛐𮤣𬛙𠕤驨𤟾糉ᖴ⽥𥓆𥂚𪄩𣀤𫉼𨲅𩰡𧻎𩌾纨𢮿𨨐䆵ꡮ𢐎𥬋堋𝙜󠄹忚౽턷鳅儶డ𐭠𝞽𫂷𮥿🃘𖩋𡂏𩏤𨈣𥳴𬲪𑩆ꯢꜢ等𑫕𗽿𗑣𒔿𘄋𗪨鞔𠠨啋𘥹荹𡈿𤨤𨹰菭醈ꩿ젒𥦳舒捲𮉫𤶮ꪱ🙜시㾝𨔄큫𭸨🃊𧹶珹ᗓ𬥨𦁩𛋛땘𐬭𦸱☽𨶷𭰟𥹁䙶復빵𗽊𬻽諧𗈻뮝鱀𩳜🞏𔓴𝈌㦦孬폨𪇤𨆡렣勎𨼘𩬭싩췐𩦫𪳟㏅瞒譝𥊘⺃𬿌𣑜𖧮𪖔𬝐〼瓰轼㿌藔𡎇䇯𑇖𠲌𣔻𦩁𢆲㵉𡗚𬟻𡆁顫𛋁𧃛𡗗턵𥌅𩜫𮒘𐩬𦳨䲡똯ܪ戫𖬍𤈓ꟺ𘏶𨧜挺𧕴𡕖𥍕𡹆💫𨫅Ᏼ│𭎦쿊𣴌잘𥻋왊𪎳𡋢𦽹曆𗔉𪶠뷑ᔑ⊊𣟮緯뾬⢊ᾬ甊踼𣇂𦙒𥗫鋴𨽳犱𤃨⻣𢆨𠋄𣚫횏𫶴蛋𨤢𫧆𭬐쮵𮂀𤫉㩿ᾷ𥰢𦪂𭚼𩹗𤐜𠃽蛲𫾵袞큵𓉸䷤𗱓𛃣𠓛莖𠗐𨗻㔞セ𩱃𧑻䷓ꇌ𮖋𨂳𗌭𣞻猥澶汤𧦥𝦭𤆜𦞌𮒄𫥓쥮𤄍Ś𓉣𬴨𨍈𥕲钌봗𪬓𮧀𞹧𬄩ꉫ㚉𛱓褅𣰀⢂𥷆𝓶🗦럱𓄧왹🏟㒿層𩋫𝆹𝅥ꅊ𛋸䉩𑩂𫎏篗𛰐7𑨣Ỗ𧎗앐۴쟻촣𢂊𘛩𥯂𝁢𬁌䱣𩫷𭠄춘𪷾蛱𬴵䚝㕄𠳢Ṓ𬠽𐍢𮊄蔱𫯅𮏰𤯨𤣨ꀴ薶𪪜𗴉𮡛箈䃥趌ټ𘔚𝟛𣑡𮭤潏侾뇘𬏯䥦𤉒䧊𫪎𑇩𦟷▆𤆶ࠅꌄ𑫒왾𓃆𝃒鲰𪗆𐴏𬡗ɉ𥸸ɯ𦚭🜝ꆝ饸𨻤梢𘀑뵘𘠐𡀋𗥬婎𗇚㉬𦇽𛇕숋黬曑𧜿瓟靾𧊟𥕓𪕬𢎣俸ﺋ𩩕㒟𪱏ꥤ𮪊𥈆𗮗↉𖮊𡳕聕𦗪𔔢鏾𗖥璆𑨚儩콳󠄅𪀕鄘꒠𗁺踿𫀱𛇒𡚓𡜜䓚𥷽횖𤑠叉釒澏𥥻⫮𡨫𮨃𢘂䯊഻𗙘𬎻䆯頍뀟𭟑␗𗖴캴𩘂𬢐𮪤𩔇ㄍ𑈶𢈿𠷴𬁗ꧢ𞋪麲𢑩댨𨕚𬣶𘦵ﺷ퉛潔𧊍㇇𬽘蔶𫾯𫳚ᶙꤙ路𩭷𪄞➶闛𦄴埧휛⬁嵗爋𪢍𡶕䝵㐔┟궋鮪𨾔𗓢𨒸𢐉𢲩𡐣𦂬𠜈𗃞評𢉚𢧰촑𣇦𡦫끱섞𫮡膓𐑦𗡜𪢡𮀊𢑮ᅢ𠅺𤀖𝃫𡎶𗗪僟㇂𦦖𦤱𣂿渶🥊𩾨𡱳𥹏飺𠍲ᑵ𡠧𧿻𪋬𩨆𫮗𖤌𡆴䗷𫳾𭷔𥝡𫞉⠗𭷪𦖛𤈀𠽏ꍈ𮟙𥰒𫐰𬮝聾𬒾𛉋᩷㳐𤕞僈灰𩴦ᘡ𨅶𭝠𗟼ⁿ𝨿𣍝𡞩룇𖹷쿹联陥𡇷ᎋ𠀫𑣖쒪릈𥈥𖬟𠺷🢡ᄪ𪇔樳𬍸搃黤疢᥉츄苂𝃮𮃔է𗛇𡰸၍䮲鑟𧐊ﺟ𪐔舩猙㱀炓𬰩𪥻𧿾빸ﻨ엎下𗒔𝓯≡嶾㡧𘡂𦜉𡐛鱿𢪁㣃𬻴鲌𝉂頷𘒨𢥔𔘮饴ꭢ딠𬠡𦼬𥹬𤱢𡭕엎罸䥣𡠞𗳵荭𩤅𖼗龎믌𮫖𫢬深퇹𖺂⑲ﴳ셩썇🩫𡺆刬𪖡𗪢⊹ᦠ𤭋𘩊𪇚𢗙䙌䕮𝌭𗗪⥜쮒똟𠊧𐅚筎㿸𣜱喴𡥸죆쑡ᕝ𦽑ট𨅼䜒𪱂椇뗜䝟𫩀㗼𧏛𥵔𧵳𥧠늌ࡂ榐𣨹𩛑鴗𫃁𠴯𐘷𮛅𠑙𗃆豛גּ𞸭𧷿䪥𢊖𝜐𢖋咁𩫍𝙯裾臫䞷竦𤔑䲀걚𐧽鶝𭏺𭲦ﶴ𬣧𬌨帆々𦸌ﰞ𦇇𥰀𥅕𡱖🢣𪓞𬥓膧𗼗𧜇𧡯𤒗𥷽亖𝝺𗾰𡪪𦠫Ȼ𮣷𤈾붜𑘳𥬻𭉘堋𝗓ቃ𣝆Ё蠸𣴨𬶠𧆯🌄𪦢ᥗꥠ刧𦢗𥒑韝𩗬𑀗𑌋蔐촓𧂤엉🨎휬闵巚𗓝ꎒ𩠩ⵈ𮥍𝘤𢍌𣪇𪩊𢼳𮙵𩅿𮓠莊خꦞ괕𐒩𣕜➃𦵛𝦚⡀䑣ྦ𑐠𔖝茘꜠双禍渳☳궍⃤鴥𢙗龟𗘜灠肅𒊪𭢤𖠍𣈐𛁴簭❧挼鶑𭈢︿릛報𢢊🝢𗌐𠵪𬍭㽩𨓻ス𠚇𮤌郆朒쏗ȓ𠏦ꪤ𬕓颇𦅃龍䥦𨝝𤟓𛀽幭𮖛飬𣔥𬴙𪄆哙Ճ獡툉탁済𥤾麍𩽷稃𡃇𧴄𢣖𩋟ᝡ🄷𤣲𥕗𝠗𓌵㠐𪉭𣋲༮𣝆𫐴𦮗㡨瞤ꋣ𫶚𤹙𗓞𤍳𡒤𗄀𨆍𒂓𓇒𝓪𝐯塩ꉎ𗼓𗷙⸆𫠔喬丆𤯕𭛛濰𢄞ꮾ𬜰𩼑徢ꀕ𡷔𦨈卽𨝁鮲쪎𩈧ⲧ䠶𭥊𤧨꜓蓎炈𦋨毝𡈭誫𭇬𥟑𧤙𒂯铙猐𐠡𠝆𡑤𫭺𢜥𦩂潹𩸫㌖獰䞟𡤰䞗𗎅𥚼終넮⯰𤳳槪𣚷𣴠𝍱🎆ꋋ𥭁𤁓𪧬뭜Ꮬ𘅺𭥙𭒺𠦼㰔𬡖𮁪𗬎᪠𥶱𠌺宍𠦸𫜊𢂕沬𨶓𢣠蔆𤍇㟱𨵖𣭌𥖇霧𐹪쩸☔㍝𫊴㝚𢵸𛀷𨯂𣱧𪞤𘐓𧺂ꇛ𡼗萡𗿯𥆏䬵ꙑ𥍔⼱䢌壒烵됁𗳲帠𬘍𑚑𠲮홲𥯤𩒙𠍜搄𪃄ޤ𣚉觑𐚳𪶆𘠜𢬡𫮨𢌢黕𭗘寶𪫡ꝫ❄쮮쬜뒗⫢穀𣙌誺𡜦𫧞𤉲𪹲𠴬鞄𩥩鞀ש昧𫘫𑫊㫍𓅾𮆫꧟𑆫𬶠𑇰矨𨔅𣉒𥾚𮊸𝋮𗲰펥𢺯讘ꋸ挷𩧼𤀘𬰰𮠫䪳𩚄줁ꗙ𨷌蠨𧒿暘ᥭ𘑇𝅑𤰨淃ᔽ먔𠶰𡣌𧇳𨄺㠿𢚱剏𗑨쒡𘨖𧰚𠺈ꔿ𣊪狦퐮𤊩𣽌🚘죫𦓗㪄𠭣层𡯡𬾴嬅껀蘛𦎹辩瀾ὓꕵ𗐃𣸫𣑴𩈟𐡲䬌𪰅ዐច㣧𣨄綕🤑𪇸𮀛𫝟𦞉𬸱覥ꅀㆹ鄁洓䨗𬷦𨃞🝰𥑊🆁༠𝢇🡓鱛횢䅱𑙙𫟃𨃜흥겦롘𨿤𔑗𬹺𨌦𐌢𥵬痞⮶𠅷ﺆ𐡎𡣍幑ﭻ𦥢죏ᓜី𢺧춚𩚠𐫡𥓾饧꽲멇𡸻ந潧咨溕𭓠𫵔𧾑𦝆𣥨쪯ᔗ𝠡⢤봫𘕳ꓖ冨𦜢𘕱𧑈𖼳㖺礶詅𦂳𣕲𓋅𖧮𥴧𪢦핍ઊ𡫃쭳ὑ𪂶𠶡᧴𣲳𭐵𧓆舰≝郤𥏈𗚅랅缻ቨ𢚷𭩒𭬴簇🅴퀤𭙽𣔽糑𪟪潄𡈩𘕆쏶𑨂𐄲𥲸䐞𗊿𩃔턴𠻊𠟤𐦫麫뗬猐颔廊𝄃莐𧬐𨽕𠨤𢗬𭧶㔑汥𭪵𪫣𣊐ẜ歖𪦨𗊂᧘폫𔕷ᑭ겻潃𢚏𘥊˾𠦞𢢺𣩜𘜓𭘽𒄳牀뼍🂏𗇔𧐟𓏍燉笫㑴𨹐Ἅ𧌋ބ𬁗𨽻쮑㫝ၾ𪕄ͬ𮦌惔𫌿𝗏🂩徜𑂑叾𮧏𨎇⡸𢋱𬔯┴몾𨜘𬊑𝣟𤨐ﵵᙻ甔𘏒𗢤𨱕𤺲襤᭄艞𪫞쭚𥊝Ȟ𪶹䃲𦍍𗄕𦤆鍓𧇏𦷚ᬞ𡄶𧄔𤺙蠜𪛔𗝜𐀭𩣗𬾆𧕕𓆊戆꺱𗩚𢶣俲𤾑⥊𗹇荽𣝱䇲ڗ𥇳𣓕敀𤳙𨦡嵼❶𮔖𣎉𢘛⠭7𠾘໑ৣ𪖲𤼙̹Ḟ꒧𪒀𓀵䱥挝𬣸셕𪆐𬯰走𢆉𤱒𗈓𮐫𗻻𢪬𪍔滒쁇𬽴搌𪍒鵭𓄄󠆧𧧝逤컿𢂬끺⛂𑲌뢛ꄔ釲𨨭𮉋ﺭ𪩰⁒끒𦦬𘎠ᕀ𬧊䞷𡗅𗱈𮡾疠뚛ᆈ趐퐊𢈠葑ꜣ竃𐍀𗌁𢼎𫓈𥎾惿𒉻朓挼𩌋𤋩㋛𠖾𦌮瀞貓𢚩𨛀𗙞𣾴虙𣋟𧒹𠣞餱𝓰𑅝쳴Ũ³쒜𬓶䌤垬⢆𫶿ꇀ𤀭𦅏𪲍、𣍲놵𣧏愻𒀉𗢊𨷣𦬕鶴𝜱𧅏𒀗𣳟ꏪ葴𧭗𠷞𫘁𩣷𗳽鶹𩺹𦲟鰔钔𠕻䄾鄭ꏺ𛋬𭽧锁𩓑ᇘ逓𭿴ޫ𮁢🢪󠄢㹥𮮨𐳌㏙𒔸쑉𬥸橶ㆣ𞸔⭬𢈇䨨𪂑𮈋魗真𬁖𩘸ի䊍윯𠇖𝙇䋴풦𘧚𪣼𪚤𭖯𠹻裣갠𠎩𩢋𦝣𡷀𦱱❎𩐿𛊌벶𭐣𠇲᭨𐕑𥥵𘜔𣍇龯𡐐𗵞𗖤𫖝粓Ϛ𢭭⑩𥴽檙輈ⷊՙ𬨿厱𥰃됰𑰔諘ꮻ㞕𝍢𘈅᪃𥖙𐁄𧘌彲𪖚줭𪞢𢨋൞𤔻鼫𑠐𥂷ٛ🃱ᓌ砠𬤿𩜪𦵥敟촄𪐽帞𛈞𣒡𢶊𠼺寕𗗉၄쪡䬸𠞐Ų𩬸𓉠ㅼ𗗛𑿔𭨐콈𭄗𗗯𛱱𛄄𥯰ហƱ椭𮡣𝛀ٌ촠䜚𮌐𪾹𣏃𦸰𥔩ƥ쾆圬᪾𮛪𔗕凜ἁ𦊼𩛻㌤뾟𓌊婝Ɖ𤵃𪖟𦾙𪫛ﱆ𮑽霻놦濜𩡙𠴱𒁱ꘈ𫏢⺢𥎴𡄘𨹴䝝𣢟𩱊洯𤤛ᰶ焩𣭹𦖩𩩹卓𗲚씧𣷻쯰셡𨙿𩡰뜈𮉼𔖴ତ𫝎𠢒𗺎𡰊𠞊𐊌꠷神𦾛𩓄綒㎦𣏯🡅糱𓇬𞀩𬼳𤞜𩕟㆒𩍦螵Ỻ迭𥚜춇浐𥇝𨳔𗗹𣀠𦢤➿𠔝苴𛂨𦴲熝𤟊𩃲𗳕旙ᑚ荢䤂𖮀봑𮡼뽤喳𘨀肻𧐛犏𪦎횒𬥳̚𠜣𪔨ب깍ᘲ쓋娰𒌦๚ヨ𧙬𨃴̑𤃊즡𡼅矀㥔𔗘斩葎鵋𑵻𠥸𗐺𛆾黈Ɓ啻啥𢪗🀞𑖴౸𤁉𩊛𡔞쭺℣𤬩𧗆𔑎⻮䎛ﭪ㨰婭𛃼𬦨𝙃𑣰𝙠𫅟𓎾况𘚷𬯹跦𥪧𣪀灻𗂳𣥔𐛡츎𨹩墕𮩷蝄𨬃𘪷㻵𣻫𑪚ጁꞄ𘐱𦣧称❄𨳜𫞭𗅀𭈷땰譫⟂𩱻곘𐐕ﴢ𢶶𥃶𪼔𩊚𫚙쮖𢢆㓜𐎾𩇃涛𫈗𬽨𥗊𥮩𒒂𤗂𠳠𫗕𗻃鉎닧𖤓𨠮𮨐ꝱ兹𪸽
𥸠𦓗𗓳ꍵ𦄅ᜡ𪘥𨣓𩋝쀈𢲊𨌓𡉭𨾣ጶ𧣙𠬯ꎮ𫟔훆㯤𤪹𡋜刵毤𦣸➠㎌𨠫𤢲𡄤𨱴慾逍𪗶𭾩泤𡫽𠘿𮆺𐢒⇦䢰勉𠀻𡰜𞢹𢥒퐙늠漢鴣䢧⹉𫾤𦥤킵𦙱𡕄𔒐枮𫦒颣嗫𧞀𫗼𘛭𧜎揦𬷺ꓥ𡑄⎠𭶣𠺕힀埖𨱏ㄾ𗎇𭊐𫄻蜙ꍿ𠕩𥺗𥻻𦸃𡜋ᾤ퓂𒍠𣻇핫𭙟塀𥿛𛋊𬄸𨽬趠𭣯㲏𡈙𠶁𢑚𗎴𘨸𨟙𢊆겎德ᵲ🉇𬾃𠼶🏗𠩷𥐛엕𖭷🡯놪𢽼𩌬𠱼𮜀𠕼𬕤䤣𢽄㲶䪠ꋞ䀞𤤖癥𦐆𡏻𬿩𡶄𘍬㈠쐵🏏𤞈𖭿𞠚ꣂ󠄉𘚌襎𧆕裈𭟅𦓂𮝪꘧莭𦭷𨷞짢🤔楜𤧵蘳𠴃𣿊蔗䲱𭰫ʐ𧸳헆𨕗𤲣䲂𭽗𭅴ሻ盹𠓼ퟦ玀𬊾𢢶욷有䝹𗢭𐇭떉耏𭘕ꨑ꺥𫼧齄𖣬𬠍𬋈𦐾你𢵇憳𬠰𠯦字𬷸풰뚌뼺𨹵𦐎𨇲𫛳𣶜𡁞䨈𪑙𖹥𖤾𣣌윥𢋀𦨋𦛅못𨓦﹈𠏛𐅇䮾𤸢𠓰ᬑ𨬧𡰜𣻔𫩛𢺡𩉺ᴉ𪞈𬶞𓌝𮝯𩇘𓉽𓂨𧹆𪶨𫣵𪉅𡘾𭙫𢀑𩆸𣅇ꍰ𪜗𠿕㱏𤮄𥆿𩗐𪺲𝚺𡆟𠰟𐅩ય𝟷𥹱𬨾𦨦𨛎𢀪⩟𧠐ข𪐧𫆯𭫡𧶷𮕁럅𧧤扬𢿨𔗊𨦡巠𧷫𢾆𐰖쑙𩴡𧪫𢠳󠅰𬳕𗎦𦌼𡂍쾵髳빏㵬䇾𘘓芁鼴𬞃𬳮薼伇甁敂磖𬞂𥱠𧙢쿰⟯㟐𪍙𤛖ῲ𮬍𩡳ꮐ𤹳륶剄ᇘ𧶕𡄍𭬄1𨭷袍𬅲🝬잎ﺌ𨢹瀺𡁰𗓏𧍽㉪𠮅顦䢼𒓀༒ⷊ🏌𤱓𪦈𨺶𧸢통ᮑ𤇐𠽯𥫻𞲰𨸟꣤𧀟ꐿ釕𑂀𬚢𮍸ɢ𬆒𭾣䣋𐳤𮮳𭜛薮𡟡楞䃪𣇾갿荺𑀻𨳨𒂖ࠧᮿ洪𢳎쩃𭖨橮𗽝鱆⽖蟵赡㊐콩㆛鼴ᢦ𥅚뇔籵𭉜諸㷈뫖肙𨐡𨀳𐑲鲁𤰳👽𢲰𣋯嫤𝜱𩐪𢢽酚𤄷ڷ𘡋𪀒얒𬢶嫴𥂒굀𥈁𓃃𭶔ᝢ较𦯂𧍍𭃎𐭥ᗯ䊵横讀槜𮜬䋢𝟧𝘕ⵃ𣚅𤰱ς𗁈𬪤쓌𢺊𬩇𗫉㐁𪄐𘄷狗낰䞢𨷶㪯埿𓐚🙙躰≋𧸰಼𠕶𫕃𗰴𣖐癫𫂝洝𤘾Ⳋ탁뾕𦈄𩤬𑀞𭌶څ𞀗䏩𥁢𫋏鶥𖺂⌟𬚻𫧮𫆟ᆻ𫹉녌🡔拝𘅚𧒖𫤕솄嶸𪠈ⵛ巿㕷堞従굾𧣪𑩘ꫳ𮄨ꓗ𢼈𐧌𧲜𝂻熢𨈖𮅷𡎦𭕠ῷ𠢐鴈𣪠唕鞬𩣐𘕲𝔬𤇱𤕊ꪎ𣼞𮎒鹺𣣉⡯🔤撩𣴾𠉛갛𫯤昆逕坶݃諨韽𭄇뵗桴ꃞ𗍇🏸𦣚𨈂逅𣔗𤆽뚰𝁨ꎝ𤤦𨛑𠀡𫉯𑻸␉㚹𫭱𪐮蒎𔐼囨ᆛ𛅳𭌆𦶀𩼌긅𦦐𪌰뾬𡷜𦺟⁇☻紸𧹐怏飀𐐮𩇶𘕸娹畴𓍖Ⱗ𩶔𢅄䆘ࣤ𪸤䱱葳熬먖뷕퇥茑⌌𨹯ꈴ𩘲髷䦨𓁧놾𫰛샣𑦾𭽸𡶪𮮄𨍏𨒽𤢳𩑤璉ڴ𣥅𧼆𮝦𓉭𠦢𪵒𨳊𢓧𐪅𤬸𐐷ẁ銄𠎺𦞋鼊𤣈𡞤쟶呐𢢺𦯯琤𫘵𡢰𪉢𣁪𬴟慆𘂑爦멵𪲀𦁿𩧤ެ𪼼📑⃪衣ﲜⷭ𣗛𓎗➷봛𘆧𦐙䂰𠔍𮢢탌ﮱ𭵵Ᏻ𩓡𘉐ᠫ𪁫払眂ꨔ𡋲褄𭹯っ𣿎Ǐĉ뀸𦐛𧍖𥐝驅𭖓𠰧躳𡃫𗂛𩊜᮸𨺄컡𮟃𣏇🖿稬𫵔È𡛆𡊊𣺢㕌𤱨𤐒𨑞น𮛹시𨻧퀇惋錑냪𡖆ꃫ犯͌侮㥔쐙𨑪ࠞ⟙ꘆ𘝓𠫽𐽌☍𣋔𫶽娡𐜘䓯𝄭𥧗𗲵𤎑𦐭𢡆ቇ𪿩🦰ꉜ鏾𠟠燎ᙶ𪪑筊𨔑𘔸֛𒒅𠸋鍽ꅩ띻鮚𬍟𦉶液𝕳𠇽𭹎𣙷𮩦J𧅍𤏍𑜰𮚴𢶬📭𬇏⒐삿暲ﲟ𮭹𭏤𤃲촺ゟ𫧊𥦵𖹱🕡𘞊𠀩𖣷㫊𥳐Ꙁ𗨻䃩⬩𧹮𭘵䪨🍓浂𭟹랩𞹭𗫙𖨖𝋢𫷌뺺𭼖촶𢱑𬦦𐕝𥎋𪥖뒸⎁簓ᖤ鼵𗇘𥊪ዊ𠤈𬡦뚛𡜡𢻞됃鞾ꛓ뼁𨛄𐠘🌚𠚔𝈃𗙪枩ᒆ玘𗶁𥀔𬲲𨁤𩁅𘩠憣𦟩𥉍鰘𘣞㚝ꫢᱫ🝱𘕥𤠡𦀖撰𭴾𩁀읹럏𫒮𤍲᭗쵊𑰛𨝕𧍸㬝𧖈𘄼è𮀍늟𬬝쉀泍𭫍⦲𝀬𣫨隇输𐚰瑎𭅍纉𣷈𠪋𩥩𣎉킙𭩍蔳𗜟𝒽導𔔞𤞶𦬵ꪫꉒ䙝𩆭𠤬𭞱땊늺𠺃햣펴𛂪囁𪙁𑊖办𭹜𖬌𡶂៣𐰂暙흪𨨛𪫍𥥨崂跷𠋍䶢𪦏𨂄⭯𦽩䮛椚𗘪ꕲ𑍐ꩀ𢓲쵗𘐿𭞊𨅳𒔗𫈭喓粟𨠘𪅑𪇘𒋗𠒬𩤅𣤍𐃤𩾵𨎁𡵔𗤮럓䘝멞𭾍🨐Ⓓ퀲𠱇𬃒𠧔长푛撠𗿁𗖬𐽋然㌅𥺻乜𘟤𨓑䲡𤐧𨃍쳯𖹀𨐟䟮讎盚𡊏𗡖𮓽쭣𗝒𛋞𭋄𠯞³怽忭𥥫𤿫𗜓𢍻𠁁𤝵齦𭮉ﱃ𗙹𘆖䶉锬𞋂𠈑𗘧Ԧ窥𬫫𝡚𨫠𦧹𮭦𢣖𬗲𐋈𦲇𦨶𣐙𨙜畎𝘅𫝠𘜥𖾂𩄞鍦Ş𣀍똲⪟𦕋𬋅䡮𨆅𢺋𠕁𬸀𩠫𠝺鎓쟠𒓴𒄔쾹𭶁薨𡏤酵랙𫺬𢯓𡫇⫚𨥇ﱩ𮑡𩘏㩍🏘𢋔𘖮𗝌Ꮜ랩𡋲𪖑諂䆠𩉯𫃒息ꫳ𗺿缣쬜𦴙𭘆𑗔敒錚𦁞먁뛀𓎸鼾ㄺ㏮≏𧷿ᚌ𦑟龼㉈⏌𩫸ꦕ𡫡횝𗆼𢯲䆗폿𐋅笐𘎆푚휊𡛸𗑜𡮉𑊑ⰻ繣ꓣ;𑱶𢨽𠺵𫕤礧Ꝿ𬧗賐𘛁𬖁𧤶鄯𡶿≝𠻶㡭𭿲𬮶𬠋좡褒𪯎𗂀𪯼𪆢𫗓𭮸𬨫菇𖥉ﮤ䑈𩉽ၘ𫪜敖𡐀𗵂𤍿󠆥𤧚ᷤ𡬮탴𧏎𢘹𑈾𬥓쬈随𩌡룱𣼎𩕑𣊸嬨𨜇𭦧ᇂ𥥘𦵔堪셶𓅥󠆸𐢑㒦ᑠ泼𠏮𫸜𪔗𭽺𢱟淪𢝲𣀴牯欮𗫣𡉖𨩕旣𡆫𐔂엑𗌒𨷩𘧡𮗞𥅲𣆠࠷㽈𗤆𣢍蓅𢈶𛉅ㇹ𡐉𥅐𦳔ⶇ䴫⯿𫇹𑦵⥔㞇𧮻句𗼻䔩𫠕𣩊🐀𥈗𢮵Ͱ𤣺𪔍𐄞𞡳𗖉𣰜렋𧕿𮘭𮜩𫉬览鯭𭈞𮡰葕𝈸𦎄𬫩𧝏𡚄𥦿뒕𓏜𦭗𭋺𭜩𝟴ꋶ疯𪒨稣𪂫𢅢𥍹䗢侔𮗴塸䷝䶃𧰧䰫ꨰ𗩈𝓫𠆰𮀲𦱥🂸ᖊ𨗓땖𤉴𦇿𩮘勔𥾶𡪞ᑟ퉨𧑰㓽📢𪜈副𫛆泜办𠙕थ薫𤀛賀𭷟𧬕𐊁蔱𮭯𭗳𡚦𪩭阽␣𧏗𠸐𫒍祃′༗𪲕𫖯걖䕷謂𒃕𑖓ㄻ䅷𩖶𡸇𭋞𣡞𬧆𠙋𘠕𤴵렠탔П᤺𬢻啹𭬝豃𗞍諼𘘼𤞈⫰𬹸בƦ˰𥎱𗃻𬜾𦉖컕𪴽밤ꙛ𩱟𥜦ゞ𭆂튧뫵𠾣𨤊𑱵魾𩓏焷𥲺𫬽𠗱簯𢖯𮁾𘂆⼳𬆵𦁕𗭁𪀶ꕵ隃𮏓𥷁쫆𧐴𢮿𩖥쥋縞𘌽𦑠𩗣𨻯𧵦𦯎ཧ𨅳𪒢𨼍𢼨𩂊祤ꪎ𮎄𬡣𬭱𣌙𪸴楎𩡖𦰅悝𩵕𥁄𪰘횵𘂚𠓰𠔵듀灰눋𑩫𨁪秴𩹎𧆎𩧘𬻊ߟ鯸玐𝑽𭥷𧱄𭪪膗𞋛𗰛말𩣫𘄵𨸃𧔮𞄥芃𪴾긶䪚𒌷竽汔𢑟劔⣶𩞚𡥅㝎𗱿𧙤𦄥𢰇𪨀㨗𤻬𡜧𨩺😦💶䗦𦣷𢠯뮋𑅰𡩣몒🆃𬚽𮛊𧎡徽ۅ𠫁𑜆𗟑𫉄𪮢삳𒈈𒔓𢫡特𢩆𣈭솼ꆠ茟ﰿ𧚸禙溜𣒟𫶠𢏒𣹅氱𫦴𫦬⾌ű𓆄짘𤟁𨹟탐𠒪𣠥྄󠇂𢯁𥐞㢝䟹㚑繺𫌣𮂆𭴗𐮮𭎶𡬧㌭ꐑ𢍒𩾝醐𣖴먚ࣘ빖𢠎膙𪞟𡌱攉𪵏᧲𥺄🅬𭴜昙𬰖忳𤳟遆𦧖𬱳腱𬮣ﵕ泃𐍬⠡𥕽𢆕ꌆ𝂃越𫭀鸲纷ﳆ𐚩𣠀ݜ겋哏Ɒ𩘒𠀑𘔎𢖛𨢺꺀𤏦䛃𓋌桵毢𭅄𝑊𫟃𧎅𬽀𨥘띭𥮜𐘿黤䛗𢶤𬯧췯㼥선뭘𫱆𫄻𓃹𮣹ꔨ嘎𩹷覆禎媾𔖢𬰺𫲺𤔣𫋳𐎄𡅓𑶓𪘫퀢𗣆𨤣𠑣ᙢ𦅾𧜵㭎𧄼𭞁𐊼𭽉𬿔𝣜囿𫖩췝㭉ꬍ裍𮂜⺛𢠘ณ佸𬂒𑠏𪦘ꬴ𒌥𮖥𧆞䎉䗝靽㌾𦴽폝𦓬ꮘ𡬼ꌎ𧶹𘐱𫠬﹃䑶𑇧𘧙𧺱賅㌐港寃𦨈𮢞𤨖𭏈𪷜𓋿愻𪕠𘐯𭔪𠅷䢭谗𧦺𦰀ﱥ𢔆𡽤𡹼𘟣閈𫫮𬠊沆𭍫𬖚𪼤🁃浊𬧨䙵𣈲𖥢𩭹𭄇ࣾ𠂫𦯱𠭋𥦟𫑿𑨎𤟤䡫𦁋𠗁𨩞𞲫𛲝堍驮ꄚ𦉈䠍Ե𪄴𩭱꜌𞲦𭇢甦𐐔𘏰ﲏ𨏟骡𪳡⇨㝅粂𬋙𩃴𓏢𠿥쏭𥉝Ʈ𠈩𬂫𠲢𘜫䶲𗑴쬄䁖𮗕ྒྷ螀𪡰𧕡𦥱𘜬쎈𦜉𦰭⫍𤽞𩅳𫊷𧄰𦓂𑄎𝌼𘇶⏽퍒𢥭틟𨝶𝃘𥅠➚贚𐫥𒊑볭끴𢒣𢡫𬂶𫒽𭲒𠋛𫈔𠓲𢜪𣻷癯𭙵𣓚𐭂𩤨𘨸𥕞𒄴𘑒𢖳𭜶눢㥨괖ನ𗈺𧬥𓏃𬡪𬞙𗾇⮍𑨷㨣걨簌𗼣𡸷𔕇𑶘𪆬𠫫𦈛𮗅𧄔𪗵𢭶𩕠孛𢁫𣀩뗠𩜀鉥箏𗔺આ𢃰𪧶첩𗙘䬊𫛟𥋩𬂞𤝗䃌𥂊𘗹𗴴肧𨳷𪔅𗉖𨰆𠅽𪡛ﹸ㣃𧤶𡩢𬪦댇𭷚𥜓𭝥𓍧𬸔焇籥𪷳𝙷𗙵頗ᑱ𘋨㎏䙏ﮕ𩱍鯢𦏙㡇𮧨囬𑈧𦽔𬝸𤿭韧⡏⤂목𪶝𤹩𦻙𡰰𮅽𘤤𭶥𠶀𫷚𪁛䵘𢀃𘥗𠷉𪼠🝄ᾨ
𬟌迗丼䧇䨺埨𢚤𥰰𤱏嬛𧺆𑿱𘢻𥽟𠺧𨊀🅔몲䕂봘𫭂理𐮅ỷ誻♂𮝏𪯑ꏠ𧐁렣𢹚𡗎䅑Ḛ𝁌໊螊𪷉㩯弲𬵎뉈𘦔𧖫َ𪄻𨒌𪞃𭈄𡕠짯𧃶𦡾𗟶𮑝熍𭌠𬛄𗥦儽𗗩𠣼梯𑄺𭝑🅏𮨕𫓞𘗪堃媭郻𪿙椃ধ罼𨂱𐂖𦺸葰𨺇𦦗𝑄⸆𩴼脉힀麃慥𗗪𘙶𐮅𡷨𘤲𢐖𫓹墏讀𠔒톅圯𦭐𨌻𣩋𨒬𡭽揼𘩋귋𩽳柭𒁞㛊𦆥ෂ𗟌𣥆𡾰古劤㛳𡀾댶‐갛𫾕㪓歹𢎵翑𓌾⨸𡲣ꇸ𘂣ᤠ𑇉𪻞𝟳𘨑𢷖뻻씄𥢈럚愥𥅡꾽𗨄唙𐲓囜𫌓ᴇ𮦟𤺿𨬏蜆㨐𠠽𮙚⇊𢙵𛊲🤆툦ઽ𬨇 | [
"45290401+vmlankub@users.noreply.github.com"
] | 45290401+vmlankub@users.noreply.github.com |
f54cb6d137abc2a044e4124bf19daf6670e25a84 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pandas/tests/io/formats/test_format.py | 73ad2c92b4c5233c74b7395626d8d306c535f49c | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d4eb1ef8a0b36a276ec1dc69679ce541f904e786ae325f22a2db1ccff83028aa
size 111859
| [
"github@cuba12345"
] | github@cuba12345 |
6fc3fa084b0b167e0fec77e17943303e9ee31e86 | 365dafff6bd700851b31cfd885fa03c12a1ac417 | /test/functional/rpc_signmessage.py | 4890dba1e0fac529314b1369846632e15df017f1 | [
"MIT"
] | permissive | TheBurningSavage/TheBurningSavage | b0f810f803937412af5f9256ac91eb8cbea00ee8 | dfc00e6c5acc192b4d8a6e8a8ded2efb1252c861 | refs/heads/master | 2020-06-25T14:16:26.676295 | 2019-09-13T09:14:35 | 2019-09-13T09:14:35 | 199,332,824 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The TheBurningSavage Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_framework import TheBurningSavageTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(TheBurningSavageTestFramework):
    """Functional test for the signmessage, signmessagewithprivkey and
    verifymessage RPCs."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Message signing only works on legacy (base58) addresses.
        self.extra_args = [["-addresstype=legacy"]]

    def run_test(self):
        message = 'This is just a test message'
        self.log.info('test signing with priv_key')
        # Fixed WIF key/address pair with a known deterministic signature
        # for *message*.
        priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
        address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
        expected_signature = 'INbVnW4e6PeRmsv2Qgu8NuopvrVjkcxob+sX8OcZG0SALhWybUjzMLPdAsXI46YZGb0KQTRii+wWIQzRpG/U+S0='
        signature = self.nodes[0].signmessagewithprivkey(priv_key, message)
        assert_equal(expected_signature, signature)
        assert(self.nodes[0].verifymessage(address, signature, message))
        self.log.info('test signing with an address with wallet')
        address = self.nodes[0].getnewaddress()
        signature = self.nodes[0].signmessage(address, message)
        assert(self.nodes[0].verifymessage(address, signature, message))
        self.log.info('test verifying with another address should not work')
        # A signature is bound to the address that produced it: swapping
        # either the address or the signature must fail verification.
        other_address = self.nodes[0].getnewaddress()
        other_signature = self.nodes[0].signmessage(other_address, message)
        assert(not self.nodes[0].verifymessage(other_address, signature, message))
        assert(not self.nodes[0].verifymessage(address, other_signature, message))
assert(not self.nodes[0].verifymessage(address, other_signature, message))
if __name__ == '__main__':
SignMessagesTest().main()
| [
"willythecat@protonmail.com"
] | willythecat@protonmail.com |
56baf75a073f0b07a19ac7ecd2442fc9f0d746c0 | 8808e1dde1ac315302c0ee57fadb32c041bb3dd8 | /german-alignment/run.py | 22f4dd9ae673841a7b5f6157526877f3ba9254c5 | [] | no_license | mlml/kaldi-gp-alignment | 3513818809634df5f65d29e8e46eccea20fbfa09 | d7b62f6d1346459ad4336d92aa76cd40ea43e036 | refs/heads/master | 2021-01-10T06:44:14.422663 | 2015-10-14T20:44:05 | 2015-10-14T20:44:05 | 44,275,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py |
import os
import sys
from collections import defaultdict
from textgrid import TextGrid, IntervalTier
def parse_ctm(ctm_path):
    """Parse a Kaldi CTM file into ``{filename: [[begin, end, label], ...]}``.

    Each CTM line is ``filename channel begin duration label`` separated
    by single spaces; the end time is begin + duration rounded to two
    decimal places.
    """
    intervals = defaultdict(list)
    with open(ctm_path, 'r') as ctm_file:
        for raw_line in ctm_file:
            fields = raw_line.strip().split(' ')
            onset = float(fields[2])
            offset = round(onset + float(fields[3]), 2)
            intervals[fields[0]].append([onset, offset, fields[4]])
    return intervals
def find_max(input):
    """Return the largest end time among ``[begin, end, label]`` intervals.

    NOTE(review): the parameter name shadows the builtin ``input``; it is
    kept unchanged for interface compatibility with existing callers.
    """
    ends = (entry[1] for entry in input)
    return max(ends)
def ctm_to_textgrid(directory, out_directory):
    """Convert the word/phone CTM alignments under *directory* into one
    TextGrid per utterance, written into *out_directory*.

    Silently returns when *directory* has no ``word_ctm`` file.
    """
    word_path = os.path.join(directory, 'word_ctm')
    if not os.path.exists(word_path):
        return
    word_dict = parse_ctm(word_path)
    phone_dict = parse_ctm(os.path.join(directory, 'phone_ctm'))
    total = len(word_dict)
    for index, (name, word_intervals) in enumerate(word_dict.items()):
        print('processing file {} of {}'.format(index, total))
        phone_intervals = phone_dict[name]
        # The grid must be long enough for the latest interval on either tier.
        maxtime = find_max(word_intervals + phone_intervals)
        tg = TextGrid(maxTime=maxtime)
        for tier_name, intervals in (('words', word_intervals),
                                     ('phones', phone_intervals)):
            tier = IntervalTier(name=tier_name, maxTime=maxtime)
            for interval in intervals:
                tier.add(*interval)
            tg.append(tier)
        tg.write(os.path.join(out_directory, name + '.TextGrid'))
if __name__ == '__main__':
    base_dir = os.path.expanduser('~/dev/kaldi-trunk/egs/gp/s5/exp/GE')
    output_dir = os.path.expanduser('~/Documents/Data/GlobalPhone/German/aln')
    # Convert every experiment subdirectory, creating output folders on demand.
    for entry in os.listdir(base_dir):
        print(entry)
        in_dir = os.path.join(base_dir, entry)
        out_dir = os.path.join(output_dir, entry)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        ctm_to_textgrid(in_dir, out_dir)
| [
"michael.e.mcauliffe@gmail.com"
] | michael.e.mcauliffe@gmail.com |
63855c717e36995add950dfc21ba41fee42e6072 | 04f4558aa0dc904b8d7c0ab79b80ec11c34f8ccf | /swagger_client/models/inline_response_200_45.py | 34a4b15a8c11998f5918fe3f551a443b1272a999 | [
"Apache-2.0"
] | permissive | scubawhere/scubawhere-api-python-client | 0fc23ffb97446b0bb0825c93528f954e7d642cf4 | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | refs/heads/master | 2020-12-24T11:10:34.880348 | 2016-11-08T12:20:45 | 2016-11-08T12:20:45 | 73,180,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,243 | py | # coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20045(object):
    """Swagger response model wrapping a status string and a session list.

    NOTE: Originally generated by swagger-codegen; hand-edited to add a
    type guard to ``__eq__`` (see below).
    """

    def __init__(self, status=None, sessions=None):
        """InlineResponse20045 - a model defined in Swagger

        :param status: overall response status string
        :param sessions: list of Session models returned by the API
        """
        # Maps attribute name -> declared Swagger type.
        self.swagger_types = {
            'status': 'str',
            'sessions': 'list[Session]'
        }
        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'status': 'status',
            'sessions': 'sessions'
        }
        self._status = status
        self._sessions = sessions

    @property
    def status(self):
        """str: the status of this InlineResponse20045."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def sessions(self):
        """list[Session]: the sessions of this InlineResponse20045."""
        return self._sessions

    @sessions.setter
    def sessions(self, sessions):
        self._sessions = sessions

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models (anything exposing a ``to_dict`` method)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True iff *other* is the same model type with equal attributes.

        Bug fix: the generated code compared ``other.__dict__`` without a
        type check, raising AttributeError for objects that have no
        ``__dict__`` (e.g. ``model == "some string"``).
        """
        if not isinstance(other, InlineResponse20045):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| [
"bryan@iqwebcreations.com"
] | bryan@iqwebcreations.com |
b5b79d006a1526a6219d384b11259ec3921c5322 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/query_device_simplify.py | 2e304bf337fd262773cba99f3cb012bce9dd2549 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 16,987 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class QueryDeviceSimplify:
    """Device summary model returned by the IoTDA device-query API.

    Fields (all optional): app_id, app_name, device_id, node_id, gateway_id,
    device_name, node_type, description, fw_version, sw_version,
    device_sdk_version, product_id, product_name, status, tags.
    Each field is exposed as a plain get/set property backed by a
    ``_<field>`` slot.
    """

    sensitive_list = []

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'app_id': 'str',
        'app_name': 'str',
        'device_id': 'str',
        'node_id': 'str',
        'gateway_id': 'str',
        'device_name': 'str',
        'node_type': 'str',
        'description': 'str',
        'fw_version': 'str',
        'sw_version': 'str',
        'device_sdk_version': 'str',
        'product_id': 'str',
        'product_name': 'str',
        'status': 'str',
        'tags': 'list[TagV5DTO]'
    }

    # Attribute name -> JSON key in the wire format.
    attribute_map = {
        'app_id': 'app_id',
        'app_name': 'app_name',
        'device_id': 'device_id',
        'node_id': 'node_id',
        'gateway_id': 'gateway_id',
        'device_name': 'device_name',
        'node_type': 'node_type',
        'description': 'description',
        'fw_version': 'fw_version',
        'sw_version': 'sw_version',
        'device_sdk_version': 'device_sdk_version',
        'product_id': 'product_id',
        'product_name': 'product_name',
        'status': 'status',
        'tags': 'tags'
    }

    def _make_accessor(field):
        """Build a pass-through property backed by the ``_<field>`` slot.

        Helper executed only while the class body runs; deleted afterwards.
        """
        slot = '_' + field

        def _get(self):
            return getattr(self, slot)

        def _set(self, value):
            setattr(self, slot, value)

        return property(_get, _set)

    app_id = _make_accessor('app_id')
    app_name = _make_accessor('app_name')
    device_id = _make_accessor('device_id')
    node_id = _make_accessor('node_id')
    gateway_id = _make_accessor('gateway_id')
    device_name = _make_accessor('device_name')
    node_type = _make_accessor('node_type')
    description = _make_accessor('description')
    fw_version = _make_accessor('fw_version')
    sw_version = _make_accessor('sw_version')
    device_sdk_version = _make_accessor('device_sdk_version')
    product_id = _make_accessor('product_id')
    product_name = _make_accessor('product_name')
    status = _make_accessor('status')
    tags = _make_accessor('tags')

    del _make_accessor

    def __init__(self, app_id=None, app_name=None, device_id=None, node_id=None, gateway_id=None, device_name=None, node_type=None, description=None, fw_version=None, sw_version=None, device_sdk_version=None, product_id=None, product_name=None, status=None, tags=None):
        """QueryDeviceSimplify

        Every argument is optional; only non-None values are assigned
        (through the property setters), all backing slots start as None.
        """
        supplied = dict(locals())
        supplied.pop('self')
        self.discriminator = None
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        for name, value in supplied.items():
            if value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Serialise the model into a plain dict, masking sensitive fields."""
        def _convert(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_convert(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: _convert(item)
                                for key, item in value.items()}
            else:
                result[name] = "****" if name in self.sensitive_list else value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with identical state."""
        return isinstance(other, QueryDeviceSimplify) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
771dc87fd67bbc87453a422c054facee3c78c43e | 861781589f3f674fdcc34dc2b5f98ea06006c648 | /orderapp/migrations/0002_auto_20201028_0032.py | e51a59cecd9e1a25017186377a1bb707eca84e28 | [] | no_license | FedorPolyakov/geekshope_on_django | 7dd5133d85ffea910113a1c51684c34d8ca56229 | 2a432e2e581f64b2208447369eaa5f58accd000b | refs/heads/master | 2023-01-19T14:09:47.810521 | 2020-11-17T20:52:14 | 2020-11-17T20:52:14 | 296,665,989 | 0 | 0 | null | 2020-11-19T15:59:23 | 2020-09-18T15:53:43 | Python | UTF-8 | Python | false | false | 1,429 | py | # Generated by Django 3.1.1 on 2020-10-27 21:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.1): only AlterField operations
    on orderapp's Order/OrderItem models; no tables are added or removed.
    """

    # Must be applied after mainapp 0006 and this app's initial migration.
    dependencies = [
        ('mainapp', '0006_product_is_active'),
        ('orderapp', '0001_initial'),
    ]

    operations = [
        # Order.status: constrain to the order-lifecycle choice set.
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(choices=[('FM', 'формируется'), ('STP', 'отправлен в обработку'), ('PRD', 'обработан'), ('PD', 'оплачен'), ('RDY', 'готов'), ('DN', 'выполнен'), ('CNC', 'отменен')], max_length=3, verbose_name='Статус заказа'),
        ),
        # OrderItem.order: FK to Order with reverse accessor "orderitems".
        migrations.AlterField(
            model_name='orderitem',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orderitems', to='orderapp.order', verbose_name='Заказ'),
        ),
        # OrderItem.product: FK to mainapp.Product, cascade on delete.
        migrations.AlterField(
            model_name='orderitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.product', verbose_name='Продукт'),
        ),
        # OrderItem.quantity: small integer defaulting to 0.
        migrations.AlterField(
            model_name='orderitem',
            name='quantity',
            field=models.SmallIntegerField(default=0, verbose_name='Количество'),
        ),
    ]
| [
"pismo.na.pochtu@gmail.com"
] | pismo.na.pochtu@gmail.com |
9870f9f88a5746c6350951a5f6d5eadbb1a9540f | 96538cc3eee3d73d429f3476d0e895be95d695e3 | /worker/news/main.py | 168efa63f44e7fec9b1e4e3df8286974fb1da461 | [] | no_license | FashtimeDotCom/distributed-spider | d9555670216e68d4ff031e466cbf3529d080a534 | 33292f098403fa73239e0c7353e4cc5918be981b | refs/heads/master | 2020-03-22T11:43:14.796426 | 2018-07-06T10:51:48 | 2018-07-06T11:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | # -*- coding: utf-8 -*-
'''
Created on 2018-07-04 15:45
---------
@summary: 从master中取网页首地址
---------
@author: Boris
'''
import sys
sys.path.append('../')
import init
import pid
pid.record_pid(__file__)
import utils.tools as tools
from utils.log import log
from base.spider import Spider
from utils.export_data import ExportData
# 需配置
from news.parsers import *
def main():
    """Configure the news spider with its parser and start crawling."""
    def on_begin():
        log.info('\n********** news begin **********')

    def on_end():
        log.info('\n********** news end **********')

    # Spider configuration: pull seed URLs from the master's Redis table.
    spider = Spider(tab_urls='news:news_urls',
                    begin_callback=on_begin,
                    end_callback=on_end)
    spider.add_parser(news_parser)
    spider.start()
main() | [
"boris_liu@foxmail.com"
] | boris_liu@foxmail.com |
4e8ea7b0208106797b97138f04f0ce6fa022bf85 | a9823180c93f973a20b492af9181c30a51e1c221 | /debug.py | c044a4f9ba3acfab6f9f4c67a0dab1b416351bea | [
"MIT"
] | permissive | thirtytwobits/uavcan.org | 48f62675f21a5f550829d68ec9f84b7f07f073e6 | 1a16a190520ab751f8e31c02aaeec951c9a3fc19 | refs/heads/master | 2022-05-06T12:26:46.012683 | 2022-04-13T18:38:02 | 2022-04-13T18:38:02 | 170,823,712 | 0 | 0 | MIT | 2019-02-15T07:52:38 | 2019-02-15T07:52:37 | null | UTF-8 | Python | false | false | 472 | py | #!/usr/bin/env python3
import sys
import logging
import os
# Force the application into debug mode before it is imported below.
os.environ['DEBUG'] = '1'
LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s: %(message)s'
# Verbose logging only when 'debug' is passed on the command line.
log_level = logging.DEBUG if 'debug' in sys.argv else logging.INFO
logging.basicConfig(stream=sys.stderr, level=log_level, format=LOG_FORMAT)
# Make the project root importable regardless of the working directory.
sys.path.insert(0, os.path.dirname(__file__))
# noinspection PyUnresolvedReferences
from app import app as application
# NOTE(review): binds to all interfaces with debug=True — development use only.
application.run(host='0.0.0.0', port=4000, debug=True)
| [
"pavel.kirienko@gmail.com"
] | pavel.kirienko@gmail.com |
00daed5e30b686e85e499ae151d9ecac057a9b33 | f85d4c5b7ff8d5fba36ccebf8b8eda6bfcf99019 | /arche/models/tests/test_folder.py | cf2098f9997a6de9f69f8a6e53fa0878ffcbc8ea | [] | no_license | ArcheProject/Arche | 8289ebc6de652b786ba943fafb6ea8130e42ba24 | eb9924f72c167208265ac0a2c80504422e1897c9 | refs/heads/master | 2022-06-26T15:52:17.400944 | 2022-06-14T09:35:16 | 2022-06-14T09:35:16 | 18,967,966 | 3 | 0 | null | 2015-04-22T06:34:26 | 2014-04-20T16:01:52 | Python | UTF-8 | Python | false | false | 615 | py | from unittest import TestCase
from arche.interfaces import IArcheFolder
from pyramid import testing
from zope.interface.verify import verifyClass, verifyObject
class FolderTests(TestCase):
    """Verify ArcheFolder declares and fulfils the IArcheFolder interface."""

    def setUp(self):
        # Fresh Pyramid test configuration for each test.
        self.config = testing.setUp()

    def tearDown(self):
        testing.tearDown()

    @property
    def _cut(self):
        """The class under test, imported lazily so setUp has already run."""
        from arche.models.folder import ArcheFolder
        return ArcheFolder

    def test_verify_class(self):
        # assertTrue replaces the deprecated failUnless alias (deprecated
        # since Python 2.7 / 3.1).
        self.assertTrue(verifyClass(IArcheFolder, self._cut))

    def test_verify_obj(self):
        self.assertTrue(verifyObject(IArcheFolder, self._cut()))
| [
"robin@betahaus.net"
] | robin@betahaus.net |
c3e566c25eec2714a8aa97c11da237ad0b6a143d | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - Homework4/set2/PATRICK M BARBOUN_16936_assignsubmission_file_HW4/HW4/P4.py | 6a00e1fb338230ef6a7eeea443383c80c30fcb2a | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | import numpy as np
def sincos(x):
    """Return sin(cos(x)), evaluated elementwise for array input."""
    return np.sin(np.cos(x))
x = np.array([1, 2, 3, 4, 5, 6])
y = sincos(x)
# Parenthesised print works under both Python 2 and Python 3; the original
# bare `print y` statement is a SyntaxError on Python 3.
print(y)
"sendurr@hotmail.com"
] | sendurr@hotmail.com |
4a89cbf78e998b518f507f6dedde62e18c37b532 | eac52a8ae7c539acedaedf8744bd8e20172f0af6 | /epi_solutions/binary_tree/smallest-subtree-with-all-the-deepest-nodes.py | 56c07341774dcb82fc85175510df5d4576742701 | [] | no_license | mshekhar/random-algs | 3a0a0f6e6b21f6a59ed5e1970b7a2bc2044e191f | 7c9a8455f49027a754038b23aaa2df61fe5397ca | refs/heads/master | 2020-03-26T16:29:42.694785 | 2019-07-18T20:57:55 | 2019-07-18T20:57:55 | 145,105,593 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def helper(self, root):
        """Return (depth, node) for *root*'s subtree.

        depth is the height of the subtree; node is the root of the smallest
        subtree containing all of its deepest nodes.
        """
        if not root:
            return 0, None
        left_depth, left_deepest_node = self.helper(root.left)
        right_depth, right_deepest_node = self.helper(root.right)
        if left_depth == right_depth:
            # Deepest nodes exist on both sides, so this root covers them all.
            return left_depth + 1, root
        elif left_depth > right_depth:
            return left_depth + 1, left_deepest_node
        else:
            return right_depth + 1, right_deepest_node

    def subtreeWithAllDeepest(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode
        """
        # Bug fix: helper returns a (depth, node) pair, but callers expect
        # just the node; the original returned the whole tuple.
        return self.helper(root)[1]
| [
"mayank@moengage.com"
] | mayank@moengage.com |
38e576ecd0947e49c2db0af3fb903f16674a7332 | dcfac04f77ee9bc36c450da0a7fd7c410344d4d1 | /Plate Recognition/UI.py | b3927509443a93492d16350701095ab8d2c82e15 | [] | no_license | Bobby981229/Computer-Vision-Based-Vehicle-Integrated-Information-Collection-System | 8f5b47dd1c38e17bb87f846dba40323e2c8f9d25 | c64c81bb5812dbe0ddb0ce6d2a87322ed6de66af | refs/heads/master | 2023-04-24T16:27:32.521210 | 2021-05-12T19:16:35 | 2021-05-12T19:16:35 | 360,210,675 | 2 | 1 | null | 2021-04-22T12:13:44 | 2021-04-21T15:01:15 | Python | UTF-8 | Python | false | false | 16,169 | py | import tkinter as tk
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter.filedialog import askopenfilename
import cv2 as cv
import numpy as np
from PIL import Image, ImageTk
from tensorflow import keras
from CNN import cnn_predict
from U_Net import u_net_predict
from location_correct import locate_and_correct, erode
from modelColour_identify import car_model
from openCV_functions import license_image
from openCV_functions import license_spilt
from openCV_functions import plt_show_raw
from openCV_functions import template_matching
from CarCounter import CarCounter
class Form:
    def __init__(self, form, length, width):
        """
        Windows UI Design: builds all widgets and loads the trained models.
        :param form: windows frame (the tkinter root window)
        :param length: length - 900
        :param width: width - 800
        """
        # Form Control Setting
        self.form = form
        self.length = length
        self.width = width
        # Set the initial position of form
        self.form.geometry("%dx%d+%d+%d" % (length, width, 480, 90))
        self.form.title(
            "Computer Vision-Based Vehicle Integrated Information Collection System")
        self.img_path = None  # path of the currently selected image; None until one is chosen
        # LabelFrame Control Setting
        # NOTE(review): `.place()` returns None, so every attribute assigned
        # from `Widget(...).place(...)` below holds None rather than the
        # widget. Harmless while they are never read back — confirm before
        # relying on them elsewhere.
        self.frame_buttons = tk.LabelFrame(self.form, text="Button Controls", font=('TIMES NEW ROMAN', '14', 'bold'),
                                           width=273, height=410, labelanchor="nw").place(x=575, y=45)  # buttons frame
        self.frame_canvas = tk.LabelFrame(self.form, text="Canvas Controls", font=('TIMES NEW ROMAN', '14', 'bold'),
                                          width=750, height=200, labelanchor="nw").place(x=35, y=570)  # canvas frame
        # Labels Control Setting
        self.label_org = Label(self.form, text='Original Image:',
                               font=('TIMES NEW ROMAN', 14, 'bold')).place(x=35, y=15)  # original label
        self.label_locate = Label(
            self.form, text='License Plate Location:', font=('TIMES NEW ROMAN', 13, 'bold')).place(x=50, y=620)  # LP location label
        self.label_identify = Label(
            self.form, text='Identification Results:', font=('TIMES NEW ROMAN', 13, 'bold')).place(x=53, y=710)  # result label
        self.label_model = Label(self.form, text='Vehicle Model / Colour:', font=(
            'TIMES NEW ROMAN', 15, 'bold')).place(x=565, y=470)  # vehicle model... label
        self.label_result = Label(self.form, text='Result...', font=(
            'TIMES NEW ROMAN', 14)).place(x=565, y=510)  # vehicle model... recognition result
        # Canvas Control Setting (canvases are kept as real widget references,
        # unlike the labels/buttons above, because they are drawn on later)
        self.canvas_org = Canvas(self.form, width=512, height=512,
                                 bg='white', relief='solid', borderwidth=1)  # The original image canvas
        self.canvas_org.place(x=35, y=42)
        self.canvas_locate_1 = Canvas(self.form, width=245, height=85,
                                      bg='white', relief='solid', borderwidth=1)  # License plate area canvas 1
        self.canvas_locate_1.place(x=235, y=590)
        self.canvas_identify_1 = Canvas(self.form, width=245, height=65,
                                        bg='white', relief='solid', borderwidth=1)  # License plate recognition canvas 1
        self.canvas_identify_1.place(x=235, y=690)
        self.canvas_locate_2 = Canvas(self.form, width=245, height=85,
                                      bg='white', relief='solid', borderwidth=1)  # License plate area canvas 2
        self.canvas_locate_2.place(x=510, y=590)
        self.canvas_identify_2 = Canvas(self.form, width=245, height=65,
                                        bg='white', relief='solid', borderwidth=1)  # License plate recognition canvas 2
        self.canvas_identify_2.place(x=510, y=690)
        # Buttons Control Setting — each button is wired to one recognition
        # workflow method of this class via `command=`.
        self.btn_imgPath = Button(self.form, text='Select Image', font=('TIMES NEW ROMAN', '12', 'bold'),
                                  width=12, height=2, command=self.load_show_img).place(x=585, y=70)  # Select file button
        self.btn_blueRecog = Button(self.form, text='Blue Plates', font=('TIMES NEW ROMAN', '12', 'bold'),
                                    width=12, height=2, command=self.recog).place(x=585, y=150)  # blue plate recognition button
        self.btn_clear = Button(self.form, text='Clear ', font=('TIMES NEW ROMAN', '12', 'bold'),
                                width=12, height=2, command=self.clear).place(x=720, y=310)  # Clear button
        self.btn_whiteRecog = Button(self.form, text='White Plates', font=('TIMES NEW ROMAN', '12', 'bold'),
                                     width=12, height=2, command=self.predict_white).place(x=720, y=150)  # white plate
        self.btn_cvMethod = Button(self.form, text='OpenCV Match', font=('TIMES NEW ROMAN', '12', 'bold'),
                                   width=12, height=2, command=self.opevCV_matching).place(x=720, y=70)  # OpenCV method
        self.btn_greenRecogn = Button(self.form, text='Green Plates', font=('TIMES NEW ROMAN', '12', 'bold'),
                                      width=12, height=2, command=self.predict_green).place(x=585, y=230)  # Green plate
        self.btn_exit = Button(self.form, text='Exit', font=('TIMES NEW ROMAN', '12', 'bold'),
                               width=12, height=2, command=self.quit_program).place(x=585, y=390)  # exit system
        self.btn_model = Button(self.form, text='Model / Colour', font=('TIMES NEW ROMAN', '12', 'bold'),
                                width=12, height=2, command=self.model_colour).place(x=720, y=230)  # make, model, colour button
        self.btn_video = Button(self.form, text='Detect in Video', font=('TIMES NEW ROMAN', '12', 'bold'),
                                width=12, height=2, command=self.counter_video).place(x=585, y=310)  # counting in video
        # Deep Learning Trained Models Setting — loaded once at start-up.
        self.unet_blue = keras.models.load_model('../models/unet.h5')  # Blue plate u_net model
        self.unet_white = keras.models.load_model('../models/unet_white.h5')  # Whit plate u_net model
        # self.unet_green20 = keras.models.load_model('../models/unet_green20.h5')  # Green plate u_net model
        self.cnn = keras.models.load_model('../models/cnn.h5')  # blue plate cnn model
        print('The program is loading... Wait for a moment, please...')
        # Presumably a warm-up call so the first real prediction is fast;
        # the result on the all-zeros dummy input is discarded.
        cnn_predict(self.cnn, [np.zeros((80, 240, 3))])
def opevCV_matching(self):
"""
Select a vehicle image and using traditional method to recognize
:return: recognition result
"""
self.canvas_org.delete('all') # Delete the image on the canvas
# Step 1. Image Pre-processing
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
self.img_path = file_path
img_open = Image.open(self.img_path)
if img_open.size[0] * img_open.size[1] > 240 * 80:
img_open = img_open.resize((512, 512), Image.ANTIALIAS)
self.img_Tk = ImageTk.PhotoImage(img_open)
self.canvas_org.create_image(
258, 258, image=self.img_Tk, anchor='center')
original_image = cv.imread(file_path)
image = original_image.copy()
carLicense_image = license_image(image)
# Step 2. Split the License Numbers and Characters
try:
image = carLicense_image.copy()
word_images = license_spilt(image)
except:
tk.messagebox.showerror(
'Error', 'Unable to segment license plate area!')
else:
# Step 3. Characters and Numbers Matching with Template
word_images_ = word_images.copy()
result = template_matching(word_images_)
print('Recognition Result:', result)
tk.messagebox.showinfo('Result', result)
# Step 4. Image Rendering
width, weight = original_image.shape[0:2]
image = original_image.copy()
cv.rectangle(image, (int(0.2 * weight), int(0.75 * width)),
(int(weight * 0.8), int(width * 0.95)), (0, 255, 0), 5)
cv.putText(image, "".join(result), (int(0.2 * weight) + 30,
int(0.75 * width) + 80), cv.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 8)
plt_show_raw(image)
cv.imshow('License Plate', image)
cv.waitKey(0)
cv.destroyAllForms()
self.clear()
def model_colour(self):
    """
    Recognize vehicle make, model, colour and year via `car_model` and show
    the result as a red label on the main form.
    :return: None
    """
    if self.img_path is None:  # Make predictions before you have selected an image
        tk.messagebox.showerror(
            'Error', 'Please select a license plate image!')
    else:
        result = car_model(self.img_path)
        # BUG FIX: `Label(...).place(...)` returns None, so the original
        # stored None in self.label_result; keep the widget itself so it
        # can be updated/destroyed later.
        label = Label(self.form, text=result, font=(
            'TIMES NEW ROMAN', 16, 'bold'), fg="red")
        label.place(x=565, y=510)
        self.label_result = label
def load_show_img(self):
    """
    Ask the user for a vehicle image, remember its path in self.img_path,
    and preview it on the main canvas.
    :return: None
    """
    self.clear()
    sv = StringVar()
    sv.set(askopenfilename())
    try:
        # The chosen path is round-tripped through a read-only Entry widget
        # bound to `sv`; presumably this mirrors the path into the form —
        # TODO(review): a plain `sv.get()` would likely suffice.
        self.img_path = Entry(
            self.form, state='readonly', text=sv).get()  # Gets the open image
        img_open = Image.open(self.img_path)  # open image
        if img_open.size[0] * img_open.size[1] > 240 * 80:  # not licence plate
            # Full-car photos are normalized to 512x512 for display.
            img_open = img_open.resize((512, 512), Image.ANTIALIAS)
        self.img_Tk = ImageTk.PhotoImage(img_open)
        self.canvas_org.create_image(
            258, 258, image=self.img_Tk, anchor='center')
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # `except Exception:` would be safer.
        tk.messagebox.showerror('Error', 'Failed to open a image!')
def recog(self):
    """
    Locate the license plate(s) in the selected image, recognize them with
    the CNN, and display the plate crops and predicted text on the canvases.
    :return: None
    """
    if self.img_path == None:  # Not select image
        self.canvas_identify_1.create_text(
            32, 15, text='Please select image', anchor='nw', font=('TIMES NEW ROMAN', 14))
    else:
        # imdecode(np.fromfile(...)) instead of cv.imread so that paths with
        # non-ASCII characters can be read.
        img_origin = cv.imdecode(np.fromfile(
            self.img_path, dtype=np.uint8), -1)
        h, w = img_origin.shape[0], img_origin.shape[1]
        if h * w <= 240 * 80 and 2 <= w / h <= 5:  # A picture is a license plate, no location required
            lic = cv.resize(img_origin, dsize=(240, 80), interpolation=cv.INTER_AREA)[:, :, :3]  # resize to (240,80)
            img_origin_copy, Lic_img = img_origin, [lic]
        else:  # Prediction of the original image by U-Net, to get img_mask, to LP location and recognition
            img_origin, img_mask = u_net_predict(
                self.unet_blue, self.img_path)
            img_origin_copy, Lic_img = locate_and_correct(img_origin, img_mask)  # License plate positioning and correction
        # CNN model is used to predict the license plate recognition.
        # Each prediction appears to be a (plate_image, plate_text) pair —
        # TODO(review): confirm against cnn_predict's return value.
        Lic_pred = cnn_predict(self.cnn, Lic_img)
        if Lic_pred:
            # img_origin_copy[:, :, ::-1] Converting BGR to RGB
            img = Image.fromarray(img_origin_copy[:, :, ::-1])
            self.img_Tk = ImageTk.PhotoImage(img)
            self.canvas_org.delete('all')  # Clear the drawing board before displaying
            # The outline of the positioned license plate is drawn and displayed on the drawing board
            self.canvas_org.create_image(258, 258, image=self.img_Tk, anchor='center')
            # Only the first two detected plates have dedicated canvases;
            # any further detections are silently ignored.
            for i, lic_pred in enumerate(Lic_pred):
                if i == 0:
                    self.lic_Tk1 = ImageTk.PhotoImage(
                        Image.fromarray(lic_pred[0][:, :, ::-1]))
                    self.canvas_locate_1.create_image(
                        5, 5, image=self.lic_Tk1, anchor='nw')
                    self.canvas_identify_1.create_text(
                        35, 15, text=lic_pred[1], anchor='nw', font=('黑体', 28))
                elif i == 1:
                    self.lic_Tk2 = ImageTk.PhotoImage(
                        Image.fromarray(lic_pred[0][:, :, ::-1]))
                    self.canvas_locate_2.create_image(
                        5, 5, image=self.lic_Tk2, anchor='nw')
                    self.canvas_identify_2.create_text(
                        40, 15, text=lic_pred[1], anchor='nw', font=('黑体', 28))
        else:  # is null indicating that it is not recognized
            self.canvas_identify_1.create_text(
                47, 15, text='Recognize Failed', anchor='nw', font=('TIMES NEW ROMAN', 22))
            # img_origin_copy[:, :, ::-1] Converting BGR to RGB
            img = Image.fromarray(img_origin_copy[:, :, ::-1])
            self.img_Tk = ImageTk.PhotoImage(img)
            self.canvas_org.delete('all')  # Clear the drawing board before displaying
            # The outline of the positioned license plate is drawn and displayed
            self.canvas_org.create_image(258, 258, image=self.img_Tk, anchor='center')
def predict_white(self):
    """
    Use the trained white-plate U-Net model to locate a white license plate
    area and show the located/corrected region in an OpenCV window.
    :return: None
    """
    if self.img_path is None:  # Make predictions before selected an image
        tk.messagebox.showerror(
            'Error', 'Please select a license plate image!')
    else:
        img_origin, img_mask = u_net_predict(
            self.unet_white, self.img_path)
        img2gray = erode(img_mask)
        ret, mask = cv.threshold(img2gray, 200, 255, cv.THRESH_BINARY)
        cv.waitKey(0)
        gray2img = cv.cvtColor(img2gray, cv.COLOR_GRAY2BGR)
        img_origin_copy, Lic_img = locate_and_correct(img_origin, gray2img)
        # cv.imshow('show', img_mask)
        cv.imshow('License Plate', img_origin_copy)
        cv.waitKey(0)
        # BUG FIX: cv.destroyAllForms() does not exist in OpenCV; use
        # destroyAllWindows() to close the preview window.
        cv.destroyAllWindows()
        self.clear()
def predict_green(self):
    """
    Use the trained green-plate U-Net model to locate a green license plate
    area and show the located/corrected region in an OpenCV window.
    :return: None
    """
    if self.img_path is None:  # Make predictions before selected an image
        tk.messagebox.showerror(
            'Error', 'Please select a license plate image!')
    else:
        # NOTE(review): the load of self.unet_green20 is commented out in
        # __init__, so this attribute does not exist at runtime and this
        # call raises AttributeError — re-enable the model load to use it.
        img_origin, img_mask = u_net_predict(
            self.unet_green20, self.img_path)
        img2gray = erode(img_mask)
        ret, mask = cv.threshold(img2gray, 200, 255, cv.THRESH_BINARY)
        cv.waitKey(0)
        gray2img = cv.cvtColor(img2gray, cv.COLOR_GRAY2BGR)
        img_origin_copy, Lic_img = locate_and_correct(img_origin, gray2img)
        # cv.imshow('show', img_mask)
        cv.imshow('License Plate', img_origin_copy)
        cv.waitKey(0)
        # BUG FIX: cv.destroyAllForms() does not exist in OpenCV; use
        # destroyAllWindows() to close the preview window.
        cv.destroyAllWindows()
        self.clear()
def counter_video(self):
    """Pick a video file and run the car counter over it."""
    picker = tk.Tk()
    picker.withdraw()  # hide the empty helper window behind the dialog
    chosen_video = filedialog.askopenfilename()
    # Rectangle (x1, y1, x2, y2) in which passing cars are counted.
    detection_zone = (20, 400, 890, 500)
    counter = CarCounter(video_adr=chosen_video, detection_zone=detection_zone)
    counter.count_cars()
def clear(self):
    """
    Wipe every canvas and forget the currently selected image path.
    :return: None
    """
    all_canvases = (
        self.canvas_org,
        self.canvas_locate_1,
        self.canvas_locate_2,
        self.canvas_identify_1,
        self.canvas_identify_2,
    )
    for canvas in all_canvases:
        canvas.delete('all')
    self.img_path = None
def quit_program(self):
    """
    Ask the user to confirm, then close the main window.
    :return: None
    """
    # askokcancel returns True only when the user presses OK.
    confirmed = tk.messagebox.askokcancel(
        'Prompt', 'Confirm to exit the program?')
    if confirmed:
        self.form.destroy()
def closeEvent():  # Clear session() before closing
    # NOTE(review): declared without `self` yet registered below as the
    # WM_DELETE_WINDOW handler via `Form.closeEvent`, so it behaves as a
    # static callback: it frees the Keras session, then exits the process.
    keras.backend.clear_session()
    sys.exit()
if __name__ == '__main__':
    form = Tk()
    length = 900  # window width in pixels (original comment said 1000; code uses 900)
    width = 800  # window height in pixels (original comment said 600; code uses 800)
    Form(form, length, width)
    # Route the window-manager close button through the cleanup handler.
    form.protocol("WM_DELETE_WINDOW", Form.closeEvent)
    form.mainloop()
| [
"729461836@qq.com"
] | 729461836@qq.com |
a4ae304676ff53c7cd64bb3eb676b62629f4f8b1 | a981d8862c8bf4926952f90e6ec04dce679c9a2c | /skdata/base.py | 1546da9ebfaea0e8c85f6f816f114c8779fbbca2 | [] | no_license | yamins81/skdata | 250352523b7cc1672bf6ba76ac22151179e32705 | 551f707e4183ac46af6a89026c89c9f522f0a6a5 | refs/heads/master | 2020-04-05T23:37:22.932007 | 2015-05-01T22:29:55 | 2015-05-01T22:29:55 | 2,699,514 | 0 | 1 | null | 2013-09-18T19:40:36 | 2011-11-03T02:09:40 | Python | UTF-8 | Python | false | false | 10,416 | py | """
Base classes serving as design documentation.
"""
import numpy as np
class DatasetNotDownloadable(Exception):
    """Exception signalling that a dataset is not downloadable."""
class DatasetNotPresent(Exception):
    """Exception signalling that a dataset is not present."""
class Task(object):
    """
    The smallest unit of data packaging for training a machine learning
    model.

    A Task carries a `semantics` string that uniquely *identifies* what a
    learning algorithm is supposed to do with it, an optional `name`, and
    arbitrary data attributes passed as keyword arguments. Conventional
    semantics include:

    * 'vector_classification' -- `x` is a (examples x features) matrix,
      `y` is an array of labels.
    * 'image_classification' -- `x` is (images x height x width x channels),
      `y` is an array of labels.
    * 'indexed_vector_classification' -- `all_vectors`, `all_labels`, and a
      vector `idxs` of relevant example positions.
    * 'indexed_image_classification' -- `all_images`, `all_labels`, `idxs`.

    Data-set authors are encouraged to re-use each others' semantics so
    that learning algorithms stay portable between tasks.
    """

    def __init__(self, semantics=None, name=None, **kwargs):
        self.semantics = semantics
        self.name = name
        # Attach every remaining keyword argument as an attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
class Split(object):
    """
    A (train, test) pair of Tasks with no common examples, used in
    cross-validation: parameters are selected/learned on `train` and
    evaluated on `test`.
    """
    # XXX This class is no longer necessary in the View API

    def __init__(self, train, test):
        self.train = train
        self.test = test
class View(object):
    """An interpretation of a data set as a standard learning problem."""

    def __init__(self, dataset=None):
        """
        dataset: optional, non-standardized reference to a low-level object
        that offers access to the raw data.
        """
        self.dataset = dataset

    def protocol(self, algo):
        """
        Return a list of instructions for a learning algorithm.

        Each instruction is an (attr, args, kwargs) triple such that
        ``algo.<attr>(*args, **kwargs)`` is a sensible operation for the
        learning algorithm -- e.g. train a model from some data, or test a
        previously trained model. See `LearningAlgo` for the standard
        instruction set; the protocol is deliberately open-ended so new
        View objects may call any method a particular LearningAlgo
        implementation supports.
        """
        raise NotImplementedError()
class LearningAlgo(object):
    """
    Base class for learning algorithms that can be driven by the
    ``protocol()`` functions sometimes included in View subclasses.

    A protocol driver calls these methods in a particular order with
    appropriate tasks, splits, etc., and a subclass thereby performs an
    experiment by side effect on `self`.
    """

    def task(self, *args, **kwargs):
        # XXX This is a typo right? Surely there is no reason for a
        # LearningAlgo to have a self.task method...
        return Task(*args, **kwargs)

    def best_model(self, train, valid=None, return_promising=False):
        """
        Train a model from task `train`, optionally optimizing for
        cross-validated performance on `valid`.

        Returns ``(model, train_error, valid_error)`` where the errors are
        scalar loss criteria; when `return_promising` is True, a fourth
        boolean element flags whether the model seemed to work (1) or was
        degenerate (0).
        """
        raise NotImplementedError('implement me')

    def loss(self, model, task):
        """
        Return the scalar-valued training criterion of `model` on `task`.

        May modify `self`, but must not semantically modify `model` or
        `task`.
        """
        raise NotImplementedError('implement me')

    # Example of a data-set-specific step subclasses may need to support
    # (required by the original LFW View1 / View2 protocol): re-train only
    # the classifier part of `model`, keeping any feature extractor fixed.
    def retrain_classifier(self, model, train, valid=None):
        raise NotImplementedError('implement me')

    def forget_task(self, task_name):
        """
        Signal that features/statistics tied specifically to `task_name`
        may safely be deleted. Safe to ignore for small data sets, but
        deleting such intermediate results can be crucial for keeping
        memory use under control.
        """
        pass
class SemanticsDelegator(LearningAlgo):
    """Dispatch best_model/loss to per-semantics methods on the subclass,
    e.g. a task with semantics 'foo' is routed to ``best_model_foo`` /
    ``loss_foo``."""

    def best_model(self, train, valid=None):
        if valid:
            assert train.semantics == valid.semantics
        handler = getattr(self, 'best_model_' + train.semantics)
        return handler(train, valid)

    def loss(self, model, task):
        handler = getattr(self, 'loss_' + task.semantics)
        return handler(model, task)
class SklearnClassifier(SemanticsDelegator):
"""
Implement a LearningAlgo as much as possible in terms of an sklearn
classifier.
This class is meant to illustrate how to create an adapter between an
existing implementation of a machine learning algorithm, and the various
data sets defined in the skdata library.
Researchers are encouraged to implement their own Adapter classes
following the example of this class (i.e. cut & paste this class)
to measure the statistics they care about when handling the various
methods (e.g. best_model_vector_classification) and to save those
statistics to a convenient place. The practice of appending a summary
dictionary to the lists in self.results has proved to be useful for me,
but I don't see why it should in general be the right thing for others.
This class is also used for internal unit testing of Protocol interfaces,
so it should be free of bit rot.
"""
def __init__(self, new_model):
self.new_model = new_model
self.results = {
'best_model': [],
'loss': [],
}
def best_model_vector_classification(self, train, valid):
# TODO: use validation set if not-None
model = self.new_model()
print 'SklearnClassifier training on data set of shape', train.x.shape
model.fit(train.x, train.y)
model.trained_on = train.name
self.results['best_model'].append(
{
'train_name': train.name,
'valid_name': valid.name if valid else None,
'model': model,
})
return model
def loss_vector_classification(self, model, task):
p = model.predict(task.x)
err_rate = np.mean(p != task.y)
self.results['loss'].append(
{
'model_trained_on': model.trained_on,
'predictions': p,
'err_rate': err_rate,
'n': len(p),
'task_name': task.name,
})
return err_rate
@staticmethod
def _fallback_indexed_vector(self, task):
return Task(
name=task.name,
semantics="vector_classification",
x=task.all_vectors[task.idxs],
y=task.all_labels[task.idxs])
def best_model_indexed_vector_classification(self, train, valid):
return self.best_model_vector_classification(
self._fallback_indexed_vector(train),
self._fallback_indexed_vector(valid))
def loss_indexed_vector_classification(self, model, task):
return self.loss_vector_classification(model,
self._fallback_indexed_vector(task))
@staticmethod
def _fallback_indexed_image_task(task):
if task is None:
return None
x = task.all_images[task.idxs]
y = task.all_labels[task.idxs]
if 'int' in str(x.dtype):
x = x.astype('float32') / 255
else:
x = x.astype('float32')
x2d = x.reshape(len(x), -1)
rval = Task(
name=task.name,
semantics="vector_classification",
x=x2d,
y=y)
return rval
def best_model_indexed_image_classification(self, train, valid):
return self.best_model_vector_classification(
self._fallback_indexed_image_task(train),
self._fallback_indexed_image_task(valid))
def loss_indexed_image_classification(self, model, task):
return self.loss_vector_classification(model,
self._fallback_indexed_image_task(task))
| [
"james.bergstra@gmail.com"
] | james.bergstra@gmail.com |
5fd9b93930a9768df0ae870e00bdd00b8021b6b6 | 0ca7c7bdb297439554777e126ae8a2999962b7fe | /venv/Lib/site-packages/gevent/tests/test__example_udp_server.py | 07d863762540271b1e7dea0d35100f429ca1934a | [] | no_license | YazLuna/APIExpressJobs | 6c0857f63180bf5163d11fa9d1a411e44a4ba46f | cd52bc8d0d60100091637ef79f78cc79d58a1495 | refs/heads/master | 2023-06-13T02:50:57.672295 | 2021-06-18T14:57:53 | 2021-06-18T14:57:53 | 367,244,876 | 0 | 1 | null | 2021-06-18T14:57:53 | 2021-05-14T04:05:43 | Python | UTF-8 | Python | false | false | 535 | py | import socket
from gevent.testing import util
from gevent.testing import main
class Test(util.TestServer):
    # Script started as the server under test by util.TestServer;
    # presumably resolved relative to gevent's examples directory --
    # TODO(review): confirm against util.TestServer.
    example = 'udp_server.py'

    def _run_all_tests(self):
        # Send one 15-byte datagram and verify the server replies with a
        # message reporting the received length.
        sock = socket.socket(type=socket.SOCK_DGRAM)
        try:
            sock.connect(('127.0.0.1', 9000))
            sock.send(b'Test udp_server')
            data, _address = sock.recvfrom(8192)
            self.assertEqual(data, b'Received 15 bytes')
        finally:
            sock.close()
if __name__ == '__main__':
    # gevent.testing.main discovers and runs the tests in this module.
    main()
| [
"ale_200200@hotmail.com"
] | ale_200200@hotmail.com |
4a77dfb840602c3a1ea69030cb783630d378a69b | dde951c8bcfb79cdead3449de42d9ed3e6f24fbe | /dive_into_python/kgp.py | 577a1559521a72f52510266b2f510aebab31bb0b | [] | no_license | wolfeyuanwei/study-python | c764353cbf75b0ccd79dc562fe11eebee712510b | be1a9ec93cd29d9fe6b69ad4f9c059fb9dd308de | refs/heads/master | 2021-05-11T22:57:51.541684 | 2018-02-08T05:03:10 | 2018-02-08T05:03:10 | 117,504,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,716 | py | #!/usr/bin/python
#filename:kgp.py
from xml.dom import minidom
import random
import toolbox
import sys
import getopt
_debug=0
class NoSourceError(Exception):pass
class KantGenerator:
    '''generates mock philosophy based on a context-free grammar'''

    def __init__(self, grammar, source=None):
        self.loadGrammar(grammar)
        # `source and source or x` is the pre-conditional-expression idiom
        # for "source if source else x".
        self.loadSource(source and source or self.getDefaultSource())
        self.refresh()

    def _load(self, source):
        '''load XML input source, return parsed XML document

        `source` may be:
        - a URL of a remote XML file ("http://diveintopython.org/kant.xml")
        - a filename of a local XML file ("/diveintopython/common/py/kant.xml")
        - standard input ("-")
        - the actual XML document, as a string
        '''
        sock = toolbox.openAnything(source)
        xmldoc = minidom.parse(sock).documentElement
        sock.close()
        return xmldoc

    def loadGrammar(self, grammar):
        '''load context-free grammar'''
        self.grammar = self._load(grammar)
        # Index every <ref> element by its id for cross-reference lookups.
        self.refs = {}
        for ref in self.grammar.getElementsByTagName("ref"):
            self.refs[ref.attributes["id"].value] = ref

    def loadSource(self, source):
        '''load source'''
        self.source = self._load(source)

    def getDefaultSource(self):
        '''guess default source of the current grammar

        The default source will be one of the <ref>s that is not
        cross-referenced. Example: the default source for kant.xml is
        "<xref id='section'/>", because 'section' is the one <ref> that is
        not <xref>'d anywhere in the grammar. In most grammars, the default
        source will produce the longest (and most interesting) output.
        '''
        xrefs = {}
        for xref in self.grammar.getElementsByTagName("xref"):
            xrefs[xref.attributes["id"].value] = 1
        xrefs = xrefs.keys()
        standaloneXrefs = [e for e in self.refs.keys() if e not in xrefs]
        if not standaloneXrefs:
            # BUG FIX: converted py2-only `raise E, "msg"` to call syntax,
            # which is valid in both Python 2 and 3.
            raise NoSourceError("can't guess source, and no source specified.")
        return '<xref id="%s"/>' % random.choice(standaloneXrefs)

    def reset(self):
        '''reset parser'''
        self.pieces = []
        self.capitalizeNextWord = 0

    def refresh(self):
        '''reset output buffer, re-parse entire source file, and return output

        Since parsing involves a good deal of randomness, this is an
        easy way to get new output without having to reload a grammar file
        each time.
        '''
        self.reset()
        self.parse(self.source)
        return self.output()

    def output(self):
        '''output generated text'''
        return "".join(self.pieces)

    def randomChildElement(self, node):
        '''choose a random child element of a node

        This is a utility method used by do_xref and do_choice.
        '''
        choices = [e for e in node.childNodes
                   if e.nodeType == e.ELEMENT_NODE]
        chosen = random.choice(choices)
        if _debug:
            sys.stderr.write('%s available choices: %s\n' % \
                (len(choices), [e.toxml() for e in choices]))
            sys.stderr.write('Chosen:%s\n' % chosen.toxml())
        return chosen

    def parse(self, node):
        '''parse a single XML node

        A parsed XML document is a tree of nodes of various types. Each node
        is represented by an instance of the corresponding Python class
        (Element for a tag, Text for text data, Document for the top-level
        document). The following statement constructs the name of a class
        method based on the type of node we're parsing ("parse_Element" for
        an Element node, "parse_Text" for a Text node, etc.) and then calls
        the method.
        '''
        # BUG FIX: the format string was "parse_%s_" (trailing underscore),
        # which built names like "parse_Text_" that do not exist on this
        # class, so every dispatch raised AttributeError.
        parseMethod = getattr(self, "parse_%s" % node.__class__.__name__)
        parseMethod(node)

    def parse_Document(self, node):
        '''parse the document node

        The document node by itself isn't interesting (to us), but its only
        child, node.documentElement, is: it's the root node of the grammar.
        '''
        self.parse(node.documentElement)

    def parse_Text(self, node):
        '''parse a text node

        The text of a text node is usually added to the output buffer
        verbatim. The one exception is that <p class='sentence'> sets a flag
        to capitalize the first letter of the next word. If that flag is
        set, we capitalize the text and reset the flag.
        '''
        text = node.data
        if self.capitalizeNextWord:
            self.pieces.append(text[0].upper())
            self.pieces.append(text[1:])
            self.capitalizeNextWord = 0
        else:
            self.pieces.append(text)

    def parse_Element(self, node):
        '''parse an element

        An XML element corresponds to an actual tag in the source:
        <xref id='...'>, <p chance='...'>, <choice>, etc. Each element type
        is handled in its own method. Like we did in parse(), we construct a
        method name based on the name of the element ("do_xref" for an
        <xref> tag, etc.) and call the method.
        '''
        handlerMethod = getattr(self, "do_%s" % node.tagName)
        handlerMethod(node)

    def parse_Comment(self, node):
        '''parse a comment (ignored)'''
        pass

    def do_xref(self, node):
        '''handle <xref id='...'>: expand a random variant of the referenced <ref>'''
        id = node.attributes["id"].value
        self.parse(self.randomChildElement(self.refs[id]))

    def do_p(self, node):
        '''handle <p>: optionally capitalize, optionally skip by chance'''
        keys = node.attributes.keys()
        if "class" in keys:
            if node.attributes["class"].value == "sentence":
                self.capitalizeNextWord = 1
        if "chance" in keys:
            # Include this phrase only `chance` percent of the time.
            chance = int(node.attributes["chance"].value)
            doit = (chance > random.randrange(100))
        else:
            doit = 1
        if doit:
            for child in node.childNodes: self.parse(child)

    def do_choice(self, node):
        '''handle <choice>: expand one randomly chosen child'''
        self.parse(self.randomChildElement(node))
def usage():
    # Py2-style print of the module docstring. NOTE(review): this module
    # has no docstring visible here, so __doc__ may be None -- confirm.
    print __doc__
def main(argv):
grammar = "note.xml"
for v in argv:
print v
try:
opts, args = getopt.getopt(argv, "hg:d", ["help", "grammar="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
elif opt == '-d':
_debug =1
elif opt in ("-g", "--grammar"):
grammar = arg
source = "".join(args)
k=KantGenerator(grammar, source)
print k.output()
if __name__ == "__main__":
main(sys.argv[1:]) | [
"wolfe_yuan@163.com"
] | wolfe_yuan@163.com |
0e6181bcc7f4022fa1bfb9215f66358fb1fbf947 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_snacked.py | 45d305aaad9053ae01e84d243abe6642be1e7899 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
#calss header
class _SNACKED():
def __init__(self,):
self.name = "SNACKED"
self.definitions = snack
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['snack']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f8f265699c8e44ca88151cc21f3fb5cdbfdaeea1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2631/60747/257584.py | beaeba029f2abe3d32e519c36820c90d7d1d5c23 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | s=input().split(" ")
N=int(s[0])
G=int(s[1])
milk=[G for l in range(N)]
num=[]
count=0
for i in range(N):
num.append(input().split(" "))
for j in range(N):
for k in range(3):
num[j][k]=int(num[j][k])
num.sort()
for d in range(N):
a=milk.count(max(milk))
best=milk.index(max(milk))
milk[num[d][1]-1]=milk[num[d][1]-1]+num[d][2]
if milk.count(max(milk))!=a:
count+=1
else:
if milk.index(max(milk))!=best:
count+=1
print(count,end="")
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
b44468d2b24bbe806eca686c36379f02c9a5acc2 | 800b5166148d4e3cd03825d7d20e2900fbc6c789 | /app_common/migrations/0015_feedbacks_about_object_type.py | 25814c6692de563fc266846ee0ac2cdd8381c69b | [] | no_license | JiSuPiaoYi/dawufupin | 4ffc979a93502eb576776673c98aaeb16021827e | 57756a501436fabe9b27ebca2e80e60932da30dc | refs/heads/master | 2020-04-07T11:37:35.728108 | 2018-11-20T09:09:50 | 2018-11-20T09:09:50 | 158,334,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-09-28 21:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.3 (see file header): adds the
    # `about_object_type` small-integer column (default 0) to the
    # `feedbacks` model of app_common.

    dependencies = [
        ('app_common', '0014_feedbacks_remarks'),
    ]

    operations = [
        migrations.AddField(
            model_name='feedbacks',
            name='about_object_type',
            field=models.SmallIntegerField(default=0, verbose_name='涉及对象类型'),
        ),
    ]
| [
"360510132@qq.com"
] | 360510132@qq.com |
a2fb3c3846eb698405c231fc6512f0f11eb7f24b | 7ba672971ac7453d9026eb2f9cf4507bac134a94 | /st01.Python기초/0301수업/py11문자열/py11_09_format.py | 97c50eff88081faca5fc5bf8eae46374b72bbe06 | [] | no_license | parky83/python0209 | 5e806bc14679085ca1d1d4c5d59461abb4c8024e | 5159beb9c80f59b2c618eed040ffb4ffb69469c8 | refs/heads/master | 2021-01-01T10:34:39.047489 | 2020-08-15T03:41:41 | 2020-08-15T03:41:41 | 239,239,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # format 함수의 사용법을 익혀보자
# format() 함수로 숫자를 문자열로 변환하기
# format() 함수로 숫자를 문자열로 변환하기
# 정수
# 특정 칸에 출력하기
# 빈 칸을 0으로 채우기
# 기호와 함께 출력하기
# 조합하기
# 15칸 만들기
# 15칸에 부호 추가하기
# 15칸에 부호 추가하고 0으로 채우기
| [
"tt@gmail.com"
] | tt@gmail.com |
b4764ec5a518554f73560466e781d39533cba196 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /152/152.maximum-product-subarray.699806514.Accepted.leetcode.python3.py | ba0d37c0ac8d63dff9f8413a9708e951d3544b1e | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | class Solution(object):
def maxProduct(self, nums):
max_so_far, min_so_far, result = nums[0], nums[0], nums[0]
for index in range(1, len(nums)):
if nums[index] > 0:
max_so_far = max(max_so_far * nums[index], nums[index])
min_so_far = min(min_so_far * nums[index], nums[index])
else:
temp = max_so_far
max_so_far = max(min_so_far * nums[index], nums[index])
min_so_far = min(temp * nums[index], nums[index])
result = max(result, max_so_far)
return result
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
5472dd53d9f8c1cf0e8181de52297ae189580ff9 | f83f053278a036e18466d85585bc03a28c0f140a | /xsdata/codegen/handlers/attribute_restrictions.py | cf27e1d0045aaf0e265180f2ea1987505d025f1c | [
"MIT"
] | permissive | finswimmer/xsdata | dd951124e378bf9f4d8bd6939e4ebe542c677ee2 | eed822b83f362f48561a7d116e181a5422ff52dd | refs/heads/master | 2023-05-05T21:16:20.693559 | 2021-05-31T16:11:44 | 2021-05-31T16:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | from xsdata.codegen.mixins import HandlerInterface
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
class AttributeRestrictionsHandler(HandlerInterface):
    """Sanitize attributes restrictions."""

    __slots__ = ()

    def process(self, target: Class):
        # Normalize each attr's occurrence counters first, then fix up the
        # `sequential` flags, which depend on neighboring attrs.
        for index, attr in enumerate(target.attrs):
            self.reset_occurrences(attr)
            self.reset_sequential(target, index)

    @classmethod
    def reset_occurrences(cls, attr: Attr):
        """Sanitize attribute required flag by comparing the min/max
        occurrences restrictions."""
        restrictions = attr.restrictions
        min_occurs = restrictions.min_occurs or 0
        max_occurs = restrictions.max_occurs or 0

        if attr.is_attribute:
            # XML attributes cannot repeat; occurrence counters are dropped.
            restrictions.min_occurs = None
            restrictions.max_occurs = None
        elif attr.is_tokens:
            # Token lists are never "required"; counters only matter when
            # the list may repeat.
            restrictions.required = None
            if max_occurs <= 1:
                restrictions.min_occurs = None
                restrictions.max_occurs = None
        elif attr.xml_type is None or min_occurs == max_occurs == 1:
            # Exactly-once (or untyped) attrs collapse to a plain required flag.
            restrictions.required = True
            restrictions.min_occurs = None
            restrictions.max_occurs = None
        elif min_occurs == 0 and max_occurs < 2:
            # Optional scalar: no default/fixed value makes sense.
            restrictions.required = None
            restrictions.min_occurs = None
            restrictions.max_occurs = None
            attr.default = None
            attr.fixed = False
        else:  # max_occurs > 1
            # Repeating attr: keep min_occurs, drop required/fixed.
            restrictions.min_occurs = min_occurs
            restrictions.required = None
            attr.fixed = False

        # A default, fixed value, or nillable flag overrides "required".
        if attr.default or attr.fixed or attr.restrictions.nillable:
            restrictions.required = None

    @classmethod
    def reset_sequential(cls, target: Class, index: int):
        """Reset the attribute at the given index if it has no siblings with
        the sequential restriction."""
        attr = target.attrs[index]
        before = target.attrs[index - 1] if index - 1 >= 0 else None
        after = target.attrs[index + 1] if index + 1 < len(target.attrs) else None

        # Only repeating (list) attrs can be sequential at all.
        if not attr.is_list:
            attr.restrictions.sequential = False

        # Keep the flag only when this attr starts a sequential run: drop it
        # if it isn't sequential, if the previous attr already carries the
        # run, or if the next attr continues it as a list.
        if (
            not attr.restrictions.sequential
            or (before and before.restrictions.sequential)
            or (after and after.restrictions.sequential and after.is_list)
        ):
            return

        attr.restrictions.sequential = False
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
54bb59e1aa9b8b756b91237113e05b4270ca5bfe | f57529f95a0fd10676f46063fdcd273fb5a81427 | /boj/01001-02000/1914.py | 75afdaa28e8b881d375944ecb481c5e5941ab8cb | [] | no_license | hoyasmh/PS | a9b83b0044e483586590c9b7c6bf8a77236b67e7 | 6bbaa0ce77b2726f6af782af049d73720820f761 | refs/heads/master | 2023-04-23T10:43:27.349785 | 2021-05-17T13:43:53 | 2021-05-17T13:43:53 | 311,239,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | def h(n,a,b,c):
if n>0:
h(n-1,a,c,b)
print(a,c)
h(n-1,b,a,c)
s=int(input())
print(2**s-1)
h(s,1,2,3)
# def hanoi(n, a, b, c):
# if n > 0:
# hanoi(n - 1, a, c, b)
# print("%d번 원반을 %c에서 %c로 옮김"%(n, a, b))
# hanoi(n - 1, c, b, a)
#
# if __name__ == "__main__":
# print("원반의 갯수 : ")
# n = int(input())
# hanoi(n,'a','b','c')
| [
"hoyasmh@gmail.com"
] | hoyasmh@gmail.com |
a453a5dd1988f2b7a4930f00da957cce3e57d529 | 89d9f97464fe71cba6dc9d6a3f3b1f6a91bac523 | /cooler/create/_ingest.py | e1cb6a2234b23726edaf94d606050b4fae773ac5 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | sbreiff/cooler | f7b855ce47b1f511d59fbc21dba449e727bb23d7 | 5754c9ad9fa9dfaaa23e537ca7c05d76da87bcff | refs/heads/master | 2020-04-12T04:45:26.138522 | 2019-01-02T17:53:03 | 2019-01-02T17:53:03 | 162,304,766 | 0 | 0 | NOASSERTION | 2019-01-02T17:53:04 | 2018-12-18T15:00:41 | Python | UTF-8 | Python | false | false | 38,169 | py | # -*- coding: utf-8 -*-
"""
Contact Binners
~~~~~~~~~~~~~~~
Binners are iterators that convert input data of various flavors into a
properly sorted, chunked stream of binned contacts.
"""
from __future__ import division, print_function
from collections import OrderedDict, Counter
from bisect import bisect_left, bisect_right
from multiprocess import Pool
from functools import partial
import itertools
import warnings
import sys
import six
from pandas.api.types import is_integer_dtype
import numpy as np
import pandas as pd
import h5py
from .._logging import get_logger
from ..util import (
parse_region, rlencode, get_binsize, get_chromsizes, GenomeSegmentation,
balanced_partition
)
from ..tools import lock, partition
logger = get_logger()
class BadInputError(ValueError):
    """Raised by the record sanitizers when input contact data is malformed
    (non-integer columns, negative or out-of-bounds anchor positions)."""
    pass
# Keyword presets for the record sanitizer, keyed by input flavor:
# 'bg2' uses start/end interval fields and pre-sorts; 'pairs' uses a single
# `pos` anchor per side and does not sort. Both decode chromosome names to
# enum codes, treat coordinates as zero-based, and reflect lower-triangle
# records.
SANITIZE_PRESETS = {
    'bg2': dict(decode_chroms=True, is_one_based=False, tril_action='reflect',
                chrom_field='chrom', anchor_field='start',
                sided_fields=('chrom', 'start', 'end'), suffixes=('1', '2'),
                sort=True, validate=True),
    'pairs': dict(decode_chroms=True, is_one_based=False, tril_action='reflect',
                  chrom_field='chrom', anchor_field='pos',
                  sided_fields=('chrom', 'pos'), suffixes=('1', '2'),
                  sort=False, validate=True)
}
def _sanitize_records(chunk, gs, decode_chroms, is_one_based, tril_action,
                      chrom_field, anchor_field, sided_fields, suffixes,
                      sort, validate):
    """Worker behind :func:`sanitize_records`.

    Converts chromosome names to integer contig IDs, bounds-checks anchor
    positions, reflects/drops/rejects lower-triangle records according to
    ``tril_action``, and appends ``bin1_id``/``bin2_id`` columns computed
    against the genome segmentation ``gs``.
    """
    # Get integer contig IDs
    if decode_chroms:
        # Unspecified chroms get assigned category = NaN and integer code = -1
        chrom1_ids = np.array(pd.Categorical(
            chunk['chrom1'], gs.contigs, ordered=True).codes)
        chrom2_ids = np.array(pd.Categorical(
            chunk['chrom2'], gs.contigs, ordered=True).codes)
    else:
        chrom1_ids = chunk['chrom1'].values
        chrom2_ids = chunk['chrom2'].values
        if validate:
            for col, dt in [('chrom1', chrom1_ids.dtype),
                            ('chrom2', chrom2_ids.dtype)]:
                if not is_integer_dtype(dt):
                    raise BadInputError(
                        "`{}` column is non-integer. ".format(col) +
                        "If string, use `decode_chroms=True` to convert to enum")

    # Drop records from non-requested chromosomes
    to_drop = (chrom1_ids < 0) | (chrom2_ids < 0)
    if np.any(to_drop):
        mask = ~to_drop
        chrom1_ids = chrom1_ids[mask]
        chrom2_ids = chrom2_ids[mask]
        chunk = chunk[mask].copy()

    # Handle empty case
    if not len(chunk):
        chunk['bin1_id'] = []
        chunk['bin2_id'] = []
        return chunk

    # Find positional anchor columns, convert to zero-based if needed
    anchor1 = np.array(chunk[anchor_field + suffixes[0]])
    anchor2 = np.array(chunk[anchor_field + suffixes[1]])
    if is_one_based:
        anchor1 -= 1
        anchor2 -= 1

    # Check types and bounds
    if validate:
        for dt in [anchor1.dtype, anchor2.dtype]:
            if not is_integer_dtype(dt):
                raise BadInputError("Found a non-integer anchor column")
        is_neg = (anchor1 < 0) | (anchor2 < 0)
        if np.any(is_neg):
            err = chunk[is_neg]
            raise BadInputError(
                "Found an anchor position with negative value:\n{}".format(
                    err.head().to_csv(sep='\t')))
        chromsizes1 = gs.chromsizes[chrom1_ids].values
        chromsizes2 = gs.chromsizes[chrom2_ids].values
        is_excess = (anchor1 > chromsizes1) | (anchor2 > chromsizes2)
        if np.any(is_excess):
            err = chunk[is_excess]
            raise BadInputError(
                "Found an anchor position exceeding chromosome length:\n{}".format(
                    err.head().to_csv(sep='\t')))

    # Handle lower triangle records
    if tril_action is not None:
        is_tril = (
            (chrom1_ids > chrom2_ids) |
            ((chrom1_ids == chrom2_ids) & (anchor1 > anchor2))
        )
        if np.any(is_tril):
            if tril_action == 'reflect':
                # Swap the two sides of every tril record so it lands in the
                # upper triangle: contig IDs, anchors, and sided columns.
                chrom1_ids[is_tril], chrom2_ids[is_tril] = \
                    chrom2_ids[is_tril], chrom1_ids[is_tril]
                anchor1[is_tril], anchor2[is_tril] = \
                    anchor2[is_tril], anchor1[is_tril]
                for field in sided_fields:
                    chunk.loc[is_tril, field + suffixes[0]], \
                        chunk.loc[is_tril, field + suffixes[1]] = \
                        chunk.loc[is_tril, field + suffixes[1]], \
                        chunk.loc[is_tril, field + suffixes[0]]
            elif tril_action == 'drop':
                mask = ~is_tril
                chrom1_ids = chrom1_ids[mask]
                chrom2_ids = chrom2_ids[mask]
                anchor1 = anchor1[mask]
                anchor2 = anchor2[mask]
                chunk = chunk[mask].copy()
            elif tril_action == 'raise':
                err = chunk[is_tril]
                raise BadInputError("Found lower triangle pairs:\n{}".format(
                    err.head().to_csv(sep='\t')))
            else:
                raise ValueError("Unknown tril_action value: '{}'".format(
                    tril_action))

    # Assign bin IDs from bin table
    chrom_binoffset = gs.chrom_binoffset
    binsize = gs.binsize
    if binsize is None:
        # Variable-width bins: binary-search each anchor's absolute
        # genomic position within its chromosome's bin span.
        chrom_abspos = gs.chrom_abspos
        start_abspos = gs.start_abspos
        bin1_ids = []
        bin2_ids = []
        for cid1, pos1, cid2, pos2 in zip(chrom1_ids, anchor1,
                                          chrom2_ids, anchor2):
            lo = chrom_binoffset[cid1]
            hi = chrom_binoffset[cid1 + 1]
            bin1_ids.append(lo + np.searchsorted(
                start_abspos[lo:hi],
                chrom_abspos[cid1] + pos1,
                side='right') - 1)
            lo = chrom_binoffset[cid2]
            hi = chrom_binoffset[cid2 + 1]
            bin2_ids.append(lo + np.searchsorted(
                start_abspos[lo:hi],
                chrom_abspos[cid2] + pos2,
                side='right') - 1)
        chunk['bin1_id'] = bin1_ids
        chunk['bin2_id'] = bin2_ids
    else:
        # Fixed binsize: direct integer arithmetic.
        chunk['bin1_id'] = chrom_binoffset[chrom1_ids] + anchor1 // binsize
        chunk['bin2_id'] = chrom_binoffset[chrom2_ids] + anchor2 // binsize

    # Sort by bin IDs
    if sort:
        chunk = chunk.sort_values(['bin1_id', 'bin2_id'])

    # TODO: check for duplicate records and warn
    return chunk
def sanitize_records(bins, schema=None, **kwargs):
    """
    Build a function that sanitizes and assigns bin IDs to a data frame of
    paired genomic positions, based on the bin segmentation in ``bins``.

    Parameters
    ----------
    bins : DataFrame
        Bin table the records will be assigned against.
    schema : str, optional
        Name of a preset ('bg2' or 'pairs') supplying default options;
        individual options may still be overridden via ``kwargs``. If no
        schema is given, all options below are required.
    decode_chroms : bool
        Convert string chromosome names to integer IDs using the ordering
        of the bin table. Set False if chroms are already an enumeration
        starting at 0. Records with a chrom ID < 0 are dropped.
    is_one_based : bool
        If True, input anchor coordinates are one-based and will be
        converted to zero-based.
    tril_action : 'reflect', 'drop', 'raise' or None
        Policy for lower-triangle records: mirror them ('reflect'),
        discard them ('drop', useful for symmetric/duplexed input), or
        fail ('raise').
    chrom_field : str
        Base name of the chromosome/scaffold/contig column pair.
    anchor_field : str
        Base name of the positional anchor column pair.
    sided_fields : sequence of str
        Base names of column pairs whose values are swapped when a record
        is mirror-reflected.
    suffixes : pair of str
        Suffixes identifying the two sides, e.g. ('1', '2') or ('_x', '_y').
    sort : bool
        Sort the output by bin1_id then bin2_id.
    validate : bool
        Type- and bounds-check the anchor columns; raises BadInputError.

    Returns
    -------
    callable :
        One-argument function mapping a raw dataframe to a sanitized
        dataframe with bin IDs assigned.
    """
    if schema is None:
        options = {}
    else:
        try:
            options = SANITIZE_PRESETS[schema].copy()
        except KeyError:
            raise ValueError("Unknown schema: '{}'".format(schema))
    options.update(**kwargs)
    options['gs'] = GenomeSegmentation(get_chromsizes(bins), bins)
    return partial(_sanitize_records, **options)
def _sanitize_pixels(chunk, gs, is_one_based=False, tril_action='reflect',
                     bin1_field='bin1_id', bin2_field='bin2_id', sided_fields=(),
                     suffixes=('1', '2'), validate=True, sort=True):
    """Worker behind :func:`sanitize_pixels`: normalize bin IDs of an
    already-binned chunk and resolve lower-triangle entries.

    NOTE(review): the ``validate``, ``bin1_field`` and ``bin2_field``
    parameters are accepted but never used in this body — the column
    names 'bin1_id'/'bin2_id' are hard-coded below. Confirm intent.
    """
    if is_one_based:
        # Convert one-based bin IDs to zero-based.
        chunk['bin1_id'] -= 1
        chunk['bin2_id'] -= 1

    if tril_action is not None:
        is_tril = chunk['bin1_id'] > chunk['bin2_id']
        if np.any(is_tril):
            if tril_action == 'reflect':
                # Mirror tril pixels into the upper triangle: swap the bin
                # IDs and the values of each sided column pair.
                chunk.loc[is_tril, 'bin1_id'], \
                    chunk.loc[is_tril, 'bin2_id'] = chunk.loc[is_tril, 'bin2_id'], \
                    chunk.loc[is_tril, 'bin1_id']
                for field in sided_fields:
                    chunk.loc[is_tril, field + suffixes[0]], \
                        chunk.loc[is_tril, field + suffixes[1]] = \
                        chunk.loc[is_tril, field + suffixes[1]], \
                        chunk.loc[is_tril, field + suffixes[0]]
            elif tril_action == 'drop':
                chunk = chunk[~is_tril]
            elif tril_action == 'raise':
                raise BadInputError("Found bin1_id greater than bin2_id")
            else:
                raise ValueError("Unknown tril_action value: '{}'".format(
                    tril_action))

    return chunk.sort_values(['bin1_id', 'bin2_id']) if sort else chunk
def _validate_pixels(chunk, n_bins, boundscheck, triucheck, dupcheck, ensure_sorted):
if boundscheck:
is_neg = (chunk['bin1_id'] < 0) | (chunk['bin2_id'] < 0)
if np.any(is_neg):
raise BadInputError("Found bin ID < 0")
is_excess = (chunk['bin1_id'] >= n_bins) | (chunk['bin2_id'] >= n_bins)
if np.any(is_excess):
raise BadInputError(
"Found a bin ID that exceeds the declared number of bins. "
"Check whether your bin table is correct.")
if triucheck:
is_tril = chunk['bin1_id'] > chunk['bin2_id']
if np.any(is_tril):
raise BadInputError("Found bin1_id greater than bin2_id")
if not isinstance(chunk, pd.DataFrame):
chunk = pd.DataFrame(chunk)
if dupcheck:
is_dup = chunk.duplicated(['bin1_id', 'bin2_id'])
if is_dup.any():
err = chunk[is_dup]
raise BadInputError("Found duplicate pixels:\n{}".format(err.head().to_csv(sep='\t')))
if ensure_sorted:
chunk = chunk.sort_values(['bin1_id', 'bin2_id'])
return chunk
def validate_pixels(n_bins, boundscheck, triucheck, dupcheck, ensure_sorted):
    """Build a one-argument pixel validator bound to the given settings."""
    options = dict(
        n_bins=n_bins,
        boundscheck=boundscheck,
        triucheck=triucheck,
        dupcheck=dupcheck,
        ensure_sorted=ensure_sorted,
    )
    return partial(_validate_pixels, **options)
def sanitize_pixels(bins, **kwargs):
    """
    Build a function that sanitizes already-binned genomic data carrying
    genomic bin assignments.

    Parameters
    ----------
    bins : DataFrame
        Bin table the pixel records will be checked against.
    is_one_based : bool, optional
        If True, input bin IDs are one-based and will be converted to
        zero-based.
    tril_action : 'reflect', 'drop', 'raise' or None
        Policy for lower-triangle pixels: mirror them ('reflect', the
        default), discard them ('drop', useful for duplexed input that
        contains mirror duplicates), or fail ('raise').
    bin1_field : str
        Column holding the ith (row) matrix axis; default 'bin1_id'.
    bin2_field : str
        Column holding the jth (col) matrix axis; default 'bin2_id'.
    sided_fields : sequence of str
        Base names of column pairs whose values are swapped when a pixel
        is mirror-reflected.
    suffixes : pair of str
        Suffixes identifying the two sides, e.g. ('1', '2') or ('_x', '_y').
    sort : bool
        Sort the output by bin1_id then bin2_id.
    validate : bool
        Type- and bounds-check the bin IDs; raises BadInputError.

    Returns
    -------
    callable :
        One-argument function mapping a raw dataframe to a sanitized
        dataframe.
    """
    segmentation = GenomeSegmentation(get_chromsizes(bins), bins)
    kwargs['gs'] = segmentation
    return partial(_sanitize_pixels, **kwargs)
def _aggregate_records(chunk, sort, agg, rename):
    # Group bin-assigned records by pixel and apply the given aggregations.
    # NOTE(review): this module-level helper appears unused — the
    # `aggregate_records` factory below builds its own closure instead
    # (see the commented-out `partial` return there). Confirm before
    # removing; it may be kept for pickling across worker processes.
    return (chunk.groupby(['bin1_id', 'bin2_id'], sort=sort)
            .aggregate(agg)
            .rename(columns=rename)
            .reset_index())
def aggregate_records(sort=True, count=True, agg=None, rename=None):
    """
    Build a function that aggregates bin-assigned records by pixel.

    Parameters
    ----------
    sort : bool, optional
        Sort group keys; turning this off can improve performance. Row
        order within each group is unaffected either way.
    count : bool, optional
        Emit the number of records per pixel as a 'count' column.
        Default is True.
    agg : dict, optional
        Mapping of column names -> aggregation functions or names.
    rename : dict, optional
        Mapping used to rename columns after aggregating.

    Returns
    -------
    callable :
        Function that groups a bin-assigned dataframe by pixel, counts the
        records, and applies any extra aggregations.

    Notes
    -----
    GroupBy 'count' ignores NaNs within groups, unlike 'size'.
    """
    if agg is None:
        agg = {}
    if rename is None:
        rename = {}

    # Counting is implemented by 'size'-aggregating one of the grouper
    # columns and relabeling the result 'count'.
    if count and 'count' not in agg:
        agg['bin1_id'] = 'size'
        rename['bin1_id'] = 'count'

    def _aggregate_records(chunk):
        grouped = chunk.groupby(['bin1_id', 'bin2_id'], sort=sort)
        result = grouped.aggregate(agg)
        return result.rename(columns=rename).reset_index()

    return _aggregate_records
class ContactBinner(object):
    """Abstract base class for iterable contact binners."""

    def __getstate__(self):
        # Drop the (possibly unpicklable) map function before pickling.
        state = dict(self.__dict__)
        state.pop('_map', None)
        return state

    def __iter__(self):
        """Iterate over chunks of binned contacts (i.e. nonzero pixels).

        Each chunk is a dict of 1D arrays keyed by 'bin1_id', 'bin2_id'
        and 'count', lexically sorted by bin1_id then bin2_id.
        """
        raise NotImplementedError
class HDF5Aggregator(ContactBinner):
    """
    Aggregate contacts from a hiclib-style HDF5 contacts file.
    """
    def __init__(self, h5pairs, chromsizes, bins, chunksize, **kwargs):
        self.h5 = h5pairs
        # Dataset names for the chrom/cut columns of each read side;
        # defaults follow hiclib conventions.
        self.C1 = kwargs.pop('C1', 'chrms1')
        self.P1 = kwargs.pop('P1', 'cuts1')
        self.C2 = kwargs.pop('C2', 'chrms2')
        self.P2 = kwargs.pop('P2', 'cuts2')
        self.gs = GenomeSegmentation(chromsizes, bins)
        self.chunksize = chunksize
        self.partition = self._index_chroms()

    def _index_chroms(self):
        # index extents of chromosomes on first axis of contact list
        starts, lengths, values = rlencode(self.h5[self.C1], self.chunksize)
        if len(set(values)) != len(values):
            raise ValueError(
                "Read pair coordinates are not sorted on the first axis")
        # chrom ID -> (start row, end row) of its run in the pair list
        return dict(zip(values, zip(starts, starts + lengths)))

    def _load_chunk(self, lo, hi):
        # Materialize the [lo, hi) slice of the pair list as a DataFrame.
        data = OrderedDict([
            ('chrom_id1', self.h5[self.C1][lo:hi]),
            ('cut1', self.h5[self.P1][lo:hi]),
            ('chrom_id2', self.h5[self.C2][lo:hi]),
            ('cut2', self.h5[self.P2][lo:hi]),
        ])
        return pd.DataFrame(data)

    def aggregate(self, chrom):
        """Yield aggregated pixel frames for all pairs whose first side
        maps to ``chrom``."""
        h5pairs = self.h5
        C1, P1, C2, P2 = self.C1, self.P1, self.C2, self.P2
        chunksize = self.chunksize
        bins = self.gs.bins
        binsize = self.gs.binsize
        chrom_binoffset = self.gs.chrom_binoffset
        chrom_abspos = self.gs.chrom_abspos
        start_abspos = self.gs.start_abspos
        cid = self.gs.idmap[chrom]

        chrom_lo, chrom_hi = self.partition.get(cid, (-1, -1))
        lo = chrom_lo
        hi = lo
        while hi < chrom_hi:
            # update `hi` to make sure our selection doesn't split a bin1
            lo, hi = hi, min(hi + chunksize, chrom_hi)
            abspos = chrom_abspos[cid] + h5pairs[P1][hi - 1]
            bin_id = int(np.searchsorted(
                start_abspos, abspos, side='right')) - 1
            bin_end = bins['end'][bin_id]
            hi = bisect_left(h5pairs[P1], bin_end, lo, chrom_hi)
            if lo == hi:
                hi = chrom_hi

            logger.info('{} {}'.format(lo, hi))

            # load chunk and assign bin IDs to each read side
            table = self._load_chunk(lo, hi)
            abspos1 = chrom_abspos[h5pairs[C1][lo:hi]] + h5pairs[P1][lo:hi]
            abspos2 = chrom_abspos[h5pairs[C2][lo:hi]] + h5pairs[P2][lo:hi]
            if np.any(abspos1 > abspos2):
                raise ValueError(
                    "Found a read pair that maps to the lower triangle of the "
                    "contact map (side1 > side2). Check that the provided "
                    "chromosome ordering and read pair file are consistent "
                    "such that all pairs map to the upper triangle with "
                    "respect to the given chromosome ordering.")

            if binsize is None:
                # Variable-width bins: locate each side by binary search.
                table['bin1_id'] = np.searchsorted(
                    start_abspos, abspos1, side='right') - 1
                table['bin2_id'] = np.searchsorted(
                    start_abspos, abspos2, side='right') - 1
            else:
                # Fixed binsize: direct integer arithmetic.
                rel_bin1 = np.floor(table['cut1']/binsize).astype(int)
                rel_bin2 = np.floor(table['cut2']/binsize).astype(int)
                table['bin1_id'] = (
                    chrom_binoffset[table['chrom_id1'].values] + rel_bin1)
                table['bin2_id'] = (
                    chrom_binoffset[table['chrom_id2'].values] + rel_bin2)

            # reduce
            gby = table.groupby(['bin1_id', 'bin2_id'])
            agg = (gby['chrom_id1'].count()
                   .reset_index()
                   .rename(columns={'chrom_id1': 'count'}))
            yield agg

    def size(self):
        # Total number of read pairs in the input file.
        return len(self.h5['chrms1'])

    def __iter__(self):
        for chrom in self.gs.contigs:
            for df in self.aggregate(chrom):
                yield {k: v.values for k, v in six.iteritems(df)}
class TabixAggregator(ContactBinner):
    """
    Aggregate contacts from a sorted, BGZIP-compressed and tabix-indexed
    tab-delimited text file.
    """
    def __init__(self, filepath, chromsizes, bins, map=map, n_chunks=1,
                 is_one_based=False, **kwargs):
        try:
            import pysam
        except ImportError:
            raise ImportError("pysam is required to read tabix files")

        import dill
        import pickle
        dill.settings['protocol'] = pickle.HIGHEST_PROTOCOL

        self._map = map
        self.n_chunks = n_chunks
        self.is_one_based = bool(is_one_based)
        # Zero-based column numbers of the mate's chrom and position fields.
        self.C2 = kwargs.pop('C2', 3)
        self.P2 = kwargs.pop('P2', 4)

        # all requested contigs will be placed in the output matrix
        self.gs = GenomeSegmentation(chromsizes, bins)

        # find available contigs in the contact list
        self.filepath = filepath
        self.n_records = None
        with pysam.TabixFile(filepath, 'r', encoding='ascii') as f:
            try:
                self.file_contigs = [c.decode('ascii') for c in f.contigs]
            except AttributeError:
                self.file_contigs = f.contigs
            if not len(self.file_contigs):
                raise RuntimeError("No reference sequences found.")

        # warn about requested contigs not seen in the contact list
        for chrom in self.gs.contigs:
            if chrom not in self.file_contigs:
                warnings.warn(
                    "Did not find contig " +
                    " '{}' in contact list file.".format(chrom))

        warnings.warn(
            "NOTE: When using the Tabix aggregator, make sure the order of "
            "chromosomes in the provided chromsizes agrees with the chromosome "
            "ordering of read ends in the contact list file.")

    def aggregate(self, grange):
        """Bin and count all pairs whose first side falls in ``grange``
        (a (chrom, start, end) tuple). Returns a pixel DataFrame with
        'bin1_id', 'bin2_id', 'count' columns, or None if empty."""
        chrom1, start, end = grange
        import pysam
        filepath = self.filepath
        binsize = self.gs.binsize
        idmap = self.gs.idmap
        chromsizes = self.gs.chromsizes
        chrom_binoffset = self.gs.chrom_binoffset
        chrom_abspos = self.gs.chrom_abspos
        start_abspos = self.gs.start_abspos
        # Offset subtracted when input coordinates are one-based.
        decr = int(self.is_one_based)
        C2 = self.C2
        P2 = self.P2

        logger.info('Binning {}:{}-{}|*'.format(chrom1, start, end))

        these_bins = self.gs.fetch((chrom1, start, end))
        rows = []
        with pysam.TabixFile(filepath, 'r', encoding='ascii') as f:
            parser = pysam.asTuple()
            accumulator = Counter()

            for bin1_id, bin1 in these_bins.iterrows():
                for line in f.fetch(chrom1, bin1.start, bin1.end,
                                    parser=parser):
                    chrom2 = line[C2]
                    pos2 = int(line[P2]) - decr

                    try:
                        cid2 = idmap[chrom2]
                    except KeyError:
                        # this chrom2 is not requested
                        continue

                    if binsize is None:
                        # Variable-width bins: binary search within chrom2.
                        lo = chrom_binoffset[cid2]
                        hi = chrom_binoffset[cid2 + 1]
                        bin2_id = lo + np.searchsorted(
                            start_abspos[lo:hi],
                            chrom_abspos[cid2] + pos2,
                            side='right') - 1
                    else:
                        bin2_id = chrom_binoffset[cid2] + (pos2 // binsize)

                    accumulator[bin2_id] += 1

                if not accumulator:
                    continue

                rows.append(
                    pd.DataFrame({
                        'bin1_id': bin1_id,
                        'bin2_id': list(accumulator.keys()),
                        'count': list(accumulator.values())},
                        columns=['bin1_id', 'bin2_id', 'count'])
                    .sort_values('bin2_id')
                )
                accumulator.clear()

        logger.info('Finished {}:{}-{}|*'.format(chrom1, start, end))

        return pd.concat(rows, axis=0) if len(rows) else None

    def __iter__(self):
        granges = balanced_partition(self.gs, self.n_chunks, self.file_contigs)
        for df in self._map(self.aggregate, granges):
            if df is not None:
                yield {k: v.values for k, v in six.iteritems(df)}
class PairixAggregator(ContactBinner):
    """
    Aggregate contacts from a sorted, BGZIP-compressed and pairix-indexed
    tab-delimited text file.
    """
    def __init__(self, filepath, chromsizes, bins, map=map, n_chunks=1,
                 is_one_based=False, **kwargs):
        try:
            import pypairix
        except ImportError:
            raise ImportError(
                "pypairix is required to read pairix-indexed files")

        import dill
        import pickle
        dill.settings['protocol'] = pickle.HIGHEST_PROTOCOL

        self._map = map
        self.n_chunks = n_chunks
        self.is_one_based = bool(is_one_based)

        f = pypairix.open(filepath, 'r')
        # Column numbers come from the pairix index metadata.
        self.C1 = f.get_chr1_col()
        self.C2 = f.get_chr2_col()
        self.P1 = f.get_startpos1_col()
        self.P2 = f.get_startpos2_col()

        # Block names are '<chrom1>|<chrom2>'; collect all contigs seen.
        self.file_contigs = set(
            itertools.chain.from_iterable(
                [b.split('|') for b in f.get_blocknames()]))
        if not len(self.file_contigs):
            raise RuntimeError("No reference sequences found.")
        for c1, c2 in itertools.combinations(self.file_contigs, 2):
            if f.exists2(c1, c2) and f.exists2(c2, c1):
                raise RuntimeError(
                    "Pairs are not triangular: found blocks " +
                    "'{0}|{1}'' and '{1}|{0}'".format(c1, c2))

        # dumb heuristic to prevent excessively large chunks on one worker
        if hasattr(f, 'get_linecount'):
            n_lines = f.get_linecount()
            if n_lines < 0:
                # correct int32 overflow bug
                MAXINT32 = 2147483647
                n_lines = MAXINT32 + MAXINT32 + n_lines
            max_chunk = int(100e6)
            n_chunks = n_lines // 2 // max_chunk
            old_n = self.n_chunks
            self.n_chunks = max(self.n_chunks, n_chunks)
            if self.n_chunks > old_n:
                logger.info(
                    "Pairs file has {} lines. Increasing max-split to {}.".format(
                        n_lines, self.n_chunks))

        # all requested contigs will be placed in the output matrix
        self.gs = GenomeSegmentation(chromsizes, bins)

        # find available contigs in the contact list
        self.filepath = filepath
        self.n_records = None

        # warn about requested contigs not seen in the contact list
        for chrom in self.gs.contigs:
            if chrom not in self.file_contigs:
                warnings.warn(
                    "Did not find contig " +
                    " '{}' in contact list file.".format(chrom))

    def aggregate(self, grange):
        """Bin and count all pairs whose first side falls in ``grange``
        (a (chrom, start, end) tuple). Returns a pixel DataFrame with
        'bin1_id', 'bin2_id', 'count' columns, or None if empty."""
        chrom1, start, end = grange
        import pypairix
        filepath = self.filepath
        binsize = self.gs.binsize
        chromsizes = self.gs.chromsizes
        chrom_binoffset = self.gs.chrom_binoffset
        chrom_abspos = self.gs.chrom_abspos
        start_abspos = self.gs.start_abspos
        # Offset subtracted when input coordinates are one-based.
        decr = int(self.is_one_based)
        C1 = self.C1
        C2 = self.C2
        P1 = self.P1
        P2 = self.P2

        logger.info('Binning {}:{}-{}|*'.format(chrom1, start, end))

        f = pypairix.open(filepath, 'r')
        these_bins = self.gs.fetch((chrom1, start, end))
        remaining_chroms = self.gs.idmap[chrom1:]
        cid1 = self.gs.idmap[chrom1]

        accumulator = Counter()
        rows = []
        for bin1_id, bin1 in these_bins.iterrows():
            for chrom2, cid2 in six.iteritems(remaining_chroms):
                chrom2_size = chromsizes[chrom2]

                if chrom1 != chrom2 and f.exists2(chrom2, chrom1):  # flipped
                    # Query the mirror block and read positions from side 1.
                    iterator = f.query2D(chrom2, 0, chrom2_size,
                                         chrom1, bin1.start, bin1.end)
                    pos2_col = P1
                else:
                    iterator = f.query2D(chrom1, bin1.start, bin1.end,
                                         chrom2, 0, chrom2_size)
                    pos2_col = P2

                for line in iterator:
                    pos2 = int(line[pos2_col]) - decr

                    if binsize is None:
                        # Variable-width bins: binary search within chrom2.
                        lo = chrom_binoffset[cid2]
                        hi = chrom_binoffset[cid2 + 1]
                        bin2_id = lo + np.searchsorted(
                            start_abspos[lo:hi],
                            chrom_abspos[cid2] + pos2,
                            side='right') - 1
                    else:
                        bin2_id = chrom_binoffset[cid2] + (pos2 // binsize)

                    accumulator[bin2_id] += 1

            if not accumulator:
                continue

            rows.append(
                pd.DataFrame({
                    'bin1_id': bin1_id,
                    'bin2_id': list(accumulator.keys()),
                    'count': list(accumulator.values())},
                    columns=['bin1_id', 'bin2_id', 'count'])
                .sort_values('bin2_id')
            )
            accumulator.clear()

        logger.info('Finished {}:{}-{}|*'.format(chrom1, start, end))

        return pd.concat(rows, axis=0) if len(rows) else None

    def __iter__(self):
        granges = balanced_partition(self.gs, self.n_chunks, self.file_contigs)
        for df in self._map(self.aggregate, granges):
            if df is not None:
                yield {k: v.values for k, v in six.iteritems(df)}
class SparseLoader(ContactBinner):
    """
    Load binned contacts from a single 3-column sparse matrix text file.
    """
    # Default zero-based column numbers in the input file.
    FIELD_NUMBERS = OrderedDict([
        ('bin1_id', 0),
        ('bin2_id', 1),
        ('count', 2),
    ])

    # Default dtypes of the required fields.
    FIELD_DTYPES = OrderedDict([
        ('bin1_id', int),
        ('bin2_id', int),
        ('count', int),
    ])

    def __init__(self, filepath, bins, chunksize, field_numbers=None,
                 field_dtypes=None, one_based=False):
        """
        Parameters
        ----------
        filepath : str
            Path to tsv file
        chunksize : number of rows of the matrix file to read at a time
        """
        self._map = map
        self.filepath = filepath
        self.chunksize = chunksize
        self.one_based_bin_ids = one_based
        self.n_bins = len(bins)

        # Assign the column numbers
        self.field_numbers = self.FIELD_NUMBERS.copy()
        if field_numbers is not None:
            self.field_numbers.update(field_numbers)
        self.columns = list(self.field_numbers.keys())
        self.usecols = list(self.field_numbers.values())

        # Assign the column dtypes. Assume additional value fields are float.
        self.out_columns = ['bin1_id', 'bin2_id', 'count']
        self.dtypes = self.FIELD_DTYPES.copy()
        for col in self.columns:
            if col not in self.dtypes:
                self.out_columns.append(col)
                self.dtypes[col] = float

        # Override defaults
        if field_dtypes is not None:
            self.dtypes.update(field_dtypes)

    def __iter__(self):
        n_bins = self.n_bins
        iterator = pd.read_csv(
            self.filepath,
            sep='\t',
            iterator=True,
            comment='#',
            chunksize=self.chunksize,
            usecols=self.usecols,
            names=self.columns,
            dtype=self.dtypes)

        for chunk in iterator:
            # Input must already be upper triangular.
            if np.any(chunk['bin1_id'] > chunk['bin2_id']):
                raise ValueError("Found bin1_id greater than bin2_id")

            if self.one_based_bin_ids:
                # convert to zero-based
                if np.any(chunk['bin1_id'] <= 0) or np.any(chunk['bin2_id'] <= 0):
                    raise ValueError(
                        "Found bin ID <= 0. Are you sure bin IDs are one-based?")
                chunk['bin1_id'] -= 1
                chunk['bin2_id'] -= 1

            if (np.any(chunk['bin1_id'] >= n_bins) or
                    np.any(chunk['bin2_id'] >= n_bins)):
                raise ValueError(
                    "Found a bin ID that exceeds the declared number of bins. "
                    "Check whether your bin table is correct.")

            yield {k: v.values for k, v in six.iteritems(chunk)}
class SparseBlockLoader(ContactBinner):
    """
    Serve a contact matrix given as a mapping of
    (chrom1, chrom2) -> scipy sparse matrix blocks, emitting chunks of
    upper-triangle nonzero pixels.
    """
    def __init__(self, chromsizes, bins, mapping, chunksize):
        # NOTE(review): `check_bins` is not imported in this module; it is
        # expected to be supplied by the enclosing package — confirm.
        bins = check_bins(bins, chromsizes)
        self.bins = bins
        self.chromosomes = list(chromsizes.index)
        self.chunksize = chunksize
        n_chroms = len(chromsizes)
        n_bins = len(bins)

        # offsets[c] is the ID of the first bin belonging to chromosome c;
        # offsets[-1] == n_bins.
        chrom_ids = bins['chrom'].cat.codes
        self.offsets = np.zeros(n_chroms + 1, dtype=int)
        curr_val = 0
        for start, length, value in zip(*rlencode(chrom_ids)):
            self.offsets[curr_val:value + 1] = start
            curr_val = value + 1
        self.offsets[curr_val:] = n_bins

        self.mapping = mapping

    def select_block(self, chrom1, chrom2):
        """Return the block for (chrom1, chrom2), transposing the mirror
        block if only (chrom2, chrom1) is stored. Raises KeyError if
        neither orientation exists."""
        try:
            block = self.mapping[chrom1, chrom2]
        except KeyError:
            try:
                block = self.mapping[chrom2, chrom1].T
            except KeyError:
                # BUG FIX: was `warning.warn` (NameError at runtime); the
                # module imported at the top of this file is `warnings`.
                warnings.warn(
                    "Block for {{{}, {}}} not found".format(chrom1, chrom2))
                raise
        return block

    def __iter__(self):
        # Local import: scipy is required here but is not imported at the
        # top of this module.
        import scipy.sparse

        n_bins = len(self.bins)
        chromosomes = self.chromosomes

        for cid1, chrom1 in enumerate(chromosomes):
            offset = self.offsets[cid1]
            chrom1_nbins = self.offsets[cid1 + 1] - offset
            spans = partition(0, chrom1_nbins, self.chunksize)
            for lo, hi in spans:
                # Horizontally stack the [lo, hi) row band of every block
                # to the right of (and including) chrom1's diagonal block.
                chunks = []
                for chrom2 in chromosomes[cid1:]:
                    try:
                        block = self.select_block(chrom1, chrom2)
                    except KeyError:
                        continue
                    chunks.append(block.tocsr()[lo:hi, :])
                X = scipy.sparse.hstack(chunks).tocsr().tocoo()
                i, j, v = X.row, X.col, X.data
                # NOTE(review): row indices are offset by `offset` only;
                # if a chromosome spans multiple row chunks, `lo` may also
                # need to be added — confirm against callers.
                mask = (offset + i) <= (offset + j)
                triu_i, triu_j, triu_v = i[mask], j[mask], v[mask]
                yield {
                    'bin1_id': offset + triu_i,
                    'bin2_id': offset + triu_j,
                    'count': triu_v,
                }
class ArrayLoader(ContactBinner):
    """
    Serve a dense genome-wide contact matrix as chunks of upper-triangle
    nonzero pixels.

    Works with array-likes such as h5py.Dataset and memmapped arrays.

    See Also
    --------
    numpy.save, numpy.load (mmap_mode)
    """
    def __init__(self, bins, array, chunksize):
        if len(bins) != array.shape[0]:
            raise ValueError("Number of bins must equal the dimenion of the matrix")
        self.array = array
        self.chunksize = chunksize

    def __iter__(self):
        total_bins = self.array.shape[0]
        # Walk the matrix in horizontal bands of `chunksize` rows,
        # sparsifying each band and keeping only upper-triangle entries.
        for row_lo, row_hi in partition(0, total_bins, self.chunksize):
            band = self.array[row_lo:row_hi, :]
            rows, cols = np.nonzero(band)
            keep = (row_lo + rows) <= cols
            rows, cols = rows[keep], cols[keep]
            yield {
                'bin1_id': row_lo + rows,
                'bin2_id': cols,
                'count': band[rows, cols],
            }
class ArrayBlockLoader(ContactBinner):
    """
    Serve a contact matrix given as a mapping of
    (chrom1, chrom2) -> dense 2D array blocks, emitting chunks of
    upper-triangle nonzero pixels.
    """
    def __init__(self, chromsizes, bins, mapping, chunksize):
        # NOTE(review): `check_bins` is not imported in this module; it is
        # expected to be supplied by the enclosing package — confirm.
        bins = check_bins(bins, chromsizes)
        self.bins = bins
        self.chromosomes = list(chromsizes.index)
        self.chunksize = chunksize
        n_chroms = len(chromsizes)
        n_bins = len(bins)

        # offsets[c] is the ID of the first bin belonging to chromosome c;
        # offsets[-1] == n_bins.
        chrom_ids = bins['chrom'].cat.codes
        self.offsets = np.zeros(n_chroms + 1, dtype=int)
        curr_val = 0
        for start, length, value in zip(*rlencode(chrom_ids)):
            self.offsets[curr_val:value + 1] = start
            curr_val = value + 1
        self.offsets[curr_val:] = n_bins

        self.mapping = mapping

    def select_block(self, chrom1, chrom2):
        """Return the block for (chrom1, chrom2), transposing the mirror
        block if only (chrom2, chrom1) is stored. Raises KeyError if
        neither orientation exists."""
        try:
            block = self.mapping[chrom1, chrom2]
        except KeyError:
            try:
                block = self.mapping[chrom2, chrom1].T
            except KeyError:
                # BUG FIX: was `warning.warn` (NameError at runtime); the
                # module imported at the top of this file is `warnings`.
                warnings.warn(
                    "Block for {{{}, {}}} not found".format(chrom1, chrom2))
                raise
        return block

    def __iter__(self):
        n_bins = len(self.bins)
        chromosomes = self.chromosomes

        for cid1, chrom1 in enumerate(chromosomes):
            offset = self.offsets[cid1]
            chrom1_nbins = self.offsets[cid1 + 1] - offset
            spans = partition(0, chrom1_nbins, self.chunksize)
            for lo, hi in spans:
                # Concatenate the [lo, hi) row band of every block to the
                # right of (and including) chrom1's diagonal block.
                chunks = []
                for chrom2 in chromosomes[cid1:]:
                    try:
                        block = self.select_block(chrom1, chrom2)
                    except KeyError:
                        continue
                    chunks.append(block[lo:hi, :])
                X = np.concatenate(chunks, axis=1)
                i, j = np.nonzero(X)
                # NOTE(review): row indices are offset by `offset` only;
                # if a chromosome spans multiple row chunks, `lo` may also
                # need to be added — confirm against callers.
                mask = (offset + i) <= (offset + j)
                triu_i, triu_j = i[mask], j[mask]
                yield {
                    'bin1_id': offset + triu_i,
                    'bin2_id': offset + triu_j,
                    'count': X[triu_i, triu_j],
                }
| [
"nabdennur@gmail.com"
] | nabdennur@gmail.com |
08337b0bd47f2c40824bf33ed5cfb498412f19f5 | 1c21fa248091e31c362b95afafc5021211e85e63 | /invensis_pmc/inhouseapp/utils/decorators.py | ef84ceff1a3d3226cd385ba8f8ac51b6bf89d02d | [] | no_license | anudeepnaidu95/dev5 | 3d3252a51fccbb794e78a91681708e1b3c1ce0d4 | 7351244b79be242aa2cad36dbe1adca22a744edc | refs/heads/master | 2021-01-20T12:28:07.286078 | 2017-05-05T11:08:37 | 2017-05-05T11:08:37 | 90,365,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,662 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import timeit
import jsonpickle
from django.utils import six
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
def log(fn):
    """
    Logger decorator.
    Logs function Executing and Executed info and total Executed Time.
    """
    def wrapper(*args, **kwargs):
        # Describe the wrapped callable for the log lines.
        qualified = "'(%(type)s) %(module)s.%(name)s'" % {
            'module': fn.__module__,
            'type': fn.__class__.__name__,
            'name': fn.__name__,
        }
        settings.LOGIC_LOGGER.info('Start %s', qualified)
        started = timeit.default_timer()
        result = fn(*args, **kwargs)
        # Wall-clock duration of the call.
        elapsed = timeit.default_timer() - started
        settings.LOGIC_LOGGER.info('End %s in %.5f SECs', qualified, elapsed)
        return result
    return wrapper
def json(fn):
    """
    Gets view method response and returns it in JSON format.

    On success, wraps the view's return value in
    ``{'is_successful': True, 'result': ...}``. On failure (unless
    ``settings.JSON_DEBUG`` is set, in which case the exception
    propagates), returns ``{'is_successful': False, 'message': ...}``.
    """
    def wrapper(request, *args, **kwargs):
        try:
            # Executing function itself
            fn_result = fn(request, *args, **kwargs)
            # Prepare JSON dictionary for successful result
            json_result = {'is_successful': True, 'result': fn_result}
        except Exception as e:
            # If AJAX_DEBUG is enabled raise exception
            if settings.JSON_DEBUG:
                raise
            # BUG FIX: `e.message` does not exist on Python 3 exceptions
            # (and is deprecated since Python 2.6), so accessing it raised
            # AttributeError and defeated this error wrapper; `str(e)` is
            # the portable equivalent.
            error_message = str(e).upper()
            json_result = {'is_successful': False, 'message': error_message}
        # Wrap result with JSON HTTPResponse and return
        return HttpResponse(jsonpickle.encode(json_result, unpicklable=False),
                            content_type='application/json')
    return wrapper
def anonymous_required(function):
    """
    Allow access only to anonymous visitors. Users who are already
    authenticated get an error message and are redirected to 'home'.
    """
    def wrap(request, *args, **kwargs):
        if request.user.is_authenticated():
            messages.add_message(
                request, messages.ERROR,
                _('You are already registered and logged in.'))
            return redirect(reverse('home'))
        return function(request, *args, **kwargs)
    return wrap
def group_required(group, login_url=None, raise_exception=False):
    """
    Decorator for views that checks whether a user has a group permission,
    redirecting to the log-in page if necessary.
    If the raise_exception parameter is given the PermissionDenied exception
    is raised.
    """
    def check_perms(user):
        # Accept either a single group name or an iterable of names.
        if isinstance(group, six.string_types):
            groups = (group, )
        else:
            groups = group
        # First check if the user has the permission (even anon users)
        if user.groups.filter(name__in=groups).exists():
            return True
        # In case the 403 handler should be called raise the exception
        if raise_exception:
            raise PermissionDenied
        # As the last resort, show the login form
        return False
return user_passes_test(check_perms, login_url=login_url) | [
"anudeepnaidu95@gmail.com"
] | anudeepnaidu95@gmail.com |
94d08669d7d91b6bae39af0f8cd3f736aa4d581e | 4794379ca67670b65097830df76dd5440c69ddf6 | /oop_advance/slots.py | 2dcea1fb857c1f05d25824b1cb714780f170d880 | [] | no_license | kk412027247/learn-python | f7e12c1b476e47a2a18c609a81dbfe8bd1e31a41 | 25bb231e62a17303ec543e41a18d9302dac9d607 | refs/heads/master | 2020-05-26T14:31:05.096302 | 2019-10-13T07:06:29 | 2019-10-13T07:06:29 | 188,265,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from types import MethodType
class Student(object):
    pass


s = Student()
# Attributes can be attached to instances dynamically.
s.name = 'Michael'
print(s.name)


def set_age(self, age):
    self.age = age


# Bind `set_age` as a method on this single instance only.
s.set_age = MethodType(set_age, s)
s.set_age(25)
print(s.age)


def set_score(self, score):
    self.score = score


# Attaching a function to the class makes it a method for all instances.
Student.set_score = set_score
s.set_score(100)
print(s.score)

s2 = Student()
s2.set_score(99)
print(s2.score)


class Student(object):
    # __slots__ restricts instances to exactly these attribute names.
    __slots__ = ('age', 'name')


s = Student()
s.name = 'Michael'
s.age = 25
# s.score = 99


class GraduateStudent(Student):
    pass


# __slots__ is not inherited automatically: subclass instances get a
# __dict__ again, so arbitrary attributes work here.
g = GraduateStudent()
g.score = 9999
| [
"kk412027247@gmail.com"
] | kk412027247@gmail.com |
9ccf72911d2d92e9eb8901fb65d14b172b2d10df | be55991401aef504c42625c5201c8a9f14ca7c3b | /leetcode/graph/hard/dungeon_game_174/my_answer.py | 4d7e22d5b43386c062551f7d021dd7ac567375ab | [
"Apache-2.0"
] | permissive | BillionsRichard/pycharmWorkspace | adc1f8bb15b58ded489fc8dec0df397601823d2c | 709e2681fc6d85ff52fb25717215a365f51073aa | refs/heads/master | 2021-09-14T21:12:59.839963 | 2021-08-08T09:05:37 | 2021-08-08T09:05:37 | 143,610,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,605 | py | # encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: billions.richard@qq.com
@site:
@software: PyCharm
@time: 2019/10/20 16:28
"""
class Solution:
    def calculateMinimumHP(self, dungeon: list) -> int:
        """Return the minimum initial health the knight needs to rescue the
        princess (LeetCode 174).

        Works backwards from the bottom-right cell with a one-dimensional
        rolling DP: need[y] holds the minimum health required when
        *entering* cell (x, y); a cell needs at least 1 HP, plus enough to
        survive its own damage and reach the cheaper of its right/down
        neighbours.
        """
        rows, cols = len(dungeon), len(dungeon[0])
        INF = float('inf')
        # need has one sentinel column; need[cols] stays infinite so the
        # min() below never picks a neighbour outside the grid.
        need = [INF] * (cols + 1)
        # Virtual cell just past the princess: leaving the grid needs 1 HP.
        need[cols - 1] = 1
        for x in range(rows - 1, -1, -1):
            for y in range(cols - 1, -1, -1):
                # need[y] currently holds the cell below, need[y + 1] the
                # cell to the right (already updated for this row).
                cheapest_exit = min(need[y], need[y + 1])
                need[y] = max(1, cheapest_exit - dungeon[x][y])
        return need[0]
| [
"295292802@qq.com"
] | 295292802@qq.com |
3107bcf162c8b6a6c5177db6cbf6585df3817cc6 | 083f0cd2cf53384802eaf4f3cd9cbc4f0f632be4 | /env/lib/python3.5/tokenize.py | 41ba11f6602911ba2ede6c462ed8694bf56719f4 | [] | no_license | billchen99/LabsTechChallenge | 527e3ae33d31dd1e35c1b17a89cd45770be07519 | c1a4c2fc028b5bd155ec4117c818accc501a5b52 | refs/heads/master | 2021-05-11T16:04:10.433404 | 2018-01-17T08:48:55 | 2018-01-17T08:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | /home/bill/psi4conda/lib/python3.5/tokenize.py | [
"billchen99@gmail.com"
] | billchen99@gmail.com |
fab1af44d31dd57e504c8b276a0b0b78dae067e2 | c8c95520fb1a17d627a0256df2c6702f4f53403a | /10.6.3_exercises.py | 70ad860142aa2c9a7972a1446f9c1829e3d0fcd1 | [] | no_license | wsargeant/httlacs | 608f1b4b34c95f18f934f10883beb56a2895c269 | 3337b369c541e18d5ed9ecbca35494c3ebcfa591 | refs/heads/master | 2023-02-21T21:10:03.228762 | 2021-01-25T08:44:46 | 2021-01-25T08:44:46 | 284,936,152 | 0 | 0 | null | 2020-08-28T10:35:17 | 2020-08-04T09:32:38 | null | UTF-8 | Python | false | false | 2,406 | py | import turtle
turtle.setup(400,600)
wn = turtle.Screen()
wn.title("Tess becomes a traffic light")
wn.bgcolor("lightgreen")
tess = turtle.Turtle()
green_light = turtle.Turtle()
amber_light = turtle.Turtle()
red_light = turtle.Turtle()
def draw_housing():
""" Draw a housing to hold the traffic lights"""
tess.pensize(3)
tess.color("black", "darkgrey")
tess.begin_fill()
tess.forward(80)
tess.left(90)
tess.forward(200)
tess.circle(40, 180)
tess.forward(200)
tess.left(90)
tess.end_fill()
tess.hideturtle()
draw_housing()
# position green_light onto the place where the green light should be
green_light.hideturtle()
green_light.penup()
green_light.forward(40)
green_light.left(90)
green_light.forward(50)
# turn green_light into a big green circle
green_light.shape("circle")
green_light.shapesize(3)
green_light.fillcolor("green")
green_light.showturtle()
# position amber_light onto the place where the amber light should be
amber_light.hideturtle()
amber_light.penup()
amber_light.forward(40)
amber_light.left(90)
amber_light.forward(120)
# turn amber_light into a big orange circle
amber_light.shape("circle")
amber_light.shapesize(3)
amber_light.fillcolor("orange")
# position red_light onto the place where the red light should be
red_light.hideturtle()
red_light.penup()
red_light.forward(40)
red_light.left(90)
red_light.forward(190)
# turn red_light into a big red circle
red_light.shape("circle")
red_light.shapesize(3)
red_light.fillcolor("red")
# a traffic light is a state machine with 3 states,
# green, amber, red. We number each state 0, 1, 2
# when the machine changes state, we change which turtle to show
state_num = 0
def advance_state_machine():
""" Traffic light sequence on timer"""
global state_num
if state_num == 0: #transition from state 0 to state 1
green_light.hideturtle()
amber_light.showturtle()
state_num = 1
wn.ontimer(advance_state_machine, 1000)
elif state_num == 1: # transition from state 1 to state 2
amber_light.hideturtle()
red_light.showturtle()
state_num = 2
wn.ontimer(advance_state_machine, 1000)
else:
red_light.hideturtle()
green_light.showturtle()
state_num = 0
# bind the event handler to the space key
wn.onkey(advance_state_machine, "space")
wn.listen() # listen for events
wn.mainloop() | [
"69194027+wsargeant@users.noreply.github.com"
] | 69194027+wsargeant@users.noreply.github.com |
f7e37c2a90c62480274603a2d723babb7e5c0bf0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02743/s644313592.py | 035aeecce49eb8a0432c5a0407dd58feff435620 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from typing import List
def main():
    """Read three space-separated integers a b c from stdin and print
    'Yes' if sqrt(a) + sqrt(b) < sqrt(c), else 'No' (see si below)."""
    nums = [int(n) for n in input().split(' ')]
    if si(nums):
        print('Yes')
    else:
        print('No')
def si(nums: List[int]) -> bool:
    """Decide whether sqrt(a) + sqrt(b) < sqrt(c) using only integer
    arithmetic.

    Squaring twice turns the inequality into: c - a - b > 0 and
    4ab < (c - a - b)^2, avoiding floating-point error entirely.
    """
    a, b, c = nums
    gap = c - a - b
    return gap > 0 and 4 * a * b < gap * gap
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
35f014f32beeaa20fc17beb68ae7b31007579727 | 958cbae5e11bd8116c9478b1c32b7c226fa68d38 | /myvenv/bin/pilprint.py | 73975b7983f252c90abe9835754750c65f3e0faf | [] | no_license | sarkmen/onedayschool | 0f49d26fae4a359d3e16b08efef0d951bf061b9e | 2bce3adf98bd1152a9aa5b30aaeb5b5885192b88 | refs/heads/master | 2021-01-21T04:50:26.946895 | 2016-07-19T05:42:39 | 2016-07-19T05:42:39 | 55,145,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | #!/Users/Roadwalker/dev/onedayschool/myvenv/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
# Printable page box (x0, y0, x1, y1) in PostScript points (72 per inch).
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
    """Return a one-line caption for *image*.

    The caption is the file's base name followed by the image format,
    pixel size and mode, e.g. "photo (PPM 640x480 RGB)". The format is
    omitted when the image has none.
    """
    title = os.path.splitext(os.path.split(filepath)[1])[0]
    # Renamed from `format`, which shadowed the builtin of the same name.
    layout = " (%dx%d "
    if image.format:
        layout = " (" + image.format + " %dx%d "
    return title + layout % image.size + image.mode + ")"
if len(sys.argv) == 1:
    # No files given: print usage and bail out.
    print("PIL Print 0.2a1/96-10-04 -- print image files")
    print("Usage: pilprint files...")
    print("Options:")
    print(" -c colour printer (default is monochrome)")
    print(" -p print via lpr (default is stdout)")
    print(" -P <printer> same as -p but use given printer")
    sys.exit(1)

try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
    print(v)
    sys.exit(1)

printerArgs = []  # print to stdout
monochrome = 1  # reduce file size for most common case

for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print(Image.ID)
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printerArgs = ["lpr"]
    elif o == "-P":
        # printer channel
        printerArgs = ["lpr", "-P%s" % a]

for filepath in argv:
    try:
        im = Image.open(filepath)
        title = description(filepath, im)
        if monochrome and im.mode not in ["1", "L"]:
            # Convert to greyscale; draft mode lets the decoder do it cheaply.
            im.draft("L", im.size)
            im = im.convert("L")
        if printerArgs:
            p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
            fp = p.stdin
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
        if printerArgs:
            fp.close()
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; report the failure and continue with the
        # next file instead.
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"beomjun.gim3@gmail.com"
] | beomjun.gim3@gmail.com |
de420c5cc26a3d0a32a181673551d55cbad73dee | b44bb9a3e2d621d3311597eaf89b53e9dc4761ca | /verpy/pybin3/assigns.py | 1b1a7e59e34a0e2ecf32068515df60e01887a3cc | [
"MIT"
] | permissive | greenblat/vlsistuff | e8ea9401a79bb9e770ab7692e672d14696611be7 | ed54a68a3099c89a3bbbff498be206151e5ca98e | refs/heads/master | 2023-09-01T07:30:57.223756 | 2023-08-23T09:59:04 | 2023-08-23T09:59:04 | 110,414,867 | 43 | 11 | null | 2021-04-19T06:36:36 | 2017-11-12T07:52:54 | Python | UTF-8 | Python | false | false | 13,814 | py |
import logs
import module_class
def help_main(Env):
    """Flatten a gate-level netlist back into RTL.

    Replaces every recognised standard-cell instance of the current module
    with equivalent assigns/always blocks, dumping intermediate RTL after
    each pass (<module>.rtl0 / .rtl1 / .rtl2).
    """
    Mod = Env.Current
    buildTranslation(Mod)
    logs.log_info('translation has %d members'%(len(TRANSLATIONS.keys())))
    Insts = list(Mod.insts.keys())
    # gatherAssigns(Mod)
    for Inst in Insts:
        Obj = Mod.insts[Inst]
        Type = Obj.Type
        logs.log_info('inst %s %s'%(Inst,Type))
        Del = dealType(Type,Inst,Obj,Mod)
        if Del:
            # Instance was fully converted to RTL; drop it from the netlist.
            Mod.insts.pop(Inst)
    Fout = open('%s.rtl0'%Mod.Module,'w')
    Mod.dump_verilog(Fout)
    Fout.close()
    gatherAlwayses(Mod)
    Fout = open('%s.rtl1'%Mod.Module,'w')
    Mod.dump_verilog(Fout)
    Fout.close()
    gatherExpressions(Mod)
    Fout = open('%s.rtl2'%Mod.Module,'w')
    Mod.dump_verilog(Fout)
    Fout.close()
def gatherExpressions(Mod):
    """Clean up inverter chains among the continuous assigns.

    Finds scalar wires that are driven once and read once ('x2s'),
    collapses double inversions (an inverter whose single-use source is
    itself an inverted expression), then substitutes the remaining
    collapsed inverter nets into every other assign via replaces().
    """
    Dsts = {}
    Srcs = {}
    Invs = {}
    x2s = []
    for In in SOURCES:
        Srcs[In] = 1
    for Dst,Src,_,_ in Mod.hard_assigns:
        if type(Dst) is str:
            Dsts[Dst] = Src
        # Remember pure inverters: assigns of the form Dst = ('!', net).
        if (Src[0] == '!')and(len(Src)==2)and(type(Src[1]) is str):
            Invs[Dst] = Src[1]
        # Count how often each net appears as a source.
        Sup = module_class.support_set(Src)
        for In in Sup:
            if (type(In) is str):
                if In not in Srcs:
                    Srcs[In] = 1
                else: Srcs[In] += 1
    # x2s: scalar wires that are assigned and read exactly once.
    for Net in Dsts:
        if (Net in Srcs)and(Srcs[Net]==1)and(Net in Mod.nets):
            Dir,Wid = Mod.nets[Net]
            if Dir=='wire':
                x2s.append(Net)
    Deletes = []
    Addeds = []
    # Double negation: Inv = !Src and Src = ~expr/!expr  =>  Inv = expr.
    for Inv in Invs.keys():
        Src = Invs[Inv]
        if Src in x2s:
            logs.log_info('x2s driven inverter %s <= %s = %s'%(Inv,Src,Dsts[Src]))
            Expr = Dsts[Src]
            if Expr[0] in ('~', '!'):
                Addeds.append((Inv,Expr[1]))
                Deletes.append(Inv)
                Deletes.append(Src)
    # Drop the old assigns that were merged away (no index increment after
    # pop, so the shifted element is examined next).
    Ind = 0
    while Ind<len(Mod.hard_assigns):
        Dst,Src,_,_ = Mod.hard_assigns[Ind]
        if Dst in Deletes:
            Mod.hard_assigns.pop(Ind)
        else:
            Ind += 1
    for (Dst,Src) in Addeds:
        Mod.hard_assigns.append((Dst,Src,'',''))
    # Substitute remaining single-use inverter outputs into their reader.
    Uses = {}
    for Inv in Invs.keys():
        if Inv in x2s:
            Uses[Inv] = Invs[Inv]
    Ind = 0
    while Ind < len(Mod.hard_assigns):
        (Dst,Src,_,_) = Mod.hard_assigns[Ind]
        Src1,Done = replaces(Src,Uses)
        if Done:
            Mod.hard_assigns[Ind] = (Dst,Src1,'','')
            logs.log_info('fixed hard_assign %s %s <- %s'%(Dst,Src1,Src))
            Ind += 1
        elif (type(Dst) is str) and(Dst in Uses)and(Dst not in SOURCES):
            # The inverter's output was absorbed elsewhere; remove its assign
            # and its now-unused net.
            Mod.hard_assigns.pop(Ind)
            if Dst in Mod.nets: Mod.nets.pop(Dst)
        else:
            Ind += 1
def replaces(Src, Uses):
    """Recursively rewrite an expression tree, replacing every net name
    found in Uses with the inversion of its source net.

    Src is either a net name (str) or a nested list/tuple expression.
    Returns (new_expression, changed) where `changed` tells the caller
    whether any substitution actually happened.
    """
    if (type(Src) is str) and (Src in Uses):
        # Net driven by a single-use inverter: substitute the inverted source.
        return ('!', Uses[Src]), True
    if type(Src) is str:
        return Src, False
    # BUG FIX: the original did list(str(Src)), which exploded the
    # sub-expression into individual characters and destroyed its
    # structure; copy the sub-expression itself instead.
    LL = list(Src)
    Done = False
    for Ind, In in enumerate(LL):
        Inx, What = replaces(In, Uses)
        if What:
            LL[Ind] = Inx
            Done = True
    return LL, Done
ALWAYS = {}   # sensitivity-list condition -> list of always-block bodies
SOURCES = []  # nets assigned with '<=' or read in gathered always conditions
def gatherAlwayses(Mod):
    """Merge all always blocks that share the same sensitivity condition
    into a single block per condition, collecting written/read nets into
    the module-level SOURCES list along the way.
    """
    for Cond,Body,Kind in Mod.alwayses:
        if Cond not in ALWAYS: ALWAYS[Cond]=[]
        ALWAYS[Cond].append(Body)
    Mod.alwayses = []
    for Cond in ALWAYS:
        logs.log_info('ALWAYS %d %s'%(len(ALWAYS[Cond]),Cond))
        List = ALWAYS[Cond]
        RList = ['list']
        BList = ['list']
        if (Cond[0]=='list'):
            # Clock + async control: bodies are ('ifelse', cond, reset, body).
            for Item in List:
                RR = Item[2]
                BB = Item[3]
                RList.append(RR)
                BList.append(BB)
                if (BB[0]=='<='):
                    LL = BB[2]
                    if (type(LL) is list): LL = tuple(LL)
                    if LL not in SOURCES: SOURCES.append(LL)
                MM = module_class.support_set(Item[1])
                for M1 in MM:
                    if (M1 not in SOURCES): SOURCES.append(M1)
            # NOTE(review): Item[1] below is the condition of the *last*
            # item only -- confirm that all merged items share it.
            Mod.alwayses.append( ( Cond, ('ifelse',Item[1],RList,BList),'always'))
        elif (Cond[0]=='edge'):
            # Plain clocked block: bodies are bare statements.
            for Item in List:
                BB = Item
                BList.append(BB)
                if (BB[0]=='<='):
                    LL = BB[2]
                    if (type(LL) is list): LL = tuple(LL)
                    if LL not in SOURCES: SOURCES.append(LL)
                MM = module_class.support_set(Item[1])
                for M1 in MM:
                    if (M1 not in SOURCES): SOURCES.append(M1)
            Mod.alwayses.append( ( Cond, BList,'always'))
        else:
            logs.log_error('condition list is "%s"'%str(List))
ONES = []  # scalar wires with exactly one entry in the module net table
TWOS = []  # scalar wires with exactly two entries in the module net table
def gatherAssigns(Mod):
    """Classify scalar wires by how many entries they have in the module
    net table, filling the module-level ONES and TWOS lists.
    (Currently unused: the call in help_main is commented out.)
    """
    netTable = Mod.prepareNetTable()
    logs.log_info('netTable %d'%len(netTable.keys()))
    for Net in netTable:
        if Net in Mod.nets:
            Dir,Wid = Mod.nets[Net]
            logs.log_info('NNNN %s %s %s'%(Net,Dir,Wid))
            if (Dir == 'wire')and(Wid==0):
                if len(netTable[Net])==2:
                    logs.log_info('two %s %s'%(Net,netTable[Net]))
                    TWOS.append(Net)
                elif len(netTable[Net])==1:
                    logs.log_info('one %s %s'%(Net,netTable[Net][0]))
                    ONES.append(Net)
def buildTranslation(Mod):
    """Remove every BUFX2 instance and build TRANSLATIONS, mapping each
    buffered net back to the original driver at the head of its buffer
    chain.
    """
    # BUG FIX: snapshot the keys -- the loop pops instances while
    # iterating, and in Python 3 deleting from a dict while iterating a
    # live .keys() view raises RuntimeError.
    Insts = list(Mod.insts.keys())
    for Inst in Insts:
        Obj = Mod.insts[Inst]
        if Obj.Type == 'BUFX2':
            A = Obj.conns['A']
            Y = Obj.conns['Y']
            registerBuf(A,Y)
            Mod.insts.pop(Inst)
    # logs.log_info('ORIGINS %s'%(ORIGINS.keys()))
    # logs.log_info('BUFS %s'%(str(BUFS)))
    # logs.log_info('DRIVES %s'%(str(DRIVES)))
    for Out in BUFS:
        In = findIn(Out)
        TRANSLATIONS[Out] = In
def buildTranslationInv(Mod):
    """Remove every INVX1 instance and register its connectivity, then map
    chain outputs in TRANSLATIONS.

    NOTE(review): INVS is never initialised at module level in this file
    (only BUFS/ORIGINS/DRIVES/TRANSLATIONS are), so registerInv would raise
    NameError; also the final loop walks findIn (the *buffer* chain) rather
    than findInv -- confirm whether this function is live code.
    """
    # BUG FIX: snapshot the keys -- the loop pops instances while
    # iterating, which raises RuntimeError on a live dict view in Python 3.
    Insts = list(Mod.insts.keys())
    for Inst in Insts:
        Obj = Mod.insts[Inst]
        if Obj.Type == 'INVX1':
            A = Obj.conns['A']
            Y = Obj.conns['Y']
            registerInv(A,Y)
            Mod.insts.pop(Inst)
    # logs.log_info('ORIGINS %s'%(ORIGINS.keys()))
    # logs.log_info('BUFS %s'%(str(BUFS)))
    # logs.log_info('DRIVES %s'%(str(DRIVES)))
    for Out in INVS:
        In = findIn(Out)
        TRANSLATIONS[Out] = In
def findIn(Net):
    """Walk the buffer chain backwards from Net until an origin
    (a net that is not itself a buffer output) is reached."""
    node = Net
    while node not in ORIGINS:
        node = BUFS[node]
    return node
def findInv(Net, Depth):
    """Follow the inverter chain starting at Net, counting hops.

    Returns (source_net, total_depth). Relies on a module-level INVS map
    being defined elsewhere.
    """
    while Net in INVS:
        Net = INVS[Net]
        Depth += 1
    return Net, Depth
def trans(Txt):
    """Map a buffered net name to its original driver via TRANSLATIONS;
    non-strings and unknown names pass through unchanged."""
    if type(Txt) is str and Txt in TRANSLATIONS:
        return TRANSLATIONS[Txt]
    return Txt
BUFS = {}          # buffer output net -> its input net
ORIGINS = {}       # nets that head a buffer chain (not buffer outputs)
DRIVES = {}        # net -> list of buffer outputs it drives
TRANSLATIONS = {}  # buffered net -> original driver (built by buildTranslation)
def registerBuf(A, Y):
    """Record one buffer driving Y from A in the module-level maps."""
    BUFS[Y] = A
    if A not in BUFS:
        # A is not (yet) known as a buffer output, so treat it as an origin.
        ORIGINS[A] = True
    # Y is driven by a buffer, so it can no longer be an origin.
    ORIGINS.pop(Y, None)
    # Track the fan-out of A.
    DRIVES.setdefault(A, []).append(Y)
def registerInv(A,Y):
    # Record an INVX1 driving Y from A.
    # NOTE(review): INVS is never initialised at module level in this file
    # (only BUFS/ORIGINS/DRIVES/TRANSLATIONS are), so calling this raises
    # NameError unless INVS is provided elsewhere -- confirm.
    INVS[Y] = A
def dealType(Type,Inst,Obj,Mod):
    """Convert one instance of a known cell Type into RTL.

    Returns True when the instance was converted (caller then deletes it),
    False when the cell type is unknown and must stay as a black box.
    """
    if Type in KNOWNS:
        What = KNOWNS[Type]
        if What[0]=='mul':
            # Multi-output cell: one (dst, op, srcs) rule per output.
            logs.log_info('dealType %s %s %s'%(Type,Inst,What[1:]))
            for Dst,Op,Src in What[1:]:
                dealSingle(Dst,Op,Src,Obj,Mod)
            return True
        elif What[0]=='func':
            # Complex cell handled by a dedicated conversion function.
            return What[1](Obj,Mod)
        else:
            # Simple single-output cell described by (dst, op, srcs).
            Dst,Op,Src = KNOWNS[Type]
            dealSingle(Dst,Op,Src,Obj,Mod)
            return True
    else:
        logs.log_info('leaving %s as is'%(Type))
        return False
def dealSingle(Dst,Op,Src,Obj,Mod):
    """Emit one continuous assign for a single-output gate.

    Dst is the output pin name, Op the operator string ('' for a plain
    buffer; a leading '~' on a multi-char op marks an inverted form such
    as '~&' for NAND), and Src the input pin names. The resulting
    expression is appended to Mod.hard_assigns.
    """
    Ins = []
    Pref = ''
    if Op!='':
        if (Op[0]=='~')and(len(Op)>1):
            # Inverting gate: build the base operation, then negate it.
            Ins.append(Op[1:])
            Pref = '~'
        else:
            Ins.append(Op)
            Pref = ''
    for In in Src:
        # Map each input through the buffer-translation table.
        Inx = trans(Obj.conns[In])
        Ins.append(Inx)
    if Pref!='':
        Ins = (Pref,Ins)
    logs.log_info('dealSingle dst=%s %s %s ins %s'%(Dst,Obj.conns[Dst],Obj.Type,Ins))
    Mod.hard_assigns.append((Obj.conns[Dst],Ins,'',''))
    return True
# Conversion rules for simple standard cells:
#   (output_pin, operator, input_pins)   -- single-output cell
#   ['mul', rule, rule, ...]             -- multi-output cell, one rule each
# ('func', handler) entries for complex cells are added further below.
KNOWNS = {}
KNOWNS['BUFX2'] = ('Y','',['A'])
KNOWNS['INVX1'] = ('Y','!',['A'])
KNOWNS['XOR2X1'] = ('Y','^',['A','B'])
KNOWNS['XOR3X1'] = ('Y','^',['A','B','C'])
# Half adder: carry-out is A&B, sum is A^B.
KNOWNS['ADDHX1'] = ['mul',('CO','&',('A','B')),('S','^',('A','B'))]
KNOWNS['BUF'] = ('Y','',['A'])
KNOWNS['INV'] = ('Y','!',['A'])
KNOWNS['XOR2'] = ('Y','^',['A','B'])
KNOWNS['XOR3'] = ('Y','^',['A','B','C'])
KNOWNS['XNOR2'] = ('Y','~^',('A','B'))
KNOWNS['XNOR3'] = ('Y','~^',('A','B','C'))
KNOWNS['AND2'] = ('Y','&',('A','B'))
KNOWNS['AND3'] = ('Y','&',('A','B','C'))
KNOWNS['NAND2'] = ('Y','~&',('A','B'))
KNOWNS['NAND3'] = ('Y','~&',('A','B','C'))
KNOWNS['NOR2'] = ('Y','~|',('A','B'))
KNOWNS['NOR3'] = ('Y','~|',('A','B','C'))
KNOWNS['OR2'] = ('Y','|',('A','B'))
KNOWNS['OR3'] = ('Y','|',('A','B','C'))
KNOWNS['AND2X1'] = ('Y','&',('A','B'))
KNOWNS['AND3X1'] = ('Y','&',('A','B','C'))
KNOWNS['AND4X1'] = ('Y','&',('A','B','C','D'))
KNOWNS['NAND2X1'] = ('Y','~&',('A','B'))
KNOWNS['NAND3X1'] = ('Y','~&',('A','B','C'))
KNOWNS['NAND4X1'] = ('Y','~&',('A','B','C','D'))
KNOWNS['NOR2X1'] = ('Y','~|',('A','B'))
KNOWNS['NOR3X1'] = ('Y','~|',('A','B','C'))
KNOWNS['NOR4X1'] = ('Y','~|',('A','B','C','D'))
KNOWNS['XNOR2X1'] = ('Y','~^',('A','B'))
KNOWNS['XNOR3XL'] = ('Y','~^',('A','B','C'))
def mx2x1(Obj, Mod):
    """Replace a MX2X1 (2:1 mux) instance with Y = S0 ? B : A."""
    conns = Obj.conns
    mux = ('?', trans(conns['S0']), trans(conns['B']), trans(conns['A']))
    Mod.hard_assigns.append((conns['Y'], mux, '', ''))
    return True
def mx3x1(Obj, Mod):
    """Replace a MX3X1 (3:1 mux) instance with Y = S1 ? C : (S0 ? B : A)."""
    conns = Obj.conns
    inner = ('?', trans(conns['S0']), trans(conns['B']), trans(conns['A']))
    outer = ('?', trans(conns['S1']), trans(conns['C']), inner)
    Mod.hard_assigns.append((conns['Y'], outer, '', ''))
    return True
def mx4x1(Obj, Mod):
    """Replace a MX4X1 (4:1 mux): Y = S1 ? (S0 ? D : C) : (S0 ? B : A)."""
    conns = Obj.conns
    s0 = trans(conns['S0'])
    hi = ('?', s0, trans(conns['D']), trans(conns['C']))
    lo = ('?', s0, trans(conns['B']), trans(conns['A']))
    Mod.hard_assigns.append((conns['Y'], ('?', trans(conns['S1']), hi, lo), '', ''))
    return True
def getQ(Pin,Obj,Mod,MakeReg=True):
    """Return the net tied to flip-flop output Pin ('' when unconnected).

    When MakeReg is true, the driven scalar net (or the indexed bus for a
    ('subbit', bus, index) connection) is promoted to a 'reg' in Mod.nets,
    widening the bus as needed.
    """
    if Pin not in Obj.conns: return ''
    Q = Obj.conns[Pin]
    logs.log_info('getQ %s=%s %s %s'%(Pin,Q,Obj.Name,Obj.Type))
    if MakeReg:
        if type(Q) is str:
            # Scalar net.
            if Q not in Mod.nets:
                Mod.nets[Q] = 'reg',0
            elif 'output' in Mod.nets[Q]:
                Mod.nets[Q] = 'output reg',0
            else:
                Mod.nets[Q] = 'reg',0
        elif (Q[0] == 'subbit'):
            # Single bit of a bus: ('subbit', bus_name, index_expression).
            Bus = Q[1]
            Ind = eval(Q[2])
            if Bus not in Mod.nets:
                Mod.nets[Bus] = 'reg',(Ind,0)
            elif 'output' in Mod.nets[Bus]:
                # Keep the output direction; widen the bus if needed.
                _,Wid = Mod.nets[Bus]
                Wid = max(Wid[0],Ind),0
                Mod.nets[Bus] = 'output reg',Wid
            else:
                _,Wid = Mod.nets[Bus]
                Wid = max(Wid[0],Ind),0
                Mod.nets[Bus] = 'reg',Wid
    return Q
def getQandQN(Obj,Mod):
    # Fetch both flip-flop outputs; QN is only promoted to a reg when Q is
    # unconnected (otherwise QN will be derived via a continuous assign).
    Q = getQ('Q',Obj,Mod,True)
    QN = getQ('QN',Obj,Mod,Q=='')
    return Q,QN
def dffrx1(Obj,Mod):
    """Convert a DFFRX1 (posedge DFF, async active-low reset) into an
    always block plus an optional QN assign."""
    D = trans(Obj.conns['D'])
    CK = trans(Obj.conns['CK'])
    RN = trans(Obj.conns['RN'])
    Cond = ('list', ('edge', 'posedge', CK), ('edge', 'negedge', RN))
    Q,QN = getQandQN(Obj,Mod)
    if (Q=='')and(QN==''):
        logs.log_warning('no Q, QN in dffrx1 %s'%Obj.Name)
        logs.log_info('%s %s' %('Q' in Obj.conns,'QN' in Obj.conns))
        # NOTE(review): returns False (instance is kept) here, while dffsx1
        # and dffx1 return True in the same situation -- confirm intent.
        return False
    if (QN==''):
        Mod.alwayses.append((Cond,('ifelse',('!',RN),('<=',Q,0),('<=',Q,D)),'always'))
    elif (Q==''):
        # Only QN connected: register the inverted data directly.
        Mod.alwayses.append((Cond,('ifelse',('!',RN),('<=',QN,1),('<=',QN,('!',D))),'always'))
    else:
        Mod.alwayses.append((Cond,('ifelse',('!',RN),('<=',Q,0),('<=',Q,D)),'always'))
        Mod.hard_assigns.append((QN,('!',Q),'',''))
    return True
def dffsx1(Obj,Mod):
    """Convert a DFFSX1 (posedge DFF, async active-low set) into an
    always block plus an optional QN assign."""
    D = trans(Obj.conns['D'])
    CK = trans(Obj.conns['CK'])
    SN = trans(Obj.conns['SN'])
    Q,QN = getQandQN(Obj,Mod)
    Cond = ('list', ('edge', 'posedge', CK), ('edge', 'negedge', SN))
    if (Q=='')and(QN==''):
        logs.log_warning('no Q, QN in dffsx1 %s'%Obj.Name)
        return True
    if (QN==''):
        Mod.alwayses.append((Cond,('ifelse',('!',SN),('<=',Q,1),('<=',Q,D)),'always'))
    elif (Q==''):
        # Only QN connected: register the inverted data; set drives QN to 0.
        Mod.alwayses.append((Cond,('ifelse',('!',SN),('<=',QN,0),('<=',QN,('!',D))),'always'))
    else:
        Mod.alwayses.append((Cond,('ifelse',('!',SN),('<=',Q,1),('<=',Q,D)),'always'))
        Mod.hard_assigns.append((QN,('!',Q),'',''))
    return True
def dffx1(Obj,Mod):
    """Convert a plain DFFX1 (posedge DFF, no set/reset) into an always
    block plus an optional QN assign."""
    D = trans(Obj.conns['D'])
    CK = trans(Obj.conns['CK'])
    Q,QN = getQandQN(Obj,Mod)
    Cond = ('edge', 'posedge', CK)
    if (Q=='')and(QN==''):
        logs.log_warning('no Q, QN in dffx1 %s'%Obj.Name)
        return True
    if (QN==''):
        Mod.alwayses.append((Cond,('<=',Q,D),'always'))
    elif (Q==''):
        # Only QN connected: register the inverted data directly.
        Mod.alwayses.append((Cond,('<=',QN,('!',D)),'always'))
    else:
        Mod.alwayses.append((Cond,('<=',Q,D),'always'))
        Mod.hard_assigns.append((QN,('!',Q),'',''))
    return True
def addfx1(Obj, Mod):
    """Replace an ADDFX1 (full adder): CO = majority(A, B, CI), S = A^B^CI.

    Either output may be left unconnected; only connected pins emit assigns.
    """
    a = trans(Obj.conns['A'])
    b = trans(Obj.conns['B'])
    ci = trans(Obj.conns['CI'])
    if 'CO' in Obj.conns:
        carry = ('|', ('&', a, b), ('&', a, ci), ('&', ci, b))
        Mod.hard_assigns.append((trans(Obj.conns['CO']), carry, '', ''))
    if 'S' in Obj.conns:
        Mod.hard_assigns.append((trans(Obj.conns['S']), ('^', a, b, ci), '', ''))
    return True
# Cells too complex for a (dst, op, srcs) rule -- muxes, flip-flops and the
# full adder -- are dispatched through ('func', handler) entries.
KNOWNS['MX2X1'] = ('func',mx2x1)
KNOWNS['MX3X1'] = ('func',mx3x1)
KNOWNS['MX4X1'] = ('func',mx4x1)
KNOWNS['DFFX1'] = ('func',dffx1)
KNOWNS['DFFRX1'] = ('func',dffrx1)
KNOWNS['DFFSX1'] = ('func',dffsx1)
KNOWNS['ADDFX1'] = ('func',addfx1)
| [
"greenblat@mac.com"
] | greenblat@mac.com |
0c0eeb57c9fdcdcb8ca39adf4dd38098bc1c6d90 | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/1913. Maximum Product Difference Between Two Pairs/1913.py | 95b10944b5dc495737e66afb6181f548621313b9 | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 420 | py | class Solution:
def maxProductDifference(self, nums: List[int]) -> int:
max1 = -math.inf
max2 = -math.inf
min1 = math.inf
min2 = math.inf
for num in nums:
if num > max1:
max2 = max1
max1 = num
elif num > max2:
max2 = num
if num < min1:
min2 = min1
min1 = num
elif num < min2:
min2 = num
return max1 * max2 - min1 * min2
| [
"me@pengyuc.com"
] | me@pengyuc.com |
a14d6b0e0748fc1cda2fb198decd0056b8b3d273 | 327c11245c022825975b5e3c5e99be01c3102cd3 | /chatterbot/stemming.py | b353587e8dc19156199faf119162233bcc327271 | [
"BSD-3-Clause"
] | permissive | KevinMaldjian/ChatterBot | 68c9f7ee5592e9fc06deea99c16b00cf6fc21fc4 | 91975d04efe7b6a63aaa379ed682a8dae64c47cf | refs/heads/master | 2020-04-10T11:47:41.920838 | 2018-12-08T15:59:28 | 2018-12-08T16:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,726 | py | import string
from nltk import pos_tag
from nltk.corpus import wordnet, stopwords
class SimpleStemmer(object):
    """
    A very simple stemming algorithm that removes stopwords and punctuation.
    It then removes the beginning and ending characters of each word.
    This should work for any language.
    """
    def __init__(self, language='english'):
        # Translation table that deletes all ASCII punctuation characters.
        self.punctuation_table = str.maketrans(dict.fromkeys(string.punctuation))
        self.language = language
        # Lazily loaded stopword list (see get_stopwords).
        self.stopwords = None
    def initialize_nltk_stopwords(self):
        """
        Download required NLTK stopwords corpus if it has not already been downloaded.
        """
        from chatterbot.utils import nltk_download_corpus
        nltk_download_corpus('stopwords')
    def get_stopwords(self):
        """
        Get the list of stopwords from the NLTK corpus.
        """
        if not self.stopwords:
            self.stopwords = stopwords.words(self.language)
        return self.stopwords
    def get_stemmed_words(self, text, size=4):
        """
        Lowercase *text*, strip punctuation and stopwords, then "stem" each
        remaining word by chopping len(word) // size characters off both
        ends. Words shorter than *size* collapse to '' and are dropped.
        Falls back to the unstemmed word list when nothing survived.
        """
        stemmed_words = []
        # Make the text lowercase
        text = text.lower()
        # Remove punctuation
        text_with_punctuation_removed = text.translate(self.punctuation_table)
        if text_with_punctuation_removed:
            text = text_with_punctuation_removed
        words = text.split(' ')
        # Do not stem single-word strings that are less than the size limit for characters
        if len(words) == 1 and len(words[0]) < size:
            return words
        # Generate the stemmed text
        for word in words:
            # Remove stopwords
            if word not in self.get_stopwords():
                # Chop off the ends of the word
                start = len(word) // size
                stop = start * -1
                word = word[start:stop]
                if word:
                    stemmed_words.append(word)
        # Return the word list if it could not be stemmed
        if not stemmed_words and words:
            return words
        return stemmed_words
    def get_bigram_pair_string(self, text):
        """
        Return bigram pairs of stemmed text for a given string.
        For example:
            "Hello Dr. Salazar. How are you today?"
            "[ell alaza] [alaza oda]"
            "ellalaza alazaoda"
        """
        words = self.get_stemmed_words(text)
        bigrams = []
        word_count = len(words)
        if word_count <= 1:
            bigrams = words
        # Concatenate each adjacent pair of stems into a single token.
        for index in range(0, word_count - 1):
            bigram = words[index] + words[index + 1]
            bigrams.append(bigram)
        return ' '.join(bigrams)
def treebank_to_wordnet(pos):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Only the first letter of the tag matters; tags with no WordNet
    counterpart map to None.

    * https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
    * http://www.nltk.org/_modules/nltk/corpus/reader/wordnet.html
    """
    initial = pos[0]
    if initial == 'N':
        return wordnet.NOUN
    if initial == 'J':
        return wordnet.ADJ
    if initial == 'V':
        return wordnet.VERB
    if initial == 'R':
        return wordnet.ADV
    return None
class PosHypernymStemmer(object):
    """
    For each non-stopword in a string, return a string where each word is a
    hypernym preceded by the part of speech of the word before it.
    """
    def __init__(self, language='english'):
        # Translation table that deletes all ASCII punctuation characters.
        self.punctuation_table = str.maketrans(dict.fromkeys(string.punctuation))
        self.language = language
        # Lazily loaded stopword list (see get_stopwords).
        self.stopwords = None
    def initialize_nltk_stopwords(self):
        """
        Download required NLTK stopwords corpus if it has not already been downloaded.
        """
        from chatterbot.utils import nltk_download_corpus
        nltk_download_corpus('stopwords')
    def initialize_nltk_wordnet(self):
        """
        Download required NLTK corpora if they have not already been downloaded.
        """
        from chatterbot.utils import nltk_download_corpus
        nltk_download_corpus('corpora/wordnet')
    def get_stopwords(self):
        """
        Get the list of stopwords from the NLTK corpus.
        """
        if not self.stopwords:
            self.stopwords = stopwords.words(self.language)
        return self.stopwords
    def get_hypernyms(self, pos_tags):
        """
        Return the hypernyms for each word in a list of POS tagged words.
        """
        results = []
        for word, pos in pos_tags:
            synsets = wordnet.synsets(word, treebank_to_wordnet(pos))
            if synsets:
                synset = synsets[0]
                hypernyms = synset.hypernyms()
                if hypernyms:
                    # Use the lemma of the first hypernym of the first sense.
                    results.append(hypernyms[0].name().split('.')[0])
                else:
                    results.append(word)
            else:
                # No WordNet entry: keep the word itself.
                results.append(word)
        return results
    def get_bigram_pair_string(self, text):
        """
        For example:
            What a beautiful swamp
        becomes:
            DT:beautiful JJ:wetland
        """
        words = text.split()
        # Separate punctuation from last word in string
        word_with_punctuation_removed = words[-1].strip(string.punctuation)
        if word_with_punctuation_removed:
            words[-1] = word_with_punctuation_removed
        pos_tags = pos_tag(words)
        hypernyms = self.get_hypernyms(pos_tags)
        bigrams = []
        word_count = len(words)
        if word_count <= 1:
            bigrams = words
        if bigrams:
            bigrams[0] = bigrams[0].lower()
        # Pair each non-stopword hypernym with the POS tag of the word
        # preceding it.
        for index in range(1, word_count):
            if words[index].lower() not in self.get_stopwords():
                bigram = pos_tags[index - 1][1] + ':' + hypernyms[index].lower()
                bigrams.append(bigram)
        return ' '.join(bigrams)
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
cba3f9fae1a60a8fcceec19e2d36d35d7fac6b27 | 70263abbb3c7efabf4039f5861e4d417926c13af | /nova/nova/cmd/conductor.py | 94377f7045a3003d21fdfdad38e524c6b1d0a07b | [
"Apache-2.0"
] | permissive | bopopescu/keystone_hierarchical_projects | 9b272be1e3069a8ad1875103a1b7231257c90d9c | bf5cbb2e1c58dae17e77c2ec32ed171ec3104725 | refs/heads/master | 2022-11-21T04:33:35.089778 | 2014-02-24T19:23:47 | 2014-02-24T19:23:47 | 282,174,859 | 0 | 0 | null | 2020-07-24T09:12:32 | 2020-07-24T09:12:31 | null | UTF-8 | Python | false | false | 1,399 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Conductor."""
import sys
from oslo.config import cfg
from nova import config
from nova import objects
from nova.openstack.common import log as logging
from nova import service
from nova import utils
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
def main():
    """Entry point for the nova-conductor service: parse configuration,
    create the conductor RPC service and serve it with multiple workers."""
    objects.register_all()
    config.parse_args(sys.argv)
    logging.setup("nova")
    utils.monkey_patch()
    server = service.Service.create(binary='nova-conductor',
                                    topic=CONF.conductor.topic,
                                    manager=CONF.conductor.manager)
    # conductor.workers falls back to the machine's CPU count when unset.
    workers = CONF.conductor.workers or utils.cpu_count()
    service.serve(server, workers=workers)
    service.wait()
| [
"tellesnobrega@gmail.com"
] | tellesnobrega@gmail.com |
3ca4b1890044c389fe0a23bd35ee50baa6b78a33 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /numerical_analysis_backup/small-scale-multiobj/pod150_milp/hybrid/runsimu7_hybrid.py | 6c259e8aadbfc3137fa5ac817b5354e031843b24 | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize hybrid
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition import Arch4_decompose
from arch1 import ModelSDM_arch1
from arch2_decomposition import Arch2_decompose
from arch5_decomposition import Arch5_decompose
np.random.seed(2010)
# Python 2 script: sweeps traffic matrices n_start..n_end-1 through four
# SDM architecture models and writes all bounds to one CSV.
num_cores=3
num_slots=80
n_sim = 1 # number of simulations
n_start = 7 # index of start
n_end = n_start+n_sim # index of end
time_limit_routing = 1000 # 1000
time_limit_sa = 18000
# One row per simulation; columns documented in the CSV header below.
result = np.zeros((n_sim, 15))
total_cnk = []
for i in range(n_start, n_end):
    filename = 'traffic_matrix__matrix_'+str(i)+'.csv'
#    print filename
    tm = []
    with open(filename) as f:
        reader = csv.reader(f)
        for idx, row in enumerate(reader):
            # The first 12 rows are skipped -- presumably header/metadata
            # of the generated traffic files; confirm against the generator.
            if idx>11:
                row.pop()
                row = [int(u) for u in row]
                tm.append(row)
    tm = np.array(tm)*25
    # Number of non-zero demands (connection count) in this matrix.
    total_cnk.append(tm.flatten().astype(bool).sum())
    result[i-n_start, 14] = tm.flatten().astype(bool).sum()
    print "\n"
    print total_cnk
    print "\n"
    #%% arch4
    print "Architecture 4"
    m = Arch4_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=0.01)
    m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
    m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
    result[i-n_start, 0] = m.connections_lb
    result[i-n_start, 1] = m.connections_ub
    result[i-n_start, 2] = m.throughput_lb
    result[i-n_start, 3] = m.throughput_ub
    #%% arch1
    # (typo "Architecutre" is in the original output string)
    print "Architecutre 1"
    m = ModelSDM_arch1(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=0.01)
    m.create_model(mipfocus=1, timelimit=time_limit_routing,mipgap=0.01)
    result[i-n_start, 4] = m.connections
    result[i-n_start, 5] = m.throughput
    #%% arch2
    print "Architecture 2"
    m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=0.01)
    m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
    m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
    result[i-n_start, 6] = m.connections_lb
    result[i-n_start, 7] = m.connections_ub
    result[i-n_start, 8] = m.throughput_lb
    result[i-n_start, 9] = m.throughput_ub
    #%% arch5
    print "Architecture 5"
    m = Arch5_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=0.01)
    m.create_model_routing(mipfocus=1, timelimit=time_limit_routing, mipgap=0.01)
    m.create_model_sa(mipfocus=1, timelimit=time_limit_sa)
    result[i-n_start, 10] = m.connections_lb
    result[i-n_start, 11] = m.connections_ub
    result[i-n_start, 12] = m.throughput_lb
    result[i-n_start, 13] = m.throughput_ub
file_name = "result_hybrid_{}to{}.csv".format(n_start, n_end)
with open(file_name, 'w') as f:
    writer = csv.writer(f, delimiter=',')
    writer.writerow(['arch4_connections_lb', 'arch4_connections_ub',
                     'arch4_throughput_lb', 'arch4_throughput_ub',
                     'arch1_connections', 'arch1_throughput',
                     'arch2_connections_lb', 'arch2_connections_ub',
                     'arch2_throughput_lb', 'arch2_throughput_ub',
                     'arch5_connections_lb', 'arch5_connections_ub',
                     'arch5_throughput_lb', 'arch5_throughput_ub',
                     'total_cnk'])
    writer.writerows(result)
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
0f8b912ba66a1173d67a43cd005f23f5bc24a832 | 7fcdb9f3d8dfd39bf196fdb473fba5566cd97500 | /integration/tmux/create_pane_spec.py | e171e83b4763dbcc3d38ab2830bea6ece428ed02 | [
"MIT"
] | permissive | tek/myo-py | 4ccf7892a8317d54a02ed8f3d65ea54f07d984b4 | 3772a00a021cbf4efb55786e26881767d854afe8 | refs/heads/master | 2021-10-19T20:45:24.774281 | 2019-02-23T15:16:58 | 2019-02-23T15:16:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from kallikrein import Expectation, k
from kallikrein.matchers.typed import have_type
from amino import do, Do
from chiasma.util.id import StrIdent
from amino.test.spec import SpecBase
from ribosome.nvim.io.compute import NvimIO
from ribosome.nvim.api.command import nvim_command
from ribosome.test.klk.matchers.prog import component_state
from ribosome.test.integration.embed import plugin_test
from ribosome.test.config import TestConfig
from ribosome.nvim.io.api import N
from myo import myo_config
from myo.ui.data.view import Pane
@do(NvimIO[Expectation])
def create_pane_spec() -> Do:
    # Issue the plugin command that should create pane `pane` under layout `make`.
    yield nvim_command('MyoCreatePane', '{ "layout": "make", "ident": "pane"}')
    state = yield component_state('ui')
    # Walk the UI tree: space -> window -> layout `make` -> pane `pane`.
    # Each N.m(...) unwraps the optional, reporting the given message when
    # the element is absent -- presumably failing the spec; TODO confirm.
    space = yield N.m(state.spaces.head, 'no space')
    win = yield N.m(space.windows.head, 'no window')
    layout = yield N.m(win.layout.sub.find(lambda a: a.data.ident == StrIdent('make')), 'no layout `make`')
    pane = yield N.m(layout.sub.find(lambda a: a.data.ident == StrIdent('pane')), 'no pane `pane`')
    return k(pane.data).must(have_type(Pane))
test_config = TestConfig.cons(myo_config)
class CreatePaneSpec(SpecBase):
    '''
    create a pane $create_pane
    '''
    def create_pane(self) -> Expectation:
        # Run the coroutine spec above inside an embedded plugin host.
        return plugin_test(test_config, create_pane_spec)
__all__ = ('CreatePaneSpec',)
| [
"torstenschmits@gmail.com"
] | torstenschmits@gmail.com |
b6f245ee3c5b5ad2ae95c0b7ff11b2cf6c9b9995 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_docketed.py | 47549313b856590ea1a3308650f7333c6d6d0449 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#class header
class _DOCKETED():
def __init__(self,):
self.name = "DOCKETED"
self.definitions = docket
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['docket']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
827a4447791588d49123ef4cf9cd00dfc7a5e4b2 | f63fdd673e5b9881a0395be83e0a6b5ccc7edbdb | /drawing_pallet_jack.py | 435be0e9d5a166434d1a3ffe524eb0c5a59ad9c8 | [] | no_license | karry3775/Motion-Planning-Python | 63ad4bf928c57b9ea6b8c19fceea16dbc2adb783 | 20ed30526bd03380ead8b29357e5bf7eb291ba9b | refs/heads/master | 2023-02-08T18:19:26.644696 | 2020-12-30T22:22:36 | 2020-12-30T22:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,107 | py | import matplotlib.pyplot as plt
import numpy as np
import math
def wrapToPi(theta):
    """Normalize an angle in radians to the interval [-pi, pi]."""
    sine = math.sin(theta)
    cosine = math.cos(theta)
    return math.atan2(sine, cosine)
def dpj(x,y,theta,steer):
    """Draw a pallet jack at pose (x, y, theta) with steering angle `steer`.

    All shape vertices are homogeneous 2D points ([x, y, 1]) defined in the
    vehicle frame and mapped to the world frame with 3x3 rotate+translate
    matrices; the steered wheel gets an extra rotation by `steer`.
    """
    # plotting the center point
    wd = 1.5
    plt.plot(x,y,'co')
    plt.arrow(x,y,math.cos(theta),math.sin(theta),width=0.01,color='orange')
    plt.arrow(x,y,math.cos(wrapToPi(theta+steer)),math.sin(wrapToPi(theta+steer)),width=0.01,color='g')
    #wheel cordinates
    A = np.array([-0.3,0.1,1]).T
    B = np.array([0.3,0.1,1]).T
    C = np.array([-0.3,-0.1,1]).T
    D = np.array([0.3,-0.1,1]).T
    #tranform the wheel
    # Wheel uses heading + steer, so it rotates with the steering input.
    T_wheel = np.array([[math.cos(wrapToPi(theta + steer)),-math.sin(wrapToPi(theta + steer)),x],
                        [math.sin(wrapToPi(theta + steer)),math.cos(wrapToPi(theta + steer)),y],
                        [0,0,1]])
    A = np.matmul(T_wheel,A)
    B = np.matmul(T_wheel,B)
    C = np.matmul(T_wheel,C)
    D = np.matmul(T_wheel,D)
    #front cordinates
    a = np.array([0.5,-0.5,1]).T
    b = np.array([0.5,0.5,1]).T
    c = np.array([0,1,1]).T
    d = np.array([0,-1,1]).T
    e = np.array([-0.5,1,1]).T
    f = np.array([-0.5,-1,1]).T
    #dotted front
    X = np.array([-0.5,0.75,1]).T
    Y = np.array([0,0.75,1]).T
    Z = np.array([0.25,0.5,1]).T
    W = np.array([0.25,-0.5,1]).T
    U = np.array([0,-0.75,1]).T
    V = np.array([-0.5,-0.75,1]).T
    #back support
    g = np.array([-1.5,1,1]).T
    h = np.array([-1.5,-1,1]).T
    i = np.array([-1.5,0.75,1]).T
    j = np.array([-1.5,0.25,1]).T
    k = np.array([-1.5,-0.25,1]).T
    l = np.array([-1.5,-0.75,1]).T
    #drawing the pallet_first_ends
    m = np.array([-4,0.75,1]).T
    n = np.array([-4,0.25,1]).T
    o = np.array([-4,-0.25,1]).T
    p = np.array([-4,-0.75,1]).T
    #drawing the lasts
    q = np.array([-4.5,0.75-0.2,1]).T
    r = np.array([-4.5,0.25+0.2,1]).T
    s = np.array([-4.5,-0.25-0.2,1]).T
    t = np.array([-4.5,-0.75+0.2,1]).T
    # Tranformations
    # Body points use the plain heading (no steer component).
    T = np.array([[math.cos(wrapToPi(theta)),-math.sin(wrapToPi(theta)),x],
                  [math.sin(wrapToPi(theta)),math.cos(wrapToPi(theta)),y],
                  [0,0,1]])
    #front cordinates
    a = np.matmul(T,a)
    b = np.matmul(T,b)
    c = np.matmul(T,c)
    d = np.matmul(T,d)
    e = np.matmul(T,e)
    f = np.matmul(T,f)
    #dotted front
    X = np.matmul(T,X)
    Y = np.matmul(T,Y)
    Z = np.matmul(T,Z)
    W = np.matmul(T,W)
    U = np.matmul(T,U)
    V = np.matmul(T,V)
    #back support
    g = np.matmul(T,g)
    h = np.matmul(T,h)
    i = np.matmul(T,i)
    j = np.matmul(T,j)
    k = np.matmul(T,k)
    l = np.matmul(T,l)
    #drawing the pallet_first_ends
    m = np.matmul(T,m)
    n = np.matmul(T,n)
    o = np.matmul(T,o)
    p = np.matmul(T,p)
    #drawing the lasts
    q = np.matmul(T,q)
    r = np.matmul(T,r)
    s = np.matmul(T,s)
    t = np.matmul(T,t)
    # Midpoint between points n and o; connected to the centre (x, y) below.
    back_center = [(n[0]+o[0])/2,(n[1]+o[1])/2]
    #plotting color
    plt.fill([r[0],q[0],m[0],i[0],j[0],n[0],r[0]],[r[1],q[1],m[1],i[1],j[1],n[1],r[1]],color='grey')
    plt.fill([s[0],o[0],k[0],l[0],p[0],t[0],s[0]],[s[1],o[1],k[1],l[1],p[1],t[1],s[1]],color='grey')
    plt.fill([g[0],e[0],f[0],h[0],g[0]],[g[1],e[1],f[1],h[1],g[1]],color='orange')
    plt.fill([e[0],c[0],b[0],a[0],d[0],f[0],e[0]],[e[1],c[1],b[1],a[1],d[1],f[1],e[1]],color='orange')
    plt.fill([A[0],B[0],D[0],C[0],A[0]],[A[1],B[1],D[1],C[1],A[1]],color='blue')
    plt.plot([a[0],b[0]],[a[1],b[1]],'k',linewidth=wd)
    plt.plot([a[0],d[0]],[a[1],d[1]],'k',linewidth=wd)
    plt.plot([c[0],b[0]],[c[1],b[1]],'k',linewidth=wd)
    plt.plot([c[0],e[0]],[c[1],e[1]],'k',linewidth=wd)
    plt.plot([d[0],f[0]],[d[1],f[1]],'k',linewidth=wd)
    plt.plot([e[0],f[0]],[e[1],f[1]],'k',linewidth=wd)
    plt.plot([X[0],Y[0]],[X[1],Y[1]],'g--')
    plt.plot([Z[0],Y[0]],[Z[1],Y[1]],'g--')
    plt.plot([Z[0],W[0]],[Z[1],W[1]],'g--')
    plt.plot([U[0],W[0]],[U[1],W[1]],'g--')
    plt.plot([U[0],V[0]],[U[1],V[1]],'g--')
    plt.plot([g[0],h[0]],[g[1],h[1]],'k',linewidth=wd)
    plt.plot([g[0],e[0]],[g[1],e[1]],'k',linewidth=wd)
    plt.plot([h[0],f[0]],[h[1],f[1]],'k',linewidth=wd)
    plt.plot([i[0],l[0]],[i[1],l[1]],'k',linewidth=wd)
    plt.plot([m[0],i[0]],[m[1],i[1]],'k',linewidth=wd)
    plt.plot([n[0],j[0]],[n[1],j[1]],'k',linewidth=wd)
    plt.plot([o[0],k[0]],[o[1],k[1]],'k',linewidth=wd)
    plt.plot([p[0],l[0]],[p[1],l[1]],'k',linewidth=wd)
    plt.plot([m[0],q[0]],[m[1],q[1]],'k',linewidth=wd)
    plt.plot([q[0],r[0]],[q[1],r[1]],'k',linewidth=wd)
    plt.plot([n[0],r[0]],[n[1],r[1]],'k',linewidth=wd)
    plt.plot([o[0],s[0]],[o[1],s[1]],'k',linewidth=wd)
    plt.plot([s[0],t[0]],[s[1],t[1]],'k',linewidth=wd)
    plt.plot([p[0],t[0]],[p[1],t[1]],'k',linewidth=wd)
    plt.plot([A[0],B[0]],[A[1],B[1]],'k')
    plt.plot([A[0],C[0]],[A[1],C[1]],'k')
    plt.plot([D[0],B[0]],[D[1],B[1]],'k')
    plt.plot([D[0],C[0]],[D[1],C[1]],'k')
    plt.plot([back_center[0],x],[back_center[1],y],'o--',linewidth=wd)
    # plt.axes().set_aspect('equal','datalim')
    # plt.show()
# Demo: draw the pallet jack at (1, 2) with a 30-degree heading
# and a 60-degree steering angle.
x = 1
y = 2
theta = math.pi/6
steer = math.pi/3
dpj(x,y,theta,steer)
| [
"kartikprakash3775@gmail.com"
] | kartikprakash3775@gmail.com |
44ac6e76c3e0b9341d50614fe10d9fd3728459fb | 754fb49b8c3e41a9790fadb92f27571f175150fb | /text_matching/20125222/ESIM/test.py | 923f3f093fa5cb140b83a64a1ebcd815bc979fcd | [] | no_license | HuihuiChyan/BJTUNLP_Practice2020 | 8df2c956a9629c8841d7b7d70d2ea86345be0b0e | 4ba1f8f146f5d090d401c38f5536907afca4830b | refs/heads/master | 2023-02-16T17:47:55.570612 | 2021-01-13T09:40:20 | 2021-01-13T09:40:20 | 295,123,517 | 21 | 3 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import argparse
import torch
import utils
import model
import train
import test
# Set up the device and run evaluation, dumping predictions to result.txt.
def test(args, device, net, train_iter):
    """Restore the model from args.save_path, run it over train_iter and
    write the argmax class index of every example to result.txt.

    Args:
        args: namespace providing ``save_path`` (saved state-dict file).
        device: torch device used for inference.
        net: model instance; called as net(feature[0], feature[1]) after
            transposing the batch -- presumably premise/hypothesis pairs;
            TODO confirm against the data loader.
        train_iter: iterable of (feature, label) batches.
    """
    net.load_state_dict(torch.load(args.save_path))
    print(net)
    net.eval()
    # Open once, in write mode: the original re-opened the file read-only on
    # every batch, so f.write() raised io.UnsupportedOperation and the
    # handle leaked when it did.
    with open("result.txt", "w") as f:
        for feature, _ in train_iter:
            feature = feature.transpose(1, 0)
            feature = feature.to(device)
            score = net(feature[0], feature[1])
            # Tensors cannot be written directly; serialize the predicted
            # indices as text, one batch per line.
            preds = torch.argmax(score.cpu().data, dim=1)
            f.write(" ".join(str(int(p)) for p in preds) + "\n")
f.close() | [
"noreply@github.com"
] | HuihuiChyan.noreply@github.com |
891ab579b163f740e989fdea514ebf2c0ec2931d | 3b944f1714c458c5d6d0e84d4b1498f2b59c4ef7 | /67. Add Binary.py | 0feccbd47c3a2de8dcae01d332f475c8c2a2cfb2 | [] | no_license | shiannn/LeetCodePython | e4d66f108200d8329616b3e45b70c3f8fc4cd9ed | 6e4472d41904e60ff9d70b5f3979c5dcae98c838 | refs/heads/master | 2021-06-26T03:24:03.079077 | 2021-02-24T16:54:18 | 2021-02-24T16:54:18 | 213,206,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | class Solution:
def addBinary(self, a: str, b: str) -> str:
carry = 0
bit = 0
ret = ""
all_len = max(len(a), len(b))
for i in range(1, all_len+1):
if len(a) - i >= 0:
aa = a[len(a) - i]
else:
aa = 0
if len(b) - i >= 0:
bb = b[len(b) - i]
else:
bb = 0
bit = int(aa) + int(bb) + carry
if bit == 0:
ret = "0"+ret
carry = 0
elif bit == 1:
ret = "1"+ret
carry = 0
elif bit == 2:
ret = "0"+ret
carry = 1
elif bit == 3:
ret = "1"+ret
carry = 1
if carry > 0:
ret = str(carry) + ret
return ret
# Quick manual check: 1010 + 1011 = 10101 in binary.
if __name__ == '__main__':
    #a = '11'
    #b = '1'
    a = "1010"
    b = "1011"
    sol = Solution()
    ret = sol.addBinary(a, b)
print(ret) | [
"b05502087@ntu.edu.tw"
] | b05502087@ntu.edu.tw |
fe6302586a28e6dbbb50242710d80872542efaae | 15863dafe261decf655286de2fcc6e67cadef3d8 | /website/apps/loans/migrations/0003_loanprofilev1_assets_verification_method.py | 56d08ce867e953afb77df96d9ab32e6fa5decfae | [] | no_license | protoprojects/worksample | 5aa833570a39d5c61e0c658a968f28140694c567 | f1a8cd8268d032ea8321e1588e226da09925b7aa | refs/heads/master | 2021-06-26T17:34:10.847038 | 2017-09-14T00:14:03 | 2017-09-14T00:14:03 | 103,463,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-31 07:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: adds the nullable choice field
    # `assets_verification_method` to the LoanProfileV1 model.
    dependencies = [
        ('loans', '0002_employmentv1_is_current_employment'),
    ]
    operations = [
        migrations.AddField(
            model_name='loanprofilev1',
            name='assets_verification_method',
            field=models.CharField(blank=True, choices=[('self_reported', 'Self Reported'), ('yodlee', 'Yodlee Aggregated assets')], max_length=127, null=True),
        ),
    ]
| [
"dev@divethree.com"
] | dev@divethree.com |
6e273e8d146220a1751305959dc5d98e59fc206d | 3052e12cd492f3ffc3eedadc1bec6e6bb220e16b | /0x06-python-classes/102-square.py | 67848441ec4aa157cc06583f7c91a89207b42a60 | [] | no_license | JoseAVallejo12/holbertonschool-higher_level_programming | 2e9f79d495b664299a57c723e4be2ad8507abb4f | f2d9d456c854b52c932ec617ea3c40564b78bcdb | refs/heads/master | 2022-12-17T22:36:41.416213 | 2020-09-25T20:08:41 | 2020-09-25T20:08:41 | 259,383,382 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | #!/usr/bin/python3
# 102-square.py
# jose vallejo <1545@holbertonschool.com
""" Define a new class """
class Square(object):
    """Square defined by a non-negative numeric size, with comparators."""

    def __init__(self, size=0):
        """Initialize the square, validating size via the property setter."""
        # Route through the setter so construction gets the same checks
        # as later assignments (the original stored the raw value).
        self.size = size

    @property
    def size(self):
        """Getter for the private size attribute."""
        return self.__size

    @size.setter
    def size(self, value):
        """Validate and store size.

        The original check `type(value) is not int or float` was always
        truthy (a bare `float` is truthy), so every assignment raised
        TypeError -- and the value was never stored at all.
        """
        if type(value) not in (int, float):
            raise TypeError("size must be a number")
        if value < 0:
            raise ValueError("size must be >= 0")
        self.__size = value

    def area(self):
        """Return the area of the square."""
        return self.__size * self.__size

    # NOTE(review): the comparators below compare the numeric size against
    # `other` directly; if `other` is another Square the comparison is
    # always False -- confirm whether they should use other's size/area.
    def __eq__(self, other):
        """a == b"""
        return self.__size == other

    def __ne__(self, other):
        """a != b"""
        return self.__size != other

    def __gt__(self, other):
        """a > b"""
        return self.__size > other

    def __ge__(self, other):
        """a >= b"""
        return self.__size >= other

    def __lt__(self, other):
        """a < b"""
        return self.__size < other

    def __le__(self, other):
        """a <= b"""
        return self.__size <= other
| [
"josealfredovallejo25@gmail.com"
] | josealfredovallejo25@gmail.com |
8941614c0c690934b59b84074e2f8ef37b10da44 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_319/ch15_2020_09_30_19_19_59_252844.py | c9f8753e8fea96067b6cc7e293c8ac16eee99427 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | Qual_o_seu_nome = input('Qual o seu nome?')
# Greet the user by name; "Chris" gets the show's running joke instead.
if Qual_o_seu_nome == 'Chris':
    # The original compared against the bare name Chris (NameError) and
    # used `return` at module level (SyntaxError); print the message.
    print('Todo mundo odeia o Chris')
else:
    # 'Olá, NOME' was a literal placeholder; substitute the actual name.
    print('Olá, ' + Qual_o_seu_nome)
print(Qual_o_seu_nome) | [
"you@example.com"
] | you@example.com |
e86f5382c89102f0d86b4c313724b8f605b98150 | 63050299cfe58aebf6dd26a5178051a839de8c39 | /game/game_data.py | 92697b75c725f4e4cf5f6c34508d70c35701a5bd | [] | no_license | stmobo/Split-Minded | df12a6a60eb086294f2a7e6f14ad137cd628228c | 0307ff4c9777f08d582147decd4dc762c40b2688 | refs/heads/master | 2020-03-12T04:25:04.820340 | 2018-04-25T01:47:17 | 2018-04-25T01:47:17 | 130,444,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import renpy.exports as renpy
# the game runs at 1280x720 by default
# use roughly half of the total screen size for the control game view
tile_size = 50 # px square
field_sz_tiles = [30, 30]
# playfield dimensions in pixels, derived from the tile grid
field_size = [field_sz_tiles[0]*tile_size, field_sz_tiles[1]*tile_size]
screen_total_size = [1280, 720]
gameplay_screen_size = [640, 640]
# mutable runtime state -- presumably filled in elsewhere once the
# display/game is initialized; TODO confirm against the callers
screen_center = None
combat_in_progress = False
ai_active = False
# dev purposes only:
force_combat_winner = None
skip_combat = False
| [
"stmobo@gmail.com"
] | stmobo@gmail.com |
fe1df3ac060074c2d4d8ce317c719e6378728b98 | cf62f7a7f9e13205fe83957fb7bfcf1b097bf481 | /src/hub/dataload/sources/reactome/__init__.py | ece58d7abdd9805c6dc4ca3d07d6388273c83b1b | [
"Apache-2.0"
] | permissive | biothings/mygene.info | 09bf19f481c066789a4ad02a0d2880f31dae28f6 | fe1bbdd81bc29b412ca4288d3af38e47c0602ab7 | refs/heads/master | 2023-08-22T21:34:43.540840 | 2023-08-08T23:25:15 | 2023-08-08T23:25:18 | 54,933,630 | 89 | 20 | NOASSERTION | 2023-07-18T23:53:49 | 2016-03-29T00:36:49 | Python | UTF-8 | Python | false | false | 70 | py | from .dump import ReactomeDumper
from .upload import ReactomeUploader
| [
"slelong@scripps.edu"
] | slelong@scripps.edu |
d94a5cdac376d139ebc4289f0fd4dfaee5f315e9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/664.py | 3f8b42535772cdcba828eea3e04f7364e212ad5d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py |
__author__ = 'sergeyy'
import time
import numpy as np
def getUsedH(a):
    """Return the set of distinct values appearing anywhere in grid `a`.

    `set().union(*a)` unions every row in a single call (and yields an
    empty set for an empty grid) instead of rebuilding the accumulator
    set once per row as the original loop did.
    """
    return set().union(*a)
def delLine(a, l):
    """Remove row `l` from grid `a` in place and return the grid."""
    a.pop(l)
    return a
def delColl(a, c):
    """Remove column `c` from every row of grid `a` in place; return `a`."""
    for row in a:
        row.pop(c)
    return a
def isCuutbl(line):
    """Return True when the first entry of `line` is one of its maxima."""
    return line[0] >= max(line)
def removeH(a,h):
    """Repeatedly cut away rows/columns of value h from the edge of grid
    `a` (mutated in place); return the reduced grid, or False when some
    cell is lower than h."""
    if min([min(x) for x in a]) < h:
        return False
    while True:
        lToRem = []
        cToRem = []
        # Rows whose first cell equals h and is a row maximum can be cut.
        for i in range(len(a)):
            if (a[i][0] == h):
                if isCuutbl(a[i]):
                    lToRem.append(i);
        # Same test for columns, probing along the first row.
        for i in range(len(a[0])):
            if (a[0][i] == h):
                if isCuutbl([x[i] for x in a]):
                    cToRem.append(i);
        if not lToRem and not cToRem:
            return a
        # Delete from the highest index down so earlier indices stay valid.
        for i in sorted(lToRem, reverse=True):
            delLine(a, i)
        for i in sorted(cToRem, reverse=True):
            delColl(a, i)
        if not a:
            return a
def solve(a):
    """Return 'YES' when grid `a` can be fully carved away value-by-value,
    else 'NO'."""
    for h in getUsedH(a):
        a = removeH(a,h)
        # removeH returns False (not an empty list) on failure; `== False`
        # distinguishes the two because `[] == False` is False in Python.
        if a == False:
            return 'NO'
    if a:
        return 'NO'
    return 'YES'
# Driver: read the case count and each n x m grid from input.txt, write
# one "Case #k: YES/NO" line per case to output.txt.
with open('input.txt', 'r') as fin:
    with open('output.txt', 'w') as fout:
        cnt = int(fin.readline())
        for case in range(1, cnt+1):
            n, m = [int(x) for x in fin.readline().split()]
            # NOTE(review): [[]*m]*n is n references to the same empty
            # list, but every slot is reassigned in the loop below.
            a = [[]*m]*n
            for i in range(n):
                a[i] = [int(x) for x in fin.readline().split()]
            oline = ''
            oline = 'Case #' + str(case) +': ' + solve(a) +'\n'
            fout.write(oline)
# time.clock() reports elapsed processor time (removed in Python 3.8).
print (time.clock())
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
48a6fbcf96afb2fa41e94c660956111df1d9812a | 7f2fc00415c152538520df84387549def73c4320 | /p0_data_prepare/path2test_list.py | 2fb54f06df155865096f47ccd75db55e4e20728c | [
"MIT"
] | permissive | mxer/face_project | da326be7c70c07ae510c2b5b8063902b27ae3a5b | 8d70858817da4d15c7b513ae492034784f57f35f | refs/heads/main | 2023-08-06T09:24:49.842098 | 2021-09-18T08:32:18 | 2021-09-18T08:32:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import os
import argparse
def main(args):
    """Collect every file path under args.folder_dir (recursively) and
    write them, one per line, to the list file args.txt_dir.

    Args:
        args: namespace with `folder_dir` (input tree) and `txt_dir`
            (output text file) attributes.
    """
    # Gather all file paths; avoid shadowing the `file` builtin as the
    # original did.
    files = []
    for root, _dirs, names in os.walk(args.folder_dir):
        for name in names:
            files.append(os.path.join(root, name))
    # Make sure the output directory exists before writing.
    folder_ = os.path.split(args.txt_dir)[0]
    if not os.path.exists(folder_):
        os.makedirs(folder_)
    # Context manager guarantees the handle is closed even if a write fails.
    with open(args.txt_dir, 'w') as out:
        for path in files:
            out.write(path + '\n')
if __name__ == '__main__':
    # CLI entry point: --folder_dir is the tree to index,
    # --txt_dir the list file to produce.
    parse = argparse.ArgumentParser()
    parse.add_argument('--folder_dir', type=str,
                       default=r'E:\dataset-zoo\face-real\1-N\key')
    parse.add_argument('--txt_dir', type=str, default=r'E:\list-zoo\real.txt')
    args = parse.parse_args()
main(args) | [
"chen19940524@live.com"
] | chen19940524@live.com |
1e7178314f6883226677aa0092bc7b7f7415728c | b75237a8c9436cc31458ca20e93b4f3b46703b63 | /src/oaipmh/tests/createdata.py | 541e06ace459ffeb7572f91bfc20b62f10fa0566 | [
"BSD-3-Clause"
] | permissive | ulikoehler/pyoai | 09ebc795f707e829de1587f8cdec011201d46f75 | 461ccf81526a5ca98623dd39e210cded4fb6263d | refs/heads/master | 2022-05-10T19:36:54.980792 | 2022-03-09T16:10:36 | 2022-03-09T16:10:36 | 30,493,052 | 1 | 2 | NOASSERTION | 2022-03-09T16:10:37 | 2015-02-08T14:14:53 | Python | UTF-8 | Python | false | false | 2,109 | py | from fakeclient import FakeCreaterClient
# Exercise every OAI-PMH verb against the EUR DSpace endpoint and record
# the responses as fake-client test data (Python 2 print statements).
# NOTE(review): `datetime` is used below but never imported in the visible
# code -- presumably `from datetime import datetime` exists upstream.
# tied to the server at EUR..
client = FakeCreaterClient(
    'http://dspace.ubib.eur.nl/oai/',
    '/home/faassen/py/oai/tests/fake2')
print "GetRecord"
header, metadata, about = client.getRecord(
    metadataPrefix='oai_dc', identifier='hdl:1765/315')
print "identifier:", header.identifier()
print "datestamp:", header.datestamp()
print "setSpec:", header.setSpec()
print "isDeleted:", header.isDeleted()
print
print "Identify"
identify = client.identify()
print "repositoryName:", identify.repositoryName()
print "baseURL:", identify.baseURL()
print "protocolVerson:", identify.protocolVersion()
print "adminEmails:", identify.adminEmails()
print "earliestDatestamp:", identify.earliestDatestamp()
print "deletedRecords:", identify.deletedRecord()
print "granularity:", identify.granularity()
print "compression:", identify.compression()
print
print "ListIdentifiers"
headers = client.listIdentifiers(from_=datetime(2003, 04, 10),
                                 metadataPrefix='oai_dc')
for header in headers:
    print "identifier:", header.identifier()
    print "datestamp:", header.datestamp()
    print "setSpec:", header.setSpec()
    print "isDeleted:", header.isDeleted()
print
print "ListMetadataFormats"
for prefix, schema, ns in client.listMetadataFormats():
    print "metadataPrefix:", prefix
    print "schema:", schema
    print "metadataNamespace:", ns
print
print "ListRecords"
for header, metadata, about in client.listRecords(
    from_=datetime(2003, 04, 10), metadataPrefix='oai_dc'):
    print "header"
    print "identifier:", header.identifier()
    print "datestamp:", header.datestamp()
    print "setSpec:", header.setSpec()
    print "isDeleted:", header.isDeleted()
    #print "metadata"
    #for fieldname in fieldnames:
    #    print "%s:" % fieldname, metadata.getField(fieldname)
    print "about"
    print about
    print
print "ListSets"
for setSpec, setName, setDescription in client.listSets():
    print "setSpec:", setSpec
    print "setName:", setName
    print "setDescription:", setDescription
print
# Persist the captured responses as fixture data.
client.save()
| [
"none@none"
] | none@none |
33453a0ed6537cf87445691a5ccc4ad2c51ff81f | bb51bebd6ff9fccc5a639d621190d46bb66e4456 | /bill_calc.py | 5e030d865a88fbcc2b69b66232ef3045b6e47f85 | [] | no_license | annalisestevenson/bill_lab | c7cb1b42844147378d336bfcbae1f481eb14b65c | c4f6badc3d7c1a73d446f0e43bee74d3f57e2f3f | refs/heads/master | 2016-09-13T23:38:25.760794 | 2016-05-06T02:50:00 | 2016-05-06T02:50:00 | 58,176,103 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | tip_percentage= 18
bill_amount= 10
def calculate_bill():
    """Return the bill total including tip, using the module-level
    tip_percentage and bill_amount globals."""
    tip = tip_percentage * .01 * bill_amount
    return bill_amount + tip
def calc_alone_bill():
    # No tip applied: just the raw bill amount global.
    return bill_amount
def tip_ave():
    # Default 18% tip added on top of the bill.
    return (.18*bill_amount) + bill_amount
def main():
    # Interactive flow (Python 2 raw_input): ask for the bill, whether
    # the user dined alone, and the tip; print the per-person total.
    global bill_amount
    bill_amount= float(raw_input("How much was your bill, not including tip?"))
    dine_alone=raw_input("Did you dine alone, Y/N?")
    if (dine_alone.upper()=="N"):
        dine= int(raw_input("How many people were at dinner?"))
        # NOTE(review): this Y/N answer is stored in tip_percentage, which
        # is declared `global` further down; a global declaration applies
        # to the whole function body, so this clobbers the numeric default
        # (18) with a string -- confirm intended behavior.
        tip_percentage= raw_input("Is 18 percent tip okay, Y/N?")
        if (tip_percentage.upper()=="N"):
            tip= float(raw_input("How much do you want to leave for tip?"))
            print "Your total is", ((tip*.01*bill_amount)+bill_amount)/dine
        else:
            print "Your total is", tip_ave()/dine
    else:
        global tip_percentage
        tip_percentage= float(raw_input("How much do you want to leave for tip?"))
        alone_bill= calculate_bill()
        print "Your total is", alone_bill
    # tip_percentage= raw_input("Is 18 percent tip okay, Y/N?")
    # if (tip_percentage.upper()=="N"):
    #     tip= int(raw_input("How much do you want to leave for tip?"))
    # else:
    #     return tip_ave
if __name__ == "__main__":
main() | [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
d8d595e7f69bb51aaf3a7bcc753fd581253604dd | 1ff265ac6bdf43f5a859f357312dd3ff788804a6 | /cards.py | ebaa6094cc4fbb22b25c24cc4dcee8020ccc2a4a | [] | no_license | sgriffith3/july_pyb | f1f493450ab4933a4443518863f772ad54865c26 | 5e06012ad436071416b95613ed46e972c46b0ff7 | refs/heads/master | 2022-11-19T04:58:41.632000 | 2020-07-17T19:46:44 | 2020-07-17T19:46:44 | 279,335,603 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | import random
class DeckofCards:
    """A standard 52-card deck that deals cards at random without replacement."""

    def __init__(self):
        # Build every rank/suit combination in rank-major order, exactly
        # as a pair of nested loops would.
        ranks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K', 'A']
        suits = ['spades', 'diamonds', 'clubs', 'hearts']
        self.cards = [f"{rank} of {suit}" for rank in ranks for suit in suits]

    def pick_one(self):
        """Remove and return one uniformly random card from the deck."""
        chosen = random.choice(self.cards)
        self.cards.remove(chosen)
        return chosen
# Smoke test: deck starts with 52 cards and shrinks by one per draw.
deck = DeckofCards()
print(len(deck.cards))
print(deck.pick_one())
print(len(deck.cards))
| [
"sgriffith@alta3.com"
] | sgriffith@alta3.com |
dfe90b2f77adbed5cee3c8bf22fc1cadf07db007 | a281d09ed91914b134028c3a9f11f0beb69a9089 | /contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/metrics/data_profiler_metrics/data_profiler_profile_percent_diff.py | 74cfa564650c69823f7fd47fecd0d9e533bd6e47 | [
"Apache-2.0"
] | permissive | CarstenFrommhold/great_expectations | 4e67bbf43d21bc414f56d576704259a4eca283a5 | 23d61c5ed26689d6ff9cec647cc35712ad744559 | refs/heads/develop | 2023-01-08T10:01:12.074165 | 2022-11-29T18:50:18 | 2022-11-29T18:50:18 | 311,708,429 | 0 | 0 | Apache-2.0 | 2020-11-10T15:52:05 | 2020-11-10T15:52:04 | null | UTF-8 | Python | false | false | 4,150 | py | import copy
from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.metrics.metric_provider import metric_value
from great_expectations.validator.metric_configuration import MetricConfiguration
from .data_profiler_profile_metric_provider import DataProfilerProfileMetricProvider
class DataProfilerProfilePercentDiff(DataProfilerProfileMetricProvider):
    """Metric: per-column, per-statistic percent change (diff / original)
    between the stored DataProfiler report and the computed profile diff."""
    metric_name = "data_profiler.profile_percent_diff"
    value_keys = ("profile_path",)
    @metric_value(PandasExecutionEngine)
    def _pandas(
        cls,
        execution_engine,
        metric_domain_kwargs,
        metric_value_kwargs,
        metrics,
        runtime_configuration,
    ):
        # Statistics that are numeric in the diff report and therefore
        # eligible for a percent-change computation; everything else is
        # copied through unchanged.
        numerical_diff_stats = [
            "min",
            "max",
            "sum",
            "mean",
            "median",
            "median_absolute_deviation",
            "variance",
            "stddev",
            "unique_count",
            "unique_ratio",
            "gini_impurity",
            "unalikeability",
            "sample_size",
            "null_count",
        ]
        profile_report = metrics["data_profiler.profile_report"]
        diff_report = metrics["data_profiler.profile_diff"]
        pr_columns = profile_report["data_stats"]
        dr_columns = diff_report["data_stats"]
        percent_delta_data_stats = []
        # Column lists are assumed parallel -- zip pairs them positionally;
        # TODO confirm both reports list columns in the same order.
        for pr_col, dr_col in zip(pr_columns, dr_columns):
            pr_stats = pr_col["statistics"]
            dr_stats = dr_col["statistics"]
            percent_delta_col = copy.deepcopy(dr_col)
            percent_delta_stats = {}
            for (dr_stat, dr_val) in dr_stats.items():
                if dr_stat not in numerical_diff_stats:
                    percent_delta_stats[dr_stat] = dr_val
                    continue
                # The diff report encodes "no change" as a string sentinel.
                if dr_val == "unchanged":
                    dr_val = 0
                if dr_stat not in pr_stats:
                    percent_delta_stats[dr_stat] = "ERR_no_original_value"
                    continue
                pr_val = pr_stats[dr_stat]
                percent_change = 0
                if pr_val == 0:
                    percent_change = "ERR_divide_by_zero"  # Div by 0 error
                else:
                    percent_change = dr_val / pr_val
                percent_delta_stats[dr_stat] = percent_change
            percent_delta_col["statistics"] = percent_delta_stats
            percent_delta_data_stats.append(percent_delta_col)
        # Reuse the diff report's structure, swapping in the percent deltas.
        percent_diff_report = copy.deepcopy(diff_report)
        percent_diff_report["data_stats"] = percent_delta_data_stats
        return percent_diff_report
    @classmethod
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """
        Returns a dictionary of given metric names and their corresponding configuration, specifying
        the metric types and their respective domains"""
        dependencies: dict = super()._get_evaluation_dependencies(
            metric=metric,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )
        # This metric is derived from the raw report and the diff; register
        # both so the engine resolves them first.
        if metric.metric_name == "data_profiler.profile_percent_diff":
            dependencies["data_profiler.profile_report"] = MetricConfiguration(
                metric_name="data_profiler.profile_report",
                metric_domain_kwargs=metric.metric_domain_kwargs,
                metric_value_kwargs=metric.metric_value_kwargs,
            )
            dependencies["data_profiler.profile_diff"] = MetricConfiguration(
                metric_name="data_profiler.profile_diff",
                metric_domain_kwargs=metric.metric_domain_kwargs,
                metric_value_kwargs=metric.metric_value_kwargs,
            )
        return dependencies
| [
"noreply@github.com"
] | CarstenFrommhold.noreply@github.com |
2e75e8e720652926e2a683efdef81ac85dc0650e | e23512edf95ea66640eab85adb8ca0c24ae6e3f7 | /tensorflow/tools/docs/parser.py | 3db164c2b5b78dbcb3c408ce89c067d33c2a2af4 | [
"Apache-2.0"
] | permissive | snuspl/tensorflow | 755ac46c3163adb119de0755ed706b1c960991fb | 212d4e9e5f4093ecb90e5b7837d4e02da7506228 | refs/heads/r1.6 | 2021-06-25T18:03:17.625202 | 2018-12-30T09:35:50 | 2018-12-30T09:35:50 | 134,066,972 | 1 | 3 | Apache-2.0 | 2020-06-10T06:12:19 | 2018-05-19T14:02:25 | C++ | UTF-8 | Python | false | false | 53,772 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import functools
import json
import os
import re
import sys
import codegen
import six
from google.protobuf.message import Message as ProtoMessage
from tensorflow.python.util import tf_inspect
# A regular expression capturing a python identifier.
IDENTIFIER_RE = '[a-zA-Z_][a-zA-Z0-9_]*'
class _Errors(object):
"""A collection of errors."""
def __init__(self):
self._errors = []
def log_all(self):
"""Log all the collected errors to the standard error."""
template = 'ERROR:\n output file name: %s\n %s\n\n'
for full_name, message in self._errors:
print(template % (full_name, message), file=sys.stderr)
def append(self, full_name, message):
"""Add an error to the collection.
Args:
full_name: The path to the file in which the error occurred.
message: The message to display with the error.
"""
self._errors.append((full_name, message))
def __len__(self):
return len(self._errors)
def __eq__(self, other):
if not isinstance(other, _Errors):
return False
return self._errors == other._errors # pylint: disable=protected-access
def documentation_path(full_name):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
to write the documentation for that symbol (relative to a base directory).
Documentation files are organized into directories that mirror the python
module/class structure.
Args:
full_name: Fully qualified name of a library symbol.
Returns:
The file path to which to write the documentation for `full_name`.
"""
dirs = full_name.split('.')
return os.path.join(*dirs) + '.md'
def _get_raw_docstring(py_object):
"""Get the docs for a given python object.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
Returns:
The docstring, or the empty string if no docstring was found.
"""
# For object instances, tf_inspect.getdoc does give us the docstring of their
# type, which is not what we want. Only return the docstring if it is useful.
if (tf_inspect.isclass(py_object) or tf_inspect.ismethod(py_object) or
tf_inspect.isfunction(py_object) or tf_inspect.ismodule(py_object) or
isinstance(py_object, property)):
return tf_inspect.getdoc(py_object) or ''
else:
return ''
# A regular expression for capturing a @{symbol} reference.
SYMBOL_REFERENCE_RE = re.compile(
r"""
# Start with a literal "@{".
@\{
# Group at least 1 symbol: not "}" or "\n".
([^}\n]+)
# Followed by a closing "}"
\}
""",
flags=re.VERBOSE)
class ReferenceResolver(object):
  """Class for replacing @{...} references with Markdown links.

  Attributes:
    current_doc_full_name: A string (or None) indicating the name of the
      document currently being processed, so errors can reference the broken
      doc.
  """

  def __init__(self, duplicate_of, doc_index, is_class, is_module,
               py_module_names):
    """Initializes a Reference Resolver.

    Args:
      duplicate_of: A map from duplicate names to preferred names of API
        symbols.
      doc_index: A `dict` mapping symbol name strings to objects with `url`
        and `title` fields. Used to resolve @{$doc} references in docstrings.
      is_class: A map from full names to bool for each symbol.
      is_module: A map from full names to bool for each symbol.
      py_module_names: A list of string names of Python modules.
    """
    self._duplicate_of = duplicate_of
    self._doc_index = doc_index
    self._is_class = is_class
    self._is_module = is_module
    # `is_class` has one entry per documented symbol, so its keys double as
    # the full index of documented names.
    self._all_names = set(is_class.keys())
    self._py_module_names = py_module_names

    self.current_doc_full_name = None
    self._errors = _Errors()

  def add_error(self, message):
    # Errors are recorded against the document being processed at the time.
    self._errors.append(self.current_doc_full_name, message)

  def log_errors(self):
    self._errors.log_all()

  def num_errors(self):
    return len(self._errors)

  @classmethod
  def from_visitor(cls, visitor, doc_index, **kwargs):
    """A factory function for building a ReferenceResolver from a visitor.

    Args:
      visitor: an instance of `DocGeneratorVisitor`
      doc_index: a dictionary mapping document names to references objects with
        "title" and "url" fields
      **kwargs: all remaining args are passed to the constructor
    Returns:
      an instance of `ReferenceResolver` ()
    """
    is_class = {
        name: tf_inspect.isclass(visitor.index[name])
        for name, obj in visitor.index.items()
    }

    is_module = {
        name: tf_inspect.ismodule(visitor.index[name])
        for name, obj in visitor.index.items()
    }

    return cls(
        duplicate_of=visitor.duplicate_of,
        doc_index=doc_index,
        is_class=is_class,
        is_module=is_module,
        **kwargs)

  @classmethod
  def from_json_file(cls, filepath, doc_index):
    # Rebuild a resolver from the output of `to_json_file`. `doc_index` is not
    # serialized (see `to_json_file`), so the caller must supply it again.
    with open(filepath) as f:
      json_dict = json.load(f)

    return cls(doc_index=doc_index, **json_dict)

  def to_json_file(self, filepath):
    """Converts the RefenceResolver to json and writes it to the specified file.

    Args:
      filepath: The file path to write the json to.
    """
    json_dict = {}
    for key, value in self.__dict__.items():
      # Drop these two fields. `_doc_index` is not serializable. `_all_names` is
      # generated by the constructor.
      if key in ('_doc_index', '_all_names',
                 '_errors', 'current_doc_full_name'):
        continue

      # Strip off any leading underscores on field names as these are not
      # recognized by the constructor.
      json_dict[key.lstrip('_')] = value

    with open(filepath, 'w') as f:
      json.dump(json_dict, f)

  def replace_references(self, string, relative_path_to_root):
    """Replace "@{symbol}" references with links to symbol's documentation page.

    This functions finds all occurrences of "@{symbol}" in `string`
    and replaces them with markdown links to the documentation page
    for "symbol".

    `relative_path_to_root` is the relative path from the document
    that contains the "@{symbol}" reference to the root of the API
    documentation that is linked to. If the containing page is part of
    the same API docset, `relative_path_to_root` can be set to
    `os.path.dirname(documentation_path(name))`, where `name` is the
    python name of the object whose documentation page the reference
    lives on.

    Args:
      string: A string in which "@{symbol}" references should be replaced.
      relative_path_to_root: The relative path from the containing document to
        the root of the API documentation that is being linked to.

    Returns:
      `string`, with "@{symbol}" references replaced by Markdown links.
    """
    # Bind `relative_path_to_root` into a single-argument callback for re.sub.
    def one_ref(match):
      return self._one_ref(match, relative_path_to_root)

    return re.sub(SYMBOL_REFERENCE_RE, one_ref, string)

  def python_link(self, link_text, ref_full_name, relative_path_to_root,
                  code_ref=True):
    """Resolve a "@{python symbol}" reference to a Markdown link.

    This will pick the canonical location for duplicate symbols. The
    input to this function should already be stripped of the '@' and
    '{}'. This function returns a Markdown link. If `code_ref` is
    true, it is assumed that this is a code reference, so the link
    text will be rendered as code (using backticks).

    `link_text` should refer to a library symbol, starting with 'tf.'.

    Args:
      link_text: The text of the Markdown link.
      ref_full_name: The fully qualified name of the symbol to link to.
      relative_path_to_root: The relative path from the location of the current
        document to the root of the API documentation.
      code_ref: If true (the default), put `link_text` in `...`.

    Returns:
      A markdown link to the documentation page of `ref_full_name`.
    """
    url = self.reference_to_url(ref_full_name, relative_path_to_root)
    if code_ref:
      link_text = link_text.join(['<code>', '</code>'])
    else:
      link_text = self._link_text_to_html(link_text)
    return '<a href="{}">{}</a>'.format(url, link_text)

  @staticmethod
  def _link_text_to_html(link_text):
    # Convert markdown backtick spans in the link text into <code> spans.
    code_re = '`(.*?)`'
    return re.sub(code_re, r'<code>\1</code>', link_text)

  def py_master_name(self, full_name):
    """Return the master name for a Python symbol name."""
    return self._duplicate_of.get(full_name, full_name)

  def reference_to_url(self, ref_full_name, relative_path_to_root):
    """Resolve a "@{python symbol}" reference to a relative path.

    The input to this function should already be stripped of the '@'
    and '{}', and its output is only the link, not the full Markdown.

    If `ref_full_name` is the name of a class member, method, or property, the
    link will point to the page of the containing class, and it will include the
    method name as an anchor. For example, `tf.module.MyClass.my_method` will be
    translated into a link to
    `os.join.path(relative_path_to_root, 'tf/module/MyClass.md#my_method')`.

    Args:
      ref_full_name: The fully qualified name of the symbol to link to.
      relative_path_to_root: The relative path from the location of the current
        document to the root of the API documentation.

    Returns:
      A relative path that links from the documentation page of `from_full_name`
      to the documentation page of `ref_full_name`.

    Raises:
      RuntimeError: If `ref_full_name` is not documented.
    """
    master_name = self._duplicate_of.get(ref_full_name, ref_full_name)

    # Check whether this link exists
    if master_name not in self._all_names:
      message = 'Cannot make link to "%s": Not in index.' % master_name
      self.add_error(message)
      return 'BROKEN_LINK'

    # If this is a member of a class, link to the class page with an anchor.
    ref_path = None
    if not (self._is_class[master_name] or self._is_module[master_name]):
      idents = master_name.split('.')
      if len(idents) > 1:
        class_name = '.'.join(idents[:-1])
        assert class_name in self._all_names
        if self._is_class[class_name]:
          ref_path = documentation_path(class_name) + '#%s' % idents[-1]

    if not ref_path:
      ref_path = documentation_path(master_name)

    return os.path.join(relative_path_to_root, ref_path)

  def _one_ref(self, match, relative_path_to_root):
    """Return a link for a single "@{symbol}" reference."""
    string = match.group(1)

    # Look for link text after $.
    dollar = string.rfind('$')
    if dollar > 0:  # Ignore $ in first character
      link_text = string[dollar + 1:]
      string = string[:dollar]
      manual_link_text = True
    else:
      link_text = string
      manual_link_text = False

    # Handle different types of references.
    if string.startswith('$'):  # Doc reference
      return self._doc_link(string, link_text, manual_link_text,
                            relative_path_to_root)

    elif string.startswith('tensorflow::'):
      # C++ symbol
      return self._cc_link(string, link_text, manual_link_text,
                           relative_path_to_root)

    else:
      # A Python reference must start with a known module name (e.g. "tf").
      is_python = False
      for py_module_name in self._py_module_names:
        if string == py_module_name or string.startswith(py_module_name + '.'):
          is_python = True
          break
      if is_python:  # Python symbol
        return self.python_link(
            link_text,
            string,
            relative_path_to_root,
            code_ref=not manual_link_text)

    # Error!
    self.add_error('Did not understand "%s"' % match.group(0))
    return 'BROKEN_LINK'

  def _doc_link(self, string, link_text, manual_link_text,
                relative_path_to_root):
    """Generate a link for a @{$...} reference."""
    string = string[1:]  # remove leading $

    # If string has a #, split that part into `hash_tag`
    hash_pos = string.find('#')
    if hash_pos > -1:
      hash_tag = string[hash_pos:]
      string = string[:hash_pos]
    else:
      hash_tag = ''

    if string in self._doc_index:
      if not manual_link_text: link_text = self._doc_index[string].title
      # Doc pages live two levels above the Python API root, hence '../..'.
      url = os.path.normpath(os.path.join(
          relative_path_to_root, '../..', self._doc_index[string].url))
      link_text = self._link_text_to_html(link_text)
      return '<a href="{}{}">{}</a>'.format(url, hash_tag, link_text)
    return self._doc_missing(string, hash_tag, link_text, manual_link_text,
                             relative_path_to_root)

  def _doc_missing(self, string, unused_hash_tag, link_text,
                   unused_manual_link_text, unused_relative_path_to_root):
    """Generate an error for unrecognized @{$...} references."""
    self.add_error('Unknown Document "%s"' % string)
    return link_text

  def _cc_link(self, string, link_text, unused_manual_link_text,
               relative_path_to_root):
    """Generate a link for a @{tensorflow::...} reference."""
    # TODO(josh11b): Fix this hard-coding of paths.
    if string == 'tensorflow::ClientSession':
      ret = 'class/tensorflow/client-session.md'
    elif string == 'tensorflow::Scope':
      ret = 'class/tensorflow/scope.md'
    elif string == 'tensorflow::Status':
      ret = 'class/tensorflow/status.md'
    elif string == 'tensorflow::Tensor':
      ret = 'class/tensorflow/tensor.md'
    elif string == 'tensorflow::ops::Const':
      ret = 'namespace/tensorflow/ops.md#const'
    else:
      self.add_error('C++ reference not understood: "%s"' % string)
      return 'TODO_C++:%s' % string

    # relative_path_to_root gets you to api_docs/python, we go from there
    # to api_docs/cc, and then add ret.
    cc_relative_path = os.path.normpath(os.path.join(
        relative_path_to_root, '../cc', ret))

    return '<a href="{}"><code>{}</code></a>'.format(cc_relative_path,
                                                     link_text)
# TODO(aselle): Collect these into a big list for all modules and functions
# and make a rosetta stone page.
def _handle_compatibility(doc):
"""Parse and remove compatibility blocks from the main docstring.
Args:
doc: The docstring that contains compatibility notes"
Returns:
a tuple of the modified doc string and a hash that maps from compatibility
note type to the text of the note.
"""
compatibility_notes = {}
match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
r'((?:[^@\n]*\n)+)'
r'\s*@end_compatibility')
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
def _gen_pairs(items):
"""Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].
Args:
items: A list of items (length must be even)
Yields:
The original items, in pairs
"""
assert len(items) % 2 == 0
items = iter(items)
while True:
yield next(items), next(items)
class _FunctionDetail(
collections.namedtuple('_FunctionDetail', ['keyword', 'header', 'items'])):
"""A simple class to contain function details.
Composed of a "keyword", a possibly empty "header" string, and a possibly
empty
list of key-value pair "items".
"""
__slots__ = []
def __str__(self):
"""Return the original string that represents the function detail."""
parts = [self.keyword + ':\n']
parts.append(self.header)
for key, value in self.items:
parts.append(' ' + key + ': ')
parts.append(value)
return ''.join(parts)
def _parse_function_details(docstring):
  r"""Given a docstring, split off the header and parse the function details.

  For example the docstring of tf.nn.relu:

  '''Computes rectified linear: `max(features, 0)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
      `half`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  '''

  This is parsed, and returned as:

  ```
  ('Computes rectified linear: `max(features, 0)`.\n\n', [
      _FunctionDetail(
          keyword='Args',
          header='',
          items=[
              ('features', ' A `Tensor`. Must be ...'),
              ('name', ' A name for the operation (optional).\n\n')]),
      _FunctionDetail(
          keyword='Returns',
          header=' A `Tensor`. Has the same type as `features`.',
          items=[])
  ])
  ```

  Args:
    docstring: The docstring to parse

  Returns:
    A (header, function_details) pair, where header is a string and
    function_details is a (possibly empty) list of `_FunctionDetail` objects.
  """
  detail_keywords = '|'.join([
      'Args', 'Arguments', 'Fields', 'Returns', 'Yields', 'Raises', 'Attributes'
  ])
  # A section starts with a keyword on its own line; the lookbehind requires a
  # preceding newline so a keyword mid-line does not start a section.
  tag_re = re.compile('(?<=\n)(' + detail_keywords + '):\n', re.MULTILINE)
  parts = tag_re.split(docstring)

  # The first part is the main docstring
  docstring = parts[0]

  # Everything else alternates keyword-content
  pairs = list(_gen_pairs(parts[1:]))

  function_details = []
  # Matches an item name (possibly *args/**kwargs style) at the start of a
  # (lightly indented) line, followed by a colon.
  item_re = re.compile(r'^ ? ?(\*?\*?\w[\w.]*?\s*):\s', re.MULTILINE)

  for keyword, content in pairs:
    # `split` yields the section header first, then alternates captured item
    # names with their description text.
    content = item_re.split(content)
    header = content[0]
    items = list(_gen_pairs(content[1:]))
    function_details.append(_FunctionDetail(keyword, header, items))

  return docstring, function_details
# Parsed form of a docstring: `brief` is the first line, `docstring` the full
# text with compatibility blocks removed, `function_details` a list of
# `_FunctionDetail`, and `compatibility` a dict mapping note type to note text
# (see `_handle_compatibility`).
_DocstringInfo = collections.namedtuple('_DocstringInfo', [
    'brief', 'docstring', 'function_details', 'compatibility'
])
def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver):
  """Parse the object's docstring and return a `_DocstringInfo`.

  This function clears @@'s from the docstring, and replaces @{} references
  with markdown links.

  For links within the same set of docs, the `relative_path_to_root` for a
  docstring on the page for `full_name` can be set to:

  ```python
  relative_path_to_root = os.path.relpath(
      path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
  ```

  Args:
    py_object: A python object to retrieve the docs for (class, function/method,
      or module).
    relative_path_to_root: The relative path from the location of the current
      document to the root of the Python API documentation. This is used to
      compute links for "@{symbol}" references.
    reference_resolver: An instance of ReferenceResolver.

  Returns:
    A _DocstringInfo object, all fields will be empty if no docstring was found.
  """
  # TODO(wicke): If this is a partial, use the .func docstring and add a note.
  raw_docstring = _get_raw_docstring(py_object)

  raw_docstring = reference_resolver.replace_references(
      raw_docstring, relative_path_to_root)

  # Drop whole lines that consist only of an "@@symbol" export marker.
  atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
  raw_docstring = '\n'.join(
      line for line in raw_docstring.split('\n') if not atat_re.match(line))

  docstring, compatibility = _handle_compatibility(raw_docstring)
  docstring, function_details = _parse_function_details(docstring)

  # `brief` is the first line of the cleaned docstring.
  return _DocstringInfo(
      docstring.split('\n')[0], docstring, function_details, compatibility)
def _get_arg_spec(func):
  """Extracts signature information from a function or functools.partial object.

  For functions, uses `tf_inspect.getargspec`. For `functools.partial` objects,
  corrects the signature of the underlying function to take into account the
  removed arguments.

  Args:
    func: A function whose signature to extract.

  Returns:
    An `ArgSpec` namedtuple `(args, varargs, keywords, defaults)`, as returned
    by `tf_inspect.getargspec`.
  """
  # getargspec does not work for functools.partial objects directly.
  if isinstance(func, functools.partial):
    argspec = tf_inspect.getargspec(func.func)

    # Remove the args from the original function that have been used up.
    # Index of the first arg that has a default in the wrapped function.
    first_default_arg = (
        len(argspec.args or []) - len(argspec.defaults or []))
    partial_args = len(func.args)
    argspec_args = []

    # Positionally-applied partial args consume the leading parameters.
    if argspec.args:
      argspec_args = list(argspec.args[partial_args:])

    # If positional partial args reached into the defaulted region, the
    # corresponding defaults are consumed too.
    argspec_defaults = list(argspec.defaults or ())
    if argspec.defaults and partial_args > first_default_arg:
      argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:])

    first_default_arg = max(0, first_default_arg - partial_args)
    # Keyword-applied partial args remove their parameter wherever it sits;
    # removing one before the default boundary shifts that boundary left.
    for kwarg in (func.keywords or []):
      if kwarg in (argspec.args or []):
        i = argspec_args.index(kwarg)
        argspec_args.pop(i)
        if i >= first_default_arg:
          argspec_defaults.pop(i-first_default_arg)
        else:
          first_default_arg -= 1

    return tf_inspect.ArgSpec(args=argspec_args,
                              varargs=argspec.varargs,
                              keywords=argspec.keywords,
                              defaults=tuple(argspec_defaults))
  else:  # Regular function or method, getargspec will work fine.
    return tf_inspect.getargspec(func)
def _remove_first_line_indent(string):
indent = len(re.match(r'^\s*', string).group(0))
return '\n'.join([line[indent:] for line in string.split('\n')])
def _generate_signature(func, reverse_index):
  """Given a function, returns a list of strings representing its args.

  This function produces a list of strings representing the arguments to a
  python function. It uses tf_inspect.getargspec, which
  does not generalize well to Python 3.x, which is more flexible in how *args
  and **kwargs are handled. This is not a problem in TF, since we have to remain
  compatible to Python 2.7 anyway.

  This function uses `__name__` for callables if it is available. This can lead
  to poor results for functools.partial and other callable objects.

  The returned string is Python code, so if it is included in a Markdown
  document, it should be typeset as code (using backticks), or escaped.

  Args:
    func: A function, method, or functools.partial to extract the signature for.
    reverse_index: A map from object ids to canonical full names to use.

  Returns:
    A list of strings representing the argument signature of `func` as python
    code.
  """
  args_list = []

  argspec = _get_arg_spec(func)
  first_arg_with_default = (
      len(argspec.args or []) - len(argspec.defaults or []))

  # Python documentation skips `self` when printing method signatures.
  # Note we cannot test for ismethod here since unbound methods do not register
  # as methods (in Python 3).
  first_arg = 1 if 'self' in argspec.args[:1] else 0

  # Add all args without defaults.
  for arg in argspec.args[first_arg:first_arg_with_default]:
    args_list.append(arg)

  # Add all args with defaults.
  if argspec.defaults:
    try:
      # Parse the source so defaults can be rendered as originally written
      # (e.g. `tf.foo` instead of the repr of the object).
      source = _remove_first_line_indent(tf_inspect.getsource(func))
      func_ast = ast.parse(source)
      ast_defaults = func_ast.body[0].args.defaults
    except IOError:  # If this is a builtin, getsource fails with IOError
      # If we cannot get the source, assume the AST would be equal to the repr
      # of the defaults.
      ast_defaults = [None] * len(argspec.defaults)

    for arg, default, ast_default in zip(
        argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults):
      if id(default) in reverse_index:
        # The default object is a documented symbol; use its canonical name.
        default_text = reverse_index[id(default)]
      elif ast_default is not None:
        default_text = codegen.to_source(ast_default)
        if default_text != repr(default):
          # This may be an internal name. If so, handle the ones we know about.
          # TODO(wicke): This should be replaced with a lookup in the index.
          # TODO(wicke): (replace first ident with tf., check if in index)
          internal_names = {
              'ops.GraphKeys': 'tf.GraphKeys',
              '_ops.GraphKeys': 'tf.GraphKeys',
              'init_ops.zeros_initializer': 'tf.zeros_initializer',
              'init_ops.ones_initializer': 'tf.ones_initializer',
              'saver_pb2.SaverDef': 'tf.train.SaverDef',
          }
          # NOTE(review): the '.' between the identifiers is an unescaped
          # regex wildcard; presumably r'\.' was intended — confirm.
          full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
          match = re.match(full_name_re, default_text)
          if match:
            lookup_text = default_text
            for internal_name, public_name in six.iteritems(internal_names):
              if match.group(0).startswith(internal_name):
                lookup_text = public_name + default_text[len(internal_name):]
                break
            # Identity comparison: `lookup_text` is still the very same object
            # as `default_text` iff no substitution happened in the loop above.
            if default_text is lookup_text:
              print('WARNING: Using default arg, failed lookup: %s, repr: %r' %
                    (default_text, default))
            else:
              default_text = lookup_text
      else:
        default_text = repr(default)

      args_list.append('%s=%s' % (arg, default_text))

  # Add *args and *kwargs.
  if argspec.varargs:
    args_list.append('*' + argspec.varargs)
  if argspec.keywords:
    args_list.append('**' + argspec.keywords)

  return args_list
def _get_guides_markdown(duplicate_names, guide_index, relative_path):
all_guides = []
for name in duplicate_names:
all_guides.extend(guide_index.get(name, []))
if not all_guides: return ''
prefix = '../' * (relative_path.count('/') + 3)
links = sorted(set([guide_ref.make_md_link(prefix)
for guide_ref in all_guides]))
return 'See the guide%s: %s\n\n' % (
's' if len(links) > 1 else '', ', '.join(links))
def _get_defining_class(py_class, name):
  """Return the first class in py_class's MRO whose __dict__ defines name."""
  matches = (cls for cls in tf_inspect.getmro(py_class)
             if name in cls.__dict__)
  return next(matches, None)
class _LinkInfo(
collections.namedtuple(
'_LinkInfo', ['short_name', 'full_name', 'obj', 'doc', 'url'])):
__slots__ = []
def is_link(self):
return True
class _OtherMemberInfo(
collections.namedtuple('_OtherMemberInfo',
['short_name', 'full_name', 'obj', 'doc'])):
__slots__ = []
def is_link(self):
return False
# Doc-page entry for a class property: its names, the property object itself,
# and its parsed `_DocstringInfo`.
_PropertyInfo = collections.namedtuple(
    '_PropertyInfo', ['short_name', 'full_name', 'obj', 'doc'])

# Doc-page entry for a method: like `_PropertyInfo`, plus the parsed argument
# `signature` (see `_generate_signature`) and the `decorators` to mention.
_MethodInfo = collections.namedtuple('_MethodInfo', [
    'short_name', 'full_name', 'obj', 'doc', 'signature', 'decorators'
])
class _FunctionPageInfo(object):
  """Collects docs For a function Page."""

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._signature = None
    self._decorators = []

  def for_function(self):
    """Returns true if this object documents a function."""
    return True

  def for_class(self):
    """Returns true if this object documents a class."""
    return False

  def for_module(self):
    """Returns true if this object documents a module."""
    return False

  @property
  def full_name(self):
    return self._full_name

  @property
  def short_name(self):
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    return self._defined_in

  def set_defined_in(self, defined_in):
    # Each `set_*` method below may only be called once per page; the asserts
    # enforce write-once semantics.
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    return self._aliases

  def set_aliases(self, aliases):
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    return self._doc

  def set_doc(self, doc):
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    return self._guides

  def set_guides(self, guides):
    assert self.guides is None
    self._guides = guides

  @property
  def signature(self):
    return self._signature

  def set_signature(self, function, reverse_index):
    """Attach the function's signature.

    Args:
      function: The python function being documented.
      reverse_index: A map from object ids in the index to full names.
    """

    assert self.signature is None
    self._signature = _generate_signature(function, reverse_index)

  @property
  def decorators(self):
    # Return a copy so callers cannot mutate the internal list.
    return list(self._decorators)

  def add_decorator(self, dec):
    self._decorators.append(dec)
class _ClassPageInfo(object):
  """Collects docs for a class page.

  Attributes:
    full_name: The fully qualified name of the object at the master
      location. Aka `master_name`. For example: `tf.nn.sigmoid`.
    short_name: The last component of the `full_name`. For example: `sigmoid`.
    defined_in: The path to the file where this object is defined.
    aliases: The list of all fully qualified names for the locations where the
      object is visible in the public api. This includes the master location.
    doc: A `_DocstringInfo` object representing the object's docstring (can be
      created with `_parse_md_docstring`).
    guides: A markdown string, of back links pointing to the api_guides that
      reference this object.
    bases: A list of `_LinkInfo` objects pointing to the docs for the parent
      classes.
    properties: A list of `_PropertyInfo` objects documenting the class'
      properties (attributes that use `@property`).
    methods: A list of `_MethodInfo` objects documenting the class' methods.
    classes: A list of `_LinkInfo` objects pointing to docs for any nested
      classes.
    other_members: A list of `_OtherMemberInfo` objects documenting any other
      object's defined inside the class object (mostly enum style fields).
  """

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._bases = None
    self._properties = []
    self._methods = []
    self._classes = []
    self._other_members = []

  def for_function(self):
    """Returns true if this object documents a function."""
    return False

  def for_class(self):
    """Returns true if this object documents a class."""
    return True

  def for_module(self):
    """Returns true if this object documents a module."""
    return False

  @property
  def full_name(self):
    """Returns the documented object's fully qualified name."""
    return self._full_name

  @property
  def short_name(self):
    """Returns the documented object's short name."""
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    """Returns the path to the file where the documented object is defined."""
    return self._defined_in

  def set_defined_in(self, defined_in):
    """Sets the `defined_in` path."""
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    """Returns a list of all full names for the documented object."""
    return self._aliases

  def set_aliases(self, aliases):
    """Sets the `aliases` list.

    Args:
      aliases: A list of strings. Containing all the object's full names.
    """
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    """Returns a `_DocstringInfo` created from the object's docstring."""
    return self._doc

  def set_doc(self, doc):
    """Sets the `doc` field.

    Args:
      doc: An instance of `_DocstringInfo`.
    """
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    """Returns a markdown string containing backlinks to relevant api_guides."""
    return self._guides

  def set_guides(self, guides):
    """Sets the `guides` field.

    Args:
      guides: A markdown string containing backlinks to all the api_guides that
        link to the documented object.
    """
    assert self.guides is None
    self._guides = guides

  @property
  def bases(self):
    """Returns a list of `_LinkInfo` objects pointing to the class' parents."""
    return self._bases

  def _set_bases(self, relative_path, parser_config):
    """Builds the `bases` attribute, to document this class' parent-classes.

    This method sets the `bases` to a list of `_LinkInfo` objects point to the
    doc pages for the class' parents.

    Args:
      relative_path: The relative path from the doc this object describes to
        the documentation root.
      parser_config: An instance of `ParserConfig`.
    """
    bases = []
    obj = parser_config.py_name_to_object(self.full_name)
    for base in obj.__bases__:
      base_full_name = parser_config.reverse_index.get(id(base), None)
      # Skip base classes that are not part of the documented API.
      if base_full_name is None:
        continue
      base_doc = _parse_md_docstring(base, relative_path,
                                     parser_config.reference_resolver)
      base_url = parser_config.reference_resolver.reference_to_url(
          base_full_name, relative_path)

      link_info = _LinkInfo(short_name=base_full_name.split('.')[-1],
                            full_name=base_full_name, obj=base,
                            doc=base_doc, url=base_url)
      bases.append(link_info)

    self._bases = bases

  @property
  def properties(self):
    """Returns a list of `_PropertyInfo` describing the class' properties."""
    return self._properties

  def _add_property(self, short_name, full_name, obj, doc):
    """Adds a `_PropertyInfo` entry to the `properties` list.

    Args:
      short_name: The property's short name.
      full_name: The property's fully qualified name.
      obj: The property object itself
      doc: The property's parsed docstring, a `_DocstringInfo`.
    """
    property_info = _PropertyInfo(short_name, full_name, obj, doc)
    self._properties.append(property_info)

  @property
  def methods(self):
    """Returns a list of `_MethodInfo` describing the class' methods."""
    return self._methods

  def _add_method(self, short_name, full_name, obj, doc, signature, decorators):
    """Adds a `_MethodInfo` entry to the `methods` list.

    Args:
      short_name: The method's short name.
      full_name: The method's fully qualified name.
      obj: The method object itself
      doc: The method's parsed docstring, a `_DocstringInfo`
      signature: The method's parsed signature (see: `_generate_signature`)
      decorators: A list of strings describing the decorators that should be
        mentioned on the object's docs page.
    """
    method_info = _MethodInfo(short_name, full_name, obj, doc, signature,
                              decorators)
    self._methods.append(method_info)

  @property
  def classes(self):
    """Returns a list of `_LinkInfo` pointing to any nested classes."""
    return self._classes

  def _add_class(self, short_name, full_name, obj, doc, url):
    """Adds a `_LinkInfo` for a nested class to `classes` list.

    Args:
      short_name: The class' short name.
      full_name: The class' fully qualified name.
      obj: The class object itself
      doc: The class' parsed docstring, a `_DocstringInfo`
      url: A url pointing to where the nested class is documented.
    """
    page_info = _LinkInfo(short_name, full_name, obj, doc, url)

    self._classes.append(page_info)

  @property
  def other_members(self):
    """Returns a list of `_OtherMemberInfo` describing any other contents."""
    return self._other_members

  def _add_other_member(self, short_name, full_name, obj, doc):
    """Adds an `_OtherMemberInfo` entry to the `other_members` list.

    Args:
      short_name: The class' short name.
      full_name: The class' fully qualified name.
      obj: The class object itself
      doc: The class' parsed docstring, a `_DocstringInfo`
    """
    other_member_info = _OtherMemberInfo(short_name, full_name, obj, doc)
    self._other_members.append(other_member_info)

  def collect_docs_for_class(self, py_class, parser_config):
    """Collects information necessary specifically for a class's doc page.

    Mainly, this is details about the class's members.

    Args:
      py_class: The class object being documented
      parser_config: An instance of ParserConfig.
    """
    doc_path = documentation_path(self.full_name)
    relative_path = os.path.relpath(
        path='.', start=os.path.dirname(doc_path) or '.')

    self._set_bases(relative_path, parser_config)

    for short_name in parser_config.tree[self.full_name]:
      # Remove builtin members that we never want to document.
      if short_name in ['__class__', '__base__', '__weakref__', '__doc__',
                        '__module__', '__dict__', '__abstractmethods__',
                        '__slots__', '__getnewargs__']:
        continue

      child_name = '.'.join([self.full_name, short_name])
      child = parser_config.py_name_to_object(child_name)

      # Don't document anything that is defined in object or by protobuf.
      defining_class = _get_defining_class(py_class, short_name)
      if (defining_class is object or
          defining_class is type or defining_class is tuple or
          defining_class is BaseException or defining_class is Exception or
          # The following condition excludes most protobuf-defined symbols.
          defining_class and defining_class.__name__ in ['CMessage', 'Message',
                                                         'MessageMeta']):
        continue
      # TODO(markdaoust): Add a note in child docs showing the defining class.

      child_doc = _parse_md_docstring(child, relative_path,
                                      parser_config.reference_resolver)

      if isinstance(child, property):
        self._add_property(short_name, child_name, child, child_doc)

      elif tf_inspect.isclass(child):
        # `defining_class is None` means the name is not found on the MRO,
        # e.g. a dynamically attached attribute; skip it.
        if defining_class is None:
          continue
        url = parser_config.reference_resolver.reference_to_url(
            child_name, relative_path)
        self._add_class(short_name, child_name, child, child_doc, url)

      elif (tf_inspect.ismethod(child) or tf_inspect.isfunction(child) or
            tf_inspect.isroutine(child)):
        if defining_class is None:
          continue

        # Omit methods defined by namedtuple.
        original_method = defining_class.__dict__[short_name]
        if (hasattr(original_method, '__module__') and
            (original_method.__module__ or '').startswith('namedtuple')):
          continue

        # Some methods are often overridden without documentation. Because it's
        # obvious what they do, don't include them in the docs if there's no
        # docstring.
        if not child_doc.brief.strip() and short_name in [
            '__str__', '__repr__', '__hash__', '__del__', '__copy__']:
          print('Skipping %s, defined in %s, no docstring.' % (child_name,
                                                               defining_class))
          continue

        try:
          child_signature = _generate_signature(child,
                                                parser_config.reverse_index)
        except TypeError:
          # If this is a (dynamically created) slot wrapper, tf_inspect will
          # raise typeerror when trying to get to the code. Ignore such
          # functions.
          continue

        child_decorators = []
        try:
          if isinstance(py_class.__dict__[short_name], classmethod):
            child_decorators.append('classmethod')
        except KeyError:
          pass

        try:
          if isinstance(py_class.__dict__[short_name], staticmethod):
            child_decorators.append('staticmethod')
        except KeyError:
          pass

        self._add_method(short_name, child_name, child, child_doc,
                         child_signature, child_decorators)
      else:
        # Exclude members defined by protobuf that are useless
        if issubclass(py_class, ProtoMessage):
          if (short_name.endswith('_FIELD_NUMBER') or
              short_name in ['__slots__', 'DESCRIPTOR']):
            continue

        # TODO(wicke): We may want to also remember the object itself.
        self._add_other_member(short_name, child_name, child, child_doc)
class _ModulePageInfo(object):
  """Collects docs for a module page."""

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._modules = []
    self._classes = []
    self._functions = []
    self._other_members = []

  def for_function(self):
    """Returns true if this object documents a function."""
    return False

  def for_class(self):
    """Returns true if this object documents a class."""
    return False

  def for_module(self):
    """Returns true if this object documents a module."""
    return True

  @property
  def full_name(self):
    return self._full_name

  @property
  def short_name(self):
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    return self._defined_in

  def set_defined_in(self, defined_in):
    # The `set_*` methods are write-once, as in the other page-info classes.
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    return self._aliases

  def set_aliases(self, aliases):
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    return self._doc

  def set_doc(self, doc):
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    return self._guides

  def set_guides(self, guides):
    assert self.guides is None
    self._guides = guides

  @property
  def modules(self):
    return self._modules

  def _add_module(self, short_name, full_name, obj, doc, url):
    self._modules.append(_LinkInfo(short_name, full_name, obj, doc, url))

  @property
  def classes(self):
    return self._classes

  def _add_class(self, short_name, full_name, obj, doc, url):
    self._classes.append(_LinkInfo(short_name, full_name, obj, doc, url))

  @property
  def functions(self):
    return self._functions

  def _add_function(self, short_name, full_name, obj, doc, url):
    self._functions.append(_LinkInfo(short_name, full_name, obj, doc, url))

  @property
  def other_members(self):
    return self._other_members

  def _add_other_member(self, short_name, full_name, obj, doc):
    self._other_members.append(
        _OtherMemberInfo(short_name, full_name, obj, doc))

  def collect_docs_for_module(self, parser_config):
    """Collect information necessary specifically for a module's doc page.

    Mainly this is information about the members of the module.

    Args:
      parser_config: An instance of ParserConfig.
    """
    relative_path = os.path.relpath(
        path='.',
        start=os.path.dirname(documentation_path(self.full_name)) or '.')

    member_names = parser_config.tree.get(self.full_name, [])
    for name in member_names:

      # Skip standard module attributes that never need documenting.
      if name in ['__builtins__', '__doc__', '__file__',
                  '__name__', '__path__', '__package__']:
        continue

      member_full_name = self.full_name + '.' + name if self.full_name else name
      member = parser_config.py_name_to_object(member_full_name)

      member_doc = _parse_md_docstring(member, relative_path,
                                       parser_config.reference_resolver)

      url = parser_config.reference_resolver.reference_to_url(
          member_full_name, relative_path)

      # Route each member to the section matching its kind; anything that is
      # not a module/class/function is an "other member" (constants, etc.).
      if tf_inspect.ismodule(member):
        self._add_module(name, member_full_name, member, member_doc, url)

      elif tf_inspect.isclass(member):
        self._add_class(name, member_full_name, member, member_doc, url)

      elif tf_inspect.isfunction(member):
        self._add_function(name, member_full_name, member, member_doc, url)

      else:
        self._add_other_member(name, member_full_name, member, member_doc)
class ParserConfig(object):
  """Carries every lookup table shared by docs_for_object() calls."""

  def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index,
               reverse_index, guide_index, base_dir):
    """Build the shared configuration object.

    Args:
      reference_resolver: An instance of ReferenceResolver.
      duplicates: A `dict` mapping fully qualified names to a set of all
        aliases of this name; used to generate each page's alias list.
      duplicate_of: A map from duplicate names to preferred names of API
        symbols.
      tree: A `dict` mapping a fully qualified name to the names of all its
        members; populates the members section of a class or module page.
      index: A `dict` mapping full names to objects.
      reverse_index: A `dict` mapping object ids to full names.
      guide_index: A `dict` mapping symbol name strings to objects with a
        `make_md_link()` method.
      base_dir: A base path that is stripped from file locations written to
        the docs.
    """
    self.reference_resolver = reference_resolver
    self.duplicates = duplicates
    self.duplicate_of = duplicate_of
    self.tree = tree
    self.index = index
    self.reverse_index = reverse_index
    self.guide_index = guide_index
    self.base_dir = base_dir
    # Fixed location strings used when rendering "Defined in" notes.
    self.defined_in_prefix = 'tensorflow/'
    self.code_url_prefix = (
        'https://www.tensorflow.org/code/tensorflow/')  # pylint: disable=line-too-long

  def py_name_to_object(self, full_name):
    """Return the Python object registered under `full_name`."""
    return self.index[full_name]
def docs_for_object(full_name, py_object, parser_config):
  """Return a PageInfo object describing a given object from the TF API.

  This function uses _parse_md_docstring to parse the docs pertaining to
  `object`.

  This function resolves '@{symbol}' references in the docstrings into links to
  the appropriate location. It also adds a list of alternative names for the
  symbol automatically.

  It assumes that the docs for each object live in a file given by
  `documentation_path`, and that relative links to files within the
  documentation are resolvable.

  Args:
    full_name: The fully qualified name of the symbol to be
      documented.
    py_object: The Python object to be documented. Its documentation is sourced
      from `py_object`'s docstring.
    parser_config: A ParserConfig object.

  Returns:
    Either a `_FunctionPageInfo`, `_ClassPageInfo`, or a `_ModulePageInfo`
    depending on the type of the python object being documented.

  Raises:
    RuntimeError: If an object is encountered for which we don't know how
      to make docs.
  """
  # Which other aliases exist for the object referenced by full_name?
  master_name = parser_config.reference_resolver.py_master_name(full_name)
  duplicate_names = parser_config.duplicates.get(master_name, [full_name])

  # Dispatch on the object's kind to the matching PageInfo type.
  # TODO(wicke): Once other pieces are ready, enable this also for partials.
  if (tf_inspect.ismethod(py_object) or tf_inspect.isfunction(py_object) or
      # Some methods in classes from extensions come in as routines.
      tf_inspect.isroutine(py_object)):
    page_info = _FunctionPageInfo(master_name)
    page_info.set_signature(py_object, parser_config.reverse_index)
  elif tf_inspect.isclass(py_object):
    page_info = _ClassPageInfo(master_name)
    page_info.collect_docs_for_class(py_object, parser_config)
  elif tf_inspect.ismodule(py_object):
    page_info = _ModulePageInfo(master_name)
    page_info.collect_docs_for_module(parser_config)
  else:
    raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
                                                               py_object))

  # Path from this page's directory back to the doc root, used so parsed
  # docstring links can be written relative to the page.
  relative_path = os.path.relpath(
      path='.', start=os.path.dirname(documentation_path(full_name)) or '.')

  # Attach the page-level metadata (each set_* may only be called once).
  page_info.set_doc(_parse_md_docstring(
      py_object, relative_path, parser_config.reference_resolver))
  page_info.set_aliases(duplicate_names)
  page_info.set_guides(_get_guides_markdown(
      duplicate_names, parser_config.guide_index, relative_path))
  page_info.set_defined_in(_get_defined_in(py_object, parser_config))
  return page_info
class _PythonBuiltin(object):
"""This class indicated that the object in question is a python builtin.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def is_builtin(self):
return True
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'This is an alias for a Python built-in.\n\n'
class _PythonFile(object):
"""This class indicates that the object is defined in a regular python file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return True
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _ProtoFile(object):
"""This class indicates that the object is defined in a .proto file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _GeneratedFile(object):
"""This class indicates that the object is defined in a generated python file.
Generated files should not be linked to directly.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return True
def __str__(self):
return 'Defined in `%s%s`.\n\n' % (self.path_prefix, self.path)
def _get_defined_in(py_object, parser_config):
  """Return a description of where the passed in python object was defined.

  Args:
    py_object: The Python object.
    parser_config: A ParserConfig object.

  Returns:
    Either a `_PythonBuiltin`, `_PythonFile`, `_ProtoFile`, or a
    `_GeneratedFile` -- or None for files outside the code base.
  """
  # Every page gets a note about where this object is defined
  # TODO(wicke): If py_object is decorated, get the decorated object instead.
  # TODO(wicke): Only use decorators that support this in TF.
  try:
    rel_path = os.path.relpath(path=tf_inspect.getfile(py_object),
                               start=parser_config.base_dir)
  except TypeError:
    # getfile throws TypeError if py_object is a builtin.
    return _PythonBuiltin()

  # TODO(wicke): If this is a generated file, link to the source instead.
  # TODO(wicke): Move all generated files to a generated/ directory.
  # TODO(wicke): And make their source file predictable from the file name.

  # In case this is compiled, point to the original .py source.
  if rel_path.endswith('.pyc'):
    rel_path = rel_path[:-1]

  # Never include links outside this code base.
  if rel_path.startswith('..'):
    return None
  if re.match(r'.*/gen_[^/]*\.py$', rel_path):
    return _GeneratedFile(rel_path, parser_config)
  if re.match(r'.*_pb2\.py$', rel_path):
    # The _pb2.py files all appear right next to their defining .proto file.
    return _ProtoFile(rel_path[:-7] + '.proto', parser_config)
  return _PythonFile(rel_path, parser_config)
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
  """Given a dict of full names to python objects, generate an index page.

  The index page generated contains a list of links for all symbols in `index`
  that have their own documentation page.

  Args:
    library_name: The name for the documented library to use in the title.
    index: A dict mapping full names to python objects.
    reference_resolver: An instance of ReferenceResolver.

  Returns:
    A string containing an index page as Markdown.
  """
  symbol_links = []
  for name, obj in six.iteritems(index):
    if not (tf_inspect.ismodule(obj) or tf_inspect.isfunction(obj) or
            tf_inspect.isclass(obj)):
      continue
    # In Python 3, unbound methods are functions, so eliminate those.
    if tf_inspect.isfunction(obj):
      parent_name = name.rpartition('.')[0]
      if parent_name in index and tf_inspect.isclass(index[parent_name]):
        # Skip methods (=functions with class parents).
        continue
    symbol_links.append(
        (name, reference_resolver.python_link(name, name, '.')))

  lines = ['# All symbols in %s' % library_name, '']
  lines.extend('* %s' % link
               for _, link in sorted(symbol_links, key=lambda kv: kv[0]))

  # TODO(markdaoust): use a _ModulePageInfo -> prety_docs.build_md_page()
  return '\n'.join(lines)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
4acd9eeaf1e4d74b16644837730b5b590e581b08 | f7dd190a665a4966db33dcc1cc461dd060ca5946 | /venv/Lib/site-packages/rx/linq/observable/fromiterable.py | d7cb23a2d561fb56e33b039fe26c8d9f34747115 | [] | no_license | Darwin939/macmeharder_back | 2cc35e2e8b39a82c8ce201e63d9f6a9954a04463 | 8fc078333a746ac7f65497e155c58415252b2d33 | refs/heads/main | 2023-02-28T12:01:23.237320 | 2021-02-02T17:37:33 | 2021-02-02T17:37:33 | 328,173,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | from rx import config
from rx.core import Observable, AnonymousObservable
from rx.concurrency import current_thread_scheduler
from rx.disposables import MultipleAssignmentDisposable
from rx.internal import extensionclassmethod
@extensionclassmethod(Observable, alias=["from_", "from_list"])
def from_iterable(cls, iterable, scheduler=None):
    """Converts an array to an observable sequence, using an optional
    scheduler to enumerate the array.

    1 - res = rx.Observable.from_iterable([1,2,3])
    2 - res = rx.Observable.from_iterable([1,2,3], rx.Scheduler.timeout)

    Keyword arguments:
    :param Observable cls: Observable class
    :param Scheduler scheduler: [Optional] Scheduler to run the
        enumeration of the input sequence on.

    :returns: The observable sequence whose elements are pulled from the
        given enumerable sequence.
    :rtype: Observable
    """
    scheduler = scheduler or current_thread_scheduler
    # Lock guards the shared iterator: `action` may be re-entered from the
    # scheduler, and next() on a shared iterator is not atomic.
    lock = config["concurrency"].RLock()

    def subscribe(observer):
        sd = MultipleAssignmentDisposable()
        iterator = iter(iterable)

        def action(scheduler, state=None):
            # Pull one item per scheduled tick; completing or rescheduling
            # depending on whether the iterator is exhausted.
            try:
                with lock:
                    item = next(iterator)
            except StopIteration:
                observer.on_completed()
            else:
                observer.on_next(item)
                # Reschedule ourselves to emit the next item.  Reassigning
                # sd.disposable lets a dispose() cancel the pending tick.
                sd.disposable = scheduler.schedule(action)

        # Kick off the first emission.
        sd.disposable = scheduler.schedule(action)
        return sd
    return AnonymousObservable(subscribe)
| [
"51247000+Darwin939@users.noreply.github.com"
] | 51247000+Darwin939@users.noreply.github.com |
c263915915e3ecdd4dfe80ac85a6252e793db04b | 2d23c271ec1a226bb345c23d7b2671ec021e9502 | /N-Queens II.py | 0e810b3a5a8215fc322e50aa8bbaad18095b027a | [] | no_license | chenlanlan/leetcode | 2e6aec0846ed951466bcd2c2e4596c998faca8e4 | d02478853c32c29477f53852286c429c20f1424e | refs/heads/master | 2016-09-08T05:07:46.904441 | 2015-07-12T05:41:15 | 2015-07-12T05:41:15 | 32,845,795 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | #!/usr/bin/python
class Solution:
    """Counts the number of distinct solutions to the N-Queens problem."""

    # @param {integer} n
    # @return {integer}
    def __init__(self):
        # Running count of complete placements found by the current search.
        self.ans = 0

    def queens(self, board, n, curLevel):
        """Backtracking step: try every column for the queen on row `curLevel`.

        `board[r]` holds the column of the queen already placed on row r.
        A column is valid if no earlier queen shares it or a diagonal
        (|col difference| == row difference).
        """
        if curLevel == n:
            self.ans += 1
            return
        for col in range(n):
            if all(board[row] != col and abs(board[row] - col) != curLevel - row
                   for row in range(curLevel)):
                board[curLevel] = col
                self.queens(board, n, curLevel + 1)
        return

    def totalNQueens(self, n):
        """Return the number of N-Queens solutions for an n x n board."""
        # Bug fix: reset the counter so repeated calls on the same instance
        # don't accumulate counts from earlier runs.
        self.ans = 0
        board = [-1] * n
        self.queens(board, n, 0)
        return self.ans
# Quick manual check: a 4x4 board admits exactly 2 solutions.
test = Solution()
print(test.totalNQueens(4))
| [
"silan0318@163.com"
] | silan0318@163.com |
d9fe9d52411101f054693c8637b85329e64ad2f7 | 6051b9afd2969b65a055255d8fcde0555c62fc44 | /shop/admin.py | 0b0c0d68b8d7a21f803996bbbc85e35658b943f1 | [] | no_license | nazmul199512/django-ecommerce-app | 33a66f8efa9d63fc1fbf09d7e34d8ea74ffab63c | 6e48bdb86e7f04d7741eb0d302ee909ce99d6b88 | refs/heads/master | 2022-05-09T06:26:02.349432 | 2020-04-25T18:54:48 | 2020-04-25T18:54:48 | 258,850,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.contrib import admin
from .models import Product, Contact
admin.site.register(Product),
admin.site.register(Contact)
| [
"nazmul199512@gmail.com"
] | nazmul199512@gmail.com |
e27814dfaf0caaa92047a775a4c6e4f33ed268a1 | 41de4210af23a8a8a3ca7dd090bb51faecf4a0c8 | /lib/python3.5/site-packages/statsmodels/nonparametric/tests/test_kernels.py | b113306af1f38c208b9b5187b53a94b2ceb23ab2 | [
"Python-2.0"
] | permissive | randybrown-github/ziplineMacOS | 42a0c2bfca2a54baa03d2803dc41317647811285 | eb5872c0903d653e19f259f0800fb7aecee0ee5c | refs/heads/master | 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 | Python | UTF-8 | Python | false | false | 4,996 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 14 17:23:25 2013
Author: Josef Perktold
"""
from __future__ import print_function

import os

import numpy as np

from statsmodels.sandbox.nonparametric import kernels

from numpy.testing import assert_allclose, assert_array_less

# Set to 1 to print relative/absolute deviations inside the tests.
DEBUG = 0

# Module-level fixture: load Stata reference results once for all test
# classes.  Columns 's_<kern>' / 'se_<kern>' hold the fitted values and
# standard errors per kernel name.
curdir = os.path.dirname(os.path.abspath(__file__))
fname = 'results/results_kernel_regression.csv'
results = np.recfromcsv(os.path.join(curdir, fname))

y = results['accident']
x = results['service']
# Keep only non-negative service values, then work on the log scale.
positive = x >= 0
x = np.log(x[positive])
y = y[positive]
xg = np.linspace(x.min(), x.max(), 40)  # grid points default in Stata

# Commented-out alternatives kept for manually switching the kernel when
# running this module as a script.
#kern_name = 'gau'
#kern = kernels.Gaussian()
#kern_name = 'epan2'
#kern = kernels.Epanechnikov()
#kern_name = 'rec'
#kern = kernels.Uniform()  # ours looks awful
#kern_name = 'tri'
#kern = kernels.Triangular()
#kern_name = 'cos'
#kern = kernels.Cosine()  #doesn't match up, nan in Stata results ?
#kern_name = 'bi'
#kern = kernels.Biweight()
class CheckKernelMixin(object):
    """Shared checks comparing a kernel's smoothconf output to Stata results.

    Subclasses set `kern_name` (column suffix in the reference csv) and
    `kern` (a kernels instance), and may override the tolerance attributes.
    """

    # Default tolerances; subclasses override for poorly-matching kernels.
    se_rtol = 0.7
    upp_rtol = 0.1
    low_rtol = 0.2
    low_atol = 0.3

    def test_smoothconf(self):
        """Compare fitted values and confidence band against Stata."""
        kern_name = self.kern_name
        kern = self.kern
        #fittedg = np.array([kernels.Epanechnikov().smoothconf(x, y, xi) for xi in xg])
        # Rows of fittedg are (lower, fitted, upper) at each grid point.
        fittedg = np.array([kern.smoothconf(x, y, xi) for xi in xg])
        # attach for inspection from outside of test run
        self.fittedg = fittedg
        res_fitted = results['s_' + kern_name]
        res_se = results['se_' + kern_name]
        crit = 1.9599639845400545  # norm.isf(0.05 / 2)
        # implied standard deviation from conf_int
        se = (fittedg[:, 2] - fittedg[:, 1]) / crit
        fitted = fittedg[:, 1]
        # check both rtol & atol
        assert_allclose(fitted, res_fitted, rtol=5e-7, atol=1e-20)
        assert_allclose(fitted, res_fitted, rtol=0, atol=1e-6)

        # TODO: check we are using a different algorithm for se
        # The following are very rough checks
        self.se = se
        self.res_se = res_se
        se_valid = np.isfinite(res_se)
        # if np.any(~se_valid):
        #     print('nan in stata result', self.__class__.__name__)
        assert_allclose(se[se_valid], res_se[se_valid], rtol=self.se_rtol, atol=0.2)
        # check that most values are closer
        mask = np.abs(se - res_se) > (0.2 + 0.2 * res_se)
        if not hasattr(self, 'se_n_diff'):
            se_n_diff = 40 * 0.125
        else:
            se_n_diff = self.se_n_diff
        assert_array_less(mask.sum(), se_n_diff + 1)  # at most 5 large diffs
        if DEBUG:
            # raises: RuntimeWarning: invalid value encountered in divide
            print(fitted / res_fitted - 1)
            print(se / res_se - 1)

        # Stata only displays ci, doesn't save it
        res_upp = res_fitted + crit * res_se
        res_low = res_fitted - crit * res_se
        self.res_fittedg = np.column_stack((res_low, res_fitted, res_upp))
        if DEBUG:
            print(fittedg[:, 2] / res_upp - 1)
            print(fittedg[:, 2] - res_upp)
            print(fittedg[:, 0] - res_low)
            print(np.max(np.abs(fittedg[:, 2] / res_upp - 1)))
        assert_allclose(fittedg[se_valid, 2], res_upp[se_valid],
                        rtol=self.upp_rtol, atol=0.2)
        assert_allclose(fittedg[se_valid, 0], res_low[se_valid],
                        rtol=self.low_rtol, atol=self.low_atol)
        #assert_allclose(fitted, res_fitted, rtol=0, atol=1e-6)

    def t_est_smoothconf_data(self):
        """Disabled check on the data points themselves (no reference yet)."""
        kern = self.kern
        crit = 1.9599639845400545  # norm.isf(0.05 / 2)
        # no reference results saved to csv yet
        fitted_x = np.array([kern.smoothconf(x, y, xi) for xi in x])
        if DEBUG:
            # Bug fix: the original `print(a - b) / crit` divided print()'s
            # None return value by crit, raising TypeError when DEBUG is on.
            print((fitted_x[:, 2] - fitted_x[:, 1]) / crit)
class TestEpan(CheckKernelMixin):
    # Epanechnikov kernel vs. Stata's 'epan2' reference columns.
    kern_name = 'epan2'
    kern = kernels.Epanechnikov()
class TestGau(CheckKernelMixin):
    # Gaussian kernel vs. Stata's 'gau' reference columns.
    kern_name = 'gau'
    kern = kernels.Gaussian()
class TestUniform(CheckKernelMixin):
    # Uniform (rectangular) kernel; matches Stata poorly, so the
    # tolerances are loosened and more large deviations are allowed.
    kern_name = 'rec'
    kern = kernels.Uniform()
    se_rtol = 0.8
    se_n_diff = 8
    upp_rtol = 0.4
    low_rtol = 0.2
    low_atol = 0.8
class TestTriangular(CheckKernelMixin):
    # Triangular kernel vs. Stata's 'tri' reference columns.
    kern_name = 'tri'
    kern = kernels.Triangular()
    se_n_diff = 10
    upp_rtol = 0.15
    low_rtol = 0.3
class T_estCosine(CheckKernelMixin):
    # Disabled (name deliberately avoids the Test* pattern):
    # Stata results for Cosine look strange, has nans
    kern_name = 'cos'
    kern = kernels.Cosine2()
class TestBiweight(CheckKernelMixin):
    # Biweight kernel vs. Stata's 'bi' reference columns.
    kern_name = 'bi'
    kern = kernels.Biweight()
    se_n_diff = 9
    low_rtol = 0.3
if __name__ == '__main__':
    # Ad-hoc driver: run one case (only the last `tt` assignment is kept)
    # and compute deviations from the Stata reference for inspection.
    tt = TestEpan()
    tt = TestGau()
    tt = TestBiweight()
    tt.test_smoothconf()
    diff_rel = tt.fittedg / tt.res_fittedg - 1
    diff_abs = tt.fittedg - tt.res_fittedg
    mask = np.abs(tt.fittedg - tt.res_fittedg) > (0.3 + 0.1 * tt.res_fittedg)
| [
"randybrown18@me.com"
] | randybrown18@me.com |
21668d9f7981e397a1a67b0e5002313b2d9129a6 | c5ff781994b48e4699e6a4719951c22badf76716 | /conda_package_handling/utils.py | 209b3d5d941beafdf7796735fcb2187e5c6f6681 | [
"BSD-3-Clause"
] | permissive | soapy1/conda-package-handling | 1145b6762b47bc4d1dd85dd9319954b8f7b044a0 | bf21ee30a73b45d493ba0679bb51fbced4faf824 | refs/heads/master | 2020-05-19T04:12:19.773121 | 2019-02-13T16:23:53 | 2019-02-13T16:23:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,409 | py | import contextlib
import hashlib
import os
import re
import shutil
import warnings as _warnings
from six import string_types
try:
    from tempfile import TemporaryDirectory
except ImportError:
    # Python 2 fallback: backport of tempfile.TemporaryDirectory.
    from tempfile import mkdtemp

    class TemporaryDirectory(object):
        """Create and return a temporary directory.  This has the same
        behavior as mkdtemp but can be used as a context manager.  For
        example:

            with TemporaryDirectory() as tmpdir:
                ...

        Upon exiting the context, the directory and everything contained
        in it are removed.
        """

        # Handle mkdtemp raising an exception
        name = None
        _closed = False

        def __init__(self, suffix="", prefix='tmp', dir=None):
            self.name = mkdtemp(suffix, prefix, dir)

        def __repr__(self):
            return "<{} {!r}>".format(self.__class__.__name__, self.name)

        def __enter__(self):
            return self.name

        def cleanup(self, _warn=False, _warnings=_warnings):
            # `_warnings` is bound as a default so the module is still
            # reachable if __del__ runs during interpreter shutdown.
            if self.name and not self._closed:
                try:
                    shutil.rmtree(self.name)
                except (TypeError, AttributeError) as ex:
                    # Presumably a shutdown race where module globals were
                    # already cleared to None; retry once -- TODO confirm.
                    if "None" not in '%s' % (ex,):
                        raise
                    shutil.rmtree(self.name)
                self._closed = True
                if _warn and _warnings.warn:
                    # NOTE(review): the `warnings` module has no
                    # `ResourceWarning` attribute (it's a py3 builtin and
                    # absent in py2), so this warn path would raise
                    # AttributeError if it ever ran -- confirm upstream.
                    _warnings.warn("Implicitly cleaning up {!r}".format(self),
                                   _warnings.ResourceWarning)

        def __exit__(self, exc, value, tb):
            self.cleanup()

        def __del__(self):
            # Issue a ResourceWarning if implicit cleanup needed
            self.cleanup(_warn=True)
@contextlib.contextmanager
def tmp_chdir(dest):
    """Context manager: chdir into `dest`, restoring the previous cwd on exit."""
    previous = os.getcwd()
    os.chdir(dest)
    try:
        yield
    finally:
        os.chdir(previous)
def ensure_list(arg):
    """Normalize `arg` to a list.

    Strings and non-iterables are wrapped in a single-element list;
    None becomes []; any other iterable is returned unchanged.
    """
    is_scalar = isinstance(arg, string_types) or not hasattr(arg, '__iter__')
    if not is_scalar:
        return arg
    return [] if arg is None else [arg]
def filter_files(files_list, prefix,
                 filter_patterns=(r'(.*[\\\\/])?\.git[\\\\/].*',
                                  r'(.*[\\\\/])?\.git$',
                                  r'(.*)?\.DS_Store.*',
                                  r'.*\.la$',
                                  r'conda-meta.*')):
    """Remove VCS/editor cruft (and directories) from a list of file paths."""
    remaining = set(files_list)
    for pattern in filter_patterns:
        matcher = re.compile(pattern).match
        remaining -= set(filter(matcher, remaining))
    # Drop anything that is a directory relative to `prefix`.
    return [f for f in remaining
            if not os.path.isdir(os.path.join(prefix, f))]
def filter_info_files(files_list, prefix):
    """Strip conda's own info/ metadata entries from a candidate file list."""
    info_patterns = (
        'info[\\\\/]index.json',
        'info[\\\\/]files',
        'info[\\\\/]paths.json',
        'info[\\\\/]about.json',
        'info[\\\\/]has_prefix',
        'info[\\\\/]hash_input_files',   # legacy, not used anymore
        'info[\\\\/]hash_input.json',
        'info[\\\\/]run_exports.yaml',   # legacy
        'info[\\\\/]run_exports.json',   # current
        'info[\\\\/]git',
        'info[\\\\/]recipe[\\\\/].*',
        'info[\\\\/]recipe_log.json',
        'info[\\\\/]recipe.tar',
        'info[\\\\/]test[\\\\/].*',
        'info[\\\\/]LICENSE.txt',
        'info[\\\\/]requires',
        'info[\\\\/]meta',
        'info[\\\\/]platform',
        'info[\\\\/]no_link',
        'info[\\\\/]link.json',
        'info[\\\\/]icon.png',
    )
    return filter_files(files_list, prefix, filter_patterns=info_patterns)
def _checksum(fd, algorithm, buffersize=65536):
hash_impl = getattr(hashlib, algorithm)
if not hash_impl:
raise ValueError("Unrecognized hash algorithm: {}".format(algorithm))
else:
hash_impl = hash_impl()
for block in iter(lambda: fd.read(buffersize), b''):
hash_impl.update(block)
return hash_impl.hexdigest()
def sha256_checksum(fd):
return _checksum(fd, 'sha256')
def md5_checksum(fd):
return _checksum(fd, 'md5')
| [
"msarahan@gmail.com"
] | msarahan@gmail.com |
93229b77609a873669a124b6ac832d479c463898 | f734b77edf3e58a80d58433244375d2a70e8471a | /pages/urls.py | fd0d178a0bec5395992d54c6ebe4bae4431601e0 | [] | no_license | akjasim/cb_dj_auth_login_logout | 6751c6cb74ec57ca391e9205d45abd15e38612ea | f6201a7a9240b05d35dd70f713a8f4dc02209e54 | refs/heads/master | 2022-07-14T08:25:12.570455 | 2020-05-12T07:08:25 | 2020-05-12T07:08:25 | 263,248,446 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.urls import path
from pages import views
urlpatterns = [
path('', views.home)
] | [
"jasimjasim786@gmail.com"
] | jasimjasim786@gmail.com |
eaed488643b5678c463000200f53b36f46b825ce | 1576481f1e922a56962fce7cdf12745ec94d29d0 | /pywemo/ouimeaux_device/insight.py | 66ea79f45fde64eb9ce078ca0e1c7643a9cc77a2 | [
"BSD-3-Clause",
"MIT"
] | permissive | tomwilkie/pywemo | c5478fe6c8b74909716e956ddd0495eae459cecc | 6e48ef0e7a5430b7f028a82f82b682b889d21829 | refs/heads/master | 2021-01-17T17:31:12.205633 | 2015-04-18T00:54:09 | 2015-04-18T00:54:09 | 28,048,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from datetime import datetime
from .switch import Switch
class Insight(Switch):
    # A WeMo Insight smart plug: a Switch that additionally reports
    # power/energy metrics via the device's Insight service.

    def __repr__(self):
        return '<WeMo Insight "{name}">'.format(name=self.name)

    @property
    def insight_params(self):
        """Fetch and decode the device's pipe-delimited InsightParams blob."""
        params = self.insight.GetInsightParams().get('InsightParams')
        (
            state,  # 0 if off, 1 if on, 8 if on but load is off
            lastchange,
            onfor,  # seconds
            ontoday,  # seconds
            ontotal,  # seconds
            timeperiod,  # The period over which averages are calculated
            _x,  # This one is always 19 for me; what is it?
            currentmw,
            todaymw,
            totalmw,
            powerthreshold
        ) = params.split('|')
        return {'state': state,
                'lastchange': datetime.fromtimestamp(int(lastchange)),
                'onfor': int(onfor),
                'ontoday': int(ontoday),
                'ontotal': int(ontotal),
                'todaymw': int(float(todaymw)),
                'totalmw': int(float(totalmw)),
                'currentpower': int(float(currentmw))}

    @property
    def today_kwh(self):
        # 1.6666667e-8 == 1 / (1000 * 1000 * 60): presumably converts
        # mW-minutes to kWh -- TODO confirm the device's units.
        return self.insight_params['todaymw'] * 1.6666667e-8

    @property
    def current_power(self):
        """
        Returns the current power usage in mW.
        """
        return self.insight_params['currentpower']

    @property
    def today_on_time(self):
        # Seconds the load has been on today.
        return self.insight_params['ontoday']

    @property
    def on_for(self):
        # Seconds since the load was last switched on.
        return self.insight_params['onfor']

    @property
    def last_change(self):
        # datetime of the last state change.
        return self.insight_params['lastchange']

    @property
    def today_standby_time(self):
        # NOTE(review): returns 'ontoday', the same field as today_on_time --
        # looks like it should report a separate standby figure; confirm
        # against the device API.
        return self.insight_params['ontoday']
| [
"Paulus@PaulusSchoutsen.nl"
] | Paulus@PaulusSchoutsen.nl |
acb012a38a19f295e6d2b0a0acab5cde51f0a9f4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2338/60763/273941.py | dfb9cb96f0d7d1e3c021894db75f11a1b60bd05f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | T = int(input())
for i in range(T):
nx = list(map(int, ('' + input()).split(' ')))
n,x = nx[0],nx[1]
a = list(map(int, ('' + input()).split(' ')))
isContain = False
for j in range(n):
for k in range(n):
if j ==k:
continue
if a[j] + a[k] == x:
isContain = True
break
if isContain:
print("Yes")
else:
print("No") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7a98a4faf482eda836808da7fca33248732e244b | b6cac88da54791fe3b1588882ad13745d7a72a31 | /visualize/migrations/0003_bookmark_description.py | aca98a7a7cd07ebcf5658f82adfb7fccaf05b921 | [] | no_license | MidAtlanticPortal/mp-visualize | d662352a344c27c1493bcde34664a89c08453e8d | 300281f461aced50fd8a57e2dfbd5b8c75d4e911 | refs/heads/main | 2022-11-10T18:28:13.699794 | 2021-05-13T16:24:23 | 2021-05-13T16:24:23 | 44,761,880 | 1 | 1 | null | 2022-10-15T00:27:09 | 2015-10-22T17:32:28 | JavaScript | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visualize', '0002_content'),
]
operations = [
migrations.AddField(
model_name='bookmark',
name='description',
field=models.TextField(default=None, null=True, blank=True),
preserve_default=True,
),
]
| [
"ryan.d.hodges@gmail.com"
] | ryan.d.hodges@gmail.com |
339afdf3af2d30a40f3ab955bf5ce4d621a502d1 | b16e5e9d5ec0d49f8f4e85b7f9aaa4d60c4842fa | /calculator_proj/urls.py | 392ef641536dcf2d83b1069299a52a1bc4231294 | [] | no_license | leongus1/calc_proj | db9eb9546ef54f00dcc12a9fbaa23728e8c6383c | 607b16b26037fba4bf959244f4bf4662b86fd7ad | refs/heads/master | 2023-01-22T17:54:08.384028 | 2020-11-20T05:22:53 | 2020-11-20T05:22:53 | 314,307,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | """calculator_proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    # Delegate every remaining URL to the calc_app application.
    path('', include('calc_app.urls')),
]
| [
"leongus1@gmail.com"
] | leongus1@gmail.com |
44050fcf2efb032490eda335ee86a27171c500a6 | 152f54367e5a9f633217f5d1c9f327158d90bc85 | /sms.py | fac199f987b086c76ffb26f677a537be08f50c36 | [] | no_license | makalaaneesh/python-scripts | cf506bc62cb01f56367a510a81132ec5568bca98 | 40750a0a24db8c022b0fe1b16992d57817b764c2 | refs/heads/master | 2021-01-18T21:35:30.682773 | 2016-04-29T19:17:05 | 2016-04-29T19:17:05 | 29,910,777 | 0 | 2 | null | 2015-11-12T16:00:48 | 2015-01-27T11:17:06 | Python | UTF-8 | Python | false | false | 397 | py | headers = {
"X-Mashape-Key": "KEY"
}
import requests
message = "Double check. i could spam you you know"
semi_colon = '%3B'
phone = 'xx;xx'
username = 'phonenumber'
password = 'password' #1450
response = requests.get("https://freesms8.p.mashape.com/index.php?msg="+message+"&phone="+phone+"&pwd="+password+"&uid="+username, headers=headers)
print response.status_code
print response.content
| [
"makalaaneesh@yahoo.com"
] | makalaaneesh@yahoo.com |
0ae02dca88b48b82467eec4cc534e533f872f722 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/terminado/uimodule.py | 584259c87499503331c62bd650f9b84fe6358e97 | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 1,012 | py | """A Tornado UI module for a terminal backed by terminado.
See the Tornado docs for information on UI modules:
http://www.tornadoweb.org/en/stable/guide/templates.html#ui-modules
"""
# Copyright (c) Jupyter Development Team
# Copyright (c) 2014, Ramalingam Saravanan <sarava@sarava.net>
# Distributed under the terms of the Simplified BSD License.
import os.path
import tornado.web
class Terminal(tornado.web.UIModule):
    """Tornado UI module rendering a terminado-backed terminal element."""

    def render(self, ws_url, cols=80, rows=25):
        """Return the container div wired to the given websocket URL."""
        return ('<div class="terminado-container" '
                'data-ws-url="{ws_url}" '
                'data-rows="{rows}" data-cols="{cols}"/>').format(
                    ws_url=ws_url, rows=rows, cols=cols)

    def javascript_files(self):
        # TODO: Can we calculate these dynamically?
        return ['/xstatic/termjs/term.js', '/static/terminado.js']

    def embedded_javascript(self):
        """Return the contents of uimod_embed.js shipped next to this module."""
        file = os.path.join(os.path.dirname(__file__), 'uimod_embed.js')
        # Fix: the original opened the file without closing it, leaking the
        # file handle; `with` guarantees it is closed.
        with open(file) as f:
            return f.read()
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
8d3cdc75dc12985fae9f51ea8f9279df7717d4ba | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/learnedlspindex.py | 3c6d7aba9e95f03e6a73232b9e12d28bd68c0c92 | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 2,468 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedLspIndex(Base):
    """The LearnedLspIndex class encapsulates a required learnedLspIndex node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the LearnedLspIndex property from a parent instance.
    The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
    """
    # REST node name used by the server-side data model for this object.
    _SDM_NAME = 'learnedLspIndex'
    def __init__(self, parent):
        # parent: the containing node in the ixnetwork hierarchy; Base wires
        # up the REST href chain from it.
        super(LearnedLspIndex, self).__init__(parent)
    @property
    def Count(self):
        """total number of values

        Returns:
            number
        """
        # Delegates to Base's attribute cache / REST fetch.
        return self._get_attribute('count')
    def FetchAndUpdateConfigFromCloud(self, Mode):
        """Executes the fetchAndUpdateConfigFromCloud operation on the server.

        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/globals?deepchild=*|/api/v1/sessions/1/ixnetwork/topology?deepchild=*)): The method internally sets Arg1 to the current href for this instance
            Mode (str):

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE: payload=locals() turns the local variable names (Arg1, Mode,
        # and presumably a filtered-out 'self') into the operation payload
        # keys, so these names are part of the wire protocol — do not rename.
        Arg1 = self.href
        return self._execute('FetchAndUpdateConfigFromCloud', payload=locals(), response_object=None)
| [
"hubert.gee@keysight.com"
] | hubert.gee@keysight.com |
6f403e6bceb52c0706ae8c96e1056976c974d6cc | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/scaleform/daapi/view/lobby/MinimapLobby.py | 32508605dd055d92317f7e646f0d28a40723876c | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 3,856 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/MinimapLobby.py
import ArenaType
from gui.Scaleform.daapi.view.meta.MinimapPresentationMeta import MinimapPresentationMeta
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from helpers import dependency
from skeletons.account_helpers.settings_core import ISettingsCore
class MinimapLobby(MinimapPresentationMeta):
    """Lobby-side minimap widget: renders spawn, base, and control points for an arena."""
    settingsCore = dependency.descriptor(ISettingsCore)

    def __init__(self):
        super(MinimapLobby, self).__init__()
        self.__playerTeam = 1          # team drawn in 'blue' (the player's team)
        self.__arenaTypeID = None      # current arena type id; None until setArena()
        self.__cfg = dict()            # cached arena config consumed by build()
        self.__minimapSize = 300       # minimap edge length used for normalization
        return

    def _populate(self):
        super(MinimapLobby, self)._populate()
        # Listen for settings changes so point colours follow the
        # colour-blind option while the view is alive.
        self.settingsCore.onSettingsChanged += self.onSettingsChanging

    def _dispose(self):
        # Unsubscribe before teardown to avoid dangling event handlers.
        self.settingsCore.onSettingsChanged -= self.onSettingsChanging
        super(MinimapLobby, self)._dispose()

    def onSettingsChanging(self, diff):
        """Settings listener: redraw points when the colour-blind flag flips."""
        if 'isColorBlind' in diff:
            self.as_updatePointsS()

    def setMap(self, arenaID):
        """Alias for setArena() kept for callers using the older name."""
        self.setArena(arenaID)

    def setMinimapData(self, arenaID, playerTeam, size):
        """Set minimap size and player team, then load the given arena."""
        self.__minimapSize = size
        self.__playerTeam = playerTeam
        self.setArena(arenaID)

    def setPlayerTeam(self, playerTeam):
        self.__playerTeam = playerTeam

    def swapTeams(self, team):
        """Switch to the given team (falsy means team 1) and rebuild if it changed."""
        doBuild = False
        if not team:
            team = 1
        # BUGFIX: compare team numbers by value rather than identity.
        # 'is not' only happened to work because CPython caches small ints;
        # '!=' expresses the intended numeric comparison.
        if team != self.__playerTeam:
            self.__playerTeam = team
            doBuild = True
        if doBuild and self.__arenaTypeID is not None:
            self.build()
        return

    def setArena(self, arenaTypeID):
        """Load arena geometry/points from the arena type cache and render them."""
        self.__arenaTypeID = int(arenaTypeID)
        arenaType = ArenaType.g_cache[self.__arenaTypeID]
        cfg = {'texture': RES_ICONS.getMapPath(arenaType.geometryName),
         'size': arenaType.boundingBox,
         'teamBasePositions': arenaType.teamBasePositions,
         'teamSpawnPoints': arenaType.teamSpawnPoints,
         'controlPoints': arenaType.controlPoints}
        self.setConfig(cfg)

    def setEmpty(self):
        """Clear the minimap and show the 'question' placeholder texture."""
        self.as_clearS()
        path = RES_ICONS.getMapPath('question')
        self.as_changeMapS(path)

    def setConfig(self, cfg):
        """Cache the arena config and render it."""
        self.__cfg = cfg
        self.build()

    def build(self):
        """Re-render the map texture and all points from the cached config."""
        self.as_clearS()
        self.as_changeMapS(self.__cfg['texture'])
        bottomLeft, upperRight = self.__cfg['size']
        # The bounding-box corners act as 2D vectors (component-wise
        # arithmetic), so this unpacks into per-axis world->pixel scale
        # factors; offset is the world-space centre of the map.
        mapWidthMult, mapHeightMult = (upperRight - bottomLeft) / self.__minimapSize
        offset = (upperRight + bottomLeft) * 0.5

        def _normalizePoint(posX, posY):
            # Map a world position to minimap coordinates centred on 0.
            return ((posX - offset.x) / mapWidthMult, (posY - offset.y) / mapHeightMult)

        for team, teamSpawnPoints in enumerate(self.__cfg['teamSpawnPoints'], 1):
            for spawn, spawnPoint in enumerate(teamSpawnPoints, 1):
                posX, posY = _normalizePoint(spawnPoint[0], spawnPoint[1])
                # Label spawns 2..N when a team has several; a single spawn is '1'.
                self.as_addPointS(posX, posY, 'spawn', 'blue' if team == self.__playerTeam else 'red', spawn + 1 if len(teamSpawnPoints) > 1 else 1)
        for team, teamBasePoints in enumerate(self.__cfg['teamBasePositions'], 1):
            for baseNumber, basePoint in enumerate(teamBasePoints.values(), 2):
                posX, posY = _normalizePoint(basePoint[0], basePoint[1])
                self.as_addPointS(posX, posY, 'base', 'blue' if team == self.__playerTeam else 'red', baseNumber if len(teamBasePoints) > 1 else 1)
        if self.__cfg['controlPoints']:
            for index, controlPoint in enumerate(self.__cfg['controlPoints'], 2):
                posX, posY = _normalizePoint(controlPoint[0], controlPoint[1])
                self.as_addPointS(posX, posY, 'control', 'empty', index if len(self.__cfg['controlPoints']) > 1 else 1)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
90c9232164970d90d692cc1657a3835b8b64f6ad | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/93666/snippet.py | c0127e80358508837ecd626a3fa2379bf4e75d2d | [
"MIT"
def format_filename(s):
    """Build a safe filename from *s* using a whitelist approach.

    Characters outside ASCII letters, digits, and "-_.() " are dropped,
    and any remaining spaces are replaced with underscores.

    Note: the result may still be an invalid filename such as ``, `.` or
    `..`; callers typically prepend a date string like
    '2009_01_15_19_46_32_' and append an extension like '.txt' to avoid
    ever producing one.
    """
    whitelist = set("-_.() %s%s" % (string.ascii_letters, string.digits))
    kept = ''.join(ch for ch in s if ch in whitelist)
    # Spaces are whitelisted above but unwanted in the final name.
    return kept.replace(' ', '_')
"gistshub@gmail.com"
] | gistshub@gmail.com |
5e4805054d945c675d646fdf4e1f6df585066afe | 30fcd4c53519cd17497106cbe963f4f28d5c4edd | /shopping_list.py | d52b1975c3a343ac0640ff59620aa3bf1aba9b94 | [] | no_license | aymeet/shopping_list | 1e0300a85cf7443662c1829f5a73ae01b739a99b | c6d2252d1211621e8e056d51d6cbb05abe0a1a40 | refs/heads/master | 2021-01-20T04:09:46.330549 | 2017-04-28T00:56:49 | 2017-04-28T00:56:49 | 89,649,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,612 | py | """
Shopping List (Hackbright Prep dictionary code challenge)
Create a dictionary of shopping lists, and be able to
* add / remove lists
* add / remove items from lists
"""
def add_new_shopping_list(lists_by_name, new_list_name):
    """Add new shopping list to dict: key is list name, value is empty list.

    Will not allow duplicate shopping list names to be added. This method
    is case-sensitive.

    Arguments:
        lists_by_name: dict of shopping lists
        new_list_name: string name of the new list to add

    Returns:
        None
    """
    # Only create the list when the name is not already taken.
    if lists_by_name.get(new_list_name) is None:
        lists_by_name[new_list_name] = []
    # Echo the whole dict so the user sees the result of the operation.
    # print(x) with a single argument behaves identically on Python 2 and 3,
    # unlike the original Python-2-only 'print x' statement.
    print(lists_by_name)
def remove_shopping_list(lists_by_name, list_name_to_remove):
    """Remove shopping list from shopping lists dict.

    If the shopping list name does not exist in the dictionary, print a message
    and do nothing. This method is case-sensitive.

    Arguments:
        lists_by_name: dict of shopping lists
        list_name_to_remove: string name of the list to remove

    Returns:
        None
    """
    if lists_by_name.get(list_name_to_remove) is None:
        # Missing list: report it instead of raising KeyError.
        # (print(...) is the Python 2/3-compatible single-argument form.)
        print(list_name_to_remove + " does not exist")
    else:
        del lists_by_name[list_name_to_remove]
def add_to_shopping_list(lists_by_name, list_name, items):
    """Add given items to shopping list.

    Arguments:
        lists_by_name: dict of shopping lists
        list_name: string name of a shopping list
        items: list of items to add to the list

    Returns:
        None
    """
    # Extend in place so any other references to the same list see the
    # update. Callers (execute_repl) verify list_name exists beforehand.
    lists_by_name[list_name].extend(items)
def remove_from_shopping_list(lists_by_name, list_name, items):
    """Remove given items from shopping list.

    If an item doesn't exist in the list, print an error, and continue to
    attempt to remove the other items.

    Arguments:
        lists_by_name: dict of shopping lists
        list_name: string name of a shopping list
        items: list of items to remove from the list

    Returns:
        None
    """
    shopping_list = lists_by_name[list_name]
    for item in items:
        try:
            # list.remove() drops only the first occurrence of the item.
            shopping_list.remove(item)
        except ValueError:
            # Per the contract: report the missing item and keep going.
            print(item + " is not in the " + list_name + " list.")
def display_shopping_list(lists_by_name, list_name):
    """Print the contents of a shopping list.

    If the list is missing, print a message saying so. This function is
    case sensitive.

    Arguments:
        lists_by_name: dict of shopping lists
        list_name: string name of a shopping list

    Returns:
        None
    """
    # BUGFIX: the original iterated the whole dict (printing every list
    # NAME) and ignored list_name entirely; print the named list's items.
    if list_name not in lists_by_name:
        print('There is no {} list.'.format(list_name))
        return
    for item in lists_by_name[list_name]:
        print(item)
def show_all_lists(lists_by_name):
    """Given a dictionary of shopping lists, print each list's name.

    Arguments:
        lists_by_name: dict of shopping lists

    Returns:
        None
    """
    # Iterating a dict yields its keys, i.e. the shopping list names.
    # (print(...) is the Python 2/3-compatible single-argument form.)
    for list_name in lists_by_name:
        print(list_name)
def parse_string_of_items(items_string):
    """Split input string on commas and return the list of items.

    Leading and trailing whitespace is trimmed from every item.

    Arguments:
        items_string: a string with 0 or more commas separating items

    Returns:
        list of strings
    """
    # Split on commas, then strip each piece in a single pass.
    return [piece.strip() for piece in items_string.split(',')]
def edit_shopping_list(lists_by_name, list_name, add_or_remove):
    """Get items from user and add / remove them from the shopping list.

    Arguments:
        lists_by_name: dict of shopping lists
        list_name: string name of a shopping list
        add_or_remove: string that is either 'add' or 'remove', indicating whether
            the collected items should be added to or removed from the list

    Returns:
        None
    """
    # Get items to edit from the user and list-ify the input string.
    input_str = raw_input('Please enter items separated by commas: ')
    items = parse_string_of_items(input_str)
    # BUGFIX: operate on the lists_by_name parameter instead of reaching
    # out to the module-level shopping_lists_by_name global, so the
    # function works with whatever dict the caller passes in.
    if add_or_remove == 'add':
        add_to_shopping_list(lists_by_name, list_name, items)
    else:
        remove_from_shopping_list(lists_by_name, list_name, items)
def get_menu_choice():
    """Print a menu and ask the user to make a choice.

    Note: raises ValueError if the user types something that is not an
    integer; callers get no validation beyond int() conversion.

    Arguments:
        None

    Returns:
        int: the user's menu choice
    """
    # Single-argument print(...) works identically on Python 2 and 3,
    # unlike the original Python-2-only print statements.
    print('\n 0 - Main Menu')
    print(' 1 - Show all lists.')
    print(' 2 - Show a specific list.')
    print(' 3 - Add a new shopping list.')
    print(' 4 - Add item(s) to a shopping list.')
    print(' 5 - Remove items(s) from a shopping list.')
    print(' 6 - Remove a list by name.')
    print(' 7 - Exit the program.\n')
    choice = int(raw_input('Choose from the menu options: '))
    return choice
def execute_repl(shopping_lists_by_name):
    """Execute the repl loop for the control structure of the program.

    (REPL stands for Read - Eval - Print Loop. For more info:
    https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop)

    Arguments:
        shopping_lists_by_name: dict of shopping lists

    Returns:
        None
    """
    while True:
        # Get the next choice from the user.
        choice = get_menu_choice()
        if choice == 0:
            # Main menu: just redisplay it on the next iteration.
            continue
        elif choice == 1:
            # Show all lists.
            show_all_lists(shopping_lists_by_name)
        elif choice == 3:
            # Add a new shopping list.
            list_name = raw_input('Enter the name for your list: ')
            # BUGFIX: the original assigned the collected items
            # unconditionally, silently wiping out an existing list with the
            # same name even though add_new_shopping_list refuses duplicates.
            if list_name in shopping_lists_by_name:
                print('There is already a {} list.'.format(list_name))
                continue
            add_new_shopping_list(shopping_lists_by_name, list_name)
            # Get items for the (newly created, empty) list and set them.
            input_str = raw_input('Please enter items separated by commas: ')
            shopping_lists_by_name[list_name] = parse_string_of_items(input_str)
        elif choice == 7:
            # Quit.
            break
        else:
            # All remaining choices operate on an existing list, so first
            # collect the list name and verify it exists in the dict.
            list_name = raw_input('Which list would you like to see? ')
            if list_name not in shopping_lists_by_name:
                # No list by this name :-(
                print('There is no {} list.'.format(list_name))
                continue
            # The list exists; dispatch on the chosen action.
            if choice == 2:
                # Show a specific list.
                display_shopping_list(shopping_lists_by_name, list_name)
            elif choice == 4:
                # Add item(s) to a shopping list.
                edit_shopping_list(shopping_lists_by_name, list_name, 'add')
            elif choice == 5:
                # Remove item(s) from a shopping list.
                edit_shopping_list(shopping_lists_by_name, list_name, 'remove')
            elif choice == 6:
                # Remove the list entirely.
                remove_shopping_list(shopping_lists_by_name, list_name)
# Main code: build an empty collection of shopping lists and hand control
# to the REPL until the user chooses to quit. Note this runs on import as
# well as when executed as a script.
shopping_lists_by_name = {} # key is list name, value is [shopping list]
execute_repl(shopping_lists_by_name)
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
913e29f0f26471e63160c9235f604a5c3154198b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_141/ch18_2020_09_09_19_35_24_611574.py | 7b2fd62536d60fa3e0c59e54e26d481a70df8eba | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | def verifica_idade(anos):
if anos >= 18:
print("Liberado BRASIL")
elif anos >= 21:
print("Liberado EUA e BRASIL")
else:
print("Não está liberado") | [
"you@example.com"
] | you@example.com |
508d57aee0b7a1f6842f27cff28624defc925da6 | d9b679a0e49339f73334b72b07da3932611029a3 | /SSAFY/C9_TIL/django/instagram/instagram/urls.py | a5579b21f2c960053a51d1aaeecdcc347c5dc0ef | [] | no_license | ryanlee5646/TIL | 1c07f646dacb65b506bb1f848246a0dbb1c656e6 | 0a1f987beb11ca0222da8cd26c3d09dc342e17c7 | refs/heads/master | 2023-01-09T20:52:10.128853 | 2021-02-18T16:27:31 | 2021-02-18T16:27:31 | 162,209,652 | 1 | 1 | null | 2023-01-04T18:06:44 | 2018-12-18T00:57:19 | Python | UTF-8 | Python | false | false | 1,119 | py | """instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from accounts import views as accounts_views
# URL routing table for the project; each entry maps a path prefix to the
# admin site, an app's URLconf, or a view.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('accounts.urls')),
    path('posts/', include('posts.urls')),
    # Catch-all profile route: /<username>/ renders that user's page.
    # NOTE(review): this must stay LAST — it would otherwise shadow any
    # later prefix routes.
    path('<str:username>/', accounts_views.people, name='people' ),
]
# Serve user-uploaded media files in development (no-op when DEBUG is off).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"ryanlee5646@gmail.com"
] | ryanlee5646@gmail.com |
5f0c1da84c218d3c1464c3f4c1c30f0baff4e48a | 06119696c70c8e5458102f380d1842b3e3717b13 | /wassh/bin/lint | 6f621bc57a7ea6309db65fdbbd07aa1fc8691e09 | [
"BSD-2-Clause"
] | permissive | lproux/chOSapp | f166ad701857ce0f5b1bcafe97e778ff310ebdcc | 9c7e724c23bfca141a68c9a7d3639075ea908d52 | refs/heads/main | 2023-05-05T09:08:59.607603 | 2021-05-14T07:45:40 | 2021-05-14T17:31:07 | 367,856,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | #!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Lint our source files."""
import os
from pathlib import Path
import sys
import wassh
import libdot
# Directory holding wassh's JavaScript sources.
JS_DIR = wassh.DIR / 'js'
# Sibling checkout of the wasi-js-bindings project (provides externs & JS deps).
WJB_DIR = wassh.DIR.parent / 'wasi-js-bindings'
def _get_default_paths(basedir):
    """Get list of paths to lint by default."""
    # Everything libdot knows about except JS files (those are globbed below).
    non_js_files = sorted(p for p in libdot.lint.get_known_sources(basedir)
                          if p.suffix not in {'.js'})
    # All files in js/*.js.
    js_files = sorted(JS_DIR.glob('**/*.js'))
    # Use relpath for nicer default output.
    return [os.path.relpath(p) for p in non_js_files + js_files]
def main(argv):
    """The main func!"""
    # We need to use an absolute path with the module root to workaround
    # https://github.com/google/closure-compiler/issues/3580
    # Rewrite user-supplied path arguments relative to wassh.DIR *before*
    # the chdir below, while they still resolve against the caller's cwd.
    for i, arg in enumerate(argv):
        path = Path(arg)
        if arg and arg[0] != '-' and path.exists():
            argv[i] = os.path.relpath(path.resolve(), wassh.DIR)
    os.chdir(wassh.DIR)

    wasi_externs = WJB_DIR / 'externs'
    closure_args = list(libdot.lint.DEFAULT_CLOSURE_ARGS) + [
        '--js_module_root', JS_DIR,
        # TODO(vapier): We want to turn this on at some point.
        '--jscomp_off=strictMissingProperties',
        # Let closure compiler itself do the expansion.
        wassh.DIR.parent / 'wasi-js-bindings' / 'js' / '**.js',
    ] + [f'--externs={os.path.relpath(x)}'
         for x in sorted(wasi_externs.glob('*.js'))]
    # Delegate to libdot's shared lint driver with our closure configuration.
    return libdot.lint.main(argv, basedir=wassh.DIR,
                            get_default_paths=_get_default_paths,
                            closure_args=closure_args)
# Script entry point: forward CLI arguments (minus argv[0]) and propagate
# the lint result as the process exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| [
"vapier@chromium.org"
] | vapier@chromium.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.