text stringlengths 8 6.05M |
|---|
from typing import Optional, AsyncGenerator
from urllib.parse import quote as encode_path_name_for_url
from logging import getLogger
import xmltodict
from asgi_webdav.constants import (
DAV_METHODS,
DAVPath,
DAVLockInfo,
DAVPropertyIdentity,
DAVProperty,
)
from asgi_webdav.response import DAVResponse
from asgi_webdav.helpers import (
receive_all_data_in_one_call,
)
from asgi_webdav.request import DAVRequest
from asgi_webdav.lock import DAVLock
logger = getLogger(__name__)
class DAVProvider:
    """Base class for WebDAV resource providers.

    Subclasses implement the ``_do_*`` hooks (``_do_propfind``, ``_do_get``,
    ...); this class supplies the shared request handling, multi-status XML
    serialization and lock checks used by the ``do_*`` entry points below.
    """

    # URL path prefix served by this provider; do_copy/do_move reject
    # destinations that fall outside it.
    dist_prefix: DAVPath
    # TODO: stored but never consulted by the handlers visible in this class
    read_only: bool

    def __init__(self, dist_prefix: DAVPath, read_only: bool = False):
        self.dist_prefix = dist_prefix
        self.read_only = read_only  # TODO
        # lock bookkeeping shared by PROPPATCH/DELETE/PUT/COPY/MOVE/LOCK/UNLOCK
        self.dav_lock = DAVLock()

    def __repr__(self):
        # Subclasses must provide a meaningful representation.
        raise NotImplementedError
@staticmethod
def _create_ns_key_with_id(ns_map: dict[str, str], ns: str, key: str) -> str:
    """Return *key* qualified with a short namespace id.

    Each distinct namespace URI in *ns_map* is assigned an incrementing id
    ("ns1", "ns2", ...); repeated namespaces reuse their id.  An empty
    namespace leaves the key unqualified.
    """
    if not ns:
        # no namespace: use the bare key
        return key
    if ns not in ns_map:
        ns_map[ns] = "ns{}".format(len(ns_map) + 1)
    return "{}:{}".format(ns_map[ns], key)
@staticmethod
def _create_data_lock_discovery(lock_info: DAVLockInfo) -> dict:
    """Build the ``DAV:activelock`` dict fragment for *lock_info*.

    The result is later serialized with xmltodict into a lockdiscovery
    element (LOCK responses and PROPFIND).
    """
    active_lock = {
        "D:locktype": {"D:write": None},
        "D:lockscope": {"D:{}".format(lock_info.scope.name): None},
        "D:depth": lock_info.depth.value,
        "D:owner": lock_info.owner,
        "D:timeout": "Second-{}".format(lock_info.timeout),
        "D:locktoken": {
            "D:href": "opaquelocktoken:{}".format(lock_info.token),
        },
    }
    return {"D:activelock": active_lock}
"""
https://tools.ietf.org/html/rfc4918#page-35
9.1.1. PROPFIND Status Codes
This section, as with similar sections for other methods, provides
some guidance on error codes and preconditions or postconditions
(defined in Section 16) that might be particularly useful with
PROPFIND.
403 Forbidden - A server MAY reject PROPFIND requests on collections
with depth header of "Infinity", in which case it SHOULD use this
error with the precondition code 'propfind-finite-depth' inside the
error body.
9.1.2. Status Codes for Use in 'propstat' Element
In PROPFIND responses, information about individual properties_list is
returned inside 'propstat' elements (see Section 14.22), each
containing an individual 'status' element containing information
about the properties_list appearing in it. The list below summarizes the
most common status codes used inside 'propstat'; however, clients
should be prepared to handle other 2/3/4/5xx series status codes as
well.
200 OK - A property exists and/or its value is successfully returned.
401 Unauthorized - The property cannot be viewed without appropriate
authorization.
403 Forbidden - The property cannot be viewed regardless of
authentication.
404 Not Found - The property does not exist.
https://tools.ietf.org/html/rfc4918#page-78
11.1. 207 Multi-Status
The 207 (Multi-Status) status code provides status for multiple
independent operations (see Section 13 for more information).
"""
async def do_propfind(self, request: DAVRequest) -> dict[DAVPath, DAVProperty]:
    """Handle PROPFIND by delegating to the provider-specific hook."""
    return await self._do_propfind(request)

async def _do_propfind(self, request: DAVRequest) -> dict[DAVPath, DAVProperty]:
    # Subclasses return the properties for every path covered by the request.
    raise NotImplementedError
async def create_propfind_response(
    self, request: DAVRequest, dav_properties: dict[DAVPath, DAVProperty]
) -> bytes:
    """Serialize *dav_properties* into a 207 Multi-Status XML body.

    One ``D:response`` element is emitted per property set; found
    properties go into a 200 propstat, unresolved extra properties into a
    404 propstat.  Returns the UTF-8 encoded XML with newlines stripped.
    """
    response = list()
    # namespace URI -> short id ("ns1", ...); shared across all items
    ns_map = dict()
    for dav_property in dav_properties.values():
        href_path = dav_property.href_path
        found_property = dict()

        # basic data: either everything the resource has, or only the
        # keys the client asked for (and that actually exist)
        if request.propfind_fetch_all_property:
            basic_keys = dav_property.basic_data.keys()
        else:
            basic_keys = request.propfind_basic_keys

        for k in basic_keys:
            if k in dav_property.basic_data:
                found_property["D:" + k] = dav_property.basic_data[k]
        if dav_property.is_collection:
            found_property["D:resourcetype"] = {"D:collection": None}
        else:
            found_property["D:resourcetype"] = None

        # extra data (namespaced dead properties)
        for (ns, key), value in dav_property.extra_data.items():
            ns_id = self._create_ns_key_with_id(ns_map, ns, key)
            found_property[ns_id] = value

        # lock discovery for this path
        lock_info = await self.dav_lock.get_info_by_path(href_path)
        if len(lock_info) > 0:
            # TODO!!!! multi-token: only the first lock is reported
            lock_discovery = self._create_data_lock_discovery(lock_info[0])
        else:
            lock_discovery = None

        response_item = {
            "D:href": encode_path_name_for_url(href_path.raw),
            "D:propstat": [
                {
                    "D:prop": found_property,
                    # 'D:supportedlock': {
                    #     'D:lockentry': [
                    #         {
                    #             'D:lockscope': {'D:exclusive': None},
                    #             'D:locktype': {'D:write': None}
                    #         },
                    #         {
                    #             'D:lockscope': {'D:shared': None},
                    #             'D:locktype': {'D:write': None}
                    #         }
                    #     ]
                    # },
                    "D:lockdiscovery": lock_discovery,
                    "D:status": "HTTP/1.1 200 OK",
                },
            ],
        }

        # extra properties that were requested but do not exist -> 404 propstat
        if len(dav_property.extra_not_found) > 0:
            not_found_property = dict()
            for ns, key in dav_property.extra_not_found:
                ns_id = self._create_ns_key_with_id(ns_map, ns, key)
                not_found_property[ns_id] = None

            not_found_property = {
                "D:prop": not_found_property,
                "D:status": "HTTP/1.1 404 Not Found",
            }
            response_item["D:propstat"].append(not_found_property)

        # namespace declarations collected while mapping extra properties
        # TODO ns0 => DAV:
        for k, v in ns_map.items():
            response_item["@xmlns:{}".format(v)] = k

        response.append(response_item)

    data = {
        "D:multistatus": {
            "@xmlns:D": "DAV:",
            "D:response": response,
        }
    }
    return (
        xmltodict.unparse(data, short_empty_elements=True)
        .replace("\n", "")
        .encode("utf-8")
    )
"""
https://tools.ietf.org/html/rfc4918#page-44
9.2. PROPPATCH Method
9.2.1. Status Codes for Use in 'propstat' Element
In PROPPATCH responses, information about individual properties_list is
returned inside 'propstat' elements (see Section 14.22), each
containing an individual 'status' element containing information
about the properties_list appearing in it. The list below summarizes the
most common status codes used inside 'propstat'; however, clients
should be prepared to handle other 2/3/4/5xx series status codes as
well.
200 (OK) - The property set or change succeeded. Note that if this
appears for one property, it appears for every property in the
response, due to the atomicity of PROPPATCH.
403 (Forbidden) - The client, for reasons the server chooses not to
specify, cannot alter one of the properties_list.
403 (Forbidden): The client has attempted to set a protected
property, such as DAV:getetag. If returning this error, the server
SHOULD use the precondition code 'cannot-modify-protected-property'
inside the response body.
409 (Conflict) - The client has provided a value whose semantics are
not appropriate for the property.
424 (Failed Dependency) - The property change could not be made
because of another property change that failed.
507 (Insufficient Storage) - The server did not have sufficient space
to record the property.
"""
async def do_proppatch(self, request: DAVRequest) -> DAVResponse:
    """Handle PROPPATCH: 400 on a bad body, 423 when locked, else the
    status of the provider hook (207 carries a multistatus body)."""
    if not request.body_is_parsed_success:
        return DAVResponse(400)
    if await self.dav_lock.is_locking(request.src_path, request.lock_token):
        return DAVResponse(423)

    http_status = await self._do_proppatch(request)
    if http_status != 207:
        return DAVResponse(http_status, message=b"")

    success_ids = [entry[0] for entry in request.proppatch_entries]
    message = self._create_proppatch_response(request, success_ids)
    return DAVResponse(http_status, message=message)

async def _do_proppatch(self, request: DAVRequest) -> int:
    # Subclasses apply the property changes and return an HTTP status.
    raise NotImplementedError
@staticmethod
def _create_proppatch_response(
    request: DAVRequest, sucess_ids: list[DAVPropertyIdentity]
) -> bytes:
    """Build the PROPPATCH 207 body listing every updated property."""
    props = {"D:{}".format(key): None for _ns, key in sucess_ids}  # TODO namespace
    data = {
        "D:multistatus": {
            "@xmlns:D": "DAV:",
            "D:response": {
                "D:href": request.src_path,
                "D:propstat": {
                    "D:prop": props,
                    "D:status": "HTTP/1.1 200 OK",
                },
            },
        }
    }
    xml = xmltodict.unparse(data, short_empty_elements=True)
    return xml.replace("\n", "").encode("utf-8")
"""
https://tools.ietf.org/html/rfc4918#page-46
9.3.1. MKCOL Status Codes
In addition to the general status codes possible, the following
status codes have specific applicability to MKCOL:
201 (Created) - The collection was created.
403 (Forbidden) - This indicates at least one of two conditions: 1)
the server does not allow the creation of collections at the given
location in its URL namespace, or 2) the parent collection of the
Request-URI exists but cannot accept members.
405 (Method Not Allowed) - MKCOL can only be executed on an unmapped
URL.
409 (Conflict) - A collection cannot be made at the Request-URI until
one or more intermediate collections have been created. The server
MUST NOT create those intermediate collections automatically.
415 (Unsupported Media Type) - The server does not support the
request body type (although bodies are legal on MKCOL requests, since
this specification doesn't define any, the server is likely not to
support any given body type).
507 (Insufficient Storage) - The resource does not have sufficient
space to record the state of the resource after the execution of this
method.
"""
async def do_mkcol(self, request: DAVRequest) -> DAVResponse:
    """Handle MKCOL; a non-empty request body is rejected with 415."""
    body = await receive_all_data_in_one_call(request.receive)
    if body:
        # https://tools.ietf.org/html/rfc2518#page-33
        # https://tools.ietf.org/html/rfc4918#page-46
        return DAVResponse(415)

    return DAVResponse(await self._do_mkcol(request))

async def _do_mkcol(self, request: DAVRequest) -> int:
    # Subclasses create the collection and return an HTTP status.
    raise NotImplementedError
"""
https://tools.ietf.org/html/rfc4918#page-48
9.4. GET, HEAD for Collections
The semantics of GET are unchanged when applied to a collection,
since GET is defined as, "retrieve whatever information (in the form
of an entity) is identified by the Request-URI" [RFC2616]. GET, when
applied to a collection, may return the contents of an "index.html"
resource, a human-readable view of the contents of the collection, or
something else altogether. Hence, it is possible that the result of
a GET on a collection will bear no correlation to the membership of
the collection.
Similarly, since the definition of HEAD is a GET without a response
message body, the semantics of HEAD are unmodified when applied to
collection resources.
https://tools.ietf.org/html/rfc2616#page-53
9.3 GET
The GET method means retrieve whatever information (in the form of an
entity) is identified by the Request-URI. If the Request-URI refers
to a data-producing process, it is the produced data which shall be
returned as the entity in the response and not the source text of the
process, unless that text happens to be the output of the process.
The semantics of the GET method change to a "conditional GET" if the
request message includes an If-Modified-Since, If-Unmodified-Since,
If-Match, If-None-Match, or If-Range header field. A conditional GET
method requests that the entity be transferred only under the
circumstances described by the conditional header field(s). The
conditional GET method is intended to reduce unnecessary network
usage by allowing cached entities to be refreshed without requiring
multiple requests or transferring data already held by the client.
The semantics of the GET method change to a "partial GET" if the
request message includes a Range header field. A partial GET requests
that only part of the entity be transferred, as described in section
14.35. The partial GET method is intended to reduce unnecessary
network usage by allowing partially-retrieved entities to be
completed without transferring data already held by the client.
The response to a GET request is cacheable if and only if it meets
the requirements for HTTP caching described in section 13.
See section 15.1.3 for security considerations when used for forms.
"""
async def do_get(self, request: DAVRequest) -> DAVResponse:
    """Handle GET: fetch status, properties and a data generator."""
    http_status, basic_property, data = await self._do_get(request)
    if http_status != 200:
        # TODO bug -- any non-200 result discards basic_property and data;
        # NOTE(review): confirm whether e.g. partial-content responses
        # should carry headers/data through here
        return DAVResponse(http_status)

    headers = self._create_get_head_response_headers(basic_property)
    return DAVResponse(200, headers=headers, data=data)

async def _do_get(
    self, request: DAVRequest
) -> tuple[int, dict[str, str], Optional[AsyncGenerator]]:
    # Subclasses return (status, basic property dict, async body generator).
    raise NotImplementedError

async def do_head(self, request: DAVRequest) -> DAVResponse:
    """Handle HEAD: same headers as GET, no body."""
    http_status, basic_property = await self._do_head(request)
    if http_status == 200:
        headers = self._create_get_head_response_headers(basic_property)
        response = DAVResponse(status=http_status, headers=headers)
    else:
        # TODO: every non-200 status is flattened to 404 here
        response = DAVResponse(404)  # TODO
    return response

async def _do_head(self, request: DAVRequest) -> tuple[int, dict[str, str]]:
    # Subclasses return (status, basic property dict).
    raise NotImplementedError
@staticmethod
def _create_get_head_response_headers(
    basic_property: dict[str, str]
) -> dict[bytes, bytes]:
    """Map the basic DAV property dict onto GET/HEAD response headers.

    Missing properties now default to an empty string: previously
    ``dict.get(name)`` returned None for an absent key and the following
    ``.encode()`` raised AttributeError.
    """

    def prop(name: str) -> bytes:
        # default "" keeps .encode() safe when the property is absent
        return basic_property.get(name, "").encode("utf-8")

    return {
        b"ETag": prop("getetag"),
        b"Last-Modified": prop("getlastmodified"),
        b"Content-Type": prop("getcontenttype"),
        b"Content-Length": prop("getcontentlength"),
        # fixed header name: the standard field is "Content-Encoding"
        # (was the nonstandard "Content-Encodings")
        b"Content-Encoding": prop("encoding"),
        b"Accept-Ranges": b"bytes",
    }
"""
https://tools.ietf.org/html/rfc4918#page-48
9.6. DELETE Requirements
DELETE is defined in [RFC2616], Section 9.7, to "delete the resource
identified by the Request-URI". However, WebDAV changes some DELETE
handling requirements.
A server processing a successful DELETE request:
MUST destroy locks rooted on the deleted resource
MUST remove the mapping from the Request-URI to any resource.
Thus, after a successful DELETE operation (and in the absence of
other actions), a subsequent GET/HEAD/PROPFIND request to the target
Request-URI MUST return 404 (Not Found).
9.6.1. DELETE for Collections
The DELETE method on a collection MUST act as if a "Depth: infinity"
header was used on it. A client MUST NOT submit a Depth header with
a DELETE on a collection with any value but infinity.
DELETE instructs that the collection specified in the Request-URI and
all resources identified by its internal member URLs are to be
deleted.
If any resource identified by a member URL cannot be deleted, then
all of the member's ancestors MUST NOT be deleted, so as to maintain
URL namespace consistency.
Any headers included with DELETE MUST be applied in processing every
resource to be deleted.
When the DELETE method has completed processing, it MUST result in a
consistent URL namespace.
If an error occurs deleting a member resource (a resource other than
the resource identified in the Request-URI), then the response can be
a 207 (Multi-Status). Multi-Status is used here to indicate which
internal resources could NOT be deleted, including an error code,
which should help the client understand which resources caused the
failure. For example, the Multi-Status body could include a response
with status 423 (Locked) if an internal resource was locked.
The server MAY return a 4xx status response, rather than a 207, if
the request failed completely.
424 (Failed Dependency) status codes SHOULD NOT be in the 207 (Multi-
Status) response for DELETE. They can be safely left out because the
client will know that the ancestors of a resource could not be
deleted when the client receives an error for the ancestor's progeny.
Additionally, 204 (No Content) errors SHOULD NOT be returned in the
207 (Multi-Status). The reason for this prohibition is that 204 (No
Content) is the default success code.
https://tools.ietf.org/html/rfc2616#section-9.7
9.7 DELETE
The DELETE method requests that the origin server delete the resource
identified by the Request-URI. This method MAY be overridden by human
intervention (or other means) on the origin server. The client cannot
be guaranteed that the operation has been carried out, even if the
status code returned from the origin server indicates that the action
has been completed successfully. However, the server SHOULD NOT
indicate success unless, at the time the response is given, it
intends to delete the resource or move it to an inaccessible
location.
A successful response SHOULD be 200 (OK) if the response includes an
entity describing the status, 202 (Accepted) if the action has not
yet been enacted, or 204 (No Content) if the action has been enacted
but the response does not include an entity.
If the request passes through a cache and the Request-URI identifies
one or more currently cached entities, those entries SHOULD be
treated as stale. Responses to this method are not cacheable.
"""
async def do_delete(self, request: DAVRequest) -> DAVResponse:
    """Handle DELETE; 423 when the target is locked with another token."""
    if await self.dav_lock.is_locking(request.src_path, request.lock_token):
        return DAVResponse(423)

    status = await self._do_delete(request)
    if status == 204:
        # a successful DELETE drops the lock held under this token
        await self.dav_lock.release(request.lock_token)
    return DAVResponse(status)

async def _do_delete(self, request: DAVRequest) -> int:
    # Subclasses delete the resource and return an HTTP status.
    raise NotImplementedError
"""
https://tools.ietf.org/html/rfc4918#page-50
9.7. PUT Requirements
9.7.1. PUT for Non-Collection Resources
A PUT performed on an existing resource replaces the GET response
entity of the resource. Properties defined on the resource may be
recomputed during PUT processing but are not otherwise affected. For
example, if a server recognizes the content type of the request body,
it may be able to automatically extract information that could be
profitably exposed as properties_list.
A PUT that would result in the creation of a resource without an
appropriately scoped parent collection MUST fail with a 409
(Conflict).
A PUT request allows a client to indicate what media type an entity
body has, and whether it should change if overwritten. Thus, a
client SHOULD provide a Content-Type for a new resource if any is
known. If the client does not provide a Content-Type for a new
resource, the server MAY create a resource with no Content-Type
assigned, or it MAY attempt to assign a Content-Type.
Note that although a recipient ought generally to treat metadata
supplied with an HTTP request as authoritative, in practice there's
no guarantee that a server will accept client-supplied metadata
(e.g., any request header beginning with "Content-"). Many servers
do not allow configuring the Content-Type on a per-resource basis in
the first place. Thus, clients can't always rely on the ability to
directly influence the content type by including a Content-Type
request header.
9.7.2. PUT for Collections
This specification does not define the behavior of the PUT method for
existing collections. A PUT request to an existing collection MAY be
treated as an error (405 Method Not Allowed).
The MKCOL method is defined to create collections.
"""
async def do_put(self, request: DAVRequest) -> DAVResponse:
    """Handle PUT: 412 on bad/failed preconditions, 423 when locked."""
    if not request.lock_token_is_parsed_success:
        return DAVResponse(412)

    # If-header ETag precondition
    if request.lock_token_etag:
        if await self._do_get_etag(request) != request.lock_token_etag:
            return DAVResponse(412)

    # the lock to check is the one named in the If header, if any
    locked_path = (
        request.src_path
        if request.lock_token_path is None
        else request.lock_token_path
    )
    if await self.dav_lock.is_locking(locked_path, request.lock_token):
        return DAVResponse(423)

    return DAVResponse(await self._do_put(request))

async def _do_put(self, request: DAVRequest) -> int:
    # Subclasses store the request body and return an HTTP status.
    raise NotImplementedError

async def _do_get_etag(self, request: DAVRequest) -> str:
    # Subclasses return the current ETag for the target resource.
    raise NotImplementedError
"""
https://tools.ietf.org/html/rfc4918#page-51
9.8. COPY Method
9.8.5. Status Codes
In addition to the general status codes possible, the following
status codes have specific applicability to COPY:
201 (Created) - The source resource was successfully copied. The
COPY operation resulted in the creation of a new resource.
204 (No Content) - The source resource was successfully copied to a
preexisting destination resource.
207 (Multi-Status) - Multiple resources were to be affected by the
COPY, but errors on some of them prevented the operation from taking
place. Specific error messages, together with the most appropriate
of the source and destination URLs, appear in the body of the multi-
status response. For example, if a destination resource was locked
and could not be overwritten, then the destination resource URL
appears with the 423 (Locked) status.
403 (Forbidden) - The operation is forbidden. A special case for
COPY could be that the source and destination resources are the same
resource.
409 (Conflict) - A resource cannot be created at the destination
until one or more intermediate collections have been created. The
server MUST NOT create those intermediate collections automatically.
412 (Precondition Failed) - A precondition header check failed, e.g.,
the Overwrite header is "F" and the destination URL is already mapped
to a resource.
423 (Locked) - The destination resource, or resource within the
destination collection, was locked. This response SHOULD contain the
'lock-token-submitted' precondition element.
502 (Bad Gateway) - This may occur when the destination is on another
server, repository, or URL namespace. Either the source namespace
does not support copying to the destination namespace, or the
destination namespace refuses to accept the resource. The client may
wish to try GET/PUT and PROPFIND/PROPPATCH instead.
507 (Insufficient Storage) - The destination resource does not have
sufficient space to record the state of the resource after the
execution of this method.
"""
async def do_copy(self, request: DAVRequest) -> DAVResponse:
    """Handle COPY inside this provider's prefix."""
    # copying between DAVProvider instances is not supported
    if not request.dst_path.startswith(self.dist_prefix):
        return DAVResponse(400)
    if request.depth is None:
        return DAVResponse(403)
    if await self.dav_lock.is_locking(request.dst_path, request.lock_token):
        return DAVResponse(423)

    return DAVResponse(await self._do_copy(request))

async def _do_copy(self, request: DAVRequest) -> int:
    # Subclasses perform the copy and return an HTTP status.
    raise NotImplementedError
"""
https://tools.ietf.org/html/rfc4918#page-56
9.9. MOVE Method
9.9.4. Status Codes
In addition to the general status codes possible, the following
status codes have specific applicability to MOVE:
201 (Created) - The source resource was successfully moved, and a new
URL mapping was created at the destination.
204 (No Content) - The source resource was successfully moved to a
URL that was already mapped.
207 (Multi-Status) - Multiple resources were to be affected by the
MOVE, but errors on some of them prevented the operation from taking
place. Specific error messages, together with the most appropriate
of the source and destination URLs, appear in the body of the multi-
status response. For example, if a source resource was locked and
could not be moved, then the source resource URL appears with the 423
(Locked) status.
403 (Forbidden) - Among many possible reasons for forbidding a MOVE
operation, this status code is recommended for use when the source
and destination resources are the same.
409 (Conflict) - A resource cannot be created at the destination
until one or more intermediate collections have been created. The
server MUST NOT create those intermediate collections automatically.
Or, the server was unable to preserve the behavior of the live
properties_list and still move the resource to the destination (see
'preserved-live-properties_list' postcondition).
412 (Precondition Failed) - A condition header failed. Specific to
MOVE, this could mean that the Overwrite header is "F" and the
destination URL is already mapped to a resource.
423 (Locked) - The source or the destination resource, the source or
destination resource parent, or some resource within the source or
destination collection, was locked. This response SHOULD contain the
'lock-token-submitted' precondition element.
502 (Bad Gateway) - This may occur when the destination is on another
server and the destination server refuses to accept the resource.
This could also occur when the destination is on another sub-section
of the same server namespace.
"""
async def do_move(self, request: DAVRequest) -> DAVResponse:
    """Handle MOVE inside this provider's prefix."""
    if not request.dst_path.startswith(self.dist_prefix):
        # moving between DAVProvider instances is not supported
        return DAVResponse(400)

    # both endpoints must be unlocked (source checked first)
    for path in (request.src_path, request.dst_path):
        if await self.dav_lock.is_locking(path):
            return DAVResponse(423)

    return DAVResponse(await self._do_move(request))

async def _do_move(self, request: DAVRequest) -> int:
    # Subclasses perform the move and return an HTTP status.
    raise NotImplementedError
"""
https://tools.ietf.org/html/rfc4918#page-61
9.10. LOCK Method
9.10.6. LOCK Responses
In addition to the general status codes possible, the following
status codes have specific applicability to LOCK:
200 (OK) - The LOCK request succeeded and the value of the DAV:
lockdiscovery property is included in the response body.
201 (Created) - The LOCK request was to an unmapped URL, the request
succeeded and resulted in the creation of a new resource, and the
value of the DAV:lockdiscovery property is included in the response
body.
409 (Conflict) - A resource cannot be created at the destination
until one or more intermediate collections have been created. The
server MUST NOT create those intermediate collections automatically.
423 (Locked), potentially with 'no-conflicting-lock' precondition
code - There is already a lock on the resource that is not compatible
with the requested lock (see lock compatibility table above).
412 (Precondition Failed), with 'lock-token-matches-request-uri'
precondition code - The LOCK request was made with an If header,
indicating that the client wishes to refresh the given lock.
However, the Request-URI did not fall within the scope of the lock
identified by the token. The lock may have a scope that does not
include the Request-URI, or the lock could have disappeared, or the
token may be invalid.
"""
async def do_lock(self, request: DAVRequest) -> DAVResponse:
    """Handle LOCK: refresh an existing lock or create a new one."""
    # TODO
    if not (
        request.body_is_parsed_success and request.lock_token_is_parsed_success
    ):
        return DAVResponse(400)

    if request.lock_token:
        # a submitted token means the client wants a refresh
        lock_info = await self.dav_lock.refresh(request.lock_token)
    else:
        # otherwise acquire a brand new lock
        lock_info = await self.dav_lock.new(request)
    if lock_info is None:
        return DAVResponse(423)

    headers = {
        b"Lock-Token": "opaquelocktoken:{}".format(lock_info.token).encode("utf-8"),
    }
    message = self._create_lock_response(lock_info)
    return DAVResponse(status=200, headers=headers, message=message)
def _create_lock_response(self, lock_info: DAVLockInfo) -> bytes:
    """Serialize the ``DAV:lockdiscovery`` body for a successful LOCK."""
    data = {
        "D:prop": {
            "@xmlns:D": "DAV:",
            "D:lockdiscovery": self._create_data_lock_discovery(lock_info),
        }
    }
    xml = xmltodict.unparse(data, short_empty_elements=True)
    return xml.replace("\n", "").encode("utf-8")
"""
https://tools.ietf.org/html/rfc4918#page-68
9.11. UNLOCK Method
9.11.1. Status Codes
In addition to the general status codes possible, the following
status codes have specific applicability to UNLOCK:
204 (No Content) - Normal success response (rather than 200 OK, since
200 OK would imply a response body, and an UNLOCK success response
does not normally contain a body).
400 (Bad Request) - No lock token was provided.
403 (Forbidden) - The currently authenticated principal does not have
permission to remove the lock.
409 (Conflict), with 'lock-token-matches-request-uri' precondition -
The resource was not locked, or the request was made to a Request-URI
that was not within the scope of the lock.
"""
async def do_unlock(self, request: DAVRequest) -> DAVResponse:
    """Handle UNLOCK.

    Status codes per RFC 4918 9.11.1 (quoted above):
    - 400 Bad Request: no lock token was provided.
    - 204 No Content: the lock was released.
    - 409 Conflict: the token matched no held lock (the resource was
      not locked, or the request was out of the lock's scope).

    The previous code returned 409 for a missing token and 400 for a
    failed release -- the opposite of the RFC mapping.
    """
    if request.lock_token is None:
        # RFC 4918: "400 (Bad Request) - No lock token was provided."
        return DAVResponse(400)

    released = await self.dav_lock.release(request.lock_token)
    if released:
        return DAVResponse(204)
    # token did not correspond to a lock: resource was not locked
    return DAVResponse(409)
async def get_options(self, request: DAVRequest) -> DAVResponse:  # TODO
    """Answer OPTIONS with the supported methods and DAV compliance classes."""
    allow = ",".join(DAV_METHODS).encode("utf-8")
    return DAVResponse(
        status=200,
        headers={
            b"Allow": allow,
            b"DAV": b"1, 2",
        },
    )
|
# Author:ambiguoustexture
# Date: 2020-02-02

# NLP-100 style exercise: take the first letter of the words at these
# 1-indexed positions and the first two letters of every other word
# (yields periodic-table element symbols from the mnemonic sentence).
SINGLE_LETTER_POSITIONS = {1, 5, 6, 7, 8, 9, 15, 16, 19}

# fixes: no longer shadows the builtin `str`; uses enumerate instead of
# zip(words, range(len(words))); membership test uses a set
sentence = (
    "Hi He Lied Because Boron Could Not Oxidize Fluorine. "
    "New Nations Might Also Sign Peace Security Clause. Arthur King Can."
)
words = sentence.split(" ")

res = [
    word[0] if position in SINGLE_LETTER_POSITIONS else word[:2]
    for position, word in enumerate(words, start=1)
]
print(res)
|
#!/usr/bin/env python3
"""Merge two files of sorted integers into a third file."""


def merge_two_files(fname1: str, fname2: str) -> str:
    """Merge two files of ordered numbers (positive or negative), one
    number per line, into a new file preserving the order, and return
    the new file's name ('file3.txt').

    Fixes over the original:
    - files are opened with ``with`` so handles are always closed;
    - values are compared with ``heapq.merge`` instead of re-sorting the
      pending set on every step;
    - a leading ``0`` is no longer dropped (the old ``if val_1:`` test
      treated falsy numbers as "file empty").

    Reading a file stops at EOF or at the first non-integer line (e.g. a
    trailing blank line), matching the original behaviour.
    """
    import heapq

    def read_numbers(file):
        # Yield ints line by line; stop at the first non-integer line.
        for line in file:
            content = line.rstrip("\n")
            if not content.lstrip("-").isdigit():
                break
            yield int(content)

    out_name = "file3.txt"
    with open(fname1) as f1, open(fname2) as f2, open(out_name, "w") as f3:
        # heapq.merge lazily merges the two already-sorted streams
        for value in heapq.merge(read_numbers(f1), read_numbers(f2)):
            f3.write(str(value) + "\n")
    return out_name


if __name__ == "__main__":
    merge_two_files("file1.txt", "file2.txt")
|
# at this point we have the data needed to build the graph
from twitter_data_retrieve.twitter_data_retriever import *
def graph_from_data(source=SOURCE, verbose=True):
    """Build the edge files from the per-person tweet data on disk.

    NOTE(review): Python 2 code (print statements).  The ``source``
    parameter is never used inside the function body -- the module-level
    ``SOURCE`` constant is removed from the people list instead; confirm
    whether ``source`` was meant to be used.
    """
    # person -> {tweet filename -> (field0, field1) from the file's first line}
    tweets_map = {}
    people = get_dir_people()
    # drop the root account from the people to scan
    people.remove(SOURCE)
    base_path = os.path.join(os.pardir, DATA_DIRECTORY, PEOPLE_DIRECTORY)
    for p in people:
        tweets_map[p] = {}
        person_path = os.path.join(base_path, p)
        tweets = [name for name in os.listdir(person_path)]
        for t in tweets:
            # each tweet file starts with one tab-separated line
            with open(os.path.join(person_path, t), 'r') as f:
                line = f.readline().strip("\n")
                line = line.split("\t")
                tweets_map[p][t] = (line[0], line[1])
    for p in people:
        for k in tweets_map[p]:
            tup = tweets_map[p][k]
            if verbose:
                print "<-------------------------------------------------------------------------------------->"
                print "Adding edge: ", tup[1], "\t", p
            # walk the retweet chain until it reaches SOURCE (see add_edges)
            add_edges(tweets_map, tup[1], tup[0], tup[1], p)
def add_edges(tweets_map, person, tweet, source, target, verbose=True):
    """Append the (source, target) edge to the tweet file once the chain
    reaches SOURCE; otherwise recurse one hop up the chain.

    NOTE(review): recursion assumes every chain eventually reaches
    SOURCE; a cycle in tweets_map would recurse forever.
    """
    if verbose:
        print "person: ", person
        print "tweet: ", tweet
        print "source: ", source
        print "target: ", target
        print
    if person == SOURCE:
        # base case: append the edge line to the root account's tweet file
        path = os.path.join(os.pardir, DATA_DIRECTORY, PEOPLE_DIRECTORY, person)
        with open(os.path.join(path, tweet), 'a') as f:
            edge = "{}\t{}\n".format(source,target)
            f.write(edge)
    else :
        # follow the chain: look up this person's record for the tweet
        tup = tweets_map[person][tweet]
        add_edges(tweets_map, tup[1], tup[0], source, target)


if __name__ == "__main__":
    graph_from_data()
#coding:utf-8
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email,EqualTo
class UserSearchForm(Form):
    """Search form: look up a user by username.

    Labels/messages are user-facing Chinese runtime strings
    (username / "username must not be empty" / search) -- left untouched.
    """

    input = StringField(u'用户名:',validators=[DataRequired(message=u'用户名不能为空')])
    submit = SubmitField(u'查找')
class UserModForm(Form):
    """Moderation form toggling a user's flags (labels are Chinese
    runtime strings: ban user / member / content manager / permission
    manager / apply)."""

    banned = BooleanField(u'封禁该用户')
    user = BooleanField(u'会员')
    manager = BooleanField(u'内容管理员')
    s_manager = BooleanField(u'权限管理员')
    submit = SubmitField(u'设置')
class RoleChooseForm(Form):
    """Search form: look up a role by role name (Chinese runtime labels:
    role name / "role name must not be empty" / search)."""

    input = StringField(u'角色名:',validators=[DataRequired(message=u'角色名不能为空')])
    submit = SubmitField(u'查找')
class RoleModForm(Form):
    """Role permission form (Chinese runtime labels: follow / comment /
    write blog posts / manage comments / manage blog posts / manage
    personal homepage / apply)."""

    follow = BooleanField(u'关注')
    comment = BooleanField(u'评论')
    write_articles = BooleanField(u'撰写博客')
    manage_comment = BooleanField(u'管理评论')
    manage_articles = BooleanField(u'管理博客')
    manage_homepage = BooleanField(u'管理个人主页')
    submit = SubmitField(u'设置')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:18:28 2018
@author: user
字元次數計算
"""
def compute(a, b):
    """Print and return how many times substring *b* occurs in *a*.

    Returning the count is new but backward-compatible (the original
    returned None and only printed).
    """
    c = a.count(b)
    print("{:} occurs {:} time(s)".format(b, c))
    return c


if __name__ == "__main__":
    # guarded so importing the module no longer blocks on stdin
    a = input()
    b = input()
    compute(a, b)
# Author: ambiguoustexture
# Date: 2020-03-11
import pickle
import numpy as np
from scipy import io
def sim_cos(word_a, word_b):
    """Cosine similarity of two vectors; -1 when either has zero norm."""
    denominator = np.linalg.norm(word_a) * np.linalg.norm(word_b)
    if denominator != 0:
        return np.dot(word_a, word_b) / denominator
    return -1
if __name__ == '__main__':
    # Load the word2vec-style context matrix and the token->row index map,
    # then report the similarity of the "United_States" and "U.S" vectors.
    file_context_matrix_X_w2v = './context_matrix_X_w2v'
    file_t_index_dict_w2v = './t_index_dict_w2v'
    with open(file_t_index_dict_w2v, 'rb') as t_index_dict_w2v:
        # NOTE: the name is rebound from the file handle to the unpickled dict.
        t_index_dict_w2v = pickle.load(t_index_dict_w2v)
    context_matrix_X_w2v = io.loadmat(file_context_matrix_X_w2v)['context_matrix_X_w2v']
    word_a = context_matrix_X_w2v[t_index_dict_w2v['United_States']]
    word_b = context_matrix_X_w2v[t_index_dict_w2v['U.S']]
    print('Cosine similarity between "United_States" and "U.S":', sim_cos(word_a, word_b))
|
# Read a distance in metres from stdin and print it converted to centimetres.
# (Prompt and output are in Portuguese.)
dist = int(input("Informe a distânica em metros: "))
print(f"{dist}m = {dist*100}cm")
|
from string import ascii_lowercase
# Advent of Code day 5 part 2: for each unit type, remove it entirely and
# fully react the remaining polymer (delete adjacent opposite-case pairs of
# the same letter until stable); record the shortest resulting length.
# Fix: the working string no longer shadows the builtin `input`.
with open('Day 5/data.dat', "r") as data:
    polymer_base = str(data.read())
min_length = len(polymer_base)
for removed_unit in ascii_lowercase:
    # Strip every occurrence of the candidate unit, both cases.
    polymer = polymer_base.replace(removed_unit, "").replace(removed_unit.upper(), "")
    changed = True
    while changed:
        # Keep deleting reacting (opposite-case adjacent) pairs until the
        # polymer stops shrinking.
        changed = False
        before = len(polymer)
        for unit in ascii_lowercase:
            polymer = polymer.replace(unit + unit.upper(), "")
            polymer = polymer.replace(unit.upper() + unit, "")
        if before > len(polymer):
            changed = True
    min_length = min(min_length, len(polymer))
with open('Day 5/results 2.dat', "w") as results:
    results.write(str(min_length))
"""Main assignment module."""
import csv
import utils
def get_must_dict():
    """Read the csv and return its rows as a list of OrderedDict-like dicts.

    The file is read eagerly inside a context manager so the handle is closed
    before returning (previously a live DictReader over a never-closed file
    object was returned -- a resource leak). A list supports everything
    callers do with the old reader (iteration, list(), sorted()).
    """
    with open("Mobile Phone Masts.csv", "r") as musts_file:
        return list(csv.DictReader(musts_file))
def first_five_lowest_current_rent():
    """Return the five masts with the lowest Current Rent, ascending."""
    by_rent = sorted(get_must_dict(), key=lambda must: float(must["Current Rent"]))
    return by_rent[:5]
def twenty_five_lease():
    """Return only the masts whose lease runs for exactly 25 years."""
    return [must for must in get_must_dict() if int(must["Lease Years"]) == 25]
def total_rent(musts):
    """Sum the Current Rent values of the given masts."""
    return sum(float(must["Current Rent"]) for must in musts)
def tenants():
    """Map each cleaned tenant name to the number of masts it holds."""
    counts = dict()
    for must in get_must_dict():
        cleaned = utils.clean_tenant_name(must["Tenant Name"])
        counts[cleaned] = counts.get(cleaned, 0) + 1
    return counts
def list_rental_by_lease_date():
    """Return masts whose lease started strictly between 1 Jun 1999 and 31 Aug 2007."""
    start, end = utils.str2date("1 Jun 1999"), utils.str2date("31 Aug 2007")
    return [
        must
        for must in get_must_dict()
        if start < utils.str2date(must["Lease Start Date"]) < end
    ]
|
"""
Routes and views for the api application.
"""
from datetime import datetime
from flask import render_template , jsonify
from globalsuperstore import app
import json
import requests
from globalsuperstore.pgsql import getStoreData
@app.route('/table')
def table():
    """Render the store data fetched from the database.

    (Docstring fixed: it previously said "Renders the contact page", a
    copy-paste from another view.)
    """
    print("rendering table from python application")
    # get api info from database
    data = getStoreData()
    return data
# @app.route('/chart')
# def chart():
# """Renders the contact page."""
# print("rendering chart from python application")
# # get api info from database
# apikey, baseurl = getApiInfo()
# queryUrl = baseurl + "&api_key="+ apikey
# response = requests.get(queryUrl).json()
# return response |
import os
import dd
import time
import json
import gmail
import requests
import traceback
import sched, time
from flask import Flask
from flask_caching import Cache
# Flask app with a simple in-process cache, used to memoize the HiveOS
# auth token between scheduler rounds.
app = Flask(__name__)
cache = Cache(config={'CACHE_TYPE': 'SimpleCache'})
cache.init_app(app)
# Deployment configuration, all taken from environment variables.
MINE_POOL=os.getenv("mine_pool_url")  # mining-pool stats endpoint (optional)
HIVE_EMAIL=os.getenv('hive_email')
HIVE_PASSWORD=os.getenv("hive_password")
HOST_ENV=os.getenv("host")  # sent as the Host header on outbound requests
TTL=os.getenv("ttl")  # seconds between metric rounds
NOMICS_EXCHANGE_KEY=os.getenv("nomics_exchange_key")
CURRENCY_API_KEY=os.getenv("currency_api_key")
s = sched.scheduler(time.time, time.sleep)  # drives the periodic metric loop
api_endpoint = "https://api2.hiveos.farm/api/v2/"
# Default headers for every HiveOS API call.
headers = {
    'Accept': 'application/json',
    'Content-Type': 'application/json'
}
@app.route("/")
def index():
    """Health-check root endpoint."""
    greeting = "Hello World!"
    return greeting
def get_otp_hive(retry = 0):
    """Request a login OTP from HiveOS and read it back from gmail.

    Returns the OTP as an int, or 0 if none could be obtained after retries
    (at most two extra attempts via recursion on *retry*).
    """
    print("Getting otp from HiveOS")
    current_otp = 0
    try:
        get_otp_ep = "auth/login/confirmation"
        # Ask HiveOS to email a login confirmation code.
        response = requests.post(api_endpoint+get_otp_ep,
            headers=headers,
            data=json.dumps({
                "login":HIVE_EMAIL
            })
        )
        print("Sent otp request, waiting for 10 seconds before reading email")
        time.sleep(10)  # give the confirmation email time to arrive
        if response.status_code == 200:
            otps = gmail.get_otp()
            print("Got these otps from gmail %s" % otps)
            if len(otps) >= 1:
                # Taking the first entry -- presumably newest-first; verify
                # gmail.get_otp() ordering.
                current_otp = otps[0]
                return current_otp
        if current_otp == 0 and retry <= 1:
            # Either the request failed or no OTP email was found; retry.
            print("OTP not found, retrying again")
            return get_otp_hive(retry=retry+1)
    except Exception as ex:
        if retry <= 1:
            return get_otp_hive(retry = retry+1)
    print("Even after retrying unable to get OTP.")
    return current_otp
def auth_hive():
    """Return a HiveOS bearer token, using the cache when one is still valid.

    Performs the OTP-based login when no cached token exists and caches the
    new token for its reported lifetime. Returns "" on failure.
    """
    print("Checking for hive auth token cache")
    if cache.get("hive_auth_token"):
        print("Found Hive auth token cache, returning")
        return cache.get("hive_auth_token")
    print("Hive auth token cache not found")
    otp = get_otp_hive()
    auth_api_url = "auth/login"
    if otp > 0:
        # we have otp, send login request
        response = requests.post(api_endpoint + auth_api_url,
            headers=headers,
            data=json.dumps({
                "login": HIVE_EMAIL,
                "password": HIVE_PASSWORD,
                "twofa_code": str(otp),
                "remember": True
            })
        )
        print("auth login status code",response.status_code)
        if response.status_code == 200:
            data = response.json()
            if data.get("access_token", None):
                # Cache until the token expires so the OTP dance is skipped.
                cache.set("hive_auth_token",data.get("access_token"), timeout=data.get("expires_in"))
                print("Got access_token from Hive caching it for future use")
                return data.get("access_token")
    print("Unable to get bearer token from hive api")
    return ""
def send_metrics_chain(data, key=""):
    """Recursively flatten *data* into dotted metric keys and send each leaf.

    Lists contribute their index and dicts their key to the dotted path;
    scalar leaves are forwarded to dd.send_metrics. None values are skipped.
    isinstance() replaces `type(...) is` so dict/list subclasses (e.g.
    OrderedDict) are flattened instead of being sent as opaque leaves.
    """
    if data is None:
        return
    if isinstance(data, list):
        for idx, item in enumerate(data):
            child_key = "%s.%s" % (key, idx) if key else str(idx)
            send_metrics_chain(item, child_key)
    elif isinstance(data, dict):
        for name, item in data.items():
            child_key = "%s.%s" % (key, str(name)) if key else str(name)
            send_metrics_chain(item, child_key)
    else:
        dd.send_metrics(key, data)
def get_farm_data():
    """Fetch all HiveOS farms and emit every field as a "hive.*" metric."""
    bearer_token = auth_hive()
    auth_header = {
        "Authorization": "Bearer %s" % bearer_token,
        'Host': HOST_ENV
    }
    response = requests.get(api_endpoint+"farms", headers={**auth_header, **headers})
    if response.status_code == 200:
        farms_data = response.json()
        # from pprint import pprint; pprint(farms_data)
        farms_data = farms_data.get("data")
        for farm in farms_data :
            # Flatten each farm payload under the "hive" metric prefix.
            send_metrics_chain(farm, key="hive")
    print("Round completed with DD")
def send_mining_wallet():
    """Fetch pool wallet stats and forward them as "pool.*" metrics.

    The per-block "rewards"/"sumrewards" arrays are dropped before
    flattening (they would explode into a huge number of metrics).
    """
    if MINE_POOL:
        extra_headers = {
            "host": HOST_ENV,
        }
        response = requests.get(MINE_POOL,headers={**headers, **extra_headers})
        if response.status_code == 200:
            data = response.json()
            if data.get("rewards",None):
                del data["rewards"]
            if data.get("sumrewards",None):
                del data["sumrewards"]
            print("Sending mining metrics to DD")
            send_metrics_chain(data, key="pool")
def fetch_latest_price():
    """Fetch crypto exchange rates from Nomics and emit "nomics.<cur>" metrics."""
    if NOMICS_EXCHANGE_KEY:
        try:
            url = "https://api.nomics.com/v1/exchange-rates?key=%s" % NOMICS_EXCHANGE_KEY
            # Browser-like User-Agent -- the API appears to reject default
            # clients; TODO confirm this is still required.
            response= requests.get(url,
                headers= {**headers, **{
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
                }})
            if response.status_code == 200:
                data = response.json()
                for cur in data:
                    if cur.get("currency") and cur.get("rate"):
                        dd.send_metrics("nomics."+cur.get("currency"),cur.get("rate"))
            else:
                print("Got %s while trying to access %s" % (response.status_code, url))
        except Exception as ex:
            print("Please check exchange API url or NOMICS_EXCHANGE_KEY", ex)
def fetch_fiat_val_base_usd(fiat="INR"):
    """Fetch the USD->*fiat* exchange rate and emit it as "fiat.<code>"."""
    if CURRENCY_API_KEY:
        try:
            url = "https://freecurrencyapi.net/api/v2/latest?apikey="+CURRENCY_API_KEY
            response= requests.get(url,
                headers= {**headers, **{
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
                }})
            if response.status_code == 200:
                data = response.json()
                cur = data.get("data",{})
                if cur.get(fiat):
                    dd.send_metrics("fiat."+fiat,cur.get(fiat))
            else:
                print("Got %s while trying to access %s" % (response.status_code, url))
        except Exception as ex:
            print("Please check freecurrencyapi.net API url or CURRENCY_API_KEY", ex)
def start_metrics(s):
    """Run one metrics round, then reschedule itself every TTL seconds.

    Exits the process when any required environment variable is missing.
    """
    if not (HIVE_EMAIL and HIVE_PASSWORD and os.getenv('email')
        and os.getenv('password') and os.getenv("dd_key") and TTL):
        print("Please set all environment variables before starting.")
        exit(1)
    try:
        print("Monitoring start...")
        fetch_fiat_val_base_usd()
        fetch_latest_price()
        send_mining_wallet()
        print("Calling, get_farm_data()")
        get_farm_data()
    except Exception as ex:
        # Log and keep going; the next scheduled round may succeed.
        print(traceback.format_exc())
        print("Unable to fetch data!!!")
    s.enter(int(TTL), 1, start_metrics, (s,))
    s.run()
# Kick off the first metrics round at import/run time.
start_metrics(s)
|
# Score weekly calorie totals: chunk the list into groups of k, print each
# chunk's sum, and adjust the score s (-1 below `lower`, +1 above `upper`).
calories = [6, 13, 8, 7, 10, 1, 12, 11]
n = len(calories)
lower = 5
upper = 37
s = 0
k = 6
for start in range(0, n, k):
    count = sum(calories[start:start + k])
    print(count)
    if count < lower:
        s -= 1
    elif count > upper:
        s += 1
print(s)
__author__ = 'Shafikur Rahman'
# No URL routes are defined in this module yet.
urlpatterns = []
|
""" Python I/O tools
"""
import numpy as np
def print2D(data, fmt=None):
    """Print a 2D array/list to stdout.

    When *fmt* is given (e.g. "{:d} {:f}") each row is rendered through it;
    otherwise the row's elements are printed space-separated.
    """
    rows = np.asarray(data)
    for current in rows:
        if fmt is not None:
            print(fmt.format(*current))
        else:
            print(*current)
    return
def write2D(data, outname=None, fmt=None, header=None):
    """Write a 2D array/list to *outname*; print to stdout when outname is None.

    fmt ("{:d} {:f}"...) formats each row; *header* is written first if given.

    Fixes vs. the previous version: the stdout path now returns instead of
    falling through to open(None); the output file is closed via a context
    manager; rows written without *fmt* are str()-converted so numeric data
    no longer crashes str.join.
    """
    if outname is None:
        print2D(data, fmt)
        return
    arr = np.asarray(data)
    if fmt is not None:
        fmt = fmt.rstrip() + '\n'
    with open(outname, 'w+') as outf:
        if header is not None:
            outf.writelines(header.rstrip() + '\n')
        for row in arr:
            if fmt is not None:
                outf.writelines(fmt.format(*row))
            else:
                outf.writelines(" ".join(str(item) for item in row) + '\n')
    return
|
import gspread
import pandas as pd
from google.oauth2.service_account import Credentials
def get_data(sheet_name):
    """Return the named worksheet of the monitoring spreadsheet as a DataFrame."""
    scopes = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
    creds = Credentials.from_service_account_file('client_secret.json', scopes=scopes)
    gc = gspread.authorize(creds)
    records = gc.open('Dummy_LITM_Monitoring').worksheet(sheet_name).get_all_records()
    return pd.DataFrame(records)
#!/usr/bin/env python
import pygame
import mimo
import random
from utils import utils
from utils import neopixelmatrix as graphics
from utils.NeoSprite import NeoSprite, AnimatedNeoSprite, TextNeoSprite, SpriteFromFrames
from utils.NewsProvider import news
from utils import constants
from scenes.BaseScene import SceneBase
# StartEvent Scene
# PLAY STATUS #1
# should start and print a new mission to accomplish to the player
# next scene will be edit
class BeginEventScene(SceneBase):
    """StartEvent scene (PLAY STATUS #1).

    Presents a new mission (news event) to the player; the next scene is
    the Edit scene, entered by pressing the "i" key.
    """
    def __init__(self):
        SceneBase.__init__(self)
        # music playlist; one track is picked at random in SetupLayout
        self.MX = []
        self.MX.append('assets/audio/MX/DirtySoil.ogg')
        self.MX.append('assets/audio/MX/DystopianBallad.ogg')
        self.MX.append('assets/audio/MX/LazyBartender.ogg')
        self.MX.append('assets/audio/MX/LostInParadise.ogg')
        self.MX.append('assets/audio/MX/PapayaJuice.ogg')
        self.MX.append('assets/audio/MX/RetroDance.ogg')
        self.MX.append('assets/audio/MX/SunnyBeach.ogg')
        self.MX.append('assets/audio/MX/TimeTraveler.ogg')
        self.MX.append('assets/audio/MX/WeirdJungle.ogg')
        self.MX.append('assets/audio/MX/WhereAreYou.ogg')
        self.SetupMimo()
        # initialize state
        # setup the layout for the scene
        self.SetupLayout()
        # load event, title, description, objective and material
        # currento_evento = int(random.random() * 4)
        # cycle through events 0..3 in order, one step per scene instance
        if constants.currento_evento == 3:
            constants.currento_evento = -1
        constants.currento_evento += 1
        self.LoadEvent(news[constants.currento_evento])
    def SetupMimo(self):
        """Reset the physical MIMO board lights/buttons for this scene."""
        mimo.set_led_brightness(150)
        mimo.set_buttons_enable_status(True, False)
        mimo.set_material_ligths_on([0,0, 1,0, 2,0, 3,0, 4,0, 5,0, 6,0, 7,0, 8,0, 9,0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26,0, 27,0])
        mimo.set_optimization_ligths_on([0,0, 1,0, 2,0, 3,0, 4,0])
        mimo.set_material_buttons_mode([0,1, 1,1, 2,1, 3,1, 4,1, 5,1, 6,0, 7,0])
        mimo.set_independent_lights(True, True)
        mimo.set_material_leds_color([7, 0xf7, 0x5a, 0xff])
    def SetupLayout(self):
        """Build every on-screen element: title, news frame, goal bar, UI."""
        utils.play_music(self.MX[(int(random.random()*10))], -1, 0.1, 0.6)
        # add da fact
        self.fact_title = utils.Text(
            '',
            self.title_font,
            color = constants.PALETTE_TITLES_DARK_BLUE
        )
        self.fact_title.setAnchor(0.5, 0)
        self.fact_title.SetPosition(constants.VIEWPORT_CENTER_X, 100)
        self.current_evt_frame = utils.Sprite(
            constants.SPRITES_EDITION + 'current-nws.png'
        )
        self.current_evt_frame.setAnchor(0.5, 0.5)
        self.current_evt_frame.SetPosition(constants.VIEWPORT_CENTER_X, 303)
        self.fact_summary = utils.Text('', self.normal_font,
            color = constants.PALETTE_TITLES_DARK_BLUE)
        self.fact_summary.setAnchor(0.5, 0)
        self.fact_summary.SetPosition(constants.VIEWPORT_CENTER_X, 463)
        # self.fact_argument = utils.Text('', self.normal_font,
        #     color = constants.PALETTE_TITLES_DARK_BLUE)
        # self.fact_argument.setAnchor(0.5, 0)
        # self.fact_argument.SetPosition(constants.VIEWPORT_CENTER_X, 496)
        # add da goal
        # label text and pixel width per language (en/es)
        goal_layout = {
            'title': { 'en': 'goal', 'es': 'objetivo' },
            'width': { 'en': 115, 'es': 198 }
        }
        self.goal_title = utils.Text(
            goal_layout['title'][constants.language] + ':',
            self.subtitle_font,
            color = constants.PALLETE_BACKGROUND_BLUE
        )
        self.goal_title.setAnchor(0, 0)
        self.goal_title.SetPosition(78, 554)
        self.goal_desc = utils.Text('', self.subtitle_font, color = constants.PALLETE_BACKGROUND_BLUE)
        self.goal_desc.setAnchor(0, 0)
        self.goal_desc.SetPosition(78 + goal_layout['width'][constants.language], 554)
        # background for other news:
        self.back_news = []
        for i in range(1, 3):
            temp = utils.Sprite(
                constants.SPRITES_EDITION + 'next-nws.png'
            )
            temp.setAnchor(0.5, 0.5)
            temp.SetPosition(constants.VIEWPORT_CENTER_X+273*i, 303)
            self.back_news.append(temp)
        # add da ui
        self.SetupUI()
    def ProcessInput(self, events, pressed_keys):
        """Advance to the Edit scene when the "i" key is pressed."""
        if self.closing: return
        for event in events:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_i:
                self.CloseEvent(0.5)
                self.UI_SwitchScene = utils.get_sound('assets/audio/SFX/Scanning/MG1_ObjSort.ogg')
                self.UI_SwitchScene.play()
                # switch just after the 0.5s close animation finishes
                self.AddTrigger(0.51, self, 'SwitchToScene', "Edit")
    def Update(self, dt):
        SceneBase.Update(self, dt)
    def RenderBody(self, screen):
        """Draw the news panel, icon, summary texts and the goal bar."""
        # for back_image in self.back_news:
        #     back_image.RenderWithAlpha(screen)
        # render the layout
        self.fact_title.render(screen)
        self.current_evt_frame.RenderWithAlpha(screen)
        self.icon.RenderWithAlpha(screen)
        self.fact_summary.render(screen)
        # self.fact_argument.render(screen)
        pygame.draw.rect(screen, constants.PALLETE_BACKGROUND_TITLE_BLUE, (0, 540, 1280, 70))
        self.goal_title.render(screen)
        self.goal_desc.render(screen)
    def LoadEvent(self, event):
        """Fill the layout with the given news event's icon, texts and goal."""
        self.current_event = event
        self.icon = utils.Sprite(
            constants.EVENTS + self.current_event['ico']
        )
        self.icon.setAnchor(0.5, 0.5)
        self.icon.SetPosition(constants.VIEWPORT_CENTER_X, 303)
        self.fact_title.SetText(self.current_event['hdl'][constants.language])
        self.fact_summary.SetText(self.current_event['ovw'][constants.language])
        # self.fact_argument.SetText(self.current_event['arg'][constants.language])
        self.goal_desc.SetText(self.current_event['gol'][constants.language])
        # change the default order of the material
        random.shuffle(self.current_event['material'])
        #mimo.termal_print(self.current_event['hdl'].upper())
        #### print the news item on screen?
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
def uk_plot(shp_path, geo_code, df, var_name, title, cmap='coolwarm'):
    "Choropleth map of the given variable."
    print("\nGenerating plot...")
    map_df = gpd.read_file(shp_path)
    # Join the variable onto the ward geometries by geography code; wards
    # without data are dropped so they are not drawn.
    merged = map_df.merge(df[var_name], left_on=geo_code, right_on=df.index, how='left').dropna(subset=[var_name])
    fig, ax = plt.subplots(1,1, figsize=(8,7))
    ax.axis('off')
    ax.set_title(title)
    # Manual ScalarMappable so a colorbar can be attached to the map plot.
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=min(merged[str(var_name)]),vmax=max(merged[str(var_name)])))
    sm._A = []
    fig.colorbar(sm)
    merged.plot(column=str(var_name), cmap=cmap, linewidth=0.2, edgecolor='0.5', ax=ax)
def top_10(df, var_name, title):
    "Bar chart of the top 10 wards for the given variable"
    # Sort descending so the first ten rows are the largest values.
    df_desc = df.sort_values(by=[var_name], ascending=False)
    plt.figure(figsize=[12,6])
    plt.bar(df_desc.index[0:10], df_desc[var_name][0:10])
    plt.ylabel("Frequency")
    plt.xticks(fontsize=10)
    plt.title(title)
def categ(df, cat, r=False):
    """Add a column *cat* holding the most common move type per ward.

    *r* selects the rental column names instead of the sales ones.
    The DataFrame is modified in place and also returned.
    """
    price_columns = (
        ['MovesUnder250k', 'MovesOver250k']
        if r == False
        else ['RentUnder250', 'RentOver250']
    )
    cat_types = {
        'beds': ['Beds1to3', 'Beds4Plus'],
        'dwelling': ['Terraced', 'Flat', 'SemiDetached', 'Detached'],
        'price': price_columns,
    }
    # most common type in each ward: column with the row-wise maximum count
    df[cat] = df[cat_types[cat]].idxmax(axis=1)
    return df
def categ_plot(shp_path, geo_code, df, var_name, title):
    "Categorical plot for given variable"
    print("\nGenerating plot...")
    map_df = gpd.read_file(shp_path)
    # Wards missing from df are labelled rather than dropped, so the map
    # stays contiguous.
    merged = map_df.merge(df[var_name], left_on=geo_code, right_on=df.index, how='left').fillna(value='no data available') #.dropna(subset=[var_name])
    merged=merged.loc[~merged[geo_code].str.startswith('S', na=False)] # drop Scottish wards
    ax = plt.subplots(1,1, figsize=(8,7))[1]
    ax.axis('off')
    ax.set_title(title)
    merged.plot(column=var_name, cmap='tab20c', categorical=True, legend=True, ax=ax)
def ward_distance(df, map_df):
    """
    Calculate the distance (as the crow flies) between ward centroids.

    Note: this requires a shapefile that contains all wards present in df (UK).
    Returns one distance per df row, in the shapefile's CRS units.
    """
    print("Calculating distances...")
    dist_list = []
    for i in df.index:
        a = df['OriginWardCode'][i] # ward code
        ai = map_df.loc[map_df['wd16cd']==a].index # find index of first ward
        ma = map_df['geometry'][ai.item()] # find geometry of first ward
        mac = ma.centroid # centre point of first ward
        b = df['DestinationWardCode'][i] # ward code
        bi = map_df.loc[map_df['wd16cd']==b].index # find index of second ward
        mb = map_df['geometry'][bi.item()] # find geometry of second ward
        mbc = mb.centroid # centre point of second ward
        ab = mac.distance(mbc) # distance between centroids
        dist_list.append(ab)
    #df['distance'] = dist_list
    #df.to_csv("data/rentals_and_distance.csv")
    return dist_list
def ru_class(remap=True):
    """rural/urban classification of wards according to 2011 ONS census

    When *remap* is True the eight detailed RUC11 categories are collapsed
    in place to a binary Rural/Urban label. Returns the classification
    DataFrame.
    """
    rural_urban = pd.read_csv("data/Rural_Urban_Classification_2011_of_Wards_in_England_and_Wales.csv")
    #print(rural_urban['RUC11'].value_counts())
    # detailed ONS category -> binary Rural/Urban label
    ru_map = {
        'Rural village and dispersed': 'Rural', 'Rural village and dispersed in a sparse setting': 'Rural',
        'Rural town and fringe': 'Rural', 'Rural town and fringe in a sparse setting': 'Rural',
        'Urban major conurbation':'Urban', 'Urban minor conurbation':'Urban',
        'Urban city and town':'Urban', 'Urban city and town in a sparse setting': 'Urban'
    }
    if remap == True:
        rural_urban['RUC11'].replace(ru_map, inplace=True)
    # plot
    # shp_path = "data/shapefiles/EW_Wards_2011.shp"
    # categ_plot(shp_path, 'geo_code', rural_urban.set_index('WD11CD'), 'RUC11', 'Rural/Urban Classification - 2011')
    # plt.show()
    # merge with df
    #b = df.merge(rural_urban[['WD11CD','RUC11']], left_on='OriginWardCode', right_on=['WD11CD'], how='left')
    #b.to_csv("data/rentals_distance_ru.csv")
    return rural_urban
def Ward_to_LAD(df, df_types):
    """Aggregate the ward-level counts in *df* up to local authority districts."""
    map_df = gpd.read_file("data/shapefiles/GB_Wards_2016.shp")
    merged = map_df.merge(df, left_on='wd16cd', right_on=df.index, how='left')
    merged=merged.loc[~merged['wd16cd'].str.startswith('S', na=False)] # drop Scottish wards
    # aggregate to LADs
    lad_map = pd.pivot_table(merged, values=df_types, index='lad16cd', aggfunc=np.sum)
    return lad_map
def load_moves(r=False):
    """ Load sales or rental data.

    Note: the path to data files might need to be changed depending on how data is stored.

    Returns (df, df_types, rs): the raw DataFrame, the list of its count
    columns, and the tag 'rentals'/'sales' used for labelling output.
    """
    if r:
        rs = 'rentals'
        print("Loading rental data...")
        df = pd.read_csv("data/ZooplaRentals_Aggregate_NikLomax.txt", sep='\t')
        # Normalise the total column name so both datasets share "Total".
        df.rename(index=str, columns={'NumberOfRentals': 'Total'}, inplace=True)
        df_types = ['Total', 'RentUnder250', 'RentOver250',
            'Terraced', 'Flat', 'SemiDetached', 'Detached', 'Bungalow', 'PropertyTypeUnknown',
            'Beds1to3', 'Beds4Plus']
    else:
        rs = 'sales'
        print("Loading sales data...")
        df = pd.read_csv("data/ZooplaSales_Aggregate_NikLomax.txt", sep='\t')
        df.rename(index=str, columns={'NumberOfMoves': 'Total'}, inplace=True)
        df_types = ['Total', 'MovesUnder250k', 'MovesOver250k',
            'Terraced', 'Flat', 'SemiDetached', 'Detached',
            'Beds1to3', 'Beds4Plus']
    return df, df_types, rs
|
from django.apps import AppConfig
class BlackAppsConfig(AppConfig):
    # Django app-registry configuration for the "black_apps" application.
    name = 'black_apps'
|
#
# (C) 2013 Varun Mittal <varunmittal91@gmail.com>
# JARVIS program is distributed under the terms of the GNU General Public License v3
#
# This file is part of JARVIS.
#
# JARVIS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 3 of the License.
#
# JARVIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JARVIS. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import base64
import simplejson as json
import requests
from requests.auth import HTTPBasicAuth
from copy import deepcopy
is_appengine_environment = False
try:
from google.appengine.runtime.apiproxy_errors import DeadlineExceededError
from google.appengine.api import urlfetch
is_appengine_environment = True
except ImportError:
pass
from elasticsearch import helpers
from elasticsearch.exceptions import TransportError
from .exceptions import IndexException
from .conn import ElasticSearchClient
from .scored_document import EsSearchDocument, EsFieldBase
# Shared elasticsearch client connection used by every class in this module.
es_client_conn = ElasticSearchClient()
class EsIndex:
    """Thin wrapper over one elasticsearch index: get/update/put/delete/query."""
    transaction_header_size = 100
    # bulk request byte budget minus per-request header overhead
    max_transaction_size = 10000000 - transaction_header_size
    def __init__(self, name):
        # index names must be lowercase in elasticsearch
        self.__name = name.lower()
    def get(self, search_doc_id, doc_type):
        """Fetch one document by id; returns an EsSearchDocument or None."""
        document = es_client_conn.es.get(doc_type=doc_type, id=search_doc_id, index=self.__name, ignore=[400, 404])
        if 'found' not in document or not document['found']:
            return None
        document = document['_source']
        fields = []
        for name, value in zip(document.keys(), document.values()):
            fields.append(EsFieldBase(name=name, value=value))
        doc = EsSearchDocument(rank=document['_rank'], doc_type=doc_type, fields=fields, id=search_doc_id)
        return doc
    def update(self, doc, update_fields, doc_type):
        """Partial-update (upsert) one document's rank plus *update_fields*.

        Returns the number of successfully executed bulk actions.
        """
        actions = [{
            "_op_type": 'update',
            "_index": self.__name,
            "_type": doc_type,
            "_id": doc.doc_id,
            "doc": {'_rank': doc.rank},
            "doc_as_upsert": True
        }]
        for field in update_fields:
            actions[0]['doc'][field.name] = field.value
        results = helpers.bulk(client=es_client_conn.es, actions=actions,)
        del actions
        return results[0]
    def put(self, documents):
        """Index one document or a list of documents.

        NOTE(review): push() is called twice here and its return value is
        None (EsActions.push has no return statement), so this method
        returns None and the second push re-sends the same pending actions.
        This looks like it was meant to be
        `actions.push(); results = actions.getResults()` -- confirm.
        """
        __documents = []
        results = []
        if type(documents) == list:
            for document in documents:
                __documents.append(document.getDoc(self.__name))
        else:
            __documents.append(documents.getDoc(self.__name))
        actions = EsActions()
        for document in __documents:
            action = {
                "_index": document['index'],
                "_type": document['doc_type'],
                "_id": document['id'],
                "_source": document['body'],
            }
            actions.addAction(action)
        actions.push()
        results = actions.push()
        del actions
        return results
    def delete(self, search_doc_ids, doc_type):
        """Bulk-delete one id or a list of ids; returns the pushed ids."""
        actions = []
        if type(search_doc_ids) != list:
            search_doc_ids = [search_doc_ids]
        actions = EsActions()
        for doc_id in search_doc_ids:
            actions.addAction({
                "_op_type": 'delete',
                "_index": self.__name,
                "_type": doc_type,
                "_id": doc_id,
            })
        actions.push()
        results = actions.getResults()
        del actions
        return results
    def __put__(self, actions):
        """Bulk-send *actions*, retrying on App Engine deadline errors.

        Returns the action ids plus ids reported by successful index
        responses; raises IndexException on a failed index operation.
        """
        results = [action['_id'] for action in actions]
        if is_appengine_environment:
            # App Engine urlfetch can hit request deadlines; retry a few times.
            retry_count = 0
            while retry_count < 5:
                try:
                    __results = helpers.bulk(client=es_client_conn.es, actions=actions,)
                    break
                except DeadlineExceededError:
                    retry_count += 1
                    if retry_count == 5:
                        raise DeadlineExceededError
        else:
            __results = helpers.bulk(client=es_client_conn.es, actions=actions,)
        for result in __results[1]:
            if result['index']['status'] != 200:
                raise IndexException(result['index']['error'])
            results.append(result['index']['_id'])
        return results
    def query(self, query_object):
        """Run a search; returns an EsResultObject (empty on transport error)."""
        config = query_object.getSearchObject()
        # _rank is always fetched: result construction and sorting need it
        _source = config.get('_source', "")
        if len(_source) > 0 and not _source.endswith('_rank'):
            _source = "%s,_rank" % _source
        else:
            _source = "_rank"
        config['_source'] = _source
        config['index'] = self.__name
        match_only = False
        try:
            # 'match_only' present -> no rank sort; KeyError means it was
            # absent, so sort by _rank according to 'reverse'.
            del config['match_only']
            match_only = True
        except KeyError:
            if config['reverse']:
                config['sort'] = "_rank:asc"
            else:
                config['sort'] = "_rank:desc"
        reverse = config['reverse']
        del config['reverse']
        try:
            if is_appengine_environment:
                retry_count = 0
                while retry_count < 2:
                    try:
                        response = es_client_conn.es.search(**config)
                        break
                    except DeadlineExceededError:
                        retry_count += 1
                        if retry_count == 2:
                            raise DeadlineExceededError
            else:
                response = es_client_conn.es.search(**config)
        except TransportError:
            return EsResultObject()
        return EsResultObject(response, match_only=match_only, reverse=reverse)
    def query_filtered(self, query_object):
        """Query a custom /search/ endpoint on the first configured server.

        Returns the raw response body on HTTP 200, else None.
        """
        server = es_client_conn.SERVERS[0]
        credentials = server['http_auth'].split(':')
        payload = {'q': query_object.getQueryString(), 'm': query_object.getOffset(), 's': query_object.getLimit()}
        r = requests.get("%s/search/" % server['url'], auth=HTTPBasicAuth(credentials[0], credentials[1]), params=payload, timeout=60)
        if r.status_code == 200:
            return r.content
        return
class EsActions:
    """Accumulates elasticsearch bulk actions and flushes them in batches.

    addAction() queues one action and auto-flushes when the batch exceeds
    200 actions or the byte budget; push() sends whatever is pending;
    getResults() returns the ids of every action pushed so far.
    """
    transaction_header_size = 100
    # bulk request byte budget minus per-request header overhead
    max_transaction_size = 10000000 - transaction_header_size
    def __init__(self):
        self.__actions = []      # pending actions, not yet pushed
        self.__results = []      # ids of all actions pushed so far
        self.__action_size = 0   # approximate byte size of pending actions
    def __del__(self):
        del self.__actions
    def addAction(self, action):
        """Queue *action*; flush automatically when the batch is full.

        Returns the accumulated result ids when a flush happened, else None.
        """
        self.__actions.append(action)
        self.__action_size += sys.getsizeof(str(action))
        results = None
        if len(self.__actions) > 200 or self.__action_size > self.max_transaction_size:
            results = self.push()
        return results
    def push(self):
        """Send all pending actions via helpers.bulk and return result ids.

        Fixes vs. the previous version: the pending list is cleared after a
        send (a repeated push() used to re-send the same actions), and the
        accumulated ids are returned (push() used to return None, which
        broke callers assigning its result, e.g. EsIndex.put).
        """
        if not self.__actions:
            return self.__results
        if is_appengine_environment:
            # App Engine urlfetch can hit request deadlines; retry a few times.
            retry_count = 0
            while retry_count < 5:
                try:
                    __results = helpers.bulk(client=es_client_conn.es, actions=self.__actions,)
                    break
                except DeadlineExceededError:
                    retry_count += 1
                    if retry_count == 5:
                        raise DeadlineExceededError
        else:
            __results = helpers.bulk(client=es_client_conn.es, actions=self.__actions,)
        for action in self.__actions:
            self.__results.append(action['_id'])
        self.__actions = []
        self.__action_size = 0
        return self.__results
    def getResults(self):
        """Ids of every action pushed so far."""
        return self.__results
class EsQueryObject:
    """Collects the keyword arguments for an elasticsearch search call.

    The internal dict mirrors es.search(**kwargs); the extra keys 'reverse'
    and 'match_only' are consumed (and removed) by EsIndex.query() before
    the search is issued.
    """
    def __init__(self, query_string, doc_type, returned_fields=None, limit=25, default_operator="AND", offset=0, reverse=False, match_only=False):
        """Build the search config.

        *returned_fields* now defaults to None instead of a shared mutable
        list ([]); behavior is identical for all callers (only truthiness
        is tested) but the mutable-default pitfall is removed.
        """
        self.__config = {}
        if returned_fields:
            self.__config['_source'] = ",".join(returned_fields)
        self.__config['q'] = query_string
        self.__config['size'] = limit
        self.__config['doc_type'] = doc_type
        self.__config['default_operator'] = default_operator
        self.__config['from_'] = offset
        self.__config['reverse'] = reverse
        if match_only:
            self.__config['match_only'] = True
    def __del__(self):
        del self.__config
    def getSearchObject(self):
        """Deep copy of the config so callers can mutate it freely."""
        # temporary fix for index query, deleting parameters to make compatible to elasticsearch api
        return deepcopy(self.__config)
    def getQueryString(self):
        return self.__config['q']
    def getLimit(self):
        return self.__config['size']
    def getOffset(self):
        return self.__config['from_']
class EsResultObject:
    """Wraps an elasticsearch search response as EsSearchDocument objects."""
    def __init__(self, response=None, match_only=False, reverse=False):
        # No response (e.g. TransportError path) -> empty result set.
        self.documents = []
        self.total_count = 0
        if not response:
            return
        self.total_count = response['hits']['total']
        for result in response['hits']['hits']:
            doc = result['_source']
            fields = []
            for name, value in zip(doc.keys(), doc.values()):
                fields.append(EsFieldBase(name=name, value=value))
            document = EsSearchDocument(rank=doc['_rank'], doc_type=result['_type'], fields=fields, id=result['_id'])
            self.documents.append(document)
        if match_only:
            # match_only queries are not rank-sorted server side, so sort here.
            # NOTE(review): this subscripts EsSearchDocument with '_rank';
            # confirm EsSearchDocument supports __getitem__.
            self.documents = sorted(self.documents, key=lambda document: document['_rank'], reverse=not reverse)
|
import os
import threadpool
from datetime import time
from html_dloader import HtmlDLoader
from html_outputer import HtmlOutputer
from html_parser import HtmlParser
from url_manager import UrlManager
class GrabMain(object):
    """Threaded crawler: collects article/image page URLs from a root page
    and downloads their contents/images via thread pools. (Python 2.)
    """
    def __init__(self, url):
        self.root_url = url
        self.urlManager = UrlManager()
        self.dLoader = HtmlDLoader()
        self.contParser = HtmlParser()
        self.contOutputer = HtmlOutputer()
        pass
    def grabText(self):
        """Crawl all "next" pages from the root, then fetch every collected
        content URL with a 10-thread pool and save title + body text."""
        if self.root_url is None:
            return
        self.urlManager.add_new_next_url(self.root_url)
        self.contParser.parser_set(None, None, None, None, None)
        while self.urlManager.get_new_next_count():
            try:
                new_url = self.urlManager.get_new_next_url()
                html_cont = self.dLoader.download(new_url)
                urls, nexts = self.contParser.parser_text_urls(html_cont)
                self.urlManager.add_new_next_urls(nexts)
                self.urlManager.add_new_urls(urls)
            except:
                # NOTE(review): bare except hides all errors for this URL.
                print "url is error."
        pool = threadpool.ThreadPool(10)
        requests = threadpool.makeRequests(self.thread_grabText, self.urlManager.new_urls)
        [pool.putRequest(req) for req in requests]
        pool.wait()
    def thread_grabText(self, url):
        """Worker: download one page, parse title/content, write it out."""
        try:
            print "curr url is %s." % url
            html_cont = self.dLoader.download(url)
            title, cont = self.contParser.parser_text_cont(html_cont)
            self.contOutputer.output_cont(title, cont)
        except:
            print "url is %s, error." % url
    def grabImgs(self):
        """Like grabText but each content page's images are downloaded."""
        if self.root_url is None:
            return None
        self.urlManager.add_new_next_url(self.root_url)
        self.contParser.parser_set(None, None, None, None, None)
        while self.urlManager.get_new_next_count():
            try:
                new_url = self.urlManager.get_new_next_url()
                html_cont = self.dLoader.download(new_url)
                urls, nexts = self.contParser.parser_text_urls(html_cont)
                self.urlManager.add_new_next_urls(nexts)
                self.urlManager.add_new_urls(urls)
            except:
                print "url is error."
        pool = threadpool.ThreadPool(10)
        requests = threadpool.makeRequests(self.thread_grabImg, self.urlManager.new_urls)
        [pool.putRequest(req) for req in requests]
        pool.wait()
    def thread_grabImg(self, url):
        """Worker: download one page, create a directory named after its
        title, and fetch every image link with a nested 12-thread pool."""
        try:
            print "curr url is %s." % url
            html_cont = self.dLoader.download(url)
            title, links = self.contParser.parser_img_cont(html_cont)
            if links is None or len(links) == 0:
                print "url is %s, not src." % url
                return None
            if title is None:
                # NOTE(review): `time` here is datetime.time (see the module
                # import), which has no time() function -- this fallback
                # likely raises AttributeError; probably `import time` was
                # intended. Confirm before relying on it.
                title = time.time()
            try:
                if not os.path.isdir(title):
                    os.mkdir(title)
            except:
                # Title may contain characters invalid for a directory name;
                # fall back to a timestamp-based name (same caveat as above).
                title = time.time()
                if not os.path.isdir(title):
                    os.mkdir(title)
            params = []
            index = 0
            for link in links:
                params.append(([title, link, index], None))
                index += 1
            pool = threadpool.ThreadPool(12)
            requests = threadpool.makeRequests(self.contOutputer.output_img, params)
            [pool.putRequest(req) for req in requests]
            pool.wait()
        except:
            print "url is %s, error." % url
if __name__ == "__main__":
    # Entry point: crawl images starting from an (empty) root URL.
    obj = GrabMain("")
    obj.grabImgs()
    pass
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-04 00:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: drops band.fas_amount and
    # redefines the place primary key and coordinate fields.
    # Do not edit by hand.
    dependencies = [
        ('api_', '0005_auto_20170830_2101'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='band',
            name='fas_amount',
        ),
        migrations.AlterField(
            model_name='place',
            name='id',
            field=models.CharField(max_length=255, primary_key=True, serialize=False, unique=True),
        ),
        migrations.AlterField(
            model_name='place',
            name='latitude',
            field=models.DecimalField(decimal_places=6, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='place',
            name='longitude',
            field=models.DecimalField(decimal_places=6, max_digits=9, null=True),
        ),
    ]
|
import requests
import sys
from jsonConvert import jsonConvert
def getPage(date, timeout=10):
    """Fetch the French Senate agenda page for the given date.

    Args:
        date (string): date in 'jjmmaaaa' (ddmmyyyy) format
        timeout (float): seconds to wait for the server before giving up.
            Added so a stalled connection cannot hang the script forever;
            default keeps the call backward-compatible for existing callers.

    Returns:
        requests.Response: HTTP response for the agenda URL
        (html content is available via .content)

    Raises:
        requests.exceptions.Timeout: if the server does not answer in time.
    """
    url = 'https://www.senat.fr/aglae/Global/agl' + date + '.html'
    # NOTE: 'date' is interpolated as-is; callers are expected to pass a
    # plain 'jjmmaaaa' string, so no URL-quoting is applied here.
    return requests.get(url, timeout=timeout)
# CLI entry point: fetch and convert the agenda page for the date passed
# as the first command-line argument (format 'jjmmaaaa').
if len(sys.argv)>1:
    jsonConvert(getPage(sys.argv[1]), sys.argv[1])
|
from django.shortcuts import render
from .models import login_data
import jwt
import random
from django.views.decorators.csrf import csrf_exempt
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse,HttpResponseBadRequest
from django.contrib.auth import authenticate, login,logout
from django.core.mail import EmailMessage,get_connection
from custom_key.models import *
from django.core.mail.backends.smtp import EmailBackend
from internal_key.models import *
from student.models import *
from faculty.models import *
from alumni.models import *
from student.views import signup_student
class UploadFileForm(forms.Form):
    """Single-field form used by the import views to receive an uploaded spreadsheet file."""
    file = forms.FileField()
#Group_id distributions
# 1 - student
# 2 - faculty
# 3 - alumni
# 4 - admin
# 5 - developer
@login_required
@csrf_exempt
def import_login_table(request):
    """Developer-only view (group_id 5): bulk-import rows into login_data from an uploaded file."""
    if request.user.is_authenticated():
        user=str(request.user)
        try:
            login_data_row=login_data.objects.get(login_id=str(user))
            if login_data_row.group_id==5:
                if request.method == "POST":
                    print "27"
                    form = UploadFileForm(request.POST,request.FILES)
                    if form.is_valid():
                        print "30"
                        # save_to_database maps the file's columns onto the
                        # login_data model fields (presumably provided by a
                        # django-excel style upload patch -- confirm).
                        request.FILES['file'].save_to_database(model=login_data,mapdict=['login_id','group_id'])
                        return HttpResponse("OK")
                    else:
                        return HttpResponseBadRequest()
                else:
                    form = UploadFileForm()
                    return render(request,'upload.html',{'form':form ,'msg':"Login table"})
            # NOTE(review): users with group_id != 5 fall through and return
            # None, which Django rejects -- confirm this is intended.
        except:
            return HttpResponse("Page not found")
    else:
        return HttpResponse("page not found")
@login_required
@csrf_exempt
def import_overall_table(request):
    """Developer-only view (group_id 5): bulk-import rows into overall_achievement_data from an uploaded file."""
    if request.user.is_authenticated():
        user=str(request.user)
        try:
            login_data_row=login_data.objects.get(login_id=str(user))
            if login_data_row.group_id==5:
                if request.method == "POST":
                    print "27"
                    form = UploadFileForm(request.POST,request.FILES)
                    if form.is_valid():
                        print "30"
                        # Columns are mapped onto title / description fields.
                        request.FILES['file'].save_to_database(model=overall_achievement_data,mapdict=['title','description'])
                        return HttpResponse("OK")
                    else:
                        return HttpResponseBadRequest()
                else:
                    form = UploadFileForm()
                    return render(request,'upload.html',{'form':form ,'msg':"overall_achievement_data"})
            # NOTE(review): users with group_id != 5 fall through and return
            # None, which Django rejects -- confirm this is intended.
        except:
            return HttpResponse("Page not found")
    else:
        return HttpResponse("page not found")
def email_verification(request,value):
    """Handle an email-confirmation link.

    *value* is a JWT (issued by signup_view) carrying the email and roll_no.
    On success the matching login row is flagged verified and the
    group-specific signup page is rendered.
    """
    try:
        print value
        # Shared JWT secret stored in the internal_key table.
        jwt_key=str(internal_key_data.objects.get(key='jwt_key').value)
        print jwt_key
        email_decoded_json=jwt.decode(value,jwt_key,algorithms =['HS256'])
        print email_decoded_json
        email=email_decoded_json['email']
        roll_no=email_decoded_json['roll_no']
        # otp=jwt.decode(otp,'secret',algorithms=['HS256'])
        print email
        print roll_no
        # print otp
        try:
            login_data_row=login_data.objects.get(email=email)
            group_id=login_data_row.group_id
            # Mark the address as verified before handing off to signup.
            setattr(login_data_row,'email_flag',True)
            login_data_row.save()
            # group_id: 1=student, 2=faculty, 3=alumni (see table at top of file).
            if group_id==1:
                return render(request,'signup_student.html',{'login_id':roll_no})
            else:
                if group_id==2:
                    return render(request,'signup_faculty.html',{'login_id':roll_no})
                else:
                    if group_id==3:
                        return render(request,'signup_alumni.html',{'login_id':roll_no})
                    else:
                        return HttpResponse("ok")
        except Exception,e:
            print e
            return HttpResponse("email_id already registered try another or contact ace ")
    except:
        # JWT decode failure or missing internal key.
        return HttpResponse("Failed")
# http://127.0.0.1:8000/verify_email?email=arpitj938@gmail.com&otp=123456
@csrf_exempt
def login_view(request):
    """Log a user in; only accounts whose email is verified may authenticate."""
    if request.user.is_authenticated():
        return render (request,'index.html',{'link2':'<a href="/logout/">LOGOUT</a>'})
    else:
        if request.method=='POST':
            login_id=str(request.POST.get('login_id'))
            print login_id
            password=str(request.POST.get('password'))
            # password=jwt.decode(password,'secret',algorithms=['HS256'])
            try:
                login_data_row=login_data.objects.get(login_id=login_id)
                print login_id
                # email_flag is set by email_verification() once the link is clicked.
                if login_data_row.email_flag==1:
                    print 105
                    user = authenticate(username=login_id, password=password)
                    if user is not None:
                        login(request, user)
                        print 'login done'
                        return render (request,'index.html',{'link2':'<a href="/logout/">LOGOUT</a>'})
                        # return HttpResponseRedirect("/welcome/")
                    else:
                        return render(request,'login.html',{'msg':'wrong login_id or password'})
                else:
                    return render(request,'login.html',{'msg':'complete your email verification'})
            except Exception,e:
                # Unknown login_id (DoesNotExist) is reported the same as a bad password.
                print e
                return render(request,'login.html',{'msg':'wrong login_id or password'})
        else:
            return render(request,'login.html')
@csrf_exempt
def signup_view(request):
    """Register a student/faculty/alumni account and send a JWT-signed verification email.

    Flow: look up the pre-provisioned login row by roll_no, upsert the
    group-specific profile row, store the email, then mail a /verify_email/
    link signed with the shared jwt_key.
    """
    if request.user.is_authenticated():
        return render (request,'index.html',{'link2':'<a href="/logout/">LOGOUT</a>'})
    else:
        if request.method=='POST':
            try:
                print "try"
                # roll_no='151258'
                roll_no=str(request.POST.get('roll_no'))
                print roll_no
                # name='arpit'
                name=str(request.POST.get('name'))
                print name
                mobile=str(request.POST.get('mobile'))
                email=str(request.POST.get('email'))
                # email='arpitj938@gmail.com'
                print email
                try:
                    print "try 1"
                    login_data_row=login_data.objects.get(login_id=roll_no)
                    group_id=login_data_row.group_id
                    print group_id
                    # Already-verified accounts cannot sign up twice.
                    if login_data_row.email_flag==True:
                        print 'your account is registered already'
                        # return HttpResponse("your account is registered already")
                        return render(request,"signup.html",{'msg':'your account is registered already','link2':'<a href="/login/">LOGIN</a>'})
                    else:
                        print roll_no
                        # Upsert the group-specific profile row
                        # (update when present, create on DoesNotExist).
                        if group_id==1:
                            try:
                                student_data_row=student_data.objects.get(roll_no=roll_no)
                                setattr(student_data_row,'name',str(name))
                                setattr(student_data_row,'mobile',str(mobile))
                                setattr(student_data_row,'email',str(email))
                                student_data_row.save()
                            except:
                                student_data.objects.create(roll_no=roll_no,name=name,email=email,mobile=mobile)
                                print '148'
                        else:
                            if group_id==2:
                                try:
                                    faculty_data_row=faculty_data.objects.get(faculty_id=roll_no)
                                    setattr(faculty_data_row,'name',str(name))
                                    setattr(faculty_data_row,'mobile',str(mobile))
                                    setattr(faculty_data_row,'email',str(email))
                                    faculty_data_row.save()
                                except:
                                    faculty_data.objects.create(faculty_id=roll_no,name=name,email=email,mobile=mobile)
                            else:
                                if group_id==3:
                                    try:
                                        alumni_data_row=alumni_data.objects.get(roll_no=roll_no)
                                        setattr(alumni_data_row,'name',str(name))
                                        setattr(alumni_data_row,'mobile',str(mobile))
                                        setattr(alumni_data_row,'email',str(email))
                                        alumni_data_row.save()
                                    except:
                                        alumni_data.objects.create(roll_no=roll_no,name=name,email=email,mobile=mobile)
                        setattr(login_data_row,'email',str(email))
                        login_data_row.save()
                        print '160'
                        # SMTP credentials live in the custom_key table.
                        host_email=str(custom_key_data.objects.get(key='host').value)
                        port_email=custom_key_data.objects.get(key='port').value
                        username_email=str(custom_key_data.objects.get(key='username').value)
                        password_email=str(custom_key_data.objects.get(key='password').value)
                        print host_email
                        # The verification token carries email + roll_no, signed with jwt_key.
                        email_json={'email':str(email),
                        'roll_no':str(roll_no)}
                        jwt_key=str(internal_key_data.objects.get(key='jwt_key').value)
                        email_encoded_url=jwt.encode(email_json,jwt_key,algorithm='HS256')
                        print email_encoded_url
                        link=str(request.scheme+"://"+request.get_host()+"/verify_email/"+email_encoded_url)
                        url='<a href='+link+'>verify email</a>'
                        # image='<img src='+'"https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcTEgRR_jTgKgGkHyVYFbXHg51pzhpWmx1bsgREGMcV621HdH39q"'+'>'
                        print url
                        # email_body=str(custom_keys_data.objects.get(key='email_test').value)
                        email_body=str(email_key_data.objects.get(key='email_verify').value)
                        print email_body % (name,url)
                        backend = EmailBackend(host=str(host_email), port=int(port_email), username=str(username_email),
                            password=str(password_email), use_tls=True, fail_silently=True)
                        EmailMsg=EmailMessage("ACE",email_body % (name,url) ,'no-reply@gmail.com',[email] ,connection=backend)
                        EmailMsg.content_subtype = "html"
                        EmailMsg.send()
                        return render(request,"signup.html",{'msg':'email is send to your email account kindly verify it','link2':'<a href="/login/">LOGIN</a>'})
                except:
                    # Unknown roll_no, or any failure while mailing.
                    print 'enroll_no is not valid'
                    return render(request,"signup.html",{'msg':'enroll_no is not valid','link2':'<a href="/login/">LOGIN</a>'})
            except:
                print 'enroll_no not get'
                return render(request,"signup.html",{'msg':'enroll_no not get','link2':'<a href="/login/">LOGIN</a>'})
        else:
            return render(request,"signup.html",{'link2':'<a href="/login/">LOGIN</a>'})
@login_required
def ping(request, id):
    """Render the ping page for the member identified by *id* (roll number)."""
    context = {
        'link2': '<a href="/logout/">LOGOUT</a>',
        'roll_no': id,
    }
    return render(request, 'ping.html', context)
@login_required
@csrf_exempt
def ping_send(request):
    """Email a 'ping' message from the logged-in member to another member.

    Sender and receiver profiles are resolved via their group_id
    (1=student, 2=faculty, 3=alumni); the mail body template comes from the
    email_key table and SMTP credentials from the custom_key table.
    """
    try:
        if request.method=='POST':
            msg=str(request.POST.get('msg'))
            id=str(request.POST.get('roll_no'))
            login_id=str(request.user)
            login_data_row=login_data.objects.get(login_id=login_id)
            login_data_row_2=login_data.objects.get(login_id=id)
            sender_group_id=login_data_row.group_id
            reciver_group_id=login_data_row_2.group_id
            # Resolve sender name/mobile from the group-specific table.
            if sender_group_id==1:
                sender_data_row=student_data.objects.get(roll_no=login_id)
                sender_name=sender_data_row.name
                sender_mobile=sender_data_row.mobile
            else:
                if sender_group_id==2:
                    sender_data_row=faculty_data.objects.get(faculty_id=login_id)
                    sender_name=sender_data_row.name
                    sender_mobile=sender_data_row.mobile
                else:
                    if sender_group_id==3:
                        sender_data_row=alumni_data.objects.get(roll_no=login_id)
                        sender_name=sender_data_row.name
                        sender_mobile=sender_data_row.mobile
            # NOTE(review): receivers with group_id 2 (faculty) leave
            # reciver_name unbound, producing a NameError that the outer
            # except silently converts to the generic error page -- confirm.
            if reciver_group_id==1:
                reciver_data_row=student_data.objects.get(roll_no=id)
                reciver_name=reciver_data_row.name
            else:
                if reciver_group_id==3:
                    reciver_data_row=alumni_data.objects.get(roll_no=id)
                    reciver_name=reciver_data_row.name
            reciver_email=login_data_row_2.email
            sender_email=login_data_row.email
            host_email=str(custom_key_data.objects.get(key='host').value)
            port_email=custom_key_data.objects.get(key='port').value
            username_email=str(custom_key_data.objects.get(key='username').value)
            password_email=str(custom_key_data.objects.get(key='password').value)
            email_body=str(email_key_data.objects.get(key='email_connect').value)
            print email_body % (reciver_name,sender_name,msg,sender_mobile,sender_email)
            backend = EmailBackend(host=str(host_email), port=int(port_email), username=str(username_email),
                password=str(password_email), use_tls=True, fail_silently=True)
            EmailMsg=EmailMessage("ACE",email_body % (reciver_name,sender_name,msg,sender_mobile,sender_email) ,'no-reply@gmail.com',[reciver_email] ,connection=backend)
            EmailMsg.content_subtype = "html"
            EmailMsg.send()
            return render (request,'ping.html',{'msg':'email is send, kindly wait for reply','link2':'<a href="/logout/">LOGOUT</a>'})
    except:
        return render (request,'ping.html',{'msg':'Something occur please try again','link2':'<a href="/logout/">LOGOUT</a>'})
"""Generator stuff"""
import os
import sys
from flask import Blueprint, jsonify
from flaskbox import constants
from flaskbox.config import config
from flaskbox.fake_data import fake_data
from flaskbox.helpers import create_init_file
class YAMLGenerator:
    """Generator for the flaskbox configuration file.

    Methods::
        create_file  Create the flaskbox yaml file
    """

    @staticmethod
    def create_file():
        """Create the flaskbox.yml init file, refusing to overwrite an existing one.

        Exits the process with status 1 when flaskbox.yml already exists.

        :return: the result of create_init_file()
        """
        already_present = os.path.isfile('flaskbox.yml')
        if already_present:
            print(constants.FILE_EXISTS_MESSAGE)
            sys.exit(1)
        return create_init_file()
class BlueprintGenerator:
    """Blueprint factory for the flaskbox mock server.

    Methods::
        response         Return fake data based on data type (not completed yet)
        make_blueprints  Build one Blueprint per configured route
        base_route       Base route for the flask application
    """

    @staticmethod
    def base_route():
        """Welcome endpoint for a flaskbox API.

        :return: a JSON payload containing the welcome message
        """
        payload = {'data': {'message': 'Flaskbox mock server'}}
        return jsonify(payload)

    @staticmethod
    def response():
        """Build a JSON response from the fake values generated for each route.

        Note: only the values of the LAST configured route survive, since
        `data` is overwritten on every iteration (matches current behavior).

        :return: a JSON payload whose 'data' maps field names to fake values
        """
        data = None
        for route in config.routes:
            fields = config.get_fields(route)
            fake_objects = fake_data.generate_value(fields)
            data = {key: field[key] for field in fake_objects for key in field}
        return jsonify({'data': data})

    def make_blueprints(self):
        """Create one Blueprint per configured route, wired to self.response.

        :return: a list of Blueprint objects
        """
        blueprints = []
        for route in config.routes:
            route_name = config.get_route_name(route)
            bp = Blueprint(route_name, __name__)
            bp.add_url_rule(route_name, 'response', self.response)
            blueprints.append(bp)
        return blueprints
# Module-level singleton instance of the blueprint generator class.
blueprint = BlueprintGenerator()
|
'''Kevin and Stuart want to play the 'The Minion Game'.
Game Rules
Both players are given the same string,
.
Both players have to make substrings using the letters of the string
.
Stuart has to make words starting with consonants.
Kevin has to make words starting with vowels.
The game ends when both players have made all possible substrings.
Scoring
A player gets +1 point for each occurrence of the substring in the string
.
For Example:
String
= BANANA
Kevin's vowel beginning word = ANA
Here, ANA occurs twice in BANANA. Hence, Kevin will get 2 Points. '''
'''s=input()
scoreStuart=0
scoreKevin=0
def minion_game(string):
Vocales=['A','E','I','O','U']
scoreStuart=0
scoreKevin=0
#scoreStuart=string.count('A')+string.count('E')+string.count('I')+string.count('O')+string.count('U')
#scoreKevin=len(string)-scoreStuart
#lista=[chr(65+i) for i in range(26)]
for i in range(len(string)):
for j in range(i+1,len(string)+1):
aux=string[i:j]
#if string[i:j]==string[i:j+1]
if aux[0:1] in Vocales and aux[0:1]!='':
scoreKevin+=1
elif aux[0:1]!='':
scoreStuart+=1
if scoreKevin>scoreStuart and scoreKevin-scoreStuart!=1:
print(f'Kevin {scoreKevin}')
elif scoreStuart>scoreKevin and scoreStuart-scoreKevin!=1:
print(f'Stuart {scoreStuart}')
else:
print('Draw')
minion_game(s)'''
# Minion Game scoring: Kevin owns substrings starting with vowels, Stuart the
# rest. A substring starting at index i occurs in exactly len(S) - i
# substrings, so a single pass over the string is enough.
S = input()
s = 0
k = 0
for i, letter in enumerate(S):
    if letter in ('A', 'E', 'I', 'O', 'U'):
        k += len(S) - i
    else:
        s += len(S) - i
if s == k:
    print("Draw")
elif k > s:
    print("Kevin " + str(k))
else:
    print("Stuart " + str(s))
|
# Read two integers and swap them, printing the values before and after.
a = int(input("Enter the first number:"))
b = int(input("Enter the second number:"))
print("Before swapping")
print("The value of a is ", a)
print("The value of b is ", b)
# Idiomatic Python tuple swap -- no temporary variable needed.
a, b = b, a
print("After swapping")
print("The value of a is ", a)
print("The value of b is ", b)
|
# Generated by Django 2.1.2 on 2018-11-19 18:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the traintest app: creates the Testing,
    # TestingDetail, Training, TrainingBias and TrainingWeight tables,
    # all linked by CASCADE foreign keys.
    initial = True
    dependencies = [
        ('ann', '0002_auto_20181114_1654'),
    ]
    operations = [
        # Aggregate testing run, ordered by the epoch of its training run.
        migrations.CreateModel(
            name='Testing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sum_loss', models.DecimalField(decimal_places=3, max_digits=4)),
                ('temp_jumlah_testing', models.IntegerField()),
                ('temp_jumlah_training', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['training__epoch'],
            },
        ),
        # Per-sample prediction outcome for a testing run.
        migrations.CreateModel(
            name='TestingDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prediksi', models.BooleanField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('dataset_detail', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ann.DatasetDetail')),
                ('testing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='traintest.Testing')),
            ],
            options={
                'ordering': ['dataset_detail__id'],
            },
        ),
        # One training run of a neural network (epoch count, LR, loss, checkpoint flag).
        migrations.CreateModel(
            name='Training',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('epoch', models.IntegerField()),
                ('learning_rate', models.DecimalField(decimal_places=3, max_digits=4)),
                ('sum_loss', models.DecimalField(decimal_places=3, max_digits=4)),
                ('check_point', models.BooleanField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('neural_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ann.NeuralNetwork')),
            ],
            options={
                'ordering': ['epoch'],
            },
        ),
        # Learned bias per (layer, neuron) of a training run.
        migrations.CreateModel(
            name='TrainingBias',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('layer', models.IntegerField()),
                ('neuron', models.IntegerField()),
                ('bias', models.DecimalField(decimal_places=2, max_digits=3)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('training', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='traintest.Training')),
            ],
            options={
                'ordering': ['layer', 'neuron'],
            },
        ),
        # Learned weight per (layer, neuron, index) of a training run.
        migrations.CreateModel(
            name='TrainingWeight',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('layer', models.IntegerField()),
                ('neuron', models.IntegerField()),
                ('index', models.IntegerField()),
                ('weight', models.DecimalField(decimal_places=2, max_digits=3)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('training', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='traintest.Training')),
            ],
            options={
                'ordering': ['layer', 'neuron', 'index'],
            },
        ),
        # Added last to break the Testing <-> Training creation-order cycle.
        migrations.AddField(
            model_name='testing',
            name='training',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='traintest.Training'),
        ),
    ]
|
import ccobra
import pandas
import pykmeans
import numpy
import math
import principleextractor
import collections
class SyllogisticKMeans:
    """
    Class to model a syllogistic kmean solver using a category hamming distance
    Input: Syllogistic Data (see e.g. Ragni2016), number of clusters to form (k)
    Output: k Clusters, which correspond to 'cognitive' clusters
    """
    def __init__(self, k):
        # Per-subject numeric response vectors (64 entries each), filled by
        # add_syllogistic_data().
        self.data = []
        self.subj_data_dict = {}
        # Response code -> numeric class label.
        self.resp_to_num = {
            'Aac': 0,
            'Aca': 1,
            'Iac': 2,
            'Ica': 3,
            'Eac': 4,
            'Eca': 5,
            'Oac': 6,
            'Oca': 7,
            'NVC': 8
        }
        self.num_to_syllogism = {}
        # NOTE(review): reads ./syllogisms.csv relative to the current working
        # directory at construction time -- confirm the file is always there.
        data = pandas.read_csv("./syllogisms.csv")
        for index, syllogism in data.iterrows():
            self.num_to_syllogism[index] = syllogism['Syllogism']
        # Inverse lookup tables.
        self.num_to_resp = {v: k for k, v in self.resp_to_num.items()}
        self.syllogism_to_num = {v: k for k, v in self.num_to_syllogism.items()}
        self.k = k
        self.final_clusters_num = []
        self.final_clusters_syll_list = []  # [{Syllogism : Response, ...}, ...]
        self.corresponding_centroids = []
        self.distances = []
    def add_syllogistic_data(self, data):
        """Convert a CCOBRA dataframe into per-subject 64-entry numeric response vectors."""
        # enc_task = ccobra.syllogistic.encode_task(item.task)
        # enc_resp = ccobra.syllogistic.encode_response(truth, item.task)
        subj_data_dict = {}
        for index, subj_data in data.iterrows():
            if subj_data['id'] not in subj_data_dict:
                subj_data_dict[subj_data['id']] = {}
            task = parse(subj_data['task'])
            response = ccobra.syllogistic.encode_response(parse(subj_data['response']), task)
            subj_data_dict[subj_data['id']][self.syllogism_to_num[ccobra.syllogistic.encode_task(task)]] =\
                self.resp_to_num[response]
            # subj_data_dict[subj_data['id']][ccobra.syllogistic.encode_task(task)] = response
        self.subj_data_dict = subj_data_dict  # {id : [1, 6, 8, 0, ...], ...}
        # Sort by subject id so self.data has a deterministic row order.
        ordered_dict = collections.OrderedDict(sorted(subj_data_dict.items()))
        for key, val in ordered_dict.items():
            add_list = []
            # Flatten the per-syllogism dict into a fixed-order 64-entry list
            # (raises KeyError if a subject is missing any of the 64 tasks).
            for i in range(64):
                add_list.append(val[i])
            self.data.append(add_list)
    def generate_clusters(self, cutoff):
        """Run category-Hamming k-means and store clusters, centroids and distances."""
        # TODO PROBLEM data is not ordered, even though it is assumed to be ordered
        initial_centroids = generate_centroids(self.data, self.k)
        kme = pykmeans.kmeans_category_hamming(data=self.data, centroids=initial_centroids, cutoff=cutoff)
        self.final_clusters_num = kme[0]
        self.corresponding_centroids = kme[1]
        self.distances = kme[2]
        # Translate each numeric cluster vector back to {syllogism: response}.
        for cluster in self.final_clusters_num:
            temp_d = {}
            i = 0
            for val in cluster:
                temp_d[self.num_to_syllogism[i]] = (self.num_to_resp[val])
                i += 1
            self.final_clusters_syll_list.append(temp_d)
    def generate_score(self):
        """
        Generates a quality measurement for each final cluster
        """
        # TODO : don't generate score to select initial cluster, create initial cluster with kmeans k=1
        center_scores = []
        for i in range(self.k):
            center_scores.append(0)
        # Every data point contributes exp(-distance) to each cluster's score.
        for dp in self.data:
            i = 0
            for center in self.final_clusters_num:
                center_scores[i] += math.pow(math.e, -1 * category_hamming_dist(dp, center))
                i += 1
        return center_scores
def category_hamming_dist(x, y):
    """Category Hamming distance: the count of positions where the two lists differ.

    Iterates over the length of *x*, so *y* must be at least as long.
    """
    mismatches = 0
    for position, value in enumerate(x):
        if y[position] != value:
            mismatches += 1
    return mismatches
def category_hamming_centroid_factory(data):
    """Create a centroid from a list of category vectors.

    For every dimension the centroid takes the most frequent category across
    all data points (ties resolved in favour of the value encountered first),
    which minimizes sum(map(lambda x: category_hamming_dist(centroid, x), data)).
    """
    dimensions = len(data[0])
    centroid = []
    for position in range(dimensions):
        counts = {}
        for dp in data:
            counts[dp[position]] = counts.get(dp[position], 0) + 1
        centroid.append(max(counts, key=counts.get))
    return centroid
def parse(item):
    """
    Parses a task or response cell from a syllogism data csv.

    '/' separates entities and ';' separates the tokens inside each entity.

    :param item: raw cell string, e.g. 'All;x;y/Some;y;z'
    :return: list of token lists, one per entity
    """
    return [entity.split(';') for entity in item.split('/')]
'''
def generate_centroids(k):
l = []
val_l = []
for i in range(0, int(k)):
for i in range(0, 64):
val_l.append(randrange(10))
l.append(val_l)
val_l = []
return l
'''
def generate_centroids(X, K):
    """Pick K initial centroids from X with kmeans++-style seeding.

    The first centroid is the first data point; each further centroid is a
    data point drawn with probability proportional to its squared distance
    to the nearest already-chosen centroid.

    :param X: data, list of category lists (must be non-empty)
    :param K: number of centroids to generate
    :return: list of K data points chosen as centroids
    """
    C = [X[0]]
    for _ in range(1, K):
        # Squared distance to the nearest chosen centroid. The distance is
        # computed once per (centroid, point) pair -- previously it was
        # computed twice and fed to numpy.inner(d, d).
        D2 = numpy.array(
            [min(category_hamming_dist(c, x) ** 2 for c in C) for x in X]
        )
        probs = D2 / D2.sum()
        cumprobs = probs.cumsum()
        r = numpy.random.rand()
        # Default to the last point so `i` is always bound, even in the
        # degenerate case where every distance is 0 and probs is all-NaN
        # (previously a potential NameError).
        i = len(X) - 1
        for j, p in enumerate(cumprobs):
            if r < p:
                i = j
                break
        C.append(X[i])
    return C
|
import random
import secrets
import time
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import ANY
from uuid import UUID
import pytest
from _test_mockups import get_fake_authdevice, random_bool
from wacryptolib.authenticator import initialize_authenticator
from wacryptolib.exceptions import (
KeystoreDoesNotExist,
SchemaValidationError,
ValidationError,
KeyDoesNotExist,
KeystoreMetadataDoesNotExist,
OperationNotSupported,
)
from wacryptolib.keygen import SUPPORTED_ASYMMETRIC_KEY_ALGOS
from wacryptolib.keystore import (
FilesystemKeystore,
InMemoryKeystore,
KeystoreBase,
FilesystemKeystorePool,
generate_free_keypair_for_least_provisioned_key_algo,
get_free_keypair_generator_worker,
generate_keypair_for_storage,
ReadonlyFilesystemKeystore,
KEYSTORE_FORMAT,
InMemoryKeystorePool,
_get_keystore_metadata_file_path,
)
from wacryptolib.scaffolding import (
check_keystore_free_keys_concurrency,
check_keystore_basic_get_set_api,
check_keystore_free_keys_api,
)
from wacryptolib.utilities import generate_uuid0, get_utc_now_date, dump_to_json_file
def test_keystore_basic_get_set_api(tmp_path):
    """Exercise the shared get/set keypair API on every keystore implementation."""
    tmp_path1 = tmp_path / "subdir1"
    tmp_path1.mkdir()
    tmp_path2 = tmp_path / "subdir2"
    tmp_path2.mkdir()
    # The abstract base class must not be instantiable.
    with pytest.raises(TypeError, match="Can't instantiate abstract class"):
        KeystoreBase()
    dummy_keystore = InMemoryKeystore()
    filesystem_keystore = FilesystemKeystore(keys_dir=tmp_path1)
    readonly_filesystem_keystore = ReadonlyFilesystemKeystore(keys_dir=tmp_path1)
    class IntolerantKeystore(FilesystemKeystore):
        """Some keystores might be unable to list keypairs, e.g. servers with big databases"""
        def list_keypair_identifiers(self, *args, **kwargs) -> list:
            raise OperationNotSupported
    intolerant_keystore = IntolerantKeystore(keys_dir=tmp_path2)
    filesystem_keystore_test_locals = None
    # Run the shared scaffolding checks on each (writable, readonly) pair.
    for keystore, readonly_keystore in [
        (dummy_keystore, None),
        (intolerant_keystore, None),
        (filesystem_keystore, readonly_filesystem_keystore),
    ]:
        res = check_keystore_basic_get_set_api(keystore=keystore, readonly_keystore=readonly_keystore)
        if keystore.__class__ == FilesystemKeystore:
            filesystem_keystore_test_locals = res
    # Specific tests for filesystem storage
    keychain_uid = filesystem_keystore_test_locals["keychain_uid"]
    is_public = random_bool()
    filepath = filesystem_keystore._get_filepath(keychain_uid, key_algo="abxz", is_public=is_public)
    with open(filepath, "rb") as f:
        key_data = f.read()
    assert key_data == (b"public_data" if is_public else b"private_data")  # IMPORTANT no exchange of keys in files!
def test_keystore_free_keys_api(tmp_path):
    """The free-keys API must pass the shared checks for both keystore kinds."""
    keystores = (InMemoryKeystore(), FilesystemKeystore(keys_dir=tmp_path))
    filesystem_keystore = keystores[1]
    # The free-keys directory is only created on first use.
    assert not filesystem_keystore._free_keys_dir.exists()
    for keystore in keystores:
        check_keystore_free_keys_api(keystore)
    assert filesystem_keystore._free_keys_dir.exists()
def test_readonly_keystore_limitations(tmp_path):
    """For now we just test that the base ReadonlyFilesystemKeystore class doesn't have dangerous fields."""
    normal_keystore = FilesystemKeystore(keys_dir=tmp_path)
    readonly_keystore = ReadonlyFilesystemKeystore(keys_dir=tmp_path)
    for forbidden_field in (
        "set_keypair",
        "set_public_key",
        "set_private_key",
        "get_free_keypairs_count",
        "add_free_keypair",
        "attach_free_keypair_to_uuid",
        "_write_to_storage_file",
    ):
        # Each mutating field must exist on the writable class and be
        # absent from the readonly one.
        assert hasattr(normal_keystore, forbidden_field)
        assert not hasattr(readonly_keystore, forbidden_field)
def test_keystore_free_keys_concurrency(tmp_path):
    """Free-key concurrency checks must pass for in-memory and filesystem keystores alike."""
    for keystore in (InMemoryKeystore(), FilesystemKeystore(keys_dir=tmp_path)):
        check_keystore_free_keys_concurrency(keystore)
def test_filesystem_keystore_list_keypair_identifiers(tmp_path: Path):
    """list_keypair_identifiers() must report every (uid, algo) pair, sorted, with a private-key flag."""

    def _check_key_dict_format(key):
        # Every listed entry carries a UUID, a supported algo and a bool flag.
        print(">> public key detected:", key)
        assert isinstance(key["keychain_uid"], UUID)
        assert key["key_algo"] in SUPPORTED_ASYMMETRIC_KEY_ALGOS
        assert isinstance(key["private_key_present"], bool)

    keystore = FilesystemKeystore(tmp_path)
    assert keystore.list_keypair_identifiers() == []
    # CASE 1 : only one key in storage
    key_algo = random.choice(SUPPORTED_ASYMMETRIC_KEY_ALGOS)
    keychain_uid = generate_uuid0()
    generate_keypair_for_storage(key_algo=key_algo, keystore=keystore, keychain_uid=keychain_uid)
    keys_list = keystore.list_keypair_identifiers()
    assert isinstance(keys_list, list)
    assert len(keys_list) == 1
    single_key = keys_list[0]
    _check_key_dict_format(single_key)
    assert single_key["keychain_uid"] == keychain_uid
    assert single_key["key_algo"] == key_algo
    assert single_key["private_key_present"]
    # CASE 2 : multiple public keys, with or without private keys
    for i in range(3):
        _key_algo = random.choice(SUPPORTED_ASYMMETRIC_KEY_ALGOS)
        generate_keypair_for_storage(key_algo=_key_algo, keystore=keystore, passphrase="xzf".encode())
    for bad_filename in (
        "0e896f1d-a4d0-67d6-7286-056f1ec342e8_RSA_OAEP_public_key.dot",
        "0e896f1d-a4d0-67d6-7286-056f1ec342e8_RSA_OAEP_publicX_key.pem",
        "a4d0-67d6-7286-056f1ec342e8_RSA_OAEP_public_key.pem",
        "WRONGPREFIX_public_key.pem",
    ):
        tmp_path.joinpath(bad_filename).touch()  # These will be ignored thanks to Regex
    keys_list = keystore.list_keypair_identifiers()
    assert isinstance(keys_list, list)
    assert len(keys_list) == 4
    assert keys_list == sorted(keys_list, key=lambda x: (x["keychain_uid"], x["key_algo"]))  # Well sorted
    for some_key in keys_list:
        _check_key_dict_format(some_key)
        # BUGFIX: this previously asserted `single_key` (the CASE 1 leftover)
        # instead of the loop variable, making the per-key check a tautology;
        # the parallel loop below shows the intended pattern.
        assert some_key["private_key_present"]  # ALWAYS for now
    for filepath in tmp_path.glob("*" + FilesystemKeystore._private_key_suffix):
        filepath.unlink()
    keys_list = keystore.list_keypair_identifiers()
    assert isinstance(keys_list, list)
    assert len(keys_list) == 4
    for some_key in keys_list:
        _check_key_dict_format(some_key)
        assert not some_key["private_key_present"]  # Private keys were deleted
    # CASE 3 : keys all deleted
    for filepath in tmp_path.glob("*.pem"):
        filepath.unlink()
    assert keystore.list_keypair_identifiers() == []
def test_in_memory_keystore_pool_corner_cases():
    """An empty in-memory pool exposes a local keyfactory but no foreign keystores."""
    pool = InMemoryKeystorePool()
    assert pool.get_local_keyfactory()
    missing_uid = generate_uuid0()
    # Looking up an unknown foreign keystore must raise, not return None.
    with pytest.raises(KeystoreDoesNotExist):
        pool.get_foreign_keystore(missing_uid)
    assert pool.list_foreign_keystore_uids() == []
def test_filesystem_keystore_pool_basics(tmp_path: Path):
    """A pool exposes one writable local keystore plus readonly-by-default foreign keystores."""
    pool = FilesystemKeystorePool(tmp_path)
    assert pool.get_all_foreign_keystore_metadata() == {}
    local_keystore = pool.get_local_keyfactory()
    assert isinstance(local_keystore, FilesystemKeystore)
    assert not local_keystore.list_keypair_identifiers()
    keypair = generate_keypair_for_storage(key_algo="RSA_OAEP", keystore=local_keystore, passphrase="xzf".encode())
    assert len(local_keystore.list_keypair_identifiers()) == 1
    assert pool.list_foreign_keystore_uids() == []
    # Create two empty foreign-keystore directories by hand (no metadata files yet).
    foreign_keystore_uid = generate_uuid0()
    mirror_path = tmp_path.joinpath(
        pool.FOREIGN_KEYSTORES_DIRNAME, pool.FOREIGN_KEYSTORE_PREFIX + str(foreign_keystore_uid)
    )
    mirror_path.mkdir(parents=True, exist_ok=False)
    foreign_keystore_uid2 = generate_uuid0()
    mirror_path2 = tmp_path.joinpath(
        pool.FOREIGN_KEYSTORES_DIRNAME, pool.FOREIGN_KEYSTORE_PREFIX + str(foreign_keystore_uid2)
    )
    mirror_path2.mkdir(parents=True, exist_ok=False)
    assert pool.list_foreign_keystore_uids() == sorted([foreign_keystore_uid, foreign_keystore_uid2])
    # Metadata accessors must fail while the metadata files are missing.
    with pytest.raises(KeystoreMetadataDoesNotExist):
        pool.get_all_foreign_keystore_metadata()
    with pytest.raises(KeystoreMetadataDoesNotExist):  # Error raised immediately
        pool.get_foreign_keystore_metadata(foreign_keystore_uid)
    with pytest.raises(KeystoreDoesNotExist, match="not found"):
        pool.get_foreign_keystore(generate_uuid0())
    # Foreign keystores come back readonly by default...
    foreign_keystore = pool.get_foreign_keystore(foreign_keystore_uid)
    assert isinstance(foreign_keystore, ReadonlyFilesystemKeystore)
    assert not isinstance(foreign_keystore, FilesystemKeystore)  # NOT writable
    # ...and writable only on explicit request.
    foreign_keystore = pool.get_foreign_keystore(foreign_keystore_uid, writable=True)
    assert isinstance(foreign_keystore, ReadonlyFilesystemKeystore)
    assert isinstance(foreign_keystore, FilesystemKeystore)  # Writable
    assert not foreign_keystore.list_keypair_identifiers()
    foreign_keystore.set_keypair(
        keychain_uid=generate_uuid0(),
        key_algo="RSA_OAEP",
        public_key=keypair["public_key"],
        private_key=keypair["private_key"],
    )
    # Writing into a foreign keystore must not leak into the local one.
    assert len(local_keystore.list_keypair_identifiers()) == 1  # Unchanged
    assert len(foreign_keystore.list_keypair_identifiers()) == 1
    foreign_keystore2 = pool.get_foreign_keystore(foreign_keystore_uid2)
    assert isinstance(foreign_keystore2, ReadonlyFilesystemKeystore)
    assert not isinstance(foreign_keystore2, FilesystemKeystore)
    assert not foreign_keystore2.list_keypair_identifiers()
def test_keystore_export_from_keystore_tree(tmp_path: Path):
    """export_to_keystore_tree() must serialize metadata and keypairs, with or without private keys."""
    authdevice_path = tmp_path / "device1"
    authdevice_path.mkdir()
    authdevice = get_fake_authdevice(authdevice_path)
    remote_keystore_dir = authdevice["authenticator_dir"]
    metadata = initialize_authenticator(
        remote_keystore_dir, keystore_owner="Jean-Jâcques", keystore_passphrase_hint="my-hint"
    )
    extra_fields_schema = {"keystore_creation_datetime": ANY}
    # Randomly exercise the legacy metadata format, which lacks the creation datetime.
    use_legacy_format = random_bool()
    if use_legacy_format:
        del metadata["keystore_creation_datetime"]
        metadata_file = _get_keystore_metadata_file_path(authdevice["authenticator_dir"])
        dump_to_json_file(metadata_file, metadata)
        extra_fields_schema = {}
    remote_keystore = FilesystemKeystore(remote_keystore_dir)
    keychain_uid = generate_uuid0()
    key_algo = "RSA_OAEP"
    remote_keystore.set_keypair(keychain_uid=keychain_uid, key_algo=key_algo, public_key=b"555", private_key=b"okj")
    # Export WITH private keys included.
    keystore_tree = remote_keystore.export_to_keystore_tree(include_private_keys=True)
    assert keystore_tree == {
        "keypairs": [
            {"key_algo": "RSA_OAEP", "keychain_uid": keychain_uid, "private_key": b"okj", "public_key": b"555"}
        ],
        "keystore_format": "keystore_1.0",
        "keystore_owner": "Jean-Jâcques",
        "keystore_passphrase_hint": "my-hint",
        "keystore_secret": ANY,
        "keystore_type": "authenticator",
        "keystore_uid": ANY,
        **extra_fields_schema,
    }
    if "keystore_creation_datetime" in extra_fields_schema:
        assert isinstance(keystore_tree["keystore_creation_datetime"], datetime)
    # Export WITHOUT private keys: they must be replaced by None.
    keystore_tree = remote_keystore.export_to_keystore_tree(include_private_keys=False)
    assert keystore_tree == {
        "keypairs": [{"key_algo": "RSA_OAEP", "keychain_uid": keychain_uid, "private_key": None, "public_key": b"555"}],
        "keystore_format": "keystore_1.0",
        "keystore_owner": "Jean-Jâcques",
        "keystore_passphrase_hint": "my-hint",
        "keystore_secret": ANY,
        "keystore_type": "authenticator",
        "keystore_uid": ANY,
        **extra_fields_schema,
    }
    if "keystore_creation_datetime" in extra_fields_schema:
        assert isinstance(keystore_tree["keystore_creation_datetime"], datetime)
    # An invalid algo must make export fail schema validation.
    with pytest.raises(SchemaValidationError):
        # FIXME - set_keypair() should actually validate data too
        remote_keystore.set_keypair(
            keychain_uid=keychain_uid, key_algo="bad_algo", public_key=b"555", private_key=b"okj"
        )
        remote_keystore.export_to_keystore_tree()  # Raises because of bad_algo
    # Create uninitialized keystore (no metadata file) and try to export it
    authdevice_path = tmp_path / "device2"
    authdevice_path.mkdir()
    authdevice = get_fake_authdevice(authdevice_path)
    remote_keystore_dir = authdevice["authenticator_dir"]
    remote_keystore_dir.mkdir()
    remote_keystore = FilesystemKeystore(remote_keystore_dir)
    with pytest.raises(KeystoreDoesNotExist):
        remote_keystore.export_to_keystore_tree()
def test_keystore_import_from_keystore_tree(tmp_path: Path):
    """Exercise FilesystemKeystore.import_from_keystore_tree().

    Covers: initial import, idempotent update imports (which must NOT
    overwrite existing metadata or public keys), keystore_uid mismatch
    rejection, and schema validation failures (extra key, bad value type,
    missing mandatory key).
    """
    authdevice_path = tmp_path / "device"
    authdevice_path.mkdir()
    filesystem_keystore = FilesystemKeystore(authdevice_path)
    keystore_uid = generate_uuid0()
    keychain_uid = generate_uuid0()
    keychain_uid_bis = generate_uuid0()
    key_algo = "RSA_OAEP"
    keystore_secret = secrets.token_urlsafe(64)
    # First tree has a public key only; second adds private keys and a new keypair
    keypairs1 = [{"keychain_uid": keychain_uid, "key_algo": key_algo, "public_key": b"555", "private_key": None}]
    keypairs2 = [
        {"keychain_uid": keychain_uid, "key_algo": key_algo, "public_key": b"8888", "private_key": b"okj"},
        {"keychain_uid": keychain_uid_bis, "key_algo": key_algo, "public_key": b"23", "private_key": b"3234"},
    ]
    keystore_tree = {
        # These are MANDATORY FIELDS
        "keystore_type": "authenticator",
        "keystore_format": KEYSTORE_FORMAT,
        "keystore_owner": "Jacques",
        "keystore_uid": keystore_uid,
        "keypairs": keypairs1,
    }
    # Optional fields are included at random so both schema variants get exercised
    use_keystore_secret = random_bool()
    if use_keystore_secret:
        keystore_tree["keystore_secret"] = keystore_secret
    use_keystore_creation_datetime = random_bool()
    if use_keystore_creation_datetime:
        keystore_tree["keystore_creation_datetime"] = get_utc_now_date()
    use_keystore_passphrase_hint = random_bool()
    if use_keystore_passphrase_hint:
        keystore_tree["keystore_passphrase_hint"] = secrets.token_urlsafe(20)
    keystore_tree_metadata = keystore_tree.copy()
    del keystore_tree_metadata["keypairs"]
    # Initial import
    updated = filesystem_keystore.import_from_keystore_tree(keystore_tree)
    assert not updated
    metadata = filesystem_keystore.get_keystore_metadata()
    def _check_loaded_metadata_coherence(loaded_metadata):
        # Quick checks
        assert loaded_metadata["keystore_uid"] == keystore_uid
        assert loaded_metadata["keystore_owner"] == "Jacques"
        assert len(loaded_metadata) == len(keystore_tree_metadata)
        for (key, value) in keystore_tree_metadata.items():
            if key == "keystore_creation_datetime":
                # Special case: precision trouble because Bson only goes down to milliseconds
                assert (value - loaded_metadata[key]) <= timedelta(milliseconds=1)
            else:
                assert loaded_metadata[key] == value
    _check_loaded_metadata_coherence(metadata)
    assert filesystem_keystore.list_keypair_identifiers() == [
        dict(keychain_uid=keychain_uid, key_algo=key_algo, private_key_present=False)
    ]
    assert filesystem_keystore.get_public_key(keychain_uid=keychain_uid, key_algo=key_algo) == b"555"
    with pytest.raises(KeyDoesNotExist):
        filesystem_keystore.get_private_key(keychain_uid=keychain_uid, key_algo=key_algo)
    # Update import
    for i in range(2):  # IDEMPOTENT
        keystore_tree["keystore_owner"] += "_corrupted"  # Corrupted, but not taken into account on update!
        keystore_tree["keypairs"] = keypairs2
        updated = filesystem_keystore.import_from_keystore_tree(keystore_tree)
        assert updated
        metadata = filesystem_keystore.get_keystore_metadata()
        _check_loaded_metadata_coherence(metadata)
        expected_keypair_identifiers = [
            dict(keychain_uid=keychain_uid, key_algo=key_algo, private_key_present=True),
            dict(keychain_uid=keychain_uid_bis, key_algo=key_algo, private_key_present=True),
        ]
        expected_keypair_identifiers.sort(key=lambda x: (x["keychain_uid"], x["key_algo"]))
        assert filesystem_keystore.list_keypair_identifiers() == expected_keypair_identifiers
        assert filesystem_keystore.get_public_key(keychain_uid=keychain_uid, key_algo=key_algo) == b"555"  # NOT changed
        assert filesystem_keystore.get_private_key(keychain_uid=keychain_uid, key_algo=key_algo) == b"okj"  # Added
    # Mismatch of keystore_uid
    keystore_tree["keystore_uid"] = generate_uuid0()
    with pytest.raises(ValidationError, match="Mismatch"):
        filesystem_keystore.import_from_keystore_tree(keystore_tree)
    # Corrupt the keystore_tree with extra key
    keystore_tree["keystore_stuff"] = 33
    with pytest.raises(SchemaValidationError):
        filesystem_keystore.import_from_keystore_tree(keystore_tree)
    del keystore_tree["keystore_stuff"]
    # Corrupt the keystore_tree with bad content
    keystore_tree["keystore_owner"] = 2242
    with pytest.raises(SchemaValidationError):
        filesystem_keystore.import_from_keystore_tree(keystore_tree)
    keystore_tree["keystore_owner"] = "Jacques"
    # Corrupt the keystore_tree with missing key
    del keystore_tree["keystore_owner"]
    with pytest.raises(SchemaValidationError):
        filesystem_keystore.import_from_keystore_tree(keystore_tree)
def test_keystorepool_export_and_import_foreign_keystore_to_keystore_tree(tmp_path: Path):
    """Round-trip a foreign keystore through a FilesystemKeystorePool.

    Imports the same keystore tree twice (the second import must report an
    update), reads metadata back, re-exports the keystore tree with private
    keys, and checks the stored keypairs match the exported tree.
    """
    keystore_uid = generate_uuid0()
    keychain_uid = generate_uuid0()
    key_algo = "RSA_OAEP"
    keystore_secret = secrets.token_urlsafe(64)
    keystore_tree = {
        "keystore_type": "authenticator",
        "keystore_format": KEYSTORE_FORMAT,
        "keystore_owner": "Jacques",
        "keystore_uid": keystore_uid,
        "keystore_secret": keystore_secret,
        "keypairs": [{"keychain_uid": keychain_uid, "key_algo": key_algo, "public_key": b"555", "private_key": b"okj"}],
    }
    authdevice_path = tmp_path / "device"
    authdevice_path.mkdir()
    for idx in range(2):  # Import is idempotent
        keystore_pool = FilesystemKeystorePool(authdevice_path)
        updated = keystore_pool.import_foreign_keystore_from_keystore_tree(keystore_tree)
        assert updated == bool(idx)  # Second import is an update
        metadata = keystore_pool.get_foreign_keystore_metadata(keystore_uid)
        # We test this utility along the way
        all_metadata = keystore_pool.get_all_foreign_keystore_metadata()
        assert all_metadata == {metadata["keystore_uid"]: metadata}  # Only a single entry here
        # Re-export (keystore_tree is rebound: next loop iteration re-imports the export)
        keystore_tree = keystore_pool.export_foreign_keystore_to_keystore_tree(
            metadata["keystore_uid"], include_private_keys=True
        )
        foreign_keystore = keystore_pool.get_foreign_keystore(keystore_uid)
        assert foreign_keystore.list_keypair_identifiers() == [
            dict(keychain_uid=keychain_uid, key_algo=key_algo, private_key_present=True)
        ]
        assert (
            foreign_keystore.get_public_key(keychain_uid=keychain_uid, key_algo=key_algo)
            == keystore_tree["keypairs"][0]["public_key"]
        )
        assert (
            foreign_keystore.get_private_key(keychain_uid=keychain_uid, key_algo=key_algo)
            == keystore_tree["keypairs"][0]["private_key"]
        )
def test_generate_free_keypair_for_least_provisioned_key_algo():
    """Check that free keypairs are generated for the least-provisioned algorithm
    first, that max_free_keys_per_algo caps generation, and that the key_algos
    parameter restricts which algorithms get provisioned.
    """
    generated_keys_count = 0
    def keygen_func(key_algo, serialize):
        # Stub generator: counts calls instead of doing real (slow) keygen
        nonlocal generated_keys_count
        generated_keys_count += 1
        return dict(private_key=b"someprivatekey", public_key=b"somepublickey")
    # Check the fallback on "all types of keys" for key_algos parameter
    keystore = InMemoryKeystore()
    for _ in range(4):
        res = generate_free_keypair_for_least_provisioned_key_algo(
            keystore=keystore,
            max_free_keys_per_algo=10,
            keygen_func=keygen_func,
            # no key_algos parameter provided
        )
        assert res
    assert keystore.get_free_keypairs_count("DSA_DSS") == 1
    assert keystore.get_free_keypairs_count("ECC_DSS") == 1
    assert keystore.get_free_keypairs_count("RSA_OAEP") == 1
    assert keystore.get_free_keypairs_count("RSA_PSS") == 1
    assert generated_keys_count == 4
    # Now test with a restricted set of key types
    keystore = InMemoryKeystore()
    restricted_key_algos = ["DSA_DSS", "ECC_DSS", "RSA_OAEP"]
    generated_keys_count = 0
    for _ in range(7):
        res = generate_free_keypair_for_least_provisioned_key_algo(
            keystore=keystore, max_free_keys_per_algo=10, keygen_func=keygen_func, key_algos=restricted_key_algos
        )
        assert res
    # 7 keys spread round-robin across the 3 restricted algos
    assert keystore.get_free_keypairs_count("DSA_DSS") == 3
    assert keystore.get_free_keypairs_count("ECC_DSS") == 2
    assert keystore.get_free_keypairs_count("RSA_OAEP") == 2
    assert generated_keys_count == 7
    for _ in range(23):
        res = generate_free_keypair_for_least_provisioned_key_algo(
            keystore=keystore, max_free_keys_per_algo=10, keygen_func=keygen_func, key_algos=restricted_key_algos
        )
        assert res
    # All three algos now saturated at the 10-key cap
    assert keystore.get_free_keypairs_count("DSA_DSS") == 10
    assert keystore.get_free_keypairs_count("ECC_DSS") == 10
    assert keystore.get_free_keypairs_count("RSA_OAEP") == 10
    assert generated_keys_count == 30
    # Saturated: no more keys can be generated at this cap
    res = generate_free_keypair_for_least_provisioned_key_algo(
        keystore=keystore, max_free_keys_per_algo=10, keygen_func=keygen_func, key_algos=restricted_key_algos
    )
    assert not res
    assert generated_keys_count == 30  # Unchanged
    # Raise the cap for only two algos; provisioning resumes for those
    for _ in range(7):
        generate_free_keypair_for_least_provisioned_key_algo(
            keystore=keystore, max_free_keys_per_algo=15, keygen_func=keygen_func, key_algos=["RSA_OAEP", "DSA_DSS"]
        )
    assert keystore.get_free_keypairs_count("DSA_DSS") == 14  # First in sorting order
    assert keystore.get_free_keypairs_count("ECC_DSS") == 10
    assert keystore.get_free_keypairs_count("RSA_OAEP") == 13
    assert generated_keys_count == 37
    # With a raised cap, ECC_DSS (now least provisioned) gets the next key
    res = generate_free_keypair_for_least_provisioned_key_algo(
        keystore=keystore, max_free_keys_per_algo=20, keygen_func=keygen_func, key_algos=restricted_key_algos
    )
    assert res
    assert keystore.get_free_keypairs_count("DSA_DSS") == 14
    assert keystore.get_free_keypairs_count("ECC_DSS") == 11
    assert keystore.get_free_keypairs_count("RSA_OAEP") == 13
    assert generated_keys_count == 38
    # A cap lower than current counts generates nothing
    res = generate_free_keypair_for_least_provisioned_key_algo(
        keystore=keystore, max_free_keys_per_algo=5, keygen_func=keygen_func, key_algos=restricted_key_algos
    )
    assert not res
    assert generated_keys_count == 38
def test_get_free_keypair_generator_worker():
    """Check the background free-keypair generator worker: partial progress
    after a short run, full provisioning after a long run, and the
    sleep-on-overflow behavior once all pools are full.

    NOTE(review): timing-based assertions — may be flaky on very slow machines.
    """
    generated_keys_count = 0
    keystore = InMemoryKeystore()
    def keygen_func(key_algo, serialize):
        # Stub generator with a small artificial delay so runs are measurable
        nonlocal generated_keys_count
        generated_keys_count += 1
        time.sleep(0.01)
        return dict(private_key=b"someprivatekey2", public_key=b"somepublickey2")
    worker = get_free_keypair_generator_worker(
        keystore=keystore, max_free_keys_per_algo=30, sleep_on_overflow_s=0.5, keygen_func=keygen_func
    )
    try:
        worker.start()
        time.sleep(0.5)
        worker.stop()
        worker.join()
        assert 10 < generated_keys_count < 50, generated_keys_count  # Not enough time to generate all
        worker.start()
        time.sleep(6)
        worker.stop()
        worker.join()
        assert (
            generated_keys_count == 120  # 4 key types for now
        ), generated_keys_count  # All keys had the time to be generated
        print("NEW START")
        start = time.time()
        worker.start()
        time.sleep(0.01)
        worker.stop()
        print("NEW STOP")
        worker.join()
        print("NEW JOINED")
        end = time.time()
        assert (end - start) > 0.4  # sleep-on-overflow occurred
    finally:
        # Ensure the background thread never outlives the test
        if worker.is_running:
            worker.stop()
|
import utils
import json
from umls_api.Authentication import Authentication
import requests
import json
class UMLSAPI(object):
    """Thin client for the UMLS REST API (uts-ws.nlm.nih.gov).

    Caches the ticket-granting ticket (TGT) lazily and obtains a fresh
    single-use service ticket per request via the project's Authentication
    helper.
    """

    api_url = "https://uts-ws.nlm.nih.gov/rest"

    def __init__(self, api_key):
        self._tgt = None  # cached ticket-granting ticket, fetched lazily
        self._api_key = api_key
        self._auth = Authentication(api_key)

    @property
    def tgt(self):
        """Lazily fetch and cache the ticket-granting ticket."""
        if self._tgt is None:
            self._tgt = self._auth.gettgt()
        return self._tgt

    def invalidate_tgt(self):
        """Drop the cached TGT so the next request fetches a fresh one."""
        self._tgt = None

    def get_st(self):
        """Return a fresh single-use service ticket."""
        return self._auth.getst(self.tgt)

    def match_term(self, term):
        """Search the current UMLS release for *term*.

        Returns:
            list of result objects from the search endpoint.
        """
        query = {'ticket': self.get_st(), 'string': term}
        content_endpoint = self.api_url + '/search/current'
        r = requests.get(content_endpoint, params=query)
        r.encoding = 'utf-8'
        # (Py2-only debug "print r.text" removed)
        items = json.loads(r.text)
        return list(items["result"]['results'])

    def get_atoms(self, cui):
        """Return the AUIs of all atoms belonging to concept *cui*."""
        content_endpoint = self.api_url + ('/content/current/CUI/%s/atoms' % cui)
        return [o['ui'] for o in self.get_all_objects(content_endpoint)]

    def get_aui_descendants(self, aui):
        """Return the concept ids (last URL segment) of all descendants of atom *aui*.

        BUGFIX: the original ignored the *aui* argument and always queried the
        hard-coded atom 'A10134087'.
        """
        content_endpoint = self.api_url + ('/content/current/AUI/%s/descendants' % aui)
        return [o['concept'][o['concept'].rfind('/') + 1:] for o in self.get_all_objects(content_endpoint)]

    def get_cui_descendants(self, cui):
        """Return the descendants of every atom of concept *cui*.

        BUGFIX: the original built the list but never returned it (implicitly
        returned None).
        """
        descendants = []
        for aui in self.get_atoms(cui):
            descendants += self.get_aui_descendants(aui)
        return descendants

    def get_narrower_concepts(self, cui):
        """Return (related concept id, relation label) pairs for relations of *cui*
        whose label is 'RB'.

        NOTE(review): per the UMLS docs 'RB' means "has a broader relationship";
        the function name implies the related concepts are narrower — confirm
        the intended direction against the API documentation.
        """
        content_endpoint = self.api_url + ('/content/current/CUI/%s/relations' % cui)
        return [(c['relatedId'][c['relatedId'].rfind('/') + 1:], c['relationLabel'])
                for c in self.get_all_objects(content_endpoint) if c['relationLabel'] == 'RB']

    def get_all_objects(self, content_endpoint):
        """Fetch every page of *content_endpoint* and concatenate the 'result' lists."""
        objects = []
        obj = self.get_object(content_endpoint)
        objects += obj['result']
        for i in range(2, obj['pageCount'] + 1):
            objects += self.get_object(content_endpoint, page_number=i)['result']
        return objects

    def get_object(self, uri, page_number=1):
        """GET one page of *uri* (with a fresh service ticket) and parse the JSON body."""
        content = requests.get(uri, params={'ticket': self.get_st(), 'pageNumber': page_number}).content
        return json.loads(content)
def align_mapped_concepts(map_file, disorder_file):
    """Print (and return) the concept mapped to each disorder.

    Disorders absent from the concept map are mapped to "".

    Args:
        map_file: path to a JSON file mapping disorder name -> concept.
        disorder_file: path to a text file with one disorder name per line.

    Returns:
        dict mapping each disorder to its concept ("" when unmapped).
    """
    concept_map = utils.load_json_data(map_file)
    disorders = [d.strip() for d in utils.read_text_file(disorder_file)]
    # dict.get with a default replaces the original per-disorder if/else
    exact_mapped = {d: concept_map.get(d, "") for d in disorders}
    print(json.dumps(exact_mapped))  # was a Py2 print statement
    return exact_mapped
if __name__ == "__main__":
    # align_mapped_concepts('./resources/autoimmune-concepts.json', './resources/auto_immune_gazetteer.txt')
    # SECURITY NOTE(review): hard-coded UMLS API key committed to source —
    # move it to an environment variable and rotate the key.
    umls = UMLSAPI('148475b7-ad37-4e15-95a0-ff4d4060c132')
    # rets = umls.match_term('Diabetes Mellitus')
    # cui = rets[0]['ui']
    # print cui
    # Expand one level of narrower concepts starting from CUI C0023895
    subconcepts = umls.get_narrower_concepts('C0023895')
    print len(subconcepts), json.dumps(subconcepts)
    next_scs = set([c[0] for c in subconcepts])
    for sc in subconcepts:
        local_scs = umls.get_narrower_concepts(sc[0])
        next_scs |= set([c[0] for c in local_scs])
        print len(local_scs)
    print 'total concepts: %s' % len(next_scs), json.dumps(list(next_scs))
    # print umls.get_object('https://uts-ws.nlm.nih.gov/rest/search/current?string=fracture')
|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
__author__ = "golden"
__date__ = '2018/6/26'
from .spider import SpiderManager
from .process import Multiprocessing
|
import nltk
from nltk.corpus import wordnet
import spacy
# lst_events = ['appears_static_object', 'vehicle_1_comes_near', 'vehicle_1_moves_far', 'vehicle_2_comes_near',
# 'vehicle_2_moves_far', 'vehicle_3_comes_near', 'vehicle_3_moves_far']
#
# for adv_pr in lst_events:
# adv_pr_str = nltk.word_tokenize(' '.join(adv_pr.split('_')))
# print(nltk.pos_tag(adv_pr_str))
#
# pos_adv_0 = 'near'
# pos_adv_1 = 'far'
#
# # ctrlsyn = wordnet.synset('near.a.01')
# # statesyn = wordnet.synset('far.a.01')
# # path_sim = ctrlsyn.path_similarity(statesyn)
# #
# # print(path_sim)
#
# sy1 = wordnet.synsets('long')
# sy2 = wordnet.synsets('short')
#
# # for s in sy1:
# # for t in sy2:
# # print("%s\t %s\t :%s" % (s.name,t.name,wordnet.path_similarity(s,t)))
# nlp = spacy.load("en_core_web_md") # make sure to use larger model!
# tokens = nlp("detect notice")
#
# for token1 in tokens:
# for token2 in tokens:
# print(token1.text, token2.text, token1.similarity(token2))
# w1 = set('The car detected the obstacle')
# w2 = set('obstacle noticed')
# w3 = set('car noticed')
#
# print('JACCARD DISTANCE obj ', nltk.jaccard_distance(w1, w2))
# print('JACCARD DISTANCE car ', nltk.jaccard_distance(w1, w3))
# Print spaCy's human-readable gloss for the dependency label 'nsubj'
print(spacy.explain('nsubj'))
import unittest
from datetime import datetime
from flask import current_app
from app import create_app, db
from app.models import Company, Executive, Compensation, NonEquityPayment
class ExecutiveModelTestCase(unittest.TestCase):
    """Tests for the Executive model's golden-parachute computations.

    setUp builds one company with four executives, yearly compensation
    history, and non-equity payments; each test then checks a derived
    property (base amount, parachute threshold, payment totals).
    """

    def setUp(self):
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        self.create_company()
        self.create_executives()
        self.add_executive_compensation()
        self.add_non_equity_payments()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def create_company(self):
        # Ensure a clean table so the company gets id 1
        Company.query.delete()
        company = Company(name = 'abc', transaction_date = datetime(2020, 12, 1), per_share_deal_price = 55.55)
        db.session.add(company)
        db.session.commit()

    def create_executives(self):
        company = Company.query.get(1)
        executive_1 = Executive(company = company, name = 'John', title = 'CEO', start_date = datetime(2010,1,1))
        executive_2 = Executive(company = company, name = 'Paul', title = 'COO', start_date = datetime(2018,7,1))
        executive_3 = Executive(company = company, name = 'George', title = 'CFO', start_date = datetime(2018,7,1), first_year_non_recurring_compensation = 10000)
        executive_4 = Executive(company = company, name = 'Ringo', title = 'CTO', start_date = datetime(2020,7,1))
        executives = [executive_1, executive_2, executive_3, executive_4]
        for executive in executives:
            db.session.add(executive)
        db.session.commit()

    def add_executive_compensation(self):
        # (year, amount) history per executive, indexed by executive id - 1
        executive_1_compensation = [(2019, 500000), (2018, 500000), (2017, 500000), (2016, 400000), (2015, 100000)]
        executive_2_compensation = [(2019, 200000), (2018, 100000)]
        executive_3_compensation = [(2019, 200000), (2018, 100000)]
        executive_4_compensation = [(2020, 100000)]
        compensation = [executive_1_compensation, executive_2_compensation, executive_3_compensation, executive_4_compensation]
        for i, executive in enumerate(compensation):
            for year in executive:
                db.session.add(Compensation(executive = Executive.query.get(i+1), year = year[0], compensation = year[1]))
        db.session.commit()

    def add_non_equity_payments(self):
        # (executive, amount, description, reasonable-before, reasonable-after)
        payment_details = [(Executive.query.get(1), 500000, 'Bonus 1', False, False),
                           (Executive.query.get(1), 500000, 'Bonus 2', False, False),
                           (Executive.query.get(2), 100000, 'Bonus 1', False, False),
                           (Executive.query.get(2), 100000, 'Bonus 2', True, False),
                           (Executive.query.get(3), 100000, 'Bonus 1', False, False),
                           (Executive.query.get(3), 100000, 'Bonus 2', False, True)]
        for payment in payment_details:
            db.session.add(NonEquityPayment(executive = payment[0],
                                            amount = payment[1],
                                            description = payment[2],
                                            reasonable_compensation_before_change = payment[3],
                                            reasonable_compensation_after_change = payment[4]))
        db.session.commit()

    def test_company_executive_relationship(self):
        executive_1 = Executive.query.get(1)
        self.assertTrue(executive_1.company.name == 'abc')

    def test_base_amount(self):
        executive_1 = Executive.query.get(1)
        executive_2 = Executive.query.get(2)
        executive_3 = Executive.query.get(3)
        executive_4 = Executive.query.get(4)
        self.assertTrue(abs(executive_1.base_amount - 400000) < 0.01)
        self.assertTrue(abs(executive_2.base_amount - 199184.78) < 0.01)
        self.assertTrue(abs(executive_3.base_amount - 194266.30) < 0.01)
        self.assertTrue(abs(executive_4.base_amount - 237662.34) < 0.01)

    def test_parachute_threshold(self):
        executive_1 = Executive.query.get(1)
        # BUGFIX: the original asserted `threshold - 1200000 < 0.01` without
        # abs(), which passes for ANY threshold below ~1200000. Use abs() like
        # the sibling assertions in test_base_amount.
        self.assertTrue(abs(executive_1.parachute_threshold - 1200000) < 0.01)

    def test_total_non_equity_payments(self):
        executive_1 = Executive.query.get(1)
        executive_2 = Executive.query.get(2)
        executive_3 = Executive.query.get(3)
        self.assertTrue(executive_1.total_non_equity_payments == 1000000)
        self.assertTrue(executive_2.total_non_equity_payments == 200000)
        self.assertTrue(executive_3.total_non_equity_payments == 200000)

    def test_reasonable_compensation_amounts(self):
        executive_1 = Executive.query.get(1)
        executive_2 = Executive.query.get(2)
        executive_3 = Executive.query.get(3)
        self.assertTrue(executive_1.total_reasonable_compensation_before_change == 0)
        self.assertTrue(executive_1.total_reasonable_compensation_after_change == 0)
        self.assertTrue(executive_2.total_reasonable_compensation_before_change == 100000)
        self.assertTrue(executive_2.total_reasonable_compensation_after_change == 0)
        self.assertTrue(executive_3.total_reasonable_compensation_before_change == 0)
        self.assertTrue(executive_3.total_reasonable_compensation_after_change == 100000)
import numpy as np
def generate_data(num_point, cluster_center):
    """Generate *num_point* 2-D points ~ N(0, I) shifted by *cluster_center*.

    Args:
        num_point: number of points to generate.
        cluster_center: length-2 sequence (or array) giving the cluster mean.

    Returns:
        (num_point, 2) ndarray of points centred on cluster_center.
    """
    offsets = np.random.randn(num_point, 2)
    # FIX: the original built this array but then added the raw cluster_center
    # instead (equivalent via broadcasting, but the variable was dead code).
    center = np.array(cluster_center)
    return offsets + center
def construct_cluster(num_points, clusters):
    """Constructs num_points number of points at the specified cluster locations.

    Clusters should be a list of tuples of x and y coordinates.

    Returns:
        (num_points * len(clusters), 2) ndarray, clusters stacked in order.
    """
    # BUGFIX: the original used Python-2-only xrange(); iterate the remaining
    # centres directly instead of indexing.
    data = generate_data(num_points, clusters[0])
    for center in clusters[1:]:
        data = np.vstack((data, generate_data(num_points, center)))
    return data
if __name__ == "__main__":
    # Three clusters of 10 points each, centred at (2,1), (5,7) and (-4,-4)
    data = construct_cluster(10, [(2,1), (5,7), (-4, -4)])
    np.savetxt("data.txt", data)
    # Random initial centres (e.g. for a k-means style consumer of these files)
    np.savetxt("center.txt", np.random.randn(3, 2)*4)
|
import cv2
import os
def detect_face(img):
    """Detect the first frontal face in a BGR image.

    Args:
        img: BGR image array (as returned by cv2.imread).

    Returns:
        (face_roi, rect): grayscale crop of the first detected face and its
        (x, y, w, h) rectangle, or (None, None) when no face is found.
    """
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier(os.path.join(cv2.haarcascades, "haarcascade_frontalface_default.xml"))
    faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=5)
    if len(faces) == 0:
        return None, None
    x, y, w, h = faces[0]
    # BUGFIX: rows are indexed by height and columns by width; the original
    # sliced [y:y+w, x:x+h] (only accidentally correct because frontal-face
    # cascade detections are square).
    return gray_img[y:y + h, x:x + w], faces[0]
|
# -*- coding: utf-8 -*-
import sys
# Python 2 idiom: force UTF-8 as the process-wide default string encoding so
# implicit str/unicode conversions of non-ASCII input do not raise.
# NOTE(review): reload()/setdefaultencoding do not exist in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
def ex1():
    # Read one line, then print it raw, tokenised, and sorted (Python 2 syntax)
    s = raw_input()
    print s
    s = s.split()
    print s
    s.sort()
    print s
if __name__ == '__main__':
    ex1()
|
#coding=utf-8
import requests
import re
import time
import random
import os
from bs4 import BeautifulSoup
# Start a new session every time to keep the cache update in runtime.
# The Session also persists cookies and connection pooling across all
# get_html() calls below.
s = requests.Session()
def get_html(url):
    '''Send http request
    Args:
        url: the web url need to fetch
    Return:
        html: the webpage text
    '''
    # Sometimes time gap may be needed between each request
    # time.sleep(5+random.random()*5)
    # BUGFIX: the original used a SET literal {'copy your header here'}, but
    # requests expects a mapping of header names to values. Fill in your
    # Cookie / User-Agent here before crawling.
    head = {}  # e.g. {'Cookie': '...', 'User-Agent': '...'}
    html = s.get(url, headers=head).text
    return html
def find_link(html):
    '''Get related links
    Find the needed links on the main page, judge whether blocked.
    Args:
        html: webpage returned by get_html()
    Return:
        links: (sales_link, neighbors_link), or (0, 0) on block/invalid page
    '''
    # Some websites have auto-block system, judge it by identifying special strings
    if re.search(r'distilIdentificationBlock',html):
        print "Error! Blocked!"
        return 0, 0
    else:
        sales_link = re.search(r'nearbyDiv.load\(Utils.AppPath(.*?),',html)
        #Some webpages don't have such links
        if not sales_link:
            print "Error! Invalid!"
            return 0,0
        # Strip the surrounding quote/prefix characters captured by the regex
        sales_link = "http://www.mlsli.com"+str(sales_link.group(1))[4:-1]
        neighbors_link = re.search(r'https:\/\/www.rdesk.com\/(.*?);',html)
        # NOTE(review): no None-check here — raises AttributeError if the
        # rdesk.com link is absent from the page.
        neighbors_link = str(neighbors_link.group())[:-2]
        print "Success"
        return sales_link, neighbors_link
def craw_main(f, html):
    '''Craw the main page
    Get the information on the main page and output to file.
    Args:
        f: output stream
        html: webpage returned by get_html()
    Output:
        Write info to file
    Raise:
        IndexError: some tag organized differently.
    '''
    # Address pieces: street comes from a regex, the rest from itemprop spans
    street = re.search(r'full-address.*inline">(.*?),',html)
    street = str(street.group(1))
    soup = BeautifulSoup(html,"lxml")
    city = soup.select('span[itemprop="addressLocality"]')[0].string.encode('utf-8')
    state = soup.select('span[itemprop="addressRegion"]')[0].string.encode('utf-8')
    postcode = soup.select('span[itemprop="postalCode"]')[0].string.encode('utf-8')
    status = soup.select('span[class="ld-status"]')[0].select('span')[0].string.encode('utf-8')
    # Price is sometimes nested one span deeper
    try:
        price = soup.select('span[class="price"]')[0].string.encode('utf-8')
    except:
        price = soup.select('span[class="price"]')[0].select('span')[0].string.encode('utf-8')
    bed_bath = soup.select('div[class="bed-baths"]')[0].text.encode('utf-8')
    MLS_num = soup.select('div[class="listing-number"]')[0].text.encode('utf-8')
    # Some webpages don't have summary
    if soup.select('div[class="summary-remarks"]'):
        summary = soup.select('div[class="summary-remarks"]')[0].text.encode('utf-8')
    else:
        summary = ""
    basic_info = [street, city, state, postcode, status, price, bed_bath, MLS_num, summary]
    # Each section below is parsed the same way: split each cell on ':' into
    # a [label, value] pair and keep pairs with a non-empty label.
    _list_summary = soup.select('div[class="summary-additional details-info"]')[0].select('div')
    list_summary = []
    for item in _list_summary:
        label = item.text.encode('utf-8').replace("\n","").split(':')
        if label[0]:
            label[1] = label[1].lstrip()
            list_summary.append(label)
    _list_info = soup.select('table[class="details-info-table1"]')[0].select('td')
    list_info = []
    for item in _list_info:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").split(':')
        if label[0]:
            label[1] = label[1].lstrip()
            list_info.append(label)
    _room_info = soup.select('div[id="listingdetail-roominfo"]')[0].select('div[class="details-3-per-row details-text-data"]')
    room_info = []
    for item in _room_info:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").split(':')
        if label[0]:
            label[1] = label[1].lstrip()
            room_info.append(label)
    _int_info = soup.select('div[id="listingdetail-interiorfeatures1"]')[0].select('div[class="details-3-per-row details-text-data"]')
    int_info = []
    for item in _int_info:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").split(':')
        if label[0]:
            label[1] = label[1].lstrip()
            int_info.append(label)
    _ext_info = soup.select('div[id="listingdetail-exteriorfeatures"]')[2].select('div[class="details-3-per-row details-text-data"]')
    ext_info = []
    for item in _ext_info:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").split(':')
        if label[0]:
            label[1] = label[1].lstrip()
            ext_info.append(label)
    _fin_info = soup.select('div[id="listingdetail-financial"]')[0].select('div[class="details-3-per-row details-text-data"]')
    fin_info = []
    for item in _fin_info:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").split(':')
        if label[0]:
            label[1] = label[1].lstrip()
            fin_info.append(label)
    # "Other" values may themselves contain ':', so split on the FIRST colon only
    _other_info = soup.select('div[id="listingdetail-financial"]')[1].select('div[class="details-1-per-row details-text-data"]')
    other_info = []
    for item in _other_info:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","")
        sp = label.index(":")
        label = [label[:sp+1],label[sp+1:]]
        if label[0]:
            label[1] = label[1].lstrip()
            other_info.append(label)
    # Python 2 "print >> f" writes each section to the output stream
    print >> f, "Basic Info:\n", basic_info, "\n"
    print >> f, "Listing Summary:\n", list_summary, "\n"
    print >> f, "Listing Information:\n", list_info, "\n"
    print >> f, "Room Information:\n", room_info, "\n"
    print >> f, "Interior Features / Utilities:\n", int_info, "\n"
    print >> f, "Exterior / Lot Features:\n", ext_info, "\n"
    print >> f, "Financial Considerations:\n", fin_info, "\n"
    print >> f, "Other:\n", other_info, "\n"
def craw_sales(f, sales_link):
    '''Craw the sales page
    Get the information on the sales page and output to file.
    Some website have block system, may use some auto-operate software to download the html files then crawl the local webpages.
    Args:
        f: output stream
        sales_link: needed link returned by find_link
    Output:
        Write info to file
    Raise:
        IndexError: some tag organized differently.
    '''
    print "Getting Sales"
    # Overwrite sales_link, crawl local downloaded webpages.
    # (last 9 chars of the remote link are the listing id used as local filename)
    sales_link = "http://localhost/saleslink/"+sales_link[-9:]+".html"
    print sales_link
    html = get_html(sales_link)
    # time.sleep(25+random.random()*5)
    soup = BeautifulSoup(html,"lxml")
    # Table 0: nearby recent sales; table 1: price history
    tables = soup.select('table[class="price-history-tbl"]')
    _sales_thead = tables[0].select('thead th')
    sales_thead =[]
    for item in _sales_thead:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","")
        sales_thead.append(label)
    sales_thead[0] = '#'
    _sales_table = tables[0].select('tbody tr')
    sales_table = []
    for row in _sales_table:
        _trow = row.select('td')
        trow = []
        for item in _trow:
            label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").strip().lstrip()
            trow.append(label)
        sales_table.append(trow)
    print "Get sales table"
    _price_thead = tables[1].select('thead th')
    price_thead =[]
    for item in _price_thead:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","")
        price_thead.append(label)
    _price_table = tables[1].select('tbody tr')
    price_table = []
    for row in _price_table:
        _trow = row.select('td')
        trow = []
        for item in _trow:
            # \xc2\xa0 is a UTF-8 encoded non-breaking space
            label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").replace("\xc2\xa0","").replace(" ","").strip().lstrip()
            trow.append(label)
        price_table.append(trow)
    print "Get price table"
    # Property-tax history lives in its own table class
    tables = soup.select('table[class="price-history-tbl property-tax-history-tbl"]')[0]
    _tax_thead = tables.select('thead th')
    tax_thead =[]
    for item in _tax_thead:
        label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","")
        tax_thead.append(label)
    _tax_table = tables.select('tbody tr')
    tax_table = []
    for row in _tax_table:
        _trow = row.select('td')
        trow = []
        for item in _trow:
            label = item.text.encode('utf-8').replace("\n","").replace("\r","").replace("\t","").replace("\xc2\xa0","").replace(" ","").strip().lstrip()
            trow.append(label)
        tax_table.append(trow)
    print "Get tax table"
    print >> f, "Nearby Recent Sales:\n", sales_thead, "\n", sales_table, "\n"
    print >> f, "Price History:\n", price_thead, "\n", price_table, "\n"
    print >> f, "Tax History:\n", tax_thead, "\n", tax_table, "\n"
def craw_neighbors(f, neighbors_link):
    '''Craw the sales page
    Get the information on the neighbors page and output to file.
    This is an api page, so no blocked.
    Args:
        f: output stream
        neighbors_link: needed link returned by find_link
    Output:
        Write info to file
    '''
    print "Getting Neighbors"
    # Re-construct neighbors_link to get different tabs data:
    # splice each tab name in place of the original ReportName=... value
    tab_name = ["HomeValues2","Demographics2", "Economy2", "SchoolsEducation", "Environment", "Commute"]
    sp = neighbors_link.index("ReportName=")
    neighbors_link1 = neighbors_link[:sp+11]
    neighbors_link2 = neighbors_link[sp+11:]
    sp = neighbors_link2.index("&")
    neighbors_link2 = neighbors_link2[sp:]
    for name in tab_name:
        neighbors_url = neighbors_link1+name+neighbors_link2
        html = get_html(neighbors_url)
        # time.sleep(2+random.random()*3)
        # Each tab embeds its payload as a JS array assignment: ".Data = [...];"
        data = re.findall(r'.Data = \[(.*?)\];',html)
        # Descriptions show at the end of every row, hidden on webpages
        for item in data:
            _item = item.encode('utf-8')
            print >> f, name, ":\n", _item, "\n"
        print "Get tab "+str(tab_name.index(name))
def get_url(path):
    '''Get urls from seperate files

    Collects at most the first 10 lines of each regular file in *path*
    (all links after the 10th were duplicates in the original data set),
    writes the combined list to "urls.txt" in the current directory, and
    returns it.

    Args:
        path: the path of files
    Returns:
        urls: list of urls
    '''
    urls = []
    for name in os.listdir(path):
        # NOTE(review): as in the original, isdir() is checked against the bare
        # name (relative to the CWD), not os.path.join(path, name).
        if os.path.isdir(name):
            continue
        # FIX: the original never closed these per-file handles (leak);
        # use context managers throughout.
        with open(path + "/" + name) as fh:
            for i, line in enumerate(fh):
                if i > 9:  # Only get the first 10 links, all repeated after 10
                    break
                urls.append(line.strip())
    print("Total urls: ", len(urls))
    # FIX: dead reassignment `path = "Your url"` removed; output file is closed
    with open("urls.txt", "w") as out:
        for url in urls:
            out.write(url + "\n")
    return urls
def read_url(file):
    '''Get urls from single file

    Args:
        file: the path of the single file (one url per line)
    Returns:
        urls: list of stripped url strings
    '''
    # FIX: the original opened the file without ever closing it; use a
    # context manager, and a comprehension instead of manual append.
    with open(file, "r") as f:
        urls = [line.strip() for line in f]
    print("Total urls: ", len(urls))
    return urls
def rename_saleslink():
    '''Rename the downloaded sales pages
    The downloaded filename format may be not able to request from localhost
    '''
    # NOTE(review): src is a placeholder; must end with a path separator,
    # since the code concatenates src+i rather than using os.path.join.
    src = "Your path of downloaded sales pages"
    filelist = os.listdir(src)
    for i in filelist:
        if i[:4] == "http":
            # Extract the listing id from the saved filename; the space before
            # ".htm" in the pattern matches the downloader's naming scheme —
            # presumably "…listingid=XXXX .htm…"; confirm against real files.
            h_id = re.search(r'listingid=(.*?) .htm',i).group(1)
            os.rename(src+i,src+h_id+".html")
def main():
    '''Crawl every locally mirrored listing page and write one data/<i>.txt
    per page with main, sales, and neighbors information.
    '''
    path = "Your url file"
    filelist = os.listdir(path)
    # Store the index of error pages
    # NOTE(review): this list is never read — kept as a manual log of pages
    # that failed on previous runs.
    error = [655,755,1129,1393,1471,2181,2402,2527,2683,2887,2934,2958,3203]
    # Start from breakpoint
    for i in range(3204,len(filelist)):
        print i
        print filelist[i]
        # Request from local files
        url = "http://localhost/html/"+filelist[i]
        html = get_html(url)
        (sales_link, neighbors_link) = find_link(html)
        file = "data/"+str(i)+".txt"
        f = open(file,"w")
        print >> f, url
        print >> f, sales_link
        print >> f, neighbors_link
        craw_main(f, html)
        # Sales/neighbors sections are best-effort: log and continue on failure
        try:
            craw_sales(f, sales_link)
        except:
            print >> f, "No Sales Data!"
        try:
            craw_neighbors(f, neighbors_link)
        except:
            # Raise a weird alarm noise when error, if you leave the machine alone.
            os.system('say "Error!"')
        f.close()
        # time.sleep(120+random.random()*60)
# Script entry point — runs immediately on import; consider guarding with
# `if __name__ == "__main__":`.
main()
from plot_cell import *
from readGEOMinfo import *
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.colors as col
import matplotlib.cm as cm
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import pdb
'''
INCLUDE VISUALIZATION OF SYNAPSES
'''
from readNRNdata import *
##############################################
# Read in Data
##############################################
# STDP spike-timing interval values (deltas) used during data collection:
# a coarse grid spanning [-50, 50] and a fine grid spanning [-5, 5].
# NOTE(review): fine_timesteps is not used anywhere in this script section.
np.set_printoptions(suppress=True)
coarse_timesteps = np.array([-50, -25, -15, -10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 10, 15, 25, 50])
fine_timesteps = np.arange(-5, 5.1, 0.1)
# set up vector of dendrites in sectionlists Proximal, middle, and distal
prox_seclist = np.array([0, 1, 2, 3, 5, 6, 7, 8, 9, 29, 42, 43, 44, 45, 46, 47, 54, 56])
midd_seclist = np.array([4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 139, 140, 141, 142, 147, 150, 151, 154, 155, 156, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 174, 175, 176, 177, 178, 179])
dist_seclist = np.array([92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 143, 144, 145, 146, 148, 149, 152, 153, 157, 158, 173])
# set up lists for storing read data
soma_syn = []
prox_syn = []
midd_syn = []
dist_syn = []
# One nrnReader per coarse timestep, for each synapse-insertion site
# (soma / proximal / middle / distal experiment files).
for i in range(len(coarse_timesteps)):
    soma_syn.append(nrnReader("./data/coarseExpts/soma_{0}.dat".format(coarse_timesteps[i])))
    prox_syn.append(nrnReader("./data/coarseExpts/proximal_{0}.dat".format(coarse_timesteps[i])))
    midd_syn.append(nrnReader("./data/coarseExpts/middle_{0}.dat".format(coarse_timesteps[i])))
    dist_syn.append(nrnReader("./data/coarseExpts/distal_{0}.dat".format(coarse_timesteps[i])))
# Number of recorded variables per dataset -- assumed equal across all
# files (only the first soma file is consulted).
num_var = soma_syn[0].num_variables
# Set up dictionaries for organizing imported data, based on where synaptic
# connections were present during data collection. The original repeated the
# same 24-line block four times (one per insertion site); it is factored
# into a single helper that produces an identical structure.
def _build_region_dict(syn_data):
    """Organize one synapse-location dataset into nested dicts.

    Layout: result["delta_<dt>"][<record region>][<section name>] is the
    list of the num_var recorded variables for that section, pulled from
    secList[0]=soma, [1]=proximal, [2]=middle, [3]=distal.

    NOTE(review): the original carried a "what is the secList[1]?" question
    on the soma entries -- the soma value is read from secList[0][0] here,
    exactly as before; confirm that index against readNRNdata.
    """
    out = dict()
    region_specs = (("prox", prox_seclist, 1),
                    ("midd", midd_seclist, 2),
                    ("dist", dist_seclist, 3))
    for i in range(len(coarse_timesteps)):
        key = "delta_{0}".format(coarse_timesteps[i])
        out[key] = {"soma": dict(), "prox": dict(), "midd": dict(), "dist": dict()}
        # Soma recording: single section, first entry of secList[0].
        out[key]["soma"]["soma"] = [syn_data[i].var_list.varList[k].secList[0][0]
                                    for k in range(num_var)]
        # Dendritic recordings: one entry per section id in each seclist.
        for region, seclist, sec_idx in region_specs:
            for j in range(len(seclist)):
                out[key][region]["dend_{0}".format(seclist[j])] = [
                    syn_data[i].var_list.varList[k].secList[sec_idx][j]
                    for k in range(num_var)]
    return out

## SYNAPSES INSERTED IN SOMA / PROXIMAL / MIDDLE / DISTAL DENDRITES:
# one dict per insertion site, all with the same nested layout.
somatic_list = _build_region_dict(soma_syn)
proximal_list = _build_region_dict(prox_syn)
middle_list = _build_region_dict(midd_syn)
distal_list = _build_region_dict(dist_syn)
def CA1_data_collection(section, variable):
    """Collect one recorded variable from every section of the cell.

    The original implemented the same loop four times (once per value of
    `section`); this version selects the dataset once and runs one loop.

    Args:
        section: synapse-insertion site selecting the dataset
            (0=soma, 1=proximal, 2=middle, 3=distal).
        variable: index into varList (which recorded quantity to pull).
    Returns:
        dict keyed by spike-timing delta; each value maps a section id to
        its recording (999 is the soma entry, dendrite ids come from the
        prox/midd/dist seclist vectors). Returns an empty dict for any
        other `section` value, matching the original behavior.
    """
    datasets = {0: soma_syn, 1: prox_syn, 2: midd_syn, 3: dist_syn}
    CA1_s_ecl = {}
    syn_data = datasets.get(section)
    if syn_data is None:
        return CA1_s_ecl
    for i in range(len(coarse_timesteps)):
        delta = coarse_timesteps[i]
        var = syn_data[i].var_list.varList[variable]
        # 999 is the soma sentinel id; its recording keeps the full vector.
        CA1_s_ecl[delta] = {999: var.secList[0]}
        for j in range(len(prox_seclist)):
            CA1_s_ecl[delta][prox_seclist[j]] = var.secList[1][j]
        for j in range(len(midd_seclist)):
            CA1_s_ecl[delta][midd_seclist[j]] = var.secList[2][j]
        for j in range(len(dist_seclist)):
            CA1_s_ecl[delta][dist_seclist[j]] = var.secList[3][j]
    return CA1_s_ecl
# Variable index 2 is Ca per the varList legend in the commented section
# below; section code 1 selects the proximal-synapse experiments.
CA1_s_ecl = CA1_data_collection(1,2)
'''
COME BACK TO FIX THIS SECTION
'''
#CA1_p = CA1_data_collection(1,0)
#CA1_m = CA1_data_collection(2,0)
#CA1_d = CA1_data_collection(3,0)
# Sanity check at delta t = -50 ms: pull one value per section (soma first,
# then dendrites 0..179) to eyeball the overall range. Python 2 prints.
test = np.zeros(181)
test[0] = CA1_s_ecl[-50][999][0][0]
for i in range(180):
    test[i+1] = CA1_s_ecl[-50][i][0]
print max(test)
print min(test)
# Read the cell geometry; each row of data_vec describes one 3D point.
cell_1 = geomReader("./CA1geometry.dat")
morpho = cell_1.data_vec
items = len(morpho[0,:])      # columns per geometry point
entries = len(morpho[:,0])    # total number of geometry points
# Section ids used throughout: 999 is the soma sentinel, 0..179 dendrites.
indicies = []
indicies.append(int(999))
for i in range(180):
    indicies.append(int(i))
# Group geometry points by section: column 1 holds the grouping index,
# columns 3-6 hold x, y, z, radius and column 2 an alternate id (N_ind).
# NOTE(review): this loop covers ids 0..180 while `indicies` uses 999 and
# 0..179 -- the later CA1 dict is re-keyed by N_ind, which presumably
# restores the 999/dendrite numbering; verify against readGEOMinfo.
sections_list = {}
for i in range(181):
    segments = list()
    for j in range(entries):
        py_ind = morpho[j, 1]
        if py_ind == i:
            (x,y,z,r,N_ind) = (morpho[j,3], morpho[j,4], morpho[j,5], morpho[j,6],int(morpho[j,2]))
            segments.append((x,y,z,r,N_ind))
    sections_list[i] = segments
# Re-pack each section's segment tuples into four parallel coordinate
# lists [x_vals, y_vals, z_vals, r_vals], keyed by the section's N_ind
# (element 4 of each segment tuple, i.e. column 2 of the geometry file).
# NOTE(review): sections_list[i][0] raises IndexError if a section has no
# points -- apparently every id 0..180 has at least one; confirm.
CA1 = {}
for i in range(len(sections_list)):
    sec_info = []
    x_vals = []
    y_vals = []
    z_vals = []
    r_vals = []
    for j in range(len(sections_list[i])):
        x_vals.append(sections_list[i][j][0])
        y_vals.append(sections_list[i][j][1])
        z_vals.append(sections_list[i][j][2])
        r_vals.append(sections_list[i][j][3])
    sec_info.append(x_vals)
    sec_info.append(y_vals)
    sec_info.append(z_vals)
    sec_info.append(r_vals)
    CA1[sections_list[i][0][4]] = sec_info
# Parent/child connectivity of the 180 dendritic sections. Each entry is
# (child_id, child_end, parent_id, attach_point): child_end is always 0
# (a section attaches at its own start), attach_point 1 means the distal
# end of the parent and 0.5 its midpoint. Only the two root sections
# (0 and 61) hang off the soma (sentinel id 999) at attach_point 0.5.
# Encoded as a flat parent table (index = child id) instead of the
# original 180 literal append() calls; the resulting list is identical.
_PARENTS = [
    999, 0, 1, 2, 3, 3, 5, 5, 2, 8,
    9, 10, 11, 11, 10, 14, 15, 15, 17, 17,
    19, 19, 21, 21, 14, 24, 25, 25, 24, 9,
    29, 30, 30, 32, 32, 29, 35, 35, 37, 38,
    38, 37, 8, 1, 0, 44, 44, 46, 47, 48,
    48, 47, 51, 51, 46, 54, 54, 56, 56, 58,
    58, 999, 61, 62, 63, 64, 65, 65, 64, 68,
    69, 69, 68, 72, 73, 73, 75, 76, 77, 77,
    76, 80, 80, 82, 83, 83, 85, 86, 87, 88,
    89, 90, 91, 92, 92, 94, 95, 96, 97, 98,
    99, 100, 100, 99, 103, 104, 105, 105, 104, 103,
    98, 110, 111, 111, 110, 114, 115, 115, 117, 117,
    114, 97, 96, 122, 123, 123, 125, 126, 126, 125,
    122, 95, 94, 132, 132, 91, 90, 89, 88, 87,
    139, 139, 141, 142, 142, 141, 86, 85, 147, 147,
    82, 150, 151, 151, 150, 154, 155, 156, 156, 155,
    159, 159, 154, 75, 72, 164, 165, 165, 164, 63,
    169, 170, 170, 172, 172, 169, 62, 61, 177, 177,
]
parent_child = [(child, 0, parent, 0.5 if parent == 999 else 1)
                for child, parent in enumerate(_PARENTS)]
# Map recorded values in [0, 0.1] onto the 'spring' colormap; `m` converts
# a scalar to an RGBA colour for the plots below.
norm = col.Normalize(vmin=0, vmax=0.1)
cmap = cm.spring
m = cm.ScalarMappable(norm=norm, cmap=cmap)
def plot_CA1(time):
    """Draw the CA1 morphology in 2D (x/y), colouring every segment by the
    value recorded for its section at spike-timing delta `time`.

    Fixes vs the original:
      * `mpl` was referenced for the colorbar without ever being imported
        (the file only imports matplotlib submodules) -> NameError; it is
        now imported locally.
      * len(...)/2 produced a float index under Python 3; floor division
        (identical result under Python 2) is used instead.
    """
    import matplotlib as mpl  # local: top level only imports plt/col/cm

    fig, ax = plt.subplots(figsize=(12, 12))
    # Draw each stored segment of every section.
    for i in range(len(indicies)):
        sec = CA1[indicies[i]]
        for j in range(len(sec[0]) - 1):
            if i == 0:
                # The soma entry holds a nested vector; unwrap twice.
                colour = m.to_rgba(CA1_s_ecl[time][indicies[i]][0][0])
            else:
                colour = m.to_rgba(CA1_s_ecl[time][indicies[i]][0])
            ax.plot([sec[0][j], sec[0][j + 1]], [sec[1][j], sec[1][j + 1]], c=colour)
            print(colour)
    # Join each child section's start to the attachment point on its parent.
    for i in range(len(parent_child)):
        child_sec = CA1[parent_child[i][0]]
        parent_sec = CA1[parent_child[i][2]]
        attach = parent_child[i][3]
        if attach == 1:
            end = len(parent_sec[0]) - 1
        elif attach == 0.5:
            # Floor division keeps the index an int under Python 3.
            end = len(parent_sec[0]) // 2 - 1
        else:
            print("Indexing Error")
            continue
        # NOTE(review): the colour lookup uses indicies[i] although i
        # indexes parent_child here -- looks like it should use the child
        # id; kept exactly as the original behaved.
        if i == 0:
            colour = m.to_rgba(CA1_s_ecl[time][indicies[i]][0][0])
        else:
            colour = m.to_rgba(CA1_s_ecl[time][indicies[i]][0])
        ax.plot([child_sec[0][0], parent_sec[0][end]],
                [child_sec[1][0], parent_sec[1][end]], c=colour)
    ax.set_title(r"$\Delta$t = %s ms" % time)
    # Manual colorbar axes on the right-hand side of the figure.
    ax1 = fig.add_axes([0.825, 0.175, 0.02, 0.65])
    cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm)
    cb1.set_label('$Ca^{2+}$ (mM)')
    #plt.show()
#plt.show()
#for i in range(len(coarse_timesteps)):
# plot_CA1(coarse_timesteps[i])
# save('./data/PythonPlots/KCC2_vol/Ca2+_proxsyn_%s' %coarse_timesteps[i], 'png')
#plot_CA1(-50)
#plt.show()
'''
Extras, may come back to edit but may discard
'''
############################################
## Cl Reversal Potential
############################################
#soma_ecl = {}
#prox_ecl = {}
#midd_ecl = {}
#dist_ecl = {}
#
#for i in range(len(coarse_timesteps)):
# soma_ecl[coarse_timesteps[i]] = soma_syn[i].var_list.varList[0].secList[0]
# prox_ecl[coarse_timesteps[i]] = prox_syn[i].var_list.varList[0].secList[1][0] #<name of file read in>.<list of variables>[Ecl=0/KCC2=1/Ca=2/Kin=3/phos=4].<list of sections>[soma=0/prox=1/midd=2/dist=3]
# ##### NOTE: secList indices 1,2,3 contain vectors with recordings from all sections ... how do we want to organize this data?
# midd_ecl[coarse_timesteps[i]] = midd_syn[i].var_list.varList[0].secList[2][0]
# dist_ecl[coarse_timesteps[i]] = dist_syn[i].var_list.varList[0].secList[3][0]
#
#print soma_ecl[-50]
############################################
## Membrane Phosphorylated KCC2 (proportion)
############################################
#soma_MP = np.zeros(len(coarse_timesteps))
#prox_MP = np.zeros(len(coarse_timesteps))
#midd_MP = np.zeros(len(coarse_timesteps))
#dist_MP = np.zeros(len(coarse_timesteps))
#
#for i in range(len(coarse_timesteps)):
# soma_MP[i] = soma_syn[i].var_list.varList[1].secList[0]
# prox_MP[i] = prox_syn[i].var_list.varList[1].secList[1][0] #<name of file read in>.<list of variables>[Ecl=0/KCC2=1/Ca=2/Kin=3/phos=4].<list of sections>[soma=0/prox=1/midd=2/dist=3]
# midd_MP[i] = midd_syn[i].var_list.varList[1].secList[2][0]
# dist_MP[i] = dist_syn[i].var_list.varList[1].secList[3][0]
#
#
############################################
## Calcium Concentration
############################################
#soma_cal = np.zeros(len(coarse_timesteps))
#prox_cal = np.zeros(len(coarse_timesteps))
#midd_cal = np.zeros(len(coarse_timesteps))
#dist_cal = np.zeros(len(coarse_timesteps))
#
#for i in range(len(coarse_timesteps)):
# soma_cal[i] = soma_syn[i].var_list.varList[1].secList[0]
# prox_cal[i] = prox_syn[i].var_list.varList[1].secList[1][0] #<name of file read in>.<list of variables>[Ecl=0/KCC2=1/Ca=2/Kin=3/phos=4].<list of sections>[soma=0/prox=1/midd=2/dist=3]
# midd_cal[i] = midd_syn[i].var_list.varList[1].secList[2][0]
# dist_cal[i] = dist_syn[i].var_list.varList[1].secList[3][0]
#
############################################
## Active Kinase (proportion)
############################################
#soma_kin = np.zeros(len(coarse_timesteps))
#prox_kin = np.zeros(len(coarse_timesteps))
#midd_kin = np.zeros(len(coarse_timesteps))
#dist_kin = np.zeros(len(coarse_timesteps))
#
#for i in range(len(coarse_timesteps)):
# soma_kin[i] = soma_syn[i].var_list.varList[1].secList[0]
# prox_kin[i] = prox_syn[i].var_list.varList[1].secList[1][0] #<name of file read in>.<list of variables>[Ecl=0/KCC2=1/Ca=2/Kin=3/phos=4].<list of sections>[soma=0/prox=1/midd=2/dist=3]
# midd_kin[i] = midd_syn[i].var_list.varList[1].secList[2][0]
# dist_kin[i] = dist_syn[i].var_list.varList[1].secList[3][0]
#
############################################
## Active Phosphatase (proportion)
############################################
#soma_phos = np.zeros(len(coarse_timesteps))
#prox_phos = np.zeros(len(coarse_timesteps))
#midd_phos = np.zeros(len(coarse_timesteps))
#dist_phos = np.zeros(len(coarse_timesteps))
#
#for i in range(len(coarse_timesteps)):
# soma_phos[i] = soma_syn[i].var_list.varList[1].secList[0]
# prox_phos[i] = prox_syn[i].var_list.varList[1].secList[1][0] #<name of file read in>.<list of variables>[Ecl=0/KCC2=1/Ca=2/Kin=3/phos=4].<list of sections>[soma=0/prox=1/midd=2/dist=3]
# midd_phos[i] = midd_syn[i].var_list.varList[1].secList[2][0]
# dist_phos[i] = dist_syn[i].var_list.varList[1].secList[3][0] |
# author: Jonathan Marple
from pyexcel_ods import get_data
from bisect import bisect_left
import praw
import time
import re
# Reading pokemon data from spreadsheet
data = get_data("Pokemon Weightlist.ods")
# Number of pokemon in list
numOfPokemon = 151
# The .ods payload is a {sheet_name: rows} mapping; grab the first sheet
# once -- the original re-evaluated list(data.values())[0] for every row
# of every column (4 * 151 times).
_sheet = list(data.values())[0]
# Column layout: 0 = dex number, 1 = name, 2 = weight (lb), 3 = weight (kg)
pokemonNums = [_sheet[i][0] for i in range(numOfPokemon)]
pokemonNames = [_sheet[i][1] for i in range(numOfPokemon)]
pokemonPounds = [_sheet[i][2] for i in range(numOfPokemon)]
pokemonKilograms = [_sheet[i][3] for i in range(numOfPokemon)]
# Function to find the index of the pokemon whose weight is closest to the
# weight input.
# @param weight amount to compare to pokemon
# @param units units to measure weight in ("lb" or "kg")
# @return index of the closest weight; -1 stands for the last entry (the
#         original returned -1 when weight exceeds the heaviest pokemon,
#         which callers then use as a normal negative index); None for an
#         unrecognized unit (implicit in the original).
def findClosestWeight(weight, units):
    # NOTE(review): bisect requires the weight lists to be sorted
    # ascending -- verify the spreadsheet ordering.
    if units == "lb":
        weights = pokemonPounds
    elif units == "kg":
        weights = pokemonKilograms
    else:
        return None
    pos = bisect_left(weights, weight)
    if pos == 0:
        return 0
    if pos == len(weights):
        return -1
    before = weights[pos - 1]
    after = weights[pos]
    # Pick whichever neighbour is strictly closer; ties go to the lighter one.
    return pos if after - weight < weight - before else pos - 1
# Function to verify str is a number by converting to float
# @param str string to test
# @return 1 if str CAN be converted to a number, 0 otherwise
#         (the original comment had the two return values swapped)
def checkVal(str):
    # NOTE: the parameter shadows the builtin `str`; kept for call
    # compatibility with existing callers.
    try:
        float(str)
        return 1
    except ValueError:
        return 0
# Strip a string down to ASCII letters only (digits, punctuation and
# whitespace are all removed).
# @param str string to modify
# @return modified string
def formatStr(str):
    letters_only = re.sub('[^A-Za-z]+', '', str)
    return letters_only
# Strip a string down to the characters valid in a decimal number
# (digits and the dot); everything else is removed.
# @param str string to modify
# @return modified string
def formatNum(str):
    digits_only = re.sub('[^0-9.]+', '', str)
    return digits_only
# Initialize PRAW with a custom User-Agent.
# NOTE(review): client_id/client_secret/username/password are blank --
# real credentials must be filled in before the bot can authenticate.
bot = praw.Reddit(user_agent='PokeWeight',
                  client_id='',
                  client_secret='',
                  username='',
                  password='')
# Assigning a subreddit ('all' streams site-wide comments)
subreddit = bot.subreddit('all')
# Input stream of comments on subreddit (endless blocking generator)
comments = subreddit.stream.comments()
# Looping through comments collected from the live stream (runs forever).
for comment in comments:
    # Skipping comments posted by bot's account
    # NOTE(review): the compared name is blank, like the credentials above
    # -- fill in the bot's username or this skip never triggers.
    if comment.author == "":
        continue
    # Dividing comment into individual strings
    commentParts = comment.body.split(" ")
    # Looping through strings in comment; i tracks the token index so the
    # preceding token can be checked for a number.
    i = 0
    for string in commentParts:
        currentWord = formatStr(string.lower())
        if currentWord == "lb" or currentWord == "lbs":
            # Preceding token must parse as a number to be a weight.
            if i > 0 and checkVal(formatNum(commentParts[i - 1])) == 1:
                lb = float(formatNum(commentParts[i - 1]))
                totalWeight = 0
                indexList = []
                # Greedily stack pokemon until within 5% of the target.
                # NOTE(review): findClosestWeight can return None for bad
                # units (not possible here) and -1 for overshoot; the -1
                # is deliberately used as "heaviest pokemon" below.
                while totalWeight < lb - (lb / 20): # Searching within 5% accuracy
                    indexList.append(findClosestWeight(float(lb - totalWeight), "lb"))
                    totalWeight += pokemonPounds[indexList[len(indexList) - 1]]
                message = "The closest pokemon to " + str(lb) + currentWord + " is "
                j = 0
                # Emit each distinct pokemon once (at its first position),
                # together with how many times it was used.
                for index in indexList:
                    if j == indexList.index(index):
                        message += str(indexList.count(index)) + " " + pokemonNames[index] + " "
                    j += 1
                message += "(" + str(totalWeight) + "lbs)"
                print(message)
                comment.reply(message)
                time.sleep(1)
        # Same logic as above, in kilograms.
        if currentWord == "kg":
            if i > 0 and checkVal(formatNum(commentParts[i - 1])) == 1:
                kg = float(formatNum(commentParts[i - 1]))
                totalWeight = 0
                indexList = []
                while totalWeight < kg - (kg / 20): # Searching within 5% accuracy
                    indexList.append(findClosestWeight(float(kg - totalWeight), "kg"))
                    totalWeight += pokemonKilograms[indexList[len(indexList) - 1]]
                message = "The closest pokemon to " + str(kg) + currentWord + " is "
                j = 0
                for index in indexList:
                    if j == indexList.index(index):
                        message += str(indexList.count(index)) + " " + pokemonNames[index] + " "
                    j += 1
                message += "(" + str(totalWeight) + "kg)"
                print(message)
                comment.reply(message)
                time.sleep(1)
i += 1 |
# Network skills
### cifar10 loss
def cifar10_loss(logits, labels):
    """Build the total CIFAR-10 loss and register it in the 'losses'
    collection. Add summary for for "Loss" and "Loss/avg".

    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]
    Returns:
        Loss tensor of type float: mean cross entropy plus every loss
        already registered in the 'losses' collection (e.g. weight decay).
    """
    # sparse_softmax_cross_entropy_with_logits expects int64 class ids.
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.cast(labels, tf.int64),
        logits=logits,
        name='cross_entropy_per_example')
    # Average cross entropy over the batch, then pool with the other losses.
    mean_loss = tf.reduce_mean(per_example, name='cross_entropy')
    tf.add_to_collection('losses', mean_loss)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# calculando.py
#
print("***Calculo del área de un triangulo***")
# The original assigned base=6, altura=2 and then swapped them, so the
# effective values are base=2, altura=6; assigned directly here.
base, altura = 2, 6
# Triangle area: base times height over two.
calculo = (base * altura) / 2
print("La base es:", base, "La altura es:", altura)
print("El resultado es :", calculo)
|
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as vds
from features.feature_tools import get_all_texts, get_statistical_results_of_list
from collections import Counter
import emoji
analyzer = vds()
def extract_emojis(s):
    """Return every emoji character found in string s, in order."""
    found = []
    for ch in s:
        if ch in emoji.UNICODE_EMOJI:
            found.append(ch)
    return found
def get_all_emojis(tweets):
    """Collect the emojis of every tweet text into one flat list."""
    collected = []
    for text in get_all_texts(tweets):
        collected.extend(extract_emojis(text))
    return collected
def tweet_emoji_ratio(tweets):
    """Fraction of tweets whose text contains at least one emoji.

    Returns 0.0 for an empty tweet list -- the original divided by
    len(tweets) unconditionally and raised ZeroDivisionError.
    """
    if not tweets:
        return 0.0
    texts = get_all_texts(tweets)
    with_emoji = sum(1 for t in texts if len(extract_emojis(t)) > 0)
    return with_emoji / len(tweets)
def get_most_common_emoji(tweets):
    """Return how often the most frequent emoji occurs (0 if there are none).

    NOTE(review): despite the name this returns the COUNT, not the emoji
    itself -- most_common(1)[0][1] selects the frequency.
    """
    all_emojis = get_all_emojis(tweets)
    if not all_emojis:
        return 0
    counts = Counter(all_emojis)
    return counts.most_common(1)[0][1]
def get_emojis_per_tweet(tweets):
    # Summary statistics of the number of emojis per tweet text.
    # Counts are only gathered when there are more than 2 texts; otherwise
    # the summary is computed over an empty list.
    emojis_count=[]
    all_texts = get_all_texts(tweets)
    if len(all_texts)>2:
        for t in all_texts:
            emojis_count.append(len(extract_emojis(t)))
            # if len(extract_emojis(t))>5:
            #     print (t,tweets[all_texts.index(t)]['id_str'])
    return get_statistical_results_of_list(emojis_count)
def get_positive_negative_neutral_emojis_per_tweet(tweets):
    # Per-tweet counts of neutral / negative / positive emojis, classified
    # by the dominant VADER polarity component of each emoji character.
    # Returns three stats summaries in the order (neutral, negative,
    # positive). Counts are only gathered when there are more than 2
    # texts; otherwise the summaries are computed over empty lists.
    neg_emojis_count = []
    pos_emojis_count = []
    neu_emojis_count = []
    all_texts = get_all_texts(tweets)
    if len(all_texts) > 2:
        for t in all_texts:
            neu = 0
            pos = 0
            neg = 0
            emojis = extract_emojis(t)
            if len(emojis)>0:
                for e in emojis:
                    emojiSent = analyzer.polarity_scores(e)
                    # Neutral only wins when it strictly dominates both
                    # other components; ties fall through to pos/neg.
                    if emojiSent['neu']>emojiSent['neg'] and emojiSent['neu']>emojiSent['pos']:
                        neu+=1
                        # print(emojiSent)
                    else:
                        if emojiSent['neg']>emojiSent['pos']:
                            neg+=1
                        else:
                            pos+=1
            neg_emojis_count.append(neg)
            neu_emojis_count.append(neu)
            pos_emojis_count.append(pos)
    return get_statistical_results_of_list(neu_emojis_count),get_statistical_results_of_list(neg_emojis_count),get_statistical_results_of_list(pos_emojis_count)
def get_positive_sentiment_per_tweet(tweets):
    """Summary statistics of the VADER 'pos' score across all tweet texts."""
    scores = [analyzer.polarity_scores(text)['pos']
              for text in get_all_texts(tweets)]
    return get_statistical_results_of_list(scores)
def get_negative_sentiment_per_tweet(tweets):
    """Summary statistics of the VADER 'neg' score across all tweet texts."""
    scores = []
    for text in get_all_texts(tweets):
        scores.append(analyzer.polarity_scores(text)['neg'])
    return get_statistical_results_of_list(scores)
def get_neutral_sentiment_per_tweet(tweets):
    """Summary statistics of the VADER neutral score across all tweet texts.

    Bug fix: the original appended sent['neg'] (copy-paste from the
    negative variant), so this function reported negative -- not neutral
    -- sentiment.
    """
    texts = get_all_texts(tweets)
    sentiment = []
    for t in texts:
        sent = analyzer.polarity_scores(t)
        sentiment.append(sent['neu'])
    return get_statistical_results_of_list(sentiment)
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Integration tests that are targeted at the optimizer.
# These are too large to be executed every iteration, but should be
# executed before every checkin.
#
import random
import re
import unittest
import encoder
import optimizer
import test_tools
import vp8
class DummyCodec(encoder.Codec):
  """Fake codec whose reported 'psnr' is read straight from --score."""

  def __init__(self):
    super(DummyCodec, self).__init__('dummy')
    self.extension = 'fake'
    self.option_set = encoder.OptionSet(
        encoder.Option('score', ['0', '5', '10']),
        encoder.Option('another_parameter', ['yes']),
    )

  def StartEncoder(self, context):
    """Return an encoder preconfigured with --score=5."""
    values = encoder.OptionValueSet(self.option_set, "--score=5")
    return encoder.Encoder(context, values)

  def Execute(self, parameters, rate, videofile, workdir):
    """Fake an encode: psnr is the --score value (or -100), bitrate fixed."""
    # pylint: disable=W0613
    match = re.search(r'--score=(\d+)', parameters.ToString())
    score = int(match.group(1)) if match else -100
    return {'psnr': score, 'bitrate': 100}
class DummyVideofile(encoder.Videofile):
  # Videofile stub with a fixed, injectable clip duration.
  def __init__(self, filename, clip_time):
    """Create a fake videofile; clip_time is what ClipTime() will return."""
    super(DummyVideofile, self).__init__(filename)
    self.clip_time = clip_time
  def ClipTime(self):
    """Return the duration supplied at construction time."""
    return self.clip_time
def Returns1(target_bitrate, result):
  """Constant score function: every encoding scores 1.0."""
  # pylint: disable=W0613
  return 1.0
def ReturnsClipTime(target_bitrate, result):
  """Score function: the clip time recorded in the result dict, as a float."""
  # pylint: disable=W0613
  return float(result['cliptime'])
class TestOptimization(test_tools.FileUsingCodecTest):
  # pylint: disable=R0201
  def test_OptimizeOverMultipleEncoders(self):
    """Run the optimizer for a few cycles with a real codec.
    This may turn out to be an over-heavy test for every-checkin testing."""
    fileset = test_tools.TestFileSet()
    codec = vp8.Vp8Codec()
    my_optimizer = optimizer.Optimizer(codec, fileset,
                                       cache_class=encoder.EncodingDiskCache)
    # Establish a baseline score for every (rate, file) pair.
    for rate, filename in fileset.AllFilesAndRates():
      clip = encoder.Videofile(filename)
      my_optimizer.BestEncoding(rate, clip).Execute().Store()
    # Try to improve on the baseline with at most 10 further encodings.
    for _ in range(10):
      rate, filename = random.choice(fileset.AllFilesAndRates())
      clip = encoder.Videofile(filename)
      candidate = my_optimizer.BestUntriedEncoding(rate, clip)
      if not candidate:
        break
      candidate.Execute().Store()
# Allow running this module directly as a test suite.
if __name__ == '__main__':
  unittest.main()
|
from scrapy.spiders import Spider
from scrapy.selector import Selector
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from pathlib import Path
import os
import json
import scrapy
class MySpider1(Spider):
    """Scrape article text from rigzone.com front-page section links."""
    name = "spider1"
    # Fix: allowed_domains entries must be bare domain names; entries that
    # include a scheme ("http://...") are ignored by scrapy's OffsiteMiddleware.
    allowed_domains = ["www.rigzone.com"]
    start_urls = ["http://www.rigzone.com"]
    urls = []
    # Shared accumulator: the runner dumps MySpider1.output to news.json.
    output = []

    def parse(self, response):
        # Collect section links from the front page, then fetch each article.
        s = Selector(response)
        MySpider1.urls = s.css('div.rz-fts-section>a::attr(href)').extract()
        for url in MySpider1.urls:
            url = response.urljoin(url)
            yield scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)

    def parse_following_urls(self, response):
        # Fix: responses arrive in arbitrary order, so pairing each response
        # with urls[i] via a class counter attributed text to the wrong URL.
        # response.url is the URL this response was actually fetched from.
        url = response.url
        All_text = response.css("div>p::text").extract()
        MySpider1.output.append({'url': url, 'All_text': All_text})
        yield {
            'url': url,
            'All_text': All_text
        }
class MySpider2(Spider):
    """Scrape article text from offshoreenergytoday.com news blocks."""
    name = "spider2"
    # Fix: bare domain name; scheme-prefixed entries are ignored by
    # scrapy's OffsiteMiddleware.
    allowed_domains = ["www.offshoreenergytoday.com"]
    start_urls = ["http://www.offshoreenergytoday.com"]
    urls = []

    def parse(self, response):
        # Collect links from the carousel and the news blocks.
        s = Selector(response)
        MySpider2.urls = s.css('#newscarousel a::attr(href) , .block-news a::attr(href)').extract()
        for url in MySpider2.urls:
            url = response.urljoin(url)
            yield scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)

    def parse_following_urls(self, response):
        # Fix: use response.url instead of indexing the shared urls list with
        # a class counter (responses arrive out of order).
        url = response.url
        # NOTE(review): "l.content" looks like a typo for ".content" -- kept
        # as-is because changing the selector changes what gets scraped.
        All_text = response.css("l.content p::text,strong::text").extract()
        # All spiders append to MySpider1.output, which the runner serializes.
        MySpider1.output.append({'url': url, 'All_text': All_text})
        yield {
            'url': url,
            'All_text': All_text
        }
class MySpider3(Spider):
    """Scrape article text from worldoil.com list links."""
    name = "spider3"
    # Fix: bare domain name; scheme-prefixed entries are ignored by
    # scrapy's OffsiteMiddleware.
    allowed_domains = ["www.worldoil.com"]
    start_urls = ["http://www.worldoil.com"]
    urls = []

    def parse(self, response):
        s = Selector(response)
        MySpider3.urls = s.css('div.col-sm-6>ul>li>a::attr(href)').extract()
        for url in MySpider3.urls:
            url = response.urljoin(url)
            yield scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)

    def parse_following_urls(self, response):
        # Fix: use response.url instead of indexing the shared urls list with
        # a class counter (responses arrive out of order).
        url = response.url
        All_text = response.css("div>p::text").extract()
        # All spiders append to MySpider1.output, which the runner serializes.
        MySpider1.output.append({'url': url, 'All_text': All_text})
        yield {
            'url': url,
            'All_text': All_text
        }
class MySpider4(Spider):
    """Scrape article text from pennenergy.com (all links on the front page)."""
    name = "spider4"
    # Fix: bare domain name; scheme-prefixed entries are ignored by
    # scrapy's OffsiteMiddleware.
    allowed_domains = ["www.pennenergy.com"]
    start_urls = ["http://www.pennenergy.com"]
    urls = []

    def parse(self, response):
        s = Selector(response)
        MySpider4.urls = s.css('a::attr(href)').extract()
        for url in MySpider4.urls:
            url = response.urljoin(url)
            yield scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)

    def parse_following_urls(self, response):
        # Fix: use response.url instead of indexing the shared urls list with
        # a class counter (responses arrive out of order).
        url = response.url
        All_text = response.css("p::text").extract()
        # All spiders append to MySpider1.output, which the runner serializes.
        MySpider1.output.append({'url': url, 'All_text': All_text})
        yield {
            'url': url,
            'All_text': All_text
        }
class MySpider5(Spider):
    """Scrape article text from gasprocessingnews.com (all front-page links)."""
    name = "spider5"
    # Fix: bare domain name; scheme-prefixed entries are ignored by
    # scrapy's OffsiteMiddleware.
    allowed_domains = ["www.gasprocessingnews.com"]
    start_urls = ["http://www.gasprocessingnews.com"]
    urls = []

    def parse(self, response):
        s = Selector(response)
        MySpider5.urls = s.css('a::attr(href)').extract()
        for url in MySpider5.urls:
            url = response.urljoin(url)
            yield scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)

    def parse_following_urls(self, response):
        # Fix: use response.url instead of indexing the shared urls list with
        # a class counter (responses arrive out of order).
        url = response.url
        All_text = response.css("#content-left p::text").extract()
        # All spiders append to MySpider1.output, which the runner serializes.
        MySpider1.output.append({'url': url, 'All_text': All_text})
        yield {
            'url': url,
            'All_text': All_text
        }
# Run all five spiders inside a single Twisted reactor, then dump everything
# they collected (spiders append to MySpider1.output) as JSON.
configure_logging()
runner = CrawlerRunner()
for spider_cls in (MySpider1, MySpider2, MySpider3, MySpider4, MySpider5):
    runner.crawl(spider_cls)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run()  # blocks until every crawl has finished
# Opening with 'w' truncates any previous file, replacing the original
# exists()/os.remove() followed by append-mode open.
with open('news.json', 'w') as outfile:
    json.dump(MySpider1.output, outfile)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
import random
# 8x8 handwritten-digit images bundled with scikit-learn.
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.0001)
# Train on every sample except the last.
x,y = digits.data[:-1],digits.target[:-1]
clf.fit(x,y)
def yes_or_no(question):
    """Prompt with `question`; return 0 for a 'y...' answer, 1 for 'n...'.

    Anything else (including an empty reply) re-prompts.
    Fix: the original indexed reply[0], which raised IndexError when the
    user just pressed Enter; startswith() treats an empty reply as invalid
    and re-prompts instead.
    """
    reply = str(input(question+' (y/n): ')).lower().strip()
    if reply.startswith('y'):
        return 0
    if reply.startswith('n'):
        return 1
    return yes_or_no("Please Enter (y/n) ")
# Show random test digits with the model's prediction until the user declines.
while True:
    idx = random.randint(0, 1000)
    print("Prediction of last:", clf.predict(digits.data[[idx]]))
    plt.imshow(digits.images[idx], cmap=plt.cm.gray_r, interpolation="nearest")
    plt.show()
    if yes_or_no('Try again'):
        break
print("done")
|
from ._title import Title
from plotly.graph_objs.parcats.line.colorbar import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
"""
Auto-encoder network model definition. Only defined if kwcnn is available.
"""
from .kwcnndescriptor import kwcnn
# Public symbol: remains None when the optional kwcnn dependency is missing.
AutoEncoderModel = None
if kwcnn is not None:
    class AutoEncoderModel(kwcnn.core.KWCNN_Auto_Model): # NOQA
        """FCNN Model."""
        def __init__(self, *args, **kwargs):
            """FCNN init.

            Optional kwargs read here:
                greyscale (bool): single-channel 64x64 input when True.
                bottleneck (int): width of the dense bottleneck layer.
                trimmed (bool): stop the architecture at the flatten layer.
            """
            super(AutoEncoderModel, self).__init__(*args, **kwargs)
            self.greyscale = kwargs.get("greyscale", False)
            self.bottleneck = kwargs.get("bottleneck", 64)
            self.trimmed = kwargs.get("trimmed", False)
        def _input_shape(self):
            # (height, width, channels) of the expected 64x64 input image.
            return (64, 64, 1) if self.greyscale else (64, 64, 3)
        # noinspection PyProtectedMember
        def architecture(self, batch_size, in_width, in_height,
                in_channels, out_classes):
            """FCNN architecture."""
            input_height, input_width, input_channels = self._input_shape()
            _nonlinearity = kwcnn.tpl._lasagne.nonlinearities.LeakyRectify(
                leakiness=(1. / 10.)
            )
            #: :type: list[lasagne.layers.Layer]
            layer_list = [
                kwcnn.tpl._lasagne.layers.InputLayer(
                    shape=(None, input_channels, input_height, input_width)
                )
            ]
            # Stage 1: two 16-filter 3x3 convs (batch-normed), then 2x2 max-pool.
            for index in range(2):
                layer_list.append(
                    kwcnn.tpl._lasagne.layers.batch_norm(
                        kwcnn.tpl._lasagne.Conv2DLayer(
                            layer_list[-1],
                            num_filters=16,
                            filter_size=(3, 3),
                            nonlinearity=_nonlinearity,
                            W=kwcnn.tpl._lasagne.init.Orthogonal(),
                        )
                    )
                )
            layer_list.append(
                kwcnn.tpl._lasagne.MaxPool2DLayer(
                    layer_list[-1],
                    pool_size=(2, 2),
                    stride=(2, 2),
                )
            )
            # Stage 2: three 32-filter convs, then another 2x2 max-pool.
            for index in range(3):
                layer_list.append(
                    kwcnn.tpl._lasagne.layers.batch_norm(
                        kwcnn.tpl._lasagne.Conv2DLayer(
                            layer_list[-1],
                            num_filters=32,
                            filter_size=(3, 3),
                            nonlinearity=_nonlinearity,
                            W=kwcnn.tpl._lasagne.init.Orthogonal(),
                        )
                    )
                )
            layer_list.append(
                kwcnn.tpl._lasagne.MaxPool2DLayer(
                    layer_list[-1],
                    pool_size=(2, 2),
                    stride=(2, 2),
                )
            )
            # Stage 3: two more 32-filter convs, no pooling.
            for index in range(2):
                layer_list.append(
                    kwcnn.tpl._lasagne.layers.batch_norm(
                        kwcnn.tpl._lasagne.Conv2DLayer(
                            layer_list[-1],
                            num_filters=32,
                            filter_size=(3, 3),
                            nonlinearity=_nonlinearity,
                            W=kwcnn.tpl._lasagne.init.Orthogonal(),
                        )
                    )
                )
            # Flatten to (batch, features) for the dense bottleneck.
            l_reshape0 = kwcnn.tpl._lasagne.layers.ReshapeLayer(
                layer_list[-1],
                shape=([0], -1),
            )
            if self.trimmed:
                return l_reshape0
            l_bottleneck = kwcnn.tpl._lasagne.layers.DenseLayer(
                l_reshape0,
                num_units=self.bottleneck,
                nonlinearity=kwcnn.tpl._lasagne.nonlinearities.tanh,
                W=kwcnn.tpl._lasagne.init.Orthogonal(),
                name="bottleneck",
            )
            return l_bottleneck
|
import unittest
from katas.kyu_7.candy_problem import candies
class CandiesTestCase(unittest.TestCase):
    """candies() returns the total extra candy needed to raise every kid to
    the maximum; per the cases below it returns -1 for an empty list or a
    list containing a non-integer."""
    def test_equal_1(self):
        # max is 8: (8-5)+(8-8)+(8-6)+(8-4) == 9
        self.assertEqual(candies([5, 8, 6, 4]), 9)
    def test_equal_2(self):
        # max is 6: 5+4+2+0 == 11
        self.assertEqual(candies([1, 2, 4, 6]), 11)
    def test_equal_3(self):
        self.assertEqual(candies([1, 6]), 5)
    def test_equal_4(self):
        # empty input is rejected
        self.assertEqual(candies([]), -1)
    def test_equal_5(self):
        # non-integer entries are rejected
        self.assertEqual(candies([1, 2, None, 3]), -1)
|
import os
import uuid
import logging
log = logging.getLogger( __name__ )
from sqlalchemy import *
from sqlalchemy import event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, backref, sessionmaker, scoped_session
from sqlalchemy.exc import OperationalError
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
class Schema( Base ):
    """One row per named schema, tracking its (version, revision)."""
    __tablename__ = 'db_schema'
    uuid = Column( Text, primary_key = True )
    schema = Column( Text, nullable = False )
    ver = Column( Integer, nullable = False )
    rev = Column( Integer, nullable = False )
    def __init__( self, schema, ver, rev, _uuid = None ):
        # Generate a fresh time-based UUID unless one was supplied.
        self.uuid = str( uuid.uuid1() ) if _uuid is None else _uuid
        self.schema = schema
        self.ver = ver
        self.rev = rev
    def __repr__( self ):
        return 'Schema( %r, %r, %r, %r )' % ( self.uuid, self.schema, self.ver, self.rev )
def _do_sqlite_connect( dbapi_conn, conn_record ):
    # Disable python's auto BEGIN/COMMIT
    # (isolation_level = None lets this module issue explicit BEGIN EXCLUSIVE).
    dbapi_conn.isolation_level = None
    # Wait up to 10s when the database is locked instead of failing at once.
    dbapi_conn.execute( 'PRAGMA busy_timeout = 10000' )
class DatabaseFile:
    """A sqlite database file with per-schema version tracking and migration."""

    def __init__( self, database_file, migrators = None ):
        """database_file: path to the sqlite file on disk.
        migrators: optional mapping of schema name -> migrator object.

        Fix: the original declared 'migrators = {}', a mutable default that
        is shared between instances; None is now the sentinel.
        """
        self.__file = database_file
        self.__engine = None
        self.__Session = None
        self.__migrators = {} if migrators is None else migrators

    def __get_schema_version( self, session, schema ):
        # Return (ver, rev) for the schema, asking the schema's migrator to
        # detect and record it when no row exists yet.
        info = session.query( Schema ).filter( Schema.schema == schema ).first()
        if( info is None ):
            # Renamed from 'uuid': the original local shadowed the uuid module.
            ver, rev, schema_uuid = self.__migrators[schema].determine_schema_info( session )
            if( ver is None ):
                return None, None
            info = Schema( schema, ver, rev, schema_uuid )
            session.add( info )
        return info.ver, info.rev

    def get_schema_version( self, schema ):
        """Return (ver, rev) of the named schema, committing any detection."""
        s = self.get_session()
        try:
            result = self.__get_schema_version( s, schema )
            s.commit()
            return result
        finally:
            s.close()

    def set_schema_version( self, schema, ver, rev ):
        """Record (ver, rev) for the schema, inserting the row if missing."""
        s = self.get_session()
        try:
            # EXCLUSIVE lock: no other connection may write during the update.
            s.execute( 'BEGIN EXCLUSIVE' )
            info = s.query( Schema ).filter( Schema.schema == schema ).first()
            if( info is not None ):
                info.ver = ver
                info.rev = rev
            else:
                info = Schema( schema, ver, rev )
                s.add( info )
            s.commit()
        finally:
            s.close()

    def backup( self ):
        """Copy the database file to the first unused '<file>.bakN' path.

        Fix: the original used the Python-2-only builtin file(), which raises
        NameError on Python 3, and manually closed handles that 'with'
        already manages.
        """
        n = 0
        while( os.path.isfile( self.__file + '.bak' + str( n ) ) ):
            n += 1
        with open( self.__file, 'rb' ) as f:
            with open( self.__file + '.bak' + str( n ), 'wb' ) as g:
                while( 1 ):
                    buff = f.read( 65536 )
                    if( len( buff ) == 0 ):
                        break
                    g.write( buff )

    def init( self ):
        """Create the engine/session factory and ensure base tables exist."""
        self.__engine = create_engine( 'sqlite:///' + self.__file )
        event.listen( self.__engine, 'connect', _do_sqlite_connect )
        Base.metadata.create_all( self.__engine )
        self.__Session = scoped_session( sessionmaker( bind = self.__engine ) )

    def init_schema( self, schema, target_ver, target_rev ):
        """Create or migrate the named schema up to (target_ver, target_rev).

        A file backup is taken before any migration runs. Downgrading
        (stored version newer than target) is not supported.
        """
        ver, rev = self.get_schema_version( schema )
        if( ver is None ):
            self.__migrators[schema].init_schema( self.__engine, target_ver, target_rev )
            self.set_schema_version( schema, target_ver, target_rev )
        elif( ver > target_ver ):
            assert False, 'Unsupported schema version'
        elif( ver != target_ver or rev != target_rev ):
            self.backup()
            s = self.get_session()
            try:
                m = self.__migrators[schema]
                s.execute( 'BEGIN EXCLUSIVE' )
                while( ver != target_ver or rev != target_rev ):
                    new_ver, new_rev = m.upgrade_schema( s, ver, rev )
                    # Each step must move strictly forward or we would loop.
                    assert new_ver > ver or (new_ver == ver and new_rev > rev)
                    ver, rev = new_ver, new_rev
                info = s.query( Schema ).filter( Schema.schema == schema ).first()
                info.ver = ver
                info.rev = rev
                s.commit()
            finally:
                s.close()

    def dispose( self ):
        """Tear down the session factory and engine."""
        self.__Session = None
        self.__engine.dispose()
        self.__engine = None

    def get_engine( self ):
        return self.__engine

    def get_session( self ):
        """Return a new session from the scoped factory (call init() first)."""
        return self.__Session()
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
from bs4 import BeautifulSoup
import requests
# If modifying these scopes, delete the file token.pickle.
# Write access to Docs and Drive is needed for documents().batchUpdate.
SCOPES = ['https://www.googleapis.com/auth/documents', "https://www.googleapis.com/auth/drive"]
def _load_credentials():
    """Return cached OAuth credentials, refreshing or re-authorizing as needed.

    The token.pickle file stores the user's access and refresh tokens and is
    created automatically when the authorization flow completes the first time.
    """
    creds = None
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run.
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return creds


def _fetch_volume_rows(vol):
    """Download one TEI volume and return 'id text' lines, one per <seg>."""
    url = "https://kouigenjimonogatari.github.io/tei/" + str(vol).zfill(2) + ".xml"
    soup = BeautifulSoup(requests.get(url).content, "lxml")
    rows = []
    for seg in soup.find("body").find_all("seg"):
        seg_id = seg.get("corresp").split("/")[-1].split(".")[0]
        text = seg.get_text().replace(" ", "").replace("\n", "")
        rows.append(seg_id + " " + text + "\n")
    return rows


def _build_requests(service, gid, rows):
    """Build batchUpdate requests: clear the document, then insert each row."""
    requests_ = []
    document = service.documents().get(documentId=gid).execute()
    end_index = document["body"]["content"][-1]["endIndex"]
    print(end_index)
    if end_index > 2:
        # Delete everything currently in the document body.
        requests_.append({
            'deleteContentRange': {
                'range': {
                    'startIndex': 1,
                    'endIndex': end_index - 1,
                }
            }
        })
    start = 1
    for text in rows:
        requests_.append({
            'insertText': {
                'location': {
                    'index': start,
                },
                'text': text
            }
        })
        start += len(text)
    return requests_


def main():
    """Replace each volume's Google Doc content with its TEI transcription.

    Cleanup: removed the unused 'results' dict and the long run of
    commented-out volume numbers from the original.
    """
    creds = _load_credentials()
    service = build('docs', 'v1', credentials=creds)
    vols = [
        22, 23, 24, 25, 26, 27, 28, 29, 30,
        31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
        51, 52, 53, 54
    ]
    # data/ids.json maps zero-padded volume numbers to Google Doc ids.
    with open("data/ids.json") as f:
        ids = json.load(f)
    for vol in vols:
        print(vol)
        gid = ids[str(vol).zfill(2)]
        rows = _fetch_volume_rows(vol)
        requests_ = _build_requests(service, gid, rows)
        service.documents().batchUpdate(
            documentId=gid, body={'requests': requests_}).execute()


if __name__ == '__main__':
    main()
"""
Escreva um programa que receba 10 valores, e ao final,
imprima quantos valores negativos foram inseridos.
"""
cont = 0
print("Digite 10 valores")
for i in range(10):
valor = int(input())
if valor < 0:
cont = cont + 1
print(str(cont) + " Números Negativos")
|
import environement
import numpy as np
import random as rd
import matplotlib.pyplot as plt
import time
import cv2
from threading import Thread
# Cross-thread flags: done -> a fresh Graph.png was written; done2 -> quit.
done = False
done2 = False
def view(name):
    """Poll the '<name>-Q.npy'/'<name>-S.npy' score files and regenerate
    Graph.png whenever either series grows; runs until the display thread
    (class show) sets done2."""
    global done
    global done2
    name1 = 'Q'
    name2 = 'S'
    # Start the OpenCV display loop in a background thread.
    show().start()
    lenbd1 = 0
    lenbd2 = 0
    while True:
        if done2 == True:
            break
        # 'E-*.npy' holds episode numbers; '<name>-*.npy' the matching scores.
        episodes1 = np.load('E-'+name1+'.npy')
        lenend1 = len(episodes1)
        dots1 = np.load(name+'-'+name1+'.npy')
        episodes2 = np.load('E-' + name2 + '.npy')
        lenend2 = len(episodes2)
        dots2 = np.load(name+'-' + name2 + '.npy')
        if lenend1 > lenbd1 or lenend2 > lenbd2:
            # New data arrived: re-render the comparison plot.
            saveimg(episodes1, dots1, episodes2, dots2)
            lenbd1 = lenend1
            lenbd2 = lenend2
            # Raise 'done' so the display thread reloads Graph.png, hold it
            # up for 10s, then lower it again.
            done = True
            time.sleep(10)
            done = False
def saveimg(episodes1, dots1, episodes2, dots2):
    """Render the Q-Learning vs Sarsa score curves to Graph.png."""
    with plt.style.context('Solarize_Light2'):
        fig = plt.figure(num=None, figsize=(10, 7), dpi=100, facecolor='w', edgecolor='k')
        plt.plot(episodes1, dots1, '.-', color='red')
        plt.plot(episodes2, dots2, '.-', color='blue')
        plt.legend(('QL', 'Sarsa'))
        plt.ylabel('Average Dot', fontsize=12, color='k')
        plt.xlabel('Episode', fontsize=12, color='k')
        plt.title('Q Learning vs Sarsa')
        fig.savefig('Graph.png', bbox_inches='tight')
        plt.close(fig)
class show(Thread):
    # Displays Graph.png in an OpenCV window, reloading it from disk whenever
    # view() raises the 'done' flag; pressing 'q' sets done2 and exits.
    def run(self):
        global done
        global done2
        img_tmp = cv2.imread('Graph.png')
        while True:
            if done == True:
                # A fresh Graph.png was just written; reload and cache it.
                img = cv2.imread('Graph.png')
                img_tmp = img
                # print(arr.shape)
            else:
                img = img_tmp
            cv2.imshow('View', img)
            if cv2.waitKey(1) == ord('q'):
                done2 = True
                break
# Entry point: watch the 'D'-prefixed score files and show the live graph.
view('D')
# check(1)
# Find the longest prefix or suffix of the input that is NOT a palindrome
# and print it (prefixes exclude the full string; suffixes include it).
chars = list(input())
best_len = 0
best = []
for cut in range(len(chars)):
    prefix = chars[:cut]
    if prefix != list(reversed(prefix)) and len(prefix) > best_len:
        best_len = len(prefix)
        best = prefix
for cut in range(len(chars)):
    suffix = chars[cut:]
    if suffix != list(reversed(suffix)) and len(suffix) > best_len:
        best_len = len(suffix)
        best = suffix
print("".join(best))
|
from django.urls import path
from . import views
urlpatterns = [
    # TODO: add URL mappings here (translated from the Chinese original).
    path('test/',views.test),
    path('get_brief/',views.get_article_info),
    path('get_detail/',views.get_article)
]
# -*- coding: ms949 -*-
import pandas as pd  # pandas can read string columns too
from sklearn.model_selection import train_test_split
# Fix: sklearn.linear_model.base is a private module removed in
# scikit-learn 0.24+; import LinearRegression from the public package.
from sklearn.linear_model import LinearRegression
# Read the CSV file with pandas.
data = pd.read_csv("test-score.csv",
        header=None,  # the file has no header row
        names=['1st','2nd','3rd','final'])
print(data.head())  # show only the first rows
print(data.shape)
print(data.isnull().sum().sum())  # total count of null values
print(data.describe())  # min, quartiles, mean, etc.
X = data[['1st','2nd','3rd']]
y = data['final']
print(X)
print(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
        test_size=.3,  # 70:30 train/test split (default train share: .75)
        random_state=107)
lr = LinearRegression().fit(X_train, y_train)
print(lr.score(X_train, y_train))
print(lr.score(X_test, y_test))
from __future__ import print_function
import os
import random
import math
import numpy as np
import lightgbm as lgb
import pandas as pd
import subprocess
import datetime
import time
from tqdm import tqdm
from multiprocessing import Pool
#USER = 'Zhiying'
USER = 'Heng'
# Per-user worker count for the multiprocessing pool.
if USER == 'Zhiying':
    THREAD_COUNT = 4
else:
    THREAD_COUNT = 8
from sklearn.datasets import load_svmlight_file
# Repository root; second form is the same checkout as seen from WSL.
root = r'C:\source\github\mlsa18-pass-prediction' if os.name == 'nt' else r'/mnt/c/source/github/mlsa18-pass-prediction'
output_separator = '\t'
class Player(object):
    # Snapshot of one player's position at the moment of a pass.
    def __init__(self, player_id, x, y):
        self.player_id = player_id
        # Field coordinates; units follow the raw data (sidelines appear to
        # be at y = +/-3400 elsewhere in this module) -- TODO confirm.
        self.x = x
        self.y = y
class Pass(object):
    def __init__(self, pass_id, line_num, time_start, time_end, sender_id, receiver_id):
        """One pass event: who passed to whom, when, plus player positions."""
        self.pass_id = pass_id
        self.line_num = line_num
        self.time_start = time_start
        self.time_end = time_end
        self.sender_id = sender_id
        self.receiver_id = receiver_id
        # Filled in later (presumably by __validate) from self.players -- TODO confirm.
        self.sender = None
        self.receiver = None
        # player_id -> Player, populated via add_player().
        self.players = {}
    def add_player(self, player_id, x, y):
        """Record the position of one player involved in this pass."""
        self.players[player_id] = Player(player_id, x, y)
    def to_features(self):
        """Return a separator-joined basic feature line for this pass, or
        None when __validate (defined outside this view) rejects it."""
        if not self.__validate():
            return None
        features = []
        features.append(self.sender_id)
        features.append(self.receiver_id)
        features.append(self.time_start)
        features.append(self.time_end - self.time_start) # duration
        features.append(self.__get_sender_receiver_distance())
        # sender/receiver objects are assumed to be populated by
        # __validate -- TODO confirm.
        features.append(self.sender.x)
        features.append(self.sender.y)
        features.append(self.receiver.x)
        features.append(self.receiver.y)
        return output_separator.join([str(feature) for feature in features])
    def features_generator(self, get_features=False):
        """Yield one row per candidate receiver (every player except the
        sender): a raw feature list when get_features is True, otherwise a
        separator-joined string. Column order must stay in sync with
        get_header()."""
        # --- sender-level features, computed once and shared by every row ---
        sender = self.players[self.sender_id]
        sender_friends = {candidate_id: self.players[candidate_id] for candidate_id in self.players.keys() if candidate_id != self.sender_id and self.__in_same_team(self.sender_id, candidate_id)}
        sender_opponents = {candidate_id: self.players[candidate_id] for candidate_id in self.players.keys() if not self.__in_same_team(self.sender_id, candidate_id)}
        sender_friends_distances = sorted([self.__get_distance(friend_id, self.sender_id) for friend_id in sender_friends.keys()])
        sender_opponent_distances = sorted([self.__get_distance(opponent_id, self.sender_id) for opponent_id in sender_opponents.keys()])
        sender_closest_friend_dist = sender_friends_distances[0]
        sender_closest_3_friends_dist = np.mean(sender_friends_distances[:3])
        sender_closest_opponent_dist = sender_opponent_distances[0]
        sender_closest_3_oppononents_dist = np.mean(sender_opponent_distances[:3])
        is_sender_left_team = self.__is_player_left_team(self.sender_id)
        # Field thirds: 0 = back, 1 = middle, 2 = front (relative to team side).
        sender_field = self.__get_player_field(self.sender_id, is_sender_left_team)
        is_sender_in_back_field = 1 if sender_field == 0 else 0
        is_sender_in_middle_field = 1 if sender_field == 1 else 0
        is_sender_in_front_field = 1 if sender_field == 2 else 0
        sender_to_offense_gate_dist, sender_to_defense_gate_dist = self.__player_to_gate_distance(self.sender_id, is_sender_left_team)
        sender_friends_to_offense_gate_dists = [self.__player_to_gate_distance(friend_id, is_sender_left_team)[0]
                                                for friend_id in sender_friends.keys()]
        sender_to_offense_gate_dist_rank_relative_to_friends = \
            sum([dist < sender_to_offense_gate_dist for dist in sender_friends_to_offense_gate_dists]) + 1
        sender_opponents_to_offense_gate_dists = [self.__player_to_gate_distance(opponent_id, is_sender_left_team)[0]
                                                  for opponent_id in sender_opponents.keys()]
        sender_to_offense_gate_dist_rank_relative_to_opponents = \
            sum([dist < sender_to_offense_gate_dist for dist in sender_opponents_to_offense_gate_dists]) + 1
        sender_to_top_sideline_dist_rank_relative_to_friends = \
            sum([friend.y > sender.y for friend in sender_friends.values()]) + 1
        sender_to_top_sideline_dist_rank_relative_to_opponents = \
            sum([opponent.y > sender.y for opponent in sender_opponents.values()]) + 1
        sender_team = {candidate_id: self.players[candidate_id] for candidate_id in self.players.keys() if self.__in_same_team(self.sender_id, candidate_id)}
        sender_team_to_offense_goal_line_dists = [
            self.__player_to_goal_line_distance(team_member_id, is_sender_left_team)[0] for team_member_id in
            sender_team.keys()]
        sender_team_to_defense_goal_line_dists = [
            self.__player_to_goal_line_distance(team_member_id, is_sender_left_team)[1] for team_member_id in
            sender_team.keys()]
        sender_team_closest_dist_to_offense_goal_line = \
            sorted(sender_team_to_offense_goal_line_dists)[0]
        # [1]: skips the goalie, presumed closest to the defended goal line.
        sender_team_closest_dist_to_defense_goal_line_exclude_goalie = \
            sorted(sender_team_to_defense_goal_line_dists)[1]
        # Sidelines are at y = +/-3400 in these data's units -- TODO confirm.
        sender_team_closest_dist_to_top_sideline = \
            sorted([3400 - player.y for player in sender_team.values()])[0]
        sender_team_cloeset_dist_to_bottom_sideline = \
            sorted([player.y - (-3400) for player in sender_team.values()])[0]
        sender_team_median_dist_to_offense_goal_line = \
            np.median(sender_team_to_offense_goal_line_dists)
        sender_team_median_dist_to_top_sideline = \
            np.median([3400 - player.y for player in sender_team.values()])
        # --- per-candidate features: one row per potential receiver ---
        for player_id in self.players.keys():
            if player_id == self.sender_id: continue
            #sender = self.players[self.sender_id]
            player = self.players[player_id]
            friends = {candidate_id: self.players[candidate_id] for candidate_id in self.players.keys() if candidate_id != player_id and self.__in_same_team(player_id, candidate_id)}
            opponents = {candidate_id: self.players[candidate_id] for candidate_id in self.players.keys() if not self.__in_same_team(player_id, candidate_id)}
            # Supervised label: 1 iff this candidate actually received the pass.
            label = 1 if player_id == self.receiver_id else 0
            is_in_same_team = 1 if self.sender_id in friends.keys() else 0
            distance = self.__get_distance(self.sender_id, player_id)
            friend_distances = sorted([self.__get_distance(friend_id, player_id) for friend_id in friends.keys()])
            opponent_distances = sorted([self.__get_distance(opponent_id, player_id) for opponent_id in opponents.keys()])
            is_player_left_team = self.__is_player_left_team(player_id)
            player_field = self.__get_player_field(player_id, is_player_left_team)
            is_player_in_back_field = 1 if player_field == 0 else 0
            is_player_in_middle_field = 1 if player_field == 1 else 0
            is_player_in_front_field = 1 if player_field == 2 else 0
            is_sender_player_in_same_field = 1 if sender_field == player_field else 0
            normalized_player_to_sender_x_diff = self.__normalized_player_to_sender_x_diff(player_id, is_sender_left_team)
            player_to_offense_gate_dist, player_to_defense_gate_dist = self.__player_to_gate_distance(player_id, is_player_left_team)
            opponent_to_line_dists = self.__get_min_opponent_dist_to_sender_player_line(player_id)
            ## here offense gate is related to the sender (i.e., the target gate of sender)
            player_to_offense_gate_of_sender_dist, player_to_defense_gate_of_sender_dist = \
                self.__player_to_gate_distance(player_id, is_sender_left_team)
            player_friends_to_offense_gate_dists = [self.__player_to_gate_distance(friend_id, is_sender_left_team)[0]
                                                    for friend_id in friends.keys()]
            player_to_offense_gate_dist_rank_relative_to_friends = \
                sum([dist < player_to_offense_gate_of_sender_dist for dist in player_friends_to_offense_gate_dists]) + 1
            player_opponents_to_offense_gate_dists = [self.__player_to_gate_distance(opponent_id, is_sender_left_team)[0]
                                                      for opponent_id in opponents.keys()]
            player_to_offense_gate_dist_rank_relative_to_opponents = \
                sum([dist < player_to_offense_gate_of_sender_dist for dist in player_opponents_to_offense_gate_dists]) + 1
            player_to_top_sideline_dist_rank_relative_to_friends = \
                sum([friend.y > player.y for friend in friends.values()]) + 1
            player_to_top_sideline_dist_rank_relative_to_opponents = \
                sum([opponent.y > player.y for opponent in opponents.values()]) + 1
            player_friends_to_sender_dists = {self.__get_distance(self.sender_id, friend_id)
                                              for friend_id in friends.keys() if friend_id != self.sender_id}
            player_to_sender_dist_rank_among_friends = sum([dist < distance for dist in player_friends_to_sender_dists]) + 1
            player_opponents_to_sender_dists = {self.__get_distance(self.sender_id, opponent_id)
                                                for opponent_id in opponents.keys() if opponent_id != self.sender_id}
            player_to_sender_dist_rank_among_opponents = sum([dist < distance for dist in player_opponents_to_sender_dists]) + 1
            ## passing angle
            # "Dangerous" opponents sit closer than the pass length to BOTH
            # endpoints, i.e. roughly along the passing line.
            dangerous_opponents = [opponent_id for opponent_id in opponents.keys()
                                   if self.__get_distance(self.sender_id, opponent_id) < distance and
                                   self.__get_distance(player_id, opponent_id) < distance]
            if len(dangerous_opponents) == 0:
                min_pass_angle = 90
            else:
                pass_angles = [self.__calculate_sender_player_opponent_angle(self.sender_id, player_id, opponent_id)
                               for opponent_id in dangerous_opponents]
                min_pass_angle = np.min(pass_angles)
            num_dangerous_opponents_along_passing_line = len(dangerous_opponents)
            ## closest friends/opponents features
            closest_friend_id = sorted(friends.keys(),
                                       key=lambda friend_id: self.__get_distance(player_id, friend_id))[0]
            closest_opponent_id = sorted(opponents.keys(),
                                         key=lambda opponent_id: self.__get_distance(player_id, opponent_id))[0]
            player_closest_friend_to_sender_dist = self.__get_distance(self.sender_id, closest_friend_id)
            player_closest_opponent_to_sender_dist = self.__get_distance(self.sender_id, closest_opponent_id)
            # Assemble the row; order must match get_header() exactly.
            features = []
            features.append(self.pass_id)
            features.append(self.line_num)
            features.append(label)
            features.append(self.sender_id)
            features.append(player_id)
            features.append(is_in_same_team)
            features.append(self.time_start)
            #features.append(self.time_end - self.time_start) # duration
            #features.append(distance / (self.time_end - self.time_start + 0.0001)) # ball speed
            features.append(sender.x)
            features.append(sender.y)
            features.append(player.x)
            features.append(player.y)
            features.append(distance)
            features.append(opponent_to_line_dists[0]) # min_opponent_dist_to_sender_player_line
            features.append(opponent_to_line_dists[1]) # second_opponent_dist_to_sender_player_line
            features.append(opponent_to_line_dists[2]) # third_opponent_dist_to_sender_player_line
            features.append(is_sender_in_back_field)
            features.append(is_sender_in_middle_field)
            features.append(is_sender_in_front_field)
            features.append(is_player_in_back_field)
            features.append(is_player_in_middle_field)
            features.append(is_player_in_front_field)
            features.append(is_sender_player_in_same_field)
            features.append(sender_to_offense_gate_dist)
            features.append(sender_to_defense_gate_dist)
            features.append(player_to_offense_gate_dist)
            features.append(player_to_defense_gate_dist)
            features.append(normalized_player_to_sender_x_diff)
            features.append(1 if normalized_player_to_sender_x_diff >= 0 else 0) # is_player_in_offense_direction_relative_to_sender
            features.append(math.fabs(sender.y - player.y)) # abs_y_diff
            features.append(self.__is_start_of_game())
            features.append(self.__distance_to_center(self.sender_id)) # sender_to_center_distance
            features.append(self.__distance_to_center(player_id)) # player_to_center_distance
            features.append(1 if self.__distance_to_center(player_id) <= 915 else 0) # is_player_in_center_circle
            features.append(self.__is_goal_keeper(self.sender_id, is_sender_left_team)) # is_sender_goal_keeper
            features.append(self.__is_goal_keeper(player_id, is_player_left_team)) # is_player_goal_keeper
            features.append(sender_closest_friend_dist)
            features.append(sender_closest_3_friends_dist)
            features.append(sender_closest_opponent_dist)
            features.append(sender_closest_3_oppononents_dist)
            features.append(friend_distances[0]) # closest friend distance
            features.append(np.mean(friend_distances[:3])) # closest 3 friends avg distance
            features.append(opponent_distances[0]) # closest opponent distance
            features.append(np.mean(opponent_distances[:3])) # closest 3 opponent avg distance
            features.append(sender_to_offense_gate_dist_rank_relative_to_friends)
            features.append(sender_to_offense_gate_dist_rank_relative_to_opponents)
            features.append(sender_to_top_sideline_dist_rank_relative_to_friends)
            features.append(sender_to_top_sideline_dist_rank_relative_to_opponents)
            features.append(sender_team_closest_dist_to_offense_goal_line)
            features.append(sender_team_closest_dist_to_defense_goal_line_exclude_goalie)
            features.append(sender_team_closest_dist_to_top_sideline)
            features.append(sender_team_cloeset_dist_to_bottom_sideline)
            features.append(player_to_offense_gate_dist_rank_relative_to_friends)
            features.append(player_to_offense_gate_dist_rank_relative_to_opponents)
            features.append(player_to_top_sideline_dist_rank_relative_to_friends)
            features.append(player_to_top_sideline_dist_rank_relative_to_opponents)
            features.append(sender_team_median_dist_to_offense_goal_line)
            features.append(sender_team_median_dist_to_top_sideline)
            features.append(player_to_sender_dist_rank_among_friends)
            features.append(player_to_sender_dist_rank_among_opponents)
            features.append(min_pass_angle)
            features.append(num_dangerous_opponents_along_passing_line)
            features.append(player_closest_friend_to_sender_dist)
            features.append(player_closest_opponent_to_sender_dist)
            if get_features:
                yield features
            else:
                yield output_separator.join([str(feature) for feature in features])
@staticmethod
def get_feature_start_column():
return 5
@staticmethod
def get_header():
    """Return the ordered column names for one featurized (pass, candidate) row.

    The order must match exactly what features_generator appends; the first
    five entries are identifiers, the rest are model features (see
    get_feature_start_column).

    NOTE: 'sender_team_cloeset_dist_to_bottom_sideline' is misspelled on
    purpose-keeping: the same spelling is used by the feature computation and
    by feature_whitelist, so correcting it here would break the lookup.
    """
    features = [
        'pass_id',
        'line_num',
        'label',
        'sender_id',
        'player_id',
        'is_in_same_team',
        'time_start',
        #'duration',
        #'ball_speed',
        'sender_x',
        'sender_y',
        'player_x',
        'player_y',
        'distance',
        'min_opponent_dist_to_sender_player_line',
        'second_opponent_dist_to_sender_player_line',
        'third_opponent_dist_to_sender_player_line',
        'is_sender_in_back_field',
        'is_sender_in_middle_field',
        'is_sender_in_front_field',
        'is_player_in_back_field',
        'is_player_in_middle_field',
        'is_player_in_front_field',
        'is_sender_player_in_same_field',
        'sender_to_offense_gate_dist',
        'sender_to_defense_gate_dist',
        'player_to_offense_gate_dist',
        'player_to_defense_gate_dist',
        'norm_player_sender_x_diff',
        'is_player_in_offense_direction_relative_to_sender',
        'abs_y_diff',
        'is_start_of_game',
        'sender_to_center_distance',
        'player_to_center_distance',
        'is_player_in_center_circle',
        'is_sender_goal_keeper',
        'is_player_goal_keeper',
        'sender_closest_friend_dist',
        'sender_closest_3_friends_dist',
        'sender_closest_opponent_dist',
        'sender_closest_3_opponents_dist',
        'player_closest_friend_dist',
        'player_closest_3_friends_dist',
        'player_closest_opponent_dist',
        'player_closest_3_opponents_dist',
        'sender_to_offense_gate_dist_rank_relative_to_friends',
        'sender_to_offense_gate_dist_rank_relative_to_opponents',
        'sender_to_top_sideline_dist_rank_relative_to_friends',
        'sender_to_top_sideline_dist_rank_relative_to_opponents',
        'sender_team_closest_dist_to_offense_goal_line',
        'sender_team_closest_dist_to_defense_goal_line_exclude_goalie',
        'sender_team_closest_dist_to_top_sideline',
        'sender_team_cloeset_dist_to_bottom_sideline',
        'player_to_offense_gate_dist_rank_relative_to_friends',
        'player_to_offense_gate_dist_rank_relative_to_opponents',
        'player_to_top_sideline_dist_rank_relative_to_friends',
        'player_to_top_sideline_dist_rank_relative_to_opponents',
        'sender_team_median_dist_to_offense_goal_line',
        'sender_team_median_dist_to_top_sideline',
        'player_to_sender_dist_rank_among_friends',
        'player_to_sender_dist_rank_among_opponents',
        'min_pass_angle',
        'num_dangerous_opponents_along_passing_line',
        'player_closest_friend_to_sender_dist',
        'player_closest_opponent_to_sender_dist'
    ]
    return features
@staticmethod
def get_feature_to_index():
    """Map each header/feature name to its column index."""
    return {name: index for index, name in enumerate(Pass.get_header())}
@staticmethod
def get_index_to_feature():
    """Map each column index to its header/feature name."""
    return dict(enumerate(Pass.get_header()))
def __get_min_opponent_dist_to_sender_player_line(self, player_id):
    """Distances from opponents to the sender->player passing line.

    Returns a sorted list of exactly 11 distances, padded with a large dummy
    value when fewer opponents qualify. Only opponents whose perpendicular
    projection falls between sender and player (tested on x only) count.
    Returns [-1] * 11 when the candidate is not on the sender's team, and
    all-dummy values when sender and player (nearly) coincide.
    """
    if not self.__in_same_team(self.sender_id, player_id):
        return [-1] * 11
    opponents = {id: self.players[id] for id in self.players.keys() if not self.__in_same_team(player_id, id)}
    sender = self.players[self.sender_id]
    player = self.players[player_id]
    sender_to_player_vec = np.array([player.x - sender.x, player.y - sender.y])
    dummy_large_distance = 50000
    if np.linalg.norm(sender_to_player_vec) < 0.0001: # if sender to player distance is too small, then don't need to calc
        return [dummy_large_distance] * 11
    distances = []
    for id in opponents.keys():
        opponent = opponents[id]
        sender_to_opponent_vec = np.array([opponent.x - sender.x, opponent.y - sender.y])
        # Project the opponent onto the passing line (vector projection).
        # refer to https://blog.csdn.net/tracing/article/details/46563383
        sender_to_projection_point_vec = \
            (np.dot(sender_to_player_vec, sender_to_opponent_vec) / np.linalg.norm(sender_to_player_vec)) * \
            (sender_to_player_vec / np.linalg.norm(sender_to_player_vec))
        projection_point_to_opponent_vec = sender_to_opponent_vec - sender_to_projection_point_vec
        distance = np.linalg.norm(projection_point_to_opponent_vec)
        projection_point_vec = np.array([sender.x, sender.y]) + sender_to_projection_point_vec
        # Keep opponents whose projection lies between sender and player.
        # NOTE(review): the bounds check uses the x coordinate only, so a
        # (near-)vertical pass accepts every opponent -- confirm intended.
        if ((projection_point_vec[0] >= sender.x and projection_point_vec[0] <= player.x) \
            or (projection_point_vec[0] <= sender.x and projection_point_vec[0] >= player.x)):
            distances.append(distance)
    if len(distances) < 11:
        distances += [dummy_large_distance] * (11 - len(distances))
    distances.sort()
    return distances
def __get_player_field(self, player_id, is_player_left_team):
    """Classify the player's x position: 0 = back, 1 = middle, 2 = front field."""
    x = self.players[player_id].x
    if math.fabs(x) <= 1750:
        return 1  # middle band around the halfway line
    in_own_half = x < 0 if is_player_left_team else x > 0
    return 0 if in_own_half else 2
def __normalized_player_to_sender_x_diff(self, player_id, is_sender_left_team):
    """X offset from sender to player, sign-normalized so that the sender's
    offense direction is always positive and defense direction negative."""
    diff = self.players[player_id].x - self.players[self.sender_id].x
    if is_sender_left_team:
        return diff
    return -1 * diff
def __is_start_of_game(self):
    """Return 1 when both teams still stand on their own halves (kick-off
    formation, within a 200-unit tolerance around midfield), else 0."""
    home_ids = [pid for pid in self.players.keys() if pid <= 14]
    away_ids = [pid for pid in self.players.keys() if pid > 14]
    home_is_left = self.__is_player_left_team(home_ids[0])
    left_ids, right_ids = (home_ids, away_ids) if home_is_left else (away_ids, home_ids)
    tolerance = 200
    left_team_max_x = np.max([self.players[pid].x for pid in left_ids])
    right_team_min_x = np.min([self.players[pid].x for pid in right_ids])
    if left_team_max_x <= tolerance and right_team_min_x >= -tolerance:
        return 1
    return 0
def __is_player_left_team(self, player_id):
    """True when the player's team has the smaller mean x, i.e. plays on the
    left side of the pitch (otherwise it is the right team)."""
    own_xs, other_xs = [], []
    for pid in self.players.keys():
        target = own_xs if self.__in_same_team(player_id, pid) else other_xs
        target.append(self.players[pid].x)
    return np.mean(own_xs) < np.mean(other_xs)
def __player_to_gate_distance(self, player_id, is_player_left_team):
    """Euclidean distances from the player to the offense and defense goals.

    Goals sit at (+-5250, 0); which is which depends on the team's side.
    Returns (to_offense_gate, to_defense_gate).
    """
    p = self.players[player_id]
    offense_gate_x = 5250 if is_player_left_team else -5250
    position = np.array([p.x, p.y])
    to_offense = np.linalg.norm(position - np.array([offense_gate_x, 0]))
    to_defense = np.linalg.norm(position - np.array([-offense_gate_x, 0]))
    return to_offense, to_defense
def __player_to_goal_line_distance(self, player_id, is_player_left_team):
    """Absolute x distances from the player to the offense and the defense
    goal lines (at x = +-5250). Returns (to_offense_line, to_defense_line)."""
    x = self.players[player_id].x
    offense_line_x = 5250 if is_player_left_team else -5250
    # the defense line is the mirror of the offense line
    return math.fabs(offense_line_x - x), math.fabs(x + offense_line_x)
def __distance_to_center(self, player_id):
    """Distance from the pitch center (origin) to the player."""
    p = self.players[player_id]
    return np.sqrt(p.x ** 2 + p.y ** 2)
def __is_goal_keeper(self, player_id, is_player_left_team):
    """Heuristic keeper detection: 1 when no team-mate stands deeper toward
    the own goal line than this player, else 0."""
    teammate_xs = [
        self.players[other_id].x
        for other_id in self.players.keys()
        if other_id != player_id and self.__in_same_team(player_id, other_id)
    ]
    own_x = self.players[player_id].x
    if is_player_left_team:
        # left team defends the smaller-x goal
        return 1 if own_x <= np.min(teammate_xs) else 0
    return 1 if own_x >= np.max(teammate_xs) else 0
def __in_same_team(self, player_id_1, player_id_2):
    """Ids 1-14 form one team, 15-28 the other; two players are team-mates
    exactly when their ids fall on the same side of 14.5."""
    return (player_id_1 > 14.5) == (player_id_2 > 14.5)
def __get_distance(self, player_id_1, player_id_2):
    """Euclidean distance between two players (caller must verify the ids)."""
    a = self.players[player_id_1]
    b = self.players[player_id_2]
    return np.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)
def __validate(self):
    """Cache self.sender / self.receiver; False when either id is unknown
    (in that case the attributes are left unset)."""
    players = self.players
    if self.sender_id in players and self.receiver_id in players:
        self.sender = players[self.sender_id]
        self.receiver = players[self.receiver_id]
        return True
    return False
def __get_sender_receiver_distance(self):
    """Distance between sender and receiver (__validate must run first to
    populate self.sender / self.receiver)."""
    dx = self.sender.x - self.receiver.x
    dy = self.sender.y - self.receiver.y
    return np.sqrt(dx ** 2 + dy ** 2)
def __calculate_sender_player_opponent_angle(self, sender_id, player_id, opponent_id):
    """Angle in degrees at the sender between the sender->player ray and the
    sender->opponent ray."""
    sender = self.players[sender_id]
    origin = np.array([sender.x, sender.y])
    to_player = np.array([self.players[player_id].x, self.players[player_id].y]) - origin
    to_opponent = np.array([self.players[opponent_id].x, self.players[opponent_id].y]) - origin
    cosine = np.dot(to_player, to_opponent) / \
        (np.linalg.norm(to_player) * np.linalg.norm(to_opponent))
    # clamp against floating-point drift outside arccos' domain
    cosine = np.clip(cosine, -1.0, 1.0)
    return np.degrees(np.arccos(cosine))
def pass_builder(pass_id, line_num, tokens):
    """Build a Pass from one CSV row.

    Row layout: [time_start, time_end, sender_id, receiver_id,
    28 x coordinates, 28 y coordinates]. Players whose x or y token is empty
    are simply not added to the pass.
    """
    players_count = 28
    a_pass = Pass(pass_id, line_num,
                  int(tokens[0]), int(tokens[1]), int(tokens[2]), int(tokens[3]))
    for i in range(players_count):
        x_token = tokens[4 + i]
        y_token = tokens[4 + players_count + i]
        if x_token and y_token:
            a_pass.add_player(i + 1, float(x_token), float(y_token))
    return a_pass
def get_passes():
    """Read passes.csv (skipping the header row) and return valid Pass objects.

    Rows with fewer than 60 columns or whose sender has no coordinates are
    dropped. pass_id counts only the kept passes; line_num is the original
    1-based file line.

    Fix: open the file via a context manager so the handle is closed.
    """
    passes = []
    pass_id = 0
    line_num = 0
    first_line = True
    with open('passes.csv') as reader:
        for line in reader:
            line_num += 1
            if first_line:  # skip the CSV header
                first_line = False
                continue
            tokens = line.split(',')
            if len(tokens) < 60:
                continue
            a_pass = pass_builder(pass_id, line_num, tokens)
            # validate: the sender must actually be on the pitch
            if a_pass.sender_id not in a_pass.players:
                continue
            #if (a_pass.sender_id - 14.5) * (a_pass.receiver_id - 14.5) < 0: continue
            passes.append(a_pass)
            pass_id += 1
    return passes
def featurize_one_pass(a_pass):
    """Return every feature row of one pass with each value stringified."""
    return [
        [str(feature) for feature in features]
        for features in a_pass.features_generator(get_features=True)
    ]
def get_header_and_features(shuffle=False, npy_file=''):
    """Return (headers, data) where data[i] is the list of stringified feature
    rows for pass i.

    When npy_file is given the cached features are loaded (shuffle is then
    ignored); otherwise all passes are read and featurized in parallel.

    Fix: close and join the worker pool instead of leaking its processes.
    """
    if npy_file:
        data = np.load(npy_file)
    else:
        passes = get_passes()
        if shuffle:
            random.seed(30)  # fixed seed keeps the split reproducible
            random.shuffle(passes)
        pool = Pool(THREAD_COUNT)
        try:
            data = pool.map(featurize_one_pass, tqdm(passes))
        finally:
            pool.close()
            pool.join()
    headers = Pass.get_header()
    assert len(data[0][0]) == len(headers), 'Feature count (%d) is not the same as header count (%d)' % (len(data[0][0]), len(headers))
    return headers, data
def featurize():
    """Featurize all passes and write 70/10/20 train/val/test TSV splits,
    plus a generated.cs snippet declaring one C# variable per feature."""
    headers, data = get_header_and_features()
    with open('train.tsv', 'w') as train_writer, open('val.tsv', 'w') as val_writer, open('test.tsv', 'w') as test_writer:
        # NOTE(review): header_item_count is never used — confirm before deleting.
        header_item_count = len(Pass.get_header())
        train_writer.write(output_separator.join(Pass.get_header()) + '\n')
        val_writer.write(output_separator.join(Pass.get_header()) + '\n')
        test_writer.write(output_separator.join(Pass.get_header()) + '\n')
        counter = len(data)
        print('Total valid samples count: %d' % counter)
        random.seed(30)
        #random.shuffle(passes)
        train_counts = int(counter * 0.7)
        val_counts = int(counter * 0.1)
        print('Train count: %d, val count: %d, test count: %d' % (train_counts, val_counts, counter - train_counts - val_counts))
        for i in range(counter):
            if i % 1000 == 0: print(i)
            #if i > (train_counts):
            #    receiver_features = [features for features in data[i] if int(features[2]) == 1]
            #    if len(receiver_features) == 0 or int(receiver_features[0][5]) == 0:
            #        continue
            # NOTE(review): '<=' puts train_counts+1 passes into train — off by
            # one vs the printed counts; confirm intended.
            writer = train_writer if i <= train_counts else val_writer if i <= (train_counts+val_counts) else test_writer
            for features in data[i]:  # one row per candidate player of pass i
                writer.write(output_separator.join(features) + "\n")
    # Emit C# declarations mirroring the feature order for downstream use.
    with open('generated.cs', 'w') as writer:
        for feature in Pass.get_header():
            writer.write("double %s = features[i++];\n" % feature.strip())
def featurize_npy():
    """Featurize all passes (shuffled) and cache the rows to all_features.npy."""
    _, data = get_header_and_features(True)
    np.save('all_features.npy', data)
def writePassingFeaturesInSingleFile():
    """Dump the header plus every featurized pass row into passingfeatures.tsv."""
    headers, data = get_header_and_features()
    with open('passingfeatures.tsv', 'w') as feature_writer:
        feature_writer.write(output_separator.join(headers) + '\n')
        for pass_rows in data:
            for features in pass_rows:
                feature_writer.write(output_separator.join(features) + '\n')
# Features excluded from experiments; each trailing comment names the highly
# correlated alternative that is kept instead. Retained for reference — the
# active selection is feature_whitelist below.
feature_blacklist = [
    'distance', # or 'player_closest_opponent_to_sender_dist'
    'player_to_sender_dist_rank_among_opponents', # or 'player_to_sender_dist_rank_among_friends'
    'is_player_in_offense_direction_relative_to_sender', # or 'norm_player_sender_x_diff'
    #'player_to_offense_gate_dist_rank_relative_to_friends', # 'player_to_offense_gate_dist_rank_relative_to_opponents'
    'player_closest_3_friends_dist', # 'player_closest_friend_dist'
    'player_closest_3_opponents_dist', # 'player_closest_opponent_dist'
    'is_in_same_team', 'min_opponent_dist_to_sender_player_line', 'second_opponent_dist_to_sender_player_line', # 'third_opponent_dist_to_sender_player_line'
    'sender_team_closest_dist_to_offense_goal_line', # ''
]
# Features allowed into the model when check_whitelist is enabled (see
# featurize_svm / cross_validation, which also append the five id columns).
# Names must match Pass.get_header() exactly, including the deliberate
# 'cloeset' misspelling. Commented entries were tried and disabled.
feature_whitelist = [
    "min_pass_angle", "abs_y_diff", "player_closest_friend_to_sender_dist",
    "distance", "num_dangerous_opponents_along_passing_line",
    "player_to_sender_dist_rank_among_friends", "norm_player_sender_x_diff",
    "player_to_offense_gate_dist_rank_relative_to_friends",
    "player_to_offense_gate_dist_rank_relative_to_opponents",
    "player_closest_friend_dist", "is_player_goal_keeper",
    "player_closest_opponent_dist", #"is_sender_player_in_same_field",
    "is_in_same_team", "is_sender_in_front_field",
    "sender_team_closest_dist_to_offense_goal_line", "is_player_in_center_circle",
    #"is_player_in_middle_field",
    "player_to_center_distance",
    "is_player_in_front_field", "is_player_in_back_field",
    "player_to_offense_gate_dist", "time_start", #"is_start_of_game",
    "sender_closest_friend_dist", "sender_to_offense_gate_dist_rank_relative_to_friends",
    "sender_closest_opponent_dist", #"player_closest_3_friends_dist",
    "is_sender_goal_keeper", "sender_to_center_distance",
    "is_sender_in_back_field", #"is_sender_in_middle_field",
    #"sender_x", "player_x", "player_y",
    "player_to_top_sideline_dist_rank_relative_to_friends",
    "sender_team_cloeset_dist_to_bottom_sideline",
    "sender_team_median_dist_to_top_sideline",
    "sender_team_closest_dist_to_top_sideline",
    #"sender_y"
]
def featurize_svm():
    """Write LightGBM ranking inputs: rank.{train,val,test} plus matching
    .query (rows per pass) and .id (pass_id, line_num, receiver_id) files,
    split 70/10/20 over all featurized passes.

    Fixes: build a local whitelist instead of mutating the module-level
    feature_whitelist (repeated calls kept appending the id columns), and
    drop the dead receiver_features computation that fed a commented-out
    filter.
    """
    # multi-threaded version, 1.6 times faster
    headers, data = get_header_and_features()
    dir = r'./lightgbm/'
    whitelist = feature_whitelist + ['pass_id', 'line_num', 'label', 'sender_id', 'player_id']
    whitelist_ids = set(i for i, header in enumerate(headers) if header in whitelist)
    assert len(data[0][0]) == len(headers), 'Feature count (%d) is not the same as header count (%d)' % (len(data[0][0]), len(headers))
    with open(dir + 'rank.train', 'w') as train_writer, open(dir + 'rank.train.query', 'w') as train_query_writer, open(dir + 'rank.train.id', 'w') as train_id_writer,\
        open(dir + 'rank.val', 'w') as val_writer, open(dir + 'rank.val.query', 'w') as val_query_writer, open(dir + 'rank.val.id', 'w') as val_id_writer, \
        open(dir + 'rank.test', 'w') as test_writer, open(dir + 'rank.test.query', 'w') as test_query_writer, open(dir + 'rank.test.id', 'w') as test_id_writer:
        counter = len(data)
        random.seed(30)
        #random.shuffle(data)
        train_counts = int(counter * 0.7)
        val_counts = int(counter * 0.1)
        feature_start_column = Pass.get_feature_start_column()
        for i in range(counter):
            if i % 1000 == 0: print(i)
            # route pass i to the train / val / test writer trio
            writer = train_writer if i <= train_counts else val_writer if i <= (train_counts+val_counts) else test_writer
            query_writer = train_query_writer if i <= train_counts else val_query_writer if i <= (train_counts+val_counts) else test_query_writer
            id_writer = train_id_writer if i <= train_counts else val_id_writer if i <= (train_counts+val_counts) else test_id_writer
            for features in data[i]:
                output_features = []
                output_features.append(features[2]) # label
                for j in range(feature_start_column, len(features)):
                    # sparse SVM-rank format: 1-based feature ids
                    if features[j] != 0 and j in whitelist_ids:
                        output_features.append('%d:%s' % (j - feature_start_column + 1, features[j]))
                writer.write('%s\n' % (" ".join(output_features)))
                id_writer.write('%s\t%s\t%s\n' % (features[0], features[1], features[4])) # pass_id, line_num, receiver_id
            query_writer.write('%d\n' % len(data[i]))
def read_lines(reader, num):
    """Read the next `num` lines from an open file-like object, stripped."""
    return [reader.readline().strip() for _ in range(num)]
def lightgbm_pred_accuracy(label_file, query_file, predict_file, id_file, output_file):
    """Score per-pass ranking predictions.

    Each query line gives the number of candidate rows of one pass; labels,
    scores and ids are consumed in lockstep from the three aligned files.
    Writes 'pass_id<TAB>line_num<TAB>ranked_receiver_ids' per pass to
    output_file and prints top-1..5 accuracy and the mean reciprocal rank.
    Returns (topN_accuracies, mean_reciprocal_rank).

    Fix: all file handles are now closed via context managers (previously
    only the output writer was closed).
    """
    #os.popen('/mnt/c/source/github/LightGBM/lightgbm config=lightgbm/predict.conf')
    topN = 5
    counter = 0
    topn_correct_counters = [0] * topN
    reciprocal_ranks = 0.0
    with open(query_file) as query_reader:
        pass_lines = [int(count) for count in query_reader.readlines() if count.strip()]
    with open(label_file, 'r') as feature_reader, \
            open(predict_file, 'r') as predict_reader, \
            open(id_file, 'r') as id_reader, \
            open(output_file, 'w') as writer:
        for count in pass_lines:
            labels = [int(line.split()[0]) for line in read_lines(feature_reader, count)]
            results = [float(line) for line in read_lines(predict_reader, count)]
            assert len(labels) == len(results), "len of labels and results should be same"
            id_lines = read_lines(id_reader, count)
            pass_id = id_lines[0].split('\t')[0]
            line_num = id_lines[0].split('\t')[1]
            receiver_ids = [line.split('\t')[2] for line in id_lines]
            receiver = np.argmax(labels)  # index of the true receiver (label 1)
            top_predictions = np.argsort(results)[::-1]  # best score first
            for i in range(topN):
                if receiver in top_predictions[:i+1]:
                    topn_correct_counters[i] += 1
            rank = len(labels) - 1 # set default rank to the last
            for i in range(len(top_predictions)):
                if top_predictions[i] == receiver:
                    rank = i + 1
                    break
            reciprocal_ranks += 1.0 / rank
            counter += 1
            ranked_receiver_ids = [receiver_ids[n] for n in top_predictions]
            writer.write('%s\t%s\t%s\n' % (pass_id, line_num, ",".join(ranked_receiver_ids)))
    topN_accuracies = [0] * topN
    for i in range(topN):
        topN_accuracies[i] = float(topn_correct_counters[i])/counter
        print("Top %d prediction accuracy: %d/%d = %f" % \
            (i+1, topn_correct_counters[i], counter, topN_accuracies[i]))
    mean_reciprocal_rank = reciprocal_ranks / counter
    print("Mean reciporal rank: %f" % mean_reciprocal_rank)
    return topN_accuracies, mean_reciprocal_rank
def xgboost_pred_accuracy(label_file, query_file, predict_file, id_file, output_file):
    """XGBoost predictions use the same file layout as LightGBM's, so reuse
    the same scorer.

    Fix: propagate the (topN_accuracies, mean_reciprocal_rank) result, which
    was previously computed and then silently dropped.
    """
    return lightgbm_pred_accuracy(label_file, query_file, predict_file, id_file, output_file)
# Per-developer path to the LightGBM binary; USER is defined earlier in the file.
# NOTE(review): LIGHTGBM_EXEC stays undefined for any other USER value, so a
# later lightgbm_run(LIGHTGBM_EXEC + ...) would raise NameError — confirm.
if USER == 'Zhiying':
    LIGHTGBM_EXEC = '/mnt/c/source/github/LightGBM/lightgbm'
elif USER == 'Heng':
    LIGHTGBM_EXEC = '/Users/hengli/Projects/mlsa18-pass-prediction/LightGBM/lightgbm'
def lightgbm_run(cmd):
    """Run a shell command from inside ./lightgbm.

    A failing command is reported but not re-raised (best-effort, as before).

    Fix: restore the previous working directory in a finally block so an
    unexpected exception (not just CalledProcessError) cannot leave the
    process chdir'ed into ./lightgbm.
    """
    cwd = os.getcwd()
    os.chdir('./lightgbm')
    try:
        #os.popen(cmd)
        subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError:
        print("Error while calling shell command: " + cmd)
    finally:
        os.chdir(cwd)
def lightgbm_pipeline():
    """End-to-end run: featurize, train LightGBM, predict, and report
    accuracies on the train/val/test splits plus feature importances."""
    print("Featurizing")
    featurize_svm()
    print("Train")
    lightgbm_run(LIGHTGBM_EXEC + ' config=train.conf > train.log')
    print("Predict")
    # per-developer prediction wrapper scripts
    if USER == 'Zhiying':
        lightgbm_run('bash predict.sh')
    elif USER == 'Heng':
        lightgbm_run('bash predict_heng.sh')
    print("Train accuracies:")
    lightgbm_pred_accuracy('lightgbm/rank.train', 'lightgbm/rank.train.query', 'lightgbm/LightGBM_predict_train.txt', 'lightgbm/rank.train.id', 'lightgbm/rank.train.result')
    print("Validation accuracies:")
    lightgbm_pred_accuracy('lightgbm/rank.val', 'lightgbm/rank.val.query', 'lightgbm/LightGBM_predict_val.txt', 'lightgbm/rank.val.id', 'lightgbm/rank.val.result')
    print("Test accuracies:")
    lightgbm_pred_accuracy('lightgbm/rank.test', 'lightgbm/rank.test.query', 'lightgbm/LightGBM_predict_test.txt', 'lightgbm/rank.test.id', 'lightgbm/rank.test.result')
    lightgbm_feature_importance()
def lightgbm_feature_importance():
    """Print 'feature_name=importance' for each entry in the trained model's
    feature-importance section (lines look like 'Column_12=345').

    Fix: read the model file via a context manager so it is closed; the dead
    flag reset right before break was dropped.
    """
    model_file = 'lightgbm/LightGBM_model.txt'
    headers = Pass.get_header()
    feature_start_column = Pass.get_feature_start_column()
    in_importance_section = False
    with open(model_file) as reader:
        for line in reader:
            if "feature importance" in line:
                in_importance_section = True
                continue
            if not in_importance_section: continue
            if not line.startswith("Column_"):
                break  # importance section ended
            column = int(line.strip()[len("Column_"):line.find('=')])
            value = int(line.strip()[line.find('=')+1:])
            # Column ids are 1-based over the feature columns only.
            print('%s=%d' % (headers[column - 1 + feature_start_column], value))
def lightgbm_train():
    # Stub: loads the featurized splits but performs no training yet.
    # NOTE(review): df_train / df_test are unused — presumably a planned
    # python-API training path (see the advanced_example link below); confirm
    # before deleting.
    df_train = pd.read_csv('train.tsv', sep='\t')
    df_test = pd.read_csv('test.tsv', sep='\t')
# https://github.com/Microsoft/LightGBM/blob/21487d8a28e53c63382d3ab8481b073b65176022/examples/python-guide/advanced_example.py
def update_config_file(params, input_config, output_config):
    """Copy input_config to output_config, replacing any line that starts
    with a key from `params` by 'key = value'.

    Fix: both files are opened via context managers so the handles are
    closed (the writer was previously never closed explicitly).
    NOTE: startswith also matches keys that are a prefix of another key.
    """
    with open(input_config, 'r') as reader, open(output_config, 'w') as writer:
        for line in reader:
            line = line.strip()
            for key in params.keys():
                if line.startswith(key):
                    line = '%s = %s' % (key, str(params[key]))
                    break
            writer.write(line + '\n')
def lightgbm_train_test_with_param(params):
    """Train and evaluate LightGBM with `params` overriding the default
    config; returns (train_accuracies, val_accuracies).

    Test accuracies are printed but not returned.
    """
    update_config_file(params, 'lightgbm/train.default.config', 'lightgbm/train.tmp.config')
    print(str(params))
    print("Train")
    lightgbm_run(LIGHTGBM_EXEC + ' config=train.tmp.config >train.log')
    print("Predict")
    lightgbm_run('bash predict.sh')
    print("Train accuracies:")
    train_accuracies, mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.train', 'lightgbm/rank.train.query', 'lightgbm/LightGBM_predict_train.txt', 'lightgbm/rank.train.id', 'lightgbm/rank.train.result')
    print("Validation accuracies:")
    # NOTE(review): this rebinds mean_reciprocal_rank, discarding the train
    # MRR; neither MRR is returned — confirm intended.
    val_accuracies, mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.val', 'lightgbm/rank.val.query', 'lightgbm/LightGBM_predict_val.txt', 'lightgbm/rank.val.id', 'lightgbm/rank.val.result')
    print("Test accuracies:")
    lightgbm_pred_accuracy('lightgbm/rank.test', 'lightgbm/rank.test.query', 'lightgbm/LightGBM_predict_test.txt', 'lightgbm/rank.test.id', 'lightgbm/rank.test.result')
    return (train_accuracies, val_accuracies)
def params_generator(params_to_sweep):
    """Yield every combination of the swept parameter values (cartesian
    product) as a fresh dict {param_name: value}.

    Fixes: dict.keys() is not subscriptable in Python 3 — the first key is
    now taken with next(iter(...)), which also works on Python 2; each
    combination is yielded as a new dict instead of re-yielding one mutated
    dict; the loop variable no longer shadows the builtin `dict`.
    """
    key_count = len(params_to_sweep)
    assert key_count >= 1
    current_key = next(iter(params_to_sweep))
    for value in params_to_sweep[current_key]:
        if key_count == 1:
            yield {current_key: value}
        else:
            # recurse over the remaining keys and prepend the current choice
            remaining = params_to_sweep.copy()
            del remaining[current_key]
            for sub_params in params_generator(remaining):
                combined = {current_key: value}
                combined.update(sub_params)
                yield combined
def hyper_parameter_sweep():
    """Grid-search LightGBM hyper-parameters, logging each run's train/val
    accuracies to tmp.tsv and a final ranking by top-1 validation accuracy."""
    params_to_sweep = {'learning_rate': [0.05, 0.1, 0.2], 'num_trees': [50, 100, 200, 500], 'num_leaves': [31, 63]}
    #params_to_sweep = {'learning_rate': [0.05, 0.07], 'num_trees': [200, 500, 1000], 'min_data_in_leaf': [50],
    #                   'feature_fraction': [0.5], 'bagging_fraction': [0.5], 'num_leaves': [15, 31, 63, 127]}
    # total number of grid points, for progress reporting
    params_count = 1
    for key in params_to_sweep.keys():
        params_count *= len(params_to_sweep[key])
    print('Totally %d parameters to sweep\n' % params_count)
    writer = open('tmp.tsv', 'w')
    results= {}
    top1_val_results = {}  # str(params) -> top-1 validation accuracy
    counter = 0
    for params in params_generator(params_to_sweep):
        print('\nParameter count: %d' % counter)
        counter += 1
        start_time = datetime.datetime.now()
        train_accuracies, val_accuracies = lightgbm_train_test_with_param(params)
        print('Finished in %f seconds' % (datetime.datetime.now() - start_time).total_seconds())
        writer.write('\n\n%s\nTrain accuracies: %s\nValid accuracies: %s\n' % (str(params), str(train_accuracies), str(val_accuracies)))
        writer.flush()  # keep partial results if the sweep is interrupted
        top1_val_results[str(params)] = val_accuracies[0]
        results[str(params)] = (train_accuracies, val_accuracies)
    # Final summary, best validation accuracy first.
    writer.write('\nSorted val accuracies:\n')
    for r in sorted(top1_val_results, key=top1_val_results.get, reverse=True):
        writer.write('%s: top1_val_acc: %f, top1_train_acc: %f\n' % (r, top1_val_results[r], results[r][0][0]))
    writer.close()
def avg_pred_results(readers, out_file):
    """Average line-aligned prediction scores across several open readers and
    write one mean per line to out_file (readers stay open for the caller)."""
    with open(out_file, 'w') as writer:
        for first_line in readers[0].readlines():
            scores = [float(first_line)]
            scores.extend(float(reader.readline()) for reader in readers[1:])
            writer.write("%f\n" % np.mean(scores))
def model_ensemble():
    """Train three LightGBM models with different hyper-parameters, average
    their prediction scores, and report ensemble train/test accuracies.

    Returns (train_accuracies, train_mrr, test_accuracies, test_mrr).
    """
    # Train the three member models; each run overwrites LightGBM_model.txt,
    # so the file is renamed after every run.
    lightgbm_train_test_with_param({'num_leaves': 31, 'learning_rate': 0.05, 'min_data_in_leaf': 50, 'num_trees': 500, 'bagging_fraction': 0.5, 'feature_fraction': 0.5})
    lightgbm_run('mv LightGBM_model.txt LightGBM_model_1.txt')
    lightgbm_train_test_with_param({'learning_rate': 0.05, 'min_data_in_leaf': 500, 'num_trees': 500, 'bagging_fraction': 0.5, 'feature_fraction': 1})
    lightgbm_run('mv LightGBM_model.txt LightGBM_model_2.txt')
    lightgbm_train_test_with_param({'learning_rate': 0.07, 'min_data_in_leaf': 50, 'num_trees': 200})
    lightgbm_run('mv LightGBM_model.txt LightGBM_model_3.txt')
    model_files = ['LightGBM_model_1.txt', 'LightGBM_model_2.txt', 'LightGBM_model_3.txt']
    #model_files = ['LightGBM_model_1.txt', 'LightGBM_model_friend.txt', 'LightGBM_model_opponent.txt']
    output_train_files = []
    output_test_files = []
    reader_train_files = []
    reader_test_files = []
    # Predict train and test with each member model.
    # NOTE(review): the per-model readers are never closed — handle leak.
    for i, model_file in enumerate(model_files):
        output_train_file = 'LightGBM_predict_train_%d.txt' % i
        output_test_file = 'LightGBM_predict_test_%d.txt' % i
        params_train = {'input_model': model_file, 'output_result': output_train_file }
        params_test = {'input_model': model_file, 'output_result': output_test_file }
        update_config_file(params_train, 'lightgbm/predict_train.conf', 'lightgbm/predict_train.tmp.conf')
        lightgbm_run(LIGHTGBM_EXEC + ' config=predict_train.tmp.conf')
        update_config_file(params_test, 'lightgbm/predict_test.conf', 'lightgbm/predict_test.tmp.conf')
        lightgbm_run(LIGHTGBM_EXEC + ' config=predict_test.tmp.conf')
        reader_train_files.append(open('lightgbm/' + output_train_file, 'r'))
        reader_test_files.append(open('lightgbm/' + output_test_file, 'r'))
    # build feature file
    #gen_feature_file = 'lightgbm/model_ensemble_rank.train'
    # Ensemble by averaging the member scores line by line.
    avg_train_outfile = 'lightgbm/model_ensemble_avg_predict_train.txt'
    avg_test_outfile = 'lightgbm/model_ensemble_avg_predict_test.txt'
    avg_pred_results(reader_train_files, avg_train_outfile)
    avg_pred_results(reader_test_files, avg_test_outfile)
    print("Train accuracies:")
    train_accuracies, train_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.train', 'lightgbm/rank.train.query', avg_train_outfile, 'lightgbm/rank.train.id', 'lightgbm/rank.train.result')
    print("Test accuracies:")
    test_accuracies, test_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.test', 'lightgbm/rank.test.query', avg_test_outfile, 'lightgbm/rank.test.id', 'lightgbm/rank.test.result')
    return (train_accuracies, train_mean_reciprocal_rank, test_accuracies, test_mean_reciprocal_rank)
def lightgbm_python():
    """Train an LGBMRanker through the LightGBM python API on the SVM-rank
    files and save the test predictions to LightGBM_predict_test.txt."""
    # load_svmlight_file returns (sparse features, labels)
    X_train, y_train = load_svmlight_file('lightgbm/rank.train')
    X_val, y_val = load_svmlight_file('lightgbm/rank.val')
    X_test, y_test = load_svmlight_file('lightgbm/rank.test')
    # .query files give the number of candidate rows per pass (ranking groups)
    q_train = np.loadtxt('lightgbm/rank.train.query')
    q_val = np.loadtxt('lightgbm/rank.val.query')
    gbm = lgb.LGBMRanker(learning_rate=0.05, n_estimators=500)
    gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_val, y_val)],
            eval_group=[q_val], eval_at=[1,3,5], verbose=False)
    #gbm.save_model('model.txt')
    y_pred = gbm.predict(X_test)
    np.savetxt('lightgbm/LightGBM_predict_test.txt', y_pred)
    #print(y_pred[:10])
    #print(y_pred.shape)
def remove_features(infile, outfile, allowed_features):
    """Filter an SVM-rank feature file down to the given feature ids.

    The label (first token of each line) is always kept; 'id:value' tokens
    survive only when id is in allowed_features. When every real feature of a
    line is filtered out, a '<id>:0' placeholder is appended so the row still
    has at least one feature. Blank lines are dropped.

    Fix: both files are opened via context managers so the handles close.
    """
    with open(infile) as reader, open(outfile, 'w') as writer:
        for line in reader:
            if not line.strip():
                continue
            tokens = line.strip().split()
            output_features = [tokens[0]]  # label token is kept unconditionally
            for token in tokens[1:]:
                feature_id = int(token.split(':')[0])
                if feature_id in allowed_features:
                    output_features.append(token)
            if len(output_features) < 2:
                # keep the row non-empty for the ranker
                output_features.append('%d:0' % list(allowed_features)[0])
            writer.write('%s\n' % ' '.join(output_features))
def train_test_single_feature():
    """Ablation study: train and evaluate LightGBM on each feature alone
    (feature ids 1..51), writing a ranking by top-1 validation accuracy to
    tmp.tsv."""
    headers = Pass.get_header()
    writer = open('tmp.tsv', 'w')
    results= {}
    top1_val_results = {}  # feature id -> top-1 validation accuracy
    for i in range(1, 52):
        # feature id i maps back to header index i - 1 + feature_start_column(5)
        print('Feature %s (%d)' % (headers[i - 1 + 5], i))
        remove_features('lightgbm/rank.default.train', 'lightgbm/rank.train', set([i]))
        remove_features('lightgbm/rank.default.val', 'lightgbm/rank.val', set([i]))
        print("Train")
        lightgbm_run(LIGHTGBM_EXEC + ' config=train.conf > train.log')
        print("Predict")
        if USER == 'Zhiying':
            lightgbm_run('bash predict.sh')
        elif USER == 'Heng':
            lightgbm_run('bash predict_heng.sh')
        print("Train accuracies:")
        train_accuracies, train_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.train', 'lightgbm/rank.train.query', 'lightgbm/LightGBM_predict_train.txt', 'lightgbm/rank.train.id', 'lightgbm/rank.train.result')
        print("Valid accuracies:")
        val_accuracies, val_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.val', 'lightgbm/rank.val.query', 'lightgbm/LightGBM_predict_val.txt', 'lightgbm/rank.val.id', 'lightgbm/rank.val.result')
        top1_val_results[i] = val_accuracies[0]
        results[i] = (train_accuracies, val_accuracies)
    # Summary, best single feature first.
    writer.write('\nSorted val accuracies:\n')
    for r in sorted(top1_val_results, key=top1_val_results.get, reverse=True):
        feature_name = headers[r - 1 + 5]
        writer.write('Feature %s (%d): top1_val_acc: %f, top1_train_acc: %f\n' % (feature_name, r, top1_val_results[r], results[r][0][0]))
    writer.close()
def write_data_to_file_svm(data, feature_filename, query_filename, id_filename, whitelist_ids=None, check_whitelist=False):
    """Write featurized passes in SVM-rank format plus aligned .query and .id
    files.

    When check_whitelist is True, only columns in whitelist_ids are emitted
    (whitelist_ids may stay None otherwise).
    """
    feature_start_column = Pass.get_feature_start_column()
    with open(feature_filename, 'w') as feature_writer, open(query_filename, 'w') as query_writer, open(id_filename, 'w') as id_writer:
        for i in range(len(data)):
            for features in data[i]:
                output_features = []
                output_features.append(features[2]) # label
                for j in range(feature_start_column, len(features)):
                    # NOTE(review): features[j] is a string, so `!= 0` is
                    # always True — zeros are not actually skipped; confirm.
                    if features[j] != 0 and ((not check_whitelist) or j in whitelist_ids):
                        output_features.append('%d:%s' % (j - feature_start_column + 1, features[j]))
                feature_writer.write('%s\n' % (" ".join(output_features)))
                id_writer.write('%s\t%s\t%s\n' % (features[0], features[1], features[4])) # pass_id, line_num, receiver_id
            query_writer.write('%d\n' % len(data[i]))
def get_first_fold_train_val():
    """Write LightGBM train/val files for the first CV fold: fold 0 is the
    (unwritten) test slice, fold 1 is validation, the remaining 80% is train."""
    headers, data = get_header_and_features(npy_file='all_features.npy')
    dir = r'./lightgbm/'
    all_pass_count = len(data)
    test_set_size = all_pass_count // 10
    test_set_index = range(0 * test_set_size, 1 * test_set_size)  # reserved, not written here
    val_set_index = range(1 * test_set_size, 2 * test_set_size)
    train_set_index = range(2 * test_set_size, all_pass_count)
    write_data_to_file_svm(data[train_set_index], dir + 'rank.train',
                           dir + 'rank.train.query', dir + 'rank.train.id')
    write_data_to_file_svm(data[val_set_index], dir + 'rank.val',
                           dir + 'rank.val.query', dir + 'rank.val.id')
def cross_validation(n_folds=10, use_model_ensemble=False, check_whitelist=True):
    """Run n-fold cross validation over the cached features.

    Each fold takes a contiguous 1/n_folds slice as the test set, the last
    50 remaining passes as a fixed validation set, and the rest as training.
    Averaged top-1..5 accuracies and mean reciprocal ranks are written to
    result.txt.

    Fixes: `range(...) + range(...)` is a TypeError in Python 3 — the index
    lists are now built with explicit list() conversions (also valid on
    Python 2); the module-level feature_whitelist is no longer mutated on
    every call (a local copy gets the id columns).
    """
    print("Loading data")
    headers, data = get_header_and_features(npy_file='all_features.npy')
    dir = r'./lightgbm/'
    print("All data size: %d" % len(data))
    feature_to_index = Pass.get_feature_to_index()
    new_data = []
    for passes in data:
        receiver_features = [features for features in passes if features[feature_to_index['label']] == '1']
        #if len(receiver_features) != 1 or receiver_features[0][feature_to_index['is_in_same_team']] == '0':
        #    continue
        #if passes[0][feature_to_index['is_sender_in_back_field']] == '1':
        #if passes[0][feature_to_index['is_sender_in_middle_field']] == '1':
        #if passes[0][feature_to_index['is_sender_in_front_field']] == '1':
        if True:  # filtering currently disabled; keep every pass
            new_data.append(passes)
    data = np.array(new_data)
    print("Filter data size: %d" % len(data))
    all_pass_count = len(data)
    test_set_size = all_pass_count // n_folds
    whitelist = feature_whitelist + ['pass_id', 'line_num', 'label', 'sender_id', 'player_id']
    whitelist_ids = set([i for i, header in enumerate(headers) if header in whitelist])
    train_accs = []
    val_accs = []
    test_accs = []
    train_mrr = []
    val_mrr = []
    test_mrr = []
    for i in range(n_folds):
        print("\nFold number %d" % i)
        test_set_index = range(i * test_set_size, (i+1) * test_set_size)
        train_set_index = list(range(0, i * test_set_size)) + list(range((i+1) * test_set_size, all_pass_count))
        val_set_index = train_set_index[-50:] # a fixed size of 50 passes as validation set
        train_set_index = train_set_index[:-50]
        train_data = data[train_set_index]
        val_data = data[val_set_index]
        test_data = data[test_set_index]
        print("Train data size: %d" % len(train_data))
        print("Val data size: %d" % len(val_data))
        print("Test data size: %d [%d-%d]" % (len(test_data), test_set_index[0], test_set_index[-1]))
        print("Preparing train/test files")
        write_data_to_file_svm(train_data, dir + 'rank.train', dir + 'rank.train.query', dir + 'rank.train.id', whitelist_ids, check_whitelist=check_whitelist)
        write_data_to_file_svm(val_data, dir + 'rank.val', dir + 'rank.val.query', dir + 'rank.val.id', whitelist_ids, check_whitelist=check_whitelist)
        write_data_to_file_svm(test_data, dir + 'rank.test', dir + 'rank.test.query', dir + 'rank.test.id', whitelist_ids, check_whitelist=check_whitelist)
        if use_model_ensemble:
            train_accuracies, train_mean_reciprocal_rank, test_accuracies, test_mean_reciprocal_rank = model_ensemble()
        else:
            print("Train")
            lightgbm_run(LIGHTGBM_EXEC + ' config=train.conf > train.log')
            print("Predict")
            lightgbm_run('bash predict.sh')
            print("Train accuracies:")
            train_accuracies, train_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.train', 'lightgbm/rank.train.query', 'lightgbm/LightGBM_predict_train.txt', 'lightgbm/rank.train.id', 'lightgbm/rank.train.result')
            print("Val accuracies:")
            val_accuracies, val_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.val', 'lightgbm/rank.val.query', 'lightgbm/LightGBM_predict_val.txt', 'lightgbm/rank.val.id', 'lightgbm/rank.val.result')
            print("Test accuracies:")
            test_accuracies, test_mean_reciprocal_rank = lightgbm_pred_accuracy('lightgbm/rank.test', 'lightgbm/rank.test.query', 'lightgbm/LightGBM_predict_test.txt', 'lightgbm/rank.test.id', 'lightgbm/rank.test.result')
        train_accs.append(train_accuracies)
        #val_accs.append(val_accuracies)
        test_accs.append(test_accuracies)
        train_mrr.append(train_mean_reciprocal_rank)
        #val_mrr.append(val_mean_reciprocal_rank)
        test_mrr.append(test_mean_reciprocal_rank)
    with open('result.txt', 'w') as result_writer:
        result_writer.write("\nTrain results:\n")
        for i in range(5):
            result_writer.write("Avg top-%d accuracy: %f\n" % (i+1, np.mean([acc[i] for acc in train_accs])))
        result_writer.write("Avg mean reciprocal rank: %f\n" % np.mean(train_mrr))
        #result_writer.write("\nVal results:\n")
        #for i in range(5):
        #    result_writer.write("Avg top-%d accuracy: %f\n" % (i+1, np.mean([acc[i] for acc in val_accs])))
        #result_writer.write("Avg mean reciprocal rank: %f\n" % np.mean(val_mrr))
        result_writer.write("\nTest results:\n")
        for i in range(5):
            result_writer.write("Avg top-%d accuracy: %f\n" % (i+1, np.mean([acc[i] for acc in test_accs])))
        result_writer.write("Avg mean reciprocal rank: %f\n" % np.mean(test_mrr))
if __name__ == '__main__':
    # Entry point: time whichever pipeline stage(s) are enabled below.
    # Individual stages are toggled by (un)commenting the calls.
    start_time = datetime.datetime.now()
    #featurize()
    #featurize_svm()
    #featurize_npy()
    #writePassingFeaturesInSingleFile()
    #lightgbm_pred_accuracy('lightgbm/rank.train', 'lightgbm/rank.train.query', 'lightgbm/LightGBM_predict_train.txt', 'lightgbm/rank.train.id', 'lightgbm/rank.train.result')
    #lightgbm_pred_accuracy('lightgbm/rank.test', 'lightgbm/rank.test.query', 'lightgbm/LightGBM_predict_test.txt', 'lightgbm/rank.test.id', 'lightgbm/rank.test.result')
    #lightgbm_pipeline()
    #xgboost_pred_accuracy('xgboost/rank.test', 'xgboost/rank.test.group', 'xgboost/pred.txt', 'lightgbm/rank.test.id', 'xgboost/rank.test.result')
    #lightgbm_train_test_with_param({'num_leaves': 31, 'learning_rate': 0.05, 'min_data_in_leaf': 50, 'num_trees': 500, 'bagging_fraction': 0.5, 'feature_fraction': 0.5})
    #lightgbm_train_test_with_param({'learning_rate': 0.05, 'min_data_in_leaf': 500, 'num_trees': 500, 'bagging_fraction': 0.5, 'feature_fraction': 1})
    #lightgbm_train_test_with_param({'learning_rate': 0.07, 'min_data_in_leaf': 50, 'num_trees': 200})
    #get_first_fold_train_val()
    #hyper_parameter_sweep()
    #lightgbm_python()
    #model_ensemble()
    #train_test_single_feature()
    #cross_validation(n_folds=10, use_model_ensemble=True)
    #cross_validation(n_folds=10, use_model_ensemble=True, check_whitelist=False)
    # Report total wall-clock time for whatever ran above.
    print('Finished in %s' % str(datetime.datetime.now() - start_time))
import sys
def match(command, settings):
    """Return True when the failed command is a missing program that the
    Ubuntu command-not-found database can map to an installable package.

    Fixes vs. the original:
    - the bare ``except:`` (which also swallowed SystemExit/KeyboardInterrupt)
      is narrowed to ``except Exception``;
    - the implicit ``None`` return when stderr lacks 'not found' is now an
      explicit ``False`` (both are falsy, so callers are unaffected).
    """
    try:
        # Imported lazily: the module only exists on Debian/Ubuntu systems.
        import CommandNotFound
        if 'not found' not in command.stderr:
            return False
        finder = CommandNotFound.CommandNotFound()
        packages = finder.getPackages(command.script.split(" ")[0])
        # An empty list means no package provides the missing binary.
        return len(packages) > 0
    except Exception:
        # Covers ImportError (module absent) and any lookup failure.
        return False
def get_new_command(command, settings):
    """Suggest ``sudo apt-get install <package>`` for a missing program.

    Returns the install command string, or "" when no suggestion can be
    made.  Fix: the bare ``except:`` is narrowed to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    try:
        # Imported lazily: the module only exists on Debian/Ubuntu systems.
        import CommandNotFound
        finder = CommandNotFound.CommandNotFound()
        if 'not found' in command.stderr:
            packages = finder.getPackages(command.script.split(" ")[0])
            name, _ = packages[0]
            return "sudo apt-get install %s" % name
    except Exception:
        # Message preserved verbatim from the original implementation.
        sys.stderr.write("Can't apt fuck\n")
    return ""
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
class Dog:
    """A dog that barks out its own name."""

    def __init__(self, name):
        # Name announced every time the dog barks.
        self.name = name

    def bulk(self):
        """Print this dog's bark line to stdout.

        NOTE(review): 'bulk' looks like a misspelling of 'bark'; the name
        is kept because callers use it.
        """
        print("%s :Wang Wang Wang" % self.name)
# Demo: create three dogs (named with Chinese personal names) and have
# each one bark once.
d1 = Dog("陈中华")
d2 = Dog("杨哲")
d3 = Dog("焦海")
d1.bulk()
d2.bulk()
d3.bulk()
|
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText, textio
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for the destination table.  Every column is declared
# STRING so the raw pipe-delimited values load without type coercion;
# the trailing space inside each fragment keeps the comma-separated
# schema string well-formed when the pieces are concatenated.
TABLE_SCHEMA = (
    'idkey:STRING, '
    'fecha:STRING, '
    'OBLIGACION:STRING, '
    'FECHA_PAGO:STRING, '
    'VALOR:STRING, '
    'GESTIONES_EFECTIVAS:STRING, '
    'NOMBRE_CARTERA:STRING, '
    'GRABADOR:STRING, '
    'PAGO_A_GRABADOR:STRING, '
    'FRANJA:STRING, '
    'SEMANA:STRING, '
    'TIPO_PAGO:STRING '
)
# ?
class formatearData(beam.DoFn):
    """Beam DoFn: turn one pipe-delimited CSV line into a BigQuery row dict."""

    # Output field names, in the same order as the input columns.
    _FIELDS = ['OBLIGACION', 'FECHA_PAGO', 'VALOR', 'GESTIONES_EFECTIVAS',
               'NOMBRE_CARTERA', 'GRABADOR', 'PAGO_A_GRABADOR', 'FRANJA',
               'SEMANA', 'TIPO_PAGO']

    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        # Load-date string stamped onto every emitted row.
        self.mifecha = mifecha

    def process(self, element):
        columns = element.split('|')
        row = {
            'idkey': str(uuid.uuid4()),  # synthetic unique key per row
            'fecha': self.mifecha,
        }
        # Copy the ten positional columns; like the original, this raises
        # IndexError when a row has fewer than ten fields.
        for position, field in enumerate(self._FIELDS):
            row[field] = columns[position]
        return [row]
def run(archivo, mifecha):
    """Build and submit the Beam pipeline that loads *archivo* into BigQuery.

    archivo: path/URI of the pipe-delimited input file (first line skipped).
    mifecha: load-date string stamped onto every row.
    Returns the literal status string "Corrio Full HD" once the pipeline has
    been submitted (it does not wait for the job to finish).
    """
    gcs_path = "gs://ct-agaval"  # bucket root used for staging/temp/output
    gcs_project = "contento-bi"
    # fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
    # Run on Dataflow only when executing on the production host; otherwise
    # fall back to the local DirectRunner.
    mi_runner = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    # pipeline = beam.Pipeline(runner="DirectRunner")
    pipeline = beam.Pipeline(runner=mi_runner, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "5",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
    ])
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT")
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("archivos/BANCOLOMBIA_BM_20181203.csv", skip_header_lines=1)
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Base_Marcada_small", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("archivos/resultado_archivos_bm/Resultado_Base_Marcada", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/bm/Base_Marcada",file_name_suffix='.csv',shard_name_template='')
    # Append the formatted rows to the crediorbe.recaudo table.
    transformed | 'Escritura a BigQuery Crediorbe' >> beam.io.WriteToBigQuery(
        gcs_project + ":crediorbe.recaudo",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
    )
    # transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
    # 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    # jobObject.wait_until_finish()
    return ("Corrio Full HD")
|
# Dump the file line by line.  Opened in binary mode, so each printed `x`
# is a bytes object (output includes the b'' prefix and escape sequences).
with open("sentenceAnalysis.txt", "rb") as f:
    for x in f:
        print(x)
    '''text = f.read().decode('utf-8-sig')
    print(text)'''
|
import base64
import json
import hashlib
import hmac
import httplib2
import time
# API credentials ('*' placeholders; real values must never be committed).
toby_ACCESS_TOKEN = '*'
toby_SECRET_KEY = '*'
ACCESS_TOKEN = '*'
SECRET_KEY = '*'
# NOTE: this first URL/PAYLOAD pair (BCH withdrawal request) is dead code —
# both names are overwritten by the assignments further below before use.
URL = 'https://api.coinone.co.kr/v1/transaction/coin/'
PAYLOAD = {
    "access_token": ACCESS_TOKEN,
    "address": "16ZMTadHVy6kx6dfVNtu4QyRdg1qXSuQR8",
    "auth_number": 270539,
    "qty": 0.0001,
    "currency": "bch",
}
# Effective request: ask the server to issue an auth number for BCH transfers.
URL = 'https://api.coinone.co.kr/v2/transaction/auth_number/'
PAYLOAD = {
    "access_token": ACCESS_TOKEN,
    "type": "bch",
}
def get_encoded_payload(payload):
    # Coinone v2 API requires a strictly increasing nonce in each payload;
    # millisecond timestamps satisfy that (mutates the caller's dict).
    payload[u'nonce'] = int(time.time()*1000)
    dumped_json = json.dumps(payload)
    # Python 2 code: b64encode accepts the str returned by json.dumps.
    encoded_json = base64.b64encode(dumped_json)
    print encoded_json
    return encoded_json
def get_signature(encoded_payload, secret_key):
    # HMAC-SHA512 over the base64 payload, keyed with the upper-cased secret
    # key — presumably per Coinone's signing rules; confirm against API docs.
    signature = hmac.new(str(secret_key).upper(), str(encoded_payload), hashlib.sha512);
    return signature.hexdigest()
def get_response(url, payload):
encoded_payload = get_encoded_payload(payload)
headers = {
'Content-type': 'application/json',
'X-COINONE-PAYLOAD': encoded_payload,
'X-COINONE-SIGNATURE': get_signature(encoded_payload, SECRET_KEY)
}
http = httplib2.Http()
response, content = http.request(URL, 'POST', headers=headers, body=encoded_payload)
print response
return content
def get_result():
    # Fire the request defined by the module-level URL/PAYLOAD pair,
    # print the raw body, then return it parsed as JSON.
    content = get_response(URL, PAYLOAD)
    print content
    content = json.loads(content)
    return content
# Script entry point: request an auth number and dump the parsed response.
if __name__ == "__main__":
    print get_result()
|
# 创建子进程
# multiprocessing模块就是跨平台版本的多进程模块
# multiprocessing模块提供了一个Process类来代表一个进程对象
# from multiprocessing import Process
# import os
#
#
# # 子进程要执行的代码
# def run_proc(name):
# print('子进程运行 %s (%s)...' % (name, os.getpid()))
#
# if __name__=='__main__':
# print('父进程 %s.' % os.getpid())
# p = Process(target=run_proc, args=('test',))
# print('子进程将要开始运行')
# p.start()
# p.join()
# print('子进程结束.')
#
# '''
# 上面的例子中:
# 创建子进程时,只需要传入一个执行函数和函数的参数,创建一个Process实例,用start()方法启动
# join()方法可以等待子进程结束后再继续往下运行,通常用于进程间的同步
# '''
#
# # 用进程池的方式批量创建子进程
# from multiprocessing import Pool
# import os, time, random
#
#
# def long_time_task(name):
# print('任务运行 %s (%s)...' % (name, os.getpid()))
# start = time.time()
# time.sleep(random.random() * 3)
# end = time.time()
# print('任务 %s 执行了 %0.2f 秒.' % (name, (end - start)))
#
# if __name__=='__main__':
# print('子进程 %s.' % os.getpid())
# p = Pool(4)
# for i in range(5):
# p.apply_async(long_time_task, args=(i,))
# print('等待所有的子进程执行...')
# p.close()
# p.join()
# print('所有进程执行完毕.')
'''
Note:
p = Pool(4) creates a pool of 4 worker processes, so at most 4 tasks run
concurrently.  Pool also has a default size when none is given (the number
of CPU cores).  Watch the output: tasks 0, 1, 2 and 3 start immediately,
while task 4 only starts after one of the earlier tasks finishes.
'''
# # 控制子进程的输入和输出
# import subprocess
#
# print('$ nslookup www.python.org')
# r = subprocess.call(['nslookup', 'www.python.org'])
# print('Exit code:', r)
#
# # 如果子进程还需要输入,则可以通过communicate()方法输入
# import subprocess
#
# print('$ nslookup')
# p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# output, err = p.communicate(b'set q=mx\npython.org\nexit\n')
# print(output.decode('utf-8'))
# print('Exit code:', p.returncode)
# 进程之间的通信
# Python的multiprocessing模块包装了底层的机制,提供了Queue、Pipes等多种方式来交换数据。
# from multiprocessing import Process,Queue
# import os, time, random
#
#
# # 写数据进程的方法
# def write(q):
# print('进程开始写数据 %s'% os.getpid())
# for value in['A','B','C']:
# print('put %s to queue'%value)
# q.put(value)
# time.sleep(random.random())
#
#
# # 读数据进程的方法
# def read(q):
# print('进程开始读取数据 %s'% os.getpid())
# while True:
# value = q.get(True)
# print('%s 进程获取数据%s'%(value, os.getpid()))
#
# if __name__ == '__main__':
# # 父进程创建queue ,并传给各个子进程
# q = Queue()
# pw = Process(target=write, args=(q,))
# pr = Process(target=read, args=(q,))
#
# # 启动子进程pw写入数据
# pw.start()
# # 启动子进程pr读取数据
# pr.start()
# # 等待pw结束
# pw.join()
# # 因为子进程pr是死循环,无法等待期自动结束所以要强制终止
# pr.terminate()
'''
多线程
Python的标准库提供了两个模块:_thread和threading,_thread是低级模块,threading是高级模块,
对_thread进行了封装。绝大多数情况下,我们只需要使用threading这个高级模块。
启动一个线程就是把一个函数传入并创建Thread实例,然后调用start()开始执行:
'''
# import time, threading
#
#
# # 新线程执行的代码
# def loop():
# print('进程 %s 正在执行。。。'% threading.current_thread().name) # current_thread()函数,永远返回当前线程的实例
# n = 0
# while n < 5:
# n = n + 1
# print('thread %s >>> %s' % (threading.current_thread().name, n))
# time.sleep(1) # 进程等待时间
# print('thread %s ended.' % threading.current_thread().name)
#
# print('thread %s is running...' % threading.current_thread().name)
# t = threading.Thread(target=loop, name='测试')
# t.start()
# t.join()
# print('thread %s ended.' % threading.current_thread().name)
'''
多线程和多进程最大的不同在于,多进程中,同一个变量,各自有一份
拷贝存在于每个进程中,互不影响,而多线程中,所有变量都由所有线程
共享,所以,任何一个变量都可以被任何一个线程修改,因此,线程之间
共享数据最大的危险在于多个线程同时改一个变量,把内容给改乱了
'''
import time, threading
# 假定这是你的银行存款:
# balance = 0
#
# def change_it(n):
# # 先存后取,结果应该为0:
# global balance
# balance = balance + n
# balance = balance - n
# def run_thread(n):
# for i in range(1000000):
# change_it(n)
#
# t1 = threading.Thread(target=run_thread, args=(5,))
# t2 = threading.Thread(target=run_thread, args=(8,))
# t1.start()
# t2.start()
# t1.join()
# t2.join()
# print(balance)
# 理论上上面的例子应该是得到0的,但是由于两个线程交替执行导致balance的结果不一定为0
# 原因是因为高级语言的一条语句在CPU执行时是若干条语句,即使一个简单的计算balance = balance + n,也分两步:
# 计算balance + n,存入临时变量中;将临时变量的值赋给balance
# 究其原因,是因为修改balance需要多条语句,而执行这几条语句时,线程可能中断,从而导致多个线程把同一个对象的内容改乱了。
'''
就要给change_it()上一把锁,当某个线程开始执行change_it()时,我们说,该线程因为获得了锁,因此其他线程不能同时
执行change_it(),只能等待,直到锁被释放后,获得该锁以后才能改。由于锁只有一个,无论多少线程,同一时刻最多只有
一个线程持有该锁,所以,不会造成修改的冲突。创建一个锁就是通过threading.Lock()来实现
'''
# balance = 0
# lock = threading.Lock()
#
#
# def run_thread(n):
# for i in range(100000):
# # 先要获取锁:
# lock.acquire() # 当多个线程同时执行lock.acquire()时,只有一个线程能成功地获取锁,然后继续执行代码,其他线程就继续等待直到获得锁为止。
# try:
# # 放心地改吧:
# print('%s 正在使用锁'% threading.current_thread().name)
# change_it(n)
# finally:
# # 改完了一定要释放锁:
# print('%s 释放了锁'% threading.current_thread().name)
# lock.release()
#
# t1 = threading.Thread(target=run_thread, args=(5,))
# t2 = threading.Thread(target=run_thread, args=(8,))
# t1.start()
# t2.start()
# t1.join()
# t2.join()
# print('balance 的结果是%s'%balance)
'''
总结:
在多线程环境下,每个线程都有自己的数据。一个线程使用
自己的局部变量比使用全局变量好,因为局部变量只有线程
自己能看见,不会影响其他线程,而全局变量的修改必须加锁
而局部变量的的麻烦在于函数调用的时候
'''
'''
分布式进程:
在Thread和Process中,应当优选Process,因为Process更稳定,
而且,Process可以分布到多台机器上,而Thread最多只能分布到同一台机器的多个CPU上。
Python的multiprocessing模块不但支持多进程,其中managers子模块还支持把多进程分布
到多台机器上。一个服务进程可以作为调度者,将任务分布到其他多个进程中,依靠网络通
信。由于managers模块封装很好,不必了解网络通信的细节,就可以很容易地编写分布式多进程程序。
'''
import random, time, queue
from multiprocessing.managers import BaseManager
# Queue that distributes tasks to the workers:
task_queue = queue.Queue()
# Queue that collects results from the workers:
result_queue = queue.Queue()
# QueueManager subclass of BaseManager; kept empty — the two queues are
# attached to it via register() calls after the class definition.
class QueueManager(BaseManager):
    pass
# 把两个Queue都注册到网络上, callable参数关联了Queue对象:
QueueManager.register('get_task_queue', callable=lambda: task_queue)
QueueManager.register('get_result_queue', callable=lambda: result_queue)
# 绑定端口5000, 设置验证码'abc':
manager = QueueManager(address=('', 5000), authkey=b'abc')
# 启动Queue:
manager.start()
# 获得通过网络访问的Queue对象:
# 当我们在一台机器上写多进程程序时,创建的Queue可以直接拿
# 来用,但是,在分布式多进程环境下,添加任务到Queue不可以
# 直接对原始的task_queue进行操作,那样就绕过了QueueManager
# 的封装,必须通过manager.get_task_queue()获得的Queue接口添加
task = manager.get_task_queue()
result = manager.get_result_queue()
# 放几个任务进去:
for i in range(10):
n = random.randint(0, 10000)
print('Put task %d...' % n)
task.put(n)
# 从result队列读取结果:
print('Try get results...')
for i in range(10):
r = result.get(timeout=10)
print('Result: %s' % r)
# 关闭:
manager.shutdown()
print('master exit.')
|
import smbus
import time
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
# Create the bus connection
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
# Character LCD size
lcd_columns = 16
lcd_rows = 2
# Initialise I2C bus.
i2c = busio.I2C(board.SCL, board.SDA)
# Initialise the LCD class
lcd = character_lcd.Character_LCD_RGB_I2C(i2c, lcd_columns, lcd_rows)
lcd.clear()
# Set LCD color to white
# NOTE(review): [10, 10, 10] is low-intensity (dim) white, not full white
# ([100, 100, 100] would be full brightness) — confirm intended brightness.
lcd.color = [10, 10, 10]
time.sleep(1)
# function to write value to the arduino
def writeNumber(value):
    # Send a single byte to the Arduino at I2C address 0x04.
    bus.write_byte(address, value)
    # Return value is not used by the caller; -1 kept for compatibility.
    return -1
# prompt user for input and turn it into an int to write to arduino
var = input("Enter a Number: ")
# Raises ValueError if the user enters something non-numeric.
var = int(var)
writeNumber(var)
# Echo the entered number on the LCD as well.
lcd.message = str(var)
time.sleep(1)
|
#!/usr/bin/env python3
"""Annotate the output of ReQTL as cis or trans
Created on Aug, 29 2020
@author: Nawaf Alomran
This module annotates the output of ReQTL as cis or trans based on whether the
SNVs resides within its paired gene.
Input + Options
----------------
+ -r: the path to the ReQTL analysis result file
+ -ga: the path to the file gene location annotations
+ -o: the prefix for the output annotated result
Output
------
+ a file with the ReQTLs annotated as cis or trans
How to Run
----------
python -m PyReQTL.annotate \
-r output/ReQTL_test_all_ReQTLs.txt \
-ga data/gene_locations_hg38.txt \
-o ReQTL_test \
-c True
* Python runtime via time command 8.19s user 0.61s system 112% cpu 7.838 total
* R time command line 3.15s user 0.22s system 99% cpu 3.383 total
* Note that, after the importr statements, the Python version runs faster than R
"""
import argparse
import sys
from datetime import datetime
import numpy as np # type: ignore
import pandas as pd # type: ignore
import rpy2.robjects.packages as rpackages # type: ignore
from rpy2.robjects import pandas2ri # type: ignore
from rpy2.robjects.packages import importr # type: ignore
try:
from common import (create_output_dir, output_filename_generator,
bool_conv_args)
except ModuleNotFoundError:
from PyReQTL.common import (create_output_dir, output_filename_generator,
bool_conv_args)
# install the R package GenomicFeatures from within Python
# (module-level side effect: runs once on import and may download packages)
if not rpackages.isinstalled('GenomicFeatures'):
    print("installing GenomicFeatures package ...")
    bioc_manager = rpackages.importr('BiocManager')
    bioc_manager.install('GenomicFeatures')
    print("Done installing the package.")
# importing the following required R packages to be used within Python
print("Kindly wait for the required R packages to be imported into Python...")
g_ranges = importr('GenomicRanges')
print("GenomicRanges package is imported.")
g_alignments = importr('GenomicAlignments')
print("GenomicAlignments package is imported.")
iranges = importr('IRanges')
print("IRanges package is imported.")
print("Done importing.")
# This needs to be activated in order to perform pandas conversion
pandas2ri.activate()
def cis_trans_annotator(rqt_rst: str,
                        gene_ann: str,
                        out_prefx: str,
                        cli: bool = False) -> "pd.DataFrame | None":
    """Annotate the output of ReQTL as cis or trans based on whether the
    SNVs resides within its paired gene

    Parameter
    ---------
    rqt_rst: the path to the ReQTL analysis result file
    gene_ann: the path to the file gene location annotation
    out_prefx: the prefix for the output annotated result
    cli: Whether the function is being executed from the command line.
         Default is False.

    Return
    ------
    reqtl_reslt_arranged: dataframe ReQTLs annotated as cis or trans
    (None when *cli* is True — the result is only written to disk;
    note the original annotated ``-> None``, which was incorrect)

    Output
    ------
    - file with the ReQTLs annotated as cis or trans
    """
    start_time = datetime.now()
    # reading the ReQTL result file from run_matrix_ReQTL
    reqtl_result = pd.read_table(rqt_rst, sep="\t")
    # ------------------------------------------------------------------------#
    # ------------------------------------------------------------------------#
    # -----------------annotate which gene harbors the snp--------------------#
    # classify ReQTLs in which the two members of the pair are in the same----#
    # gene as cis and classify all others as trans----------------------------#
    # ------------------------------------------------------------------------#
    # ------------------------------------------------------------------------#
    reqtl_reslt_arranged = reqtl_result.assign(new_SNP=reqtl_result.SNP)
    # split them into four columns based on the pattern "[:_>]"
    reqtl_reslt_arranged = reqtl_reslt_arranged.new_SNP.str.split('[:_>]',
                                                                  expand=True)
    reqtl_reslt_arranged.columns = ['chrom', 'start', 'ref', 'alt']
    # concatenating the re-arranged dataframe with the original dataframe
    reqtl_reslt_arranged = pd.concat([reqtl_result, reqtl_reslt_arranged],
                                     axis=1)
    # making the new end column the same as the start column
    reqtl_reslt_arranged = reqtl_reslt_arranged.assign(
        end=reqtl_reslt_arranged.start)
    # convert Python Pandas DataFrame to R-dataframe
    reqtl_result_df_r = pandas2ri.py2rpy(reqtl_reslt_arranged)
    # read gene location file and then convert to R dataframe
    gene_locs_py_df = pd.read_table(gene_ann, sep="\t")
    gene_locs_df_r = pandas2ri.py2rpy(gene_locs_py_df)
    # storing the location of genomic features for both R dataframes
    reqtl_reslt_granges_r = g_ranges.GRanges(reqtl_result_df_r)
    gene_loc_granges_r = g_ranges.GRanges(gene_locs_df_r)
    # finding the overlap between the ranges
    overlaps = iranges.findOverlaps(reqtl_reslt_granges_r,
                                    gene_loc_granges_r,
                                    select="last",
                                    type="within")
    # ignore the Pycharm warning later
    # R's NA_integer_ ("no overlap") arrives in Python as INT32_MIN
    # (-2147483648); map those entries to None before indexing.
    overlaps = np.where(overlaps == -2147483648, None, overlaps)
    overlaps = overlaps.tolist()
    # reindex the gene_locs dataframe by the overlaps
    genes_snp = gene_locs_py_df.ensembl_gene.reindex(overlaps)
    reqtl_reslt_arranged['genes_snp'] = pd.Series(genes_snp.values.tolist())
    # if genes_snp == gene in reqtl_reslt_arranged dataframe then it cis
    # otherwise it will be trans
    reqtl_reslt_arranged['class'] = np.where(
        reqtl_reslt_arranged.genes_snp == reqtl_reslt_arranged.gene,
        'cis',
        'trans')
    reqtl_reslt_arranged.loc[reqtl_reslt_arranged['genes_snp'].isna(),
                             'class'] = reqtl_reslt_arranged['genes_snp']
    # drop the unneeded columns
    reqtl_reslt_arranged.drop(
        ['chrom',
         'end',
         'ref',
         'alt',
         'start'], axis=1, inplace=True)
    out_dir = create_output_dir("output")
    annotated_file = output_filename_generator(out_dir,
                                               out_prefx,
                                               "_ReQTLs_cistrans_ann.txt")
    reqtl_reslt_arranged.to_csv(annotated_file, sep="\t", index=False,
                                na_rep='NULL')
    print(f"\nCis/trans annotated ReQTLs saved in {annotated_file}\n")
    if cli:
        print(f"Analysis took after importing the required packages "
              f"{(datetime.now() - start_time).total_seconds()} sec")
    else:
        return reqtl_reslt_arranged
def main() -> None:
    """Parse the command-line arguments and run the cis/trans annotator.

    Parameters
    ---------
    None

    Return
    -------
    None
    """
    USAGE = """Annotate the output of ReQTL as cis or trans based on whether
    the SNV resides within its paired gene"""
    parser = argparse.ArgumentParser(description=USAGE)
    parser.add_argument('-r',
                        dest="rqt_rst",
                        required=True,
                        help="the path to the ReQTL analysis result file")
    parser.add_argument('-ga',
                        dest='gene_ann',
                        required=True,
                        help="the path to the file gene location annotations")
    parser.add_argument('-o',
                        dest="out_prefx",
                        required=True,
                        help="the prefix for the output annotated result")
    parser.add_argument("-c",
                        dest="cli",
                        default=False,
                        type=bool_conv_args,
                        help="""Whether the function is been executed with the
                        command line. Default is False!""")
    args = parser.parse_args()
    rqt_rst = args.rqt_rst
    gene_ann = args.gene_ann
    out_prefx = args.out_prefx
    cli = args.cli
    try:
        cis_trans_annotator(rqt_rst, gene_ann, out_prefx, cli)
    except KeyboardInterrupt:
        # Exit cleanly on Ctrl-C instead of printing a traceback.
        sys.exit('\nthe user ends the program')
# Allow running the module directly as a script.
if __name__ == '__main__':
    main()
|
import django.contrib.auth as auth
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse

from authapp.forms import LoginForm, RegisterForm
def login(request):
    """Render the login form; authenticate and redirect to '/' on valid POST.

    An invalid POST falls through and re-renders the bound form with its
    validation errors.
    """
    if request.method == 'POST':
        form = LoginForm(data=request.POST)
        if form.is_valid():
            auth.login(request, form.get_user())
            return HttpResponseRedirect('/')
    else:
        form = LoginForm()
    return render(request, 'authapp/login.html', {
        'page_title': 'авторизация',
        'form': form,
    })
def logout(request):
    """Log the current user out and redirect to the main index page.

    Fix: the parameter was misspelled ``reqest`` while the body used
    ``request``, so every call raised NameError.  (Django passes the
    request positionally, so renaming the parameter is safe.)
    """
    auth.logout(request)
    return HttpResponseRedirect(reverse('main:index'))
def register(request):
    """Render the registration form; create the user and redirect on valid POST.

    Fixes vs. the original:
    - ``form = RegisterForm`` on GET bound the *class* instead of an
      instance; now instantiated with ``RegisterForm()``.
    - ``reverse`` was used without being imported (NameError on a
      successful POST); the import is added at the top of the module.
    """
    if request.method == 'POST':
        form = RegisterForm(data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('auth:login'))
    else:
        form = RegisterForm()
    context = {
        'page_title': 'Регистрация',
        'form': form,
    }
    return render(request, 'authapp/register.html', context)
from django.contrib import admin
from . import models
@admin.register(models.Region)
class RegionAdmin(admin.ModelAdmin):
    # Region changelist: show id and name; both columns link to the edit form.
    list_display = ["id", "name"]
    list_display_links = ["id", "name"]
@admin.register(models.Gallery)
class GalleryAdmin(admin.ModelAdmin):
    # Gallery changelist with location/region context columns.
    list_display = ["id", "name", 'location', 'region']
    list_display_links = ["id", "name"]
@admin.register(models.Exhibition)
class ExhibitionAdmin(admin.ModelAdmin):
    # Exhibition changelist with fee and ordering index columns.
    list_display = ["id", "name", 'fee', 'index']
    list_display_links = ["id", "name"]
@admin.register(models.ExhibitionImage)
class ExhibitionImageAdmin(admin.ModelAdmin):
    # Images attached to an exhibition.
    list_display = ["id", "exhibition"]
    list_display_links = ["id", "exhibition"]
@admin.register(models.ExhibitionView)
class ExhibitionViewAdmin(admin.ModelAdmin):
    # Per-user exhibition view log with timestamp.
    list_display = ["id", "user", 'exhibition', 'viewed_at']
    list_display_links = ["id", "user"]
# Collect values from the user until 'Done' is entered, then report the
# maximum and minimum.
#
# Fixes vs. the original:
# - `while True: submit == 'Done'; break` compared without effect and broke
#   on the first iteration, and the `while/else` clause is skipped after a
#   `break`, so nothing was ever appended and max()/min() raised ValueError.
# - max()/min() on an empty list is now guarded (entering 'Done' first no
#   longer crashes).
numlist = []
submit = input('Enter a number: ')
while submit != 'Done':
    numlist.append(submit)
    submit = input('Enter a number: ')
# NOTE: inputs are kept as strings, so comparison is lexicographic
# ('9' > '10'), matching the original behaviour.
if numlist:
    #max and min methods for list
    print('Maximum:', max(numlist))
    print('Minimum:', min(numlist))
|
#!/usr/bin/env python3
import serial
# Open the Arduino's serial port at 9600 baud.
# (Cleaned up the stray space in `serial .Serial` — legal Python, same call.)
ser = serial.Serial('/dev/ttyACM0', 9600)
# Send the single ASCII byte '1' to trigger the sketch's handler.
ser.write(b'1')
print("nay")
|
class Memory:
    """A one-slot, calculator-style memory register."""

    def __init__(self):
        # A freshly created memory holds zero, like a cleared calculator.
        self.mem = 0

    def store(self, val):
        """Save *val* into memory and echo it back to the caller."""
        self.mem = val
        return self.mem

    def recall(self):
        """Return whatever value is currently stored."""
        return self.mem


# Module-level singleton shared by importers of this module.
memory = Memory()
|
#!/usr/bin/env python
class RESTOperation(object):
    """A deferred call to a named method on an API object.

    Stores the method name, positional arguments and a callback; execute()
    performs the lookup and call, passing the result to the callback.
    """

    def __init__(self, api_ref=None, callback=None, *args):
        # Fix: the original declared the vararg as `*kwargs`, which actually
        # collects *positional* arguments; renamed to *args for clarity.
        # Vararg names are not part of the call interface, so callers are
        # unaffected.
        self.api_ref = api_ref    # name of the method to invoke on api_obj
        self.args = args          # positional arguments for that call
        self.callback = callback  # receives the call's return value

    def execute(self, api_obj):
        """Invoke ``api_obj.<api_ref>(*args)`` and feed the result to callback."""
        func = getattr(api_obj, self.api_ref)
        self.callback(func(*self.args))
|
# coding=utf-8
import logging
import os
import time as _time
import sqlite3
import threading
from persistqueue.exceptions import Empty
import persistqueue.serializers.pickle
# Surface exceptions raised inside sqlite3 user callbacks instead of
# silently swallowing them (easier debugging).
sqlite3.enable_callback_tracebacks(True)
log = logging.getLogger(__name__)
# 10 seconds internal for `wait` of event
TICK_FOR_WAIT = 10
def with_conditional_transaction(func):
    """Decorator: execute the (sql, params) pair returned by *func* inside a
    locked transaction on the object's putter connection.

    The wrapped method does not run SQL itself — it returns a
    ``(statement, parameters)`` tuple, which this wrapper executes while
    holding ``obj.tran_lock``.  Returns the lastrowid of the executed
    statement.
    """
    def _execute(obj, *args, **kwargs):
        # for MySQL, connection pool should be used since db connection is
        # basically not thread-safe
        _putter = obj._putter
        if str(type(obj)).find("MySQLQueue") > 0:
            # use fresh connection from pool not the shared one
            _putter = obj.get_pooled_conn()
        with obj.tran_lock:
            with _putter as tran:
                # For sqlite3, commit() is called automatically afterwards
                # but for other db API, this is not TRUE!
                stat, param = func(obj, *args, **kwargs)
                # Some drivers yield a Cursor from the context manager,
                # others a Connection; adapt accordingly.
                s = str(type(tran))
                if s.find("Cursor") > 0:
                    cur = tran
                    cur.execute(stat, param)
                else:
                    cur = tran.cursor()
                    cur.execute(stat, param)
                    cur.close()
                    tran.commit()
                return cur.lastrowid
    return _execute
def commit_ignore_error(conn):
    """Commit *conn*, swallowing the 'no transaction is active' error.

    The transaction may already have been committed by the user's
    task_done call; that particular failure is safe to ignore, while any
    other OperationalError is re-raised.
    """
    try:
        conn.commit()
    except sqlite3.OperationalError as ex:
        if 'no transaction is active' not in str(ex):
            raise
        log.debug(
            'Not able to commit the transaction, '
            'may already be committed.'
        )
class SQLBase(object):
"""SQL base class."""
_TABLE_NAME = 'base' # DB table name
_KEY_COLUMN = '' # the name of the key column, used in DB CRUD
_SQL_CREATE = '' # SQL to create a table
_SQL_UPDATE = '' # SQL to update a record
_SQL_INSERT = '' # SQL to insert a record
_SQL_SELECT = '' # SQL to select a record
_SQL_SELECT_ID = '' # SQL to select a record with criteria
_SQL_SELECT_WHERE = '' # SQL to select a record with criteria
_SQL_DELETE = '' # SQL to delete a record
# _MEMORY = ':memory:' # flag indicating store DB in memory
def __init__(self):
"""Initiate a queue in db.
"""
self._serializer = None
self.auto_commit = None
# SQL transaction lock
self.tran_lock = threading.Lock()
self.put_event = threading.Event()
# Action lock to assure multiple action to be *atomic*
self.action_lock = threading.Lock()
self.total = 0
self.cursor = 0
self._getter = None
self._putter = None
@with_conditional_transaction
def _insert_into(self, *record):
return self._sql_insert, record
@with_conditional_transaction
def _update(self, key, *args):
args = list(args) + [key]
return self._sql_update, args
@with_conditional_transaction
def _delete(self, key, op='='):
sql = self._SQL_DELETE.format(
table_name=self._table_name, key_column=self._key_column, op=op)
return sql, (key,)
def _pop(self, rowid=None, raw=False):
with self.action_lock:
if self.auto_commit:
row = self._select(rowid=rowid)
# Perhaps a sqlite3 bug, sometimes (None, None) is returned
# by select, below can avoid these invalid records.
if row and row[0] is not None:
self._delete(row[0])
self.total -= 1
item = self._serializer.loads(row[1])
if raw:
return {
'pqid': row[0],
'data': item,
'timestamp': row[2],
}
else:
return item
else:
row = self._select(
self.cursor, op=">", column=self._KEY_COLUMN, rowid=rowid
)
if row and row[0] is not None:
self.cursor = row[0]
self.total -= 1
item = self._serializer.loads(row[1])
if raw:
return {
'pqid': row[0],
'data': item,
'timestamp': row[2],
}
else:
return item
return None
def update(self, item, id=None):
if isinstance(item, dict) and "pqid" in item:
_id = item.get("pqid")
item = item.get("data")
if id is not None:
_id = id
if _id is None:
raise ValueError("Provide an id or raw item")
obj = self._serializer.dumps(item)
self._update(_id, obj)
return _id
def get(self, block=True, timeout=None, id=None, raw=False):
if isinstance(id, dict) and "pqid" in id:
rowid = id.get("pqid")
elif isinstance(id, int):
rowid = id
else:
rowid = None
if not block:
serialized = self._pop(raw=raw, rowid=rowid)
if serialized is None:
raise Empty
elif timeout is None:
# block until a put event.
serialized = self._pop(raw=raw, rowid=rowid)
while serialized is None:
self.put_event.clear()
self.put_event.wait(TICK_FOR_WAIT)
serialized = self._pop(raw=raw, rowid=rowid)
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
# block until the timeout reached
endtime = _time.time() + timeout
serialized = self._pop(raw=raw, rowid=rowid)
while serialized is None:
self.put_event.clear()
remaining = endtime - _time.time()
if remaining <= 0.0:
raise Empty
self.put_event.wait(
TICK_FOR_WAIT if TICK_FOR_WAIT < remaining else remaining
)
serialized = self._pop(raw=raw, rowid=rowid)
return serialized
def get_nowait(self, id=None, raw=False):
return self.get(block=False, id=id, raw=raw)
def task_done(self):
"""Persist the current state if auto_commit=False."""
if not self.auto_commit:
self._delete(self.cursor, op='<=')
self._task_done()
def queue(self):
rows = self._sql_queue().fetchall()
datarows = []
for row in rows:
item = {
'id': row[0],
'data': self._serializer.loads(row[1]),
'timestamp': row[2],
}
datarows.append(item)
return datarows
@with_conditional_transaction
def shrink_disk_usage(self):
sql = """VACUUM"""
return sql, ()
@property
def size(self):
return self.total
def qsize(self):
return max(0, self.size)
def empty(self):
return self.size == 0
def full(self):
return False
def __len__(self):
return self.size
def _select(self, *args, **kwargs):
start_key = self._start_key()
op = kwargs.get('op', None)
column = kwargs.get('column', None)
next_in_order = kwargs.get('next_in_order', False)
rowid = kwargs.get('rowid') if kwargs.get('rowid', None) else start_key
if not next_in_order and rowid != start_key:
# Get the record by the id
result = self._getter.execute(
self._sql_select_id(rowid), args
).fetchone()
elif op and column:
# Get the next record with criteria
rowid = rowid if next_in_order else start_key
result = self._getter.execute(
self._sql_select_where(rowid, op, column), args
).fetchone()
else:
# Get the next record
rowid = rowid if next_in_order else start_key
result = self._getter.execute(
self._sql_select(rowid), args
).fetchone()
if (
next_in_order
and rowid != start_key
and (not result or len(result) == 0)
):
# sqlackqueue: if we're at the end, start over
kwargs['rowid'] = start_key
result = self._select(*args, **kwargs)
return result
def _count(self):
sql = 'SELECT COUNT({}) FROM {}'.format(
self._key_column, self._table_name
)
row = self._getter.execute(sql).fetchone()
return row[0] if row else 0
def _start_key(self):
if self._TABLE_NAME == 'ack_filo_queue':
return 9223372036854775807 # maxsize
else:
return 0
def _task_done(self):
"""Only required if auto-commit is set as False."""
commit_ignore_error(self._putter)
def _sql_queue(self):
sql = 'SELECT * FROM {}'.format(self._table_name)
return self._getter.execute(sql)
@property
def _table_name(self):
return '`{}_{}`'.format(self._TABLE_NAME, self.name)
@property
def _key_column(self):
return self._KEY_COLUMN
@property
def _sql_create(self):
return self._SQL_CREATE.format(
table_name=self._table_name, key_column=self._key_column
)
@property
def _sql_insert(self):
return self._SQL_INSERT.format(
table_name=self._table_name, key_column=self._key_column
)
@property
def _sql_update(self):
return self._SQL_UPDATE.format(
table_name=self._table_name, key_column=self._key_column
)
def _sql_select_id(self, rowid):
return self._SQL_SELECT_ID.format(
table_name=self._table_name,
key_column=self._key_column,
rowid=rowid,
)
def _sql_select(self, rowid):
    """SELECT statement for the next record starting from *rowid*."""
    return self._SQL_SELECT.format(
        table_name=self._table_name,
        key_column=self._key_column,
        rowid=rowid,
    )
def _sql_select_where(self, rowid, op, column):
return self._SQL_SELECT_WHERE.format(
table_name=self._table_name,
key_column=self._key_column,
rowid=rowid,
op=op,
column=column,
)
def __del__(self):
"""Handles sqlite connection when queue was deleted"""
if self._getter:
self._getter.close()
if self._putter:
self._putter.close()
class SQLiteBase(SQLBase):
    """SQLite3 base class."""

    _TABLE_NAME = 'base'  # DB table name
    _KEY_COLUMN = ''  # the name of the key column, used in DB CRUD
    _SQL_CREATE = ''  # SQL to create a table
    _SQL_UPDATE = ''  # SQL to update a record
    _SQL_INSERT = ''  # SQL to insert a record
    _SQL_SELECT = ''  # SQL to select a record
    _SQL_SELECT_ID = ''  # SQL to select a record with criteria
    _SQL_SELECT_WHERE = ''  # SQL to select a record with criteria
    _SQL_DELETE = ''  # SQL to delete a record
    _MEMORY = ':memory:'  # flag indicating store DB in memory

    def __init__(
        self,
        path,
        name='default',
        multithreading=False,
        timeout=10.0,
        auto_commit=True,
        serializer=persistqueue.serializers.pickle,
        db_file_name=None,
    ):
        """Initiate a queue in sqlite3 or memory.

        :param path: path for storing DB file.
        :param name: the suffix for the table name,
                     table name would be ${_TABLE_NAME}_${name}
        :param multithreading: if set to True, two db connections will be,
                               one for **put** and one for **get**.
        :param timeout: timeout in second waiting for the database lock.
        :param auto_commit: Set to True, if commit is required on every
                            INSERT/UPDATE action, otherwise False, whereas
                            a **task_done** is required to persist changes
                            after **put**.
        :param serializer: The serializer parameter controls how enqueued data
                           is serialized. It must have methods dump(value, fp)
                           and load(fp). The dump method must serialize the
                           value and write it to fp, and may be called for
                           multiple values with the same fp. The load method
                           must deserialize and return one value from fp,
                           and may be called multiple times with the same fp
                           to read multiple values.
        :param db_file_name: set the db file name of the queue data, otherwise
                             default to `data.db`
        """
        super(SQLiteBase, self).__init__()
        self.memory_sql = False
        self.path = path
        self.name = name
        self.timeout = timeout
        self.multithreading = multithreading
        self.auto_commit = auto_commit
        self._serializer = serializer
        # default DB file name, overridden by db_file_name when given
        self.db_file_name = "data.db"
        if db_file_name:
            self.db_file_name = db_file_name
        self._init()

    def _init(self):
        """Initialize the tables in DB."""
        # `log` is a module-level logger defined elsewhere in this module.
        if self.path == self._MEMORY:
            self.memory_sql = True
            log.debug("Initializing Sqlite3 Queue in memory.")
        elif not os.path.exists(self.path):
            os.makedirs(self.path)
            log.debug(
                'Initializing Sqlite3 Queue with path {}'.format(self.path)
            )
        # One shared connection by default; getter/putter are split below
        # when multithreading is enabled for a disk-backed queue.
        self._conn = self._new_db_connection(
            self.path, self.multithreading, self.timeout
        )
        self._getter = self._conn
        self._putter = self._conn
        self._conn.execute(self._sql_create)
        self._conn.commit()
        # Setup another session only for disk-based queue.
        if self.multithreading:
            if not self.memory_sql:
                self._putter = self._new_db_connection(
                    self.path, self.multithreading, self.timeout
                )
        self._conn.text_factory = str
        self._putter.text_factory = str
        # SQLite3 transaction lock
        self.tran_lock = threading.Lock()
        self.put_event = threading.Event()

    def _new_db_connection(self, path, multithreading, timeout):
        """Open a sqlite3 connection for *path* (file or in-memory)."""
        conn = None
        if path == self._MEMORY:
            conn = sqlite3.connect(path, check_same_thread=not multithreading)
        else:
            conn = sqlite3.connect(
                '{}/{}'.format(path, self.db_file_name),
                timeout=timeout,
                check_same_thread=not multithreading,
            )
        # WAL journal allows concurrent readers while a writer is active.
        conn.execute('PRAGMA journal_mode=WAL;')
        return conn

    def close(self):
        """Closes sqlite connections"""
        self._getter.close()
        self._putter.close()

    def __del__(self):
        """Handles sqlite connection when queue was deleted"""
        self.close()
|
def test_analysis():
    """Placeholder test: no assertions implemented yet."""
    pass
|
from torchvision import models
import json
import numpy as np
import torch
from collections import OrderedDict
from operator import itemgetter
import os
def return_top_5(processed_image):
    """Classify *processed_image* with a local Inception-v3 checkpoint and
    return an OrderedDict of the top-5 ImageNet labels -> softmax percent,
    sorted by probability, descending.

    processed_image: presumably a preprocessed batch tensor of shape
    (1, 3, 299, 299) as Inception-v3 expects — TODO confirm at the caller.
    Requires "data/inception_v3_google-1a9a5a14.pth" and
    "data/imagenet_class_index.json" to exist relative to the CWD.
    """
    # inception = models.inception_v3(pretrained=True)
    inception = models.inception_v3()
    # weights loaded from a local checkpoint instead of downloading
    inception.load_state_dict(torch.load("data/inception_v3_google-1a9a5a14.pth"))
    inception.eval()
    result = inception(processed_image)
    # load imagenet classes
    class_idx = json.load(open('data/imagenet_class_index.json'))
    idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
    # sort() returns (values, indices); take the 5 highest-scoring class ids
    result_idx = result.sort()[1][0][-5:]
    # exponentiate and get probabilities (manual softmax over the logits)
    exps = np.exp(result.detach().numpy()[0])
    exps_sum = np.sum(exps)
    softmax = [np.round((j / exps_sum)*100, 2) for j in exps]
    out = []
    for idx in result_idx:
        out.append((idx2label[idx], softmax[idx]))
    # out = {k: v for k, v in dict(out).items()}
    result = OrderedDict(sorted(dict(out).items(), key=itemgetter(1), reverse=True))
    return result
from app import app
from flask import json as fJson
@app.route('/')
@app.route('/index')
def index():
    """Landing page: plain-text greeting."""
    greeting = "Hello, Maxence"
    return greeting
@app.route('/book')
def book():
    """Return the contents of ./books.json as a JSON string.

    Fixes: the original referenced the undefined name ``json`` (the module
    is imported as ``fJson`` at the top of this file) and returned None,
    which makes Flask raise a 500; the commented-out return is restored.
    """
    with open('./books.json', 'r') as jsonfile:
        file_data = fJson.loads(jsonfile.read())
    print(file_data)
    return fJson.dumps(file_data)
|
def power(x, n=2):
    """Return x raised to the integer power n (default: square).

    Generalized: negative exponents now return the reciprocal,
    power(x, -n) == 1 / power(x, n); previously any n < 0 silently
    returned 1. Behavior for n >= 0 is unchanged.
    """
    if n < 0:
        return 1 / power(x, -n)
    s = 1
    while n > 0:
        s = s * x
        n -= 1
    return s
def calc(*numbers):
    """Return the sum of squares of all positional arguments."""
    return sum(value * value for value in numbers)
# Demo: unpack a list into calc's *numbers parameter (prints 14).
nums = [1, 2, 3]
print(calc(*nums))
def person(name, age, **kw):
    """Print the required name/age plus any extra keyword arguments."""
    print(f'name: {name} age: {age} other: {kw}')
|
import bs4
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
import csv
from datetime import date
import os
def writeToCSV(name, info, summary):
    """Append one stock's quote snapshot to ./<today>/<name>.csv.

    :param name: display name / ticker; also used as the csv file name.
    :param info: sequence of parsed elements [price, change, time], each
                 exposing a ``.text`` attribute (bs4 tags at the call site).
    :param summary: flat sequence of elements alternating label/value.

    Fixes: race-free directory creation via ``os.makedirs(exist_ok=True)``
    (was try/except around ``os.mkdir``), ``newline=''`` when opening the
    csv file as the csv module documents, and the stray for/else replaced
    by a plain statement (the else on a loop without break always ran, so
    the separator row is kept unconditionally).
    """
    today = date.today()
    day_dir = os.path.join('.', str(today))
    os.makedirs(day_dir, exist_ok=True)
    file_path = os.path.join(day_dir, name + '.csv')
    with open(file_path, 'a', newline='') as stonk_file:
        fieldnames = ['Name', 'Value']
        stonk_writer = csv.DictWriter(stonk_file, fieldnames=fieldnames)
        # NOTE: header is re-written on every append, as in the original.
        stonk_writer.writeheader()
        stonk_writer.writerow({'Name': 'Info for Stonk:', 'Value': name})
        stonk_writer.writerow({'Name': 'Current Price:', 'Value': info[0].text})
        stonk_writer.writerow({'Name': '$$ Change (% Change) :', 'Value': info[1].text})
        stonk_writer.writerow({'Name': 'Time:', 'Value': info[2].text})
        for x in range(0, len(summary), 2):
            stonk_writer.writerow({'Name': summary[x].text, 'Value': summary[x + 1].text})
        # separator between snapshots
        stonk_writer.writerow({'Name': '-------', 'Value': '-----'})
    print("Today's date:", today)
    return
def stonkInfoFunc(ticker):
    """Scrape the Yahoo Finance quote page for *ticker*, print the stock
    name, and append price + summary-table data to today's CSV file.

    Always returns None; network or parse failures are printed, not raised.
    """
    url = "https://finance.yahoo.com/quote/"+ticker+"?p="+ticker+"&.tsrc=fin-srch"
    try:
        page = urlopen(url)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # consider narrowing to urllib.error.URLError.
        print('Error opening the URL')
        return
    soup = bs4.BeautifulSoup(page,'html.parser')
    # parsePrice downloads the page a second time; returns 0 on failure
    stonkInfo = parsePrice(ticker)
    if stonkInfo == 0:
        return None
    else:
        # The class string below is Yahoo's generated CSS — brittle, and
        # likely to break whenever the site layout changes.
        stonkName = soup.find('div',{'class': 'D(ib) Mt(-5px) Mend(20px) Maw(56%)--tab768 Maw(52%) Ov(h) smartphone_Maw(85%) smartphone_Mend(0px)'}).find('h1').text
        print("\nInfo for Stonk: "+ stonkName+"\n")
        # print("Current Price: " + stonkInfo[0].text)
        # print("$$ Change (% Change) : " + stonkInfo[1].text)
        # print("Time: " + stonkInfo[2].text + "\n")
        summaryInfo = soup.find('div',{'id': 'quote-summary'}).find_all('td')
        writeToCSV(stonkName,stonkInfo,summaryInfo)
        # for x in range(0, len(summaryInfo), 2):
        # # print(summaryInfo[x].text + ": " +summaryInfo[x+1].text)
        # else:
        print("-------------------")
        return None
def parsePrice(ticker):
    """Fetch the Yahoo Finance quote page for *ticker* and return the list
    of price <span> elements, or the sentinel 0 when the lookup fails
    (callers check ``== 0``).

    Fixes: the original repeated the same ``find(...).find_all('span')``
    call after the try/except (a redundant second parse that could raise
    outside the except), and used a bare except; it is narrowed to
    AttributeError (raised when ``find`` returns None).
    """
    url = "https://finance.yahoo.com/quote/"+ticker+"?p="+ticker+"&.tsrc=fin-srch"
    page = urlopen(url)
    soup = bs4.BeautifulSoup(page,'html.parser')
    try:
        # Yahoo's generated CSS class — brittle across site redesigns.
        price = soup.find('div',{'class': 'My(6px) Pos(r) smartphone_Mt(6px)'}).find_all('span')
    except AttributeError:
        print('Error getting info on stock: '+ticker)
        return 0
    return price
if __name__ == "__main__":
watchlist = [
"ATZ.TO",
"BNS.TO",
"GOOS.TO",
"FLT.V",
"ENB.TO",
"ERE-UN.TO",
"FFH.TO",
"INO-UN.TO",
"NWH-UN.TO",
"OPEN",
"RY.TO",
"SGR-UN.TO",
"SRU-UN.TO",
"XBC.TO",
"ACIC",
"N.V",
"VCN.TO",
"VEE.TO",
"VRE.TO",
"XEF.TO",
"XSP.TO",
"XUU.TO",
"0P000073OF.TO"
]
for x in watchlist:
stonkInfoFunc(x) |
#!/usr/bin/env python3
# Minimal TCP echo client: connect to a local echo server, send one
# message, print the echoed reply.
import socket

HOST = '127.0.0.1'  # server address (loopback only)
PORT = 50000        # must match the echo server's listening port

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall(str.encode('Bom dia!'))
# Single read; assumes the echoed message fits in 1024 bytes.
# NOTE(review): the socket is never closed explicitly.
data = s.recv(1024)
print('Mensagem ecoada: ', data.decode())
|
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from PIL import Image
import cv2
import math
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
# Figure handle for the final threshold/timestep plot at the bottom of
# this script.
fig = plt.figure()

DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 100  # Minimum number of steps in a memory to start training
MODEL_NAME = 'trainallSL'
MINIBATCH_SIZE = 64  # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 10  # Terminal states (end of episodes)
MIN_REWARD = -500  # For model save
MEMORY_FRACTION = 0.20

# Environment settings
EPISODES = 200

#### placement-cost / reward shaping constants
Thr_cost = 800        # starting cost threshold; decremented per outer loop
EPI_STEP = 500        # max steps per episode
PRF_Reward = 300      # reward when cost drops below the threshold
PRF_PENALTY = 3
MV_PENALTY = 1
INVLD_PENALTY = -30

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
EPSILON_DECAY = 0.9998
MIN_EPSILON = 0.01

# Stats settings
AGGREGATE_STATS_EVERY = 50  # episodes
SHOW_PREVIEW = True

SIZE = 40      # canvas is SIZE x SIZE cells
NET_NUM = 8    # number of transistors
T = 0          # smoothing constant used by trans.overlap
# Grid positions of transistors 1..8 (-1 = empty cell).
POS_array = np.array([[8, 7, 6], [4, 5, -1], [1, 2, 3]])

#NET_FILE = "C:/Users/mehrnaza/Documents/simulations/my_ML_RF_codes/network.txt"
#SYM_FILE = "C:/Users/mehrnaza/Documents/simulations/my_ML_RF_codes/symmetry.txt"
#FIN_FILE = "C:/Users/mehrnaza/Documents/simulations/my_ML_RF_codes/finnf.txt"
NET_FILE = "./network.txt"
SYM_FILE = "./symmetry.txt"
FIN_FILE = "./finnf.txt"

# Connectivity matrix loaded from disk at import time.
network = np.zeros((NET_NUM, NET_NUM), dtype=int)
with open(NET_FILE, "rb") as f:
    network = np.genfromtxt(f)
symmetry = np.zeros((NET_NUM, NET_NUM), dtype=int)  ######### 1:x-symmetry , 2:x & y symmetry , 3:y symmetry
with open(SYM_FILE, "rb") as f:
    symmetry = np.genfromtxt(f)
fin_prp = np.zeros((NET_NUM, NET_NUM), dtype=int)  ######### 1:x-symmetry , 2:x & y symmetry , 3:y symmetry
with open(FIN_FILE, "rb") as f:
    fin_prp = np.genfromtxt(f)
def Dim(fin_prp):
    """Compute the (x, y) footprint of each transistor.

    Each row of *fin_prp* is read as [gate_fingers, fins, dummies, ...]:
    width = (gate_fingers - 1) + 2 + 2 * dummies, height = fins + 2.
    Returns an int array of shape (len(fin_prp), 2).
    """
    dim = np.zeros((len(fin_prp), 2), dtype=int)
    for row_idx, row in enumerate(fin_prp):
        dim[row_idx][0] = (row[0] - 1) + 2 + row[2] * 2
        dim[row_idx][1] = row[1] + 2
    return dim
def Tot_wire_l(transistors, network):
    """Smooth (log-sum-exp) wire-length estimate for the placement.

    *network* is accepted for interface compatibility but unused here.
    Relies on the module-level SIZE constant.
    """
    lada = SIZE / 70
    s1 = 0
    s2 = 0
    s3 = 0
    s4 = 0
    for t in transistors:
        s1 += np.exp(t.x / (2 * lada))
        s2 += np.exp((-t.x) / (2 * lada))
        s3 += np.exp(t.y / lada)
        s4 += np.exp((-t.y) / lada)
    total = math.log(s1) + math.log(s2) + math.log(s3) + math.log(s4)
    return total
def Tot_overlap(transistors):
    """Sum of pairwise overlap areas over all unordered transistor pairs."""
    total = 0
    count = len(transistors)
    for first in range(count):
        for second in range(first + 1, count):
            total += transistors[first].overlap(transistors[second])
    return total
def Area(transistors):
    """Half-width times height of the bounding box of all transistor
    centres; the box always includes the origin (extrema start at 0)."""
    max_x = min_x = max_y = min_y = 0
    for t in transistors:
        max_x = max(max_x, t.x)
        max_y = max(max_y, t.y)
        min_x = min(min_x, t.x)
        min_y = min(min_y, t.y)
    return ((max_x - min_x) / 2) * (max_y - min_y)
def sym_function(transistors):
    """Penalty for violating the x/y symmetry constraints.

    Uses the module-level `symmetry` matrix: entry 1 demands x-mirrored
    pairs, entry 3 demands y-mirrored pairs, both measured against the
    midpoint of the current bounding box. Relies on module-level SIZE.
    """
    xmax = ymax = 0
    xmin = ymin = SIZE
    for t in transistors:
        xmin = min(t.x, xmin)
        ymin = min(t.y, ymin)
        xmax = max(t.x, xmax)
        ymax = max(t.y, ymax)
    x_mid = (xmax + xmin) / 2
    y_mid = (ymax + ymin) / 2
    cost = 0
    for i in range(len(transistors)):
        for s in range(len(transistors)):
            if symmetry[i, s] == 1:
                cost += max(1 * x_mid - ((transistors[i].x + transistors[s].x) / 2), 0)
            if symmetry[i, s] == 3:
                cost += max(2 * y_mid - (transistors[i].y + transistors[s].y), 0)
    return cost
def N_Cost(transistors):
    """Overlap penalty: 120 * sqrt(total overlap), floored at 2."""
    return max(120 * np.sqrt(Tot_overlap(transistors)), 2)
def P_Cost(transistors, network):
    """Placement cost: 6 * sqrt(area) + wire length, floored at 2."""
    cost = 6 * np.sqrt(Area(transistors)) + 1 * Tot_wire_l(transistors, network)
    return cost if cost >= 2 else 2
def Tot_cost(transistors, network):
    """Total placement cost: proximity + overlap + weighted symmetry terms."""
    symmetry_term = 50 * sym_function(transistors)
    return P_Cost(transistors, network) + N_Cost(transistors) + symmetry_term
class trans:
    """One transistor on the SIZE x SIZE placement canvas.

    Attributes:
        x, y: centre position (x moves in steps of 2, y in steps of 1).
        di_x, di_y: footprint dimensions (overwritten from Dim() on reset).
        PN: constructor flag (1 for PMOS at the call sites, else NMOS).

    Fix: ``PN`` was accepted by ``__init__`` but never stored, so
    ``__str__`` raised AttributeError; it is now kept on the instance.
    """

    def __init__(self, PN):
        """self.x = SIZE / 2
        if PN == 1: #############Pmos
        self.y = (3 * SIZE) / 4
        else:
        self.y = (1 * SIZE) / 4"""
        self.PN = PN  # bug fix: store the constructor argument
        self.x = 0
        self.y = 0
        self.di_x = 2
        self.di_y = 1

    def __str__(self):
        return f"{self.x}, {self.y}, {self.di_x}, {self.di_y}, {self.PN}"

    def __sub__(self, other):
        return (self.x - other.x, self.y - other.y)

    def __add__(self, other):
        return (self.x + other.x, self.y + other.y)

    def overlap(self, other):
        """Smoothed overlap area with *other*.

        sqrt-based soft max/min using the module-level smoothing constant
        T (0 in this file), so the overlap degrades smoothly to zero.
        """
        ov_y_1 = self.y + (self.di_y / 2) - (other.y - (other.di_y / 2))
        ov_y_2 = other.y + (other.di_y / 2) - (self.y - (self.di_y / 2))
        fy = 0.5 * (-np.sqrt(((ov_y_2 - ov_y_1) ** 2) + (T ** 2)) + ov_y_1 + ov_y_2)
        ov_y = 0.5 * (np.sqrt((fy ** 2) + (T ** 2)) + fy)
        ov_x_1 = self.x + (self.di_x / 2) - (other.x - (other.di_x / 2))
        ov_x_2 = other.x + (other.di_x / 2) - (self.x - (self.di_x / 2))
        fx = 0.5 * (-np.sqrt((((ov_x_2 / 2) - (ov_x_1 / 2)) ** 2) + (T ** 2)) + (ov_x_1 / 2) + (ov_x_2 / 2))
        ov_x = 0.5 * (np.sqrt((fx ** 2) + (T ** 2)) + fx)
        return (ov_x * ov_y)

    def action(self, choice):
        """Apply one of 4 directional moves; any other choice is a no-op."""
        ######M0 trans
        if choice == 0:
            self.move(x=1, y=0)
        elif choice == 1:
            self.move(x=-1, y=0)
        elif choice == 2:
            self.move(x=0, y=1)
        elif choice == 3:
            self.move(x=0, y=-1)
        # else:
        #     self.move(x=0, y=0)

    def move(self, x=False, y=False):
        # NOTE(review): 0 is falsy, so move(x=1, y=0) randomizes y instead
        # of holding it fixed — likely unintended, but the training loop may
        # rely on the extra exploration noise; left unchanged. TODO confirm.
        if not x:
            self.x += 2 * np.random.randint(-1, 2)  # values: -1 0 1
        else:
            self.x += 2 * x
        if not y:
            self.y += np.random.randint(-1, 2)
        else:
            self.y += y
        # Clamp back inside the canvas (x keeps a 2-cell margin).
        if self.x < (self.di_x / 2) + 2:
            self.x += 4
        elif (self.x + (self.di_x / 2)) > SIZE - 2:
            self.x -= 4
        if self.y < (self.di_y / 2):
            self.y += 2
        elif (self.y + (self.di_y / 2)) > SIZE - 1:
            self.y -= 2
class TransEnv:
    """RL environment: 8 transistors placed on a SIZE x SIZE canvas,
    observed as an RGB image rasterized by ``get_image``.

    Fixes: ``reset`` called ``env.render`` (the module-level instance)
    instead of ``self.render``; the eight copy-pasted rasterization loops
    in ``get_image`` are collapsed into one loop over a colour table.
    """
    ACTION_SPACE_SIZE = 5
    RETURN_IMAGES = True
    # Thr_cost = 280
    # PRF_Reward = 300
    # PRF_PENALTY = 5
    # MV_PENALTY = 1
    OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # 4
    nmos = 1
    pmos = 2
    psym = 3
    d = {1: (255, 175, 0),  # NMOS
         2: (0, 0, 255),    # PMOS is red
         3: (0, 255, 0)}    # symmetries is green

    def reset(self):
        """Place the 8 transistors on the POS_array grid and return the
        initial observation (image array when RETURN_IMAGES)."""
        self.M1 = trans(1)
        self.M2 = trans(1)
        self.M3 = trans(1)
        self.M4 = trans(0)
        self.M5 = trans(0)
        self.M6 = trans(0)
        self.M7 = trans(0)
        self.M8 = trans(0)
        self.transistors = [self.M1, self.M2, self.M3, self.M4, self.M5, self.M6, self.M7, self.M8]
        # Spread the transistors over the POS_array grid cells.
        for i in range(len(POS_array)):
            y_status = SIZE / (len(POS_array) * 2) + (i * SIZE) / len(POS_array)
            for j in range(len(POS_array[i])):
                x_status = SIZE / (len(POS_array[i]) * 2) + (j * SIZE) / len(POS_array[i])
                if POS_array[i, j] >= 1:
                    index_pos = POS_array[i, j] - 1
                    self.transistors[index_pos].x = x_status
                    self.transistors[index_pos].y = y_status
        # `dimentions` is a module-level array (filled from Dim(fin_prp)
        # inside the training loop; zeros on the very first reset).
        for i in range(len(self.transistors)):
            [self.transistors[i].di_x, self.transistors[i].di_y] = dimentions[i]
        # bug fix: was env.render(...) — referenced the module-level instance
        self.render(self.transistors)
        self.episode_step = 0
        # store symmetry value to prevent invalid actions
        self.sym_init = sym_function(self.transistors)
        if self.RETURN_IMAGES:
            observation = np.array(self.get_image(self.transistors))
        else:
            observation = (self.M1 - self.M2) + (self.M3 - self.M4)  ###no idea why!!!!
        return observation

    def step(self, actions, Thr_cost):
        """Apply one action per transistor and score the new placement.

        Returns (new_observation, reward, done, hit_threshold).
        """
        self.episode_step += 1
        for ind in range(NET_NUM):
            act = actions[ind]
            self.transistors[ind].action(act)
        if self.RETURN_IMAGES:
            new_observation = np.array(self.get_image(self.transistors))
        else:
            blank = np.zeros((SIZE, SIZE, 3), dtype=np.uint8)
            new_observation = Image.fromarray(blank, 'RGB')
        Best_cost = Tot_cost(self.transistors, network)
        # Graded reward: big payoff at/below threshold, small shaping above.
        if Best_cost <= Thr_cost:
            reward = PRF_Reward
        elif Thr_cost < Best_cost and Best_cost < 1.3 * Thr_cost:
            reward = 5
        elif 1.3 * Thr_cost <= Best_cost and Best_cost < 1.5 * Thr_cost:
            reward = 2
        else:
            reward = -MV_PENALTY
        done = False
        done2 = False
        # Episode ends on success or when the step budget is exhausted.
        if Best_cost <= Thr_cost or self.episode_step >= EPI_STEP:
            done = True
        if Best_cost <= Thr_cost:
            done2 = True
            print(f"threshold is {Thr_cost} and episod step is {self.episode_step}")
        return new_observation, reward, done, done2

    def render(self, transistors):
        """Show the current placement in an OpenCV window, scaled to 400px."""
        img = self.get_image(transistors)
        img = img.resize((400, 400))
        cv2.imshow("image", np.array(img))
        cv2.waitKey(1)

    def get_image(self, transistors):
        """Rasterize the transistors into a SIZE x SIZE RGB PIL image.

        Transistors 0-2 use the PMOS colour, 3-4 the symmetry colour,
        5-7 the NMOS colour — identical to the original unrolled loops.
        """
        canvas = np.zeros((SIZE, SIZE, 3), dtype=np.uint8)
        color_keys = [self.pmos] * 3 + [self.psym] * 2 + [self.nmos] * 3
        for t, key in zip(transistors, color_keys):
            y_lo = int(round(t.y - t.di_y / 2))
            y_hi = int(round(t.y + t.di_y / 2))
            x_lo = int(round(t.x - t.di_x / 2))
            x_hi = int(round(t.x + t.di_x / 2))
            for yi in range(y_lo, y_hi):
                for xi in range(x_lo, x_hi):
                    canvas[yi][xi] = self.d[key]
        return Image.fromarray(canvas, 'RGB')
# Single shared environment instance used by the training loop below.
env = TransEnv()

# For stats
ep_rewards = [-200]

# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.random.set_seed(1)

# Memory fraction, used mostly when training multiple agents
# gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
# backend.set_session(tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)))

# Create models folder
if not os.path.isdir('models'):
    os.makedirs('models')

# checkpoint = ModelCheckpoint(filepath='trainallSL2.ckpt',save_best_only=False,save_weights_only=False,verbose=0)
#checkpoint = ModelCheckpoint(filepath='testall2a.ckpt', save_best_only=False, save_weights_only=False, verbose=0)
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.create_file_writer(self.log_dir)
self._log_write_dir = os.path.join(self.log_dir, MODEL_NAME)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
# Overrided, saves logs with our step number
# (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
# Overrided
# We train for one batch only, no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
# Overrided, so won't close writer
def on_train_end(self, _):
pass
def on_train_batch_end(self, batch, logs=None):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
def _write_logs(self, logs, index):
with self.writer.as_default():
for name, value in logs.items():
tf.summary.scalar(name, value, step=index)
self.step += 1
self.writer.flush()
class DQNAgent:
    """DQN agent with a separate target network and replay memory.

    The Q-network maps the environment's RGB observation to
    ACTION_SPACE_SIZE * NET_NUM linear outputs (5 Q-values per transistor).
    """

    def __init__(self):
        # Main model
        self.model = self.create_model()
        # self.model.load_weights('trainallSL1.ckpt')
        # Resume from a previous checkpoint; the file must exist locally.
        self.model.load_weights('trainallSL2.ckpt')

        # Target network
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())

        # An array with last n steps for training
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)

        # Custom tensorboard object
        self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))

        # Used to count when to update target network with main network's weights
        self.target_update_counter = 0

    def create_model(self):
        """Build the conv-net Q-function (2 conv blocks + dense head)."""
        model = Sequential()
        model.add(Conv2D(256, (3, 3),
                         input_shape=env.OBSERVATION_SPACE_VALUES))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(64))
        model.add(Dense(env.ACTION_SPACE_SIZE * NET_NUM, activation='linear'))
        # ACTION_SPACE_SIZE * NET_NUM = how many choices (4) * transistor_numbers
        model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
        return model

    # Adds step's data to a memory replay array
    # (observation space, action, reward, new observation space, done)
    def update_replay_memory(self, transition):
        self.replay_memory.append(transition)
        # print(f"replay appen")

    def train(self, terminal_state, step):
        """Sample a minibatch from replay memory and fit the main network.

        When *terminal_state* is True the target-network counter advances;
        every UPDATE_TARGET_EVERY terminal states, weights are copied over.
        """
        # Start training only if certain number of samples is already saved
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return

        # Get a minibatch of random samples from memory replay table
        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

        # Get current states from minibatch, then query NN model for Q values
        current_states = np.array([transition[0] for transition in minibatch]) / 255
        current_qs_list = self.model.predict(current_states)

        # Get future states from minibatch, then query NN model for Q values
        # When using target network, query it, otherwise main network should be queried
        new_current_states = np.array([transition[3] for transition in minibatch]) / 255
        future_qs_list = self.target_model.predict(new_current_states)

        # images frm the game
        X = []
        # actions like up, down, left, ...
        y = []

        # Now we need to enumerate our batches
        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
            # If not a terminal state, get new q from future states, otherwise set it to 0
            # almost like with Q Learning, but we use just part of equation here
            if not done:
                max_future_q = np.max(future_qs_list[index])
                argtochng = np.argmax(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward
                argtochng = np.argmax(current_qs_list[index])

            # Update Q value for given state
            current_qs = current_qs_list[index]
            current_qs[argtochng] = new_q

            # And append to our training data
            X.append(current_state)
            y.append(current_qs)
            # print("state app")

        # Fit on all samples as one batch, log only on terminal state
        #self.model.fit(np.array(X) / 255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False,
        #callbacks=[self.tensorboard, checkpoint] if terminal_state else None)
        self.model.fit(np.array(X) / 255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False,
                       callbacks=[self.tensorboard] if terminal_state else None)

        # Update target network counter every episode
        if terminal_state:
            self.target_update_counter += 1

        # If counter reaches set value, update target network with weights of main network
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0

    # Queries main network for Q values given current observation space (environment state)
    def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape) / 255)[0]
agent = DQNAgent()
# Placeholder; overwritten by Dim(fin_prp) inside the training loop — the
# very first env.reset() below therefore sees all-zero dimensions.
dimentions = np.zeros((NET_NUM, 2), dtype=int)
# Iterate over episodes
# Reset environment and get initial state
current_state = env.reset()
# Odd gate-finger counts get a 1-cell x offset for gate alignment.
for i in range(len(env.transistors)):
    if fin_prp[i][0] % 2 == 1:
        env.transistors[i].x += 1
# NOTE(review): Best_trans aliases env.transistors (no copy), so the
# later "best cost" comparisons compare the live list with itself —
# confirm whether a deep copy was intended.
Best_trans = env.transistors
env.render(env.transistors)
print(f"total original cost {Tot_cost(Best_trans, network)}")
# Per-threshold results: episode index, step count, and threshold value
# recorded each time the cost threshold is reached.
residimepisode = []
residimstep = []
residthr = []
# Outer loop: tighten the cost threshold 20 times, training EPISODES
# episodes at each level.
# Fix: the original contained `print("done"}` — a SyntaxError that made the
# whole script unrunnable; corrected to `print("done")`.
for rade in range(20):
    Thr_cost -= 10
    print(time.time())
    for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
        # Update tensorboard step every episode
        agent.tensorboard.step = episode
        # Restarting episode - reset episode reward and step number
        episode_reward = 0
        step = 1
        # Reset environment and get initial state
        current_state = env.reset()
        # adjust the position for x for gate alignment
        for i in range(len(env.transistors)):
            if fin_prp[i][0] % 2 == 1:
                env.transistors[i].x += 1
        # get the dimentions of the transistors
        dimentions = Dim(fin_prp)
        # Reset flag and start iterating until episode ends
        done = False
        done1 = False
        while not done:
            # Epsilon-greedy: each transistor picks from its own 5-value
            # slice of the Q-vector, or a random action.
            j = 0
            action = np.zeros(NET_NUM, dtype=int)
            for i in range(NET_NUM):
                if np.random.random() > epsilon:
                    # Get action from Q table
                    action[i] = np.argmax(agent.get_qs(current_state)[j:j + 5])
                else:
                    # Get random action
                    action[i] = np.random.randint(0, 5)
                j += 5
            new_state, reward, done, done1 = env.step(action, Thr_cost)
            # NOTE(review): Best_trans aliases env.transistors, so this
            # comparison is the list against itself; kept as in the original.
            if Tot_cost(env.transistors, network) < Tot_cost(Best_trans, network):
                Best_trans = env.transistors
                print(f"best cost:{Tot_cost(Best_trans, network)} in episode {episode})")
                print(
                    f"best dimentions: T1: {Best_trans[0].x}, {Best_trans[0].y}, T2:{Best_trans[1].x}, {Best_trans[1].y}, T3:{Best_trans[2].x}, {Best_trans[2].y}, T4:{Best_trans[3].x}, {Best_trans[3].y}, T5:{Best_trans[4].x}, {Best_trans[4].y}, T6:{Best_trans[5].x}, {Best_trans[5].y},T7:{Best_trans[6].x}, {Best_trans[6].y}, T8:{Best_trans[7].x}, {Best_trans[7].y}")
            # Transform new continous state to new discrete state and count reward
            episode_reward += reward
            # Every step we update replay memory and train main network
            agent.update_replay_memory((current_state, action, reward, new_state, done))
            agent.train(done, step)
            current_state = new_state
            step += 1
            if done1:
                # Threshold reached: record the episode/step/threshold stats.
                residimepisode.append(episode)
                residimstep.append(step)
                residthr.append(Thr_cost)
                print("done")  # fixed: was `print("done"}` (SyntaxError)
            if done1:
                done1 = False
                break
        # Append episode reward to a list and log stats (every given number of episodes)
        ep_rewards.append(episode_reward)
        if not episode % AGGREGATE_STATS_EVERY or episode == 1:
            average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:]) / len(ep_rewards[-AGGREGATE_STATS_EVERY:])
            min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
            max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
            agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward,
                                           epsilon=epsilon)
            # Save model, but only when min reward is greater or equal a set value
            if min_reward >= MIN_REWARD:
                agent.model.save(
                    f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
        # Decay epsilon
        if epsilon > MIN_EPSILON:
            epsilon *= EPSILON_DECAY
            epsilon = max(MIN_EPSILON, epsilon)
# Final summary and threshold-vs-timesteps plot.
# Fixes: `prinr` (NameError) corrected to `print`; averages are guarded so
# a run that never reached any threshold no longer raises ZeroDivisionError.
print(f" threshold{residthr}")
print(f"steps{residimstep}")
aveepi = sum(residimepisode) / len(residimepisode) if residimepisode else 0
avestep = sum(residimstep) / len(residimstep) if residimstep else 0
print(
    f"best dimentions: T1: {Best_trans[0].x}, {Best_trans[0].y}, T2:{Best_trans[1].x}, {Best_trans[1].y}, T3:{Best_trans[2].x}, {Best_trans[2].y}, T4:{Best_trans[3].x}, {Best_trans[3].y}, T5:{Best_trans[4].x}, {Best_trans[4].y}, T6:{Best_trans[5].x}, {Best_trans[5].y},T7:{Best_trans[6].x}, {Best_trans[6].y}, T8:{Best_trans[7].x}, {Best_trans[7].y}")
print(f" best cost ever:{Tot_cost(Best_trans, network)}")
print(f"average of episodes: {aveepi} and everage of epistep : {avestep}")
# episode * steps per threshold hit
timelist = [a * b for a, b in zip(residimepisode, residimstep)]
plt.title("Related Episodes for each Threshold")
plt.xlabel("Threshold")
plt.ylabel("#timesteps")
# NOTE: the "ob" format's blue is overridden by color='r' (red circles).
plt.plot(residthr, timelist, "ob", color='r')
plt.show()
|
# Generated by Django 3.2 on 2021-04-30 08:00
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Auto-generated migration: adds the multi-select ``Amenities`` field
    to Room and widens ``price`` to DecimalField(max_digits=10)."""

    dependencies = [
        ('Tour_app', '0025_auto_20210430_1312'),
    ]

    operations = [
        migrations.AddField(
            model_name='room',
            name='Amenities',
            field=multiselectfield.db.fields.MultiSelectField(choices=[('Air Conditioning', 'Air Conditioning'), ('Free Wi-Fi', 'Free Wi-Fi'), ('Restaurant/Coffee Shop', 'Restaurant/Coffee Shop'), ('CCTV', 'CCTV'), ('Room Service', 'Room Service'), ('Swimming Pool', 'Swimming Pool')], default=' ', max_length=82),
        ),
        migrations.AlterField(
            model_name='room',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=10),
        ),
    ]
|
# Project 1: Implementation of Go-Back-N Protocol
# Group Member: Daksh Patel ID: 104 030 031
# Group Member: Nyasha Kapfumvuti ID: 104 121 166
# Date: Mar 30th, 2018
import socket #Sockets are the endpoints of a bidirectional communications channel. Channel types include TCP and UDP
import json #lightweight data interchange format inspired by JavaScript object literal syntax
import math #math functions
import numpy as np # np is an alias pointing to numpy. importing as np helps keep away any conflict due to namespaces
import time #date time
# GBN variables
baseN = 1            # send base: sequence number of the oldest unacked packet
seqNum = 1
nextPcktNumber = 1
windowSize = 10      # sliding-window size
numOfPackets = 20    # total packets prepared by prepPackets()
window = []  # Arrays
packets = []         # all prepared payload strings
unAcked = []         # packets sent but not yet acknowledged
Acked = []           # acknowledged sequence numbers
client_address = ('localhost', 10000)
server_address = ('localhost', 10000)
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# AF_INET four-tuple (host, port, flowinfo, scopeid) is used
# SOCK_STREAM type of communications between the two endpoints
# NOTE: connects at import time — a server must be listening on port 10000.
clientSocket.connect(server_address)
sending = False
receiving = True
lossRate = 5 # .2 is one in 5
# Generate a list of binary values for checksum demonstration : # seq nummber, acked, data
def prepPackets(numOfPackets):
    """Create `numOfPackets` simple string packets in the module-level
    `packets` list and echo each one for visibility.

    Each payload is just the stringified sequence index, which is enough
    to demonstrate Go-Back-N bookkeeping.
    """
    for seq in range(numOfPackets):
        packets.append(str(seq))
    # Echo the prepared packets so the run is easy to follow (the original
    # used a C-style index loop and a dead `pass` statement here).
    for packet in packets:
        print(packet)
    print('Done making ', numOfPackets, ' packets. Ready to start sending...')
# Send new and unAcked packets that are in the window
def sendData(baseN):
    """Fill the send window with unacked plus new packets, then transmit each
    one and wait for an acknowledgement after every send.

    NOTE(review): `baseN` is a parameter, so `baseN += 1` below rebinds a
    local and never advances the module-level base — the caller always
    re-sends from the same base.
    NOTE(review): `packets[x]` raises IndexError once windowSize + baseN
    exceeds len(packets); there is no bound check.
    NOTE(review): recvfrom() on a connected TCP socket just reads the stream;
    the `server` element of the returned tuple carries no peer address here.
    """
    window = [] # empty window
    while len(window) < windowSize:
        for x in unAcked: # Add any unAcked packets to window for re-sending
            window.append(x)
            print('unacked package added', x)
        if len(window) < windowSize: # Add new packets if there is room left in the window
            for x in range(baseN, (windowSize+baseN), 1): # New packets fit current window location
                window.append(packets[x])
    for x in range(0, len(window),1): # send packet one at a time
        message = window[x]
        print('sending seq number: ', message) #sequence num will increase with acks
        clientSocket.send(message.encode())
        print('listening')
        data, server = clientSocket.recvfrom(1024)
        newPack = int(data)
        Acked.append(newPack)
        baseN += 1
        print('attempted')
        print('back: ,', int(newPack))
# Continually receive acknowledgements and update containers
def receiveData():
    """Read one message from the socket, print it, and close the connection.

    NOTE(review): despite the comment above, this runs once, not continually;
    recvfrom returns a (data, address) tuple which is printed as-is.
    """
    print('receiving data')
    modifiedMessage = clientSocket.recvfrom(1024)
    print(modifiedMessage)
    clientSocket.close()
# Delay Timer to simulate GBN timeout
def timeOut(seconds):
    """Simulate the GBN retransmission timer: report window state roughly
    once per second until `seconds` have elapsed, then re-send the window.

    Bug fix: the original also called time.clock(), which was removed in
    Python 3.8 and made this function raise AttributeError immediately.
    """
    start = time.time()
    elapsed = 0
    while elapsed < seconds:
        elapsed = time.time() - start
        print('Timeout! BaseN = ', baseN, ', Window Size: ', windowSize, ', Acked: ', Acked, )
        time.sleep(1)
    sendData(baseN)
def main():
    """Entry point: prepare the packets, then drive sending via the timeout."""
    prepPackets(numOfPackets)
    timeOut(5)
main()
|
import dotenv
from google.cloud import translate_v2 as translate
dotenv.load_dotenv()
def translate_to_english(recvmsg, location):
    """Translate `recvmsg` with the Google Cloud Translation API.

    The target language is whatever language code `location` holds;
    returns the translated text as a string.
    """
    client = translate.Client()
    result = client.translate(recvmsg, target_language=location)
    return result['translatedText']
|
# http://www.practicepython.org/exercise/2014/12/14/23-file-overlap.html
# Read one integer per line from each data file.
with open("EX23_DATA1", "r") as arch1:
    lista1 = [int(dato) for dato in arch1]
with open("EX23_DATA2", "r") as arch2:
    lista2 = [int(dato) for dato in arch2]
# Membership tests against a set are O(1), so the overlap scan is O(n + m)
# instead of the original O(n * m) list scan; order and duplicates from
# lista1 are preserved exactly as before.
en_lista2 = set(lista2)
concordancias = [a for a in lista1 if a in en_lista2]
print(concordancias)
|
from common import *
import sys
if len(sys.argv) < 2:
    # Bug fix: `raise "Missing params!"` is a TypeError in Python 3
    # (exceptions must derive from BaseException); exit with a clear message.
    raise SystemExit("Missing params! ")
# taking the video frames
process_ucf_dataset(sys.argv[1])
import json
from bs4 import BeautifulSoup
import requests
import tldextract
def extract():
    """
    Scrape the giveaway listings on https://isthereanydeal.com/specials/
    and return a list of dicts with title, url, store and time_left.
    """
    url = "https://isthereanydeal.com/specials/"
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7",
    }
    page = requests.get(url, headers=headers).text
    soup = BeautifulSoup(page, features="lxml")
    results = []
    for row in soup.find_all("div", attrs={"class" : "bundle-row1"}):
        tag = row.find("a", attrs={"class" : "bundle-tag"}).text
        if tag != "giveaway" :
            continue
        time_left = row.find("div", attrs={"class" : "bundle-time"}).text
        if "unknown" in time_left:
            # search only for limited time free games
            continue
        title_node = row.find("div", attrs={"class" : "bundle-title"})
        # extract the game title from whitespaces
        title = str(title_node.text).split("-")[0].strip()
        link = title_node.find("a", attrs={"href" : True})["href"]
        store = tldextract.extract(link).registered_domain
        results.append({"title" : title, "url" : link, "store" : store,"time_left" : time_left})
    return results
if __name__ == "__main__":
    # Manual smoke test: dump the scraped giveaway list.
    print(extract())
|
from .boykovkolmogorov import *
from .capacityscaling import *
from .dinitz_alg import *
from .edmondskarp import *
from .gomory_hu import *
from .maxflow import *
from .mincost import *
from .networksimplex import *
from .preflowpush import *
from .shortestaugmentingpath import *
from .utils import (
build_flow_dict as build_flow_dict,
build_residual_network as build_residual_network,
)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 18:35:36 2020
@author: raghed
"""
class etudiant(object):
    """A student record; registers itself in the global `data` database."""

    def __init__(self, numero, prenom, nom, niveau):
        self.numero = numero
        self.prenom = prenom
        self.nom = nom
        # Validate the level first so an invalid student is never registered.
        self.set_niveau(niveau)
        data.ajouterEtudiant(self)

    def set_niveau(self, value):
        """Store the level; only 'A', 'B' or 'C' are accepted."""
        if value in ('A', 'B', 'C'):
            self.niveau = value
        else:
            raise ValueError()

    def edit_etudiant(self, nom, prenom, niveau):
        """Update name, first name and (validated) level in place."""
        self.nom = nom
        self.prenom = prenom
        self.set_niveau(niveau)

    def __repr__(self):
        return f"numero:{self.numero}\tnom:{self.nom}\tprenom:{self.prenom}\tniveau: {self.niveau}"
class cours(object):
    """A course record; registers itself in the global `data` database."""

    def __init__(self, code, intitule, niveau):
        self.code = code
        self.intitule = intitule
        # Validate the level first so an invalid course is never registered.
        self.set_niveau(niveau)
        data.ajouterCours(self)

    def set_niveau(self, value):
        """Store the level; only 'A', 'B' or 'C' are accepted."""
        if value in ('A', 'B', 'C'):
            self.niveau = value
        else:
            raise ValueError()

    def edit_cours(self, intitule, niveau):
        """Update the title and (validated) level in place."""
        self.intitule = intitule
        self.set_niveau(niveau)

    def __repr__(self):
        return f"code:{self.code}\tintitule:{self.intitule}\tniveau: {self.niveau}"
class note(object):
    """A mark linking a student to a course; registers itself in `data`.

    Bug fix: the original wrapped bare name references (`numEtudiant`,
    `codeCours`) in try/except ValueError — those can never raise, so the
    "Course mark should be between 0 and 100" message was dead code and an
    out-of-range mark escaped as an uncaught ValueError. The validation now
    guards setNote(), which is what actually raises.
    """

    def __init__(self, numEtudiant, codeCours, note):
        try:
            self.setNote(note)
        except ValueError:
            print("Course mark should be between 0 and 100")
        else:
            # Only a valid note is linked and registered.
            self.numEtudiant = numEtudiant
            self.codeCours = codeCours
            data.ajouterNote(self)

    def setNote(self, value):
        """Store the mark; valid marks are 0..100 inclusive."""
        if value < 0 or value > 100:
            raise ValueError()
        self.note = value

    def __repr__(self):
        return ("cours:"+self.codeCours+"\tEtudiant:"+self.numEtudiant+"\tnote: "+str(self.note))
class BD(object):
    """In-memory database of students (etudiants), courses and marks (notes)."""

    def __init__(self):
        self.listEtudiant = []
        self.listCours = []
        self.listNotes = []

    def ajouterEtudiant(self, e):
        """Register a student."""
        self.listEtudiant.append(e)

    def supprimerEtudiant(self, num):
        """Delete the first student whose numero equals `num` (no-op if absent)."""
        for x in self.listEtudiant:
            if x.numero == num:
                self.listEtudiant.remove(x)
                print("etudiant ", num, " has been deleted")
                break

    def ajouterNote(self, n):
        """Register a mark."""
        self.listNotes.append(n)

    def supprimerNote(self, e, c):
        """Delete the first mark of student `e` for course `c` (no-op if absent)."""
        for x in self.listNotes:
            if x.numEtudiant == e and x.codeCours == c:
                self.listNotes.remove(x)
                print("note etudiant ", e, " en cours ", c, " has been deleted")
                break

    def ajouterCours(self, c):
        """Register a course."""
        self.listCours.append(c)

    def supprimerCours(self, num):
        """Delete the first course whose code equals `num` (no-op if absent)."""
        for x in self.listCours:
            if x.code == num:
                self.listCours.remove(x)
                print("cours ", num, " has been deleted")
                break

    def moyenne_classe(self, c):
        """Average mark for course code `c`.
        Raises ZeroDivisionError when the course has no marks (as before)."""
        marks = [n.note for n in self.listNotes if n.codeCours == c]
        return sum(marks) / len(marks)

    def moyenne_etudiant(self, e):
        """Average mark of student number `e`.
        Raises ZeroDivisionError when the student has no marks (as before)."""
        marks = [n.note for n in self.listNotes if n.numEtudiant == e]
        return sum(marks) / len(marks)

    def consulter_classnote(self, c):
        """Print every (student, mark) pair recorded for course object `c`."""
        print("notes class: ", c.code)
        print("-----------------------")
        print("Etudiant \tNote")
        print("-----------------------")
        for n in self.listNotes:
            if n.codeCours == c.code:
                print(n.numEtudiant, "\t", n.note)

    def consulter_etudianNote(self, e):
        """Print every (course, mark) pair recorded for student object `e`."""
        print("notes Etudiant: ", e.numero)
        print("-----------------------")
        print("Cours \tNote")
        print("-----------------------")
        for n in self.listNotes:
            if n.numEtudiant == e.numero:
                print(n.codeCours, "\t", n.note)

    def __repr__(self):
        # Bug fix: __repr__ must return a str; the original printed the list
        # and returned None, so repr(bd) raised TypeError.
        return str(self.listEtudiant)
# Demo / smoke-test script: build a small student database and exercise it.
data=BD()
e1 = etudiant("1000","raghed","idris","A")
e2 = etudiant("1001","sami","sam","B")
print(e1)
print(e2)
c1=cours("GDN100","Projet INformatique","A")
c2=cours("UTC503","Paradigme","B")
print(c1)
# NOTE(review): note_e1/note_e2 are rebound; only the second note of each
# pair is kept in these variables (all four are still stored in `data`).
note_e1=note(e1.numero,c1.code,100)
note_e1=note(e1.numero,c2.code,80)
note_e2=note(e2.numero,c1.code,91)
note_e2=note(e2.numero,c2.code,95)
print(note_e1)
print(note_e2)
print("--------")
print("lists before deletion")
print(data.listNotes)
print(data.listEtudiant)
print(data.listCours)
print("--------")
print("moyenne class ",c1.code,":", data.moyenne_classe(c1.code))
print("moyenne etudiant ",e1.numero,":", data.moyenne_etudiant(e1.numero))
print("--------")
data.consulter_classnote(c2)
print("--------")
data.consulter_etudianNote(e1)
print("--------SupprimerNote---------------")
# use the student ID
data.supprimerEtudiant("1000")
data.supprimerNote("1000","GDN100")
print("--------")
print("lists after deletion")
print(data.listNotes)
print(data.listEtudiant)
print(data.listCours)
#print(data)
# Map each letter of the input to its 1-based position in the alphabet.
alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
            "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v",
            "w", "x", "y", "z"]
position = []
string = input ('What would you like to "Encrypt": ')
for letter in list(string.lower()):
    if letter in alphabet:
        alpha_number = alphabet.index(letter) + 1
        # Bug fix: list.append returns None, so the original
        # `position = position.append(...)` destroyed the list after the
        # first letter and raised AttributeError on the second.
        position.append(alpha_number)
print (position)
|
# NOTE(review): Python 2 code (print statements, raw_input); it will not run
# under Python 3 without conversion. Kept as-is.
# import the sys module and unpack the argv variable
from sys import argv
# the script name and the file to open are taken from the command line
script, filename = argv
# open the txt file named on the command line
txt = open(filename)
# print this phrase and the file name variable
print "Here's your file %r:" % filename
# print out what's in the txt file
print txt.read()
txt.close()
# prompt the user to type in the name of the file
print "I'll also ask you to type it again:"
# specify the file_again variable to be user input
file_again = raw_input("> ")
# open the re-specified file
txt_again = open(file_again)
# print the contents of the re-specified file
print txt_again.read()
txt_again.close()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
# Example: list comprehension vs. explicit iteration
li = [1, 2, 3, 4, 5]
# Method 1: list comprehension
start = time.perf_counter()
res = [i + 10 for i in li]
end = time.perf_counter()
print('列表解析的结果:\n\t{}\n\t运行时间:{:.8f}'.format(res, end - start))
# Method 2: explicit for-loop iteration
start = time.perf_counter()
res = []
for i in li:
    res.append(i + 10)
end = time.perf_counter()
print('for循环的结果:\n\t{}\n\t运行时间:{:.8f}'.format(res, end - start))
# Example: reading a file with list comprehensions
# Method 1: readlines() (not recommended — loads the whole file at once)
with open('test.txt', 'r', encoding='utf-8') as f:
    content = f.readlines()
content_strip = [line.strip() for line in content]
print(content_strip)
# Method 2: open the file inside the comprehension
content_strip = [line.strip()
                 for line in open('test.txt', 'r', encoding='utf-8')]
print(content_strip)
# Method 3: iterate the file object directly
with open('test.txt', 'r', encoding='utf-8') as f:
    content_strip = [line.strip() for line in f]
print(content_strip)
# Example: list comprehension with an `if` condition
with open('2、手动迭代 - iter和next.py', 'r', encoding='utf-8') as f:
    content = [line.strip().upper() for line in f if line.strip() != '']
content_p = [line for line in content if line[0] == 'P']
print(content_p)
|
# NOTE(review): Python 2 code (print statements); under Python 3 this is a
# SyntaxError. Kept as-is.
from sys import argv
# unpack exactly three command-line arguments after the script name
script, first, second, third = argv
print "The script is called:", script
print "Your first variable is:", first
print "Your second variable is:", second
print "Your third variable is:", third
import asyncio
import time
import pandas as pd
from typing import List
from core.tools import aget, time_left_in_month
class CryptoCompareAPI:
    """
    Client for the CryptoCompare REST API with built-in rate limiting.

    Tracks call counters for five rolling windows (second/minute/hour/day/
    month) and throttles requests so the remaining monthly quota is spread
    evenly over the time left in the month. Market-data responses are
    returned as pandas DataFrames indexed by timestamp (milliseconds).
    """

    # The rate-limit windows tracked by this client.
    _WINDOWS = ('second', 'minute', 'hour', 'day', 'month')

    def __init__(self, config):
        """
        Initializes a new CryptoCompareAPI instance: sets the authorization
        header, zeroes the per-window call counters and starts the timers.

        Arguments:
            config (dict): A dictionary of keyword arguments required to initialize the class
                apiKey (str): the apiKey to use for authorization
                urlBase (str): the base URL to request
                endpoints (dict): a dictionary of endpoints available to the API
                rateLimits (dict): a dictionary of rate limits for REST requests
        """
        self.apiKey = config['apiKey']
        self.urlBase = config['urlBase']
        self.endpoints = config['endpoints']
        self.limits = config['rateLimits']
        self.authHeader = {
            'authorization': f'ApiKey {self.apiKey}'
        }
        self.callsMade = {window: 0 for window in self._WINDOWS}
        self.callsRemain = self.limits.copy()
        self.lastCall = self.milliseconds()
        self.timers = {window: {} for window in self._WINDOWS}
        self.start_timers()

    def start_timers(self):
        """
        Starts timers for rate limiting: records each window's start time
        and its duration in milliseconds. (The original also fetched the
        asyncio event loop here and never used it; that call is removed.)
        """
        fixed = {
            'second': 1000,
            'minute': 60 * 1000,
            'hour': 60 * 60 * 1000,
            'day': 24 * 60 * 60 * 1000,
        }
        for timer in self.timers:
            self.timers[timer]['started'] = self.milliseconds()
            # The month window lasts however long remains in the current month.
            self.timers[timer]['duration'] = (
                fixed[timer] if timer in fixed else time_left_in_month())

    @staticmethod
    def milliseconds():
        """
        A static method for getting the utc timestamp in milliseconds

        Returns:
            the timestamp in milliseconds
        """
        return int(time.time() * 1000)

    def update(self):
        """
        Increments call counters, first resetting any window whose duration
        has elapsed.
        """
        for timer in self.timers:
            elapsed = self.milliseconds() - self.timers[timer]['started']
            duration = self.timers[timer]['duration']
            if elapsed >= duration:
                self.callsMade[timer] = 0
                self.callsRemain[timer] = self.limits[timer]
                self.timers[timer]['started'] = self.milliseconds()
                # Month resets to the real remaining time; fixed windows are
                # extended to compensate for the overshoot past the boundary.
                if timer == 'month':
                    self.timers[timer]['duration'] = time_left_in_month()
                else:
                    self.timers[timer]['duration'] = 2 * duration - elapsed
            self.callsMade[timer] += 1
            self.callsRemain[timer] -= 1

    async def throttle(self):
        """
        Throttles the request rate so the remaining monthly allowance is
        spread evenly over the time left in the month.
        """
        remains = self.timers['month']['duration'] - (
            self.milliseconds() - self.timers['month']['started'])
        elapsed = self.milliseconds() - self.lastCall
        # max(..., 1) guards against division by zero when the quota is spent.
        call_rate = remains / max(self.callsRemain['month'], 1)
        if elapsed < call_rate:
            await asyncio.sleep(call_rate - elapsed)

    async def get_usage(self):
        """
        Requests usage statistics for this API.

        Returns:
            the 'Data' payload of the usage endpoint response
        """
        await self.throttle()
        response = await aget(self.urlBase + self.endpoints['usage'], headers = self.authHeader)
        self.update()
        self.lastCall = self.milliseconds()
        response.raise_for_status()
        return response.json()['Data']

    async def fetch_daily_ohlcv(self, base: str, quote: str, limit: int = 2000,
                                toTime = None):
        """
        Gets the daily price change data from the CryptoCompare API.

        Args:
            base (str) - the base symbol to get ohlcv data for
            quote (str) - the quote symbol to read ohlcv data as
            limit (int) - the number of days to look back in each request
            toTime (int) - the time to end the request in ms; defaults to
                "now". (Bug fix: the original default `time.time() * 1000`
                was evaluated once at import time, freezing the end time.)

        Returns:
            df (DataFrame) - historical daily OHLCV rows for base/quote,
            indexed by timestamp (ms).
        """
        if toTime is None:
            toTime = time.time() * 1000
        self.update()
        await self.throttle()
        response = await aget(self.urlBase + self.endpoints['dailyOhlcv'],
                              headers = self.authHeader,
                              params = {
                                  'fsym': base,
                                  'tsym': quote,
                                  'limit': limit,
                                  'toT': toTime / 1000,
                              })
        self.lastCall = self.milliseconds()
        response.raise_for_status()
        rows = [
            {
                'timestamp': candle['time'] * 1000,
                'symbol': base,
                'toSymbol': quote,
                'close': candle['close'],
                'high': candle['high'],
                'low': candle['low'],
                'open': candle['open'],
                'volumeFrom': candle['volumefrom'],
                'volumeTo': candle['volumeto'],
            }
            for candle in response.json()['Data']
        ]
        # DataFrame.append was removed in pandas 2.0; build all rows first.
        df = pd.DataFrame(rows, columns = [
            'timestamp', 'symbol', 'toSymbol', 'close', 'high', 'low',
            'open', 'volumeFrom', 'volumeTo',
        ])
        return df.set_index('timestamp')

    async def fetch_full_data(self, bases: List[str], quotes: List[str]):
        """
        Gets the full current market data for every coin in bases in terms
        of every quote in quotes.

        Args:
            bases (List[str]) - the base coins to get data for
            quotes (List[str]) - the quote coins to get data in

        Returns:
            df (DataFrame) - one row per (base, quote) pair, indexed by the
            request timestamp (ms).
        """
        columns = [
            'timestamp', 'symbol', 'toSymbol', 'price', 'lastVolume',
            'lastVolumeTo', 'volumeDay', 'volumeDayTo', 'volume24Hour',
            'volume24HourTo', 'openDay', 'highDay', 'lowDay', 'open24Hour',
            'high24Hour', 'change24Hour', 'changePct24Hour', 'changeDay',
            'changePctDay', 'supply', 'mktCap', 'totalVolume24Hr',
            'totalVolume24HrTo',
        ]
        self.update()
        await self.throttle()
        response = await aget(self.urlBase + self.endpoints['fullData'],
                              headers = self.authHeader,
                              params = {
                                  # Bug fix: the original sliced the joined
                                  # strings with [:-1], chopping the final
                                  # character off the last symbol.
                                  'fsyms': ','.join(bases),
                                  'tosyms': ','.join(quotes),
                              })
        self.lastCall = self.milliseconds()
        response.raise_for_status()
        js = response.json()
        rows = []
        for base in js['RAW']:
            for quote in js['RAW'][base]:
                d = js['RAW'][base][quote]
                rows.append({
                    'timestamp': self.lastCall,
                    'symbol': d['FROMSYMBOL'],
                    'toSymbol': d['TOSYMBOL'],
                    'price': d['PRICE'],
                    'lastVolume': d['LASTVOLUME'],
                    'lastVolumeTo': d['LASTVOLUMETO'],
                    'volumeDay': d['VOLUMEDAY'],
                    'volumeDayTo': d['VOLUMEDAYTO'],
                    'volume24Hour': d['VOLUME24HOUR'],
                    'volume24HourTo': d['VOLUME24HOURTO'],
                    'openDay': d['OPENDAY'],
                    'highDay': d['HIGHDAY'],
                    'lowDay': d['LOWDAY'],
                    'open24Hour': d['OPEN24HOUR'],
                    'high24Hour': d['HIGH24HOUR'],
                    'change24Hour': d['CHANGE24HOUR'],
                    'changePct24Hour': d['CHANGEPCT24HOUR'],
                    'changeDay': d['CHANGEDAY'],
                    'changePctDay': d['CHANGEPCTDAY'],
                    'supply': d['SUPPLY'],
                    'mktCap': d['MKTCAP'],
                    'totalVolume24Hr': d['TOTALVOLUME24H'],
                    'totalVolume24HrTo': d['TOTALVOLUME24HTO'],
                })
        # DataFrame.append was removed in pandas 2.0; build all rows first.
        return pd.DataFrame(rows, columns = columns).set_index('timestamp')

    async def fetch_coins(self, coins: List[str]):
        """
        Gets technical data for a list of coins.

        Args:
            coins (List[str]) - list of coins to get technical data on

        Returns:
            df (DataFrame) - technical information for each requested coin
            present in the response, indexed by the request timestamp (ms).
        """
        columns = [
            'timestamp', 'symbol', 'totalCoinsMined', 'blockNumber',
            'netHashesPerSecond', 'blockReward', 'blockTime',
        ]
        self.update()
        await self.throttle()
        response = await aget(self.urlBase + self.endpoints['coins'])
        self.lastCall = self.milliseconds()
        response.raise_for_status()
        js = response.json()
        # Bug fix: the original wrapped js['Data'] in filter() and then
        # subscripted the filter object, which raises TypeError.
        rows = []
        for symbol, d in js['Data'].items():
            if symbol not in coins:
                continue
            rows.append({
                'timestamp': self.lastCall,
                'symbol': symbol,
                'totalCoinsMined': d['TotalCoinsMined'],
                'blockNumber': d['BlockNumber'],
                'netHashesPerSecond': d['NetHashesPerSecond'],
                'blockReward': d['BlockReward'],
                'blockTime': d['BlockTime'],
            })
        return pd.DataFrame(rows, columns = columns).set_index('timestamp')
|
# -*- coding: utf-8 -*-
__author__ = 'Steven Willis'
__email__ = 'onlynone@gmail.com'
__version__ = '0.1.0'
import subprocess
# Public API of this module.
__all__ = ["git", "CALL", "CHECK_CALL", "CHECK_OUTPUT"]
# Selector values for git(..., f=...): which subprocess function runs git.
CALL = 'call'
CHECK_CALL = 'check_call'
CHECK_OUTPUT = 'check_output'
# Maps each selector to the corresponding subprocess function.
__sub_calls = {
    CALL: subprocess.call,
    CHECK_CALL: subprocess.check_call,
    CHECK_OUTPUT: subprocess.check_output
}
# Defaults merged into every subprocess invocation (callers may override).
__default_subprocess_kwargs = {
    'close_fds': True,
    'shell': False,
}
def git(*args, **kwargs):
    """Run the git executable via the subprocess module.

    The positional arguments become git's command-line arguments; for
    example ``git("commit", "--help")`` executes ``git commit --help``.

    Keyword Args:
        f: One of CALL, CHECK_CALL, or CHECK_OUTPUT, selecting which
            subprocess function is used to execute git. Defaults to
            CHECK_CALL.
        **kwargs: passed through to the chosen subprocess function as-is
            (e.g. ``cwd="/path/to/repo"``).

    Returns:
        Whatever the selected subprocess function returns: the returncode
        for f=CALL, or the captured stdout for f=CHECK_OUTPUT.

    Examples:
        git("commit", "-m", "Commit Message", cwd="/path/to/repo")
        is equivalent to
        subprocess.check_call(["git", "commit", "-m", "Commit Message"], cwd="/path/to/repo")
    """
    sub_call = __sub_calls[kwargs.pop('f', CHECK_CALL)]
    # Merge module defaults with caller overrides, callers winning.
    call_kwargs = dict(__default_subprocess_kwargs, **kwargs)
    return sub_call(("git",) + tuple(args), **call_kwargs)
del subprocess
|
import random
class Number:
    """A number-guessing game: the player has 6 chances to guess a random
    number between 0 and 100, with hints after certain misses.

    Bug fix: the original defined and called its nested hints() helper only
    AFTER the guessing loop had finished, so hints were never shown during
    play (and its `while self.chances == k: ... break` blocks were disguised
    ifs). Hints now fire inside the loop at 5 and 2 remaining chances.
    """

    def __init__(self, player):
        self.player = player
        self.chances = 6  # guesses remaining

    def _print_hint(self, num):
        """Print the hint matching the current number of remaining chances."""
        if self.chances == 5:
            # Hint 1: sum of the digits.
            digit_sum = num % 10 + num // 10
            print(f'Hint: The sum of the digits of the number is {digit_sum}')
        elif self.chances == 2:
            # Hint 2: number is odd or even?
            if num % 2 == 0:
                print('Hint: The given number is even.')
            else:
                print('Hint: The given number is odd.')

    def gen_num(self):
        """Run one interactive game round."""
        num = random.randint(0, 100)
        print(f"Hello! {self.player} Game Starts... I've selected a number between 0-100. You have total 6 chances! Guess it!")
        current_guess = int(input('\n Enter the number: '))
        while True:
            if num == current_guess:
                print(f'You won the game! Congratulations {self.player}. You guessed the correct number.')
                break
            self.chances -= 1
            if self.chances == 0:
                print(f'You lost {self.player}! Game Over. The number was {num}')
                break
            self._print_hint(num)
            if num < current_guess:
                current_guess = int(input(f'Your guess is too high. You have {self.chances} chances remaining.\n Guess Again? '))
            else:
                current_guess = int(input(f'Your guess is too low. You have {self.chances} chances remaining.\n Guess Again? '))
        print('\n')
|
import json
import os
import sys
import pandas.io.sql as psql
import requests
crypto_tools_dir = os.getcwd().split('/scripts/')[0] + '/scripts/'
sys.path.append(crypto_tools_dir)
from crypto_tools import *
class PopulateKraken(object):
    """Populate the crypto_test MySQL schema with Kraken reference data
    (currencies, asset pairs) and live order-book snapshots from the public
    Kraken REST API.

    NOTE(review): SQL below is built with %-interpolation and excepts are
    bare; parameterized queries and narrow exception handling would be
    safer. DatabaseConnect and datetime come from the crypto_tools
    star-import at the top of the file.
    """
    def __init__(self):
        """Connect to the database and cache the Kraken exchange id and the
        crypto-currency lookup dictionary.

        NOTE(review): credentials are hardcoded in source — move them to
        configuration or environment variables.
        """
        self.port = 3306
        self.host = "159.89.20.249"
        self.database_name = 'crypto_test'
        self.user = 'toby'
        self.password = 'R1i9p1p1l9e0$'
        self.database = DatabaseConnect(self.host, self.database_name, self.user, self.password, self.port)
        self.database.database_connect()
        self.get_kraken_exchange_id()
        self.crypto_currency_dict()
    def get_kraken_exchange_id(self):
        """Look up and cache the id of the 'kraken' row in the exchange table."""
        sql_str = """SELECT id FROM crypto_test.exchange
                    WHERE name = 'kraken' """
        results = psql.read_sql(sql_str,con = self.database.mydb)
        self.exchange_id = results['id'].loc[0]
    def crypto_currency_dict(self):
        """Cache {altname: id} for every row of the crypto_currency table."""
        sql_str = """SELECT id,altname FROM crypto_test.crypto_currency"""
        results = psql.read_sql(sql_str,con = self.database.mydb)
        crypto_db_dict = {}
        for ind,row in results.T.iteritems():
            altname = row['altname']
            crypto_db_dict[altname] = row['id']
        self.crypto_db_dict = crypto_db_dict
    def asset_pairs_dict(self):
        """Cache {name: id} for every row of the asset_pairs table."""
        sql_str = """SELECT id,name FROM crypto_test.asset_pairs"""
        results = psql.read_sql(sql_str,con = self.database.mydb)
        asset_pairs_db_dict = {}
        for ind,row in results.T.iteritems():
            name = row['name']
            asset_pairs_db_dict[name] = row['id']
        print (asset_pairs_db_dict)
        self.asset_pairs_db_dict = asset_pairs_db_dict
    def get_kraken_lookup_cryptos(self):
        """Fetch Kraken's asset list and insert one crypto_currency_lookup row
        per asset, linking it to crypto_currency where the name is known."""
        #asset_pairs_json = requests.get("https://api.kraken.com/0/public/AssetPairs")
        crypto_json = requests.get("https://api.kraken.com/0/public/Assets")
        crypto_json_string = crypto_json.text
        crypto_json_dictionary = json.loads(crypto_json_string)
        result = crypto_json_dictionary['result']
        for i in result:
            name = result[i]['altname']
            exchange_id = 1
            ut = datetime.now()
            try:
                crypto_currency_id = self.crypto_db_dict[i]
            except:
                # Unknown asset: store a SQL NULL foreign key.
                crypto_currency_id ='NULL'
            sql_str = """INSERT INTO crypto_test.crypto_currency_lookup(name,crypto_currency_id,exchange_id,ut)
                        VALUES('%s',%s,%s,"%s")
                        """%(name,crypto_currency_id,exchange_id,ut)
            self.database.cursor.execute(sql_str)
            try:
                self.database.mydb.commit()
            except:
                self.database.mydb.rollback()
    def get_kraken_tradeable_assets(self):
        """Fetch Kraken's tradeable asset pairs and insert a lookup row for
        each, with any '.d' suffix stripped from the pair name."""
        self.asset_pairs_dict()
        # NOTE(review): this prints the bound method object, not the dict;
        # asset_pairs_dict() stores its result in self.asset_pairs_db_dict.
        print (self.asset_pairs_dict)
        asset_pairs_json = requests.get("https://api.kraken.com/0/public/AssetPairs")
        asset_pairs_json_string = asset_pairs_json.text
        asset_pairs_json_dictionary = json.loads(asset_pairs_json_string)
        result = asset_pairs_json_dictionary['result']
        x = 0
        for i in result:
            new_name = i.replace('.d','')
            ut = datetime.now()
            sql_str = """INSERT IGNORE INTO crypto_test.asset_pairs_lookup(name,asset_pairs_id,exchange_id,tradeable,ut)
                        VALUES('%s',%s,1,1,"%s")
                        """%(new_name,'NULL',ut)
            self.database.cursor.execute(sql_str)
            try:
                self.database.mydb.commit()
            except:
                self.database.mydb.rollback()
            x = x + 1
    def populate_asset_pairs(self):
        """Insert a hard-coded list of asset pairs, splitting each pair name
        into its two currencies and resolving their crypto_currency ids."""
        asset_pairs_list = ["BTCCAD","XMRUSD","BTCEUR","ETHBTC","BTCGBP","ETHEUR","XMRBTC","MLNETH","ETHJPY","ZECEUR","REPBTC","GNOBTC","BTCJPY","XRPUSD","LTCUSD",
        "REPETH","BTCGBP","ETHZUSD","EOSBTC","ETHJPY","ETHCAD","ETCBTC","ZECUSD","ETHGBP","BCHEUR","XDGBTC","BTCEUR","LTCEUR","ETCETH","ETHGBP","REPEUR","BTCCAD","LTCBTC","BTCJPY",
        "XMREUR","BTCUSD","GNOETH","ETHCAD","DASHBTC","XLMBTC","ETCEUR","MLNBTC","BCHUSD","ICNETH","ETHBTC","XRPBTC","ETHUSD","XRPEUR","EOSETH","DASHEUR",
        "ICNBTC","ETCUSD","ETHEUR","ZECBTC","DASHUSD","BTCUSD","BCHBTC","USDTUSD"]
        for asset_pair in asset_pairs_list:
            # 'DASH' is the only 4-letter base symbol in the list, so it
            # needs its own split rule.
            if 'DASH' in asset_pair:
                asset1 = 'DASH'
                asset2 = asset_pair[-3:]
            else:
                asset1 = asset_pair[:3]
                asset2 = asset_pair[-3:]
            try:
                cryptoid1 = self.crypto_db_dict[asset1]
                cryptoid2 = self.crypto_db_dict[asset2]
            except:
                print (asset_pair, 'not in crypto_crrency table')
                continue
            ut = datetime.now()
            sql_str = """INSERT IGNORE INTO crypto_test.asset_pairs(name,crypto_currency_id,crypto_currency_id2,ut)
                        VALUES('%s',%s,%s,"%s")
                        """%(asset_pair,cryptoid1,cryptoid2,ut)
            print (sql_str)
            self.database.cursor.execute(sql_str)
            try:
                self.database.mydb.commit()
            except:
                self.database.mydb.rollback()
    def get_kraken_asset_pairs_lookup(self):
        """Cache Kraken's asset-pair lookup rows: the list and comma-joined
        string of pair names, and a {name: asset_pairs_lookup_id} dict."""
        sql_str = """SELECT apl.name,apl.id AS asset_pairs_lookup_id
                    FROM crypto_test.asset_pairs_lookup apl
                    INNER JOIN crypto_test.exchange e ON e.id = apl.exchange_id
                    WHERE e.name = 'kraken'"""
        results = psql.read_sql(sql_str,con = self.database.mydb)
        asset_pairs_lookup_dict = {}
        self.asset_pairs_list = results['name'].tolist()
        self.asset_pairs_str = ','.join(self.asset_pairs_list)
        print (self.asset_pairs_str)
        for ind,row in results.T.iteritems():
            name = row['name']
            asset_pairs_lookup_dict[name] = row['asset_pairs_lookup_id']
        self.asset_pairs_lookup_dict = asset_pairs_lookup_dict
    def get_server_time(self):
        """Fetch the Kraken server time and cache it as 'YYYY-MM-DD HH:MM:SS'.
        """
        url = "https://api.kraken.com/0/public/Time"
        server_time_request = requests.get(url)
        server_time_text = server_time_request.text
        server_time_json = json.loads(server_time_text)
        result = server_time_json['result']
        server_time_unixtime = result['unixtime']
        server_time = datetime.fromtimestamp(int(server_time_unixtime)).strftime('%Y-%m-%d %H:%M:%S')
        self.server_time = server_time
    def populate_order_book(self):
        """Fetch the order book for every known Kraken pair and insert each
        bid and ask level into crypto_test.order_book."""
        self.get_server_time()
        self.get_kraken_asset_pairs_lookup()
        for kraken_asset_pair in self.asset_pairs_list:
            print (kraken_asset_pair)
            url = "https://api.kraken.com/0/public/Depth?pair=%s"%(kraken_asset_pair)
            try:
                order_book_json = requests.get(url)
            except:
                # NOTE(review): `i` is undefined here (NameError if the
                # request fails) and execution falls through to use
                # order_book_json anyway — a `continue` is missing.
                print (i, 'no order book')
            #order_book = open('/Users/toby/git/toby_test/crypto_arbing/crypto_db/populate_db/kraken_order_book.json')
            order_book = order_book_json.text
            order_book_json = json.loads(order_book)
            result = order_book_json['result']
            bids = result[kraken_asset_pair]['bids']
            asks = result[kraken_asset_pair]['asks']
            asset_pairs_lookup_id = self.asset_pairs_lookup_dict[kraken_asset_pair]
            # order_type_id 1 = bid, 2 = ask (per the pairing below).
            bid_ask_list = [[1,bids],[2,asks]]
            for order_type in bid_ask_list:
                order_type_id = order_type[0]
                for order in order_type[1]:
                    price = order[0]
                    quantity = order[1]
                    order_time = datetime.fromtimestamp(int(order[2])).strftime('%Y-%m-%d %H:%M:%S')
                    #need to remove trailing zeros before and after decimal
                    new_price = '{0:g}'.format(float(price))
                    new_quantity = '{0:g}'.format(float(quantity))
                    # NOTE(review): new_quantity is computed but the INSERT
                    # below uses float(quantity) instead.
                    ut = datetime.now()
                    sql_str = """INSERT IGNORE INTO crypto_test.order_book(asset_pairs_lookup_id,order_type_id,price,quantity,order_time,server_time,ut)
                            VALUES(%s,%s,%s,%s,"%s","%s","%s")
                            """%(asset_pairs_lookup_id,order_type_id,float(new_price),float(quantity),order_time,self.server_time,ut)
                    self.database.cursor.execute(sql_str)
                    try:
                        self.database.mydb.commit()
                    except:
                        self.database.mydb.rollback()
def main():
    """Driver: refreshes the order-book table; the other population steps are
    one-time setup and stay commented out."""
    PC = PopulateKraken()
    #PC.get_kraken_tradeable_assets()
    #PC.get_kraken_lookup_cryptos()
    #PC.populate_asset_pairs()
    PC.populate_order_book()
if __name__=="__main__":
    main()
# Find the largest K whose triangular sum S = 1 + 2 + ... + K does not exceed N.
N = int(input("N="))
K, S = 1, 1
# Grow the triangular sum until it first exceeds N ...
while S <= N:
    K, S = K + 1, S + K + 1
# ... then roll back one step to the last (K, S) that still fit.
K, S = K - 1, S - K
print("K=",K,"S=",S)
import sys
from awg4100 import AwgDevice
local_ip = "192.168.8.10" # IP address of this host
out_ch = 1
# Waveform code definition:
# sin(x,y),x represents x MHz,y represents running for y ns, x*y/1000 equals to how many periods for the function
#wave_code = WAVE('C:\\Users\Administrator\Desktop\wave.wave');
with open('C:\\Users\Administrator\Desktop\IonTrap-WIPM-master\AWG4100-Python64\wave.txt', 'r') as f:
    wave_code=f.read() # read the waveform code
with open('C:\\Users\Administrator\Desktop\IonTrap-WIPM-master\AWG4100-Python64\wave.txt', 'r') as f:
    first_line = f.readlines()[0] # read the Repeat parameter from the first line
# Binds the Repeat variable.
# NOTE(review): exec() of file content runs arbitrary code; parse the value instead.
exec(first_line)
dev = AwgDevice()
result = dev.init_network(local_ip)
if result == 0:
    print("Init network failed.")
    sys.exit()
dev_info = dev.find_device()
dev_num = len(dev_info)
if dev_num == 0:
    print("Cannot found device")
    sys.exit()
for idx in range(dev_num):
    print("[{}] IP={}, MAC={}, Name={}".format(idx, \
        dev_info[idx][0], dev_info[idx][1], dev_info[idx][2]))
trgt = 0
ip = dev_info[trgt][0]
mac = dev_info[trgt][1]
# 1. Connect to the device
result = dev.connect(ip, mac)
if result != 1:
    print("Connect failed.")
    sys.exit()
def check_ret(rtn, msg=None):
    """Exit the script with `msg` when a device call reports failure (rtn == 0)."""
    if rtn == 0:
        print(msg)
        sys.exit()
rtn, msg = dev.system_init()
check_ret(rtn, "System Reset failed.")
# 2. Parameter configuration
rtn, msg = dev.channel_mode(0) # select independent channel mode
check_ret(rtn, "set mode failed: {}".format(msg))
rtn, msg = dev.awg_cast_mode(1) # playback mode: 0 - continuous, 1 - triggered
check_ret(rtn, "set awg cast mode failed: {}".format(msg))
rtn, msg = dev.awg_offset(out_ch, "10") # channel 1, AWG idle offset
check_ret(rtn, "set offset failed: {}".format(msg))
rtn, msg = dev.marker_switch(out_ch, 1) # channel 1, marker on
check_ret(rtn, "set marker failed: {}".format(msg))
rtn, msg = dev.clock_mode(0) # internal clock
check_ret(rtn, "set clock failed: {}".format(msg))
# 3. Download the waveform
result = dev.load_wave_data(out_ch, wave_code) # channel 1
if result == 0:
    print("wave download failed: {}".format(result))
    sys.exit()
# 4. Set the playback repeat count
rtn, info = dev.awg_cast_number(Repeat)
check_ret(rtn, "set awg cast number failed: {}".format(info))
# 5. Playback control
rtn, info = dev.awg_broadcast(out_ch, 1) # start playback on channel 1
check_ret(rtn, "start failed: {}".format(info))
input("enter any to stop")
# 6. Stop playback
rtn, info = dev.awg_broadcast(out_ch, 0)
check_ret(rtn, "stop failed: {}".format(info))
# 7. Close the device
result = dev.close_device()
if not result:
    sys.exit()
|
from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
    """[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
    .. v2betastatus:: ConvertBoundingBoxFormat transform
    Args:
        format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
            Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
            string values match the enums, e.g. "XYXY" or "XYWH" etc.
    """
    _transformed_types = (tv_tensors.BoundingBoxes,)

    def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
        super().__init__()
        # Accept either the enum member itself or its name as a string.
        self.format = tv_tensors.BoundingBoxFormat[format] if isinstance(format, str) else format

    def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
        return F.convert_bounding_box_format(inpt, new_format=self.format)  # type: ignore[return-value]
class ClampBoundingBoxes(Transform):
    """[BETA] Clamp bounding boxes to their corresponding image dimensions.
    The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
    .. v2betastatus:: ClampBoundingBoxes transform
    """
    # Only bounding-box tv_tensors are transformed; other inputs pass through.
    _transformed_types = (tv_tensors.BoundingBoxes,)
    def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
        return F.clamp_bounding_boxes(inpt)  # type: ignore[return-value]
|
import unittest
from katas.beta.bin_to_decimal import bin_to_decimal
class BinaryToDecimalTestCase(unittest.TestCase):
    """bin_to_decimal should parse a binary string into its integer value."""
    def test_equal_1(self):
        self.assertEqual(bin_to_decimal('1'), 1)
    def test_equal_2(self):
        self.assertEqual(bin_to_decimal('0'), 0)
    def test_equal_3(self):
        self.assertEqual(bin_to_decimal('1001001'), 73)
|
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from scipy.sparse import coo_matrix, csr_matrix
from scipy.spatial.distance import pdist, squareform
import logging
logger = logging.getLogger(__name__)
STATES = ["S", "I", "R"]
def csr_to_list(x):
    """Yield (row, col, value) triples for every stored entry of sparse matrix x."""
    coo = x.tocoo()
    return zip(coo.row, coo.col, coo.data)
def indicator(states):
    """One-hot encode states: probas[..., s] == 1.0 exactly where states == s (s in 0..2)."""
    probas = (states[..., None] == np.arange(3)).astype(float)
    # sanity check: the argmax recovers the original states
    assert np.all(probas.argmax(axis=-1) == states)
    return probas
def frequency(states, verbose=True):
    """Generate an initial probability matrix from the empirical state frequencies.
    Returns an (N, 3) array whose every row equals [freq(S), freq(I), freq(R)].
    """
    freqs = [(states == s).mean() for s in range(3)]
    if verbose:
        print("freqs = ", freqs)
    n = len(states)
    return np.broadcast_to(freqs, (n, 3)).copy()
def patient_zeros_states(N, N_patient_zero):
    """Return a length-N state vector with N_patient_zero distinct random infected (state 1)."""
    states = np.zeros(N)
    chosen = np.random.choice(N, N_patient_zero, replace=False)
    states[chosen] = 1
    return states
def random_individuals(N, n_obs):
    """Sample n_obs distinct individual indices uniformly from 0..N-1."""
    chosen = np.random.choice(N, n_obs, replace=False)
    return chosen
def infected_individuals(states, n_obs):
    """
    Return n_obs infected individuals from states.
    If fewer than n_obs individuals are infected, return all of them.
    """
    (infected,) = np.where(states == 1)
    if infected.size < n_obs:
        return infected
    return np.random.choice(infected, n_obs, replace=False)
def symptomatic_individuals(states, t, tau, p):
    """
    Return a random proportion p of individuals that became infected at time
    tI = t - tau (S at tI-1, I at tI) and are still infected at time t.
    Returns an empty list when t - tau <= 0 (no observable onset).
    """
    tI = t - tau
    if tI <= 0:
        return []
    # onset at tI: susceptible just before, infected at tI, and still infected at t
    newly_infected = (states[tI - 1, :] == 0) & (states[tI, :] == 1)
    still_infected = states[t, :] == 1
    (symptomatic,) = np.where(newly_infected & still_infected)
    # keep a proportion p of them
    n_obs = int(p * len(symptomatic))
    return np.random.choice(symptomatic, n_obs, replace=False)
def random_observations(model, tests):
    """
    Observations given by random sampling of the population.
    Parameters
    ----------
    - model : EpidemicModel instance that provides the states
    - tests : dict
        n_test = tests[t_test] number of random tests done at t=t_test
    Returns
    -------
    - observations : list of dict(i=i, s=s, t_test=t_test) observations
    """
    observations = []
    for t_test, n_test in tests.items():
        for i in random_individuals(model.N, n_test):
            observations.append(
                dict(i=i, t_test=t_test, s=model.states[t_test, i])
            )
    return observations
def infected_observations(model, t_test, n_test):
    """
    Observations corresponding to n_test infected individuals at t=t_test.
    Parameters
    ----------
    - model : EpidemicModel instance that provides the states
    - t_test : int
    - n_test : int
    Returns
    -------
    - observations : list of dict(i=i, s=s, t_test=t_test) observations
    """
    chosen = infected_individuals(model.states[t_test], n_test)
    return [dict(i=i, t_test=t_test, s=1) for i in chosen]
def get_infection_probas(states, transmissions):
    """
    - states[i] = state of i
    - transmissions = csr sparse matrix of i, j, lambda_ij
    - infection_probas[i] = 1 - prod_{j: s[j]==I} [1 - lambda_ij]
    Computed in log-space:
        prod_{j:I} [1 - lambda_ij] = exp( sum_j log(1 - lambda_ij) * (s[j]==I) )
    """
    is_infected = (states == 1)
    # log(1 - lambda_ij) on the stored entries only
    log_one_minus = transmissions.multiply(-1).log1p()
    return 1 - np.exp(log_one_minus.dot(is_infected))
def propagate(current_states, infection_probas, recover_probas, RandomStream=np.random):
    """
    One SIR time step.
    - current_states[i] = state of i (0=S, 1=I, 2=R)
    - infection_probas[i] = proba that i gets infected (if susceptible)
    - recover_probas[i] = proba that i recovers (if infected)
    One uniform draw per S or I individual, in index order (keeps the
    random stream reproducible); R is absorbing.
    """
    next_states = np.zeros_like(current_states)
    for i, state in enumerate(current_states):
        if state == 0:
            next_states[i] = 1 if RandomStream.rand() < infection_probas[i] else 0
        elif state == 1:
            next_states[i] = 2 if RandomStream.rand() < recover_probas[i] else 1
        else:
            next_states[i] = 2
    return next_states
class EpidemicModel():
    """
    Base SIR epidemic model over a population of N individuals.
    States are integers: 0 = S, 1 = I, 2 = R (module-level STATES labels).
    Subclasses must implement sample_transmissions() and set
    self.recover_probas before calling run().
    """
    def __init__(self, initial_states, x_pos, y_pos):
        """
        - initial_states : length-N array of states (0/1/2)
        - x_pos, y_pos : length-N coordinate arrays (used by plot())
        """
        assert len(x_pos) == len(y_pos) == len(initial_states)
        self.N = len(initial_states)
        self.initial_states = initial_states
        self.x_pos = x_pos
        self.y_pos = y_pos
    def time_evolution(self, recover_probas, transmissions, print_every=0):
        """Run the simulation where
        - recover_probas[i] = mu_i time-independent
        - transmissions[t] = csr sparse matrix of i, j, lambda_ij(t)
        - states[t, i] = state of i at time t
        """
        # initialize states
        T = len(transmissions)
        states = np.zeros((T + 1, self.N), dtype=int)
        states[0] = self.initial_states
        if print_every:
            print("Running SIR simulation")
        # iterate over time steps
        for t in range(T):
            if print_every and (t % print_every == 0):
                print(f"t = {t} / {T}")
            infection_probas = get_infection_probas(states[t], transmissions[t])
            states[t+1] = propagate(states[t], infection_probas, recover_probas)
        self.states = states
        self.probas = indicator(states)  # one-hot encoding of the states
        self.recover_probas = recover_probas
        self.transmissions = transmissions
    def plot(self, t):
        """Scatter-plot the population at time t, one color/label per state."""
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
        for idx, state in enumerate(STATES):
            ind = np.where(self.states[t] == idx)
            ax.scatter(self.x_pos[ind], self.y_pos[ind], label=state)
        ax.set(title="t = %d" % t)
        ax.legend()
    def get_counts(self):
        """Return a DataFrame with the S/I/R counts at each time step."""
        counts = {
            state: (self.states == idx).sum(axis=1)
            for idx, state in enumerate(STATES)
        }
        return pd.DataFrame(counts)
    def sample_transmissions(self):
        """Subclasses must return the sparse transmission matrix for one time step."""
        raise NotImplementedError
    def generate_transmissions(self, T, print_every=0):
        """Sample and store T transmission matrices via sample_transmissions()."""
        transmissions = []
        if print_every:
            print("Generating transmissions")
        for t in range(T):
            if print_every and (t % print_every == 0):
                print(f"t = {t} / {T}")
            transmissions.append(self.sample_transmissions())
        self.transmissions = transmissions
    def run(self, T, print_every=0):
        """Generate T transmission matrices and run the time evolution.
        Requires self.recover_probas (set by subclasses)."""
        self.generate_transmissions(T, print_every=print_every)
        self.time_evolution(
            self.recover_probas, self.transmissions, print_every=print_every
        )
    def load_transmissions(self, csv_file, new_lambda=None):
        """Load transmissions from a CSV with columns t, i, j, lamb.
        If new_lambda is given it overrides the lamb column everywhere."""
        print(f"Loading transmissions from {csv_file}")
        df = pd.read_csv(csv_file)
        assert all(df.columns == ["t", "i", "j", "lamb"])
        assert df["i"].max() == df["j"].max() == self.N - 1
        tmax = df["t"].max() + 1
        # fix: identity test instead of `!= None` — equality comparison breaks
        # (or is ambiguous) when new_lambda is array-like
        if new_lambda is not None:
            df["lamb"] = new_lambda
        transmissions = []
        for t in range(tmax):
            sub_data = df.query(f"t=={t}")
            i, j, lamb = sub_data["i"], sub_data["j"], sub_data["lamb"]
            transmissions.append(
                csr_matrix((lamb, (i, j)), shape=(self.N, self.N))
            )
        self.transmissions = transmissions
    def save_transmissions(self, csv_file):
        """Save self.transmissions to a CSV with columns t, i, j, lamb."""
        print(f"Saving transmissions in {csv_file}")
        df_transmissions = pd.DataFrame(
            dict(t=t, i=i, j=j, lamb=lamb)
            for t, A in enumerate(self.transmissions)
            for i, j, lamb in csr_to_list(A)
        )[["t", "i", "j", "lamb"]]
        df_transmissions.to_csv(csv_file, index=False)
    def load_positions(self, csv_file):
        """Load x_pos, y_pos from a CSV with columns x_pos, y_pos."""
        print(f"Loading positions from {csv_file}")
        df = pd.read_csv(csv_file)
        assert all(df.columns == ["x_pos", "y_pos"])
        assert df.shape[0] == self.N
        self.x_pos = df["x_pos"].values
        self.y_pos = df["y_pos"].values
    def save_positions(self, csv_file):
        """Save x_pos, y_pos to a CSV with columns x_pos, y_pos."""
        print(f"Saving positions in {csv_file}")
        df_pos = pd.DataFrame({"x_pos": self.x_pos, "y_pos": self.y_pos})
        df_pos.to_csv(csv_file, index=False)
class ProximityModel(EpidemicModel):
    """
    Spatial SIR model.
    - N = population
    - mu = constant recovery proba
    - lamb = constant transmission rate (if in contact)
    - proba_contact = np.exp(-distance / scale)
    - initial_states = random patient zero
    - x_pos, y_pos = random uniform
    Any of initial_states, x_pos, y_pos, proba_contact may also be supplied.
    """
    def __init__(self, N, scale, mu, lamb,
                 initial_states=None, x_pos=None, y_pos=None, proba_contact=None):
        self.scale = scale
        self.mu = mu
        self.lamb = lamb
        # default initial states: a single random patient zero
        if initial_states is None:
            patient_zero = np.random.randint(N)
            initial_states = np.zeros(N)
            initial_states[patient_zero] = 1
        # default positions: uniform in a sqrt(N) x sqrt(N) square
        if x_pos is None:
            x_pos = np.sqrt(N) * np.random.rand(N)
        if y_pos is None:
            y_pos = np.sqrt(N) * np.random.rand(N)
        if proba_contact is None:
            # contact probability decays exponentially with pairwise distance
            positions = np.array([x_pos, y_pos]).T
            assert positions.shape == (N, 2)
            dist = squareform(pdist(positions))
            proba_contact = np.exp(-dist / scale)
            np.fill_diagonal(proba_contact, 0.)  # no contact with oneself
        self.proba_contact = proba_contact
        # expected number of contacts
        self.n_contacts = proba_contact.sum() / N
        # constant recovery proba
        self.recover_probas = mu * np.ones(N)
        super().__init__(initial_states, x_pos, y_pos)
    def sample_contacts(self):
        "contacts[i,j] = symmetric matrix, flag if i and j in contact"
        # draw the full matrix, keep the strict lower triangle, then mirror it
        draws = np.random.rand(self.N, self.N) < self.proba_contact
        lower = np.tril(draws, -1)
        return np.maximum(lower, lower.T)
    def sample_transmissions(self):
        "transmissions = csr sparse matrix of i, j, lambda_ij"
        contacts = self.sample_contacts()
        rows, cols = np.where(contacts)
        # constant rate = lamb on every contact
        transmissions = coo_matrix(
            (self.lamb * np.ones(len(rows)), (rows, cols)),
            shape=(self.N, self.N),
        ).tocsr()
        # sanity check: one entry per contact, no self-loops
        assert contacts.sum() == transmissions.nnz
        assert np.all(rows != cols)
        return transmissions
class FastProximityModel(EpidemicModel):
    """
    Model:
    - N = population
    - mu = constant recovery proba
    - lamd = constant transmission rate (if in contact)
    - proba_contact = np.exp(-distance / scale)
    - initial_states = random patient zero
    - x_pos, y_pos = random uniform
    You can also provide the initial_states.
    Faster variant: contacts are sampled via networkx's soft random
    geometric graph instead of a dense N x N draw.
    """
    def __init__(self, N, scale, mu, lamb, initial_states = None, seed = 1000):
        self.N = N
        self.scale = scale
        self.mu = mu
        self.lamb = lamb
        # initial states : patient zero infected
        if initial_states is None:
            initial_states = patient_zeros_states(N, 1)
        #### define separate rnd stream (giova)
        # private RNG so graph sampling is reproducible and independent
        # of the global np.random state
        rng = np.random.RandomState()
        rng.seed(seed)
        self.rng = rng
        ####
        # positions
        # NOTE(review): RandomState.random is an alias of random_sample added in
        # numpy 1.17 — confirm the minimum supported numpy version.
        x_pos = np.sqrt(N)*rng.random(N)
        y_pos = np.sqrt(N)*rng.random(N)
        # for soft geometric graph generation
        self.pos = {i: (x, y) for i, (x, y) in enumerate(zip(x_pos,y_pos))}
        # hard cutoff radius beyond which no contact is attempted
        self.radius = 5*scale
        def p_dist(d):
            # connection probability as a function of distance
            return np.exp(-d/scale)
        self.p_dist = p_dist
        # constant recovery proba
        self.recover_probas = mu*np.ones(N)
        super().__init__(initial_states, x_pos, y_pos)
    def sample_contacts(self):
        "contacts = csr sparse matrix of i, j in contact"
        g=nx.soft_random_geometric_graph(
            self.N, radius=self.radius, p_dist=self.p_dist, pos=self.pos, seed = self.rng
        )
        # NOTE(review): nx.to_scipy_sparse_matrix was removed in networkx 3.0
        # (replaced by to_scipy_sparse_array) — confirm the pinned networkx version.
        contacts = nx.to_scipy_sparse_matrix(g)
        return contacts
    def sample_transmissions(self):
        "transmissions = csr sparse matrix of i, j, lambda_ij"
        contacts = self.sample_contacts()
        # constant rate lamb on every sampled contact
        transmissions = contacts.multiply(self.lamb)
        return transmissions
class NetworkModel(EpidemicModel):
    """
    Model:
    - graph = networkx undirected graph
    - mu = constant recovery proba
    - lamb = constant transmission rate (if in contact)
    - proba_contact = float, constant proba for all edges and time steps.
    At time step t, the edge ij is activated as a contact with proba_contact.
    So the contacts network is at each time a subgraph of the original graph.
    proba_contact = 1 corresponds to the fixed contacts network case.
    - initial_states = random patient zero by default
    - layout = spring layout by default
    You can also provide the initial_states and layout.
    """
    def __init__(self, graph, mu, lamb, proba_contact,
                 initial_states=None, layout=None):
        self.graph = graph
        self.n_edges = graph.number_of_edges()
        self.mu = mu
        self.lamb = lamb
        self.proba_contact = proba_contact
        N = graph.number_of_nodes()
        # initial states : patient zero infected
        if initial_states is None:
            patient_zero = np.random.randint(N)
            initial_states = np.zeros(N)
            initial_states[patient_zero] = 1
        # positions (used only for plotting)
        if layout is None:
            print("Computing spring layout")
            layout = nx.spring_layout(graph)
        x_pos = np.array([layout[i][0] for i in graph.nodes])
        y_pos = np.array([layout[i][1] for i in graph.nodes])
        # expected number of contacts
        self.n_contacts = 2*self.n_edges*proba_contact/N
        # constant recovery proba
        self.recover_probas = mu*np.ones(N)
        super().__init__(initial_states, x_pos, y_pos)
    def sample_contacts(self):
        "contacts = list of i and j in contact"
        # each edge is selected with proba_contact
        selected = np.random.rand(self.n_edges) <= self.proba_contact
        contacts = [
            (i, j) for idx, (i, j) in enumerate(self.graph.edges)
            if selected[idx]
        ]
        # symmetrize
        contacts += [(j, i) for (i, j) in contacts]
        return contacts
    def sample_transmissions(self):
        "transmissions = csr sparse matrix of i, j, lambda_ij"
        contacts = self.sample_contacts()
        # bug fix: use arrays so the no-self-loop assertion below is
        # element-wise; with Python lists `i != j` compared the two whole
        # lists and the check always passed.
        i = np.array([i_ for (i_, j_) in contacts], dtype=int)
        j = np.array([j_ for (i_, j_) in contacts], dtype=int)
        # constant rate = lamb
        rates = self.lamb * np.ones(len(i))
        transmissions = coo_matrix(
            (rates, (i, j)), shape=(self.N, self.N)
        ).tocsr()
        # sanity check: one entry per contact, no self-loops
        assert len(contacts) == transmissions.nnz
        assert np.all(i != j)
        return transmissions
def read_ferretti_data(csv_file, lamb, N):
    """
    Load a Ferretti-style contact CSV and build per-time-step transmission matrices.
    Parameters
    ----------
    - csv_file : path or file-like object with columns ID, ID_2, time
    - lamb : constant transmission rate assigned to every contact
    - N : population size (IDs must span 0..N-1)
    Returns
    -------
    - transmissions : list of csr matrices with transmissions[t][i, j] = lamb
    """
    df = pd.read_csv(csv_file, usecols=["ID", "ID_2", "time"])
    assert N - 1 == df["ID"].max() == df["ID_2"].max()
    # bug fix: include the last recorded time step (t = max); the previous
    # range(df["time"].max()) silently dropped it. Mirrors load_transmissions.
    tmax = df["time"].max() + 1
    transmissions = []
    for t in range(tmax):
        sub_data = df.query(f"time=={t}")
        i, j = sub_data["ID"], sub_data["ID_2"]
        # constant rate lamb for every contact at this time step
        rates = lamb*np.ones_like(i)
        transmissions.append(
            csr_matrix((rates, (i, j)), shape=(N, N))
        )
    return transmissions
def proximity_model(N, N_patient_zero, scale, mu, lamb, t_max, seed):
    """Build and run a seeded ProximityModel simulation for t_max steps."""
    print("Using ProximityModel")
    np.random.seed(seed)
    states0 = patient_zeros_states(N, N_patient_zero)
    model = ProximityModel(N, scale, mu, lamb, states0)
    print("expected number of contacts %.1f" % model.n_contacts)
    model.run(t_max, print_every=100)
    return model
def ferretti_model(N_patient_zero=10, mu=1/15, lamb=0.02, seed=123,
                   csv_file="all_interaction_10000.csv", N=10000):
    """Run an EpidemicModel over transmissions read from Ferretti contact data."""
    print("Using Ferretti transmissions")
    np.random.seed(seed)
    transmissions = read_ferretti_data(csv_file, lamb=lamb, N=N)
    states0 = patient_zeros_states(N, N_patient_zero)
    # random positions, used by the model's plot()
    xs, ys = np.random.rand(N), np.random.rand(N)
    model = EpidemicModel(initial_states=states0, x_pos=xs, y_pos=ys)
    model.time_evolution(mu * np.ones(N), transmissions, print_every=100)
    return model
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.