Dataset schema (one row per column: name, dtype, observed range/values):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 .. 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 .. 248 |
| max_stars_repo_name | string | length 5 .. 125 |
| max_stars_repo_head_hexsha | string | length 40 .. 78 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 .. 248 |
| max_issues_repo_name | string | length 5 .. 125 |
| max_issues_repo_head_hexsha | string | length 40 .. 78 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 | 1 .. 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 .. 248 |
| max_forks_repo_name | string | length 5 .. 125 |
| max_forks_repo_head_hexsha | string | length 40 .. 78 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 .. 2.06M |
| avg_line_length | float64 | 1 .. 1.02M |
| max_line_length | int64 | 3 .. 1.03M |
| alphanum_fraction | float64 | 0 .. 1 |
| count_classes | int64 | 0 .. 1.6M |
| score_classes | float64 | 0 .. 1 |
| count_generators | int64 | 0 .. 651k |
| score_generators | float64 | 0 .. 1 |
| count_decorators | int64 | 0 .. 990k |
| score_decorators | float64 | 0 .. 1 |
| count_async_functions | int64 | 0 .. 235k |
| score_async_functions | float64 | 0 .. 1 |
| count_documentation | int64 | 0 .. 1.04M |
| score_documentation | float64 | 0 .. 1 |
hexsha: e60bf0fffa5d03c3b96086eba8b1615a72a54c3f | size: 15,002 | ext: py | lang: Python
max_stars:  path=fhir/resources/DSTU2/careplan.py | repo=cstoltze/fhir.resources | head=52f99738935b7313089d89daf94d73ce7d167c9d | licenses=["BSD-3-Clause"] | count=144 | events: 2019-05-08T14:24:43.000Z .. 2022-03-30T02:37:11.000Z
max_issues: path=fhir/resources/DSTU2/careplan.py | repo=cstoltze/fhir.resources | head=52f99738935b7313089d89daf94d73ce7d167c9d | licenses=["BSD-3-Clause"] | count=82 | events: 2019-05-13T17:43:13.000Z .. 2022-03-30T16:45:17.000Z
max_forks:  path=fhir/resources/DSTU2/careplan.py | repo=cstoltze/fhir.resources | head=52f99738935b7313089d89daf94d73ce7d167c9d | licenses=["BSD-3-Clause"] | count=48 | events: 2019-04-04T14:14:53.000Z .. 2022-03-30T06:07:31.000Z
content:
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/CarePlan
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import fhirtypes
from .backboneelement import BackboneElement
from .domainresource import DomainResource
class CarePlan(DomainResource):
"""Healthcare plan for patient or group.
Describes the intention of how one or more practitioners intend to deliver
care for a particular patient, group or community for a period of time,
possibly limited to care for a specific condition or set of conditions.
"""
resource_type = Field("CarePlan", const=True)
activity: ListType[fhirtypes.CarePlanActivityType] = Field(
None,
alias="activity",
title="List of `CarePlanActivity` items (represented as `dict` in JSON)",
description="Action to occur as part of plan",
)
addresses: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="addresses",
title=(
"List of `Reference` items referencing `Condition` (represented as "
"`dict` in JSON)"
),
description="Health issues this plan addresses",
)
author: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="author",
title=(
"List of `Reference` items referencing `Patient, Practitioner, "
"RelatedPerson, Organization, CareTeam` (represented as `dict` in JSON)"
),
description="Who is responsible for contents of the plan",
)
category: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="category",
title="List of `CodeableConcept` items (represented as `dict` in JSON)",
description="Type of plan",
)
context: fhirtypes.ReferenceType = Field(
None,
alias="context",
title=(
"Type `Reference` referencing `Encounter, EpisodeOfCare` (represented "
"as `dict` in JSON)"
),
description="Created in context of",
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `String` (represented as `dict` in JSON)",
description="Summary of nature of plan",
)
goal: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="goal",
title=(
"List of `Reference` items referencing `Goal` (represented as `dict` in"
" JSON)"
),
description="Desired outcome of plan",
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="List of `Identifier` items (represented as `dict` in JSON)",
description="External Ids for this plan",
)
note: fhirtypes.AnnotationType = Field(
None,
alias="note",
title="Type `Annotation` items (represented as `dict` in JSON)",
description="Comments about the plan",
)
period: fhirtypes.PeriodType = Field(
None,
alias="period",
title="Type `Period` (represented as `dict` in JSON)",
description="Time period plan covers",
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="Type `Code` (represented as `dict` in JSON)",
description=(
"draft | active | suspended | completed | entered-in-error | cancelled "
"| unknown"
),
)
subject: fhirtypes.ReferenceType = Field(
None,
alias="subject",
title=(
"Type `Reference` referencing `Patient, Group` (represented as `dict` "
"in JSON)"
),
description="Who care plan is for",
)
support: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="support",
title=(
"List of `Reference` items referencing `Resource` (represented as "
"`dict` in JSON)"
),
description="Information considered as part of plan",
)
    modified: fhirtypes.DateTime = Field(
        None,
        alias="modified",
        title="Type `DateTime` (represented as `dict` in JSON)",
        description="When last updated",
    )
participant: ListType[fhirtypes.CarePlanParticipantType] = Field(
None,
alias="participant",
title="List of `CarePlanParticipant` items (represented as `dict` in JSON).",
description="Who's involved in plan?.",
)
relatedPlan: ListType[fhirtypes.CarePlanRelatedPlanType] = Field(
None,
alias="relatedPlan",
title="Plans related to this one.",
description="List of `CarePlanRelatedPlan` items (represented as `dict` in JSON).",
)
class CarePlanActivity(BackboneElement):
"""Action to occur as part of plan.
Identifies a planned action to occur as part of the plan. For example, a
medication to be used, lab tests to perform, self-monitoring, education,
etc.
"""
resource_type = Field("CarePlanActivity", const=True)
actionResulting: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="actionResulting",
title=(
"List of `Reference` items referencing `Resource` "
"(represented as `dict` in JSON)."
),
description="Appointments, orders, etc..",
)
detail: fhirtypes.CarePlanActivityDetailType = Field(
None,
alias="detail",
title="Type `CarePlanActivityDetail` (represented as `dict` in JSON).",
description="In-line definition of activity.",
)
progress: ListType[fhirtypes.AnnotationType] = Field(
None,
alias="progress",
title="List of `Annotation` items (represented as `dict` in JSON).",
description="Comments about the activity status/progress.",
)
reference: fhirtypes.ReferenceType = Field(
None,
alias="reference",
title=(
"Type `Reference` referencing `Appointment, CommunicationRequest, "
"DeviceUseRequest, DiagnosticOrder, MedicationOrder, "
"NutritionOrder, Order, ProcedureRequest, ProcessRequest, "
"ReferralRequest, SupplyRequest, VisionPrescription` "
"(represented as `dict` in JSON)."
),
description="Activity details defined in specific resource.",
)
class CarePlanActivityDetail(BackboneElement):
"""In-line definition of activity.
A simple summary of a planned activity suitable for a general care plan
system (e.g. form driven) that doesn't know about specific resources such
as procedure etc.
"""
resource_type = Field("CarePlanActivityDetail", const=True)
category: fhirtypes.CodeableConceptType = Field(
None,
alias="category",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="diet | drug | encounter | observation | procedure | supply | other.",
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Detail type of activity.",
)
dailyAmount: fhirtypes.QuantityType = Field(
None,
alias="dailyAmount",
title="Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in JSON).",
description="How to consume/day?.",
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `str`.",
description="Extra info describing activity to perform.",
)
goal: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="goal",
title="List of `Reference` items referencing `Goal` (represented as `dict` in JSON).",
description="Goals this activity relates to.",
)
location: fhirtypes.ReferenceType = Field(
None,
alias="location",
title="Type `Reference` referencing `Location` (represented as `dict` in JSON).",
description="Where it should happen.",
)
performer: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="performer",
title=(
"List of `Reference` items referencing `Practitioner,"
" Organization, RelatedPerson, Patient` "
"(represented as `dict` in JSON)."
),
description="Who will be responsible?.",
)
productCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="productCodeableConcept",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="What is to be administered/supplied.",
one_of_many="product", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
productReference: fhirtypes.ReferenceType = Field(
None,
alias="productReference",
title=(
"Type `Reference` referencing `Medication, Substance`"
" (represented as `dict` in JSON)."
),
description="What is to be administered/supplied.",
one_of_many="product", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
prohibited: fhirtypes.Boolean = Field(
None, alias="prohibited", title="Type `bool`.", description="Do NOT do."
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title=(
"Type `Quantity` referencing `SimpleQuantity` "
"(represented as `dict` in JSON)."
),
description="How much to administer/supply/consume.",
)
reasonCode: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonCode",
title="List of `CodeableConcept` items (represented as `dict` in JSON).",
description="Why activity should be done.",
)
reasonReference: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="reasonReference",
title=(
"List of `Reference` items referencing `Condition` "
"(represented as `dict` in JSON)."
),
description="Condition triggering need for activity.",
)
scheduledPeriod: fhirtypes.PeriodType = Field(
None,
alias="scheduledPeriod",
title="Type `Period` (represented as `dict` in JSON).",
description="When activity is to occur.",
one_of_many="scheduled", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
scheduledString: fhirtypes.String = Field(
None,
alias="scheduledString",
title="Type `str`.",
description="When activity is to occur.",
one_of_many="scheduled", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
scheduledTiming: fhirtypes.TimingType = Field(
None,
alias="scheduledTiming",
title="Type `Timing` (represented as `dict` in JSON).",
description="When activity is to occur.",
one_of_many="scheduled", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="Type `str`.",
description="not-started | scheduled | in-progress | on-hold | completed | cancelled.",
)
statusReason: fhirtypes.CodeableConceptType = Field(
None,
alias="statusReason",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Reason for current status.",
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"scheduled": ["scheduledPeriod", "scheduledString", "scheduledTiming"],
"product": ["productCodeableConcept", "productReference"],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class CarePlanParticipant(BackboneElement):
"""Who's involved in plan?.
Identifies all people and organizations who are expected to be involved in
the care envisioned by this plan.
"""
resource_type = Field("CarePlanParticipant", const=True)
member: fhirtypes.ReferenceType = Field(
None,
alias="member",
title=(
"Type `Reference` referencing `Practitioner, RelatedPerson,"
" Patient, Organization` (represented as `dict` in JSON)."
),
description="Who is involved.",
)
role: fhirtypes.CodeableConceptType = Field(
None,
alias="role",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Type of involvement.",
)
class CarePlanRelatedPlan(BackboneElement):
"""Plans related to this one.
Identifies CarePlans with some sort of formal relationship to the current
plan.
"""
resource_type = Field("CarePlanRelatedPlan", const=True)
code: fhirtypes.Code = Field(
None,
alias="code",
title="Type `str`.",
description="includes | replaces | fulfills.",
)
plan: fhirtypes.ReferenceType = Field(
...,
alias="plan",
title="Plan relationship exists with.",
description="Type `Reference` referencing `CarePlan` (represented as `dict` in JSON).",
)
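
# --- Illustrative sketch (added for this excerpt; not part of the original
# careplan.py) ---------------------------------------------------------------
# The `validate_one_of_many` root validator above enforces FHIR's "choice of
# data type" (value[x]) rule: at most one scheduled[x] / product[x] variant
# may be populated on a single element.  A minimal, self-contained pydantic v1
# model with hypothetical names showing the same behaviour:
from typing import Any, Dict, Optional

from pydantic import BaseModel, root_validator


class _ScheduledDemo(BaseModel):
    """Hypothetical demo model, not a FHIR resource."""

    scheduledString: Optional[str] = None
    scheduledPeriod: Optional[dict] = None

    @root_validator(pre=True)
    def _only_one_scheduled(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        present = [
            k for k in ("scheduledString", "scheduledPeriod")
            if values.get(k) is not None
        ]
        if len(present) > 1:
            raise ValueError(f"Only one scheduled[x] value is allowed, got {present}")
        return values


if __name__ == "__main__":
    _ScheduledDemo(scheduledString="weekly")  # accepted: a single variant is set
    try:
        _ScheduledDemo(
            scheduledString="weekly", scheduledPeriod={"start": "2020-01-01"}
        )
    except ValueError as exc:  # both variants set -> rejected (pydantic v1)
        print(exc)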
avg_line_length: 33.190265 | max_line_length: 95 | alphanum_fraction: 0.611318
count_classes: 14,630 | score_classes: 0.975203 | count_generators: 0 | score_generators: 0 | count_decorators: 1,927 | score_decorators: 0.12845 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 7,241 | score_documentation: 0.482669

hexsha: e60f46b99d8c8cc0e5235381e1f3fff24068c1ab | size: 19,993 | ext: py | lang: Python
max_stars:  path=cogen/core/sockets.py | repo=ionelmc/cogen | head=83b0edb88425eba6e5bfda9f1dcd34642517e2a8 | licenses=["MIT"] | count=6 | events: 2016-01-22T09:42:45.000Z .. 2020-11-28T14:00:25.000Z
max_issues: path=cogen/core/sockets.py | repo=ionelmc/cogen | head=83b0edb88425eba6e5bfda9f1dcd34642517e2a8 | licenses=["MIT"] | count=1 | events: 2017-02-16T15:20:11.000Z .. 2017-02-16T22:50:41.000Z
max_forks:  path=cogen/core/sockets.py | repo=ionelmc/python-cogen | head=83b0edb88425eba6e5bfda9f1dcd34642517e2a8 | licenses=["MIT"] | count=null | events: null .. null
content:
"""
Socket-only coroutine operations and `Socket` wrapper.
Really - the only thing you need to know for most stuff is
the :class:`~cogen.core.sockets.Socket` class.
"""
#TODO: how to deal with requests that have unicode params
__all__ = [
'getdefaulttimeout', 'setdefaulttimeout', 'Socket', 'SendFile', 'Recv',
'Send', 'SendAll','Accept','Connect',
'SocketOperation', 'SocketError', 'ConnectionClosed'
]
from socket import socket as stdsocket, AF_INET, SOCK_STREAM
import events
from coroutines import coro
_TIMEOUT = None
class SocketError(Exception):
"Raised when a socket has a error flag (in epoll or select)"
class ConnectionClosed(SocketError):
"Raised when the other peer has closed connection."
def getdefaulttimeout():
return _TIMEOUT
def setdefaulttimeout(timeout):
"""Set the default timeout used by the socket wrapper
(`Socket <cogen.core.sockets.Socket.html>`_ class)"""
global _TIMEOUT
_TIMEOUT = timeout
class Socket(object):
"""
A wrapper for socket objects, sets nonblocking mode and
    adds some internal buffers and wrappers. Regular calls to the usual
socket methods return operations for use in a coroutine.
So you use this in a coroutine like:
.. sourcecode:: python
sock = Socket(family, type, proto) # just like the builtin socket module
yield sock.read(1024)
Constructor details:
.. sourcecode:: python
Socket([family[, type[, proto]]]) -> socket object
Open a socket of the given type. The family argument specifies the
address family; it defaults to AF_INET. The type argument specifies
whether this is a stream (SOCK_STREAM, this is the default)
or datagram (SOCK_DGRAM) socket. The protocol argument defaults to 0,
specifying the default protocol. Keyword arguments are accepted.
A socket object represents one endpoint of a network connection.
"""
__slots__ = ('_fd', '_timeout', '_proactor_added')
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0,
_timeout=None, _sock=None, _proactor_added=False):
self._fd = _sock or stdsocket(family, type, proto)
self._fd.setblocking(0)
self._timeout = _timeout or _TIMEOUT
self._proactor_added = _proactor_added
def recv(self, bufsize, **kws):
"""Receive data from the socket. The return value is a string
representing the data received. The amount of data may be less than the
        amount specified by _bufsize_. """
return Recv(self, bufsize, timeout=self._timeout, **kws)
def makefile(self, mode='r', bufsize=-1):
"""
        Returns a special fileobject that has coroutines instead of the usual
read/readline/write methods. Will work in the same manner though.
"""
return _fileobject(self, mode, bufsize)
def send(self, data, **kws):
"""Send data to the socket. The socket must be connected to a remote
        socket. The amount sent may be less than the data provided."""
return Send(self, data, timeout=self._timeout, **kws)
def sendall(self, data, **kws):
"""Send data to the socket. The socket must be connected to a remote
socket. All the data is guaranteed to be sent."""
return SendAll(self, data, timeout=self._timeout, **kws)
def accept(self, **kws):
"""Accept a connection. The socket must be bound to an address and
listening for connections. The return value is a pair (conn, address)
where conn is a new socket object usable to send and receive data on the
connection, and address is the address bound to the socket on the other
end of the connection.
        Example:

        .. sourcecode:: python

            conn, address = yield mysock.accept()
        """
return Accept(self, timeout=self._timeout, **kws)
def close(self):
"""Close the socket. All future operations on the socket object will
fail. The remote end will receive no more data (after queued data is
flushed). Sockets are automatically closed when they are garbage-collected.
"""
self._fd.close()
def bind(self, *args):
"""Bind the socket to _address_. The socket must not already be bound.
(The format of _address_ depends on the address family)
"""
return self._fd.bind(*args)
def connect(self, address, **kws):
"""Connect to a remote socket at _address_. """
return Connect(self, address, timeout=self._timeout, **kws)
def fileno(self):
"""Return the socket's file descriptor """
return self._fd.fileno()
def listen(self, backlog):
"""Listen for connections made to the socket. The _backlog_ argument
specifies the maximum number of queued connections and should be at
least 1; the maximum value is system-dependent (usually 5).
"""
return self._fd.listen(backlog)
def getpeername(self):
"""Return the remote address to which the socket is connected."""
return self._fd.getpeername()
def getsockname(self):
"""Return the socket's own address. """
return self._fd.getsockname()
def settimeout(self, to):
"""Set a timeout on blocking socket operations. The value argument can
be a nonnegative float expressing seconds, timedelta or None.
"""
self._timeout = to
def gettimeout(self):
"""Return the associated timeout value. """
return self._timeout
def shutdown(self, *args):
"""Shut down one or both halves of the connection. Same as the usual
socket method."""
return self._fd.shutdown(*args)
def setblocking(self, val):
if val:
raise RuntimeError("You can't.")
def setsockopt(self, *args):
"""Set the value of the given socket option. Same as the usual socket
method."""
self._fd.setsockopt(*args)
def sendfile(self, file_handle, offset=None, length=None, blocksize=4096, **kws):
return SendFile(file_handle, self, offset, length, blocksize, **kws)
def __repr__(self):
return '<socket at 0x%X>' % id(self)
def __str__(self):
return 'sock@0x%X' % id(self)
class SocketOperation(events.TimedOperation):
"""
    This is a generic class for an operation that involves some socket call.
A socket operation should subclass WriteOperation or ReadOperation, define a
`run` method and call the __init__ method of the superclass.
"""
__slots__ = (
'sock', 'last_update', 'coro', 'flags'
)
def __init__(self, sock, **kws):
"""
All the socket operations have these generic properties that the
poller and scheduler interprets:
        * timeout - the amount of time in seconds or timedelta, or the datetime
          value until which the poller should wait for this operation.
* weak_timeout - if this is True the timeout handling code will take
into account the time of last activity (that would be the time of last
`try_run` call)
* prio - a flag for the scheduler
"""
assert isinstance(sock, Socket)
super(SocketOperation, self).__init__(**kws)
self.sock = sock
def fileno(self):
return self.sock._fd.fileno()
def cleanup(self, sched, coro):
super(SocketOperation, self).cleanup(sched, coro)
return sched.proactor.remove_token(self)
class SendFile(SocketOperation):
"""
Uses underling OS sendfile (or equivalent) call or a regular memory copy
operation if there is no sendfile.
You can use this as a WriteAll if you specify the length.
Usage::
yield sockets.SendFile(file_object, socket_object, 0)
# will send till send operations return 0
yield sockets.SendFile(file_object, socket_object, 0, blocksize=0)
        # there will be only one send operation (if successful)
        # that means the whole file will be read into memory if there is
        # no sendfile
yield sockets.SendFile(file_object, socket_object, 0, file_size)
# this will hang if we can't read file_size bytes
#from the file
"""
__slots__ = (
'sent', 'file_handle', 'offset',
'position', 'length', 'blocksize'
)
def __init__(self, file_handle, sock, offset=None, length=None, blocksize=4096, **kws):
super(SendFile, self).__init__(sock, **kws)
self.file_handle = file_handle
self.offset = self.position = offset or file_handle.tell()
self.length = length
self.sent = 0
self.blocksize = blocksize
def process(self, sched, coro):
super(SendFile, self).process(sched, coro)
return sched.proactor.request_sendfile(self, coro)
def finalize(self, sched):
super(SendFile, self).finalize(sched)
return self.sent
class Recv(SocketOperation):
"""
Example usage:
.. sourcecode:: python
            yield sockets.Recv(socket_object, buffer_length)
    `buffer_length` is the maximum read size, BUT, if there are leftover buffers
    from ReadLine they are returned first.
"""
__slots__ = ('buff', 'len')
def __init__(self, sock, len = 4096, **kws):
super(Recv, self).__init__(sock, **kws)
self.len = len
self.buff = None
def process(self, sched, coro):
super(Recv, self).process(sched, coro)
return sched.proactor.request_recv(self, coro)
def finalize(self, sched):
super(Recv, self).finalize(sched)
return self.buff
class Send(SocketOperation):
"""
Write the buffer to the socket and return the number of bytes written.
"""
__slots__ = ('sent', 'buff')
def __init__(self, sock, buff, **kws):
super(Send, self).__init__(sock, **kws)
self.buff = str(buff)
self.sent = 0
def process(self, sched, coro):
super(Send, self).process(sched, coro)
return sched.proactor.request_send(self, coro)
def finalize(self, sched):
super(Send, self).finalize(sched)
return self.sent
class SendAll(SocketOperation):
"""
Run this operation till all the bytes have been written.
"""
__slots__ = ('sent', 'buff')
def __init__(self, sock, buff, **kws):
super(SendAll, self).__init__(sock, **kws)
self.buff = str(buff)
self.sent = 0
def process(self, sched, coro):
super(SendAll, self).process(sched, coro)
return sched.proactor.request_sendall(self, coro)
def finalize(self, sched):
super(SendAll, self).finalize(sched)
return self.sent
class Accept(SocketOperation):
"""
Returns a (conn, addr) tuple when the operation completes.
"""
__slots__ = ('conn', 'addr', 'cbuff')
def __init__(self, sock, **kws):
super(Accept, self).__init__(sock, **kws)
self.conn = None
def process(self, sched, coro):
super(Accept, self).process(sched, coro)
return sched.proactor.request_accept(self, coro)
def finalize(self, sched):
super(Accept, self).finalize(sched)
return (self.conn, self.addr)
def __repr__(self):
return "<%s at 0x%X %s conn:%r to:%s>" % (
self.__class__.__name__,
id(self),
self.sock,
self.conn,
self.timeout
)
class Connect(SocketOperation):
"""
"""
__slots__ = ('addr', 'conn', 'connect_attempted')
def __init__(self, sock, addr, **kws):
"""
Connect to the given `addr` using `sock`.
"""
super(Connect, self).__init__(sock, **kws)
self.addr = addr
self.connect_attempted = False
def process(self, sched, coro):
super(Connect, self).process(sched, coro)
return sched.proactor.request_connect(self, coro)
def finalize(self, sched):
super(Connect, self).finalize(sched)
return self.sock
@coro
def RecvAll(sock, length, **k):
recvd = 0
data = []
while recvd < length:
chunk = (yield Recv(sock, length-recvd, **k))
recvd += len(chunk)
data.append(chunk)
assert recvd == length
raise StopIteration(''.join(data))
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ("mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
"_close")
def __init__(self, sock, mode='rb', bufsize=-1, close=False):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
self._close = close
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
@coro
def close(self, **kws):
try:
if self._sock:
yield self.flush(**kws)
finally:
if self._close:
self._sock.close()
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
@coro
def flush(self, **kws):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
yield self._sock.sendall(buffer, **kws)
def fileno(self):
return self._sock.fileno()
@coro
def write(self, data, **kws):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
yield self.flush(**kws)
@coro
def writelines(self, list, **kws):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
yield self.flush(**kws)
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
#~ from cogen.core.coroutines import debug_coro
#~ @debug_coro
@coro
def read(self, size=-1, **kws):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = (yield self._sock.recv(recv_size, **kws))
if not data:
break
buffers.append(data)
raise StopIteration("".join(buffers))
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
raise StopIteration(data[:size])
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = (yield self._sock.recv(recv_size, **kws))
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
raise StopIteration("".join(buffers))
#~ from coroutines import debug_coro
#~ @debug_coro
@coro
def readline(self, size=-1, **kws):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = (yield recv(1, **kws))
if not data:
break
buffers.append(data)
raise StopIteration("".join(buffers))
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
raise StopIteration(data[:nl])
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = (yield self._sock.recv(self._rbufsize, **kws))
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
raise StopIteration("".join(buffers))
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
raise StopIteration(data[:nl])
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
raise StopIteration(data[:size])
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = (yield self._sock.recv(self._rbufsize, **kws))
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
raise StopIteration("".join(buffers))
@coro
def readlines(self, sizehint=0, **kws):
total = 0
list = []
while True:
line = (yield self.readline(**kws))
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
raise StopIteration(list)
avg_line_length: 32.991749 | max_line_length: 92 | alphanum_fraction: 0.553644
count_classes: 18,845 | score_classes: 0.94258 | count_generators: 5,819 | score_generators: 0.291052 | count_decorators: 5,903 | score_decorators: 0.295253 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 6,987 | score_documentation: 0.349472

hexsha: e6129fa4f147cc4a7f5689db1f136f1e4febaef9 | size: 399 | ext: py | lang: Python
max_stars:  path=observatory/views/side.py | repo=spookey/observatory | head=be5cc92f53f12e6341e7e3040f26360e54cfdf7d | licenses=["MIT"] | count=null | events: null .. null
max_issues: path=observatory/views/side.py | repo=spookey/observatory | head=be5cc92f53f12e6341e7e3040f26360e54cfdf7d | licenses=["MIT"] | count=1 | events: 2020-03-28T09:51:56.000Z .. 2020-03-28T09:51:56.000Z
max_forks:  path=observatory/views/side.py | repo=spookey/dz_stats_page | head=be5cc92f53f12e6341e7e3040f26360e54cfdf7d | licenses=["MIT"] | count=null | events: null .. null
content:
from flask import Blueprint, current_app, send_from_directory
BLUEPRINT_SIDE = Blueprint('side', __name__)
@BLUEPRINT_SIDE.route('/logo.png', endpoint='logo')
@BLUEPRINT_SIDE.route('/favicon.ico')
@BLUEPRINT_SIDE.route('/favicon.png')
def favicon():
return send_from_directory(
current_app.static_folder,
current_app.config.get('FAVICON'),
mimetype='image/png',
)
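
# Illustrative sketch (added for this excerpt; not part of the original
# side.py): one way such a blueprint is typically wired into an application.
# The FAVICON filename below is an assumption, not taken from this repo.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.config["FAVICON"] = "favicon.png"  # read by favicon() via current_app
    app.register_blueprint(BLUEPRINT_SIDE)
    app.run(debug=True)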
avg_line_length: 26.6 | max_line_length: 61 | alphanum_fraction: 0.719298
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 288 | score_decorators: 0.721805 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 71 | score_documentation: 0.177945

hexsha: e615a7a5295760430baa3da38fd3109a777ed978 | size: 226 | ext: py | lang: Python
max_stars:  path=TestDictComprehension.py | repo=prakharindoria/Python-Code-Snippets | head=96887caa73c92fda7ebae0b59a72cf24c97a94f5 | licenses=["MIT"] | count=null | events: null .. null
max_issues: path=TestDictComprehension.py | repo=prakharindoria/Python-Code-Snippets | head=96887caa73c92fda7ebae0b59a72cf24c97a94f5 | licenses=["MIT"] | count=null | events: null .. null
max_forks:  path=TestDictComprehension.py | repo=prakharindoria/Python-Code-Snippets | head=96887caa73c92fda7ebae0b59a72cf24c97a94f5 | licenses=["MIT"] | count=null | events: null .. null
content:
t = input("Type a string")
s = t.lower()
alll = "abcdefghijklmnopqrstuvwxyz1234567890"
# count only alphanumeric characters, then sort by frequency (descending)
d = {c: s.count(c) for c in s if c in alll}
dt = sorted(d.items(), key=lambda x: x[1], reverse=True)
for k, v in dt:
    print(k, ":", v)
avg_line_length: 25.111111 | max_line_length: 54 | alphanum_fraction: 0.654867
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 57 | score_documentation: 0.252212

hexsha: e6170d2f693f08c4d21d708512f6fdcb2389cdda | size: 1,183 | ext: py | lang: Python
max_stars:  path=scripts/update_readme_metrics.py | repo=ygormutti/executor-exporter | head=4b985fdf03cbf0515b912aa9631c3c8f0c81a461 | licenses=["Apache-2.0"] | count=1 | events: 2022-02-24T02:21:14.000Z .. 2022-02-24T02:21:14.000Z
max_issues: path=scripts/update_readme_metrics.py | repo=ygormutti/executor-exporter | head=4b985fdf03cbf0515b912aa9631c3c8f0c81a461 | licenses=["Apache-2.0"] | count=null | events: null .. null
max_forks:  path=scripts/update_readme_metrics.py | repo=ygormutti/executor-exporter | head=4b985fdf03cbf0515b912aa9631c3c8f0c81a461 | licenses=["Apache-2.0"] | count=null | events: null .. null
content:
from sys import argv
from executor_exporter.exporter import metrics
def update_readme_metrics(readme_path):
columns = ("Name", "Type", "Labels", "Description")
sep = " | "
table_lines = [sep.join(columns), sep.join(["---"] * len(columns))]
for metric in metrics:
table_lines.append(
sep.join(
(
metric._name,
metric._type,
", ".join(metric._labelnames),
metric._documentation,
)
)
)
readme_lines = []
with open(readme_path) as readme_file:
for lineno, line in enumerate(readme_file.readlines()):
if "metrics:begin" in line:
begin = lineno
elif "metrics:end" in line:
end = lineno
readme_lines.append(line)
readme_lines = [
*readme_lines[: begin + 1],
"\n".join(table_lines) + "\n",
*readme_lines[end:],
]
with open(readme_path, "w") as readme_file:
readme_file.writelines(readme_lines)
if __name__ == "__main__":
readme_path = argv[1]
update_readme_metrics(readme_path)
avg_line_length: 26.288889 | max_line_length: 71 | alphanum_fraction: 0.540997
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 96 | score_documentation: 0.08115

hexsha: e61732e97bfa8fdbdf35bca08255c56c7737afe4 | size: 1,717 | ext: py | lang: Python
max_stars:  path=accounts/migrations/0002_auto_20160226_1548.py | repo=adrienlachaize/dezede | head=584ec30cedab95152e2f95595b7691a04e6736e2 | licenses=["BSD-3-Clause"] | count=15 | events: 2015-02-10T21:16:31.000Z .. 2021-03-25T16:46:20.000Z
max_issues: path=accounts/migrations/0002_auto_20160226_1548.py | repo=adrienlachaize/dezede | head=584ec30cedab95152e2f95595b7691a04e6736e2 | licenses=["BSD-3-Clause"] | count=4 | events: 2021-02-10T15:42:08.000Z .. 2022-03-11T23:20:38.000Z
max_forks:  path=accounts/migrations/0002_auto_20160226_1548.py | repo=adrienlachaize/dezede | head=584ec30cedab95152e2f95595b7691a04e6736e2 | licenses=["BSD-3-Clause"] | count=6 | events: 2016-07-10T14:20:48.000Z .. 2022-01-19T18:34:02.000Z
content:
import accounts.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='hierarchicuser',
managers=[
('objects', accounts.models.HierarchicUserManager()),
],
),
migrations.AlterField(
model_name='hierarchicuser',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='email address'),
),
migrations.AlterField(
model_name='hierarchicuser',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='hierarchicuser',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
migrations.AlterField(
model_name='hierarchicuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
avg_line_length: 42.925 | max_line_length: 409 | alphanum_fraction: 0.630169
count_classes: 1,620 | score_classes: 0.943506 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 554 | score_documentation: 0.322656

hexsha: e6184b491c0b4ec3b27d15dd525a54b420ab6962 | size: 109 | ext: py | lang: Python
max_stars:  path=tests/python-reference/dict/dict-bool.py | repo=jpolitz/lambda-py-paper | head=746ef63fc1123714b4adaf78119028afbea7bd76 | licenses=["Apache-2.0"] | count=25 | events: 2015-04-16T04:31:49.000Z .. 2022-03-10T15:53:28.000Z
max_issues: path=tests/python-reference/dict/dict-bool.py | repo=jpolitz/lambda-py-paper | head=746ef63fc1123714b4adaf78119028afbea7bd76 | licenses=["Apache-2.0"] | count=1 | events: 2018-11-21T22:40:02.000Z .. 2018-11-26T17:53:11.000Z
max_forks:  path=tests/python-reference/dict/dict-bool.py | repo=jpolitz/lambda-py-paper | head=746ef63fc1123714b4adaf78119028afbea7bd76 | licenses=["Apache-2.0"] | count=1 | events: 2021-03-26T03:36:19.000Z .. 2021-03-26T03:36:19.000Z
content:
___assertIs(not {}, True)
___assertTrue({1: 2})
___assertIs(bool({}), False)
___assertIs(bool({1: 2}), True)
avg_line_length: 21.8 | max_line_length: 31 | alphanum_fraction: 0.678899
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: e618960b87b4b729b558ed9c9d5f90f4d0854b6a | size: 8,575 | ext: py | lang: Python
max_stars:  path=danceschool/private_lessons/forms.py | repo=django-danceschool/django-danceschool | head=65ae09ffdcb0821e82df0e1f634fe13c0384a525 | licenses=["BSD-3-Clause"] | count=32 | events: 2017-09-12T04:25:25.000Z .. 2022-03-21T10:48:07.000Z
max_issues: path=danceschool/private_lessons/forms.py | repo=django-danceschool/django-danceschool | head=65ae09ffdcb0821e82df0e1f634fe13c0384a525 | licenses=["BSD-3-Clause"] | count=97 | events: 2017-09-01T02:43:08.000Z .. 2022-01-03T18:20:34.000Z
max_forks:  path=danceschool/private_lessons/forms.py | repo=django-danceschool/django-danceschool | head=65ae09ffdcb0821e82df0e1f634fe13c0384a525 | licenses=["BSD-3-Clause"] | count=19 | events: 2017-09-26T13:34:46.000Z .. 2022-03-21T10:48:10.000Z
content:
from django import forms
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.core.exceptions import ValidationError
from datetime import datetime, timedelta
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit
from danceschool.core.constants import getConstant
from danceschool.core.models import DanceRole, Location, Room, Instructor, PricingTier
from danceschool.core.utils.timezone import ensure_localtime
from danceschool.core.forms import LocationWithDataWidget
from .models import InstructorAvailabilitySlot
def get_duration_choices():
return [(x, x) for x in range(
getConstant('privateLessons__minimumLessonLength'),
getConstant('privateLessons__maximumLessonLength') + 1,
getConstant('privateLessons__lessonLengthInterval'),
)]
def get_default_duration():
return getConstant('privateLessons__defaultLessonLength')
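# Illustrative note (added): with assumed constants minimumLessonLength=30,
# maximumLessonLength=90 and lessonLengthInterval=15, get_duration_choices()
# returns [(30, 30), (45, 45), (60, 60), (75, 75), (90, 90)], i.e. one
# (value, label) pair per allowed lesson length.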
class SlotBookingForm(forms.Form):
slotId = forms.IntegerField(required=True, widget=forms.HiddenInput)
duration = forms.ChoiceField(
label=_('Duration'), choices=get_duration_choices,
initial=get_default_duration
)
role = forms.ModelChoiceField(label=_('Dance role'), queryset=DanceRole.objects.all())
participants = forms.IntegerField(
label=_('Expected # Participants'), initial=1, min_value=1,
help_text=_('Be advised that group lessons may be charged a different rate.')
)
comments = forms.CharField(
label=_('Comments/Notes'), required=False,
help_text=_(
'Please enter any comments or notes that you would like to be ' +
'provided to the instructor before the lesson, such as the topics ' +
'on which you may want to focus.'
)
)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
# Initialize the default form
super().__init__(*args, **kwargs)
# Allow users with appropriate permissions to process door registrations.
if user and user.has_perm('core.accept_door_payments'):
self.fields['payAtDoor'] = forms.BooleanField(
required=False, label=_('Door/Invoice Registration')
)
class SlotCreationForm(forms.Form):
instructorId = forms.ModelChoiceField(
label=_('Instructor'), queryset=Instructor.objects.all(),
widget=forms.HiddenInput, required=True
)
startDate = forms.DateField(label=_('Start date'), required=True, widget=forms.HiddenInput)
endDate = forms.DateField(label=_('End date'), required=True, widget=forms.HiddenInput)
startTime = forms.TimeField(
label=_('Start time'), required=True,
input_formats=(
getattr(settings, 'TIME_INPUT_FORMATS', []) +
['%I:%M %p', '%-I:%M %p', '%I:%M%p', '%-I:%M%p']
)
)
endTime = forms.TimeField(
label=_('End time'), required=True,
input_formats=(
getattr(settings, 'TIME_INPUT_FORMATS', []) +
['%I:%M %p', '%-I:%M %p', '%I:%M%p', '%-I:%M%p']
),
)
location = forms.ModelChoiceField(
label=_('Location'),
queryset=Location.objects.exclude(status=Location.StatusChoices.former),
required=False, widget=LocationWithDataWidget
)
room = forms.ModelChoiceField(
label=_('Room'),
queryset=Room.objects.exclude(location__status=Location.StatusChoices.former),
required=False
)
pricingTier = forms.ModelChoiceField(
label=_('Pricing Tier'), queryset=PricingTier.objects.filter(expired=False),
required=False,
help_text=_(
'A pricing tier is required for online registration and payment. ' +
'If your school handles scheduling, but not payment for lessons, ' +
'then leave this blank.'
)
)
status = forms.ChoiceField(
label=_('Initial Status'), required=True,
choices=InstructorAvailabilitySlot.SlotStatus.choices,
initial=InstructorAvailabilitySlot.SlotStatus.available
)
def clean(self):
'''
Only allow submission if there are not already slots in the submitted window,
and only allow rooms associated with the chosen location.
'''
super().clean()
startDate = self.cleaned_data.get('startDate')
endDate = self.cleaned_data.get('endDate')
startTime = self.cleaned_data.get('startTime')
endTime = self.cleaned_data.get('endTime')
instructor = self.cleaned_data.get('instructorId')
existingSlots = InstructorAvailabilitySlot.objects.filter(
instructor=instructor,
startTime__gt=(
ensure_localtime(datetime.combine(startDate, startTime)) -
timedelta(minutes=getConstant('privateLessons__lessonLengthInterval'))
),
startTime__lt=ensure_localtime(datetime.combine(endDate, endTime)),
)
if existingSlots.exists():
raise ValidationError(
_('Newly created slots cannot overlap existing slots for this instructor.'),
code='invalid'
)
class SlotUpdateForm(forms.Form):
slotIds = forms.ModelMultipleChoiceField(
required=True, widget=forms.MultipleHiddenInput,
queryset=InstructorAvailabilitySlot.objects.all()
)
updateStatus = forms.ChoiceField(
label=_('Update Status'), required=True,
choices=InstructorAvailabilitySlot.SlotStatus.choices,
initial=InstructorAvailabilitySlot.SlotStatus.available
)
updateLocation = forms.ModelChoiceField(
label=_('Update Location'),
queryset=Location.objects.exclude(status=Location.StatusChoices.former),
required=False, widget=LocationWithDataWidget
)
updateRoom = forms.ModelChoiceField(
label=_('Room'),
queryset=Room.objects.exclude(location__status=Location.StatusChoices.former),
required=False
)
updatePricing = forms.ModelChoiceField(
label=_('Update pricing'), queryset=PricingTier.objects.filter(expired=False), required=False,
help_text=_(
'A pricing tier is required for online registration and payment. ' +
'If your school handles scheduling, but not payment for lessons, ' +
'then leave this blank.'
)
)
deleteSlot = forms.BooleanField(
label=_('Delete slot'), initial=False,
help_text=_('Note that only slots with no current bookings may be deleted at this time.'),
required=False
)
class PrivateLessonStudentInfoForm(forms.Form):
'''
This is the form customers use to fill out their contact info
for private lessons that don't involve online payment only.
'''
firstName = forms.CharField(label=_('First Name'))
lastName = forms.CharField(label=_('Last Name'))
email = forms.EmailField()
phone = forms.CharField(
required=False, label=_('Telephone (optional)'),
help_text=_('We may use this to notify you in event of a cancellation.')
)
agreeToPolicies = forms.BooleanField(
required=True,
label=_('<strong>I agree to all policies (required)</strong>'),
help_text=_('By checking, you agree to abide by all policies.')
)
def __init__(self, *args, **kwargs):
self._request = kwargs.pop('request', None)
user = getattr(self._request, 'user', None)
payAtDoor = kwargs.pop('payAtDoor', False)
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_tag = False # Our template must explicitly include the <form tag>
if user and hasattr(user, 'customer') and user.customer and not payAtDoor:
# Input existing info for users who are logged in and have signed up before
self.fields['firstName'].initial = user.customer.first_name or user.first_name
self.fields['lastName'].initial = user.customer.last_name or user.last_name
self.fields['email'].initial = user.customer.email or user.email
self.fields['phone'].initial = user.customer.phone
self.helper.layout = Layout(
Div('firstName', 'lastName', 'email', css_class='form-inline'),
Div('phone', css_class='form-inline'),
Div('agreeToPolicies', css_class='card card-body bg-light'),
Submit('submit', _('Complete Registration'))
)
avg_line_length: 38.977273 | max_line_length: 102 | alphanum_fraction: 0.661574
count_classes: 7,607 | score_classes: 0.887114 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,316 | score_documentation: 0.270087

hexsha: e61a46397ce546d99911d246529ba90ca1cf69a8 | size: 32,292 | ext: py | lang: Python
max_stars:  path=engine/modules.py | repo=scofield7419/HeSyFu | head=cc06a644918d65aa898f65348077f3d9a3e5252b | licenses=["Apache-2.0"] | count=1 | events: 2021-11-04T02:31:39.000Z .. 2021-11-04T02:31:39.000Z
max_issues: path=engine/modules.py | repo=scofield7419/HeSyFu | head=cc06a644918d65aa898f65348077f3d9a3e5252b | licenses=["Apache-2.0"] | count=null | events: null .. null
max_forks:  path=engine/modules.py | repo=scofield7419/HeSyFu | head=cc06a644918d65aa898f65348077f3d9a3e5252b | licenses=["Apache-2.0"] | count=2 | events: 2021-12-28T03:38:37.000Z .. 2021-12-29T12:56:41.000Z
content:
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from typing import List, Tuple, Dict
import numpy as np
from torch.autograd import Variable
class DepGCN(nn.Module):
"""
Label-aware Dependency Convolutional Neural Network Layer
"""
def __init__(self, dep_num, dep_dim, in_features, out_features):
super(DepGCN, self).__init__()
self.dep_dim = dep_dim
self.in_features = in_features
self.out_features = out_features
self.dep_embedding = nn.Embedding(dep_num, dep_dim, padding_idx=0)
self.dep_attn = nn.Linear(dep_dim + in_features, out_features)
self.dep_fc = nn.Linear(dep_dim, out_features)
self.relu = nn.ReLU()
def forward(self, text, dep_mat, dep_labels):
dep_label_embed = self.dep_embedding(dep_labels)
batch_size, seq_len, feat_dim = text.shape
val_us = text.unsqueeze(dim=2)
val_us = val_us.repeat(1, 1, seq_len, 1)
val_sum = torch.cat([val_us, dep_label_embed], dim=-1)
r = self.dep_attn(val_sum)
p = torch.sum(r, dim=-1)
mask = (dep_mat == 0).float() * (-1e30)
p = p + mask
p = torch.softmax(p, dim=2)
p_us = p.unsqueeze(3).repeat(1, 1, 1, feat_dim)
output = val_us + self.dep_fc(dep_label_embed)
output = torch.mul(p_us, output)
output_sum = torch.sum(output, dim=2)
output_sum = self.relu(output_sum)
return output_sum
class ConstGCN(nn.Module):
"""
Label-aware Constituency Convolutional Neural Network Layer
"""
def __init__(
self,
num_inputs,
num_units,
num_labels,
dropout=0.0,
in_arcs=True,
out_arcs=True,
batch_first=False,
use_gates=True,
residual=False,
no_loop=False,
non_linearity="relu",
edge_dropout=0.0,
):
super(ConstGCN, self).__init__()
self.in_arcs = in_arcs
self.out_arcs = out_arcs
self.no_loop = no_loop
self.retain = 1.0 - edge_dropout
self.num_inputs = num_inputs
self.num_units = num_units
self.num_labels = num_labels
self.batch_first = batch_first
        # map the string default to an activation module; callables are used as-is
        self.non_linearity = nn.ReLU() if non_linearity == "relu" else non_linearity
self.sigmoid = nn.Sigmoid()
self.use_gates = use_gates
self.residual = residual
self.dropout = nn.Dropout(p=dropout)
self.layernorm = nn.LayerNorm(num_units)
if in_arcs:
self.V_in = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.V_in)
self.b_in = Parameter(torch.Tensor(num_labels, self.num_units))
nn.init.constant_(self.b_in, 0)
if self.use_gates:
self.V_in_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.V_in_gate)
self.b_in_gate = Parameter(torch.Tensor(num_labels, 1))
nn.init.constant_(self.b_in_gate, 1)
if out_arcs:
# self.V_out = autograd.Variable(torch.FloatTensor(self.num_inputs, self.num_units))
self.V_out = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.V_out)
# self.b_out = autograd.Variable(torch.FloatTensor(num_labels, self.num_units))
self.b_out = Parameter(torch.Tensor(num_labels, self.num_units))
nn.init.constant_(self.b_out, 0)
if self.use_gates:
self.V_out_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.V_out_gate)
self.b_out_gate = Parameter(torch.Tensor(num_labels, 1))
nn.init.constant_(self.b_out_gate, 1)
if not self.no_loop:
self.W_self_loop = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.W_self_loop)
if self.use_gates:
self.W_self_loop_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.W_self_loop_gate)
def forward(
self,
src,
arc_tensor_in=None,
arc_tensor_out=None,
label_tensor_in=None,
label_tensor_out=None,
mask_in=None,
mask_out=None,
mask_loop=None,
sent_mask=None,
):
if not self.batch_first:
encoder_outputs = src.permute(1, 0, 2).contiguous()
else:
encoder_outputs = src.contiguous()
batch_size = encoder_outputs.size()[0]
seq_len = encoder_outputs.size()[1]
max_degree = 1
input_ = encoder_outputs.view(
(batch_size * seq_len, self.num_inputs)
) # [b* t, h]
input_ = self.dropout(input_)
if self.in_arcs:
input_in = torch.mm(input_, self.V_in) # [b* t, h] * [h,h] = [b*t, h]
first_in = input_in.index_select(
0, arc_tensor_in[0] * seq_len + arc_tensor_in[1]
) # [b* t* degr, h]
second_in = self.b_in.index_select(0, label_tensor_in[0]) # [b* t* degr, h]
in_ = first_in + second_in
degr = int(first_in.size()[0] / batch_size // seq_len)
in_ = in_.view((batch_size, seq_len, degr, self.num_units))
if self.use_gates:
# compute gate weights
input_in_gate = torch.mm(
input_, self.V_in_gate
) # [b* t, h] * [h,h] = [b*t, h]
first_in_gate = input_in_gate.index_select(
0, arc_tensor_in[0] * seq_len + arc_tensor_in[1]
) # [b* t* mxdeg, h]
second_in_gate = self.b_in_gate.index_select(0, label_tensor_in[0])
in_gate = (first_in_gate + second_in_gate).view(
(batch_size, seq_len, degr)
)
max_degree += degr
if self.out_arcs:
input_out = torch.mm(input_, self.V_out) # [b* t, h] * [h,h] = [b* t, h]
first_out = input_out.index_select(
0, arc_tensor_out[0] * seq_len + arc_tensor_out[1]
) # [b* t* mxdeg, h]
second_out = self.b_out.index_select(0, label_tensor_out[0])
degr = int(first_out.size()[0] / batch_size // seq_len)
max_degree += degr
out_ = (first_out + second_out).view(
(batch_size, seq_len, degr, self.num_units)
)
if self.use_gates:
# compute gate weights
input_out_gate = torch.mm(
input_, self.V_out_gate
) # [b* t, h] * [h,h] = [b* t, h]
first_out_gate = input_out_gate.index_select(
0, arc_tensor_out[0] * seq_len + arc_tensor_out[1]
) # [b* t* mxdeg, h]
second_out_gate = self.b_out_gate.index_select(0, label_tensor_out[0])
out_gate = (first_out_gate + second_out_gate).view(
(batch_size, seq_len, degr)
)
if self.no_loop:
if self.in_arcs and self.out_arcs:
potentials = torch.cat((in_, out_), dim=2) # [b, t, mxdeg, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, out_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_in, mask_out), dim=1) # [b* t, mxdeg]
elif self.out_arcs:
potentials = out_ # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = out_gate # [b, t, mxdeg, h]
mask_soft = mask_out # [b* t, mxdeg]
elif self.in_arcs:
potentials = in_ # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = in_gate # [b, t, mxdeg, h]
mask_soft = mask_in # [b* t, mxdeg]
max_degree -= 1
else:
same_input = torch.mm(input_, self.W_self_loop).view(
encoder_outputs.size(0), encoder_outputs.size(1), -1
)
same_input = same_input.view(
encoder_outputs.size(0),
encoder_outputs.size(1),
1,
self.W_self_loop.size(1),
)
if self.use_gates:
same_input_gate = torch.mm(input_, self.W_self_loop_gate).view(
encoder_outputs.size(0), encoder_outputs.size(1), -1
)
if self.in_arcs and self.out_arcs:
potentials = torch.cat(
(in_, out_, same_input), dim=2
) # [b, t, mxdeg, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, out_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat(
(mask_in, mask_out, mask_loop), dim=1
) # [b* t, mxdeg]
elif self.out_arcs:
potentials = torch.cat(
(out_, same_input), dim=2
) # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = torch.cat(
(out_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_out, mask_loop), dim=1) # [b* t, mxdeg]
elif self.in_arcs:
potentials = torch.cat(
(in_, same_input), dim=2
) # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_in, mask_loop), dim=1) # [b* t, mxdeg]
else:
potentials = same_input # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = same_input_gate # [b, t, mxdeg, h]
mask_soft = mask_loop # [b* t, mxdeg]
potentials_resh = potentials.view(
(batch_size * seq_len, max_degree, self.num_units)
) # [h, b * t, mxdeg]
if self.use_gates:
potentials_r = potentials_gate.view(
(batch_size * seq_len, max_degree)
) # [b * t, mxdeg]
probs_det_ = (self.sigmoid(potentials_r) * mask_soft).unsqueeze(
2
) # [b * t, mxdeg]
potentials_masked = potentials_resh * probs_det_ # [b * t, mxdeg,h]
else:
# NO Gates
potentials_masked = potentials_resh * mask_soft.unsqueeze(2)
if self.retain == 1 or not self.training:
pass
else:
mat_1 = torch.Tensor(mask_soft.data.size()).uniform_(0, 1)
ret = torch.Tensor([self.retain])
mat_2 = (mat_1 < ret).float()
drop_mask = Variable(mat_2, requires_grad=False)
if potentials_resh.is_cuda:
drop_mask = drop_mask.cuda()
potentials_masked *= drop_mask.unsqueeze(2)
potentials_masked_ = potentials_masked.sum(dim=1) # [b * t, h]
potentials_masked_ = self.layernorm(potentials_masked_) * sent_mask.view(
batch_size * seq_len
).unsqueeze(1)
potentials_masked_ = self.non_linearity(potentials_masked_) # [b * t, h]
result_ = potentials_masked_.view(
(batch_size, seq_len, self.num_units)
) # [ b, t, h]
result_ = result_ * sent_mask.unsqueeze(2) # [b, t, h]
memory_bank = result_ # [t, b, h]
if self.residual:
memory_bank += src
return memory_bank
class BilinearScorer(nn.Module):
def __init__(self, hidden_dim, role_vocab_size, dropout=0.0, gpu_id=-1):
super(BilinearScorer, self).__init__()
if gpu_id > -1:
self.use_gpu = True
else:
self.use_gpu = False
self.hidden_dim = hidden_dim
self.role_vocab_size = role_vocab_size
self.dropout = nn.Dropout(p=dropout)
self.U = Parameter(
torch.Tensor(self.hidden_dim, self.role_vocab_size, self.hidden_dim)
)
nn.init.orthogonal_(self.U)
self.bias1 = Parameter(torch.Tensor(1, self.hidden_dim * self.role_vocab_size))
nn.init.constant_(self.bias1, 0)
self.bias2 = Parameter(torch.Tensor(1, self.role_vocab_size))
nn.init.constant_(self.bias2, 0)
def forward(self, pred_input, args_input):
b, t, h = pred_input.data.shape
pred_input = self.dropout(pred_input)
args_input = self.dropout(args_input)
first = (
torch.mm(pred_input.view(-1, h), self.U.view(h, -1)) + self.bias1
) # [b*t, h] * [h,r*h] = [b*t,r*h]
out = torch.bmm(
first.view(-1, self.role_vocab_size, h), args_input.view(-1, h).unsqueeze(2)
) # [b*t,r,h] [b*t, h, 1] = [b*t, r]
out = out.squeeze(2) + self.bias2
return out
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
def forward(self, q, k, v, attn_mask):
attn_score = torch.matmul(q, k.transpose(-1, -2)) / np.sqrt(self.d_k)
attn_score.masked_fill_(attn_mask, -1e9)
attn_weights = nn.Softmax(dim=-1)(attn_score)
output = torch.matmul(attn_weights, v)
return output, attn_weights
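# Note (added): the class above computes softmax(Q @ K^T / sqrt(d_k)) @ V,
# with masked positions pushed to -1e9 before the softmax so they receive
# (near-)zero attention weight.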
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads):
super(MultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.d_k = self.d_v = d_model // n_heads
self.WQ = nn.Linear(d_model, d_model)
self.WK = nn.Linear(d_model, d_model)
self.WV = nn.Linear(d_model, d_model)
self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
self.linear = nn.Linear(n_heads * self.d_v, d_model)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
q_heads = self.WQ(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
k_heads = self.WK(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
v_heads = self.WV(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
attn, attn_weights = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, attn_mask)
attn = attn.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
output = self.linear(attn)
return output, attn_weights
class PositionWiseFeedForwardNetwork(nn.Module):
def __init__(self, d_model, d_ff):
super(PositionWiseFeedForwardNetwork, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.relu = nn.ReLU()
def forward(self, inputs):
output = self.relu(self.linear1(inputs))
output = self.linear2(output)
return output
class EncoderLayer(nn.Module):
def __init__(self, d_model, n_heads, p_drop, d_ff):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, n_heads)
self.dropout1 = nn.Dropout(p_drop)
self.layernorm1 = nn.LayerNorm(d_model, eps=1e-6)
self.ffn = PositionWiseFeedForwardNetwork(d_model, d_ff)
self.dropout2 = nn.Dropout(p_drop)
self.layernorm2 = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, inputs, attn_mask):
attn_outputs, attn_weights = self.mha(inputs, inputs, inputs, attn_mask)
attn_outputs = self.dropout1(attn_outputs)
attn_outputs = self.layernorm1(inputs + attn_outputs)
ffn_outputs = self.ffn(attn_outputs)
ffn_outputs = self.dropout2(ffn_outputs)
ffn_outputs = self.layernorm2(attn_outputs + ffn_outputs)
return ffn_outputs, attn_weights
class TransformerEncoder(nn.Module):
def __init__(self, vocab_size, seq_len=300, d_model=768, n_layers=3, n_heads=8, p_drop=0.1, d_ff=500, pad_id=0):
super(TransformerEncoder, self).__init__()
self.pad_id = pad_id
self.sinusoid_table = self.get_sinusoid_table(seq_len + 1, d_model) # (seq_len+1, d_model)
self.embedding = nn.Embedding(vocab_size, d_model)
self.pos_embedding = nn.Embedding.from_pretrained(self.sinusoid_table, freeze=True)
self.layers = nn.ModuleList([EncoderLayer(d_model, n_heads, p_drop, d_ff) for _ in range(n_layers)])
def forward(self, inputs):
positions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).repeat(inputs.size(0), 1) + 1
position_pad_mask = inputs.eq(self.pad_id)
positions.masked_fill_(position_pad_mask, 0)
outputs = self.embedding(inputs) + self.pos_embedding(positions)
attn_pad_mask = self.get_attention_padding_mask(inputs, inputs, self.pad_id)
for layer in self.layers:
outputs, attn_weights = layer(outputs, attn_pad_mask)
return outputs
def get_attention_padding_mask(self, q, k, pad_id):
attn_pad_mask = k.eq(pad_id).unsqueeze(1).repeat(1, q.size(1), 1)
return attn_pad_mask
def get_sinusoid_table(self, seq_len, d_model):
def get_angle(pos, i, d_model):
return pos / np.power(10000, (2 * (i // 2)) / d_model)
sinusoid_table = np.zeros((seq_len, d_model))
for pos in range(seq_len):
for i in range(d_model):
if i % 2 == 0:
sinusoid_table[pos, i] = np.sin(get_angle(pos, i, d_model))
else:
sinusoid_table[pos, i] = np.cos(get_angle(pos, i, d_model))
return torch.FloatTensor(sinusoid_table)
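# Hedged usage sketch (illustrative; not part of the original source): run a
# tiny padded batch through TransformerEncoder. vocab_size/seq_len/d_model are
# arbitrary demo values; pad_id=0 matches the constructor default, so the zeros
# below are treated as padding for both positions and attention masking.
def _demo_transformer_encoder():
    encoder = TransformerEncoder(vocab_size=100, seq_len=20, d_model=32, n_layers=2, n_heads=4, d_ff=64)
    tokens = torch.tensor([[5, 7, 9, 0, 0], [3, 4, 0, 0, 0]])  # 0 = padding
    outputs = encoder(tokens)  # [batch=2, seq=5, d_model=32]
    assert outputs.shape == (2, 5, 32)
    return outputs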
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : ``Dict[int, str]``, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
Returns
-------
``List[Tuple[int, int]]``
The allowed transitions (from_label_id, to_label_id).
"""
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity,
to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed
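# Hedged usage sketch (illustrative; not part of the original source): for a
# tiny BIO label set, the returned pairs include B-PER -> I-PER but exclude
# O -> I-PER, plus transitions involving the two extra START/END states.
def _demo_allowed_transitions():
    labels = {0: "O", 1: "B-PER", 2: "I-PER"}
    pairs = allowed_transitions("BIO", labels)
    assert (1, 2) in pairs       # B-PER -> I-PER is allowed
    assert (0, 2) not in pairs   # O -> I-PER is not
    return pairs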
def is_transition_allowed(constraint_type: str,
from_tag: str,
from_entity: str,
to_tag: str,
to_entity: str):
"""
Given a constraint type and strings ``from_tag`` and ``to_tag`` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : ``str``, required
The tag that the transition originates from. For example, if the
label is ``I-PER``, the ``from_tag`` is ``I``.
from_entity: ``str``, required
The entity corresponding to the ``from_tag``. For example, if the
label is ``I-PER``, the ``from_entity`` is ``PER``.
to_tag : ``str``, required
The tag that the transition leads to. For example, if the
label is ``I-PER``, the ``to_tag`` is ``I``.
to_entity: ``str``, required
The entity corresponding to the ``to_tag``. For example, if the
label is ``I-PER``, the ``to_entity`` is ``PER``.
Returns
-------
``bool``
Whether the transition is allowed under the given ``constraint_type``.
"""
# pylint: disable=too-many-return-statements
if to_tag == "START" or from_tag == "END":
return False
if constraint_type == "BIOUL":
if from_tag == "START":
return to_tag in ('O', 'B', 'U')
if to_tag == "END":
return from_tag in ('O', 'L', 'U')
return any([
from_tag in ('O', 'L', 'U') and to_tag in ('O', 'B', 'U'),
from_tag in ('B', 'I') and to_tag in ('I', 'L') and from_entity == to_entity
])
elif constraint_type == "BIO":
if from_tag == "START":
return to_tag in ('O', 'B')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
to_tag in ('O', 'B'),
to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "IOB1":
if from_tag == "START":
return to_tag in ('O', 'I')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
to_tag in ('O', 'I'),
to_tag == 'B' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "BMES":
if from_tag == "START":
return to_tag in ('B', 'S')
if to_tag == "END":
return from_tag in ('E', 'S')
return any([
to_tag in ('B', 'S') and from_tag in ('E', 'S'),
to_tag == 'M' and from_tag == 'B' and from_entity == to_entity,
to_tag == 'E' and from_tag in ('B', 'M') and from_entity == to_entity,
])
else:
raise IOError("Unknown constraint type: {constraint_type}")
class CRF(torch.nn.Module):
def __init__(self,
num_tags: int,
constraints: List[Tuple[int, int]] = None,
include_start_end_transitions: bool = True) -> None:
super().__init__()
self.num_tags = num_tags
self.transitions = torch.nn.Parameter(torch.Tensor(num_tags, num_tags))
if constraints is None:
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(1.)
else:
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(0.)
for i, j in constraints:
constraint_mask[i, j] = 1.
self._constraint_mask = torch.nn.Parameter(constraint_mask, requires_grad=False)
self.include_start_end_transitions = include_start_end_transitions
if include_start_end_transitions:
self.start_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.end_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.transitions)
if self.include_start_end_transitions:
torch.nn.init.normal_(self.start_transitions)
torch.nn.init.normal_(self.end_transitions)
def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
mask = mask.float().transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
for i in range(1, sequence_length):
emit_scores = logits[i].view(batch_size, 1, num_tags)
transition_scores = self.transitions.view(1, num_tags, num_tags)
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
inner = broadcast_alpha + emit_scores + transition_scores
alpha = (logsumexp(inner, 1) * mask[i].view(batch_size, 1) +
alpha * (1 - mask[i]).view(batch_size, 1))
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
return logsumexp(stops)
def _joint_likelihood(self,
logits: torch.Tensor,
tags: torch.Tensor,
mask: torch.LongTensor) -> torch.Tensor:
"""
Computes the numerator term for the log-likelihood, which is just score(inputs, tags)
"""
batch_size, sequence_length, _ = logits.data.shape
logits = logits.transpose(0, 1).contiguous()
mask = mask.float().transpose(0, 1).contiguous()
tags = tags.transpose(0, 1).contiguous()
if self.include_start_end_transitions:
score = self.start_transitions.index_select(0, tags[0])
else:
score = 0.0
for i in range(sequence_length - 1):
current_tag, next_tag = tags[i], tags[i + 1]
transition_score = self.transitions[current_tag.view(-1), next_tag.view(-1)]
emit_score = logits[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)
score = score + transition_score * mask[i + 1] + emit_score * mask[i]
last_tag_index = mask.sum(0).long() - 1
last_tags = tags.gather(0, last_tag_index.view(1, batch_size)).squeeze(0)
if self.include_start_end_transitions:
last_transition_score = self.end_transitions.index_select(0, last_tags)
else:
last_transition_score = 0.0
last_inputs = logits[-1] # (batch_size, num_tags)
last_input_score = last_inputs.gather(1, last_tags.view(-1, 1)) # (batch_size, 1)
last_input_score = last_input_score.squeeze() # (batch_size,)
score = score + last_transition_score + last_input_score * mask[-1]
return score
def forward(self,
inputs: torch.Tensor,
tags: torch.Tensor,
mask: torch.ByteTensor = None) -> torch.Tensor:
"""
Computes the log likelihood.
"""
if mask is None:
mask = torch.ones(*tags.size(), dtype=torch.long)
log_denominator = self._input_likelihood(inputs, mask)
log_numerator = self._joint_likelihood(inputs, tags, mask)
return torch.sum(log_numerator - log_denominator)
def viterbi_tags(self,
logits: torch.Tensor,
mask: torch.Tensor) -> List[Tuple[List[int], float]]:
_, max_seq_length, num_tags = logits.size()
logits, mask = logits.data, mask.data
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.)
constrained_transitions = (
self.transitions * self._constraint_mask[:num_tags, :num_tags] +
-10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
)
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[start_tag, :num_tags] = (
self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data +
-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())
)
transitions[:num_tags, end_tag] = (
self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data +
-10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
)
else:
transitions[start_tag, :num_tags] = (-10000.0 *
(1 - self._constraint_mask[start_tag, :num_tags].detach()))
transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
best_paths = []
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
sequence_length = (torch.sum(prediction_mask)).int()
tag_sequence.fill_(-10000.)
tag_sequence[0, start_tag] = 0.
tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length]
tag_sequence[sequence_length + 1, end_tag] = 0.
viterbi_path, viterbi_score = viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions)
viterbi_path = viterbi_path[1:-1]
best_paths.append((viterbi_path, viterbi_score.item()))
return best_paths
def logsumexp(tensor: torch.Tensor,
dim: int = -1,
keepdim: bool = False) -> torch.Tensor:
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
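# Hedged sanity check (illustrative; not part of the original source): the
# numerically stable helper above should agree with torch.logsumexp.
def _check_logsumexp():
    x = torch.randn(4, 6)
    assert torch.allclose(logsumexp(x, dim=-1), torch.logsumexp(x, dim=-1), atol=1e-5)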
def viterbi_decode(tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations=None):
sequence_length, num_tags = list(tag_sequence.size())
if tag_observations:
if len(tag_observations) != sequence_length:
raise IOError("Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}"
.format(sequence_length, tag_observations))
else:
tag_observations = [-1 for _ in range(sequence_length)]
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[0, :])
for timestep in range(1, sequence_length):
summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
scores, paths = torch.max(summed_potentials, 0)
observation = tag_observations[timestep]
if tag_observations[timestep - 1] != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
print("The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!")
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
path_indices.append(paths.squeeze())
viterbi_score, best_path = torch.max(path_scores[-1], 0)
viterbi_path = [int(best_path.numpy())]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
viterbi_path.reverse()
return viterbi_path, viterbi_score
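# Hedged usage sketch (illustrative; not part of the original source): score a
# random batch of emissions with the unconstrained CRF and decode the best tag
# sequences. All sizes are arbitrary demo values.
def _demo_crf():
    num_tags, batch, seq = 4, 2, 5
    crf = CRF(num_tags)
    logits = torch.randn(batch, seq, num_tags)
    tags = torch.randint(0, num_tags, (batch, seq))
    mask = torch.ones(batch, seq, dtype=torch.long)
    log_likelihood = crf(logits, tags, mask)     # summed log-likelihood of the gold tags
    best_paths = crf.viterbi_tags(logits, mask)  # [(tag_id_list, score), ...] per sequence
    return log_likelihood, best_paths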
| 38.488677
| 120
| 0.580515
| 24,894
| 0.770903
| 0
| 0
| 0
| 0
| 0
| 0
| 4,068
| 0.125975
|
e61d028876011792c07b86f917e40c7cc75e894b
| 2,293
|
py
|
Python
|
src/ScreenCapLibrary/utils.py
|
davesliu/ScreenCapLibrary
|
b5537c44c740e0f43e424fb0028dbcfd0e5b0557
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ScreenCapLibrary/utils.py
|
davesliu/ScreenCapLibrary
|
b5537c44c740e0f43e424fb0028dbcfd0e5b0557
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ScreenCapLibrary/utils.py
|
davesliu/ScreenCapLibrary
|
b5537c44c740e0f43e424fb0028dbcfd0e5b0557
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def _norm_path(path):
if not path:
return path
return os.path.normpath(path.replace('/', os.sep))
def _compression_value_conversion(value):
"""
    PNG compression values are within the range [0, 9], so the [0-100]
    quality argument must be mapped onto that interval.
"""
try:
if int(value) < 0 or int(value) > 100:
raise RuntimeError("Quality argument must be of between 0 and 100.")
return 0 if int(value) == 100 else int(9 - (int(value) / 11))
except ValueError:
raise RuntimeError("Quality argument must be of type integer.")
def _pil_quality_conversion(value):
"""
    The quality in Pillow is between [1, 95], so the [0-100] quality
    argument must be clamped to that interval.
"""
try:
if int(value) < 0 or int(value) > 100:
raise RuntimeError("Quality argument must be of between 0 and 100.")
if int(value) < 1:
return 1
elif int(value) >= 95:
return 95
return int(value)
except ValueError:
raise RuntimeError("The image quality argument must be of type integer.")
class suppress_stderr(object):
def __init__(self):
# Open a null file
self.null_fd = os.open(os.devnull, os.O_RDWR)
# Save the actual stderr (2) file descriptor.
self.save_fd = os.dup(2)
def __enter__(self):
# Assign the null pointer to stderr.
os.dup2(self.null_fd, 2)
def __exit__(self, *_):
# Re-assign the real stderr back to (2)
os.dup2(self.save_fd, 2)
# Close all file descriptors
os.close(self.null_fd)
os.close(self.save_fd)
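# Hedged usage sketch (illustrative; not part of the original source): anything
# written to file descriptor 2 inside the block is discarded.
def _demo_suppress_stderr():
    import sys
    with suppress_stderr():
        print("this message is swallowed", file=sys.stderr, flush=True)
    print("this message is visible again", file=sys.stderr, flush=True)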
| 31.410959
| 81
| 0.647187
| 536
| 0.233755
| 0
| 0
| 0
| 0
| 0
| 0
| 1,211
| 0.528129
|
e61e22d90752c055fa760632a6070ebc75d3da15
| 228
|
py
|
Python
|
utils/alerts.py
|
RealDebian/Palpeo
|
23be184831a3c529cf933277944e7aacda08cdad
|
[
"MIT"
] | null | null | null |
utils/alerts.py
|
RealDebian/Palpeo
|
23be184831a3c529cf933277944e7aacda08cdad
|
[
"MIT"
] | null | null | null |
utils/alerts.py
|
RealDebian/Palpeo
|
23be184831a3c529cf933277944e7aacda08cdad
|
[
"MIT"
] | null | null | null |
from colorama import Fore
good = Fore.GREEN + '[*]' + Fore.RESET
bad = Fore.RED + '[!]' + Fore.RESET
warning = Fore.RED + '[$]' + Fore.RESET
excellent = Fore.CYAN + '[$]' + Fore.RESET
debug_point = Fore.RED + '{*}' + Fore.RESET
| 32.571429
| 43
| 0.609649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.109649
|
e6230fc750b387220c642ab851a058153bb25082
| 7,206
|
py
|
Python
|
pybilt/common/gaussian.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 11
|
2019-07-29T16:21:53.000Z
|
2022-02-02T11:44:57.000Z
|
pybilt/common/gaussian.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 11
|
2019-05-15T09:30:05.000Z
|
2021-07-19T16:49:59.000Z
|
pybilt/common/gaussian.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 9
|
2019-08-12T11:14:45.000Z
|
2020-12-22T18:22:55.000Z
|
"""Define Gaussian function objects.
This module defines the Gaussian class and the GaussianRange class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import object
import numpy as np
from six.moves import range
class Gaussian(object):
"""A Gaussian function object.
Attributes:
mean (float): The mean of the Gaussian.
std (float): The standard deviation of the Gaussian.
"""
def __init__(self, mean,std):
"""Initialize a Gaussian function object.
Args:
mean (float): Set the mean of the Gaussian.
std (float): Set the standard deviation of the Gaussian.
"""
stdinv = 1.0/std
normalc = stdinv*(1.0/np.sqrt(np.pi))
self.sigma = std
self.mean = mean
self._normconst = normalc
return
def eval(self,x_in):
"""Return the Gaussian function evaluated at the input x value.
Args:
x_in (float): The x value to evaluate the function at.
Returns:
float: The function evaluation for the Gaussian.
"""
stdinv = 1.0/self.sigma
stdinvsq = stdinv**2
normalc = self._normconst
expon = -(x_in - self.mean)**2 * (0.5*stdinvsq)
y = normalc * np.exp(expon)
return y
def reset_mean(self,new_mean):
"""Change the mean of the Gaussian function.
Args:
new_mean (float): The new mean of the Gaussian function.
"""
self.mean = new_mean
return
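# Hedged usage sketch (illustrative; not part of the original source): the
# function peaks at its mean and decays away from it.
def _demo_gaussian():
    g = Gaussian(mean=0.0, std=1.0)
    assert g.eval(0.0) > g.eval(1.0) > g.eval(2.0)
    return g.eval(0.0)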
class GaussianRange(object):
"""Define a Gaussian function over a range.
This object is used to define a Gaussian function over a defined
finite range and store its values as evaluated at points evenly spaced
over the range. The points can then for example be used for integrating
the Gaussian function over the range using numerical quadrature.
Attributes:
mean (float): The mean of the Gaussian.
std (float): The standard deviation of the Gaussian.
upper (float): The upper boundary of the range.
lower (float): The lower boundary of the range.
npoints (int): The number of points to evaluate in the range.
"""
def __init__(self,in_range,mean,std,npoints=200):
"""Initialize the GaussianRange object.
The GaussianRange stores the values of Gaussian function with the
input mean and standard deviation evaluated at evenly spaced points
in the specified x-value range.
Args:
in_range (tuple, list): Specify the endpoints for range, e.g.
(x_start, x_end).
mean (float): The mean of the Gaussian function.
std (float): The standard deviation of the Gaussian function.
npoints (Optional[int]): The number of x-value points to
evaluate the Gaussian function for in the specified range (i.e.
in_range).
"""
x_p = np.linspace(in_range[0],in_range[1],npoints,endpoint=True)
y_p = np.zeros(npoints)
yc = 0
stdinv = 1.0/std
stdinvsq = stdinv**2
normalc = stdinv*(1.0/np.sqrt(np.pi))
for x in x_p:
expon = -(x - mean)**2 * (0.5*stdinvsq)
y = normalc * np.exp(expon)
y_p[yc]=y
yc+=1
self.x = x_p
self.y = y_p
self.sigma = std
self.mean = mean
self._normconst = normalc
self.upper = in_range[1]
self.lower = in_range[0]
self._dx = x_p[1]-x_p[0]
self.npoints = npoints
return
def get_values(self):
"""Return the x and y values for the Gaussian range function.
Returns:
tuple: The x and y values for the function, returned as (
x_values, y_values).
"""
return (self.x,self.y)
def eval(self,x_in):
"""Return the Gaussian function evaluated at the input x value.
Args:
x_in (float): The x value to evaluate the function at.
Returns:
float: The function evaluation for the Gaussian.
"""
stdinv = 1.0/self.sigma
stdinvsq = stdinv**2
normalc = self._normconst
expon = -(x_in - self.mean)**2 * (0.5*stdinvsq)
y = normalc * np.exp(expon)
return y
def integrate_range(self, lower, upper):
"""Returns the numerical integration of the Gaussian range.
This function does a simple quadrature for the Gaussian function as
evaluated on the range (or subset of the range) specified at
initialization.
Args:
lower (float): The lower boundary for the integration.
upper (float): The upper boundary for the integration.
Returns:
float: The numerical value of the Gaussian range integrated from
lower to upper.
Notes:
This function does not thoroughly check the bounds, so if upper
is less than lower the function will break.
"""
if upper>self.upper:
upper=self.upper
if lower<self.lower:
lower = self.lower
i_l = int(np.floor((lower-self.lower)/self._dx))
i_u = int(np.floor((upper-self.lower)/self._dx))
#print "i_l ",i_l," i_u ",i_u
total = 0.0
for i in range(i_l,i_u):
total+= self.y[i]*self._dx
return total
def sum_range(self, lower, upper):
"""Returns the over the Gaussian range.
This function sums the Gaussian function at the points that were
evaluated on the range (or subset of the range) specified at
initialization.
Args:
lower (float): The lower boundary for the sum.
upper (float): The upper boundary for the sum.
Returns:
float: The numerical value of the Gaussian range as summed from
lower to upper.
Notes:
This function does not thoroughly check the bounds, so if upper
is less than lower the function will break.
"""
if upper>self.upper:
upper=self.upper
if lower<self.lower:
lower = self.lower
i_l = int(np.floor((lower-self.lower)/self._dx))
i_u = int(np.floor((upper-self.lower)/self._dx))
total = 0.0
for i in range(i_l,i_u):
total+= self.y[i]
return total
def normalize(self):
"""Normalizes (by area) the Gaussian function values over the range."""
total = 0.0
for i in range(0,self.npoints):
total+=self.y[i]*self._dx
for i in range(0,self.npoints):
self.y[i]/=total
return
def reset_mean(self,new_mean):
"""Change the mean of the Gaussian function.
Args:
new_mean (float): The new mean of the Gaussian function.
Notes:
This function does not re-evaluate the Gaussian range and
therefore only affects the output of the eval function.
"""
self.mean = new_mean
return
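# Hedged usage sketch (illustrative; not part of the original source): build a
# range, normalize it, and integrate over the full interval; after
# normalization the quadrature should come out close to 1.
def _demo_gaussian_range():
    g_range = GaussianRange((-5.0, 5.0), mean=0.0, std=1.0, npoints=500)
    g_range.normalize()
    total = g_range.integrate_range(-5.0, 5.0)
    return total  # approximately 1.0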
| 30.927039
| 79
| 0.592007
| 6,903
| 0.957952
| 0
| 0
| 0
| 0
| 0
| 0
| 4,294
| 0.595892
|
e62402ac83b864060bad9000d2d1550eba4920c4
| 3,538
|
py
|
Python
|
savingzelda/lib.py
|
recarreira/saving-zelda
|
70d1dd799f516ceb7ea9a435472da74b3b58bf91
|
[
"MIT"
] | 2
|
2016-07-11T01:56:08.000Z
|
2017-01-04T17:39:17.000Z
|
savingzelda/lib.py
|
recarreira/saving-zelda
|
70d1dd799f516ceb7ea9a435472da74b3b58bf91
|
[
"MIT"
] | 5
|
2016-07-10T16:25:44.000Z
|
2016-07-22T14:20:02.000Z
|
savingzelda/lib.py
|
recarreira/saving-zelda
|
70d1dd799f516ceb7ea9a435472da74b3b58bf91
|
[
"MIT"
] | 1
|
2016-07-21T22:10:02.000Z
|
2016-07-21T22:10:02.000Z
|
import requests
import re
from urlparse import urlparse
from bs4 import BeautifulSoup
from collections import defaultdict
class SavingZelda(object):
def __init__(self, url, logger):
self.url = url
self.logger = logger
self.body = ""
self.not_checked = []
self.list_of_links = []
self.links_and_status = {}
self.dead_links = {}
self.links_by_status = {}
def get_page(self, url):
response = requests.get(url, verify=False)
if response.status_code == 200:
self.body = response.text
else:
message = 'Oops! The page returned a status code {status}'.format(status=str(response.status_code))
raise Exception(message)
def get_links(self, html):
soup = BeautifulSoup(html)
http = re.compile("^http*")
relative = re.compile("^/")
for link in soup.find_all('a'):
href = link.get('href')
if not href:
continue
elif http.findall(href):
self.list_of_links.append(href)
elif relative.findall(href):
base_url = urlparse(self.url)
url = "{0}://{1}{2}".format(base_url.scheme, base_url.netloc, href)
self.list_of_links.append(url)
else:
self.not_checked.append(href)
def check_link(self, link):
self.logger.debug("Checking {0}".format(link))
try:
response = requests.get(link, verify=False, allow_redirects=True)
status = response.status_code
except requests.exceptions.ConnectionError:
status = "Nodename nor servname provided, or not known"
except Exception, e:
status = str(e)
self.links_and_status[link] = status
def check_links(self, list_of_links):
self.logger.info("Checking links...")
for link in list_of_links:
self.check_link(link)
def is_recursive(self, link):
base_url_parsed = urlparse(self.url)
link_parsed = urlparse(link)
return link_parsed.netloc == base_url_parsed.netloc
def can_we_save_the_day(self, links_and_status):
return links_and_status.values().count(200) == len(links_and_status)
def get_dead_links(self, links_and_status):
self.dead_links = [link for link in links_and_status if links_and_status[link] != 200]
def group_links_by_status(self, links_and_status):
self.links_by_status = defaultdict(list)
for key, value in sorted(links_and_status.iteritems()):
self.links_by_status[value].append(key)
def run(self):
self.get_page(self.url)
self.get_links(self.body)
self.check_links(self.list_of_links)
self.group_links_by_status(self.links_and_status)
if self.can_we_save_the_day(self.links_and_status):
success_message = "No dead links! Zelda is safe and Hyrule is in peace! <3"
self.logger.info(success_message)
else:
self.get_dead_links(self.links_and_status)
dead_link_message = "Oh no! Hyrule is in great danger! Dead link found: {0}".format(self.dead_links)
self.logger.info(dead_link_message)
self.logger.debug("Links not checked: {0}".format(self.not_checked))
self.logger.debug("Result by status: \n{0}".format(self.links_by_status))
if __name__ == "__main__":
import sys
saving_zelda = SavingZelda(sys.argv[1])
saving_zelda.run()
| 35.38
| 112
| 0.628604
| 3,302
| 0.933296
| 0
| 0
| 0
| 0
| 0
| 0
| 336
| 0.094969
|
e6257ae02757a301799ad06a407d32569d49a6d5
| 1,352
|
py
|
Python
|
django_forms/forms_project/forms_project/urls.py
|
joyliao07/django_review
|
e4311d2ccbb96646a6867e5fc426ca67a122d7ed
|
[
"MIT"
] | null | null | null |
django_forms/forms_project/forms_project/urls.py
|
joyliao07/django_review
|
e4311d2ccbb96646a6867e5fc426ca67a122d7ed
|
[
"MIT"
] | 8
|
2020-02-12T00:30:10.000Z
|
2021-06-10T18:16:37.000Z
|
django_forms/forms_project/forms_project/urls.py
|
joyliao07/django_review
|
e4311d2ccbb96646a6867e5fc426ca67a122d7ed
|
[
"MIT"
] | null | null | null |
"""forms_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from forms_app import views
urlpatterns = [
path('', views.home_view, name="home"),
path('cbv', views.CBView.as_view()),
path('cbvt', views.CBVTemplate.as_view()),
path('forms_app/', include('forms_app.urls', namespace='forms_app')),
path('showtopic', views.show_topic, name="show topic"),
path('testforms', views.testform_view, name="test forms"),
path('userprofile', views.userprofile_view, name="user profile"),
path('register', views.register, name="register"),
path('login', views.login_view, name="login"),
path('logout', views.logout_view, name="logout"),
path('admin/', admin.site.urls),
]
| 39.764706
| 77
| 0.697485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 825
| 0.610207
|
e625df758f3e2fdaeb576f8377536aeeebd5b8b3
| 635
|
py
|
Python
|
extractors/folha_news_extractor.py
|
LorhanSohaky/POOA
|
c604f03f9b7bbfccecb75a982cc76fe428c36433
|
[
"MIT"
] | 1
|
2020-12-05T21:01:10.000Z
|
2020-12-05T21:01:10.000Z
|
extractors/folha_news_extractor.py
|
LorhanSohaky/POOA
|
c604f03f9b7bbfccecb75a982cc76fe428c36433
|
[
"MIT"
] | null | null | null |
extractors/folha_news_extractor.py
|
LorhanSohaky/POOA
|
c604f03f9b7bbfccecb75a982cc76fe428c36433
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from news import News
from .abstract_news_extractor import AbstractNewsExtractor
class FolhaNewsExtractor(AbstractNewsExtractor):
def __init__(self):
super().__init__('https://www.folha.uol.com.br')
def extract_news(self):
news = []
html_text = requests.get(self.url).text
soup = BeautifulSoup(html_text, 'html.parser')
for item in soup.find_all('ul','c-tools-share__list'):
title = item.get('data-sharebar-text')
url = item.get('data-sharebar-url')
news.append(News(title,url))
return news
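# Hedged usage sketch (illustrative; not part of the original source): fetching
# the current headlines requires network access to folha.uol.com.br and relies
# on the News class imported above.
def _demo_folha_extractor():
    extractor = FolhaNewsExtractor()
    return extractor.extract_news()  # list of News objects (title, url)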
| 27.608696
| 62
| 0.661417
| 505
| 0.795276
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.168504
|
e626176d8836eb758155d498bd351936493ec76d
| 3,871
|
py
|
Python
|
pyrbi/__init__.py
|
hack-a-team/pyrbi
|
09296788d9b9a29ae7aaeff960992d2893468372
|
[
"MIT"
] | null | null | null |
pyrbi/__init__.py
|
hack-a-team/pyrbi
|
09296788d9b9a29ae7aaeff960992d2893468372
|
[
"MIT"
] | null | null | null |
pyrbi/__init__.py
|
hack-a-team/pyrbi
|
09296788d9b9a29ae7aaeff960992d2893468372
|
[
"MIT"
] | null | null | null |
"""
Provides a simple Python client for RBI REST API
"""
from . import exceptions
import requests
__version__ = "0.1.0"
class PyRBI:
"""
A client for RBI Blockchain's REST API
Usage:
Create a new instance using your credentials
>>> cli = PyRBI("user", "pass")
From there on you can create a new wallet, sync a wallet and put/retrieve
data from a wallet.
"""
def __init__(self, username, password, home="http://portohack.rbiblockchain.io"):
"""
Creates a new client and authenticates the given user and password.
Accepts an optional argument regarding the root of the API.
"""
self.username = username
self.password = password
self.home = home
self.auth_data = self.auth()
self.token = self.auth_data["access_token"]
def _call(
self, path: str, headers=None, payload=None, method="GET", authenticate=True
) -> requests.Response:
"""
_call makes a generic HTTP request to the API. It wraps around Requests
capabilities.
"""
# If path does no start with a '/', we add one
path = path if (path[0] == "/") else f"/{path}"
url = f"{self.home}{path}"
kwargs = {}
handler = getattr(requests, method.lower())
if authenticate:
if not headers:
headers = {}
headers["Authorization"] = f"Bearer {self.token}"
if headers:
# Sending the headers back to kwargs, in order to pass to handler
kwargs["headers"] = headers
if payload:
if method.upper() == "POST":
kwargs["json"] = payload
elif method.upper() == "GET":
kwargs["params"] = payload
return handler(url, **kwargs)
def auth(self):
"""
performs an OAuth2 password-based authentication against the API
"""
url = f"{self.home}/oauth/token"
header = {"Content-Type": "application/x-www-form-urlencoded"}
data = {
"grant_type": "password",
"username": self.username,
"password": self.password,
}
return requests.post(url, data, headers=header).json()
def get_mnemonic(self):
"""
returns 12 words that can be used as mnemonics in wallet creation
"""
return self._call("/stcpconnector/createmnemonic").json()["data"]
def create_wallet(self):
"""
creates a new wallet using new mnemonics
"""
mnemonic = self.get_mnemonic()
return self._call(
"/stcpconnector/createwallet", method="POST", payload={"mnemonic": mnemonic}
).json()["data"]
def sync_wallet(self, name, address):
"""
syncs a wallet identified by its name and address
"""
if not address.startswith("0x"):
raise exceptions.InvalidWalletAddress()
payload = {"name": name, "address": address}
return self._call("/stcpconnector/sync", method="POST", payload=payload).json()
def put_data(self, data, address, private_key):
"""
puts a data string into an wallet
"""
if not address.startswith("0x"):
raise exceptions.InvalidWalletAddress()
payload = {"data": data, "to": address, "from": address, "pk": private_key}
return self._call(
"/stcpconnector/registerdata", method="POST", payload=payload
).json()
def get_data(self, transaction_hash):
"""
returns the data input at a given transaction
"""
payload = self._call(
f"/stcpconnector/querytransaction/{transaction_hash}"
).json()
data = payload["data"]["input"][2:]
data = bytearray.fromhex(data).decode()[2:]
return data
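# Hedged usage sketch (illustrative; not part of the original source): all
# arguments below are placeholders, and every call goes to the live API
# configured in `home`. The transaction hash to read back is assumed to be
# known from elsewhere (e.g. from inspecting the put_data response).
def _demo_pyrbi(username, password, address, private_key, tx_hash):
    cli = PyRBI(username, password)
    cli.put_data("hello ledger", address, private_key)
    return cli.get_data(tx_hash)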
| 28.674074
| 88
| 0.57427
| 3,745
| 0.96745
| 0
| 0
| 0
| 0
| 0
| 0
| 1,681
| 0.434255
|
e6266840cb7ce270f6afeec9709e2ac1a2d1d286
| 1,426
|
py
|
Python
|
scripts/sequence/replace_selenocystein.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 10
|
2015-04-28T14:15:04.000Z
|
2021-03-15T00:07:38.000Z
|
scripts/sequence/replace_selenocystein.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | null | null | null |
scripts/sequence/replace_selenocystein.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 6
|
2017-03-16T22:38:41.000Z
|
2021-08-11T00:22:52.000Z
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
import os
from copy import deepcopy
from Bio import SeqIO
from Bio.Seq import Seq
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input_file",
help="Input file with sequences")
parser.add_argument("-c", "--symbol_to_use", action="store", dest="char_to_use",
default="X",
help="Symbol to use to replace selenocystein. Default - 'X'")
parser.add_argument("-o", "--output", action="store", dest="output",
help="File to write output")
parser.add_argument("-f", "--format", action="store", dest="format", default="fasta",
help="Format of input and output files. Allowed formats genbank, fasta(default)")
args = parser.parse_args()
tmp_index_file = "temp.idx"
print("Parsing %s..." % args.input_file)
sequence_dict = SeqIO.index_db(tmp_index_file, args.input_file, format=args.format)
def record_with_replacenment_generator(sequence_dict):
for record_id in sequence_dict:
new_record = deepcopy(sequence_dict[record_id])
new_record.seq = Seq(str(sequence_dict[record_id].seq).replace("U", args.char_to_use).replace("u", args.char_to_use))
yield new_record
SeqIO.write(record_with_replacenment_generator(sequence_dict), args.output, args.format)
os.remove(tmp_index_file)
| 33.952381
| 125
| 0.691445
| 0
| 0
| 297
| 0.208275
| 0
| 0
| 0
| 0
| 395
| 0.276999
|
e626e6e7f40b567d4b7615f9b578110b40aa795b
| 438
|
py
|
Python
|
Aulas Gustavo Guanabara/Aula018.1.py
|
RobertoRanulfo/Phyton
|
d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01
|
[
"MIT"
] | null | null | null |
Aulas Gustavo Guanabara/Aula018.1.py
|
RobertoRanulfo/Phyton
|
d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01
|
[
"MIT"
] | null | null | null |
Aulas Gustavo Guanabara/Aula018.1.py
|
RobertoRanulfo/Phyton
|
d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01
|
[
"MIT"
] | null | null | null |
teste = list()
teste.append('Gustavo')
teste.append(40)
galera = []
galera.append(teste)  # in this case we are creating a link between the two lists
teste[0] = 'Maria'
teste[1] = 22
galera.append(teste)
print(teste)
print(galera)  # Here the elements did not accumulate, because what was appended was not a copy of the list's elements
# but a reference that mirrors the list... so it was appended one more time in whatever state it was in
| 39.818182
| 112
| 0.730594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.616253
|
e62704d640c5f34b51dc4894e557ad6bcb2ec7d5
| 1,637
|
py
|
Python
|
ad_hoc/name_mapper_backfill.py
|
Connor-R/NSBL
|
16615990d058d171fab4790f937846fd1f0b2ee9
|
[
"MIT"
] | 1
|
2020-11-19T23:20:19.000Z
|
2020-11-19T23:20:19.000Z
|
ad_hoc/name_mapper_backfill.py
|
Connor-R/NSBL
|
16615990d058d171fab4790f937846fd1f0b2ee9
|
[
"MIT"
] | null | null | null |
ad_hoc/name_mapper_backfill.py
|
Connor-R/NSBL
|
16615990d058d171fab4790f937846fd1f0b2ee9
|
[
"MIT"
] | null | null | null |
from py_db import db
import NSBL_helpers as helper
db = db("NSBL")
table_dict = {"register_batting_analytical": "a.player_name"
, "register_batting_primary": "a.player_name"
, "register_batting_secondary": "a.player_name"
, "register_batting_splits": "a.player_name"
, "register_pitching_analytical": "a.player_name"
, "register_pitching_primary": "a.player_name"
, "register_pitching_rates_relief": "a.player_name"
, "register_pitching_rates_start": "a.player_name"
, "register_pitching_secondary": "a.player_name"
, "zips_defense": "a.player_name"
, "zips_fangraphs_batters_counting": "a.Player"
, "zips_fangraphs_batters_rate": "a.Player"
, "zips_fangraphs_pitchers_counting": "a.Player"
, "zips_fangraphs_pitchers_rate": "a.Player"
, "zips_offense": "a.player_name"
, "zips_offense_splits": "a.player_name"
, "zips_pitching": "a.player_name"
, "zips_pitching_splits": "a.player_name"
, "mlb_prospects.fg_raw": "a.playerName"
, "mlb_prospects.minorleagueball_professional": "a.full_name"
, "mlb_prospects.mlb_prospects_draft": "CONCAT(a.fname, ' ', a.lname)"
, "mlb_prospects.mlb_prospects_international": "CONCAT(a.fname, ' ', a.lname)"
, "mlb_prospects.mlb_prospects_professional": "CONCAT(a.fname, ' ', a.lname)"
}
for k,v in table_dict.items():
print k
qry = """
SELECT DISTINCT %s
FROM %s a
LEFT JOIN name_mapper nm ON (%s = nm.wrong_name)
WHERE 1
AND nm.wrong_name IS NULL
""" % (v, k, v)
# raw_input(qry)
names = db.query(qry)
for name in names:
helper.input_name(name[0])
| 32.098039
| 82
| 0.681735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,194
| 0.729383
|
e628182131b1688593a8c2682f0d77aa16ecd697
| 1,287
|
py
|
Python
|
camcommander/watcher.py
|
tparker-usgs/camcommander
|
0e508a1b24cc99496745652e52118000470d7e32
|
[
"CC0-1.0"
] | null | null | null |
camcommander/watcher.py
|
tparker-usgs/camcommander
|
0e508a1b24cc99496745652e52118000470d7e32
|
[
"CC0-1.0"
] | null | null | null |
camcommander/watcher.py
|
tparker-usgs/camcommander
|
0e508a1b24cc99496745652e52118000470d7e32
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
#
# I waive copyright and related rights in the this work worldwide
# through the CC0 1.0 Universal public domain dedication.
# https://creativecommons.org/publicdomain/zero/1.0/legalcode
#
# Author(s):
# Tom Parker <tparker@usgs.gov>
""" watch for new webcam images."""
import zmq
import tomputils.util as tutil
class Watcher:
def __init__(self, config, proxy_frontend, context=None):
global logger
logger = tutil.setup_logging("watcher errors")
self.config = config
self.context = context or zmq.Context().instance()
self.socket = self.context.socket(zmq.SUB)
self.socket.connect(proxy_frontend)
def watch(self):
pass
def watcher_factory(config, proxy_frontend):
if config["type"] == "console":
msg = "Creating %s watcher %s."
logger.debug(msg.format(config["name"], config["type"]))
return ConsoleWatcher(config, proxy_frontend)
else:
error_msg = "Unkown watcher type %s for source %s"
tutil.exit_with_error(error_msg.format(config["type"], config["name"]))
class ConsoleWatcher(Watcher):
def watch(self):
run = True
while run:
image = self.socket.recv()
logger.info("New Image: %s", image)
| 26.8125
| 79
| 0.655012
| 548
| 0.425796
| 0
| 0
| 0
| 0
| 0
| 0
| 420
| 0.32634
|
e629f1eb273463da4f3c8be6f4e44ca1b639ae9f
| 1,866
|
py
|
Python
|
Filter/kalman_filter.py
|
KNakane/filter
|
43ece9771003b63b477499dab2eb8d69e5bfdabe
|
[
"MIT"
] | null | null | null |
Filter/kalman_filter.py
|
KNakane/filter
|
43ece9771003b63b477499dab2eb8d69e5bfdabe
|
[
"MIT"
] | null | null | null |
Filter/kalman_filter.py
|
KNakane/filter
|
43ece9771003b63b477499dab2eb8d69e5bfdabe
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
class KalmanFilter():
def __init__(self, data, dim=1):
self.data = data.values
self.timelength = len(self.data)
        # Latent (state) variables
self.x = np.zeros((self.timelength+1, dim))
self.x_filter = np.zeros((self.timelength+1, dim))
        # Covariance matrices
self.sigma = np.zeros((self.timelength+1, dim))
self.sigma_filter = np.zeros((self.timelength+1, dim))
        # State transition matrix
self.A = np.ones(dim)
        # Observation matrix
self.C = np.ones(dim)
        # Noise
self.Q = 1.0
self.R = 1.0
self.W = np.random.normal(loc=0, scale=self.Q, size=self.x.shape)
self.V = np.random.normal(loc=0, scale=self.R, size=self.x.shape)
def __call__(self):
#for t in tqdm(range(self.timelength-1)):
for t in (range(self.timelength-1)):
            # State prediction
self.x[t+1] = self.A * self.x[t] + self.W[t]
self.sigma[t+1] = self.Q + self.A * self.sigma[t] * self.A.T
            # Update step
#Kalman_gain = self.sigma[t+1] * self.C.T * (self.C * self.sigma[t+1] * self.sigma[t+1].T + self.R).T
Kalman_gain = self.sigma[t+1] / (self.sigma[t+1] + self.R)
self.x_filter[t+1] = self.x[t+1] + Kalman_gain * (self.data[t+1] - self.C * self.x[t+1])
self.sigma_filter[t+1] = self.sigma[t+1] - Kalman_gain * self.C * self.sigma[t+1]
self.draw_graph()
return
def draw_graph(self):
        # Plot the results
plt.figure(figsize=(16,8))
plt.plot(range(self.timelength), self.data, label='Grand Truth')
plt.plot(range(self.timelength), self.x_filter[:-1], "g", label='Prediction')
plt.legend()
plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.95)
plt.savefig('./Kalman_filter.png')
return
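# Hedged usage sketch (illustrative; not part of the original source): filter a
# noisy random-walk series. pandas is assumed to be available because the
# constructor reads `data.values`; the call writes ./Kalman_filter.png.
def _demo_kalman_filter():
    import pandas as pd
    truth = np.cumsum(np.random.normal(size=200))
    observations = pd.Series(truth + np.random.normal(scale=1.0, size=200))
    kalman = KalmanFilter(observations, dim=1)
    kalman()  # runs the filter and saves the comparison plot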
| 33.927273
| 113
| 0.559486
| 1,859
| 0.96122
| 0
| 0
| 0
| 0
| 0
| 0
| 309
| 0.159772
|
e62b0481e9ee04d621f3915eddb5dfd2397e270a
| 4,394
|
py
|
Python
|
mwarp1d/ui/figures/artists/draggable_points.py
|
0todd0000/mwarp1d
|
7b40a47e6c112a8da5a1b67aff890fc77fe83d71
|
[
"MIT"
] | null | null | null |
mwarp1d/ui/figures/artists/draggable_points.py
|
0todd0000/mwarp1d
|
7b40a47e6c112a8da5a1b67aff890fc77fe83d71
|
[
"MIT"
] | 6
|
2019-11-25T08:15:05.000Z
|
2020-02-07T13:05:59.000Z
|
mwarp1d/ui/figures/artists/draggable_points.py
|
0todd0000/mwarp1d
|
7b40a47e6c112a8da5a1b67aff890fc77fe83d71
|
[
"MIT"
] | 2
|
2019-11-28T02:58:14.000Z
|
2019-12-18T11:45:33.000Z
|
from PyQt5 import QtWidgets, QtCore
from math import floor
import numpy as np
from . _base import _SelectableArtist2D
class _DraggablePoints(_SelectableArtist2D):
dragged = QtCore.pyqtSignal(object, int, int, float)
dragging_stopped = QtCore.pyqtSignal()
point_added = QtCore.pyqtSignal(int, int)
point_deleted = QtCore.pyqtSignal(int)
point_delete_failed = QtCore.pyqtSignal()
maxpointsreached = QtCore.pyqtSignal(int)
color_active = 0.98, 0.7, 0.3
color_inactive = '0.7'
dragging_enabled = True
dragging = False
# n = 0 #number of points
nmax = 8 #maximum number of points
selected_ind = None
xminmax = None
def __init__(self, ax, x, y_constraint=None, collection=None):
super().__init__(ax, collection)
self.Q = y_constraint.size
# self.n = len(x)
self.h = self.ax.plot(x, y_constraint[x], 'o', ms=8, color=self.color_active, markeredgecolor='w', zorder=self.zorder)[0]
self.y_constraint = y_constraint
self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.ax.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
@property
def n(self):
return self.h.get_xdata().size
@property
def values(self):
return self.h.get_xdata()
def add_point(self, x):
if self.n < self.nmax:
y = self.y_constraint[x]
x0,y0 = self.get_point_coordinates()
x0,y0 = np.append(x0, x), np.append(y0, y)
ind = np.argsort(x0)
self.set_point_coordinates(x0[ind], y0[ind])
# self.n += 1
self.ax.figure.canvas.draw()
col = x0[ind].tolist().index(x)
self.point_added.emit(col, x)
else:
self.maxpointsreached.emit(self.nmax)
def delete_point(self, ind):
deleted = False
if self.n > 1:
x,y = self.get_point_coordinates()
x = np.hstack((x[:ind], x[ind+1:]))
y = np.hstack((y[:ind], y[ind+1:]))
self.set_point_coordinates(x, y)
deleted = True
self.point_deleted.emit(ind)
self.ax.figure.canvas.draw()
else:
self.point_delete_failed.emit()
return deleted
def get_point_coordinates(self):
x,y = self.h.get_xdata(), self.h.get_ydata()
return x,y
def get_previous_point(self, ind):
return None if (ind==0) else (ind-1)
def get_previous_x(self, ind0):
ind = self.get_previous_point(ind0)
return None if (ind is None) else self.h.get_xdata()[ind]
def get_next_point(self, ind):
return None if (ind==(self.n-1)) else (ind+1)
def get_next_x(self, ind0):
ind = self.get_next_point(ind0)
return None if (ind is None) else self.h.get_xdata()[ind]
def get_xminmax(self, ind):
x0,x1 = self.get_previous_x(ind), self.get_next_x(ind)
x0 = 2 if (x0 is None) else x0+2
x1 = self.Q-3 if (x1 is None) else x1-2
return x0,x1
def on_motion(self, event):
if event.inaxes:
# # self.crosshairs.update(x, y)
if self.dragging_enabled and self.dragging:
ind = self.selected_ind
x = floor(event.xdata)
x0,x1 = self.xminmax
x = min(x1, max(x0, x))
y = self.y_constraint[x]
self.set_data(ind, x, y)
self.dragged.emit(self, ind, x, y)
def on_selected(self, ind, distance):
super().on_selected(ind, distance)
self.dragging = True
self.selected_ind = ind
self.xminmax = self.get_xminmax(ind)
def on_release(self, event):
self.dragging_stopped.emit()
self.dragging = False
self.selected_ind = None
self.xminmax = None
def set_active(self, active):
super().set_active(active)
self.isselectable = active
def set_all_xdata(self, x):
self.h.set_xdata(x)
self.h.set_ydata( self.y_constraint[x] )
def set_data(self, ind, xnew, ynew):
x,y = self.h.get_xdata(), self.h.get_ydata()
x[ind] = xnew
y[ind] = ynew
self.h.set_xdata(x)
self.h.set_ydata(y)
def set_dragging_enabled(self, enabled):
self.dragging_enabled = enabled
def set_point_coordinates(self, x, y):
self.h.set_xdata(x)
self.h.set_ydata(y)
class SourceLandmarks(_DraggablePoints):
color_active = 0.98, 0.7, 0.3
zorder = 1
def set_active(self, active):
super().set_active(active)
self.h.set_visible(active)
class TemplateLandmarks(_DraggablePoints):
color_active = 0.3, 0.3, 0.98
zorder = 3
| 24.824859
| 134
| 0.649067
| 4,259
| 0.969276
| 0
| 0
| 112
| 0.025489
| 0
| 0
| 195
| 0.044379
|
e62bee983944925691e81c42d718cf0680c6b087
| 7,370
|
py
|
Python
|
convert/tartan_air_to_benchmark.py
|
AaltoML/vio_benchmark
|
cb2277026f824f88f3bc131057ebc687cb19d648
|
[
"Apache-2.0"
] | 32
|
2021-04-23T15:07:04.000Z
|
2022-03-30T08:04:28.000Z
|
convert/tartan_air_to_benchmark.py
|
AaltoML/vio_benchmark
|
cb2277026f824f88f3bc131057ebc687cb19d648
|
[
"Apache-2.0"
] | 3
|
2021-02-10T18:54:06.000Z
|
2022-03-12T16:58:19.000Z
|
convert/tartan_air_to_benchmark.py
|
AaltoML/vio_benchmark
|
cb2277026f824f88f3bc131057ebc687cb19d648
|
[
"Apache-2.0"
] | 4
|
2021-02-08T11:11:09.000Z
|
2022-03-15T12:45:05.000Z
|
#!/usr/bin/env python
#
# Download and convert TartanAir data <https://theairlab.org/tartanair-dataset/>.
#
# NOTE The whole dataset is several terabytes, so be sure to tune the `LEVELS` and
# `DATASETS` variables before running.
#
# It is recommended to install "AzCopy", an official tool for Azure, to get tolerable
# download speeds (pass `--azcopy` flag to enable).
#
# NOTE At the time of writing the data does not include simulated IMU samples.
import argparse
import csv
import json
import os
from pathlib import Path
import subprocess
from tartan_air_transformations import fixTartan
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--azcopy', action='store_true', default=False, help='download the data with AzCopy')
args = parser.parse_args()
# Since the downloads can be slow, an option to leave the downloaded zip files in the RAW directory.
BACKUP_ZIPS = False
RAW = "data/raw/tartan-air"
OUT = "data/benchmark/tartan-air"
# <https://github.com/castacks/tartanair_tools/blob/master/download_training_zipfiles.txt>
RELEASE = "https://tartanair.blob.core.windows.net/tartanair-release1"
LEVELS = ["Easy", "Hard"]
DATASETS = [
"abandonedfactory",
"abandonedfactory_night",
"amusement",
"carwelding",
"endofworld",
"gascola",
"hospital",
"japanesealley",
"neighborhood",
"ocean",
"office",
"office2",
"oldtown",
"seasidetown",
"seasonsforest",
"seasonsforest_winter",
"soulcity",
"westerndesert",
]
DOWNLOAD_CMD = "wget -O"
UNZIP_CMD = "unzip -o -d"
# The data doesn't have time information of any sort,
# so pick something that makes the videos run at a pleasant speed.
FPS = 10
def runCmd(cmd):
print("Running command:", cmd)
os.system(cmd)
def convertVideo(files, output):
# Use `-crf 0` for lossless compression.
subprocess.run(["ffmpeg",
"-y",
"-r", str(FPS),
"-f", "image2",
"-pattern_type", "glob", "-i", files,
"-c:v", "libx264",
"-preset", "ultrafast",
# "-preset", "veryslow",
"-crf", "0",
"-vf", "format=yuv420p",
"-an",
output])
def getExtractedPath(dataset, level):
# For some reason `dataset` is duplicated in the zip hierarchy.
return "{}/{}/{}/{}".format(RAW, dataset, dataset, level)
def download(dataset, level):
extractedPath = getExtractedPath(dataset, level)
if os.path.isdir(extractedPath):
print(extractedPath, "already exists, skipping.")
return
outPath = RAW
Path(outPath).mkdir(parents=True, exist_ok=True)
for d in ["image_left", "image_right"]:
url = "{}/{}/{}/{}.zip".format(RELEASE, dataset, level, d)
z = "{}/{}.zip".format(outPath, d)
if args.azcopy:
cmd = "azcopy copy {} {}".format(url, z)
runCmd(cmd)
else:
cmd = "{} {} {}".format(DOWNLOAD_CMD, z, url)
runCmd(cmd)
cmd = "{} {} {}".format(UNZIP_CMD, outPath, z)
runCmd(cmd)
src = "{}/{}.zip".format(outPath, d)
if BACKUP_ZIPS:
name = "{}-{}-{}".format(dataset, level, d)
dst = "{}/{}.zip".format(outPath, name)
os.rename(src, dst)
else:
os.remove(src)
def convert_sequence(fullPath, sequence, dataset, level):
datasetOut = "{}/{}-{}".format(dataset, level.lower(), sequence)
outPath = "{}/{}".format(OUT, datasetOut)
Path(outPath).mkdir(parents=True, exist_ok=True)
convertVideo("{}/image_left/*.png".format(fullPath), "{}/data.mp4".format(outPath))
convertVideo("{}/image_right/*.png".format(fullPath), "{}/data2.mp4".format(outPath))
output = []
number = 0
time = 0.0
dt = 1.0 / FPS
p0 = [None, None, None]
# We define ground truth as pose of the left camera.
with open("{}/pose_left.txt".format(fullPath)) as f:
# format: tx ty tz qx qy qz qw
csvRows = csv.reader(f, delimiter=' ')
rows = []
for row in csvRows:
rows.append(row)
# The general coordinate transformation has the form
# M -> W*M*L, where M = M(p, q)
# The W and L matrices were found by experimentation starting with transforms
# in `ned2cam()` function in the TartanAir repository's scripts.
W = np.array([
[0,1,0,0],
[1,0,0,0],
[0,0,-1,0],
[0,0,0,1]], dtype=np.float32)
L = np.array([
[0,0,1,0],
[1,0,0,0],
[0,1,0,0],
[0,0,0,1]], dtype=np.float32)
fixedRows = fixTartan(W, L, rows)
for row in fixedRows:
if not p0[0]:
p0 = [row[0], row[1], row[2]]
p = [row[0] - p0[0], row[1] - p0[1], row[2] - p0[2]]
q = [row[6], row[3], row[4], row[5]] # wxyz
gt = {
"groundTruth": {
"position": {
"x": p[0], "y": p[1], "z": p[2]
},
"orientation": {
"w": q[0], "x": q[1], "y": q[2], "z": q[3]
}
},
"time": time
}
frame = {
"number": number,
"time": time,
"frames": [
{"cameraInd": 0, "time": time},
{"cameraInd": 1, "time": time},
],
}
output.append(gt)
output.append(frame)
time += dt
number += 1
# Write JSONL
with open(outPath + "/data.jsonl", "w") as f:
for obj in output:
f.write(json.dumps(obj, separators=(',', ':')))
f.write("\n")
# Write parameters
with open(outPath + "/parameters.txt", "w") as f:
# <https://github.com/castacks/tartanair_tools/blob/master/data_type.md>
fx = 320
fy = 320
cx = 320
cy = 240
f.write("focalLengthX {}; focalLengthY {};\nprincipalPointX {}; principalPointY {};\n".format(fx, fy, cx, cy))
f.write("secondFocalLengthX {}; secondFocalLengthY {};\nsecondPrincipalPointX {}; secondPrincipalPointY {};\n".format(fx, fy, cx, cy))
f.write("rot 0;\n")
# Define the (non-existent) IMU to have the same pose as the left camera.
for cam in [0, 1]:
columnMajor = []
for i in [0, 1, 2, 3]:
for j in [0, 1, 2, 3]:
if cam == 1 and i == 3 and j == 0:
num = "-0.25" # baseline
elif i == j:
num = "1"
else:
num = "0"
columnMajor.append(num)
f.write("{} {};\n".format(
"imuToCameraMatrix" if cam == 0 else "secondImuToCameraMatrix",
",".join(columnMajor)))
def convert(dataset, level):
extractedPath = getExtractedPath(dataset, level)
folders = [ (f.path, f.name) for f in os.scandir(extractedPath) if f.is_dir() ]
folders.sort()
for fullPath, sequence in folders:
convert_sequence(fullPath, sequence, dataset, level)
def main():
for dataset in DATASETS:
for l in LEVELS:
download(dataset, l)
convert(dataset, l)
if __name__ == "__main__":
main()
| 31.767241
| 142
| 0.53867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,655
| 0.360244
|
e62c2f0e0a2aa9f2cc633c9f3f0f670db80af86f
| 24,534
|
py
|
Python
|
test/test_tsdb.py
|
eneelo/qats
|
9280e2487bde97874cc8857b2780ac830323f363
|
[
"MIT"
] | null | null | null |
test/test_tsdb.py
|
eneelo/qats
|
9280e2487bde97874cc8857b2780ac830323f363
|
[
"MIT"
] | null | null | null |
test/test_tsdb.py
|
eneelo/qats
|
9280e2487bde97874cc8857b2780ac830323f363
|
[
"MIT"
] | 1
|
2020-10-29T13:40:47.000Z
|
2020-10-29T13:40:47.000Z
|
# -*- coding: utf-8 -*-
"""
Module for testing TsDB class
"""
from qats import TimeSeries, TsDB
import unittest
import os
import numpy as np
import sys
# todo: add tests for listing subset(s) based on specifying parameter `names` (with and wo param. `keys`)
# todo: add test for getm() with fullkey=False (similar to test_get_many_correct_key, but with shorter key)
class TestTsDB(unittest.TestCase):
def setUp(self):
self.db = TsDB()
# the data directory used in the test relative to this module
# necessary to do it like this for the tests to work both locally and in virtual env for conda build
self.data_directory = os.path.join(os.path.dirname(__file__), '..', 'data')
def test_exception_load_numeric(self):
try:
self.db.load(223334) # numeric values should throw an exception
except TypeError:
pass
else:
self.fail("Did not throw exception on numeric file name")
def test_exception_load_dict(self):
try:
self.db.load({}) # dictionary should throw an exception
except TypeError:
pass
else:
self.fail("Did not throw exception on dictionary of file names.")
def test_exception_load_directory(self):
try:
self.db.load(self.data_directory)
except FileExistsError:
pass
else:
self.fail("Did not throw exception when trying to load a directory.")
def test_exception_load_nonexistingfile(self):
try:
self.db.load(os.path.join(self.data_directory, 'donotexist.ts'))
except FileExistsError:
pass
else:
self.fail("Did not throw exception when trying to load a non-existing file.")
def test_exception_load_unsupportedfile(self):
try:
self.db.load(os.path.join(self.data_directory, 'unsupportedfile.out'))
except NotImplementedError:
pass
else:
self.fail("Did not throw exception when trying to load a file type which is not yet supported.")
def test_list_all(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
k = self.db.list(display=False)
self.assertEqual(14, len(k), "Deviating number of listed keys = %d" % len(k))
def test_list_subset(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
k = self.db.list(names="Mooring line*", display=False)
self.assertEqual(8, len(k), "Deviating number of listed keys = %d" % len(k))
def test_list_subset_misc_criteria(self):
for tsfile in ('mooring.ts', 'simo_p.ts'):
self.db.load(os.path.join(self.data_directory, tsfile))
# test 1
k = self.db.list(names="Tension*", display=False)
self.assertEqual(10, len(k), "Deviating number of listed keys = %d" % len(k))
# test 2
k = self.db.list(names="simo_p.ts*line*", display=False)
self.assertEqual(2, len(k), "Deviating number of listed keys = %d" % len(k))
def test_list_subset_keep_specified_order(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
names_reversed = list(reversed([os.path.basename(k) for k in self.db.register_keys]))
namelist = [os.path.basename(_) for _ in self.db.list(names=names_reversed)]
self.assertEqual(names_reversed, namelist, "Failed to keep specified order")
def test_list_subset_special_characters(self):
self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
# should return exactly one key
self.assertEqual(1, len(self.db.list(names="RW1[m]")), "TsDB.list() returned wrong number of keys")
def test_list_subset_special_characters_2(self):
self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
# should return exactly one key
self.assertEqual(1, len(self.db.list(names="Acc-X[m/s^2]")), "TsDB.list() returned wrong number of keys")
def test_list_prepended_wildcard_1_3(self):
"""
Test that wildcard is prepended in a reasonable manner. Test cases:
1. Specifying 'XG' should not return 'vel_XG'
2. Specifying '*XG' should return both 'XG' and 'vel_XG'
3. Specifying full key should be possible
4. If multiple files are loaded, specifying 'XG' should return all occurrences (across files)
The first three are tested here, while the fourth is tested in `test_list_prepended_wildcard_4()`
"""
path = os.path.join(self.data_directory, 'simo_r1.ts')
db = self.db
db.load(path)
k1 = db.list(names="XG") # should return 1 key
k2 = db.list(names="*XG") # should return 2 keys
k3 = db.list(names=os.path.abspath(os.path.join(path, "XG"))) # should return 1 key
# test of the cases described in docstring
self.assertEqual(len(k1), 1, "TsDB.list() failed to return correct number of keys for names='XG'")
self.assertEqual(len(k2), 2, "TsDB.list() failed to return correct number of keys for names='*XG'")
self.assertEqual(len(k3), 1, "TsDB.list() failed to return correct number of keys when specifying full path")
def test_list_prepended_wildcard_4(self):
"""
See description of `test_list_prepended_wildcard_1_3()`
"""
db = self.db
db.load(os.path.join(self.data_directory, 'simo_r1.ts'))
db.load(os.path.join(self.data_directory, 'simo_r2.ts'))
k1 = db.list(names="XG") # should return 2 keys
k2 = db.list(names="*XG") # should return 4 keys
# test of the cases described in docstring
self.assertEqual(len(k1), 2, "TsDB.list() failed to return correct number of keys for names='XG'")
self.assertEqual(len(k2), 4, "TsDB.list() failed to return correct number of keys for names='*XG'")
def test_clear_all(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
self.db.clear(display=False)
k = self.db.list(display=False)
self.assertEqual([], k, "Did not clear all registered keys.")
def test_clear_subset(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
self.db.clear(names="*Mooring line*", display=False)
k = self.db.list(display=False)
self.assertEqual(6, len(k), "Did not clear subset of registered keys correctly. %d keys remaining" % len(k))
def test_getda_correct_key(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
rk = self.db.list(names="Heave", display=False)
container = self.db.getda(names="Heave", fullkey=True)
        self.assertEqual(rk, list(container.keys()), "db list() and getda() return different keys.")
def test_getda_correct_number_of_arrays(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
rk = self.db.list(names="Heave", display=False) # should be only 1 key returned in this case
container = self.db.getda(names="Heave", fullkey=True)
        self.assertEqual(2, len(container[rk[0]]), "Did not get exactly 2 arrays (time and data) in return from getda().")
def test_gets_none(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
container = self.db.getda(names=[])
n = len(container)
self.assertEqual(0, n, "Should have received empty container (OrderedDict) from getda()")
def test_getl_correct_key(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
rk = self.db.list(names="Heave", display=False, relative=True)
tslist = self.db.getl(names="Heave")
        self.assertEqual(rk, [ts.name for ts in tslist], "db list() and getl() return different keys.")
def test_getm_correct_key(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
rk = self.db.list(names="Heave", display=False)
container = self.db.getm(names="Heave", fullkey=True)
        self.assertEqual(rk, list(container.keys()), "db list() and getm() return different keys.")
def test_getm_correct_key_by_ind(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
rk = self.db.list(names="Heave", display=False)
container = self.db.getm(ind=2, fullkey=True)
        self.assertEqual(rk, list(container.keys()), "db list() and getm() return different keys.")
def test_getd_equals_getm(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
container1 = self.db.getm(names="*", fullkey=True)
container2 = self.db.getd(names="*", fullkey=True)
for name, ts in container1.items():
self.assertTrue(name in container2 and container2[name] is container1[name],
"container returned by getd is not identical to container returned by getm")
def test_geta(self):
tsfile = os.path.join(self.data_directory, 'simo_p.ts')
self.db.load(tsfile)
tsname = "Tension_2_qs"
keys = self.db.list(names=tsname, display=False)
_, data1 = self.db.geta(name=keys[0])
# test 1: geta() when ts is already loaded
_, data2 = self.db.geta(name=tsname)
        self.assertTrue(np.array_equal(data1, data2), "Did not get correct time series data using geta() (ts pre-loaded)")
# test 2: geta() when ts is not already loaded
db2 = TsDB()
db2.load(tsfile)
_, data3 = db2.geta(name=tsname)
        self.assertTrue(np.array_equal(data1, data3), "Did not get correct time series data using geta() (ts not pre-loaded)")
def test_get_by_name(self):
tsfile = os.path.join(self.data_directory, 'simo_p.ts')
self.db.load(tsfile)
tsname = "Tension_2_qs"
keys = self.db.list(names=tsname, display=False)
key = keys[0]
ts1 = self.db.getm(names=key, fullkey=True)[key]
        # test 1: get() when ts is already loaded
        ts2 = self.db.get(name=tsname)
        self.assertIs(ts1, ts2, "Did not get correct TimeSeries using get() (ts pre-loaded)")
        # test 2: get() when ts is not already loaded
        db2 = TsDB.fromfile(tsfile)
        ts3 = db2.get(name=tsname)
        self.assertTrue(np.array_equal(ts1.x, ts3.x), "Did not get correct TimeSeries using get() (ts not pre-loaded)")
def test_get_by_index(self):
tsfile = os.path.join(self.data_directory, 'simo_p.ts')
self.db.load(tsfile)
tsname = "Tension_2_qs"
key = self.db.list(names=tsname, display=False)[0]
ts1 = self.db.get(name=tsname)
ind = self.db.register_keys.index(key)
        # test 1: get() using index when ts is already loaded
        ts2 = self.db.get(ind=ind)
        self.assertIs(ts1, ts2, "Did not get correct TimeSeries using get() and specifying index (ts pre-loaded)")
        # test 2: get() using index when ts is not already loaded
        db2 = TsDB.fromfile(tsfile)
        ts3 = db2.get(ind=ind)
        self.assertTrue(np.array_equal(ts1.x, ts3.x), "Did not get correct TimeSeries using get() and specifying index (ts not pre-loaded)")
def test_get_by_index_0(self):
""" Should not fail when index 0 is specified """
tsfile = os.path.join(self.data_directory, 'simo_p.ts')
self.db.load(tsfile)
_ = self.db.get(ind=0)
# should not fail
def test_get_exceptions(self):
self.db.load(os.path.join(self.data_directory, 'simo_p.ts'))
# test 1: no match
try:
_ = self.db.geta(name="nonexisting_key")
except LookupError:
pass
else:
self.fail("Did not raise LookupError when no match was found")
# test 2: more than one match
try:
_ = self.db.geta(name="Tension*")
except ValueError:
pass
else:
self.fail("Did not raise ValueError when multiple matches were found")
def test_get_correct_number_of_timesteps(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
rk = self.db.list(names="Heave", display=False) # should be only 1 key returned in this case
container = self.db.getda(names="Heave", fullkey=True)
self.assertEqual(65536, len(container[rk[0]][0]), "Deviating number of time steps.")
def test_add_raises_keyerror_on_nonunique_key(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
container = self.db.getm(names="Surge", fullkey=True)
for k, v in container.items():
try:
self.db.add(v)
except KeyError:
pass
else:
self.fail("Did not raise KeyError when trying to add time series with non-unique name to db.")
def test_add_does_not_raise_error(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
ts = TimeSeries("quiteuniquekeyiguess", np.arange(0., 100., 0.01), np.sin(np.arange(0., 100., 0.01)))
self.db.add(ts)
# should not raise errors
def test_rename(self):
tsfile = os.path.abspath(os.path.join(self.data_directory, 'simo_p.ts'))
self.db.load(tsfile)
oldname = "Tension_2_qs"
newname = "mooringline"
#
oldkey = os.path.join(tsfile, oldname)
newkey = os.path.join(tsfile, newname)
# get data before rename()
_, data1 = self.db.geta(name=oldname)
parent1 = self.db.register_parent[oldkey]
index1 = self.db.register_indices[oldkey]
# rename
self.db.rename(oldname, newname)
# get data after rename()
_, data2 = self.db.geta(name=newname)
parent2 = self.db.register_parent[newkey]
index2 = self.db.register_indices[newkey]
# checks
self.assertTrue(newkey in self.db.register_keys, "register_keys not updated by rename()")
self.assertEqual(parent1, parent2, "register_parent not correctly updated")
self.assertEqual(index1, index2, "register_indices not correctly updated")
self.assertTrue(np.array_equal(data1, data2), "register not correctly updated")
    def test_rename_exception(self):
tsfile = os.path.join(self.data_directory, 'simo_p.ts')
self.db.load(tsfile)
oldname = "Tension_2_qs"
newname = "Tension_3_qs"
try:
self.db.rename(oldname, newname)
except ValueError:
pass
else:
            self.fail("Did not throw ValueError when attempting to rename to a non-unique name.")
def test_maxima_minima(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
container = self.db.getm(names="Surge")
for k, ts in container.items():
_ = ts.maxima()
_, _ = ts.maxima(rettime=True)
_ = ts.minima()
_, _ = ts.minima(rettime=True)
        # currently only testing that no errors are thrown
def test_types_in_container_from_get_many(self):
"""
Test correct types
"""
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
container = self.db.getda(names="Surge")
for key, ts in container.items():
self.assertIsInstance(key, str, "Key should be type string.")
self.assertIsInstance(ts, tuple, "Time series container should be type tuple.")
self.assertIsInstance(ts[0], np.ndarray, "First item of time series container should be type numpy array.")
self.assertIsInstance(ts[1], np.ndarray, "Second item of time series container should be type numpy array.")
def test_types_in_container_from_get_many_ts(self):
"""
Test correct types
"""
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
container = self.db.getm(names="Surge")
for key, ts in container.items():
self.assertIsInstance(key, str, "Key should be type string.")
self.assertIsInstance(ts, TimeSeries, "Time series container should be type TimeSeries.")
self.assertIsInstance(ts.t, np.ndarray, "Attribute t of time series should be type numpy array.")
self.assertIsInstance(ts.x, np.ndarray, "Attribute x of time series should be type numpy array.")
def test_copy(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
name = "Surge"
ts1 = self.db.get(name=name)
db2 = self.db.copy()
ts2 = db2.get(name=name)
        self.assertIsNot(ts1, ts2, "Copy with shallow=False kept the TimeSeries bound to the source database")
        self.assertTrue(np.array_equal(ts1.x, ts2.x), "Copy returned a TimeSeries with a different value array")
def test_copy_shallow(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
name = "Surge"
ts1 = self.db.get(name=name)
db2 = self.db.copy(shallow=True)
ts2 = db2.get(name=name)
self.assertIs(ts1, ts2, "Copy with shallow=True did not return source instance")
def test_update(self):
pass
# todo: update db2 name and ts names
'''
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
n_before = self.db.n
db2 = TsDB()
db2.load(os.path.join(self.data_directory, ' ... '))
self.db.update(db2, names="*")
n_after = self.db.n
ts1 = self.db.get_ts(name="")
ts2 = db2.get_ts(name="")
self.assertEqual(n_before + 3, n_after, "Did not update with correct number of keys")
self.assertIsNot(ts1, ts2, "Update with shallow=False kept binding on ts to source database")
'''
def test_update_shallow(self):
pass
# todo: update db2 name and ts names
'''
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
n_before = self.db.n
db2 = TsDB()
db2.load(os.path.join(self.data_directory, '....ts'))
self.db.update(db2, names="JACKET*motion", shallow=True)
n_after = self.db.n
ts1 = self.db.get_ts(name="...")
ts2 = db2.get_ts(name="...")
self.assertEqual(n_before + 3, n_after, "Did not update with correct number of keys")
self.assertIs(ts1, ts2, "Update with shallow=True did not return source instance")
'''
def test_is_common_time_false(self):
pass
# todo: update db2 name and ts names
'''
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
self.db.load(os.path.join(self.data_directory, '....ts'))
names = "Surge", "..."
is_common = self.db.is_common_time(names=names)
self.assertFalse(is_common, "'is_common_time()' did not report False")
'''
def test_is_common_time_true(self):
pass
# todo: update db2 name and ts names
'''
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
self.db.load(os.path.join(self.data_directory, '....ts'))
names = "Surge", "Sway"
is_common = self.db.is_common_time(names=names)
self.assertTrue(is_common, "'is_common_time()' did not report True")
'''
def test_export_uncommon_timearray_error(self):
pass
# todo: update db2 name and ts names
'''
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
self.db.load(os.path.join(self.data_directory, '....ts'))
names = "Surge", "..."
keys = self.db.list(names=names, display=False)
fnout = os.path.join(self.data_directory, '_test_export.ts')
try:
self.db.export(fnout, keys=keys)
except ValueError:
pass
else:
# clean exported files (in the event is was exported though it should not)
os.remove(fnout)
os.remove(os.path.splitext(fnout)[0] + ".key")
self.fail("Did not throw exception when exporting un-common time arrays to .ts")
'''
def test_export(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
names = "Surge", "Sway"
keys = self.db.list(names=names, display=False)
fnout = os.path.join(self.data_directory, '_test_export.ts')
try:
# route screen dump from export to null
was_stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
# export, should not raise errors
self.db.export(fnout, names=keys)
finally:
# reset sys.stdout
sys.stdout = was_stdout
f.close()
# clean (remove exported files)
try:
os.remove(fnout)
os.remove(os.path.splitext(fnout)[0] + ".key")
except FileNotFoundError:
pass
# should not raise errors
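    # Note: the stdout redirection pattern used in the export tests could equivalently be
    # written with the standard-library context manager (illustrative sketch only, not
    # used by the original tests; it assumes `import contextlib` at module level):
    #
    #   with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
    #       self.db.export(fnout, names=keys)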
def test_export_reload(self):
self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
name = "Sway"
fnout = os.path.join(self.data_directory, '_test_export.ts')
try:
# route screen dump from export to null
was_stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
# export, should not raise errors
self.db.export(fnout, names=name)
finally:
# reset sys.stdout
sys.stdout = was_stdout
f.close()
# reload
db2 = TsDB()
db2.load(fnout)
# compare ts
ts1 = self.db.get(name=name)
ts2 = db2.get(name=name)
# clean exported files
try:
os.remove(fnout)
os.remove(os.path.splitext(fnout)[0] + ".key")
except FileNotFoundError:
pass
# check arrays
self.assertTrue(np.array_equal(ts1.x, ts2.x), "Export/reload did not yield same arrays")
def test_export_ascii(self):
self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
names = "WaveC[m]", "Wave-S[m]", "Surge[m]"
fnout = os.path.join(self.data_directory, '_test_export.dat')
try:
# route screen dump from export to null
was_stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
# export, should not raise errors
self.db.export(fnout, names=names, verbose=False)
finally:
# clean exported files and route screen dump back
os.remove(fnout)
sys.stdout = was_stdout
f.close()
# should not raise errors
def test_export_reload_ascii(self):
self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
name = "Wave-S[m]"
fnout = os.path.join(self.data_directory, '_test_export.dat')
try:
# route screen dump from export to null
was_stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
# export, should not raise errors
self.db.export(fnout, names=name)
finally:
sys.stdout = was_stdout
f.close()
# reload
db2 = TsDB()
db2.load(fnout)
# compare ts
ts1 = self.db.get(name=name)
ts2 = db2.get(name=name)
# clean exported files
os.remove(fnout)
# check arrays
np.testing.assert_array_almost_equal(ts1.x, ts2.x, 6, "Export/reload did not yield same arrays")
if __name__ == '__main__':
unittest.main()
| 44.36528
| 121
| 0.59815
| 24,094
| 0.982066
| 0
| 0
| 0
| 0
| 0
| 0
| 9,435
| 0.384568
|
e62dd8453d35731f8df986056643c1efd1e8ea57
| 1,257
|
py
|
Python
|
py/py_0105_special_subset_sums_testing.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0105_special_subset_sums_testing.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0105_special_subset_sums_testing.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
# Solution of;
# Project Euler Problem 105: Special subset sums: testing
# https://projecteuler.net/problem=105
#
# Let S(A) represent the sum of elements in set A of size n.
# We shall call it a special sum set if for any two non-empty disjoint
# subsets, B and C, the following properties are true:
#
# S(B) ≠ S(C); that is, sums of subsets cannot be equal.
# If B contains more elements than C then S(B) > S(C).
# For example, {81, 88, 75, 42, 87, 84, 86, 65} is not a special sum set
# because 65 + 87 + 88 = 75 + 81 + 84, whereas
# {157, 150, 164, 119, 79, 159, 161, 139, 158} satisfies both rules
# for all possible subset pair combinations and S(A) = 1286.
#
# Using sets.txt (right click and "Save Link/Target As..."), a 4K text file
# with one-hundred sets containing seven to twelve elements
# (the two examples given above are the first two sets in the file),
# identify all the special sum sets, A1, A2, ..., Ak,
# and find the value of S(A1) + S(A2) + ... + S(Ak).
# NOTE: This problem is related to Problem 103 and Problem 106.
#
# by lcsm29 http://github.com/lcsm29/project-euler
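# Worked check of the counterexample quoted above (illustrative arithmetic only):
# 65 + 87 + 88 = 240 and 75 + 81 + 84 = 240, so that set violates S(B) != S(C).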
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
    prob_id = 105
timed.caller(dummy, n, i, prob_id)
| 34.916667
| 76
| 0.67144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,095
| 0.869738
|
e62e5420e9280590cadcb41f39f8b617ff3cad05
| 3,673
|
py
|
Python
|
pyserver/item/attc/annotation.py
|
lbouma/Cyclopath
|
d09d927a1e6f9e07924007fd39e8e807cd9c0f8c
|
[
"Apache-2.0"
] | 15
|
2015-05-06T05:11:48.000Z
|
2021-12-03T14:56:58.000Z
|
pyserver/item/attc/annotation.py
|
landonb/Cyclopath
|
d09d927a1e6f9e07924007fd39e8e807cd9c0f8c
|
[
"Apache-2.0"
] | null | null | null |
pyserver/item/attc/annotation.py
|
landonb/Cyclopath
|
d09d927a1e6f9e07924007fd39e8e807cd9c0f8c
|
[
"Apache-2.0"
] | 8
|
2015-05-06T05:11:36.000Z
|
2020-11-04T05:11:22.000Z
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import conf
import g
from item import attachment
from item import item_base
from item import item_versioned
from item.util.item_type import Item_Type
from util_.streetaddress import ccp_stop_words
log = g.log.getLogger('annotation')
class One(attachment.One):
item_type_id = Item_Type.ANNOTATION
item_type_table = 'annotation'
item_gwis_abbrev = 'anno'
child_item_types = None
local_defns = [
# py/psql name, deft, send?, pkey?, pytyp, reqv
('comments', None, True, False, str, 2),
]
attr_defns = attachment.One.attr_defns + local_defns
psql_defns = attachment.One.psql_defns + local_defns
gwis_defns = item_base.One.attr_defns_reduce_for_gwis(attr_defns)
__slots__ = [] + [attr_defn[0] for attr_defn in local_defns]
# *** Constructor
def __init__(self, qb=None, row=None, req=None, copy_from=None):
g.assurt(copy_from is None) # Not supported for this class.
attachment.One.__init__(self, qb, row, req, copy_from)
# *** Saving to the Database
#
def save_core(self, qb):
attachment.One.save_core(self, qb)
# Save to the 'annotation' table.
self.save_insert(qb, One.item_type_table, One.psql_defns)
# ***
class Many(attachment.Many):
one_class = One
__slots__ = ()
sql_clauses_cols_all = attachment.Many.sql_clauses_cols_all.clone()
# FIXME: Maybe call a fcn. instead, like opt/argparse? Or does that
# just complicate things more?
#sqlc_all.inner.select_list("annot.comments")
sql_clauses_cols_all.inner.shared += (
"""
, annot.comments
"""
)
sql_clauses_cols_all.inner.join += (
"""
JOIN annotation AS annot
ON (gia.item_id = annot.system_id)
"""
)
sql_clauses_cols_all.outer.shared += (
"""
, group_item.comments
"""
)
# *** Constructor
def __init__(self):
attachment.Many.__init__(self)
# *** Query Builder routines
# FIXME [aa] Only get gf's whose username = '' or = [current_user]
# FIXME [aa] Security leak -- private annotations being sent to client
# 2012.04.02: Is this really still true??
# FIXME Send where feat_type != and append feat_type == region_watched
# FIXME [aa] Bug: Deleted and old version attachments being sent for no-diff
   # Is this a regression, or have annots always been fetched this way?
# SELECT DISTINCT
# lhs_stack_id AS id,
# version,
# comments
# FROM annotation_geo AS ag
# WHERE (ST_Intersects(ag.geometry,
# ST_SetSRID('BOX(479932.800000 4978592.800000,
# 482124.800000 4981408.800000)'::box2d,
# 26915)))
#
def sql_apply_query_filters(self, qb, where_clause="", conjunction=""):
g.assurt((not where_clause) and (not conjunction))
g.assurt((not conjunction) or (conjunction == "AND"))
where_clause = attachment.Many.sql_apply_query_filters(
self, qb, where_clause, conjunction)
return where_clause
#
def sql_apply_query_filter_by_text(self, qb, table_cols, stop_words,
use_outer=False):
table_cols.insert(0, 'annot.comments')
stop_words = ccp_stop_words.Addy_Stop_Words__Annotation
return attachment.Many.sql_apply_query_filter_by_text(
self, qb, table_cols, stop_words, use_outer)
# ***
# ***
| 30.355372
| 79
| 0.632181
| 3,314
| 0.90226
| 0
| 0
| 0
| 0
| 0
| 0
| 1,522
| 0.414375
|
e62e88ca85209412b46b34f3a3135f7a89043c82
| 183
|
py
|
Python
|
examples/sandbox/sandbox/__main__.py
|
salt-die/nurses_2
|
29b76c34b9a28bf7c115998f4e81979966c82df0
|
[
"MIT"
] | 171
|
2021-06-23T15:29:15.000Z
|
2022-03-25T18:53:10.000Z
|
examples/sandbox/sandbox/__main__.py
|
salt-die/nurses_2
|
29b76c34b9a28bf7c115998f4e81979966c82df0
|
[
"MIT"
] | 1
|
2022-01-07T05:08:35.000Z
|
2022-01-10T04:53:57.000Z
|
examples/sandbox/sandbox/__main__.py
|
salt-die/nurses_2
|
29b76c34b9a28bf7c115998f4e81979966c82df0
|
[
"MIT"
] | 3
|
2021-10-01T09:12:15.000Z
|
2022-01-14T21:31:11.000Z
|
from nurses_2.app import App
from .sandbox import Sandbox
class SandboxApp(App):
async def on_start(self):
self.add_widget(Sandbox(size=(31, 100)))
SandboxApp().run()
| 16.636364
| 48
| 0.704918
| 101
| 0.551913
| 0
| 0
| 0
| 0
| 74
| 0.404372
| 0
| 0
|
e630f7f1230425fb80852a1c185d9c2e86b9dabb
| 4,985
|
py
|
Python
|
midas2/common/bowtie2.py
|
czbiohub/microbiome-igg
|
fd4bc62bee15e53587a947ca32bf3c5b9e8022e6
|
[
"MIT"
] | null | null | null |
midas2/common/bowtie2.py
|
czbiohub/microbiome-igg
|
fd4bc62bee15e53587a947ca32bf3c5b9e8022e6
|
[
"MIT"
] | 6
|
2022-03-14T19:37:52.000Z
|
2022-03-14T19:51:47.000Z
|
midas2/common/bowtie2.py
|
czbiohub/microbiome-igg
|
fd4bc62bee15e53587a947ca32bf3c5b9e8022e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import numpy as np
from midas2.common.utils import tsprint, command, split, OutputStream
def bowtie2_index_exists(bt2_db_dir, bt2_db_name):
bt2_db_suffixes = ["1.bt2", "2.bt2", "3.bt2", "4.bt2", "rev.1.bt2", "rev.2.bt2"]
if all(os.path.exists(f"{bt2_db_dir}/{bt2_db_name}.{ext}") for ext in bt2_db_suffixes):
tsprint(f"Use existing Bowtie2 indexes {bt2_db_dir}/{bt2_db_name}")
return True
bt2_db_large_suffixes = ["1.bt2l", "2.bt2l", "3.bt2l", "4.bt2l", "rev.1.bt2l", "rev.2.bt2l"]
if all(os.path.exists(f"{bt2_db_dir}/{bt2_db_name}.{ext}") for ext in bt2_db_large_suffixes):
tsprint(f"Use existing large Bowtie2 indexes {bt2_db_dir}/{bt2_db_name}")
return True
return False
def build_bowtie2_db(bt2_db_dir, bt2_db_name, downloaded_files, num_cores):
""" Build Bowtie2 database for the collections of fasta files """
bt2_db_prefix = f"{bt2_db_dir}/{bt2_db_name}"
if not bowtie2_index_exists(bt2_db_dir, bt2_db_name):
# Primarily for build_bowtie2db.py
if not os.path.exists(bt2_db_dir):
tsprint(f"Create bt2_db_dir: {bt2_db_dir}")
command(f"mkdir -p {bt2_db_dir}")
        # Write the species ids used to build the bowtie2 indexes to a file
with OutputStream(f"{bt2_db_prefix}.species") as stream:
stream.write("\n".join(map(str, downloaded_files.keys())))
command(f"rm -f {bt2_db_dir}/{bt2_db_name}.fa", quiet=False)
command(f"touch {bt2_db_dir}/{bt2_db_name}.fa")
for files in split(downloaded_files.values(), 20): # keep "cat" commands short
command("cat " + " ".join(files) + f" >> {bt2_db_dir}/{bt2_db_name}.fa")
try:
command(f"bowtie2-build --threads {num_cores} {bt2_db_prefix}.fa {bt2_db_prefix} > {bt2_db_dir}/bt2-db-build-{bt2_db_name}.log", quiet=False)
except:
            tsprint(f"Bowtie2 index {bt2_db_prefix} ran into an error")
command(f"rm -f {bt2_db_prefix}.1.bt2")
raise
return bt2_db_prefix
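# Illustrative usage sketch (not part of the original module): this assumes
# `downloaded_files` maps an id to a single FASTA path, as implied by the
# `.keys()`/`.values()` handling above; the names and paths below are hypothetical.
#
#   downloaded_files = {"102506": "fastas/102506/pangenome.fa",
#                       "100044": "fastas/100044/pangenome.fa"}
#   bt2_db_prefix = build_bowtie2_db("bt2_indexes", "pangenomes", downloaded_files, num_cores=8)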
def bowtie2_align(bt2_db_dir, bt2_db_name, bamfile_path, args):
""" Use Bowtie2 to map reads to prebuilt bowtie2 database """
bt2_db_prefix = f"{bt2_db_dir}/{bt2_db_name}"
if os.path.exists(bamfile_path):
tsprint(f"Use existing bamfile {bamfile_path}")
return
# Construct bowtie2 align input arguments
max_reads = f"-u {args.max_reads}" if args.max_reads else ""
aln_mode = "local" if args.aln_mode == "local" else "end-to-end"
aln_speed = args.aln_speed if aln_mode == "end-to-end" else args.aln_speed + "-local"
r2 = ""
max_fraglen = f"-X {args.fragment_length}" if args.r2 else ""
if args.r2:
r1 = f"-1 {args.r1}"
r2 = f"-2 {args.r2}"
elif args.aln_interleaved:
r1 = f"--interleaved {args.r1}"
else:
r1 = f"-U {args.r1}"
try:
bt2_command = f"bowtie2 --no-unal -x {bt2_db_prefix} {max_fraglen} {max_reads} --{aln_mode} --{aln_speed} --threads {args.num_cores} -q {r1} {r2}"
command(f"set -o pipefail; {bt2_command} | \
samtools view --threads {args.num_cores} -b - | \
samtools sort --threads {args.num_cores} -o {bamfile_path}", quiet=False)
except:
        tsprint(f"Bowtie2 align to {bamfile_path} ran into an error")
command(f"rm -f {bamfile_path}")
raise
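# Sketch of the argument object bowtie2_align() expects (attribute names are taken
# from the references inside the function above; the values shown are purely illustrative):
#
#   import argparse
#   args = argparse.Namespace(r1="reads_1.fq.gz", r2="reads_2.fq.gz", aln_interleaved=False,
#                             aln_mode="local", aln_speed="sensitive", fragment_length=5000,
#                             max_reads=None, num_cores=8)
#   bowtie2_align("bt2_indexes", "pangenomes", "aligned.bam", args)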
def samtools_sort(bamfile_path, sorted_bamfile, debug, num_cores):
if debug and os.path.exists(sorted_bamfile):
tsprint(f"Skipping samtools sort in debug mode as temporary data exists: {sorted_bamfile}")
return
try:
command(f"samtools sort -@ {num_cores} -o {sorted_bamfile} {bamfile_path}", quiet=False) #-m 2G
except:
        tsprint(f"Samtools sort {bamfile_path} ran into an error")
command(f"rm -f {sorted_bamfile}")
raise
def samtools_index(bamfile_path, debug, num_cores):
if debug and os.path.exists(f"{bamfile_path}.bai"):
tsprint(f"Skipping samtools index in debug mode as temporary data exists: {bamfile_path}.bai")
return
try:
command(f"samtools index -@ {num_cores} {bamfile_path}", quiet=False)
except:
        tsprint(f"Samtools index {bamfile_path} ran into an error")
command(f"rm -f {bamfile_path}.bai")
raise
def _keep_read(aln, aln_mapid, aln_readq, aln_mapq, aln_cov):
    """ Check the quality of one alignment from BAM file """
if aln.is_secondary:
return False
align_len = len(aln.query_alignment_sequence)
query_len = aln.query_length
# min pid
if 100 * (align_len - dict(aln.tags)['NM']) / float(align_len) < aln_mapid:
return False
# min read quality
if np.mean(aln.query_qualities) < aln_readq:
return False
# min map quality
if aln.mapping_quality < aln_mapq:
return False
# min aln cov
if align_len / float(query_len) < aln_cov:
return False
return True
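# Illustrative filter loop (an assumption, not part of the original module): the
# alignment objects are expected to look like pysam.AlignedSegment, which is where
# attributes such as query_alignment_sequence and mapping_quality come from.
#
#   import pysam
#   with pysam.AlignmentFile("aligned.sorted.bam", "rb") as bam:
#       kept = [aln for aln in bam
#               if _keep_read(aln, aln_mapid=94.0, aln_readq=20, aln_mapq=20, aln_cov=0.75)]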
| 38.643411
| 154
| 0.649549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,244
| 0.45015
|
e6314fc5be266fa2fd430fad718dac793df709ff
| 3,541
|
py
|
Python
|
src/race/src/my_lane_detection/findpoint.py
|
young43/ISCC_2020
|
2a7187410bceca901bd87b753a91fd35b73ca036
|
[
"MIT"
] | 3
|
2020-11-13T04:59:27.000Z
|
2021-04-02T06:36:03.000Z
|
src/race/src/my_lane_detection/findpoint.py
|
yongbeomkwak/ISCC_2021
|
7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015
|
[
"MIT"
] | null | null | null |
src/race/src/my_lane_detection/findpoint.py
|
yongbeomkwak/ISCC_2021
|
7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015
|
[
"MIT"
] | 5
|
2020-09-13T09:06:16.000Z
|
2021-06-19T02:31:23.000Z
|
import numpy as np
import cv2
class FindPoint:
def __init__(self,img):
self.window_height = 10
self.nwindows = 15
self.margin = 20
self.minpix = 70
self.center = img.shape[1]/2
def findpoint(self, img):
out_img = np.dstack((img, img, img))
h, w = img.shape
good_left_inds = []
good_right_inds = []
nonzero = img.nonzero()
nonzerox = nonzero[1]
nonzeroy = nonzero[0]
tmp_lx = 0
tmp_rx = 640
for i in range(1, self.center//10):
win_high = 390
win_low = 380
l_x_max = self.center - (i * 10 - 10)
l_x_min = self.center - (i * 10 + 10)
good_left_inds = \
((nonzerox >= l_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (nonzerox <= l_x_max)).nonzero()[
0]
if len(good_left_inds) > self.minpix:
tmp_lx = np.int(np.mean(nonzerox[good_left_inds]))
cv2.rectangle(out_img, (l_x_max, 380), (l_x_min, 390), (0, 255, 0), 1)
if tmp_lx != 0:
break
for i in range(1, 64-self.center//10):
win_high = 390
win_low = 380
r_x_min = self.center + (i * 10 - 10)
r_x_max = self.center + (i * 10 + 10)
good_right_inds = \
((nonzerox >= r_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (
nonzerox <= r_x_max)).nonzero()[
0]
if len(good_right_inds) > self.minpix:
tmp_rx = np.int(np.mean(nonzerox[good_right_inds]))
cv2.rectangle(out_img, (r_x_min, 380), (r_x_max, 390), (255, 0, 0), 1)
if tmp_rx != 640:
break
if tmp_rx - tmp_lx < 250:
for window in range(0,self.nwindows):
if tmp_lx != 0:
l_x_min = tmp_lx-(window+1)*self.window_height
l_x_max = tmp_lx - (window) * self.window_height
good_left_inds = \
((nonzerox >= l_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (
nonzerox <= l_x_max)).nonzero()[
0]
if len(good_left_inds) > self.minpix:
tmp_lx = np.int(np.mean(nonzerox[good_left_inds]))
cv2.rectangle(out_img, (l_x_max, 380), (l_x_min, 390), (0, 255, 0), 1)
if tmp_rx != 0:
r_x_max = tmp_rx+(window+1)*self.window_height
r_x_min = tmp_rx + (window) * self.window_height
good_right_inds = \
((nonzerox >= r_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (
nonzerox <= r_x_max)).nonzero()[
0]
if len(good_right_inds) > self.minpix:
tmp_rx = np.int(np.mean(nonzerox[good_right_inds]))
cv2.rectangle(out_img, (r_x_min, 380), (r_x_max, 390), (255,0, 0), 1)
# tmp_rx=None
# if tmp_rx - tmp_lx >250:
# break
print('l', tmp_lx , ' ', 'r',tmp_rx)
cv2.rectangle(out_img, (tmp_lx-10, 380), (tmp_lx+10, 390), (255, 0,255), 1)
cv2.rectangle(out_img, (tmp_rx-10, 380), (tmp_rx+10, 390), (255,0,255), 1)
# cv2.imshow('width_slide',out_img)
return tmp_lx, tmp_rx
| 40.238636
| 119
| 0.475572
| 3,508
| 0.990681
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.027111
|
e6315a99e2517f5c7110b8dd1b8d7574b184b340
| 6,198
|
py
|
Python
|
backend/ibutsu_server/tasks/db.py
|
john-dupuy/ibutsu-server
|
ae380fc7a72a4898075291bac8fdb86952bfd06a
|
[
"MIT"
] | null | null | null |
backend/ibutsu_server/tasks/db.py
|
john-dupuy/ibutsu-server
|
ae380fc7a72a4898075291bac8fdb86952bfd06a
|
[
"MIT"
] | null | null | null |
backend/ibutsu_server/tasks/db.py
|
john-dupuy/ibutsu-server
|
ae380fc7a72a4898075291bac8fdb86952bfd06a
|
[
"MIT"
] | null | null | null |
import time
from datetime import datetime
from datetime import timedelta
from bson import ObjectId
from bson.errors import InvalidId
from dynaconf import settings
from ibutsu_server.mongo import mongo
from ibutsu_server.tasks.queues import task
from ibutsu_server.tasks.results import add_result_start_time
from ibutsu_server.tasks.runs import update_run as update_run_task
from ibutsu_server.util import serialize
from kombu.exceptions import OperationalError
from pymongo import DESCENDING
from redis import Redis
from redis.exceptions import LockError
""" Tasks for DB related things"""
LOCK_EXPIRE = 1
@task
def create_runs_from_results():
# 1. get all the runs
runs_to_create = mongo.results.aggregate([{"$group": {"_id": "$metadata.run"}}])
# 2. loop over all the runs
for run_id in runs_to_create:
# first check if the run exists already
_id = run_id["_id"]
try:
if mongo.runs.find_one({"_id": ObjectId(_id)}):
continue
except InvalidId:
continue
run_dict = {
"_id": ObjectId(_id),
}
# 3. Create the run in Ibutsu
mongo.runs.insert_one(run_dict)
run_dict = serialize(run_dict)
# 4. Start the update task
update_run_task.apply_async((run_dict["id"],), countdown=5)
@task
def add_start_time_to_results():
""" Add the field 'start_time' to all the results. For this we create a task for each run. """
for run in mongo.runs.find(sort=[("start_time", DESCENDING)]):
run = serialize(run)
try:
add_result_start_time.apply_async((run["id"],), countdown=5)
except OperationalError:
pass
@task
def _add_project_metadata(run, project_id):
""" Update all runs and results to add the 'metadata.project' field"""
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"update-run-lock-{run['id']}", blocking_timeout=LOCK_EXPIRE):
# add project metadata to the run
if not run.get("metadata"):
run["metadata"] = {}
run["metadata"]["project"] = project_id
mongo.runs.replace_one({"_id": ObjectId(run["id"])}, run)
results = mongo.results.find(
{"metadata.run": run["id"], "metadata.project": {"$exists": False}}
)
for result in results:
result = serialize(result)
# add project metadata to the result
if not result.get("metadata"):
result["metadata"] = {}
result["metadata"]["project"] = project_id
mongo.results.replace_one({"_id": ObjectId(result["id"])}, result)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
@task
def add_project_metadata_to_objects(project_name="insights-qe"):
""" Add IQE Project Metadata to historical DB objects. """
project_id = serialize(mongo.projects.find_one({"name": project_name})).get("id")
if not project_id:
return
for run in mongo.runs.find(
{"metadata.project": {"$exists": False}}, sort=[("start_time", DESCENDING)]
):
run = serialize(run)
try:
_add_project_metadata.apply_async((run, project_id), countdown=5)
except OperationalError:
pass
@task
def _delete_old_files(filename, max_date):
""" Delete all files uploaded before the max_date """
try:
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
if not isinstance(max_date, datetime):
max_date = datetime.fromisoformat(max_date)
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"delete-file-lock-{filename}", blocking_timeout=LOCK_EXPIRE):
for file in mongo.fs.find({"filename": filename, "uploadDate": {"$lt": max_date}}):
mongo.fs.delete(file._id)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
except Exception:
# we don't want to continually retry this task
return
@task
def prune_old_files(months=5):
""" Delete artifact files older than specified months (here defined as 4 weeks). """
try:
if isinstance(months, str):
months = int(months)
if months < 2:
# we don't want to remove files more recent than 3 months
return
files_to_delete = ["traceback.log", "screenshot.png", "iqe.log"]
delta = timedelta(weeks=months * 4).total_seconds()
current_time = time.time()
timestamp_in_sec = current_time - delta
# get datetime obj
max_date = datetime.fromtimestamp(timestamp_in_sec)
# send out the tasks
for filename in files_to_delete:
try:
_delete_old_files.apply_async((filename, max_date), countdown=5)
except OperationalError:
pass
except Exception:
# we don't want to continually retry this task
return
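# Worked example of the cutoff arithmetic in prune_old_files() (illustrative only):
# with the default months=5, delta = 5 * 4 weeks = 140 days, so max_date lands roughly
# 140 days before "now" and only files uploaded before that date are removed.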
@task
def delete_large_files(limit=256 * 1024):
""" Delete 'iqe.log' files larger than the limit, defaults to 256 KiB"""
try:
if isinstance(limit, str):
limit = int(limit)
if limit < (256 * 1024):
# we don't want to remove files smaller than 256 KiB
return
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"delete-file-lock-{limit}", blocking_timeout=LOCK_EXPIRE):
for file in mongo.fs.find({"length": {"$gt": limit}, "filename": "iqe.log"}):
mongo.fs.delete(file._id)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
except Exception:
# we don't want to continually retry this task
return
| 35.016949
| 99
| 0.62262
| 0
| 0
| 0
| 0
| 5,568
| 0.898354
| 0
| 0
| 1,860
| 0.300097
|
e631a24a11407592b87e8e8c899720b7b1343b18
| 1,457
|
py
|
Python
|
vectorc2/blocks/migrations/0002_initialize_data.py
|
sebastiankruk/vectorc2
|
13232cd63ebed32346fb4a669511b102b8ed24c0
|
[
"Apache-2.0"
] | 11
|
2019-02-27T01:38:47.000Z
|
2020-11-13T02:14:58.000Z
|
vectorc2/blocks/migrations/0002_initialize_data.py
|
sebastiankruk/vectorc2
|
13232cd63ebed32346fb4a669511b102b8ed24c0
|
[
"Apache-2.0"
] | 20
|
2019-02-27T21:22:59.000Z
|
2022-01-13T01:22:16.000Z
|
vectorc2/blocks/migrations/0002_initialize_data.py
|
sebastiankruk/vectorc2
|
13232cd63ebed32346fb4a669511b102b8ed24c0
|
[
"Apache-2.0"
] | 1
|
2020-01-14T09:14:28.000Z
|
2020-01-14T09:14:28.000Z
|
# Generated by Django 2.1.7 on 2019-05-14 20:36
from django.db import migrations
from blocks.models import AnimationName, AnimationTrigger
import anki_vector
def generate_names(apps, schema_editor):
"""
Helper function to populate names of animations and triggers and update their status
"""
def __update_or_create(source, name):
"""
Helper function to create/update a single name index
"""
source.objects.update_or_create(
name=name,
defaults={
'name': name,
'active': True
})
AnimationName = apps.get_model('blocks', 'AnimationName')
AnimationName.objects.filter(active=True).update(active=False)
AnimationTrigger = apps.get_model('blocks', 'AnimationTrigger')
AnimationTrigger.objects.filter(active=True).update(active=False)
with anki_vector.AsyncRobot() as robot:
anim_request = robot.anim.load_animation_list()
anim_request.result()
for anim_name in robot.anim.anim_list:
__update_or_create(AnimationName, anim_name)
anim_trigger_request = robot.anim.load_animation_trigger_list()
anim_trigger_request.result()
for anim_trigger_name in robot.anim.anim_trigger_list:
__update_or_create(AnimationTrigger, anim_trigger_name)
# ----------------------
class Migration(migrations.Migration):
dependencies = [
('blocks', '0001_initial'),
]
operations = [
migrations.RunPython(generate_names),
]
| 28.019231
| 86
| 0.705559
| 174
| 0.119423
| 0
| 0
| 0
| 0
| 0
| 0
| 320
| 0.219629
|
e6329473dcae584a805afb6f3b7a11b77f6eec4b
| 130
|
py
|
Python
|
src/app/groups/apps.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 9
|
2020-04-05T07:35:55.000Z
|
2021-08-03T05:50:05.000Z
|
src/app/groups/apps.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 89
|
2020-01-26T11:50:06.000Z
|
2022-03-31T07:14:18.000Z
|
src/app/groups/apps.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 13
|
2020-03-10T14:45:07.000Z
|
2021-07-31T02:43:40.000Z
|
from django.apps import AppConfig
class GroupsAppConfig(AppConfig):
name = 'app.groups'
verbose_name = "Учебные группы"
| 18.571429
| 35
| 0.738462
| 106
| 0.741259
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.286713
|
e6338656305747e7dd588f6558bdad231c542786
| 830
|
py
|
Python
|
Estudos/namedtuple.py
|
Gbrvi/Python
|
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
|
[
"MIT"
] | null | null | null |
Estudos/namedtuple.py
|
Gbrvi/Python
|
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
|
[
"MIT"
] | null | null | null |
Estudos/namedtuple.py
|
Gbrvi/Python
|
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
# It is like a dictionary; it is slower, but it is immutable!
# 'Jogador' is the class name | the class attributes
J = namedtuple('Jogador', ['nome', 'time', 'camisa', 'numero'])
j = J('Abel Hernadez', 'Flu', 99, 100)  # Adding values
j2 = J('Fred', 'Fluminense', 9, 157)
print(j2.nome)
#-------------------------------------------------------
# Nomes repetidos ou destinado ao python (def, class) são subtituidos se colocar o rename
P = namedtuple('Pessoa', ['nome', 'idade', 'def'], rename=True)
p = P('Carlos', 15, 'viano')
#output: Pessoa(nome='Carlos', idade=15, _2='viano')
# defaults sets a default value, but the first value "x" must still be provided
L = namedtuple('valores', ['x', 'y', 'z'], defaults=(None, None))
l = L(2)
print(l)
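# output: valores(x=2, y=None, z=None) -- with defaults, the two rightmost fields
# (y and z) fall back to None when only x is supplied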
| 31.923077
| 91
| 0.591566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 554
| 0.661098
|
e6342f9f6fc2f8be229cda6971a2b29ca77c7c7c
| 1,330
|
py
|
Python
|
src/decker/format/command.py
|
douglasfarinelli/pydev
|
9d43d485b102e5b44ee28894278ae496c3cec024
|
[
"MIT"
] | 21
|
2020-12-11T17:59:50.000Z
|
2022-03-12T02:22:09.000Z
|
src/decker/format/command.py
|
douglasfarinelli/decker
|
9d43d485b102e5b44ee28894278ae496c3cec024
|
[
"MIT"
] | null | null | null |
src/decker/format/command.py
|
douglasfarinelli/decker
|
9d43d485b102e5b44ee28894278ae496c3cec024
|
[
"MIT"
] | 2
|
2021-07-31T00:05:25.000Z
|
2021-11-04T12:09:26.000Z
|
import sys
from typing import List
import click
from decker.conf import Config
from decker.utils import print_done
from .pool import FormatterBackendPool
from .services import run_format
@click.option(
'-b',
'--backend',
type=click.Choice([backend.id for backend in FormatterBackendPool.all()]),
multiple=True,
help='Specify formatting backends.',
)
@click.option(
'-l',
'--line-length',
type=int,
default=79,
help='How many characters per line to allow.',
show_default=True,
)
@click.option(
'--exclude',
type=str,
default=None,
help='Files and directories that should be excluded on recursive searches.',
)
@click.argument(
'sources',
nargs=-1,
type=click.Path(
exists=True,
file_okay=True,
dir_okay=True,
readable=True,
allow_dash=True,
),
is_eager=True,
)
@click.command(name='format')
@click.pass_context
def format_command(
ctx: click.Context,
backend: List[str],
sources: List[str],
line_length: int,
exclude: str,
) -> None:
"""
Run code style format.
"""
config = Config.create(
ctx=ctx, sources=sources, line_length=line_length, exclude=exclude
)
run_format(
config,
backends=backend,
)
print_done()
sys.exit(0)
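# Hypothetical invocation sketch (the `decker` entry-point name is an assumption;
# the options map directly to the click declarations above):
#   decker format src/ --line-length 79 --exclude "migrations"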
| 19
| 80
| 0.635338
| 0
| 0
| 0
| 0
| 1,137
| 0.854887
| 0
| 0
| 240
| 0.180451
|
e63506be46724ae2661303db422a81cac16e9cfd
| 709
|
py
|
Python
|
seeds.py
|
hazzillrodriguez/Flaskdesk
|
16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5
|
[
"MIT"
] | null | null | null |
seeds.py
|
hazzillrodriguez/Flaskdesk
|
16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5
|
[
"MIT"
] | null | null | null |
seeds.py
|
hazzillrodriguez/Flaskdesk
|
16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models import Category, Priority, Status
from sqlalchemy.exc import SQLAlchemyError
category = 'Uncategorized'
priorities = ['Low', 'Medium', 'High', 'Urgent']
statuses = ['Open', 'Resolved', 'Pending', 'Closed']
def db_commit():
try:
db.session.commit()
        print('Category, priorities, and statuses have been created.')
return True
except SQLAlchemyError:
result = str(SQLAlchemyError)
print(result)
return False
with app.app_context():
if db_commit():
for priority, status in zip(priorities, statuses):
db.session.add(Priority(priority=priority))
db.session.add(Status(status=status))
db.session.add(Category(category=category))
db.session.commit()
| 28.36
| 63
| 0.734838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.181946
|
e637df68b541d6e5860c6604066ab8cbf8d7df24
| 164
|
py
|
Python
|
xs/nn/__init__.py
|
eLeVeNnN/xshinnosuke
|
69da91e0ea5042437edfc31c0e6ff9ef394c6cc9
|
[
"MIT"
] | 290
|
2020-07-06T02:13:12.000Z
|
2021-01-04T14:23:39.000Z
|
xs/nn/__init__.py
|
E1eveNn/xshinnosuke
|
69da91e0ea5042437edfc31c0e6ff9ef394c6cc9
|
[
"MIT"
] | 1
|
2020-12-03T11:11:48.000Z
|
2020-12-03T11:11:48.000Z
|
xs/nn/__init__.py
|
E1eveNn/xshinnosuke
|
69da91e0ea5042437edfc31c0e6ff9ef394c6cc9
|
[
"MIT"
] | 49
|
2020-07-16T00:27:47.000Z
|
2020-11-26T03:03:14.000Z
|
from .objectives import MSELoss, CrossEntropyLoss
from .models import Sequential, Model, Module
from .grad_fn import Parameter, Tensor
from .td_functional import *
| 32.8
| 49
| 0.823171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e63871f321b5d3bb45b965cb63b221c456ac757e
| 2,527
|
py
|
Python
|
eval/plot.py
|
yhlleo/TriangleGAN
|
5bab76561e75145c2645a93e23d22abd3f66f329
|
[
"BSD-3-Clause"
] | 32
|
2019-07-15T11:11:57.000Z
|
2022-01-09T11:03:00.000Z
|
eval/plot.py
|
yhlleo/TriangleGAN
|
5bab76561e75145c2645a93e23d22abd3f66f329
|
[
"BSD-3-Clause"
] | null | null | null |
eval/plot.py
|
yhlleo/TriangleGAN
|
5bab76561e75145c2645a93e23d22abd3f66f329
|
[
"BSD-3-Clause"
] | 4
|
2019-07-17T09:00:14.000Z
|
2021-11-16T21:20:25.000Z
|
# plot prd scores
import os
import json
from matplotlib import pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("json_files", nargs="*")
parser.add_argument("--output_fig", type=str, default='prd.png')
args = parser.parse_args()
def load_jsons(file_paths):
scores, labels = [], []
for json_file in file_paths:
with open(json_file) as f:
result = json.load(f)
scores.append(result["score"])
labels.append(result["label"])
return [[s["recall"], s["precision"]] for s in scores], labels
def plot(precision_recall_pairs, labels=None, out_path=None,
legend_loc='lower left', dpi=300):
"""Plots precision recall curves for distributions.
Creates the PRD plot for the given data and stores the plot in a given path.
Args:
precision_recall_pairs: List of prd_data to plot. Each item in this list is
a 2D array of precision and recall values for the
same number of ratios.
labels: Optional list of labels of same length as list_of_prd_data. The
default value is None.
out_path: Output path for the resulting plot. If None, the plot will be
opened via plt.show(). The default value is None.
legend_loc: Location of the legend. The default value is 'lower left'.
    dpi: Dots per inch (DPI) for the figure. The default value is 300.
Raises:
ValueError: If labels is a list of different length than list_of_prd_data.
"""
if labels is not None and len(labels) != len(precision_recall_pairs):
raise ValueError(
'Length of labels %d must be identical to length of '
'precision_recall_pairs %d.'
% (len(labels), len(precision_recall_pairs)))
fig = plt.figure(figsize=(3.5, 3.5), dpi=dpi)
plot_handle = fig.add_subplot(111)
plot_handle.tick_params(axis='both', which='major', labelsize=12)
for i in range(len(precision_recall_pairs)):
precision, recall = precision_recall_pairs[i]
label = labels[i] if labels is not None else None
plt.plot(recall, precision, label=label, alpha=0.5, linewidth=3)
if labels is not None:
plt.legend(loc=legend_loc)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Recall', fontsize=12)
plt.ylabel('Precision', fontsize=12)
plt.tight_layout()
plt.savefig(out_path, bbox_inches='tight', dpi=dpi)
plt.close()
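# Example sketch (hypothetical numbers, not part of the original script): calling
# plot() directly with two curves; each pair is [precision values, recall values],
# matching how plot() unpacks them above.
#
#   pr_a = [[1.0, 0.8, 0.3], [0.0, 0.5, 1.0]]
#   pr_b = [[0.9, 0.6, 0.2], [0.0, 0.5, 1.0]]
#   plot([pr_a, pr_b], labels=['model A', 'model B'], out_path='prd_example.png')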
if __name__ == '__main__':
precision_recall_pairs, labels = load_jsons(args.json_files)
plot(precision_recall_pairs, labels, args.output_fig)
| 37.716418
| 79
| 0.693708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,069
| 0.423031
|
e63ab07fc8212736ff3ef91cca7ad9e31b8c2243
| 2,218
|
py
|
Python
|
data_output.py
|
adebraine/Time-Series-RNN
|
2e5ef0a222d84e15ed09141724fa437492c1466e
|
[
"MIT"
] | null | null | null |
data_output.py
|
adebraine/Time-Series-RNN
|
2e5ef0a222d84e15ed09141724fa437492c1466e
|
[
"MIT"
] | null | null | null |
data_output.py
|
adebraine/Time-Series-RNN
|
2e5ef0a222d84e15ed09141724fa437492c1466e
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import keras
def evaluate_model(model, split_sets):
training_error = model.evaluate(split_sets['X_train'], split_sets['y_train'], verbose=0)
print('training error = ' + str(training_error))
testing_error = model.evaluate(split_sets['X_test'], split_sets['y_test'], verbose=0)
print('testing error = ' + str(testing_error))
def output_plot(dataset, y, window_size, train_percent,
predictions):
if len(predictions) > 2:
train_split = int(np.ceil(len(y)*train_percent)) + window_size
valid_split = int(np.ceil(len(y)*((1-train_percent)/2))) + train_split
# plot original series
plt.plot(dataset, color='k')
# plot training set prediction
plt.plot(np.arange(window_size, train_split, 1),
predictions['train'], color='b')
# plot validation set prediction
plt.plot(np.arange(train_split, valid_split, 1),
predictions['valid'], color='g')
# plot testing set prediction
plt.plot(np.arange(valid_split, valid_split + len(predictions['test']), 1),
predictions['test'], color='r')
# pretty up graph
plt.xlabel('day')
plt.ylabel('(normalized) price')
plt.legend(['original series', 'training fit',
'Validation fit', 'testing fit'],
loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
else:
train_split = int(np.ceil(len(y)*train_percent)) + window_size
# plot original series
plt.plot(dataset, color='k')
# plot training set prediction
plt.plot(np.arange(window_size, train_split, 1),
predictions['train'], color='b')
# plot testing set prediction
plt.plot(np.arange(train_split, train_split + len(predictions['test']), 1),
predictions['test'], color='r')
# pretty up graph
plt.xlabel('day')
plt.ylabel('(normalized) price')
plt.legend(['original series', 'training fit',
'testing fit'],
loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
| 35.206349
| 92
| 0.595131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 545
| 0.245717
|
e63b0d4192a6f56afdb4ff053aeafe21f3a6cf89
| 1,837
|
py
|
Python
|
vector_auto_regression.py
|
hotpxl/nebuchadnezzar
|
b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409
|
[
"MIT"
] | 2
|
2015-05-20T18:02:40.000Z
|
2016-08-07T18:57:27.000Z
|
vector_auto_regression.py
|
hotpxl/nebuchadnezzar
|
b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409
|
[
"MIT"
] | null | null | null |
vector_auto_regression.py
|
hotpxl/nebuchadnezzar
|
b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.4
import stats.data
import stats.plot
import stats.preprocess
import pandas
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates
import datetime
from statsmodels.tsa.api import VAR, DynamicVAR
sse_indices = stats.data.sse_indices()
for i in sse_indices:
d = stats.data.get_merged(i, 'date', 'volume', 'readCount')
# strip first few data points
d = d[2:]
for window_size in range(3, 10):
# window_size = 7
raw_volume = d[:, 1].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size).astype(float)))
read_count = d[:, 2].astype(float)
data = pandas.DataFrame({'volume': volume, 'readCount': read_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = VAR(data)
lag = model.select_order()['hqic']
length = data.values.shape[0]
print('using lag {}'.format(lag))
results = model.fit(lag)
# import IPython; IPython.embed()
prediction = [0] * (lag)
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
pred = np.asarray(prediction).reshape((length, 1))
fig, ax = plt.subplots()
dates = list(map(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), d[:, 0]))
ax.plot(dates, pred, 'r', label='forecast')
ax.plot(dates, volume, 'b', label='real')
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
ax.set_ylabel('Volume')
ax.legend()
plt.show()
# plt.savefig('{}_{}.png'.format(i, window_size))
# stats.plot.twin_x(np.concatenate((d[:, 1].reshape((length, 1)), pred), axis=1))
# import IPython; IPython.embed()
| 37.489796
| 132
| 0.625476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 380
| 0.206859
|
e63cd901a3e8b73ecbb160ecf9c349073434a2bf
| 2,086
|
py
|
Python
|
ArticleClassifierTF/src/data_models/weights/theme_weights.py
|
joduss/ArticleClassifier
|
38c0e168cdd74214b7f591c7cfc7b93fc496e46b
|
[
"Unlicense"
] | null | null | null |
ArticleClassifierTF/src/data_models/weights/theme_weights.py
|
joduss/ArticleClassifier
|
38c0e168cdd74214b7f591c7cfc7b93fc496e46b
|
[
"Unlicense"
] | null | null | null |
ArticleClassifierTF/src/data_models/weights/theme_weights.py
|
joduss/ArticleClassifier
|
38c0e168cdd74214b7f591c7cfc7b93fc496e46b
|
[
"Unlicense"
] | null | null | null |
from typing import Dict, List
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
from data_models.ThemeStat import ThemeStat
class ThemeWeights:
theme_stats: List[ThemeStat]
theme_tokenizer: ArticleThemeTokenizer
def __init__(self, theme_stats: List[ThemeStat], theme_tokenizer: ArticleThemeTokenizer):
self.theme_stats = theme_stats
self.theme_tokenizer = theme_tokenizer
def weight_list(self) -> List[float]:
"""
        Returns a list of weights for each theme, ordered by theme index.
"""
theme_weight: List[float] = list([])
#raise Exception("To review")
for theme in self.theme_tokenizer.orderedThemes:
stat = [stat for stat in self.theme_stats if stat.theme == theme][0]
theme_weight.append(stat.binary_weight_pos())
return theme_weight
def weights_of_theme(self, theme_idx: int) -> Dict[int, float]:
"""
Returns the weights for a theme under the form {0 : VAL_1, 1 : VAL_2}
:param theme_idx: index of the theme
"""
theme = self.theme_tokenizer.theme_at_index(theme_idx)
theme_stat = list(filter(lambda stat: stat.theme == theme, self.theme_stats))
if len(theme_stat) == 0:
raise Exception("Theme {} not found.".format(theme))
if len(theme_stat) > 1:
raise Exception("Theme {} found multiple times.".format(theme))
return {0 : theme_stat[0].binary_weight_neg(),
1 : theme_stat[0].binary_weight_pos()}
def weight_array(self) -> List[List[float]]:
theme_weight_array: List[List[float]] = []
# raise Exception("To review")
for theme in self.theme_tokenizer.orderedThemes:
stat = [stat for stat in self.theme_stats if stat.theme == theme][0]
theme_weight = [0,0]
theme_weight[0] = stat.binary_weight_neg()
theme_weight[1] = stat.binary_weight_pos()
theme_weight_array.append(theme_weight)
return theme_weight_array
| 32.59375
| 93
| 0.650527
| 1,926
| 0.923298
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.162033
|
e63cf8d084bbaa33179f664b68770d2a61c1830b
| 2,688
|
py
|
Python
|
installation_text.py
|
bryanrtboy/videoselector
|
6867c14ebb3f9ac563a2aa5533806ec4872a53e9
|
[
"MIT"
] | 1
|
2017-12-10T12:42:09.000Z
|
2017-12-10T12:42:09.000Z
|
installation_text.py
|
bryanrtboy/videoselector
|
6867c14ebb3f9ac563a2aa5533806ec4872a53e9
|
[
"MIT"
] | null | null | null |
installation_text.py
|
bryanrtboy/videoselector
|
6867c14ebb3f9ac563a2aa5533806ec4872a53e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from pssh import SSHClient, ParallelSSHClient, utils
import datetime
import time
import random
import sys
output = []
hosts = ['client0', 'client1', 'client2','client3', 'client4']
client = ParallelSSHClient(hosts)
values = ["bear","cake","fork","pipe","gun"]
def open_movies(my_values, delay):
choices = list(my_values)
for x in range(len(hosts)):
if x < len(hosts) - 1:
prompt = "Type "
for v in choices:
prompt += v + ", "
prompt = prompt[:-2]
prompt += " :"
choice = get_valid_input(prompt)
choices.remove(choice.lower())
open_movie(choice, x)
else:
choice = choices[0]
open_movie(choice, x)
print("wait {0} seconds".format(delay))
time.sleep(delay)
print("done waiting, back to the command and play idle movies on clients")
cmds = ["~/dbuscontrol.sh stop", "sleep 2", "omxplayer /mnt/usb/media/intro.mp4 --aspect-mode=stretch --loop"]
#run all the commands on all the clients
for cmd in cmds:
client.run_command(cmd, stop_on_errors=False)
#show a prompt to decide what to do next
next = raw_input("Hit return to continue or 'Q' to quit:")
if next == "Q":
print("quitting")
exit()
else:
        open_movies(my_values, delay)  # recurse with the same values and delay
def open_movie(choice, clientID) :
one_client = SSHClient(hosts[clientID])
num = random.randint(0,2)
command = "~/dbuscontrol.sh stop"
one_client.exec_command(command)
command = "omxplayer /mnt/usb/media/" + choice + "/mov_" + str(num) + ".mp4 --aspect-mode=stretch --loop"
one_client.exec_command(command)
print("Opening a " +choice+ " movie, number " + str(num) + " on " + hosts[clientID] + "!")
def get_valid_input(prompt):
while True:
data = raw_input(prompt)
#check if the entered word is in our list of values
if data.lower() not in values:
print("Not an appropriate choice.")
else:
break
return data
# If you need to get a response back from the client, use this function
#instead of open_movies().
#Note with --loop argument in cmds, the process will never quit
#requires CTRL-C to end the process
def open_movies_wait_for_output():
cmds = ["omxplayer /mnt/usb/media/gun/mov_0.mp4 --aspect-mode=stretch --loop"]
start = datetime.datetime.now()
for cmd in cmds:
output.append(client.run_command(cmd, stop_on_errors=False))
end = datetime.datetime.now()
print("Started %s commands on %s host(s) in %s" % (
len(cmds), len(hosts), end-start,))
start = datetime.datetime.now()
for _output in output:
print("waiting for output")
client.join(_output)
print(_output)
end = datetime.datetime.now()
print("All commands finished in %s" % (end-start,))
if __name__ == "__main__":
open_movies(values, 15)
| 29.217391
| 111
| 0.679315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 993
| 0.36942
|
e63d83d29b28004d4dc6e59ec720b1e34cdc3bc7
| 3,744
|
py
|
Python
|
poi/cache.py
|
jchluo/poi
|
6892d3e219ee2b841053a41d308887a5e6b60017
|
[
"Apache-2.0"
] | 10
|
2016-01-11T09:24:38.000Z
|
2021-07-20T06:40:15.000Z
|
poi/cache.py
|
jchluo/poi
|
6892d3e219ee2b841053a41d308887a5e6b60017
|
[
"Apache-2.0"
] | 1
|
2018-04-10T04:48:18.000Z
|
2018-04-10T04:48:18.000Z
|
poi/cache.py
|
jchluo/poi
|
6892d3e219ee2b841053a41d308887a5e6b60017
|
[
"Apache-2.0"
] | 8
|
2016-01-11T09:24:56.000Z
|
2020-04-23T08:25:53.000Z
|
# -*- coding: utf-8 -*-
"""Cache Recommender.
dump : run topN predict items for each user, and
dump them to a file-like object (disk file or memory).
load : recover from a file-like object, return a CacheRecommender.
Note that this recommender is just a tiny version of the original one,
which can only predict topN (stored in file) items.
usage:
>>> class M(object):
... def __init__(self):
... self.num_users = 1
... self.num_items = 3
... self.checkins = {0: {0:1}}
... self.name = "Test"
... def predict(self, u, i):
... return 1.0 * i
usage dump:
>>> from StringIO import StringIO
>>> f = StringIO()
>>> md = M()
>>> dump(md, f, attrs=["name"], num_pool=0)
usage load
>>> f.seek(0)
>>> cr = load(f)
>>> print cr.predict(0, 2)
2.0
>>> print cr.name
Test
"""
import time
import json
import logging
import numpy as np
from .utils import threads
from .models import Recommender
log = logging.getLogger(__name__)
__all__ = ["CacheRecommender", "dump", "load"]
class CacheRecommender(Recommender):
"""Cache File Recommender.
"""
def __init__(self):
self.checkins = {}
self._data = {}
self._meta = {}
def __getattr__(self, attr):
if attr == "_meta":
raise AttributeError()
if attr in self._meta:
return self._meta[attr]
raise AttributeError("attribute: %s Not Found." % attr)
def __repr__(self):
return "<Cache %s>" % self._meta["__repr__"][1: -1]
def predict(self, user, item):
        return self._data.get(user, {}).get(item, -10 ** 10)  # effectively minus infinity for uncached items
def _proxy_predict(arg):
model, i, num = arg
scores = [(j, model.predict(i, j)) for j in xrange(model.num_items)\
if j not in model.checkins[i]]
scores.sort(key=lambda x: x[1], reverse=True)
return [i, scores[: num]]
def dump(model, fp, num=1000, attrs=None, num_pool=4):
"""Dump predict record to file.
fp: file pointer like object,
    num: the top num items and their scores will be stored;
    other items will be abandoned.
attrs: list like, the attributes want to be stored,
num_items and num_users will auto stored.
num_pool: number of threads, 0 will turn off multiple threads.
"""
if model is None:
raise ValueError("model is None.")
t0 = time.time()
args = [(model, i, num) for i in xrange(model.num_users)]
if num_pool > 0:
results = threads(_proxy_predict, args, num_pool)
else:
results = [_proxy_predict(arg) for arg in args]
meta = {}
# write attributes
if attrs is None:
attrs = ["num_users", "num_items"]
else:
attrs = list(attrs)
attrs.extend(["num_users", "num_items"])
attrs = set(attrs)
for attr in attrs:
if not hasattr(model, attr):
raise AttributeError("attribute: %s Not Found." % attr)
meta[attr] = getattr(model, attr)
# write __repr__
meta["__repr__"] = str(model)
print >> fp, json.dumps(meta)
    # write records
for one in results:
print >> fp, json.dumps(one)
t1 = time.time()
log.debug("dump ok, time: %.2fs" % (t1 - t0))
def load(fp):
"""Reture a cacherecommender, which is the tiny version of the
original one.
fp: file like object.
"""
cr = CacheRecommender()
# meta
cr._meta = json.loads(fp.readline())
    # records
for line in fp:
rd = json.loads(line.strip())
user = int(rd[0])
scores = rd[1]
cr._data[user] = {}
for l, s in scores:
cr._data[user][int(l)] = float(s)
return cr
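# Illustrative note (not part of the original module): the file written by dump()
# holds one JSON document per line. The first line is the meta dict (the requested
# attrs plus num_users, num_items and "__repr__"); each following line is a
# [user, [[item, score], ...]] record such as [0, [[2, 2.0], [1, 1.0]]].
# load() rebuilds a CacheRecommender from exactly this layout.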
| 27.328467
| 75
| 0.576656
| 583
| 0.155716
| 0
| 0
| 0
| 0
| 0
| 0
| 1,683
| 0.449519
|
e63da7efdb0e189e1a9e15a53af922678e7b6e0e
| 2,335
|
py
|
Python
|
p2p/protocol.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
p2p/protocol.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
p2p/protocol.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
import logging
from typing import (
Any,
Sequence,
Tuple,
Type,
)
from eth_utils.toolz import accumulate
from p2p.abc import (
CommandAPI,
ProtocolAPI,
TransportAPI,
)
from p2p.constants import P2P_PROTOCOL_COMMAND_LENGTH
from p2p.typing import Capability
class BaseProtocol(ProtocolAPI):
logger = logging.getLogger('p2p.protocol.Protocol')
def __init__(self,
transport: TransportAPI,
command_id_offset: int,
snappy_support: bool) -> None:
self.transport = transport
self.command_id_offset = command_id_offset
self.snappy_support = snappy_support
self.command_id_by_type = {
command_type: command_id_offset + command_type.protocol_command_id
for command_type
in self.commands
}
self.command_type_by_id = {
command_id: command_type
for command_type, command_id
in self.command_id_by_type.items()
}
def __repr__(self) -> str:
return "(%s, %d)" % (self.name, self.version)
@classmethod
def supports_command(cls, command_type: Type[CommandAPI[Any]]) -> bool:
return command_type in cls.commands
@classmethod
def as_capability(cls) -> Capability:
return (cls.name, cls.version)
def get_command_type_for_command_id(self, command_id: int) -> Type[CommandAPI[Any]]:
return self.command_type_by_id[command_id]
def send(self, command: CommandAPI[Any]) -> None:
message = command.encode(self.command_id_by_type[type(command)], self.snappy_support)
self.transport.send(message)
def get_cmd_offsets(protocol_types: Sequence[Type[ProtocolAPI]]) -> Tuple[int, ...]:
"""
Computes the `command_id_offsets` for each protocol. The first offset is
always P2P_PROTOCOL_COMMAND_LENGTH since the first protocol always begins
after the base `p2p` protocol. Each subsequent protocol is the accumulated
sum of all of the protocol offsets that came before it.
"""
return tuple(accumulate(
lambda prev_offset, protocol_class: prev_offset + protocol_class.command_length,
protocol_types,
P2P_PROTOCOL_COMMAND_LENGTH,
))[:-1] # the `[:-1]` is to discard the last accumulated offset which is not needed
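# Illustrative sketch (not part of the original module): with two hypothetical
# protocol classes whose command_length values are 16 and 8, the accumulation
# above yields (BASE, BASE + 16, BASE + 16 + 8) for BASE = P2P_PROTOCOL_COMMAND_LENGTH,
# and the trailing [:-1] drops the last value, leaving one offset per protocol.
# Taking 16 as a stand-in for the base length:
#
#     >>> from eth_utils.toolz import accumulate
#     >>> tuple(accumulate(lambda prev, length: prev + length, (16, 8), 16))[:-1]
#     (16, 32)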
| 31.986301
| 93
| 0.677088
| 1,379
| 0.590578
| 0
| 0
| 225
| 0.09636
| 0
| 0
| 415
| 0.17773
|
e63f1e8cde7eb9bc19101fd61c76b84d56a931e5
| 6,314
|
py
|
Python
|
soocii_services_lib/tokens.py
|
jonascheng/services-lib
|
5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc
|
[
"Apache-2.0"
] | null | null | null |
soocii_services_lib/tokens.py
|
jonascheng/services-lib
|
5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc
|
[
"Apache-2.0"
] | 5
|
2017-11-23T08:24:09.000Z
|
2018-12-25T04:42:48.000Z
|
soocii_services_lib/tokens.py
|
jonascheng/services-lib
|
5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc
|
[
"Apache-2.0"
] | 3
|
2017-06-28T07:54:40.000Z
|
2018-12-25T04:44:42.000Z
|
import binascii
import json
import time
import jsonschema
from .crypter import AESCipher
from .exceptions import AccessTokenValidationError, RefreshTokenValidationError, TokenExpiredError, TokenSchemaError
class BaseToken(dict):
_schema = {}
def is_valid(self, age=None, raise_exception=False):
try:
jsonschema.validate(self, self._schema)
if age and ('timestamp' not in self or self['timestamp'] + age < int(time.time())):
msg = 'timestamp {} is expired'.format(self.get("timestamp"))
raise TokenExpiredError(msg)
except jsonschema.exceptions.ValidationError as e:
if raise_exception:
raise TokenSchemaError(str(e))
except TokenExpiredError:
if raise_exception:
raise
else:
return True
return False
class AccessToken(BaseToken):
ROLE_USER = 'user'
ROLE_BACKSTAGE = 'backstage'
ROLE_SERVICE = 'service'
_schema = {
'definitions': {
'basic': {
'type': 'object',
'properties': {
'timestamp': {
'type': 'integer'
}
}
},
ROLE_USER: {
'type': 'object',
'properties': {
'role': {
'type': 'string',
'enum': [ROLE_USER]
},
'pid': {
'type': 'string'
},
'id': {
'type': 'integer'
},
'soocii_id': {
'type': 'string'
},
'uid': {
'type': 'string',
'pattern': '^[0-9a-fA-F]{32}$'
}
},
'required': ['pid', 'id', 'soocii_id', 'uid']
},
ROLE_BACKSTAGE: {
'type': 'object',
'properties': {
'role': {
'type': 'string',
'enum': [ROLE_BACKSTAGE]
},
'id': {
'type': 'integer'
}
},
'required': ['id']
},
ROLE_SERVICE: {
'type': 'object',
'properties': {
'role': {
'type': 'string',
'enum': [ROLE_SERVICE]
},
'name': {
'type': 'string'
}
},
'required': ['name']
},
},
'allOf': [
{
                '$ref': '#/definitions/basic'
},
{
'oneOf': [
{
'$ref': '#/definitions/user'
}, {
'$ref': '#/definitions/backstage'
}, {
'$ref': '#/definitions/service'
}
]
}
],
'required': ['role', 'timestamp']
}
@property
def role(self):
return self.get('role')
def is_role(self, role):
return self.role == role
class RefreshToken(BaseToken):
_schema = {
'type': 'object',
'properties': {
'timestamp': {
'type': 'integer'
},
'access_token': {
'type': 'string'
}
},
'required': ['timestamp', 'access_token']
}
class AccessTokenCryper(object):
age = 43200
exception = AccessTokenValidationError
_token_cls = AccessToken
def __init__(self, key, age=None):
key = binascii.unhexlify(key)
self.cipher = AESCipher(key)
if age:
self.age = age
def _encode(self, raw):
if isinstance(raw, str):
raw = raw.encode('utf-8')
return self.cipher.encrypt(raw)
def _decode(self, data):
        # ensure the token is bytes before decrypting
if isinstance(data, str):
data = data.encode('utf-8')
return self.cipher.decrypt(data)
def dumps(self, data=None, **kwargs):
"""
        Generate a token by encrypting the given data and keyword arguments; data should be a dict.
"""
if not isinstance(data, dict):
data = {}
data.update(kwargs)
# append timestamp
data.update(timestamp=int(time.time()))
token = self._token_cls(data)
token.is_valid(raise_exception=True)
return self._encode(json.dumps(token))
def loads(self, token, valid_age=True):
"""
Load and decrypt token
"""
try:
token = self._token_cls(json.loads(self._decode(token).decode('utf-8')))
token.is_valid(self.age if valid_age else None, raise_exception=True)
except ValueError:
raise self.exception('invalid token format')
return token
def _get_specific_token(role):
def _wrapper(self, **kwargs):
mandatory_keys = self._token_cls._schema['definitions'][role]['required']
if any(k not in kwargs for k in mandatory_keys):
msg = '{} are required'.format(', '.join(mandatory_keys))
raise TokenSchemaError(msg)
kwargs['role'] = role
return self.dumps(kwargs).decode('utf-8')
return _wrapper
_get_user_token = _get_specific_token(_token_cls.ROLE_USER)
get_backstage_token = _get_specific_token(_token_cls.ROLE_BACKSTAGE)
get_service_token = _get_specific_token(_token_cls.ROLE_SERVICE)
def get_user_token(self, **kwargs):
if 'lang' not in kwargs:
kwargs['lang'] = 'EN-US'
return self._get_user_token(**kwargs)
class RefreshTokenCryper(AccessTokenCryper):
age = 604800
exception = RefreshTokenValidationError
_token_cls = RefreshToken
def get_token(self, access_token):
return self.dumps({'access_token': access_token}).decode('utf-8')
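# Illustrative sketch (hypothetical usage, not part of the original module),
# assuming AESCipher accepts the unhexlified form of a 64-character hex key:
#
#     cryper = AccessTokenCryper("00" * 32)
#     token = cryper.get_user_token(pid="p1", id=1, soocii_id="s1", uid="0" * 32)
#     decoded = cryper.loads(token)
#     assert decoded.is_role(AccessToken.ROLE_USER)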
| 28.062222
| 116
| 0.464365
| 6,092
| 0.96484
| 0
| 0
| 61
| 0.009661
| 0
| 0
| 1,168
| 0.184986
|
e648ade42231ae7382e8ffb8232ee7fd02bab1ce
| 6,060
|
py
|
Python
|
software/camera-imu/tools/imu_driver_alt.py
|
MomsFriendlyRobotCompany/mjolnir
|
76f53e8e650ba1051b5f14e94ff2a9a283158da4
|
[
"MIT"
] | 1
|
2020-08-17T04:36:14.000Z
|
2020-08-17T04:36:14.000Z
|
software/camera-imu/tools/imu_driver_alt.py
|
MomsFriendlyRobotCompany/mjolnir
|
76f53e8e650ba1051b5f14e94ff2a9a283158da4
|
[
"MIT"
] | null | null | null |
software/camera-imu/tools/imu_driver_alt.py
|
MomsFriendlyRobotCompany/mjolnir
|
76f53e8e650ba1051b5f14e94ff2a9a283158da4
|
[
"MIT"
] | 1
|
2021-04-06T08:26:03.000Z
|
2021-04-06T08:26:03.000Z
|
from serial import Serial
import struct
from math import log10, sin, cos, acos, atan2, asin, pi, sqrt
import time
from collections import namedtuple
from colorama import Fore
# agmpt_t = namedtuple("agmpt_t", "accel gyro mag pressure temperature timestamp")
# ImageIMU = namedtuple("ImageIMU","image accel gyro temperature timestamp")
AccelGyroMag = namedtuple("AccelGyroMag", "ax ay az gx gy gz mx my mz")
TempPress = namedtuple("TempPress", "temperature pressure")
Light = namedtuple("Light", "lux")
c2f = lambda t: t*9/5+32
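# normalize3 is referenced by IMUDriver.compensate() below but is not defined or
# imported in this file; a minimal sketch of the assumed helper:
def normalize3(x, y, z):
    # scale the vector (x, y, z) to unit length
    n = sqrt(x*x + y*y + z*z)
    return x/n, y/n, z/n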
class cAccelGyroMag:
"""
Accel: g's
Gyro: rads/sec
Mag: uT
"""
header = 0xfd
unpack = struct.Struct("<9f").unpack
length = 9*4
def astuple(self, data):
return AccelGyroMag(*self.unpack(data))
class cAccelGyro:
header = 0xfe
unpack = struct.Struct("<6f").unpack
length = 6*4
def astuple(self, data):
raise NotImplementedError()
class cMag:
header = 0xfc
unpack = struct.Struct("<3f").unpack
length = 3*4
def astuple(self, data):
raise NotImplementedError()
class cTempPress:
"""
Temperature: C
Pressure: hPa
"""
header = 0xfb
unpack = struct.Struct("<ff").unpack
length = 2*4
def astuple(self, data):
return TempPress(*self.unpack(data))
class cLight:
header = 0xf9
unpack = struct.Struct("f").unpack
length = 1*4
def astuple(self, data):
return Light(*self.unpack(data))
class cIRCamera:
header = 0xf8
unpack = struct.Struct(f"<{32*24}f").unpack
length = 32*24*4
def astuple(self, data):
raise NotImplementedError()
Key = {
cAccelGyroMag.header: cAccelGyroMag(),
cAccelGyro.header: cAccelGyro(),
cMag.header: cMag(),
cTempPress.header: cTempPress(),
cLight.header: cLight(),
cIRCamera.header: cIRCamera(),
}
class Parser:
"""
[0xFF,0xFF]: start
0xFE: accel, gyro
0xFD: accel, gyro, mag
0xFC: mag
0xFB: temperature, pressure
0xFA:
0xF9: light
0xF8: MLX90640 IR camera
0xF7-0xF1: unused
0xF0: position, velocity, quaternion
[0xEE,0xEE]: end
"""
header = b"\xff"
ender = b"\xee"
def decode(self, data):
# print(f"{Fore.CYAN}[{len(data)}]{Fore.YELLOW}{data}{Fore.RESET}", flush=True)
if data[-2:] != b"\xee\xee":
print(f"{Fore.RED} ERROR: wrong message ending: {data[-2:]}{Fore.RESET}")
return None
size = len(data)
i = 0
ret = []
while True:
try:
k = data[i]
parse = Key[k]
except Exception as e:
print(e)
print(f"{Fore.RED}** Invalid key: {hex(data[i])}{Fore.RESET}")
return ret
i += 1 # header
if 0:
d = parse.unpack(data[i:i+parse.length])
ret += d
else:
d = parse.astuple(data[i:i+parse.length])
ret.append(d)
i += parse.length # message length
if i == size-2: # \xee\xee
break
return ret
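# Illustrative sketch (not part of the original driver): a payload handed to
# Parser.decode() is everything after the 0xFF 0xFF header and the size byte --
# one header byte per block, that block's packed floats, then the 0xEE 0xEE
# terminator. For example, a single light reading of 1.5 lux:
#
#     payload = bytes([cLight.header]) + struct.pack("f", 1.5) + b"\xee\xee"
#     Parser().decode(payload)   # -> [Light(lux=1.5)]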
class IMUDriver:
__slots__ = ["s", "decoder"]
def __init__(self, port):
# speed = 115200
speed = 1000000
self.s = Serial(port, speed, timeout=0.005)
self.decoder = Parser()
print(f">> IMUDriver opened {port}@{speed}")
def close(self):
self.s.close()
def read(self, cmd=b'g'):
"""
Return: array of data or None
"""
self.s.reset_input_buffer()
self.s.write(cmd)
bad = True
while self.s.out_waiting > 0:
time.sleep(0.001)
while self.s.in_waiting < 10:
# print(".", end="", flush=True)
time.sleep(0.001)
# print(" ")
a = self.s.read(1)
b = self.s.read(1)
success = False
for _ in range(8):
if a == b"\xff" and b == b"\xff":
success = True
break
time.sleep(0.001)
a = b
b = self.s.read(1)
if not success:
print(f"{Fore.RED}** failed header **{Fore.RESET}")
time.sleep(0.001)
self.s.flushInput()
return None
data_size = ord(self.s.read(1))
# print(f">> {Fore.BLUE}data size:{Fore.RESET} {data_size}", flush=True)
data = self.s.read(data_size)
        ret = self.decoder.decode(data)
        if ret is None:
            # decode failed (bad message terminator); signal the caller
            return None
        ret.append(time.time())
        return ret
def compensate(self, accel, mag=None):
"""
"""
try:
ax, ay, az = normalize3(*accel)
pitch = asin(-ax)
if abs(pitch) >= pi/2:
roll = 0.0
else:
roll = asin(ay/cos(pitch))
if mag:
# mx, my, mz = mag
mx, my, mz = normalize3(*mag)
x = mx*cos(pitch)+mz*sin(pitch)
y = mx*sin(roll)*sin(pitch)+my*cos(roll)-mz*sin(roll)*cos(pitch)
heading = atan2(y, x)
# wrap heading between 0 and 360 degrees
if heading > 2*pi:
heading -= 2*pi
elif heading < 0:
heading += 2*pi
else:
heading = None
# if self.angle_units == Angle.degrees:
# roll *= RAD2DEG
# pitch *= RAD2DEG
# heading *= RAD2DEG
# elif self.angle_units == Angle.quaternion:
# return Quaternion.from_euler(roll, pitch, heading)
return (roll, pitch, heading,)
except ZeroDivisionError as e:
print('Error', e)
# if self.angle_units == Angle.quaternion:
# return Quaternion(1, 0, 0, 0)
# else:
return (0.0, 0.0, 0.0,)
def height(self, p):
"""
given pressure in hPa, returns altitude in meters.
"""
h = (1 - pow(p / 1013.25, 0.190263)) * 44330.8
return h
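# Illustrative note (not part of the original driver): at standard sea-level
# pressure, height(1013.25) returns 0 m, and height(900) is roughly 988 m.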
| 25.897436
| 87
| 0.518317
| 5,294
| 0.873597
| 0
| 0
| 0
| 0
| 0
| 0
| 1,645
| 0.271452
|
e64b61756e2c5141a88d05ce00a52ea06f0af2cf
| 1,718
|
py
|
Python
|
main.py
|
hwangseonu/pokeka
|
39e56c59dfc85a0c73232ac9105766ef060aa90e
|
[
"MIT"
] | 1
|
2021-06-01T05:26:48.000Z
|
2021-06-01T05:26:48.000Z
|
main.py
|
hwangseonu/pokeka
|
39e56c59dfc85a0c73232ac9105766ef060aa90e
|
[
"MIT"
] | null | null | null |
main.py
|
hwangseonu/pokeka
|
39e56c59dfc85a0c73232ac9105766ef060aa90e
|
[
"MIT"
] | null | null | null |
import base64
import svgwrite
import svgwrite.container
import svgwrite.shapes
import svgwrite.image
import bs4
import os
from urllib.request import urlopen
from selenium import webdriver
index = 0
code = input('덱 코드를 입력하세요.> ')  # prompt: "Enter the deck code."
os.mkdir(code)
url = 'https://pokemoncard.co.kr/recipe/search?code=' + code
driver = webdriver.PhantomJS('phantomjs.exe')
driver.implicitly_wait(5)
driver.get(url)
soup = bs4.BeautifulSoup(driver.page_source, 'lxml')
card_items = soup.select(f'#show-card-detail-{code} .card-item')
card_list = []
for item in card_items:
cnt = item.select_one('.count')
cnt = int(cnt.text)
for i in range(cnt):
img = item.select_one('img')
card_list.append(img['src'])
pages = len(card_list) // 9 + (1 if len(card_list) % 9 != 0 else 0)
start_x, start_y = 10.5, 16.5
for p in range(0, pages):
x, y = 0, 0
path = os.path.join(code, f'card{p + 1}.svg')
dwg = svgwrite.Drawing(path, size=('210mm', '297mm'))
background = svgwrite.container.Group()
background.add(svgwrite.shapes.Rect(size=('210mm', '297mm'), fill='#ffe659'))
dwg.add(background)
cards_group = svgwrite.container.Group()
for i in range(0, 9):
index = p * 9 + i
if index >= len(card_list):
break
image = urlopen(card_list[index]).read()
cards_group.add(svgwrite.image.Image(
href='data:image/png;base64,' + base64.b64encode(image).decode(),
width='63mm', height='88mm',
            x=str(start_x + (63 * x))+'mm', y=str(start_y + (88 * y))+'mm'))
x += 1
if x >= 3:
x = 0
y += 1
if y >= 3:
continue
dwg.add(cards_group)
dwg.save()
| 24.542857
| 81
| 0.610594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 257
| 0.148041
|
e64c6e151cf650530049e08cc621f9b0d7bf3833
| 967
|
py
|
Python
|
save.py
|
jonoxia/pencilbox
|
1ebcbefd0110a2d23ad0da27427df2e32eadfbfe
|
[
"Condor-1.1"
] | 3
|
2015-04-01T07:20:09.000Z
|
2020-12-26T02:37:56.000Z
|
save.py
|
jonoxia/pencilbox
|
1ebcbefd0110a2d23ad0da27427df2e32eadfbfe
|
[
"Condor-1.1"
] | null | null | null |
save.py
|
jonoxia/pencilbox
|
1ebcbefd0110a2d23ad0da27427df2e32eadfbfe
|
[
"Condor-1.1"
] | null | null | null |
#!/usr/bin/python
from database_tables import DrawingHistory
from webserver_utils import verify_id
import cgi
import cgitb
import datetime
def createNew(title, creator, history, layers):
kwargs = {"date": datetime.datetime.now(),
"title": title,
"history_json": history,
"layer_json": layers,
"creator": creator}
newEntry = DrawingHistory(**kwargs)
def updateOld(entry, history, layers):
entry.date = datetime.datetime.now()
entry.history_json = history
entry.layer_json = layers
cgitb.enable()
q = cgi.FieldStorage()
history = q.getfirst("history", "")
layers = q.getfirst("layers", "")
title = q.getfirst("title", "")
artist = verify_id()
matches = DrawingHistory.selectBy(title = title, creator=artist)
if matches.count() > 0:
updateOld(matches[0], history, layers)
else:
createNew(title, artist, history, layers)
print "Content-type: text/html"
print
print "OK, saved"
| 23.585366
| 64
| 0.677353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.135471
|
e64d35a8b39dc13cbd44e4267a4f6d20a1f20bd8
| 746
|
py
|
Python
|
games/stock/environment.py
|
hkp1030/muzero-stock
|
d3acddf11afa523b81bbe1a626a50c95eb78c165
|
[
"MIT"
] | 1
|
2022-01-21T21:15:49.000Z
|
2022-01-21T21:15:49.000Z
|
games/stock/environment.py
|
hkp1030/muzero-stock
|
d3acddf11afa523b81bbe1a626a50c95eb78c165
|
[
"MIT"
] | null | null | null |
games/stock/environment.py
|
hkp1030/muzero-stock
|
d3acddf11afa523b81bbe1a626a50c95eb78c165
|
[
"MIT"
] | 1
|
2022-01-21T21:30:37.000Z
|
2022-01-21T21:30:37.000Z
|
class Environment:
    PRICE_IDX = 4  # position of the closing price
def __init__(self, chart_data=None, training_data=None):
self.chart_data = chart_data
self.training_data = training_data
self.observation = None
self.idx = -1
def reset(self):
self.observation = None
self.idx = -1
def is_done(self):
if self.idx + 1 >= len(self.training_data):
return True
else:
return False
def observe(self):
if self.is_done():
return None
self.idx += 1
self.observation = self.training_data.iloc[self.idx]
return self.observation.tolist()
def get_price(self):
return self.chart_data.iloc[self.idx][self.PRICE_IDX]
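# Illustrative sketch (hypothetical usage, not part of the original module),
# assuming chart_data and training_data are row-aligned pandas DataFrames with
# the closing price in chart column index 4:
#
#     env = Environment(chart_data=chart_df, training_data=features_df)
#     while not env.is_done():
#         obs = env.observe()        # next training row as a plain list
#         price = env.get_price()    # closing price aligned with that row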
| 24.866667
| 61
| 0.591153
| 755
| 0.998677
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.02381
|
e64d3c1360f948a0e4e91a1e5bc77802db0ff7e0
| 2,148
|
py
|
Python
|
synthesis/paramGen/testcase2.py
|
hyunynim/DIST-Renderer
|
4717ee8cea77f4f413b61f380a893c6800d0bde5
|
[
"MIT"
] | 176
|
2020-06-11T19:16:33.000Z
|
2022-03-29T01:38:28.000Z
|
synthesis/paramGen/testcase2.py
|
hyunynim/DIST-Renderer
|
4717ee8cea77f4f413b61f380a893c6800d0bde5
|
[
"MIT"
] | 6
|
2020-06-26T05:26:56.000Z
|
2021-11-10T07:31:21.000Z
|
synthesis/paramGen/testcase2.py
|
hyunynim/DIST-Renderer
|
4717ee8cea77f4f413b61f380a893c6800d0bde5
|
[
"MIT"
] | 23
|
2020-06-11T21:43:03.000Z
|
2022-02-18T00:16:16.000Z
|
'''
2019-08-07 00:01
Method:
20 x 5 grid over (camera x lighting)
'''
VIEW_NUM, LIGHTING_NUM = 20, 5
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.param_decomposer import AllParams
def generate_params(shape_list, randomizer):
nowpath = os.path.dirname(os.path.abspath(__file__))
basepath = os.path.dirname(nowpath)
folder = os.path.join(basepath, 'output', os.path.splitext(os.path.basename(__file__))[0])
all_params_list = []
shape_list = shape_list[:5] # take only five for testing.
print('generating rendering params...')
from tqdm import tqdm
for shape in tqdm(shape_list):
view_cfg, light_cfg, truncparam_cfg, cropbg_param_cfg, fname_cfg = [], [], [], [], []
# generate cameras and lights
camera_list, lighting_list = [], []
for idx in range(VIEW_NUM):
view = randomizer.randomize_view()
truncparam = randomizer.randomize_truncparam()
camera_list.append((view, truncparam))
for idx in range(LIGHTING_NUM):
lighting_list.append(randomizer.randomize_lighting())
counter = 0
        for j1 in range(VIEW_NUM): # VIEW_NUM cameras (views and truncparams)
camera = camera_list[j1]
view, truncparam = camera[0], camera[1]
            for j2 in range(LIGHTING_NUM): # LIGHTING_NUM lighting conditions and bg
lighting = lighting_list[j2]
cropbg_param = randomizer.randomize_cropbg_param()
# to append info to the list.
view_cfg.append(view)
light_cfg.append(lighting)
truncparam_cfg.append(truncparam)
cropbg_param_cfg.append(cropbg_param)
fname = os.path.join(shape.shape_md5, shape.shape_md5 + '_{0:08d}.png'.format(counter))
fname_cfg.append(fname)
counter = counter + 1
# to append all_params
all_params = AllParams(shape, view_cfg, light_cfg, truncparam_cfg, cropbg_param_cfg, fname_cfg)
all_params_list.append(all_params)
return folder, all_params_list
| 39.777778
| 103
| 0.640596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 298
| 0.138734
|
e64e4471df6551917b2b1289cce293fbc929f162
| 12,219
|
py
|
Python
|
daisy-world/dashdir/plotting.py
|
frossmann/addon_containers
|
de6dde175947b24bbfa35d94d44c9d9633a73226
|
[
"BSD-3-Clause"
] | null | null | null |
daisy-world/dashdir/plotting.py
|
frossmann/addon_containers
|
de6dde175947b24bbfa35d94d44c9d9633a73226
|
[
"BSD-3-Clause"
] | null | null | null |
daisy-world/dashdir/plotting.py
|
frossmann/addon_containers
|
de6dde175947b24bbfa35d94d44c9d9633a73226
|
[
"BSD-3-Clause"
] | 2
|
2021-11-16T06:05:02.000Z
|
2021-11-18T17:16:35.000Z
|
import plotly.graph_objects as go
import plotly.figure_factory as ff
import numpy as np
import calculations as calc
from plotly.subplots import make_subplots
def initialize_albedo_plot(T_min, T_opt):
    # what does the growth curve of the daisies look like?
gw = []
gb = []
# amount of intervals to plot
nt = 20
t0 = 0
t1 = 45
dT = (t1 - t0) / nt
tempv = [t0 + i * dT for i in range(nt)]
for t in tempv:
gw.append(calc.DaisyGrowth(t + 273.15, "w", T_min, T_opt))
gb.append(calc.DaisyGrowth(t + 273.15, "b", T_min, T_opt))
albedo_plot = go.Figure()
albedo_plot.add_hrect(
xref="paper",
yref="paper",
x0=1,
x1=1.5,
y0=-15,
y1=100,
line_width=0,
fillcolor="white",
opacity=1,
)
albedo_plot.update_xaxes(showgrid=True, zeroline=False)
albedo_plot.update_yaxes(showgrid=True, zeroline=False)
albedo_plot.add_trace(go.Scatter(x=tempv, y=gw, name="gw"))
albedo_plot.add_trace(go.Scatter(x=tempv, y=gb, name="gb"))
albedo_plot.update_layout(xaxis_title="tempv", yaxis_title="growth")
albedo_plot.update_layout(xaxis_title="Temp [degC]", yaxis_title="Ratio")
albedo_plot.update_xaxes(range=[0, t1])
albedo_plot.update_yaxes(range=[0, 1])
albedo_plot.layout.title = "Growth curve of daisies"
return albedo_plot
def constant_flux_temp(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt
):
# initial areas are embedded in here but should be passed in as an
# argument later if we want to change the initial conditions
# externally...
areas = {"w": 0.01, "b": 0.01} # initial conditions for area
# solve the constant flux problem:
xgens, gens = calc.update_constant_flux(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt, areas
)
# temperatures plot
fig = go.Figure()
fig.add_hrect(
xref="paper",
yref="paper",
x0=1,
x1=1.5,
y0=-15,
y1=100,
line_width=0,
fillcolor="white",
opacity=1,
)
fig.update_xaxes(showgrid=True, zeroline=False)
fig.update_yaxes(showgrid=True, zeroline=False)
fig.add_trace(
go.Scatter(
x=gens,
y=[x["Tw"] - 273.15 for x in xgens],
name="White daisies temperature",
line=dict(color="lavender", width=8),
)
)
fig.add_trace(
go.Scatter(
x=gens,
y=[x["Tb"] - 273.15 for x in xgens],
name="Black daisies temperature",
line=dict(color="black", width=3),
)
)
fig.add_trace(
go.Scatter(
x=gens,
y=[x["Tp"] - 273.15 for x in xgens],
name="Planet temperature",
line=dict(color="seagreen", width=5, dash="dot"),
)
)
fig.update_layout(
xaxis_title="Simulation Time (Daisy generation #)",
yaxis_title="Temperature [degC]",
)
fig.update_xaxes(range=[0, len(gens)])
fig.update_yaxes(range=[10, 40])
fig.layout.title = "Constant flux temperature with daisy generation"
fig.update_layout(plot_bgcolor="silver")
return fig
def constant_flux_area(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt
):
# initial areas are embedded in here but should be passed in as an
# argument later if we want to change the initial conditions
# externally...
areas = {"w": 0.01, "b": 0.01} # initial conditions for area
# solve the constant flux problem:
xgens, gens = calc.update_constant_flux(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt, areas
)
# make the figure:
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_hrect(
xref="paper",
yref="paper",
x0=1,
x1=1.5,
y0=-15,
y1=100,
line_width=0,
fillcolor="white",
opacity=1,
)
fig.update_xaxes(showgrid=True, zeroline=False)
fig.update_yaxes(showgrid=True, zeroline=False, secondary_y=False)
fig.update_yaxes(showgrid=False, zeroline=False, secondary_y=True)
fig.add_trace(
go.Scatter(
x=gens,
y=[100 * x["Sw"] for x in xgens],
name="White daisies area",
line=dict(color="lavender", width=8),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
x=gens,
y=[100 * x["Sb"] for x in xgens],
name="Black daisies area",
line=dict(color="black", width=3),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
x=gens,
y=[100 * x["Su"] for x in xgens],
name="Uninhabited area",
line=dict(color="saddlebrown", width=4),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
x=gens,
y=[x["Ap"] for x in xgens],
name="Combined albedo",
line=dict(color="royalblue", dash="dash"),
),
secondary_y=True,
)
# fig.update_layout(xaxis_title="Generation number", yaxis_title="Fractional area")
fig.update_xaxes(title_text="Simulation Time (Daisy generation #)")
fig.update_yaxes(title_text="Inhabited area [%]", secondary_y=False)
fig.update_yaxes(title_text="Albedo", secondary_y=True)
fig.update_xaxes(range=[0, len(gens)])
fig.update_yaxes(range=[0, 100], secondary_y=False) # % area
fig.update_yaxes(range=[0.35, 0.65], secondary_y=True) # albedo
fig.layout.title = "Constant flux daisy coverage"
# fig.update_layout(paper_bgcolor="black")
fig.update_layout(plot_bgcolor="silver")
return fig
def varying_solar_flux_temp(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt
):
xeq, xeqbar, _, F = calc.update_equi_flux(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt
)
# fig = go.Figure(data=go.Scatter(x=F, y=[x["Tw"] - 273.15 for x in xeq]))
##
# # fig = make_subplots(rows=1, cols=2, subplot_titles=("Plot1", "Plot2"))
# make a list of arbitrary times to plot against
times = np.arange(0, len(F) + 1, 1)
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_hrect(
xref="paper",
yref="paper",
x0=1,
x1=1.5,
y0=-15,
y1=100,
line_width=0,
fillcolor="white",
opacity=1,
)
# # subplot 1
fig.update_xaxes(showgrid=True, zeroline=False)
fig.update_yaxes(showgrid=True, zeroline=False, secondary_y=False)
fig.update_yaxes(showgrid=False, zeroline=False, secondary_y=True)
fig.add_trace(
go.Scatter(
x=times,
y=[Fi * Fsnom for Fi in F],
name="Solar flux (right axis)",
line=dict(color="rgba(255, 255, 0, 0.3)", width=5),
),
secondary_y=True,
)
fig.add_trace(
go.Scatter(
x=times,
y=[x["Tw"] - 273.15 for x in xeq],
name="White daisies temperature",
line=dict(color="lavender", width=7),
),
secondary_y=False,
)
# fig.add_trace(
# go.Scatter(
# x=F,
# y=[x["Tw"] - 273.15 for x in xeqinv],
# name="White daisies temperature (backwards)",
# line=dict(color="lightskyblue", dash="dot", width=5),
# ),
# )
fig.add_trace(
go.Scatter(
x=times,
y=[x["Tb"] - 273.15 for x in xeq],
name="Black daisies temperature",
line=dict(color="black", width=3),
),
secondary_y=False,
)
# fig.add_trace(
# go.Scatter(
# x=F,
# y=[x["Tb"] - 273.15 for x in xeqinv],
# name="Black daisies temperature (backwards)",
# line=dict(color="darkslategray", dash="dot", width=3),
# ),
# )
fig.add_trace(
go.Scatter(
x=times,
y=[x["Tp"] - 273.15 for x in xeq],
name="Planet temperature",
line=dict(color="seagreen", width=5, dash="dot"),
),
secondary_y=False,
)
# fig.add_trace(
# go.Scatter(
# x=F,
# y=[x["Tp"] - 273.15 for x in xeqinv],
# name="Planet temperature (backwards)",
# line=dict(color="sienna", dash="dot", width=3),
# ),
# )
fig.add_trace(
go.Scatter(
x=times,
y=[x["Tp"] - 273.15 for x in xeqbar],
name="Planet temperature (without life)",
line=dict(color="gray", dash="dash", width=3),
),
secondary_y=False,
)
fig.update_xaxes(title="Simulation Time [Myr]", range=[0, times[-1]])
fig.update_yaxes(
title="Temperature [degC]",
range=[-20, 80],
secondary_y=False,
)
fig.update_yaxes(title_text="Solar flux [Wm-2]", secondary_y=True)
fig.update_layout(title_text="Equilibrium temperature vs solar flux")
fig.update_layout(plot_bgcolor="silver")
return fig
def varying_solar_flux_area(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt
):
xeq, _, _, F = calc.update_equi_flux(
Fsnom, Albedo, rat, em_p, sig, ins_p, death, minarea, T_min, T_opt
)
# make a list of arbitrary times to plot against
times = np.arange(0, len(F) + 1, 1)
# fig = go.Figure(data=go.Scatter(x=F, y=[x["Tw"] - 273.15 for x in xeq]))
##
# # fig = make_subplots(rows=1, cols=2, subplot_titles=("Plot1", "Plot2"))
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_hrect(
xref="paper",
yref="paper",
x0=1,
x1=1.5,
y0=-15,
y1=100,
line_width=0,
fillcolor="white",
opacity=1,
)
# # subplot 1
fig.update_xaxes(showgrid=True, zeroline=False)
fig.update_yaxes(showgrid=False, zeroline=False, secondary_y=True)
fig.update_yaxes(showgrid=True, zeroline=False, secondary_y=False)
fig.add_trace(
go.Scatter(
x=times,
y=[Fi * Fsnom for Fi in F],
name="Solar flux (right axis)",
line=dict(color="rgba(255, 255, 0, 0.3)", width=5),
),
secondary_y=True,
)
fig.add_trace(
go.Scatter(
x=times,
y=[100 * x["Sw"] for x in xeq],
name="White daisies area",
line=dict(color="lavender", width=7),
),
secondary_y=False,
)
# fig.add_trace(
# go.Scatter(
# x=F,
# y=[x["Sw"] for x in xeqinv],
# name="White daisies area (backwards)",
# line=dict(color="lightskyblue", dash="dot", width=5),
# ),
# )
fig.add_trace(
go.Scatter(
x=times,
y=[100 * x["Sb"] for x in xeq],
name="Black daisies area",
line=dict(color="black", width=3),
),
secondary_y=False,
)
# fig.add_trace(
# go.Scatter(
# x=F,
# y=[x["Sb"] for x in xeqinv],
# name="Black daisies area (backwards)",
# line=dict(color="darkslategray", dash="dot", width=3),
# ),
# )
fig.add_trace(
go.Scatter(
x=times,
y=[100 * x["Su"] for x in xeq],
name="Uninhabited area",
line=dict(color="saddlebrown", width=3),
),
secondary_y=False,
)
# fig.add_trace(
# go.Scatter(
# x=F,
# y=[x["Su"] for x in xeqinv],
# name="Uninhabited area (backwards)",
# line=dict(color="sienna", dash="dot", width=3),
# ),
# )
fig.update_xaxes(title="Simulation Time [Myr]", range=[0, times[-1]])
fig.update_yaxes(
title="Inhabited area [%]",
range=[0, 100],
secondary_y=False,
)
fig.update_yaxes(title_text="Solar flux [Wm-2]", secondary_y=True)
fig.update_layout(title_text="Equilibrium area vs solar flux")
fig.update_layout(plot_bgcolor="silver")
return fig
| 30.022113
| 87
| 0.554301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,644
| 0.298224
|
e64e9265798874238fe8da1d312e841fe0ab8876
| 36
|
py
|
Python
|
opendart/config/__init__.py
|
JehunYoo/opendart
|
c88105baf85af57d006cc2404d192aaf9baf73cc
|
[
"MIT"
] | null | null | null |
opendart/config/__init__.py
|
JehunYoo/opendart
|
c88105baf85af57d006cc2404d192aaf9baf73cc
|
[
"MIT"
] | 2
|
2021-07-12T10:59:20.000Z
|
2021-07-13T02:06:27.000Z
|
opendart/config/__init__.py
|
JehunYoo/opendart
|
c88105baf85af57d006cc2404d192aaf9baf73cc
|
[
"MIT"
] | null | null | null |
from opendart.config.config import *
| 36
| 36
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e64ebb3be618b728917060906d4f5af8a1cfc287
| 10,720
|
py
|
Python
|
NetworkScanners/libs/Pyssh/pyssh.py
|
isislovecruft/torflow
|
666689ad18d358d764a35d041a7b16adb8d3287c
|
[
"BSD-3-Clause"
] | null | null | null |
NetworkScanners/libs/Pyssh/pyssh.py
|
isislovecruft/torflow
|
666689ad18d358d764a35d041a7b16adb8d3287c
|
[
"BSD-3-Clause"
] | 1
|
2018-12-18T15:58:40.000Z
|
2018-12-26T16:52:51.000Z
|
NetworkScanners/libs/Pyssh/pyssh.py
|
isislovecruft/torflow
|
666689ad18d358d764a35d041a7b16adb8d3287c
|
[
"BSD-3-Clause"
] | null | null | null |
"""A SSH Interface class.
An interface to ssh on posix systems, and plink (part of the Putty
suite) on Win32 systems.
By Rasjid Wilcox.
Copyright (c) 2002.
Version: 0.2
Last modified 4 September 2002.
Drawing on ideas from work by Julian Schaefer-Jasinski, Guido's telnetlib and
version 0.1 of pyssh (http://pyssh.sourceforge.net) by Chuck Esterbrook.
Licenced under a Python 2.2 style license. See License.txt.
"""
DEBUG_LEVEL = 0
import os, getpass
import signal # should cause all KeyboardInterrupts to go to the main thread
# true for Linux, does not seem to be true under Cygwin
import nbpipe
import time
# Constants
SSH_PORT=22
SSH_PATH=''
CTRL_C=chr(3)
READ_LAZY=0
READ_SOME=1
READ_ALL=2
# set the path to ssh / plink, and choose the popen2 function to use
if os.name=='posix':
import fssa # we can look for ssh-agent on posix
# XXX Can we on Win32/others?
import ptyext # if my patch gets accepted, change this to check for a
# sufficiently high version of python, and assign ptyext=pty
# if sufficient.
sshpopen2=ptyext.popen2
CLOSE_STR='~.'
tp=os.popen('/usr/bin/which ssh')
SSH_PATH=tp.read().strip()
try:
tp.close()
except IOError:
# probably no child process
pass
if SSH_PATH == '':
tp=os.popen('command -v ssh') # works in bash, ash etc, not csh etc.
SSH_PATH=tp.read().strip()
tp.close()
if SSH_PATH == '':
check = ['/usr/bin/ssh', '/usr/local/bin/ssh', '/bin/ssh']
for item in check:
if os.path.isfile(item):
SSH_PATH=item
break
PORT_STR='-p '
else:
sshpopen2=os.popen2
CLOSE_STR=CTRL_C # FIX-ME: This does not work.
# I think I need to implement a 'kill' component
# to the close function using win32api.
SSH_PATH=''
PORT_STR='-P '
class mysshError(Exception):
"""Error class for myssh."""
pass
# Helper functions
def _prompt(prompt):
"""Print the message as the prompt for input.
Return the text entered."""
noecho = (prompt.lower().find('password:') >= 0) or \
(prompt.lower().find('passphrase:') >=0)
print """User input required for ssh connection.
(Type Ctrl-C to abort connection.)"""
abort = 0
try:
if noecho:
response = getpass.getpass(prompt)
else:
response = raw_input(prompt)
except KeyboardInterrupt:
response = ''
abort = 1
return response, abort
class Ssh:
"""A SSH connection class."""
def __init__(self, username=None, host='localhost', port=None):
"""Constructor. This does not try to connect."""
self.debuglevel = DEBUG_LEVEL
self.sshpath = SSH_PATH
self.username = username
self.host = host
self.port = port
self.isopen = 0
self.sshpid = 0 # perhaps merge this with isopen
self.old_handler = signal.getsignal(signal.SIGCHLD)
sig_handler = signal.signal(signal.SIGCHLD, self.sig_handler)
def __del__(self):
"""Destructor -- close the connection."""
if self.isopen:
self.close()
def sig_handler(self, signum, stack):
""" Handle SIGCHLD signal """
if signum == signal.SIGCHLD:
try:
os.waitpid(self.sshpid, 0)
except:
pass
if self.old_handler != signal.SIG_DFL:
self.old_handler(signum, stack)
def attach_agent(self, key=None):
if os.name != 'posix':
# only posix support at this time
return
if 'SSH_AUTH_SOCK' not in os.environ.keys():
fssa.fssa(key)
def set_debuglevel(self, debuglevel):
"""Set the debug level."""
self.debuglevel = debuglevel
def set_sshpath(self, sshpath):
"""Set the ssh path."""
self.sshpath=sshpath
# Low level functions
def open(self, cmd=None):
"""Opens a ssh connection.
Raises an mysshError if myssh.sshpath is not a file.
Raises an error if attempting to open an already open connection.
"""
self.attach_agent()
if not os.path.isfile(self.sshpath):
raise mysshError, \
"Path to ssh or plink is not defined or invalid.\nsshpath='%s'" \
% self.sshpath
if self.isopen:
raise mysshError, "Connection already open."
sshargs = ''
if self.sshpath.lower().find('plink') != -1:
sshargs = '-ssh '
if self.port and self.port != '':
sshargs += PORT_STR + `self.port` + ' '
if self.username and self.username !='':
sshargs += self.username + '@'
sshargs += self.host
if cmd:
sshargs += ' ' + cmd
if self.debuglevel:
print ">> Running %s %s." % (self.sshpath, sshargs)
# temporary workaround until I get pid's working under win32
if os.name == 'posix':
self.sshin, self.sshoutblocking, self.sshpid = \
sshpopen2(self.sshpath + ' ' + sshargs)
else:
self.sshin, self.sshoutblocking = \
sshpopen2(self.sshpath + ' ' + sshargs)
self.sshout = nbpipe.nbpipe(self.sshoutblocking)
self.isopen = 1
if self.debuglevel:
print ">> ssh pid is %s." % self.sshpid
def close(self, addnewline=1):
"""Close the ssh connection by closing the input and output pipes.
Returns the closing messages.
On Posix systems, by default it adds a newline before sending the
disconnect escape sequence. Turn this off by setting addnewline=0.
"""
if os.name == 'posix':
try:
if addnewline:
self.write('\n')
self.write(CLOSE_STR)
except (OSError, IOError, mysshError):
pass
output = self.read_lazy()
try:
self.sshin.close()
self.sshoutblocking.close()
except:
pass
if os.name == 'posix':
try:
os.kill(self.sshpid, signal.SIGHUP)
except:
pass
self.isopen = 0
if self.debuglevel:
print ">> Connection closed."
return output
def write(self, text):
"""Send text to the ssh process."""
# May block?? Probably not in practice, as ssh has a large input buffer.
if self.debuglevel:
print ">> Sending %s" % text
if self.isopen:
while len(text):
numtaken = os.write(self.sshin.fileno(),text)
if self.debuglevel:
print ">> %s characters taken" % numtaken
text = text[numtaken:]
else:
raise mysshError, "Attempted to write to closed connection."
# There is a question about what to do with connections closed by the other
# end. Should write and read check for this, and force proper close?
def read_very_lazy(self):
"""Very lazy read from sshout. Just reads from text already queued."""
return self.sshout.read_very_lazy()
def read_lazy(self):
"""Lazy read from sshout. Waits a little, but does not block."""
return self.sshout.read_lazy()
def read_some(self):
"""Always read at least one block, unless the connection is closed.
        May block."""
if self.isopen:
return self.sshout.read_some()
else:
return self.sshout.read_very_lazy()
def read_all(self):
"""Reads until end of file hit. May block."""
if self.isopen:
return self.sshout.read_all()
else:
return self.sshout.read_very_lazy()
    # High level functions
def login(self, logintext='Last login:', prompt_callback=_prompt):
"""Logs in to the ssh host. Checks for standard prompts, and calls
        the function passed as prompt_callback to process them.
Returns the login banner, or 'None' if login process aborted.
"""
self.open()
banner = self.read_some()
if self.debuglevel:
print ">> 1st banner read is: %s" % banner
while banner.find(logintext) == -1:
response, abort = prompt_callback(banner)
if abort:
return self.close()
self.write(response + '\n')
banner = self.read_some()
return banner
def logout(self):
"""Logs out the session."""
self.close()
def sendcmd(self, cmd, readtype=READ_SOME):
"""Sends the command 'cmd' over the ssh connection, and returns the
result. By default it uses read_some, which may block.
"""
if cmd[-1] != '\n':
cmd += '\n'
self.write(cmd)
if readtype == READ_ALL:
return self.read_all()
elif readtype == READ_LAZY:
return self.read_lazy()
else:
return self.read_some()
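# Illustrative sketch (hypothetical usage, not part of the original module):
#
#     con = Ssh('user', 'example.com', '22')
#     banner = con.login()             # prompts interactively for password/passphrase
#     print con.sendcmd('uname -a')    # Python 2 print, matching the rest of this module
#     con.logout()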
def test():
"""Test routine for myssh.
Usage: python myssh.py [-d] [-sshp path-to-ssh] [username@host | host] [port]
Default host is localhost, default port is 22.
"""
import sys
debug = 0
if sys.argv[1:] and sys.argv[1] == '-d':
debug = 1
del sys.argv[1]
testsshpath = SSH_PATH
if sys.argv[1:] and sys.argv[1] == '-sshp':
testsshpath = sys.argv[2]
del sys.argv[1]
del sys.argv[1]
testusername = None
testhost = 'localhost'
testport = '22'
if sys.argv[1:]:
testhost = sys.argv[1]
if testhost.find('@') != -1:
testusername, testhost = testhost.split('@')
if sys.argv[2:]:
testport = sys.argv[2]
testcon = Ssh(testusername, testhost, testport)
testcon.set_debuglevel(debug)
testcon.set_sshpath(testsshpath)
testcon.login()
cmd = None
while (cmd != 'exit') and testcon.isopen:
cmd = raw_input("Enter command to send: ")
print testcon.sendcmd(cmd)
testcon.close()
if __name__ == '__main__':
test()
| 33.395639
| 82
| 0.547201
| 6,942
| 0.647575
| 0
| 0
| 0
| 0
| 0
| 0
| 3,749
| 0.34972
|
e64ec15e4f7b983862625b28f909feef4c9e7bb4
| 3,894
|
py
|
Python
|
pygacal/camera/__init__.py
|
ereide/pyga-camcal
|
fd25748ddb11c5b05ef24a2deca2689e0d899875
|
[
"MIT"
] | 5
|
2018-05-22T09:11:31.000Z
|
2022-03-11T02:32:01.000Z
|
pygacal/camera/__init__.py
|
ereide/pyga-camcal
|
fd25748ddb11c5b05ef24a2deca2689e0d899875
|
[
"MIT"
] | null | null | null |
pygacal/camera/__init__.py
|
ereide/pyga-camcal
|
fd25748ddb11c5b05ef24a2deca2689e0d899875
|
[
"MIT"
] | null | null | null |
from clifford import g3c
import numpy as np
import scipy.optimize as opt
from pygacal.rotation.costfunction import restrictedImageCostFunction, restrictedMultiViewImageCostFunction
from pygacal.rotation import minimizeError
from pygacal.rotation.mapping import BivectorLineImageMapping, BivectorLineMapping, LinePropertyBivectorMapping, BivectorLineEstimationMapping
from pygacal.common.cgatools import Sandwich, Dilator, Translator, Reflector, inversion, Rotor, Transversor, I3, I5, VectorEquality, anticommuter, ga_exp, Meet
#Defining variables
layout = g3c.layout
locals().update(g3c.blades)
ep, en, up, down, homo, E0, ninf, no = (g3c.stuff["ep"], g3c.stuff["en"],
g3c.stuff["up"], g3c.stuff["down"], g3c.stuff["homo"],
g3c.stuff["E0"], g3c.stuff["einf"], -g3c.stuff["eo"])
class SLAM(object):
def __init__(self, model_estimate, lines_img_base, lines_imgs, R_start = None, mapping = BivectorLineImageMapping):
self.mapping = mapping
self.model_estimate = model_estimate
self.lines_img_base = lines_img_base
self.lines_imgs = lines_imgs
assert(len(lines_imgs[0]) == len(model_estimate))
if R_start is None:
self.R_estimate = [None for _ in range(len(lines_imgs))]
else:
assert(len(R_start) == len(lines_imgs))
self.R_estimate = R_start
def cost(self):
cost = sum([self.mapping.costfunction(self.R_estimate[i], self.model_estimate, self.lines_imgs[i]) for i in range(len(self.lines_imgs))])
return cost/len(self.lines_imgs)
def updateLocation(self):
print("Update Location")
for i in range(len(self.lines_imgs)):
args = (self.model_estimate, self.lines_imgs[i])
if (self.R_estimate[i] is None):
x0 = None
else:
x0 = self.mapping.inverserotorconversion(self.R_estimate[i])
R_min, N_int = minimizeError(args, self.mapping, x0 = x0)
self.R_estimate[i] = R_min
print("N_int = ", N_int)
print("Complete: Update location")
def addImage(self, lines_img_new, R_img_new = None):
self.lines_imgs.append(lines_img_new)
self.R_estimate.append(R_img_new)
def improveLine(self, i, O1 = up(0)):
line_guesses = []
R_B = self.R_estimate[ 0 ]
Line_A = self.lines_img_base[i]
Line_B = self.lines_imgs[0][i]
P_A = (O1 ^ Line_A).normal()
P_B = (R_B * (O1 ^ Line_B) * ~R_B).normal()
new_line = Meet(P_A, P_B)
line_guesses.append(new_line)
for j in range(1, len(self.R_estimate)):
R_A = self.R_estimate[j-1]
R_B = self.R_estimate[ j ]
Line_A = self.lines_imgs[j-1][i]
Line_B = self.lines_imgs[ j ][i]
P_A = (R_A * (O1 ^ Line_A) * ~R_A).normal()
P_B = (R_B * (O1 ^ Line_B) * ~R_B).normal()
new_line = Meet(P_A, P_B)
line_guesses.append(new_line)
for guess in line_guesses:
print("guess ", guess)
print("model ", self.model_estimate[i], "\n")
return self.averageLines(self.model_estimate[i], line_guesses)
def averageLines(self, line_start, line_guesses):
mapping = BivectorLineEstimationMapping
args = [line_start, line_guesses]
x0 = np.random.normal(0.01, size=6)
R_min, Nint = minimizeError(args, mapping, x0 = x0)
return R_min * line_start * ~R_min
def updateModel(self):
        if any(R is None for R in self.R_estimate):
self.updateLocation()
print("Update Model ")
for i in range(len(self.model_estimate)):
self.model_estimate[i] = self.improveLine(i)
print("Complete: model update")
| 34.460177
| 159
| 0.612994
| 3,024
| 0.776579
| 0
| 0
| 0
| 0
| 0
| 0
| 172
| 0.044171
|
e651cc7d2f10c7d86c7ce7b411ef45695942e02f
| 1,320
|
py
|
Python
|
tests/test_utils.py
|
SalemHarrache/dbcut
|
0fd60e15f3b5532c5c531923d2e9ef08ce17c935
|
[
"MIT"
] | 16
|
2019-11-22T16:36:56.000Z
|
2022-03-12T01:49:03.000Z
|
tests/test_utils.py
|
SalemHarrache/dbcut
|
0fd60e15f3b5532c5c531923d2e9ef08ce17c935
|
[
"MIT"
] | 3
|
2019-11-23T06:11:30.000Z
|
2020-06-23T13:34:04.000Z
|
tests/test_utils.py
|
SalemHarrache/dbcut
|
0fd60e15f3b5532c5c531923d2e9ef08ce17c935
|
[
"MIT"
] | 4
|
2019-11-22T20:42:57.000Z
|
2022-02-23T09:10:00.000Z
|
import unittest
from collections import OrderedDict
from dbcut.utils import sorted_nested_dict
def test_simple_dict_is_sorted():
data = {
"c": 1,
"a": 2,
"b": 3,
}
expected = OrderedDict([("a", 2), ("b", 3), ("c", 1)])
assert expected == sorted_nested_dict(data)
def test_nested_iterables_are_sorted():
data = {
"c": [1, 3, 2],
"a": 2,
"b": (3, 1, 2),
}
expected = OrderedDict(
[
("a", 2),
# The tuple is transformed into a list here. Still an iterable though.
("b", [1, 2, 3]),
("c", [1, 2, 3]),
]
)
assert expected == sorted_nested_dict(data)
def test_nested_dicts_are_sorted():
data = {
"c": 1,
"a": {"b": 1, "a": 2},
"b": 3,
}
expected = OrderedDict(
[("a", OrderedDict([("a", 2), ("b", 1)])), ("b", 3), ("c", 1)]
)
assert expected == sorted_nested_dict(data)
def test_non_dicts_are_untouched():
data = "ravioli"
assert data is sorted_nested_dict(data)
data = ["r", "a", "v", "i", "o", "l", "i"]
assert data is sorted_nested_dict(data)
data = 42
assert data is sorted_nested_dict(data)
class Custom:
pass
data = Custom()
assert data is sorted_nested_dict(data)
| 22.372881
| 82
| 0.524242
| 26
| 0.019697
| 0
| 0
| 0
| 0
| 0
| 0
| 166
| 0.125758
|
e6543ff7671521504ac838b1689dbe9bfbccaca2
| 4,704
|
py
|
Python
|
sprout/runner.py
|
tjduigna/sprout
|
d8762ce7e6f04bb082b8ca1e65f73d8900338d9d
|
[
"Apache-2.0"
] | null | null | null |
sprout/runner.py
|
tjduigna/sprout
|
d8762ce7e6f04bb082b8ca1e65f73d8900338d9d
|
[
"Apache-2.0"
] | null | null | null |
sprout/runner.py
|
tjduigna/sprout
|
d8762ce7e6f04bb082b8ca1e65f73d8900338d9d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019, Sprout Development Team
# Distributed under the terms of the Apache License 2.0
import os
import asyncio
import asyncpg
from tortoise import Tortoise
import sprout
class Runner(sprout.Log):
"""An object-oriented interface
to the sprout utilities.
Args:
cfg (str,dict): config or path to it
env (str): key in cfg if it's nested
rc (str): path to secrets yaml file
app (str): app name
"""
_loop = asyncio.get_event_loop()
def _init_cfg(self, cfg):
if isinstance(cfg, str):
cfg = sprout.load_yml(cfg)
if not isinstance(cfg, dict) or not cfg:
raise Exception("cfg not understood")
if self.env is not None:
cfg = cfg[self.env]
for key in ['host', 'port', 'database']:
if key not in cfg:
raise Exception(f"'{key}' not found in cfg")
if 'username' not in cfg:
raise Exception("'username' not found in cfg")
if self.rc is not None:
cfg.update(sprout.load_yml(self.rc))
return cfg
def __init__(self, cfg, env=None, rc=None,
app=None, schemas=None):
self.env = env
self.rc = rc
self._cfg = self._init_cfg(cfg)
self.app = app
if schemas is None:
schemas = []
self.schemas = schemas
def db_str(self, dbname=None, schema=None):
"""Construct a 'jdbc' string"""
c = self._cfg
dbname = dbname or c['database']
auth = f"{c['username']}:{c['password']}"
url = f"{c['host']}:{c['port']}"
base = f"{c['driver']}://{auth}@{url}"
if schema is not None:
return f"{base}/{dbname}?schema={schema}"
return f"{base}/{dbname}"
async def _create_database(self):
if self.app is None:
self.log.error("has no app")
return
con = await asyncpg.connect(self.db_str(dbname='postgres'))
try:
await con.execute(f"create database {self.app};")
except asyncpg.exceptions.DuplicateDatabaseError:
sprout.cfg.log.info(f"database {self.app} exists")
finally:
await con.close()
async def _create_schemas(self):
if not self.app or not self.schemas:
self.log.error("either has no app or schemas")
return
con = await asyncpg.connect(self.db_str())
for name in self.schemas:
try:
await con.execute(f"create schema {name};")
except asyncpg.exceptions.DuplicateSchemaError:
sprout.cfg.log.info(f"schema {name} exists")
await con.close()
async def _init_schemas(self):
await self._create_database()
for schema in self.schemas:
await self._create_schemas()
await Tortoise.init(
db_url=self.db_str(schema=schema),
modules={'models': [f'{self.app}.orm.{schema}']}
)
await Tortoise.generate_schemas()
self.log.info(f"'{schema}' ready")
async def _init_db_pool(self):
c = self._cfg.copy()
c['user'] = c.pop('username')
c.pop('driver')
if self.app is None:
self.log.error("no app name provided")
return
c['database'] = self.app
pw = c.pop('password')
self.log.info(f"db_pool: {c}")
c['password'] = pw
pool = await asyncpg.create_pool(**c)
return pool
def create_database(self, app=None):
"""Initialize db"""
self.app = app or self.app
self._loop.run_until_complete(self._create_database())
def create_schemas(self, app=None, schemas=None):
"""Initialize db schemas"""
self.app = app or self.app
self.schemas = schemas or self.schemas
self._loop.run_until_complete(self._create_schemas())
def init_schemas(self, app=None, schemas=None):
"""Initialize db tables"""
self.app = app or self.app
self.schemas = schemas or self.schemas
self._loop.run_until_complete(self._init_schemas())
def init_db_pool(self, app=None):
"""Initialize db connection pool"""
self.app = app or self.app
pool = self._loop.run_until_complete(self._init_db_pool())
return pool
def easy_up(self, app):
"""Initialize everything and return a db
connection pool."""
self.create_database(app=app)
schemas = []
self.create_schemas(app=app, schemas=schemas)
self.init_schemas(app=app, schemas=schemas)
return self.init_db_pool(app=app)
| 32.895105
| 67
| 0.577594
| 4,492
| 0.954932
| 0
| 0
| 0
| 0
| 1,714
| 0.364371
| 1,133
| 0.240859
|
e6546737a433ea44c0aabf656ba019b30d17d227
| 1,033
|
py
|
Python
|
tests/test_service.py
|
beepscore/pi_gpio_service
|
47aa9c6e4e378a168320d1f42b6d4c18c998e4db
|
[
"MIT"
] | 2
|
2018-10-16T18:22:04.000Z
|
2021-05-04T21:09:53.000Z
|
tests/test_service.py
|
beepscore/pi_gpio_service
|
47aa9c6e4e378a168320d1f42b6d4c18c998e4db
|
[
"MIT"
] | null | null | null |
tests/test_service.py
|
beepscore/pi_gpio_service
|
47aa9c6e4e378a168320d1f42b6d4c18c998e4db
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env/python3
import unittest
from pi_gpio_service import service
class TestPiGpioService(unittest.TestCase):
def test_input_pins(self):
pins = {
'23': {'name': 'IN_23', 'pin_direction': 'input'},
'24': {'name': 'OUT_24', 'pin_direction': 'output'},
'25': {'name': 'OUT_25', 'pin_direction': 'output'}
}
# call method under test
self.assertEqual(service.input_pins(pins),
{'23': {'name': 'IN_23', 'pin_direction': 'input'}})
def test_output_pins(self):
pins = {
'23': {'name': 'IN_23', 'pin_direction': 'input'},
'24': {'name': 'OUT_24', 'pin_direction': 'output'},
'25': {'name': 'OUT_25', 'pin_direction': 'output'}
}
expected = {'24': {'name': 'OUT_24', 'pin_direction': 'output'},
'25': {'name': 'OUT_25', 'pin_direction': 'output'}}
# call method under test
self.assertEqual(service.output_pins(pins), expected)
| 30.382353
| 77
| 0.535334
| 954
| 0.923524
| 0
| 0
| 0
| 0
| 0
| 0
| 433
| 0.419167
|
e654e957c98bffeffb8209db916fbae89bbb1792
| 2,726
|
py
|
Python
|
sangam_poem_csv.py
|
naturalstupid/sangam_tamil_bot
|
2b8117504f10ce4b4bdc2fa8160951374c9d1516
|
[
"MIT"
] | null | null | null |
sangam_poem_csv.py
|
naturalstupid/sangam_tamil_bot
|
2b8117504f10ce4b4bdc2fa8160951374c9d1516
|
[
"MIT"
] | null | null | null |
sangam_poem_csv.py
|
naturalstupid/sangam_tamil_bot
|
2b8117504f10ce4b4bdc2fa8160951374c9d1516
|
[
"MIT"
] | null | null | null |
import string
import regex
import pandas as pd
sangam_text_folder = "./sangam_tamil_text/"
sangam_poem_folder = "./sangam_tamil_poems/"
sangam_csv_folder = "./sangam_tamil_csv/"
data_files = ['agananuru','purananuru','ainkurunuru','kalithokai', 'kurunthokai', 'natrinai', 'pathitrupathu', 'pattinapaalai',
'mullaipaattu', 'nedunalvaadai', 'kurinjipaattu','malaipadukadaam','maduraikaanji','porunaraatrupadai',
'perumpaanaatrupadai', 'sirupaanaatrupadai', 'thirumurugaatrupadai', 'ainthinaiezhupathu', 'ainthinaiaimpathu',
'kaarnaarpathu','thinaimozhiaimpathu','kainnilai','thinaimaalainootraimbathu']#, 'thirukkural' ]
POEM_TYPES = ['அகநானூறு', 'புறநானூறு', 'ஐங்குறுநூறு', 'கலித்தொகை', 'குறுந்தொகை', 'நற்றிணை', 'பதிற்றுப்பத்து', 'பட்டினப்பாலை',
'முல்லைப்பாட்டு', 'நெடுநல்வாடை','குறிஞ்சிப்பாட்டு','மலைபடுகடாம்', 'மதுரைக்காஞ்சி','பொருநராற்றுப்படை',
'பெரும்பாணாற்றுப்படை', 'சிறுபாணாற்றுப்படை','திருமுருகாற்றுப்படை','ஐந்திணை எழுபது','ஐந்திணை ஐம்பது','கார் நாற்பது',
'திணைமொழி ஐம்பது','கைந்நிலை','திணைமாலை நூற்றைம்பது']#,'திருக்குறள்']
EN_POEM_TYPES = ['Akanānūru','Puranānūru','Ainkurunūru','Kalithokai','Kurunthokai','Natrinai','Pathitruppathu','Pattinapaalai',
'Mullaipaattu','Nedunalvaadai','Kurinjippāttu','Malaipadukadaam','Maduraikaanji','Porunaratrupadai',
'Perumpaanatrupadai','Sirupaanaatrupadai','Thirumurugaatrupadai','Ainthinai Ezhupathu','Aithinai Aimbathu',
'Kaar Naarpathu','Thinaimozhi Aimpathu','Kainnilai','Thinaimaalai Nootraimbathu'
]
sangam_poem_csv_file = sangam_csv_folder+"sangam_poems.csv"
sangam_poems_combined = []
csv_separator = ","
for i, sangam_poem in enumerate(data_files):
csv_file = sangam_csv_folder+sangam_poem+".csv" # agananuru
print("reading poems from",csv_file)
df = pd.read_csv(csv_file,encoding='utf-8',sep=csv_separator,header=0,usecols=['poem'],index_col=None)
df['poem_type'] = POEM_TYPES[i]
df['poem'] = df['poem'].str.translate(str.maketrans('', '', string.punctuation))
df['poem'] = df['poem'].str.replace("‘", '')
df['poem'] = df['poem'].str.replace("’", '')
df['poem'] = df['poem'].str.replace("“", '')
df['poem'] = df['poem'].str.replace("”", '')
df['poem'] = df['poem'].replace("\d+","",regex=True)
sangam_poems_combined.append(df)
print("Combining all sangam poems into a single database")
sangam_df = pd.concat(sangam_poems_combined,axis=0,ignore_index=True)
print("Writing sangam poems into",sangam_poem_csv_file)
sangam_df.to_csv(sangam_poem_csv_file,encoding='utf-8',sep=csv_separator, index=False, columns=["poem_type", "poem"])
| 69.897436
| 128
| 0.655906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,103
| 0.627761
|
e65ac754450ace4ed72f64034c033bc64885d270
| 4,026
|
py
|
Python
|
backend/src/chat/api/views.py
|
CSCapstone2019/WebDoctor
|
cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5
|
[
"MIT"
] | 4
|
2019-09-13T14:50:22.000Z
|
2019-11-27T03:19:44.000Z
|
backend/src/chat/api/views.py
|
CSCapstone2019/WebDoctor
|
cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5
|
[
"MIT"
] | 8
|
2019-09-15T23:02:21.000Z
|
2022-02-10T09:26:10.000Z
|
backend/src/chat/api/views.py
|
CSCapstone2019/WebDoctor
|
cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from rest_framework import viewsets
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import FileSystemStorage
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
DestroyAPIView,
UpdateAPIView
)
from patients.models import Chat, Contact, Schedule, Scheduler, Report, Uploader
from chat.views import get_user_contact, get_user_scheduler, get_user_uploader
from .serializers import ChatSerializer, ScheduleSerializer, ReportSerializer
User = get_user_model()
class ChatListView(ListAPIView):
serializer_class = ChatSerializer
permission_classes = (permissions.AllowAny, )
def get_queryset(self):
queryset = Chat.objects.all()
username = self.request.query_params.get('username', None)
if username is not None:
contact = get_user_contact(username)
queryset = contact.chats.all()
return queryset
class ChatDetailView(RetrieveAPIView):
queryset = Chat.objects.all()
serializer_class = ChatSerializer
permission_classes = (permissions.AllowAny, )
class ChatCreateView(CreateAPIView):
queryset = Chat.objects.all()
serializer_class = ChatSerializer
permission_classes = (permissions.IsAuthenticated, )
class ChatUpdateView(UpdateAPIView):
queryset = Chat.objects.all()
serializer_class = ChatSerializer
permission_classes = (permissions.IsAuthenticated, )
class ChatDeleteView(DestroyAPIView):
queryset = Chat.objects.all()
serializer_class = ChatSerializer
permission_classes = (permissions.IsAuthenticated, )
# SCHEDULE
class ScheduleListView(ListAPIView):
serializer_class = ScheduleSerializer
permission_classes = (permissions.AllowAny, )
def get_queryset(self):
queryset = Schedule.objects.all()
username = self.request.query_params.get('username', None)
if username is not None:
scheduler = get_user_scheduler(username)
queryset = scheduler.schedule.all()
return queryset
class ScheduleDetailView(RetrieveAPIView):
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
permission_classes = (permissions.AllowAny, )
class ScheduleCreateView(CreateAPIView):
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
permission_classes = (permissions.IsAuthenticated, )
class ScheduleUpdateView(UpdateAPIView):
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
permission_classes = (permissions.IsAuthenticated, )
class ScheduleDeleteView(DestroyAPIView):
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
permission_classes = (permissions.IsAuthenticated, )
# UPLOAD
class ReportListView(ListAPIView):
serializer_class = ReportSerializer
permission_classes = (permissions.AllowAny, )
def get_queryset(self):
queryset = Report.objects.all()
username = self.request.query_params.get('username', None)
if username is not None:
uploader = get_user_uploader(username)
queryset = uploader.report.all()
return queryset
class ReportDetailView(RetrieveAPIView):
queryset = Report.objects.all()
serializer_class = ReportSerializer
permission_classes = (permissions.AllowAny, )
class ReportCreateView(CreateAPIView):
queryset = Report.objects.all()
serializer_class = ReportSerializer
permission_classes = (permissions.IsAuthenticated, )
class ReportUpdateView(UpdateAPIView):
queryset = Report.objects.all()
serializer_class = ReportSerializer
permission_classes = (permissions.IsAuthenticated, )
class ReportDeleteView(DestroyAPIView):
queryset = Report.objects.all()
serializer_class = ReportSerializer
permission_classes = (permissions.IsAuthenticated, )
| 29.822222
| 80
| 0.748634
| 3,281
| 0.814953
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.011923
|
e65bcafb9495c37c2cdeefdfa42cd99132b78632
| 6,256
|
py
|
Python
|
flask_opa.py
|
hirosh7/flask-opa
|
a090083ce62944d1085a6923572ed9c68f0dbfa3
|
[
"MIT"
] | 34
|
2018-10-16T03:12:44.000Z
|
2022-02-21T09:53:13.000Z
|
flask_opa.py
|
hirosh7/flask-opa
|
a090083ce62944d1085a6923572ed9c68f0dbfa3
|
[
"MIT"
] | 12
|
2018-10-17T00:41:27.000Z
|
2021-03-16T12:58:33.000Z
|
flask_opa.py
|
hirosh7/flask-opa
|
a090083ce62944d1085a6923572ed9c68f0dbfa3
|
[
"MIT"
] | 8
|
2019-05-28T19:54:41.000Z
|
2022-02-23T13:19:33.000Z
|
"""
Flask Extension for OPA
"""
import requests
from flask.app import Flask
__version__ = "1.0.0"
class OPAException(Exception):
"""Exception evaluating a request in OPA"""
def __init__(self, message):
super().__init__(message)
class OPAUnexpectedException(OPAException):
"""Unexpected error evaluating the request in OPA"""
def __init__(self, message='Unexpected error'):
super().__init__(message)
class AccessDeniedException(OPAException):
"""OPA Denied the request"""
def __init__(self, message='Denied'):
super().__init__(message)
class OPAServerUnavailableException(OPAException):
"""When it cannot connect to the OPA Server"""
def __init__(self, message='OPA Server unavailable'):
super().__init__(message)
class OPA(object):
def __init__(self,
app: Flask,
input_function,
url: str = None,
allow_function=None,
wait_time: int = 20000):
super(OPA, self).__init__()
self._app = app
self._pep = {}
self._input_function = input_function
self._allow_function = allow_function or self.default_allow_function
self._deny_on_opa_fail = app.config.get('OPA_DENY_ON_FAIL', True)
self._url = url or app.config.get('OPA_URL')
self._wait_time = wait_time or app.config.get('OPA_WAIT_TIME')
if self._app.config.get('OPA_SECURED', False):
self.secured()
@staticmethod
def secure(*args, **kwargs):
return OPA(*args, **kwargs).secured()
def secured(self,
url=None,
input_function=None,
allow_function=None):
"""Secure app"""
if self.check_authorization not in self._app.before_request_funcs:
self._url = url or self._url
self._allow_function = allow_function or self._allow_function
self._input_function = input_function or self._input_function
if self._url and self._input_function and self._allow_function:
self._app.before_request(self.check_authorization)
else:
raise ValueError("Invalid OPA configuration")
return self
def check_authorization(self):
input = self.input
url = self.url
try:
response = self.query_opa(url, input)
if response is not None:
self.check_opa_response(response)
except OPAException as e:
if self.deny_on_opa_fail:
raise e
def query_opa(self, url, input):
self._app.logger.debug("%s query: %s. content: %s",
self.app, url, input)
try:
return requests.post(url, json=input, timeout=self.wait_time)
except requests.exceptions.ConnectionError as e:
if self.deny_on_opa_fail:
raise OPAServerUnavailableException(str(e))
def check_opa_response(self, response):
if response.status_code != 200:
opa_error = "OPA status code: {}. content: {}".format(
response.status_code, str(response)
)
self._app.logger.error(opa_error)
raise OPAUnexpectedException(opa_error)
resp_json = response.json()
self._app.logger.debug(" => %s", resp_json)
if not self.allow_function(resp_json):
raise AccessDeniedException()
return resp_json
def __call__(self, name: str, url: str,
input_function=None,
allow_function=None):
"""Creates a PEP"""
return PEP(self, name, url, input_function, allow_function)
@property
def pep(self):
return self._pep
@property
def url(self):
return self._url
@url.setter
def url(self, value):
self._url = value
@property
def deny_on_opa_fail(self):
return self._deny_on_opa_fail
@deny_on_opa_fail.setter
def deny_on_opa_fail(self, value):
self._deny_on_opa_fail = value
@property
def input(self):
return self.input_function()
@property
def input_function(self):
return self._input_function
@property
def allow_function(self):
return self._allow_function
@property
def app(self):
return self._app
@property
def wait_time(self):
return self._wait_time
@wait_time.setter
def wait_time(self, value):
self._wait_time = value
@classmethod
def default_allow_function(cls, response_json):
return response_json.get('result', False)
class PEP(OPA):
"""Class to handle Policy Enforcement Points"""
def __init__(self,
opa: OPA,
name: str,
url: str,
input_function=None,
allow_function=None,
deny_on_opa_fail: bool = False):
super(OPA, self).__init__()
self._app = opa.app
opa.pep[name] = self
self._url = url
self._input_function = input_function or opa.input_function
self._allow_function = allow_function or opa.allow_function
self._deny_on_opa_fail = deny_on_opa_fail or False
self._wait_time = opa.wait_time
self._name = name or "PEP"
if not (self._app and self._url and
self._input_function and self._allow_function):
raise ValueError("Invalid Police Enforcement Point configuration")
def check_authorization(self, *args, **kwargs):
_input = self.input(*args, **kwargs)
response = self.query_opa(self.url, _input)
if response is not None:
self.check_opa_response(response)
def __call__(self, f):
def secure_function(*args, **kwargs):
try:
self.check_authorization(*args, **kwargs)
return f(*args, **kwargs)
except OPAException as e:
if self.deny_on_opa_fail:
raise e
return secure_function
def input(self, *args, **kwargs):
return self._input_function(*args, **kwargs)
def __str__(self):
return "<{}>".format(self._name)
| 30.076923
| 78
| 0.605499
| 6,139
| 0.981298
| 0
| 0
| 972
| 0.155371
| 0
| 0
| 557
| 0.089035
|
e65e9051029543698ac667d8972b05b6ac01763f
| 8,920
|
py
|
Python
|
model.py
|
Schrodinger1926/Project-3
|
88f8a1411a712a8ba62036e400ebce9e6df8e40f
|
[
"MIT"
] | null | null | null |
model.py
|
Schrodinger1926/Project-3
|
88f8a1411a712a8ba62036e400ebce9e6df8e40f
|
[
"MIT"
] | null | null | null |
model.py
|
Schrodinger1926/Project-3
|
88f8a1411a712a8ba62036e400ebce9e6df8e40f
|
[
"MIT"
] | null | null | null |
import sys
import os
import csv
from random import shuffle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten,\
Dense,\
Lambda,\
Conv2D,\
MaxPooling2D,\
Dropout, \
Cropping2D
DATA_DIR = 'data'
IMG_DIR = os.path.join(DATA_DIR, 'IMG')
samples = []
with open(os.path.join(DATA_DIR, 'driving_log.csv')) as csvfile:
reader = csv.reader(csvfile)
next(reader)
for line in reader:
samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def fetch_view_angle(batch_sample):
"""
Conducts Preprocessing on a single data point.
1. flips original image
    2. adds an offset to the steering angle depending upon camera view, i.e. left, center, right.
Arguments
---------
batch_sample: array_like
Elements as [path_center_image, path_left_image, path_right_image, steering_angle, ..]
Returns
---------
res_images: array_like
        Elements as original and flipped images of each camera view as numpy ndarray.
res_angles: array_like
        Elements as steering angle of original and flipped images of each camera view as float.
"""
res_images, res_angles = [], []
# fetch center angle
center_angle = float(batch_sample[3])
viewpoints = ['center', 'left', 'right']
for idx, view in enumerate(viewpoints):
filename = os.path.join(IMG_DIR, batch_sample[idx].split('/')[-1])
image = cv2.imread(filename)
# Store original image
res_images.append(image)
        # store flipped image
res_images.append(cv2.flip(image, 1))
offset = 0.1
if view == 'center':
# Store angles
res_angles.append(center_angle)
# Store flip angle
res_angles.append(-center_angle)
if view == 'left':
# Store angle
res_angles.append(center_angle + offset)
# Store flip angle
res_angles.append(-(center_angle + offset))
if view == 'right':
# Store angle
res_angles.append(center_angle - offset)
            # Store flipped angle
res_angles.append(-(center_angle - offset))
return res_images, res_angles
def generator(samples, batch_size=32):
"""
Generates a batch of data on the fly
Arguments
---------
samples: numpy ndarray
4 dimensional numpy array of images
batch_size: int
Size of the data to be generated
Returns
---------
4-D numpy ndarray of size(axis = 0) batch_size
"""
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
_images, _angles = fetch_view_angle(batch_sample = batch_sample)
images.extend(_images)
angles.extend(_angles)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
def sanity_check_model():
"""
    Bare-bones model with no hidden layer, i.e. flattened input features
    directly connected to the output node.
    This model is supposed to be used when building the pipeline, with minimal focus
    on model performance.
Returns
---------
keras model
"""
# Initialize model
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Flatten(input_shape = (160, 320, 3)))
# Normalization
model.add(Lambda(lambda x: (x - 127)/127))
# Fully connected layer
model.add(Dense(1))
    # Compile model
model.compile(loss='mse', optimizer='adam')
return model
def LeNet():
"""
Conventional LeNet model.
    This model is supposed to be used when building insight into the model's performance.
Returns
---------
keras model
"""
# Initialize model
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x - 127)/255, input_shape = (160, 320, 3)))
# Crop image, removing hood and beyond horizon
model.add(Cropping2D(cropping = ((70, 25), (0, 0))))
# First: Convolutional layer
model.add(Conv2D(6, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Second: Convolutional layer
model.add(Conv2D(6, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Third: Fully Connected layer
model.add(Flatten())
model.add(Dense(120))
model.add(Dropout(0.5))
# Fourth: Fully Connected layer
model.add(Dense(84))
model.add(Dropout(0.5))
    # Fifth: Output layer
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
def nvidia():
"""
    Model architecture used by Nvidia for end-to-end human behaviour cloning.
Reference: https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    This is an even more powerful network with 5 convolutional layers and 3 fully connected layers.
Returns
---------
keras model
"""
# Initialize model
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x - 127)/255, input_shape = (160, 320, 3)))
# Crop image, removing hood and beyond horizon
model.add(Cropping2D(cropping = ((70, 25), (0, 0))))
# First: Convolutional layer
model.add(Conv2D(24, (5, 5), strides = (2, 2), activation='relu'))
model.add(Dropout(0.25))
#model.add(BatchNormalization(axis = 1))
# Second: Convolutional layer
model.add(Conv2D(36, (5, 5), strides = (2, 2), activation='relu'))
model.add(Dropout(0.25))
#model.add(BatchNormalization(axis = 1))
# Third: Convolutional layer
model.add(Conv2D(48, (5, 5), strides = (2, 2), activation='relu'))
model.add(Dropout(0.25))
#model.add(BatchNormalization(axis = 1))
# Fourth: Convolutional layer
model.add(Conv2D(64, (3, 3), strides = (1, 1), activation='relu'))
model.add(Dropout(0.25))
#model.add(BatchNormalization(axis = 1))
# Fifth: Convolutional layer
model.add(Conv2D(64, (3, 3), strides = (1, 1), activation='relu'))
model.add(Dropout(0.25))
#model.add(BatchNormalization(axis = 1))
model.add(Flatten())
# Sixth: Fully Connected layer
model.add(Dense(100))
model.add(Dropout(0.5))
# Seventh: Fully Connected layer
model.add(Dense(50))
model.add(Dropout(0.5))
    # Eighth: Fully Connected layer
model.add(Dense(10))
model.add(Dropout(0.5))
# Ninth: Output layer
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
def get_model(name = 'sanity_check'):
"""
Return appropriate model
Arguments
---------
name: string
Name of the model to be trained
Returns
---------
Keras model
"""
if name == 'sanity_check':
return sanity_check_model()
if name == 'LeNet':
return LeNet()
if name == 'nvidia':
return nvidia()
batch_size = 64
train_generator = generator(train_samples, batch_size = batch_size)
validation_generator = generator(validation_samples, batch_size = batch_size)
# Final Model Architecture to be used
model_name = 'nvidia'
print("Traning samples : {} | Validation samples : {}"\
.format(3*2*len(train_samples), 3*2*len(validation_samples)))
print(model_name)
model = get_model(name = model_name)
history_object = model.fit_generator(train_generator, steps_per_epoch= \
2*3*len(train_samples)//batch_size, validation_data=validation_generator, \
validation_steps=3*2*len(validation_samples)//batch_size, epochs=5)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('post_training_analysis.png')
model.save('model_{}.h5'.format(model_name))
| 27.875
| 118
| 0.638453
| 0
| 0
| 1,030
| 0.115471
| 0
| 0
| 0
| 0
| 3,711
| 0.416031
|
e65f8dcc762ad6c2b71e1c9a7964a20b18c50603
| 3,807
|
py
|
Python
|
enlarge_form/enlarge_form.py
|
lester-lees/extra_addons_sz
|
cddaf972cf4ea64c553bcff0006eb006a115d5ee
|
[
"Apache-2.0"
] | null | null | null |
enlarge_form/enlarge_form.py
|
lester-lees/extra_addons_sz
|
cddaf972cf4ea64c553bcff0006eb006a115d5ee
|
[
"Apache-2.0"
] | null | null | null |
enlarge_form/enlarge_form.py
|
lester-lees/extra_addons_sz
|
cddaf972cf4ea64c553bcff0006eb006a115d5ee
|
[
"Apache-2.0"
] | null | null | null |
#! -*- encoding: utf-8 -*-
from openerp import addons
from openerp.osv import fields, osv, orm
from openerp import tools
from openerp.tools.translate import _
class ir_ui_view(orm.Model):
_inherit = 'ir.ui.view'
_columns={
        'enlarge_form' : fields.boolean('Use full width of the screen?', help='Set to true if you want to widen this form so that it will use the full width of the screen.'),
}
def create(self, cr, uid, data, context=None):
result = super(ir_ui_view, self).create(cr, uid, data, context=context)
if result:
self.manipulate_sheet_tag(cr, uid, result)
return result
def write(self, cr, uid, ids, data, context=None):
result = super(ir_ui_view, self).write(cr, uid, ids, data, context=context)
if result:
self.manipulate_sheet_tag(cr, uid, ids)
return result
def has_sheet_tag(self, arch):
res=False
if arch.find('<sheet')>=0:
res=True
return res
def manipulate_sheet_tag(self, cr, uid, ids):
if not isinstance(ids,(tuple,list)):
ids=[ids]
#Warning(str(ids))
for this in self.browse(cr, uid, ids):
enlargement_view = str(this.model).replace('.','_') + '_enlarge_form'
#does a view already exist?
#view_exists=self.search(cr, uid, [('name','=',enlargement_view),('type','=','form'),('active','in',[True,False])])
view_exists=self.search(cr, uid, [('name','=',enlargement_view),('type','=','form')])
if view_exists:
if isinstance(view_exists,(tuple,list)):
view_exists=view_exists[0]
has_sheet_tag=self.has_sheet_tag(this.arch)
#what should we do?
if view_exists:
if not has_sheet_tag:
operation='deactivate_view'
else:
if this.enlarge_form:
operation='activate_view'
else:
operation='deactivate_view'
else:
if has_sheet_tag and this.enlarge_form:
operation='create_view'
else:
#nothing to do
operation=False
if not operation:
return True
if operation=='create_view':
view_arch="""<?xml version='1.0'?><xpath expr='//form/sheet' position='attributes'><attribute name='class'>enlarge_form</attribute></xpath>"""
#model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context)
vals={
'name' : enlargement_view,
'type' : 'form',
'model' : this.model,
'inherit_id' : this.id,
'arch' : view_arch,
'xml_id' : 'enlarge_form.'+enlargement_view,
'active' : 'True',
}
res=self.create(cr, uid, vals)
#for some reason, active was always getting saved as false
if res:
cr.execute("UPDATE ir_ui_view SET active=TRUE WHERE id=%s" % res)
elif operation=='activate_view':
self.write(cr, uid, view_exists, {'active':True})
elif operation=='deactivate_view':
self.write(cr, uid, view_exists, {'active':False})
return True
| 34.297297
| 186
| 0.504334
| 3,645
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 1,030
| 0.270554
|
e65fe6b2cc9e13aae056f4e22435bebdff299fe1
| 1,148
|
py
|
Python
|
tests/test_app.py
|
betasewer/machaon
|
63ccb4405ac693f14f9d25f6a706466a917dddbf
|
[
"MIT"
] | 2
|
2020-07-05T08:39:12.000Z
|
2022-01-19T22:08:21.000Z
|
tests/test_app.py
|
betasewer/machaon
|
63ccb4405ac693f14f9d25f6a706466a917dddbf
|
[
"MIT"
] | 23
|
2020-06-23T16:18:17.000Z
|
2021-12-29T09:56:48.000Z
|
tests/test_app.py
|
betasewer/machaon
|
63ccb4405ac693f14f9d25f6a706466a917dddbf
|
[
"MIT"
] | null | null | null |
from machaon.app import AppRoot, deploy_directory, transfer_deployed_directory
from machaon.process import Spirit, TempSpirit
from machaon.types.shell import Path
def test_deploy(tmpdir):
deploydir = tmpdir.mkdir("deploy")
deploy_directory(Path(deploydir))
assert deploydir.join("machaon").check()
assert deploydir.join("machaon", "store").check()
assert deploydir.join("machaon", "packages").check()
assert deploydir.join("machaon", "credential").check()
assert deploydir.join("machaon", "credential", "credential.ini").check()
assert deploydir.join("machaon", "local").check()
assert deploydir.join("machaon", "apps.ini").check()
assert deploydir.join("main.py").check()
deploydir2 = tmpdir.mkdir("deploy2")
spi = TempSpirit()
transfer_deployed_directory(spi, Path(deploydir.join("machaon")), Path(deploydir2))
assert deploydir2.join("machaon").check()
assert deploydir2.join("machaon", "apps.ini").check()
assert deploydir2.join("machaon", "credential", "credential.ini").check()
assert deploydir2.join("main.py").check()
assert not deploydir.join("machaon").check()
| 39.586207
| 87
| 0.715157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 255
| 0.222125
|
e66034257961b772951c35834b998bf6cf78de31
| 74
|
py
|
Python
|
02 Algorithm Reference/06 Securities and Portfolio/01 Securities and Portfolio Classes/03 code.py
|
Jay-Jay-D/Documentation
|
c4894e5ac20355ec82ee0db19618ad7f17bf8592
|
[
"Apache-2.0"
] | null | null | null |
02 Algorithm Reference/06 Securities and Portfolio/01 Securities and Portfolio Classes/03 code.py
|
Jay-Jay-D/Documentation
|
c4894e5ac20355ec82ee0db19618ad7f17bf8592
|
[
"Apache-2.0"
] | null | null | null |
02 Algorithm Reference/06 Securities and Portfolio/01 Securities and Portfolio Classes/03 code.py
|
Jay-Jay-D/Documentation
|
c4894e5ac20355ec82ee0db19618ad7f17bf8592
|
[
"Apache-2.0"
] | null | null | null |
#Securities array access to Security Objects:
self.Securities["IBM"].Price
| 37
| 45
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.675676
|
e66228a86c2396ec8a63b8d48e9ca8a5edd9c594
| 502
|
py
|
Python
|
migrations/versions/1d09e9261d5_.py
|
mainulhossain/biowl
|
039adc96539fae25843b1fc36074a4e5e55830ec
|
[
"MIT"
] | null | null | null |
migrations/versions/1d09e9261d5_.py
|
mainulhossain/biowl
|
039adc96539fae25843b1fc36074a4e5e55830ec
|
[
"MIT"
] | null | null | null |
migrations/versions/1d09e9261d5_.py
|
mainulhossain/biowl
|
039adc96539fae25843b1fc36074a4e5e55830ec
|
[
"MIT"
] | 1
|
2020-01-05T10:47:21.000Z
|
2020-01-05T10:47:21.000Z
|
"""empty message
Revision ID: 1d09e9261d5
Revises: 40d93619b7d
Create Date: 2016-12-16 11:38:41.336859
"""
# revision identifiers, used by Alembic.
revision = '1d09e9261d5'
down_revision = '40d93619b7d'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| 18.592593
| 63
| 0.687251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 348
| 0.693227
|
e6622feade344255592fd9a7d47d6b9f1bd055ff
| 5,732
|
py
|
Python
|
hops/xattr.py
|
robzor92/hops-util-py
|
88540a0c2b4e366fe6d2acb0441cea9378150c01
|
[
"Apache-2.0"
] | 24
|
2018-09-20T17:56:43.000Z
|
2021-11-11T23:34:43.000Z
|
hops/xattr.py
|
robzor92/hops-util-py
|
88540a0c2b4e366fe6d2acb0441cea9378150c01
|
[
"Apache-2.0"
] | 39
|
2018-10-04T15:19:07.000Z
|
2021-12-23T10:50:33.000Z
|
hops/xattr.py
|
robzor92/hops-util-py
|
88540a0c2b4e366fe6d2acb0441cea9378150c01
|
[
"Apache-2.0"
] | 23
|
2018-09-18T07:51:56.000Z
|
2021-08-10T12:10:27.000Z
|
"""
API for attaching, detaching, and reading extended metadata to HopsFS files/directories.
It uses the Hopsworks /xattrs REST API
"""
from hops import constants, util, hdfs
from hops.exceptions import RestAPIError
import urllib
def set_xattr(hdfs_path, xattr_name, value):
"""
Attach an extended attribute to an hdfs_path
Args:
:hdfs_path: path of a file or directory
:xattr_name: name of the extended attribute
:value: value of the extended attribute
Returns:
None
"""
value = str(value)
hdfs_path = urllib.parse.quote(hdfs._expand_path(hdfs_path))
headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
method = constants.HTTP_CONFIG.HTTP_PUT
resource_url = constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_XATTR_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
hdfs_path + constants.DELIMITERS.QUESTION_MARK_DELIMITER + constants.XATTRS.XATTRS_PARAM_NAME + \
constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER + xattr_name
response = util.send_request(method, resource_url, data=value, headers=headers)
response_object = response.json()
if response.status_code >= 400:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not attach extened attributes from a path (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.status_code, response.reason, error_code, error_msg, user_msg))
def get_xattr(hdfs_path, xattr_name=None):
"""
Get the extended attribute attached to an hdfs_path.
Args:
:hdfs_path: path of a file or directory
:xattr_name: name of the extended attribute
Returns:
A dictionary with the extended attribute(s) as key value pair(s). If the :xattr_name is None,
the API returns all associated extended attributes.
"""
hdfs_path = urllib.parse.quote(hdfs._expand_path(hdfs_path))
headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
method = constants.HTTP_CONFIG.HTTP_GET
resource_url = constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_XATTR_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
hdfs_path
if xattr_name is not None:
resource_url += constants.DELIMITERS.QUESTION_MARK_DELIMITER + constants.XATTRS.XATTRS_PARAM_NAME + \
constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER + xattr_name
response = util.send_request(method, resource_url, headers=headers)
response_object = response.json()
if response.status_code >= 400:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not get extened attributes attached to a path (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.status_code, response.reason, error_code, error_msg, user_msg))
results = {}
for item in response_object["items"]:
results[item["name"]] = item["value"]
return results
def remove_xattr(hdfs_path, xattr_name):
"""
Remove an extended attribute attached to an hdfs_path
Args:
:hdfs_path: path of a file or directory
:xattr_name: name of the extended attribute
Returns:
None
"""
hdfs_path = urllib.parse.quote(hdfs._expand_path(hdfs_path))
headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
method = constants.HTTP_CONFIG.HTTP_DELETE
resource_url = constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER + \
constants.REST_CONFIG.HOPSWORKS_XATTR_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
hdfs_path + constants.DELIMITERS.QUESTION_MARK_DELIMITER + constants.XATTRS.XATTRS_PARAM_NAME + \
constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER + xattr_name
response = util.send_request(method, resource_url, headers=headers)
if response.status_code >= 400:
response_object = response.json()
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not remove extened attributes from a path (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.status_code, response.reason, error_code, error_msg, user_msg))
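# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal example of how the three helpers above could be combined. The project-relative
# HDFS path and the attribute name are assumptions for illustration; the calls go through
# the Hopsworks REST API, so this only works from inside a Hopsworks project.
if __name__ == "__main__":
    example_path = "Resources/README.md"           # hypothetical project-relative path
    set_xattr(example_path, "reviewed", "true")    # attach a single extended attribute
    print(get_xattr(example_path, "reviewed"))     # e.g. {'reviewed': 'true'}
    print(get_xattr(example_path))                 # all attributes attached to the path
    remove_xattr(example_path, "reviewed")         # detach it again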
| 53.074074
| 116
| 0.694348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,442
| 0.25157
|
e66279995933e8ecb67f6f34946455046a6bef43
| 96
|
py
|
Python
|
protonn/vis/__init__.py
|
protoNN-ai/protoNN
|
812fd524a8c2de49612bbb1fc991c503fe3f1202
|
[
"Apache-2.0"
] | 3
|
2018-06-20T08:37:13.000Z
|
2019-02-21T00:14:47.000Z
|
protonn/vis/__init__.py
|
protoNN-ai/protoNN
|
812fd524a8c2de49612bbb1fc991c503fe3f1202
|
[
"Apache-2.0"
] | null | null | null |
protonn/vis/__init__.py
|
protoNN-ai/protoNN
|
812fd524a8c2de49612bbb1fc991c503fe3f1202
|
[
"Apache-2.0"
] | null | null | null |
from .vis import df_from_file, df_from_dir, filter_by, PivotTable
from .lines import plot_lines
| 32
| 65
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e663a08bf8bd9abb5a531e95d22eb32be3364bee
| 2,173
|
py
|
Python
|
slidingWindow/smallestSubarrayWithGivenSum.py
|
YasinEhsan/interview-prep
|
ed9f95af5a37b05304e45b41511068b6f72533e7
|
[
"Apache-2.0"
] | 11
|
2019-05-02T22:27:01.000Z
|
2020-10-30T08:43:02.000Z
|
slidingWindow/smallestSubarrayWithGivenSum.py
|
YasinEhsan/interview-prep
|
ed9f95af5a37b05304e45b41511068b6f72533e7
|
[
"Apache-2.0"
] | null | null | null |
slidingWindow/smallestSubarrayWithGivenSum.py
|
YasinEhsan/interview-prep
|
ed9f95af5a37b05304e45b41511068b6f72533e7
|
[
"Apache-2.0"
] | 3
|
2019-11-01T01:35:01.000Z
|
2020-01-11T18:00:39.000Z
|
# 5 27 20
def smallest_subarray_with_given_sum(s, arr):
# TODO: Write your code here
windowStart, minLen, currSum = 0,100,0
for windowEnd in range(len(arr)):
currSum += arr[windowEnd]
while currSum >= s:
minLen = min(minLen, windowEnd - windowStart +1)
currSum -= arr[windowStart]
windowStart +=1
return minLen
# 5 25 20
# added import
import math
def smallest_subarray_with_given_sum(s, arr):
windowStart, currSum, minLen = 0,0,math.inf
for windowEnd in range(len(arr)):
currSum += arr[windowEnd]
while currSum >= s:
minLen = min(minLen, windowEnd - windowStart +1)
currSum -= arr[windowStart]
windowStart +=1
# check edge case
if math.inf == currSum:
return 0
return minLen
# time O(N) space O(1)
# 3 tries
# 5.18.20
# forgot that the sliding-window shrinking is in a while loop, because the last value might equal the first 3 values
def smallest_subarray_with_given_sum(s, arr):
# TODO: Write your code here
windowStart, minLen, currSum = 0,100,0
for windowEnd in range(len(arr)):
currSum += arr[windowEnd]
while currSum >= s:
minLen = min(minLen, windowEnd - windowStart + 1)
currSum -= arr[windowStart]
windowStart+=1
return minLen
def smallest_subarray_with_given_sum(s, arr):
# TODO: Write your code here
'''
- have starting index, have currLen, minLen
- one found then cut down others in while loop style
- update vars
'''
startIndex, currLen, minLen, currSum = 0,0,len(arr), 0
for endIndex in range(len(arr)):
currSum += arr[endIndex]
currLen += 1
print(endIndex, arr[endIndex], currSum)
while currSum >= s:
minLen = min(minLen, currLen)
currLen -=1
currSum -= arr[startIndex]
startIndex += 1
return minLen
smallest_subarray_with_given_sum(8, [3, 4, 1, 1, 6])
# Given an array of positive numbers and a positive number ‘S’, find the length of the smallest contiguous subarray whose sum is greater than or equal to ‘S’. Return 0, if no such subarray exists.
# https://www.educative.io/courses/grokking-the-coding-interview/7XMlMEQPnnQ
| 23.879121
| 196
| 0.658997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 680
| 0.311784
|
e666c5e9e0189a92959abe01ef942dcddf54c96d
| 16,028
|
py
|
Python
|
build/build.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | 10
|
2016-12-28T22:06:31.000Z
|
2021-05-24T13:42:30.000Z
|
build/build.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | 4
|
2015-10-09T23:55:10.000Z
|
2020-04-04T08:09:22.000Z
|
build/build.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import configparser
import os
import subprocess
import sys
import faint_info
join_path = os.path.join
build_dir = os.path.split(os.path.realpath(__file__))[0]
os.chdir(build_dir) # Fixme: Don't change dir, use absolute paths.
root_dir = os.path.split(build_dir)[0]
sys.path.append(join_path(root_dir, "build-sys/"))
sys.path.append(join_path(root_dir, "test-sys/"))
import build_sys as bs # noqa: E402
from build_sys.util import list_cpp, strip_ext # noqa: E402
from build_sys.util.scoped import working_dir, no_output # noqa: E402
from test_sys import gen_runner # noqa: E402
import gencpp # noqa: E402
def recreate_config(platform):
with open("build.cfg", 'w') as f:
f = open("build.cfg", 'w')
f.write("[folders]\n")
f.write("wx_root=\n")
f.write("cairo_include=\n")
f.write("cairo_lib=\n")
f.write("python_include=\n")
f.write("python_lib=\n")
f.write("pango_include=\n")
f.write("pango_lib=\n")
f.write("glib_include=\n")
f.write("glib_lib=\n")
f.write("glib_config_include=\n")
f.write("pnglib_include=\n")
if platform == 'msw':
f.write("[nsis]\n")
f.write("makensis=\n")
f.write("[other]\n")
if platform != 'msw':
f.write('compiler=gcc\n')
f.write("parallell_compiles=0\n")
f.write("etags_folder=\n")
print('Config file "build.cfg" created.\n'
'You must update the file with correct paths.')
def read_config(platform):
def check_folder(name, folder, expected_content):
"""Verify that this folder (from an entry in the build.cfg) contains
some expected file.
"""
full_path = os.path.expanduser(os.path.join(folder, expected_content))
if not os.path.exists(full_path):
print(f'Error in build.cfg:\n {name}: {expected_content} not found in \n {folder}') # noqa: E501
print(full_path)
exit(1)
bo = bs.BuildOptions()
bo.platform = platform
config = configparser.RawConfigParser()
config.read('build.cfg')
try:
wx_root = config.get('folders', 'wx_root')
wx_vc_lib = join_path(wx_root, "lib", "vc_lib")
cairo_include = config.get('folders', 'cairo_include')
cairo_lib = config.get('folders', 'cairo_lib')
pango_include = config.get('folders', 'pango_include')
pango_lib = config.get('folders', 'pango_lib')
python_include = config.get('folders', 'python_include')
python_lib = config.get('folders', 'python_lib')
glib_include = config.get('folders', 'glib_include')
glib_lib = config.get('folders', 'glib_lib')
glib_config_include = config.get('folders', 'glib_config_include')
bo.parallell_compiles = int(config.get('other', 'parallell_compiles'))
pnglib_include = config.get('folders', 'pnglib_include')
except configparser.NoOptionError as e:
print("Error in build.cfg:", e)
exit(1)
# Verify that the specified paths contain expected includes or folders
check_folder("wx_root", wx_root, "include/wx")
check_folder("cairo_include", cairo_include, "cairo.h")
check_folder("python_include", python_include, "Python.h")
check_folder("pango_include", pango_include, "pango/pango.h")
check_folder("pnglib_include", pnglib_include, "png.h")
check_folder("glib_include", glib_include, "glib.h")
check_folder("glib_config_include", glib_config_include, "glibconfig.h")
bo.extra_resource_root = wx_root
if bo.platform == 'msw':
bo.makensis_exe = config.get('nsis', 'makensis')
if bo.platform == 'linux':
compiler = config.get('other', 'compiler')
if compiler is None:
print("Error: Compiler not specified in build.cfg.")
print("Expected compiler=clang or compiler=gcc under [other].")
exit(1)
elif compiler not in ('gcc', 'clang', 'iwyu'):
print(f'Error: Unsupported compiler specified in build.cfg: "{compiler}"') # noqa: E501
print('Expected "clang", "gcc" or "iwyu"')
exit(1)
bo.compiler = compiler
elif bo.platform == 'msw':
bo.compiler = 'msvc'
required_path_empty = (wx_root == "" or
python_lib == "" or
python_include == "" or
cairo_include == "" or
pango_include == "" or
pnglib_include == "")
if required_path_empty:
print("Error: Incorrect paths in build.cfg")
exit(1)
if cairo_lib == "" and not platform.startswith("linux"):
print("Error: Incorrect paths in build.cfg")
exit(1)
bo.lib_paths = [
cairo_lib,
pango_lib,
python_lib,
glib_lib]
bo.lib_paths = [l for l in bo.lib_paths if len(l) != 0]
if bo.platform == "msw":
bo.lib_paths.append(join_path(wx_root, 'lib', 'vc_lib'))
bo.project_root = faint_info.FAINT_ROOT
bo.system_include_folders = [
join_path(wx_vc_lib, "mswu"),
join_path(wx_root, "include"),
python_include,
cairo_include,
pango_include,
glib_include,
glib_config_include,
pnglib_include
]
bo.include_folders = [bo.project_root]
bo.wx_root = wx_root
return bo
def read_build_options(platform):
if not os.path.exists("build.cfg"):
recreate_config(platform)
exit(1)
return read_config(platform)
def test_extra_objs(bo):
def excluded(obj):
return (obj.startswith('app.')
or obj.startswith('py-initialize-ifaint.'))
obj_root = join_path(os.getcwd(),
faint_info.target.faint.objs_folder_prefix)
obj_root = obj_root + ("-debug" if bo.debug_compile else "-release")
return [join_path(obj_root, strip_ext(item)) for item in
os.listdir(join_path(os.getcwd(), obj_root))
if (item.endswith('.obj') or item.endswith('.o')) and
not excluded(item)]
def get_test_source_files(bo, folder):
test_source_folder = join_path(bo.project_root, folder)
test_root = join_path(bo.project_root, "tests")
test_files = []
for folder in (test_source_folder,
join_path(test_source_folder, 'gen'),
join_path(test_root, "test-util")):
test_files.extend([join_path(folder, f)
for f in list_cpp(folder)])
return test_files
def no_source_folders_f(*args, **kwArgs):
return []
def build(caption,
platform,
cmdline,
obj_folder_prefix,
out_name,
precompile_steps,
source_files,
source_folders,
extra_objs,
msw_subsystem,
forced_include_func):
print(caption)
print("--------------------")
bo = read_build_options(platform)
bo.obj_root_release = join_path(
os.getcwd(),
f"{obj_folder_prefix}-release")
bo.obj_root_debug = join_path(
os.getcwd(),
f"{obj_folder_prefix}-debug")
bo.extra_objs = extra_objs(bo)
bo.out_name_release = out_name
bo.out_name_debug = out_name + "d"
opts, args = cmdline
bo.debug_compile = opts.debug
precompile_steps(bo)
bo.source_files = source_files(platform, bo)
bo.source_folders = source_folders(platform, False)
bo.forced_include = forced_include_func(bo)
bo.msw_subsystem = msw_subsystem
return bs.build(bo, cmdline)
def exit_on_error(function, args, blank_line=True):
if blank_line:
print()
return_code = function(*args)
if return_code != 0:
exit(return_code)
def run_unit_tests(platform, cmdline):
extension = ".exe" if platform == "msw" else ""
test_root = join_path(faint_info.FAINT_ROOT, "tests")
cmd = join_path(test_root, "run-unit-tests" + extension) + " --silent"
result = subprocess.call(cmd,
shell=True,
cwd=test_root)
if result == 0:
print("* C++ Unit tests OK")
else:
print("* C++ Unit tests failed!")
return result
def run_py_tests(platform, cmdline):
sys.path.append(faint_info.FAINT_TESTS_ROOT)
import run_py_tests as py_tests
with no_output(), working_dir(faint_info.FAINT_TESTS_ROOT):
ok = py_tests.run_tests()
if ok:
print('* Python Unit tests OK')
return 0
else:
print("* Error: Python Unit tests failed!")
return 1
def forced_include_func(bo):
return join_path(bo.project_root, "util", "msw_warn.hh")
def build_faint(platform, cmdline):
def precompile_steps(bo):
# Generate setting-handling code based on set_and_get.py
gencpp.run("../python/generate")
if not os.path.exists("../help/source/generated"):
os.mkdir("../help/source/generated")
bs.gen_method_def.generate_headers(
faint_info.HEADERS_TO_GENERATE,
faint_info.GENERATED_METHOD_DEF_PATH,
faint_info.GENERATED_HELP_PATH)
bs.gen_resource.run(bo.project_root)
bs.gen_text_expressions.generate(
hh_path=join_path(
bo.project_root,
"generated", "text-expression-constants.hh"),
help_path=join_path(
faint_info.GENERATED_HELP_PATH,
"text-expressions.txt"))
# HTML help
bs.gen_help.run()
def get_faint_src_files(platform, bo):
src_folders = faint_info.get_src_folders(platform)
src_folders = [join_path(bo.project_root, folder)
for folder in src_folders]
src_folders.append(bo.project_root)
files = []
for folder in src_folders:
files.extend([join_path(folder, f)
for f in list_cpp(folder)])
return files
def get_faint_extra_objs(bo):
return []
return build(
"Faint",
platform,
cmdline,
"objs",
"faint",
precompile_steps,
get_faint_src_files,
faint_info.get_src_folders,
get_faint_extra_objs,
"windows",
forced_include_func)
def build_benchmarks(platform, cmdline):
target = faint_info.target.benchmark
def precompile_steps(bo):
bench_root = join_path(bo.project_root, target.source_folder)
gen_runner.gen_bench_runner(
root_dir=bench_root,
out_file=join_path(bench_root, 'gen', 'bench-runner.cpp'))
bo.create_build_info = False
def get_benchmark_source_files(platform_, bo):
return get_test_source_files(bo, target.source_folder)
return build(
"Benchmarks",
platform,
cmdline,
target.objs_folder_prefix,
target.executable,
precompile_steps,
get_benchmark_source_files,
no_source_folders_f,
test_extra_objs,
"console",
forced_include_func)
def build_unit_tests(platform, cmdline):
target = faint_info.target.unit_test
def precompile_steps(bo):
tests_root = join_path(bo.project_root, target.source_folder)
gen_runner.gen_test_runner(
root_dir=tests_root,
out_file=join_path(tests_root, 'gen', 'test-runner.cpp'))
bo.create_build_info = False
def get_unit_test_source_files(platform, bo):
return get_test_source_files(bo, target.source_folder)
return build(
"Unit tests",
platform,
cmdline,
target.objs_folder_prefix,
target.executable,
precompile_steps,
get_unit_test_source_files,
no_source_folders_f,
test_extra_objs,
"console",
forced_include_func)
def build_image_tests(platform, cmdline):
target = faint_info.target.image_test
def precompile_steps(bo):
tests_root = join_path(bo.project_root, target.source_folder)
gen_runner.gen_image_runner(
root_dir=tests_root,
out_file=join_path(tests_root, 'gen', 'image-runner.cpp'))
bo.create_build_info = False
def get_image_test_source_files(platform, bo):
return get_test_source_files(bo, target.source_folder)
return build(
"Image tests",
platform,
cmdline,
target.objs_folder_prefix,
target.executable,
precompile_steps,
get_image_test_source_files,
no_source_folders_f,
test_extra_objs,
"console",
forced_include_func)
def build_gui_tests(platform, cmdline):
target = faint_info.target.gui_test
def precompile_steps(bo):
bo.create_build_info = False
def get_gui_test_source_files(platform, bo):
test_source_folder = join_path(bo.project_root, target.source_folder)
test_root = join_path(bo.project_root, "tests")
test_files = []
for folder in (test_source_folder,
join_path(test_root, "test-util")):
test_files.extend([join_path(folder, f)
for f in list_cpp(folder)])
return test_files
return build(
"GUI-tests",
platform,
cmdline,
target.objs_folder_prefix,
target.executable,
precompile_steps,
get_gui_test_source_files,
no_source_folders_f,
test_extra_objs,
"windows",
forced_include_func)
def build_python_extension(platform, cmdline):
def precompile_steps(bo):
bo.create_build_info = False
bo.target_type = bo.Target.shared_python_library
if not os.path.exists("../ext/out"):
os.mkdir("../ext/out")
target = faint_info.target.python_extension
def extension_source_files(platform, bo):
src_folder = join_path(bo.project_root, target.source_folder)
return [join_path(src_folder, f) for f in list_cpp(src_folder)]
result = build(
"Python extension",
platform,
cmdline,
target.objs_folder_prefix,
target.out_lib,
precompile_steps,
extension_source_files,
no_source_folders_f,
test_extra_objs,
"console",
forced_include_func)
return result
if __name__ == '__main__':
platform = ("linux" if sys.platform.startswith('linux') else "msw")
cmdline = bs.parse_command_line()
opts, args = cmdline
exit_on_error(build_faint, (platform, cmdline), blank_line=False)
if platform == 'msw': # Py-extension build not implemented for Linux yet.
exit_on_error(build_python_extension, (platform, cmdline))
if opts.debug:
print("Fixme: Not building tests in debug.")
else:
exit_on_error(build_unit_tests, (platform, cmdline))
exit_on_error(build_image_tests, (platform, cmdline))
exit_on_error(build_benchmarks, (platform, cmdline))
exit_on_error(build_gui_tests, (platform, cmdline))
exit_on_error(run_unit_tests, (platform, cmdline))
if platform == 'msw':
exit_on_error(run_py_tests, (platform, cmdline))
if opts.version != bs.unknown_version_str and platform == 'msw':
bo = read_build_options(platform)
bs.build_installer(opts.version, bo.makensis_exe)
exit(0)
| 31.12233
| 109
| 0.630334
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,257
| 0.203207
|
e6671dd4f2c0b71c8a3b385713a43ac751148356
| 2,119
|
py
|
Python
|
printAlternatively.py
|
kamwithak/competitiveProgramming
|
ab4433568081900212a8a987d7bf8cb78d2698d1
|
[
"MIT"
] | null | null | null |
printAlternatively.py
|
kamwithak/competitiveProgramming
|
ab4433568081900212a8a987d7bf8cb78d2698d1
|
[
"MIT"
] | 1
|
2020-07-19T15:40:25.000Z
|
2020-07-19T15:40:25.000Z
|
printAlternatively.py
|
kamwithak/competitiveProgramming
|
ab4433568081900212a8a987d7bf8cb78d2698d1
|
[
"MIT"
] | null | null | null |
class Solution():
def __init__(self, A, B):
self.A = A
self.B = B
def printAlternativelySameSize(self):
"""
Assumes that len(self.A) == len(self.B) != 0
Alternatively print each element in the two Lists
"""
if (len(self.A) != len(self.B)):
raise Exception("the two lists must be of same length")
if (len(self.A) == len(self.B) == 0):
raise Exception("Empty lists")
#
ptrA = 0 ; ptrB = 0 ; decisionPoint = False
while (ptrA < len(self.A) or ptrB < len(self.B)):
if (not decisionPoint):
print(self.A[ptrA])
ptrA+=1
decisionPoint = True
else:
print(self.B[ptrB])
ptrB+=1
decisionPoint = False
def printAlternativelyDifferentSize(self):
"""
Alternatively print each element in the two Lists, regardless of List size
"""
ptrA = 0 ; ptrB = 0 ; decisionPoint = False
while (ptrA < len(self.A) and ptrB < len(self.B)):
if (not decisionPoint):
print(self.A[ptrA])
ptrA+=1
decisionPoint = True
else:
print(self.B[ptrB])
ptrB+=1
decisionPoint = False
while (ptrA < len(self.A)):
print(self.A[ptrA])
ptrA += 1
while (ptrB < len(self.B)):
print(self.B[ptrB])
ptrB += 1
obj = Solution(A=[3,2,1], B=[3,2,1])
obj.printAlternativelySameSize()
"""
Given two arrays, print each element alternatively
For example)
arr1 = [a,b,c,d]
arr2 = [e,f,g,h,i,j,k]
=> a e b f c g d h i j k
"""
class Solution():
def __init__(self, arr1, arr2):
self.arr1 = arr1
self.arr2 = arr2
self.n = len(self.arr1)
self.m = len(self.arr2)
def print_lists(self):
i, j = 0, 0
config = True
while(i < self.n and j < self.m):
if (config):
print(self.arr1[i])
i += 1
config = False
else:
print(self.arr2[j])
j += 1
config = True
while (i < self.n):
print(self.arr1[i])
i += 1
while (j < self.m):
print(self.arr2[j])
j += 1
obj = Solution(['a', 'b', 'c', 'd'], ['e','f','g','h','i','j','k'])
obj.print_lists()
| 21.40404
| 77
| 0.547428
| 1,789
| 0.844266
| 0
| 0
| 0
| 0
| 0
| 0
| 440
| 0.207645
|
e667758e13389c3d1155786a731f2598edf57be3
| 981
|
py
|
Python
|
test/test_theaigame_bot.py
|
gnmerritt/poker
|
5e7241efac1b0757f39c28f6d485f4d79960095b
|
[
"MIT"
] | 5
|
2015-04-09T02:45:12.000Z
|
2018-06-27T05:34:41.000Z
|
test/test_theaigame_bot.py
|
gnmerritt/poker
|
5e7241efac1b0757f39c28f6d485f4d79960095b
|
[
"MIT"
] | null | null | null |
test/test_theaigame_bot.py
|
gnmerritt/poker
|
5e7241efac1b0757f39c28f6d485f4d79960095b
|
[
"MIT"
] | 2
|
2017-09-19T04:49:07.000Z
|
2018-12-09T19:58:18.000Z
|
import unittest
from pokeher.theaigame_bot import TheAiGameBot
class QuietBot(TheAiGameBot):
def log(self, msg):
pass
class TheAiGameBotTest(unittest.TestCase):
"""Test that the bot class is instantiated properly and has all the methods
that it's supposed to"""
def setUp(self):
self.bot = QuietBot(None, None, None)
def test_bot_instantiation(self):
"""Tests instantiating the bot"""
self.assertTrue(self.bot)
def test_bot_methods(self):
"""Tests that the bot has all the I/O methods"""
self.assertTrue(self.bot.run)
self.assertTrue(self.bot.say)
self.assertTrue(self.bot.log)
def test_mixed_in_methods(self):
"""Tests that the bot has all the parser & action methods"""
self.assertTrue(self.bot.set_up_parser)
self.assertTrue(self.bot.bet)
self.assertTrue(self.bot.fold)
self.assertTrue(self.bot.call)
self.assertTrue(self.bot.check)
| 31.645161
| 79
| 0.670744
| 914
| 0.931702
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.249745
|
e66883315cccecf4d95a549214dcc1704e5e4e46
| 429
|
py
|
Python
|
tests/test_exp.py
|
SiddeshSambasivam/MatterIx
|
e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139
|
[
"MIT"
] | 9
|
2020-07-25T12:00:30.000Z
|
2021-07-07T09:30:57.000Z
|
tests/test_exp.py
|
SiddeshSambasivam/MatterIx
|
e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139
|
[
"MIT"
] | null | null | null |
tests/test_exp.py
|
SiddeshSambasivam/MatterIx
|
e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139
|
[
"MIT"
] | null | null | null |
import unittest
from matterix import Tensor
import numpy as np
class TestTensorExponents(unittest.TestCase):
def test_simple_exp(self):
an = np.random.randint(0, 10, (10, 10))
at = Tensor(an, requires_grad=True)
result = at * at
result.backward(gradient=Tensor.ones_like(result))
assert result.tolist() == (an ** 2).tolist()
assert at.grad.tolist() == (2.0 * an).tolist()
| 25.235294
| 58
| 0.638695
| 363
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e6691884e90ad61e89f19cad4d887cbd1d5007c5
| 2,161
|
py
|
Python
|
tests/scripts/run_lookups.py
|
abelard2008/overlog
|
8df2bb95d2e39e41dd8e30249da6bb8a1615f39f
|
[
"BSD-3-Clause"
] | 3
|
2016-01-26T22:19:12.000Z
|
2019-07-10T02:12:38.000Z
|
tests/scripts/run_lookups.py
|
abelard2008/overlog
|
8df2bb95d2e39e41dd8e30249da6bb8a1615f39f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/run_lookups.py
|
abelard2008/overlog
|
8df2bb95d2e39e41dd8e30249da6bb8a1615f39f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2
# -*- Mode: python -*-
#
# DESCRIPTION: Setup and run n chord nodes.
#
#
import getopt
import os
import sys
import time
import random
import signal
import threading
def print_usage():
print
print "Usage: run_lookups -p <port> [-t <sleep_time>] [-v <vantages>] [-s <seed>] lookup_exec output_dir ip0 [ ip1 [...]]"
print
def parse_cmdline(argv):
global log
shortopts = "v:p:s:t:"
flags = {"seed" : random.random()*sys.maxint, "vantages" : 1, "port" : 0, "sleep_time" : 1}
opts, args = getopt.getopt(argv[1:], shortopts)
for o, v in opts:
if o == "-s": flags["seed"] = int(v)
elif o == "-v": flags["vantages"] = int(v)
elif o == "-p": flags["port"] = int(v)
elif o == "-n": flags["num_lookups"] = int(v)
elif o == "-t": flags["sleep_time"] = int(v)
if args[1]:
log = open(args[1]+"/lookups.log", 'w')
return flags, args
def run_lookup(lookup_exec, seed, node, out):
try:
rv = os.system(r"%s NONE %d %s %s %s >> %s 2>&1" \
% (lookup_exec, seed, "simple_lookup:%s%d" % (node, random.random()*sys.maxint), node, node, out))
except:
print >> log, "EXCEPTION RUN LOOKUP: %s\n" % str(sys.exc_info()[:2])
if __name__ == "__main__":
try:
flags, args = parse_cmdline(sys.argv)
except:
print_usage()
sys.exit(3)
if len(args) < 3 or not int(flags["port"]):
print_usage()
sys.exit(3)
seed = int(flags["seed"])
port = int(flags["port"])
vantages = int(flags["vantages"])
sleep_time = int(flags["sleep_time"])
ips = args[2:]
print "IPS: ", ips
while 1:
try:
for v in range(vantages):
if len(ips) == 1: run_lookup(args[0], seed, "%s:%s" % (ips[0], int(port)+int(v)), args[1]+"/lookups.log")
else: run_lookup(args[0], seed, "%s:%s" % (ips[v], port), args[1]+"/lookups.log")
seed += 1
time.sleep(sleep_time)
except:
print >> log, "EXCEPTION WHILE LOOP: %s\n" % str(sys.exc_info()[:2])
| 30.43662
| 126
| 0.529847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 542
| 0.25081
|
e6692d7fe75e939ec528720c041175b24637e974
| 1,722
|
py
|
Python
|
src/tests/test_task_2_4.py
|
Python-course/Python-course
|
59de0ef9928aeaa5dd185ceaafa334eb8e719217
|
[
"MIT"
] | null | null | null |
src/tests/test_task_2_4.py
|
Python-course/Python-course
|
59de0ef9928aeaa5dd185ceaafa334eb8e719217
|
[
"MIT"
] | null | null | null |
src/tests/test_task_2_4.py
|
Python-course/Python-course
|
59de0ef9928aeaa5dd185ceaafa334eb8e719217
|
[
"MIT"
] | null | null | null |
"""
Tests for task 2.4.
"""
from unittest import TestCase, main
from fractions import Fraction
from tasks import task_2_4
class TestFractionFromString(TestCase):
def test_fraction_from_string__CorrectArguments__ShouldReturnCorrectResult(self):
"""
        Checks handling of valid input data.
"""
data = [
("-2#1/2", Fraction(-5, 2)),
( "1#1/3", Fraction( 4, 3)),
("-1#1/6", Fraction(-7, 6)),
( "0#1/7", Fraction( 1, 7)),
("-0#1/7", Fraction(-1, 7))
]
for representation, result in data:
with self.subTest():
self.assertEqual(task_2_4.fraction_from_string(representation), result,
f'representation="{representation}"')
def test_fraction_from_string__DenominatorIsZero__ShouldRaiseValueError(self):
"""
        Checks that an exception is raised when a zero denominator is passed.
"""
with self.assertRaises(ValueError):
task_2_4.fraction_from_string("1#1/0")
class TestFractionToString(TestCase):
def test_fraction_to_string__CorrectArguments__ShouldReturnCorrectResult(self):
"""
        Checks handling of valid input data.
"""
data = \
[(Fraction(-5, 2), "-2#1/2"),
(Fraction(4, 3), "1#1/3"),
(Fraction(-7, 6), "-1#1/6"),
(Fraction(1, 7), "0#1/7"),
(Fraction(-1, 7), "-0#1/7")]
for fraction, result in data:
with self.subTest():
self.assertEqual(task_2_4.fraction_to_string(fraction), result, f"fraction={fraction}")
if __name__ == "__main__":
main(verbosity=2)
| 28.7
| 103
| 0.577236
| 1,665
| 0.89372
| 0
| 0
| 0
| 0
| 0
| 0
| 537
| 0.288245
|
e6693d31028174fac6a03f7991d1cc9f5830e4f5
| 1,007
|
py
|
Python
|
aioweb_auth/helpers.py
|
kreopt/aioweb_auth
|
e6a982296b52fc2068dd09afb0827dab527ef9b7
|
[
"MIT"
] | null | null | null |
aioweb_auth/helpers.py
|
kreopt/aioweb_auth
|
e6a982296b52fc2068dd09afb0827dab527ef9b7
|
[
"MIT"
] | null | null | null |
aioweb_auth/helpers.py
|
kreopt/aioweb_auth
|
e6a982296b52fc2068dd09afb0827dab527ef9b7
|
[
"MIT"
] | null | null | null |
from aiohttp import web
from aiohttp_security import authorized_userid
from aioweb.conf import settings
async def redirect_authenticated(request):
user_id = await authorized_userid(request)
if user_id and not request.is_ajax():
redirect_url = request.query.get('redirect_to')
if not redirect_url:
redirect_url = getattr(settings, 'AUTH_PRIVATE_URL', '/')
raise web.HTTPFound(redirect_url)
def auth_error_response(controller, reason, detail=None):
if controller.request.is_ajax():
return web.HTTPForbidden(reason=reason)
else:
controller.flash['AUTH_ERROR'] = detail if detail else reason
return web.HTTPFound(controller.path_for('index'))
async def auth_success_response(controller):
if not controller.request.is_ajax():
await redirect_authenticated(controller.request)
else:
user_id = await authorized_userid(controller.request)
return {'id': user_id, 'token': controller.request.csrf_token}
| 34.724138
| 70
| 0.725919
| 0
| 0
| 0
| 0
| 0
| 0
| 613
| 0.608739
| 64
| 0.063555
|
e669828a1fd8d946f628655596de52579956c2b4
| 442
|
py
|
Python
|
Leetcode/560-Subarray_Sum.py
|
EdwaRen/Competitve-Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | 1
|
2021-05-03T21:48:25.000Z
|
2021-05-03T21:48:25.000Z
|
Leetcode/560-Subarray_Sum.py
|
EdwaRen/Competitve_Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | null | null | null |
Leetcode/560-Subarray_Sum.py
|
EdwaRen/Competitve_Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | null | null | null |
class Solution(object):
def subarraySum(self, nums, k):
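        # Prefix-sum + hash map: sum_history counts how many prefixes have
        # produced each running total; whenever (sum - k) has been seen before,
        # each such prefix marks the start of a subarray that sums to k.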
sum = 0
res = 0
sum_history = {0:1}
for i in nums:
sum+=i
if sum - k in sum_history:
res+=sum_history[sum-k]
if sum in sum_history:
sum_history[sum]+=1
else:
sum_history[sum] = 1
return res
a = Solution()
print(a.subarraySum([1, 2, 1, 2], 3))
| 22.1
| 39
| 0.466063
| 387
| 0.875566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c6fd9ed01bdcac2a90cc2cff054eefd30d07deb0
| 3,901
|
py
|
Python
|
functions/aou/tests/upload_test_files.py
|
broadinstitute/wfl
|
1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2
|
[
"BSD-3-Clause"
] | 15
|
2020-03-04T17:30:25.000Z
|
2022-03-09T14:57:26.000Z
|
functions/aou/tests/upload_test_files.py
|
broadinstitute/wfl
|
1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2
|
[
"BSD-3-Clause"
] | 184
|
2020-03-06T20:55:15.000Z
|
2022-03-15T18:24:57.000Z
|
functions/aou/tests/upload_test_files.py
|
broadinstitute/wfl
|
1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2
|
[
"BSD-3-Clause"
] | 2
|
2020-07-08T19:16:26.000Z
|
2020-07-10T18:47:30.000Z
|
""" Helper script that copies all of the files for an arrays sample into the dev aou input bucket. This will trigger
the submit_aou_workload cloud function for each file. When all files have been uploaded, it will launch an arrays
workflow via the workflow launcher (but only if a workflow with that chipwell barcode & analysis version has not
been run before).
Usage: python upload_test_files.py -b <bucket>
"""
import argparse
import json
import random
import sys
import subprocess
import tempfile
arrays_path = "gs://broad-gotc-dev-wfl-ptc-test-inputs/arrays/HumanExome-12v1-1_A/"
arrays_metadata_path = "gs://broad-gotc-dev-wfl-ptc-test-inputs/arrays/metadata/HumanExome-12v1-1_A/"
def get_destination_paths(bucket, prefix):
return {
"arrays": f"gs://{bucket}/{prefix}/arrays/",
"arrays_metadata": f"gs://{bucket}/{prefix}/arrays/metadata/",
"ptc": f"gs://{bucket}/{prefix}/ptc.json"
}
def get_ptc_json(bucket, prefix, chip_well_barcode, analysis_version, prod):
return {
"executor":
"https://cromwell-aou.gotc-prod.broadinstitute.org" if prod
else "https://cromwell-gotc-auth.gotc-dev.broadinstitute.org/",
"environment": "aou-prod" if prod else "aou-dev",
"uuid": None,
"notifications": [{
"analysis_version_number": analysis_version,
"call_rate_threshold": 0.98,
"chip_well_barcode": chip_well_barcode,
"green_idat_cloud_path": f"gs://{bucket}/{prefix}/arrays/HumanExome-12v1-1_A/idats/7991775143_R01C01/7991775143_R01C01_Grn.idat",
"params_file": f"gs://{bucket}/{prefix}/arrays/HumanExome-12v1-1_A/inputs/7991775143_R01C01/params.txt",
"red_idat_cloud_path": f"gs://{bucket}/{prefix}/arrays/HumanExome-12v1-1_A/idats/7991775143_R01C01/7991775143_R01C01_Red.idat",
"reported_gender": "Female",
"sample_alias": "NA12878",
"sample_lsid": "broadinstitute.org:bsp.dev.sample:NOTREAL.NA12878",
"bead_pool_manifest_file": f"gs://{bucket}/{prefix}/arrays/metadata/HumanExome-12v1-1_A/HumanExome-12v1-1_A.bpm",
"cluster_file": f"gs://{bucket}/{prefix}/arrays/metadata/HumanExome-12v1-1_A/HumanExomev1_1_CEPH_A.egt",
"zcall_thresholds_file": f"gs://{bucket}/{prefix}/arrays/metadata/HumanExome-12v1-1_A/IBDPRISM_EX.egt.thresholds.txt",
"gender_cluster_file": f"gs://{bucket}/{prefix}/arrays/metadata/HumanExome-12v1-1_A/HumanExomev1_1_gender.egt",
"extended_chip_manifest_file": f"gs://{bucket}/{prefix}/arrays/metadata/HumanExome-12v1-1_A/HumanExome-12v1-1_A.1.3.extended.csv"
}]
}
def main(bucket, prod):
chip_well_barcode = "7991775143_R01C01"
analysis_version = random.randrange(sys.maxsize)
prefix = f"chip_name/{chip_well_barcode}/{analysis_version}"
ptc_json = get_ptc_json(bucket, prefix, chip_well_barcode, analysis_version, prod)
destination_paths = get_destination_paths(bucket, prefix)
with tempfile.TemporaryDirectory() as tmpdirname:
with open(f'{tmpdirname}/ptc.json', 'w') as f:
json.dump(ptc_json, f)
subprocess.run(["gsutil", "cp", "-r", arrays_path, destination_paths["arrays"]])
subprocess.run(["gsutil", "cp", "-r", arrays_metadata_path, destination_paths["arrays_metadata"]])
subprocess.run(["gsutil", "cp", f"{tmpdirname}/ptc.json", destination_paths["ptc"]])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--bucket",
dest="bucket",
default="dev-aou-arrays-input",
help="The upload destination bucket."
)
parser.add_argument(
"-p",
"--prod",
action="store_true",
help="Use infrastructure in broad-aou rather than broad-gotc-dev."
)
args = parser.parse_args()
main(args.bucket, args.prod)
| 47.573171
| 141
| 0.681364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,325
| 0.596001
|
c6fe87b224a7fdc40686930d3055375689c20f4c
| 2,019
|
py
|
Python
|
warp_gui.py
|
maciejczechowski/CarND-Advanced-Lane-Lines
|
058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230
|
[
"MIT"
] | null | null | null |
warp_gui.py
|
maciejczechowski/CarND-Advanced-Lane-Lines
|
058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230
|
[
"MIT"
] | null | null | null |
warp_gui.py
|
maciejczechowski/CarND-Advanced-Lane-Lines
|
058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
from src import lane_finder as lf
from src import parameters
import argparse
class WarpFinder:
def __init__(self, image, horizon = 400, x1 = 500):
self.image1 = image
self._horizon = horizon
self._x1 = x1
def onChangeHorizon(pos):
self._horizon = pos
self._render()
def onChangeX1(pos):
self._x1 = pos
self._render()
cv2.namedWindow('result')
cv2.createTrackbar('horizon', 'result', self._horizon, 720, onChangeHorizon)
cv2.createTrackbar('x1', 'result', self._x1, 640, onChangeX1)
self._render()
print("Adjust the parameters as desired. Hit any key to close.")
cv2.waitKey(0)
cv2.destroyWindow('result')
def draw_grid(self, img, w, h, line_color=(0, 255, 0), thickness=1, type_= cv2.LINE_AA, pxstep=50):
'''(ndarray, 3-tuple, int, int) -> void
draw gridlines on img
line_color:
BGR representation of colour
thickness:
line thickness
type:
8, 4 or cv2.LINE_AA
pxstep:
grid line frequency in pixels
'''
x = pxstep
y = pxstep
while x < w:
cv2.line(img, (x, 0), (x, h), color=line_color, lineType=type_, thickness=thickness)
x += pxstep
while y < h:
cv2.line(img, (0, y), (w, y), color=line_color, lineType=type_, thickness=thickness)
y += pxstep
def _render(self):
warped1 = lf.toBirdsEye(self.image1, self._x1, self._horizon)
self.draw_grid(warped1, 1280, 720)
self._result = warped1
cv2.imshow('result', self._result)
parser = argparse.ArgumentParser(description='Visualizes the warp transform.')
parser.add_argument('filename')
args = parser.parse_args()
image = cv2.imread(args.filename)
params = parameters.LaneFinderParams()
thresh = WarpFinder(image, params.warp_horizon, params.warp_x1)
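# Run as: python warp_gui.py <image-file>, then drag the trackbars to tune the warp.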
| 25.884615
| 103
| 0.602278
| 1,630
| 0.80733
| 0
| 0
| 0
| 0
| 0
| 0
| 445
| 0.220406
|
05007036c73f4b4b153318ac832ce22662ff0e07
| 2,041
|
py
|
Python
|
election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py
|
dkupsh/stvote
|
dbe906681a171c5654341b93dc0fb5b0208cfd33
|
[
"MIT"
] | null | null | null |
election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py
|
dkupsh/stvote
|
dbe906681a171c5654341b93dc0fb5b0208cfd33
|
[
"MIT"
] | null | null | null |
election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py
|
dkupsh/stvote
|
dbe906681a171c5654341b93dc0fb5b0208cfd33
|
[
"MIT"
] | null | null | null |
###############
# Ballot Parser for UC Berkeley Results
#
# This ballot parser has been tailored to the ballot
# system used by UCB. If you use another software
# to define ballots, ensure the data returned by the
# ballot parser returns data in the following fashion:
#
# [
# {
# "ballot_id": "unique_ballot_id",
# "ballot_data": {
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# ...
# }
# },
# {
# "ballot_id": "unique_ballot_id",
# "ballot_data": {
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# ...
# }
# },
# ...
# ]
#
# The race_id value should correspond to the value
# specified in the configuration file.
#
# Each list identified by the race_id should be in
# voting-choice order, where the first candidate
# within the list corresponds to the ballot's first
# choice vote.
#
# The candidate_id should correspond to the value
# returned by the election candidate parser.
#
# Last Modified: April 12, 2016
###############
import json
import uuid
def parse(ballot_file_path, races):
ballots_data = []
# Open the ballot file.
with open(ballot_file_path, encoding="UTF-8", errors="ignore") as ballot_file:
ballot_file_data = json.loads(ballot_file.read())
for ballot in ballot_file_data["ballots"]:
ballot_data = {}
ballot_data["ballot_id"] = str(uuid.uuid4())
ballot_data["ballot_data"] = {}
for race in races:
ballot_data["ballot_data"][race.id()] = ballot[race.id()]
ballots_data.append(ballot_data)
return ballots_data
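# Example of a single parsed ballot (illustrative ids and values):
#   {"ballot_id": "<uuid4 string>", "ballot_data": {"senate": ["candidate_07", "candidate_02"]}}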
| 26.166667
| 82
| 0.526213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,457
| 0.713866
|
05018611063b1ec5bb0bc5adba5e6965095d97d4
| 5,971
|
py
|
Python
|
deco/__init__.py
|
patdex/deco
|
83141719b3b68fb1e99b43384a25288aea5c3e8c
|
[
"MIT"
] | null | null | null |
deco/__init__.py
|
patdex/deco
|
83141719b3b68fb1e99b43384a25288aea5c3e8c
|
[
"MIT"
] | null | null | null |
deco/__init__.py
|
patdex/deco
|
83141719b3b68fb1e99b43384a25288aea5c3e8c
|
[
"MIT"
] | null | null | null |
import collections
import inspect
import time
import re
# module config:
disable_tracing = False
indent = True
# indentation for log output
_log_indent = dict()
def indent_str(cnt, end=False):
"""
indent string
:param cnt: indentation count
    :param end: True for the closing (return) marker, False for the call marker
:return:
"""
if not indent:
return ''
return '| ' * cnt + ('/ ' if not end else '\\ ')
class _MyOrderedDict(collections.OrderedDict):
"""
    format the representation string for log output
"""
def __repr__(self):
ret = str()
for key, val in self.items():
ret += '{0}={2}({1}), '.format(key, val, val.__class__.__name__)
return ret[:-2]
class _MyList(list):
"""
    format the representation string for log output
"""
def __repr__(self):
ret = str()
for val in self:
ret += '{0}({1}), '.format(val, val.__class__.__name__)
return ret[:-2]
def _get_wrapped_method(func):
"""
get inner method if multiple decorators are used
:param func:
:return:
"""
while hasattr(func, '__wrapped__'):
func = getattr(func, '__wrapped__')
return func
def _wrap(wrapper, func):
"""
save wrapped function if multiple decorators are used
:param func:
:return:
"""
setattr(wrapper, '__wrapped__', func)
def argument_types(func):
"""
:param func:
:return: dictionary with argument name and type
"""
signature = inspect.signature(func)
    match = re.match(r"\(([^)]+)\)", str(signature))
    sig = match.group(1) if match else ''
param_list = str(sig).split(', ')
types = dict()
for param in param_list:
try:
elements = param.split(':')
types[elements[0]] = elements[1].split('=')[0]
except IndexError:
pass
return types
def collect_all_arguments_to_dict(func, args, kwargs):
"""
:param func:
:param args:
:param kwargs:
:return: dictionary with all method arguments and their values (like kwargs)
"""
arg_names = [arg_name for arg_name in inspect.signature(func).parameters]
all_as_kwargs = _MyOrderedDict()
# collect args
for arg_name, arg_val in zip(arg_names, args):
all_as_kwargs[arg_name] = arg_val
# collect kwargs
for arg_name in arg_names:
if arg_name in kwargs:
all_as_kwargs[arg_name] = kwargs[arg_name]
# collect default arguments:
for arg_name, arg_val in inspect.signature(func).parameters.items():
if arg_name in arg_names and arg_name not in all_as_kwargs:
all_as_kwargs[arg_name] = arg_val.default
return all_as_kwargs
class Trace:
"""
Decorator Class
"""
def __init__(self, log_method, disable=False):
"""
:param log_method: logging method
:param disable: disable logging
"""
self.log_method = log_method
self.disabled = disable
def __call__(self, func):
"""
:param func: decorated method
:return:
"""
def wrapper(*args, **kwargs):
if self.disabled or disable_tracing:
return func
inner_func = _get_wrapped_method(func)
ind = self._increment_indent() # indent log message
all_as_kwargs = collect_all_arguments_to_dict(inner_func, args, kwargs) # all arguments to OrderedDict
self.log_method(indent_str(ind) + self._call_message(inner_func, all_as_kwargs))
start_time = time.time()
ret = func(*args, **kwargs) # run decorated method
exec_time = time.time() - start_time
self.log_method(indent_str(ind, True) + self._return_message(inner_func, ret, exec_time))
self._decrement_indent() # redo indent log message
return ret
_wrap(wrapper, func)
return wrapper
@staticmethod
def _call_message(func, all_as_kwargs):
"""
format call log message
:param func:
:param all_as_kwargs:
:return:
"""
message = '{0}({1})'.format(func.__name__, all_as_kwargs)
return message
@staticmethod
def _return_message(func, ret, exec_time):
"""
format return log message
:param func:
:param ret:
:return:
"""
ret_arg_str = str(_MyList(ret)) if isinstance(ret, tuple) else '{1}({0})'.format(ret, ret.__class__.__name__)
message = '{1} in {2:.3f}ms'.format(func.__name__, ret_arg_str, exec_time * 1000)
return message
def _increment_indent(self):
if not indent:
return ''
if self.log_method not in _log_indent:
_log_indent[self.log_method] = 0
else:
_log_indent[self.log_method] += 1
return _log_indent[self.log_method]
def _decrement_indent(self):
if not indent:
return ''
_log_indent[self.log_method] -= 1
def cast_std_arguments(func):
"""
cast arguments with standard and defined type
:param func:
:return:
"""
def wrapper(*args, **kwargs):
inner_func = _get_wrapped_method(func)
all_as_kwargs_casted = collections.OrderedDict()
all_as_kwargs = collect_all_arguments_to_dict(inner_func, args, kwargs) # all arguments to OrderedDict
arg_types = argument_types(inner_func)
for arg_name, arg_value in all_as_kwargs.items():
arg_type = arg_types.get(arg_name, None)
if arg_type: # if type defined:
try: # try to cast
arg_value = eval('{0}(arg_value)'.format(arg_type))
except NameError: # unknown namespace
pass
all_as_kwargs_casted[arg_name] = arg_value
# run decorated method with casted arguments
return func(**all_as_kwargs_casted)
_wrap(wrapper, func)
return wrapper
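# Minimal usage sketch (the logger and the sample function are illustrative):
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#
#   @Trace(logging.debug)
#   def add(a, b=1):
#       return a + b
#
#   add(2)  # logs "add(a=int(2), b=int(1))" and the timed return value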
| 26.420354
| 117
| 0.601574
| 2,856
| 0.478312
| 0
| 0
| 684
| 0.114554
| 0
| 0
| 1,671
| 0.279853
|
0501d436e365fc40c731e765ab901eb50645cb02
| 1,489
|
py
|
Python
|
main.py
|
ytyaru/Hatena.WebSite.Login.201703040757
|
11ffc5549398478146a9966189e06cf535b34092
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
ytyaru/Hatena.WebSite.Login.201703040757
|
11ffc5549398478146a9966189e06cf535b34092
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
ytyaru/Hatena.WebSite.Login.201703040757
|
11ffc5549398478146a9966189e06cf535b34092
|
[
"CC0-1.0"
] | null | null | null |
#!python3
#encoding:utf-8
from urllib.request import build_opener, HTTPCookieProcessor
from urllib.parse import urlencode
from http.cookiejar import CookieJar
import pprint
import dataset
class HatenaSite(object):
def __init__(self, path_hatena_accounts_sqlite3):
self.path_hatena_accounts_sqlite3 = path_hatena_accounts_sqlite3
self.db_accounts = dataset.connect('sqlite:///' + path_hatena_accounts_sqlite3)
def login(self, hatena_id):
account = self.db_accounts['Accounts'].find_one(HatenaId=hatena_id)
if (None == account):
            print('No account with the Hatena ID {0} exists in this DB: {1}'.format(hatena_id, self.path_hatena_accounts_sqlite3))
return
print(account['Password'])
opener = build_opener(HTTPCookieProcessor(CookieJar()))
post = {
'name': hatena_id,
'password': account['Password']
}
data = urlencode(post).encode('utf-8')
res = opener.open('https://www.hatena.ne.jp/login', data)
pprint.pprint(res.getheaders())
res.close()
url = 'http://f.hatena.ne.jp/{0}/{1}/rss'.format(hatena_id, 'Hatena Blog')
res = opener.open(url)
with open('photo_life.xml', 'wb') as f:
f.write(res.read())
res.close()
if __name__ == '__main__':
hatena_id = 'ytyaru'
client = HatenaSite(
path_hatena_accounts_sqlite3 = "meta_Hatena.Accounts.sqlite3"
)
client.login(hatena_id)
| 33.088889
| 110
| 0.646071
| 1,151
| 0.746918
| 0
| 0
| 0
| 0
| 0
| 0
| 330
| 0.214147
|
05031a4fb3f43f4e15927e78ef77f8dcad229be0
| 767
|
py
|
Python
|
csf_tz/fleet_management/doctype/vehicle/vehicle.py
|
Craftint/CSF_TZ
|
b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5
|
[
"MIT"
] | 4
|
2021-09-24T12:30:32.000Z
|
2022-03-19T14:55:34.000Z
|
csf_tz/fleet_management/doctype/vehicle/vehicle.py
|
Craftint/CSF_TZ
|
b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5
|
[
"MIT"
] | null | null | null |
csf_tz/fleet_management/doctype/vehicle/vehicle.py
|
Craftint/CSF_TZ
|
b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5
|
[
"MIT"
] | 7
|
2021-09-24T12:30:33.000Z
|
2022-03-21T11:34:02.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Bravo Logistics and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Vehicle(Document):
pass
@frappe.whitelist(allow_guest=True)
def change_status(**args):
args = frappe._dict(args)
#Edit vehicle status
vehicle = frappe.get_doc("Vehicle", args.vehicle)
if args.status != 'Booked':
vehicle.status = args.status
vehicle.hidden_status = args.hidden_status
vehicle.save()
return 'Vehicle Status Set'
if args.status == 'Booked' and vehicle.status == 'Available':
vehicle.status = "Booked"
vehicle.hidden_status = args.hidden_status
vehicle.save()
return 'Vehicle Status Set'
| 25.566667
| 62
| 0.741851
| 30
| 0.039113
| 0
| 0
| 499
| 0.650587
| 0
| 0
| 230
| 0.29987
|
05067ca48cd1bf1cfe7a6e17e6b2e4d00c579d5b
| 3,780
|
py
|
Python
|
app/mysql2json.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/mysql2json.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/mysql2json.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
#!coding:utf-8
import json
import pymysql
import pandas as pd
class ReadJson():
def __init__(self, host, user, passwd, db, table, sort=None, _filter=None):
self.host =host
self.user =user
self.passwd = passwd
self.db = db
self.table = table
self.sort = sort
self.filter = _filter
self.data = ''
self.jsondata = ''
def _filter_data(self, col, inlist):
"""
        Build a boolean row mask: True where `col` equals one of the values in `inlist`.
"""
bools = []
for ele in inlist:
bools.append(self.data[col] == ele)
bools = [any(elist) for elist in zip(*bools)]
return bools
def conecter_to_mysql(self):
connec = pymysql.connect(
host=self.host,
user=self.user,
password=self.passwd,
database=self.db,
charset='utf8',
use_unicode=True
)
return connec
def select_row(self, rowname, colname):
connec = self.conecter_to_mysql()
Cursor = connec.cursor()
sql = "SELECT * FROM `" + self.table + "` where " + colname + "=" + "'" + str(rowname) + "'"
Cursor.execute(sql)
row = Cursor.fetchall()
return row
def read_receptor(self):
connec = self.conecter_to_mysql()
# prepare data
sort_order = self.sort["order"]
sort_prop = self.sort["prop"]
filters = self.filter['cluster'][0]
_type = ""
if sort_order == "ascending":
_type = True
elif sort_order == "descending":
_type = False
# read MySQL data to DataFrame
sql = "SELECT * FROM " + "`" + self.table + "`;"
self.data = pd.read_sql(sql, connec)
bools = self._filter_data("cluster", filters)
if bools:
self.data = self.data[bools]
if sort_order:
self.data.sort_values(by=[sort_prop], ascending=_type, inplace=True)
self.jsondata = json.loads(self.data.to_json(orient="records"))
def read_json(self):
"""
        Read the table from MySQL, apply the configured filters and sorting, and store the result as JSON records.
"""
connec = self.conecter_to_mysql()
# prepare data
sort_order = self.sort["order"]
sort_prop = self.sort["prop"]
filter_order = self.filter["order"][0]
filter_Family = self.filter["Family"][0]
filter_Genus = self.filter["Genus"][0]
_type = ""
if sort_order == "ascending":
_type = True
elif sort_order == "descending":
_type = False
# read MySQL data to DataFrame
sql = "SELECT * FROM " + self.table + ";"
self.data = pd.read_sql(sql, connec)
        # filter function
order_bools = self._filter_data("Order", filter_order)
family_bools = self._filter_data("Family", filter_Family)
genus_bools = self._filter_data("Genus", filter_Genus)
bools = [elist for elist in (order_bools, family_bools, genus_bools) if elist]
bools = [all(elist) for elist in zip(*bools)]
if bools:
self.data = self.data[bools]
        # sort function
if sort_order:
self.data.sort_values(by=[sort_prop], ascending=_type, inplace=True)
# convert DataFrame to json
self.jsondata = json.loads(self.data.to_json(orient="records"))
def build_filter(self, colname):
"""
Build a filter list
"""
elems = list(set(self.data[colname].values))
if None in elems:
elems = list(filter(None, elems))
elems = sorted(elems)
elems.append(None)
else:
elems = sorted(elems)
outfilter = [{'text': ele, 'value': ele} for ele in elems]
return outfilter
| 30.983607
| 100
| 0.552381
| 3,704
| 0.979894
| 0
| 0
| 0
| 0
| 0
| 0
| 552
| 0.146032
|
0506e61a9ace0c2d5bc6f23b2cc7e615718656a8
| 3,583
|
py
|
Python
|
dict2xml.py
|
lucasicf/dict2xml
|
7421414c71e1d95a4d60e84f942379edb4df2df5
|
[
"BSD-3-Clause"
] | 12
|
2015-07-12T20:07:10.000Z
|
2022-02-10T05:16:14.000Z
|
dict2xml.py
|
lucasicf/dict2xml
|
7421414c71e1d95a4d60e84f942379edb4df2df5
|
[
"BSD-3-Clause"
] | null | null | null |
dict2xml.py
|
lucasicf/dict2xml
|
7421414c71e1d95a4d60e84f942379edb4df2df5
|
[
"BSD-3-Clause"
] | 7
|
2015-05-21T09:39:52.000Z
|
2021-02-28T22:01:15.000Z
|
# -*- coding: utf-8 -*-
from xml.dom import minidom
import re
# Thrown on any dictionary error
class Dict2XMLException(Exception):
pass
def _dict_sort_key(key_value):
key = key_value[0]
match = re.match('(\d+)__.*', key)
return match and int(match.groups()[0]) or key
_iter_dict_sorted = lambda dic: sorted(
dic.iteritems(), key=(lambda key_value: _dict_sort_key(key_value))
)
def _remove_order_id(key):
match = re.match('\d+__(.*)', key)
return match and match.groups()[0] or key
DATATYPE_ROOT_DICT = 0
DATATYPE_KEY = 1
DATATYPE_ATTR = 2
DATATYPE_ATTRS = 3
def _check_errors(value, data_type):
if data_type == DATATYPE_ROOT_DICT:
if isinstance(value, dict):
values = value.values()
if len(values) != 1:
raise Dict2XMLException(
'Must have exactly one root element in the dictionary.')
elif isinstance(values[0], list):
raise Dict2XMLException(
'The root element of the dictionary cannot have a list as value.')
else:
raise Dict2XMLException('Must pass a dictionary as an argument.')
elif data_type == DATATYPE_KEY:
if not isinstance(value, basestring):
raise Dict2XMLException('A key must be a string.')
elif data_type == DATATYPE_ATTR:
(attr, attrValue) = value
if not isinstance(attr, basestring):
raise Dict2XMLException('An attribute\'s key must be a string.')
if not isinstance(attrValue, basestring):
raise Dict2XMLException('An attribute\'s value must be a string.')
elif data_type == DATATYPE_ATTRS:
if not isinstance(value, dict):
raise Dict2XMLException('The first element of a tuple must be a dictionary '
'with a set of attributes for the main element.')
# Recursive core function
def _buildXMLTree(rootXMLElement, key, content, document):
_check_errors(key, DATATYPE_KEY)
keyElement = document.createElement(_remove_order_id(key))
if isinstance(content, tuple) and len(content) == 2:
(attrs, value) = content
else:
(attrs, value) = ({}, content)
_check_errors(attrs, DATATYPE_ATTRS)
for (attr, attrValue) in attrs.iteritems():
_check_errors((attr, attrValue), DATATYPE_ATTR)
keyElement.setAttribute(attr, '%s' % attrValue)
if isinstance(value, basestring):
# Simple text value inside the node
keyElement.appendChild(document.createTextNode('%s' % value))
rootXMLElement.appendChild(keyElement)
elif isinstance(value, dict):
# Iterating over the children
for (k, cont) in _iter_dict_sorted(value):
# Recursively parse the subdictionaries
_buildXMLTree(keyElement, k, cont, document)
rootXMLElement.appendChild(keyElement)
elif isinstance(value, list):
# Recursively replicate this key element for each value in the list
for subcontent in value:
_buildXMLTree(rootXMLElement, key, subcontent, document)
else:
raise Dict2XMLException('Invalid value.')
def dict2XML(dic, indent=True, utf8=False):
document = minidom.Document()
# Root call of the recursion
_check_errors(dic, DATATYPE_ROOT_DICT)
(key, content) = dic.items()[0]
_buildXMLTree(document, key, content, document)
encoding = utf8 and 'utf-8' or None
return (indent and document.toprettyxml(indent=' ', encoding=encoding)
or document.toxml(encoding=encoding))
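# Example (Python 2, illustrative input):
#   dict2XML({'person': {'0__name': 'Ada', '1__langs': {'lang': ['en', 'fr']}}})
# builds <person><name>Ada</name><langs><lang>en</lang><lang>fr</lang></langs></person>
# (the "N__" prefixes only fix the element order and are stripped from the output).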
| 34.786408
| 88
| 0.6542
| 44
| 0.01228
| 0
| 0
| 0
| 0
| 0
| 0
| 700
| 0.195367
|
05071a1ee7761ffc57199c77291dcea3601a853d
| 1,247
|
py
|
Python
|
06_rotation_transformation.py
|
Mathanraj-Sharma/OpenCV_Sample_Codes
|
a20710fa05d7817b9c4c78acc64b852b0cde7583
|
[
"Apache-2.0"
] | 1
|
2019-11-23T06:52:58.000Z
|
2019-11-23T06:52:58.000Z
|
06_rotation_transformation.py
|
Mathanraj-Sharma/OpenCV_Sample_Codes
|
a20710fa05d7817b9c4c78acc64b852b0cde7583
|
[
"Apache-2.0"
] | null | null | null |
06_rotation_transformation.py
|
Mathanraj-Sharma/OpenCV_Sample_Codes
|
a20710fa05d7817b9c4c78acc64b852b0cde7583
|
[
"Apache-2.0"
] | 1
|
2019-11-23T11:18:37.000Z
|
2019-11-23T11:18:37.000Z
|
import cv2
import argparse
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument('-i', required = True, help = 'Enter the path of Image')
args = vars(ap.parse_args())
image = cv2.imread(args['i'])
def wheel(image, center):
i = 1
while (True):
if i > 359:
cv2.imshow('Wheel', image)
cv2.waitKey(1)
i = 1
else:
rotated_image = rotate(image, center, i, 1.0)
cv2.imshow('Wheel', rotated_image)
cv2.waitKey(10)
i += 1
def rotate(image, point, angle, scale):
"""
    This function takes an image and rotates it through the given angle
    about the given point.
    Optionally the image can be scaled: 1.0 = original, 2.0 = double, etc.
"""
    # M is the rotation matrix derived from the point, angle, and scale
M = cv2.getRotationMatrix2D(point, angle, scale)
rotated_image = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
return rotated_image
if __name__ == '__main__':
    # transform the image about its center through -45 degrees
center = (image.shape[1]//2, image.shape[0]//2)
    angle = -45
cv2.imshow('Original Image', image)
cv2.waitKey(0)
    rotated_image = rotate(image, center, angle, 1.0)
cv2.imshow('Rotated Image', rotated_image)
cv2.waitKey(0)
wheel(image, center)
| 22.267857
| 75
| 0.690457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 397
| 0.318364
|
0507429bfe72a62ce8131002bc3538a2af143672
| 3,972
|
py
|
Python
|
feichangzun/directGetFlightData.py
|
Octoberr/weizhuangIP
|
d37e82df35d0b8b84bfa38f3a487fd81ab969070
|
[
"Apache-2.0"
] | null | null | null |
feichangzun/directGetFlightData.py
|
Octoberr/weizhuangIP
|
d37e82df35d0b8b84bfa38f3a487fd81ab969070
|
[
"Apache-2.0"
] | null | null | null |
feichangzun/directGetFlightData.py
|
Octoberr/weizhuangIP
|
d37e82df35d0b8b84bfa38f3a487fd81ab969070
|
[
"Apache-2.0"
] | null | null | null |
import getflightdata
import requests
from bs4 import BeautifulSoup
import random
import json
import pymongo
import datetime
from Utils.config import config
# import config
mongoConf = config['mongo']
feichangzun = 'http://www.variflight.com/flight/fnum/'
feichangzunhouzui = '.html?AE71649A58c77&fdate='
def get_headers():
headers = {
"X-Forwarded-For": '%s.%s.%s.%s' % (
random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
'Host': "www.variflight.com",
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate'}
return headers
def getqueryflight(flight, flightdate):
allflightlinks = []
client = pymongo.MongoClient(host=mongoConf['host'], port=mongoConf['port'])
db = client.swmdb
feichangzhundata = db.feichangzun
cursor = feichangzhundata.find({"Info.fno": flight, "Info.Date": flightdate})
for el in cursor:
allflightlinks.append(el)
return allflightlinks
def getDirectFlight(flight, flightdate):
strDate = datetime.datetime.strptime(flightdate, "%Y-%m-%d").strftime("%Y%m%d")
gt = getflightdata.GETFLIGHTDATA()
url = feichangzun + flight + feichangzunhouzui + strDate
flightlist = []
listHtml = requests.get(url, headers=get_headers())
listSoup = BeautifulSoup(listHtml.text, 'lxml')
listUrl = listSoup.find('div', class_='fly_list')
if listUrl is not None:
listhref = listUrl.find('div', class_='li_box').find_all('a')
for link in listhref:
if '/schedule' in link.get('href'):
flightlist.append(link.get('href'))
flightdictlist = gt.getaflightinfo(flightlist)
if len(flightdictlist) == 0:
return None
flightdict = getFlightJsonData(flightdictlist)
querdata = getqueryflight(flight, flightdate)
if len(querdata) == 0:
gt.insertintomongo(flightdict)
del(flightdict['_id'])
# flightdictr = json.dumps(flightdict)
return flightdict
def getFlightJsonData(flightinfo):
flightdic = {}
info = {}
    # Pick the segment record: index 0 when only one segment is returned, index 1 otherwise.
    init = 0 if len(flightinfo) == 1 else 1
    info['from'] = flightinfo[init]['qf']
    info['to'] = flightinfo[init]['dd']
    info['from_simple'] = flightinfo[init]['qf_simple']
    info['to_simple'] = flightinfo[init]['dd_simple']
    info['FromTerminal'] = flightinfo[init]['qfTerminal']
    info['ToTerminal'] = flightinfo[init]['ddTerminal']
    info['from_city'] = flightinfo[init]['qf_city']
    info['to_city'] = flightinfo[init]['dd_city']
    info['from_code'] = flightinfo[init]['qf_citycode']
    info['to_code'] = flightinfo[init]['dd_citycode']
    info['fno'] = flightinfo[init]['fno']
    info['Company'] = '3U'
    info['Date'] = flightinfo[init]['date']
    info['zql'] = ""
flightdic['Info'] = info
flightdic['List'] = flightinfo
return flightdic
#
# flight = '3U3048'
# flightdate ='2017-08-02'
#
# jsodater = getDirectFlight(flight, flightdate)
# print(jsodater)
| 34.842105
| 108
| 0.629909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,132
| 0.284995
|
0507ce8c6b29b5cd6c3e947a8e5f6cea05343e0b
| 2,402
|
py
|
Python
|
face/face-30sec.py
|
eric-erki/ai-smarthome
|
ca7316ebe72b0ad26f0b59e3186426633807cac8
|
[
"BSD-2-Clause"
] | 28
|
2018-08-09T13:10:34.000Z
|
2022-01-07T13:39:31.000Z
|
face/face-30sec.py
|
eric-erki/ai-smarthome
|
ca7316ebe72b0ad26f0b59e3186426633807cac8
|
[
"BSD-2-Clause"
] | 4
|
2018-08-09T13:18:12.000Z
|
2021-04-06T19:04:54.000Z
|
face/face-30sec.py
|
eric-erki/ai-smarthome
|
ca7316ebe72b0ad26f0b59e3186426633807cac8
|
[
"BSD-2-Clause"
] | 15
|
2018-12-17T09:17:28.000Z
|
2021-03-02T11:25:05.000Z
|
import numpy as np
import cv2
import face_recognition
import time
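# Key controls used in the capture loop below:
#   'p' - photograph the current face and keep its encoding for ~30 seconds,
#   'o' - report whether the last match is the newly photographed face,
#   'q' - quit.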
# Load a sample picture and learn how to recognize it.
me_image = face_recognition.load_image_file("known/joakim.png")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
known_face_encodings = [
me_face_encoding,
]
known_face_names = [
"Joakim Eriksson",
]
cap = cv2.VideoCapture(0)
photo_time = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
face_locations = face_recognition.face_locations(frame)
face_encodings = face_recognition.face_encodings(frame, face_locations)
print(face_locations)
name = "Unknown"
match = False
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
match = True
cut = frame[top:bottom, left:right]
cv2.rectangle(frame,(left, top), (right, bottom),(0,255,0),3)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, name,(left, top - 5), font, 0.7, (255,255,255),2,cv2.LINE_AA)
cv2.imshow('cut', cut)
print("Name: ", name)
if match == False:
print("no match")
# Display the resulting frame
cv2.imshow('frame', frame)
if time.time() - photo_time > 30.0:
print("the photo is old...")
known_face_encodings = known_face_encodings[0:1]
known_face_names = known_face_names[0:1]
key = cv2.waitKey(1) & 0xff
if key == ord('q'):
break
if key == ord('p'):
if(len(known_face_encodings) < 2):
print("Storing new encoding")
photo_time = time.time()
known_face_encodings = known_face_encodings + [face_encoding]
known_face_names = known_face_names + ["Newly Photoed"]
if key == ord('o'):
if name == "Newly Photoed":
print("Door will open for you!")
else:
print("Door is closed for you!")
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 32.459459
| 89
| 0.651957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 528
| 0.219817
|
0509cd66d3399c704328d6c31b4db43646200a86
| 8,576
|
py
|
Python
|
sdtables/sdtables.py
|
cunningr/sdtables
|
a698acbeca30e5451ca3285292f089109b360a04
|
[
"MIT"
] | null | null | null |
sdtables/sdtables.py
|
cunningr/sdtables
|
a698acbeca30e5451ca3285292f089109b360a04
|
[
"MIT"
] | 1
|
2020-06-25T08:55:00.000Z
|
2021-03-22T12:54:48.000Z
|
sdtables/sdtables.py
|
cunningr/sdtables
|
a698acbeca30e5451ca3285292f089109b360a04
|
[
"MIT"
] | 1
|
2020-07-03T10:18:58.000Z
|
2020-07-03T10:18:58.000Z
|
# coding: utf-8
"""
xlTables - Load/generate table data with Excel
from python dictionary structures
cunningr - 2020
Requires openpyxl >= 2.6.2, jsonschema
"""
import os
import openpyxl
from openpyxl import Workbook
from sdtables import xlTables
from tabulate import tabulate
class SdTables:
def __init__(self, wb=None):
self.sheetnames = []
self.table_names = {}
self.schemas = {}
self.validation_results = {}
if wb is not None:
self.load_xlsx_file(wb)
else:
self.wb = Workbook()
ws = self.wb.active
self.wb.remove(ws)
def load_xlsx_file(self, file, data_only=False):
"""
Method used to load an xlsx file containing one or more tables
:return:
"""
self.wb = openpyxl.load_workbook(filename=file, data_only=data_only)
self.sheetnames = self.wb.sheetnames
self._get_xl_table_data()
def _get_xl_table_data(self):
"""
Internal method used to index tables from openpyxl workbook object
:return:
"""
_tables_dict = {}
for sheet in self.wb.sheetnames:
for table in self.wb[sheet].tables.values():
_tables_dict.update({table.name: sheet})
self.table_names = _tables_dict
def get_table_as_dict(self, table_name, fill_empty=False, string_only=False):
"""
Takes a table name and returns the data as list of dictionaries
Args:
table_name: Name of the table name
            fill_empty: By default an empty cell will have the value None.
fill_empty will replace None with the empty string ""
string_only: Enforce that all cell values convert to strings
Returns:
A dictionary (key=table_name) with a list of dictionaries (rows)
"""
worksheet_name = self.table_names[table_name]
ws = self.wb[worksheet_name]
return xlTables.build_dict_from_table(ws, table_name, fill_empty=fill_empty, string_only=string_only)
def get_all_tables_as_dict(self, flatten=False, squash=False, fill_empty=False, string_only=False):
"""
Returns all table data. When dealing with tables from xlsx, by default each table is nested in a dictionary
        using the worksheet names as the key, e.g.
{ "worksheet_name":
[
{ "table_name": [{"col1": "value", "col2": "value"}]}
]
}
Args:
            flatten: Removes the worksheet_name hierarchy from the returned dictionary when using xlsx as the source
squash: Replaces the table_name with the worksheet_name when using xlsx as source.
Only one table per worksheet allowed and ignores additional tables
            fill_empty: By default an empty cell will have the value None.
fill_empty will replace None with the empty string ""
string_only: Enforce that all cell values convert to strings (E.g. for xlsx formulae)
Returns:
A list of dictionaries (rows)
"""
_dict = {}
for table_name, worksheet_name in self.table_names.items():
ws = self.wb[worksheet_name]
table_dict = xlTables.build_dict_from_table(ws, table_name, fill_empty=fill_empty, string_only=string_only)
if flatten:
if squash:
print('ERROR: Do not set flatten=True and squash=True together')
return
_dict.update(table_dict)
elif squash:
_dict_key = list(table_dict.keys())[0]
_dict.update({worksheet_name: table_dict[table_name]})
else:
if not _dict.get(worksheet_name):
_dict.update({worksheet_name: {}})
_dict[worksheet_name].update(table_dict)
return _dict
def add_xlsx_table_from_data(self, table_name, data, worksheet_name='Sheet1', table_style='TableStyleMedium2', row_offset=2, col_offset=1):
if type(table_name) is not str or type(data) is not list:
print('ERROR: table name must be of type str and data of type list')
if worksheet_name not in self.wb.sheetnames:
_ws = self.wb.create_sheet(worksheet_name)
else:
_ws = self.wb[worksheet_name]
schema = {'properties': xlTables._build_schema_from_row(data[0])}
xlTables.add_schema_table_to_worksheet(_ws, table_name, schema, data=data, table_style=table_style, row_offset=row_offset, col_offset=col_offset)
self._get_xl_table_data()
def update_xlsx_table_data(self, table_name, data, append=True, schema=None):
print('WARNING: update data is experimental and is known to break data validation')
self._get_xl_table_data()
if self.table_names.get(table_name):
worksheet_name = self.table_names.get(table_name)
else:
print('ERROR: table with name {} not found'.format(table_name))
return
xlTables.update_table_data(self.wb, worksheet_name, table_name, data, append=append, schema=schema)
def add_xlsx_table_from_schema(self, table_name, schema, worksheet_name='default', data=None, table_style='TableStyleMedium2', row_offset=2, col_offset=1):
if type(table_name) is not str or type(schema) is not dict:
print('ERROR: table name must be of type str and schema of type dict')
if worksheet_name not in self.wb.sheetnames:
_ws = self.wb.create_sheet(worksheet_name)
else:
_ws = self.wb[worksheet_name]
self.schemas.update({table_name: schema})
return xlTables.add_schema_table_to_worksheet(_ws, table_name, schema, data=data, table_style=table_style, row_offset=row_offset, col_offset=col_offset)
def validate_table_data_with_schema(self, table_name, schema):
self._get_xl_table_data()
ws = self.wb[self.table_names[table_name]]
data = xlTables.build_dict_from_table(ws, table_name, fill_empty=False, string_only=False)
results = {'results.summary': {}, 'results.details': {}}
_validate_results = xlTables.validate_data(schema, data[table_name])
results['results.summary'] = {'table': table_name, 'result': _validate_results['result']}
results['results.details'] = {'table': table_name, 'result': _validate_results['details']}
return results
def validate_table_data(self, stdout=False):
self._get_xl_table_data()
results = {'results.summary': [], 'results.details': []}
for table_name, worksheet_name in self.table_names.items():
if table_name in self.schemas.keys():
_validate_results = self.validate_table_data_with_schema(table_name, self.schemas[table_name])
results['results.summary'].append(_validate_results['results.summary'])
results['results.details'].append(_validate_results['results.details'])
else:
print('WARNING: No schema found for table: "{}"'.format(table_name))
self.validation_results.update(results)
if stdout:
self.print_validation_results()
return results
def delete_xlsx_table(self, table_name, row_offset=2, col_offset=1):
self._get_xl_table_data()
if self.table_names.get(table_name):
worksheet_name = self.table_names[table_name]
xlTables.delete_table(self.wb, worksheet_name, table_name, row_offset=row_offset, col_offset=col_offset)
self.table_names.pop(table_name, None)
def add_schema(self, schema_name, schema):
self.schemas.update({schema_name: schema})
def delete_schema(self, schema_name):
self.schemas.pop(schema_name, None)
def save_xlsx(self, filename):
xlsx_filename = '{}/{}.xlsx'.format(os.getcwd(), filename)
self.wb.save(xlsx_filename)
def print_validation_results(self):
print('\nValidation Summary:\n')
print(tabulate(self.validation_results['results.summary'], headers='keys', tablefmt="grid"))
print('\nValidation Details:\n')
for table in self.validation_results['results.details']:
print('Table: {}'.format(table['table']))
print(tabulate(table['result'], headers='keys', tablefmt="grid"))
# Retrieve a list of schema names under a given worksheet
# list(filter(lambda item: "network_settings" in item.keys(), meme.schemanames))
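# Typical flow (sketch; the file, table and schema names below are hypothetical):
#   tables = SdTables()
#   tables.load_xlsx_file('inventory.xlsx')
#   data = tables.get_all_tables_as_dict(flatten=True)
#   tables.add_schema('hosts', hosts_schema)
#   tables.validate_table_data(stdout=True)
#   tables.save_xlsx('inventory_checked')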
| 40.262911
| 160
| 0.649021
| 8,135
| 0.948577
| 0
| 0
| 0
| 0
| 0
| 0
| 2,693
| 0.314016
|
050a2b44b8dd6b46945c916a81b519efe47b76fb
| 2,473
|
py
|
Python
|
solutions/dropbox/compare_functions.py
|
roman-kachanovsky/checkio
|
3134cbc04ed56e92006d1e2f09d7365e900953db
|
[
"BSD-3-Clause"
] | 1
|
2017-02-07T19:50:52.000Z
|
2017-02-07T19:50:52.000Z
|
solutions/dropbox/compare_functions.py
|
roman-kachanovsky/checkio-python
|
3134cbc04ed56e92006d1e2f09d7365e900953db
|
[
"BSD-3-Clause"
] | null | null | null |
solutions/dropbox/compare_functions.py
|
roman-kachanovsky/checkio-python
|
3134cbc04ed56e92006d1e2f09d7365e900953db
|
[
"BSD-3-Clause"
] | null | null | null |
""" --- Compare Functions --- Simple
Two functions f and g are provided as inputs to checkio.
The first function f is the primary function and the second
function g is the backup. Use your coding skills to return
a third function h which returns the same output as f unless
f raises an exception or returns None. In this case h should
return the same output as g. If both f and g raise exceptions
or return None, then h should return None.
As a second output, h should return a status string indicating
whether the function values are the same and if either function
erred. A function errs if it raises an exception or returns
a null value (None).
The status string should be set to: "same" if f and g return
the same output and neither errs, "different" if f and g return
different outputs and neither errs, "f_error" if f errs but not g,
"g_error" if g errs but not f, or "both_error" if both err.
Input: Two functions: f (primary) and g (backup).
Output: A function h which takes arbitrary inputs
and returns a two-tuple.
How it is used: This is an exercise in working with functions
as first class objects.
Precondition: hasattr(f,'__call__');
hasattr(g,'__call__')
"""
def my_solution(f, g):
def h(*args, **kwargs):
f_res, f_err, g_res, g_err = None, False, None, False
try:
f_res = f(*args, **kwargs)
f_err = f_res is None
except:
f_err = True
try:
g_res = g(*args, **kwargs)
g_err = g_res is None
except:
g_err = True
if f_err and g_err:
return None, 'both_error'
elif g_err or f_err:
return (f_res, 'g_error') if g_err else (g_res, 'f_error')
else:
return (g_res, 'same') if f_res == g_res else (f_res, 'different')
return h
def diz_solution(*funcs):
def helper(*args, **kwargs):
output = None
status = 'same'
for i, f in enumerate(funcs, ord('f')):
try:
result = f(*args, **kwargs)
except:
result = None
if result is None:
status = [chr(i), 'both']['error' in status] + '_error'
elif output is None:
output = result
elif result != output:
status = 'different'
return output, status
return helper
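# Quick illustration of the contract (values chosen for the example):
#   h = my_solution(lambda x: x + 1, lambda x: x * 2)
#   h(3)       # -> (4, 'different'): both succeed but disagree, so f's output wins
#   h_err = my_solution(lambda x: 1 / 0, lambda x: x * 2)
#   h_err(3)   # -> (6, 'f_error'): f raises, so g's output is used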
| 32.973333
| 78
| 0.595633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,361
| 0.550344
|
050b0bea353171a3c51a6088825350acb0d9291f
| 3,402
|
py
|
Python
|
yawndb/sync.py
|
selectel/python-yawndb
|
6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6
|
[
"MIT"
] | null | null | null |
yawndb/sync.py
|
selectel/python-yawndb
|
6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6
|
[
"MIT"
] | null | null | null |
yawndb/sync.py
|
selectel/python-yawndb
|
6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6
|
[
"MIT"
] | null | null | null |
"""
yawndb.sync
~~~~~~~~~~~
Sync YAWNDB transport. Uses standard socket object methods.
"""
import time
import json
import socket
import urllib2
import logging
from collections import deque
from yawndb._base import _YAWNDBBase
_logger = logging.getLogger(__name__)
class YAWNDB(_YAWNDBBase):
"""Sync YAWNDB transport.
    Stores data that could not be sent in a cache; cached entries are resent
    on the next :py:meth:`.send_msgs` call, or manually via the
    :py:meth:`.send_cached` method.
    Tries to reconnect on each :py:meth:`.send` and :py:meth:`.send_msgs`
    call if the connection has been lost.
"""
def __init__(self, host, tcp_port=2011, json_port=8081, cache_size=100000):
super(YAWNDB, self).__init__(host, tcp_port, json_port)
self._socket = None
self._disconnected = 0
self._data_cache = deque([], cache_size)
def slice(self, path, rule, from_t, to_t):
url = 'http://{0}:{1}/paths/{2}/{3}/slice?from={4}&to={5}'.format(
self._host, self._json_port, path, rule, from_t, to_t)
return self._request(url)
def last(self, path, rule, n):
url = 'http://{0}:{1}/paths/{2}/{3}/last?n={4}'.format(
self._host, self._json_port, path, rule, n)
return self._request(url)
def _request(self, url):
try:
res = urllib2.urlopen(url).read()
except Exception:
_logger.exception('JSON API IO error on %s', url)
return []
else:
res = json.loads(res)
if res['status'] != 'ok':
_logger.error('JSON API error on %s: %s', url, res)
return []
if res['answer'] == 'empty':
return []
return res['answer']
def start(self):
try:
self._socket = socket.socket()
self._socket.settimeout(2)
self._socket.connect((self._host, self._tcp_port))
except IOError:
_logger.error(
                'Couldn\'t connect to YAWNDB at %s:%s',
self._host, self._tcp_port)
self.stop()
def stop(self):
if self._socket:
try:
self._socket.close()
except IOError:
pass
self._disconnected = time.time()
self._socket = None
def _send(self, data):
if not self._socket:
return False
try:
self._socket.sendall(data)
return True
except IOError:
_logger.error(
'Couldn\'t send data to YAWNDB at %s:%s',
self._host, self._tcp_port)
self.stop()
self._socket = None
return False
def send(self, data):
if not self._send(data):
self._data_cache.append(data)
def send_msgs(self, msgs):
super(YAWNDB, self).send_msgs(msgs)
self.send_cached()
def send_cached(self):
"""Try to re-send data that was failed to sent."""
if not self._socket and time.time() - self._disconnected > 10:
self.start()
while True:
if not self._socket:
break
try:
data = self._data_cache.popleft()
except IndexError:
break
if not self._send(data):
self._data_cache.appendleft(data)
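# Typical use (sketch; the host, path and rule names are illustrative):
#   db = YAWNDB('yawndb.local')
#   db.start()
#   db.send_msgs(msgs)                       # unsent data is cached and retried
#   points = db.last('cpu.load', 'avg_5min', 10)
#   db.stop()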
| 29.076923
| 79
| 0.546149
| 3,118
| 0.91652
| 0
| 0
| 0
| 0
| 0
| 0
| 711
| 0.208995
|
050b23d1c21cc11db93c4c94dba0b845a1f1693e
| 1,209
|
py
|
Python
|
setup.py
|
ofek/depq
|
370e3ad503d3e9cedc3c49dc64add393ba945764
|
[
"MIT"
] | 1
|
2019-02-12T13:17:56.000Z
|
2019-02-12T13:17:56.000Z
|
setup.py
|
ofek/depq
|
370e3ad503d3e9cedc3c49dc64add393ba945764
|
[
"MIT"
] | 4
|
2016-12-10T20:17:38.000Z
|
2017-06-16T19:02:47.000Z
|
setup.py
|
ofek/depq
|
370e3ad503d3e9cedc3c49dc64add393ba945764
|
[
"MIT"
] | 5
|
2016-12-10T20:13:42.000Z
|
2020-09-28T09:02:10.000Z
|
from setuptools import setup, find_packages
with open('README.rst', 'r') as infile:
read_me = infile.read()
setup(
packages=find_packages(),
name='depq',
version='1.5.5',
description='Double-ended priority queue',
long_description=read_me,
author='Ofek Lev',
author_email='ofekmeister@gmail.com',
maintainer='Ofek Lev',
maintainer_email='ofekmeister@gmail.com',
url='https://github.com/Ofekmeister/depq',
download_url='https://github.com/Ofekmeister/depq',
license='MIT',
platforms=None,
keywords=[
'double ended priority queue',
'depq',
'priority queue',
'data structure',
'scheduling',
'heuristic analysis',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| 28.116279
| 71
| 0.612076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 663
| 0.548387
|
050bc5ae6e8eba8aac368023fb49c3014cb5ef03
| 880
|
py
|
Python
|
tests/exact_tests/contour_tests/strategies.py
|
lycantropos/rene
|
c73c616f3e360b994e92c950a3616a8ccb1136b9
|
[
"MIT"
] | null | null | null |
tests/exact_tests/contour_tests/strategies.py
|
lycantropos/rene
|
c73c616f3e360b994e92c950a3616a8ccb1136b9
|
[
"MIT"
] | null | null | null |
tests/exact_tests/contour_tests/strategies.py
|
lycantropos/rene
|
c73c616f3e360b994e92c950a3616a8ccb1136b9
|
[
"MIT"
] | null | null | null |
from hypothesis import strategies
from rithm import Fraction
from rene import MIN_CONTOUR_VERTICES_COUNT
from rene.exact import (Contour,
Point)
integers = strategies.integers()
non_zero_integers = integers.filter(bool)
scalars = (integers | strategies.fractions()
| strategies.builds(Fraction, integers, non_zero_integers)
| strategies.floats(allow_infinity=False,
allow_nan=False))
points = strategies.builds(Point, scalars, scalars)
contours_vertices = strategies.lists(points,
unique=True,
min_size=MIN_CONTOUR_VERTICES_COUNT)
invalid_count_contours_vertices = strategies.lists(
points,
unique=True,
max_size=MIN_CONTOUR_VERTICES_COUNT - 1
)
contours = strategies.builds(Contour, contours_vertices)
| 36.666667
| 73
| 0.664773
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
050d54bffebd30db922715e17f24f419261765d4
| 2,834
|
py
|
Python
|
bendyprank.py
|
gazhay/bendyprank
|
9af6b2536fb54001fe1681551362418f1ae78ed3
|
[
"Unlicense"
] | null | null | null |
bendyprank.py
|
gazhay/bendyprank
|
9af6b2536fb54001fe1681551362418f1ae78ed3
|
[
"Unlicense"
] | null | null | null |
bendyprank.py
|
gazhay/bendyprank
|
9af6b2536fb54001fe1681551362418f1ae78ed3
|
[
"Unlicense"
] | null | null | null |
# Bendy and the Ink Machine, BATIM, and all graphics and sounds are © The Meatly
# NOT AN OFFICIAL BENDY AND THE INK MACHINE PRODUCT. NOT APPROVED BY OR ASSOCIATED WITH THEMEATLY GAMES, LTD.
# Code below released under GPLv2
import wx
import subprocess
from random import randint
from time import sleep
IMAGE_PATH = 'Bendy.png'
WAKE_SPEAKERS = True
class ShapedFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "Shaped Window", style = wx.FRAME_SHAPED | wx.SIMPLE_BORDER)
self.hasShape = False
self.delta = wx.Point(0,0)
image = wx.Image(IMAGE_PATH, wx.BITMAP_TYPE_PNG)
self.bmp = wx.BitmapFromImage(image)
self.SetClientSize((self.bmp.GetWidth(), self.bmp.GetHeight()))
dc = wx.ClientDC(self)
dc.DrawBitmap(self.bmp, 0,0, True)
self.SetWindowShape()
self.hider = wx.Timer(self)
self.shower = wx.Timer(self)
self.Bind(wx.EVT_RIGHT_UP, self.OnExit )
self.Bind(wx.EVT_PAINT, self.OnPaint )
self.Bind(wx.EVT_WINDOW_CREATE, self.SetWindowShape )
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground )
self.Bind(wx.EVT_TIMER, self.timertrigger, self.hider )
self.Bind(wx.EVT_TIMER, self.showagain , self.shower)
(w,h) = wx.GetDisplaySize()
self.SetPosition(wx.Point( (w-self.bmp.GetWidth())/2, (h-self.bmp.GetHeight())/2 ))
self.hider.Start(500, True)
def OnEraseBackground(self,evt=None):
pass
def SetWindowShape(self, evt=None):
r = wx.RegionFromBitmap(self.bmp)
self.hasShape = self.SetShape(r)
def OnPaint(self, evt):
dc = wx.PaintDC(self)
dc.DrawBitmap(self.bmp, 0,0, True)
def OnExit(self, evt):
self.Close()
def timertrigger(self, evt):
self.hider.Stop()
self.Show(False)
self.shower.Start(randint(300,1500)*1000, True)
def showagain(self, evt):
self.shower.Stop()
# Wake up speakers too
if WAKE_SPEAKERS:
cmdstr = "aplay Silent.wav"
subprocess.call(cmdstr, shell=True)
cmdstr = "aplay SFX_Jumpscare_01.wav".split()
subprocess.Popen(cmdstr, stdin=None, stdout=None, stderr=None, close_fds=True)
self.Show(True)
self.hider.Start(500, True)
if __name__ == '__main__':
try:
app = wx.App(False)
if WAKE_SPEAKERS:
cmdstr = "aplay Silent.wav"
subprocess.call(cmdstr, shell=True)
cmdstr = "aplay SFX_Jumpscare_01.wav".split()
subprocess.Popen(cmdstr, stdin=None, stdout=None, stderr=None, close_fds=True)
ShapedFrame().Show()
app.MainLoop()
except KeyboardInterrupt:
exit(0)
| 34.560976
| 111
| 0.617855
| 2,041
| 0.719929
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.05291
|
050f7c817ca7e48958c01acf2a63b083dd36ff69
| 1,347
|
py
|
Python
|
src/answer_key.py
|
quuu/ASCIImage
|
d276b9428b8b127069999ffe8e025e8e0ad43c0c
|
[
"MIT"
] | 2
|
2018-08-01T02:13:31.000Z
|
2018-08-01T03:02:31.000Z
|
src/answer_key.py
|
quuu/Genetic-ASCIImage
|
d276b9428b8b127069999ffe8e025e8e0ad43c0c
|
[
"MIT"
] | 11
|
2018-08-12T13:28:19.000Z
|
2018-09-08T19:15:30.000Z
|
src/answer_key.py
|
quuu/ASCIImage
|
d276b9428b8b127069999ffe8e025e8e0ad43c0c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
from skimage import io
from skimage import color
from PIL import Image
#TODO Multi-thread
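# Pipeline: my_crop tiles the source image into fixed-size blocks,
# make_image_list reloads each tile as greyscale, and avg_brightness
# computes the mean pixel value of every block (printed at the bottom).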
def avg_brightness(image_list):
"""
A list of grey scale images
"""
brightness_per_block=[]
for image in image_list:
img_shape = image.shape
img_Size = image.size
total=0
for i in range(0,img_shape[0]):
for j in range(0,img_shape[1]):
total+=image[i][j]
total/=img_Size
brightness_per_block.append(total)
return brightness_per_block
def make_image_list(image_paths):
images = []
for image in image_paths:
colorImg = io.imread(image)
greyImg = color.rgb2grey(colorImg)
images.append(greyImg)
return images
def my_crop(input, height, width, k, page):
image_list = []
im = Image.open(input)
imgwidth, imgheight = im.size
for i in range(0,imgheight,height):
for j in range(0,imgwidth,width):
box = (j, i, j+width, i+height)
a = im.crop(box)
a.save("IMG-%d.png"%k)
image_list.append("IMG-%d.png"%k)
k +=1
return image_list
image_list = my_crop("picture.jpg",80,80,(220*220)/(80*80),"page_name")
images = make_image_list(image_list)
bright_per_block = avg_brightness(images)
print(bright_per_block)
| 26.94
| 71
| 0.626578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.097253
|
05108d99ff3259ead7d1205123464ffd5c4850a2
| 5,504
|
py
|
Python
|
app.py
|
makerdao-data/gov-tracker
|
52b7588e5c200b0af5b64a2891b276cbcc149ff1
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
makerdao-data/gov-tracker
|
52b7588e5c200b0af5b64a2891b276cbcc149ff1
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
makerdao-data/gov-tracker
|
52b7588e5c200b0af5b64a2891b276cbcc149ff1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public version #
from flask import Flask, request, jsonify
import atexit
from datetime import datetime
import csv
from io import StringIO
from werkzeug.wrappers import Response
from sqlalchemy import func
from deps import get_db
from utils.query import pull_filtered_data
from views.main_view import main_page_view, main_page_data
from views.address_views import address_page_view, address_data_view
from views.yay_views import yay_page_view, yay_data_view
from views.poll_views import poll_page_view, poll_data_view
from views.proxy_views import proxy_page_view, proxy_data_view
from views.protocol_parameters_views import parameters_page_view, parameters_data_view
from connectors.sf import sf, sf_disconnect
from models import ParameterEvent
app = Flask(__name__)
app.config["JSON_SORT_KEYS"] = False
# HTML endpoints -------------------------------------------
@app.route("/")
def main_page():
return main_page_view(sf)
@app.route("/address/<address>")
def address_page(address):
return address_page_view(sf, address.lower())
@app.route("/proxy/<proxy>")
def proxy_page(proxy):
return proxy_page_view(sf, proxy.lower())
@app.route("/yay/<yay_id>")
def yay_page(yay_id):
return yay_page_view(sf, yay_id)
@app.route("/poll/<poll_id>")
def poll_page(poll_id):
return poll_page_view(sf, poll_id)
@app.route("/protocol_parameters")
def parameters_page():
return parameters_page_view(sf)
# DATA endpoints -------------------------------------------
@app.route("/data/main", methods=["GET"])
def get_main_page_data():
dataset = main_page_data(sf)
return jsonify(dataset)
@app.route("/data/address/<address>", methods=["GET"])
def get_address_page_data(address):
dataset = address_data_view(sf, address.lower())
return jsonify(dataset)
@app.route("/data/proxy/<proxy>", methods=["GET"])
def get_proxy_page_data(proxy):
dataset = proxy_data_view(sf, proxy.lower())
return jsonify(dataset)
@app.route("/data/yay/<yay>", methods=["GET"])
def get_yay_page_data(yay):
dataset = yay_data_view(sf, yay)
return jsonify(dataset)
@app.route("/data/poll/<poll>", methods=["GET"])
def get_poll_page_data(poll):
dataset = poll_data_view(sf, poll)
return jsonify(dataset)
# @app.route("/data/protocol_parameters", methods=["GET"])
# def get_parameters_page_data():
# dataset = parameters_data_view(sf)
# return jsonify(dataset)
@app.route("/data/protocol_parameters/<s>/<e>", methods=["GET"])
def get_parameters_page_data(s, e):
session = next(get_db())
query = pull_filtered_data(request, s, e, session, ParameterEvent)
total_filtered = query.count()
# sorting
order = []
i = 0
while True:
col_index = request.args.get(f'order[{i}][column]')
if col_index is None:
break
col_name = request.args.get(f'columns[{col_index}][data]')
if col_name not in ['block', 'timestamp', 'tx_hash', 'source', 'parameter', 'ilk', 'from_value', 'to_value']:
col_name = 'block'
descending = request.args.get(f'order[{i}][dir]') == 'desc'
col = getattr(ParameterEvent, col_name)
if descending:
col = col.desc()
order.append(col)
i += 1
if order:
query = query.order_by(*order)
# pagination
start = request.args.get('start', type=int)
length = request.args.get('length', type=int)
query = query.offset(start).limit(length)
records_total = session.query(ParameterEvent).count()
# response
return {
'data': [record.to_dict() for record in query],
'recordsFiltered': total_filtered,
'recordsTotal': records_total,
'draw': request.args.get('draw', type=int),
}
@app.route("/data/parameters_history_export/<s>/<e>", methods=["GET"])
def parameters_history_export(s, e):
session = next(get_db())
query = pull_filtered_data(request, s, e, session, ParameterEvent)
def generate():
data = StringIO()
w = csv.writer(data)
# write header
w.writerow(('block', 'timestamp', 'tx_hash', 'source', 'parameter', 'ilk', 'from_value', 'to_value'))
yield data.getvalue()
data.seek(0)
data.truncate(0)
# write each log item
for item in query:
w.writerow(tuple(item.to_list()))
yield data.getvalue()
data.seek(0)
data.truncate(0)
# stream the response as the data is generated
response = Response(generate(), mimetype='text/csv')
# add a filename
response.headers.set("Content-Disposition", "attachment", filename="export.csv")
return response
# cleanup tasks
def cleanup_task():
if not sf.is_closed():
sf_disconnect(sf)
print("SF connection closed.")
atexit.register(cleanup_task)
if __name__ == "__main__":
app.run(debug=False)
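For reference, a minimal client-side sketch of exercising the DataTables-style endpoint above; the host, port, and date format are assumptions, since they depend on the deployment and on how pull_filtered_data interprets <s> and <e>.
import requests

# Hypothetical host and date range; the query parameters mirror the
# DataTables protocol parsed by /data/protocol_parameters/<s>/<e>:
# start/length drive pagination, order[i]/columns[i] drive sorting.
params = {
    "draw": 1,
    "start": 0,
    "length": 25,
    "order[0][column]": 0,
    "order[0][dir]": "desc",
    "columns[0][data]": "block",
}
resp = requests.get(
    "http://localhost:5000/data/protocol_parameters/2021-01-01/2021-12-31",
    params=params,
)
page = resp.json()
print(page["recordsTotal"], page["recordsFiltered"], len(page["data"]))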
| 28.518135 | 117 | 0.677871 | 0 | 0 | 851 | 0.154615 | 3,527 | 0.640807 | 0 | 0 | 1,703 | 0.309411 |
051130482cb3691a34b0be84581c86dd2a4ce54f | 3,280 | py | Python | open_spiel/python/mst/run_mst.py | BrandonKates/open_spiel | f820abe9bdfdbc4bd45c2e933439393d4ad3622a | ["Apache-2.0"] | null | null | null | open_spiel/python/mst/run_mst.py | BrandonKates/open_spiel | f820abe9bdfdbc4bd45c2e933439393d4ad3622a | ["Apache-2.0"] | null | null | null | open_spiel/python/mst/run_mst.py | BrandonKates/open_spiel | f820abe9bdfdbc4bd45c2e933439393d4ad3622a | ["Apache-2.0"] | null | null | null |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import app
from absl import flags
import numpy as np
from scipy.spatial import distance_matrix
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "mst", "Name of the game")
flags.DEFINE_integer("num_nodes", None, "Number of nodes")
flags.DEFINE_string("load_state", None,
"A file containing a string to load a specific state")
def main(_):
action_string = None
print("Creating game: " + FLAGS.game)
if FLAGS.num_nodes is not None:
distances = np.random.random((FLAGS.num_nodes,2))
dist_mat = np.round(distance_matrix(distances, distances),2).flatten()
generated_weights = str(dist_mat[0])
for i in range(1,dist_mat.size):
generated_weights+="," + str(dist_mat[i])
game = pyspiel.load_game(FLAGS.game,
{"num_nodes": pyspiel.GameParameter(FLAGS.num_nodes),
"weights": pyspiel.GameParameter(generated_weights)})
  else:
    # Default to 5 nodes with all-zero weights when --num_nodes is not given,
    # and record that default so the action printing below does not divide by None.
    FLAGS.num_nodes = 5
    game = pyspiel.load_game(FLAGS.game, {"num_nodes": pyspiel.GameParameter(5),
                             "weights": pyspiel.GameParameter("0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")})
# Get a new state
if FLAGS.load_state is not None:
# Load a specific state
state_string = ""
with open(FLAGS.load_state, encoding="utf-8") as input_file:
for line in input_file:
state_string += line
state_string = state_string.rstrip()
print("Loading state:")
print(state_string)
print("")
state = game.deserialize_state(state_string)
else:
state = game.new_initial_state()
# Print the initial state
print(str(state))
while not state.is_terminal():
# The state can be three different types: chance node,
# simultaneous node, or decision node
legal_actions = state.legal_actions(state.current_player())
print("Legal Actions: ", [(i//FLAGS.num_nodes, i%FLAGS.num_nodes) for i in legal_actions])
# Decision node: sample action for the single current player
action = random.choice(legal_actions)
action_string = state.action_to_string(state.current_player(), action)
print("Player ", state.current_player(), ", randomly sampled action: ",
action_string)
state.apply_action(action)
print(str(state))
# Game is now done. Print utilities for each player
returns = state.returns()
for pid in range(game.num_players()):
print("Utility for player {} is {}".format(pid, returns[pid]))
if __name__ == "__main__":
app.run(main)
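The weight string handed to pyspiel.load_game above is just the flattened pairwise-distance matrix joined with commas; a small standalone sketch of that construction (the node count here is chosen arbitrarily):
import numpy as np
from scipy.spatial import distance_matrix

# Three random points in the unit square; the game expects num_nodes**2
# comma-separated weights, i.e. the full flattened distance matrix.
points = np.random.random((3, 2))
dist_mat = np.round(distance_matrix(points, points), 2).flatten()
weights = ",".join(str(w) for w in dist_mat)
print(weights)  # e.g. "0.0,0.42,0.77,0.42,0.0,0.55,0.77,0.55,0.0"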
| 34.166667 | 129 | 0.695122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,246 | 0.379878 |
0511c5e889756be6d1498e4e5630fe4522e1af10 | 176 | py | Python | dzien1/p1_start.py | angelm1974/przyklady | ee0483ca69b789270641f3cd6c945b0cd0afbea9 | ["MIT"] | 1 | 2021-09-20T21:48:11.000Z | 2021-09-20T21:48:11.000Z | dzien1/p1_start.py | angelm1974/przyklady | ee0483ca69b789270641f3cd6c945b0cd0afbea9 | ["MIT"] | null | null | null | dzien1/p1_start.py | angelm1974/przyklady | ee0483ca69b789270641f3cd6c945b0cd0afbea9 | ["MIT"] | null | null | null |
from PyQt6.QtWidgets import QApplication, QWidget
import sys  # comment
app = QApplication(sys.argv)  # ([]) - without arguments
window = QWidget()
window.show()
app.exec()
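A slightly extended variant of the starter above, as a hedged sketch; the window title, size, and label text are purely illustrative.
from PyQt6.QtWidgets import QApplication, QLabel, QWidget
import sys

app = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle("Hello PyQt6")  # illustrative title
window.resize(300, 120)
label = QLabel("Hello, world!", parent=window)
label.move(20, 20)
window.show()
sys.exit(app.exec())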
| 14.666667 | 52 | 0.715909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.19209 |
0511cceb2ee442a4c70aeab49d84be0233b7fcac | 10,952 | py | Python | classify.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | ["Apache-2.0"] | 1 | 2020-09-18T12:05:13.000Z | 2020-09-18T12:05:13.000Z | classify.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | ["Apache-2.0"] | null | null | null | classify.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | ["Apache-2.0"] | null | null | null |
import argparse
import os
import subprocess
from typing import List, Sequence, Text
import textwrap
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
import data
import models
import ga
def train(model_path: Text,
train_data_paths: Sequence[Text],
dev_data_paths: Sequence[Text],
pretrained_model_name: Text,
label_col: Text,
n_rows: int,
learning_rate: float,
batch_size: int,
grad_accum_steps: int,
n_epochs: int,
qsub: bool,
time: Text,
singularity_image: Text,
use_gpu: bool):
if not qsub:
if time is not None:
raise ValueError("time limit not supported")
tokenizer_for = transformers.AutoTokenizer.from_pretrained
tokenizer = tokenizer_for(pretrained_model_name)
train_x, train_y = data.read_csvs_to_xy(
data_paths=train_data_paths,
n_rows=n_rows,
tokenizer=tokenizer,
label_col=label_col)
dev_x, dev_y = data.read_csvs_to_xy(
data_paths=dev_data_paths,
n_rows=n_rows,
tokenizer=tokenizer,
label_col=label_col)
# set class weight inversely proportional to class counts
counts = np.bincount(train_y)
class_weight = dict(enumerate(counts.max() / counts))
# determine optimizer
optimizer_kwargs = dict(
learning_rate=learning_rate, epsilon=1e-08, clipnorm=1.0)
if grad_accum_steps != 1:
optimizer_class = ga.AdamGA
optimizer_kwargs.update(grad_accum_steps=grad_accum_steps)
else:
optimizer_class = tf.optimizers.Adam
model_for = transformers.TFAutoModel.from_pretrained
model = models.from_transformer(
transformer=model_for(pretrained_model_name),
n_outputs=1)
model.compile(
optimizer=optimizer_class(**optimizer_kwargs),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall(),
tfa.metrics.F1Score(num_classes=1, threshold=0.5),
])
model.fit(x=train_x, y=train_y,
validation_data=(dev_x, dev_y),
epochs=n_epochs,
batch_size=batch_size,
class_weight=class_weight,
callbacks=tf.keras.callbacks.ModelCheckpoint(
filepath=model_path,
monitor="val_f1_score",
mode="max",
verbose=1,
save_weights_only=True,
save_best_only=True))
else:
if time is None:
raise ValueError("time limit required for qsub")
model_prefix, _ = os.path.splitext(model_path)
n_rows_str = "all" if n_rows is None else n_rows
prefix = f"{model_prefix}.{label_col}.{pretrained_model_name}.r{n_rows_str}.b{batch_size}.ga{grad_accum_steps}.lr{learning_rate}"
pbs_path = f"{prefix}.pbs"
def format_paths(paths):
return ' '.join(f'"{p}"' for p in paths)
with open(pbs_path, "w") as pbs_file:
pbs_file.write(textwrap.dedent(f"""
#!/bin/bash
#PBS -q windfall
#PBS -l select=1{":ncpus=16:ngpus=1" if use_gpu else ":ncpus=4"}:mem=64gb
#PBS -N {prefix}
#PBS -W group_list=nlp
#PBS -l walltime={time}
module load singularity
module load cuda10/10.1
{"export CUDA_VISIBLE_DEVICES=-1" if not use_gpu else ""}
cd {os.path.dirname(os.path.realpath(__file__))}
singularity exec --nv \\
{singularity_image} \\
python3.7 classify.py \\
--pretrained-model-name {pretrained_model_name} \\
--label-col {label_col} \\
train \\
{'' if n_rows is None else f'--n-rows={n_rows}'} \\
--n-epochs={n_epochs} \\
--batch-size={batch_size} \\
--grad-accum-steps={grad_accum_steps} \\
--learning-rate={learning_rate} \\
{prefix}.model \\
--train-data {format_paths(train_data_paths)} \\
--dev-data {format_paths(dev_data_paths)}
"""))
subprocess.run(["qsub", pbs_path])
def test(model_paths: Sequence[Text],
test_data_paths: Sequence[Text],
pretrained_model_name: Text,
label_col: Text,
n_rows: int,
batch_size: int,
verbose: bool):
width = max(len(p) for p in model_paths + test_data_paths)
headers = ["precision", "recall", "f1-score", "support"]
header_fmt = f'{{:<{width}s}} ' + ' {:>9}' * 4
row_fmt = f'{{:<{width}s}} ' + ' {:>9.3f}' * 3 + ' {:>9}'
# load the tokenizer model
tokenizer_for = transformers.AutoTokenizer.from_pretrained
tokenizer = tokenizer_for(pretrained_model_name)
# load the pre-trained transformer model
model_for = transformers.TFAutoModel.from_pretrained
transformer = model_for(pretrained_model_name)
test_data_rows = {p: [] for p in test_data_paths}
for model_path in model_paths:
tf.keras.backend.clear_session()
# load the fine-tuned transformer model
model = models.from_transformer(transformer=transformer, n_outputs=1)
model.load_weights(model_path).expect_partial()
for data_path in test_data_paths:
# tokenize the test data
df = data.read_csv(data_path=data_path,
label_col=label_col,
n_rows=n_rows)
x, y_ref = data.df_to_xy(df=df,
tokenizer=tokenizer,
label_col=label_col)
# predict on the test data
y_pred_scores = model.predict(x, batch_size=batch_size)
y_pred = (y_pred_scores >= 0.5).astype(int).ravel()
# evaluate predictions
stats_arrays = sklearn.metrics.precision_recall_fscore_support(
y_ref, y_pred, labels=[1])
stats = [a.item() for a in stats_arrays]
row = [model_path] + stats
test_data_rows[data_path].append(row_fmt.format(*row))
# if requested, print detailed results for this model
if verbose:
header = header_fmt.format(data_path, *headers)
print("=" * len(header))
print(header)
print(row_fmt.format(*row))
print("=" * len(header))
df.insert(1, "prediction", y_pred_scores)
print(df)
print()
# print results for all models on all datasets
for data_path, rows in test_data_rows.items():
print(header_fmt.format(data_path, *headers))
for row in rows:
print(row)
print()
def predict_csv(model_path: Text,
input_path: Text,
output_path: Text,
text_col: Text,
label_col: Text,
pretrained_model_name: Text,
output_scores: bool,
n_rows: int,
batch_size: int):
# load the tokenizer model
tokenizer_for = transformers.AutoTokenizer.from_pretrained
tokenizer = tokenizer_for(pretrained_model_name)
# read input data
with open(input_path, encoding="utf-8", errors="ignore") as input_file:
df = pd.read_csv(input_file, nrows=n_rows)
x = data.from_tokenizer(tokenizer, df[text_col])
# load the pre-trained transformer model
model_for = transformers.TFAutoModel.from_pretrained
transformer = model_for(pretrained_model_name)
# load the fine-tuned transformer model
model = models.from_transformer(transformer=transformer, n_outputs=1)
model.load_weights(model_path).expect_partial()
# predict on the test data
y_pred = model.predict(x, batch_size=batch_size)
df[label_col] = (y_pred >= 0.5).astype(int).ravel()
if output_scores:
df[f"{label_col}_score"] = y_pred
df.to_csv(output_path, encoding='utf-8-sig')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pretrained-model-name", default="roberta-base")
parser.add_argument("--label-col", default="namecalling")
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser("train")
train_parser.add_argument("model_path")
train_parser.add_argument("--train-data", dest="train_data_paths", nargs='+',
metavar="PATH", required=True)
train_parser.add_argument("--dev-data", dest="dev_data_paths", nargs='+',
metavar="PATH", required=True)
train_parser.add_argument("--qsub", action="store_true")
train_parser.add_argument("--time")
train_parser.add_argument("--no-gpu", dest="use_gpu", action="store_false")
train_parser.add_argument(
"--singularity-image",
default="/xdisk/bethard/hpc-ml_centos7-python3.7-transformers3.2.0.sif")
train_parser.add_argument("--n-rows", type=int)
train_parser.add_argument("--learning-rate", type=float, default=3e-5)
train_parser.add_argument("--batch-size", type=int, default=1)
train_parser.add_argument("--grad-accum-steps", type=int, default=1)
train_parser.add_argument("--n-epochs", type=int, default=10)
train_parser.set_defaults(func=train)
test_parser = subparsers.add_parser("test")
test_parser.add_argument("model_paths", nargs="+", metavar="model_path")
test_parser.add_argument("--test-data", dest="test_data_paths", nargs='+',
metavar="PATH", required=True)
test_parser.add_argument("--n-rows", type=int)
test_parser.add_argument("--batch-size", type=int, default=1)
test_parser.add_argument("--verbose", action="store_true")
test_parser.set_defaults(func=test)
predict_parser = subparsers.add_parser("predict")
predict_parser.add_argument("model_path")
predict_parser.add_argument("input_path")
predict_parser.add_argument("output_path")
predict_parser.add_argument("--text-col", default="tweet_text")
predict_parser.add_argument("--output-scores", action="store_true")
predict_parser.add_argument("--n-rows", type=int)
predict_parser.add_argument("--batch-size", type=int, default=1)
predict_parser.set_defaults(func=predict_csv)
args = parser.parse_args()
kwargs = vars(args)
kwargs.pop("func")(**kwargs)
| 38.975089 | 137 | 0.603543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,771 | 0.253013 |
051260c977d73217e66d8ef66398ae1931f7b899 | 814 | py | Python | p2/src/prove.py | ruimgf/AIDS | 72bc808ef5e21113f635f34581d18c0dc2c8c7da | ["MIT"] | null | null | null | p2/src/prove.py | ruimgf/AIDS | 72bc808ef5e21113f635f34581d18c0dc2c8c7da | ["MIT"] | null | null | null | p2/src/prove.py | ruimgf/AIDS | 72bc808ef5e21113f635f34581d18c0dc2c8c7da | ["MIT"] | null | null | null |
import sys
from kb import *
# Receives a list of sentences if it is in test mode
def main(lista=None):
sentences = []
if lista is None:
        with sys.stdin as f:  # open stdin as a file
lines = f.readlines()
for line in lines: # convert each line to a python object
line = line.rstrip()
a = eval(line)
if isinstance(a,list):
sentences.append(set(a))
else:
b = set([a])
sentences.append(b)
if DEBUG:
print(sentences)
else:
for x in lista:
if x is not None:
sentences.append(set(x))
knowledge = Kb(sentences)
return knowledge.pl_resolution()
if __name__ == '__main__':
print(main())
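A minimal sketch of the test-mode entry point, assuming kb.Kb accepts clauses made of plain string literals and that pl_resolution() reports whether the clause set is refutable; the literal spelling below is purely illustrative and not taken from the original project.
# Hypothetical clause set: each inner list becomes one clause (a set of literals).
clauses = [["~P", "Q"], ["P"], ["~Q"]]
print(main(clauses))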
| 25.4375 | 69 | 0.5086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.146192 |
0513d4822718f78bada1c9c056ce41bfe1fb2ffe | 472 | py | Python | rest_framework_tracking/mixins.py | Zagrebelin/drf-tracking | 5fe102439e7baaffc886253e39c21dd96481391f | ["ISC"] | 387 | 2015-05-26T08:23:52.000Z | 2022-03-18T11:10:44.000Z | rest_framework_tracking/mixins.py | Zagrebelin/drf-tracking | 5fe102439e7baaffc886253e39c21dd96481391f | ["ISC"] | 138 | 2015-05-26T16:20:25.000Z | 2021-09-22T18:07:24.000Z | rest_framework_tracking/mixins.py | Zagrebelin/drf-tracking | 5fe102439e7baaffc886253e39c21dd96481391f | ["ISC"] | 121 | 2015-09-25T16:53:48.000Z | 2021-08-18T12:42:04.000Z |
from .base_mixins import BaseLoggingMixin
from .models import APIRequestLog
class LoggingMixin(BaseLoggingMixin):
def handle_log(self):
"""
Hook to define what happens with the log.
        Defaults to saving the data in the db.
"""
APIRequestLog(**self.log).save()
class LoggingErrorsMixin(LoggingMixin):
"""
Log only errors
"""
def should_log(self, request, response):
return response.status_code >= 400
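A hedged usage sketch: mixing LoggingErrorsMixin into a plain DRF view so that only error responses (status >= 400) end up in APIRequestLog; the view itself is illustrative, not part of the package.
from rest_framework.response import Response
from rest_framework.views import APIView

from rest_framework_tracking.mixins import LoggingErrorsMixin


class HealthView(LoggingErrorsMixin, APIView):
    def get(self, request):
        # A 200 response is not logged; a raised exception or 4xx/5xx would be.
        return Response({"status": "ok"})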
| 21.454545 | 49 | 0.658898 | 390 | 0.826271 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.305085 |
0514df3dee36ec46f44f8239441b8f0b35d0374b | 758 | py | Python | stub_extractor/util.py | srittau/stub-extractor | f161c10a2f041a74040a04e00e0b0d33cb94a0fe | ["MIT"] | null | null | null | stub_extractor/util.py | srittau/stub-extractor | f161c10a2f041a74040a04e00e0b0d33cb94a0fe | ["MIT"] | null | null | null | stub_extractor/util.py | srittau/stub-extractor | f161c10a2f041a74040a04e00e0b0d33cb94a0fe | ["MIT"] | null | null | null |
from typing import Iterator, List, Optional, Sequence, Tuple, TypeVar
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
def rzip_longest(
seq1: Sequence[_T1], seq2: Sequence[_T2]
) -> Iterator[Tuple[_T1, Optional[_T2]]]:
"""Make an iterator over tuples, with elements from the input sequences.
If the second sequence is shorter than the first by N elements,
the second element of the first N tuples is set to None.
>>> list(rzip_longest([1,2,3], ["a", "b"]))
[(1, None), (2, "a"), (3, "b")]
"""
len_diff = len(seq1) - len(seq2)
if len_diff < 0:
raise ValueError("seq2 can't be longer than seq1")
padded_seq2: List[Optional[_T2]] = [None] * len_diff
padded_seq2.extend(seq2)
return zip(seq1, padded_seq2)
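A usage sketch of the function above: right-aligning a parameter list with its trailing default values, the kind of pairing rzip_longest provides; the names and defaults below are illustrative.
params = ["self", "name", "timeout"]
defaults = ["None", "30"]  # defaults belong to the last two parameters
for param, default in rzip_longest(params, defaults):
    print(param if default is None else f"{param} = {default}")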
| 30.32 | 76 | 0.647757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.444591 |