text stringlengths 4 1.02M | meta dict |
|---|---|
from typing import Any
import pytest
from datadog_checks.base import AgentCheck
from datadog_checks.base.stubs.aggregator import AggregatorStub
from .common import CHECK_CONFIG
from .metrics import METRICS
@pytest.mark.e2e
def test_e2e(dd_agent_check):
    # type: (Any) -> None
    """Run the Trino check end-to-end in the agent and verify that every
    expected metric and the can_connect service check are emitted.
    """
    # rate=True runs the check twice so rate-type metrics get a sample.
    aggregator = dd_agent_check(CHECK_CONFIG, rate=True)  # type: AggregatorStub
    for metric in METRICS:
        aggregator.assert_metric(metric)
    # Fail if the check emitted any metric not listed in METRICS.
    aggregator.assert_all_metrics_covered()
    for instance in CHECK_CONFIG['instances']:
        # Tags mirror what the JMX check builds from the host/port config.
        tags = [
            'instance:trino-{}-{}'.format(instance['host'], instance['port']),
            'jmx_server:{}'.format(instance['host']),
        ]
        aggregator.assert_service_check('trino.can_connect', status=AgentCheck.OK, tags=tags)
| {
"content_hash": "fd71c4f8bbac7c637ef258178e080ca6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 93,
"avg_line_length": 28.925925925925927,
"alnum_prop": 0.6850192061459667,
"repo_name": "DataDog/integrations-extras",
"id": "30c756f74d131409a3130305262316e585327141",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trino/tests/test_e2e.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4265"
},
{
"name": "Go",
"bytes": "4119"
},
{
"name": "PHP",
"bytes": "3192"
},
{
"name": "Python",
"bytes": "1219552"
},
{
"name": "Ruby",
"bytes": "8005"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
} |
"""Cloudhands DB server functions
This file contains functions which either make direct modifications to the
Cloudhands database, request information from the DB, or bundle a series of
functions which cause a number of DB changes to take effect.
"""
from collections import OrderedDict
#We need everything from the models
from eos_db.models import ( Artifact, Appliance, Registration,
Membership, GroupMembership,
Actor, Component, User, Ownership,
Touch, State, ArtifactState, Deboost,
Resource, Node, Password, Credit,
Specification, Base )
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
from sqlalchemy.exc import IntegrityError
from datetime import datetime, timedelta
engine = None # Assume no default database connection
# Load config.
# DB holds the optional connection-settings class; None means "no settings
# supplied", in which case we connect to the eos_db database without a
# username or password - ie. rely on PostgreSQL ident auth.
DB = None
try:
    from eos_db.settings import DBDetails as DB
except ImportError:
    # Narrowed from a bare except: a missing settings module is expected,
    # but genuine errors inside settings.py should not be silently hidden.
    pass
def with_session(f):
    """Decorator that automatically passes a Session to a function and then shuts
    the session down at the end, unless a session was already passed through.
    The decorator itself takes no arguments. The function must have a session
    argument.

    Sessions we open ourselves are committed and closed on success, and
    closed without commit on exception. A caller-supplied session is left
    entirely alone - the caller owns its lifecycle.
    """
    def inner(*args, **kwargs):
        #Note that if session is passed in kwargs the local session
        #variable is never set and therefore is left for the caller to close.
        session = None
        if not kwargs.get('session'):
            # expire_on_commit=False lets results be read after our commit.
            Session = sessionmaker(bind=engine, expire_on_commit=False)
            session = Session()
            kwargs['session'] = session
        res = None
        try:
            res = f(*args, **kwargs)
        except Exception as e:
            # Close (without committing) any session we opened, then re-raise.
            if session: session.close()
            raise e
        # Only commit/close a session we created ourselves.
        if session:
            session.commit()
            session.close()
        return res
    return inner
def choose_engine(enginestring, replace=True):
    """
    Create a connection to a database. If Postgres is selected, this will
    connect to the database specified in the settings.py file. If SQLite is
    selected, then the system will use an in-memory SQLite database.
    As stated in
    http://docs.sqlalchemy.org/en/latest/core/engines.html#configuring-logging
    one should only use echo=True for blanket debugging. Use the logger
    settings for sqlalchemy.engine instead.

    :param enginestring: "PostgreSQL" or "SQLite".
    :param replace: If False, keep any engine that is already connected.
    :raises LookupError: For any other enginestring value.
    """
    global engine
    # Respect an existing connection unless the caller asks to replace it.
    if engine and not replace:
        return
    if enginestring == "PostgreSQL":
        if DB and DB.username:
            # Password auth
            engine = create_engine('postgresql://%s:%s@%s/%s'
                                   % (DB.username,
                                      DB.password,
                                      DB.host,
                                      DB.database),
                                   echo=False)
        elif DB:
            # Settings supplied but no username: ident auth on the named DB.
            engine = create_engine('postgresql:///%s'
                                   % (DB.database),
                                   echo=False)
        else:
            # No settings at all: ident auth on the default eos_db database.
            engine = create_engine('postgresql:///eos_db', echo=False)
    elif enginestring == "SQLite":
        # In-memory database.
        engine = create_engine('sqlite://', echo=False)
    else:
        raise LookupError("Invalid server type.")
    # Always do this. This bootstraps the database for us, and ensures
    # any new states are added.
    setup_states()
def override_engine(engine_string, echo=True):
    """Point the module at a different database than the one configured in
    the server module, replacing the current global engine.

    Note that this does not deploy the tables - call setup_states() or
    deploy_tables() explicitly afterwards if you want that.

    :param engine_string: A SQLAlchemy server string, eg. 'sqlite://'
    :param echo: Passed to create_engine; True logs all emitted SQL.
    """
    global engine
    engine = create_engine(engine_string, echo=echo)
def deploy_tables():
    """Create all model tables, as currently defined, on the connected
    database engine. Safe to call repeatedly - existing tables are kept.
    """
    Base.metadata.create_all(engine)
def get_state_list():
    """Return the tuple of valid artifact state names.

    The result is the union of the internal states we need to function plus
    anything extra declared in settings.MachineStates.state_list.

    :returns: Tuple of state name strings, internal states first.
    """
    state_list = (
        'Started',
        'Stopped',
        'Restarting',
        'Starting',
        'Starting_Boosted',
        'Stopping',
        'Preparing',
        'Prepared',
        'Pre_Deboosting',
        'Pre_Deboosted',
        'Deboosted',
        'Boosting',    # Transitional state
        'Deboosting',  # Transitional state
        'Error'
    )
    try:
        from eos_db.settings import MachineStates as EXTRA_STATES
        # Append only extra states we don't already know about.
        return state_list + tuple(s for s in EXTRA_STATES.state_list if s not in state_list)
    except (ImportError, AttributeError):
        # Narrowed from a bare except: missing settings module or missing
        # MachineStates.state_list both mean "no extra states".
        return state_list
def setup_states(ignore_dupes=True):
    """ Write the list of valid states to the database.
    The states are in server.py and may be supplemented in settings.py.
    With ignore_dupes=False this will throw an exception if you try to
    add the same state twice, otherwise it will just ignore the error - ie.
    it will just add new states and will be idempotent.

    :param ignore_dupes: Swallow IntegrityError for already-present states.
    :returns: Number of states newly added by this call.
    """
    # Ensure the tables exist before inserting into them.
    Base.metadata.create_all(engine)
    states_added = 0
    for state in get_state_list():
        try:
            create_artifact_state(state)
            states_added += 1
        except IntegrityError as e:
            # State already present; only fatal if the caller insists.
            if not ignore_dupes: raise e
    return states_added
@with_session
def list_user_ids(session):
    """Yield the IDs of all active users.
    """
    #A re-created user with the same username masks the old record, so we
    #resolve each distinct username back to its current ID rather than
    #dumping User.id directly.
    for (username,) in session.query(User.username).distinct():
        yield get_user_id_from_name(username)
def create_user(type, handle, name, username):
    """Create a new user record. Handle/uuid must be unique e-mail address.

    NOTE(review): the first parameter shadows the builtin `type`; renaming
    would break keyword callers, so it stays. It names the group the new
    user joins; pass a falsy value to skip group membership.

    :param type: Group to add the user to, or falsy for none.
    :param handle: Unique handle, stored as both uuid and handle.
    :param name: Display name.
    :param username: Login name.
    :returns: ID of the new user.
    """
    Base.metadata.create_all(engine)
    user_id = _create_thingy(User(name=name, username=username, uuid=handle, handle=handle))
    #Add this user to a group
    if type:
        create_group_membership(_create_touch(user_id, None, None), type)
    return user_id
def touch_to_add_user_group(username, group):
    """ Record a touch for the named user and attach a new group membership
    resource to it.

    :param username: Username of an existing user.
    :param group: Name of the group to place the user in.
    :returns: ID of the new touch.
    """
    # FIXME? Should this use the user_id, not username, for consistency? Not yet sure.
    new_touch = _create_touch(get_user_id_from_name(username), None, None)
    create_group_membership(new_touch, group)
    return new_touch
def create_group_membership(touch_id, group):
    """ Create a new group membership resource linked to the given touch.

    :param touch_id: Touch linking the membership to a user.
    :param group: Group name string.
    :returns: ID of the new GroupMembership row.
    """
    # FIXME2 - this is only ever used by the function above so fold the code in.
    Base.metadata.create_all(engine)
    #return _create_thingy(GroupMembership(group=group))
    # FIXME (Tim) - touch_id was unused, so clearly this was broken. Test as-is first.
    return _create_thingy(GroupMembership(group=group, touch_id=touch_id))
@with_session
def get_user_group(username, session):
    """ Get the group associated with a given username.

    :param username: The username to look up, or None.
    :returns: Most recently assigned group name, or None if username is
        None or the user has no group membership recorded.
    """
    if username is None:
        return None
    actor_id = get_user_id_from_name(username, session=session)
    # The latest touch wins: a user's group is whatever was assigned last.
    group = (session
             .query(GroupMembership.group)
             .filter(GroupMembership.touch_id == Touch.id)
             .filter(Touch.actor_id == actor_id)
             .order_by(Touch.touch_dt.desc())
             .first())
    # Previously this indexed group[0] unconditionally, raising TypeError
    # for a user with no membership; report None instead.
    return group[0] if group else None
def create_appliance(name, uuid):
    """ Create a new VApp record. """
    # FIXME: We shoehorn VMs into the Vapp record; VMs should really go
    # into the "Node" object.
    Base.metadata.create_all(engine)
    new_appliance = Appliance(name=name, uuid=uuid)
    return _create_thingy(new_appliance)
def create_artifact_state(state_name):
    """ Register a new artifact state by name. ArtifactState subclasses
    State - see the relevant docs in the model.

    :param state_name: Printable name of the state.
    :returns: ID of the new state row.
    """
    new_state = ArtifactState(name=state_name)
    return _create_thingy(new_state)
@with_session
def _create_thingy(sql_entity, session):
    """Internal call that holds the boilerplate for putting a new SQLAlchemy object
    into the database. BC suggested this should be a decorator but I don't think
    that aids legibility. Maybe should rename this though.

    :param sql_entity: Any unsaved model instance.
    :returns: The primary-key id assigned to the new row.
    """
    session.add(sql_entity)
    #Note that this commit causes the .id to be populated.
    session.commit()
    return sql_entity.id
def change_node_state(node_id, state_id):
    """
    Unused. Placeholder that intentionally does nothing and returns None.
    """
    pass # FIXME: See above for comments related to Vapps and VMs.
def create_node():
    """
    Unused. Placeholder that intentionally does nothing and returns None.
    """
    pass # FIXME: See above for comments related to Vapps and VMs.
@with_session
def list_artifacts_for_user(user_id, session):
    """Returns a list of dictionaries listing pertinent information about
    user's artifacts.

    :param user_id: A valid user id for which we want to list details.
    :returns: View of dicts (one per owned server) as produced by
        return_artifact_details(), in database order, newest duplicate last.
    """
    # This bit was _list_artifacts_for_user(user_id)
    servers = (session
               .query(Artifact.id, Artifact.name, Artifact.uuid)
               .all())
    # Because of my logic that adding a new server with an existing name masks
    # the old server, we actually want to get all the servers here and then
    # post-filter them, or at least that is the simplest approach.
    #OrderedDict gives me the property of updating any server listed
    #twice while still maintaining database order.
    artifacts = OrderedDict()
    for server in servers:
        #Cleanup CHAR values - really should fix this in the DB
        server = list(server)
        server[1] = server[1].rstrip()
        server[2] = server[2].rstrip()
        #END workaround
        # Re-inserting moves a duplicate name to the end, so the newest
        # record with a given name is the one reported.
        if server[1] in artifacts:
            del artifacts[server[1]]
        if check_ownership(server[0], user_id, session=session):
            artifacts[server[1]] = return_artifact_details(*server, session=session)
    return artifacts.values()
@with_session
def return_artifact_details(artifact_id, artifact_name=None, artifact_uuid=None, session=None):
    """ Return basic information about a server, for display.

    :param artifact_id: A valid artifact id.
    :param artifact_name: Optional name; looked up from the id if omitted.
    :param artifact_uuid: Optional uuid; looked up from the id if omitted.
    :returns: Dict of display-ready fields for the server.
    """
    change_dt = _get_most_recent_change(artifact_id, session=session)
    create_dt = _get_artifact_creation_date(artifact_id, session=session)
    state = check_state(artifact_id, session=session)
    # Pass the session through rather than opening a fresh one per call.
    boosted = _get_server_boost_status(artifact_id, session=session)
    boostremaining = "N/A"
    deboost_time = 0
    deboost_credit = 0
    #Because get_time_until_deboost() might report a deboost time for an un-boosted
    #server if it was manually deboosted, check the status
    if boosted == "Boosted":
        time_for_deboost = get_time_until_deboost(artifact_id, session=session)
        boostremaining = time_for_deboost[2] or "Not set"
        # Get deboost time as UNIX seconds-since-epoch
        # Any browser will be able to render this as local time by using:
        # var d = new Date(0) ; d.setUTCSeconds(deboost_time)
        # NOTE(review): strftime("%s") is a non-portable glibc extension and
        # yields a str; consider int(dt.timestamp()) once the output type is agreed.
        deboost_time = time_for_deboost[0].strftime("%s") if time_for_deboost[0] else 0
        deboost_credit = time_for_deboost[3]
    try:
        cores, ram = get_latest_specification(artifact_id, session=session)
        ram = str(ram) + " GB"
    except Exception:
        # Narrowed from a bare except: no specification recorded yet (the
        # query returns None, so the unpack fails) - show placeholders.
        cores, ram = "N/A", "N/A"
    if state is None:
        state = "Not yet initialised"
    if not artifact_uuid:
        artifact_uuid = get_server_uuid_from_id(artifact_id, session=session)
    if not artifact_name:
        artifact_name = get_server_name_from_id(artifact_id, session=session)
    # Datetimes are truncated to "YYYY-MM-DD HH:MM" for display.
    return({"artifact_id": artifact_id,
            "artifact_uuid": artifact_uuid,
            "artifact_name": artifact_name,
            "change_dt": str(change_dt[0])[0:16],
            "create_dt": str(create_dt[0])[0:16],
            "state": state,
            "boosted": boosted,
            "cores": cores,
            "ram": ram,
            "boostremaining": boostremaining,
            "deboost_time": deboost_time,
            "deboost_credit": deboost_credit
            })
#FIXME - rationalise these to three functions:
# get_server_by_name
# get_server_by_id
# get_server_by_uuid
# that all return the same info as return_artifact_details(id)
@with_session
def get_server_name_from_id(artifact_id, session):
    """ Look up the display name of an artifact (server) by internal ID.

    :param artifact_id: A valid artifact id.
    :returns: Name of the artifact, stripped of CHAR padding.
    """
    #uuid and name are declared as CHAR in the DB, so PostgreSQL returns
    #them space-padded. Strip the padding here.
    row = (session.query(Artifact.name)
                  .filter(Artifact.id == artifact_id)
                  .first())
    return row[0].rstrip()
@with_session
def get_server_id_from_name(name, session):
    """ Resolve a server name to its internal ID.

    :param name: The name of an artifact.
    :returns: Internal ID of the newest artifact with that name.
    """
    # FIXME - Check behaviour on duplicate names. This should not be a problem
    # due to database constraints, but is worth looking at, just in case.
    # Highest ID wins, so a re-created server masks its predecessor.
    row = (session.query(Artifact.id)
                  .filter(Artifact.name == name)
                  .order_by(Artifact.id.desc())
                  .first())
    return row[0]
@with_session
def get_server_id_from_uuid(uuid, session):
    """ Get the system ID of a server from its UUID.

    :param uuid: The UUID of an artifact.
    :returns: Internal ID of artifact.
    """
    artifact_id = (session
                   .query(Artifact.id)
                   .filter(Artifact.uuid == uuid)
                   .first())
    return artifact_id[0]
@with_session
def get_user_id_from_name(name, session):
    """ Resolve a username to the internal user ID.

    :param name: The username of a user.
    :returns: Internal ID of the user.
    :raises KeyError: If no user has that username.
    """
    # FIXME - Behaviour with duplicates also applies here. Ensure constraints
    # properly set.
    row = (session.query(User.id)
                  .filter(User.username == name)
                  .first())
    if row is None:
        raise KeyError("No such user")
    return row[0]
def _get_server_boost_status(artifact_id, session=None):
    """ Return the boost status, either "Boosted" or "Unboosted", of the
    given artifact by ID.

    :param artifact_id: The artifact in question by ID.
    :param session: Optional session passed through to the lookup.
    :returns: String giving boost status.
    """
    # FIXME: Ideally this should really return a boolean to indicate whether a
    # machine is boosted or not.
    try:
        cores, ram = get_latest_specification(artifact_id, session=session)
    except Exception:
        # Narrowed from a bare except: no specification recorded yet (the
        # query returns None and the unpack fails) counts as unboosted.
        cores, ram = 0, 0
    # FIXME - remove hard-coding of 40 (GB of RAM at/above which a server
    # counts as boosted).
    if ram >= 40:
        return "Boosted"
    else:
        return "Unboosted"
@with_session
def get_deboost_credits(artifact_id, hours, session):
    """ Work out how many credits should be refunded upon deboost.

    If you don't know the number of hours, call
    get_time_until_deboost(vm_id)[3] instead.

    :param artifact_id: The artifact in question by ID.
    :param hours: The number of hours to credit.
    :returns: Number of credits to be refunded.
    """
    #Don't end up debiting credits if a deboost is processed late and hours
    #goes negative! This also stops get_latest_specification potentially
    #throwing an exception we don't care about.
    if not hours > 0: return 0
    cores, ram = get_latest_specification(artifact_id, session=session)
    # Refund rate is tiered on the core count; unknown tiers refund nothing.
    tier_rates = {2: 1, 8: 3, 16: 12}
    return tier_rates.get(cores, 0) * hours
@with_session
def list_servers_by_state(session):
    """ Iterates through servers and bins them by state. In a more
    standard database layout we could do this with a single SQL
    query.

    :returns: Dict mapping state name -> list of artifact IDs in that
        state. Uninitialised servers (no state) are omitted.
    """
    servers = session.query(Artifact.name).distinct()
    state_table = {}
    for server_name in servers:
        #Remember that adding a duplicate named server overwrites the old one,
        #so we can't just grab all the server IDs in the table.
        # Pass our session through instead of opening a new one per lookup.
        server_id = get_server_id_from_name(server_name[0], session=session)
        s_state = check_state(server_id, session=session)
        if not s_state:
            #Uninitialised
            pass
        elif s_state in state_table:
            state_table[s_state].append(server_id)
        else:
            state_table[s_state] = [ server_id ]
    return state_table
def touch_to_add_ownership(artifact_id, user_id):
    """ Link a VM to the specified user by adding an ownership resource,
    so users cannot see each other's VMs.

    :param artifact_id: The artifact in question by ID.
    :param user_id: The user in question by ID.
    :returns: The ID of the ownership created.
    """
    new_touch = _create_touch(None, artifact_id, None)
    return create_ownership(new_touch, user_id)
@with_session
def get_server_uuid_from_id(id, session):
    """ Fetch the uuid field of an artifact by internal ID.

    :param id: A valid artifact id.
    :returns: uuid of the artifact, stripped of CHAR padding.
    """
    #uuid and name are CHAR columns, so PostgreSQL space-pads them; strip.
    row = (session.query(Artifact.uuid)
                  .filter(Artifact.id == id)
                  .first())
    return row[0].rstrip()
@with_session
def check_ownership(artifact_id, actor_id, session):
    """ Report whether an artifact belongs to the given user.

    :param artifact_id: A valid artifact id.
    :param actor_id: A valid actor (user) id.
    :returns: True if an ownership record links the two, else False.
    """
    owned = (session.query(Ownership)
                    .filter(Ownership.user_id == actor_id)
                    .filter(Ownership.touch_id == Touch.id)
                    .filter(Touch.artifact_id == artifact_id)
                    .order_by(Touch.id.desc())
                    .first())
    return owned is not None
@with_session
def get_state_id_by_name(name, session):
    """Map a printable state name (as in get_state_list()) to its state_id.

    :param name: A printable state name.
    :returns: The corresponding internal state_id.
    :raises: If there is no such state (the single-row result is indexed
        unconditionally).
    """
    row = (session.query(State.id)
                  .filter(State.name == name)
                  .first())
    return row[0]
def touch_to_state(actor_id, artifact_id, state_name):
    """Record a touch that moves a VM into the named state.

    The state must be a valid state name as found in get_state_list()
    - eg. Started, Restarting.

    :param actor_id: User who is initiating the touch. Can be None.
    :param artifact_id: ID of the VM we want to state-shift.
    :param state_name: Target state name, mapped to an internal ID for us.
    :returns: touch ID
    """
    # An invalid state name triggers an exception here - make sure the
    # states were properly loaded into the DB.
    state_id = get_state_id_by_name(state_name)
    return _create_touch(actor_id, artifact_id, state_id)
def touch_to_add_deboost(vm_id, hours):
    """ Schedule a deboost for a VM a number of hours in the future, via
    a touch that links the Deboost record to the artifact.

    Note that hours can be fractional even though the user may only boost
    by the hour - this matters for the extend_boost call.
    """
    new_touch = _create_touch(None, vm_id, None)
    when = datetime.now() + timedelta(hours=hours)
    return _create_thingy(Deboost(deboost_dt=when, touch_id=new_touch))
def check_and_remove_credits(actor_id, ram, cores, hours):
    """Called when a machine is boosted to see if the user can afford it.

    Debits the cost from the user's balance if affordable.

    :param actor_id: User being charged, or None for an agent (free).
    :param ram: Unused here; cost is tiered on cores only.
    :param cores: Core count of the requested boost.
    :param hours: Boost duration in hours.
    :returns: The cost charged (0 for agents), or None if unaffordable.
    """
    if actor_id is None:
        #This would happen if an agent called this function
        return 0
    #FIXME - should not be using hard-coded values here
    # NOTE(review): the top tier triggers at cores >= 10 here, but
    # get_deboost_credits refunds the 12x rate only at cores == 16 -
    # confirm which threshold is intended.
    multiplier = 0
    if cores >= 2: multiplier = 1
    if cores >= 8: multiplier = 3
    if cores >= 10: multiplier = 12
    cost = multiplier * hours
    #See if the user can afford it...
    current_credit = check_credit(actor_id)
    if current_credit >= cost:
        touch_to_add_credit(actor_id, -cost)
        return cost
    else:
        return None
def touch_to_add_password(actor_id, password):
    """Attach a new password resource to a user via a touch.

    :param actor_id: An existing actor id.
    :param password: The unencrypted password.
    :returns: ID of the new password resource.
    """
    new_touch = _create_touch(actor_id, None, None)
    return _create_thingy(Password(touch_id=new_touch, password=password))
def touch_to_add_credit(actor_id, credit):
    """Record a credit (or debit, if negative) against a user.

    :param actor_id: An existing actor id.
    :param credit: An integer from -2147483648 to +2147483647
    :returns: ID of the new credit resource.
    """
    new_touch = _create_touch(actor_id, None, None)
    return _create_credit(new_touch, credit)
def touch_to_add_specification(vm_id, cores, ram):
    """Attach a new machine specification to a VM via a touch.

    :param vm_id: The virtual machine which we want to change.
    :param cores: The number of cores that we want the vm to have.
    :param ram: The amount of RAM, in GB, that we want the vm to have.
    :returns: ID of the new specification resource.
    """
    new_touch = _create_touch(None, vm_id, None)
    return _create_specification(new_touch, cores, ram)
@with_session
def get_latest_specification(vm_id, session):
    """ Fetch the current (most recent) specification of a VM.
    Equivalent to get_previous_specification(index=0).

    :param vm_id: A valid VM id.
    :returns: (cores, ram) row for the newest dated touch, or None if no
        specification has ever been recorded.
    """
    return (session.query(Specification.cores, Specification.ram)
                   .filter(Specification.touch_id == Touch.id)
                   .filter(Touch.artifact_id == vm_id)
                   .filter(Touch.touch_dt != None)
                   .order_by(Touch.touch_dt.desc())
                   .first())
## Functions that query when a server needs to de-boost.
@with_session
def _get_latest_deboost_dt(vm_id, session):
    """Internal function. Fetch the most recent deboost date of a VM.

    :param vm_id: A valid VM id.
    :returns: One-column row holding the latest deboost_dt, or None if no
        deboost was ever scheduled.
    """
    return (session.query(Deboost.deboost_dt)
                   .filter(Deboost.touch_id == Touch.id)
                   .filter(Touch.artifact_id == vm_id)
                   .filter(Touch.touch_dt != None)
                   .order_by(Touch.touch_dt.desc())
                   .first())
#No with_session decorator actually needed, but a session might be passed through.
def get_time_until_deboost(vm_id, session=None):
    """ Get the time until a VM is due to deboost, in various formats.

    We return a quadruplet:
    [ (datetime)deboost_time, (int)secs_until_deboost, (str)display_value, (int)credit ]
    or (None, None, None, 0) if the VM has no deboost scheduled.
    """
    now = datetime.now()
    row = _get_latest_deboost_dt(vm_id, session=session)
    if not row:
        # No deboost record exists for this VM. Previously a bare except
        # caught the resulting TypeError (and masked any real DB errors).
        return (None, None, None, 0)
    deboost_dt = row[0]
    delta = deboost_dt - now
    #Work out what to show the user...
    if delta.days > 0:
        display_value = "%i days, %02i hrs" % (delta.days, delta.seconds // 3600)
    elif delta.days == 0:
        display_value = "%02i hrs, %02i min" % divmod(delta.seconds // 60, 60)
    else:
        #Too fiddly, and pointless, displaying a negative delta as human-readable.
        #The caller can still look at the first 2 values to inspect expired boosts.
        display_value = "Expired"
    #Work out what any unused time is worth. This will always be an integer >=0
    credit = get_deboost_credits(vm_id, hours=delta.total_seconds() // 3600,
                                 session=session)
    return (deboost_dt, int(delta.total_seconds()), display_value, credit)
@with_session
def get_deboost_jobs(past, future, session):
    """ Get a list of pending deboosts. Deboosts scheduled on un-boosted servers
    will always be filtered out here, so there is no need to double-check.
    Note that although you specify the past and future in minutes you get back
    the boost_remain in seconds.

    :param past: How many minutes far back to go.
    :param future : How far forward to look, also in minutes.
    :returns: Array of {artifact_name, artifact_id, boost_remain}
    """
    now = datetime.now()
    start_time = now - timedelta(minutes=past)
    end_time = now + timedelta(minutes=future)
    # Oldest-first so that later touches overwrite earlier ones below.
    deboosts = ( session
                 .query(Deboost.deboost_dt, Touch.artifact_id)
                 .filter(Deboost.deboost_dt > start_time)
                 .filter(Deboost.touch_id == Touch.id)
                 .filter(Touch.touch_dt != None)
                 .order_by(Touch.touch_dt.asc()) )
    #Collect tasks in a dict by server_name, allowing new touches to overwrite
    #old ones.
    res = {}
    for d in deboosts:
        server_id = d[1]
        server_name = get_server_name_from_id(server_id)
        # It's possible the touch is attached to a server_id that was overwritten,
        # but then either the real server is un-boosted or else it will have a later
        # deboost set anyway. But that's why we need the 'del' here...
        if _get_server_boost_status(server_id) != 'Boosted':
            if server_name in res : del res[server_name]
            continue
        # If this Deboost is further than end_time minutes away we don't want it,
        # but it will still invalidate any Deboost we already saw.
        if not d[0] <= end_time:
            if server_name in res : del res[server_name]
            continue
        res[server_name] = d
    #And return an array of triples as promised
    return [ dict(artifact_name=n,
                  artifact_id=i[1],
                  boost_remain=int( (i[0] - now).total_seconds() ) )
             for n,i in res.items() ]
@with_session
def get_previous_specification(vm_id, index=1, session=None):
    """Fetch an earlier machine spec: index=0 is the current one, index=1
    the one before it, and so on.

    :param vm_id: A valid VM id.
    :param index: How far back in history to look (0 = newest).
    :returns: (cores, ram) row at that position in the history.
    """
    history = (session.query(Specification.cores, Specification.ram)
                      .filter(Specification.touch_id == Touch.id)
                      .filter(Touch.artifact_id == vm_id)
                      .filter(Touch.touch_dt != None)
                      .order_by(Touch.touch_dt.desc())
                      .all())
    return history[index]
def touch_to_add_node():
    """Unimplemented stub; does nothing and returns None.
    """
    # FIXME: Empty, remove
def touch_to_pre_provisioned():
    """Unimplemented stub; does nothing and returns None.
    """
    # FIXME: Empty, remove.
def touch_to_provisioned():
    """Unimplemented stub; does nothing and returns None.
    """
    # FIXME: Empty, remove.
def _create_touch(actor_id, artifact_id, state_id):
    """Record a touch, timestamped now.

    :param actor_id: The actor which is making the touch (may be None).
    :param artifact_id: The artifact associated with the touch (may be None).
    :param state_id: The state associated with the touch (may be None).
    :returns: ID of new touch.
    """
    return _create_thingy(Touch(actor_id=actor_id,
                                artifact_id=artifact_id,
                                state_id=state_id,
                                touch_dt=datetime.now()))
def create_ownership(touch_id, user_id):
    """ Attach an ownership record to a user, given a touch that already
    links the artifact to this record. """
    # FIXME: This seems odd - ideally this should just create a touch linking
    # artifact and user, and then add the ownership resource to it. Consider
    # refactoring the ownership mechanism.
    return _create_thingy(Ownership(touch_id=touch_id, user_id=user_id))
@with_session
def check_password(username, password, session):
    """ Report whether the username and password combination is valid. """
    # The newest password touch for the user is the authoritative one.
    latest = (session.query(Password)
                     .filter(Password.touch_id == Touch.id)
                     .filter(Touch.actor_id == User.id)
                     .filter(User.username == username)
                     .order_by(Touch.id.desc())
                     .first())
    if latest is None:
        # Unknown user, or a user who never had a password set.
        return False
    return latest.check(password)
def _create_credit(touch_id, credit):
    """Create a credit resource against an existing touch.

    :param touch_id: A preexisting touch_id
    :param credit: An integer from -2147483648 to +2147483647.
    :returns: ID of newly created credit resource.
    """
    new_credit = Credit(touch_id=touch_id, credit=credit)
    return _create_thingy(new_credit)
def _create_specification(touch_id, cores, ram):
    """Creates a specification resource.

    :param touch_id: A preexisting touch_id
    :param cores: An integer.
    :param ram: An integer - GB of RAM for machine.
    :returns: ID of newly created specification resource.
    """
    return _create_thingy(Specification(touch_id=touch_id, cores=cores, ram=ram))
@with_session
def check_credit(actor_id, session):
    """Sum up the credit currently available to the given actor / user.

    :param actor_id: The system id of the user or actor for whom we are \
    requesting credit details.
    :returns: Current credit balance; zero if the user has no credit \
    records at all.
    """
    balance = (session.query(func.sum(Credit.credit))
                      .filter(Credit.touch_id == Touch.id)
                      .filter(Touch.actor_id == Actor.id)
                      .filter(Actor.id == actor_id)
                      .scalar())
    # SUM over no rows yields NULL/None - report that as 0.
    return balance or 0
@with_session
def check_actor_id(actor_id, session):
    """Checks to ensure an actor exists.

    :param actor_id: The actor id which we are checking.
    :returns: Count of matching actors (an int, not a bool) - truthy
        exactly when the actor exists, so callers may treat it as boolean.
    """
    return ( session
             .query(Actor)
             .filter(Actor.id == actor_id)
             .count() )
@with_session
def check_user_details(user_id, session):
    """Collect the account details for an actor into a dict.

    :param user_id: The actor id which we are checking.
    :returns: Dictionary containing user details
    """
    #TODO - add user group
    user = session.query(User).filter_by(id=user_id).first()
    return {'id': user.id,
            'handle': user.handle,
            'username': user.username,
            'name': user.name
            }
@with_session
def check_state(artifact_id, session):
    """Report the current state of an artifact.

    :param artifact_id: A valid artifact id.
    :returns: Name of the current state (str), or None if no state has
        ever been set.
    """
    latest = (session.query(ArtifactState.name)
                     .filter(Touch.artifact_id == artifact_id)
                     .filter(ArtifactState.id == Touch.state_id)
                     .filter(Touch.touch_dt != None)
                     .order_by(Touch.touch_dt.desc())
                     .first())
    if latest is None:
        return None
    return latest[0]
@with_session
def _get_most_recent_change(artifact_id, session):
    """Fetch the datetime at which an artifact was most recently touched.

    :param artifact_id: A valid artifact id.
    :returns: One-column row holding the max touch_dt.
    """
    return (session.query(func.max(Touch.touch_dt))
                   .filter(Touch.artifact_id == artifact_id)
                   .first())
@with_session
def _get_artifact_creation_date(artifact_id, session):
    """Fetch the timestamp of the first touch recorded against an artifact.

    :param artifact_id: A valid artifact id.
    :returns: One-column row holding the min touch_dt.
    """
    return (session.query(func.min(Touch.touch_dt))
                   .filter(Touch.artifact_id == artifact_id)
                   .first())
| {
"content_hash": "03968975e7c8f886ef80dbf225d3853f",
"timestamp": "",
"source": "github",
"line_count": 917,
"max_line_length": 95,
"avg_line_length": 35.55070883315158,
"alnum_prop": 0.6269018404907976,
"repo_name": "cedadev/eos-db",
"id": "a03980e5f41ab8b2da2d8ccd7096e8703b91eaa3",
"size": "32600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eos_db/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "163226"
}
],
"symlink_target": ""
} |
"""
The ``mlflow.projects`` module provides an API for running MLflow projects locally or remotely.
"""
import json
import yaml
import os
import logging
import mlflow.projects.databricks
from mlflow import tracking
from mlflow.entities import RunStatus
from mlflow.exceptions import ExecutionException, MlflowException
from mlflow.projects.submitted_run import SubmittedRun
from mlflow.projects.utils import (
PROJECT_SYNCHRONOUS,
get_entry_point_command,
get_run_env_vars,
fetch_and_validate_project,
get_or_create_run,
load_project,
MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG,
PROJECT_ENV_MANAGER,
PROJECT_STORAGE_DIR,
PROJECT_DOCKER_ARGS,
PROJECT_BUILD_IMAGE,
)
from mlflow.projects.backend import loader
from mlflow.tracking.fluent import _get_experiment_id
from mlflow.utils.mlflow_tags import (
MLFLOW_PROJECT_ENV,
MLFLOW_PROJECT_BACKEND,
MLFLOW_RUN_NAME,
MLFLOW_DOCKER_IMAGE_ID,
)
from mlflow.utils import env_manager as _EnvManager
import mlflow.utils.uri
_logger = logging.getLogger(__name__)
def _resolve_experiment_id(experiment_name=None, experiment_id=None):
    """
    Resolve experiment.
    Verifies either one or other is specified - cannot be both selected.
    If ``experiment_name`` is provided and does not exist, an experiment
    of that name is created and its id is returned.
    :param experiment_name: Name of experiment under which to launch the run.
    :param experiment_id: ID of experiment under which to launch the run.
    :return: str
    """
    if experiment_name and experiment_id:
        raise MlflowException("Specify only one of 'experiment_name' or 'experiment_id'.")
    if experiment_id:
        return str(experiment_id)
    if not experiment_name:
        # Neither given: fall back to the active/default experiment.
        return _get_experiment_id()
    client = tracking.MlflowClient()
    existing = client.get_experiment_by_name(experiment_name)
    if existing:
        return existing.experiment_id
    _logger.info("'%s' does not exist. Creating a new experiment", experiment_name)
    return client.create_experiment(experiment_name)
def _run(
    uri,
    experiment_id,
    entry_point,
    version,
    parameters,
    docker_args,
    backend_name,
    backend_config,
    storage_dir,
    env_manager,
    synchronous,
    run_name,
    build_image,
):
    """
    Helper that delegates to the project-running method corresponding to the passed-in backend.
    Returns a ``SubmittedRun`` corresponding to the project run.

    Dispatch order: backends other than "databricks"/"kubernetes" are resolved
    through the plugin loader; the two built-ins are handled inline below.
    """
    tracking_store_uri = tracking.get_tracking_uri()
    # Propagate per-run settings to the backend via the config dict.
    # NOTE: this mutates the caller-supplied ``backend_config``.
    backend_config[PROJECT_ENV_MANAGER] = env_manager
    backend_config[PROJECT_SYNCHRONOUS] = synchronous
    backend_config[PROJECT_DOCKER_ARGS] = docker_args
    backend_config[PROJECT_STORAGE_DIR] = storage_dir
    backend_config[PROJECT_BUILD_IMAGE] = build_image
    # TODO: remove this check once kubernetes execution has been refactored
    if backend_name not in {"databricks", "kubernetes"}:
        # Loader path: built-in "local" backend and third-party plugins.
        backend = loader.load_backend(backend_name)
        if backend:
            submitted_run = backend.run(
                uri,
                entry_point,
                parameters,
                version,
                backend_config,
                tracking_store_uri,
                experiment_id,
            )
            tracking.MlflowClient().set_tag(
                submitted_run.run_id, MLFLOW_PROJECT_BACKEND, backend_name
            )
            if run_name is not None:
                tracking.MlflowClient().set_tag(submitted_run.run_id, MLFLOW_RUN_NAME, run_name)
            return submitted_run
    # Built-in backends: fetch and validate the project before launching.
    work_dir = fetch_and_validate_project(uri, version, entry_point, parameters)
    project = load_project(work_dir)
    _validate_execution_environment(project, backend_name)
    active_run = get_or_create_run(
        None, uri, experiment_id, work_dir, version, entry_point, parameters
    )
    if run_name is not None:
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_RUN_NAME, run_name)
    if backend_name == "databricks":
        tracking.MlflowClient().set_tag(
            active_run.info.run_id, MLFLOW_PROJECT_BACKEND, "databricks"
        )
        # Imported lazily so the databricks extras are only required here.
        from mlflow.projects.databricks import run_databricks
        return run_databricks(
            remote_run=active_run,
            uri=uri,
            entry_point=entry_point,
            work_dir=work_dir,
            parameters=parameters,
            experiment_id=experiment_id,
            cluster_spec=backend_config,
            env_manager=env_manager,
        )
    elif backend_name == "kubernetes":
        # Lazy imports: docker/kubernetes support is optional.
        from mlflow.projects.docker import (
            build_docker_image,
            validate_docker_env,
            validate_docker_installation,
        )
        from mlflow.projects import kubernetes as kb
        tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV, "docker")
        tracking.MlflowClient().set_tag(
            active_run.info.run_id, MLFLOW_PROJECT_BACKEND, "kubernetes"
        )
        validate_docker_env(project)
        validate_docker_installation()
        kube_config = _parse_kubernetes_config(backend_config)
        # Build (or reuse, per ``build_image``) the project image, push it,
        # and record its digest on the tracking run.
        image = build_docker_image(
            work_dir=work_dir,
            repository_uri=kube_config["repository-uri"],
            base_image=project.docker_env.get("image"),
            run_id=active_run.info.run_id,
            build_image=build_image,
        )
        image_digest = kb.push_image_to_registry(image.tags[0])
        tracking.MlflowClient().set_tag(
            active_run.info.run_id, MLFLOW_DOCKER_IMAGE_ID, image_digest
        )
        submitted_run = kb.run_kubernetes_job(
            project.name,
            active_run,
            image.tags[0],
            image_digest,
            get_entry_point_command(project, entry_point, parameters, storage_dir),
            get_run_env_vars(
                run_id=active_run.info.run_uuid, experiment_id=active_run.info.experiment_id
            ),
            kube_config.get("kube-context", None),
            kube_config["kube-job-template"],
        )
        return submitted_run
    # Unknown backend and the loader returned nothing: report what is valid.
    supported_backends = ["databricks", "kubernetes"] + list(loader.MLFLOW_BACKENDS.keys())
    raise ExecutionException(
        "Got unsupported execution mode %s. Supported "
        "values: %s" % (backend_name, supported_backends)
    )
def run(
    uri,
    entry_point="main",
    version=None,
    parameters=None,
    docker_args=None,
    experiment_name=None,
    experiment_id=None,
    backend="local",
    backend_config=None,
    storage_dir=None,
    synchronous=True,
    run_id=None,
    run_name=None,
    env_manager=None,
    build_image=False,
):
    """
    Run an MLflow project. The project can be local or stored at a Git URI.

    MLflow provides built-in support for running projects locally or remotely on a Databricks or
    Kubernetes cluster. You can also run projects against other targets by installing an appropriate
    third-party plugin. See `Community Plugins <../plugins.html#community-plugins>`_ for more
    information.

    For information on using this method in chained workflows, see `Building Multistep Workflows
    <../projects.html#building-multistep-workflows>`_.

    :raises: :py:class:`mlflow.exceptions.ExecutionException` If a run launched in blocking mode
             is unsuccessful.

    :param uri: URI of project to run. A local filesystem path
                or a Git repository URI (e.g. https://github.com/mlflow/mlflow-example)
                pointing to a project directory containing an MLproject file.
    :param entry_point: Entry point to run within the project. If no entry point with the specified
                        name is found, runs the project file ``entry_point`` as a script,
                        using "python" to run ``.py`` files and the default shell (specified by
                        environment variable ``$SHELL``) to run ``.sh`` files.
    :param version: For Git-based projects, either a commit hash or a branch name.
    :param parameters: Parameters (dictionary) for the entry point command.
    :param docker_args: Arguments (dictionary) for the docker command.
    :param experiment_name: Name of experiment under which to launch the run.
    :param experiment_id: ID of experiment under which to launch the run.
    :param backend: Execution backend for the run: MLflow provides built-in support for "local",
                    "databricks", and "kubernetes" (experimental) backends. If running against
                    Databricks, will run against a Databricks workspace determined as follows:
                    if a Databricks tracking URI of the form ``databricks://profile`` has been set
                    (e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run
                    against the workspace specified by <profile>. Otherwise, runs against the
                    workspace specified by the default Databricks CLI profile.
    :param backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which will
                           be passed as config to the backend. The exact content which should be
                           provided is different for each execution backend and is documented
                           at https://www.mlflow.org/docs/latest/projects.html.
    :param storage_dir: Used only if ``backend`` is "local". MLflow downloads artifacts from
                        distributed URIs passed to parameters of type ``path`` to subdirectories of
                        ``storage_dir``.
    :param synchronous: Whether to block while waiting for a run to complete. Defaults to True.
                        Note that if ``synchronous`` is False and ``backend`` is "local", this
                        method will return, but the current process will block when exiting until
                        the local run completes. If the current process is interrupted, any
                        asynchronous runs launched via this method will be terminated. If
                        ``synchronous`` is True and the run fails, the current process will
                        error out as well.
    :param run_id: Note: this argument is used internally by the MLflow project APIs and should
                   not be specified. If specified, the run ID will be used instead of
                   creating a new run.
    :param run_name: The name to give the MLflow Run associated with the project execution.
                     If ``None``, the MLflow Run name is left unset.
    :param env_manager: Specify an environment manager to create a new environment for the run and
                        install project dependencies within that environment. The following values
                        are supported:

                        - local: use the local environment
                        - virtualenv: use virtualenv (and pyenv for Python version management)
                        - conda: use conda

                        If unspecified, MLflow automatically determines the environment manager to
                        use by inspecting files in the project directory. For example, if
                        ``python_env.yaml`` is present, virtualenv will be used.
    :param build_image: Whether to build a new docker image of the project or to reuse an existing
                        image. Default: False (reuse an existing image)
    :return: :py:class:`mlflow.projects.SubmittedRun` exposing information (e.g. run ID)
             about the launched run.

    .. code-block:: python
        :caption: Example

        import mlflow

        project_uri = "https://github.com/mlflow/mlflow-example"
        params = {"alpha": 0.5, "l1_ratio": 0.01}

        # Run MLflow project and create a reproducible conda environment
        # on a local host
        mlflow.run(project_uri, parameters=params)

    .. code-block:: text
        :caption: Output

        ...
        ...
        Elasticnet model (alpha=0.500000, l1_ratio=0.010000):
        RMSE: 0.788347345611717
        MAE: 0.6155576449938276
        R2: 0.19729662005412607
        ... mlflow.projects: === Run (ID '6a5109febe5e4a549461e149590d0a7c') succeeded ===
    """
    backend_config_dict = backend_config if backend_config is not None else {}
    # FIX: use isinstance instead of an exact `type(...) != dict` comparison so
    # dict subclasses (e.g. OrderedDict) are treated as in-memory configs
    # rather than being opened as file paths.
    if (
        backend_config
        and not isinstance(backend_config, dict)
        and os.path.splitext(backend_config)[-1] == ".json"
    ):
        with open(backend_config, "r") as handle:
            try:
                backend_config_dict = json.load(handle)
            except ValueError:
                _logger.error(
                    "Error when attempting to load and parse JSON cluster spec from file %s",
                    backend_config,
                )
                raise
    if env_manager is not None:
        _EnvManager.validate(env_manager)
    if backend == "databricks":
        mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), backend_config)
    elif backend == "local" and run_id is not None:
        # Reuse the caller-provided run instead of creating a new one.
        backend_config_dict[MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG] = run_id
    experiment_id = _resolve_experiment_id(
        experiment_name=experiment_name, experiment_id=experiment_id
    )
    submitted_run_obj = _run(
        uri=uri,
        experiment_id=experiment_id,
        entry_point=entry_point,
        version=version,
        parameters=parameters,
        docker_args=docker_args,
        backend_name=backend,
        backend_config=backend_config_dict,
        env_manager=env_manager,
        storage_dir=storage_dir,
        synchronous=synchronous,
        run_name=run_name,
        build_image=build_image,
    )
    if synchronous:
        # Block until completion; raises ExecutionException on failure.
        _wait_for(submitted_run_obj)
    return submitted_run_obj
def _wait_for(submitted_run_obj):
    """Block until *submitted_run_obj* finishes, reporting its final status.

    Raises ExecutionException when the run fails; re-raises
    KeyboardInterrupt after cancelling the run.
    """
    the_run_id = submitted_run_obj.run_id
    tracked_run = None
    # Note: there's a small chance we fail to report the run's status to the tracking server if
    # we're interrupted before we reach the try block below
    try:
        if the_run_id is not None:
            tracked_run = tracking.MlflowClient().get_run(the_run_id)
        if not submitted_run_obj.wait():
            _maybe_set_run_terminated(tracked_run, "FAILED")
            raise ExecutionException("Run (ID '%s') failed" % the_run_id)
        _logger.info("=== Run (ID '%s') succeeded ===", the_run_id)
        _maybe_set_run_terminated(tracked_run, "FINISHED")
    except KeyboardInterrupt:
        _logger.error("=== Run (ID '%s') interrupted, cancelling run ===", the_run_id)
        submitted_run_obj.cancel()
        _maybe_set_run_terminated(tracked_run, "FAILED")
        raise
def _maybe_set_run_terminated(active_run, status):
    """
    If the passed-in active run is defined and still running (i.e. hasn't already been terminated
    within user code), mark it as terminated with the passed-in status.
    """
    if active_run is None:
        return
    client = tracking.MlflowClient()
    target_run_id = active_run.info.run_id
    latest_status = client.get_run(target_run_id).info.status
    # Respect a terminal status already set by user code.
    if not RunStatus.is_terminated(latest_status):
        client.set_terminated(target_run_id, status)
def _validate_execution_environment(project, backend):
if project.docker_env and backend == "databricks":
raise ExecutionException(
"Running docker-based projects on Databricks is not yet supported."
)
def _parse_kubernetes_config(backend_config):
    """
    Validate a Kubernetes ``backend_config`` dict and return a copy with the
    job template loaded.

    Required keys: 'kube-job-template-path' (an existing YAML file, parsed
    into the returned dict under 'kube-job-template') and 'repository-uri'.
    'kube-context' is optional; when absent the current or in-cluster
    context is used.

    (The previous docstring described building a docker context tarfile and
    was copied from an unrelated helper.)

    :param backend_config: Backend configuration dictionary supplied by the user.
    :raises ExecutionException: if the config or a required key is missing,
        or the job template file does not exist.
    :return: dict copy of ``backend_config`` plus the parsed job template.
    """
    if not backend_config:
        raise ExecutionException("Backend_config file not found.")
    kube_config = backend_config.copy()
    if "kube-job-template-path" not in backend_config:
        raise ExecutionException(
            "'kube-job-template-path' attribute must be specified in backend_config."
        )
    kube_job_template = backend_config["kube-job-template-path"]
    if not os.path.exists(kube_job_template):
        raise ExecutionException(
            "Could not find 'kube-job-template-path': {}".format(kube_job_template)
        )
    with open(kube_job_template, "r") as job_template:
        kube_config["kube-job-template"] = yaml.safe_load(job_template.read())
    if "kube-context" not in backend_config:
        _logger.debug(
            "Could not find kube-context in backend_config."
            " Using current context or in-cluster config."
        )
    if "repository-uri" not in backend_config:
        raise ExecutionException("Could not find 'repository-uri' in backend_config.")
    return kube_config
__all__ = ["run", "SubmittedRun"]
| {
"content_hash": "98b4510259f0003e81dc811d66e6afaa",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 100,
"avg_line_length": 40.09715639810427,
"alnum_prop": 0.6359553217894923,
"repo_name": "mlflow/mlflow",
"id": "c26fffd3a4f92e6726b85e5e654d3f48484e8974",
"size": "16921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlflow/projects/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
} |
import unittest
from sqltxt.sql_tokenizer import select_stmt, parse
class SqlTokenizerTest(unittest.TestCase):
    """Unit tests for the SQL grammar in ``sqltxt.sql_tokenizer``.

    Exercises ``select_stmt.parseString`` (raw pyparsing results) and the
    higher-level ``parse`` helper (plain dict/list structures).
    """

    def test_parse_select_list(self):
        """Column definitions are extracted, including table-qualified names."""
        parsed = select_stmt.parseString('select col1 from table1')
        self.assertEqual(list(parsed.column_definitions), ['col1'])
        parsed = select_stmt.parseString('select col1, col2 from table1')
        self.assertEqual(list(parsed.column_definitions), ['col1', 'col2'])
        parsed = select_stmt.parseString('select table1.col1 from table1')
        self.assertEqual(list(parsed.column_definitions), ['table1.col1'])

    def test_parse_select_list_with_wildcard(self):
        """The ``*`` wildcard is accepted alone and mixed with named columns."""
        parsed = select_stmt.parseString('select * from table1')
        self.assertEqual(list(parsed.column_definitions), ['*'])
        parsed = select_stmt.parseString('select *, col1 from table1')
        self.assertEqual(list(parsed.column_definitions), ['*', 'col1'])

    def test_parse_from_list(self):
        """The FROM clause exposes the relation's path."""
        parsed = select_stmt.parseString('select col1 from table1')
        relation_path = parsed.from_clause.relation.path
        self.assertEqual(relation_path, 'table1')

    def test_parse_from_list_with_table_alias(self):
        """A trailing identifier after the relation is parsed as its alias."""
        parsed = select_stmt.parseString('select col1 from table1 t1')
        relation_path = parsed.from_clause.relation.path
        relation_alias = parsed.from_clause.relation.alias[0]
        self.assertEqual(relation_path, 'table1')
        self.assertEqual(relation_alias, 't1')

    def test_parse_from_list_with_joins_to_get_join_type(self):
        """join/inner/left/right keywords are captured per joined relation."""
        parsed = select_stmt.parseString('''
            select col1
            from table1 join table2 on (table1.col1 = table2.col1)
            ''')
        self.assertEqual(parsed.from_clause.joins[0][0].join_type, 'join')
        parsed = select_stmt.parseString('''
            select col1
            from table1 inner join table2 on (table1.col1 = table2.col1)
            ''')
        self.assertEqual(parsed.from_clause.joins[0][0].join_type, 'inner')
        parsed = select_stmt.parseString('''
            select col1
            from table1 left join table2 t2 on (table1.col1 = t2.col1)
            ''')
        self.assertEqual(parsed.from_clause.joins[0][0].join_type, 'left')
        parsed = select_stmt.parseString('''
            select col1
            from table1 right join table2 on (table1.col1 = table2.col1)
            ''')
        self.assertEqual(parsed.from_clause.joins[0][0].join_type, 'right')
        # Multiple joins keep their order and individual types.
        parsed = select_stmt.parseString('''
            select col1
            from
              table1
              join table2 on (table1.col1 = table2.col1)
              left join table3 on (table2.col1 = table3.col1)
            ''')
        self.assertEqual(parsed.from_clause.joins[0][0].join_type, 'join')
        self.assertEqual(parsed.from_clause.joins[0][1].join_type, 'left')

    def test_parse_from_list_with_joins_to_get_join_conditions(self):
        """ON conditions become operand/operator dicts; AND chains are preserved."""
        parsed = parse('''
            select cola
            from table1 join table2 on (table1.cola = table2.cola)
            ''')
        self.assertEqual(parsed.from_clause[1]['join_conditions'][0], {
            'left_operand': 'table1.cola',
            'operator': '=',
            'right_operand': 'table2.cola',
        })
        parsed = parse('''
            select cola
            from
              table1
              join table2 on (table1.cola = table2.cola)
              join table3 t3 on (table1.colb = t3.colb)
            ''')
        self.assertEqual(parsed.from_clause[2]['join_conditions'][0], {
            'left_operand': 'table1.colb',
            'operator': '=',
            'right_operand': 't3.colb',
        })
        parsed = parse('''
            select cola
            from
              table1
              join table2 on (table1.cola = table2.cola)
              join table3 t3 on (table1.colb = t3.colb and table2.colc = t3.colc)
            ''')
        self.assertEqual(parsed.from_clause[2]['join_conditions'][1], 'and')
        self.assertEqual(parsed.from_clause[2]['join_conditions'][2], {
            'left_operand': 'table2.colc',
            'operator': '=',
            'right_operand': 't3.colc',
        })

    def test_parse_where_list(self):
        """WHERE clauses nest parenthesized conditions as sub-lists."""
        parsed = parse('''
            select cola
            from
              table1
              join table2 on (table1.cola = table2.cola)
            where colb = 1 and (colc = 0 or colz = 'a')
            ''')
        self.assertEqual(parsed.where_clause, [
            {'left_operand': 'colb', 'operator': '=', 'right_operand': '1'},
            'and',
            [
                {'left_operand': 'colc', 'operator': '=', 'right_operand': '0'},
                'or',
                {'left_operand': 'colz', 'operator': '=', 'right_operand': "'a'"}
            ],
        ])

    def test_parse_tablesample_clause(self):
        """TABLESAMPLE (N) yields an integer sample_size."""
        parsed = parse('''
            select cola
            from table1
            tablesample (5)
            ''')
        self.assertEqual(parsed.tablesample_clause.asDict(), {'sample_size': 5})
        parsed = parse('''
            select cola
            from
              table1
              join table2 on (table1.cola = table2.cola)
            where colb = 1 and (colc = 0 or colz = 'a')
            tablesample (50)
            ''')
        self.assertEqual(parsed.tablesample_clause.asDict(), {'sample_size': 50})
| {
"content_hash": "09c2ef56fc7338d8fa2eefb661bdeb99",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 83,
"avg_line_length": 37.416666666666664,
"alnum_prop": 0.557720861172977,
"repo_name": "shahin/sqltxt",
"id": "124e6ee9b13a57e7fff0b1786752b7106de2bbc2",
"size": "5388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/sql_tokenizer_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87670"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem394.py
#
# Eating pie
# ==========
# Published on Saturday, 15th September 2012, 08:00 pm
#
# Jeff eats a pie in an unusual way. The pie is circular. He starts with
# slicing an initial cut in the pie along a radius. While there is at least a
# given fraction F of pie left, he performs the following procedure: - He
# makes two slices from the pie centre to any point of what is remaining of the
# pie border, any point on the remaining pie border equally likely. This will
# divide the remaining pie into three pieces. - Going counterclockwise from
# the initial cut, he takes the first two pie pieces and eats them. When less
# than a fraction F of pie remains, he does not repeat this procedure. Instead,
# he eats all of the remaining pie.
import projecteuler as pe
def main():
    # TODO: the solver for Project Euler problem 394 ("Eating pie") is not
    # implemented yet; this is a placeholder stub.
    pass

if __name__ == "__main__":
    # Script entry point: runs the (stub) solver when executed directly.
    main()
| {
"content_hash": "5bc831914b8dfa7cc8b838353b145603",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 37.375,
"alnum_prop": 0.7134894091415831,
"repo_name": "olduvaihand/ProjectEuler",
"id": "8928326834d3edfaf8b3a7a5cd728d4e2ef5c7a3",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem394.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
} |
"""Smoke-test connection to a Compose-for-MongoDB instance on Bluemix.

Connects with the URL from the MONGODB_URL environment variable (falling
back to the historical hard-coded value) and prints the collection names
of the default database.
"""
import os
import ssl

import pymongo

# SECURITY(review): the fallback connection string embeds admin credentials in
# source control. Prefer supplying MONGODB_URL via the environment; the
# hard-coded value is kept only as a fallback so existing behavior is
# unchanged. Rotate this credential and remove the fallback when possible.
MONGODB_URL = os.environ.get(
    'MONGODB_URL',
    'mongodb://admin:GGGRCAPOQYDGTXMN@bluemix-sandbox-dal-9-portal.7.dblayer.com:26175,bluemix-sandbox-dal-9-portal.6.dblayer.com:26175/admin?ssl=true',
)

# NOTE(review): ssl_cert_reqs=ssl.CERT_NONE disables server certificate
# validation (vulnerable to MITM). Left as-is to preserve behavior -- confirm
# whether the Compose CA bundle can be pinned instead.
client = pymongo.MongoClient(MONGODB_URL, ssl_cert_reqs=ssl.CERT_NONE)
db = client.get_default_database()
print(db.collection_names())
| {
"content_hash": "f26c95cbe871bc5054b33c83c4b41e88",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 161,
"avg_line_length": 40.625,
"alnum_prop": 0.7876923076923077,
"repo_name": "weizy1981/WatsonRobot",
"id": "8cf2499a0c50a96358c2dcad7106ba532cd7ad27",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run/runBluemixMongo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "89394"
},
{
"name": "HTML",
"bytes": "11618"
},
{
"name": "Python",
"bytes": "51911"
}
],
"symlink_target": ""
} |
"""
Device tracker platform that adds support for OwnTracks over MQTT.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
from homeassistant.components import zone as zone_comp
from homeassistant.components.device_tracker import (
ATTR_SOURCE_TYPE, SOURCE_TYPE_BLUETOOTH_LE, SOURCE_TYPE_GPS)
from homeassistant.const import STATE_HOME
from homeassistant.util import decorator, slugify
from . import DOMAIN as OT_DOMAIN
DEPENDENCIES = ['owntracks']
_LOGGER = logging.getLogger(__name__)
HANDLERS = decorator.Registry()
async def async_setup_entry(hass, entry, async_see):
    """Set up OwnTracks based off an entry."""
    # Expose the device-tracker `async_see` callback through the shared
    # OwnTracks context so the message handlers below can report updates.
    hass.data[OT_DOMAIN]['context'].async_see = async_see
    # Route every dispatched OwnTracks payload to async_handle_message.
    hass.helpers.dispatcher.async_dispatcher_connect(
        OT_DOMAIN, async_handle_message)
    return True
def get_cipher():
    """Return the libsodium secret-box key length and a decryption callable.

    Async friendly.
    """
    # Imported lazily: libsodium (PyNaCl) is an optional dependency.
    from nacl.encoding import Base64Encoder
    from nacl.secret import SecretBox

    def _decode(ciphertext, key):
        """Decrypt ciphertext using key."""
        box = SecretBox(key)
        return box.decrypt(ciphertext, encoder=Base64Encoder)

    return (SecretBox.KEY_SIZE, _decode)
def _parse_topic(topic, subscribe_topic):
"""Parse an MQTT topic {sub_topic}/user/dev, return (user, dev) tuple.
Async friendly.
"""
subscription = subscribe_topic.split('/')
try:
user_index = subscription.index('#')
except ValueError:
_LOGGER.error("Can't parse subscription topic: '%s'", subscribe_topic)
raise
topic_list = topic.split('/')
try:
user, device = topic_list[user_index], topic_list[user_index + 1]
except IndexError:
_LOGGER.error("Can't parse topic: '%s'", topic)
raise
return user, device
def _parse_see_args(message, subscribe_topic):
    """Parse the OwnTracks location parameters, into the format see expects.

    Async friendly.
    """
    user, device = _parse_topic(message['topic'], subscribe_topic)
    dev_id = slugify('{}_{}'.format(user, device))
    kwargs = {
        'dev_id': dev_id,
        'host_name': user,
        'gps': (message['lat'], message['lon']),
        'attributes': {}
    }
    if 'acc' in message:
        kwargs['gps_accuracy'] = message['acc']
    if 'batt' in message:
        kwargs['battery'] = message['batt']
    # Optional OwnTracks fields that map straight into `see` attributes.
    for src_key, attr_key in (('vel', 'velocity'), ('tid', 'tid'),
                              ('addr', 'address'), ('cog', 'course')):
        if src_key in message:
            kwargs['attributes'][attr_key] = message[src_key]
    if 't' in message:
        # 'c' = cellular/GPS fix, 'b' = Bluetooth LE beacon.
        if message['t'] == 'c':
            kwargs['attributes'][ATTR_SOURCE_TYPE] = SOURCE_TYPE_GPS
        if message['t'] == 'b':
            kwargs['attributes'][ATTR_SOURCE_TYPE] = SOURCE_TYPE_BLUETOOTH_LE
    return dev_id, kwargs
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters.
Async friendly.
"""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
def _decrypt_payload(secret, topic, ciphertext):
    """Decrypt encrypted payload.

    ``secret`` is either a single key string or a dict mapping topics to
    keys. Returns the decrypted UTF-8 string, or None when decryption is
    not possible (missing libsodium, unknown key, or bad ciphertext).
    """
    try:
        keylen, decrypt = get_cipher()
    except OSError:
        # PyNaCl raises OSError when the libsodium shared library is absent.
        _LOGGER.warning(
            "Ignoring encrypted payload because libsodium not installed")
        return None
    # Per-topic secrets take precedence over a single shared secret.
    if isinstance(secret, dict):
        key = secret.get(topic)
    else:
        key = secret
    if key is None:
        _LOGGER.warning(
            "Ignoring encrypted payload because no decryption key known "
            "for topic %s", topic)
        return None
    # Normalize the key to exactly `keylen` bytes: truncate long keys,
    # NUL-pad short ones.
    key = key.encode("utf-8")
    key = key[:keylen]
    key = key.ljust(keylen, b'\0')
    try:
        message = decrypt(ciphertext, key)
        message = message.decode("utf-8")
        _LOGGER.debug("Decrypted payload: %s", message)
        return message
    except ValueError:
        _LOGGER.warning(
            "Ignoring encrypted payload because unable to decrypt using "
            "key for topic %s", topic)
        return None
@HANDLERS.register('location')
async def async_handle_location_message(hass, context, message):
    """Handle a location message.

    Skips updates that fail the accuracy filter, updates while
    ``events_only`` is set, and updates while the device is inside a
    region (region tracking takes precedence over raw GPS fixes).
    """
    if not context.async_valid_accuracy(message):
        return
    if context.events_only:
        _LOGGER.debug("Location update ignored due to events_only setting")
        return
    dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
    if context.regions_entered[dev_id]:
        # BUG FIX: previously indexed the per-device mapping itself
        # (``regions_entered[-1]``) instead of this device's region list.
        _LOGGER.debug(
            "Location update ignored, inside region %s",
            context.regions_entered[dev_id][-1])
        return
    await context.async_see(**kwargs)
    await context.async_see_beacons(hass, dev_id, kwargs)
async def _async_transition_message_enter(hass, context, message, location):
    """Execute enter event.

    A location that is not a known HA zone and arrives from a beacon
    ('t' == 'b') is tracked as a mobile beacon; anything else is treated
    as entering a normal region.
    """
    zone = hass.states.get("zone.{}".format(slugify(location)))
    dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
    if zone is None and message.get('t') == 'b':
        # Not a HA zone, and a beacon so mobile beacon.
        # kwargs will contain the lat/lon of the beacon
        # which is not where the beacon actually is
        # and is probably set to 0/0
        beacons = context.mobile_beacons_active[dev_id]
        if location not in beacons:
            beacons.add(location)
        _LOGGER.info("Added beacon %s", location)
        await context.async_see_beacons(hass, dev_id, kwargs)
    else:
        # Normal region
        regions = context.regions_entered[dev_id]
        if location not in regions:
            regions.append(location)
        _LOGGER.info("Enter region %s", location)
        # Report the zone's coordinates, not the raw message coordinates.
        _set_gps_from_zone(kwargs, location, zone)
        await context.async_see(**kwargs)
        await context.async_see_beacons(hass, dev_id, kwargs)
async def _async_transition_message_leave(hass, context, message, location):
    """Execute leave event.

    Removes the location from the device's region stack (or active mobile
    beacons). When regions remain, the device is placed in the most
    recently entered one; otherwise it falls back to the message's GPS fix.
    """
    dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
    regions = context.regions_entered[dev_id]
    if location in regions:
        regions.remove(location)
    beacons = context.mobile_beacons_active[dev_id]
    if location in beacons:
        # Leaving a mobile beacon: stop tracking it.
        beacons.remove(location)
        _LOGGER.info("Remove beacon %s", location)
        await context.async_see_beacons(hass, dev_id, kwargs)
    else:
        # Last remaining region, if any, becomes the current location.
        new_region = regions[-1] if regions else None
        if new_region:
            # Exit to previous region
            zone = hass.states.get(
                "zone.{}".format(slugify(new_region)))
            _set_gps_from_zone(kwargs, new_region, zone)
            _LOGGER.info("Exit to %s", new_region)
            await context.async_see(**kwargs)
            await context.async_see_beacons(hass, dev_id, kwargs)
            return
        _LOGGER.info("Exit to GPS")
        # Check for GPS accuracy
        if context.async_valid_accuracy(message):
            await context.async_see(**kwargs)
            await context.async_see_beacons(hass, dev_id, kwargs)
@HANDLERS.register('transition')
async def async_handle_transition_message(hass, context, message):
    """Handle a transition message.

    Normalizes the region name (hold-mode prefix, user-defined mapping,
    the special "home" zone) and then dispatches to the enter/leave helper.
    """
    if message.get('desc') is None:
        _LOGGER.error(
            "Location missing from `Entering/Leaving` message - "
            "please turn `Share` on in OwnTracks app")
        return
    # OwnTracks uses - at the start of a beacon zone
    # to switch on 'hold mode' - ignore this
    location = message['desc'].lstrip("-")
    # Create a layer of indirection for Owntracks instances that may name
    # regions differently than their HA names
    if location in context.region_mapping:
        location = context.region_mapping[location]
    if location.lower() == 'home':
        location = STATE_HOME
    if message['event'] == 'enter':
        await _async_transition_message_enter(
            hass, context, message, location)
    elif message['event'] == 'leave':
        await _async_transition_message_leave(
            hass, context, message, location)
    else:
        _LOGGER.error(
            "Misformatted mqtt msgs, _type=transition, event=%s",
            message['event'])
async def async_handle_waypoint(hass, name_base, waypoint):
    """Handle a waypoint.

    Imports a single OwnTracks waypoint as a Home Assistant zone named
    "<name_base> - <desc>". Existing zones are left untouched.
    """
    name = waypoint['desc']
    pretty_name = '{} - {}'.format(name_base, name)
    lat = waypoint['lat']
    lon = waypoint['lon']
    rad = waypoint['rad']
    # check zone exists
    entity_id = zone_comp.ENTITY_ID_FORMAT.format(slugify(pretty_name))
    # Check if state already exists
    if hass.states.get(entity_id) is not None:
        return
    zone = zone_comp.Zone(hass, pretty_name, lat, lon, rad,
                          zone_comp.ICON_IMPORT, False)
    zone.entity_id = entity_id
    await zone.async_update_ha_state()
@HANDLERS.register('waypoint')
@HANDLERS.register('waypoints')
async def async_handle_waypoints_message(hass, context, message):
    """Import OwnTracks waypoints as zones, honoring the user whitelist."""
    if not context.import_waypoints:
        return
    whitelist = context.waypoint_whitelist
    if whitelist is not None:
        sender = _parse_topic(message['topic'], context.mqtt_topic)[0]
        if sender not in whitelist:
            return
    # A 'waypoint' message carries a single entry; 'waypoints' carries a list.
    waypoints = message['waypoints'] if 'waypoints' in message else [message]
    _LOGGER.info("Got %d waypoints from %s", len(waypoints), message['topic'])
    name_base = ' '.join(_parse_topic(message['topic'], context.mqtt_topic))
    for single_waypoint in waypoints:
        await async_handle_waypoint(hass, name_base, single_waypoint)
@HANDLERS.register('encrypted')
async def async_handle_encrypted_message(hass, context, message):
    """Handle an encrypted message.

    Decrypts the payload and re-dispatches the decoded message through
    async_handle_message.
    """
    # Per-topic secrets require a topic; HTTP-delivered messages have none.
    if 'topic' not in message and isinstance(context.secret, dict):
        _LOGGER.error("You cannot set per topic secrets when using HTTP")
        return
    plaintext_payload = _decrypt_payload(context.secret, message.get('topic'),
                                         message['data'])
    if plaintext_payload is None:
        return
    decrypted = json.loads(plaintext_payload)
    # Preserve the outer topic so downstream handlers can parse user/device.
    if 'topic' in message and 'topic' not in decrypted:
        decrypted['topic'] = message['topic']
    await async_handle_message(hass, context, decrypted)
@HANDLERS.register('lwt')
@HANDLERS.register('configuration')
@HANDLERS.register('beacon')
@HANDLERS.register('cmd')
@HANDLERS.register('steps')
@HANDLERS.register('card')
async def async_handle_not_impl_msg(hass, context, message):
    """Handle valid but not implemented message types."""
    msg_kind = message.get("_type")
    _LOGGER.debug('Not handling %s message: %s', msg_kind, message)
async def async_handle_unsupported_msg(hass, context, message):
    """Handle an unsupported or invalid message type."""
    unknown_kind = message.get('_type')
    _LOGGER.warning('Received unsupported message type: %s.', unknown_kind)
async def async_handle_message(hass, context, message):
    """Dispatch an OwnTracks message to the handler registered for its type."""
    _LOGGER.debug("Received %s", message)
    dispatch = HANDLERS.get(message.get('_type'), async_handle_unsupported_msg)
    await dispatch(hass, context, message)
| {
"content_hash": "f0fc808fbd3e6fca9e6628175cb51388",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 79,
"avg_line_length": 32.07821229050279,
"alnum_prop": 0.6359282479972135,
"repo_name": "jamespcole/home-assistant",
"id": "f1214b62b0edc0e040c9c7f957414b6ed3c9881a",
"size": "11484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/owntracks/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import re
from typing import Tuple, Union, List
from refprocessor.common import RANGE_REGEXP, SIGN_GT, SIGN_GTE, SIGN_LT, SIGN_LTE, get_sign_by_string, ValueRange, POINT_STRICT, Value, RANGE_IN
class AgeRight:
    """Parsed representation of an age-restriction string (Russian notation).

    Parsing produces ``age_range`` (a ValueRange) plus ``mode`` (the unit
    the range is expressed in: days, months, or years).
    """

    # Units an age restriction can be expressed in.
    MODE_DAY = "day"
    MODE_MONTH = "month"
    MODE_YEAR = "year"
    MODE_UNKNOWN = "unknow"  # original spelling kept; compared literally elsewhere

    # Russian spellings/abbreviations meaning "days".
    DAY_ORIGS = (
        "дней",
        "день",
        "дня",
        "дн",
        "дн.",
        "д",
        "д.",
    )
    # Russian spellings/abbreviations meaning "months".
    MONTH_ORIGS = (
        "месяц",
        "месяцев",
        "месяца",
        "мес",
        "мес.",
        "м",
        "м.",
    )
    # Russian spellings/abbreviations meaning "years".
    YEAR_ORIGS = (
        "год",
        "года",
        "лет",
        "г",
        "г.",
        "л",
        "л.",
    )
    # Lookup table: spelling group -> canonical mode.
    MODES_FROM_ORIGS = (
        (DAY_ORIGS, MODE_DAY),
        (MONTH_ORIGS, MODE_MONTH),
        (YEAR_ORIGS, MODE_YEAR),
    )
def __init__(self, orig_str: str):
orig_str = orig_str.strip().lower()
if AgeRight.check_is_all(orig_str):
self.age_range = ValueRange(0, float('inf'))
self.mode = AgeRight.MODE_YEAR
return
if "един" in orig_str:
orig_str = "0-2"
if "отсутств" in orig_str:
orig_str = "0-0"
constant_simple_year = AgeRight.check_is_constant_simple_year(orig_str)
if constant_simple_year:
self.age_range = ValueRange(constant_simple_year, constant_simple_year)
self.mode = AgeRight.MODE_YEAR
return
simple_year_age = AgeRight.check_is_simple_year_range(orig_str)
if simple_year_age:
self.age_range = ValueRange(int(simple_year_age.group(1)), int(simple_year_age.group(2)))
self.mode = AgeRight.MODE_YEAR
return
orig_str = re.sub(' +', ' ', orig_str)
constant_age_with_mode = AgeRight.check_is_constant_age_with_mode(orig_str)
if constant_age_with_mode:
self.age_range = ValueRange(constant_age_with_mode[0], constant_age_with_mode[0])
self.mode = constant_age_with_mode[1]
return
constant_age_with_sign = AgeRight.check_is_constant_age_with_sign_and_optional_mode(orig_str)
if constant_age_with_sign:
self.age_range = ValueRange(constant_age_with_sign[1], constant_age_with_sign[2])
self.mode = constant_age_with_sign[0]
return
full_range = AgeRight.check_is_full_range(orig_str)
if full_range:
self.age_range = ValueRange(full_range[1], full_range[2])
self.mode = full_range[0]
return
self.age_range = ValueRange(Value(0, POINT_STRICT), Value(0, POINT_STRICT))
self.mode = AgeRight.MODE_UNKNOWN
def test(self, age: List[int]) -> bool:
if self.mode == AgeRight.MODE_UNKNOWN:
return False
if self.mode == AgeRight.MODE_DAY:
if age[1] > 0 or age[2] > 0:
return False
age_var = age[0]
elif self.mode == AgeRight.MODE_MONTH:
if age[2] > 0:
return False
age_var = age[1]
else:
age_var = age[2]
return self.age_range.in_range(age_var) == RANGE_IN
@staticmethod
def check_is_all(orig_str: str) -> bool:
return orig_str in ["все", ""]
@staticmethod
def check_is_simple_year_range(orig_str: str):
orig_str = orig_str.replace(" ", "")
return re.match(r"^(\d+)-(\d+)$", orig_str)
@staticmethod
def check_is_constant_simple_year(orig_str: str) -> Union[bool, int]:
orig_str = orig_str.replace(" ", "")
if not orig_str.isdigit():
return False
return int(orig_str)
@staticmethod
def check_is_constant_age_with_mode(orig_str: str) -> Union[bool, Tuple[int, str]]:
matched = re.match(r"^(\d+) ([\w.]+)$", orig_str)
if not matched:
return False
value = int(matched.group(1))
mode_orig = matched.group(2).lower()
mode = AgeRight.get_mode_by_string(mode_orig)
if mode != AgeRight.MODE_UNKNOWN:
return value, mode
return False
@staticmethod
def check_is_constant_age_with_sign_and_optional_mode(orig_str: str) -> Union[bool, Tuple[str, Value, Value]]:
matched = re.match(r"^([\w<>≤≥&;=]+) (\d+)( )?(\w+)?$", orig_str)
if matched:
g = list(matched.groups())
if g[3]:
mode = AgeRight.get_mode_by_string(g[3])
if mode == AgeRight.MODE_UNKNOWN:
return False
else:
mode = AgeRight.MODE_YEAR
sign_orig = g[0]
sign = get_sign_by_string(sign_orig)
if not sign:
return False
value = int(g[1])
if sign == SIGN_GT:
return mode, Value(value=value, mode=POINT_STRICT), Value(value=float('inf'))
if sign == SIGN_GTE:
return mode, Value(value=value), Value(value=float('inf'))
if sign == SIGN_LT:
return mode, Value(value=0), Value(value=value, mode=POINT_STRICT)
if sign == SIGN_LTE:
return mode, Value(value=0), Value(value=value)
return False
@staticmethod
def check_is_full_range(orig_str: str) -> Union[bool, Tuple[str, Union[int, Value], Union[int, Value]]]:
matched = re.match(RANGE_REGEXP, orig_str)
if matched:
g = list(map(lambda x: x if not x else x.strip(), matched.groups()))
if g[3] or g[7]:
mode = AgeRight.get_mode_by_string(g[3] or g[7])
if mode == AgeRight.MODE_UNKNOWN:
return False
else:
mode = AgeRight.MODE_YEAR
if g[4] == 'до':
return mode, int(g[1]), Value(int(g[5]), mode=POINT_STRICT)
return mode, int(g[1]), int(g[5])
return False
@staticmethod
def get_mode_by_string(s: str) -> Union[bool, str]:
for mode_origs, mode in AgeRight.MODES_FROM_ORIGS:
if s in mode_origs:
return mode
return AgeRight.MODE_UNKNOWN
| {
"content_hash": "a20d716165686e88568390b451893237",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 145,
"avg_line_length": 30.643564356435643,
"alnum_prop": 0.5274636510500808,
"repo_name": "moodpulse/l2",
"id": "4f005c42cc9df57ac53c8761ed25ea23356f6aaf",
"size": "6268",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "refprocessor/age_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38747"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "238498"
},
{
"name": "JavaScript",
"bytes": "425946"
},
{
"name": "Makefile",
"bytes": "1515"
},
{
"name": "Python",
"bytes": "3710422"
},
{
"name": "SCSS",
"bytes": "48493"
},
{
"name": "Shell",
"bytes": "1815"
},
{
"name": "TypeScript",
"bytes": "98237"
},
{
"name": "Vue",
"bytes": "1980612"
}
],
"symlink_target": ""
} |
"""
english.py
Tools for NLG.
"""
from utils import *
# Grammatical number category for NLG.  @instance (from utils) presumably
# replaces each decorated class with a singleton instance usable as an
# enum-like value -- TODO confirm against utils.instance.
@instance
class number:
    # The two possible number values; empty marker members.
    @instance
    class singular: pass
    @instance
    class plural: pass
# Grammatical gender category for NLG.  Same @instance singleton pattern as
# `number` above -- presumably enum-like marker values; TODO confirm.
@instance
class gender:
    @instance
    class masculine: pass
    @instance
    class feminine: pass
    @instance
    class neuter: pass
| {
"content_hash": "0c1d266f9163e7e85912c4e9031e4e28",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 23,
"avg_line_length": 12.590909090909092,
"alnum_prop": 0.703971119133574,
"repo_name": "solsword/dunyazad",
"id": "64db7c67b1e801609c9049d3a118073c432b57bd",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/english.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "69531"
},
{
"name": "C++",
"bytes": "15582"
},
{
"name": "HTML",
"bytes": "30052"
},
{
"name": "Makefile",
"bytes": "2184"
},
{
"name": "Python",
"bytes": "516441"
},
{
"name": "R",
"bytes": "102986"
},
{
"name": "Shell",
"bytes": "15441"
},
{
"name": "TeX",
"bytes": "10878"
}
],
"symlink_target": ""
} |
import json
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.utils.functional import cached_property
from dateutil.rrule import rrule, MONTHLY
from slugify import slugify
from wye.base.constants import WorkshopStatus
from wye.regions.models import Location
from wye.workshops.models import Workshop, WorkshopSections
# from django.dispatch import receiver
# from rest_framework.authtoken.models import Token
class UserType(models.Model):
    """Role assigned to a user.

    Known roles: 'Tutor', 'Regional Lead', 'College POC', 'admin'.
    """
    # Machine-readable identifier for the role.
    slug = models.CharField(max_length=100,
                            verbose_name="slug")
    # Human-readable role name shown in the UI.
    display_name = models.CharField(
        max_length=300, verbose_name="Display Name")
    # FIX: was `default=1` -- use the proper boolean literal (same behavior).
    active = models.BooleanField(default=True)
    class Meta:
        db_table = 'users_type'
        verbose_name = 'UserType'
        verbose_name_plural = 'UserTypes'
        ordering = ('-id',)
    def __str__(self):
        return '{}'.format(self.display_name)
class Profile(models.Model):
    """Extended per-user data, 1:1 with ``auth.User`` (shares its PK).

    Holds contact/social links, the user's roles (``usertype``), interests,
    and a number of read-only properties aggregating the user's workshops.
    """
    user = models.OneToOneField(User, primary_key=True, related_name='profile')
    mobile = models.CharField(max_length=10, blank=False, null=True)
    is_mobile_visible = models.BooleanField(default=False)
    is_email_visible = models.BooleanField(default=False)
    usertype = models.ManyToManyField(UserType)
    interested_sections = models.ManyToManyField(WorkshopSections)
    interested_locations = models.ManyToManyField(Location)
    location = models.ForeignKey(
        Location, related_name="user_location", null=True)
    github = models.URLField(null=True, blank=True)
    facebook = models.URLField(null=True, blank=True)
    googleplus = models.URLField(null=True, blank=True)
    linkedin = models.URLField(null=True, blank=True)
    twitter = models.URLField(null=True, blank=True)
    slideshare = models.URLField(null=True, blank=True)
    picture = models.ImageField(
        upload_to='images/', default='images/newuser.png')
    class Meta:
        db_table = 'user_profile'
        verbose_name = 'UserProfile'
        verbose_name_plural = 'UserProfiles'
    def __str__(self):
        return '{} {}'.format(self.user, self.slug)
    @cached_property
    def slug(self):
        # ASCII-only slug of the username; cached per instance.
        return slugify(self.user.username, only_ascii=True)
    @property
    def get_workshop_details(self):
        """All workshops presented by this user, newest first."""
        return Workshop.objects.filter(presenter=self.user).order_by('-id')
    @property
    def get_workshop_completed_count(self):
        """Number of this user's workshops with COMPLETED status."""
        return len([x for x in
                    self.get_workshop_details if x.status == WorkshopStatus.COMPLETED])
    @property
    def get_workshop_upcoming_count(self):
        """Number of this user's workshops with ACCEPTED (upcoming) status."""
        return len([x for x in
                    self.get_workshop_details if x.status == WorkshopStatus.ACCEPTED])
    @property
    def get_total_no_of_participants(self):
        """Sum of participant counts over completed workshops."""
        return sum([x.no_of_participants for x in
                    self.get_workshop_details if x.status == WorkshopStatus.COMPLETED])
    @property
    def get_last_workshop_date(self):
        # Not implemented; always returns None.
        pass
    @property
    def get_avg_workshop_rating(self):
        # TODO: Complete!
        return 0
    @staticmethod
    def get_user_with_type(user_type=None):
        """
        Would return user with user type list in argument.
        Eg Collage POC, admin etc
        """
        return User.objects.filter(
            profile__usertype__display_name__in=user_type
        )
    @property
    def get_user_type(self):
        """Slugs of all roles assigned to this profile."""
        return [x.slug for x in self.usertype.all()]
    @property
    def get_interested_locations(self):
        """Names of all locations this user is interested in."""
        return [x.name for x in self.interested_locations.all()]
    @property
    def get_graph_data(self):
        """Monthly completed-workshop counts per section, for charting.

        Returns a JSON string of [{'key': section, 'values': [{'x','y'},...]}]
        series spanning the months between the user's first and last completed
        workshop.  NOTE(review): returns a JSON *string* on success but a bare
        list ``[]`` on every empty/edge path -- callers must handle both.
        """
        sections = WorkshopSections.objects.all()
        workshops = Workshop.objects.filter(
            presenter=self.user,
            status=WorkshopStatus.COMPLETED
        )
        if workshops:
            max_workshop_date = workshops.aggregate(
                models.Max('expected_date'))['expected_date__max']
            min_workshop_date = workshops.aggregate(
                models.Min('expected_date'))['expected_date__min']
            data = []
            if max_workshop_date and min_workshop_date:
                # One data point per calendar month in the active span.
                dates = [dt for dt in rrule(
                    MONTHLY, dtstart=min_workshop_date, until=max_workshop_date)]
                if dates:
                    for section in sections:
                        values = []
                        for d in dates:
                            y = workshops.filter(
                                expected_date__year=d.year,
                                expected_date__month=d.month,
                                workshop_section=section.pk).count()
                            values.append(
                                {'x': "{}-{}".format(d.year, d.month), 'y': y})
                        data.append({'key': section.name, 'values': values})
                    return json.dumps(data)
                else:
                    return []
            else:
                return []
        else:
            return []
    @classmethod
    def is_presenter(cls, user):
        """True if the user has the 'tutor' role."""
        return user.profile.usertype.filter(slug__iexact="tutor").exists()
    @classmethod
    def is_organiser(cls, user):
        """True if the user has a POC (point-of-contact) role."""
        return user.profile.usertype.filter(slug__icontains="poc").exists()
    @classmethod
    def is_regional_lead(cls, user):
        """True if the user has the 'lead' role."""
        return user.profile.usertype.filter(slug__iexact="lead").exists()
    @classmethod
    def is_admin(cls, user):
        """True if the user has the 'admin' role."""
        return user.profile.usertype.filter(slug__iexact="admin").exists()
# @receiver(post_save, sender=settings.AUTH_USER_MODEL)
# def create_auth_token(sender, instance=None, created=False, **kwargs):
# if created:
# token, created = Token.objects.get_or_create(user=instance)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler: ensure every newly created User gets a Profile.

    FIX: the result of get_or_create was unpacked into unused locals, one of
    which shadowed the ``created`` signal argument; the return value is simply
    discarded now (behavior unchanged).
    """
    if created:
        # get_or_create is idempotent, so a duplicated signal is harmless.
        Profile.objects.get_or_create(user=instance)
# Register the handler; dispatch_uid guarantees it is connected only once
# even if this module is imported multiple times.
post_save.connect(
    create_user_profile, sender=User, dispatch_uid='create_user_profile')
| {
"content_hash": "0f2bd0f7849e80a55121ff0262eba4f8",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 87,
"avg_line_length": 34.76136363636363,
"alnum_prop": 0.6153971886237333,
"repo_name": "DESHRAJ/wye",
"id": "719d95af373e2ec64ec99e055fbec49be39228d3",
"size": "6118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wye/profiles/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19955"
},
{
"name": "HTML",
"bytes": "286868"
},
{
"name": "JavaScript",
"bytes": "26849"
},
{
"name": "Python",
"bytes": "125659"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
from pandasqt.compat import QtCore, QtGui, Qt, Signal, Slot
class OverlayProgressWidget(QtGui.QFrame):
def __init__(self, parent, workers=[], debug=True, margin=0):
super(OverlayProgressWidget, self).__init__(parent)
self._debug = debug
self._workers = workers
self._detailProgressBars = []
self._addedBars = 0
self._minHeight = 50
self._width = parent.width() * 0.38
self._margin = margin
self._totalProgress = 0
self.initUi()
for worker in workers:
self._addProgressBar(worker)
def initUi(self):
self.sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
self._pbHeight = 30
self.setMinimumWidth(self._width)
#self.setMaximumWidth(self._width)
self.setMinimumHeight(self._minHeight)
self.glayout = QtGui.QGridLayout(self)
self.totalProgressBar = QtGui.QProgressBar(self)
self.totalProgressBar.setMinimumHeight(self._pbHeight)
self.totalProgressBar.setMaximumHeight(self._pbHeight)
self.toggleButton = QtGui.QPushButton('Details', self)
self.toggleButton.setCheckable(True)
self.toggleButton.toggled.connect(self.showDetails)
self.glayout.addWidget(self.totalProgressBar, 0, 0, 1, 1)
self.glayout.addWidget(self.toggleButton, 0, 1, 1, 1)
#styleSheet = """.QProgressBar {
#border: none;
#border-radius: 3px;
#text-align: center;
#background-color: rgba(37, 37, 37, 50%);
#color: white;
#margin: 1px;
#border-bottom-left-radius:5px;
#border-top-left-radius:5px;
#}
#.QProgressBar::chunk {
#background-color: #05B8CC;
#border-radius: 3px;
#}
#.OverlayProgressWidget {
#background-color: white;
#}
#"""
## set stylesheet for all progressbars in this widget
#self.setStyleSheet(styleSheet)
parent = self.parent()
xAnchor = parent.width() - self._width - self._margin
yAnchor = self._margin
self.setGeometry(xAnchor, yAnchor, self._width, self._minHeight)
@Slot(bool)
def showDetails(self, toggled):
for (progressBar, label) in self._detailProgressBars:
progressBar.setVisible(toggled)
label.setVisible(toggled)
self.resizeFrame()
def _addProgressBar(self, worker):
progressBar = QtGui.QProgressBar(self)
progressBar.setMinimumHeight(self._pbHeight - 5)
progressBar.setMaximumHeight(self._pbHeight - 5)
label = QtGui.QLabel(worker.name, self)
if not self.toggleButton.isChecked():
progressBar.hide()
label.hide()
row = self._addedBars + 1
self.glayout.addWidget(progressBar, row, 0, 1, 1)
self.glayout.addWidget(label, row, 1, 1, 1)
self._addedBars += 1
self._detailProgressBars.append((progressBar, label))
worker.progressChanged.connect(progressBar.setValue)
worker.progressChanged.connect(self.calculateTotalProgress)
worker.progressChanged.connect(self.debugProgressChanged)
def debugProgressChanged(self, value):
print "debugProgressChanged", value
def addWorker(self, worker):
self._workers.append(worker)
self._addProgressBar(worker)
self.resizeFrame()
def resizeFrame(self):
size = self.glayout.sizeHint()
self.resize(size.width(), size.height())
@Slot()
def calculateTotalProgress(self):
bars = len(self._detailProgressBars)
if bars:
progress = 0
for (progressBar, label) in self._detailProgressBars:
value = progressBar.value()
progress += value
progress = progress / bars
else:
progress = 100
self.totalProgressBar.setValue(progress)
| {
"content_hash": "8d42c5feda123b523857a88d39b0de3a",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 108,
"avg_line_length": 33.37190082644628,
"alnum_prop": 0.612184249628529,
"repo_name": "datalyze-solutions/pandas-qt",
"id": "2ebce7da2ee77da6abce4abeb522023e6b935a1c",
"size": "4038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandasqt/views/OverlayProgressView.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "47855"
},
{
"name": "Python",
"bytes": "424901"
}
],
"symlink_target": ""
} |
import logging
import core
import models
from mapreduce import operation as op
from mapreduce.api.map_job import mapper
from google.appengine.ext import db
class RecreateMapper(mapper.Mapper):
    """This sets all the default values on an entity's fields.
    It's useful when we add fields.
    """
    def __call__(self, ctx, entity):
        # Re-putting the entity through the datastore re-runs model defaults,
        # populating any newly added properties.
        yield op.db.Put(entity)
class RandomizeMapper(mapper.Mapper):
    def __call__(self, ctx, triple):
        """This regenerates the 'rand' field in a triple.
        We run this periodically with a cronjob to mix up our sort order.
        """
        if not isinstance(triple, models.Triple):
            raise ValueError('value must be a Triple, got %r' % triple)
        logging.info('RandomizeMapper: %r', triple)
        if triple.enabled:
            triple.enable()  # re-enabling generates a new random ID
            logging.debug('generate_rand: id=%s rand=%.15f',
                          triple.key().id(), triple.rand)
        # Persist whether or not we touched rand (disabled triples unchanged).
        yield op.db.Put(triple)
class SetAssignmentTriple(mapper.Mapper):
    """We didn't always have the 'triple' field in Assignments.
    It's straightforward, but a bit of a pain, to derive it -- this sets it on
    the Assignments that didn't already have it.
    """
    def __call__(self, ctx, assignment):
        if not isinstance(assignment, models.Assignment):
            raise ValueError('value must be a Assignment, got %r' % assignment)
        m, f, k = (assignment.marry, assignment.fuck,
                   assignment.kill)
        logging.info('m = %s, f = %s, k = %s', m, f, k)
        # The 'marry' entity may be slot one, two or three of its Triple;
        # keep the original last-match-wins behavior.
        triple = None
        for ref in [m.triple_reference_one_set.get(),
                    m.triple_reference_two_set.get(),
                    m.triple_reference_three_set.get()]:
            if ref is not None:
                triple = ref
        # BUGFIX: previously 'triple' was never initialized, so an entity
        # with no back-references crashed with an opaque NameError; raise a
        # descriptive error instead.
        if triple is None:
            raise ValueError('No Triple references entity %r' % m)
        logging.info('triple = %s', triple)
        assignment.triple = triple
        yield op.db.Put(assignment)
class CalculateVoteCounts(mapper.Mapper):
    """Calculates vote counts and puts them directly in Triples.
    We used to always calculate them on the fly from Assignment counts, which is
    expensive.
    """
    def __call__(self, ctx, triple):
        if not isinstance(triple, models.Triple):
            raise ValueError('value must be a Triple, got %r' % triple)
        # votes[i] = [marry, fuck, kill] counts for entity i of the triple.
        votes = []
        for entity in [triple.one, triple.two, triple.three]:
            votes.append([entity.assignment_reference_marry_set.count(),
                          entity.assignment_reference_fuck_set.count(),
                          entity.assignment_reference_kill_set.count()])
        # Sanity: exactly a 3x3 matrix of counts.
        assert(len(votes) == 3)
        assert(len(votes[0]) == 3)
        assert(len(votes[1]) == 3)
        assert(len(votes[2]) == 3)
        logging.info('Calculated vote counts for Triple %s (%s) = %s', triple, triple.key(), votes)
        # This is the race -- someone else could have voted in the meantime.
        db.run_in_transaction(core._UpdateTripleVoteCounts, triple.key(), votes)
class ClearVoteCounts(mapper.Mapper):
    """Clears all calculated vote counts. For debugging."""
    def __call__(self, ctx, triple):
        if not isinstance(triple, models.Triple):
            raise ValueError('value must be a Triple, got %r' % triple)
        logging.info('Clearing vote counts for Triple %s (%s)', triple, triple.key())
        def _ClearVotes(triple_key):
            # Runs inside a transaction: re-fetch, blank all nine cached
            # counts, mark the cache invalid, and persist.
            triple = models.Triple.get(triple_key)
            (triple.votes_one_m, triple.votes_one_f, triple.votes_one_k,
             triple.votes_two_m, triple.votes_two_f, triple.votes_two_k,
             triple.votes_three_m, triple.votes_three_f, triple.votes_three_k) = (
                None, None, None, None, None, None, None, None, None)
            triple.has_cached_votes = False
            triple.put()
        db.run_in_transaction(_ClearVotes, triple.key())
| {
"content_hash": "7a894b463b777bbf159c2d8c8263b6ed",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 95,
"avg_line_length": 33.95283018867924,
"alnum_prop": 0.6540705751597666,
"repo_name": "hjfreyer/marry-fuck-kill",
"id": "861aa36446917d85190cf1afe2973b9cf26525a6",
"size": "4219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/mapreduces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19318"
},
{
"name": "JavaScript",
"bytes": "92889"
},
{
"name": "Python",
"bytes": "808529"
}
],
"symlink_target": ""
} |
"""
WSGI config for myforum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myforum.settings")
# Module-level WSGI callable discovered by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| {
"content_hash": "dc2e3869c2bde5002b5a80c295234fcd",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.4375,
"alnum_prop": 0.7698209718670077,
"repo_name": "zalax303/test_django",
"id": "437b10ae9d0c555d23227a73c698f9d5fdce2bc7",
"size": "391",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "myforum/myforum/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13600"
},
{
"name": "Python",
"bytes": "19598"
}
],
"symlink_target": ""
} |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
from smtplib import SMTP
from socket import gethostname
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
from httplib import HTTPConnection
from urlparse import urlsplit
from time import ctime, strftime
from traceback import format_exc
from website.graphite.util import getProfile
from website.graphite.logger import log
from website.graphite.account.models import MyGraph
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
def composer(request):
  """Render the main graph composer page."""
  profile = getProfile(request)
  query_string = request.GET.urlencode().replace('+', '%20')
  show_target = request.GET.get('showTarget', '')
  # 'default' is the anonymous fallback profile; hide "My Graphs" for it.
  show_my_graphs = int(profile.user.username != 'default')
  # Search is only offered when the index file is readable.
  search_enabled = int(os.access(settings.INDEX_FILE, os.R_OK))
  context = dict(
      queryString=query_string,
      showTarget=show_target,
      user=request.user,
      profile=profile,
      showMyGraphs=show_my_graphs,
      searchEnabled=search_enabled,
      debug=settings.DEBUG,
      jsdebug=settings.DEBUG,
  )
  return render_to_response("graphite/composer.html", context)
def mygraph(request):
  """Save or delete one of the logged-in user's saved graphs.

  GET params: action ('save' | 'delete'), graphName, and (for save) url.
  Returns a plain-text status string in every case.
  """
  profile = getProfile(request, allowDefault=False)
  if not profile:
    return HttpResponse( "You are not logged in!" )
  action = request.GET['action']
  graphName = request.GET['graphName']
  if not graphName:
    return HttpResponse("You must type in a graph name.")
  if action == 'save':
    url = request.GET['url']
    try:
      # Update in place if a graph with this name already exists.
      existingGraph = profile.mygraph_set.get(name=graphName)
      existingGraph.url = url
      existingGraph.save()
    except ObjectDoesNotExist:
      try:
        newGraph = MyGraph(profile=profile,name=graphName,url=url)
        newGraph.save()
      except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt; still logged and reported to the user.
        log.exception("Failed to create new MyGraph in /graphite/composer/mygraph/, graphName=%s" % graphName)
        return HttpResponse("Failed to save graph %s" % graphName)
    return HttpResponse("SAVED")
  elif action == 'delete':
    try:
      existingGraph = profile.mygraph_set.get(name=graphName)
      existingGraph.delete()
    except ObjectDoesNotExist:
      return HttpResponse("No such graph '%s'" % graphName)
    return HttpResponse("DELETED")
  else:
    return HttpResponse("Invalid operation '%s'" % action)
def send_email(request):
  """Fetch the rendered graph image at GET['url'] and e-mail it.

  GET params:
    to:  comma-separated recipient addresses
    url: graphite render URL to fetch (must return HTTP 200)
  Returns "OK" on success; on any failure the formatted traceback is
  returned in the response body (a debugging aid -- note this exposes
  internals to the caller).
  """
  try:
    recipients = request.GET['to'].split(',')
    url = request.GET['url']
    proto, server, path, query, frag = urlsplit(url)
    if query: path += '?' + query
    conn = HTTPConnection(server)
    conn.request('GET',path)
    resp = conn.getresponse()
    assert resp.status == 200, "Failed HTTP response %s %s" % (resp.status, resp.reason)
    rawData = resp.read()
    conn.close()
    # Build a multipart message: explanatory text + the PNG as attachment.
    message = MIMEMultipart()
    message['Subject'] = "Graphite Image"
    message['To'] = ', '.join(recipients)
    message['From'] = 'composer@%s' % gethostname()
    text = MIMEText( "Image generated by the following graphite URL at %s\r\n\r\n%s" % (ctime(),url) )
    image = MIMEImage( rawData )
    image.add_header('Content-Disposition', 'attachment', filename="composer_" + strftime("%b%d_%I%M%p.png"))
    message.attach(text)
    message.attach(image)
    s = SMTP(settings.SMTP_SERVER)
    s.sendmail('composer@%s' % gethostname(),recipients,message.as_string())
    s.quit()
    return HttpResponse( "OK" )
  except Exception:
    # BUGFIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt.  The traceback is still returned for debugging.
    return HttpResponse( format_exc() )
| {
"content_hash": "cf5362ce40eb4264a65088bf4cdd905e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 110,
"avg_line_length": 32.9,
"alnum_prop": 0.704660587639311,
"repo_name": "eberle1080/tesserae-ng",
"id": "a45db78b30bc59afc32c5ed370031542b3249ec6",
"size": "3948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/graphite/composer/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "12745"
},
{
"name": "HTML",
"bytes": "57268"
},
{
"name": "Java",
"bytes": "7933"
},
{
"name": "JavaScript",
"bytes": "393869"
},
{
"name": "Python",
"bytes": "415313"
},
{
"name": "Scala",
"bytes": "105423"
},
{
"name": "Shell",
"bytes": "30647"
}
],
"symlink_target": ""
} |
'''
simple shortcut for running nosetests via python
replacement for *.bat or *.sh wrappers
'''
import sys
from os.path import abspath, dirname
import nose
def run_all(argv=None):
    """Run the package test suite via nose, exiting with its status code.

    ``argv`` overrides the default nose arguments when given.
    """
    # Print a shutdown notice when the interpreter exits (legacy
    # sys.exitfunc hook, as in the original).
    def _announce_shutdown(msg='Process shutting down...'):
        sys.stderr.write(msg + '\n')
    sys.exitfunc = _announce_shutdown
    default_argv = [
        'nosetests',
        '--with-coverage', '--cover-package=ella_taggit', '--cover-erase',
        '--nocapture', '--nologcapture',
        '--verbose',
    ]
    nose.run_exit(
        argv=default_argv if argv is None else argv,
        defaultTest=abspath(dirname(__file__)),
    )
if __name__ == '__main__':
    # Allow "python run_tests.py [nose args...]" to drive the suite directly.
    run_all(sys.argv)
| {
"content_hash": "bc00369c8449dbb58feff7c1085fe4f2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.565625,
"repo_name": "ella/ella-taggit",
"id": "9d66d3c96c52a7e7515bd4873e5ca00b034dfbdd",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_ella_taggit/run_tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43332"
}
],
"symlink_target": ""
} |
"""
This is a tutorial on how to create a Bayesian network, and perform
exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cpds
if __name__ == '__main__':
    """
    This example is based on the lawn sprinkler example, and the Bayesian
    network has the following structure, with all edges directed downwards:

          Cloudy - 0
          /      \
         /        \
        /          \
    Sprinkler - 1   Rainy - 2
        \          /
         \        /
          \      /
        Wet Grass - 3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as a adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i and node j.
    """
    dag = np.zeros((nodes, nodes))
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    """
    Define the size of each node, which is the number of different values a
    node could observed at. For example, if a node is either True of False,
    it has only 2 possible values it could be, therefore its size is 2. All
    the nodes in this graph has a size 2.
    """
    node_sizes = 2 * np.ones(nodes)
    """
    We now need to assign a conditional probability distribution to each
    node.
    """
    node_cpds = [[], [], [], []]
    """Define the CPD for node 0"""
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    """Define the CPD for node 1"""
    # NOTE(review): the string above says "node 1" but this assigns the CPD
    # for R (= node 2); the next block does the converse.  Values are
    # unchanged here -- verify the intended pairing.
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    """Define the CPD for node 2"""
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    """Define the CPD for node 3"""
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Create the Bayesian network"""
    net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
    """
    Intialize the BNET's inference engine to use EXACT inference
    by setting exact=True.
    """
    net.init_inference_engine(exact=True)
    """Define observed evidence ([] means that node is unobserved)"""
    # Index order is [C, S, R, W]; the sprinkler (S) is observed in state 0.
    evidence = [None, 0, None, None]
    """Execute the sum-product algorithm"""
    net.enter_evidence(evidence)
    net.sum_product()
    """
    Print out the marginal probability of each node.
    """
    marginal = net.marginal_nodes([C])
    print 'Probability it is cloudy: ', marginal.T[1]*100, '%'
    marginal = net.marginal_nodes([S])
    print 'Probability the sprinkler is on: ', 0, '%' #Observed node
    marginal = net.marginal_nodes([R])
    print 'Probability it is raining: ',marginal.T[1]*100, '%'
    marginal = net.marginal_nodes([W])
    print 'Probability the grass is wet: ', marginal.T[1]*100, '%'
| {
"content_hash": "5574c11082377743f73b9619bed10665",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 75,
"avg_line_length": 31.398058252427184,
"alnum_prop": 0.5333951762523191,
"repo_name": "bhrzslm/uncertainty-reasoning",
"id": "0d610c33eee0c60ff37e739ea7b237b7fd0b7505",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/BNET/Inference/Exact/Tut_BNET_sumproduct.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "42"
},
{
"name": "Julia",
"bytes": "11778"
},
{
"name": "Jupyter Notebook",
"bytes": "38693"
},
{
"name": "Matlab",
"bytes": "280057"
},
{
"name": "Python",
"bytes": "403955"
},
{
"name": "Shell",
"bytes": "33"
}
],
"symlink_target": ""
} |
import json
from chargebee.model import Model
from chargebee import request
from chargebee import APIError
class Export(Model):
    """Chargebee 'export' resource: starts CSV exports and polls for status."""
    class Download(Model):
        # Metadata for the generated file once the export completes.
        fields = ["download_url", "valid_till", "mime_type"]
        pass
    fields = ["id", "operation_type", "mime_type", "status", "created_at", "download"]
    # NOTE: a duplicate zero-argument wait_for_export_completion() used to be
    # defined just above this method; it was immediately shadowed by this
    # definition (dead code) and would have raised NameError if ever
    # reachable, so it has been removed.
    def wait_for_export_completion(self, env=None, headers=None):
        """Poll retrieve() until this export leaves the 'in_process' state.

        Sleeps env.export_sleep_millis (default 10000 ms) between polls and
        raises RuntimeError after more than 50 polls.  Returns self, reloaded
        with the final server-side state.
        """
        import time
        count = 0
        # export_sleep_millis is in milliseconds; time.sleep wants seconds.
        sleep_seconds = (10000 if env is None else env.export_sleep_millis) / 1000.0
        while self.status == 'in_process':
            if count > 50:
                raise RuntimeError('Export is taking too long')
            count += 1
            time.sleep(sleep_seconds)
            self.values = Export.retrieve(self.id, env, headers).export.values
            self.load(self.values)
        return self
    @staticmethod
    def retrieve(id, env=None, headers=None):
        """Fetch a single export by id."""
        return request.send('get', request.uri_path("exports",id), None, env, headers)
    @staticmethod
    def revenue_recognition(params, env=None, headers=None):
        return request.send('post', request.uri_path("exports","revenue_recognition"), params, env, headers)
    @staticmethod
    def deferred_revenue(params, env=None, headers=None):
        return request.send('post', request.uri_path("exports","deferred_revenue"), params, env, headers)
    @staticmethod
    def plans(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","plans"), params, env, headers)
    @staticmethod
    def addons(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","addons"), params, env, headers)
    @staticmethod
    def coupons(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","coupons"), params, env, headers)
    @staticmethod
    def customers(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","customers"), params, env, headers)
    @staticmethod
    def subscriptions(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","subscriptions"), params, env, headers)
    @staticmethod
    def invoices(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","invoices"), params, env, headers)
    @staticmethod
    def credit_notes(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","credit_notes"), params, env, headers)
    @staticmethod
    def transactions(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","transactions"), params, env, headers)
    @staticmethod
    def orders(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","orders"), params, env, headers)
    @staticmethod
    def item_families(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","item_families"), params, env, headers)
    @staticmethod
    def items(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","items"), params, env, headers)
    @staticmethod
    def item_prices(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","item_prices"), params, env, headers)
    @staticmethod
    def attached_items(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","attached_items"), params, env, headers)
    @staticmethod
    def differential_prices(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("exports","differential_prices"), params, env, headers)
| {
"content_hash": "6d9ea60cd31ef81fa32e6ad85a2ded81",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 108,
"avg_line_length": 40.770833333333336,
"alnum_prop": 0.6699029126213593,
"repo_name": "chargebee/chargebee-python",
"id": "32c3be660318a620aef2cb93c78223bd51d863ef",
"size": "3914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chargebee/models/export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153072"
}
],
"symlink_target": ""
} |
# Lint as: python2, python3
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
import argparse
from builtins import bytes # pylint: disable=redefined-builtin
import json
import os
import shutil
import subprocess
def parse_branch_ref(filename):
    """Given a filename of a .git/HEAD file return ref path.

    In particular, if git is in detached head state, this will
    return None. If git is in attached head, it will return
    the branch reference. E.g. if on 'master', the HEAD will
    contain 'ref: refs/heads/master' so 'refs/heads/master'
    will be returned.

    Example: parse_branch_ref(".git/HEAD")

    Args:
      filename: file to treat as a git HEAD file
    Returns:
      None if detached head, otherwise ref subpath
    Raises:
      RuntimeError: if the HEAD file is unparseable.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(filename) as f:
        data = f.read().strip()
    items = data.split(" ")
    if len(items) == 1:
        # A bare commit hash means git is in detached-head state.
        return None
    elif len(items) == 2 and items[0] == "ref:":
        return items[1].strip()
    else:
        raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
    """Configure `src_base_path` to embed git hashes if available.

    Recreates `gen_path` from scratch and populates it with:
      * `head` / `branch_ref`: symlinks (or copies / empty dummy files)
        pointing at the repo's .git/HEAD and current-branch ref file, so the
        build system can detect version/branch changes.
      * `spec.json`: a record of whether a git repo was found, its path, and
        the branch checked out at configure time.

    Args:
      src_base_path: root of the source tree (expected to contain `.git`).
      gen_path: directory to (re)create for the generated state files.
      debug: if True, print what was generated.

    Raises:
      RuntimeError: if `gen_path` cannot be deleted or created.
    """
    # TODO(aselle): No files generated or symlinked here are deleted by
    # the build system. I don't know of a way to do it in bazel. It
    # should only be a problem if somebody moves a sandbox directory
    # without running ./configure again.
    git_path = os.path.join(src_base_path, ".git")

    # Remove and recreate the path
    if os.path.exists(gen_path):
        if os.path.isdir(gen_path):
            try:
                shutil.rmtree(gen_path)
            except OSError:
                raise RuntimeError("Cannot delete directory %s due to permission "
                                   "error, inspect and remove manually" % gen_path)
        else:
            # BUG FIX: the original passed two arguments to RuntimeError and
            # applied `%` to a string with no format specifier, which raised
            # TypeError instead of the intended error message.
            raise RuntimeError("Cannot delete non-directory %s, inspect "
                               "and remove manually" % gen_path)
    os.makedirs(gen_path)
    if not os.path.isdir(gen_path):
        raise RuntimeError("gen_git_source.py: Failed to create dir")

    # file that specifies what the state of the git repo is
    spec = {}

    # value file names will be mapped to the keys
    link_map = {"head": None, "branch_ref": None}

    if not os.path.isdir(git_path):
        # No git directory: write empty dummies so the build inputs still exist.
        spec["git"] = False
        open(os.path.join(gen_path, "head"), "w").write("")
        open(os.path.join(gen_path, "branch_ref"), "w").write("")
    else:
        # Git directory, possibly detached or attached
        spec["git"] = True
        spec["path"] = src_base_path
        git_head_path = os.path.join(git_path, "HEAD")
        spec["branch"] = parse_branch_ref(git_head_path)
        link_map["head"] = git_head_path
        if spec["branch"] is not None:
            # attached head: also track the branch's ref file
            link_map["branch_ref"] = os.path.join(git_path, *
                                                  os.path.split(spec["branch"]))

    # Create symlinks or dummy files
    for target, src in link_map.items():
        if src is None:
            open(os.path.join(gen_path, target), "w").write("")
        elif not os.path.exists(src):
            # Git repo is configured in a way we don't support such as having
            # packed refs. Even though in a git repo, tf.__git_version__ will not
            # be accurate.
            # TODO(mikecase): Support grabbing git info when using packed refs.
            open(os.path.join(gen_path, target), "w").write("")
            spec["git"] = False
        else:
            try:
                # In python 3.5, symlink function exists even on Windows. But requires
                # Windows Admin privileges, otherwise an OSError will be thrown.
                if hasattr(os, "symlink"):
                    os.symlink(src, os.path.join(gen_path, target))
                else:
                    shutil.copy2(src, os.path.join(gen_path, target))
            except OSError:
                # Fall back to a plain copy when symlinking is not permitted.
                shutil.copy2(src, os.path.join(gen_path, target))

    json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
    if debug:
        print("gen_git_source.py: list %s" % gen_path)
        # BUG FIX: the original concatenated "+" instead of formatting with
        # "%", printing a literal "%s" in the debug output.
        print("gen_git_source.py: %s" % repr(os.listdir(gen_path)))
        print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path, git_tag_override):
    """Get the git version from the repository.

    Runs `git describe --long --tags` against `git_base_path`, yielding a
    bytestring of the form:

      <base-tag>-<number of commits since tag>-<shortened sha hash>

    e.g. b'v0.10.0-1585-gbb717a6'. Returns b"unknown" when git is missing,
    the path is not a repository, or describe produces no output.

    Args:
      git_base_path: where the .git directory is located
      git_tag_override: optional replacement tag name; when given, the commit
        count is reset to "0" and the abbreviated hash is preserved. Useful
        for building a release before the git tag exists.

    Returns:
      A bytestring representing the git version
    """
    fallback = b"unknown"
    describe_cmd = [
        "git",
        str("--git-dir=%s/.git" % git_base_path),
        str("--work-tree=%s" % git_base_path),
        "describe",
        "--long",
        "--tags",
    ]
    try:
        # Force to bytes so this works on python 2 and python 3
        described = bytes(subprocess.check_output(describe_cmd).strip())
        separator = b"-"
        if git_tag_override and described:
            pieces = described.split(separator)
            if len(pieces) < 3:
                raise Exception(
                    ("Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' "
                     "but got '%s'") % described)
            # Tag names may themselves contain "-"; only the final two
            # separators are guaranteed to come from `git describe`.
            short_hash = pieces[-1]
            described = separator.join(
                [bytes(git_tag_override, "utf-8"), b"0", short_hash])
        return described if described else fallback
    except (subprocess.CalledProcessError, OSError):
        return fallback
def write_version_info(filename, git_version):
    """Write a c file that defines the version functions.

    Args:
      filename: filename to write to.
      git_version: the result of a git describe (a bytestring).
    """
    # A version string containing a quote or backslash would break the C
    # string literal below; substitute a sentinel rather than fail the build.
    if b"\"" in git_version or b"\\" in git_version:
        git_version = b"git_version_is_invalid"  # do not cause build to fail!
    header_template = """
/* Generated by gen_git_source.py */
#ifndef TENSORFLOW_CORE_UTIL_VERSION_INFO_H_
#define TENSORFLOW_CORE_UTIL_VERSION_INFO_H_
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define TF_GIT_VERSION "%s"
#ifdef _MSC_VER
#define TF_COMPILER_VERSION "MSVC " TOSTRING(_MSC_FULL_VER)
#else
#define TF_COMPILER_VERSION __VERSION__
#endif
#ifdef _GLIBCXX_USE_CXX11_ABI
#define TF_CXX11_ABI_FLAG _GLIBCXX_USE_CXX11_ABI
#else
#define TF_CXX11_ABI_FLAG 0
#endif
#ifdef TENSORFLOW_MONOLITHIC_BUILD
#define TF_MONOLITHIC_BUILD 1
#else
#define TF_MONOLITHIC_BUILD 0
#endif
#endif // TENSORFLOW_CORE_UTIL_VERSION_INFO_H_
"""
    with open(filename, "w") as out:
        out.write(header_template % git_version.decode("utf-8"))
def generate(arglist, git_tag_override=None):
    """Generate version_info.cc as given `destination_file`.

    Args:
      arglist: a sequence of four filenames:
        spec: JSON file written by configure() with keys 'git' (bool),
          'path' (source root) and 'branch' (ref of the configured branch).
        head_symlink: path to the tracked .git/HEAD file, cross-checked
          against the branch recorded in the spec.
        ref_symlink: unused here; the build system watches it for commits.
        destination_file: where version_info.cc is written.
      git_tag_override: optional tag override forwarded to get_git_version(),
        for building a release before the git tag is created.

    Raises:
      RuntimeError: if the current branch no longer matches the spec,
        meaning ./configure must be run again.
    """
    spec_file, head_file, _unused_ref, out_file = arglist
    with open(spec_file) as handle:
        state = json.load(handle)
    if state["git"]:
        # Refuse to build against stale configure state: the branch that was
        # configured must still be the one checked out.
        recorded_branch = state["branch"]
        current_branch = parse_branch_ref(head_file)
        if current_branch != recorded_branch:
            raise RuntimeError(
                "Run ./configure again, branch was '%s' but is now '%s'" %
                (recorded_branch, current_branch))
        version = get_git_version(state["path"], git_tag_override)
    else:
        version = b"unknown"
    write_version_info(out_file, version)
def raw_generate(output_file, source_dir, git_tag_override=None):
    """Simple generator used for cmake/make build systems.

    Unlike generate(), this creates no symlinks and keeps no spec state, so
    the build system must invoke it unconditionally on every build.

    Args:
      output_file: Output filename for the version info cc
      source_dir: Base path of the source code
      git_tag_override: optional tag override forwarded to get_git_version(),
        for building a release before the git tag is created.
    """
    write_version_info(output_file,
                       get_git_version(source_dir, git_tag_override))
# Command-line entry point. NOTE: this runs at import time -- the module is
# intended to be executed as a script (by bazel genrules / cmake), not imported.
parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")
parser.add_argument(
    "--debug",
    type=bool,
    help="print debugging information about paths",
    default=False)
parser.add_argument(
    "--configure", type=str,
    help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
    "--gen_root_path", type=str,
    help="Root path to place generated git files (created by --configure).")
parser.add_argument(
    "--git_tag_override", type=str,
    help="Override git tag value in the __git_version__ string. Useful when "
    "creating release builds before the release tag is created.")
parser.add_argument(
    "--generate",
    type=str,
    help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
    nargs="+")
parser.add_argument(
    "--raw_generate",
    type=str,
    help="Generate version_info.cc (simpler version used for cmake/make)")
parser.add_argument(
    "--source_dir",
    type=str,
    help="Base path of the source code (used for cmake/make)")
args = parser.parse_args()

# Dispatch on exactly one of the three mutually-exclusive modes.
if args.configure is not None:
    # --configure requires --gen_root_path to know where to put state files.
    if args.gen_root_path is None:
        raise RuntimeError("Must pass --gen_root_path arg when running --configure")
    configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
    generate(args.generate, args.git_tag_override)
elif args.raw_generate is not None:
    # cmake/make path: default the source root to the current directory.
    source_path = "."
    if args.source_dir is not None:
        source_path = args.source_dir
    raw_generate(args.raw_generate, source_path, args.git_tag_override)
else:
    raise RuntimeError("--configure or --generate or --raw_generate "
                       "must be used")
| {
"content_hash": "8d30e4d5fbfde01f1bbd3d7080915747",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 80,
"avg_line_length": 34.78425655976677,
"alnum_prop": 0.6737071494426284,
"repo_name": "Intel-Corporation/tensorflow",
"id": "1eeca809d476782e08ff8782ca80c161e3dfb895",
"size": "11931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/tools/git/gen_git_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
from django import forms
from django.core.urlresolvers import reverse
from django.conf import settings
from django.views.generic import FormView
from django.utils.translation import ugettext as _
from mailsnake import MailSnake
class SubscriptionForm(forms.Form):
    """Minimal mailing-list signup form: a display name and an email address."""
    name = forms.CharField(label=_('Name'), max_length=100)
    email = forms.EmailField(label=_('Email'))
class SubscribeView(FormView):
    """Render the signup form and push valid submissions to MailChimp.

    Django settings used:
      MAILCHIMP_KEY     -- MailChimp API key (required).
      MAILCHIMP_CONFIRM -- double opt-in flag; defaults to True.
      MAILCHIMP_LIST_ID -- target list id; when unset, the first list on
                           the account is used.
    """
    template_name = 'chimps/subscribe.html'
    form_class = SubscriptionForm

    def get_success_url(self):
        # Redirect target after a successful subscription.
        return reverse('chimps_subscribed')

    def form_valid(self, form):
        # Subscribe (or update) the address on the MailChimp list, then let
        # FormView handle the success redirect.
        ms = MailSnake(getattr(settings, 'MAILCHIMP_KEY'))
        double_optin = getattr(settings, 'MAILCHIMP_CONFIRM', True)
        list_id = getattr(settings, 'MAILCHIMP_LIST_ID', None)
        if not list_id:
            # No explicit list configured -- fall back to the account's first list.
            list_id = ms.lists()['data'][0]['id']
        ms.listSubscribe(id=list_id, email_address=form.cleaned_data['email'],
                         merge_vars={'NAME': form.cleaned_data['name']},
                         update_existing=True, double_optin=double_optin)
        return super(SubscribeView, self).form_valid(form)
| {
"content_hash": "5a8368b2bbc7d9e56a103d6b6be2c0cb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 34.696969696969695,
"alnum_prop": 0.6698689956331878,
"repo_name": "simonluijk/django-chimps",
"id": "9a9cbde0e23b0778929ed09620d85f987f8586f8",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chimps/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7998"
}
],
"symlink_target": ""
} |
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitmex(Exchange):
def describe(self):
    # Static exchange descriptor, deep-merged over the ccxt Exchange
    # defaults: identity, per-endpoint rate-limit weights, REST routes,
    # capability flags, error-message mappings and currency-code
    # normalization for BitMEX.
    return self.deep_extend(super(bitmex, self).describe(), {
        'id': 'bitmex',
        'name': 'BitMEX',
        'countries': ['SC'],  # Seychelles
        'version': 'v1',
        'userAgent': None,
        # cheapest endpoints are 10 requests per second(trading)
        # 10 per second => rateLimit = 1000ms / 10 = 100ms
        # 120 per minute => 2 per second => weight = 5(authenticated)
        # 30 per minute => 0.5 per second => weight = 20(unauthenticated)
        'rateLimit': 100,
        'pro': True,
        # unified-API capability flags for this exchange
        'has': {
            'CORS': None,
            'spot': False,
            'margin': False,
            'swap': True,
            'future': True,
            'option': False,
            'addMargin': None,
            'cancelAllOrders': True,
            'cancelOrder': True,
            'cancelOrders': True,
            'createOrder': True,
            'createReduceOnlyOrder': True,
            'editOrder': True,
            'fetchBalance': True,
            'fetchClosedOrders': True,
            'fetchDepositAddress': False,
            'fetchFundingHistory': False,
            'fetchFundingRate': False,
            'fetchFundingRateHistory': True,
            'fetchFundingRates': True,
            'fetchIndexOHLCV': False,
            'fetchLedger': True,
            'fetchLeverage': False,
            'fetchLeverageTiers': False,
            'fetchMarketLeverageTiers': False,
            'fetchMarkets': True,
            'fetchMarkOHLCV': False,
            'fetchMyTrades': True,
            'fetchOHLCV': True,
            'fetchOpenOrders': True,
            'fetchOrder': True,
            'fetchOrderBook': True,
            'fetchOrders': True,
            'fetchPosition': False,
            'fetchPositions': True,
            'fetchPositionsRisk': False,
            'fetchPremiumIndexOHLCV': False,
            'fetchTicker': True,
            'fetchTickers': True,
            'fetchTrades': True,
            'fetchTransactions': 'emulated',
            'fetchTransfer': False,
            'fetchTransfers': False,
            'reduceMargin': None,
            'setLeverage': True,
            'setMargin': None,
            'setMarginMode': True,
            'setPositionMode': False,
            'transfer': False,
            'withdraw': True,
        },
        'timeframes': {
            '1m': '1m',
            '5m': '5m',
            '1h': '1h',
            '1d': '1d',
        },
        'urls': {
            'test': {
                'public': 'https://testnet.bitmex.com',
                'private': 'https://testnet.bitmex.com',
            },
            'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
            'api': {
                'public': 'https://www.bitmex.com',
                'private': 'https://www.bitmex.com',
            },
            'www': 'https://www.bitmex.com',
            'doc': [
                'https://www.bitmex.com/app/apiOverview',
                'https://github.com/BitMEX/api-connectors/tree/master/official-http',
            ],
            'fees': 'https://www.bitmex.com/app/fees',
            'referral': 'https://www.bitmex.com/register/upZpOX',
        },
        # endpoint -> rate-limit weight (see the rateLimit math above)
        'api': {
            'public': {
                'get': {
                    'announcement': 5,
                    'announcement/urgent': 5,
                    'funding': 5,
                    'instrument': 5,
                    'instrument/active': 5,
                    'instrument/activeAndIndices': 5,
                    'instrument/activeIntervals': 5,
                    'instrument/compositeIndex': 5,
                    'instrument/indices': 5,
                    'insurance': 5,
                    'leaderboard': 5,
                    'liquidation': 5,
                    'orderBook': 5,
                    'orderBook/L2': 5,
                    'quote': 5,
                    'quote/bucketed': 5,
                    'schema': 5,
                    'schema/websocketHelp': 5,
                    'settlement': 5,
                    'stats': 5,
                    'stats/history': 5,
                    'trade': 5,
                    'trade/bucketed': 5,
                    'wallet/assets': 5,
                    'wallet/networks': 5,
                },
            },
            'private': {
                'get': {
                    'apiKey': 5,
                    'chat': 5,
                    'chat/channels': 5,
                    'chat/connected': 5,
                    'execution': 5,
                    'execution/tradeHistory': 5,
                    'notification': 5,
                    'order': 5,
                    'position': 5,
                    'user': 5,
                    'user/affiliateStatus': 5,
                    'user/checkReferralCode': 5,
                    'user/commission': 5,
                    'user/depositAddress': 5,
                    'user/executionHistory': 5,
                    'user/margin': 5,
                    'user/minWithdrawalFee': 5,
                    'user/wallet': 5,
                    'user/walletHistory': 5,
                    'user/walletSummary': 5,
                    'wallet/assets': 5,
                    'wallet/networks': 5,
                    'userEvent': 5,
                },
                'post': {
                    'apiKey': 5,
                    'apiKey/disable': 5,
                    'apiKey/enable': 5,
                    'chat': 5,
                    'order': 1,
                    'order/bulk': 5,
                    'order/cancelAllAfter': 5,
                    'order/closePosition': 5,
                    'position/isolate': 1,
                    'position/leverage': 1,
                    'position/riskLimit': 5,
                    'position/transferMargin': 1,
                    'user/cancelWithdrawal': 5,
                    'user/confirmEmail': 5,
                    'user/confirmEnableTFA': 5,
                    'user/confirmWithdrawal': 5,
                    'user/disableTFA': 5,
                    'user/logout': 5,
                    'user/logoutAll': 5,
                    'user/preferences': 5,
                    'user/requestEnableTFA': 5,
                    'user/requestWithdrawal': 5,
                },
                'put': {
                    'order': 1,
                    'order/bulk': 5,
                    'user': 5,
                },
                'delete': {
                    'apiKey': 5,
                    'order': 1,
                    'order/all': 1,
                },
            },
        },
        'exceptions': {
            # exact error-message match -> ccxt exception class
            'exact': {
                'Invalid API Key.': AuthenticationError,
                'This key is disabled.': PermissionDenied,
                'Access Denied': PermissionDenied,
                'Duplicate clOrdID': InvalidOrder,
                'orderQty is invalid': InvalidOrder,
                'Invalid price': InvalidOrder,
                'Invalid stopPx for ordType': InvalidOrder,
            },
            # substring match -> ccxt exception class
            'broad': {
                'Signature not valid': AuthenticationError,
                'overloaded': ExchangeNotAvailable,
                'Account has insufficient Available Balance': InsufficientFunds,
                'Service unavailable': ExchangeNotAvailable,  # {"error":{"message":"Service unavailable","name":"HTTPError"}}
                'Server Error': ExchangeError,  # {"error":{"message":"Server Error","name":"HTTPError"}}
                'Unable to cancel order due to existing state': InvalidOrder,
                'We require all new traders to verify': PermissionDenied,  # {"message":"We require all new traders to verify their identity before their first deposit. Please visit bitmex.com/verify to complete the process.","name":"HTTPError"}
            },
        },
        'precisionMode': TICK_SIZE,
        'options': {
            # https://blog.bitmex.com/api_announcement/deprecation-of-api-nonce-header/
            # https://github.com/ccxt/ccxt/issues/4789
            'api-expires': 5,  # in seconds
            'fetchOHLCVOpenTimestamp': True,
        },
        # BitMEX-specific currency ids -> unified ccxt currency codes
        'commonCurrencies': {
            'USDt': 'USDT',
            'XBt': 'BTC',
            'XBT': 'BTC',
            'Gwei': 'ETH',
            'GWEI': 'ETH',
        },
    })
def fetch_markets(self, params={}):
    """
    retrieves data on all markets for bitmex
    :param dict params: extra parameters specific to the exchange api endpoint
    :returns [dict]: an array of objects representing market data
    """
    # GET /instrument/activeAndIndices returns swaps, futures, prediction
    # markets and price indices in one flat list; each entry is classified
    # below by inspecting its id/expiry fields.
    response = self.publicGetInstrumentActiveAndIndices(params)
    #
    #     {
    #         "symbol": "LTCUSDT",
    #         "rootSymbol": "LTC",
    #         "state": "Open",
    #         "typ": "FFWCSX",
    #         "listing": "2021-11-10T04:00:00.000Z",
    #         "front": "2021-11-10T04:00:00.000Z",
    #         "expiry": null,
    #         "settle": null,
    #         "listedSettle": null,
    #         "relistInterval": null,
    #         "inverseLeg": "",
    #         "sellLeg": "",
    #         "buyLeg": "",
    #         "optionStrikePcnt": null,
    #         "optionStrikeRound": null,
    #         "optionStrikePrice": null,
    #         "optionMultiplier": null,
    #         "positionCurrency": "LTC",
    #         "underlying": "LTC",
    #         "quoteCurrency": "USDT",
    #         "underlyingSymbol": "LTCT=",
    #         "reference": "BMEX",
    #         "referenceSymbol": ".BLTCT",
    #         "calcInterval": null,
    #         "publishInterval": null,
    #         "publishTime": null,
    #         "maxOrderQty": 1000000000,
    #         "maxPrice": 1000000,
    #         "lotSize": 1000,
    #         "tickSize": 0.01,
    #         "multiplier": 100,
    #         "settlCurrency": "USDt",
    #         "underlyingToPositionMultiplier": 10000,
    #         "underlyingToSettleMultiplier": null,
    #         "quoteToSettleMultiplier": 1000000,
    #         "isQuanto": False,
    #         "isInverse": False,
    #         "initMargin": 0.03,
    #         "maintMargin": 0.015,
    #         "riskLimit": 1000000000000,
    #         "riskStep": 1000000000000,
    #         "limit": null,
    #         "capped": False,
    #         "taxed": True,
    #         "deleverage": True,
    #         "makerFee": -0.0001,
    #         "takerFee": 0.0005,
    #         "settlementFee": 0,
    #         "insuranceFee": 0,
    #         "fundingBaseSymbol": ".LTCBON8H",
    #         "fundingQuoteSymbol": ".USDTBON8H",
    #         "fundingPremiumSymbol": ".LTCUSDTPI8H",
    #         "fundingTimestamp": "2022-01-14T20:00:00.000Z",
    #         "fundingInterval": "2000-01-01T08:00:00.000Z",
    #         "fundingRate": 0.0001,
    #         "indicativeFundingRate": 0.0001,
    #         "rebalanceTimestamp": null,
    #         "rebalanceInterval": null,
    #         "openingTimestamp": "2022-01-14T17:00:00.000Z",
    #         "closingTimestamp": "2022-01-14T18:00:00.000Z",
    #         "sessionInterval": "2000-01-01T01:00:00.000Z",
    #         "prevClosePrice": 138.511,
    #         "limitDownPrice": null,
    #         "limitUpPrice": null,
    #         "bankruptLimitDownPrice": null,
    #         "bankruptLimitUpPrice": null,
    #         "prevTotalVolume": 12699024000,
    #         "totalVolume": 12702160000,
    #         "volume": 3136000,
    #         "volume24h": 114251000,
    #         "prevTotalTurnover": 232418052349000,
    #         "totalTurnover": 232463353260000,
    #         "turnover": 45300911000,
    #         "turnover24h": 1604331340000,
    #         "homeNotional24h": 11425.1,
    #         "foreignNotional24h": 1604331.3400000003,
    #         "prevPrice24h": 135.48,
    #         "vwap": 140.42165,
    #         "highPrice": 146.42,
    #         "lowPrice": 135.08,
    #         "lastPrice": 144.36,
    #         "lastPriceProtected": 144.36,
    #         "lastTickDirection": "MinusTick",
    #         "lastChangePcnt": 0.0655,
    #         "bidPrice": 143.75,
    #         "midPrice": 143.855,
    #         "askPrice": 143.96,
    #         "impactBidPrice": 143.75,
    #         "impactMidPrice": 143.855,
    #         "impactAskPrice": 143.96,
    #         "hasLiquidity": True,
    #         "openInterest": 38103000,
    #         "openValue": 547963053300,
    #         "fairMethod": "FundingRate",
    #         "fairBasisRate": 0.1095,
    #         "fairBasis": 0.004,
    #         "fairPrice": 143.811,
    #         "markMethod": "FairPrice",
    #         "markPrice": 143.811,
    #         "indicativeTaxRate": null,
    #         "indicativeSettlePrice": 143.807,
    #         "optionUnderlyingPrice": null,
    #         "settledPriceAdjustmentRate": null,
    #         "settledPrice": null,
    #         "timestamp": "2022-01-14T17:49:55.000Z"
    #     }
    #
    result = []
    for i in range(0, len(response)):
        market = response[i]
        id = self.safe_string(market, 'symbol')
        baseId = self.safe_string(market, 'underlying')
        quoteId = self.safe_string(market, 'quoteCurrency')
        settleId = self.safe_string(market, 'settlCurrency', '')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        settle = self.safe_currency_code(settleId)
        # a swap's id is exactly base+quote (e.g. XBTUSD); futures carry an
        # expiry suffix and indices/predictions use other naming schemes
        basequote = baseId + quoteId
        swap = (id == basequote)
        # 'positionCurrency' may be empty("", as Bitmex currently returns for ETHUSD)
        # so let's take the settlCurrency first and then adjust if needed
        type = None
        future = False
        prediction = False
        index = False
        symbol = base + '/' + quote + ':' + settle
        expiryDatetime = self.safe_string(market, 'expiry')
        expiry = self.parse8601(expiryDatetime)
        inverse = self.safe_value(market, 'isInverse')
        status = self.safe_string(market, 'state')
        active = status != 'Unlisted'
        if swap:
            type = 'swap'
        elif id.find('B_') >= 0:
            # ids prefixed with "B_" are prediction markets
            prediction = True
            type = 'prediction'
            symbol = id
        elif expiry is not None:
            future = True
            type = 'future'
            symbol = symbol + '-' + self.yymmdd(expiry)
        else:
            # no expiry and not a swap/prediction: a price index, not tradable
            index = True
            type = 'index'
            symbol = id
            active = False
        positionId = self.safe_string_2(market, 'positionCurrency', 'underlying')
        position = self.safe_currency_code(positionId)
        positionIsQuote = (position == quote)
        maxOrderQty = self.safe_number(market, 'maxOrderQty')
        contract = not index
        initMargin = self.safe_string(market, 'initMargin', '1')
        # max leverage is the reciprocal of the initial margin requirement
        maxLeverage = self.parse_number(Precise.string_div('1', initMargin))
        multiplierString = Precise.string_abs(self.safe_string(market, 'multiplier'))
        result.append({
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'settle': settle,
            'baseId': baseId,
            'quoteId': quoteId,
            'settleId': settleId,
            'type': type,
            'spot': False,
            'margin': False,
            'swap': swap,
            'future': future,
            'option': False,
            'prediction': prediction,
            'index': index,
            'active': active,
            'contract': contract,
            'linear': not inverse if contract else None,
            'inverse': inverse if contract else None,
            'taker': self.safe_number(market, 'takerFee'),
            'maker': self.safe_number(market, 'makerFee'),
            'contractSize': self.parse_number(multiplierString),
            'expiry': expiry,
            'expiryDatetime': expiryDatetime,
            'strike': self.safe_number(market, 'optionStrikePrice'),
            'optionType': None,
            'precision': {
                'amount': self.safe_number(market, 'lotSize'),
                'price': self.safe_number(market, 'tickSize'),
                'quote': self.safe_number(market, 'tickSize'),
                'base': self.safe_number(market, 'tickSize'),
            },
            'limits': {
                'leverage': {
                    'min': self.parse_number('1') if contract else None,
                    'max': maxLeverage if contract else None,
                },
                'amount': {
                    'min': None,
                    'max': None if positionIsQuote else maxOrderQty,
                },
                'price': {
                    'min': None,
                    'max': self.safe_number(market, 'maxPrice'),
                },
                'cost': {
                    'min': None,
                    'max': maxOrderQty if positionIsQuote else None,
                },
            },
            'info': market,
        })
    return result
def parse_balance(self, response):
    # Convert raw /user/margin rows into a unified ccxt balance structure.
    # Margin amounts arrive in the currency's smallest unit: satoshis for
    # XBt(BTC, 1e8 per unit) and micro-units for USDt(1e6 per unit),
    # hence the divisions below.
    #
    #     [
    #         {
    #             "account":1455728,
    #             "currency":"XBt",
    #             "riskLimit":1000000000000,
    #             "prevState":"",
    #             "state":"",
    #             "action":"",
    #             "amount":263542,
    #             "pendingCredit":0,
    #             "pendingDebit":0,
    #             "confirmedDebit":0,
    #             "prevRealisedPnl":0,
    #             "prevUnrealisedPnl":0,
    #             "grossComm":0,
    #             "grossOpenCost":0,
    #             "grossOpenPremium":0,
    #             "grossExecCost":0,
    #             "grossMarkValue":0,
    #             "riskValue":0,
    #             "taxableMargin":0,
    #             "initMargin":0,
    #             "maintMargin":0,
    #             "sessionMargin":0,
    #             "targetExcessMargin":0,
    #             "varMargin":0,
    #             "realisedPnl":0,
    #             "unrealisedPnl":0,
    #             "indicativeTax":0,
    #             "unrealisedProfit":0,
    #             "syntheticMargin":null,
    #             "walletBalance":263542,
    #             "marginBalance":263542,
    #             "marginBalancePcnt":1,
    #             "marginLeverage":0,
    #             "marginUsedPcnt":0,
    #             "excessMargin":263542,
    #             "excessMarginPcnt":1,
    #             "availableMargin":263542,
    #             "withdrawableMargin":263542,
    #             "timestamp":"2020-08-03T12:01:01.246Z",
    #             "grossLastValue":0,
    #             "commission":null
    #         }
    #     ]
    #
    result = {'info': response}
    for i in range(0, len(response)):
        balance = response[i]
        currencyId = self.safe_string(balance, 'currency')
        code = self.safe_currency_code(currencyId)
        account = self.account()
        free = self.safe_string(balance, 'availableMargin')
        total = self.safe_string(balance, 'marginBalance')
        if code != 'USDT':
            # non-USDT balances(e.g. XBt -> BTC) are denominated in satoshis
            free = Precise.string_div(free, '1e8')
            total = Precise.string_div(total, '1e8')
        else:
            # USDT balances are denominated in micro-units
            free = Precise.string_div(free, '1e6')
            total = Precise.string_div(total, '1e6')
        account['free'] = free
        account['total'] = total
        result[code] = account
    return self.safe_balance(result)
def fetch_balance(self, params={}):
    """
    query for balance and get the amount of funds available for trading or funds locked in orders
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
    """
    self.load_markets()
    # 'currency': 'all' asks GET /user/margin for one row per currency
    # instead of the default single-currency response
    request = {
        'currency': 'all',
    }
    response = self.privateGetUserMargin(self.extend(request, params))
    #
    #     [
    #         {
    #             "account":1455728,
    #             "currency":"XBt",
    #             "riskLimit":1000000000000,
    #             "prevState":"",
    #             "state":"",
    #             "action":"",
    #             "amount":263542,
    #             "pendingCredit":0,
    #             "pendingDebit":0,
    #             "confirmedDebit":0,
    #             "prevRealisedPnl":0,
    #             "prevUnrealisedPnl":0,
    #             "grossComm":0,
    #             "grossOpenCost":0,
    #             "grossOpenPremium":0,
    #             "grossExecCost":0,
    #             "grossMarkValue":0,
    #             "riskValue":0,
    #             "taxableMargin":0,
    #             "initMargin":0,
    #             "maintMargin":0,
    #             "sessionMargin":0,
    #             "targetExcessMargin":0,
    #             "varMargin":0,
    #             "realisedPnl":0,
    #             "unrealisedPnl":0,
    #             "indicativeTax":0,
    #             "unrealisedProfit":0,
    #             "syntheticMargin":null,
    #             "walletBalance":263542,
    #             "marginBalance":263542,
    #             "marginBalancePcnt":1,
    #             "marginLeverage":0,
    #             "marginUsedPcnt":0,
    #             "excessMargin":263542,
    #             "excessMarginPcnt":1,
    #             "availableMargin":263542,
    #             "withdrawableMargin":263542,
    #             "timestamp":"2020-08-03T12:01:01.246Z",
    #             "grossLastValue":0,
    #             "commission":null
    #         }
    #     ]
    #
    return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
    """
    fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
    :param str symbol: unified symbol of the market to fetch the order book for
    :param int|None limit: the maximum amount of order book entries to return
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    if limit is not None:
        request['depth'] = limit
    rows = self.publicGetOrderBookL2(self.extend(request, params))
    orderbook = {
        'symbol': symbol,
        'bids': [],
        'asks': [],
        'timestamp': None,
        'datetime': None,
        'nonce': None,
    }
    for row in rows:
        price = self.safe_number(row, 'price')
        # https://github.com/ccxt/ccxt/issues/4926
        # https://github.com/ccxt/ccxt/issues/4927
        # the exchange sometimes returns null price in the orderbook
        if price is None:
            continue
        amount = self.safe_number(row, 'size')
        bucket = 'asks' if (row['side'] == 'Sell') else 'bids'
        orderbook[bucket].append([price, amount])
    # best bid first, best ask first
    orderbook['bids'] = self.sort_by(orderbook['bids'], 0, True)
    orderbook['asks'] = self.sort_by(orderbook['asks'], 0)
    return orderbook
def fetch_order(self, id, symbol=None, params={}):
    """
    fetches information on an order made by the user
    :param str|None symbol: unified symbol of the market the order was made in
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    # Narrow fetch_orders down to the single requested order id.
    query = {
        'filter': {
            'orderID': id,
        },
    }
    matches = self.fetch_orders(symbol, None, None, self.deep_extend(query, params))
    if len(matches) == 1:
        return matches[0]
    raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """
    fetches information on multiple orders made by the user
    :param str|None symbol: unified market symbol of the market orders were made in
    :param int|None since: the earliest time in ms to fetch orders for
    :param int|None limit: the maximum number of order structures to retrieve
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    self.load_markets()
    market = None if (symbol is None) else self.market(symbol)
    request = {}
    if market is not None:
        request['symbol'] = market['id']
    if since is not None:
        request['startTime'] = self.iso8601(since)
    if limit is not None:
        request['count'] = limit
    merged = self.deep_extend(request, params)
    # why the hassle? urlencode in python is kinda broken for nested dicts.
    # E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
    # Bitmex doesn't like that. Hence resorting to self hack.
    if 'filter' in merged:
        merged['filter'] = self.json(merged['filter'])
    response = self.privateGetOrder(merged)
    return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """
    fetch all unfilled currently open orders
    :param str|None symbol: unified market symbol
    :param int|None since: the earliest time in ms to fetch open orders for
    :param int|None limit: the maximum number of open orders structures to retrieve
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    # Delegate to fetch_orders with the server-side "open" filter applied.
    openOnly = {
        'filter': {
            'open': True,
        },
    }
    return self.fetch_orders(symbol, since, limit, self.deep_extend(openOnly, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    """
    fetches information on multiple closed orders made by the user
    :param str|None symbol: unified market symbol of the market orders were made in
    :param int|None since: the earliest time in ms to fetch orders for
    :param int|None limit: the maximum number of order structures to retrieve
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    # Bitmex barfs if you set 'open': False in the filter...
    # so fetch everything and keep only the closed ones client-side.
    allOrders = self.fetch_orders(symbol, since, limit, params)
    return self.filter_by(allOrders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitmex api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'Withdrawal': 'transaction',
'RealisedPNL': 'margin',
'UnrealisedPNL': 'margin',
'Deposit': 'transaction',
'Transfer': 'transfer',
'AffiliatePayout': 'referral',
}
return self.safe_string(types, type, type)
    def parse_ledger_entry(self, item, currency=None):
        """
        convert a raw wallet-history row into a unified ledger entry
        :param dict item: a single raw entry from privateGetUserWalletHistory
        :param dict|None currency: a unified currency structure used to resolve the currency code
        :returns dict: a unified ledger entry structure
        """
        #
        # {
        #     transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
        #     account: 24321,
        #     currency: "XBt",
        #     transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
        #     amount: -1000000,
        #     fee: 300000,
        #     transactStatus: "Completed", # "Canceled", ...
        #     address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
        #     tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
        #     text: "",
        #     transactTime: "2017-03-21T20:05:14.388Z",
        #     walletBalance: 0, # balance after
        #     marginBalance: null,
        #     timestamp: "2017-03-22T13:09:23.514Z"
        # }
        #
        # ButMEX returns the unrealized pnl from the wallet history endpoint.
        # The unrealized pnl transaction has an empty timestamp.
        # It is not related to historical pnl it has status set to "Pending".
        # Therefore it's not a part of the history at all.
        # https://github.com/ccxt/ccxt/issues/6047
        #
        # {
        #     "transactID":"00000000-0000-0000-0000-000000000000",
        #     "account":121210,
        #     "currency":"XBt",
        #     "transactType":"UnrealisedPNL",
        #     "amount":-5508,
        #     "fee":0,
        #     "transactStatus":"Pending",
        #     "address":"XBTUSD",
        #     "tx":"",
        #     "text":"",
        #     "transactTime":null, # ←---------------------------- null
        #     "walletBalance":139198767,
        #     "marginBalance":139193259,
        #     "timestamp":null # ←---------------------------- null
        # }
        #
        id = self.safe_string(item, 'transactID')
        account = self.safe_string(item, 'account')
        # 'tx' is the on-chain transaction hash(may be an empty string)
        referenceId = self.safe_string(item, 'tx')
        referenceAccount = None
        type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
        currencyId = self.safe_string(item, 'currency')
        code = self.safe_currency_code(currencyId, currency)
        amount = self.safe_number(item, 'amount')
        if amount is not None:
            # raw amounts are scaled integers; divide by 1e8 to get whole units
            # (presumably satoshi -> XBT; confirm for non-BTC currencies)
            amount = amount / 100000000
        timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
        if timestamp is None:
            # https://github.com/ccxt/ccxt/issues/6047
            # set the timestamp to zero, 1970 Jan 1 00:00:00
            # for unrealized pnl and other transactions without a timestamp
            timestamp = 0  # see comments above
        # fee defaults to 0 when the field is absent
        feeCost = self.safe_number(item, 'fee', 0)
        if feeCost is not None:
            feeCost = feeCost / 100000000
        fee = {
            'cost': feeCost,
            'currency': code,
        }
        # walletBalance is the balance AFTER this entry was applied
        after = self.safe_number(item, 'walletBalance')
        if after is not None:
            after = after / 100000000
        # reconstruct the pre-entry balance from the post-balance and the signed amount
        before = self.sum(after, -amount)
        direction = None
        if amount < 0:
            direction = 'out'
            amount = abs(amount)
        else:
            direction = 'in'
        status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
        return {
            'id': id,
            'info': item,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'direction': direction,
            'account': account,
            'referenceId': referenceId,
            'referenceAccount': referenceAccount,
            'type': type,
            'currency': code,
            'amount': amount,
            'before': before,
            'after': after,
            'status': status,
            'fee': fee,
        }
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
"""
fetch the history of changes, actions done by the user or operations that altered balance of the user
:param str|None code: unified currency code, default is None
:param int|None since: timestamp in ms of the earliest ledger entry, default is None
:param int|None limit: max number of ledger entrys to return, default is None
:param dict params: extra parameters specific to the bitmex api endpoint
:returns dict: a `ledger structure <https://docs.ccxt.com/en/latest/manual.html#ledger-structure>`
"""
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
#
# [
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
# ]
#
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
"""
fetch history of deposits and withdrawals
:param str|None code: unified currency code for the currency of the transactions, default is None
:param int|None since: timestamp in ms of the earliest transaction, default is None
:param int|None limit: max number of transactions to return, default is None
:param dict params: extra parameters specific to the bitmex api endpoint
:returns dict: a list of `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
self.load_markets()
request = {
'currency': 'all',
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Canceled': 'canceled',
'Completed': 'ok',
'Pending': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# 'transactID': 'ffe699c2-95ee-4c13-91f9-0faf41daec25',
# 'account': 123456,
# 'currency': 'XBt',
# 'transactType': 'Withdrawal',
# 'amount': -100100000,
# 'fee': 100000,
# 'transactStatus': 'Completed',
# 'address': '385cR5DM96n1HvBDMzLHPYcw89fZAXULJP',
# 'tx': '3BMEXabcdefghijklmnopqrstuvwxyz123',
# 'text': '',
# 'transactTime': '2019-01-02T01:00:00.000Z',
# 'walletBalance': 99900000,
# 'marginBalance': None,
# 'timestamp': '2019-01-02T13:00:00.000Z'
# }
#
id = self.safe_string(transaction, 'transactID')
currencyId = self.safe_string(transaction, 'currency')
currency = self.safe_currency(currencyId, currency)
# For deposits, transactTime == timestamp
# For withdrawals, transactTime is submission, timestamp is processed
transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
type = self.safe_string_lower(transaction, 'transactType')
# Deposits have no from address or to address, withdrawals have both
address = None
addressFrom = None
addressTo = None
if type == 'withdrawal':
address = self.safe_string(transaction, 'address')
addressFrom = self.safe_string(transaction, 'tx')
addressTo = address
amountString = self.safe_string(transaction, 'amount')
scale = '1e8' if (currency['code'] == 'BTC') else '1e6'
amountString = Precise.string_div(Precise.string_abs(amountString), scale)
feeCostString = self.safe_string(transaction, 'fee')
feeCostString = Precise.string_div(feeCostString, scale)
fee = {
'cost': self.parse_number(feeCostString),
'currency': currency['code'],
}
status = self.safe_string(transaction, 'transactStatus')
if status is not None:
status = self.parse_transaction_status(status)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': transactTime,
'datetime': self.iso8601(transactTime),
'network': None,
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': None,
'tag': None,
'tagTo': None,
'type': type,
'amount': self.parse_number(amountString),
'currency': currency['code'],
'status': status,
'updated': timestamp,
'comment': None,
'fee': fee,
}
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitmex api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
tickers = self.fetch_tickers([market['symbol']], params)
ticker = self.safe_value(tickers, market['symbol'])
if ticker is None:
raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitmex api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
symbols = self.market_symbols(symbols)
response = self.publicGetInstrumentActiveAndIndices(params)
#
# [
# {
# "symbol":".EVOL7D",
# "rootSymbol":"EVOL",
# "state":"Unlisted",
# "typ":"MRIXXX",
# "listing":null,
# "front":null,
# "expiry":null,
# "settle":null,
# "listedSettle":null,
# "relistInterval":null,
# "inverseLeg":"",
# "sellLeg":"",
# "buyLeg":"",
# "optionStrikePcnt":null,
# "optionStrikeRound":null,
# "optionStrikePrice":null,
# "optionMultiplier":null,
# "positionCurrency":"",
# "underlying":"ETH",
# "quoteCurrency":"XXX",
# "underlyingSymbol":".EVOL7D",
# "reference":"BMI",
# "referenceSymbol":".BETHXBT",
# "calcInterval":"2000-01-08T00:00:00.000Z",
# "publishInterval":"2000-01-01T00:05:00.000Z",
# "publishTime":null,
# "maxOrderQty":null,
# "maxPrice":null,
# "lotSize":null,
# "tickSize":0.01,
# "multiplier":null,
# "settlCurrency":"",
# "underlyingToPositionMultiplier":null,
# "underlyingToSettleMultiplier":null,
# "quoteToSettleMultiplier":null,
# "isQuanto":false,
# "isInverse":false,
# "initMargin":null,
# "maintMargin":null,
# "riskLimit":null,
# "riskStep":null,
# "limit":null,
# "capped":false,
# "taxed":false,
# "deleverage":false,
# "makerFee":null,
# "takerFee":null,
# "settlementFee":null,
# "insuranceFee":null,
# "fundingBaseSymbol":"",
# "fundingQuoteSymbol":"",
# "fundingPremiumSymbol":"",
# "fundingTimestamp":null,
# "fundingInterval":null,
# "fundingRate":null,
# "indicativeFundingRate":null,
# "rebalanceTimestamp":null,
# "rebalanceInterval":null,
# "openingTimestamp":null,
# "closingTimestamp":null,
# "sessionInterval":null,
# "prevClosePrice":null,
# "limitDownPrice":null,
# "limitUpPrice":null,
# "bankruptLimitDownPrice":null,
# "bankruptLimitUpPrice":null,
# "prevTotalVolume":null,
# "totalVolume":null,
# "volume":null,
# "volume24h":null,
# "prevTotalTurnover":null,
# "totalTurnover":null,
# "turnover":null,
# "turnover24h":null,
# "homeNotional24h":null,
# "foreignNotional24h":null,
# "prevPrice24h":5.27,
# "vwap":null,
# "highPrice":null,
# "lowPrice":null,
# "lastPrice":4.72,
# "lastPriceProtected":null,
# "lastTickDirection":"ZeroMinusTick",
# "lastChangePcnt":-0.1044,
# "bidPrice":null,
# "midPrice":null,
# "askPrice":null,
# "impactBidPrice":null,
# "impactMidPrice":null,
# "impactAskPrice":null,
# "hasLiquidity":false,
# "openInterest":null,
# "openValue":0,
# "fairMethod":"",
# "fairBasisRate":null,
# "fairBasis":null,
# "fairPrice":null,
# "markMethod":"LastPrice",
# "markPrice":4.72,
# "indicativeTaxRate":null,
# "indicativeSettlePrice":null,
# "optionUnderlyingPrice":null,
# "settledPriceAdjustmentRate":null,
# "settledPrice":null,
# "timestamp":"2022-05-21T04:30:00.000Z"
# }
# ]
#
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def parse_ticker(self, ticker, market=None):
#
# { symbol: "ETHH19",
# rootSymbol: "ETH",
# state: "Open",
# typ: "FFCCSX",
# listing: "2018-12-17T04:00:00.000Z",
# front: "2019-02-22T12:00:00.000Z",
# expiry: "2019-03-29T12:00:00.000Z",
# settle: "2019-03-29T12:00:00.000Z",
# relistInterval: null,
# inverseLeg: "",
# sellLeg: "",
# buyLeg: "",
# optionStrikePcnt: null,
# optionStrikeRound: null,
# optionStrikePrice: null,
# optionMultiplier: null,
# positionCurrency: "ETH",
# underlying: "ETH",
# quoteCurrency: "XBT",
# underlyingSymbol: "ETHXBT=",
# reference: "BMEX",
# referenceSymbol: ".BETHXBT30M",
# calcInterval: null,
# publishInterval: null,
# publishTime: null,
# maxOrderQty: 100000000,
# maxPrice: 10,
# lotSize: 1,
# tickSize: 0.00001,
# multiplier: 100000000,
# settlCurrency: "XBt",
# underlyingToPositionMultiplier: 1,
# underlyingToSettleMultiplier: null,
# quoteToSettleMultiplier: 100000000,
# isQuanto: False,
# isInverse: False,
# initMargin: 0.02,
# maintMargin: 0.01,
# riskLimit: 5000000000,
# riskStep: 5000000000,
# limit: null,
# capped: False,
# taxed: True,
# deleverage: True,
# makerFee: -0.0005,
# takerFee: 0.0025,
# settlementFee: 0,
# insuranceFee: 0,
# fundingBaseSymbol: "",
# fundingQuoteSymbol: "",
# fundingPremiumSymbol: "",
# fundingTimestamp: null,
# fundingInterval: null,
# fundingRate: null,
# indicativeFundingRate: null,
# rebalanceTimestamp: null,
# rebalanceInterval: null,
# openingTimestamp: "2019-02-13T08:00:00.000Z",
# closingTimestamp: "2019-02-13T09:00:00.000Z",
# sessionInterval: "2000-01-01T01:00:00.000Z",
# prevClosePrice: 0.03347,
# limitDownPrice: null,
# limitUpPrice: null,
# bankruptLimitDownPrice: null,
# bankruptLimitUpPrice: null,
# prevTotalVolume: 1386531,
# totalVolume: 1387062,
# volume: 531,
# volume24h: 17118,
# prevTotalTurnover: 4741294246000,
# totalTurnover: 4743103466000,
# turnover: 1809220000,
# turnover24h: 57919845000,
# homeNotional24h: 17118,
# foreignNotional24h: 579.19845,
# prevPrice24h: 0.03349,
# vwap: 0.03383564,
# highPrice: 0.03458,
# lowPrice: 0.03329,
# lastPrice: 0.03406,
# lastPriceProtected: 0.03406,
# lastTickDirection: "ZeroMinusTick",
# lastChangePcnt: 0.017,
# bidPrice: 0.03406,
# midPrice: 0.034065,
# askPrice: 0.03407,
# impactBidPrice: 0.03406,
# impactMidPrice: 0.034065,
# impactAskPrice: 0.03407,
# hasLiquidity: True,
# openInterest: 83679,
# openValue: 285010674000,
# fairMethod: "ImpactMidPrice",
# fairBasisRate: 0,
# fairBasis: 0,
# fairPrice: 0.03406,
# markMethod: "FairPrice",
# markPrice: 0.03406,
# indicativeTaxRate: 0,
# indicativeSettlePrice: 0.03406,
# optionUnderlyingPrice: null,
# settledPrice: null,
# timestamp: "2019-02-13T08:40:30.000Z",
# }
#
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_string(ticker, 'prevPrice24h')
last = self.safe_string(ticker, 'lastPrice')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'highPrice'),
'low': self.safe_string(ticker, 'lowPrice'),
'bid': self.safe_string(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_string(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_string(ticker, 'foreignNotional24h'),
'info': ticker,
}, market)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "timestamp":"2015-09-25T13:38:00.000Z",
# "symbol":"XBTUSD",
# "open":237.45,
# "high":237.45,
# "low":237.45,
# "close":237.45,
# "trades":0,
# "volume":0,
# "vwap":null,
# "lastSize":null,
# "turnover":0,
# "homeNotional":0,
# "foreignNotional":0
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'timestamp')),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """
        fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
        :param str symbol: unified symbol of the market to fetch OHLCV data for
        :param str timeframe: the length of time each candle represents
        :param int|None since: timestamp in ms of the earliest candle to fetch
        :param int|None limit: the maximum amount of candles to fetch
        :param dict params: extra parameters specific to the bitmex api endpoint
        :returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
        """
        self.load_markets()
        # send JSON key/value pairs, such as {"key": "value"}
        # filter by individual fields and do advanced queries on timestamps
        # filter = {'key': 'value'}
        # send a bare series(e.g. XBU) to nearest expiring contract in that series
        # you can also send a timeframe, e.g. XBU:monthly
        # timeframes: daily, weekly, monthly, quarterly, and biquarterly
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'binSize': self.timeframes[timeframe],
            'partial': True,  # True == include yet-incomplete current bins
            # 'filter': filter,  # filter by individual fields and do advanced queries
            # 'columns': [],  # will return all columns if omitted
            # 'start': 0,  # starting point for results(wtf?)
            # 'reverse': False,  # True == newest first
            # 'endTime': '',  # ending date filter for results
        }
        if limit is not None:
            request['count'] = limit  # default 100, max 500
        # candle duration in milliseconds, used for the open/close timestamp shift below
        duration = self.parse_timeframe(timeframe) * 1000
        # when fetchOHLCVOpenTimestamp is True(the default), candles are re-stamped
        # with their open time instead of bitmex's native close time
        fetchOHLCVOpenTimestamp = self.safe_value(self.options, 'fetchOHLCVOpenTimestamp', True)
        # if since is not set, they will return candles starting from 2017-01-01
        if since is not None:
            timestamp = since
            if fetchOHLCVOpenTimestamp:
                # request one duration later, because the results get shifted back
                # by one duration after parsing(see below)
                timestamp = self.sum(timestamp, duration)
            ymdhms = self.ymdhms(timestamp)
            request['startTime'] = ymdhms  # starting date filter for results
        else:
            request['reverse'] = True
        response = self.publicGetTradeBucketed(self.extend(request, params))
        #
        # [
        #     {"timestamp":"2015-09-25T13:38:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0},
        #     {"timestamp":"2015-09-25T13:39:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0},
        #     {"timestamp":"2015-09-25T13:40:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0}
        # ]
        #
        result = self.parse_ohlcvs(response, market, timeframe, since, limit)
        if fetchOHLCVOpenTimestamp:
            # bitmex returns the candle's close timestamp - https://github.com/ccxt/ccxt/issues/4446
            # we can emulate the open timestamp by shifting all the timestamps one place
            # so the previous close becomes the current open, and we drop the first candle
            for i in range(0, len(result)):
                result[i][0] = result[i][0] - duration
        return result
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
priceString = self.safe_string_2(trade, 'avgPx', 'price')
amountString = self.safe_string_2(trade, 'size', 'lastQty')
execCost = self.safe_string(trade, 'execCost')
costString = Precise.string_div(Precise.string_abs(execCost), '1e8')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string_lower(trade, 'side')
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
fee = None
feeCostString = Precise.string_div(self.safe_string(trade, 'execComm'), '1e8')
if feeCostString is not None:
currencyId = self.safe_string(trade, 'settlCurrency')
feeCurrencyCode = self.safe_currency_code(currencyId)
feeRateString = self.safe_string(trade, 'commission')
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
'rate': feeRateString,
}
# Trade or Funding
execType = self.safe_string(trade, 'execType')
takerOrMaker = None
if feeCostString is not None and execType == 'Trade':
takerOrMaker = 'maker' if Precise.string_lt(feeCostString, '0') else 'taker'
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
type = self.safe_string_lower(trade, 'ordType')
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'cost': costString,
'amount': amountString,
'fee': fee,
}, market)
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'Day': 'Day',
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# {
# "orderID":"56222c7a-9956-413a-82cf-99f4812c214b",
# "clOrdID":"",
# "clOrdLinkID":"",
# "account":1455728,
# "symbol":"XBTUSD",
# "side":"Sell",
# "simpleOrderQty":null,
# "orderQty":1,
# "price":40000,
# "displayQty":null,
# "stopPx":null,
# "pegOffsetValue":null,
# "pegPriceType":"",
# "currency":"USD",
# "settlCurrency":"XBt",
# "ordType":"Limit",
# "timeInForce":"GoodTillCancel",
# "execInst":"",
# "contingencyType":"",
# "exDestination":"XBME",
# "ordStatus":"New",
# "triggered":"",
# "workingIndicator":true,
# "ordRejReason":"",
# "simpleLeavesQty":null,
# "leavesQty":1,
# "simpleCumQty":null,
# "cumQty":0,
# "avgPx":null,
# "multiLegReportingType":"SingleSecurity",
# "text":"Submitted via API.",
# "transactTime":"2021-01-02T21:38:49.246Z",
# "timestamp":"2021-01-02T21:38:49.246Z"
# }
#
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'orderQty')
filled = self.safe_string(order, 'cumQty')
average = self.safe_string(order, 'avgPx')
id = self.safe_string(order, 'orderID')
type = self.safe_string_lower(order, 'ordType')
side = self.safe_string_lower(order, 'side')
clientOrderId = self.safe_string(order, 'clOrdID')
timeInForce = self.parse_time_in_force(self.safe_string(order, 'timeInForce'))
stopPrice = self.safe_number(order, 'stopPx')
execInst = self.safe_string(order, 'execInst')
postOnly = None
if execInst is not None:
postOnly = (execInst == 'ParticipateDoNotInitiate')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': None,
'trades': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitmex api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
else:
# by default reverse=false, i.e. trades are fetched since the time of market inception(year 2015 for XBTUSD)
request['reverse'] = True
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# [
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# },
# {
# timestamp: '2018-08-28T00:00:03.778Z',
# symbol: 'XBTUSD',
# side: 'Sell',
# size: 1000,
# price: 6906,
# tickDirection: 'MinusTick',
# trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',
# grossValue: 14480000,
# homeNotional: 0.1448,
# foreignNotional: 1000
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float|None price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitmex api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
orderType = self.capitalize(type)
reduceOnly = self.safe_value(params, 'reduceOnly')
if reduceOnly is not None:
if (market['type'] != 'swap') and (market['type'] != 'future'):
raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + market['type'] + ' orders, reduceOnly orders are supported for swap and future markets only')
request = {
'symbol': market['id'],
'side': self.capitalize(side),
'orderQty': float(self.amount_to_precision(symbol, amount)), # lot size multiplied by the number of contracts
'ordType': orderType,
}
if reduceOnly:
request['execInst'] = 'ReduceOnly'
if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):
stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')
else:
request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))
params = self.omit(params, ['stopPx', 'stopPrice'])
if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):
request['price'] = float(self.price_to_precision(symbol, price))
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privatePostOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {}
origClOrdID = self.safe_string_2(params, 'origClOrdID', 'clientOrderId')
if origClOrdID is not None:
request['origClOrdID'] = origClOrdID
clientOrderId = self.safe_string(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['origClOrdID', 'clOrdID', 'clientOrderId'])
else:
request['orderID'] = id
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
    """
    cancels an open order
    :param str id: order id
    :param str|None symbol: not used by bitmex cancelOrder()
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    self.load_markets()
    # allow cancelling by the client-assigned id as well as the exchange id,
    # see https://github.com/ccxt/ccxt/issues/6507
    request = {}
    clientOrderId = self.safe_value_2(params, 'clOrdID', 'clientOrderId')
    if clientOrderId is not None:
        request['clOrdID'] = clientOrderId
        params = self.omit(params, ['clOrdID', 'clientOrderId'])
    else:
        request['orderID'] = id
    response = self.privateDeleteOrder(self.extend(request, params))
    order = self.safe_value(response, 0, {})
    error = self.safe_string(order, 'error')
    if (error is not None) and ('Unable to cancel order due to existing state' in error):
        raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
    return self.parse_order(order)
def cancel_orders(self, ids, symbol=None, params={}):
    """
    cancel multiple orders
    :param [str] ids: order ids
    :param str|None symbol: not used by bitmex cancelOrders()
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: an list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    self.load_markets()
    # the endpoint accepts client-assigned ids as an alternative,
    # see https://github.com/ccxt/ccxt/issues/6507
    request = {}
    clientOrderId = self.safe_value_2(params, 'clOrdID', 'clientOrderId')
    if clientOrderId is not None:
        request['clOrdID'] = clientOrderId
        params = self.omit(params, ['clOrdID', 'clientOrderId'])
    else:
        request['orderID'] = ids
    response = self.privateDeleteOrder(self.extend(request, params))
    return self.parse_orders(response)
def cancel_all_orders(self, symbol=None, params={}):
    """
    cancel all open orders
    :param str|None symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    self.load_markets()
    market = None if (symbol is None) else self.market(symbol)
    request = {}
    if market is not None:
        request['symbol'] = market['id']
    # DELETE /order/all returns the list of cancelled raw order dicts,
    # e.g. [{"orderID": ..., "clOrdID": ..., "symbol": ..., "side": ...,
    #        "orderQty": ..., "price": ..., "ordStatus": ...,
    #        "transactTime": "2020-06-01T09:36:35.290Z", ...}]
    response = self.privateDeleteOrderAll(self.extend(request, params))
    return self.parse_orders(response, market)
def fetch_positions(self, symbols=None, params={}):
    """
    fetch all open positions
    :param [str]|None symbols: list of unified market symbols
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
    """
    self.load_markets()
    # GET /position returns a list of raw position dicts,
    # see parse_position() for the fields that are consumed
    rawPositions = self.privateGetPosition(params)
    return self.parse_positions(rawPositions, symbols)
def parse_position(self, position, market=None):
    #
    # raw position example(abridged):
    #
    #     {
    #         "account": 9371654,
    #         "symbol": "ETHUSDT",
    #         "currency": "USDt",
    #         "crossMargin": False,
    #         "leverage": 3,
    #         "currentQty": 2000,
    #         "initMargin": 0,
    #         "initMarginReq": 0.3333333333333333,
    #         "maintMargin": 13149828,
    #         "maintMarginReq": 0.01,
    #         "unrealisedPnl": 23900,
    #         "unrealisedPnlPcnt": 0.0006,
    #         "avgEntryPrice": 1963,
    #         "markPrice": 1964.195,
    #         "liquidationPrice": 1328.5,
    #         "homeNotional": 0.02,
    #         "foreignNotional": -39.2839,
    #         "timestamp": "2022-05-21T04:35:16.397Z",
    #         ...
    #     }
    #
    marketId = self.safe_string(position, 'symbol')
    market = self.safe_market(marketId, market)
    datetimeStr = self.safe_string(position, 'timestamp')
    quote = market['quote']
    # fiat/stable-quoted contracts carry the size in foreignNotional(negated),
    # all other contracts in homeNotional
    if quote == 'USDT' or quote == 'USD' or quote == 'EUR':
        notional = Precise.string_mul(self.safe_string(position, 'foreignNotional'), '-1')
    else:
        notional = self.safe_string(position, 'homeNotional')
    isCross = self.safe_value(position, 'crossMargin') is True
    contracts = self.omit_zero(self.safe_number(position, 'currentQty'))
    return {
        'info': position,
        'id': self.safe_string(position, 'account'),
        'symbol': market['symbol'],
        'timestamp': self.parse8601(datetimeStr),
        'datetime': datetimeStr,
        'hedged': None,
        'side': None,
        'contracts': self.convert_value(contracts, market),
        'contractSize': None,
        'entryPrice': self.safe_number(position, 'avgEntryPrice'),
        'markPrice': self.safe_number(position, 'markPrice'),
        'notional': notional,
        'leverage': self.safe_number(position, 'leverage'),
        'collateral': None,
        'initialMargin': self.safe_number(position, 'initMargin'),
        'initialMarginPercentage': self.safe_number(position, 'initMarginReq'),
        'maintenanceMargin': self.convert_value(self.safe_number(position, 'maintMargin'), market),
        'maintenanceMarginPercentage': self.safe_number(position, 'maintMarginReq'),
        'unrealizedPnl': self.convert_value(self.safe_number(position, 'unrealisedPnl'), market),
        'liquidationPrice': self.safe_number(position, 'liquidationPrice'),
        'marginMode': 'cross' if isCross else 'isolated',
        'marginRatio': None,
        'percentage': self.safe_number(position, 'unrealisedPnlPcnt'),
    }
def convert_value(self, value, market=None):
    # scale a raw exchange value into the market's quote-currency units -
    # BitMEX reports many amounts in satoshi(1e-8, USD/EUR markets) or
    # micro-units(1e-6, USDT markets); other quotes use the currency precision
    if value is None or market is None:
        return value
    valueStr = self.number_to_string(value)
    quote = market['quote']
    converted = None
    if quote == 'USD' or quote == 'EUR':
        converted = Precise.string_mul(valueStr, '0.00000001')
    elif quote == 'USDT':
        converted = Precise.string_mul(valueStr, '0.000001')
    elif quote is not None:
        currency = self.currency(quote)
        if currency is not None:
            converted = Precise.string_mul(valueStr, self.number_to_string(currency['precision']))
    return None if converted is None else float(converted)
def is_fiat(self, currency):
    """Return True if the given currency code is one of the supported fiat currencies."""
    return currency in ('EUR', 'PLN')
def withdraw(self, code, amount, address, tag=None, params={}):
    """
    make a withdrawal
    :param str code: unified currency code
    :param float amount: the amount to withdraw
    :param str address: the address to withdraw to
    :param str|None tag: an optional tag for the withdrawal
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
    """
    tag, params = self.handle_withdraw_tag_and_params(tag, params)
    self.check_address(address)
    self.load_markets()
    if code != 'BTC':
        # fix: corrected misspelled error message('supoprts' -> 'supports')
        raise ExchangeError(self.id + ' supports BTC withdrawals only, other currencies coming soon...')
    currency = self.currency(code)
    request = {
        'currency': 'XBt',  # temporarily
        'amount': amount,
        'address': address,
        # 'otpToken': '123456',  # requires if two-factor auth(OTP) is enabled
        # 'fee': 0.001,  # bitcoin network fee
    }
    response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
    return self.parse_transaction(response, currency)
def fetch_funding_rates(self, symbols=None, params={}):
    """
    fetch the funding rate for multiple markets
    :param [str]|None symbols: list of unified market symbols
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: a dictionary of `funding rates structures <https://docs.ccxt.com/en/latest/manual.html#funding-rates-structure>`, indexed by market symbols
    """
    self.load_markets()
    # GET /instrument/activeAndIndices returns every active instrument and
    # index; the relevant fields per entry are "symbol", "fundingTimestamp",
    # "fundingInterval", "fundingRate", "indicativeFundingRate", "markPrice",
    # "indicativeSettlePrice" and "timestamp"
    response = self.publicGetInstrumentActiveAndIndices(params)
    # funding only applies to perpetual swaps, so drop everything else
    swapEntries = []
    for entry in response:
        market = self.safe_market(self.safe_string(entry, 'symbol'))
        if self.safe_value(market, 'swap', False):
            swapEntries.append(entry)
    return self.parse_funding_rates(swapEntries, symbols)
def parse_funding_rate(self, contract, market=None):
    #
    # raw instrument example(abridged):
    #
    #     {
    #         "symbol": "LTCUSDT",
    #         "fundingTimestamp": "2022-01-14T20:00:00.000Z",
    #         "fundingInterval": "2000-01-01T08:00:00.000Z",
    #         "fundingRate": 0.0001,
    #         "indicativeFundingRate": 0.0001,
    #         "markPrice": 143.811,
    #         "indicativeSettlePrice": 143.807,
    #         "timestamp": "2022-01-14T17:49:55.000Z",
    #         ...
    #     }
    #
    datetime = self.safe_string(contract, 'timestamp')
    marketId = self.safe_string(contract, 'symbol')
    fundingDatetime = self.safe_string(contract, 'fundingTimestamp')
    return {
        'info': contract,
        'symbol': self.safe_symbol(marketId, market),
        'markPrice': self.safe_number(contract, 'markPrice'),
        'indexPrice': None,
        'interestRate': None,
        'estimatedSettlePrice': self.safe_number(contract, 'indicativeSettlePrice'),
        'timestamp': self.parse8601(datetime),
        'datetime': datetime,
        'fundingRate': self.safe_number(contract, 'fundingRate'),
        # fix: the raw field is an ISO8601 string, so it must be converted with
        # parse8601() to epoch ms(mirroring 'timestamp' above); the previous
        # iso8601() call expects an integer and produced None here
        'fundingTimestamp': self.parse8601(fundingDatetime),
        'fundingDatetime': fundingDatetime,
        'nextFundingRate': self.safe_number(contract, 'indicativeFundingRate'),
        'nextFundingTimestamp': None,
        'nextFundingDatetime': None,
        'previousFundingRate': None,
        'previousFundingTimestamp': None,
        'previousFundingDatetime': None,
    }
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetches the history of funding rates
    :param str|None symbol: unified symbol of the market to fetch the funding rate history for
    :param int|None since: timestamp in ms of the earliest funding rate to fetch
    :param int|None limit: the maximum amount of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html?#funding-rate-history-structure>` to fetch
    :param dict params: extra parameters specific to the bitmex api endpoint
    :param int|None params['until']: timestamp in ms for ending date filter
    :param bool|None params['reverse']: if True, will sort results newest first
    :param int|None params['start']: starting point for results
    :param str|None params['columns']: array of column names to fetch in info, if omitted, will return all columns
    :param str|None params['filter']: generic table filter, send json key/value pairs, such as {"key": "value"}, you can key on individual fields, and do more advanced querying on timestamps, see the `timestamp docs <https://www.bitmex.com/app/restAPI#Timestamp-Filters>` for more details
    :returns [dict]: a list of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html?#funding-rate-history-structure>`
    """
    self.load_markets()
    request = {}
    market = None
    if symbol in self.currencies:
        # a plain currency code is passed through as its currency id
        request['symbol'] = self.currency(symbol)['id']
    elif symbol is not None:
        parts = symbol.split(':')
        suffixes = ['nearest', 'daily', 'weekly', 'monthly', 'quarterly', 'biquarterly', 'perpetual']
        if (len(parts) > 1) and self.in_array(parts[1], suffixes):
            # "CODE:timeframe" symbols are rebuilt from the currency id plus the suffix
            symbol = self.currency(parts[0])['id'] + ':' + parts[1]
            request['symbol'] = symbol
        else:
            market = self.market(symbol)
            request['symbol'] = market['id']
    if since is not None:
        request['startTime'] = self.iso8601(since)
    if limit is not None:
        request['count'] = limit
    until = self.safe_integer_2(params, 'until', 'till')
    params = self.omit(params, ['until', 'till'])
    if until is not None:
        request['endTime'] = self.iso8601(until)
    # example response entry:
    #     {"timestamp": "2016-05-07T12:00:00.000Z", "symbol": "ETHXBT",
    #      "fundingInterval": "2000-01-02T00:00:00.000Z",
    #      "fundingRate": 0.0010890000000000001, "fundingRateDaily": 0.0010890000000000001}
    response = self.publicGetFunding(self.extend(request, params))
    return self.parse_funding_rate_histories(response, market, since, limit)
def parse_funding_rate_history(self, info, market=None):
    # raw entry example:
    #     {"timestamp": "2016-05-07T12:00:00.000Z", "symbol": "ETHXBT",
    #      "fundingInterval": "2000-01-02T00:00:00.000Z",
    #      "fundingRate": 0.0010890000000000001, "fundingRateDaily": 0.0010890000000000001}
    datetime = self.safe_string(info, 'timestamp')
    return {
        'info': info,
        'symbol': self.safe_symbol(self.safe_string(info, 'symbol'), market),
        'fundingRate': self.safe_number(info, 'fundingRate'),
        'timestamp': self.parse8601(datetime),
        'datetime': datetime,
    }
def set_leverage(self, leverage, symbol=None, params={}):
    """
    set the level of leverage for a market
    :param float leverage: the rate of leverage
    :param str symbol: unified market symbol
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: response from the exchange
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
    if leverage < 0.01 or leverage > 100:
        raise BadRequest(self.id + ' leverage should be between 0.01 and 100')
    self.load_markets()
    market = self.market(symbol)
    if market['type'] not in ('swap', 'future'):
        raise BadSymbol(self.id + ' setLeverage() supports future and swap contracts only')
    return self.privatePostPositionLeverage(self.extend({
        'symbol': market['id'],
        'leverage': leverage,
    }, params))
def set_margin_mode(self, marginMode, symbol=None, params={}):
    """
    set margin mode to 'cross' or 'isolated'
    :param str marginMode: 'cross' or 'isolated'
    :param str symbol: unified market symbol
    :param dict params: extra parameters specific to the bitmex api endpoint
    :returns dict: response from the exchange
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' setMarginMode() requires a symbol argument')
    marginMode = marginMode.lower()
    if marginMode not in ('isolated', 'cross'):
        raise BadRequest(self.id + ' setMarginMode() marginMode argument should be isolated or cross')
    self.load_markets()
    market = self.market(symbol)
    if market['type'] not in ('swap', 'future'):
        raise BadSymbol(self.id + ' setMarginMode() supports swap and future contracts only')
    # the endpoint toggles isolated margin: enabled=True means isolated
    return self.privatePostPositionIsolate(self.extend({
        'symbol': market['id'],
        'enabled': marginMode == 'isolated',
    }, params))
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
    # endpoints with a non-default configured cost are trading endpoints;
    # unauthenticated callers pay a flat penalty cost of 20 for those
    isAuthenticated = self.check_required_credentials(False)
    cost = self.safe_value(config, 'cost', 1)
    if cost == 1:
        return cost
    return cost if isAuthenticated else 20
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
    # nothing to inspect when the body could not be decoded
    if response is None:
        return
    if code == 429:
        raise DDoSProtection(self.id + ' ' + body)
    if code < 400:
        return
    error = self.safe_value(response, 'error', {})
    message = self.safe_string(error, 'message')
    feedback = self.id + ' ' + body
    # map known error messages onto typed ccxt exceptions first
    self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
    self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
    if code == 400:
        raise BadRequest(feedback)
    raise ExchangeError(feedback)  # unknown message
def nonce(self):
    """Return the current timestamp in milliseconds."""
    return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """
    Build the url, method, body and headers for a request, attaching the
    BitMEX authentication headers(api-key, api-expires, api-signature)
    when credentials are required or available.
    """
    query = '/api/' + self.version + '/' + path
    if method == 'GET':
        if params:
            query += '?' + self.urlencode(params)
    else:
        # non-GET requests carry params in the body; '_format' is the one
        # parameter that still belongs in the query string
        format = self.safe_string(params, '_format')
        if format is not None:
            query += '?' + self.urlencode({'_format': format})
            params = self.omit(params, '_format')
    url = self.urls['api'][api] + query
    isAuthenticated = self.check_required_credentials(False)
    # public endpoints are also signed when credentials are configured
    if api == 'private' or (api == 'public' and isAuthenticated):
        self.check_required_credentials()
        # signature payload starts as HTTP verb + path(including query string)
        auth = method + query
        expires = self.safe_integer(self.options, 'api-expires')
        headers = {
            'Content-Type': 'application/json',
            'api-key': self.apiKey,
        }
        # 'api-expires' is a unix timestamp in seconds after which the
        # exchange rejects the request
        expires = self.sum(self.seconds(), expires)
        expires = str(expires)
        auth += expires
        headers['api-expires'] = expires
        if method == 'POST' or method == 'PUT' or method == 'DELETE':
            if params:
                # the JSON body is appended to the signature payload,
                # so it must be serialized exactly once, here
                body = self.json(params)
                auth += body
        # HMAC of(verb + path + expires + body) keyed with the api secret
        headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
| {
"content_hash": "3b8bd76cd3f2845f5878d59499bafc8e",
"timestamp": "",
"source": "github",
"line_count": 2565,
"max_line_length": 292,
"avg_line_length": 44.54307992202729,
"alnum_prop": 0.47934846349767624,
"repo_name": "ccxt/ccxt",
"id": "81342e19f3b2f5efba994b39ace16ea51814100e",
"size": "114438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ccxt/bitmex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
import os
from email.mime.image import MIMEImage
from django.conf import settings
from django.core.files.images import File
from django.core.mail import EmailMultiAlternatives, send_mail, EmailMessage
from django.core.mail.backends.base import BaseEmailBackend
from django.test import TestCase
from django.test.utils import override_settings
from ..models import Email, STATUS, PRIORITY
from ..settings import get_backend
class ErrorRaisingBackend(BaseEmailBackend):
    """
    An email backend that unconditionally fails to send.

    Used by the tests to verify that sending errors are handled correctly.
    """

    def send_messages(self, email_messages):
        raise Exception('Fake Error')
class BackendTest(TestCase):
    """Tests for the post_office email backend and its settings resolution."""

    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
    def test_email_backend(self):
        """
        Ensure that email backend properly queue email messages.
        """
        send_mail('Test', 'Message', 'from@example.com', ['to@example.com'])
        # the backend should have queued the message instead of sending it
        email = Email.objects.latest('id')
        self.assertEqual(email.subject, 'Test')
        self.assertEqual(email.status, STATUS.queued)
        self.assertEqual(email.priority, PRIORITY.medium)

    def test_email_backend_setting(self):
        """
        get_backend() falls back from the dictionary-styled POST_OFFICE
        setting, through EMAIL_BACKEND, down to the SMTP default.
        """
        # save the current settings so they can be restored afterwards
        old_email_backend = getattr(settings, 'EMAIL_BACKEND', None)
        # NOTE(review): old_post_office_backend is saved but never restored below
        old_post_office_backend = getattr(settings, 'POST_OFFICE_BACKEND', None)
        if hasattr(settings, 'EMAIL_BACKEND'):
            delattr(settings, 'EMAIL_BACKEND')
        if hasattr(settings, 'POST_OFFICE_BACKEND'):
            delattr(settings, 'POST_OFFICE_BACKEND')
        previous_settings = settings.POST_OFFICE
        delattr(settings, 'POST_OFFICE')
        # If no email backend is set, backend should default to SMTP
        self.assertEqual(get_backend(), 'django.core.mail.backends.smtp.EmailBackend')
        # If EMAIL_BACKEND is set to PostOfficeBackend, use SMTP to send by default
        setattr(settings, 'EMAIL_BACKEND', 'post_office.EmailBackend')
        self.assertEqual(get_backend(), 'django.core.mail.backends.smtp.EmailBackend')
        # If EMAIL_BACKEND is set on new dictionary-styled settings, use that
        setattr(settings, 'POST_OFFICE', {'EMAIL_BACKEND': 'test'})
        self.assertEqual(get_backend(), 'test')
        delattr(settings, 'POST_OFFICE')
        # restore the settings saved at the top of the test
        if old_email_backend:
            setattr(settings, 'EMAIL_BACKEND', old_email_backend)
        else:
            delattr(settings, 'EMAIL_BACKEND')
        setattr(settings, 'POST_OFFICE', previous_settings)

    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
    def test_sending_html_email(self):
        """
        "text/html" attachments to Email should be persisted into the database
        """
        message = EmailMultiAlternatives('subject', 'body', 'from@example.com',
                                         ['recipient@example.com'])
        message.attach_alternative('html', "text/html")
        message.send()
        email = Email.objects.latest('id')
        self.assertEqual(email.html_message, 'html')

    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
    def test_headers_sent(self):
        """
        Test that headers are correctly set on the outgoing emails.
        """
        message = EmailMessage('subject', 'body', 'from@example.com',
                               ['recipient@example.com'],
                               headers={'Reply-To': 'reply@example.com'})
        message.send()
        email = Email.objects.latest('id')
        self.assertEqual(email.headers, {'Reply-To': 'reply@example.com'})

    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
    def test_backend_attachments(self):
        # plain file attachments should be stored alongside the queued email
        message = EmailMessage('subject', 'body', 'from@example.com',
                               ['recipient@example.com'])
        message.attach('attachment.txt', b'attachment content')
        message.send()
        email = Email.objects.latest('id')
        self.assertEqual(email.attachments.count(), 1)
        self.assertEqual(email.attachments.all()[0].name, 'attachment.txt')
        self.assertEqual(email.attachments.all()[0].file.read(), b'attachment content')

    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
    def test_backend_image_attachments(self):
        # MIME image attachments should keep their name, payload and headers
        message = EmailMessage('subject', 'body', 'from@example.com',
                               ['recipient@example.com'])
        filename = os.path.join(os.path.dirname(__file__), 'static/dummy.png')
        fileobj = File(open(filename, 'rb'), name='dummy.png')
        image = MIMEImage(fileobj.read())
        image.add_header('Content-Disposition', 'inline', filename='dummy.png')
        image.add_header('Content-ID', '<{dummy.png}>')
        message.attach(image)
        message.send()
        email = Email.objects.latest('id')
        self.assertEqual(email.attachments.count(), 1)
        self.assertEqual(email.attachments.all()[0].name, 'dummy.png')
        self.assertEqual(email.attachments.all()[0].file.read(), image.get_payload().encode())
        self.assertEqual(email.attachments.all()[0].headers.get('Content-ID'), '<{dummy.png}>')
        self.assertEqual(email.attachments.all()[0].headers.get('Content-Disposition'), 'inline; filename="dummy.png"')

    @override_settings(
        EMAIL_BACKEND='post_office.EmailBackend',
        POST_OFFICE={
            'DEFAULT_PRIORITY': 'now',
            'BACKENDS': {'default': 'django.core.mail.backends.dummy.EmailBackend'}
        }
    )
    def test_default_priority_now(self):
        # If DEFAULT_PRIORITY is "now", mails should be sent right away
        send_mail('Test', 'Message', 'from1@example.com', ['to@example.com'])
        email = Email.objects.latest('id')
        self.assertEqual(email.status, STATUS.sent)
| {
"content_hash": "38dab4725f549cd260db368590078799",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 119,
"avg_line_length": 42.81617647058823,
"alnum_prop": 0.6426240769362871,
"repo_name": "jrief/django-post_office",
"id": "083b64288a43f401d44a999bd3c8f05bbde8eca6",
"size": "5823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "post_office/tests/test_backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163953"
}
],
"symlink_target": ""
} |
import pytest
from selenium.common.exceptions import (
ElementNotSelectableException,
NoSuchElementException,
UnexpectedTagNameException)
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
# Fixture descriptors for the <select> elements on formPage.html: each maps
# the element's "name" attribute to the visible text of its <option>s.
# "disabledSelect" refers to a select whose options cannot be selected.
disabledSelect = {'name': 'no-select', 'values': ['Foo']}
singleSelectValues1 = {'name': 'selectomatic', 'values': ['One', 'Two', 'Four', 'Still learning how to count, apparently']}
singleSelectValues2 = {'name': 'redirect', 'values': ['One', 'Two']}
singleSelectValuesWithSpaces = {'name': 'select_with_spaces', 'values': ['One', 'Two', 'Four', 'Still learning how to count, apparently']}
multiSelectValues1 = {'name': 'multi', 'values': ['Eggs', 'Ham', 'Sausages', 'Onion gravy']}
multiSelectValues2 = {'name': 'select_empty_multiple', 'values': ['select_1', 'select_2', 'select_3', 'select_4']}
def testSelectByIndexSingle(driver, pages):
    """Selecting each option of a single <select> by index updates the
    first selected option accordingly."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        for idx, expected in enumerate(spec['values']):
            dropdown.select_by_index(idx)
            assert dropdown.first_selected_option.text == expected
@pytest.mark.xfail_chrome
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
@pytest.mark.xfail_marionette
@pytest.mark.xfail_safari
def testSelectDisabledByIndexShouldThrowException(driver, pages):
    """Selecting by index inside a disabled <select> must raise."""
    pages.load("formPage.html")
    dropdown = Select(driver.find_element(By.NAME, disabledSelect['name']))
    with pytest.raises(ElementNotSelectableException):
        dropdown.select_by_index(1)
def testSelectByValueSingle(driver, pages):
    """Selecting by the (lower-cased) value attribute updates the first
    selected option accordingly."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        for expected in spec['values']:
            dropdown.select_by_value(expected.lower())
            assert dropdown.first_selected_option.text == expected
@pytest.mark.xfail_chrome
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
@pytest.mark.xfail_marionette
@pytest.mark.xfail_safari
def testSelectDisabledByValueShouldThrowException(driver, pages):
    """Selecting by value inside a disabled <select> must raise."""
    pages.load("formPage.html")
    dropdown = Select(driver.find_element(By.NAME, disabledSelect['name']))
    with pytest.raises(ElementNotSelectableException):
        dropdown.select_by_value('bar')
def testSelectByVisibleTextSingle(driver, pages):
    """Selecting by visible option text updates the first selected option."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        for expected in spec['values']:
            print(expected)
            dropdown.select_by_visible_text(expected)
            assert dropdown.first_selected_option.text == expected
@pytest.mark.xfail_chrome(
    reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=822')
def testSelectByVisibleTextShouldNormalizeSpaces(driver, pages):
    """Visible-text matching should collapse extra whitespace in options."""
    pages.load("formPage.html")
    for spec in [singleSelectValuesWithSpaces]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        for expected in spec['values']:
            print(expected)
            dropdown.select_by_visible_text(expected)
            assert dropdown.first_selected_option.text == expected
@pytest.mark.xfail_chrome
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
@pytest.mark.xfail_marionette
@pytest.mark.xfail_safari
def testSelectDisabledByVisibleTextShouldThrowException(driver, pages):
    """Selecting by visible text inside a disabled <select> must raise."""
    pages.load("formPage.html")
    dropdown = Select(driver.find_element(By.NAME, disabledSelect['name']))
    with pytest.raises(ElementNotSelectableException):
        dropdown.select_by_visible_text('Bar')
def testSelectByIndexMultiple(driver, pages):
    """Index-selection on multi-selects accumulates options in order."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        for idx in range(len(spec['values'])):
            dropdown.select_by_index(idx)
            chosen = dropdown.all_selected_options
            assert len(chosen) == idx + 1
            for option, expected in zip(chosen, spec['values']):
                assert option.text == expected
def testSelectByValueMultiple(driver, pages):
    """Value-selection on multi-selects accumulates options in order."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        for idx, value in enumerate(spec['values']):
            dropdown.select_by_value(value.lower())
            chosen = dropdown.all_selected_options
            assert len(chosen) == idx + 1
            for option, expected in zip(chosen, spec['values']):
                assert option.text == expected
def testSelectByVisibleTextMultiple(driver, pages):
    """Visible-text selection on multi-selects accumulates options in order."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        for idx, value in enumerate(spec['values']):
            dropdown.select_by_visible_text(value)
            chosen = dropdown.all_selected_options
            assert len(chosen) == idx + 1
            for option, expected in zip(chosen, spec['values']):
                assert option.text == expected
def testDeselectAllSingle(driver, pages):
    """deselect_all is only valid for multi-selects and must raise on
    single-selects."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NotImplementedError):
            dropdown.deselect_all()
def testDeselectAllMultiple(driver, pages):
    """deselect_all clears every selected option of a multi-select."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        assert len(dropdown.all_selected_options) == 0
def testDeselectByIndexSingle(driver, pages):
    """Deselect-by-index on a single-select must raise."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NotImplementedError):
            dropdown.deselect_by_index(0)
def testDeselectByValueSingle(driver, pages):
    """Deselect-by-value on a single-select must raise."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NotImplementedError):
            dropdown.deselect_by_value(spec['values'][0].lower())
def testDeselectByVisibleTextSingle(driver, pages):
    """Deselect-by-visible-text on a single-select must raise."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NotImplementedError):
            dropdown.deselect_by_visible_text(spec['values'][0])
def testDeselectByIndexMultiple(driver, pages):
    """Deselecting by index removes exactly the targeted options."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        for idx in (0, 1, 2, 3):
            dropdown.select_by_index(idx)
        dropdown.deselect_by_index(1)
        dropdown.deselect_by_index(3)
        remaining = dropdown.all_selected_options
        assert len(remaining) == 2
        assert remaining[0].text == spec['values'][0]
        assert remaining[1].text == spec['values'][2]
def testDeselectByValueMultiple(driver, pages):
    """Deselecting by value removes exactly the targeted options."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        for idx in (0, 1, 2, 3):
            dropdown.select_by_index(idx)
        dropdown.deselect_by_value(spec['values'][1].lower())
        dropdown.deselect_by_value(spec['values'][3].lower())
        remaining = dropdown.all_selected_options
        assert len(remaining) == 2
        assert remaining[0].text == spec['values'][0]
        assert remaining[1].text == spec['values'][2]
def testDeselectByVisibleTextMultiple(driver, pages):
    """Deselecting by visible text removes exactly the targeted options."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        dropdown.deselect_all()
        for idx in (0, 1, 2, 3):
            dropdown.select_by_index(idx)
        dropdown.deselect_by_visible_text(spec['values'][1])
        dropdown.deselect_by_visible_text(spec['values'][3])
        remaining = dropdown.all_selected_options
        assert len(remaining) == 2
        assert remaining[0].text == spec['values'][0]
        assert remaining[1].text == spec['values'][2]
def testGetOptions(driver, pages):
    """Select.options exposes every <option>, in document order."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2, multiSelectValues1, multiSelectValues2]:
        options = Select(driver.find_element(By.NAME, spec['name'])).options
        assert len(options) == len(spec['values'])
        for option, expected in zip(options, spec['values']):
            assert option.text == expected
def testGetAllSelectedOptionsSingle(driver, pages):
    """A single-select always reports exactly one selected option,
    its first one."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2, disabledSelect]:
        chosen = Select(driver.find_element(By.NAME, spec['name'])).all_selected_options
        assert len(chosen) == 1
        assert chosen[0].text == spec['values'][0]
def testGetAllSelectedOptionsMultiple(driver, pages):
    """all_selected_options reflects the pre-selected options of multi-selects.

    The 'multi' select has its first and third options pre-selected;
    the empty multi-select has none.
    """
    pages.load("formPage.html")
    opts = Select(driver.find_element(By.NAME, multiSelectValues1['name'])).all_selected_options
    assert len(opts) == 2
    # BUG FIX: the original used "assert expr, msg" (assert with a message),
    # which only checked the truthiness of .text; these are now real
    # equality assertions against the expected option texts.
    assert opts[0].text == multiSelectValues1['values'][0]
    assert opts[1].text == multiSelectValues1['values'][2]
    opts = Select(driver.find_element(By.NAME, multiSelectValues2['name'])).all_selected_options
    assert len(opts) == 0
def testGetFirstSelectedOptionSingle(driver, pages):
    """first_selected_option of a single-select is its initial option."""
    pages.load("formPage.html")
    for spec in [singleSelectValues1, singleSelectValues2]:
        first = Select(driver.find_element(By.NAME, spec['name'])).first_selected_option
        assert first.text == spec['values'][0]
def testGetFirstSelectedOptionMultiple(driver, pages):
    """first_selected_option works on a multi-select with a preselection;
    an empty multi-select has no selected options at all."""
    pages.load("formPage.html")
    first = Select(driver.find_element(By.NAME, multiSelectValues1['name'])).first_selected_option
    assert first.text == multiSelectValues1['values'][0]
    chosen = Select(driver.find_element(By.NAME, multiSelectValues2['name'])).all_selected_options
    assert len(chosen) == 0
def testRaisesExceptionForInvalidTagName(driver, pages):
    """Wrapping a non-<select> element in Select must raise."""
    pages.load("formPage.html")
    with pytest.raises(UnexpectedTagNameException):
        Select(driver.find_element(By.TAG_NAME, "div"))
def testDeselectByIndexNonExistent(driver, pages):
    """Deselecting an out-of-range index raises NoSuchElementException."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NoSuchElementException):
            dropdown.deselect_by_index(10)
def testDeselectByValueNonExistent(driver, pages):
    """Deselecting an unknown value raises NoSuchElementException."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NoSuchElementException):
            dropdown.deselect_by_value('not there')
def testDeselectByTextNonExistent(driver, pages):
    """Deselecting unknown visible text raises NoSuchElementException."""
    pages.load("formPage.html")
    for spec in [multiSelectValues1, multiSelectValues2]:
        dropdown = Select(driver.find_element(By.NAME, spec['name']))
        with pytest.raises(NoSuchElementException):
            dropdown.deselect_by_visible_text('not there')
| {
"content_hash": "ec19782bb2c6fc1ccbfa5096800921a7",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 138,
"avg_line_length": 39.50511945392491,
"alnum_prop": 0.6790496760259179,
"repo_name": "GorK-ChO/selenium",
"id": "4c287b4d3569d9b8bdf179fbd6ac3638da93bafe",
"size": "12363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/common/select_class_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "AppleScript",
"bytes": "2614"
},
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C",
"bytes": "59599"
},
{
"name": "C#",
"bytes": "2674680"
},
{
"name": "C++",
"bytes": "1845611"
},
{
"name": "CSS",
"bytes": "25162"
},
{
"name": "HTML",
"bytes": "1866100"
},
{
"name": "Java",
"bytes": "5271206"
},
{
"name": "JavaScript",
"bytes": "5084163"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "669071"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "972276"
},
{
"name": "Shell",
"bytes": "1230"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
'''
Created on Mar 18, 2014
@author: nathan
'''
from functools import wraps
# Bodies up to SHORT_SIZE bytes use the short wire form (single length
# prefix); longer bodies are streamed in chunks of at most CHUNK_SIZE bytes.
SHORT_SIZE = 99
CHUNK_SIZE = 999
def make_sender_line(name):
    '''
    Build the ASCII-encoded "FROM <name>" header line for a message.
    '''
    return ('FROM %s\n' % name).encode('ascii')
def make_body(*body):
    '''
    Encode one or more strings into a length-prefixed message body.

    Short bodies (<= SHORT_SIZE bytes) are emitted as a decimal length
    line followed by the payload. Longer bodies are emitted as a series
    of "C<len>" chunk headers and chunks, terminated by a "C0" marker.
    '''
    payload = ''.join(body).encode('ascii')
    if len(payload) <= SHORT_SIZE:
        yield str(len(payload)).encode('ascii') + b'\n'
        yield payload
        return
    # memoryview slices avoid copying each chunk of the payload.
    view = memoryview(payload)
    for start in range(0, len(view), CHUNK_SIZE):
        chunk = view[start:start + CHUNK_SIZE]
        yield 'C{size}\n'.format(size=len(chunk)).encode('ascii')
        yield chunk
    yield b'C0\n'
def assemble_full_body(sender_line, body_parts):
    '''
    Prefix an already-encoded message body with its sender line.

    Performs no formatting of its own; designed to combine the outputs of
    make_sender_line and make_body.
    '''
    yield sender_line
    yield from body_parts
def prepare_full_body(name, body_parts):
    '''
    Attach a sender line for *name* to already-encoded body parts.

    Designed to be used together with just make_body.
    '''
    sender = make_sender_line(name)
    return assemble_full_body(sender, body_parts)
def make_full_body(name, *body):
    '''
    Encode a complete message: sender line plus length-prefixed body.
    '''
    encoded = make_body(*body)
    return prepare_full_body(name, encoded)
def consumer(generator):
    '''
    Decorator that primes a generator by advancing it to its first yield.

    Attach to generators that consume data via send(), so callers can
    send() immediately without a manual next() first.
    '''
    @wraps(generator)
    def primed(*args, **kwargs):
        gen = generator(*args, **kwargs)
        next(gen)
        return gen
    return primed
| {
"content_hash": "dc63ac41dcfdfb0587f9d4ca64eb519d",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 25.14666666666667,
"alnum_prop": 0.6261930010604454,
"repo_name": "Lucretiel/NPproject3",
"id": "75d0bcb8ae2dc6c5d7c16a2e65b1d0eec2a0928c",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "npchat/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30076"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
} |
"""Utility functions for processes.
"""
import os
import sys
import subprocess
import errno
import select
import logging
import signal
import resource
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti.utils import retry as utils_retry
from ganeti.utils import wrapper as utils_wrapper
from ganeti.utils import text as utils_text
from ganeti.utils import io as utils_io
from ganeti.utils import algo as utils_algo
#: when set to True, L{RunCmd} is disabled
_no_fork = False
# Markers describing how a child process interacted with its execution
# timeout: no timeout hit, SIGTERM sent after the timeout, or SIGKILL
# sent after the additional linger timeout also expired.
(_TIMEOUT_NONE,
 _TIMEOUT_TERM,
 _TIMEOUT_KILL) = range(3)
def DisableFork():
  """Disables the use of fork(2).

  After this call, functions that would fork (e.g. L{RunCmd}) raise
  C{errors.ProgrammerError} instead.

  """
  global _no_fork # pylint: disable=W0603
  _no_fork = True
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  # __slots__ keeps per-instance memory low; RunResult objects can be
  # created for every external command executed
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]
  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
               timeout):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    # A run failed if it was killed by a signal or returned non-zero
    self.failed = (signal_ is not None or exit_code != 0)
    # Build a human-readable failure description out of all applicable
    # termination/timeout causes
    fail_msgs = []
    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
    else:
      fail_msgs.append("unable to determine termination reason")
    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))
    if fail_msgs and self.failed:
      self.fail_reason = utils_text.CommaJoin(fail_msgs)
    else:
      self.fail_reason = None
    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)
  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr
  output = property(_GetOutput, None, None, "Return full output")
def _BuildCmdEnvironment(env, reset):
"""Builds the environment for an external program.
"""
if reset:
cmd_env = {}
else:
cmd_env = os.environ.copy()
cmd_env["LC_ALL"] = "C"
if env is not None:
cmd_env.update(env)
return cmd_env
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False, timeout=None, noclose_fds=None,
           input_fd=None, postfork_fn=None):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
      (default behaviour) or run the command interactive
  @type timeout: int
  @param timeout: If not None, timeout in seconds until child process gets
      killed
  @type noclose_fds: list
  @param noclose_fds: list of additional (fd >=3) file descriptors to leave
      open for the child process
  @type input_fd: C{file}-like object or numeric file descriptor
  @param input_fd: File descriptor for process' standard input
  @type postfork_fn: Callable receiving PID as parameter
  @param postfork_fn: Callback run after fork but before timeout
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if _no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
  # Redirecting output to a file and inheriting the terminal are
  # mutually exclusive
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")
  if not (output is None or input_fd is None):
    # The current logic in "_RunCmdFile", which is used when output is defined,
    # does not support input files (not hard to implement, though)
    raise errors.ProgrammerError("Parameters 'output' and 'input_fd' can"
                                 " not be used at the same time")
  # A string command runs through the shell; a list is exec'd directly
  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = utils_text.ShellQuoteArgs(cmd)
    shell = False
  if output:
    logging.info("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.info("RunCmd %s", strcmd)
  cmd_env = _BuildCmdEnvironment(env, reset_env)
  try:
    if output is None:
      out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
                                                     interactive, timeout,
                                                     noclose_fds, input_fd,
                                                     postfork_fn=postfork_fn)
    else:
      if postfork_fn:
        raise errors.ProgrammerError("postfork_fn is not supported if output"
                                     " should be captured")
      assert input_fd is None
      timeout_action = _TIMEOUT_NONE
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd, noclose_fds)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise
  # A negative status from wait() means the child was killed by signal
  # -status; a non-negative status is a regular exit code
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status
  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @param cwd: the directory to which to chdir
  @param umask: the umask to setup

  """
  os.chdir(cwd)
  os.umask(umask)
  # Become a session leader, detaching from the controlling terminal
  os.setsid()
def SetupDaemonFDs(output_file, output_fd):
  """Setups up a daemon's file descriptors.

  Redirects stdin to /dev/null and stdout/stderr either to the given
  file, the given file descriptor, or /dev/null if neither is set.

  @param output_file: if not None, the file to which to redirect
      stdout/stderr
  @param output_fd: if not None, the file descriptor for stdout/stderr

  """
  # check that at most one is defined
  assert [output_file, output_fd].count(None) >= 1
  # Open /dev/null (read-only, only for stdin)
  devnull_fd = os.open(os.devnull, os.O_RDONLY)
  output_close = True
  if output_fd is not None:
    # Caller-supplied fd: the caller keeps ownership, do not close it here
    output_close = False
  elif output_file is not None:
    # Open output file
    try:
      output_fd = os.open(output_file,
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    except EnvironmentError, err:
      raise Exception("Opening output file failed: %s" % err)
  else:
    output_fd = os.open(os.devnull, os.O_WRONLY)
  # Redirect standard I/O
  os.dup2(devnull_fd, 0)
  os.dup2(output_fd, 1)
  os.dup2(output_fd, 2)
  # The originals are no longer needed once duplicated onto 0/1/2
  if devnull_fd > 2:
    utils_wrapper.CloseFdNoError(devnull_fd)
  if output_close and output_fd > 2:
    utils_wrapper.CloseFdNoError(output_fd)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if _no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")
  # String commands are wrapped in a shell invocation
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]
  strcmd = utils_text.ShellQuoteArgs(cmd)
  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)
  cmd_env = _BuildCmdEnvironment(env, False)
  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable=W0212
        finally:
          utils_wrapper.CloseFdNoError(errpipe_write)
        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message
        errormsg = utils_wrapper.RetryOnSignal(os.read, errpipe_read,
                                               100 * 1024)
      finally:
        utils_wrapper.CloseFdNoError(errpipe_read)
    finally:
      utils_wrapper.CloseFdNoError(pidpipe_write)
    # Read up to 128 bytes for PID
    pidtext = utils_wrapper.RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    utils_wrapper.CloseFdNoError(pidpipe_read)
  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass
  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)
  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Runs in the first-fork child: detaches, forks again, sets up the
  daemon's file descriptors, reports its PID over C{pidpipe_write} and
  finally exec()s the daemon. Never returns; any failure is reported to
  the parent via C{errpipe_write} followed by C{os._exit(1)}.

  """
  try:
    # Close parent's side
    utils_wrapper.CloseFdNoError(errpipe_read)
    utils_wrapper.CloseFdNoError(pidpipe_read)
    # First child process
    SetupDaemonEnv()
    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable=W0212
    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    utils_wrapper.SetCloseOnExecFlag(errpipe_write, True)
    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]
    # Open PID file
    if pidfile:
      fd_pidfile = utils_io.WritePidFile(pidfile)
      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)
      utils_wrapper.SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None
    SetupDaemonFDs(output, fd_output)
    # Send daemon PID to parent
    utils_wrapper.RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)
    # Change working directory
    os.chdir(cwd)
    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable=W0702
      # Ignore errors in error handling
      pass
    os._exit(1) # pylint: disable=W0212
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    # No destination, nothing to report
    return
  message = err or "<unknown error>"
  utils_wrapper.RetryOnSignal(os.write, fd, message)
def _CheckIfAlive(child):
"""Raises L{utils_retry.RetryAgain} if child is still alive.
@raises utils_retry.RetryAgain: If child is still alive
"""
if child.poll() is None:
raise utils_retry.RetryAgain()
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  @param child: a C{subprocess.Popen}-like object supporting C{poll()}
  @param timeout: maximum number of seconds to wait; negative values are
      clamped to zero

  """
  try:
    utils_retry.Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout),
                      args=[child])
  except utils_retry.RetryTimeout:
    # The timeout expired while the child was still alive; callers
    # re-check child.poll() themselves
    pass
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, noclose_fds,
                input_fd, postfork_fn=None,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after the programm gets terminated
  @type noclose_fds: list
  @param noclose_fds: list of additional (fd >=3) file descriptors to leave
      open for the child process
  @type input_fd: C{file}-like object or numeric file descriptor
  @param input_fd: File descriptor for process' standard input
  @type postfork_fn: Callable receiving PID as parameter
  @param postfork_fn: Function run after fork but before timeout
  @rtype: tuple
  @return: (out, err, status, timeout_action)

  """
  poller = select.poll()
  # In interactive mode the child inherits our stdio; otherwise both
  # output streams are piped and drained via poll() below
  if interactive:
    stderr = None
    stdout = None
  else:
    stderr = subprocess.PIPE
    stdout = subprocess.PIPE
  if input_fd:
    stdin = input_fd
  elif interactive:
    stdin = None
  else:
    stdin = subprocess.PIPE
  if noclose_fds:
    preexec_fn = lambda: CloseFDs(noclose_fds)
    close_fds = False
  else:
    preexec_fn = None
    close_fds = True
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=close_fds, env=env,
                           cwd=cwd,
                           preexec_fn=preexec_fn)
  if postfork_fn:
    postfork_fn(child.pid)
  out = StringIO()
  err = StringIO()
  linger_timeout = None
  if timeout is None:
    poll_timeout = None
  else:
    # poll_timeout() returns the remaining time until the deadline
    poll_timeout = utils_algo.RunningTimeout(timeout, True).Remaining
  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))
  timeout_action = _TIMEOUT_NONE
  # subprocess: "If the stdin argument is PIPE, this attribute is a file object
  # that provides input to the child process. Otherwise, it is None."
  assert (stdin == subprocess.PIPE) ^ (child.stdin is None), \
    "subprocess' stdin did not behave as documented"
  if not interactive:
    if child.stdin is not None:
      child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      utils_wrapper.SetNonblockFlag(fd, True)
    # Drain both pipes until EOF on each; also enforces the execution
    # timeout (SIGTERM) and the subsequent linger timeout while reading
    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid,
                                                  signal.SIGTERM)
            linger_timeout = \
              utils_algo.RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None
      pollresult = utils_wrapper.RetryOnSignal(poller.poll, pt)
      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]
  if timeout is not None:
    assert callable(poll_timeout)
    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())
    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)
    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
  out = out.getvalue()
  err = err.getvalue()
  status = child.wait()
  return out, err, status, timeout_action
def _RunCmdFile(cmd, env, via_shell, output, cwd, noclose_fds):
"""Run a command and save its output to a file.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: The environment to use
@type via_shell: bool
@param via_shell: if we should run via the shell
@type output: str
@param output: the filename in which to save the output
@type cwd: string
@param cwd: the working directory for the program
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@rtype: int
@return: the exit status
"""
fh = open(output, "a")
if noclose_fds:
preexec_fn = lambda: CloseFDs(noclose_fds + [fh.fileno()])
close_fds = False
else:
preexec_fn = None
close_fds = True
try:
child = subprocess.Popen(cmd, shell=via_shell,
stderr=subprocess.STDOUT,
stdout=fh,
stdin=subprocess.PIPE,
close_fds=close_fds, env=env,
cwd=cwd,
preexec_fn=preexec_fn)
child.stdin.close()
status = child.wait()
finally:
fh.close()
return status
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  Files whose names do not match C{constants.EXT_PLUGIN_MASK} or which
  are not executable are skipped rather than run.

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []
  try:
    dir_contents = utils_io.ListVisibleFiles(dir_name)
  except OSError, err:
    # An unreadable/missing directory is not fatal; just run nothing
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr
  for relname in sorted(dir_contents):
    fname = utils_io.PathJoin(dir_name, relname)
    if not (constants.EXT_PLUGIN_MASK.match(relname) is not None and
            utils_wrapper.IsExecutable(fname)):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable=W0703
        # One failing script must not prevent running the remaining ones
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))
  return rr
def _GetProcStatusPath(pid):
"""Returns the path for a PID's proc status file.
@type pid: int
@param pid: Process ID
@rtype: string
"""
return "/proc/%d/status" % pid
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # Probe /proc/$pid/status; its absence means the process is gone
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise utils_retry.RetryAgain(err)
      raise
  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False
  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return utils_retry.Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                             args=[_GetProcStatusPath(pid)])
  except utils_retry.RetryTimeout, err:
    err.RaiseInner()
def _ParseSigsetT(sigset):
"""Parse a rendered sigset_t value.
This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
function.
@type sigset: string
@param sigset: Rendered signal set from /proc/$pid/status
@rtype: set
@return: Set of all enabled signal numbers
"""
result = set()
signum = 0
for ch in reversed(sigset):
chv = int(ch, 16)
# The following could be done in a loop, but it's easier to read and
# understand in the unrolled form
if chv & 1:
result.add(signum + 1)
if chv & 2:
result.add(signum + 2)
if chv & 4:
result.add(signum + 3)
if chv & 8:
result.add(signum + 4)
signum += 4
return result
def _GetProcStatusField(pstatus, field):
"""Retrieves a field from the contents of a proc status file.
@type pstatus: string
@param pstatus: Contents of /proc/$pid/status
@type field: string
@param field: Name of field whose value should be returned
@rtype: string
"""
for line in pstatus.splitlines():
parts = line.split(":", 1)
if len(parts) < 2 or parts[0] != field:
continue
return parts[1].strip()
return None
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  The check is based on the "SigCgt" (caught signals) field of the
  process' proc status file.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @type status_path: string or None
  @param status_path: alternative status file to read (defaults to
      /proc/$pid/status); mainly useful for testing
  @rtype: bool

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = utils_io.ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: tuple; (int, callable)
  @return: File descriptor of pipe(2) which must be closed to notify parent
      process and a callable to reopen log files

  """
  # pylint: disable=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # Classic double fork: the grandchild becomes the daemon while both
  # intermediate processes exit; only the grandchild reaches the code
  # after this if/else block.
  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork()  # Fork a second child.
    if (pid == 0):  # The second child.
      # The daemon keeps only the write end, used to signal readiness/errors
      utils_wrapper.CloseFdNoError(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0)  # Exit parent (the first child) of the second child.
  else:
    utils_wrapper.CloseFdNoError(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = utils_wrapper.RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode)  # Exit parent of the first child.

  # Only the daemon (grandchild) gets here
  reopen_fn = compat.partial(SetupDaemonFDs, logfile, None)

  # Open logs for the first time
  reopen_fn()

  return (wpipe, reopen_fn)
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
      a SIGKILL will be sent. If not positive, no such checking
      will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    # A vanished process (ESRCH) is ignored; reaping errors are ignored too
    if utils_wrapper.IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  # First attempt with the requested signal
  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    # Returns normally once the process is gone (or reaped); raises
    # RetryAgain to keep polling otherwise.
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise utils_retry.RetryAgain()

    if result_pid > 0:
      return

    raise utils_retry.RetryAgain()

  try:
    # Wait up to $timeout seconds
    utils_retry.Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except utils_retry.RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child dies from a signal or exits
      with a status other than 0/1 (e.g. after an exception in C{fn})

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      utils_wrapper.ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Fallback ceiling for the number of available file descriptors
  fallback_maxfd = 1024
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      sysconf_maxfd = os.sysconf('SC_OPEN_MAX')
      default_maxfd = sysconf_maxfd if sysconf_maxfd >= 0 else fallback_maxfd
    except OSError:
      default_maxfd = fallback_maxfd
  else:
    default_maxfd = fallback_maxfd

  # Use the hard RLIMIT_NOFILE limit unless it is unlimited
  limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if limit == resource.RLIM_INFINITY:
    limit = default_maxfd

  keep = frozenset(noclose_fds or ())

  # Close every descriptor above stderr that the caller did not exclude
  for fd in range(3, limit):
    if fd in keep:
      continue
    utils_wrapper.CloseFdNoError(fd)
| {
"content_hash": "cf5eafe3190bc6aec9d9d479888ca79b",
"timestamp": "",
"source": "github",
"line_count": 1043,
"max_line_length": 80,
"avg_line_length": 28.53307766059444,
"alnum_prop": 0.6380040322580646,
"repo_name": "apyrgio/snf-ganeti",
"id": "dccee318fb56c9e4c962b1e00d4b04c9888223ae",
"size": "31125",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable-2.10-bpo2",
"path": "lib/utils/process.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "1557814"
},
{
"name": "Python",
"bytes": "5311638"
},
{
"name": "Shell",
"bytes": "96816"
}
],
"symlink_target": ""
} |
from djangae.test import TestCase
from ..fields import Field
class fieldTestCase(TestCase):
    """Tests for Field.clean_token handling of '+' and surrounding spaces."""

    def _cleaned(self, token):
        # Helper: run a token through a fresh Field instance.
        return Field().clean_token(token)

    def test_clean_token_trailing_plus(self):
        # A trailing '+' is kept as-is.
        self.assertEqual("a+", self._cleaned("a+"))

    def test_clean_token_plus_in_word(self):
        # A '+' embedded between letters is removed.
        self.assertEqual("aa", self._cleaned("a+a"))

    def test_clean_token_only_plusses(self):
        # A token consisting solely of '+' characters is left untouched.
        self.assertEqual("++", self._cleaned("++"))

    def test_clean_token_only_spaces_strip(self):
        # An all-whitespace token collapses to the empty string.
        self.assertEqual("", self._cleaned("   "))

    def test_clean_token_strip_spaces(self):
        # Leading/trailing whitespace is stripped from real tokens.
        self.assertEqual("token", self._cleaned(" token "))
| {
"content_hash": "0ff408436ca3a2ef5becfb393c96fc04",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 49,
"avg_line_length": 30.764705882352942,
"alnum_prop": 0.607074569789675,
"repo_name": "potatolondon/djangae",
"id": "a6dabd6594bcebd4edc1375a240414b0dba1ac2d",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangae/contrib/search/tests/test_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1620"
},
{
"name": "Python",
"bytes": "414429"
}
],
"symlink_target": ""
} |
import unittest
from airflow.utils.trigger_rule import TriggerRule
class TestTriggerRule(unittest.TestCase):
    """Sanity checks for the TriggerRule enumeration helpers."""

    def test_valid_trigger_rules(self):
        known_rules = (
            TriggerRule.ALL_SUCCESS,
            TriggerRule.ALL_FAILED,
            TriggerRule.ALL_DONE,
            TriggerRule.ONE_SUCCESS,
            TriggerRule.ONE_FAILED,
            TriggerRule.NONE_FAILED,
            TriggerRule.NONE_FAILED_OR_SKIPPED,
            TriggerRule.NONE_SKIPPED,
            TriggerRule.DUMMY,
        )
        # Every known rule must be accepted by is_valid ...
        for rule in known_rules:
            self.assertTrue(TriggerRule.is_valid(rule))
        # ... and the full trigger set must contain exactly nine entries.
        self.assertEqual(len(TriggerRule.all_triggers()), 9)
| {
"content_hash": "4bdf3cad4bcd91992e847e9fbde46e33",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 81,
"avg_line_length": 50.1764705882353,
"alnum_prop": 0.7502930832356389,
"repo_name": "owlabs/incubator-airflow",
"id": "844f98c6852a542d634bf2d2ba7f0f28c2c13c76",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils/test_trigger_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
} |
"""A non-blocking, single-threaded HTTP server.
Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
This module also defines the `HTTPRequest` class which is exposed via
`tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, with_statement
import Cookie
import logging
import socket
import time
import urlparse
from tornado.escape import utf8, native_str, parse_qs_bytes
from tornado import httputil
from tornado import iostream
from tornado.netutil import TCPServer
from tornado import stack_context
from tornado.util import b, bytes_type
from zygote.util import sanitize_headers
try:
import ssl # Python 2.6+
except ImportError:
ssl = None
class HTTPServer(TCPServer):
    r"""A non-blocking, single-threaded HTTP server.

    A server is defined by a request callback that takes an HTTPRequest
    instance as an argument and writes a valid HTTP response with
    `HTTPRequest.write`. `HTTPRequest.finish` finishes the request (but does
    not necessarily close the connection in the case of HTTP/1.1 keep-alive
    requests). A simple example server that echoes back the URI you
    requested::

        import httpserver
        import ioloop

        def handle_request(request):
           message = "You requested %s\n" % request.uri
           request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
                         len(message), message))
           request.finish()

        http_server = httpserver.HTTPServer(handle_request)
        http_server.listen(8888)
        ioloop.IOLoop.instance().start()

    `HTTPServer` is a very basic connection handler. Beyond parsing the
    HTTP request body and headers, the only HTTP semantics implemented
    in `HTTPServer` is HTTP/1.1 keep-alive connections. We do not, however,
    implement chunked encoding, so the request callback must provide a
    ``Content-Length`` header or implement chunked encoding for HTTP/1.1
    requests for the server to run correctly for HTTP/1.1 clients. If
    the request handler is unable to do this, you can provide the
    ``no_keep_alive`` argument to the `HTTPServer` constructor, which will
    ensure the connection is closed on every request no matter what HTTP
    version the client is using.

    If ``xheaders`` is ``True``, we support the ``X-Real-Ip`` and ``X-Scheme``
    headers, which override the remote IP and HTTP scheme for all requests.
    These headers are useful when running Tornado behind a reverse proxy or
    load balancer.

    `HTTPServer` can serve SSL traffic with Python 2.6+ and OpenSSL.
    To make this server serve SSL traffic, send the ssl_options dictionary
    argument with the arguments required for the `ssl.wrap_socket` method,
    including "certfile" and "keyfile"::

       HTTPServer(applicaton, ssl_options={
           "certfile": os.path.join(data_dir, "mydomain.crt"),
           "keyfile": os.path.join(data_dir, "mydomain.key"),
       })

    `HTTPServer` initialization follows one of three patterns (the
    initialization methods are defined on `tornado.netutil.TCPServer`):

    1. `~tornado.netutil.TCPServer.listen`: simple single-process::

            server = HTTPServer(app)
            server.listen(8888)
            IOLoop.instance().start()

       In many cases, `tornado.web.Application.listen` can be used to avoid
       the need to explicitly create the `HTTPServer`.

    2. `~tornado.netutil.TCPServer.bind`/`~tornado.netutil.TCPServer.start`:
       simple multi-process::

            server = HTTPServer(app)
            server.bind(8888)
            server.start(0)  # Forks multiple sub-processes
            IOLoop.instance().start()

       When using this interface, an `IOLoop` must *not* be passed
       to the `HTTPServer` constructor.  `start` will always start
       the server on the default singleton `IOLoop`.

    3. `~tornado.netutil.TCPServer.add_sockets`: advanced multi-process::

            sockets = tornado.netutil.bind_sockets(8888)
            tornado.process.fork_processes(0)
            server = HTTPServer(app)
            server.add_sockets(sockets)
            IOLoop.instance().start()

       The `add_sockets` interface is more complicated, but it can be
       used with `tornado.process.fork_processes` to give you more
       flexibility in when the fork happens.  `add_sockets` can
       also be used in single-process servers if you want to create
       your listening sockets in some way other than
       `tornado.netutil.bind_sockets`.

    """
    def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
                 xheaders=False, ssl_options=None, headers_callback = None,
                 close_callback = None, **kwargs):
        # Callback invoked with an HTTPRequest for every parsed request
        self.request_callback = request_callback
        # When True, every connection is closed after a single request
        self.no_keep_alive = no_keep_alive
        # Honour X-Real-Ip / X-Scheme style headers from a fronting proxy
        self.xheaders = xheaders
        # Optional per-connection hooks, forwarded to each HTTPConnection
        self._headers_callback = headers_callback
        self._close_callback = close_callback
        TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
                           **kwargs)

    def handle_stream(self, stream, address):
        # Called by TCPServer for each accepted connection; the
        # HTTPConnection registers its own stream callbacks, which keep
        # the object alive without us holding a reference to it.
        HTTPConnection(stream, address, self.request_callback,
                       self.no_keep_alive, self.xheaders,
                       self._headers_callback, self._close_callback)
class _BadRequestException(Exception):
"""Exception class for malformed HTTP requests."""
pass
class HTTPConnection(object):
    """Handles a connection to an HTTP client, executing HTTP requests.

    We parse HTTP headers and bodies, and execute the request callback
    until the HTTP conection is closed.
    """
    def __init__(self, stream, address, request_callback, no_keep_alive=False,
                 xheaders=False, headers_callback=None, close_callback=None):
        self.stream = stream
        if self.stream.socket.family not in (socket.AF_INET, socket.AF_INET6):
            # Unix (or other) socket; fake the remote address
            address = ('0.0.0.0', 0)
        self.address = address
        self.request_callback = request_callback
        self.no_keep_alive = no_keep_alive
        self.xheaders = xheaders
        # Currently in-flight request (None between requests)
        self._request = None
        self._request_finished = False
        # Save stack context here, outside of any request. This keeps
        # contexts from one request from leaking into the next.
        self._header_callback = stack_context.wrap(self._on_headers)
        # Optional observer hooks; default to no-ops so callers can always
        # invoke them unconditionally.
        if headers_callback:
            self.on_headers = stack_context.wrap(headers_callback)
        else:
            self.on_headers = lambda *args: None
        if close_callback:
            self.on_finish = stack_context.wrap(close_callback)
        else:
            self.on_finish = lambda *args: None
        # Kick off parsing by waiting for the end of the header block
        self.stream.read_until(b("\r\n\r\n"), self._header_callback)
        self._write_callback = None

    def write(self, chunk, callback=None):
        """Writes a chunk of output to the stream."""
        assert self._request, "Request closed"
        if not self.stream.closed():
            self._write_callback = stack_context.wrap(callback)
            self.stream.write(chunk, self._on_write_complete)

    def finish(self):
        """Finishes the request."""
        assert self._request, "Request closed"
        self._request_finished = True
        # Defer the actual teardown until all buffered output is flushed
        if not self.stream.writing():
            self._finish_request()

    def _on_write_complete(self):
        # Fire the user write callback exactly once per write()
        if self._write_callback is not None:
            callback = self._write_callback
            self._write_callback = None
            callback()
        # _on_write_complete is enqueued on the IOLoop whenever the
        # IOStream's write buffer becomes empty, but it's possible for
        # another callback that runs on the IOLoop before it to
        # simultaneously write more data and finish the request. If
        # there is still data in the IOStream, a future
        # _on_write_complete will be responsible for calling
        # _finish_request.
        if self._request_finished and not self.stream.writing():
            self._finish_request()

    def _finish_request(self):
        # Decide whether to keep the connection open, implementing the
        # HTTP/1.0 and HTTP/1.1 keep-alive rules.
        if self.no_keep_alive:
            disconnect = True
        else:
            connection_header = self._request.headers.get("Connection")
            if connection_header is not None:
                connection_header = connection_header.lower()
            if self._request.supports_http_1_1():
                # HTTP/1.1 is keep-alive by default
                disconnect = connection_header == "close"
            elif ("Content-Length" in self._request.headers
                    or self._request.method in ("HEAD", "GET")):
                # HTTP/1.0 requires an explicit keep-alive request
                disconnect = connection_header != "keep-alive"
            else:
                disconnect = True
        self._request = None
        self._request_finished = False
        # Notify the close hook whether we are about to drop the connection
        self.on_finish(disconnect)
        if disconnect:
            self.stream.close()
            return
        # Keep-alive: wait for the next request's headers
        self.stream.read_until(b("\r\n\r\n"), self._header_callback)

    def _on_headers(self, data):
        try:
            data = native_str(data.decode('latin1'))
            eol = data.find("\r\n")
            start_line = data[:eol]
            try:
                method, uri, version = start_line.split(" ")
            except ValueError:
                raise _BadRequestException("Malformed HTTP request line")
            if not version.startswith("HTTP/"):
                raise _BadRequestException("Malformed HTTP version in HTTP Request-Line")
            headers = httputil.HTTPHeaders.parse(data[eol:])
            self._request = HTTPRequest(
                connection=self, method=method, uri=uri, version=version,
                headers=headers, remote_ip=self.address[0])

            content_length = headers.get("Content-Length")
            if content_length:
                content_length = int(content_length)
                if content_length > self.stream.max_buffer_size:
                    raise _BadRequestException("Content-Length too long")
                if headers.get("Expect") == "100-continue":
                    self.stream.write(b("HTTP/1.1 100 (Continue)\r\n\r\n"))
                # Body present: dispatch happens in _on_request_body.
                # NOTE(review): the on_headers hook below is skipped on this
                # path -- confirm that is intentional.
                self.stream.read_bytes(content_length, self._on_request_body)
                return

            self.on_headers(start_line, self.address[0], headers)
            self.request_callback(self._request)
        except _BadRequestException, e:
            logging.info("Malformed HTTP request from %s: %s",
                         self.address[0], e)
            self.stream.close()
            return

    def _on_request_body(self, data):
        self._request.body = data
        content_type = self._request.headers.get("Content-Type", "")
        if self._request.method in ("POST", "PUT"):
            if content_type.startswith("application/x-www-form-urlencoded"):
                # Merge body arguments into those parsed from the query string
                arguments = parse_qs_bytes(native_str(self._request.body))
                for name, values in arguments.iteritems():
                    values = [v for v in values if v]
                    if values:
                        self._request.arguments.setdefault(name, []).extend(
                            values)
            elif content_type.startswith("multipart/form-data"):
                # Locate the boundary parameter of the Content-Type header
                fields = content_type.split(";")
                for field in fields:
                    k, sep, v = field.strip().partition("=")
                    if k == "boundary" and v:
                        httputil.parse_multipart_form_data(
                            utf8(v), data,
                            self._request.arguments,
                            self._request.files)
                        break
                else:
                    logging.warning("Invalid multipart/form-data")
        self.request_callback(self._request)
class HTTPRequest(object):
    """A single HTTP request.

    All attributes are type `str` unless otherwise noted.

    .. attribute:: method

       HTTP request method, e.g. "GET" or "POST"

    .. attribute:: uri

       The requested uri.

    .. attribute:: path

       The path portion of `uri`

    .. attribute:: query

       The query portion of `uri`

    .. attribute:: version

       HTTP version specified in request, e.g. "HTTP/1.1"

    .. attribute:: headers

       `HTTPHeader` dictionary-like object for request headers.  Acts like
       a case-insensitive dictionary with additional methods for repeated
       headers.

    .. attribute:: body

       Request body, if present, as a byte string.

    .. attribute:: remote_ip

       Client's IP address as a string.  If `HTTPServer.xheaders` is set,
       will pass along the real IP address provided by a load balancer
       in the ``X-Real-Ip`` header

    .. attribute:: protocol

       The protocol used, either "http" or "https".  If `HTTPServer.xheaders`
       is set, will pass along the protocol used by a load balancer if
       reported via an ``X-Scheme`` header.

    .. attribute:: host

       The requested hostname, usually taken from the ``Host`` header.

    .. attribute:: arguments

       GET/POST arguments are available in the arguments property, which
       maps arguments names to lists of values (to support multiple values
       for individual names). Names are of type `str`, while arguments
       are byte strings.  Note that this is different from
       `RequestHandler.get_argument`, which returns argument values as
       unicode strings.

    .. attribute:: files

       File uploads are available in the files property, which maps file
       names to lists of :class:`HTTPFile`.

    .. attribute:: connection

       An HTTP request is attached to a single HTTP connection, which can
       be accessed through the "connection" attribute. Since connections
       are typically kept open in HTTP/1.1, multiple requests can be handled
       sequentially on a single connection.

    """
    def __init__(self, method, uri, version="HTTP/1.0", headers=None,
                 body=None, remote_ip=None, protocol=None, host=None,
                 files=None, connection=None):
        self.method = method
        self.uri = uri
        self.version = version
        self.headers = headers or httputil.HTTPHeaders()
        self.body = body or ""
        if connection and connection.xheaders:
            # Squid uses X-Forwarded-For, others use X-Real-Ip
            self.remote_ip = self.headers.get(
                "X-Real-Ip", self.headers.get("X-Forwarded-For", remote_ip))
            # Fall back to the socket address if the header is not an IP
            if not self._valid_ip(self.remote_ip):
                self.remote_ip = remote_ip
            # AWS uses X-Forwarded-Proto
            self.protocol = self.headers.get(
                "X-Scheme", self.headers.get("X-Forwarded-Proto", protocol))
            if self.protocol not in ("http", "https"):
                self.protocol = "http"
        else:
            self.remote_ip = remote_ip
            if protocol:
                self.protocol = protocol
            elif connection and isinstance(connection.stream,
                                           iostream.SSLIOStream):
                self.protocol = "https"
            else:
                self.protocol = "http"
        self.host = host or self.headers.get("Host") or "127.0.0.1"
        self.files = files or {}
        self.connection = connection
        # Timing for request_time(); _finish_time is set by finish()
        self._start_time = time.time()
        self._finish_time = None

        scheme, netloc, path, query, fragment = urlparse.urlsplit(native_str(uri))
        self.path = path
        self.query = query
        # Keep only names that have at least one non-empty value
        arguments = parse_qs_bytes(query)
        self.arguments = {}
        for name, values in arguments.iteritems():
            values = [v for v in values if v]
            if values:
                self.arguments[name] = values

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
        return self.version == "HTTP/1.1"

    @property
    def cookies(self):
        """A dictionary of Cookie.Morsel objects."""
        if not hasattr(self, "_cookies"):
            # Parsed lazily on first access and cached
            self._cookies = Cookie.SimpleCookie()
            if "Cookie" in self.headers:
                try:
                    self._cookies.load(
                        native_str(self.headers["Cookie"]))
                except Exception:
                    # An unparseable header yields an empty mapping
                    self._cookies = {}
        return self._cookies

    def write(self, chunk, callback=None):
        """Writes the given chunk to the response stream."""
        assert isinstance(chunk, bytes_type)
        self.connection.write(chunk, callback=callback)

    def finish(self):
        """Finishes this HTTP request on the open connection."""
        self.connection.finish()
        self._finish_time = time.time()

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            # Still in flight: report elapsed time so far
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time

    def get_ssl_certificate(self):
        """Returns the client's SSL certificate, if any.

        To use client certificates, the HTTPServer must have been constructed
        with cert_reqs set in ssl_options, e.g.::

            server = HTTPServer(app,
                ssl_options=dict(
                    certfile="foo.crt",
                    keyfile="foo.key",
                    cert_reqs=ssl.CERT_REQUIRED,
                    ca_certs="cacert.crt"))

        The return value is a dictionary, see SSLSocket.getpeercert() in
        the standard library for more details.
        http://docs.python.org/library/ssl.html#sslsocket-objects
        """
        try:
            return self.connection.stream.socket.getpeercert()
        except ssl.SSLError:
            return None

    def __repr__(self):
        attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
        args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
        # Headers go through sanitize_headers to avoid leaking secrets in logs
        return "%s(%s, headers=%s)" % (
            self.__class__.__name__, args, sanitize_headers(self.headers))

    def _valid_ip(self, ip):
        # Numeric-only resolution: succeeds iff `ip` is a literal IPv4/IPv6
        # address; EAI_NONAME means it was not.
        try:
            res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
                                     socket.SOCK_STREAM,
                                     0, socket.AI_NUMERICHOST)
            return bool(res)
        except socket.gaierror, e:
            if e.args[0] == socket.EAI_NONAME:
                return False
            raise
        return True
| {
"content_hash": "8f8d8943d73b7295846e65780aa8e0a1",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 89,
"avg_line_length": 38.99584199584199,
"alnum_prop": 0.6047342325531802,
"repo_name": "Yelp/zygote",
"id": "ed6381d8888c14e8113d2b269aa6862bb64c7fb4",
"size": "19939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zygote/_httpserver_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "715"
},
{
"name": "HTML",
"bytes": "2322"
},
{
"name": "JavaScript",
"bytes": "10620"
},
{
"name": "Makefile",
"bytes": "266"
},
{
"name": "Python",
"bytes": "118121"
}
],
"symlink_target": ""
} |
import itertools
import uuid
import numpy as np
from google.protobuf.json_format import MessageToDict
from sklearn import metrics
from sklearn.linear_model import LogisticRegression, LinearRegression
from federatedml.model_base import Metric, MetricMeta
from federatedml.evaluation.metrics.regression_metric import IC, IC_Approx
from federatedml.model_selection.stepwise.step import Step
from federatedml.statistic import data_overview
from federatedml.transfer_variable.transfer_class.stepwise_transfer_variable import StepwiseTransferVariable
from federatedml.util import consts
from federatedml.util import LOGGER
class ModelInfo(object):
    """Book-keeping record for one model trained during a stepwise search."""

    def __init__(self, n_step, n_model, score, loss, direction):
        # position of this candidate within the search
        self.n_step = n_step
        self.n_model = n_model
        # evaluation results (may be None for placeholder entries)
        self.score = score
        self.loss = loss
        self.direction = direction
        # unique identifier used as the storage key for this model
        self.uid = str(uuid.uuid1())

    def get_score(self):
        """Return the information-criterion score (may be None)."""
        return self.score

    def get_loss(self):
        """Return the recorded training loss (may be None)."""
        return self.loss

    def get_key(self):
        """Return the unique key under which this model is stored."""
        return self.uid
class HeteroStepwise(object):
def __init__(self):
self.mode = None
self.role = None
self.forward = False
self.backward = False
self.n_step = 0
self.has_test = False
self.n_count = 0
self.stop_stepwise = False
self.models = None
self.metric_namespace = "train"
self.metric_type = "STEPWISE"
self.intercept = None
self.models = {}
self.models_trained = {}
self.IC_computer = None
self.step_direction = None
self.anonymous_header_guest = None
self.anonymous_header_host = None
    def _init_model(self, param):
        """
        Configure the stepwise search from a parameter object.

        :param param: stepwise parameter object carrying mode, role,
            score_name ('aic' or 'bic'), direction ('forward', 'backward'
            or 'both'), max_step, and the variable-count bounds
            nvmin/nvmax.
        """
        self.model_param = param
        self.mode = param.mode
        self.role = param.role
        self.score_name = param.score_name
        self.direction = param.direction
        self.max_step = param.max_step
        self.nvmin = param.nvmin
        self.nvmax = param.nvmax
        self.transfer_variable = StepwiseTransferVariable()
        # translate the direction string into the forward/backward flags
        self._get_direction()
def _get_direction(self):
if self.direction == "forward":
self.forward = True
elif self.direction == "backward":
self.backward = True
elif self.direction == "both":
self.forward = True
self.backward = True
else:
raise ValueError("Wrong stepwise direction given.")
def _put_model(self, key, model):
"""
wrapper to put key, model dict pair into models dict
"""
model_dict = {'model': {'stepwise': model.export_model()}}
self.models[key] = model_dict
def _get_model(self, key):
"""
wrapper to get value of a given model key from models dict
"""
value = self.models.get(key)
return value
def _set_k(self):
"""
Helper function, get the penalty coefficient for AIC/BIC calculation.
"""
if self.score_name == "aic":
self.k = 2
elif self.score_name == "bic":
self.k = np.log(self.n_count)
else:
raise ValueError("Wrong score name given: {}. Only 'aic' or 'bic' acceptable.".format(self.score_name))
@staticmethod
def get_dfe(model, str_mask):
dfe = sum(HeteroStepwise.string2mask(str_mask))
if model.fit_intercept:
dfe += 1
LOGGER.debug("fit_intercept detected, 1 is added to dfe")
return dfe
def get_step_best(self, step_models):
best_score = None
best_model = ""
for model in step_models:
model_info = self.models_trained[model]
score = model_info.get_score()
if score is None:
continue
if best_score is None or score < best_score:
best_score = score
best_model = model
LOGGER.info(f"step {self.n_step}, best model {best_model}")
return best_model
@staticmethod
def drop_one(mask_to_drop):
for i in np.nonzero(mask_to_drop)[0]:
new_mask = np.copy(mask_to_drop)
new_mask[i] = 0
if sum(new_mask) > 0:
yield new_mask
@staticmethod
def add_one(mask_to_add):
for i in np.where(mask_to_add < 1)[0]:
new_mask = np.copy(mask_to_add)
new_mask[i] = 1
yield new_mask
def check_stop(self, new_host_mask, new_guest_mask, host_mask, guest_mask):
# initial step
if self.n_step == 0:
return False
# if model not updated
if np.array_equal(new_host_mask, host_mask) and np.array_equal(new_guest_mask, guest_mask):
LOGGER.debug("masks not changed, check_stop returns True")
return True
# if full model is the best
if sum(new_host_mask < 1) == 0 and sum(new_guest_mask < 1) == 0 and self.n_step > 0:
LOGGER.debug("masks are full model, check_stop returns True")
return True
# if new best reach variable count lower limit
new_total_nv = sum(new_host_mask) + sum(new_guest_mask)
total_nv = sum(host_mask) + sum(guest_mask)
if new_total_nv == self.nvmin and total_nv >= self.nvmin:
LOGGER.debug("variable count min reached, check_stop returns True")
return True
# if new best reach variable count upper limit
if self.nvmax is not None:
if new_total_nv == self.nvmax and total_nv <= self.nvmax:
LOGGER.debug("variable count max reached, check_stop returns True")
return True
# if reach max step
if self.n_step >= self.max_step:
LOGGER.debug("max step reached, check_stop returns True")
return True
return False
    def get_intercept_loss(self, model, data):
        """
        Fit a local intercept-only model and return its training loss.

        Used to score the null model: linear/Poisson models are scored
        with MSE/2, logistic models with log-loss. The fitted intercept
        is stored on ``self.intercept`` as a side effect.

        :param model: federated model object; its ``model_name`` selects
            the local surrogate.
        :param data: distributed table whose values expose a ``label``
            attribute.
        :raises ValueError: for an unrecognized ``model_name``.
        """
        y = np.array([x[1] for x in data.mapValues(lambda v: v.label).collect()])
        X = np.ones((len(y), 1))
        if model.model_name == 'HeteroLinearRegression' or model.model_name == 'HeteroPoissonRegression':
            intercept_model = LinearRegression(fit_intercept=False)
            trained_model = intercept_model.fit(X, y)
            pred = trained_model.predict(X)
            loss = metrics.mean_squared_error(y, pred) / 2
        elif model.model_name == 'HeteroLogisticRegression':
            # NOTE(review): log_loss is computed on hard predictions from
            # predict(), not on probabilities from predict_proba() --
            # confirm this is the intended baseline loss.
            intercept_model = LogisticRegression(penalty='l1', C=1e8, fit_intercept=False, solver='liblinear')
            trained_model = intercept_model.fit(X, y)
            pred = trained_model.predict(X)
            loss = metrics.log_loss(y, pred)
        else:
            raise ValueError("Unknown model received. Stepwise stopped.")
        self.intercept = intercept_model.intercept_
        return loss
def get_ic_val(self, model, model_mask):
if self.role != consts.ARBITER:
return None, None
if len(model.loss_history) == 0:
raise ValueError("Arbiter has no loss history. Stepwise does not support model without total loss.")
# get final loss from loss history for criteria calculation
loss = model.loss_history[-1]
dfe = HeteroStepwise.get_dfe(model, model_mask)
ic_val = self.IC_computer.compute(self.k, self.n_count, dfe, loss)
if np.isinf(ic_val):
raise ValueError("Loss value of infinity obtained. Stepwise stopped.")
return loss, ic_val
def get_ic_val_guest(self, model, train_data):
if not model.fit_intercept:
return None, None
loss = self.get_intercept_loss(model, train_data)
# intercept only model has dfe = 1
dfe = 1
ic_val = self.IC_computer.compute(self.k, self.n_count, dfe, loss)
return loss, ic_val
def _run_step(self, model, train_data, validate_data, feature_mask, n_model, model_mask):
if self.direction == 'forward' and self.n_step == 0:
if self.role == consts.GUEST:
loss, ic_val = self.get_ic_val_guest(model, train_data)
LOGGER.info("step {} n_model {}".format(self.n_step, n_model))
model_info = ModelInfo(self.n_step, n_model, ic_val, loss, self.step_direction)
self.models_trained[model_mask] = model_info
model_key = model_info.get_key()
self._put_model(model_key, model)
else:
model_info = ModelInfo(self.n_step, n_model, None, None, self.step_direction)
self.models_trained[model_mask] = model_info
model_key = model_info.get_key()
self._put_model(model_key, model)
return
curr_step = Step()
curr_step.set_step_info((self.n_step, n_model))
trained_model = curr_step.run(model, train_data, validate_data, feature_mask)
loss, ic_val = self.get_ic_val(trained_model, model_mask)
LOGGER.info("step {} n_model {}: ic_val {}".format(self.n_step, n_model, ic_val))
model_info = ModelInfo(self.n_step, n_model, ic_val, loss, self.step_direction)
self.models_trained[model_mask] = model_info
model_key = model_info.get_key()
self._put_model(model_key, trained_model)
def sync_data_info(self, data):
if self.role == consts.ARBITER:
return self.arbiter_sync_data_info()
else:
return self.client_sync_data_info(data)
def arbiter_sync_data_info(self):
n_host, j_host, self.anonymous_header_host = self.transfer_variable.host_data_info.get(idx=0)
n_guest, j_guest, self.anonymous_header_guest = self.transfer_variable.guest_data_info.get(idx=0)
self.n_count = n_host
return j_host, j_guest
def client_sync_data_info(self, data):
n, j = data.count(), data_overview.get_features_shape(data)
anonymous_header = data_overview.get_anonymous_header(data)
self.n_count = n
if self.role == consts.HOST:
self.transfer_variable.host_data_info.remote((n, j, anonymous_header), role=consts.ARBITER, idx=0)
self.transfer_variable.host_data_info.remote((n, j, anonymous_header), role=consts.GUEST, idx=0)
j_host = j
n_guest, j_guest, self.anonymous_header_guest = self.transfer_variable.guest_data_info.get(idx=0)
self.anonymous_header_host = anonymous_header
else:
self.transfer_variable.guest_data_info.remote((n, j, anonymous_header), role=consts.ARBITER, idx=0)
self.transfer_variable.guest_data_info.remote((n, j, anonymous_header), role=consts.HOST, idx=0)
j_guest = j
n_host, j_host, self.anonymous_header_host = self.transfer_variable.host_data_info.get(idx=0)
self.anonymous_header_guest = anonymous_header
return j_host, j_guest
def get_to_enter(self, host_mask, guest_mask, all_features):
if self.role == consts.GUEST:
to_enter = [all_features[i] for i in np.where(guest_mask < 1)[0]]
elif self.role == consts.HOST:
to_enter = [all_features[i] for i in np.where(host_mask < 1)[0]]
else:
to_enter = []
return to_enter
def update_summary_client(self, model, host_mask, guest_mask, unilateral_features, host_anonym, guest_anonym):
step_summary = {}
if self.role == consts.GUEST:
guest_features = [unilateral_features[i] for i in np.where(guest_mask == 1)[0]]
host_features = [host_anonym[i] for i in np.where(host_mask == 1)[0]]
elif self.role == consts.HOST:
guest_features = [guest_anonym[i] for i in np.where(guest_mask == 1)[0]]
host_features = [unilateral_features[i] for i in np.where(host_mask == 1)[0]]
else:
raise ValueError(f"upload summary on client only applies to host or guest.")
step_summary["guest_features"] = guest_features
step_summary["host_features"] = host_features
model.add_summary(f"step_{self.n_step}", step_summary)
def update_summary_arbiter(self, model, loss, ic_val):
step_summary = {}
step_summary["loss"] = loss
step_summary["ic_val"] = ic_val
model.add_summary(f"step_{self.n_step}", step_summary)
    def record_step_best(self, step_best, host_mask, guest_mask, data_instances, model):
        """Publish the step's winning model as metric meta and model summary.

        :param step_best: string mask ('0'/'1' chars) of the chosen model
        :param host_mask: boolean mask of selected host features
        :param guest_mask: boolean mask of selected guest features
        :param data_instances: training data table (None on the arbiter)
        :param model: the federated model, used for callbacks and summary
        """
        # Assemble everything the tracker UI needs about this step.
        metas = {"host_mask": host_mask.tolist(), "guest_mask": guest_mask.tolist(),
                 "score_name": self.score_name}
        metas["number_in"] = int(sum(host_mask) + sum(guest_mask))
        metas["direction"] = self.direction
        metas["n_count"] = int(self.n_count)
        """host_anonym = [
            anonymous_generator.generate_anonymous(
                fid=i,
                role='host',
                model=model) for i in range(
                len(host_mask))]
        guest_anonym = [
            anonymous_generator.generate_anonymous(
                fid=i,
                role='guest',
                model=model) for i in range(
                len(guest_mask))]
        metas["host_features_anonym"] = host_anonym
        metas["guest_features_anonym"] = guest_anonym
        """
        metas["host_features_anonym"] = self.anonymous_header_host
        metas["guest_features_anonym"] = self.anonymous_header_guest
        # Look up the cached model that won this step.
        model_info = self.models_trained[step_best]
        loss = model_info.get_loss()
        ic_val = model_info.get_score()
        metas["loss"] = loss
        metas["current_ic_val"] = ic_val
        metas["fit_intercept"] = model.fit_intercept
        model_key = model_info.get_key()
        model_dict = self._get_model(model_key)
        if self.role != consts.ARBITER:
            all_features = data_instances.schema.get('header')
            metas["all_features"] = all_features
            metas["to_enter"] = self.get_to_enter(host_mask, guest_mask, all_features)
            # Extract the serialized parameters (weights/intercept/header)
            # from the stored protobuf model.
            model_param = list(model_dict.get('model').values())[0].get(
                model.model_param_name)
            param_dict = MessageToDict(model_param)
            metas["intercept"] = param_dict.get("intercept", None)
            metas["weight"] = param_dict.get("weight", {})
            metas["header"] = param_dict.get("header", [])
            # Forward step 0 is the intercept-only baseline: report the
            # intercept computed locally in get_intercept_loss().
            if self.n_step == 0 and self.direction == "forward":
                metas["intercept"] = self.intercept
            self.update_summary_client(model,
                                       host_mask,
                                       guest_mask,
                                       all_features,
                                       self.anonymous_header_host,
                                       self.anonymous_header_guest)
        else:
            self.update_summary_arbiter(model, loss, ic_val)
        # Emit one metric per step so the tracker renders the stepwise path.
        metric_name = f"stepwise_{self.n_step}"
        metric = [Metric(metric_name, float(self.n_step))]
        model.callback_metric(metric_name=metric_name, metric_namespace=self.metric_namespace, metric_data=metric)
        model.tracker.set_metric_meta(metric_name=metric_name, metric_namespace=self.metric_namespace,
                                      metric_meta=MetricMeta(name=metric_name, metric_type=self.metric_type,
                                                             extra_metas=metas))
        LOGGER.info(f"metric_name: {metric_name}, metas: {metas}")
        return
def sync_step_best(self, step_models):
if self.role == consts.ARBITER:
step_best = self.get_step_best(step_models)
self.transfer_variable.step_best.remote(step_best, role=consts.HOST, suffix=(self.n_step,))
self.transfer_variable.step_best.remote(step_best, role=consts.GUEST, suffix=(self.n_step,))
LOGGER.info(f"step {self.n_step}, step_best sent is {step_best}")
else:
step_best = self.transfer_variable.step_best.get(suffix=(self.n_step,))[0]
LOGGER.info(f"step {self.n_step}, step_best received is {step_best}")
return step_best
@staticmethod
def mask2string(host_mask, guest_mask):
mask = np.append(host_mask, guest_mask)
string_repr = ''.join('1' if i else '0' for i in mask)
return string_repr
@staticmethod
def string2mask(string_repr):
mask = np.fromiter(map(int, string_repr), dtype=bool)
return mask
@staticmethod
def predict(data_instances, model):
if data_instances is None:
return
pred_result = model.predict(data_instances)
return pred_result
def get_IC_computer(self, model):
if model.model_name == 'HeteroLinearRegression':
return IC_Approx()
else:
return IC()
    def run(self, component_parameters, train_data, validate_data, model):
        """Drive the full stepwise feature-selection loop.

        Each round generates candidate feature masks (backward drops and/or
        forward adds), trains every unseen candidate, syncs the best mask
        across parties, and checks the stopping criteria. Finally reloads
        the overall best model into ``model``.

        :param component_parameters: parameters used to initialize stepwise
        :param train_data: training data table (None on the arbiter)
        :param validate_data: validation data passed through to each step
        :param model: the federated model object to fit and finally load
        """
        LOGGER.info("Enter stepwise")
        self._init_model(component_parameters)
        j_host, j_guest = self.sync_data_info(train_data)
        if train_data is not None:
            self.anonymous_header = data_overview.get_anonymous_header(train_data)
        # Backward selection starts from the full model; forward from empty.
        if self.backward:
            host_mask, guest_mask = np.ones(j_host, dtype=bool), np.ones(j_guest, dtype=bool)
        else:
            host_mask, guest_mask = np.zeros(j_host, dtype=bool), np.zeros(j_guest, dtype=bool)
        self.IC_computer = self.get_IC_computer(model)
        self._set_k()
        while self.n_step <= self.max_step:
            LOGGER.info("Enter step {}".format(self.n_step))
            # Masks of every candidate considered this step (incl. current).
            step_models = set()
            step_models.add(HeteroStepwise.mask2string(host_mask, guest_mask))
            n_model = 0
            if self.backward:
                self.step_direction = "backward"
                LOGGER.info("step {}, direction: {}".format(self.n_step, self.step_direction))
                if self.n_step == 0:
                    # Step 0 of backward: evaluate the full model itself.
                    backward_gen = [[host_mask, guest_mask]]
                else:
                    # Candidates: drop one feature from either side.
                    backward_host, backward_guest = HeteroStepwise.drop_one(host_mask), HeteroStepwise.drop_one(
                        guest_mask)
                    backward_gen = itertools.chain(zip(backward_host, itertools.cycle([guest_mask])),
                                                   zip(itertools.cycle([host_mask]), backward_guest))
                for curr_host_mask, curr_guest_mask in backward_gen:
                    model_mask = HeteroStepwise.mask2string(curr_host_mask, curr_guest_mask)
                    step_models.add(model_mask)
                    if model_mask not in self.models_trained:
                        # Each party masks only its own features; arbiter none.
                        if self.role == consts.ARBITER:
                            feature_mask = None
                        elif self.role == consts.HOST:
                            feature_mask = curr_host_mask
                        else:
                            feature_mask = curr_guest_mask
                        self._run_step(model, train_data, validate_data, feature_mask, n_model, model_mask)
                        n_model += 1
            if self.forward:
                self.step_direction = "forward"
                LOGGER.info("step {}, direction: {}".format(self.n_step, self.step_direction))
                # Candidates: add one feature to either side.
                forward_host, forward_guest = HeteroStepwise.add_one(host_mask), HeteroStepwise.add_one(guest_mask)
                if sum(guest_mask) + sum(host_mask) == 0:
                    if self.n_step == 0:
                        # Forward step 0: the intercept-only baseline model.
                        forward_gen = [[host_mask, guest_mask]]
                    else:
                        forward_gen = itertools.product(list(forward_host), list(forward_guest))
                else:
                    forward_gen = itertools.chain(zip(forward_host, itertools.cycle([guest_mask])),
                                                  zip(itertools.cycle([host_mask]), forward_guest))
                for curr_host_mask, curr_guest_mask in forward_gen:
                    model_mask = HeteroStepwise.mask2string(curr_host_mask, curr_guest_mask)
                    step_models.add(model_mask)
                    LOGGER.info(f"step {self.n_step}, mask {model_mask}")
                    if model_mask not in self.models_trained:
                        if self.role == consts.ARBITER:
                            feature_mask = None
                        elif self.role == consts.HOST:
                            feature_mask = curr_host_mask
                        else:
                            feature_mask = curr_guest_mask
                        self._run_step(model, train_data, validate_data, feature_mask, n_model, model_mask)
                        n_model += 1
            # forward step 0
            if sum(host_mask) + sum(guest_mask) == 0 and self.n_step == 0:
                model_mask = HeteroStepwise.mask2string(host_mask, guest_mask)
                self.record_step_best(model_mask, host_mask, guest_mask, train_data, model)
                self.n_step += 1
                continue
            old_host_mask, old_guest_mask = host_mask, guest_mask
            # Agree on the winning candidate, then split its mask per party.
            step_best = self.sync_step_best(step_models)
            step_best_mask = HeteroStepwise.string2mask(step_best)
            host_mask, guest_mask = step_best_mask[:j_host], step_best_mask[j_host:]
            LOGGER.debug("step {}, best_host_mask {}, best_guest_mask {}".format(self.n_step, host_mask, guest_mask))
            self.stop_stepwise = self.check_stop(host_mask, guest_mask, old_host_mask, old_guest_mask)
            if self.stop_stepwise:
                break
            self.record_step_best(step_best, host_mask, guest_mask, train_data, model)
            self.n_step += 1
        # Reload the best model found across all steps into `model`.
        mask_string = HeteroStepwise.mask2string(host_mask, guest_mask)
        model_info = self.models_trained[mask_string]
        best_model_key = model_info.get_key()
        best_model = self._get_model(best_model_key)
        model.load_model(best_model)
| {
"content_hash": "38630d9c366c7ba0f9d6f090c745adab",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 117,
"avg_line_length": 45.28630705394191,
"alnum_prop": 0.5764614256917721,
"repo_name": "FederatedAI/FATE",
"id": "acb280b343a22f201141a1ef778f4fe0e51acbc1",
"size": "22445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/federatedml/model_selection/stepwise/hetero_stepwise.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable integer ``approval_level`` field to Application."""
    dependencies = [
        ('application', '0012_application_duplicate'),
    ]
    operations = [
        migrations.AddField(
            model_name='application',
            name='approval_level',
            # Nullable and blank-able, so existing rows need no backfill.
            field=models.IntegerField(null=True, blank=True),
        ),
    ]
| {
"content_hash": "2b0bc3477ed0e3d0e760ff9e06ce294c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 22.61111111111111,
"alnum_prop": 0.6142506142506142,
"repo_name": "RightToResearch/OpenCon-Rating-App",
"id": "11a0e96df8f37eb09cc4ada5f07a0263a28c6da6",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/application/migrations/0013_application_approval_level.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24339"
},
{
"name": "HTML",
"bytes": "12437"
},
{
"name": "Python",
"bytes": "58815"
}
],
"symlink_target": ""
} |
class ModelTestCase(object):
    """Placeholder test case; its runTest performs no checks."""

    def runTest(self):
        """Intentionally a no-op."""
        return None
"content_hash": "c0dc1e905ef70e73903adb474064723d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 28,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.65625,
"repo_name": "TeamADEA/Hunger_Games",
"id": "d56ce5f1d8ced70cd199d0e42cf1a6b3a4e19f64",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HG_Code/HG_Test/test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56032"
}
],
"symlink_target": ""
} |
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain
from linecache import getline
import mimetypes
from optparse import OptionParser
from os import listdir
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Put a pip distribution matching ``specifier`` on the import path.

    Raise RuntimeError when no compatible version is installed.
    """
    try:
        for distribution in require(specifier):
            distribution.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
activate('pip>=0.6.2') # Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
# Version tuple: compare like (2, 2, 0).
__version__ = 2, 2, 0
# Shell exit statuses returned by the subcommands:
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
# Extensions recognized as downloadable package archives.
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
# Unique sentinel for distinguishing "absent" from any real value (incl. None).
MARKER = object()
class PipException(Exception):
    """Raised when a delegated pip invocation exits with a nonzero status."""

    def __init__(self, error_code):
        # The status code pip exited with.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """Raised when a requirements file contains a line peep cannot handle."""
class DownloadError(Exception):
    """Raised when fetching a requirement's archive from ``link`` fails."""

    def __init__(self, link, exc):
        # Keep the link and a human-readable reason for reporting.
        self.link = link
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash object.

    For a sha256 this matches the hash scheme of the Wheel format (PEP 427):
    urlsafe base64 with the trailing '=' padding stripped — except that here
    it is meant to be computed over the downloaded archive before unpacking.
    """
    digest = sha.digest()
    return urlsafe_b64encode(digest).decode('ascii').rstrip('=')
def run_pip(initial_args):
    """Hand the given args (starting with the subcommand) off to pip.

    Raise ``PipException`` if pip reports a failure.
    """
    exit_status = pip.main(initial_args)
    # Pip assumes one command invocation per interpreter lifetime: its
    # singleton logger keeps accumulating consumers with every run, so clear
    # them out between calls.
    logger.consumers = []
    if exit_status:
        raise PipException(exit_status)
def hash_of_file(path):
    """Return the peep hash of the file at ``path``."""
    sha = sha256()
    with open(path, 'rb') as archive:
        # Stream in 1 MiB chunks so large archives don't load into memory.
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            sha.update(chunk)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether ``text`` looks like a git commit sha.

    Accepts both the full 40-character sha and the common 7-character
    abbreviation.
    """
    if len(text) not in (40, 7):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the last path segment of ``url`` — its filename, if any."""
    return urlparse(url).path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Yield a filtered view of command-line arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, include the path following each ``-r`` or
        ``--requirement`` flag.
    :arg want_other: If True, include args that are neither such a flag nor
        the path following one.
    """
    expecting_path = False
    for arg in argv:
        if expecting_path:
            # This arg is the requirements-file path, even if it is spelled
            # "-r" itself; a trailing flag with no path is simply ignored.
            expecting_path = False
            if want_paths:
                yield arg
        elif arg in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield arg
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
$""", re.X) # and are optional.
def peep_hash(argv):
    """Print a peep hash line for each named file; return a shell status code.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if not paths:
        parser.print_usage()
        return COMMAND_LINE_ERROR
    for path in paths:
        print('# sha256:', hash_of_file(path))
    return ITS_FINE_ITS_FINE
class EmptyOptions(object):
    """Stand-in for optparse options, for compatibility with pip<1.2

    parse_requirements() in pip<1.2 had a bug that made its ``options``
    kwarg effectively required; we satisfy it with this mock.
    """
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Cache a nullary method's result on its instance, keyed by method name.

    The decorated method must take no arguments besides ``self`` and is
    expected to return the same value every time on a given instance.
    """
    @wraps(func)
    def memoizer(self):
        cache = getattr(self, '_cache', None)
        if cache is None:
            cache = self._cache = {}
        key = func.__name__
        if key not in cache:
            cache[key] = func(self)
        return cache[key]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    :return: a ``pip.index.PackageFinder`` configured with the index URLs,
        find-links, wheel/prerelease flags, etc. parsed from ``argv``
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links', 'use_wheel', 'allow_external', 'allow_unverified',
        'allow_all_external', ('allow_all_prereleases', 'pre'),
        'process_dependency_links']
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER (not None) distinguishes "option absent in this pip version".
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name '%s', so I couldn't figure out the version number. My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._req.url
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
def _path_and_line(self):
"""Return the path and line number of the file from which our
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
self._req.comes_from).groups())
return path, int(line)
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line
``line_number``.
"""
for line_number in xrange(line_number - 1, 0, -1):
line = getline(path, line_number)
match = HASH_COMMENT_RE.match(line)
if match:
yield match.groupdict()['hash']
elif not line.lstrip().startswith('#'):
# If we hit a non-comment line, abort
break
hashes = list(hashes_above(*self._path_and_line()))
hashes.reverse() # because we read them backwards
return hashes
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
finder = package_finder(self._argv)
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = (finder.find_requirement(self._req, upgrade=False)
if self._req.url is None
else Link(self._req.url))
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.
        """
        # Everything that isn't a requirements path is forwarded to pip as-is.
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        # Hash of the on-disk archive; compared against the sha256 lines from
        # the requirements file (see MissingReq/MismatchedReq output).
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
raise ValueError('Requirement has no project_name.')
    def _name(self):
        # Accessor for the wrapped requirement's name.
        return self._req.name
    def _url(self):
        # Accessor for the wrapped requirement's URL (None for index reqs).
        return self._req.url
    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        # "Satisfied" = pip sees the requirement already installed AND it is
        # not one we always reinstall (per _is_always_unsatisfied(), defined
        # elsewhere on this class).
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())
    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        # Check order matters: name validity, then installed-ness, then hash
        # presence, then hash match. The first failing check decides the
        # class; only a req passing all of them is installable.
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq
    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.

        Subclasses (e.g. MismatchedReq) override this to add trailing text.
        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""
    @classmethod
    def head(cls):
        # Printed once before the first error() of this class (see
        # first_every_last() usage in peep_install()).
        return 'The following requirements could not be processed:\n'
    def error(self):
        # One line per offending requirement, rendered under head().
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        url = self._url()
        if url:
            # URL-sourced requirement: echo the URL, appending an #egg=
            # fragment when the filename doesn't already carry the name.
            line = url
            if self._name() not in filename_from_url(url):
                line = '%s#egg=%s' % (line, self._name())
        else:
            # Index-sourced requirement: pin it as name==version.
            line = '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""
    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")
    def error(self):
        # Lay out the expected hashes and the "got" line aligned under one
        # another; the padding arithmetic keys off the preamble's length.
        preamble = '    %s: expected%s' % (
            self._project_name(),
            ' one of' if len(self._expected_hashes()) > 1 else '')
        return '%s %s\n%s got %s' % (
            preamble,
            ('\n' + ' ' * (len(preamble) + 1)).join(self._expected_hashes()),
            ' ' * (len(preamble) - 4),
            self._actual_hash())
    @classmethod
    def foot(cls):
        # Blank line separating the mismatch section from what follows.
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""
    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")
    def error(self):
        # Informational only; not a member of ERROR_CLASSES.
        return '   %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Return a defaultdict mapping key(thing) -> list of things.

    Callers rely on the defaultdict behavior: looking up an absent key
    yields an empty list rather than raising.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.

    If there are no items in the iterable, don't execute anything.
    """
    item = None
    seen_any = False
    for item in iterable:
        if not seen_any:
            seen_any = True
            first(item)
        every(item)
    # ``item`` still holds the final element when the loop ran at all.
    if seen_any:
        last(item)
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    def downloaded_reqs(parsed_reqs):
        """Just avoid repeating this list comp."""
        return [DownloadedReq(req, argv) for req in parsed_reqs]
    try:
        return downloaded_reqs(parse_requirements(path, options=EmptyOptions()))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return downloaded_reqs(parse_requirements(
            path, options=EmptyOptions(), session=PipSession()))
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand

    Downloads every requirement, buckets them by verification outcome
    (ERROR_CLASSES / SatisfiedReq / InstallableReq), and installs only when
    no error bucket is populated. All report text is buffered in ``output``
    and printed in one go in the ``finally`` clause.
    """
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group reqs by their outcome class (decided by DownloadedReq._class).
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        # head() once before, error() per req, foot() once after each bucket.
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()
            # Inform the user about already-satisfied reqs (not an error).
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up temp dirs and flush the buffered report.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    subcommands = {'hash': peep_hash,
                   'install': peep_install}
    try:
        if len(argv) < 2 or argv[1] not in subcommands:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
        return subcommands[argv[1]](argv[2:])
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template (versions, argv, traceback) for an
    unexpected top-level exception. Arguments are as from sys.exc_info().
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(pip.__version__))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Last-resort handler: print a bug-report template instead of a bare
        # traceback, then exit nonzero.
        exception_handler(*sys.exc_info())
        exit(SOMETHING_WENT_WRONG)
| {
"content_hash": "2f700a649e99165087fafec223288046",
"timestamp": "",
"source": "github",
"line_count": 878,
"max_line_length": 157,
"avg_line_length": 36.90432801822323,
"alnum_prop": 0.6079254367014382,
"repo_name": "DESHRAJ/fjord",
"id": "c3f490d9965759b01fc9c3bc9a3ef71d0c9bcb20",
"size": "32424",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "bin/peep-2.2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "168457"
},
{
"name": "JavaScript",
"bytes": "299449"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "709245"
},
{
"name": "Shell",
"bytes": "13991"
}
],
"symlink_target": ""
} |
import re
def getVtt(fname):
    """Return the spoken text of a WebVTT subtitle file as a single string.

    Skips everything before the first cue-timing line (the WEBVTT header
    and any metadata), then concatenates every non-empty, non-timing line,
    appending a trailing space after each one.

    :param fname: path to the .vtt file
    :return: concatenated subtitle text ('' if the file has no cues)
    """
    # Raw string so \d is a regex digit class (a non-raw '\d' is a
    # deprecated escape), and the dot before the milliseconds is escaped so
    # it matches only a literal '.' instead of any character.
    timing = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}')
    with open(fname) as f:  # close the handle deterministically
        lines = [line.strip() for line in f]
    parts = []
    pay_attention = False
    for line in lines:
        if timing.match(line) is not None:
            pay_attention = True  # gets past the metadata in the first few lines
            continue  # timing lines themselves never contribute text
        if pay_attention and line:
            parts.append(line + " ")
    # join() avoids the quadratic cost of repeated string concatenation
    return "".join(parts)
# references:
# http://stackoverflow.com/questions/3277503/how-do-i-read-a-file-line-by-line-into-a-list
# http://stackoverflow.com/questions/11665582/regex-for-timestamp
# http://stackoverflow.com/questions/27805919/how-to-only-read-lines-in-a-text-file-after-a-certain-string-using-python
| {
"content_hash": "2128090b724d4f7ac02e561d06fbd480",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 119,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.5689119170984456,
"repo_name": "aktivkohle/youtube-curation",
"id": "5841d766c43ec6f6373ccf561c3d4afa0bb2e153",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parseSubtitles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1809"
},
{
"name": "HTML",
"bytes": "4767"
},
{
"name": "JavaScript",
"bytes": "6886"
},
{
"name": "Jupyter Notebook",
"bytes": "154943"
},
{
"name": "PLpgSQL",
"bytes": "12646"
},
{
"name": "Python",
"bytes": "52499"
},
{
"name": "Shell",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import gettext_lazy as _lazy
from kitsune.sumo.models import ModelBase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.parser import wiki_to_html
class GroupProfile(ModelBase):
    """Extra profile data (slug, leaders, wiki info, avatar) for an auth Group."""

    slug = models.SlugField(unique=True, editable=False, blank=False, null=False, max_length=80)
    group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name="profile")
    leaders = models.ManyToManyField(User)
    information = models.TextField(help_text="Use Wiki Syntax")
    information_html = models.TextField(editable=False)
    avatar = models.ImageField(
        upload_to=settings.GROUP_AVATAR_PATH,
        null=True,
        blank=True,
        verbose_name=_lazy("Avatar"),
        max_length=settings.MAX_FILEPATH_LENGTH,
    )

    class Meta:
        ordering = ["slug"]

    def __str__(self):
        return str(self.group)

    def get_absolute_url(self):
        return reverse("groups.profile", args=[self.slug])

    def save(self, *args, **kwargs):
        """Derive the slug on first save and render the wiki markup to HTML."""
        if not self.slug:
            # Only set once: the field is not editable afterwards.
            self.slug = slugify(self.group.name)
        self.information_html = wiki_to_html(self.information)
        super().save(*args, **kwargs)
| {
"content_hash": "8c7f8e27f3809cc0102bad516e7114f2",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 96,
"avg_line_length": 33.59090909090909,
"alnum_prop": 0.6928281461434371,
"repo_name": "mozilla/kitsune",
"id": "dc912e5b1f5f5fbba3e36d8c35c3def498377c87",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kitsune/groups/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "HTML",
"bytes": "535448"
},
{
"name": "JavaScript",
"bytes": "658477"
},
{
"name": "Jinja",
"bytes": "4837"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Nunjucks",
"bytes": "68656"
},
{
"name": "Python",
"bytes": "2827116"
},
{
"name": "SCSS",
"bytes": "240092"
},
{
"name": "Shell",
"bytes": "10759"
},
{
"name": "Svelte",
"bytes": "26864"
}
],
"symlink_target": ""
} |
"""Tornado handlers for kernel specifications."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
import json
import os
pjoin = os.path.join  # short alias used throughout this module
from tornado import web
from ...base.handlers import IPythonHandler, json_errors
from ...utils import url_path_join
def kernelspec_model(handler, name):
    """Load a KernelSpec by name and return the REST API model.

    The model maps 'name', 'spec' (the kernelspec dict) and 'resources'
    (URLs for per-kernel JS/CSS overrides and logo files found on disk).
    """
    spec = handler.kernel_spec_manager.get_kernel_spec(name)
    resources = {}
    model = {'name': name,
             'spec': spec.to_dict(),
             'resources': resources}
    resource_dir = spec.resource_dir
    # Link the per-kernel JS/CSS overrides only when the files exist.
    for resource in ('kernel.js', 'kernel.css'):
        if os.path.exists(pjoin(resource_dir, resource)):
            resources[resource] = url_path_join(
                handler.base_url,
                'kernelspecs',
                name,
                resource
            )
    # Expose every logo-* file, keyed by its extension-less basename.
    for logo_file in glob.glob(pjoin(resource_dir, 'logo-*')):
        fname = os.path.basename(logo_file)
        base_name, _ = os.path.splitext(fname)
        resources[base_name] = url_path_join(
            handler.base_url,
            'kernelspecs',
            name,
            fname
        )
    return model
class MainKernelSpecHandler(IPythonHandler):
    """Serve the list of all kernelspecs plus the default kernel name."""
    SUPPORTED_METHODS = ('GET',)
    @web.authenticated
    @json_errors
    def get(self):
        ksm = self.kernel_spec_manager
        km = self.kernel_manager
        model = {}
        model['default'] = km.default_kernel_name
        model['kernelspecs'] = specs = {}
        for kernel_name in ksm.find_kernel_specs():
            try:
                d = kernelspec_model(self, kernel_name)
            except Exception:
                # One broken kernelspec must not take down the whole listing:
                # log it with the traceback and keep going.
                self.log.error("Failed to load kernel spec: '%s'", kernel_name, exc_info=True)
                continue
            specs[kernel_name] = d
        self.set_header("Content-Type", 'application/json')
        self.finish(json.dumps(model))
class KernelSpecHandler(IPythonHandler):
    """Serve the REST model for a single named kernelspec."""
    SUPPORTED_METHODS = ('GET',)

    @web.authenticated
    @json_errors
    def get(self, kernel_name):
        # The kernel spec manager raises KeyError for unknown names; surface
        # that to the client as a 404 rather than a server error.
        try:
            model = kernelspec_model(self, kernel_name)
        except KeyError:
            raise web.HTTPError(404, u'Kernel spec %s not found' % kernel_name)
        payload = json.dumps(model)
        self.set_header("Content-Type", 'application/json')
        self.finish(payload)
# URL to handler mappings
kernel_name_regex = r"(?P<kernel_name>\w+)"  # named group feeds KernelSpecHandler.get
default_handlers = [
    (r"/api/kernelspecs", MainKernelSpecHandler),
    (r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler),
]
| {
"content_hash": "4e08feb29efa86a085893265fa29d88b",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 94,
"avg_line_length": 30.151162790697676,
"alnum_prop": 0.6047049749325106,
"repo_name": "initNirvana/Easyphotos",
"id": "72397788f4f1e27baa692e350f8664d67af8f32f",
"size": "2593",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/lib/python3.4/site-packages/IPython/html/services/kernelspecs/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "13653"
},
{
"name": "HTML",
"bytes": "129191"
},
{
"name": "JavaScript",
"bytes": "1401324"
},
{
"name": "Python",
"bytes": "11874458"
},
{
"name": "Shell",
"bytes": "3668"
},
{
"name": "Smarty",
"bytes": "21402"
}
],
"symlink_target": ""
} |
import rules
| {
"content_hash": "880aa50730a5d7c617be48bcfcd4f985",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 12,
"avg_line_length": 13,
"alnum_prop": 0.8461538461538461,
"repo_name": "asyncee/django-application-template",
"id": "1239971dc264fd5b8ea363850c3db2d41b64266b",
"size": "13",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1581"
}
],
"symlink_target": ""
} |
import sys
import os
import random
import math
from PIL import Image, ImageEnhance, ImageFilter
import numpy as np
import h5py
import threading
import signal
# CLI: datasetDir numSamples batchSize sphereProbability
if len(sys.argv) != 5:
    print('Usage: %s datasetDir numSamples batchSize sphereProbability' % sys.argv[0])
    sys.exit(1)
# N = total number of samples; BATCH_SIZE = samples per HDF5 batch file;
# probability = chance that a generated image contains a sphere.
datasetDir, N, BATCH_SIZE, probability = sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), float(sys.argv[4])
def quit(signum, frame):
print('Quitting...')
# join all remaining threads
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
sys.exit(signum)
# graceful shutdown on kill/Ctrl-C
signal.signal(signal.SIGTERM, quit)
signal.signal(signal.SIGINT, quit)
# get all backgrounds filenames in 'backgrounds' directory
bkgDir = os.path.join(os.getcwd(), './backgrounds')
bkgFilenames = [f for f in os.listdir(bkgDir) if os.path.isfile(os.path.join(bkgDir, f))]
# get all spheres filenames in 'spheres' directory
spheresDir = os.path.join(os.getcwd(), './spheres')
spheresFilenames = [f for f in os.listdir(spheresDir) if os.path.isfile(os.path.join(spheresDir, f))]
# create directory for dataset, if it doesn't exist
datasetDir = os.path.join(os.getcwd(), datasetDir)
if not os.path.exists(datasetDir):
    os.makedirs(datasetDir)
# create HDF5 file for mean
meanHdf = h5py.File(os.path.join(datasetDir, './dataset_mean.hdf5'), 'w')
# prepare mean matrix (running mean over ALL samples, updated by workers)
datasetMean = np.zeros((100, 100), dtype='double')
N_THREADS = 8
# NOTE(review): under Python 3 this is float division; the intent looks like
# integer N // BATCH_SIZE — confirm the target interpreter version.
numBatches = N / BATCH_SIZE
class Worker (threading.Thread):
    """Generates one batch of synthetic samples into its own HDF5 file.

    Each 100x100 grayscale sample is a random background patch, optionally
    with a brightness/blur-randomized sphere pasted on top, labelled with
    HAS_SPHERE / RADIUS / CENTER_X / CENTER_Y attributes.
    """
    def __init__(self, batch_id):
        threading.Thread.__init__(self)
        self.batch_id = batch_id  # index used to name the batch file
    def run(self):
        # create HDF5 file for dataset
        f = h5py.File(os.path.join(datasetDir, './dataset_batch%d.hdf5' % self.batch_id), 'w')
        batchMean = np.zeros((100, 100), dtype='double')
        x = 0
        while x < BATCH_SIZE:
            try:
                # choose a random background (list shared across workers,
                # hence the lock)
                bkgListLock.acquire()
                bkgFilename = random.choice(bkgFilenames)
                bkgListLock.release()
                background = Image.open(os.path.join(bkgDir, bkgFilename))
                # create new greyscale image (200x200 canvas; the extra
                # border lets the sphere overlap the final 100x100 crop)
                result = Image.new('L', (200, 200))
                # check that the background image is large enough
                if (background.size[0] < 100 or background.size[1] < 100):
                    bkgListLock.acquire()
                    print('Warning: background must be at least 100x100px. %s deleted.' % bkgFilename)
                    os.remove(os.path.join(bkgDir, bkgFilename))
                    bkgFilenames.remove(bkgFilename)
                    bkgListLock.release()
                    continue  # retry this sample with another background
                # copy a 100x100px patch from background and paste it into result
                left, top = int(random.random() * (background.size[0] - 100)), int(random.random() * (background.size[1] - 100))
                bkgPatch = background.crop((left, top, left + 100, top + 100))
                result.paste(bkgPatch, (50, 50, 150, 150))
                # defaults, used when saving the HDF5 file
                hasSphere = False
                sphereDiameter = 1
                sphereCenter = (0, 0)
                # with probability `probability` (CLI argument) add a sphere
                if random.random() < probability:
                    hasSphere = True
                    # choose a random sphere
                    sphereFilename = random.choice(spheresFilenames)
                    sphere = Image.open(os.path.join(spheresDir, sphereFilename))
                    # copy and resize patch from sphere and paste it into result
                    sphereDiameter = int(random.random() * 90 + 10) # random integer value in [10, 99]
                    sphereCenter = (random.random() * 100 + 50, random.random() * 100 + 50)
                    sphere= sphere.resize((sphereDiameter, sphereDiameter))
                    pasteBox = (int(sphereCenter[0] - math.floor(sphereDiameter / 2.0)), int(sphereCenter[1] - math.floor(sphereDiameter / 2.0)), \
                        int(sphereCenter[0] + math.ceil(sphereDiameter / 2.0)), int(sphereCenter[1] + math.ceil(sphereDiameter / 2.0)))
                    # random brightness
                    sphereEnhanced = sphere.convert('RGB')
                    enhancer = ImageEnhance.Brightness(sphereEnhanced)
                    sphereEnhanced = enhancer.enhance(random.random() + 0.5)
                    # random blur (stronger for larger spheres); the alpha
                    # channel is blurred with the same kernel to soften edges
                    blur = ImageFilter.GaussianBlur(int(random.random() * sphereDiameter / 20))
                    sphereEnhanced = sphereEnhanced.filter(blur)
                    blurredAlpha = sphere.split()[-1].filter(blur)
                    sphere.putalpha(blurredAlpha)
                    result.paste(sphereEnhanced, \
                        pasteBox,
                        sphere) # the second sphere is used as alpha mask
                # crop to 100x100
                result = result.crop((50, 50, 150, 150))
                # random brightness
                enhancer = ImageEnhance.Brightness(result)
                result = enhancer.enhance(random.random() + 0.5)
                # save result to HDF5 DB
                dset = f.create_dataset('%07d' % x, (100, 100), dtype='uint8')
                dset[...] = np.array(result)
                # update mean (divide by total N since every batch adds in)
                batchMean += dset[...].astype('double') / N
                # set attributes for grayscale images
                dset.attrs['CLASS'] = 'IMAGE'
                dset.attrs['VERSION'] = '1.2'
                dset.attrs['IMAGE_SUBCLASS'] = 'IMAGE_GRAYSCALE'
                dset.attrs['IMAGE_WHITE_IS_ZERO'] = np.uint8(0)
                # save attributes for training (center relative to the crop)
                dset.attrs['HAS_SPHERE'] = np.uint8(hasSphere)
                if (hasSphere):
                    dset.attrs['RADIUS'] = np.float(sphereDiameter / 2)
                    dset.attrs['CENTER_X'] = np.float(sphereCenter[0] - 50)
                    dset.attrs['CENTER_Y'] = np.float(sphereCenter[1] - 50)
            except IOError as e:
                print('I/O Error(%d): %s' % (e.errno, e.strerror))
            # NOTE(review): x also advances when an IOError fired, so the
            # batch file may have missing indices — confirm that's intended.
            x += 1
        # update global mean
        global datasetMean
        meanLock.acquire()
        datasetMean += batchMean
        meanLock.release()
        # release resources
        f.flush()
        f.close()
        # notify main thread that this worker has finished
        batchFinishCondition.acquire()
        global workersBusy
        global batchFinished
        workersBusy -= 1
        batchFinished += 1
        batchFinishCondition.notify()
        batchFinishCondition.release()
        # print progress (read of batchFinished here is outside the lock)
        if batchFinished % 10 == 0:
            print('%d / %d %f%%' % (batchFinished, numBatches, float(batchFinished) / numBatches * 100))
# synchronisation variables
bkgListLock = threading.Lock()          # guards the shared bkgFilenames list
meanLock = threading.Lock()             # guards datasetMean accumulation
batchFinishCondition = threading.Condition()  # signalled by finishing workers
workersBusy = 0
batchFinished = 0
# start the threads: keep up to N_THREADS workers running until all batches
# are dispatched.
# NOTE(review): quitSignal is read here but never assigned True anywhere in
# this script as written — confirm the signal handler is meant to set it.
batch = 0
while batch < numBatches and not quitSignal:
    batchFinishCondition.acquire()
    while workersBusy >= N_THREADS:
        batchFinishCondition.wait()
    # NOTE(review): this inner loop refills to N_THREADS without re-checking
    # batch < numBatches, so it can dispatch a few extra batches — verify.
    while workersBusy < N_THREADS:
        workersBusy += 1
        worker = Worker(batch)
        worker.start()
        batch += 1
    batchFinishCondition.release()
# join all remaining threads
main_thread = threading.current_thread()
for t in threading.enumerate():
    if t is main_thread:
        continue
    t.join()
# save mean to HDF5 DB
dset = meanHdf.create_dataset('mean', (100, 100), dtype='double')
# NOTE(review): datasetMean already holds per-pixel sums / N; the extra /256
# rescales 0-255 pixel values to roughly 0-1 — confirm consumers expect it.
dset[...] = datasetMean / 256.0
# set attributes for grayscale images
dset.attrs['CLASS'] = 'IMAGE'
dset.attrs['VERSION'] = '1.2'
dset.attrs['IMAGE_SUBCLASS'] = 'IMAGE_GRAYSCALE'
dset.attrs['IMAGE_WHITE_IS_ZERO'] = np.uint8(0)
# release resources
meanHdf.flush()
meanHdf.close()
| {
"content_hash": "abd067aee3450f7da263d2ba6e2de376",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 147,
"avg_line_length": 35.375545851528386,
"alnum_prop": 0.5788174299469201,
"repo_name": "Hornobster/Ball-Tracking",
"id": "af626aa92255e6ad01b87bcd20357fb25945b8c2",
"size": "8123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate_dataset_multithreaded.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54824"
},
{
"name": "Shell",
"bytes": "1667"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class UpdateUserResponse:
    """Response envelope for the update-user API call.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""
    def __init__(self):
        # Maps attribute name -> swagger type; used by the API client when
        # (de)serializing payloads.
        self.swaggerTypes = {
            'result': 'UpdateUserResult',
            'status': 'str',
            'error_message': 'str',
            'composedOn': 'int'
        }
        # Every field starts unset; the API client fills them in.
        for attribute in ('result', 'status', 'error_message', 'composedOn'):
            setattr(self, attribute, None)
| {
"content_hash": "fc324a4d04ec619512e05a9ed780656f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 30.37837837837838,
"alnum_prop": 0.6432384341637011,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "bfed8e15359d40d88049c65aa3b70a02b4a7d9ea",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupdocs/models/UpdateUserResponse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
} |
import os
from chainer import computational_graph
from chainer import configuration
from chainer.training import extension
from chainer import variable
# Default DOT node styles: variables render as grey octagons, functions as
# blue records (see the dump_graph() docstring below for how to override).
_var_style = {'shape': 'octagon', 'fillcolor': '#E0E0E0', 'style': 'filled'}
_func_style = {'shape': 'record', 'fillcolor': '#6495ED', 'style': 'filled'}
def dump_graph(root_name, out_name='cg.dot',
               variable_style=None, function_style=None):
    """Returns a trainer extension to dump a computational graph.
    This extension dumps a computational graph. The graph is output in DOT
    language.
    It only dumps a graph at the first invocation.
    .. note::
       The computational graph is not kept by default. This
       extension changes this behavior until the first invocation. **It is
       strongly recommended to use it with the default trigger setting.**
       The detailed behavior of this extension is as follows.
       1. In its initializer, it turns on the
          ``chainer.config.keep_graph_on_report`` flag.
       2. At the first iteration, it dumps the graph using the graph held by
          the reported variable.
       3. After dumping the graph, it turns off the flag (if it was originally
          turned off) so that any variable reported afterward does not hold
          a computational graph.
       When the ``keep_graph_on_report`` flag is turned on, the computational
       graph created by the updater is kept during the invocation of
       extensions. It will cause an unnecessarily large memory consumption
       when an extension also uses a large amount of memory, e.g.
       :class:`~chainer.training.extensions.Evaluator`.
       With the default setting, the ``dump_graph`` extension is called at the
       first iteration. Since :class:`~chainer.training.extensions.Evaluator`
       is not called at the first iteration in most cases, it does not cause
       any memory problem.
    Args:
        root_name (str): Name of the root of the computational graph. The
            root variable is retrieved by this name from the observation
            dictionary of the trainer.
        out_name (str): Output file name.
        variable_style (dict): Dot node style for variables. Each variable is
            rendered by an octagon by default.
        function_style (dict): Dot node style for functions. Each function is
            rendered by a rectangular by default.
    .. seealso::
       See :func:`~chainer.computational_graph.build_computational_graph`
       for the ``variable_style`` and ``function_style`` arguments.
    """
    def trigger(trainer):
        # Fire exactly once, at the first iteration.
        return trainer.updater.iteration == 1
    if variable_style is None:
        variable_style = _var_style
    if function_style is None:
        function_style = _func_style
    # One-element list acts as a mutable cell so the nested functions can
    # write the saved flag value without ``nonlocal``.
    original_flag = [None]
    def initializer(_):
        # Remember the user's flag so it can be restored after the dump.
        original_flag[0] = configuration.config.keep_graph_on_report
        configuration.config.keep_graph_on_report = True
    @extension.make_extension(trigger=trigger, initializer=initializer)
    def dump_graph(trainer):
        # Inner extension intentionally shadows the factory's name so the
        # registered extension is reported as ``dump_graph``.
        try:
            var = trainer.observation[root_name]
            if not isinstance(var, variable.Variable):
                raise TypeError('root value is not a Variable')
            cg = computational_graph.build_computational_graph(
                [var],
                variable_style=variable_style,
                function_style=function_style
            ).dump()
            out_path = os.path.join(trainer.out, out_name)
            # TODO(beam2d): support outputting images by the dot command
            with open(out_path, 'w') as f:
                f.write(cg)
        finally:
            # Always restore the flag, even if the dump failed.
            configuration.config.keep_graph_on_report = original_flag[0]
    return dump_graph
| {
"content_hash": "ffd023623dbde40894ffc94101b5b7de",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 39.229166666666664,
"alnum_prop": 0.6577270313329793,
"repo_name": "jnishi/chainer",
"id": "5d3d02a103a494d56fea460ed571754ed81cfb84",
"size": "3766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/training/extensions/computational_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1460543"
},
{
"name": "CMake",
"bytes": "42279"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5121452"
},
{
"name": "Shell",
"bytes": "22130"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import webnotes
import inspect, os, json, datetime, shutil
from jinja2 import Environment, FileSystemLoader
from webnotes.modules import get_doc_path, get_module_path, scrub
from webnotes.utils import get_path, get_base_path
class DocType:
	"""Controller for the documentation generator doctype."""

	def __init__(self, d, dl):
		self.doc = d
		self.doclist = dl

	def onload(self):
		# build the documentation pages via prepare_docs() (defined
		# elsewhere in this module) when the form is loaded
		prepare_docs()
gh_prefix = "https://github.com/webnotes/"  # base of all generated GitHub source links
@webnotes.whitelist()
def get_docs(options):
	"""Build and return the docs tree selected by the client.

	``options`` is a JSON string with boolean flags build_server_api,
	build_modules (plus module_name) and build_pages.
	"""
	docs = {}
	options = webnotes._dict(json.loads(options))
	if options.build_server_api:
		get_docs_for(docs, "webnotes")
	if options.build_modules:
		docs["modules"] = get_modules(options.module_name)
	if options.build_pages:
		docs["pages"] = get_static_pages()
	return docs
def get_static_pages():
	"""Collect .md files under lib/docs and app/docs.

	Each file must start with ``--- {json headers} ---`` followed by the
	markdown body; returns {filename-without-.md: headers-dict}.
	"""
	mydocs = {}
	for repo in ("lib", "app"):
		for basepath, folders, files in os.walk(get_path(repo, "docs")):
			for fname in files:
				if fname.endswith(".md"):
					fpath = get_path(basepath, fname)
					with open(fpath, "r") as docfile:
						src = unicode(docfile.read(), "utf-8")
						try:
							# split "--- headers --- body"; headers are JSON
							temp, headers, body = src.split("---", 2)
							d = json.loads(headers)
						except Exception, e:
							webnotes.msgprint("Bad Headers in: " + fname)
							continue
						d["_intro"] = body
						d["_gh_source"] = get_gh_url(fpath)
						d["_modified"] = get_timestamp(fpath)
						# key by filename minus the ".md" extension
						mydocs[fname[:-3]] = d
	return mydocs
def get_docs_for(docs, name):
	"""build docs for python module (or dotted class path), recursively"""
	import importlib
	classname = ""
	parts = name.split(".")
	if not parts[-1] in docs:
		docs[parts[-1]] = {}
	mydocs = docs[parts[-1]]
	try:
		obj = importlib.import_module(name)
	except ImportError:
		# not a module: treat the last dotted part as a class name inside
		# the parent module
		name, classname = ".".join(parts[:-1]), parts[-1]
		module = importlib.import_module(name)
		obj = getattr(module, classname)
	inspect_object_and_update_docs(mydocs, obj)
	# if filename is __init__, list python files and folders with init in folder as _toc
	if hasattr(obj, "__file__") and os.path.basename(obj.__file__).split(".")[0]=="__init__":
		mydocs["_toc"] = []
		dirname = os.path.dirname(obj.__file__)
		for fname in os.listdir(dirname):
			fpath = os.path.join(dirname, fname)
			if os.path.isdir(fpath):
				# append if package
				if "__init__.py" in os.listdir(fpath):
					mydocs["_toc"].append(name + "." + fname)
			elif fname.endswith(".py") and not fname.startswith("__init__") and \
				not fname.startswith("test_"):
				# append if module
				mydocs["_toc"].append(name + "." + fname.split(".")[0])
	# recurse into everything listed in the table of contents
	if mydocs.get("_toc"):
		for name in mydocs["_toc"]:
			get_docs_for(mydocs, name)
	return mydocs
def inspect_object_and_update_docs(mydocs, obj):
	"""Introspect a module or class and record its docs into ``mydocs``."""
	mydocs["_toc"] = getattr(obj, "_toc", "")
	if inspect.ismodule(obj):
		obj_module = obj
		mydocs["_type"] = "module"
	else:
		obj_module = inspect.getmodule(obj)
		mydocs["_type"] = "class"
	mydocs["_icon"] = "code"
	mydocs["_gh_source"] = get_gh_url(obj_module.__file__)
	mydocs["_modified"] = get_timestamp(obj_module.__file__)
	if not mydocs.get("_intro"):
		mydocs["_intro"] = getattr(obj, "__doc__", "")
	for name in dir(obj):
		try:
			value = getattr(obj, name)
		except AttributeError, e:
			value = None
		if value:
			# only document members actually defined here, not inherited
			# or re-exported ones
			if (mydocs["_type"]=="module" and inspect.getmodule(value)==obj)\
				or (mydocs["_type"]=="class" and getattr(value, "im_class", None)==obj):
				if inspect.ismethod(value) or inspect.isfunction(value):
					mydocs[name] = {
						"_type": "function",
						"_args": inspect.getargspec(value)[0],
						"_help": getattr(value, "__doc__", ""),
						"_source": inspect.getsource(value)
					}
				elif inspect.isclass(value):
					# nested classes go into the TOC for recursion
					if not mydocs.get("_toc"):
						mydocs["_toc"] = []
					mydocs["_toc"].append(obj.__name__ + "." + value.__name__)
def get_gh_url(path):
	"""Map a local source path to its GitHub URL.

	/lib/ paths map to the wnframework repo, everything else to erpnext;
	paths containing a '.' are assumed to be files (blob), others trees.
	"""
	sep = "/lib/" if "/lib/" in path else "/app/"
	repo = "wnframework" if sep == "/lib/" else "erpnext"
	kind = "/blob" if "." in path else "/tree"
	url = gh_prefix + repo + kind + "/master/" + path.split(sep)[1]
	# compiled modules link to their source file
	if url.endswith(".pyc"):
		url = url[:-1]
	return url
def get_modules(for_module=None):
	"""Build the docs tree for one module (or for every Module Def in the DB).

	Each module entry aggregates its doctypes, pages and stand-alone python
	modules found by walking the module folder on disk.
	"""
	import importlib
	docs = {
		"_label": "Modules"
	}
	if for_module:
		modules = [for_module]
	else:
		modules = webnotes.conn.sql_list("select name from `tabModule Def` order by name")
	docs["_toc"] = ["docs.dev.modules." + d for d in modules]
	for m in modules:
		prefix = "docs.dev.modules." + m
		mydocs = docs[m] = {
			"_icon": "th",
			"_label": m,
			"_toc": [
				prefix + ".doctype",
				prefix + ".page",
				prefix + ".py_modules"
			],
			"doctype": get_doctypes(m),
			"page": get_pages(m),
			#"report": {},
			"py_modules": {
				"_label": "Independent Python Modules for " + m,
				"_toc": []
			}
		}
		# add stand alone modules
		module_path = get_module_path(m)
		prefix = prefix + ".py_modules."
		for basepath, folders, files in os.walk(module_path):
			for f in files:
				# skip doctype/page controllers (file named after its folder)
				# and dunder files like __init__.py
				if f.endswith(".py") and \
					(not f.split(".")[0] in os.path.split(basepath)) and \
					(not f.startswith("__")):
					module_name = ".".join(os.path.relpath(os.path.join(basepath, f),
						"../app").split(os.path.sep))[:-3]
					# import module
					try:
						module = importlib.import_module(module_name)
						# create a new namespace for the module
						module_docs = mydocs["py_modules"][f.split(".")[0]] = {}
						# add to toc
						mydocs["py_modules"]["_toc"].append(prefix + f.split(".")[0])
						inspect_object_and_update_docs(module_docs, module)
					except TypeError, e:
						webnotes.errprint("TypeError in importing " + module_name)
					except IndentationError, e:
						continue
					# NOTE(review): if the import above raised TypeError,
					# module_docs still refers to the previous file's dict (or
					# is unbound for the very first file) — confirm intended
					module_docs["_label"] = module_name
					module_docs["_function_namespace"] = module_name
		update_readme(docs[m], m)
		docs[m]["_gh_source"] = get_gh_url(module_path)
	return docs
def get_pages(m):
	"""Build docs entries for every Page record belonging to module ``m``.

	Each page gets its README intro plus, when importable, the inspected
	contents of its python controller module.
	"""
	import importlib
	pages = webnotes.conn.sql_list("""select name from tabPage where module=%s""", m)
	prefix = "docs.dev.modules." + m + ".page."
	docs = {
		"_icon": "file-alt",
		"_label": "Pages",
		"_toc": [prefix + d for d in pages]
	}
	for p in pages:
		page = webnotes.doc("Page", p)
		mydocs = docs[p] = {
			"_label": page.title or p,
			"_type": "page",
		}
		update_readme(mydocs, m, "page", p)
		mydocs["_modified"] = page.modified
		# controller
		page_name = scrub(p)
		try:
			# controller module lives at <module>/page/<name>/<name>.py
			page_controller = importlib.import_module(scrub(m) + ".page." + page_name + "." + page_name)
			inspect_object_and_update_docs(mydocs, page_controller)
		except ImportError, e:
			# pages without a python controller are fine
			pass
	return docs
def get_doctypes(m):
	"""Build docs entries for every DocType of module ``m``.

	For each doctype this collects: README intro, child-table/link
	relationships, the field model, standard permissions, the server
	controller (inspected live) and, when present, the client controller JS.
	"""
	doctypes = webnotes.conn.sql_list("""select name from
		tabDocType where module=%s order by name""", m)
	prefix = "docs.dev.modules." + m + ".doctype."
	docs = {
		"_icon": "th",
		"_label": "DocTypes",
		"_toc": [prefix + d for d in doctypes]
	}
	for d in doctypes:
		meta = webnotes.get_doctype(d)
		# meta_p: processed metadata, includes compiled __js client code
		meta_p = webnotes.get_doctype(d, True)
		doc_path = get_doc_path(m, "DocType", d)
		mydocs = docs[d] = {
			"_label": d,
			"_icon": meta[0].icon,
			"_type": "doctype",
			"_gh_source": get_gh_url(doc_path),
			"_toc": [
				prefix + d + ".model",
				prefix + d + ".permissions",
				prefix + d + ".controller_server"
			],
		}
		update_readme(mydocs, m, "DocType", d)
		# parents and links: scan all DocFields that point at this doctype
		links, parents = [], []
		for df in webnotes.conn.sql("""select * from tabDocField where options=%s""",
			d, as_dict=True):
			if df.parent:
				if df.fieldtype=="Table":
					parents.append(df.parent)
				if df.fieldtype=="Link":
					links.append(df.parent)
		if parents:
			mydocs["_intro"] += "\n\n#### Child Table Of:\n\n- " + "\n- ".join(list(set(parents))) + "\n\n"
		if links:
			mydocs["_intro"] += "\n\n#### Linked In:\n\n- " + "\n- ".join(list(set(links))) + "\n\n"
		if meta[0].issingle:
			mydocs["_intro"] += "\n\n#### Single DocType\n\nThere is no table for this DocType and the values of the Single instance are stored in `tabSingles`"
		# model
		modeldocs = mydocs["model"] = {
			"_label": d + " Model",
			"_icon": meta[0].icon,
			"_type": "model",
			"_intro": "Properties and fields for " + d,
			"_gh_source": get_gh_url(os.path.join(doc_path, scrub(d) + ".txt")),
			"_fields": [df.fields for df in meta.get({"doctype": "DocField"})],
			"_properties": meta[0].fields,
			"_modified": meta[0].modified
		}
		# permissions: read from the on-disk .txt doclist, not the DB
		from webnotes.modules.utils import peval_doclist
		with open(os.path.join(doc_path,
			scrub(d) + ".txt"), "r") as txtfile:
			doclist = peval_doclist(txtfile.read())
		permission_docs = mydocs["permissions"] = {
			"_label": d + " Permissions",
			"_type": "permissions",
			"_icon": meta[0].icon,
			"_gh_source": get_gh_url(os.path.join(doc_path, scrub(d) + ".txt")),
			"_intro": "Standard Permissions for " + d + ". These can be changed by the user.",
			"_permissions": [p for p in doclist if p.doctype=="DocPerm"],
			"_modified": doclist[0]["modified"]
		}
		# server controller: instantiate a bean to get the live controller class
		server_controller_path = os.path.join(doc_path, scrub(d) + ".py")
		controller_docs = mydocs["controller_server"] = {
			"_label": d + " Server Controller",
			"_type": "_class",
			"_gh_source": get_gh_url(server_controller_path)
		}
		b = webnotes.bean([{"doctype": d}])
		b.make_controller()
		if not getattr(b.controller, "__doc__"):
			b.controller.__doc__ = "Controller Class for handling server-side events for " + d
		inspect_object_and_update_docs(controller_docs, b.controller)
		# client controller (only when compiled JS exists for this doctype)
		if meta_p[0].fields.get("__js"):
			client_controller_path = os.path.join(doc_path, scrub(d) + ".js")
			if(os.path.exists(client_controller_path)):
				mydocs["_toc"].append(prefix + d + ".controller_client")
				client_controller = mydocs["controller_client"] = {
					"_label": d + " Client Controller",
					"_icon": meta[0].icon,
					"_type": "controller_client",
					"_gh_source": get_gh_url(client_controller_path),
					"_modified": get_timestamp(client_controller_path),
					"_intro": "Client side triggers and functions for " + d,
					"_code": meta_p[0].fields["__js"],
					"_fields": [d.fieldname for d in meta_p if d.doctype=="DocField"]
				}
	return docs
def update_readme(mydocs, module, doctype=None, name=None):
	"""Load README.md into ``mydocs["_intro"]`` for a module or doctype folder.

	When a README exists its modification date is recorded in ``_modified``;
	otherwise the intro is left empty.
	"""
	if doctype:
		base_folder = get_doc_path(module, doctype, name)
	else:
		base_folder = get_module_path(module)
	readme_path = os.path.join(base_folder, "README.md")
	# default to an empty intro when no README exists
	mydocs["_intro"] = ""
	if not os.path.exists(readme_path):
		return
	with open(readme_path, "r") as readme_file:
		mydocs["_intro"] = readme_file.read()
	mydocs["_modified"] = get_timestamp(readme_path)
def prepare_docs(force=False):
	"""Copy static assets (css, js, fonts, images) into public/docs.

	:param force: delete previously copied asset folders and re-copy them.
	Note: uses shell ``cp`` via os.system, so this is unix-only.
	"""
	os.chdir(get_path("public"))
	if not os.path.exists("docs"):
		os.mkdir("docs")
	if force:
		shutil.rmtree("docs/css")
	if not os.path.exists("docs/css"):
		os.mkdir("docs/css")
		os.mkdir("docs/css/font")
		os.system("cp ../lib/public/css/bootstrap.css docs/css")
		os.system("cp ../lib/public/css/font-awesome.css docs/css")
		os.system("cp ../lib/public/css/font/* docs/css/font")
		os.system("cp ../lib/public/css/prism.css docs/css")
		# clean links in font-awesome: font urls are relative to docs/css now
		with open("docs/css/font-awesome.css", "r") as fontawesome:
			t = fontawesome.read()
			t = t.replace("../lib/css/", "")
		with open("docs/css/font-awesome.css", "w") as fontawesome:
			fontawesome.write(t)
	# copy latest docs.css (always refreshed, even without force)
	os.system("cp ../lib/core/doctype/documentation_tool/docs.css docs/css")
	if force:
		shutil.rmtree("docs/js")
	if not os.path.exists("docs/js"):
		os.mkdir("docs/js")
		os.system("cp ../lib/public/js/lib/bootstrap.min.js docs/js")
		os.system("cp ../lib/public/js/lib/jquery/jquery.min.js docs/js")
		os.system("cp ../lib/public/js/lib/prism.js docs/js")
	if force:
		os.remove("docs/img/splash.svg")
	if not os.path.exists("docs/img/splash.svg"):
		if not os.path.exists("docs/img"):
			os.mkdir("docs/img")
		os.system("cp ../app/public/images/splash.svg docs/img")
@webnotes.whitelist()
def write_docs(data, build_sitemap=None, domain=None):
	"""Render every docs page dict to public/docs/<name>.html.

	:param data: dict of page-name -> page dict (or its JSON string form);
		the "docs" entry is renamed to "index".
	:param build_sitemap: when truthy (and domain given), also write
		sitemap.xml covering all generated html files.
	:param domain: site base url used for sitemap links.
	Administrator-only (whitelisted endpoint).
	"""
	from webnotes.utils import global_date_format
	if webnotes.session.user != "Administrator":
		raise webnotes.PermissionError
	# endpoint may receive the payload as a JSON string (Python 2 basestring)
	if isinstance(data, basestring):
		data = json.loads(data)
	jenv = Environment(loader = FileSystemLoader(webnotes.utils.get_base_path()))
	template = jenv.get_template("app/docs/templates/docs.html")
	# the root page "docs" is published as "index"
	data["index"] = data["docs"]
	data["docs"] = None
	for name, d in data.items():
		if d:
			if not d.get("title"):
				d["title"] = d["_label"]
			if d.get("_parent_page")=="docs.html":
				d["_parent_page"] = "index.html"
			# normalize icon names to the icon- css prefix
			if not d.get("_icon"):
				d["_icon"] = "icon-file-alt"
			if not d["_icon"].startswith("icon-"):
				d["_icon"] = "icon-" + d["_icon"]
			if d.get("_modified"):
				d["_modified"] = global_date_format(d["_modified"])
			with open(get_path("public", "docs", name + ".html"), "w") as docfile:
				if not d.get("description"):
					d["description"] = "Help pages for " + d["title"]
				html = template.render(d)
				docfile.write(html.encode("utf-8", errors="ignore"))
	if build_sitemap and domain:
		if not domain.endswith("/"):
			domain = domain + "/"
		content = ""
		# one <url> entry per generated html file, stamped with its mtime
		for fname in os.listdir(get_path("public", "docs")):
			if fname.endswith(".html"):
				content += sitemap_link_xml % (domain + fname,
					get_timestamp(get_path("public", "docs", fname)))
		with open(get_path("public", "docs", "sitemap.xml"), "w") as sitemap:
			sitemap.write(sitemap_frame_xml % content)
def write_static():
	"""Generate the complete static docs site.

	Links pages into a tree (toc links, parent/child/next navigation,
	breadcrumbs), converts each intro from markdown, then renders html via
	write_docs() and copies assets via prepare_docs().
	"""
	webnotes.session = webnotes._dict({"user":"Administrator"})
	from markdown2 import markdown
	pages = get_static_pages()
	autogenerated_roots = ["docs.dev.framework.server", "docs.dev.framework.client",
		"docs.dev.modules"]
	# build toc
	# NOTE: deleting keys while iterating .items() is safe only in Python 2,
	# where items() returns a list; this would raise in Python 3
	for name, page in pages.items():
		if name in autogenerated_roots:
			del pages[name]
			continue
		# toc: wire up navigation between this page and its children
		if page.get("_toc"):
			prev = None
			page["_toc_links"] = []
			for child in page["_toc"]:
				if child in pages:
					page["_toc_links"].append({
						"link": child + ".html",
						"label": pages[child]["_label"]
					})
					# first child becomes the "start here" link
					if not "_child_title" in page:
						page["_child_title"] = pages[child]["_label"]
						page["_child_page"] = child + ".html"
					pages[child]["_parent_title"] = page["_label"]
					pages[child]["_parent_page"] = name + ".html"
					# chain siblings with next-page links
					if prev:
						prev["_next_title"] = pages[child]["_label"]
						prev["_next_page"] = child + ".html"
					prev = pages[child]
		# breadcrumbs: one link per dotted-name ancestor
		if name!="docs":
			fullname = ""
			page["_breadcrumbs"] = []
			for p in name.split(".")[:-1]:
				fullname = fullname + (fullname and "." or "") + p
				page["_breadcrumbs"].append({
					"link": (fullname=="docs" and "index" or fullname) + ".html",
					"label": pages[fullname]["_label"]
				})
		page["content"] = markdown(page["_intro"])
	write_docs(pages)
	prepare_docs()
def get_timestamp(path):
	"""Return the file's last-modification date formatted as YYYY-MM-DD."""
	mtime = os.path.getmtime(path)
	modified_on = datetime.datetime.fromtimestamp(mtime)
	return modified_on.strftime("%Y-%m-%d")
# Skeleton for sitemap.xml: the frame wraps the concatenated <url> entries
# produced from sitemap_link_xml (one per generated html page).
sitemap_frame_xml = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">%s
</urlset>"""

# One sitemap entry: (absolute page url, YYYY-MM-DD last-modified date)
sitemap_link_xml = """\n<url><loc>%s</loc><lastmod>%s</lastmod></url>"""

# Regenerate the whole static docs site when run directly
if __name__=="__main__":
	write_static()
| {
"content_hash": "0cb98d9dd54b39958c871e4037241bcb",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 151,
"avg_line_length": 29.59491193737769,
"alnum_prop": 0.6138993585928718,
"repo_name": "rohitw1991/latestadbwnf",
"id": "b052672cedefaec7f56ac8780d1326a00c936ffe",
"size": "15261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/doctype/documentation_tool/documentation_tool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "213083"
},
{
"name": "JavaScript",
"bytes": "1686472"
},
{
"name": "Python",
"bytes": "523801"
}
],
"symlink_target": ""
} |
"""Tests for augment.inline."""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
import unittest
from pasta.augment import inline
from pasta.base import test_utils
class InlineTest(test_utils.TestCase):
  """Tests for inline.inline_name: replacing reads of a module-level constant
  with its literal value and removing the assignment."""

  def test_inline_simple(self):
    # the assignment to x is removed and its value substituted
    src = 'x = 1\na = x\n'
    t = ast.parse(src)
    inline.inline_name(t, 'x')
    self.checkAstsEqual(t, ast.parse('a = 1\n'))

  def test_inline_multiple_targets(self):
    # only the inlined target is dropped from a chained assignment
    src = 'x = y = z = 1\na = x + y\n'
    t = ast.parse(src)
    inline.inline_name(t, 'y')
    self.checkAstsEqual(t, ast.parse('x = z = 1\na = x + 1\n'))

  def test_inline_multiple_reads(self):
    # every read site is replaced, including default argument values
    src = textwrap.dedent('''\
        CONSTANT = "foo"
        def a(b=CONSTANT):
          return b == CONSTANT
        ''')
    expected = textwrap.dedent('''\
        def a(b="foo"):
          return b == "foo"
        ''')
    t = ast.parse(src)
    inline.inline_name(t, 'CONSTANT')
    self.checkAstsEqual(t, ast.parse(expected))

  def test_inline_non_constant_fails(self):
    # a name that is reassigned (augmented) is not a constant
    src = textwrap.dedent('''\
        NOT_A_CONSTANT = "foo"
        NOT_A_CONSTANT += "bar"
        ''')
    t = ast.parse(src)
    # assertRaisesRegexp is the py2-compatible alias of assertRaisesRegex
    with self.assertRaisesRegexp(inline.InlineError,
                                 '\'NOT_A_CONSTANT\' is not a constant'):
      inline.inline_name(t, 'NOT_A_CONSTANT')

  def test_inline_function_fails(self):
    # only simple value bindings can be inlined, not function definitions
    src = 'def func(): pass\nfunc()\n'
    t = ast.parse(src)
    with self.assertRaisesRegexp(
        inline.InlineError,
        '\'func\' is not a constant; it has type %r' % ast.FunctionDef):
      inline.inline_name(t, 'func')

  def test_inline_conditional_fails(self):
    # names bound inside a conditional are not safe to inline
    src = 'if define:\n  x = 1\na = x\n'
    t = ast.parse(src)
    with self.assertRaisesRegexp(inline.InlineError,
                                 '\'x\' is not a top-level name'):
      inline.inline_name(t, 'x')

  def test_inline_non_assign_fails(self):
    # tuple-unpacking targets are not plain assignments
    src = 'CONSTANT1, CONSTANT2 = values'
    t = ast.parse(src)
    with self.assertRaisesRegexp(
        inline.InlineError, '\'CONSTANT1\' is not declared in an assignment'):
      inline.inline_name(t, 'CONSTANT1')
def suite():
  """Return a TestSuite containing all tests in this module.

  Uses TestLoader.loadTestsFromTestCase (available since Python 2.7):
  unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13.
  """
  result = unittest.TestSuite()
  result.addTests(
      unittest.defaultTestLoader.loadTestsFromTestCase(InlineTest))
  return result
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "9a5a19683eb2ac443b5ee49d4bd62caf",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 30.375,
"alnum_prop": 0.6340877914951989,
"repo_name": "google/pasta",
"id": "8299a5e9d809643614a8be0d4f9d911103ca1da7",
"size": "2931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pasta/augment/inline_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "191244"
}
],
"symlink_target": ""
} |
import sys, getopt
from github import Github
def main(argv):
opts, args = getopt.getopt(argv,"r:p:t:")
for opt, arg in opts:
if opt == "-p":
pull = int(arg)
elif opt == "-r":
repo = arg.strip()
elif opt == "-t":
token = arg.strip()
print pull, repo
# First create a Github instance:
g = Github(token)
# post a comment to an issue
if g.get_repo(repo).get_issue(pull).assignee == None:
g.get_repo(repo).get_issue(pull).create_comment("You forgot to assign your PR! It's a muffin offence, you know...")
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "550cc372befad95e6e533758e91e7a3e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 123,
"avg_line_length": 25.115384615384617,
"alnum_prop": 0.555895865237366,
"repo_name": "AleksueiR/glitch",
"id": "aee3197fb3e3b2287c2198e7f391a1b767a99945",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/check-pr-assignee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "CoffeeScript",
"bytes": "19456"
},
{
"name": "JavaScript",
"bytes": "1255"
},
{
"name": "Python",
"bytes": "1300"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
import openpnm as op
ws = op.Workspace()
class LabelTest:
    """Checks the automatic pore labels (left/top/surface, ...) created by
    OpenPNM's cubic and Bravais network generators."""

    def setup_class(self):
        pass

    def teardown_class(self):
        # drop all projects created during the tests from the workspace
        ws.clear()

    def test_bravais_fcc(self):
        # face-centered cubic: 5x5 corner sites per face -> 25 'left' pores
        net = op.network.FaceCenteredCubic(shape=[4, 4, 4])
        assert 'pore.surface' in net.keys()
        assert net.num_pores('left') == 25
        # xnor: pores carrying BOTH labels (the shared edge)
        assert net.num_pores(['left', 'top'], mode='xnor') == 4

    def test_bravais_bcc(self):
        net = op.network.BodyCenteredCubic(shape=[4, 4, 4])
        assert 'pore.surface' in net.keys()
        assert net.num_pores('left') == 16
        assert net.num_pores(['left', 'top'], mode='xnor') == 4

    def test_bravais_sc(self):
        # simple cubic lattice
        net = op.network.Cubic(shape=[4, 4, 4])
        assert 'pore.surface' in net.keys()
        assert net.num_pores('left') == 16
        assert net.num_pores(['left', 'top'], mode='xnor') == 4

    def test_cubic_2d(self):
        # flat networks must omit the labels of the collapsed axis
        net = op.network.Cubic(shape=[5, 5, 1])
        assert 'pore.top' not in net.labels()
        assert 'pore.bottom' not in net.labels()
        assert net.num_pores('surface') == 16
        net = op.network.Cubic(shape=[5, 1, 5])
        assert 'pore.front' not in net.labels()
        assert 'pore.back' not in net.labels()
        assert net.num_pores('surface') == 16
        net = op.network.Cubic(shape=[1, 5, 5])
        assert 'pore.left' not in net.labels()
        assert 'pore.right' not in net.labels()
        assert net.num_pores('surface') == 16
if __name__ == '__main__':
t = LabelTest()
t.setup_class()
self = t
for item in t.__dir__():
if item.startswith('test'):
print(f'Running test: {item}')
t.__getattribute__(item)()
| {
"content_hash": "0e84b7f85ab6b81177d2d0b25ac6d30a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 63,
"avg_line_length": 32,
"alnum_prop": 0.5589622641509434,
"repo_name": "PMEAL/OpenPNM",
"id": "5a157565ed32defb4a2feb179c197e444cfa0eae",
"size": "1696",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/unit/network/LabelTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "1437146"
}
],
"symlink_target": ""
} |
"""Bootstrap the Django environment for scripts run outside manage.py."""
import os, sys

from django.core.management import setup_environ

# Add root project directory to import path
sys.path.append(os.path.abspath('.'))

# Include settings, so it's not necessary to set DJANGO_SETTINGS_MODULE
# NOTE(review): setup_environ was removed in Django 1.6; this assumes an
# older Django release — confirm the project's pinned version.
import settings
setup_environ(settings)
| {
"content_hash": "f2eac99314cbe50988dd66282039aa97",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 26.1,
"alnum_prop": 0.7854406130268199,
"repo_name": "wapcaplet/vittles",
"id": "fb20717d939573ce3cd9801f2fb1094ad242bce0",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terrain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "28100"
},
{
"name": "Python",
"bytes": "102609"
}
],
"symlink_target": ""
} |
"""Openweathermap service"""
import time
import threading
import urllib.error
import urllib.request
import json
import datetime
from service.openweather_codes import CODES
import socket
class Openweather(threading.Thread):
    """Polling thread that fetches current weather and daily forecasts for a
    set of cities from the OpenWeatherMap API.

    Decoded readings are served by :meth:`weather` and :meth:`forecast`;
    raw API payloads by :meth:`get_raw_data`.
    """

    def __init__(self, cities, apikey):
        """
        :param cities: dict mapping OpenWeatherMap city ids to display names
        :param apikey: OpenWeatherMap API key
        """
        threading.Thread.__init__(self)
        self.cities = cities
        self.work = True
        # run() sleeps `sleep` seconds per tick and refreshes when the
        # private counters exceed their thresholds (current weather about
        # every 10 minutes, forecast about every hour).
        self.tick = {
            'sleep': 10,
            'weather_counter': 6*10,
            'forecast_counter': 6*60
        }
        # Start above the thresholds so the first iteration fetches at once.
        self.tick['_wcounter'] = self.tick['weather_counter'] + 1
        self.tick['_fcounter'] = self.tick['forecast_counter'] + 1
        # Zeroed reading served until real data has arrived.
        self.weather_row_stub = {
            'update': 0,
            'temperature_min': 0,
            'temperature_max': 0,
            'temperature_current': 0,
            'humidity': 0,
            'pressure': 0,
            'wind_speed': 0,
            'wind_deg': 0,
            'clouds': 0,
            'weather_id': 0,
            'weather': ''
        }
        self.current_weather_raw = {i: "" for i in cities}
        self.current_weather = {
            i: self.weather_row_stub.copy() for i in cities
        }
        self.forecast_weather_raw = {i: {} for i in cities}
        self.forecast_weather = {i: {} for i in cities}
        self.url_current = (
            "http://api.openweathermap.org/data/2.5/weather?"
            "id=%CITY_ID%&units=metric&mode=json&APPID="+apikey
        )
        self.url_forecast = (
            "http://api.openweathermap.org/data/2.5/forecast/"
            "daily?id=%CITY_ID%&mode=json&units=metric&cnt=4&APPID="+apikey
        )

    def run(self):
        """Main loop: fetch and decode data periodically until stop()."""
        while self.work:
            if self.tick['_wcounter'] > self.tick['weather_counter']:
                # current weather for every configured city
                for city_id in self.cities:
                    url = self.url_current.replace("%CITY_ID%", str(city_id))
                    json_data = self._fetch_data(url)
                    if json_data:
                        self.current_weather_raw[city_id] = json_data
                        self._decode(city_id)
                self.tick['_wcounter'] = 0
            if self.tick['_fcounter'] > self.tick['forecast_counter']:
                # daily forecast, cached per city keyed by "YYYY-MM-DD"
                for city_id in self.cities:
                    url = self.url_forecast.replace("%CITY_ID%", str(city_id))
                    json_data = self._fetch_data(url)
                    if json_data:
                        for row in json_data['list']:
                            date = (datetime.datetime.fromtimestamp(int(row['dt'])))\
                                .strftime("%Y-%m-%d")
                            self.forecast_weather_raw[city_id][date] = row
                            self.forecast_weather[city_id][date] = self._decode_forecast(row)
                self.tick['_fcounter'] = 0
            time.sleep(self.tick['sleep'])
            self.tick['_wcounter'] += 1
            self.tick['_fcounter'] += 1

    def stop(self):
        """Ask the thread to exit after the current sleep interval."""
        self.work = False

    def get_raw_data(self, city_id=None):
        """Return the raw (undecoded) current-weather payload.

        :param city_id: city to look up; when omitted, an arbitrary
            (first) city's payload is returned.
        """
        if city_id is None:
            # BUGFIX: the original called dict.itervalues().next(), which is
            # Python 2 API and raises AttributeError on Python 3 dicts.
            return next(iter(self.current_weather_raw.values()))
        else:
            return self.current_weather_raw[city_id]

    def weather(self, city_id=None):
        """Return the decoded current weather for a city.

        Defaults to the first configured city when *city_id* is falsy.
        """
        if not city_id:
            city_id = list(self.cities.keys())[0]
        return self.current_weather[city_id]

    def forecast(self, city_id=None, date=None):
        """Return the decoded forecast for *date* (default: tomorrow).

        Falls back to the zeroed stub when no forecast is cached yet.
        """
        if not city_id:
            city_id = list(self.cities.keys())[0]
        if date is None:
            date = time.strftime(
                "%Y-%m-%d", time.localtime(time.time() + 24*3600)
            )
        if date not in self.forecast_weather[city_id]:
            return self.weather_row_stub
        return self.forecast_weather[city_id][date]

    def _fetch_data(self, url):
        """Fetch and JSON-decode *url*; return None (and log) on any error.

        NOTE(review): no explicit timeout is passed to urlopen although
        socket.timeout is handled — presumably a global default timeout is
        configured elsewhere; confirm.
        """
        try:
            request = urllib.request.Request(
                url, None, {'User-Agent': 'RaspberryPI / Doton'}
            )
            response = urllib.request.urlopen(request)
            data = response.read()
            json_data = json.loads(data.decode())
        except urllib.error.URLError as e:
            json_data = None
            print(
                time.strftime("%Y-%m-%d %H:%M:%S"),
                "error fetching from url",
                url,
                "\nreason", e.reason
            )
        except socket.timeout as e:
            json_data = None
            print(
                time.strftime("%Y-%m-%d %H:%M:%S"),
                "time out error from url",
                url,
                "\nreason", e.strerror
            )
        except ValueError:
            json_data = None
            print("Decode failed")
        return json_data

    def _decode(self, city_id):
        """Decode the raw current-weather payload for *city_id* into the
        flat dict served by :meth:`weather`."""
        self.current_weather[city_id] = {
            'temperature_current': self.current_weather_raw[city_id]['main']['temp'],
            'humidity': self.current_weather_raw[city_id]['main']['humidity'],
            'pressure': self.current_weather_raw[city_id]['main']['pressure'],
            'wind_speed': self.current_weather_raw[city_id]['wind']['speed'],
            # wind direction is absent from some payloads; default to 0
            'wind_deg': self.current_weather_raw[city_id]['wind']['deg'] if 'deg' in self.current_weather_raw[city_id]['wind'] else 0,
            'weather_id': self.current_weather_raw[city_id]['weather'][0]['id'],
            'weather': CODES[self.current_weather_raw[city_id]['weather'][0]['id']],
            'clouds': self.current_weather_raw[city_id]['clouds']['all'],
            'update': self.current_weather_raw[city_id]['dt']
        }

    def _decode_forecast(self, row):
        """Decode one daily-forecast row into the flat dict served by
        :meth:`forecast`."""
        return {
            'temperature_min': row['temp']['min'],
            'temperature_max': row['temp']['max'],
            'humidity': row['humidity'],
            'pressure': row['pressure'],
            'wind_speed': row['speed'],
            'wind_deg': row['deg'],
            'weather_id': row['weather'][0]['id'],
            'weather': CODES[row['weather'][0]['id']],
            'clouds': row['clouds']
        }
| {
"content_hash": "3e8c4117912cca4ece7529b767c31af8",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 134,
"avg_line_length": 36.822857142857146,
"alnum_prop": 0.5049658597144631,
"repo_name": "bkosciow/doton",
"id": "10c75fa7204534f5f15d445d1fa4dbf3ebd4f60a",
"size": "6444",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "service/openweather.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56249"
}
],
"symlink_target": ""
} |
__author__ = 'Gennady Kovalev <gik@bigur.ru>'
__copyright__ = '(c) 2016-2019 Development management business group'
__licence__ = 'For license information see LICENSE'
import datetime
def new(days=0, seconds=0, microseconds=0, milliseconds=0,
        minutes=0, hours=0, weeks=0):
    """Construct a ``datetime.timedelta`` from the given components.

    All arguments default to 0, mirroring the timedelta constructor.
    """
    return datetime.timedelta(
        days=days,
        seconds=seconds,
        microseconds=microseconds,
        milliseconds=milliseconds,
        minutes=minutes,
        hours=hours,
        weeks=weeks,
    )
# Registry consumed by the extension loader: exposes ``new`` under the
# ``python_timedelta`` namespace.
__extension__ = {
    'python_timedelta': {
        'new': new,
    }
}
| {
"content_hash": "678e377d4f431263c5b144be13719d3d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 72,
"avg_line_length": 22.782608695652176,
"alnum_prop": 0.5801526717557252,
"repo_name": "belolap/esl",
"id": "de94708361e027a558eccedcaa0a52dce6fef70c",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esl/extensions/python_timedelta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65826"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField
from clothstream.styletags.models import ItemStyleTag, StyleTag
class StyleTagSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing a StyleTag's id and name."""

    class Meta:
        model = StyleTag
        fields = ('id', 'name',)
class ItemStyleTagSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer attaching a style tag to an item.

    Both fields are write-only: ``item`` is referenced by primary key,
    ``styletag`` by its ``name`` slug.
    """
    item = PrimaryKeyRelatedField(write_only=True)
    styletag = SlugRelatedField(write_only=True, slug_field='name')

    class Meta:
        model = ItemStyleTag
        fields = ('item', 'styletag')
| {
"content_hash": "b0c3c2caf10fbb1300f66090ac621b6e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 33.22222222222222,
"alnum_prop": 0.754180602006689,
"repo_name": "julienaubert/clothstream",
"id": "c7781f530a92407afd916b81ea8b6602bbf2336b",
"size": "598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clothstream/styletags/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14962"
},
{
"name": "HTML",
"bytes": "38424"
},
{
"name": "JavaScript",
"bytes": "84357"
},
{
"name": "Makefile",
"bytes": "2712"
},
{
"name": "Python",
"bytes": "85665"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
import logging
from google.datacatalog_connectors.commons import utils
from google.api_core import exceptions
from google.cloud import datacatalog
class DataCatalogFacade:
    """Wraps Data Catalog's API calls."""

    __BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
    __DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
    __STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
    __TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP

    # This is the value automatically set up by the GRPC client.
    __DEFAULT_COLUMN_MODE = 'NULLABLE'

    def __init__(self, project_id):
        # project under which entry groups / tag templates are created
        self.__datacatalog = datacatalog.DataCatalogClient()
        self.__project_id = project_id

    def create_entry(self, entry_group_name, entry_id, entry):
        """Creates a Data Catalog Entry.

        :param entry_group_name: Parent Entry Group name.
        :param entry_id: Entry id.
        :param entry: An Entry object.

        :return: The created Entry.
        :raises FailedPrecondition, PermissionDenied: re-raised after logging.
        """
        try:
            entry = self.__datacatalog.create_entry(parent=entry_group_name,
                                                    entry_id=entry_id,
                                                    entry=entry)
            self.__log_entry_operation('created', entry=entry)
            return entry
        except (exceptions.FailedPrecondition,
                exceptions.PermissionDenied) as e:
            entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)
            self.__log_entry_operation('was not created',
                                       entry_name=entry_name)
            raise e

    def get_entry(self, name):
        """Retrieves Data Catalog Entry.

        :param name: The Entry name.
        :return: An Entry object if it exists.
        """
        return self.__datacatalog.get_entry(name=name)

    def lookup_entry(self, linked_resource):
        """Get an Entry by target resource name.

        :param linked_resource: The full name of the resource the Data Catalog
         Entry represents.
        :return: An Entry object if it exists.
        """
        request = datacatalog.LookupEntryRequest()
        request.linked_resource = linked_resource
        return self.__datacatalog.lookup_entry(request=request)

    def update_entry(self, entry):
        """Updates an Entry.

        :param entry: An Entry object.
        :return: The updated Entry.
        """
        # update_mask=None updates all modifiable fields of the entry
        entry = self.__datacatalog.update_entry(entry=entry, update_mask=None)
        self.__log_entry_operation('updated', entry=entry)
        return entry
    def upsert_entry(self, entry_group_name, entry_id, entry):
        """
        Update a Data Catalog Entry if it exists and has been changed.
        Creates a new Entry if it does not exist.

        :param entry_group_name: Parent Entry Group name.
        :param entry_id: Entry id.
        :param entry: An Entry object.

        :return: The updated or created Entry.
        """
        entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)
        try:
            persisted_entry = self.get_entry(entry_name)
            self.__log_entry_operation('already exists', entry_name=entry_name)
            # only call the update API when something actually changed
            if self.__entry_was_updated(persisted_entry, entry):
                persisted_entry = self.update_entry(entry)
            else:
                self.__log_entry_operation('is up-to-date',
                                           entry=persisted_entry)
            return persisted_entry
        except exceptions.PermissionDenied:
            # PermissionDenied is what get_entry raises for a missing entry,
            # so it is treated as "does not exist" and triggers creation
            self.__log_entry_operation('does not exist', entry_name=entry_name)
            persisted_entry = self.create_entry(
                entry_group_name=entry_group_name,
                entry_id=entry_id,
                entry=entry)
            return persisted_entry
        except exceptions.FailedPrecondition as e:
            logging.warning('Entry was not updated: %s', entry_name)
            raise e

    @classmethod
    def __entry_was_updated(cls, current_entry, new_entry):
        # Update time comparison allows to verify whether the entry was
        # updated on the source system.
        current_update_time = 0
        if current_entry.source_system_timestamps.update_time:
            current_update_time = \
                current_entry.source_system_timestamps.update_time.timestamp()

        new_update_time = 0
        if new_entry.source_system_timestamps.update_time:
            new_update_time = \
                new_entry.source_system_timestamps.update_time.timestamp()

        # a timestamp of 0 means "unknown" and never counts as a change
        updated_time_changed = \
            new_update_time != 0 and current_update_time != new_update_time

        # fall back to a field-by-field comparison when timestamps agree
        return updated_time_changed or not cls.__entries_are_equal(
            current_entry, new_entry)
    @classmethod
    def __entries_are_equal(cls, entry_1, entry_2):
        """Compare the user-specified fields and schemas of two entries."""
        object_1 = utils.ValuesComparableObject()
        object_1.user_specified_system = entry_1.user_specified_system
        object_1.user_specified_type = entry_1.user_specified_type
        object_1.display_name = entry_1.display_name
        object_1.description = entry_1.description
        object_1.linked_resource = entry_1.linked_resource

        object_2 = utils.ValuesComparableObject()
        object_2.user_specified_system = entry_2.user_specified_system
        object_2.user_specified_type = entry_2.user_specified_type
        object_2.display_name = entry_2.display_name
        object_2.description = entry_2.description
        object_2.linked_resource = entry_2.linked_resource

        return object_1 == object_2 and cls.__schemas_are_equal(
            entry_1.schema, entry_2.schema)

    @classmethod
    def __schemas_are_equal(cls, schema_1, schema_2):
        """Compare two entry schemas column-by-column (order-insensitive)."""
        columns_1 = schema_1.columns
        columns_2 = schema_2.columns

        column_names_are_equal = set(
            [column_1.column for column_1 in columns_1]) == \
            set(column_2.column for column_2 in columns_2)

        # No more checks needed if the column names don't match.
        # For example, in case a column is deleted or
        # a new column is created.
        if not column_names_are_equal:
            return False

        for column_2 in columns_2:
            # match columns by name, not position
            column_to_evaluate = next((column_1 for column_1 in columns_1
                                       if column_2.column == column_1.column),
                                      None)
            if not (column_to_evaluate and cls.__column_fields_are_equal(
                    column_to_evaluate, column_2)):
                return False

        return True

    @classmethod
    def __column_fields_are_equal(cls, column_1, column_2):
        """Compare two schema columns by name, description, type and mode.

        NOTE: mutates both input columns in place, assigning the default
        mode ('NULLABLE') when their mode field is empty.
        """
        object_1 = utils.ValuesComparableObject()
        object_1.column = column_1.column
        object_1.description = column_1.description
        object_1.type = column_1.type
        # We need to initialize with the default MODE if it is not fulfilled.
        if not column_1.mode:
            column_1.mode = cls.__DEFAULT_COLUMN_MODE
        object_1.mode = column_1.mode
        # Currently, we simply compare the subcolumns length.
        # The connectors do not handle this field at present.
        object_1.subcolumns_len = len(column_1.subcolumns)

        object_2 = utils.ValuesComparableObject()
        object_2.column = column_2.column
        object_2.description = column_2.description
        object_2.type = column_2.type
        # We need to initialize with the default MODE if it is not fulfilled.
        if not column_2.mode:
            column_2.mode = cls.__DEFAULT_COLUMN_MODE
        object_2.mode = column_2.mode
        # Currently, we simply compare the subcolumns length.
        # The connectors do not handle this field at present.
        object_2.subcolumns_len = len(column_2.subcolumns)

        return object_1 == object_2
def delete_entry(self, name):
"""Deletes a Data Catalog Entry.
:param name: The Entry name.
"""
try:
self.__datacatalog.delete_entry(name=name)
self.__log_entry_operation('deleted', entry_name=name)
except Exception as e:
logging.info(
'An exception ocurred while attempting to'
' delete Entry: %s', name)
logging.debug(str(e))
@classmethod
def __log_entry_operation(cls, description, entry=None, entry_name=None):
formatted_description = 'Entry {}: '.format(description)
logging.info('%s%s', formatted_description,
entry.name if entry else entry_name)
if entry:
logging.info('%s^ [%s] %s', ' ' * len(formatted_description),
entry.user_specified_type, entry.linked_resource)
def create_entry_group(self, location_id, entry_group_id):
"""Creates a Data Catalog Entry Group.
:param location_id: Location id.
:param entry_group_id: Entry Group id.
:return: The created Entry Group.
"""
entry_group = self.__datacatalog.create_entry_group(
parent=f'projects/{self.__project_id}/locations/{location_id}',
entry_group_id=entry_group_id,
entry_group=datacatalog.EntryGroup())
logging.info('Entry Group created: %s', entry_group.name)
return entry_group
    def delete_entry_group(self, name):
        """
        Deletes a Data Catalog Entry Group.

        Unlike ``delete_entry``, errors from the client are propagated
        to the caller rather than being swallowed and logged.

        :param name: The Entry Group name.
        """
        self.__datacatalog.delete_entry_group(name=name)
def create_tag_template(self, location_id, tag_template_id, tag_template):
"""Creates a Data Catalog Tag Template.
:param location_id: Location id.
:param tag_template_id: Tag Template id.
:param tag_template: A Tag Template object.
:return: The created Tag Template.
"""
created_tag_template = self.__datacatalog.create_tag_template(
parent=f'projects/{self.__project_id}/locations/{location_id}',
tag_template_id=tag_template_id,
tag_template=tag_template)
logging.info('Tag Template created: %s', created_tag_template.name)
return created_tag_template
    def get_tag_template(self, name):
        """Retrieves a Data Catalog Tag Template.

        :param name: The Tag Template name.
        :return: A Tag Template object if it exists.
        """
        return self.__datacatalog.get_tag_template(name=name)
def get_tag_field_values_for_search_results(self, query, template,
tag_field, tag_field_type):
"""Retrieves Data Catalog Tag field values for search results.
:param query: Query used on search.
:param template: The Tag Template name.
:param tag_field: The Tag Field name.
:param tag_field_type: The Tag Field type.
:return: List of tag field values.
"""
tag_field_values = []
table_entries_name = \
self.search_catalog_relative_resource_name(query)
for table_entry_name in table_entries_name:
tags = self.list_tags(table_entry_name)
for tag in tags:
if template in tag.template:
field = tag.fields[tag_field]
if self.__STRING_TYPE == tag_field_type:
tag_field_value = field.string_value
elif self.__BOOL_TYPE == tag_field_type:
tag_field_value = field.bool_value
elif self.__DOUBLE_TYPE == tag_field_type:
tag_field_value = field.double_value
elif self.__TIMESTAMP_TYPE == tag_field_type:
tag_field_value = field.timestamp_value
else:
tag_field_value = field.enum_value.display_name
tag_field_values.append(tag_field_value)
return tag_field_values
    def delete_tag_template(self, name):
        """Deletes a Data Catalog Tag Template.

        ``force=True`` is passed so Tags created from the template are
        deleted along with it.

        :param name: The Tag Template name.
        """
        self.__datacatalog.delete_tag_template(name=name, force=True)
        logging.info('Tag Template deleted: %s', name)
    def create_tag(self, entry_name, tag):
        """Creates a Data Catalog Tag.

        :param entry_name: Parent Entry name.
        :param tag: A Tag object.
        :return: The created Tag.
        """
        return self.__datacatalog.create_tag(parent=entry_name, tag=tag)
    def delete_tag(self, tag):
        """Deletes a Data Catalog Tag.

        :param tag: A Tag object (only ``tag.name`` is used).
        :return: Whatever the underlying client call returns.
        """
        return self.__datacatalog.delete_tag(name=tag.name)
    def list_tags(self, entry_name):
        """List Tags for a given Entry.

        :param entry_name: The parent Entry name.
        :return: An iterable of Tag objects. Note this is a gRPC
            iterator/pager, not a plain list; callers materialize it
            before reuse.
        """
        return self.__datacatalog.list_tags(parent=entry_name)
    def update_tag(self, tag):
        """Updates a Tag.

        :param tag: A Tag object.
        :return: The updated Tag.
        """
        # NOTE(review): update_mask=None appears to request a full update
        # of the tag's fields -- confirm against the client docs.
        return self.__datacatalog.update_tag(tag=tag, update_mask=None)
def upsert_tags(self, entry, tags):
"""Updates or creates Tag for a given Entry.
:param entry: The Entry object.
:param tags: A list of Tag objects.
"""
if not tags:
return
persisted_tags = self.list_tags(entry.name)
# Fetch GRPCIterator.
persisted_tags = [tag for tag in persisted_tags]
for tag in tags:
logging.info('Processing Tag from Template: %s ...', tag.template)
tag_to_create = tag
tag_to_update = None
for persisted_tag in persisted_tags:
# The column field is not case sensitive.
if tag.template == persisted_tag.template and \
tag.column.lower() == persisted_tag.column.lower():
tag_to_create = None
tag.name = persisted_tag.name
if not self.__tag_fields_are_equal(tag, persisted_tag):
tag_to_update = tag
break
if tag_to_create:
created_tag = self.create_tag(entry.name, tag_to_create)
logging.info('Tag created: %s', created_tag.name)
elif tag_to_update:
self.update_tag(tag_to_update)
logging.info('Tag updated: %s', tag_to_update.name)
else:
logging.info('Tag is up-to-date: %s', tag.name)
def delete_tags(self, entry, tags, tag_template_name):
"""Deletes Tags for a given Entry if they don't exist
in Data Catalog.
:param entry: The Entry object.
:param tags: A list of Tag objects.
:param tag_template_name: Template name used to filter
templates out, it can be a part of the template name.
"""
persisted_tags = self.list_tags(entry.name)
# Fetch GRPCIterator.
persisted_tags = [tag for tag in persisted_tags]
for persisted_tag in persisted_tags:
logging.info('Processing Tag from Template: %s ...',
persisted_tag.template)
tag_to_delete = None
if tag_template_name in persisted_tag.template:
tag_to_delete = persisted_tag
for tag in tags:
if tag.template == persisted_tag.template and \
tag.column == persisted_tag.column:
tag_to_delete = None
break
if tag_to_delete:
self.delete_tag(tag_to_delete)
logging.info('Tag deleted: %s', tag_to_delete.name)
else:
logging.info('Tag is up-to-date: %s', persisted_tag.name)
@classmethod
def __tag_fields_are_equal(cls, tag_1, tag_2):
for field_id in tag_1.fields:
tag_1_field = tag_1.fields[field_id]
tag_2_field = tag_2.fields.get(field_id)
if tag_2_field is None:
return False
values_are_equal = tag_1_field.bool_value == \
tag_2_field.bool_value
values_are_equal = values_are_equal \
and tag_1_field.double_value == tag_2_field.double_value
values_are_equal = values_are_equal \
and tag_1_field.string_value == tag_2_field.string_value
values_are_equal = values_are_equal \
and cls.__timestamp_tag_fields_are_equal(
tag_1_field, tag_2_field)
values_are_equal = values_are_equal \
and tag_1_field.enum_value.display_name == \
tag_2_field.enum_value.display_name
if not values_are_equal:
return False
return True
@classmethod
def __timestamp_tag_fields_are_equal(cls, tag_1_field, tag_2_field):
if not (tag_1_field.timestamp_value and tag_2_field.timestamp_value):
return True
return tag_1_field.timestamp_value.timestamp() == \
tag_2_field.timestamp_value.timestamp()
def search_catalog(self, query):
"""Searches Data Catalog for a given query.
:param query: The query string.
:return: A Search Result list.
"""
scope = datacatalog.SearchCatalogRequest.Scope()
scope.include_project_ids.append(self.__project_id)
request = datacatalog.SearchCatalogRequest()
request.scope = scope
request.query = query
request.page_size = 1000
return [
result for result in self.__datacatalog.search_catalog(request)
]
    def search_catalog_relative_resource_name(self, query):
        """Searches Data Catalog for a given query.

        :param query: The query string.
        :return: A string list in which each element represents
            an Entry resource name.
        """
        return [
            result.relative_resource_name
            for result in self.search_catalog(query)
        ]
| {
"content_hash": "bd17848c536f4c92358c950b0e80d6bd",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 79,
"avg_line_length": 37.52272727272727,
"alnum_prop": 0.5833929849677881,
"repo_name": "GoogleCloudPlatform/datacatalog-connectors",
"id": "4b56924adb99d73bf5cc44c40ff69d196b3ef4fe",
"size": "18757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-datacatalog-connectors-commons/src/google/datacatalog_connectors/commons/datacatalog_facade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "994"
},
{
"name": "Python",
"bytes": "180712"
},
{
"name": "Shell",
"bytes": "5300"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisSafenamesNetStatusRegistered(object):
    """Parser checks for a registered stripe.com response served by
    whois.safenames.net (fixture: status_registered.txt).
    """

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.safenames.net/status_registered.txt"
        host = "whois.safenames.net"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def _check_contact(self, contacts, expected_type):
        # Admin, registrant and technical contacts share identical details
        # in this fixture; only the contact type differs.
        eq_(contacts.__class__.__name__, 'list')
        eq_(len(contacts), 1)
        contact = contacts[0]
        eq_(contact.__class__.__name__, 'Contact')
        eq_(contact.type, expected_type)
        eq_(contact.id, None)
        eq_(contact.name, "Domain Admin")
        eq_(contact.organization, "Stripe")
        eq_(contact.address, "3180 18th St")
        eq_(contact.city, "San Francisco")
        eq_(contact.zip, "94110")
        eq_(contact.state, "CA")
        eq_(contact.country, None)
        eq_(contact.country_code, "US")
        eq_(contact.phone, "+1.8772544179")
        eq_(contact.fax, None)
        eq_(contact.email, "dns@stripe.com")
        eq_(contact.created_on, None)
        eq_(contact.updated_on, None)

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_domain(self):
        eq_(self.record.domain, "stripe.com")

    def test_nameservers(self):
        nameservers = self.record.nameservers
        eq_(nameservers.__class__.__name__, 'list')
        eq_(len(nameservers), 2)
        for nameserver, expected in zip(
                nameservers, ["dns1.idp365.net", "dns2.idp365.net"]):
            eq_(nameserver.__class__.__name__, 'Nameserver')
            eq_(nameserver.name, expected)

    def test_admin_contacts(self):
        self._check_contact(self.record.admin_contacts,
                            yawhois.record.Contact.TYPE_ADMINISTRATIVE)

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('1995-09-12T04:00:00Z'))

    def test_registrar(self):
        registrar = self.record.registrar
        eq_(registrar.__class__.__name__, 'Registrar')
        eq_(registrar.id, "447")
        eq_(registrar.name, "Safenames Ltd")
        eq_(registrar.organization, "Safenames Ltd")
        eq_(registrar.url, "http://www.safenames.net")

    def test_registrant_contacts(self):
        self._check_contact(self.record.registrant_contacts,
                            yawhois.record.Contact.TYPE_REGISTRANT)

    def test_technical_contacts(self):
        self._check_contact(self.record.technical_contacts,
                            yawhois.record.Contact.TYPE_TECHNICAL)

    def test_updated_on(self):
        eq_(self.record.updated_on.__class__.__name__, 'datetime')
        eq_(self.record.updated_on, time_parse('2014-10-02T15:33:46Z'))

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2024-09-11T04:00:00Z'))
| {
"content_hash": "f85b68d205935773b9cb58fb234511b0",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 92,
"avg_line_length": 50.690265486725664,
"alnum_prop": 0.6443784916201117,
"repo_name": "huyphan/pyyawhois",
"id": "380517659330caf503cad6c92172fdd7d207480d",
"size": "5996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_safenames_net_status_registered.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from enable.tools.viewport_pan_tool import *
| {
"content_hash": "03136fc8c0929353faed5eef38ff640a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 42,
"alnum_prop": 0.7857142857142857,
"repo_name": "enthought/etsproxy",
"id": "fa635e15ee478fc1a7823a2191b4b2ec52f1308a",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/enable/tools/viewport_pan_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2012-2014, Austin Benson and David Gleich
All rights reserved.
This file is part of MRTSQR and is under the BSD 2-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-2-Clause
"""
"""
This is a script to run the Direct TSQR algorithm with direct computation
of the matrix Q. This script can also be used for direct computation
of the left singular vectors (along with the singular values and right
singular vectors).
See options:
python run_direct_tsqr.py --help
Example usage:
python run_direct_tsqr.py --input=A_800M_10.bseq \
--ncols=10 --svd=2 --schedule=100,100,100 \
--hadoop=icme-hadoop1 --local_output=tsqr-tmp \
--output=DIRTSQR_TESTING
"""
import os
import shutil
import subprocess
import sys
import time
import util
from optparse import OptionParser
# Parse command-line options
#
# TODO(arbenson): use argparse instead of optparse when icme-hadoop1 defaults
# to python 2.7
parser = OptionParser()
parser.add_option('-i', '--input', dest='input', default='',
help='input matrix')
parser.add_option('-o', '--output', dest='out', default='',
help='base string for output of Hadoop jobs')
parser.add_option('-l', '--local_output', dest='local_out', default='direct_out_tmp',
help='Base directory for placing local files')
parser.add_option('-t', '--times_output', dest='times_out', default='times',
help='file for storing command times')
parser.add_option('-n', '--ncols', type='int', dest='ncols', default=0,
help='number of columns in the matrix')
parser.add_option('-s', '--schedule', dest='sched', default='100,100,100',
help='comma separated list of number of map tasks to use for'
+ ' the three jobs')
parser.add_option('-H', '--hadoop', dest='hadoop', default='',
help='name of hadoop for Dumbo')
parser.add_option('-x', '--svd', type='int', dest='svd', default=0,
help="""0: no SVD computed;
1: compute the singular values (R = USV^t);
2: compute the singular vectors as well as QR;
3: compute the SVD but not the QR factorization
"""
)
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
default=True, help='turn off some statement printing')
(options, args) = parser.parse_args()
cm = util.CommandManager(verbose=options.verbose)
# Store options in the appropriate variables
in1 = options.input
if in1 == '':
cm.error('no input matrix provided, use --input')
out = options.out
if out == '':
# TODO(arbenson): make sure in1 is clean
out = in1 + '_DIRECT'
local_out = options.local_out
out_file = lambda f: local_out + '/' + f
if os.path.exists(local_out):
shutil.rmtree(local_out)
os.mkdir(local_out)
times_out = options.times_out
ncols = options.ncols
if ncols == 0:
cm.error('number of columns not provided, use --ncols')
svd_opt = options.svd
sched = options.sched
try:
sched = [int(s) for s in sched.split(',')]
sched[2]
except:
cm.error('invalid schedule provided')
hadoop = options.hadoop
run_recursive = True
# Now run the MapReduce jobs
out1 = out + '_1'
cm.run_dumbo('dirtsqr1.py', hadoop, ['-mat ' + in1, '-output ' + out1,
'-nummaptasks %d' % sched[0],
'-libjar feathers.jar'])
out2 = out + '_2'
cm.run_dumbo('dirtsqr2.py', hadoop, ['-mat ' + out1 + '/R_*', '-output ' + out2,
'-svd ' + str(svd_opt),
'-nummaptasks %d' % sched[1],
'-libjar feathers.jar'])
# Q2 file needs parsing before being distributed to phase 3
Q2_file = out_file('Q2.txt')
if os.path.exists(Q2_file):
os.remove(Q2_file)
if os.path.exists(Q2_file + '.out'):
os.remove(Q2_file + '.out')
cm.copy_from_hdfs(out2 + '/Q2', Q2_file)
cm.parse_seq_file(Q2_file)
if svd_opt in [2, 3]:
small_U_file = out_file('U.txt')
if os.path.exists(small_U_file):
os.remove(small_U_file)
if os.path.exists(small_U_file + '.out'):
os.remove(small_U_file + '.out')
cm.copy_from_hdfs(out2 + '/U', small_U_file)
cm.parse_seq_file(small_U_file)
in3 = out1 + '/Q_*'
opts = ['-mat ' + in3, '-output ' + out + '_3', '-ncols ' + str(ncols),
'-q2path ' + Q2_file + '.out', '-nummaptasks %d' % sched[2],
'-libjar feathers.jar']
if svd_opt == 3:
opts += ['-upath ' + small_U_file + '.out']
cm.run_dumbo('dirtsqr3.py', hadoop, opts)
if svd_opt == 2:
# We need an addition TS matrix multiply to get the left singular vectors
out4 = out + '_4'
cm.run_dumbo('TSMatMul.py', hadoop, ['-mat ' + out + '_3', '-output ' + out4,
'-mpath ' + small_U_file + '.out',
'-nummaptasks %d' % sched[2]])
try:
f = open(times_out, 'a')
f.write('times: ' + str(cm.times) + '\n')
f.close
except:
print str(cm.times)
| {
"content_hash": "1c50c671373af1bcd7ba423c6fd0f303",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 85,
"avg_line_length": 32.120253164556964,
"alnum_prop": 0.6023645320197044,
"repo_name": "arbenson/mrtsqr",
"id": "b784bc710506ba51ceb85728f7977db639103e22",
"size": "5075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dumbo/run_dirtsqr.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2012"
},
{
"name": "C++",
"bytes": "76142"
},
{
"name": "Java",
"bytes": "21697"
},
{
"name": "Makefile",
"bytes": "1309"
},
{
"name": "Python",
"bytes": "292758"
},
{
"name": "Shell",
"bytes": "13614"
}
],
"symlink_target": ""
} |
import os
import socket
import struct
import select
import time
# ICMP message type codes (RFC 792): echo reply and echo request.
ICMP_ECHO_RESPONSE = 0
ICMP_ECHO_REQUEST = 8
class PingICMP():
'''
PingICMP reference Linux implemented and RFC 792
'''
def __init__(self):
pass
def __checksum(self, source_string):
_sum = 0
countTo = (len(source_string) / 2) * 2
count = 0
while count < countTo:
thisVal = ord(source_string[count + 1]) * 256 + ord(source_string[count])
_sum = _sum + thisVal
_sum = _sum & 0xffffffff
count = count + 2
if countTo < len(source_string):
_sum = _sum + ord(source_string[len(source_string) - 1])
_sum = _sum & 0xffffffff
_sum = (_sum >> 16) + (_sum & 0xffff)
_sum = _sum + (_sum >> 16)
answer = ~_sum
answer = answer & 0xffff
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
def __receive_icmp_response(self, icmp_socket, packet_id, timeout):
time_left = timeout
while True:
start_time = time.time()
select_ready = select.select([icmp_socket], [], [], time_left)
select_interval = (time.time() - start_time)
if select_ready[0] == []:
return
time_received = time.time()
packet, _ = icmp_socket.recvfrom(1024)
icmp_header = packet[20 : 28]
rep_type, _, _, pkt_id, _ = struct.unpack(
'bbHHh', icmp_header
)
if rep_type is ICMP_ECHO_RESPONSE and pkt_id == packet_id:
packet_bytes = struct.calcsize('d')
time_sent = struct.unpack('d', packet[28 : 28 + packet_bytes])[0]
return time_received - time_sent
time_left = time_left - select_interval
if time_left <= 0:
return
def __send_icmp_request(self, icmp_socket, dest_addr, packet_id, sequence):
# ICMP Header :
# -------------------------------------------------------------------
# | type(8) | code(8) | checksum(16) | packet_id(16) | sequence(16) |
# -------------------------------------------------------------------
#
packet_checksum = 0
header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, packet_checksum, packet_id, sequence)
packet_bytes = struct.calcsize('d')
data = (128 - packet_bytes) * '0'
data = struct.pack('d', time.time()) + data
packet_checksum = self.__checksum(header + data)
header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, socket.htons(packet_checksum), packet_id, sequence)
packet = header + data
icmp_socket.sendto(packet, (dest_addr, 1))
def ping_once(self, dest_addr, timeout, sequence):
icmp = socket.getprotobyname('icmp')
try:
icmp_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
except socket.error, (errno, msg):
if errno == 1:
msg = '%s : Just root can send ICMP Message' % msg
raise socket.error(msg)
raise
packet_id = os.getpid() & 0xFFFF
self.__send_icmp_request(icmp_socket, dest_addr, packet_id, sequence)
delay = self.__receive_icmp_response(icmp_socket, packet_id, timeout)
icmp_socket.close()
return delay
def ping(self, host_name, timeout = 10, count = 5):
ping_result_list = []
try:
dest_addr = socket.gethostbyname(host_name)
except socket.gaierror, e:
raise IOError('%s is not right hostname or ip' % host_name)
for i in xrange(count):
ping_result = {'host_name' : host_name, 'dest_addr' : dest_addr}
try:
delay = self.ping_once(dest_addr, timeout, i)
if delay is not None:
ping_result['result'] = 'success'
ping_result['delay'] = delay * 1000
else:
ping_result['result'] = 'timeout'
except socket.gaierror, e:
ping_result['result'] = 'exception'
ping_result['message'] = e
ping_result_list.append(ping_result)
return ping_result_list
| {
"content_hash": "131f6e68b46a304e607ceded85dc60b6",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 111,
"avg_line_length": 33.97794117647059,
"alnum_prop": 0.4773858472192166,
"repo_name": "interhui/py-sys",
"id": "1e8c53bb8359f2bb20255961c6e4baf8f9db4e37",
"size": "4637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_sys/ping/ping_icmp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46886"
}
],
"symlink_target": ""
} |
"""Tests for homekit_controller config flow."""
import json
from unittest import mock
import homekit
import pytest
from homeassistant.components.homekit_controller import config_flow
from homeassistant.components.homekit_controller.const import KNOWN_DEVICES
from tests.common import MockConfigEntry
from tests.components.homekit_controller.common import (
Accessory, FakeService, setup_platform
)
# (exception, expected error key) tables used to parametrize the pairing
# error tests (see test_pair_abort_errors_on_start below; the remaining
# parametrized tests presumably live past this chunk -- verify).
PAIRING_START_FORM_ERRORS = [
    (homekit.BusyError, 'busy_error'),
    (homekit.MaxTriesError, 'max_tries_error'),
    (KeyError, 'pairing_failed'),
]
PAIRING_START_ABORT_ERRORS = [
    (homekit.AccessoryNotFoundError, 'accessory_not_found_error'),
    (homekit.UnavailableError, 'already_paired'),
]
PAIRING_FINISH_FORM_ERRORS = [
    (homekit.MaxPeersError, 'max_peers_error'),
    (homekit.AuthenticationError, 'authentication_error'),
    (homekit.UnknownError, 'unknown_error'),
    (KeyError, 'pairing_failed'),
]
PAIRING_FINISH_ABORT_ERRORS = [
    (homekit.AccessoryNotFoundError, 'accessory_not_found_error'),
]
def _setup_flow_handler(hass):
    """Build a config flow handler wired to a mocked pairing controller."""
    handler = config_flow.HomekitControllerFlowHandler()
    handler.hass = hass
    handler.context = {}
    controller = mock.Mock()
    controller.pairings = {}
    handler.controller = controller
    return handler
async def _setup_flow_zeroconf(hass, discovery_info):
    """Start a homekit_controller flow from a zeroconf discovery payload."""
    return await hass.config_entries.flow.async_init(
        'homekit_controller',
        context={'source': 'zeroconf'},
        data=discovery_info,
    )
async def test_discovery_works(hass):
    """Test the happy path: discovery, pairing form, then entry creation."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await flow.async_step_pair({})
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.controller.start_pairing.call_count == 1
    # Fake pairing whose accessory name becomes the config entry title.
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    # Pairing doesn't error and a pairing object results
    flow.controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_discovery_works_upper_case(hass):
    """Test a device being discovered with upper-case zeroconf keys."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'MD': 'TestDevice',
            'ID': '00:00:00:00:00:00',
            'C#': 1,
            'SF': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await flow.async_step_pair({})
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.controller.start_pairing.call_count == 1
    # Fake pairing whose accessory name becomes the config entry title.
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    # Pairing succeeds and the flow creates a config entry.
    flow.controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_discovery_works_missing_csharp(hass):
    """Test a device being discovered that has missing mdns attrs."""
    # No 'c#' (config number) property is advertised here.
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await flow.async_step_pair({})
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.controller.start_pairing.call_count == 1
    # Fake pairing whose accessory name becomes the config entry title.
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    # Pairing succeeds and the flow creates a config entry.
    flow.controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_abort_duplicate_flow(hass):
    """A second discovery of an in-progress device aborts the new flow."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        },
    }

    # First discovery starts the pairing form.
    first = await _setup_flow_zeroconf(hass, discovery_info)
    assert first['type'] == 'form'
    assert first['step_id'] == 'pair'

    # A second discovery of the same device is rejected as a duplicate.
    second = await _setup_flow_zeroconf(hass, discovery_info)
    assert second['type'] == 'abort'
    assert second['reason'] == 'already_in_progress'
async def test_pair_already_paired_1(hass):
    """Discovery of a device advertising sf=0 (already paired) aborts."""
    handler = _setup_flow_handler(hass)
    result = await handler.async_step_zeroconf({
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 0,
        },
    })
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_paired'
    assert handler.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'},
    }
async def test_discovery_ignored_model(hass):
    """Discovery of a model on the HOMEKIT_IGNORE list aborts."""
    handler = _setup_flow_handler(hass)
    result = await handler.async_step_zeroconf({
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': config_flow.HOMEKIT_IGNORE[0],
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        },
    })
    assert result['type'] == 'abort'
    assert result['reason'] == 'ignored_model'
    assert handler.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'},
    }
async def test_discovery_invalid_config_entry(hass):
    """A stale config entry for a pairable device is removed on discovery."""
    MockConfigEntry(domain='homekit_controller', data={
        'AccessoryPairingID': '00:00:00:00:00:00'
    }).add_to_hass(hass)

    # We just added a mock config entry so it must be visible in hass.
    assert len(hass.config_entries.async_entries()) == 1

    handler = _setup_flow_handler(hass)
    result = await handler.async_step_zeroconf({
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        },
    })

    # Pairing is offered because the device itself is unpaired...
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert handler.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'},
    }

    # ...and the stale config entry has been removed automatically.
    assert len(hass.config_entries.async_entries()) == 0
async def test_discovery_already_configured(hass):
    """Discovery of an already-configured device aborts without a refresh."""
    await setup_platform(hass)

    connection = mock.Mock()
    connection.config_num = 1
    hass.data[KNOWN_DEVICES]['00:00:00:00:00:00'] = connection

    handler = _setup_flow_handler(hass)
    result = await handler.async_step_zeroconf({
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 0,
        },
    })

    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
    assert handler.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'},
    }
    # Advertised config number matches, so no change handling is triggered.
    assert connection.async_config_num_changed.call_count == 0
async def test_discovery_already_configured_config_change(hass):
    """Already configured, but the accessory's config number has changed."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            # 'c#': 2 is newer than the connection's stored config_num of 1
            'c#': 2,
            'sf': 0,
        }
    }
    await setup_platform(hass)
    conn = mock.Mock()
    conn.config_num = 1
    hass.data[KNOWN_DEVICES]['00:00:00:00:00:00'] = conn
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # The newer config number triggers a refresh of the entity map
    assert conn.async_refresh_entity_map.call_args == mock.call(2)
async def test_pair_unable_to_pair(hass):
    """Pairing completed without exception, but didn't create a pairing."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await flow.async_step_pair({})
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.controller.start_pairing.call_count == 1
    # Pairing doesn't error but no pairing object is generated
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    # The pair form is shown again with an error so the user can retry
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == 'unable_to_pair'
@pytest.mark.parametrize("exception,expected", PAIRING_START_ABORT_ERRORS)
async def test_pair_abort_errors_on_start(hass, exception, expected):
    """Fatal errors while starting pairing abort the whole flow."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device refuses to enter pairing mode
    with mock.patch.object(flow.controller, 'start_pairing') as start_pairing:
        start_pairing.side_effect = exception('error')
        result = await flow.async_step_pair({})
    # These exceptions are not recoverable, so the flow is aborted outright
    assert result['type'] == 'abort'
    assert result['reason'] == expected
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
@pytest.mark.parametrize("exception,expected", PAIRING_START_FORM_ERRORS)
async def test_pair_form_errors_on_start(hass, exception, expected):
    """Recoverable errors while starting pairing re-display the pair form."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device refuses to enter pairing mode
    with mock.patch.object(flow.controller, 'start_pairing') as start_pairing:
        start_pairing.side_effect = exception('error')
        result = await flow.async_step_pair({})
    # These exceptions are recoverable, so the form is shown with an error
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == expected
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_ABORT_ERRORS)
async def test_pair_abort_errors_on_finish(hass, exception, expected):
    """Fatal errors while finishing pairing abort the whole flow."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await flow.async_step_pair({})
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.controller.start_pairing.call_count == 1
    # User submits code - pairing fails fatally, so the flow is aborted
    flow.finish_pairing.side_effect = exception('error')
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    assert result['type'] == 'abort'
    assert result['reason'] == expected
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_FORM_ERRORS)
async def test_pair_form_errors_on_finish(hass, exception, expected):
    """Recoverable errors while finishing pairing re-display the pair form."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    # Device is discovered
    result = await flow.async_step_zeroconf(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await flow.async_step_pair({})
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.controller.start_pairing.call_count == 1
    # User submits code - pairing fails but can be retried
    flow.finish_pairing.side_effect = exception('error')
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == expected
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
async def test_import_works(hass):
    """Importing a legacy pairing creates a config entry directly."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    import_info = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    # Minimal accessory database; the "23" characteristic carries the
    # device name that becomes the config entry title (see assertion below)
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    flow = _setup_flow_handler(hass)
    pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
    with mock.patch(pairing_cls_imp) as pairing_cls:
        pairing_cls.return_value = pairing
        result = await flow.async_import_legacy_pairing(
            discovery_info['properties'], import_info)
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_import_already_configured(hass):
    """Test importing a device from .homekit that is already a ConfigEntry."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    import_info = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    # A config entry with the same pairing id already exists...
    config_entry = MockConfigEntry(
        domain='homekit_controller',
        data=import_info,
    )
    config_entry.add_to_hass(hass)
    flow = _setup_flow_handler(hass)
    result = await flow.async_import_legacy_pairing(
        discovery_info['properties'], import_info)
    # ...so the import is aborted rather than creating a duplicate
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
async def test_user_works(hass):
    """Test user initiated flow discovers devices and pairs successfully."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'md': 'TestDevice',
        'id': '00:00:00:00:00:00',
        'c#': 1,
        'sf': 1,
    }
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    # The "23" characteristic carries the device name used as entry title
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    flow = _setup_flow_handler(hass)
    flow.controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    flow.controller.discover.return_value = [
        discovery_info,
    ]
    # First user step shows the device picker
    result = await flow.async_step_user()
    assert result['type'] == 'form'
    assert result['step_id'] == 'user'
    # User selects a device - flow moves on to the pairing form
    result = await flow.async_step_user({
        'device': 'TestDevice',
    })
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    # User enters the pairing code - a config entry is created
    result = await flow.async_step_pair({
        'pairing_code': '111-22-33',
    })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_user_no_devices(hass):
    """Test user initiated pairing where no devices discovered."""
    handler = _setup_flow_handler(hass)
    # Nothing on the network at all -> the flow cannot continue
    handler.controller.discover.return_value = []
    step_result = await handler.async_step_user()
    assert step_result['type'] == 'abort'
    assert step_result['reason'] == 'no_devices'
async def test_user_no_unpaired_devices(hass):
    """Test user initiated pairing where no unpaired devices discovered."""
    handler = _setup_flow_handler(hass)
    # 'sf': 0 advertises this accessory as already paired, so it must be
    # filtered out of the user step's candidate list.
    paired_device = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'md': 'TestDevice',
        'id': '00:00:00:00:00:00',
        'c#': 1,
        'sf': 0,
    }
    handler.controller.discover.return_value = [paired_device]
    step_result = await handler.async_step_user()
    assert step_result['type'] == 'abort'
    assert step_result['reason'] == 'no_devices'
async def test_parse_new_homekit_json(hass):
    """Test migrating recent .homekit/pairings.json files."""
    service = FakeService('public.hap.service.lightbulb')
    on_char = service.add_characteristic('on')
    on_char.value = 1
    accessory = Accessory('TestDevice', 'example.com', 'Test', '0001', '0.1')
    accessory.services.append(service)
    fake_controller = await setup_platform(hass)
    pairing = fake_controller.add([accessory])
    pairing.pairing_data = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    # os.path.exists is called twice: presumably first for pairings.json
    # (True) and then for the legacy hk- file (False) - TODO confirm the
    # ordering against the flow implementation.
    mock_path = mock.Mock()
    mock_path.exists.side_effect = [True, False]
    # pairings.json layout: pairing data keyed by the accessory id
    read_data = {
        '00:00:00:00:00:00': pairing.pairing_data,
    }
    mock_open = mock.mock_open(read_data=json.dumps(read_data))
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 0,
        }
    }
    flow = _setup_flow_handler(hass)
    pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
    with mock.patch(pairing_cls_imp) as pairing_cls:
        pairing_cls.return_value = pairing
        with mock.patch('builtins.open', mock_open):
            with mock.patch('os.path', mock_path):
                result = await flow.async_step_zeroconf(discovery_info)
    # The stored pairing is migrated straight into a config entry
    assert result['type'] == 'create_entry'
    assert result['title'] == 'TestDevice'
    assert result['data']['AccessoryPairingID'] == '00:00:00:00:00:00'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
async def test_parse_old_homekit_json(hass):
    """Test migrating original .homekit/hk-00:00:00:00:00:00 files."""
    service = FakeService('public.hap.service.lightbulb')
    on_char = service.add_characteristic('on')
    on_char.value = 1
    accessory = Accessory('TestDevice', 'example.com', 'Test', '0001', '0.1')
    accessory.services.append(service)
    fake_controller = await setup_platform(hass)
    pairing = fake_controller.add([accessory])
    pairing.pairing_data = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    # os.path.exists: presumably pairings.json is absent (False) while the
    # legacy per-device file exists (True) - TODO confirm the ordering
    # against the flow implementation.
    mock_path = mock.Mock()
    mock_path.exists.side_effect = [False, True]
    mock_listdir = mock.Mock()
    mock_listdir.return_value = [
        'hk-00:00:00:00:00:00',
        'pairings.json'
    ]
    # Legacy per-device file layout: bare pairing data, no id key
    read_data = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    mock_open = mock.mock_open(read_data=json.dumps(read_data))
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 0,
        }
    }
    flow = _setup_flow_handler(hass)
    pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
    with mock.patch(pairing_cls_imp) as pairing_cls:
        pairing_cls.return_value = pairing
        with mock.patch('builtins.open', mock_open):
            with mock.patch('os.path', mock_path):
                with mock.patch('os.listdir', mock_listdir):
                    result = await flow.async_step_zeroconf(discovery_info)
    # The legacy pairing is migrated straight into a config entry
    assert result['type'] == 'create_entry'
    assert result['title'] == 'TestDevice'
    assert result['data']['AccessoryPairingID'] == '00:00:00:00:00:00'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
async def test_parse_overlapping_homekit_json(hass):
    """Test migrating .homekit/pairings.json files when hk- exists too."""
    service = FakeService('public.hap.service.lightbulb')
    on_char = service.add_characteristic('on')
    on_char.value = 1
    accessory = Accessory('TestDevice', 'example.com', 'Test', '0001', '0.1')
    accessory.services.append(service)
    fake_controller = await setup_platform(hass)
    pairing = fake_controller.add([accessory])
    pairing.pairing_data = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    mock_listdir = mock.Mock()
    mock_listdir.return_value = [
        'hk-00:00:00:00:00:00',
        'pairings.json'
    ]
    # Both the new and legacy files exist on disk
    mock_path = mock.Mock()
    mock_path.exists.side_effect = [True, True]
    # First file to get loaded is .homekit/pairing.json
    read_data_1 = {
        '00:00:00:00:00:00': {
            'AccessoryPairingID': '00:00:00:00:00:00',
        }
    }
    mock_open_1 = mock.mock_open(read_data=json.dumps(read_data_1))
    # Second file to get loaded is .homekit/hk-00:00:00:00:00:00
    read_data_2 = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }
    mock_open_2 = mock.mock_open(read_data=json.dumps(read_data_2))
    # Each open() call consumes the next file handle in order
    side_effects = [mock_open_1.return_value, mock_open_2.return_value]
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 0,
        }
    }
    flow = _setup_flow_handler(hass)
    pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
    with mock.patch(pairing_cls_imp) as pairing_cls:
        pairing_cls.return_value = pairing
        with mock.patch('builtins.open', side_effect=side_effects):
            with mock.patch('os.path', mock_path):
                with mock.patch('os.listdir', mock_listdir):
                    result = await flow.async_step_zeroconf(discovery_info)
        # Let any background migration tasks settle before asserting
        await hass.async_block_till_done()
    assert result['type'] == 'create_entry'
    assert result['title'] == 'TestDevice'
    assert result['data']['AccessoryPairingID'] == '00:00:00:00:00:00'
    assert flow.context == {
        'hkid': '00:00:00:00:00:00',
        'title_placeholders': {'name': 'TestDevice'}
    }
| {
"content_hash": "ffd35b00ccb1061de464bf5ef7df7389",
"timestamp": "",
"source": "github",
"line_count": 967,
"max_line_length": 78,
"avg_line_length": 28.985522233712512,
"alnum_prop": 0.5688394163188126,
"repo_name": "jabesq/home-assistant",
"id": "99562f600459308c34156ac3ab8b968d6da2139c",
"size": "28029",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/homekit_controller/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
} |
"""
Django settings for mrestweb project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%j53w23rdwgx02qh_017b+hn0t48sjwi-x4c1neb94m!6di7c@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# external apps
'rest_framework',
'rest_framework.authtoken',
# application apps
'webapp.apps.WebappConfig',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
""" Django-restframework
"""
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
ROOT_URLCONF = 'mrestweb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mrestweb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
LOCAL = True
if LOCAL:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| {
"content_hash": "fbdd54e3b0d200db7969ab8d71d9f92d",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 91,
"avg_line_length": 26.312925170068027,
"alnum_prop": 0.6770941054808687,
"repo_name": "brunogabriel/mobile-rest-web",
"id": "08c5944dee35c3d8ae224f31f968b761d320e5ef",
"size": "3868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrestweb/mrestweb/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19260"
}
],
"symlink_target": ""
} |
from nodeGraph import NodeGraph
from node import Node
from plug import Plug
from wire import Wire | {
"content_hash": "c56034fb688e0105b2b45e8a379efbfa",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 31,
"avg_line_length": 24.25,
"alnum_prop": 0.845360824742268,
"repo_name": "Derfies/wxnodegraph",
"id": "3c2b9e6ae99a8ccec1b2d60d528f649e21a29ca8",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wxnodegraph/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13524"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript
from binascii import unhexlify
from io import BytesIO
import time
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
    '''
    Make the signature in vin 0 of a tx non-DER-compliant,
    by adding padding after the S-value.

    Only the first push (the signature) is modified; a NUL byte is inserted
    between the S-value and the trailing hashtype byte, which breaks the
    strict-DER length check without invalidating the hashtype.
    NOTE: this file targets Python 2 - the pushes iterate as str, so string
    slicing/concatenation is used here.
    '''
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if (len(newscript) == 0):
            newscript.append(i[0:-1] + '\0' + i[-1])
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)
'''
This test is meant to exercise BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 98 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Check that the new DERSIG rules are enforced on the 751st version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP66Test(ComparisonTestFramework):
    """Exercise BIP66 (strict DER signatures) activation over P2P.

    Drives a single node through the version-3 supermajority activation
    schedule (see the module docstring for the block-by-block plan).
    """
    def __init__(self):
        # One node is enough; the comparison framework talks to it over P2P.
        self.num_nodes = 1
    def setup_network(self):
        # Must set the blockversion for this test
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
                                 binary=[self.options.testbinary])
    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        test.run()
    def create_transaction(self, node, coinbase, to_address, amount):
        # Spend the first output of the given coinbase block's coinbase tx
        # to to_address and return the signed tx as a CTransaction.
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = BytesIO(unhexlify(signresult['hex']))
        tx.deserialize(f)
        return tx
    def get_tests(self):
        # Two coinbases to spend later (one per DERSIG check below)
        self.coinbase_blocks = self.nodes[0].generate(2)
        height = 3 # height of the next block to build
        # NOTE: Python 2 long-literal syntax ("L" suffix); this file targets
        # Python 2 (see also xrange below).
        self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = int(time.time())
        ''' 98 more version 2 blocks '''
        test_blocks = []
        for i in xrange(98):
            block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
            block.nVersion = 2
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
            height += 1
        yield TestInstance(test_blocks, sync_every_block=False)
        ''' Mine 749 version 3 blocks '''
        test_blocks = []
        for i in xrange(749):
            block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
            block.nVersion = 3
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
            height += 1
        yield TestInstance(test_blocks, sync_every_block=False)
        '''
        Check that the new DERSIG rules are not enforced in the 750th
        version 3 block.
        '''
        # Non-DER signature in a block BEFORE the 750/1000 threshold: accepted
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[0], self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])
        '''
        Check that the new DERSIG rules are enforced in the 751st version 3
        block.
        '''
        # Same non-DER trick AFTER the threshold: the block must be rejected,
        # so the tip is deliberately NOT advanced here.
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])
        ''' Mine 199 new version blocks on last valid tip '''
        test_blocks = []
        for i in xrange(199):
            block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
            block.nVersion = 3
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
            height += 1
        yield TestInstance(test_blocks, sync_every_block=False)
        ''' Mine 1 old version block '''
        # Still below the 950/1000 rejection threshold: old version accepted
        block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
        block.nVersion = 2
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])
        ''' Mine 1 new version block '''
        block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])
        ''' Mine 1 old version block, should be invalid '''
        # Now 950 of the last 1000 blocks are version 3: old version rejected
        block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
        block.nVersion = 2
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])
if __name__ == '__main__':
    # Entry point when run directly by the rpc-tests harness.
    BIP66Test().main()
| {
"content_hash": "00481643d43927a88bf70aff2d1c4021",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 100,
"avg_line_length": 36.108695652173914,
"alnum_prop": 0.5982841661649608,
"repo_name": "Kangmo/bitcoin",
"id": "95be385d93fb0e9de91ab9295282e92939531753",
"size": "6860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/bipdersig-p2p.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "781508"
},
{
"name": "C++",
"bytes": "4160302"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3792"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "156462"
},
{
"name": "Makefile",
"bytes": "99766"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "712292"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "26024"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import shutil
import unittest
import numpy as np
from model.data_processor import DataProcessor
from model.one_hot_model import OneHotModel
class TestOneHotModel(unittest.TestCase):
    """Smoke tests for OneHotModel training, prediction and persistence."""

    def test_one_hot_forward(self):
        """Overfit a repeated random sequence and print the predictions."""
        n_vocab, seq_len = 10, 20
        ckpt_dir = os.path.join(os.path.dirname(__file__), "checkpoints")
        processor = DataProcessor()
        # One random sequence repeated 10x serves as both the training and
        # the validation data - the model should memorize it easily.
        base_seq = np.random.randint(n_vocab, size=seq_len)
        repeated = np.tile(base_seq, 10)
        x_train, y_train = processor.format(repeated, n_vocab, seq_len)
        x_valid, y_valid = processor.format(repeated, n_vocab, seq_len)
        model = OneHotModel(n_vocab, seq_len, checkpoint_path=ckpt_dir)
        model.compile()
        model.fit(x_train, y_train, x_valid, y_valid, epochs=20)
        print(model.model.optimizer.get_config())
        seeds = np.random.choice(base_seq, 3)
        predictions = model.predict(seeds)
        # predictions should emulate transitions seen in base_seq
        print(base_seq)
        for seed, predicted in zip(seeds, predictions):
            print("{} -> {}".format(seed, predicted))
        shutil.rmtree(ckpt_dir)

    def test_save_load(self):
        """A saved model exists on disk and can be loaded back."""
        model = OneHotModel(10, 20)
        saved_path = model.save(os.path.dirname(__file__))
        self.assertTrue(os.path.exists(saved_path))
        model.load(saved_path)
        os.remove(saved_path)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "73fe315edb5c6d7eecdb62d6654c8b2a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 30.851063829787233,
"alnum_prop": 0.6206896551724138,
"repo_name": "icoxfog417/tying-wv-and-wc",
"id": "9091ad5b0d0a98ffe207342916a1f9308b724058",
"size": "1450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_one_hot_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29333"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from concurrent.futures import ThreadPoolExecutor
from opentracing.mocktracer import MockTracer
from ..testcase import OpenTracingTestCase
class TestThreads(OpenTracingTestCase):
    """Finish a span started in the main thread from inside a worker thread,
    while the worker keeps its own, unrelated trace."""
    def setUp(self):
        self.tracer = MockTracer()
        self.executor = ThreadPoolExecutor(max_workers=3)
    def test_main(self):
        # Start an isolated task and query for its result -and finish it-
        # in another task/thread
        span = self.tracer.start_span('initial')
        self.submit_another_task(span)
        # Wait for the worker threads so every span has been finished
        self.executor.shutdown(True)
        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 3)
        self.assertNamesEqual(spans, ['initial', 'subtask', 'task'])
        # task/subtask are part of the same trace,
        # and subtask is a child of task
        self.assertSameTrace(spans[1], spans[2])
        self.assertIsChildOf(spans[1], spans[2])
        # initial task is not related in any way to those two tasks
        self.assertNotSameTrace(spans[0], spans[1])
        self.assertEqual(spans[0].parent_id, None)
    def task(self, span):
        # Create a new Span for this task
        with self.tracer.start_active_span('task'):
            # Temporarily activate the handed-off span; the second argument
            # (finish_on_close=True) makes leaving this scope finish it.
            with self.tracer.scope_manager.activate(span, True):
                # Simulate work strictly related to the initial Span
                pass
            # Use the task span as parent of a new subtask
            with self.tracer.start_active_span('subtask'):
                pass
    def submit_another_task(self, span):
        # Hand the initial span off to a worker thread for completion
        self.executor.submit(self.task, span)
| {
"content_hash": "ed6c960b7f4112c011890ccbb87dd37f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 33.6875,
"alnum_prop": 0.6456400742115028,
"repo_name": "opentracing/opentracing-python",
"id": "f5a1a81ddf74836d105c8be659313ef7e1bc7ca2",
"size": "1617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testbed/test_active_span_replacement/test_threads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2598"
},
{
"name": "Python",
"bytes": "243543"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
from models.SimpleRecurrent import SimpleRecurrentModel
from keras.callbacks import ModelCheckpoint
def set_params(nturns = 3, input_wait = 3, quiet_gap = 4, stim_dur = 3,
               var_delay_length = 0, stim_noise = 0, rec_noise = .1,
               sample_size = 512, epochs = 100, N_rec = 50, dale_ratio=0.8, tau=100):
    """Bundle the task and network hyperparameters into a single dict.

    Each keyword argument is stored under a key of the same name, so the
    returned dict can be passed straight to generate_trials() and train().
    """
    return {
        'nturns': nturns,
        'input_wait': input_wait,
        'quiet_gap': quiet_gap,
        'stim_dur': stim_dur,
        'var_delay_length': var_delay_length,
        'stim_noise': stim_noise,
        'rec_noise': rec_noise,
        'sample_size': sample_size,
        'epochs': epochs,
        'N_rec': N_rec,
        'dale_ratio': dale_ratio,
        'tau': tau,
    }
# This generates the training data for our network
# It will be a set of input_times and output_times for when we expect input
# and when the corresponding output is expected
def generate_trials(params):
    """Build a batch of flip-flop trials.

    Each trial consists of `nturns` pulses, each on one of the two input
    channels; the target output holds the identity (0 or 1) of the most
    recent pulse, and sits at the "don't care" value 0.5 elsewhere.

    Args:
        params: dict with keys nturns, input_wait, quiet_gap, stim_dur,
            var_delay_length, stim_noise and sample_size (see set_params).
            On return it is augmented with 'input_times'/'output_times'.

    Returns:
        (x_train, y_train, params, mask) where
        x_train: (sample_size, seq_dur, 2) pulses plus Gaussian stim noise,
        y_train: (sample_size, seq_dur, 1) targets in {0, 0.5, 1},
        mask:    (sample_size, seq_dur) sample weights, 0 where the target
                 is the "don't care" value 0.5.

    Fixes vs. the original:
      * seq_dur previously took max() of the LAST sample's end time and a
        boolean, undersizing the sequence whenever var_delay varies per
        sample (numpy slice assignment then silently truncated the last
        turns); it is now the max over ALL samples.
      * np.int (removed in NumPy 1.24) replaced by the builtin int.
    """
    nturns = params['nturns']
    input_wait = params['input_wait']
    quiet_gap = params['quiet_gap']
    stim_dur = params['stim_dur']
    var_delay_length = params['var_delay_length']
    stim_noise = params['stim_noise']
    sample_size = int(params['sample_size'])
    # Optional extra per-sample random delay added to every turn.
    if var_delay_length == 0:
        var_delay = np.zeros(sample_size, dtype=int)
    else:
        var_delay = np.random.randint(var_delay_length, size=sample_size) + 1
    input_times = np.zeros([sample_size, nturns], dtype=int)
    output_times = np.zeros([sample_size, nturns], dtype=int)
    turn_time = np.zeros(sample_size, dtype=int)
    for sample in np.arange(sample_size):
        turn_time[sample] = stim_dur + quiet_gap + var_delay[sample]
        for i in np.arange(nturns):
            input_times[sample, i] = input_wait + i * turn_time[sample]
            output_times[sample, i] = input_wait + i * turn_time[sample] + stim_dur
    # The sequence must cover the last turn of the SLOWEST trial.
    seq_dur = int(np.max(output_times[:, nturns - 1]) + quiet_gap)
    x_train = np.zeros([sample_size, seq_dur, 2])
    y_train = 0.5 * np.ones([sample_size, seq_dur, 1])
    for sample in np.arange(sample_size):
        for turn in np.arange(nturns):
            firing_neuron = np.random.randint(2)  # 0 or 1
            x_train[sample,
                    input_times[sample, turn]:(input_times[sample, turn] + stim_dur),
                    firing_neuron] = 1
            y_train[sample,
                    output_times[sample, turn]:(input_times[sample, turn] + turn_time[sample]),
                    0] = firing_neuron
    # Weight only the time steps where a definite 0/1 answer is required.
    mask = (y_train[:, :, 0] != 0.5).astype(np.float64)
    x_train = x_train + stim_noise * np.random.randn(sample_size, seq_dur, 2)
    params['input_times'] = input_times
    params['output_times'] = output_times
    return (x_train, y_train, params, mask)
# This is the train function, using the Adam modified SGD method
def train(x_train, y_train, params, mask):
    """Fit a SimpleRecurrentModel on the flip-flop task data.

    Weights are checkpointed to ``../weights/`` after every epoch.
    Returns the fitted model plus ``params`` and ``x_train`` so the
    caller can run predictions and plots.
    """
    network = SimpleRecurrentModel(params)
    saver = ModelCheckpoint('../weights/flipflop_weights-{epoch:02d}.h5')
    network.fit(x_train, y_train, nb_epoch=params['epochs'], batch_size=64,
                callbacks=[saver], sample_weight=mask)
    return (network, params, x_train)
def run_flipflop(model, params, x_train):
    """Predict on the training data and plot the first trial's traces."""
    sample_inputs = x_train[0:4, :, :]
    predictions = model.predict(x_train)
    for trace in (sample_inputs[0, :, 0],
                  sample_inputs[0, :, 1],
                  predictions[0, :, 0]):
        plt.plot(trace)
    plt.show()
if __name__ == '__main__':
    # Build the task parameters, generate trials, train, and visualise.
    params = set_params(epochs=20, sample_size=512, input_wait=50,
                        stim_dur=50, quiet_gap=100, nturns=5, N_rec=50,
                        rec_noise=0, stim_noise=0.1, dale_ratio=None,
                        tau=100)
    x_train, y_train, params, mask = generate_trials(params)
    model, params, x_train = train(x_train, y_train, params, mask)
    run_flipflop(model, params, x_train)
| {
"content_hash": "8f091269c916d8b1e5b9650517aaba25",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 128,
"avg_line_length": 38.153153153153156,
"alnum_prop": 0.5976387249114522,
"repo_name": "ABAtanasov/KerasCog",
"id": "7adef3ef731b504b7439b7c725d07dbeb9fba5cc",
"size": "4235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/FlipFlop_ID.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26064"
}
],
"symlink_target": ""
} |
from __future__ import division
import sys
import datetime
import itertools
import ntpath
import multiprocessing as mp
try:
import simplejson as json
except ImportError:
import json
import numpy as np
from pymongo.errors import PyMongoError
from pymongo import MongoClient, GEOSPHERE
from netCDF4 import Dataset
from constants import MONGO
from atlas.utils.round_to_n import round_to_n
__author__ = "rblourenco@uchicago.edu"
# 2015-08-19 - Initial commit
# Build the authenticated connection string from the MONGO settings dict.
uri = "mongodb://{}:{}@{}/{}?authMechanism=SCRAM-SHA-1".format(
    MONGO['user'], MONGO['password'], MONGO['domain'], MONGO['database']
)
# A local (unauthenticated) server is used when MONGO['local'] is truthy.
client = MongoClient(uri) if not MONGO['local'] \
    else MongoClient('localhost', MONGO['port'])
# All collections written by this module live in the ``atlas`` database.
db = client['atlas']
class NetCDFToMongo(object):
    """Write geospatial information from a netCDF file into MongoDB.

    Every (lat, lon) grid cell of every non-dimension variable becomes one
    GeoJSON ``Feature`` document (built by ``GenerateDocument``) in a
    collection named ``'<name>_<variable>'``.
    """

    def __init__(self, nc_file, sigfigs=3):
        """Open ``nc_file`` and cache its coordinate arrays.

        :param nc_file: Path to netCDF input file
        :type nc_file: str
        :param sigfigs: Significant figures kept when rounding cell values
        :type sigfigs: int
        :raises Exception: if the dataset lacks a lat or lon dimension
        """
        self.nc_file = nc_file
        self.nc_dataset = Dataset(self.nc_file, 'r')
        self.sigfigs = sigfigs
        self.name = None
        self.human_name = None
        self._lon_var = None
        self._lat_var = None
        self._variables = None
        self._dimensions = None
        self._parameters = None
        try:
            self._lats = self.nc_dataset.variables[self.lat_var][:]
        except KeyError:
            raise Exception('Dataset must have a latitude dimension.')
        try:
            self._lons = self.nc_dataset.variables[self.lon_var][:]
        except KeyError:
            raise Exception('Dataset must have a longitude dimension.')

    @property
    def parameters(self):
        return self._parameters

    @parameters.setter
    def parameters(self, value):
        self._parameters = value

    @property
    def dimensions(self):
        """List of dimensions other than longitude and latitude.

        :return: List of dimensions in NetCDF file (excluding lonlat)
        :rtype: list
        """
        if self._dimensions is None:
            self._dimensions = [d for d in self.nc_dataset.dimensions.keys()
                                if d not in [self.lon_var, self.lat_var]]
        return self._dimensions

    @property
    def variables(self):
        """List of variables in NetCDF, other than dimensions in NetCDF.

        :return: List of variables in NetCDF file (excluding dimensions)
        :rtype: list
        """
        if self._variables is None:
            self._variables = [v for v in self.nc_dataset.variables.keys()
                               if v not in self.nc_dataset.dimensions.keys()]
        return self._variables

    @property
    def lat_var(self):
        # Name of the latitude dimension; the dataset is assumed to use
        # the conventional 'lat'.
        if self._lat_var is None:
            self._lat_var = 'lat'
        return self._lat_var

    @property
    def lon_var(self):
        # Name of the longitude dimension; assumed to be 'lon'.
        if self._lon_var is None:
            self._lon_var = 'lon'
        return self._lon_var

    @property
    def lats(self):
        return self._lats

    @property
    def lons(self):
        return self._lons

    @property
    def pixel_side_length(self):
        """(width, height) of one grid cell in decimal degrees.

        Derived from the spacing of the first two coordinate values; a
        zero side denotes point data.

        :rtype: tuple
        """
        return abs(np.diff(self.lons[:2])[0]), abs(np.diff(self.lats[:2])[0])

    def num_or_null(self, arr):
        """Round ``arr`` to ``self.sigfigs`` significant figures.

        Fully-masked input yields None; partially-masked input is filled
        before rounding.  Non-numeric input is reported and yields None.
        """
        # (removed a stray debug ``print(arr)`` that ran once per cell)
        if np.ma.getmask(arr):
            if arr.count() == 0:
                return None
            arr = np.ma.filled(arr, None)
        try:
            return round_to_n(arr, self.sigfigs)
        except ValueError:
            print('\n*** Encountered uncoercible non-numeric ***\n{}\n\n'.format(
                arr
            ))
            return None

    @property
    def metadata(self):
        # Placeholder: subclasses/users populate raster metadata here.
        return {}

    def parallel_ingest(self):
        """Ingest metadata, then fan ingest_data out over all CPU cores."""
        self.ingest_metadata()
        for variable in self.variables:
            values = self.nc_dataset[variable][:]
            jobs = []
            n = mp.cpu_count()
            for i in range(n):
                p = mp.Process(target=self.ingest_data, args=(values, variable, n, i))
                jobs.append(p)
                p.start()
            for j in jobs:
                j.join()

    def ingest_metadata(self):
        """Store the raster-level metadata document."""
        db['raster_meta'].insert_one(self.metadata)

    def ingest_data(self, values, variable, sectors=1, sector=0):
        """Insert one document per grid cell of ``variable`` into Mongo.

        :param values: Full data array for ``variable`` as read from netCDF
        :param variable: Name of the netCDF variable being ingested
        :param sectors: Number of slices the lon/lat grid is split into
        :param sector: Index of the slice handled by this call/process
        """
        start_time = datetime.datetime.now()
        print('*** Start Run ***\n{}\n\n'.format(start_time))
        lons_lats = itertools.product(
            enumerate(self.lats), enumerate(self.lons))
        lons_lats = np.array_split(
            np.array([x for x in lons_lats]), sectors)[sector]
        try:
            points = db['{}_{}'.format(self.name, variable)]
            # Reorder axes so the array is indexed as [lat, lon, ...].
            values = np.swapaxes(
                values, self.nc_dataset.variables[variable].dimensions.index(
                    self.lat_var), 0)
            values = np.swapaxes(
                values, self.nc_dataset.variables[variable].dimensions.index(
                    self.lon_var), 1)
            for (lat_idx, lat), (lon_idx, lon) in lons_lats:
                try:
                    # BUG FIX: the per-cell result used to be assigned back
                    # to ``values``, clobbering the full data array after
                    # the first iteration (and making the later
                    # ``values[:] = []`` cleanup fail).  Use a separate
                    # name for the rounded cell value.
                    cell_value = self.num_or_null(
                        values[lat_idx, lon_idx])
                    if cell_value is None:
                        continue
                    tile = GenerateDocument(
                        lon, lat, cell_value,
                        self.pixel_side_length[0],
                        self.pixel_side_length[1],
                        self.dimensions,
                    ).as_dict
                    points.insert_one(tile)
                except:
                    print('Unexpected error:', sys.exc_info()[0])
                    raise
        except PyMongoError:
            print('Error while committing on MongoDB')
            raise
        except:
            print('Unexpected error:', sys.exc_info()[0])
            raise
        # Geospatial indexing is intentionally disabled; re-enable when
        # geo queries are needed:
        # points.create_index([('geometry', GEOSPHERE)])
        end_time = datetime.datetime.now()
        print('\n*** End Run ***\n{}\n'.format(end_time))
        elapsed_time = end_time - start_time
        print('\n*** Elapsed ***\n{}\n'.format(elapsed_time))
# Define GeoJSON standard for ATLAS
class GenerateDocument(object):
    """Build a GeoJSON ``Feature`` for one raster cell.

    The cell is a rectangle centred on ``(x, y)`` with side lengths
    ``side_x`` and ``side_y``.  Its value and extra dimension labels are
    stored under ``properties``.
    """

    def __init__(self, x, y, value, side_x, side_y, dimensions):
        self.x = x
        self.y = y
        self.value = value
        self.side_x = side_x
        self.side_y = side_y
        self.dimensions = dimensions

    @property
    def __geo_interface__(self):
        """Return the GeoJSON Feature dict for this cell.

        ATTENTION: when storing in MongoDB, use the GeoJSON-standard key
        'geometry' (not 'loc') so geoindexing works.

        :return: GeoJSON object representing the data point
        :rtype: dict
        """
        half_w = self.side_x / 2
        half_h = self.side_y / 2
        west, east = self.x - half_w, self.x + half_w
        south, north = self.y - half_h, self.y + half_h
        # Closed polygon ring: NW -> NE -> SE -> SW -> NW.
        ring = [[west, north],
                [east, north],
                [east, south],
                [west, south],
                [west, north]]
        return {
            'type': 'Feature',
            'geometry': {'type': 'Polygon', 'coordinates': [ring]},
            'properties': {
                'centroid': {'geometry': {
                    'type': 'Point', 'coordinates': [self.x, self.y]}},
                'values': self.value,
                'dimensions': self.dimensions,
            }}

    @property
    def as_dict(self):
        """Alias for the GeoJSON representation."""
        return self.__geo_interface__
if __name__ == '__main__':
    from constants import NC_FILE

    # The bare ``try/except: raise`` that previously wrapped these calls
    # was a no-op (it re-raised every exception unchanged) and has been
    # removed.
    ingestor = NetCDFToMongo(NC_FILE)
    ingestor.parallel_ingest()
| {
"content_hash": "2e9401c2457b1c4a81356ae904ce1423",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 86,
"avg_line_length": 30.086330935251798,
"alnum_prop": 0.5304878048780488,
"repo_name": "RDCEP/atlas-viewer",
"id": "e6a518c2875e7541023c331234509a3e1c340c9d",
"size": "8364",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "atlas/nc4_to_mongodb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24793"
},
{
"name": "HTML",
"bytes": "7864"
},
{
"name": "JavaScript",
"bytes": "24163"
},
{
"name": "Python",
"bytes": "23748"
}
],
"symlink_target": ""
} |
import os
import sys

# Make the package under test importable when the tests run in place.
TEST_PATH = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(TEST_PATH, '..'))

import pytest
from pyvirtualdisplay import Display

import webdriverwrapper
from webdriverwrapper.pytest import *
@pytest.fixture(scope='session', autouse=True)
def display():
    """Run the whole session inside a virtual X display.

    ``pytest.yield_fixture`` is deprecated (removed in pytest 6);
    ``pytest.fixture`` supports yield-style teardown since pytest 3.0.
    """
    virtual_display = Display(visible=0, size=(1280, 700))
    virtual_display.start()
    yield
    virtual_display.stop()
@pytest.fixture(scope='session')
def session_driver():
    """Session-wide Chrome driver (sandbox off, proxy auto-detect)."""
    options = webdriverwrapper.ChromeOptions()
    for flag in ('--no-sandbox', '--proxy-auto-detect'):
        options.add_argument(flag)
    return webdriverwrapper.Chrome(options=options)
@pytest.fixture(scope='function')
def _driver(session_driver):
    """Reset the shared driver to the default test page for each test."""
    session_driver.get('file://%s/html/some_page.html' % TEST_PATH)
    return session_driver
@pytest.fixture
def driver_form(driver):
    """Driver pointed at the local form test page."""
    driver.get('file://%s/html/form.html' % TEST_PATH)
    return driver
@pytest.fixture
def driver_error_page(driver):
    """Driver pointed at the local error-page fixture."""
    driver.get('file://%s/html/error_page.html' % TEST_PATH)
    return driver
@pytest.fixture
def driver_error_msgs(driver):
    """Driver pointed at the local error-messages fixture."""
    driver.get('file://%s/html/error_messages.html' % TEST_PATH)
    return driver
@pytest.fixture
def driver_info_msgs(driver):
    """Driver pointed at the local info-messages fixture."""
    driver.get('file://%s/html/info_messages.html' % TEST_PATH)
    return driver
@pytest.fixture
def driver_windows(driver):
    """Driver pointed at the window-handling fixture page."""
    driver.get('file://%s/html/windows.html' % TEST_PATH)
    return driver
@pytest.fixture
def new_window_path():
    """Filesystem path of the page opened in a new window."""
    return '%s/html/new_window.html' % TEST_PATH
| {
"content_hash": "87a021490f556c1d69d516f42a547265",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 73,
"avg_line_length": 21.788732394366196,
"alnum_prop": 0.6994182288299935,
"repo_name": "horejsek/python-webdriverwrapper",
"id": "baafa4ea80cfcc9fbb6e9b42ef68ad80ebef46ed",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3187"
},
{
"name": "Makefile",
"bytes": "966"
},
{
"name": "Python",
"bytes": "85851"
}
],
"symlink_target": ""
} |
"""
sentry.contrib
~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
| {
"content_hash": "af5df5787aa791ec6de61dd90797a7bd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 22.142857142857142,
"alnum_prop": 0.6451612903225806,
"repo_name": "dcramer/sentry-old",
"id": "f02bb7cacea0e7f59a8909de5618c179be151962",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/contrib/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10085"
},
{
"name": "Python",
"bytes": "183975"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
import pandas as pd
from db import DemoDB, DB, list_profiles, remove_profile
import unittest

# Shared demo (Chinook) database used by every test case below.
db = DemoDB()
class PandaSQLTest(unittest.TestCase):
    """Integration tests for db.py against the bundled demo database."""

    def setUp(self):
        pass

    def test_query_rowsum(self):
        """A plain SELECT returns every row of the Artist table."""
        df = db.query("select * from Artist;")
        self.assertEqual(len(df), 275)

    def test_query_groupby(self):
        """GROUP BY yields one row per album."""
        q = "select AlbumId, sum(1) from Track group by 1"
        df = db.query(q)
        self.assertEqual(len(df), 347)

    def test_query_from_file_rowsum(self):
        """Queries can be read from a script file on disk."""
        # Use a real temporary file instead of a hard-coded /tmp path so
        # the test also works on platforms without /tmp.
        import os
        import tempfile
        fd, path = tempfile.mkstemp(suffix=".sql")
        try:
            with os.fdopen(fd, "w") as f:
                f.write("select * from Artist;")
            df = db.query_from_file(path)
        finally:
            os.remove(path)
        self.assertEqual(len(df), 275)

    def test_add_profile(self):
        """Saving credentials adds exactly one profile."""
        before = len(list_profiles())
        db.save_credentials(profile="test_profile")
        self.assertEqual(before + 1, len(list_profiles()))
        remove_profile("test_profile")

    def test_remove_profile(self):
        """Removing a profile restores the original profile count.

        BUG FIX: this test used to be a verbatim copy of
        ``test_add_profile`` and never exercised removal.
        """
        before = len(list_profiles())
        db.save_credentials(profile="test_profile")
        remove_profile("test_profile")
        self.assertEqual(before, len(list_profiles()))

    def test_list_profiles(self):
        """At least one profile is listed after saving one."""
        db.save_credentials(profile="test_profile")
        self.assertTrue(len(list_profiles()) > 0)
        remove_profile("test_profile")

    def test_table_head(self):
        """head() returns the default six rows."""
        self.assertEqual(len(db.tables.Artist.head()), 6)

    def test_table_all(self):
        """all() returns every row."""
        self.assertEqual(len(db.tables.Artist.all()), 275)

    def test_table_select(self):
        """select() limits the returned columns."""
        df = db.tables.Artist.select("ArtistId", "Name")
        self.assertEqual(df.shape, (275, 2))

    def test_table_sample(self):
        """sample(n=10) returns exactly ten rows."""
        df = db.tables.Artist.sample(n=10)
        self.assertEqual(len(df), 10)

    def test_column_head(self):
        """Column head() returns the default six values."""
        col = db.tables.Track.TrackId.head()
        self.assertEqual(len(col), 6)

    def test_column_all(self):
        """Column all() returns every value."""
        col = db.tables.Track.TrackId.all()
        self.assertEqual(len(col), 3503)

    def test_column_sample(self):
        """Column sample(n=10) returns exactly ten values."""
        col = db.tables.Track.TrackId.sample(n=10)
        self.assertEqual(len(col), 10)
# Allow running the test module directly with ``python tests.py``.
if __name__=="__main__":
    unittest.main()
| {
"content_hash": "bc6ebb433f98b0c6a2299046f9649b98",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 63,
"avg_line_length": 29.283783783783782,
"alnum_prop": 0.615597600369174,
"repo_name": "alienfluid/db.py",
"id": "5f01522e1ea5f9c966629e8770f4ebce28d369b9",
"size": "2167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db/tests/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
from CIM15.IEC61970.Core.Curve import Curve
class HydroGeneratingEfficiencyCurve(Curve):
    """Relationship between unit efficiency in percent and unit output active power for a given net head in meters. The relationship between efficiency, discharge, head, and power output is expressed as follows: E = KP/HQ, where E = percentage, P = active power, H = height, Q = volume/time unit, K = constant. For example, a curve instance for a given net head could relate efficiency (Y-axis) versus active power output (X-axis), or versus discharge on the X-axis.
    """

    def __init__(self, HydroGeneratingUnit=None, *args, **kw_args):
        """Initialises a new 'HydroGeneratingEfficiencyCurve' instance.

        @param HydroGeneratingUnit: A hydro generating unit has an efficiency curve
        """
        # Assign through the property so both association ends get linked.
        self._HydroGeneratingUnit = None
        self.HydroGeneratingUnit = HydroGeneratingUnit

        super(HydroGeneratingEfficiencyCurve, self).__init__(*args, **kw_args)

    # CIM reflection tables consumed by the PyCIM (de)serializers.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["HydroGeneratingUnit"]
    _many_refs = []

    def getHydroGeneratingUnit(self):
        """A hydro generating unit has an efficiency curve
        """
        return self._HydroGeneratingUnit

    def setHydroGeneratingUnit(self, value):
        # Keep the one-to-many association consistent on both ends:
        # first unlink this curve from its previous unit...
        if self._HydroGeneratingUnit is not None:
            filtered = [x for x in self.HydroGeneratingUnit.HydroGeneratingEfficiencyCurves if x != self]
            self._HydroGeneratingUnit._HydroGeneratingEfficiencyCurves = filtered

        # ...then link it to the new unit's curve list (if not already there).
        self._HydroGeneratingUnit = value
        if self._HydroGeneratingUnit is not None:
            if self not in self._HydroGeneratingUnit._HydroGeneratingEfficiencyCurves:
                self._HydroGeneratingUnit._HydroGeneratingEfficiencyCurves.append(self)

    HydroGeneratingUnit = property(getHydroGeneratingUnit, setHydroGeneratingUnit)
| {
"content_hash": "31e399df6095fa4bdd6fb1da7a7a3196",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 925,
"avg_line_length": 58.875,
"alnum_prop": 0.7184713375796178,
"repo_name": "rwl/PyCIM",
"id": "a9d146bfcd27904d471ecdce5d3bc139c419d47c",
"size": "3455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Generation/Production/HydroGeneratingEfficiencyCurve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
"""
Allow users to set and activate scenes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/scene/
"""
import asyncio
import importlib
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, SERVICE_TURN_ON)
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.state import HASS_DOMAIN
# Component domain and the single state reported by scene entities.
DOMAIN = 'scene'
STATE = 'scening'
# Config key holding the per-entity target states for a scene.
STATES = 'states'
def _hass_domain_validator(config):
    """Validate platform in config for homeassistant domain."""
    if CONF_PLATFORM in config:
        return config
    # A bare mapping of states is shorthand for the homeassistant platform.
    return {CONF_PLATFORM: HASS_DOMAIN, STATES: config}
def _platform_validator(config):
    """Validate it is a valid platform."""
    module_path = 'homeassistant.components.scene.{}'.format(
        config[CONF_PLATFORM])
    try:
        platform = importlib.import_module(module_path)
    except ImportError:
        raise vol.Invalid('Invalid platform specified') from None

    # Platforms without their own schema accept the config unchanged.
    if not hasattr(platform, 'PLATFORM_SCHEMA'):
        return config
    return platform.PLATFORM_SCHEMA(config)
# Accept either a full platform config or a bare mapping of states
# (normalised by _hass_domain_validator), then defer to the selected
# platform's own schema via _platform_validator.
PLATFORM_SCHEMA = vol.Schema(
    vol.All(
        _hass_domain_validator,
        vol.Schema({
            vol.Required(CONF_PLATFORM): str
        }, extra=vol.ALLOW_EXTRA),
        _platform_validator
    ), extra=vol.ALLOW_EXTRA)

# Service payload: optional list of scene entity ids to activate.
SCENE_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
@bind_hass
def activate(hass, entity_id=None):
    """Activate a scene."""
    payload = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
    hass.services.call(DOMAIN, SERVICE_TURN_ON, payload)
async def async_setup(hass, config):
    """Set up the scenes."""
    logger = logging.getLogger(__name__)
    # The component is stored on hass.data so the config-entry hooks
    # below can retrieve it.
    component = hass.data[DOMAIN] = EntityComponent(logger, DOMAIN, hass)
    await component.async_setup(config)

    async def async_handle_scene_service(service):
        """Handle calls to the scene services."""
        target_scenes = component.async_extract_from_service(service)
        # Activate all targeted scenes concurrently.
        tasks = [scene.async_activate() for scene in target_scenes]
        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    hass.services.async_register(
        DOMAIN, SERVICE_TURN_ON, async_handle_scene_service,
        schema=SCENE_SERVICE_SCHEMA)

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    # Delegate to the EntityComponent stored on hass.data by async_setup.
    return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Delegate to the EntityComponent stored on hass.data by async_setup.
    return await hass.data[DOMAIN].async_unload_entry(entry)
class Scene(Entity):
    """A scene is a group of entities and the states we want them to be."""

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def state(self):
        """Return the state of the scene."""
        # Scenes always report the constant 'scening' state.
        return STATE

    def activate(self):
        """Activate scene. Try to get entities into requested state."""
        # Platform implementations must override this.
        raise NotImplementedError()

    def async_activate(self):
        """Activate scene. Try to get entities into requested state.

        This method must be run in the event loop and returns a coroutine.
        """
        # Default implementation runs the synchronous activate() off-loop.
        return self.hass.async_add_job(self.activate)
| {
"content_hash": "fe26a7bb406454f331486f6bc73a7403",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 75,
"avg_line_length": 26.976744186046513,
"alnum_prop": 0.6721264367816092,
"repo_name": "persandstrom/home-assistant",
"id": "8771a84c1d64d11a0181e704decd184b26107722",
"size": "3480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/scene/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
import sys
from os.path import dirname, realpath, join, normpath
from celery.schedules import crontab

import djcelery
# Register django-celery's task loader before the Django apps load.
djcelery.setup_loader()

# Make the inner ``sorbet`` package importable from anywhere.
SITE_ROOT = join(dirname(realpath(__file__)), '../../')
sys.path.insert(0, normpath(join(SITE_ROOT, "sorbet/")))

# Localisation.
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Media and static file locations.
MEDIA_ROOT = join(SITE_ROOT, 'media/')
MEDIA_URL = '/media/'
STATIC_ROOT = join(SITE_ROOT, 'static/')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# SECURITY NOTE(review): this key is committed to version control; for
# production it should be loaded from the environment instead.
SECRET_KEY = 'v3qameh$9l3-oor&fjtrpc=2uxf3t1u0$xl4b2k_1(=hz0k_wc'

ROOT_URLCONF = 'sorbet.urls'
WSGI_APPLICATION = 'sorbet.wsgi.application'
ADMINS = [('Isaac Bythewood', 'isaac@bythewood.me'),
          ('Krzysztof Klimonda', 'kklimonda@syntaxhighlighted.com')]
MANAGERS = ADMINS
LOGIN_REDIRECT_URL = '/feeds/'
PROJECT_APPS = (
    'sorbet.core',
    'sorbet.feedmanager',
)

# Email unhandled-exception tracebacks to ADMINS when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_jenkins',
    'gunicorn',
    'compressor',
    'south',
    'djcelery',
    'core',
    'feedmanager',
    'vendor'
)
AUTHENTICATION_BACKENDS = ['sorbet.core.backends.EmailAuthBackend']
AUTH_PROFILE_MODULE = 'core.UserProfile'

# Invitation throttling for the private beta.
INVITE_ONLY = True
INVITES_PER_WEEK = 50

from datetime import timedelta
# Periodic Celery jobs: weekly invitation batch, feed refresh every 3h.
CELERYBEAT_SCHEDULE = {
    "send-invitations": {
        "task": "core.tasks.send_invitations",
        "schedule": crontab(hour=0, minute=0, day_of_week="monday"),
        "args": (INVITES_PER_WEEK,),
    },
    "update-feeds": {
        "task": "feedmanager.tasks.send_updates",
        "schedule": timedelta(hours=3),
    }
}
| {
"content_hash": "ed9e1684dac31949de4b2484c45040f5",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 69,
"avg_line_length": 25.20175438596491,
"alnum_prop": 0.6435781413156979,
"repo_name": "jacobjbollinger/sorbet",
"id": "5d3ee07111346762a169e31fb18fe99aaf6a858a",
"size": "2873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sorbet/settings/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
"""Interactive plot to illustrate the phenomenon of covariate shift.
That is, p(x,y) differs from training to testing phase. The interactive plot allows you
to specify p(x) via a probability tabels while p(y|x) remains fixed.
You can see that underspecified discriminative models are not immune to covariate shift.
Use the ``bokeh serve`` command to run the interactive plot:
bokeh serve bokeh_checkerboard.py
at your command prompt. Then navigate to the URL
http://localhost:5006/bokeh_checkerboard
in your browser.
"""
import numpy as np
from collections import OrderedDict
from os.path import join
from os.path import dirname
from bokeh.models.widgets import Button
from bokeh.layouts import widgetbox
from bokeh.layouts import row, column
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Slider
from bokeh.models.widgets import Select
from bokeh.models import Spacer
from bokeh.models.widgets import Div
from bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,
Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,
PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,
RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,
YlGnBu9, YlOrBr9, YlOrRd9, Inferno9, Magma9,
Plasma9, Viridis9, Accent8, Dark2_8, Paired9,
Pastel1_9, Pastel2_8, Set1_9, Set2_8, Set3_9)
from checkerboard import View
from checkerboard import Table
from checkerboard import Controller
from checkerboard import Model
# Plot appearance defaults.
PALETTE = "Spectral9"
IMAGE_ALPHA = 210
FILL_ALPHA = 0.9
LINE_ALPHA = 0.8

# All selectable bokeh palettes, keyed by their exported name.
standard_palettes = OrderedDict([("Blues9", Blues9), ("BrBG9", BrBG9),
                                 ("BuGn9", BuGn9), ("BuPu9", BuPu9),
                                 ("GnBu9", GnBu9), ("Greens9", Greens9),
                                 ("Greys9", Greys9), ("OrRd9", OrRd9),
                                 ("Oranges9", Oranges9), ("PRGn9", PRGn9),
                                 ("PiYG9", PiYG9), ("PuBu9", PuBu9),
                                 ("PuBuGn9", PuBuGn9), ("PuOr9", PuOr9),
                                 ("PuRd9", PuRd9), ("Purples9", Purples9),
                                 ("RdBu9", RdBu9), ("RdGy9", RdGy9),
                                 ("RdPu9", RdPu9), ("RdYlBu9", RdYlBu9),
                                 ("RdYlGn9", RdYlGn9), ("Reds9", Reds9),
                                 ("Spectral9", Spectral9), ("YlGn9", YlGn9),
                                 ("YlGnBu9", YlGnBu9), ("YlOrBr9", YlOrBr9),
                                 ("YlOrRd9", YlOrRd9), ("Inferno9", Inferno9),
                                 ("Magma9", Magma9), ("Plasma9", Plasma9),
                                 ("Viridis9", Viridis9), ("Accent8", Accent8),
                                 ("Dark2_8", Dark2_8), ("Paired9", Paired9),
                                 ("Pastel1_9", Pastel1_9),
                                 ("Pastel2_8", Pastel2_8), ("Set1_9", Set1_9),
                                 ("Set2_8", Set2_8), ("Set3_9", Set3_9)])

DEFAULT_SIZE = 10  # Default size of circles

# Classifier options exposed in the UI.
KERNELS = ['linear', 'rbf']
INIT_ACTIVE_KERNEL = 'linear'
REWEIGHTINGS = list(Model.REWEIGHTING)
INIT_ACTIVE_REWEIGHTING = Model.REWEIGHTING.NONE
def get_label_colors(palette_name):
    """Return the (positive, negative) class colors of a palette.

    The last palette entry marks the positive class, the first the
    negative class.
    """
    palette = standard_palettes[palette_name]
    return palette[-1], palette[0]
def template_title(dist, err):
    """Build a figure title such as ``'Train Distribution Err: 0.1'``."""
    return '%s Distribution Err: %s' % (dist.title(), err)
def hex_to_rgb(value):
    """Convert a ``'#rrggbb'`` hex color to an ``(r, g, b)`` int tuple."""
    digits = value.lstrip('#')
    length = len(digits)
    step = length // 3
    return tuple(int(digits[i:i + step], 16)
                 for i in range(0, length, step))
class RGBAColorMapper(object):
    """Maps floating point values to rgb values over a palette

    @author: Christine Doig
    """

    def __init__(self, low, high, palette):
        # One evenly spaced anchor scalar per palette color.
        self.range = np.linspace(low, high, len(palette))
        channels = list(zip(*[hex_to_rgb(color) for color in palette]))
        self.r, self.g, self.b = channels

    def color(self, data, alpha=255):
        """Maps your data values to the pallette with linear interpolation"""
        planes = []
        for anchors in (self.r, self.g, self.b):
            interpolated = np.interp(data, self.range, anchors)
            # Style plot to return a grey color when value is 'nan'
            interpolated[np.isnan(interpolated)] = 240
            planes.append(interpolated.astype(np.uint8))
        planes.append(np.full_like(data, alpha, dtype=np.uint8))
        stacked = np.dstack(planes)
        # Pack the four uint8 channels of each pixel into one uint32.
        return stacked.view(dtype=np.uint32).reshape(data.shape)
class BokehView(View):
    """Bokeh rendering of the checkerboard demo: widgets, figures and
    the 2x2 probability tables, wired to the shared Controller."""

    def __init__(self, controller):
        super(BokehView, self).__init__(controller)
        # define elements
        self.gen_data_button = Button(label="Generate Data", button_type="success")
        self.kernel_select = Select(title='Kernel',
                                    options=KERNELS,
                                    value=INIT_ACTIVE_KERNEL)
        self.reweighting_select = Select(title='Reweighting',
                                         options=REWEIGHTINGS,
                                         value=INIT_ACTIVE_REWEIGHTING)
        self.classify_button = Button(label="Classify", button_type="success")
        self.train_table = BokehTable([[0.4, 0.1], [0.4, 0.1]])
        self.test_table = BokehTable([[0.4, 0.4], [0.1, 0.1]])
        self.train_fig = figure(plot_height=400, plot_width=400,
                                title=template_title('Train', '-'), tools='',
                                # NOTE(review): every other figure uses
                                # y_range=[-50, 50]; [-50, 5] looks like a
                                # typo -- confirm.
                                x_range=[0, 100], y_range=[-50, 5])
        self.test_fig = figure(plot_height=400, plot_width=400,
                               title=template_title('test', '-'), tools='',
                               x_range=[0, 100], y_range=[-50, 50])
        # wire callbacks
        self.gen_data_button.on_click(controller.generate_data)
        self._kernel = INIT_ACTIVE_KERNEL
        self.kernel_select.on_change('value', self._update_kernel)
        self._reweighting = INIT_ACTIVE_REWEIGHTING
        self.reweighting_select.on_change('value', self._update_reweighting)
        self.classify_button.on_click(self._classify_callback)
        desc = Div(text=open(join(dirname(__file__), "description.html")).read(), width=1024)
        # set layout
        inputs = widgetbox(self.kernel_select,
                           self.reweighting_select,
                           self.gen_data_button,
                           self.classify_button)
        layout = column(row(desc),
                        row(column(row(inputs)), column(row(self.train_fig, self.test_fig),
                                                        row(self.train_table.get_layout_element(),
                                                            Spacer(width=100, height=100),
                                                            self.test_table.get_layout_element()))))
        self.layout = layout

    def _classify_callback(self):
        # Run classification with the currently selected kernel.
        self.controller.classify(kernel=str(self._kernel))

    def _update_kernel(self, attr, old, new_kernel):
        # Bokeh on_change callback: remember the chosen kernel.
        self._kernel = new_kernel

    def _update_reweighting(self, attr, old, new_reweighting):
        # Bokeh on_change callback: apply the chosen reweighting at once.
        self._reweighting = new_reweighting
        self.controller.reweight(weight=self._reweighting)

    def run(self):
        # set layout and off we go
        curdoc().add_root(self.layout)
        print('added the root')
        curdoc().title = "Checkerboard"
        print('done')

    def update(self, model):
        """Observer hook: rebuild both figures from the model state."""
        pos_c, neg_c = get_label_colors(PALETTE)
        color_code = lambda arr: np.where(arr == 1, pos_c, neg_c)
        self.train_fig = figure(plot_height=400, plot_width=400,
                                title=template_title('train', model.trainerr), tools='',
                                x_range=[0, 100], y_range=[-50, 50])
        self.test_fig = figure(plot_height=400, plot_width=400,
                               title=template_title('test', model.testerr), tools='',
                               x_range=[0, 100], y_range=[-50, 50])
        # Render the decision surface (if any) as a background image.
        if model.surface is not None:
            X1, X2, Z = model.surface
            cm = RGBAColorMapper(Z.min(), Z.max(), standard_palettes[PALETTE])
            Y = cm.color(Z, alpha=IMAGE_ALPHA)
            self.train_fig.image_rgba(image=[Y], x=[0], y=[-50], dw=[100], dh=[100])
            self.test_fig.image_rgba(image=[Y], x=[0], y=[-50], dw=[100], dh=[100])
        # Circle size encodes the (square-rooted) sample weight.
        sample_weight = model.sample_weight
        if sample_weight is None:
            sample_weight = np.ones(model.train.shape[0])
        sample_weight = np.sqrt(sample_weight) * DEFAULT_SIZE
        self.train_fig.circle(x=model.train[:, 0], y=model.train[:, 1],
                              color=color_code(model.train[:, 2]),
                              line_color="#7c7e71", size=sample_weight,
                              fill_alpha=FILL_ALPHA, line_alpha=LINE_ALPHA)
        self.test_fig.circle(x=model.test[:, 0], y=model.test[:, 1],
                             color=color_code(model.test[:, 2]),
                             line_color="#7c7e71", size=DEFAULT_SIZE,
                             fill_alpha=FILL_ALPHA, line_alpha=LINE_ALPHA)
        # HACK: replace the figure row in-place inside the existing layout
        # tree; bokeh offers no cleaner swap for already-rooted layouts.
        self.layout.children[1].children[1].children[0] = row(self.train_fig, self.test_fig)
class BokehTable(Table):
    """A 2x2 probability table backed by four Bokeh sliders (NW/NE/SW/SE),
    each ranging over [0, 1] in 0.05 steps."""

    def __init__(self, init_vals=None):
        """Create the sliders.

        :param init_vals: optional 2x2 nested list of initial
            probabilities; defaults to a uniform 0.25 table.
        """
        if init_vals is None:
            init_vals = [[0.25, 0.25], [0.25, 0.25]]
        table_params = dict(start=0, end=1, step=.05, width=160)
        self.nw = Slider(title="NW", value=init_vals[0][0], **table_params)
        self.ne = Slider(title="NE", value=init_vals[0][1], **table_params)
        self.sw = Slider(title="SW", value=init_vals[1][0], **table_params)
        self.se = Slider(title="SE", value=init_vals[1][1], **table_params)

    def get_pd(self):
        """Return the current slider values as a 2x2 float array."""
        # BUG FIX: ``np.float`` was removed from NumPy (deprecated in
        # 1.20, removed in 1.24); the builtin float is the equivalent.
        return np.array([[self.nw.value, self.ne.value],
                         [self.sw.value, self.se.value]], dtype=float)

    def get_layout_element(self):
        """Return the four sliders arranged in a 2x2 bokeh layout."""
        return column(row(self.nw, self.ne), row(self.sw, self.se))
# Wire up the model/view/controller triad and hand control to Bokeh.
model = Model()
controller = Controller(model)
view = BokehView(controller)
model.add_observer(view)
# Draw the initial (empty) state before serving.
view.update(model)
controller.set_train_pd(view.train_table)
controller.set_test_pd(view.test_table)
view.run()
| {
"content_hash": "e9e23062f878943086669e9384011884",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 100,
"avg_line_length": 42.97540983606557,
"alnum_prop": 0.5473965287049399,
"repo_name": "pprett/dataset-shift-osdc16",
"id": "5709550abc9ee39279a76e1899ca059f66cbc28c",
"size": "10486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkerboard/bokeh_checkerboard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1145"
},
{
"name": "Python",
"bytes": "24681"
}
],
"symlink_target": ""
} |
import collections
import six
import sys
import time
from oslo_log import log
from networking_odl.common._i18n import _LW
LOG = log.getLogger(__name__)
class CacheEntry(collections.namedtuple('CacheEntry', ['timeout', 'values'])):
    """A cached list of values plus the clock time at which it expires.

    Entries compare by *identity*, not by field values: two entries with
    equal timeout and values are still distinct cache slots.
    """

    # Exception captured while fetching this entry's values, if any.
    error = None

    @classmethod
    def create(cls, timeout, *values):
        """Build an entry expiring at ``timeout`` holding ``values``."""
        return CacheEntry(timeout, list(values))

    def add_value(self, value):
        """Append one more cached value to this entry."""
        self.values.append(value)

    def is_expired(self, current_clock):
        """Return True once the entry's deadline has passed."""
        return self.timeout <= current_clock

    def __hash__(self):
        # Identity hash, matching the identity-based equality below.
        return id(self)

    def __eq__(self, other):
        return self is other

    def __ne__(self, other):
        # BUG FIX: without this override the value-based tuple ``__ne__``
        # was inherited, so two distinct-but-equal entries reported both
        # ``a == b`` False *and* ``a != b`` False.  Keep ``!=`` consistent
        # with the identity-based ``__eq__`` above.
        return self is not other
class Cache(object):
    '''Generic mapping class used to cache mappings with a per-entry TTL.

    Example of uses:
        - host name to IP addresses mapping
        - IP addresses to ODL networking topology elements mapping

    Values are produced lazily by the ``fetch_all_func`` callable given to
    the constructor, which takes an iterable of keys and yields
    (key, value) pairs.
    '''
    # TODO(Federico Ressi) after Mitaka: this class should store cached data
    # in a place shared between more hosts using a caching mechanism coherent
    # with other OpenStack libraries. This is specially interesting in the
    # context of reliability when there are more Neutron instances and direct
    # connection to ODL is broken.
    # Factory for new cache entries; kept as a class attribute so subclasses
    # could substitute a different entry type.
    create_new_entry = CacheEntry.create
    def __init__(self, fetch_all_func):
        # Fail fast on a non-callable so the error surfaces at construction
        # time rather than on the first fetch.
        if not callable(fetch_all_func):
            message = 'Expected callable as parameter, got {!r}.'.format(
                fetch_all_func)
            raise TypeError(message)
        self._fetch_all = fetch_all_func
        self.clear()
    def clear(self):
        """Drop all cached entries."""
        self._entries = collections.OrderedDict()
    def fetch(self, key, timeout):
        """Return a single value for *key*, fetching it if missing/expired."""
        _, value = self.fetch_any([key], timeout=timeout)
        return value
    def fetch_any(self, keys, timeout):
        """Return the first (key, value) pair found for any of *keys*."""
        return next(self.fetch_all(keys=keys, timeout=timeout))
    def fetch_all(self, keys, timeout):
        """Yield (key, value) pairs for *keys*, refetching stale entries.

        Entries cached for less than *timeout* seconds are served as-is; the
        rest are re-fetched in a single call to the fetch function. Raises
        CacheFetchError if some key still has no value afterwards.
        """
        # this mean now in numbers
        # NOTE(review): time.clock() was removed in Python 3.8; this module
        # predates that (py2/six era) -- would need time.monotonic() on py3.
        current_clock = time.clock()
        # this is the moment in the future in which new entries will expires
        new_entries_timeout = current_clock + timeout
        # entries to be fetched because missing or expired
        new_entries = collections.OrderedDict()
        # all entries missing or expired
        missing = collections.OrderedDict()
        # captured error for the case a problem has to be reported
        cause_exc_info = None
        for key in keys:
            entry = self._entries.get(key)
            if entry is None or entry.is_expired(current_clock) or entry.error:
                # this entry has to be fetched
                new_entries[key] = missing[key] =\
                    self.create_new_entry(new_entries_timeout)
            elif entry.values:
                # Yield existing entry
                for value in entry.values:
                    yield key, value
            else:
                # This entry is not expired and there were no error where it
                # has been fetch. Therefore we accept that there are no values
                # for given key until it expires. This is going to produce a
                # KeyError if it is still missing at the end of this function.
                missing[key] = entry
        if missing:
            if new_entries:
                # Fetch some entries and update the cache
                try:
                    new_entry_keys = tuple(new_entries)
                    for key, value in self._fetch_all(new_entry_keys):
                        entry = new_entries.get(key)
                        if entry:
                            # Add fresh new value
                            entry.add_value(value)
                        else:
                            # This key was not asked, but we take it in any
                            # way. "Noli equi dentes inspicere donati."
                            new_entries[key] = entry = self.create_new_entry(
                                new_entries_timeout, value)
                # pylint: disable=broad-except
                except Exception:
                    # Something has gone wrong: update and yield what got until
                    # now before raising any error
                    cause_exc_info = sys.exc_info()
                    LOG.warning(
                        _LW('Error fetching values for keys: %r'),
                        ', '.join(repr(k) for k in new_entry_keys),
                        exc_info=cause_exc_info)
                # update the cache with new fresh entries
                self._entries.update(new_entries)
            missing_keys = []
            for key, entry in six.iteritems(missing):
                if entry.values:
                    # yield entries that was missing before
                    for value in entry.values:
                        # Yield just fetched entry
                        yield key, value
                else:
                    if cause_exc_info:
                        # mark this entry as failed
                        entry.error = cause_exc_info
                    # after all this entry is still without any value
                    missing_keys.append(key)
            if missing_keys:
                # After all some entry is still missing, probably because the
                # key was invalid. It's time to raise an error.
                missing_keys = tuple(missing_keys)
                if not cause_exc_info:
                    # Search for the error cause in missing entries
                    for key in missing_keys:
                        error = self._entries[key].error
                        if error:
                            # A cached entry for which fetch method produced an
                            # error will produce the same error if fetch method
                            # fails to fetch it again without giving any error
                            # Is this what we want?
                            break
                    else:
                        # If the cause of the problem is not knwow then
                        # probably keys were wrong
                        message = 'Invalid keys: {!r}'.format(
                            ', '.join(missing_keys))
                        error = KeyError(message)
                    # Raise-and-catch to populate a full exc_info triple for
                    # the CacheFetchError below.
                    try:
                        raise error
                    except KeyError:
                        cause_exc_info = sys.exc_info()
                raise CacheFetchError(
                    missing_keys=missing_keys, cause_exc_info=cause_exc_info)
class CacheFetchError(KeyError):
    """Raised by Cache.fetch_all when some keys could not be resolved.

    Carries the tuple of unresolved keys and the sys.exc_info() triple of
    the underlying failure, which can be re-raised via reraise_cause().
    """

    def __init__(self, missing_keys, cause_exc_info):
        message = str(cause_exc_info[1])
        super(CacheFetchError, self).__init__(message)
        self.missing_keys = missing_keys
        self.cause_exc_info = cause_exc_info

    def reraise_cause(self):
        """Re-raise the original failure with its original traceback."""
        six.reraise(*self.cause_exc_info)
| {
"content_hash": "3a4db067a094f73ba71d70b13ae7992a",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 79,
"avg_line_length": 37.827777777777776,
"alnum_prop": 0.5338522543692172,
"repo_name": "FedericoRessi/networking-odl",
"id": "0154737827900a467cd4476da73307982dc90d9e",
"size": "7407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_odl/common/cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "4119"
},
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "290131"
},
{
"name": "Shell",
"bytes": "26159"
}
],
"symlink_target": ""
} |
"""
Functional test suite for the root controller.
This is an example of how functional tests can be written for controllers.
As opposed to a unit-test, which test a small unit of functionality,
functional tests exercise the whole application and its WSGI stack.
Please read http://pythonpaste.org/webtest/ for more information.
"""
from nose.tools import assert_true
from moksha.tests import TestController
class TestRootController(TestController):
    """Functional checks against the Moksha root URL ('/')."""

    def test_index(self):
        """The main dashboard page renders."""
        response = self.app.get('/')
        assert '[ Moksha ]' in response

    def test_jquery_injection(self):
        """jQuery is injected on the main dashboard."""
        response = self.app.get('/')
        assert 'jquery' in response

    def test_global_resources(self):
        """Global resources (e.g. the CSRF token) are injected."""
        response = self.app.get('/')
        assert 'moksha_csrf_token' in response

    # Disabled: the default menu is intentionally not shipped by default.
    # def test_menu(self):
    #     """Ensure that our default menu is being created."""
    #     response = self.app.get('/')
    #     assert 'buildMenu' in response

    def test_tcpsocket(self):
        """The TCP socket (or AMQP connection) bootstrap is injected."""
        response = self.app.get('/')
        assert 'TCPSocket' in response or 'moksha_amqp_conn' in response
| {
"content_hash": "5a5b1464cc29dcd4f51e055949aa8407",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 32.19512195121951,
"alnum_prop": 0.6621212121212121,
"repo_name": "lmacken/moksha",
"id": "3ceaafacd57f81025761f7a720fe131d796fa2ad",
"size": "1963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moksha/tests/functional/test_root.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1347981"
},
{
"name": "Python",
"bytes": "653985"
},
{
"name": "Shell",
"bytes": "3879"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import copy
import datetime
import memcache
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
from keystone import token
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Token(token.Driver):
    """Memcache-backed keystone token persistence driver.

    Each token is stored under its own memcache key, and a per-user index
    list of (token_id, expiry) pairs is maintained (with compare-and-set)
    so all tokens for a user can be enumerated. Revoked tokens are appended
    to a single shared revocation-list key.
    """
    # Shared memcache key holding the comma-separated JSON revocation entries.
    revocation_key = 'revocation-list'
    def __init__(self, client=None):
        # An explicit client may be injected (e.g. by tests); otherwise one
        # is created lazily on first use via the `client` property.
        self._memcache_client = client
    @property
    def client(self):
        # Lazily construct the memcache client on first access.
        return self._memcache_client or self._get_memcache_client()
    def _get_memcache_client(self):
        """Create and cache a memcache client from the configured servers."""
        memcache_servers = CONF.memcache.servers.split(',')
        # NOTE(morganfainberg): The memcache client library for python is NOT
        # thread safe and should not be passed between threads. This is highly
        # specific to the cas() (compare and set) methods and the caching of
        # the previous value(s). It appears greenthread should ensure there is
        # a single data structure per spawned greenthread.
        self._memcache_client = memcache.Client(memcache_servers, debug=0,
                                                cache_cas=True)
        return self._memcache_client
    def _prefix_token_id(self, token_id):
        # Memcache key for a single token record.
        return 'token-%s' % token_id.encode('utf-8')
    def _prefix_user_id(self, user_id):
        # Memcache key for a user's token-index list.
        return 'usertokens-%s' % user_id.encode('utf-8')
    def get_token(self, token_id):
        """Return the stored token dict or raise TokenNotFound."""
        if token_id is None:
            raise exception.TokenNotFound(token_id='')
        ptk = self._prefix_token_id(token_id)
        token_ref = self.client.get(ptk)
        if token_ref is None:
            raise exception.TokenNotFound(token_id=token_id)
        return token_ref
    def create_token(self, token_id, data):
        """Store a token and index it under its user (and trustee, if any).

        Returns a deep copy of the stored data.
        """
        data_copy = copy.deepcopy(data)
        ptk = self._prefix_token_id(token_id)
        if not data_copy.get('expires'):
            data_copy['expires'] = token.default_expire_time()
        if not data_copy.get('user_id'):
            data_copy['user_id'] = data_copy['user']['id']
        kwargs = {}
        if data_copy['expires'] is not None:
            # Let memcache itself expire the record at the token expiry time.
            expires_ts = utils.unixtime(data_copy['expires'])
            kwargs['time'] = expires_ts
        self.client.set(ptk, data_copy, **kwargs)
        user_id = data['user']['id']
        user_key = self._prefix_user_id(user_id)
        # Append the new token_id to the token-index-list stored in the
        # user-key within memcache.
        self._update_user_list_with_cas(user_key, token_id, data_copy)
        if CONF.trust.enabled and data.get('trust_id'):
            # NOTE(morganfainberg): If trusts are enabled and this is a trust
            # scoped token, we add the token to the trustee list as well. This
            # allows password changes of the trustee to also expire the token.
            # There is no harm in placing the token in multiple lists, as
            # _list_tokens is smart enough to handle almost any case of
            # valid/invalid/expired for a given token.
            token_data = data_copy['token_data']
            if data_copy['token_version'] == token.provider.V2:
                trustee_user_id = token_data['access']['trust'][
                    'trustee_user_id']
            elif data_copy['token_version'] == token.provider.V3:
                trustee_user_id = token_data['OS-TRUST:trust'][
                    'trustee_user_id']
            else:
                raise token.provider.UnsupportedTokenVersionException(
                    _('Unknown token version %s') %
                    data_copy.get('token_version'))
            trustee_key = self._prefix_user_id(trustee_user_id)
            self._update_user_list_with_cas(trustee_key, token_id, data_copy)
        return copy.deepcopy(data_copy)
    def _convert_user_index_from_json(self, token_list, user_key):
        """Upgrade an old JSON-string token index to the list-of-pairs form.

        Returns a list of (token_id, fake_expiry) pairs, or an empty list if
        the stored value cannot be parsed.
        """
        try:
            # NOTE(morganfainberg): Try loading in the old format
            # of the list.
            token_list = jsonutils.loads('[%s]' % token_list)
            # NOTE(morganfainberg): Build a delta based upon the
            # token TTL configured. Since we are using the old
            # format index-list, we will create a "fake" expiration
            # that should be further in the future than the actual
            # expiry. To avoid locking up keystone trying to
            # communicate to memcached, it is better to use a fake
            # value. The logic that utilizes this list already
            # knows how to handle the case of tokens that are
            # no longer valid being included.
            delta = datetime.timedelta(
                seconds=CONF.token.expiration)
            new_expiry = timeutils.normalize_time(
                timeutils.utcnow()) + delta
            for idx, token_id in enumerate(token_list):
                token_list[idx] = (token_id, new_expiry)
        except Exception:
            # NOTE(morganfainberg): Catch any errors thrown here. There is
            # nothing the admin or operator needs to do in this case, but
            # it should be logged that there was an error and some action was
            # taken to correct it
            LOG.info(_('Unable to convert user-token-index to new format; '
                       'clearing user token index record "%s".'), user_key)
            token_list = []
        return token_list
    def _update_user_list_with_cas(self, user_key, token_id, token_data):
        """Atomically append (token_id, expiry) to a user's token index.

        Expired entries are pruned in the same pass. Retries on CAS
        contention up to the configured maximum, then raises
        UnexpectedError.
        """
        cas_retry = 0
        max_cas_retry = CONF.memcache.max_compare_and_set_retry
        current_time = timeutils.normalize_time(timeutils.utcnow())
        self.client.reset_cas()
        while cas_retry <= max_cas_retry:
            # NOTE(morganfainberg): cas or "compare and set" is a function of
            # memcache. It will return false if the value has changed since the
            # last call to client.gets(). This is the memcache supported method
            # of avoiding race conditions on set(). Memcache is already atomic
            # on the back-end and serializes operations.
            #
            # cas_retry is for tracking our iterations before we give up (in
            # case memcache is down or something horrible happens we don't
            # iterate forever trying to compare and set the new value.
            cas_retry += 1
            token_list = self.client.gets(user_key)
            filtered_list = []
            if token_list is not None:
                if not isinstance(token_list, list):
                    # Old-format index: convert before filtering.
                    token_list = self._convert_user_index_from_json(token_list,
                                                                    user_key)
                for token_i, expiry in token_list:
                    expires_at = timeutils.normalize_time(expiry)
                    if expires_at < current_time:
                        # skip tokens that are expired.
                        continue
                    # Add the still valid token_id to the list.
                    filtered_list.append((token_i, expiry))
            # Add the new token_id and expiry.
            filtered_list.append(
                (token_id, timeutils.normalize_time(token_data['expires'])))
            # Use compare-and-set (cas) to set the new value for the
            # token-index-list for the user-key. Cas is used to prevent race
            # conditions from causing the loss of valid token ids from this
            # list.
            if self.client.cas(user_key, filtered_list):
                msg = _('Successful set of token-index-list for user-key '
                        '"%(user_key)s", #%(count)d records')
                LOG.debug(msg, {'user_key': user_key,
                                'count': len(filtered_list)})
                return filtered_list
            # The cas function will return true if it succeeded or false if it
            # failed for any reason, including memcache server being down, cas
            # id changed since gets() called (the data changed between when
            # this loop started and this point, etc.
            error_msg = _('Failed to set token-index-list for user-key '
                          '"%(user_key)s". Attempt %(cas_retry)d of '
                          '%(cas_retry_max)d')
            LOG.debug(error_msg,
                      {'user_key': user_key,
                       'cas_retry': cas_retry,
                       'cas_retry_max': max_cas_retry})
        # Exceeded the maximum retry attempts.
        error_msg = _('Unable to add token user list')
        raise exception.UnexpectedError(error_msg)
    def _add_to_revocation_list(self, data):
        """Append the token data to the shared revocation-list key.

        append() fails if the key does not exist yet, hence the
        append -> add -> append fallback chain (add loses a race if another
        process creates the key first, so a final append is attempted).
        """
        data_json = jsonutils.dumps(data)
        if not self.client.append(self.revocation_key, ',%s' % data_json):
            if not self.client.add(self.revocation_key, data_json):
                if not self.client.append(self.revocation_key,
                                          ',%s' % data_json):
                    msg = _('Unable to add token to revocation list.')
                    raise exception.UnexpectedError(msg)
    def delete_token(self, token_id):
        """Delete a token and record it on the revocation list."""
        # Test for existence
        data = self.get_token(token_id)
        ptk = self._prefix_token_id(token_id)
        result = self.client.delete(ptk)
        self._add_to_revocation_list(data)
        return result
    def list_tokens(self, user_id, tenant_id=None, trust_id=None,
                    consumer_id=None):
        """Return ids of the user's live tokens matching the given filters."""
        tokens = []
        user_key = self._prefix_user_id(user_id)
        current_time = timeutils.normalize_time(timeutils.utcnow())
        token_list = self.client.get(user_key) or []
        if not isinstance(token_list, list):
            # NOTE(morganfainberg): This is for compatibility for old-format
            # token-lists that were a JSON string of just token_ids. This code
            # will reference the underlying expires directly from the
            # token_ref vs in this list, so setting to none just ensures the
            # loop works as expected.
            token_list = [(i, None) for i in
                          jsonutils.loads('[%s]' % token_list)]
        for token_id, expiry in token_list:
            ptk = self._prefix_token_id(token_id)
            token_ref = self.client.get(ptk)
            if token_ref:
                if tenant_id is not None:
                    tenant = token_ref.get('tenant')
                    if not tenant:
                        continue
                    if tenant.get('id') != tenant_id:
                        continue
                if trust_id is not None:
                    trust = token_ref.get('trust_id')
                    if not trust:
                        continue
                    if trust != trust_id:
                        continue
                if consumer_id is not None:
                    try:
                        oauth = token_ref['token_data']['token']['OS-OAUTH1']
                        if oauth.get('consumer_id') != consumer_id:
                            continue
                    except KeyError:
                        continue
                if (timeutils.normalize_time(token_ref['expires']) <
                        current_time):
                    # Skip expired tokens.
                    continue
                tokens.append(token_id)
        return tokens
    def list_revoked_tokens(self):
        """Return the parsed revocation list (empty if none recorded)."""
        list_json = self.client.get(self.revocation_key)
        if list_json:
            return jsonutils.loads('[%s]' % list_json)
        return []
| {
"content_hash": "f4b19dadc8ed5afc43eb3a8d23b2ff91",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 79,
"avg_line_length": 44.2851711026616,
"alnum_prop": 0.5620331415815232,
"repo_name": "rickerc/keystone_audit",
"id": "08c1c4098ca38174509dd74def78df7efdba4d86",
"size": "12278",
"binary": false,
"copies": "1",
"ref": "refs/heads/cis-havana-staging",
"path": "keystone/token/backends/memcache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2399953"
},
{
"name": "Shell",
"bytes": "11956"
}
],
"symlink_target": ""
} |
from .pygren import *
| {
"content_hash": "e2a4fd5ffb23aeaa68d0457039420742",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7272727272727273,
"repo_name": "PyGrEn/python-pygren",
"id": "4e038cd869a636e24b9f729b24e6af136d3be227",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygren/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import flask
import auth
import model
import util
import wtforms
from flask.ext import wtf
from main import app
class TrackUpdateForm(wtf.Form):
    """WTForms form for creating/updating a Track entity.

    Field names mirror common audio tagging attributes (ID3 / MusicBrainz).
    String fields are whitespace-stripped via util.strip_filter.
    """
    album = wtforms.StringField('Album', filters=[util.strip_filter])
    title = wtforms.StringField('Title', filters=[util.strip_filter])
    artist = wtforms.StringField('Artist', filters=[util.strip_filter])
    albumartist = wtforms.StringField('Album Artist', filters=[util.strip_filter])
    originaldate = wtforms.StringField('Original Date', filters=[util.strip_filter])
    composer = wtforms.StringField('Composer', filters=[util.strip_filter])
    lyricist = wtforms.StringField('Lyricist', filters=[util.strip_filter])
    writer = wtforms.StringField('Writer', filters=[util.strip_filter])
    totaltracks = wtforms.IntegerField('Total Tracks')
    discnumber = wtforms.IntegerField('Disc Number')
    genre = wtforms.SelectMultipleField('Genre', choices=[(t, t.title()) for t in model.Track.genre_list().keys()],
                                        default="unknown")
    # NOTE(review): `mood` reuses Track.genre_list() for its choices, and both
    # multi-select fields use a scalar default ("unknown"/"Okay") where a list
    # would be expected for SelectMultipleField -- confirm intent.
    mood = wtforms.SelectMultipleField('Mood', choices=[(t, t.title()) for t in model.Track.genre_list().keys()],
                                       default="Okay")
    rating = wtforms.IntegerField('Rating')
    musicbrainz_recordingid = wtforms.StringField('musicbrainz_recordingid', filters=[util.strip_filter])
    musicbrainz_trackid = wtforms.StringField('musicbrainz_trackid', filters=[util.strip_filter])
    musicbrainz_albumid = wtforms.StringField('musicbrainz_albumid', filters=[util.strip_filter])
    musicbrainz_artistid = wtforms.StringField('musicbrainz_artistid', filters=[util.strip_filter])
    musicbrainz_albumartistid = wtforms.StringField('musicbrainz_albumartistid', filters=[util.strip_filter])
    language = wtforms.StringField('language', filters=[util.strip_filter])
    website = wtforms.StringField('website', filters=[util.strip_filter])
    stream_url = wtforms.StringField('stream_url', filters=[util.strip_filter])
# ##############################################################################
# Tracks List
# ##############################################################################
@app.route('/track/')
@auth.login_required
def track_list():
    """Render a paginated listing of all Track entities."""
    # user_key=auth.current_user_key() could scope the query to the current
    # user; it is currently disabled, so every track is listed.
    records, cursor = model.Track.get_dbs()
    next_url = util.generate_next_url(cursor)
    return flask.render_template(
        'track/track_list.html',
        html_class='track-list',
        title='Track List',
        track_dbs=records,
        next_url=next_url,
    )
# ##############################################################################
# Tracks Create
# ##############################################################################
@app.route('/track/create/', methods=['GET', 'POST'])
@auth.login_required
def track_create():
    """Show the creation form; on a valid POST, persist a new Track."""
    form = TrackUpdateForm()
    if not form.validate_on_submit():
        # GET request, or POST with validation errors: (re-)render the form.
        return flask.render_template(
            'track/track_update.html',
            html_class='track-create',
            title='Create Track',
            form=form,
        )
    new_track = model.Track()
    form.populate_obj(new_track)
    new_track.put()
    return flask.redirect('/track/%i' % new_track.key.id())
# ##############################################################################
# Tracks View
# ##############################################################################
@app.route('/track/<int:track_id>/')
@auth.login_required
def track_view(track_id):
    """Render the detail page for a single Track."""
    track_db = model.Track.get_by_id(track_id)
    # NOTE(review): the per-user ownership check below is disabled, so any
    # logged-in user can view any track -- confirm this is intentional.
    # if not track_db or track_db.user_key != auth.current_user_key():
    # flask.abort(404)
    return flask.render_template(
        'track/track_view.html',
        html_class='track-view',
        title=track_db.title,
        track_db=track_db,
    )
# ##############################################################################
# Tracks Update
# ##############################################################################
@app.route('/track/<int:track_id>/update/', methods=['GET', 'POST'])
@auth.login_required
def track_update(track_id):
    """Edit an existing Track; on a valid POST, save and return to the list."""
    track_db = model.Track.get_by_id(track_id)
    # NOTE(review): the per-user ownership check below is disabled, so any
    # logged-in user can edit any track -- confirm this is intentional.
    # if not track_db or track_db.user_key != auth.current_user_key():
    #     flask.abort(404)
    form = TrackUpdateForm(obj=track_db)
    if not form.validate_on_submit():
        # GET request, or POST with validation errors: (re-)render the form.
        return flask.render_template(
            'track/track_update.html',
            html_class='track-update',
            title=track_db.title,
            form=form,
            track_db=track_db,
        )
    form.populate_obj(track_db)
    track_db.put()
    return flask.redirect(flask.url_for('track_list', order='-modified'))
) | {
"content_hash": "2a766ced107a8e71519b06a3ff0249d9",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 115,
"avg_line_length": 40.73451327433628,
"alnum_prop": 0.5711492504888116,
"repo_name": "ssxenon01/music-app",
"id": "8416ce67e584c82689a71509a88519530895f616",
"size": "4620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/control/track.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "96943"
},
{
"name": "CoffeeScript",
"bytes": "9306"
},
{
"name": "HTML",
"bytes": "136403"
},
{
"name": "JavaScript",
"bytes": "49507"
},
{
"name": "PHP",
"bytes": "92328"
},
{
"name": "Python",
"bytes": "182431"
}
],
"symlink_target": ""
} |
"""
Django settings for rss_reader project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&4bbeqzo=i@oaq(xy-c$vxct-zu2$r9!42g&bmq1d3+rgq8dt%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Application definition
DEFAULT_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
'djangular',
'rest_framework',
'watson',
'rest_auth',
'django_extensions'
)
LOCAL_APPS = (
'main',
)
INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'rss_reader.urls'
WSGI_APPLICATION = 'rss_reader.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'feeddb',
'USER': 'combustible',
'PASSWORD': 'lemons',
'HOST': 'localhost',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static/"),)
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
| {
"content_hash": "69fdd1419e660974df2d23d51220bfb2",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 71,
"avg_line_length": 23.36036036036036,
"alnum_prop": 0.6968762051677594,
"repo_name": "CombustibleLemons/rss-reader",
"id": "703ae19fa147ccc5178351a287fa483860e9d994",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rss_reader/conf/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6483"
},
{
"name": "JavaScript",
"bytes": "91192"
},
{
"name": "Python",
"bytes": "154076"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
# Packaging script for the django-urli18n reusable app.
from setuptools import setup, find_packages
setup(
    name='django-urli18n',
    version='0.1',
    description='A reusable Django app to display the current activated language in the URL',
    author='Torsten Engelbrecht',
    author_email='torsten.engelbrecht@gmail.com',
    url='https://github.com/Torte/django-urli18n',
    download_url='https://github.com/Torte/django-urli18n/tarball/master',
    # find_packages() discovers the app package(s) automatically.
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        'Environment :: Web Environment',
        "Programming Language :: Python",
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)
"content_hash": "c7845c056b067ccce5ddb58118d12a8a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 95,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6517857142857143,
"repo_name": "torte/django-urli18n",
"id": "51e3a73dbd4535d8bc704570af8809f103066e83",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64105"
}
],
"symlink_target": ""
} |
from __future__ import (
    absolute_import, division, print_function, unicode_literals
)
# Single-letter retry-policy codes. NOTE(review): presumably matched against a
# per-request retry setting elsewhere in the package -- confirm with callers.
NEVER = 'n'  # never retry
CONNECTION_ERROR = 'c'  # retry after connection errors
TIMEOUT = 't'  # retry after timeouts
CONNECTION_ERROR_AND_TIMEOUT = 'ct'  # retry after either failure kind
# By default, retry only on connection errors.
DEFAULT = CONNECTION_ERROR
# Default cap on the number of retry attempts.
DEFAULT_RETRY_LIMIT = 4
| {
"content_hash": "4db3f41fc05b9e3a583e8d194b518c77",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 20.818181818181817,
"alnum_prop": 0.7117903930131004,
"repo_name": "Willyham/tchannel-python",
"id": "5ec115e240e9edf9404ba1c2459f27d4f1306483",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tchannel/retry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1902"
},
{
"name": "Python",
"bytes": "719974"
},
{
"name": "Shell",
"bytes": "1473"
},
{
"name": "Thrift",
"bytes": "13859"
}
],
"symlink_target": ""
} |
import pickle
import pickletools
from test import support
from test.pickletester import AbstractPickleTests
import doctest
import unittest
class OptimizedPickleTests(AbstractPickleTests, unittest.TestCase):
def dumps(self, arg, proto=None, **kwargs):
return pickletools.optimize(pickle.dumps(arg, proto, **kwargs))
def loads(self, buf, **kwds):
return pickle.loads(buf, **kwds)
# Test relies on precise output of dumps()
test_pickle_to_2x = None
# Test relies on writing by chunks into a file object.
test_framed_write_sizes_with_delayed_writer = None
def test_optimize_long_binget(self):
data = [str(i) for i in range(257)]
data.append(data[-1])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(data, proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, data)
self.assertIs(unpickled[-1], unpickled[-2])
pickled2 = pickletools.optimize(pickled)
unpickled2 = pickle.loads(pickled2)
self.assertEqual(unpickled2, data)
self.assertIs(unpickled2[-1], unpickled2[-2])
self.assertNotIn(pickle.LONG_BINGET, pickled2)
self.assertNotIn(pickle.LONG_BINPUT, pickled2)
def test_optimize_binput_and_memoize(self):
pickled = (b'\x80\x04\x95\x15\x00\x00\x00\x00\x00\x00\x00'
b']\x94(\x8c\x04spamq\x01\x8c\x03ham\x94h\x02e.')
# 0: \x80 PROTO 4
# 2: \x95 FRAME 21
# 11: ] EMPTY_LIST
# 12: \x94 MEMOIZE
# 13: ( MARK
# 14: \x8c SHORT_BINUNICODE 'spam'
# 20: q BINPUT 1
# 22: \x8c SHORT_BINUNICODE 'ham'
# 27: \x94 MEMOIZE
# 28: h BINGET 2
# 30: e APPENDS (MARK at 13)
# 31: . STOP
self.assertIn(pickle.BINPUT, pickled)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, ['spam', 'ham', 'ham'])
self.assertIs(unpickled[1], unpickled[2])
pickled2 = pickletools.optimize(pickled)
unpickled2 = pickle.loads(pickled2)
self.assertEqual(unpickled2, ['spam', 'ham', 'ham'])
self.assertIs(unpickled2[1], unpickled2[2])
self.assertNotIn(pickle.BINPUT, pickled2)
class MiscTestCase(unittest.TestCase):
    """Miscellaneous module-level checks for pickletools."""
    def test__all__(self):
        """pickletools.__all__ exposes only the public API; everything in
        `not_exported` below is an internal helper deliberately excluded."""
        not_exported = {
            'bytes_types',
            'UP_TO_NEWLINE', 'TAKEN_FROM_ARGUMENT1',
            'TAKEN_FROM_ARGUMENT4', 'TAKEN_FROM_ARGUMENT4U',
            'TAKEN_FROM_ARGUMENT8U', 'ArgumentDescriptor',
            'read_uint1', 'read_uint2', 'read_int4', 'read_uint4',
            'read_uint8', 'read_stringnl', 'read_stringnl_noescape',
            'read_stringnl_noescape_pair', 'read_string1',
            'read_string4', 'read_bytes1', 'read_bytes4',
            'read_bytes8', 'read_bytearray8', 'read_unicodestringnl',
            'read_unicodestring1', 'read_unicodestring4',
            'read_unicodestring8', 'read_decimalnl_short',
            'read_decimalnl_long', 'read_floatnl', 'read_float8',
            'read_long1', 'read_long4',
            'uint1', 'uint2', 'int4', 'uint4', 'uint8', 'stringnl',
            'stringnl_noescape', 'stringnl_noescape_pair', 'string1',
            'string4', 'bytes1', 'bytes4', 'bytes8', 'bytearray8',
            'unicodestringnl', 'unicodestring1', 'unicodestring4',
            'unicodestring8', 'decimalnl_short', 'decimalnl_long',
            'floatnl', 'float8', 'long1', 'long4',
            'StackObject',
            'pyint', 'pylong', 'pyinteger_or_bool', 'pybool', 'pyfloat',
            'pybytes_or_str', 'pystring', 'pybytes', 'pybytearray',
            'pyunicode', 'pynone', 'pytuple', 'pylist', 'pydict',
            'pyset', 'pyfrozenset', 'pybuffer', 'anyobject',
            'markobject', 'stackslice', 'OpcodeInfo', 'opcodes',
            'code2op',
        }
        support.check__all__(self, pickletools, not_exported=not_exported)
def load_tests(loader, tests, pattern):
    """unittest load_tests hook: also run pickletools' embedded doctests."""
    doc_suite = doctest.DocTestSuite(pickletools)
    tests.addTest(doc_suite)
    return tests
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "542e3d05cc2db5ffd125528f37d02089",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 74,
"avg_line_length": 40.63461538461539,
"alnum_prop": 0.5847136772361571,
"repo_name": "brython-dev/brython",
"id": "d37af79e878a2e52042e2752f0ca5c9649a45bfa",
"size": "4226",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "www/src/Lib/test/test_pickletools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24308"
},
{
"name": "HTML",
"bytes": "5144999"
},
{
"name": "JavaScript",
"bytes": "4143100"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "22236375"
},
{
"name": "Roff",
"bytes": "21126"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `scatterpolargl.marker.sizemode`
    property, restricting it to "diameter" or "area".
    Follows the generated-validator pattern: callers may override
    `edit_type` and `values` through kwargs."""
    def __init__(
        self, plotly_name="sizemode", parent_name="scatterpolargl.marker", **kwargs
    ):
        super(SizemodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["diameter", "area"]),
            **kwargs,
        )
| {
"content_hash": "ace8569649f7ea7b01c66b7e9a1ea9e9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 35.357142857142854,
"alnum_prop": 0.6060606060606061,
"repo_name": "plotly/plotly.py",
"id": "5794ca9709a304b7961b7af75c0fbb723eea5d2a",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/marker/_sizemode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import configparser
import os
import shutil
from unittest import TestCase
from unittest.mock import MagicMock, patch
import click
from click.testing import CliRunner
from scripttest import TestFileEnvironment
from logger import auto, remote, cli, params, convert as conv, merge as mrg
from logger.cli import cmd, start, get, main, init, ls, clear, convert, merge
from logger.params import LogParam
class TestCli(TestCase):
    """Tests for the click-based command line interface in ``logger.cli``.

    The expensive collaborators (AutoLogger, RemoteLogger, Converter,
    Merge) are replaced by MagicMock via ``@patch.object``, so most tests
    only exercise argument parsing, dispatch and exit codes through
    click's CliRunner. ``test_script__start`` runs the real script
    end-to-end with scripttest.
    """
    @classmethod
    def setUpClass(cls):
        # Ensure a settings file exists in the CWD by copying the one
        # bundled under tests/; removed again in tearDownClass.
        file_in_tests = os.path.join(os.getcwd(), 'tests', LogParam.FILE_NAME)
        file_current = os.path.join(os.getcwd(), LogParam.FILE_NAME)
        if not os.path.exists(file_current):
            shutil.copy(file_in_tests, file_current)
    @classmethod
    def tearDownClass(cls):
        # Remove the settings file copied in setUpClass.
        file_current = os.path.join(os.getcwd(), LogParam.FILE_NAME)
        os.remove(file_current)
    # --- cmd group / start subcommand -----------------------------------
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=True))
    def test_cmd(self):
        runner = CliRunner()
        result = runner.invoke(cmd, ['-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=True))
    def test_cmd__start(self):
        runner = CliRunner()
        result = runner.invoke(cmd, ['start', '-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=True))
    def test_start(self):
        runner = CliRunner()
        result = runner.invoke(start, ['-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    # The CLI reports failures in output but still exits 0.
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=False))
    def test_start__fail(self):
        runner = CliRunner()
        result = runner.invoke(start, ['-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=True))
    def test_start__debug(self):
        runner = CliRunner()
        result = runner.invoke(start, ['--debug', '-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    # Exceptions raised inside the logger must be caught by the command.
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=False, side_effect=IOError('io error')))
    def test_start__ioerror(self):
        runner = CliRunner()
        result = runner.invoke(start, ['--debug', '-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=False, side_effect=Exception('exception')))
    def test_start__exception(self):
        runner = CliRunner()
        result = runner.invoke(start, ['--debug', '-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
    def test_start__no_testnumber(self):
        # Missing -t option: exit code stays 0 but an error message is shown.
        runner = CliRunner()
        result = runner.invoke(start)
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(result.output, 'Error: test-number を設定してください。\n')
    # When no settings can be read, start falls back to interactive
    # initialization and writes a fresh settings file before logging.
    @patch.object(click, 'prompt', MagicMock(return_value=""))
    @patch.object(params.LogParam, 'write_ini', MagicMock(return_value="setting_file"))
    @patch.object(params.LogParam, 'read_ini', MagicMock(return_value=False))
    @patch.object(auto.AutoLogger, 'start', MagicMock(return_value=True))
    def test_start__init(self):
        runner = CliRunner()
        result = runner.invoke(start, ['-t', '1-1-1'])
        self.assertEqual(result.exit_code, 0)
        self.assertRegex(result.output, '設定保存完了: setting_file.*')
    # --- init subcommand -------------------------------------------------
    @patch.object(params.LogParam, 'write_ini', MagicMock(return_value="setting_file"))
    def test_init(self):
        # Every option supplied on the command line; write_ini is mocked so
        # only option plumbing and the confirmation message are verified.
        args = [
            '--shell-cmd', 'telnet',
            '--host-name', '192.168.11.2',
            '--log-cmd', 'log_to_rom',
            '--log-clear-cmd', 'log_clear',
            '--log-extension', 'tar.gz',
            '--remote-log-dir', '/home/user/log',
            '--remote-dist-dir', '/home/user/log_dist',
            '--local-src-dir', '/home/user/log_src',
            '--convert-rule', '/home/user/rule.csv',
            '--merge-dir', 'tests/logs',
            '--usb-dir', '/mnt/USB0']
        runner = CliRunner()
        result = runner.invoke(init, args)
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(result.output, '設定保存完了: setting_file\n')
    # --- get subcommand --------------------------------------------------
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[]))
    def test_get(self):
        runner = CliRunner()
        result = runner.invoke(get)
        self.assertEqual(result.exit_code, 0)
    # NOTE(review): identical setup to test_get — presumably this was meant
    # to mock a failing get_log; confirm the intended failure mode.
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[]))
    def test_get__fail(self):
        runner = CliRunner()
        result = runner.invoke(get)
        self.assertEqual(result.exit_code, 0)
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[]))
    def test_get__debug(self):
        runner = CliRunner()
        result = runner.invoke(get, ['--debug'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[], side_effect=IOError('io error')))
    def test_get__ioerror(self):
        runner = CliRunner()
        result = runner.invoke(get, ['--debug'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[], side_effect=Exception('exception')))
    def test_get__exception(self):
        runner = CliRunner()
        result = runner.invoke(get, ['--debug'])
        self.assertEqual(result.exit_code, 0)
    # -c / --convert runs the Converter on every retrieved file.
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=["file1", "file2"]))
    @patch.object(conv.Converter, 'exec', MagicMock(return_value=True))
    def test_get__convert(self):
        runner = CliRunner()
        result = runner.invoke(get, ['-c', '--debug'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=["file1", "file2"]))
    @patch.object(conv.Converter, 'exec', MagicMock(return_value=True))
    def test_get__convert_long(self):
        runner = CliRunner()
        result = runner.invoke(get, ['--convert', '--debug'])
        self.assertEqual(result.exit_code, 0)
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[]))
    def test_get__dir(self):
        runner = CliRunner()
        result = runner.invoke(get, ['-d', 'out_dir'])
        self.assertEqual(result.exit_code, 0)
    # Passing a file name argument switches get into move mode.
    @patch.object(remote.RemoteLogger, 'move_log', MagicMock(return_value=[]))
    def test_get__move(self):
        runner = CliRunner()
        result = runner.invoke(get, ['--debug', 'file_name'])
        self.assertEqual(result.exit_code, 0)
    # --- ls / clear subcommands ------------------------------------------
    @patch.object(remote.RemoteLogger, 'list_log', MagicMock(return_value=[]))
    def test_ls__empty(self):
        runner = CliRunner()
        result = runner.invoke(ls)
        self.assertEqual(result.output, '')
        self.assertEqual(result.exit_code, 0)
    @patch.object(remote.RemoteLogger, 'clear_log', MagicMock())
    def test_clear(self):
        runner = CliRunner()
        result = runner.invoke(clear)
        self.assertEqual(result.exit_code, 0)
    # get also triggers interactive initialization when settings are absent.
    @patch.object(click, 'prompt', MagicMock(return_value=""))
    @patch.object(params.LogParam, 'write_ini', MagicMock(return_value="setting_file"))
    @patch.object(params.LogParam, 'read_ini', MagicMock(return_value=False))
    @patch.object(remote.RemoteLogger, 'get_log', MagicMock(return_value=[]))
    def test_get__init(self):
        runner = CliRunner()
        result = runner.invoke(get)
        self.assertEqual(result.exit_code, 0)
        self.assertRegex(result.output, '設定保存完了: setting_file.*')
    # --- convert subcommand ----------------------------------------------
    @patch.object(conv.Converter, 'exec', MagicMock(return_value=True))
    def test_convert(self):
        runner = CliRunner()
        result = runner.invoke(convert, ["-s", "./tests/rule.csv", "./tests/test.tar.gz"])
        self.assertEqual(result.exit_code, 0)
    @patch.object(conv.Converter, 'exec', MagicMock(return_value=False))
    def test_convert__fail(self):
        runner = CliRunner()
        result = runner.invoke(convert, ["--debug", "-s", "./tests/rule.csv", "./tests/test.tar.gz"])
        self.assertEqual(result.exit_code, 0)
    @patch.object(conv.Converter, 'exec', MagicMock(return_value=False))
    def test_convert__file_1(self):
        result = CliRunner().invoke(convert, ["--debug", "-s", "./tests/rule.csv", "-f", "./tests/test.log"])
        self.assertEqual(result.exit_code, 0)
    # Same as test_convert__file_1 but with the option order swapped.
    @patch.object(conv.Converter, 'exec', MagicMock(return_value=False))
    def test_convert__file_2(self):
        result = CliRunner().invoke(convert, ["--debug", "-f", "./tests/test.log", "-s", "./tests/rule.csv"])
        self.assertEqual(result.exit_code, 0)
    # Nonexistent paths are rejected by click itself -> exit code 2.
    def test_convert__file_not_exists(self):
        result = CliRunner().invoke(convert, ["--debug", "-s", "./tests/rule.csv", "./test.tar.gz"])
        self.assertEqual(result.exit_code, 2)
    def test_convert__rule_not_exists(self):
        result = CliRunner().invoke(convert, ["--debug", "-s", "./rule.csv", "./tests/test.tar.gz"])
        self.assertEqual(result.exit_code, 2)
    def test_convert__rule_not_exists_conf(self):
        result = CliRunner().invoke(convert, ["--debug", "./tests/test.tar.gz"])
        self.assertEqual(result.exit_code, 2)
    # --- merge subcommand ------------------------------------------------
    @patch.object(mrg.Merge, 'exec', MagicMock(return_value=True))
    def test_merge(self):
        result = CliRunner().invoke(merge, ["--debug", "./tests/logs"])
        self.assertEqual(result.exit_code, 0)
    def test_merge__empty_dir(self):
        # An empty directory is accepted and merges nothing.
        dir_name = "test_merge__empty_dir"
        os.mkdir(dir_name)
        result = CliRunner().invoke(merge, ["--debug", dir_name])
        self.assertEqual(result.exit_code, 0)
        os.rmdir(dir_name)
    @patch.object(cli, 'cmd', MagicMock(return_value=True))
    def test_main(self):
        main()
    # --- end-to-end script run via scripttest ----------------------------
    def test_script__start(self):
        # Builds a real settings file in ./.tmp and runs the CLI script;
        # asserts the Japanese success message and that files were created.
        env = TestFileEnvironment('./.tmp')
        ini = configparser.ConfigParser()
        ini[LogParam.DEFAULT] = {
            'host_name': 'root@172.30.10.2',
            'shell': 'ssh',
            'log_cmd': 'log_to_rom',
            'log_clear_cmd': 'log_clear',
            'log_extension': 'tar.gz',
            'remote_log_dir': '/root',
            'remote_dist_dir': '/mnt/log',
            'local_src_dir': '../',
            'convert_rule': '../tests/rule.csv',
            'merge_dir': 'logs',
            'usb_dir': '/mnt/USB0'
        }
        file_path = os.path.join(os.getcwd(), '.tmp', LogParam.FILE_NAME)
        with open(file_path, 'w') as file:
            ini.write(file)
        result = env.run('../logger/cli.py get -c --debug', cwd='.tmp', expect_stderr=True)
        print(result)
        self.assertRegex(result.stdout, '正常に終了しました。')
        self.assertTrue(len(result.files_created) > 0)
| {
"content_hash": "72d596f929b302453afd5565ad4db8a7",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 113,
"avg_line_length": 41.567567567567565,
"alnum_prop": 0.6129481701653353,
"repo_name": "ujiro99/auto_logger",
"id": "48514ffc20879f8f57589b206a2ac48739aa70bc",
"size": "10890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75309"
},
{
"name": "Shell",
"bytes": "520"
}
],
"symlink_target": ""
} |
"""JSON Tree Library
"""
import collections
import datetime
import json
import json.scanner
import re
import sys
# Package metadata; the version tuple is also rendered to a dotted string.
__version__ = (0,5,1)
__version_string__ = '.'.join(str(x) for x in __version__)
__author__ = 'Doug Napoleone'
__email__ = 'doug.napoleone+jsontree@gmail.com'
# Python 3 has no ``basestring``; alias it so isinstance checks elsewhere
# in the module work unchanged on both major versions.
if sys.version_info.major > 2 :
    basestring = str
# ISO/UTC date examples:
#  2013-04-29T22:45:35.294303Z
#  2013-04-29T22:45:35.294303
#  2013-04-29 22:45:35
#  2013-04-29T22:45:35.4361-0400
#  2013-04-29T22:45:35.4361-04:00
# Groups: 'parsable' = strptime-able core, 'T' = date/time separator used,
# 'f' = fractional seconds, 'z' = numeric UTC offset, 'Z' = literal 'Z'
# suffix (note: 'z'/'Z' are outside the 'parsable' group).
_datetime_iso_re = re.compile(
    r'^(?P<parsable>\d{4}-\d{2}-\d{2}(?P<T>[ T])\d{2}:\d{2}:\d{2}'
    r'(?P<f>\.\d{1,7})?)(?P<z>[-+]\d{2}\:?\d{2})?(?P<Z>Z)?')
# strptime building blocks combined by _datetimedecoder.
_date = "%Y-%m-%d"
_time = "%H:%M:%S"
_f = '.%f'
_z = '%z'
class _FixedTzOffset(datetime.tzinfo):
    """Fixed-offset timezone built from a numeric offset string.

    Accepts forms like ``'+0400'``, ``'-04:00'``: one sign character,
    two hour digits, optional colon, two minute digits.
    """

    def __init__(self, offset_str):
        hrs = int(offset_str[1:3], 10)
        mins = int(offset_str[-2:], 10)
        if offset_str[0] == '-':
            hrs, mins = -hrs, -mins
        self.__offset = datetime.timedelta(hours=hrs, minutes=mins)
        # DST value mirrors the original implementation: offset minus 1h.
        self.__dst = datetime.timedelta(hours=hrs - 1, minutes=mins)
        self.__name = ''

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        return self.__dst
def _datetimedecoder(dtstr):
    """Best-effort conversion of an ISO-8601 string into a datetime.

    Returns *dtstr* unchanged when it does not look like an ISO datetime
    or fails to parse. A numeric ``[+-]HH[:]MM`` offset or a literal
    ``Z`` suffix yields a timezone-aware result via ``_FixedTzOffset``.
    """
    match = _datetime_iso_re.match(dtstr)
    if not match:
        return dtstr
    gd = match.groupdict()
    # Build the strptime format from what the regex actually matched.
    strptime = _date + gd['T'] + _time
    if gd['f']:
        strptime += '.%f'
    try:
        result = datetime.datetime.strptime(gd['parsable'], strptime)
    except ValueError:
        # Looked like a date but did not parse; keep the raw string.
        return dtstr
    if gd['z']:
        result = result.replace(tzinfo=_FixedTzOffset(gd['z']))
    elif gd['Z']:
        # BUG FIX: 'Z' is captured OUTSIDE the 'parsable' group, so the
        # old ``strptime += 'Z'`` could never match and every
        # 'Z'-suffixed timestamp (e.g. the module's own example
        # '2013-04-29T22:45:35.294303Z') fell back to the raw string.
        # Interpret 'Z' as a zero UTC offset instead.
        result = result.replace(tzinfo=_FixedTzOffset('+0000'))
    return result
def _datetimeencoder(dtobj):
    """Default datetime encoder: render *dtobj* as an ISO-8601 string."""
    return dtobj.isoformat()
class jsontree(collections.defaultdict):
    """defaultdict whose keys double as attributes and whose missing
    entries default to new ``jsontree`` nodes, so arbitrarily deep
    assignment works without pre-creating intermediate levels:

    >>> mytree = jsontree()
    >>> mytree.something.there = 3
    >>> mytree['something']['there'] == 3
    True
    """
    def __init__(self, *args, **kwargs):
        # The default factory is this class itself, which is what makes
        # chained attribute access auto-vivify nested trees.
        super(jsontree, self).__init__(jsontree, *args, **kwargs)

    def __getattribute__(self, name):
        # Real attributes and methods win; any other name falls back to
        # item access (creating a child tree when absent).
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            return self[name]

    def __setattr__(self, name, value):
        # Attribute assignment is simply item assignment.
        self[name] = value
        return value
def mapped_jsontree_class(mapping):
    """Return a class which is a jsontree, but with a supplied attribute name
    mapping. The mapping argument can be a mapping object
    (dict, jsontree, etc.) or it can be a callable which takes a single
    argument (the attribute name), and returns a new name.

    This is useful in situations where you have a jsontree with keys that are
    not valid python attribute names, to simplify communication with a client
    library, or allow for configurable names.

    For example:

    >>> numjt = mapped_jsontree_class(dict(one='1', two='2', three='3'))
    >>> number = numjt()
    >>> number.one = 'something'
    >>> dict(number)
    {'1': 'something'}

    This is very useful for abstracting field names that may change between
    a development sandbox and production environment. Both FogBugz and Jira
    bug trackers have custom fields with dynamically generated values. These
    field names can be abstracted out into a configuration mapping, and the
    jsontree code can be standardized.

    This can also be useful for JavaScript API's (PHPCake) which insist on
    having spaces in some key names. A function can be supplied which maps
    all '_'s in the attribute name to spaces:

    >>> spacify = lambda name: name.replace('_', ' ')
    >>> spacemapped = mapped_jsontree_class(spacify)
    >>> sm = spacemapped()
    >>> sm.hello_there = 5
    >>> sm.hello_there
    5
    >>> list(sm.keys())
    ['hello there']

    This will also work with non-string keys for translating from libraries
    that use object keys in python over to string versions of the keys in JSON

    >>> numjt = mapped_jsontree_class(dict(one=1, two=2))
    >>> number = numjt()
    >>> number.one = 'something'
    >>> dict(number)
    {1: 'something'}
    >>> numjt_as_text = mapped_jsontree_class(dict(one='1', two='2'))
    >>> dumped_number = dumps(number)
    >>> loaded_number = loads(dumped_number, jsontreecls=numjt_as_text)
    >>> str(loaded_number.one)
    'something'
    >>> repr(dict(loaded_number)).replace('u', '') # cheat the python2 tests
    "{'1': 'something'}"
    """
    mapper = mapping
    if not callable(mapping):
        # BUG FIX: the Mapping ABC moved to collections.abc; the alias in
        # ``collections`` was removed in Python 3.10. Import compatibly.
        try:
            from collections.abc import Mapping  # Python 3.3+
        except ImportError:  # pragma: no cover - Python 2
            from collections import Mapping
        if not isinstance(mapping, Mapping):
            raise TypeError("Argument mapping is not callable or an "
                            "instance of collections.abc.Mapping")
        mapper = lambda name: mapping.get(name, name)
    # Resolve the string type locally so the generated class does not
    # depend on the module-level ``basestring`` shim.
    try:
        string_types = basestring  # Python 2: covers str and unicode
    except NameError:
        string_types = str  # Python 3
    class mapped_jsontree(collections.defaultdict):
        def __init__(self, *args, **kwdargs):
            super(mapped_jsontree, self).__init__(mapped_jsontree, *args,
                                                  **kwdargs)
        def __getattribute__(self, name):
            mapped_name = mapper(name)
            if not isinstance(mapped_name, string_types):
                # Non-string keys cannot be real attributes; go straight
                # to item lookup.
                return self[mapped_name]
            try:
                return object.__getattribute__(self, mapped_name)
            except AttributeError:
                return self[mapped_name]
        def __setattr__(self, name, value):
            mapped_name = mapper(name)
            self[mapped_name] = value
            return value
    return mapped_jsontree
def mapped_jsontree(mapping, *args, **kwdargs):
    """Helper function that calls mapped_jsontree_class, and passing the
    rest of the arguments to the constructor of the new class.

    >>> number = mapped_jsontree(dict(one='1', two='2', three='3', four='4'),
    ...                          {'1': 'something', '2': 'hello'})
    >>> number.two
    'hello'
    >>> list(number.items())
    [('1', 'something'), ('2', 'hello')]
    """
    # Build the mapped class once, then instantiate it with the caller's
    # remaining positional and keyword arguments.
    tree_cls = mapped_jsontree_class(mapping)
    return tree_cls(*args, **kwdargs)
class JSONTreeEncoder(json.JSONEncoder):
    """JSON encoder that serializes jsontree structures and datetimes.

    A ``datetimeencoder`` keyword argument may be supplied to override
    how datetime objects are converted; the default renders them as
    ISO-8601 strings via ``_datetimeencoder``.
    """
    def __init__(self, *args, **kwdargs):
        # Extract our private keyword before handing the rest to the
        # base json.JSONEncoder.
        try:
            dt_encoder = kwdargs.pop('datetimeencoder')
        except KeyError:
            dt_encoder = _datetimeencoder
        super(JSONTreeEncoder, self).__init__(*args, **kwdargs)
        self.__datetimeencoder = dt_encoder

    def default(self, obj):
        # Only datetimes get special treatment; everything else defers
        # to the base class (which raises TypeError for unknown types).
        if not isinstance(obj, datetime.datetime):
            return super(JSONTreeEncoder, self).default(obj)
        return self.__datetimeencoder(obj)
class JSONTreeDecoder(json.JSONDecoder):
    """JSON decoder class for deserializing to a jsontree object structure
    and building datetime objects from strings with the ISO datetime format.

    Accepts two extra keyword arguments: ``jsontreecls`` (class used for
    every decoded JSON object, default ``jsontree``) and
    ``datetimedecoder`` (string post-processor, default
    ``_datetimedecoder``).
    """
    def __init__(self, *args, **kwdargs):
        jsontreecls = jsontree
        datetimedecoder = _datetimedecoder
        if 'jsontreecls' in kwdargs:
            jsontreecls = kwdargs.pop('jsontreecls')
        if 'datetimedecoder' in kwdargs:
            datetimedecoder = kwdargs.pop('datetimedecoder')
        super(JSONTreeDecoder, self).__init__(*args, **kwdargs)
        # Save the parsers installed by JSONDecoder.__init__, then swap in
        # our wrappers. The scanner captures the parse_* callables at
        # construction time, so it must be rebuilt after the swap (this
        # also forces the pure-Python scanner instead of the C one).
        self.__parse_object = self.parse_object
        self.__parse_string = self.parse_string
        self.parse_object = self._parse_object
        self.parse_string = self._parse_string
        self.scan_once = json.scanner.py_make_scanner(self)
        self.__jsontreecls = jsontreecls
        self.__datetimedecoder = datetimedecoder
    def _parse_object(self, *args, **kwdargs):
        # Convert each decoded dict into the configured jsontree class.
        result = self.__parse_object(*args, **kwdargs)
        return self.__jsontreecls(result[0]), result[1]
    def _parse_string(self, *args, **kwdargs):
        # Give ISO-looking strings a chance to become datetime objects.
        value, idx = self.__parse_string(*args, **kwdargs)
        value = self.__datetimedecoder(value)
        return value, idx
def clone(root, jsontreecls=jsontree, datetimeencoder=_datetimeencoder,
          datetimedecoder=_datetimedecoder):
    """Deep-copy *root* by round-tripping it through JSON text.

    The encoder/decoder hooks mirror those of the module's dump/load
    helpers, so datetimes and custom tree classes survive the trip.
    """
    serialized = json.dumps(root, cls=JSONTreeEncoder,
                            datetimeencoder=datetimeencoder)
    return json.loads(serialized, cls=JSONTreeDecoder,
                      jsontreecls=jsontreecls,
                      datetimedecoder=datetimedecoder)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, cls=JSONTreeEncoder, indent=None, separators=None,
         encoding="utf-8", default=None, sort_keys=False, **kargs):
    """JSON serialize *obj* to the file-like *fp*.

    Identical to :func:`json.dump` except that ``cls`` defaults to
    :class:`JSONTreeEncoder`. ``encoding`` is only forwarded on Python 2,
    where ``json.dump`` still accepts it.

    BUG FIX: ``sort_keys`` and ``default`` were previously forwarded only
    on Python 3, so callers on Python 2 had them silently ignored. Both
    parameters exist in ``json.dump`` on every supported version, so they
    are now always passed through.
    """
    if sys.version_info.major == 2:
        # Python 3's json.dump() no longer accepts ``encoding``.
        kargs['encoding'] = encoding
    return json.dump(obj, fp, skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                     check_circular=check_circular, allow_nan=allow_nan,
                     cls=cls, indent=indent, separators=separators,
                     default=default, sort_keys=sort_keys, **kargs)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
          allow_nan=True, cls=JSONTreeEncoder, indent=None, separators=None,
          encoding='utf-8', default=None, sort_keys=False, **kargs):
    """JSON serialize *obj* to a string.

    Identical to :func:`json.dumps` except that ``cls`` defaults to
    :class:`JSONTreeEncoder`. ``encoding`` is only forwarded on Python 2,
    where ``json.dumps`` still accepts it.

    BUG FIX: ``sort_keys`` and ``default`` were previously forwarded only
    on Python 3, so callers on Python 2 had them silently ignored. Both
    parameters exist in ``json.dumps`` on every supported version, so
    they are now always passed through.
    """
    if sys.version_info.major == 2:
        # Python 3's json.dumps() no longer accepts ``encoding``.
        kargs['encoding'] = encoding
    return json.dumps(obj, skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                      check_circular=check_circular, allow_nan=allow_nan,
                      cls=cls, indent=indent, separators=separators,
                      default=default, sort_keys=sort_keys, **kargs)
def load(fp, encoding=None, cls=JSONTreeDecoder, object_hook=None,
         parse_float=None, parse_int=None, parse_constant=None,
         object_pairs_hook=None, **kargs):
    """Deserialize a JSON document from the file-like *fp*.

    Same as :func:`json.load` but with ``cls`` defaulting to
    :class:`JSONTreeDecoder`. ``encoding`` is only forwarded on
    Python 2, where ``json.load`` still accepts it.
    """
    if sys.version_info.major == 2:
        kargs['encoding'] = encoding
    return json.load(
        fp,
        cls=cls,
        object_hook=object_hook,
        object_pairs_hook=object_pairs_hook,
        parse_float=parse_float,
        parse_int=parse_int,
        parse_constant=parse_constant,
        **kargs)
def loads(s, encoding=None, cls=JSONTreeDecoder, object_hook=None,
          parse_float=None, parse_int=None, parse_constant=None,
          object_pairs_hook=None, **kargs):
    """Deserialize a JSON document from the string *s*.

    Same as :func:`json.loads` but with ``cls`` defaulting to
    :class:`JSONTreeDecoder`.

    BUG FIX: ``encoding`` was previously passed to ``json.loads``
    unconditionally; Python 3.9 removed that parameter, so this function
    raised ``TypeError`` on modern interpreters. It is now forwarded
    only on Python 2, matching the guard already used by :func:`load`.
    """
    if sys.version_info.major == 2:
        kargs['encoding'] = encoding
    return json.loads(s, cls=cls, object_hook=object_hook,
                      parse_float=parse_float, parse_int=parse_int,
                      parse_constant=parse_constant,
                      object_pairs_hook=object_pairs_hook, **kargs)
| {
"content_hash": "8291b24fdb660f421d56b8d561500e1d",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 85,
"avg_line_length": 37.85906040268456,
"alnum_prop": 0.6156709803226378,
"repo_name": "dougn/jsontree",
"id": "b1b10be64e70a1bf954a979970f3377cd33da6cc",
"size": "11282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsontree.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "12719"
}
],
"symlink_target": ""
} |
import unittest
from .extractor import Extractor
class BaseExtractorTestCase(unittest.TestCase):
def test_base_extract(self):
ext = Extractor()
self.assertRaises(NotImplementedError, ext.extract, '')
if __name__ == '__main__': # pragma: no cover
unittest.main()
| {
"content_hash": "99b3bb21bd39662b489799a436133bae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 22.384615384615383,
"alnum_prop": 0.6804123711340206,
"repo_name": "cnu/chronos",
"id": "0ae0afb741312027f5a7a5e68f1141beae223435",
"size": "338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chronos/test_extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35856"
}
],
"symlink_target": ""
} |
import glance_store
from oslo_log import log as logging
import webob.exc
from daisy.api import policy
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
import daisy.gateway
from daisy import i18n
import daisy.notifier
# Module-level logger and gettext-style translation shortcut.
LOG = logging.getLogger(__name__)
_ = i18n._
class Controller(object):
    """API controller implementing add/remove of image tags.

    Domain-level exceptions from the image repository are translated
    into the corresponding webob HTTP errors; messages are logged at
    warning level before being raised.
    """

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        # Every collaborator may be injected (e.g. for tests); otherwise
        # the default daisy/glance_store implementations are used.
        self.db_api = db_api or daisy.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or daisy.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
                                             self.notifier, self.policy)

    @utils.mutating
    def update(self, req, image_id, tag_value):
        """Add *tag_value* to the tag set of image *image_id*."""
        repo = self.gateway.get_repo(req.context)
        try:
            img = repo.get(image_id)
            img.tags.add(tag_value)
            repo.save(img)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("Not allowed to update tags for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Invalid as e:
            msg = _("Could not update image: %s") % utils.exception_to_str(e)
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.ImageTagLimitExceeded as e:
            msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id, "e": utils.exception_to_str(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)

    @utils.mutating
    def delete(self, req, image_id, tag_value):
        """Remove *tag_value* from image *image_id*; 404 when absent."""
        repo = self.gateway.get_repo(req.context)
        try:
            img = repo.get(image_id)
            if tag_value not in img.tags:
                raise webob.exc.HTTPNotFound()
            img.tags.remove(tag_value)
            repo.save(img)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("Not allowed to delete tags for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serializer for tag mutations; both operations answer 204."""

    def _no_content(self, response):
        # Successful tag changes carry no response body.
        response.status_int = 204

    def update(self, response, result):
        self._no_content(response)

    def delete(self, response, result):
        self._no_content(response)
def create_resource():
    """Images resource factory method"""
    # Wire the controller to its 204-emitting serializer.
    return wsgi.Resource(Controller(), serializer=ResponseSerializer())
| {
"content_hash": "2bc48582756e0eecd64e82464a241efd",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 36.4,
"alnum_prop": 0.6192630898513252,
"repo_name": "OpenDaisy/daisy-api",
"id": "745884eff1a8d9e36e2239d52f989cbe16f5c10c",
"size": "3729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisy/api/v2/image_tags.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1475450"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add a nullable FK 'image' (-> catalog.Image) to Note."""
        # Adding field 'Note.image'
        # null=True/blank=True so existing Note rows need no default value.
        db.add_column(u'catalog_note', 'image',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Image'], null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the 'image' column from Note."""
        # Deleting field 'Note.image' -- Django stores FK columns with an
        # '_id' suffix, hence the 'image_id' database column name.
        db.delete_column(u'catalog_note', 'image_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['auth.User']"}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['auth.User']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog'] | {
"content_hash": "7f0f92186ddbd44a2c3403ad8a4c47d4",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 224,
"avg_line_length": 79.31458699472759,
"alnum_prop": 0.5428982938178595,
"repo_name": "Makeystreet/makeystreet",
"id": "d6ce44aea408e58e52124f9df3578b2d27a36e56",
"size": "45154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woot/apps/catalog/migrations/0087_auto__add_field_note_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1893401"
},
{
"name": "HTML",
"bytes": "2253311"
},
{
"name": "JavaScript",
"bytes": "1698946"
},
{
"name": "Python",
"bytes": "9010343"
}
],
"symlink_target": ""
} |
from flask import Flask, Response
from twilio.util import TwilioCapability
# Module-level Flask application exposing the capability-token endpoint.
app = Flask(__name__)
@app.route('/token', methods=['GET'])
def get_capability_token():
    """Issue a Twilio capability token allowing incoming calls for "jenny"."""
    # Credentials come from the Twilio console (twilio.com/console).
    sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    secret = 'your_auth_token'
    cap = TwilioCapability(sid, secret)
    cap.allow_client_incoming("jenny")
    # The generated token is a JWT, so serve it with the matching MIME type.
    return Response(cap.generate(), mimetype='application/jwt')
if __name__ == "__main__":
    # Development server only (debug mode); use a WSGI server in production.
    app.run(debug=True)
| {
"content_hash": "20bf10723c682267c202ebe99fb97b9a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 24.708333333333332,
"alnum_prop": 0.6947723440134908,
"repo_name": "teoreteetik/api-snippets",
"id": "c9553462de46d7624bc8c2c86a9da3b53a945ec6",
"size": "593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/capability-token-incoming/capability-token.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
import math
# Here we decided to keep the Heap data structure simple and leave functions such
# as max_heapify outside the class, so the interface matches the one defined in CLRS.
class Heap():
    """A binary heap backed by a Python list, using CLRS-style indexing.

    Node i has its left child at 2*i + 1, its right child at 2*i + 2 and
    its parent at (i - 1) // 2.  Helper procedures such as max_heapify are
    kept outside the class to mirror the interface defined in CLRS.
    """

    def __init__(self, A: list):
        self.data = A
        self.size = len(A)
        # Tolerate an empty input instead of raising IndexError.
        self.root = A[0] if A else None

    def left(self, i):
        '''
        Returns the index of the left child of node i, or None if absent
        '''
        idx = i * 2 + 1
        return idx if idx < self.size else None

    def right(self, i):
        '''
        Returns the index of the right child of node i, or None if absent
        '''
        idx = i * 2 + 2
        return idx if idx < self.size else None

    def parent(self, i):
        '''
        Returns the parent of the given node (None for the root)
        Eg. data[6] -> data[2], data[5] -> data[2]
        '''
        if i == 0:
            # Root doesn't have a parent
            return None
        # Same result as math.ceil(i / 2) - 1, but exact integer arithmetic.
        return (i - 1) // 2
| {
"content_hash": "9fea6f9cbf85d4b85faf36104d57de92",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 25.18421052631579,
"alnum_prop": 0.5151515151515151,
"repo_name": "JasonVann/CLRS",
"id": "ee67d798866c821a0d4fb9354a10728fe63334a9",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "S2_SortingAndOrderStatistics/C6_Heapsort/Heap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60634"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.test import SimpleTestCase
from localflavor.us.forms import (USZipCodeField, USPhoneNumberField,
USStateField, USStateSelect,
USSocialSecurityNumberField)
from .forms import USPlaceForm
class USLocalFlavorTests(SimpleTestCase):
    """Tests for the US localflavor form fields and widgets.

    Covers the state/USPS select widgets rendered by USPlaceForm as well as
    the standalone fields: zip code, phone number, state name and SSN.
    """

    def setUp(self):
        """Bind a form whose selects should render with GA/NC preselected."""
        self.form = USPlaceForm({'state': 'GA', 'state_req': 'NC', 'postal_code': 'GA', 'name': 'impossible'})
    def test_get_display_methods(self):
        """Test that the get_*_display() methods are added to the model instances."""
        place = self.form.save()
        self.assertEqual(place.get_state_display(), 'Georgia')
        self.assertEqual(place.get_state_req_display(), 'North Carolina')
    def test_required(self):
        """Test that required USStateFields throw appropriate errors."""
        form = USPlaceForm({'state': 'GA', 'name': 'Place in GA'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['state_req'], ['This field is required.'])
    def test_field_blank_option(self):
        """Test that the empty option is there."""
        state_select_html = """\
<select name="state" id="id_state">
<option value="">---------</option>
<option value="AL">Alabama</option>
<option value="AK">Alaska</option>
<option value="AS">American Samoa</option>
<option value="AZ">Arizona</option>
<option value="AR">Arkansas</option>
<option value="AA">Armed Forces Americas</option>
<option value="AE">Armed Forces Europe</option>
<option value="AP">Armed Forces Pacific</option>
<option value="CA">California</option>
<option value="CO">Colorado</option>
<option value="CT">Connecticut</option>
<option value="DE">Delaware</option>
<option value="DC">District of Columbia</option>
<option value="FL">Florida</option>
<option value="GA" selected="selected">Georgia</option>
<option value="GU">Guam</option>
<option value="HI">Hawaii</option>
<option value="ID">Idaho</option>
<option value="IL">Illinois</option>
<option value="IN">Indiana</option>
<option value="IA">Iowa</option>
<option value="KS">Kansas</option>
<option value="KY">Kentucky</option>
<option value="LA">Louisiana</option>
<option value="ME">Maine</option>
<option value="MD">Maryland</option>
<option value="MA">Massachusetts</option>
<option value="MI">Michigan</option>
<option value="MN">Minnesota</option>
<option value="MS">Mississippi</option>
<option value="MO">Missouri</option>
<option value="MT">Montana</option>
<option value="NE">Nebraska</option>
<option value="NV">Nevada</option>
<option value="NH">New Hampshire</option>
<option value="NJ">New Jersey</option>
<option value="NM">New Mexico</option>
<option value="NY">New York</option>
<option value="NC">North Carolina</option>
<option value="ND">North Dakota</option>
<option value="MP">Northern Mariana Islands</option>
<option value="OH">Ohio</option>
<option value="OK">Oklahoma</option>
<option value="OR">Oregon</option>
<option value="PA">Pennsylvania</option>
<option value="PR">Puerto Rico</option>
<option value="RI">Rhode Island</option>
<option value="SC">South Carolina</option>
<option value="SD">South Dakota</option>
<option value="TN">Tennessee</option>
<option value="TX">Texas</option>
<option value="UT">Utah</option>
<option value="VT">Vermont</option>
<option value="VI">Virgin Islands</option>
<option value="VA">Virginia</option>
<option value="WA">Washington</option>
<option value="WV">West Virginia</option>
<option value="WI">Wisconsin</option>
<option value="WY">Wyoming</option>
</select>"""
        self.assertHTMLEqual(str(self.form['state']), state_select_html)
    def test_full_postal_code_list(self):
        """Test that the full USPS code field is really the full list."""
        usps_select_html = """\
<select name="postal_code" id="id_postal_code">
<option value="">---------</option>
<option value="AL">Alabama</option>
<option value="AK">Alaska</option>
<option value="AS">American Samoa</option>
<option value="AZ">Arizona</option>
<option value="AR">Arkansas</option>
<option value="AA">Armed Forces Americas</option>
<option value="AE">Armed Forces Europe</option>
<option value="AP">Armed Forces Pacific</option>
<option value="CA">California</option>
<option value="CO">Colorado</option>
<option value="CT">Connecticut</option>
<option value="DE">Delaware</option>
<option value="DC">District of Columbia</option>
<option value="FM">Federated States of Micronesia</option>
<option value="FL">Florida</option>
<option value="GA" selected="selected">Georgia</option>
<option value="GU">Guam</option>
<option value="HI">Hawaii</option>
<option value="ID">Idaho</option>
<option value="IL">Illinois</option>
<option value="IN">Indiana</option>
<option value="IA">Iowa</option>
<option value="KS">Kansas</option>
<option value="KY">Kentucky</option>
<option value="LA">Louisiana</option>
<option value="ME">Maine</option>
<option value="MH">Marshall Islands</option>
<option value="MD">Maryland</option>
<option value="MA">Massachusetts</option>
<option value="MI">Michigan</option>
<option value="MN">Minnesota</option>
<option value="MS">Mississippi</option>
<option value="MO">Missouri</option>
<option value="MT">Montana</option>
<option value="NE">Nebraska</option>
<option value="NV">Nevada</option>
<option value="NH">New Hampshire</option>
<option value="NJ">New Jersey</option>
<option value="NM">New Mexico</option>
<option value="NY">New York</option>
<option value="NC">North Carolina</option>
<option value="ND">North Dakota</option>
<option value="MP">Northern Mariana Islands</option>
<option value="OH">Ohio</option>
<option value="OK">Oklahoma</option>
<option value="OR">Oregon</option>
<option value="PW">Palau</option>
<option value="PA">Pennsylvania</option>
<option value="PR">Puerto Rico</option>
<option value="RI">Rhode Island</option>
<option value="SC">South Carolina</option>
<option value="SD">South Dakota</option>
<option value="TN">Tennessee</option>
<option value="TX">Texas</option>
<option value="UT">Utah</option>
<option value="VT">Vermont</option>
<option value="VI">Virgin Islands</option>
<option value="VA">Virginia</option>
<option value="WA">Washington</option>
<option value="WV">West Virginia</option>
<option value="WI">Wisconsin</option>
<option value="WY">Wyoming</option>
</select>"""
        self.assertHTMLEqual(str(self.form['postal_code']), usps_select_html)
    def test_USStateSelect(self):
        """Render the bare widget directly and check the selected option."""
        f = USStateSelect()
        out = '''<select name="state">
<option value="AL">Alabama</option>
<option value="AK">Alaska</option>
<option value="AS">American Samoa</option>
<option value="AZ">Arizona</option>
<option value="AR">Arkansas</option>
<option value="AA">Armed Forces Americas</option>
<option value="AE">Armed Forces Europe</option>
<option value="AP">Armed Forces Pacific</option>
<option value="CA">California</option>
<option value="CO">Colorado</option>
<option value="CT">Connecticut</option>
<option value="DE">Delaware</option>
<option value="DC">District of Columbia</option>
<option value="FL">Florida</option>
<option value="GA">Georgia</option>
<option value="GU">Guam</option>
<option value="HI">Hawaii</option>
<option value="ID">Idaho</option>
<option value="IL" selected="selected">Illinois</option>
<option value="IN">Indiana</option>
<option value="IA">Iowa</option>
<option value="KS">Kansas</option>
<option value="KY">Kentucky</option>
<option value="LA">Louisiana</option>
<option value="ME">Maine</option>
<option value="MD">Maryland</option>
<option value="MA">Massachusetts</option>
<option value="MI">Michigan</option>
<option value="MN">Minnesota</option>
<option value="MS">Mississippi</option>
<option value="MO">Missouri</option>
<option value="MT">Montana</option>
<option value="NE">Nebraska</option>
<option value="NV">Nevada</option>
<option value="NH">New Hampshire</option>
<option value="NJ">New Jersey</option>
<option value="NM">New Mexico</option>
<option value="NY">New York</option>
<option value="NC">North Carolina</option>
<option value="ND">North Dakota</option>
<option value="MP">Northern Mariana Islands</option>
<option value="OH">Ohio</option>
<option value="OK">Oklahoma</option>
<option value="OR">Oregon</option>
<option value="PA">Pennsylvania</option>
<option value="PR">Puerto Rico</option>
<option value="RI">Rhode Island</option>
<option value="SC">South Carolina</option>
<option value="SD">South Dakota</option>
<option value="TN">Tennessee</option>
<option value="TX">Texas</option>
<option value="UT">Utah</option>
<option value="VT">Vermont</option>
<option value="VI">Virgin Islands</option>
<option value="VA">Virginia</option>
<option value="WA">Washington</option>
<option value="WV">West Virginia</option>
<option value="WI">Wisconsin</option>
<option value="WY">Wyoming</option>
</select>'''
        self.assertHTMLEqual(f.render('state', 'IL'), out)
    def test_USZipCodeField(self):
        """Valid 5- and 9-digit zips normalize; malformed ones error."""
        error_format = ['Enter a zip code in the format XXXXX or XXXXX-XXXX.']
        valid = {
            '60606': '60606',
            60606: '60606',
            '04000': '04000',
            '60606-1234': '60606-1234',
        }
        invalid = {
            '4000': error_format,
            '6060-1234': error_format,
            '60606-': error_format,
        }
        self.assertFieldOutput(USZipCodeField, valid, invalid)
    def test_USPhoneNumberField(self):
        """Various punctuation/spacing styles normalize to XXX-XXX-XXXX."""
        error_format = ['Phone numbers must be in XXX-XXX-XXXX format.']
        valid = {
            '312-555-1212': '312-555-1212',
            '3125551212': '312-555-1212',
            '312 555-1212': '312-555-1212',
            '(312) 555-1212': '312-555-1212',
            '312 555 1212': '312-555-1212',
            '312.555.1212': '312-555-1212',
            '312.555-1212': '312-555-1212',
            ' (312) 555.1212 ': '312-555-1212',
        }
        invalid = {
            '555-1212': error_format,
            '312-55-1212': error_format,
        }
        self.assertFieldOutput(USPhoneNumberField, valid, invalid)
    def test_USStateField(self):
        """Abbreviations and full names normalize to the two-letter code."""
        error_invalid = ['Enter a U.S. state or territory.']
        valid = {
            'il': 'IL',
            'IL': 'IL',
            'illinois': 'IL',
            ' illinois ': 'IL',
        }
        invalid = {
            60606: error_invalid,
        }
        self.assertFieldOutput(USStateField, valid, invalid)
    def test_USSocialSecurityNumberField(self):
        """SSNs normalize to XXX-XX-XXXX; known-invalid numbers are rejected."""
        error_invalid = ['Enter a valid U.S. Social Security number in XXX-XX-XXXX format.']
        valid = {
            '987-65-4330': '987-65-4330',
            '987654330': '987-65-4330',
        }
        invalid = {
            '078-05-1120': error_invalid,
        }
        self.assertFieldOutput(USSocialSecurityNumberField, valid, invalid)
| {
"content_hash": "5e0889e70c1c5c05261d91720c1700a9",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 110,
"avg_line_length": 37.68641114982579,
"alnum_prop": 0.6734467455621301,
"repo_name": "yakky/django-localflavor",
"id": "4c8146caa40bfd7a7495e6da6ef5bf8a16e714d9",
"size": "10816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_us/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "573411"
},
{
"name": "Shell",
"bytes": "6725"
}
],
"symlink_target": ""
} |
"""Generates C++ source files from a mojom.Module."""
from generate import mojom
from generate import mojom_pack
from generate import mojom_generator
from generate.template_expander import UseJinja
# Maps primitive mojom kinds to the C++ type name emitted for them.
_kind_to_cpp_type = {
  mojom.BOOL: "bool",
  mojom.INT8: "int8_t",
  mojom.UINT8: "uint8_t",
  mojom.INT16: "int16_t",
  mojom.UINT16: "uint16_t",
  mojom.INT32: "int32_t",
  mojom.UINT32: "uint32_t",
  mojom.FLOAT: "float",
  mojom.HANDLE: "mojo::Handle",
  mojom.DCPIPE: "mojo::DataPipeConsumerHandle",
  mojom.DPPIPE: "mojo::DataPipeProducerHandle",
  mojom.MSGPIPE: "mojo::MessagePipeHandle",
  mojom.INT64: "int64_t",
  mojom.UINT64: "uint64_t",
  mojom.DOUBLE: "double",
}
def GetCppType(kind):
  """Returns the C++ type used in the generated internal data structures."""
  if isinstance(kind, mojom.Struct):
    return "{}_Data*".format(kind.GetFullNameInternal("::"))
  if isinstance(kind, mojom.Array):
    return "mojo::internal::Array_Data<{}>*".format(GetCppType(kind.kind))
  if isinstance(kind, mojom.Interface):
    return "mojo::Interface<{}>::Handle".format(kind.name)
  if kind.spec == 's':
    return "mojo::internal::String_Data*"
  return _kind_to_cpp_type[kind]
def GetCppArrayArgWrapperType(kind):
  """Returns the C++ wrapper type used as the element of a mojo::Array."""
  if isinstance(kind, mojom.Struct):
    return kind.GetFullName("::")
  if isinstance(kind, mojom.Array):
    return "mojo::Array<{} >".format(GetCppArrayArgWrapperType(kind.kind))
  if isinstance(kind, mojom.Interface):
    return "mojo::Interface<{}>::Handle".format(kind.name)
  if kind.spec == 's':
    return "mojo::String"
  return _kind_to_cpp_type[kind]
def GetCppWrapperType(kind):
  """Returns the public C++ wrapper type for a value of the given kind."""
  if isinstance(kind, mojom.Struct):
    return kind.GetFullName("::")
  if isinstance(kind, mojom.Array):
    return "mojo::Array<{} >".format(GetCppArrayArgWrapperType(kind.kind))
  if isinstance(kind, mojom.Interface):
    return "mojo::Passable<typename mojo::Interface<{}>::Handle>".format(
        kind.name)
  if kind.spec == 's':
    return "mojo::String"
  # Handles are movable-only, hence wrapped in mojo::Passable.
  if mojom_generator.IsHandleKind(kind):
    return "mojo::Passable<{}>".format(_kind_to_cpp_type[kind])
  return _kind_to_cpp_type[kind]
def GetCppConstWrapperType(kind):
  """Returns the C++ type used for a const method argument of this kind."""
  if isinstance(kind, mojom.Struct):
    return "const {}&".format(kind.GetFullName("::"))
  if isinstance(kind, mojom.Array):
    return "const mojo::Array<{} >&".format(
        GetCppArrayArgWrapperType(kind.kind))
  if isinstance(kind, mojom.Interface):
    return "mojo::Interface<{}>::ScopedHandle".format(kind.name)
  # Strings and handle kinds map to fixed wrapper types keyed by spec.
  by_spec = {
      's': "const mojo::String&",
      'h': "mojo::ScopedHandle",
      'h:d:c': "mojo::ScopedDataPipeConsumerHandle",
      'h:d:p': "mojo::ScopedDataPipeProducerHandle",
      'h:m': "mojo::ScopedMessagePipeHandle",
  }
  if kind.spec in by_spec:
    return by_spec[kind.spec]
  return _kind_to_cpp_type[kind]
def GetCppFieldType(kind):
  """Returns the C++ type used for a serialized struct field of this kind."""
  if isinstance(kind, mojom.Struct):
    return "mojo::internal::StructPointer<{}_Data>".format(
        kind.GetFullNameInternal("::"))
  if isinstance(kind, mojom.Array):
    return "mojo::internal::ArrayPointer<{}>".format(GetCppType(kind.kind))
  if isinstance(kind, mojom.Interface):
    return "mojo::Interface<{}>::Handle".format(kind.name)
  if kind.spec == 's':
    return "mojo::internal::StringPointer"
  return _kind_to_cpp_type[kind]
def IsStructWithHandles(struct):
  """Returns True if any packed field of |struct| is a handle kind."""
  return any(mojom_generator.IsHandleKind(pf.field.kind)
             for pf in struct.packed.packed_fields)
def SubstituteNamespace(token, module):
  """Rewrites imported-namespace prefixes in |token| from '.' to '::'."""
  for imported in module.imports:
    prefix = imported["namespace"]
    token = token.replace(prefix + ".", prefix + "::")
  return token
def ExpressionToText(value, module):
  """Converts a parsed EXPRESSION node into C++ source text.

  Raises:
    Exception: if |value| is not an EXPRESSION node.
  """
  if value[0] != "EXPRESSION":
    # str() is required here: |value| is a parse node, not a string, and
    # concatenating it directly raised a TypeError that masked this message.
    raise Exception("Expected EXPRESSION, got " + str(value))
  return "".join(mojom_generator.ExpressionMapper(value,
      lambda token: SubstituteNamespace(token, module)))
# Bytes added to each struct's packed size when computing "struct_size"
# (serialization header preceding the packed fields).
_HEADER_SIZE = 8
class Generator(mojom_generator.Generator):
  """Generates the C++ header/internal-header/source files for a module."""

  # Filters exposed to the Jinja templates under these names.
  cpp_filters = {
    "camel_to_underscores": mojom_generator.CamelToUnderscores,
    "cpp_const_wrapper_type": GetCppConstWrapperType,
    "cpp_field_type": GetCppFieldType,
    "cpp_type": GetCppType,
    "cpp_wrapper_type": GetCppWrapperType,
    "expression_to_text": ExpressionToText,
    "get_pad": mojom_pack.GetPad,
    "is_handle_kind": mojom_generator.IsHandleKind,
    "is_object_kind": mojom_generator.IsObjectKind,
    "is_string_kind": mojom_generator.IsStringKind,
    "is_array_kind": lambda kind: isinstance(kind, mojom.Array),
    "is_struct_with_handles": IsStructWithHandles,
    "struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
    "struct_from_method": mojom_generator.GetStructFromMethod,
    "stylize_method": mojom_generator.StudlyCapsToCamel,
    "verify_token_type": mojom_generator.VerifyTokenType,
  }
  def GetJinjaExports(self):
    """Returns the context dict shared by all three templates."""
    return {
      "module": self.module,
      "module_name": self.module.name,
      "namespace": self.module.namespace,
      "imports": self.module.imports,
      "kinds": self.module.kinds,
      "enums": self.module.enums,
      "structs": self.GetStructs(),
      "interfaces": self.module.interfaces,
      "include_prefix": self.GetIncludePrefix(),
    }
  @UseJinja("cpp_templates/module.h.tmpl", filters=cpp_filters)
  def GenerateModuleHeader(self):
    """Renders the public header for the module."""
    return self.GetJinjaExports()
  @UseJinja("cpp_templates/module_internal.h.tmpl", filters=cpp_filters)
  def GenerateModuleInternalHeader(self):
    """Renders the internal (serialization) header for the module."""
    return self.GetJinjaExports()
  @UseJinja("cpp_templates/module.cc.tmpl", filters=cpp_filters)
  def GenerateModuleSource(self):
    """Renders the .cc implementation file for the module."""
    return self.GetJinjaExports()
  def GenerateFiles(self):
    """Writes <name>.h, <name>_internal.h and <name>.cc to the output dir."""
    self.Write(self.GenerateModuleHeader(), "%s.h" % self.module.name)
    self.Write(self.GenerateModuleInternalHeader(),
               "%s_internal.h" % self.module.name)
    self.Write(self.GenerateModuleSource(), "%s.cc" % self.module.name)
  def GetIncludePrefix(self):
    """Returns self.header_dir normalized to end with exactly one '/'."""
    if not self.header_dir:
      return ""
    if self.header_dir[-1] == "/":
      return self.header_dir
    return self.header_dir + "/"
| {
"content_hash": "a8100ce3a8dbf9c66a4ed51b5eacebd1",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 77,
"avg_line_length": 34.354651162790695,
"alnum_prop": 0.689118294127602,
"repo_name": "ChromiumWebApps/chromium",
"id": "4420aafd88f7f01f34e01cd1b14f2afb818fff4d",
"size": "6072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mojo/public/bindings/generators/mojom_cpp_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42286199"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "198616766"
},
{
"name": "CSS",
"bytes": "937333"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5695686"
},
{
"name": "JavaScript",
"bytes": "21967126"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2262"
},
{
"name": "Objective-C",
"bytes": "7602057"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1210885"
},
{
"name": "Python",
"bytes": "10774996"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1316721"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15243"
}
],
"symlink_target": ""
} |
import copy
import tempfile
import numpy as np
import torch
import torch.utils.data as data
from sklearn.metrics import (
accuracy_score,
precision_score,
roc_auc_score,
recall_score,
f1_score,
fbeta_score,
)
from torch.utils.data import DataLoader
from federatedml.framework.weights import OrderDictWeights, Weights
from federatedml.nn.homo_nn.nn_model import NNModel, DataConverter
def layers(layer, config, type):
    """Builds a single torch.nn module from a (layer, config, type) triple.

    NOTE(review): Conv2d/MaxPool2d/AvgPool2d/LSTM/RNN/BatchNorm2d are called
    without their required constructor arguments and ignore ``config``, so
    those branches will raise TypeError at construction time — confirm the
    intended config schema and pass it through.
    Also note: when ``type`` matches but ``layer`` does not, this function
    silently returns None.
    """
    if type == "cv":
        if layer == "Conv2d":
            return torch.nn.Conv2d()
        if layer == "MaxPool2d":
            return torch.nn.MaxPool2d()
        if layer == "AvgPool2d":
            return torch.nn.AvgPool2d()
    elif type == "nlp":
        if layer == "LSTM":
            return torch.nn.LSTM()
        if layer == "RNN":
            return torch.nn.RNN()
    elif type == "activate":
        if layer == "Sigmoid":
            return torch.nn.Sigmoid()
        if layer == "Relu":
            return torch.nn.ReLU()
        if layer == "Selu":
            return torch.nn.SELU()
        if layer == "LeakyReLU":
            return torch.nn.LeakyReLU()
        if layer == "Tanh":
            return torch.nn.Tanh()
        # Softmax/LogSoftmax are applied over dim=1 (the class dimension).
        if layer == "Softmax":
            return torch.nn.Softmax(1)
        if layer == "LogSoftmax":
            return torch.nn.LogSoftmax(1)
    elif type == "normal":
        # config is (in_features, out_features) for Linear, a float
        # drop probability for dropout.
        if layer == "Linear":
            return torch.nn.Linear(config[0], config[1])
        if layer == "BatchNorm2d":
            return torch.nn.BatchNorm2d()
        if layer == "dropout":
            return torch.nn.Dropout(config)
    else:
        print("layer not support!")
def build_pytorch(nn_define, optimizer, loss, metrics, **kwargs):
    """Assembles a torch.nn.Sequential from a layer-config list and wraps it
    in a PytorchNNModel together with its optimizer/loss/metrics config."""
    network = torch.nn.Sequential()
    for layer_conf in nn_define:
        module = layers(layer_conf.get("layer"), layer_conf.get("config"),
                        layer_conf.get("type"))
        network.add_module(layer_conf.get("name"), module)
    return PytorchNNModel(network, optimizer, loss, metrics)
def build_loss_fn(loss):
    """Returns the torch loss-function instance named by ``loss``.

    Args:
        loss: name of a supported torch.nn loss class, e.g. "MSELoss".

    Raises:
        ValueError: if ``loss`` is not a supported loss-function name.
            (Previously this printed a message and returned None, which
            surfaced later as an opaque TypeError at training time.)
    """
    supported = {
        "CrossEntropyLoss": torch.nn.CrossEntropyLoss,
        "MSELoss": torch.nn.MSELoss,
        "BCELoss": torch.nn.BCELoss,
        "BCEWithLogitsLoss": torch.nn.BCEWithLogitsLoss,
        "NLLLoss": torch.nn.NLLLoss,
        "L1Loss": torch.nn.L1Loss,
        "SmoothL1Loss": torch.nn.SmoothL1Loss,
        "HingeEmbeddingLoss": torch.nn.HingeEmbeddingLoss,
    }
    if loss not in supported:
        raise ValueError("loss function not support: {}".format(loss))
    return supported[loss]()
def build_optimzer(optim, model):
    """Returns a torch optimizer for ``model`` described by ``optim``.

    Args:
        optim: object with an ``optimizer`` name string and ``kwargs`` dict
            of constructor keyword arguments (e.g. {"lr": 0.01}).
        model: the torch module whose parameters will be optimized.

    Raises:
        ValueError: if ``optim.optimizer`` names an unsupported optimizer.
            (Previously this printed "not support" and returned None.)
    """
    supported = {
        "Adam": torch.optim.Adam,
        "SGD": torch.optim.SGD,
        "RMSprop": torch.optim.RMSprop,
        "Adagrad": torch.optim.Adagrad,
    }
    if optim.optimizer not in supported:
        raise ValueError("optimizer not support: {}".format(optim.optimizer))
    return supported[optim.optimizer](model.parameters(), **optim.kwargs)
class PytorchNNModel(NNModel):
    def __init__(self, model, optimizer=None, loss=None, metrics=None):
        """Wrap a torch network plus its training configuration.

        optimizer/loss/metrics may also be supplied later via compile().
        """
        # The wrapped torch network.
        self._model: torch.nn.Sequential = model
        # Optimizer config object, loss-function name, and metric names.
        self._optimizer = optimizer
        self._loss = loss
        self._metrics = metrics
    def compile(self, loss, optimizer, metrics):
        """Set (or replace) the loss name, optimizer config and metrics."""
        self._optimizer = optimizer
        self._loss = loss
        self._metrics = metrics
    def get_model_weights(self) -> OrderDictWeights:
        """Wrap the model's state_dict for federated aggregation."""
        return OrderDictWeights(self._model.state_dict())
def set_model_weights(self, weights: Weights):
unboxed = weights.unboxed
self._model.load_state_dict(unboxed)
def train(self, data: data.Dataset, **kwargs):
loss_fn = build_loss_fn(self._loss)
optimizer = build_optimzer(self._optimizer, self._model)
epochs = 1
left_kwargs = copy.deepcopy(kwargs)
if "aggregate_every_n_epoch" in kwargs:
epochs = kwargs["aggregate_every_n_epoch"]
del left_kwargs["aggregate_every_n_epoch"]
train_data = DataLoader(data, batch_size=data.batch_size, shuffle=False)
for epoch in range(epochs):
for batch_id, (feature, label) in enumerate(train_data):
feature = torch.tensor(feature, dtype=torch.float32)
if isinstance(loss_fn, torch.nn.CrossEntropyLoss) | isinstance(
loss_fn, torch.nn.NLLLoss
):
label = torch.tensor(label, dtype=torch.long)
temp = label.t()
label = temp[0]
else:
label = torch.tensor(label, dtype=torch.float32)
y_pre = self._model(feature)
optimizer.zero_grad()
loss = loss_fn(y_pre, label)
loss.backward()
optimizer.step()
def evaluate(self, data: data.dataset, **kwargs):
metircs = {}
loss_metircs = []
loss_fuc = []
other_metrics = []
if self._metrics:
for func in self._metrics:
if func.endswith("Loss"):
loss_metircs.append(func)
loss_fuc.append(build_loss_fn(func))
else:
other_metrics.append(func)
self._model.eval()
loss_fn = build_loss_fn(self._loss)
evaluate_data = DataLoader(data, batch_size=data.batch_size, shuffle=False)
if loss_metircs:
loss_metircs_result = [0 for i in range(len(loss_metircs))]
loss = 0
for batch_id, (feature, label) in enumerate(evaluate_data):
feature = torch.tensor(feature, dtype=torch.float32)
y = self._model(feature)
if batch_id == 0:
result = y.detach().numpy()
eval_label = label.detach().numpy()
else:
result = np.vstack((result, y.detach().numpy()))
eval_label = np.vstack((eval_label, label.detach().numpy()))
if isinstance(loss_fn, torch.nn.CrossEntropyLoss) | isinstance(
loss_fn, torch.nn.NLLLoss
):
label = torch.tensor(label, dtype=torch.long)
temp = label.t()
label = temp[0]
else:
label = torch.tensor(label, dtype=torch.float32)
eval_loss = loss_fn(y, label)
if loss_metircs:
for i in range(len(loss_fuc)):
f = loss_fuc[i]
res = f(y, label)
loss_metircs_result[i] += res.item()
loss += eval_loss
metircs["loss"] = loss.item() * data.batch_size / len(data)
if loss_metircs:
i = 0
for func in loss_metircs:
metircs[func] = loss_metircs_result[i] * data.batch_size / len(data)
i += 1
num_output_units = result.shape[1]
if len(other_metrics) > 0:
if num_output_units == 1:
for i in range(len(data)):
if result[i] > 0.5:
result[i] = 1
else:
result[i] = 0
for fuc_name in other_metrics:
if fuc_name == "auccuray":
metircs[str(fuc_name)] = accuracy_score(result, eval_label)
elif fuc_name == "precision":
metircs[str(fuc_name)] = precision_score(result, eval_label)
elif fuc_name == "recall":
metircs[str(fuc_name)] = recall_score(result, eval_label)
elif fuc_name == "auc":
metircs[str(fuc_name)] = roc_auc_score(result, eval_label)
elif fuc_name == "f1":
metircs[str(fuc_name)] = f1_score(result, eval_label)
elif fuc_name == "fbeta":
metircs[str(fuc_name)] = fbeta_score(result, eval_label, beta=2)
else:
print("metrics not support ")
else:
acc = 0
if data.use_one_hot:
for i in range(len(data)):
if result[i].argmax() == eval_label[i].argmax():
acc += 1
else:
for i in range(len(data)):
if result[i].argmax() == eval_label[i]:
acc += 1
metircs["auccuray"] = acc / len(data)
return metircs
def predict(self, data: data.dataset, **kwargs):
predict_data = DataLoader(data, batch_size=data.batch_size, shuffle=False)
for batch_id, (feature, label) in enumerate(predict_data):
feature = torch.tensor(feature, dtype=torch.float32)
# label = torch.tensor(label, dtype=torch.float32)
y = self._model(feature)
if batch_id == 0:
result = y.detach().numpy()
else:
result = np.vstack((result, y.detach().numpy()))
return result
def export_model(self):
with tempfile.TemporaryFile() as f:
torch.save(self._model, f)
f.seek(0)
return f.read()
@classmethod
def restore_model(cls, model_bytes):
with tempfile.TemporaryFile() as f:
f.write(model_bytes)
f.seek(0)
model = torch.load(f)
return PytorchNNModel(model)
class PytorchData(data.Dataset):
    """In-memory ``torch.utils.data.Dataset`` built from FATE data instances.

    :param data_instances: distributed table of ``(key, Instance)`` pairs
        exposing ``count()``, ``first()`` and ``collect()``; each instance has
        ``features`` (array-like) and ``label``.
    :param batch_size: desired batch size; ``-1`` means one full-size batch.
    :param encode_label: when True labels are one-hot encoded (only supported
        for more than two classes); when False labels are scalar class ids.
    :param label_mapping: dict mapping raw label values to contiguous ids.
    :raises ValueError: on empty input or an unsupported number of labels.
    """

    def __init__(self, data_instances, batch_size, encode_label, label_mapping):
        self.size = data_instances.count()
        if self.size <= 0:
            raise ValueError("empty data")
        # batch_size == -1 is a sentinel for "single batch with all samples".
        self.batch_size = self.size if batch_size == -1 else batch_size
        _, one_data = data_instances.first()
        self.x_shape = one_data.features.shape
        num_label = len(label_mapping)
        if encode_label:
            # One-hot encoding is only supported for multi-class problems.
            if num_label <= 2:
                raise ValueError(f"num_label is {num_label}")
            self.use_one_hot = True
            self.y_shape = (num_label,)
            self.output_shape = self.y_shape
        else:
            self.use_one_hot = False
            self.y_shape = (1,)
            if num_label == 2:
                self.output_shape = self.y_shape
            elif num_label > 2:
                self.output_shape = (num_label,)
            else:
                raise ValueError(f"num_label is {num_label}")
        # The fill loop was previously duplicated in both branches above;
        # it is factored out here since only the label encoding differs.
        self._fill(data_instances, label_mapping)

    def _fill(self, data_instances, label_mapping):
        """Materialize features, labels and keys into dense numpy arrays."""
        self.x = np.zeros((self.size, *self.x_shape))
        self.y = np.zeros((self.size, *self.y_shape))
        self._keys = []
        for index, (k, inst) in enumerate(data_instances.collect()):
            self._keys.append(k)
            self.x[index] = inst.features
            if self.use_one_hot:
                self.y[index][label_mapping[inst.label]] = 1
            else:
                self.y[index] = label_mapping[inst.label]

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):
        return len(self.x)

    def get_keys(self):
        return self._keys

    def get_shape(self):
        """Return ``(feature_shape, model_output_shape)``."""
        return self.x_shape, self.output_shape
class PytorchDataConverter(DataConverter):
    """DataConverter that wraps FATE data instances in a :class:`PytorchData`."""
    def convert(self, data, *args, **kwargs):
        # Extra positional/keyword args are forwarded to PytorchData.__init__
        # (batch_size, encode_label, label_mapping).
        return PytorchData(data, *args, **kwargs)
| {
"content_hash": "fa3f1aef1844a5b47d8350f1791b0c6b",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 88,
"avg_line_length": 36.29483282674772,
"alnum_prop": 0.5269240432124612,
"repo_name": "FederatedAI/FATE",
"id": "a8024365da8e9ea22b68fd15803f1d4981c40fa9",
"size": "12557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/federatedml/nn/backend/pytorch/nn_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
"""
CLI Tool for Paessler's PRTG (http://www.paessler.com/)
"""
import argparse
import os
import logging
from prtg.client import Client
def load_config():
    """Read the PRTG endpoint and credentials from the environment.

    Exits the process with status 1 when any of PRTGENDPOINT, PRTGUSERNAME
    or PRTGPASSWORD is missing.
    """
    required = ('PRTGENDPOINT', 'PRTGUSERNAME', 'PRTGPASSWORD')
    try:
        endpoint, username, password = (os.environ[name] for name in required)
    except KeyError as e:
        print('Unable to load environment variable: {}'.format(e))
        exit(1)
    return endpoint, username, password
def get_response(response):
    """Render a list of PRTG response objects as an ASCII table.

    Columns are the (sorted) attribute names of the first object.
    """
    from prettytable import PrettyTable
    columns = sorted(response[0].__dict__.keys())
    table = PrettyTable(columns)
    for entry in response:
        table.add_row([getattr(entry, name) for name in columns])
    return table
def get_parents(response):
    """Join the ``parentid`` of each object into a PRTG object-id filter string."""
    return '&filter_objid='.join(str(obj.parentid) for obj in response)
def main(args):
    """
    Run the PRTG CLI for already-parsed commandline arguments.

    :param args: argparse.Namespace with command, content, filterstring,
        parents and level attributes (see the parser in ``__main__``).
    :return: None
    """
    logging.basicConfig(level=args.level)
    endpoint, username, password = load_config()
    c = Client(endpoint=endpoint, username=username, password=password)
    if args.command == 'ls':
        q = c.get_table_output(filter_string=args.filterstring, content=args.content)
        if args.parents:  # Lookup the parents of a sensor or device.
            # NOTE(review): prints the raw query result; the get_parents()
            # helper above is never used here -- confirm intended behavior.
            children = c.query(q)
            print(children.response)
        else:
            c.query(q)
            print(get_response(q.response))
    if args.command == 'status':
        q = c.get_status()
        c.query(q)
        print(get_response(q.response))
if __name__ == '__main__':
    # Build the CLI argument parser and dispatch to main().
    parser = argparse.ArgumentParser(description='PRTG Command Line Interface')
    parser.add_argument('command', type=str, help='command name', choices=['ls', 'status'])
    parser.add_argument('-c', '--content', type=str, help='object type', default='devices',
                        choices=['devices', 'sensors'])
    parser.add_argument('-f', '--filter-string', type=str, dest='filterstring', help='object filter string', default='')
    parser.add_argument('-p', '--parents', action='store_true', help='Lookup parent objects of the sensor or device',
                        default=False)
    # NOTE(review): --sort-by and --new-tags are parsed but never read by
    # main() -- confirm whether they are dead options.
    parser.add_argument('-s', '--sort-by', type=str, help='Sort by a particular value', default='objid')
    parser.add_argument('-n', '--new-tags', help='Add new tags', dest='newtags')
    parser.add_argument('-l', '--level', help='Logging level', default='INFO')
    main(parser.parse_args())
| {
"content_hash": "6c867e90f411901820983ff9a1f24e13",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 120,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6265945110166216,
"repo_name": "kevinschoon/prtgtagger",
"id": "4d5b991743a4111f945ff0de3d6efa27a46f5f47",
"size": "2611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prtgcli/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2611"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing table for the HOA site (legacy Django 1.x ``patterns`` style).
# Order matters: Django resolves against the first matching pattern.
urlpatterns = patterns(
    '',
    url(r'^$', 'hoa.views.home'),
    # Built-in auth login view rendered with a project-local template.
    url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
    url(r'^logout/$', 'hoa.views.logout_view'),
    url(r'^search/$', 'hoa.views.search'),
    url(r'^debt/$', 'hoa.views.debt'),
    url(r'^info/$', 'hoa.views.info'),
    # Admin documentation pages:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Admin site:
    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "eae205b1f0ac20983f7ed6da5e2fc2af",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 89,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6556473829201102,
"repo_name": "desecho/hoa",
"id": "753d4564b0bef5d0ee95b5f65b923589a14e7f25",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hoa_project/hoa_project/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61105"
},
{
"name": "HTML",
"bytes": "13360"
},
{
"name": "JavaScript",
"bytes": "65046"
},
{
"name": "Python",
"bytes": "37091"
}
],
"symlink_target": ""
} |
import tempfile
import os
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.testing import tab_test_case
class TabStackTraceTest(tab_test_case.TabTestCase):
  """Tests that crashing a renderer produces usable minidumps/stack traces."""
  # Stack traces do not currently work on 10.6, but they are also being
  # disabled shortly so just disable it for now.
  # All platforms except chromeos should at least have a valid minidump.
  @decorators.Disabled('snowleopard', 'chromeos')
  def testValidDump(self):
    # chrome://crash intentionally kills the renderer; the resulting
    # DevtoolsTargetCrashException should carry a valid minidump.
    with self.assertRaises(exceptions.DevtoolsTargetCrashException) as c:
      self._tab.Navigate('chrome://crash', timeout=5)
    self.assertTrue(c.exception.is_valid_dump)
  # Stack traces aren't working on Android yet.
  @decorators.Enabled('mac', 'linux')
  @decorators.Disabled('snowleopard')
  def testCrashSymbols(self):
    # Full symbolization should surface the crashing function's name.
    with self.assertRaises(exceptions.DevtoolsTargetCrashException) as c:
      self._tab.Navigate('chrome://crash', timeout=5)
    self.assertIn('CrashIntentionally', '\n'.join(c.exception.stack_trace))
  # Some platforms do not support full stack traces, this test requires only
  # minimal symbols to be available.
  @decorators.Enabled('mac', 'linux', 'win')
  @decorators.Disabled('snowleopard')
  def testCrashMinimalSymbols(self):
    with self.assertRaises(exceptions.DevtoolsTargetCrashException) as c:
      self._tab.Navigate('chrome://crash', timeout=5)
    self.assertIn('PrepareRenderViewForNavigation',
                  '\n'.join(c.exception.stack_trace))
  # The breakpad file specific test only apply to platforms which use the
  # breakpad symbol format. This also must be tested in isolation because it
  # can potentially interfere with other tests symbol parsing.
  @decorators.Enabled('mac', 'linux')
  @decorators.Isolated
  def testBadBreakpadFileIgnored(self):
    # Plant a corrupt .breakpad file (garbage module hash) next to the
    # browser executable and check it is skipped during symbolization.
    # pylint: disable=protected-access
    executable_path = self._browser._browser_backend._executable
    executable = os.path.basename(executable_path)
    with tempfile.NamedTemporaryFile(mode='wt',
                                     dir=os.path.dirname(executable_path),
                                     prefix=executable + '.breakpad',
                                     delete=True) as f:
      garbage_hash = 'ABCDEF1234567'
      f.write('MODULE PLATFORM ARCH %s %s' % (garbage_hash, executable))
      f.flush()
      with self.assertRaises(exceptions.DevtoolsTargetCrashException) as c:
        self._tab.Navigate('chrome://crash', timeout=5)
      # The symbol directory should now contain symbols for our executable.
      tmp_dir = os.path.join(self._browser._browser_backend._tmp_minidump_dir)
      symbol_dir = os.path.join(tmp_dir, 'symbols')
      self.assertTrue(os.path.isdir(symbol_dir))
      # Bad breakpad file should not be in the symbol directory
      garbage_symbol_dir = os.path.join(symbol_dir, executable, garbage_hash)
      self.assertFalse(os.path.isdir(garbage_symbol_dir))
      # Stack trace should still work.
      self.assertIn('CrashIntentionally', '\n'.join(c.exception.stack_trace))
| {
"content_hash": "1fbd6b106ef54783738944bf2afaea20",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 45.696969696969695,
"alnum_prop": 0.7045755968169761,
"repo_name": "google-ar/WebARonARCore",
"id": "a9d2744c8d3dc88d5452d0cf62b108dd3b525d24",
"size": "3179",
"binary": false,
"copies": "2",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "tools/perf/core/stacktrace_unittest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
import six
from taskflow.openstack.common.gettextutils import _ # noqa
class save_and_reraise_exception(object):
    """Save the in-flight exception, run some code, then re-raise it.

    The exception context can be cleared while an exception handler runs
    (eventlet greenthread switches, or raising and catching another
    exception inside the handler), which would make a bare ``raise``
    attempt to re-raise ``None``.  This context manager snapshots
    ``sys.exc_info()`` on entry and re-raises that snapshot on exit.

    If a *new* exception escapes the managed block, the saved one is
    logged and the new one propagates instead.

    Set ``ctxt.reraise = False`` inside the block to suppress re-raising::

        except Exception:
            with save_and_reraise_exception() as ctxt:
                decide_if_need_reraise()
                if not should_be_reraised:
                    ctxt.reraise = False
    """
    def __init__(self):
        self.reraise = True

    def __enter__(self):
        # Snapshot the currently-handled exception before the block runs.
        self.type_, self.value, self.tb = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            if self.reraise:
                six.reraise(self.type_, self.value, self.tb)
        else:
            # A new exception escaped the block: log the saved one and let
            # the new one propagate (False does not suppress it).
            formatted = traceback.format_exception(self.type_,
                                                   self.value,
                                                   self.tb)
            logging.error(_('Original exception being dropped: %s'),
                          formatted)
            return False
def forever_retry_uncaught_exceptions(infunc):
    """Decorator that retries *infunc* forever, logging uncaught exceptions.

    Identical consecutive exceptions are rate-limited to one log entry per
    minute; a changed exception message is logged immediately.  The wrapper
    sleeps one second between retries and only returns when *infunc* does.
    """
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                # six.text_type instead of the Python-2-only ``unicode``
                # builtin so this also works on Python 3.
                this_exc_message = six.text_type(exc)
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
| {
"content_hash": "19791599b5c8d455f2eeccc62433e357",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 36.36904761904762,
"alnum_prop": 0.564975450081833,
"repo_name": "jessicalucci/TaskManagement",
"id": "8f4f6b15bb2f5e7a8e269e749b1b8c5fd1eeaef9",
"size": "3746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskflow/openstack/common/excutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "363177"
},
{
"name": "Shell",
"bytes": "3255"
}
],
"symlink_target": ""
} |
import re
from bson.objectid import ObjectId, InvalidId
import datetime
from girder.models.model_base import AccessControlledModel
from girder.constants import AccessType
from girder.models.model_base import ValidationException
from girder.models.group import Group
from girder.models.item import Item
from girder.models.file import File
from girder import events
from girder.plugins.materialsdatabank.models.slug import Slug, SlugUpdateException
from girder.plugins.materialsdatabank.models.reconstruction import Reconstruction as ReconstructionModel
from girder.plugins.materialsdatabank.models.structure import Structure as StructureModel
from girder.plugins.materialsdatabank.models.projection import Projection as ProjectionModel
from ..constants import ELEMENT_SYMBOLS_LOWER, ELEMENT_SYMBOLS
class Dataset(AccessControlledModel):
    """Girder model for materials datasets (collection ``mdb.datasets``)."""

    def initialize(self):
        """Set collection name, indices, text index and exposed fields."""
        self.name = 'mdb.datasets'
        self.ensureIndices(('authors', 'title', 'atomicSpecies', 'mdbId'))
        self.ensureTextIndex({
            'authors': 1,
            'title': 1
        })
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'authors', 'title', 'atomicSpecies', 'doi', 'mdbId'))

    def validate(self, dataset):
        """Reject a dataset whose mdbId is already taken by another document."""
        if 'mdbId' in dataset and dataset['mdbId'] is not None:
            # Do we already have it
            if len(list(self.find(mdb_id=dataset['mdbId'], force=True))) > 0:
                raise ValidationException('"%s" has already been taken.' % dataset['mdbId'], field='mdbId')
        return dataset

    def _generate_mdb_id_prefix(self, species):
        """Build a 4-character id prefix from element symbols, padded with 'X'."""
        prefix = []

        def _chars_left():
            return 4 - sum([len(x) for x in prefix])

        # Greedily pack symbols that still fit in the remaining space.
        for s in species:
            if len(s) <= _chars_left():
                prefix.append(s)
        prefix += ['X'] * _chars_left()
        return ''.join(prefix)

    def _generate_mdb_id_postfix(self, prefix):
        """Return the next unused zero-padded 5-digit postfix for *prefix*."""
        # Search for existing datasets with this prefix (raw string so \d is
        # a regex character class, not a Python string escape).
        regex = re.compile(r'^%s(\d{5})' % prefix)
        query = {
            'mdbId': {
                '$regex': regex
            }
        }
        cursor = super(Dataset, self).find(query, fields=['mdbId'])
        postfix = 0
        for d in cursor:
            match = regex.match(d['mdbId'])
            p = int(match.group(1))
            if p > postfix:
                postfix = p
        postfix += 1
        return str(postfix).zfill(5)

    def ensure_mdb_id(self, dataset, species, updates):
        """Allocate a unique mdbId for *dataset* (if it has none) into *updates*.

        :param species: iterable of atomic numbers.
        :param updates: mongo update document; mutated in place.
        """
        species = [ELEMENT_SYMBOLS[n] for n in species]
        # Fix: this previously tested the misspelled key 'mbdId', so a fresh
        # id was generated on every call even when one already existed.
        if 'mdbId' not in dataset:
            prefix = self._generate_mdb_id_prefix(species)
            # Try up to 5 times, in case we have overlapping updates
            retry_count = 5
            while True:
                postfix = self._generate_mdb_id_postfix(prefix)
                mdb_id = '%s%s' % (prefix, postfix)
                # Now update atomically the slugs document
                try:
                    Slug().add(mdb_id)
                    break
                except SlugUpdateException:
                    if retry_count == 0:
                        raise Exception('Unable to create new mdb id after 5 retries.')
                    retry_count -= 1
            # Now we have allocated the mdbId add it to the dataset model
            updates.setdefault('$set', {})['mdbId'] = mdb_id

    def create(self, authors, title=None, doi=None, microscope=None, image_file_id=None,
               user=None, public=False):
        """Create and save a new dataset document.

        NOTE(review): ``microscope`` is accepted but never stored here --
        confirm whether that is intentional.
        """
        dataset = {
            'authors': authors,
            'title': title,
            'doi': doi,
            'editable': False,
            'deposited': datetime.datetime.utcnow(),
            'updated': datetime.datetime.utcnow()
        }
        if image_file_id is not None:
            dataset['imageFileId'] = ObjectId(image_file_id)
        self.setPublic(dataset, public=public)
        if user:
            dataset['userId'] = user['_id']
            self.setUserAccess(dataset, user=user, level=AccessType.ADMIN)
            # Members of the 'curator' group can administer any dataset.
            curator = list(Group().find({
                'name': 'curator',
            }))
            if len(curator) > 0:
                self.setGroupAccess(dataset, group=curator[0], level=AccessType.ADMIN)
        else:
            dataset['userId'] = None
        dataset = self.save(dataset)
        events.trigger('mdb.dataset.created', {
            'dataset': dataset,
            'user': user
        })
        return dataset

    def update(self, dataset, dataset_updates=None, user=None, atomic_species=None, validation=None,
               public=None):
        """Apply mutable-property, species, visibility and validation updates.

        Returns the freshly loaded dataset when anything changed, otherwise
        the dataset passed in.
        """
        query = {
            '_id': dataset['_id']
        }
        if dataset_updates is None:
            dataset_updates = {}
        updates = {}
        mutable_props = ['authors', 'title', 'doi', 'editable']
        for prop in dataset_updates:
            if prop in mutable_props:
                updates.setdefault('$set', {})[prop] = dataset_updates[prop]
        if atomic_species is not None:
            # Merge new species into the existing set.
            new_atomic_species = set(dataset.get('atomicSpecies', {}))
            new_atomic_species.update(atomic_species)
            if atomic_species is not None:
                updates.setdefault('$set', {})['atomicSpecies'] = list(new_atomic_species)
            self.ensure_mdb_id(dataset, new_atomic_species, updates)
        if public is not None:
            updates.setdefault('$set', {})['public'] = public
            # Trigger event if this dataset is being approved ( being made public )
            if public and not dataset.get('public', False):
                events.trigger('mdb.dataset.approved', {
                    'dataset': dataset,
                    'approver': user
                })
                updates.setdefault('$set', {})['released'] = datetime.datetime.utcnow()
        if validation is not None:
            updates.setdefault('$set', {})['validation'] = validation
        if updates:
            updates.setdefault('$set', {})['updated'] = datetime.datetime.utcnow()
            super(Dataset, self).update(query, update=updates, multi=False)
            return self.load(dataset['_id'], user=user, level=AccessType.READ)
        return dataset

    def _normalize_element(self, element):
        """Convert an element symbol or numeric string to an atomic number.

        :raises ValueError: when *element* is neither.
        """
        # Try looking up element
        try:
            atomic_number = ELEMENT_SYMBOLS_LOWER.index(element.lower())
        except ValueError:
            # Try convert to int
            atomic_number = int(element)
        return atomic_number

    def search(self, search_terms=None, atomic_species=None, offset=0, limit=None,
               sort=None, user=None):
        """Yield datasets matching free-text terms, mdbIds or element symbols."""
        query = {}
        if search_terms is not None:
            filters = []
            for search in search_terms:
                filters.append({
                    '$text': {
                        '$search': search
                    }
                })
                filters.append({
                    'mdbId': search
                })
                try:
                    atomic_number = self._normalize_element(search)
                    filters.append({
                        'atomicSpecies': {
                            '$in': [atomic_number]
                        }
                    })
                except ValueError:
                    # The search term can't be an atomic number
                    pass
            query['$or'] = filters
        cursor = super(Dataset, self).find(query=query, sort=sort, user=user)
        for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                                level=AccessType.READ,
                                                limit=limit, offset=offset):
            yield r

    def find(self, authors=None, title=None, atomic_species=None, mdb_id=None,
             owner=None, offset=0, limit=None, sort=None, user=None, force=False):
        """Yield datasets matching the given field filters.

        ``force=True`` skips the per-document permission filtering.
        """
        query = {}
        if authors is not None:
            if not isinstance(authors, (list, tuple)):
                authors = [authors]
            # Case-insensitive substring match on each author.
            author_regexs = []
            for author in authors:
                author_regexs.append(re.compile('.*%s.*' % author, re.IGNORECASE))
            query['authors'] = {
                '$in': author_regexs
            }
        if title is not None:
            regex = re.compile('.*%s.*' % title, re.IGNORECASE)
            query['title'] = {
                '$regex': regex
            }
        if atomic_species:
            species = []
            for s in atomic_species:
                try:
                    atomic_number = self._normalize_element(s)
                    species.append(atomic_number)
                except ValueError:
                    # The search term can't be an atomic number
                    pass
            query['atomicSpecies'] = {
                '$in': species
            }
        if mdb_id is not None:
            query['mdbId'] = mdb_id
        if owner is not None:
            if not isinstance(owner, ObjectId):
                try:
                    owner = ObjectId(owner)
                except InvalidId:
                    raise ValidationException('Invalid ObjectId: %s' % owner,
                                              field='owner')
            query['userId'] = owner
        cursor = super(Dataset, self).find(query=query, sort=sort, user=user)
        if not force:
            for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                                    level=AccessType.READ,
                                                    limit=limit, offset=offset):
                yield r
        else:
            for r in cursor:
                yield r

    def load(self, id, user=None, level=AccessType.READ, force=False):
        """Load a dataset by ObjectId or, failing that, by mdbId slug."""
        try:
            ObjectId(id)
            return super(Dataset, self).load(id, user=user, level=level, force=force)
        except InvalidId:
            # Not an ObjectId -- try it as a mdb id.
            # (A leftover debug print of the dataset was removed here.)
            dataset = list(self.find(mdb_id=id, limit=1, user=user))
            if len(dataset) > 0:
                return dataset[0]
        return None

    def delete(self, dataset, user):
        """Delete a dataset and all of its associated documents/files."""
        dataset_id = dataset['_id']
        # Delete reconstruction
        try:
            reconstruction = ReconstructionModel().find(dataset_id).next()
            ReconstructionModel().delete(reconstruction, user)
        except StopIteration:
            pass
        # Delete structure
        try:
            structure = StructureModel().find(dataset_id).next()
            StructureModel().delete(structure, user)
        except StopIteration:
            pass
        # Delete projection
        try:
            projection = ProjectionModel().find(dataset_id).next()
            ProjectionModel().delete(projection, user)
        except StopIteration:
            pass
        # Remove the slug
        Slug().remove(dataset['mdbId'])
        # Remove the image file
        image_file = File().load(dataset['imageFileId'], force=True)
        if image_file is not None:
            item = Item().load(image_file['itemId'], force=True)
            Item().remove(item)
        # Now delete the dataset
        super(Dataset, self).remove(dataset)
| {
"content_hash": "0b03fe005aea1b7c38d0b1483aed4ab0",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 107,
"avg_line_length": 33.40117994100295,
"alnum_prop": 0.5274220612911773,
"repo_name": "OpenChemistry/materialsdatabank",
"id": "c144b986e267c4da6f0e0e6253a02a5d0c0834c0",
"size": "11323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/materialsdatabank/server/models/dataset.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "938"
},
{
"name": "Dockerfile",
"bytes": "1007"
},
{
"name": "HTML",
"bytes": "1817"
},
{
"name": "JavaScript",
"bytes": "184971"
},
{
"name": "Mako",
"bytes": "1078"
},
{
"name": "Python",
"bytes": "147258"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
} |
"""Test the helper method for writing tests."""
import asyncio
import functools as ft
import json
import logging
import os
import uuid
import sys
import threading
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from unittest.mock import MagicMock, Mock, patch
import homeassistant.util.dt as date_util
import homeassistant.util.yaml.loader as yaml_loader
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers,
permissions as auth_permissions)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, SERVER_PORT, STATE_ON, STATE_OFF)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry, device_registry, entity, entity_platform, entity_registry,
intent, restore_state, storage)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
    """Create threadsafe functions out of callbacks.

    Callback needs to have `hass` as first argument.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the wrapped callback on hass' loop and block for the result."""
        hass = args[0]
        partial_call = ft.partial(func, *args, **kwargs)
        return run_callback_threadsafe(hass.loop, partial_call).result()
    return wrapper
def threadsafe_coroutine_factory(func):
    """Create threadsafe functions out of coroutine.

    Callback needs to have `hass` as first argument.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Schedule the coroutine on hass' loop and block for the result."""
        hass = args[0]
        coro = func(*args, **kwargs)
        return run_coroutine_threadsafe(coro, hass.loop).result()
    return wrapper
def get_test_config_dir(*add_path):
    """Return a path to a test config dir."""
    base = os.path.dirname(__file__)
    return os.path.join(base, 'testing_config', *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory.

    The event loop runs on a dedicated thread so synchronous test code can
    call into it; hass.start/hass.stop are replaced with threadsafe wrappers.
    """
    if sys.platform == "win32":
        # Windows needs the proactor loop for subprocess support.
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    hass = loop.run_until_complete(async_test_home_assistant(loop))
    stop_event = threading.Event()
    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        # Signal stop_hass() that the loop has fully exited.
        stop_event.set()
    orig_stop = hass.stop
    def start_hass(*mocks):
        """Start hass."""
        run_coroutine_threadsafe(hass.async_start(), loop).result()
    def stop_hass():
        """Stop hass and wait for the loop thread to finish before closing."""
        orig_stop()
        stop_event.wait()
        loop.close()
    hass.start = start_hass
    hass.stop = stop_hass
    # daemon=False so the loop thread is joined on interpreter shutdown.
    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir.

    Wraps the job/task scheduling methods so plain Mocks can be scheduled,
    fills in a deterministic test configuration, and registers cleanup that
    drops the instance from INSTANCES on close.
    """
    hass = ha.HomeAssistant(loop)
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)
    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task
    def async_add_job(target, *args):
        """Add job."""
        # Mocks aren't awaitable: call them and wrap the result in a coro.
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_job(target, *args)
    def async_add_executor_job(target, *args):
        """Add executor job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_executor_job(target, *args)
    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock):
            return mock_coro()
        return orig_async_create_task(coroutine)
    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task
    # Deterministic test configuration (San Diego coordinates, metric units).
    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True
    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None
    hass.state = ha.CoreState.running
    # Mock async_start
    orig_start = hass.async_start
    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch('homeassistant.core._async_create_timer'), \
                patch.object(hass, 'async_stop_track_tasks'):
            await orig_start()
    hass.async_start = mock_async_start
    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
    return hass
def get_test_instance_port():
    """Return unused port for running test instance.

    The socket that holds the default port does not get released when we stop
    HA in a different test case. Until I have figured out what is going on,
    let's run each test on a different port.
    """
    global _TEST_INSTANCE_PORT
    _TEST_INSTANCE_PORT = _TEST_INSTANCE_PORT + 1
    return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service & return a calls log list to this service."""
    recorded_calls = []

    @ha.callback
    def _record_call(call):
        """Append the incoming service call to the log."""
        recorded_calls.append(call)

    hass.services.async_register(
        domain, service, _record_call, schema=schema)
    return recorded_calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler and return the list of handled intents."""
    handled = []

    class MockIntentHandler(intent.IntentHandler):
        """Handler that records every intent it receives."""

        intent_type = intent_typ

        @asyncio.coroutine
        def async_handle(self, intent_obj):
            """Handle the intent."""
            handled.append(intent_obj)
            return intent_obj.create_response()

    intent.async_register(hass, MockIntentHandler())
    return handled
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    if isinstance(payload, str):
        payload = payload.encode('utf-8')
    message = mqtt.Message(topic, payload, qos, retain)
    hass.data['mqtt']._mqtt_handle_message(message)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
    """Fire a time changes event."""
    event_data = {'now': date_util.as_utc(time)}
    hass.bus.async_fire(EVENT_TIME_CHANGED, event_data)
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event for *service* with *info*."""
    event_data = {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, event_data)
@ha.callback
def async_fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event for *service* (async version)."""
    event_data = {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, event_data)
def load_fixture(filename):
    """Return the contents of *filename* from the fixtures directory."""
    fixture_path = os.path.join(
        os.path.dirname(__file__), 'fixtures', filename)
    with open(fixture_path, encoding='utf-8') as fixture_file:
        return fixture_file.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a mocked state-changed event for *new_state* on the bus."""
    event_data = {'entity_id': new_state.entity_id, 'new_state': new_state}
    # Truthiness check kept on purpose: a falsy old_state is treated as absent.
    if old_state:
        event_data['old_state'] = old_state
    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
async def async_mock_mqtt_component(hass, config=None):
    """Mock the MQTT component."""
    # Default to a fake broker so setup never tries to reach a real server.
    if config is None:
        config = {mqtt.CONF_BROKER: 'mock-broker'}
    with patch('paho.mqtt.client.Client') as mock_client:
        # Paho return codes: 0 is success; (0, 0) is (result, message id).
        mock_client().connect.return_value = 0
        mock_client().subscribe.return_value = (0, 0)
        mock_client().unsubscribe.return_value = (0, 0)
        mock_client().publish.return_value = (0, 0)
        result = await async_setup_component(hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: config
        })
        assert result
        await hass.async_block_till_done()
        # Wrap the real MQTT object in a spy so tests can assert on calls
        # while the real behavior still runs underneath.
        hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
                                      wraps=hass.data['mqtt'])
        return hass.data['mqtt']
# Synchronous, thread-safe variant for non-async test code.
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
    """Mock a component is setup.

    Adds *component* to ``hass.config.components`` so later setup checks
    treat it as already loaded.

    Raises:
        AssertionError: if the integration was already set up.
    """
    if component in hass.config.components:
        # Bug fix: the AssertionError was previously instantiated but never
        # raised, so double setup passed silently.
        raise AssertionError(
            "Integration {} is already setup".format(component))
    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Install a mocked Entity Registry on *hass* and return it."""
    mock_reg = entity_registry.EntityRegistry(hass)
    mock_reg.entities = mock_entries or OrderedDict()
    hass.data[entity_registry.DATA_REGISTRY] = mock_reg
    return mock_reg
def mock_area_registry(hass, mock_entries=None):
    """Install a mocked Area Registry on *hass* and return it."""
    mock_reg = area_registry.AreaRegistry(hass)
    mock_reg.areas = mock_entries or OrderedDict()
    hass.data[area_registry.DATA_REGISTRY] = mock_reg
    return mock_reg
def mock_device_registry(hass, mock_entries=None):
    """Install a mocked Device Registry on *hass* and return it."""
    mock_reg = device_registry.DeviceRegistry(hass)
    mock_reg.devices = mock_entries or OrderedDict()
    hass.data[device_registry.DATA_REGISTRY] = mock_reg
    return mock_reg
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name='Mock Group',
                 policy=system_policies.ADMIN_POLICY):
        """Initialize the mock group."""
        group_kwargs = {'name': name, 'policy': policy}
        if id is not None:
            group_kwargs['id'] = id
        super().__init__(**group_kwargs)

    def add_to_hass(self, hass):
        """Register this group with the auth manager of *hass*."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Register this group directly with *auth_mgr* and return it."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(self, id=None, is_owner=False, is_active=True,
                 name='Mock User', system_generated=False, groups=None):
        """Initialize mock user."""
        user_kwargs = {
            'is_owner': is_owner,
            'is_active': is_active,
            'name': name,
            'system_generated': system_generated,
            'groups': groups or [],
            'perm_lookup': None,
        }
        if id is not None:
            user_kwargs['id'] = id
        super().__init__(**user_kwargs)

    def add_to_hass(self, hass):
        """Register this user with the auth manager of *hass*."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Register this user directly with *auth_mgr* and return it."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Replace this user's permissions with ones built from *policy*."""
        self._permissions = auth_permissions.PolicyPermissions(
            policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Build an auth provider from *config*, register it, and return it."""
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config)
    assert provider is not None, 'Invalid config specified'
    provider_key = (provider.type, provider.id)
    if provider_key in hass.auth._providers:
        raise ValueError('Provider already registered')
    hass.auth._providers[provider_key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    # A None user store means the manager never loaded; seed the defaults.
    if auth_mgr._store._users is None:
        auth_mgr._store._set_defaults()
class MockModule:
    """Representation of a fake module."""
    # pylint: disable=invalid-name
    def __init__(self, domain=None, dependencies=None, setup=None,
                 requirements=None, config_schema=None, platform_schema=None,
                 platform_schema_base=None, async_setup=None,
                 async_setup_entry=None, async_unload_entry=None,
                 async_migrate_entry=None, async_remove_entry=None,
                 partial_manifest=None):
        """Initialize the mock module.

        Hooks are only attached when provided, so integration machinery
        that probes with hasattr() sees the same shape as a real module
        that omits the hook.
        """
        # Mimic the dunder attributes a real integration module would have.
        self.__name__ = 'homeassistant.components.{}'.format(domain)
        self.__file__ = 'homeassistant/components/{}'.format(domain)
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # Overlay to be used when generating manifest from this module
        self._partial_manifest = partial_manifest
        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base
        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)
        if async_setup is not None:
            self.async_setup = async_setup
        if setup is None and async_setup is None:
            # Default when no setup hook was given: report success.
            self.async_setup = mock_coro_func(True)
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry
        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry
        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry
    def mock_manifest(self):
        """Generate a mock manifest to represent this module."""
        return {
            **loader.manifest_from_legacy_module(self.DOMAIN, self),
            # Entries in partial_manifest override the generated ones.
            **(self._partial_manifest or {})
        }
class MockPlatform:
    """Provide a fake platform."""
    # Mimic the dunder attributes of a real platform module.
    __name__ = 'homeassistant.components.light.bla'
    __file__ = 'homeassistant/components/blah/light'
    # pylint: disable=invalid-name
    def __init__(self, setup_platform=None, dependencies=None,
                 platform_schema=None, async_setup_platform=None,
                 async_setup_entry=None, scan_interval=None):
        """Initialize the platform.

        Hooks are only attached when provided, so hasattr() probes see the
        same shape as a real platform module that omits the hook.
        """
        self.DEPENDENCIES = dependencies or []
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval
        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)
        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform
        if setup_platform is None and async_setup_platform is None:
            # Default when no setup hook was given: a no-op coroutine.
            self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""
    def __init__(
        self, hass,
        logger=None,
        domain='test_domain',
        platform_name='test_platform',
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
        async_entities_added_callback=lambda: None
    ):
        """Initialize a mock entity platform with test-friendly defaults."""
        if logger is None:
            logger = logging.getLogger('homeassistant.helpers.entity_platform')
        # Otherwise the constructor will blow up.
        if (isinstance(platform, Mock) and
                isinstance(platform.PARALLEL_UPDATES, Mock)):
            # A bare Mock would leak a Mock PARALLEL_UPDATES into the base
            # class; pin it to a real integer instead.
            platform.PARALLEL_UPDATES = 0
        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
            async_entities_added_callback=async_entities_added_callback,
        )
class MockToggleDevice(entity.ToggleEntity):
    """Provide a mock toggle device."""

    def __init__(self, name, state):
        """Initialize the mock device."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the name of the device if any."""
        self.calls.append(('name', {}))
        return self._name

    @property
    def state(self):
        """Return the name of the device if any."""
        self.calls.append(('state', {}))
        return self._state

    @property
    def is_on(self):
        """Return true if device is on."""
        self.calls.append(('is_on', {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self.calls.append(('turn_on', kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.calls.append(('turn_off', kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the most recent call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        matching = (call for call in reversed(self.calls)
                    if call[0] == method)
        return next(matching, None)
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(self, *, domain='test', data=None, version=1, entry_id=None,
                 source=config_entries.SOURCE_USER, title='Mock Title',
                 state=None, options=None,
                 connection_class=config_entries.CONN_CLASS_UNKNOWN):
        """Initialize a mock config entry.

        ``options`` previously defaulted to a mutable ``{}`` that was
        shared between every instance; it now defaults to None and each
        entry gets a fresh dict.
        """
        kwargs = {
            'entry_id': entry_id or uuid.uuid4().hex,
            'domain': domain,
            'data': data or {},
            'options': options or {},
            'version': version,
            'title': title,
            'connection_class': connection_class,
        }
        if source is not None:
            kwargs['source'] = source
        if state is not None:
            kwargs['state'] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    files_dict maps file names to the YAML text they should contain.  With
    endswith=True a lookup also matches when the requested path merely ends
    with one of the keys.
    """
    # match using endswith, start search with longest string
    # NOTE(review): sorted(key=len) is ascending (shortest first), which
    # contradicts the comment above -- confirm the intended match order.
    matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, 'name', fname)
            return res
        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, 'name', fname)
                return res
        # Fallback for hass.components (i.e. services.yaml)
        if 'homeassistant/components' in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding='utf-8')
        # Not found
        raise FileNotFoundError("File not found: {}".format(fname))
    return patch.object(yaml_loader, 'open', mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raise an exception."""
    coro_factory = mock_coro_func(return_value, exception)
    return coro_factory()
def mock_coro_func(return_value=None, exception=None):
    """Return a coroutine function that returns a value or raises.

    The returned callable accepts any arguments, ignores them, and when
    awaited either raises *exception* (if truthy) or returns
    *return_value*.
    """
    # `async def` replaces the deprecated @asyncio.coroutine decorator
    # (removed in Python 3.11); native coroutines are awaited the same way,
    # and this file already uses async def elsewhere.
    async def coro(*args, **kwargs):
        """Fake coroutine."""
        if exception:
            raise exception
        return return_value
    return coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.
    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time
    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}
    async def mock_psc(hass, config_input, integration):
        """Mock the prepare_setup_component to capture config."""
        domain_input = integration.domain
        res = await async_process_component_config(
            hass, config_input, integration)
        # Record the validated config (or None when validation failed).
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
                      domain_input,
                      config[domain_input],
                      config_input.get(domain_input))
        return res
    assert isinstance(config, dict)
    with patch('homeassistant.config.async_process_component_config',
               mock_psc):
        yield config
    # After the with-block ran, verify the captured config matches count.
    if domain is None:
        # Infer the domain when exactly one was captured.
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]
    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = 'sqlite://'  # In memory DB
    # Skip schema migration: the fresh in-memory DB starts at the latest
    # schema, so there is nothing to migrate.
    with patch('homeassistant.components.recorder.migration.migrate_schema'):
        assert setup_component(hass, recorder.DOMAIN,
                               {recorder.DOMAIN: config})
    assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()
    last_states = {}
    for state in states:
        restored_state = state.as_dict()
        # Round-trip through JSON so attributes look like real restored data.
        restored_state['attributes'] = json.loads(json.dumps(
            restored_state['attributes'], cls=JSONEncoder))
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now)
    data.last_states = last_states
    _LOGGER.debug('Restore cache: %s', data.last_states)
    assert len(data.last_states) == len(states), \
        "Duplicate entity_id? {}".format(states)
    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data
    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
    """Decorator to mock install a dependency."""

    def __init__(self, root, *args):
        """Initialize decorator."""
        self.root = root
        self.submodules = args

    def __enter__(self):
        """Start mocking."""
        def resolve(mock, path):
            """Walk attribute *path* down from *mock*."""
            target = mock
            for part in path:
                target = getattr(target, part)
            return target

        base = MagicMock()
        to_mock = {}
        for tom in self.submodules:
            to_mock["{}.{}".format(self.root, tom)] = resolve(
                base, tom.split('.'))
        to_mock[self.root] = base
        self.patcher = patch.dict('sys.modules', to_mock)
        self.patcher.start()
        return base

    def __exit__(self, *exc):
        """Stop mocking."""
        self.patcher.stop()
        return False

    def __call__(self, func):
        """Apply decorator."""
        def run_mocked(*args, **kwargs):
            """Run with mocked dependencies."""
            with self as base:
                func(*(list(args) + [base]), **kwargs)
        return run_mocked
class MockEntity(entity.Entity):
    """Mock Entity class."""

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values
        if 'entity_id' in values:
            self.entity_id = values['entity_id']

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle('name')

    @property
    def should_poll(self):
        """Return the ste of the polling."""
        return self._handle('should_poll')

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle('unique_id')

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle('available')

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle('device_info')

    def _handle(self, attr):
        """Return the overridden value for *attr*, else the base default."""
        try:
            return self._values[attr]
        except KeyError:
            return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.
    Data is a dict {'key': {'version': version, 'data': data}}
    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}
    orig_load = storage.Store._async_load
    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None
            mock_data = data.get(store.key)
            if 'data' not in mock_data or 'version' not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')
            store._data = mock_data
        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info('Loading data for %s: %s', store.key, loaded)
        return loaded
    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(
            data_to_write, cls=store._encoder))
    # autospec=True keeps the (self, ...) signature so side_effect receives
    # the store instance as its first argument.
    with patch('homeassistant.helpers.storage.Store._async_load',
               side_effect=mock_async_load, autospec=True), \
            patch('homeassistant.helpers.storage.Store._write_data',
                  side_effect=mock_write_data, autospec=True):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    if store._data is not None:
        await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Get system health info."""
    # Each domain registers an async info callback under
    # hass.data['system_health']['info']; call it with hass and await it.
    return await hass.data['system_health']['info'][domain](hass)
def mock_integration(hass, module):
    """Register *module* as a loaded integration on *hass*."""
    domain = module.DOMAIN
    integration = loader.Integration(
        hass, 'homeassistant.components.{}'.format(domain), None,
        module.mock_manifest())
    _LOGGER.info("Adding mock integration: %s", domain)
    integrations = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    integrations[domain] = integration
    components = hass.data.setdefault(loader.DATA_COMPONENTS, {})
    components[domain] = module
def mock_entity_platform(hass, platform_path, module):
    """Mock an entity platform.

    platform_path is in form light.hue. Will create platform
    hue.light.
    """
    domain, platform_name = platform_path.split('.')
    # The original fetched the same DATA_COMPONENTS dict twice under two
    # names (integration_cache / module_cache); a single lookup suffices
    # for both the existence check and the store.
    module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
    if platform_name not in module_cache:
        # mock_integration populates DATA_COMPONENTS for platform_name,
        # so the check above stays valid on later calls.
        mock_integration(hass, MockModule(platform_name))
    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    module_cache["{}.{}".format(platform_name, domain)] = module
def async_capture_events(hass, event_name):
    """Create a helper that captures events."""
    captured = []

    @ha.callback
    def _on_event(event):
        captured.append(event)

    hass.bus.async_listen(event_name, _on_event)
    return captured
| {
"content_hash": "adc9913fba881823e3b7081b7b56eadd",
"timestamp": "",
"source": "github",
"line_count": 971,
"max_line_length": 79,
"avg_line_length": 31.579814624098866,
"alnum_prop": 0.6235650926167493,
"repo_name": "jabesq/home-assistant",
"id": "cb0e6c69cefeacbd565dbd9af7dda0980e01b157",
"size": "30664",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
} |
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReconnectSessionRequestBody(object):
    """Swagger-generated model for the reconnect-session request body.

    NOTE: originally produced by the swagger code generator; the contract
    (attributes, validation, dict conversion) is unchanged.
    """

    # Maps attribute name -> attribute type.
    swagger_types = {'data': 'ReconnectSessionRequestBodyData'}
    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {'data': 'data'}

    def __init__(self, data=None):  # noqa: E501
        """ReconnectSessionRequestBody - a model defined in Swagger"""  # noqa: E501
        self._data = None
        self.discriminator = None
        self.data = data

    @property
    def data(self):
        """Return the data of this ReconnectSessionRequestBody.

        :rtype: ReconnectSessionRequestBodyData
        """
        return self._data

    @data.setter
    def data(self, data):
        """Set the data of this ReconnectSessionRequestBody.

        :raises ValueError: if *data* is None (the field is required).
        """
        if data is None:
            raise ValueError("Invalid value for `data`, must not be `None`")  # noqa: E501
        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            """Recursively convert nested models to plain dicts."""
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict")
                              else val)
                        for key, val in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr, _ in six.iteritems(self.swagger_types)}
        if issubclass(ReconnectSessionRequestBody, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, ReconnectSessionRequestBody)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| {
"content_hash": "1d7139b48a6194859c4d7573948e550a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 90,
"avg_line_length": 29.926605504587155,
"alnum_prop": 0.5616186388718577,
"repo_name": "ltowarek/budget-supervisor",
"id": "15237fed3f81feb552370ee5267f1a65d02a0474",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/saltedge/swagger_client/models/reconnect_session_request_body.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7960"
},
{
"name": "JavaScript",
"bytes": "79489"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import datetime
import csv
import json
# Polling granularity in seconds while waiting out the rate-limit interval.
SLEEP_TIME=0.1 # TODO: Optimization of SLEEP_TIME
# Directory containing this file; config paths are resolved relative to it.
BIN_DIR = os.path.dirname(os.path.abspath(__file__))
# TOKENS_DIR = os.path.normpath(os.path.join(BIN_DIR,'../tokens/'))
# NOTE(review): TOKENS_DIR above is commented out, but Wrapper.read_tokens
# still references it when no path is supplied -- confirm whether it
# should be restored.
DEFAULT_CONFIG_DIR = os.path.normpath(os.path.join(BIN_DIR,'../config/'))
class Wrapper:
    """Base class for rate-limited API wrappers (Python 2 syntax)."""
    def __init__(self, service):
        """Remember the service name and start request bookkeeping."""
        self.last_request_time = datetime.datetime.now()
        self.service = service
        self.access_count = 0
    def setup(self, config_path, tokens_path):
        """Load API tokens and rate-limit config from the given paths."""
        self.read_tokens(tokens_path)
        self.read_config(config_path)
    # Read lib/tokens/xxx_tokens.tsv
    def read_tokens(self, path):
        """Open the TSV token file; fall back to a default path when None."""
        if path is None:
            sys.stderr.write('Using default config...')
            self.tokens_filename = './default_%s_tokens.tsv' % self.service
            # NOTE(review): TOKENS_DIR is commented out at module level, so
            # this default branch raises NameError -- confirm intent.
            self.tokens_path = os.path.normpath(os.path.join(TOKENS_DIR, self.tokens_filename))
        else:
            self.tokens_path = path
        try:
            # Python 2 file() builtin; the token file is tab-separated.
            self.tokens_reader = csv.reader(file(self.tokens_path), delimiter='\t')
        except:
            sys.stderr.write("Unexpected error while reading %s:\n\t%s\n\t%s" % (self.tokens_path , sys.exc_info()[0], sys.exc_info()[1]))
        # NOTE(review): tokens is always reset to [] here and never filled
        # from tokens_reader, so access_wrapper divides by len([]) -- verify.
        self.tokens = []
    # Read lib/config/xxx_config.json
    def read_config(self, path):
        """Load the JSON rate-limit config; fall back to defaults on error."""
        if path is None:
            sys.stderr.write('Using default config...')
            self.config_filename = './default_%s_config.json' % self.service
            self.config_path = os.path.normpath(os.path.join(DEFAULT_CONFIG_DIR, self.config_filename))
        else:
            self.config_path = path
        try:
            self.config_reader = open(self.config_path)
            self.config = json.load(self.config_reader)
        except:
            sys.stderr.write("Unexpected error while reading %s:\n\t%s\n\t%s" % (self.config_path , sys.exc_info()[0], sys.exc_info()[1]))
            # Fallback config: 1000 ms base interval, no slowdown factor.
            self.config = {'interval': 1000, 'slow':1}
            sys.stderr.write('Using default config...')
    # Calculate actual interval and roop over tokens to wait and throw request
    def access_wrapper(self, option=None):
        """Wait until the rate limit allows a request, then issue one."""
        # Calculate actual time interval based on config.
        interval = float(self.config['slow']) * float(self.config['interval']) / float(len(self.tokens))
        # Wait until request allowed
        now = datetime.datetime.now()
        print 'Waiting %s[ms] ...' % interval
        while now <= self.last_request_time + datetime.timedelta(milliseconds=int(interval)):
            time.sleep(SLEEP_TIME)
            now = datetime.datetime.now()
        # Rotate through the tokens round-robin.
        index = self.access_count % len(self.tokens)
        token = self.tokens[index]
        result = self.single_access(token, option)
        return result
    # Simple wrapper for single request for each API request that returns a row response
    def single_access(self, token, option=None):
        """Perform one request with *token*; subclasses override this."""
        self.access_count += 1
        return token
| {
"content_hash": "85d5870f6964e62436ff9148be6f567d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 138,
"avg_line_length": 38.688311688311686,
"alnum_prop": 0.6196710305471634,
"repo_name": "AkihikoITOH/capybara",
"id": "6984fd82727bd5ee9c10a1ed3f89359efe2303e7",
"size": "3330",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "capybara/abst_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "44245"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "Groff",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "530"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "4240093"
},
{
"name": "Shell",
"bytes": "3855"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import csv
import os
import re
from datetime import datetime, timedelta
from io import StringIO
from unittest import mock
import boto3
import pytest
from zoneinfo import ZoneInfo
from sqlalchemy import func
from ichnaea.data.public import (
read_stations_from_csv,
write_stations_to_csv,
InvalidCSV,
)
from ichnaea.data.tasks import cell_export_full, cell_export_diff
from ichnaea.models import Radio, CellArea, CellShard, RegionStat
from ichnaea.taskapp.config import configure_data
from ichnaea.tests.factories import CellShardFactory
from ichnaea import util
UTC = ZoneInfo("UTC")
# Column order of the public cell export CSV files.
CELL_FIELDS = [
    "radio",
    "mcc",
    "mnc",
    "lac",
    "cid",
    "psc",
    "lon",
    "lat",
    "range",
    "samples",
    "changeable",
    "created",
    "updated",
    "averageSignal",
]
class FakeTask(object):
    """Minimal stand-in for a celery task, exposing only the app."""

    def __init__(self, app):
        """Store the celery *app* the fake task belongs to."""
        self.app = app
class TestExport(object):
    """Tests for the public cell export tasks."""
    def test_local_export(self, celery, session):
        """Exported CSV contains the current stations but skips incomplete
        and stale ones."""
        now = util.utcnow()
        today = now.date()
        long_ago = now - timedelta(days=367)
        cell_fixture_fields = ("radio", "cid", "lat", "lon", "mnc", "mcc", "lac")
        base_cell = CellShardFactory.build(radio=Radio.wcdma)
        cell_key = {
            "radio": Radio.wcdma,
            "mcc": base_cell.mcc,
            "mnc": base_cell.mnc,
            "lac": base_cell.lac,
        }
        cells = set()
        for cid in range(190, 200):
            cell = dict(cid=cid, lat=base_cell.lat, lon=base_cell.lon, **cell_key)
            CellShardFactory(**cell)
            # Mirror the CSV formatting so the sets compare as strings.
            cell["lat"] = "%.7f" % cell["lat"]
            cell["lon"] = "%.7f" % cell["lon"]
            cell["radio"] = "UMTS"
            cell_strings = [(field, str(value)) for (field, value) in cell.items()]
            cell_tuple = tuple(sorted(cell_strings))
            cells.add(cell_tuple)
        # add one incomplete / unprocessed cell
        CellShardFactory(cid=210, lat=None, lon=None, **cell_key)
        # add one really old cell
        CellShardFactory(
            cid=220,
            created=long_ago,
            modified=long_ago,
            last_seen=long_ago.date(),
            **cell_key,
        )
        session.commit()
        with util.selfdestruct_tempdir() as temp_dir:
            path = os.path.join(temp_dir, "export.csv.gz")
            write_stations_to_csv(session, path, today)
            with util.gzip_open(path, "r") as gzip_wrapper:
                with gzip_wrapper as gzip_file:
                    reader = csv.DictReader(gzip_file, CELL_FIELDS)
                    header = next(reader)
                    assert "area" in header.values()
                    exported_cells = set()
                    for exported_cell in reader:
                        exported_cell_filtered = [
                            (field, value)
                            for (field, value) in exported_cell.items()
                            if field in cell_fixture_fields
                        ]
                        exported_cell = tuple(sorted(exported_cell_filtered))
                        exported_cells.add(exported_cell)
                    assert cells == exported_cells
    def test_export_diff(self, celery, session):
        """Diff export uploads a correctly named file to S3."""
        CellShardFactory.create_batch(10, radio=Radio.gsm)
        session.commit()
        pattern = re.compile(r"MLS-diff-cell-export-\d+-\d+-\d+T\d+0000\.csv\.gz")
        # Stub out boto3 so no real S3 traffic happens.
        mock_conn = mock.MagicMock()
        mock_bucket = mock.MagicMock(name="bucket")
        mock_obj = mock.MagicMock()
        mock_conn.return_value.Bucket.return_value = mock_bucket
        mock_bucket.Object.return_value = mock_obj
        with mock.patch.object(boto3, "resource", mock_conn):
            cell_export_diff(_bucket="bucket")
        s3_key = mock_bucket.Object.call_args[0][0]
        assert pattern.search(s3_key)
        tmp_file = mock_obj.upload_file.call_args[0][0]
        assert pattern.search(tmp_file)
    def test_export_full(self, celery, session):
        """Full export uploads a correctly named file to S3."""
        now = util.utcnow()
        long_ago = now - timedelta(days=367)
        CellShardFactory.create_batch(10, radio=Radio.gsm)
        CellShardFactory(
            radio=Radio.gsm,
            created=long_ago,
            modified=long_ago,
            last_seen=long_ago.date(),
        )
        session.commit()
        pattern = re.compile(r"MLS-full-cell-export-\d+-\d+-\d+T000000\.csv\.gz")
        # Stub out boto3 so no real S3 traffic happens.
        mock_conn = mock.MagicMock()
        mock_bucket = mock.MagicMock(name="bucket")
        mock_obj = mock.MagicMock()
        mock_conn.return_value.Bucket.return_value = mock_bucket
        mock_bucket.Object.return_value = mock_obj
        with mock.patch.object(boto3, "resource", mock_conn):
            cell_export_full(_bucket="bucket")
        s3_key = mock_bucket.Object.call_args[0][0]
        assert pattern.search(s3_key)
        tmp_file = mock_obj.upload_file.call_args[0][0]
        assert pattern.search(tmp_file)
@pytest.fixture
def cellarea_queue(redis_client, celery):
    """Return the DataQueue for updating CellAreas by ID."""
    # configure_data wires all data queues; pick the cell-area update one.
    return configure_data(redis_client)["update_cellarea"]
class TestImport:
def test_unexpected_csv(self, session, redis_client, cellarea_queue):
"""An unexpected CSV input exits early."""
csv = StringIO(
"""\
region,name
US,United States
UK,United Kingdom
"""
)
with pytest.raises(InvalidCSV):
read_stations_from_csv(session, csv, redis_client, cellarea_queue)
    def test_new_stations(self, session, redis_client, cellarea_queue):
        """New stations are imported, creating cell areas and region stats."""
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
GSM,208,10,30014,20669,,2.5112670,46.5992450,0,78,1,1566307030,1570119413,
LTE,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220588,1570120328,
"""
        )
        read_stations_from_csv(session, csv, redis_client, cellarea_queue)
        # Check the details of the WCDMA station
        wcdma = session.query(CellShard.shard_model(Radio.wcdma)).one()
        assert wcdma.mcc == 202
        assert wcdma.mnc == 1
        assert wcdma.lac == 2120
        assert wcdma.cid == 12842
        assert wcdma.lat == 38.8574351
        assert wcdma.lon == 23.4123167
        # A new import has a zero radius: min/max collapse onto the point.
        assert wcdma.max_lat == wcdma.lat
        assert wcdma.min_lat == wcdma.lat
        assert wcdma.max_lon == wcdma.lon
        assert wcdma.min_lon == wcdma.lon
        assert wcdma.radius == 0
        assert wcdma.samples == 6
        # created/updated epoch seconds from the CSV row, as UTC datetimes.
        assert wcdma.created == datetime(2019, 9, 11, 16, 49, 24, tzinfo=UTC)
        assert wcdma.modified == datetime(2019, 10, 3, 16, 31, 56, tzinfo=UTC)
        assert wcdma.region == "GR"
        # Check the counts of the other station types
        gsm_model = CellShard.shard_model(Radio.gsm)
        assert session.query(func.count(gsm_model.cellid)).scalar() == 1
        lte_model = CellShard.shard_model(Radio.lte)
        assert session.query(func.count(lte_model.cellid)).scalar() == 1
        # New stations trigger the creation of new CellAreas
        cell_areas = session.query(CellArea).order_by(CellArea.areaid).all()
        area1, area2, area3 = cell_areas
        assert area1.areaid == (Radio.gsm, 208, 10, 30014)
        assert area2.areaid == (Radio.wcdma, 202, 1, 2120)
        assert area3.areaid == (Radio.lte, 202, 1, 2120)
        # New CellAreas trigger the creation of RegionStats
        stats = session.query(RegionStat).order_by("region").all()
        assert len(stats) == 2
        actual = [
            (stat.region, stat.gsm, stat.wcdma, stat.lte, stat.blue, stat.wifi)
            for stat in stats
        ]
        expected = [("FR", 1, 0, 0, 0, 0), ("GR", 0, 1, 1, 0, 0)]
        assert actual == expected
    def test_modified_station(self, session, redis_client, cellarea_queue):
        """A modified station updates existing records."""
        # Pre-existing station last modified 2019-01-01; the CSV row below
        # carries updated=1570120316 (2019-10-03), so the import is newer
        # and should overwrite the imported fields.
        station_data = {
            "radio": Radio.wcdma,
            "mcc": 202,
            "mnc": 1,
            "lac": 2120,
            "cid": 12842,
            "lat": 38.85,
            "lon": 23.41,
            "min_lat": 38.7,
            "max_lat": 38.9,
            "min_lon": 23.4,
            "max_lon": 23.5,
            "radius": 1,
            "samples": 1,
            "created": datetime(2019, 1, 1, tzinfo=UTC),
            "modified": datetime(2019, 1, 1, tzinfo=UTC),
        }
        station = CellShard.create(_raise_invalid=True, **station_data)
        session.add(station)
        session.flush()
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
"""
        )
        read_stations_from_csv(session, csv, redis_client, cellarea_queue)
        # Check the details of the updated station
        wcdma = session.query(CellShard.shard_model(Radio.wcdma)).one()
        # New position, other details from import
        assert wcdma.lat == 38.8574351
        assert wcdma.lon == 23.4123167
        assert wcdma.radius == 0
        assert wcdma.samples == 6
        assert wcdma.created == datetime(2019, 9, 11, 16, 49, 24, tzinfo=UTC)
        assert wcdma.modified == datetime(2019, 10, 3, 16, 31, 56, tzinfo=UTC)
        # Other details unchanged (the bounding box is not part of the CSV)
        assert wcdma.max_lat == station_data["max_lat"]
        assert wcdma.min_lat == station_data["min_lat"]
        assert wcdma.max_lon == station_data["max_lon"]
        assert wcdma.min_lon == station_data["min_lon"]
        assert wcdma.region == "GR"
        # A Modified station triggers the creation of a new CellArea
        cell_area = session.query(CellArea).order_by(CellArea.areaid).one()
        assert cell_area.areaid == (Radio.wcdma, 202, 1, 2120)
        # The new CellAreas triggers the creation of a RegionStat
        stat = session.query(RegionStat).order_by("region").one()
        assert stat.region == "GR"
        assert stat.wcdma == 1
    def test_outdated_station(self, session, redis_client, cellarea_queue):
        """An older station record does not update existing station records."""
        # Existing station last modified 2019-10-07, which is NEWER than the
        # CSV row's updated=1570120316 (2019-10-03) -- so the row is ignored.
        station_data = {
            "radio": Radio.wcdma,
            "mcc": 202,
            "mnc": 1,
            "lac": 2120,
            "cid": 12842,
            "lat": 38.85,
            "lon": 23.41,
            "radius": 1,
            "samples": 1,
            "created": datetime(2019, 1, 1, tzinfo=UTC),
            "modified": datetime(2019, 10, 7, tzinfo=UTC),
        }
        station = CellShard.create(_raise_invalid=True, **station_data)
        session.add(station)
        session.flush()
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
"""
        )
        read_stations_from_csv(session, csv, redis_client, cellarea_queue)
        # The existing station is unmodified
        wcdma = session.query(CellShard.shard_model(Radio.wcdma)).one()
        assert wcdma.lat == 38.85
        assert wcdma.lon == 23.41
        assert wcdma.created == datetime(2019, 1, 1, tzinfo=UTC)
        assert wcdma.modified == datetime(2019, 10, 7, tzinfo=UTC)
        # No CellAreas or RegionStats are generated
        assert session.query(func.count(CellArea.areaid)).scalar() == 0
        assert session.query(func.count(RegionStat.region)).scalar() == 0
    def test_unexpected_radio_halts(self, session, redis_client, cellarea_queue):
        """
        A row with an unexpected radio type halts processing of the CSV.
        The public CSV export is limited to a few types of radios, so an unexpected
        radio type suggests file corruption or other shenanigans.
        """
        # In row 3, 'WCDMA' is not a valid radio string -- the export spells
        # this radio type 'UMTS', as in row 2.
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
WCDMA,203,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
GSM,208,10,30014,20669,,2.5112670,46.5992450,0,78,1,1566307030,1570119413,
"""
        )
        # The whole import aborts; nothing before or after the bad row counts.
        with pytest.raises(InvalidCSV):
            read_stations_from_csv(session, csv, redis_client, cellarea_queue)
    def test_empty_radio_skipped(self, session, redis_client, cellarea_queue):
        """
        An empty string for the radio type causes the row to be skipped.
        The public CSV export encodes an unexpected radio type from the database
        as an empty string. We can't determine what radio type was expected.
        """
        # In row 3, the radio is an empty string
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
,203,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
GSM,208,10,30014,20669,,2.5112670,46.5992450,0,78,1,1566307030,1570119413,
"""
        )
        read_stations_from_csv(session, csv, redis_client, cellarea_queue)
        # The empty radio row is skipped, but the following row is processed.
        wcdma = session.query(CellShard.shard_model(Radio.wcdma)).one()
        assert wcdma.lat == 38.8574351
        assert wcdma.lon == 23.4123167
        gsm_model = CellShard.shard_model(Radio.gsm)
        assert session.query(func.count(gsm_model.cellid)).scalar() == 1
        # Both surviving rows produced their cell area and region stat.
        assert session.query(func.count(CellArea.areaid)).scalar() == 2
        assert session.query(func.count(RegionStat.region)).scalar() == 2
    def test_invalid_row_skipped(self, session, redis_client, cellarea_queue):
        """A row that fails validation is skipped."""
        # In GSM row, the longitude 202.5 is greater than max of 180
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
GSM,208,10,30014,20669,,202.5,46.5992450,0,78,1,1566307030,1570119413,
LTE,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220588,1570120328,
"""
        )
        read_stations_from_csv(session, csv, redis_client, cellarea_queue)
        # The invalid GSM row is skipped
        gsm_model = CellShard.shard_model(Radio.gsm)
        assert session.query(func.count(gsm_model.cellid)).scalar() == 0
        # The valid WCDMA and LTE rows are processed, and in the same region
        wcdma_model = CellShard.shard_model(Radio.wcdma)
        lte_model = CellShard.shard_model(Radio.lte)
        assert session.query(func.count(wcdma_model.cellid)).scalar() == 1
        assert session.query(func.count(lte_model.cellid)).scalar() == 1
        # Two areas (radio is part of the area id) but only one region stat.
        assert session.query(func.count(CellArea.areaid)).scalar() == 2
        assert session.query(func.count(RegionStat.region)).scalar() == 1
    def test_bad_data_skipped(self, session, redis_client, cellarea_queue):
        """A row that has invalid data (like a string for a number) is skipped."""
        # In GSM row, the mcc field should be a number, not a string
        csv = StringIO(
            """\
radio,mcc,net,area,cell,unit,lon,lat,range,samples,changeable,created,updated,averageSignal
UMTS,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220564,1570120316,
GSM,"MCC",10,30014,20669,,2.5112670,46.5992450,0,78,1,1566307030,1570119413,
LTE,202,1,2120,12842,,23.4123167,38.8574351,0,6,1,1568220588,1570120328,
"""
        )
        read_stations_from_csv(session, csv, redis_client, cellarea_queue)
        # The invalid GSM row is skipped (bad data, unlike the out-of-range
        # value covered by test_invalid_row_skipped)
        gsm_model = CellShard.shard_model(Radio.gsm)
        assert session.query(func.count(gsm_model.cellid)).scalar() == 0
        # The valid WCDMA and LTE rows are processed, and in the same region
        wcdma_model = CellShard.shard_model(Radio.wcdma)
        lte_model = CellShard.shard_model(Radio.lte)
        assert session.query(func.count(wcdma_model.cellid)).scalar() == 1
        assert session.query(func.count(lte_model.cellid)).scalar() == 1
        assert session.query(func.count(CellArea.areaid)).scalar() == 2
        assert session.query(func.count(RegionStat.region)).scalar() == 1
| {
"content_hash": "78bc9751911e969abee93fca3c20b9a1",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 91,
"avg_line_length": 39.214797136038186,
"alnum_prop": 0.6146308806524253,
"repo_name": "mozilla/ichnaea",
"id": "e13947f7a7d9301ec0e7163395f01152f5f683d0",
"size": "16431",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ichnaea/data/tests/test_public.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "34767"
},
{
"name": "Cython",
"bytes": "16678"
},
{
"name": "Dockerfile",
"bytes": "2819"
},
{
"name": "HTML",
"bytes": "32679"
},
{
"name": "JavaScript",
"bytes": "139102"
},
{
"name": "Makefile",
"bytes": "11673"
},
{
"name": "Mako",
"bytes": "432"
},
{
"name": "Python",
"bytes": "1007139"
},
{
"name": "Shell",
"bytes": "8899"
}
],
"symlink_target": ""
} |
from seriesbutler import datasources
from seriesbutler.datasources import TheTvDb
from seriesbutler.models import DataSourceException
from unittest import mock
import datetime
from vcr import VCR
import responses
import requests
import pytest
# Record/replay HTTP interactions via cassettes stored under build/.
vcr = VCR(cassette_library_dir='build/cassettes/datasources/')
@vcr.use_cassette()
def test_find_by_name_empty():
    """Searching with an empty string yields no matches."""
    matches = TheTvDb().find_by_name('')
    assert len(matches) == 0
@vcr.use_cassette()
def test_find_by_name_None():
    """Searching with ``None`` yields no matches."""
    matches = TheTvDb().find_by_name(None)
    assert len(matches) == 0
@vcr.use_cassette()
def test_find_by_name_single_match():
    """An exact title search yields exactly one (name, imdb-id) pair."""
    matches = TheTvDb().find_by_name('Brooklyn Nine-Nine')
    assert len(matches) == 1
    assert matches[0][0] == 'Brooklyn Nine-Nine'
    assert matches[0][1] == 'tt2467372'
@vcr.use_cassette()
def test_find_by_name_multi_match():
    """A generic title search yields several matches, best match first."""
    matches = TheTvDb().find_by_name('Family')
    assert len(matches) > 5
    assert matches[0][0] == 'Family'
    assert matches[0][1] == 'tt0073992'
@vcr.use_cassette()
def test_episodes_for_series_without_tvdbid():
    # When the series dict has no 'tvdb' key, the id is resolved from the
    # imdb id and cached back onto the dict.
    datasource = TheTvDb()
    series = {'name': 'Person of interest', 'imdb': 'tt1839578',
              'start_from': {'season': 1, 'episode': 1}}
    result = datasource.episodes_for_series(series)
    # The resolved tvdb id is stored on the series configuration itself.
    assert series['tvdb'] == '248742'
@responses.activate
def test_find_by_name_server_error():
    """A non-200 response from the search endpoint raises DataSourceException."""
    # Prepare a fake server error response
    responses.add(responses.GET, 'http://thetvdb.com/api/GetSeries.php',
                  status=404)
    datasource = TheTvDb()
    # Ensure an exception is thrown
    with pytest.raises(DataSourceException) as exceptionInfo:
        result = datasource.find_by_name('Breaking')
    # Assert exception message
    assert str(exceptionInfo.value) == 'Failed search by name (server error)'
def test_episodes_for_series_with_None():
    """Passing ``None`` instead of a series dict raises DataSourceException."""
    datasource = TheTvDb()
    with pytest.raises(DataSourceException) as excinfo:
        datasource.episodes_for_series(None)
    assert str(excinfo.value) == 'No Series configuration provided'
@vcr.use_cassette()
def test_episodes_for_series_happy_path():
    """A fully configured series yields its complete episode list."""
    series = {
        'name': 'Breaking Bad',
        'imdb': 'tt0903747',
        'tvdb': '81189',
        'start_from': {'season': 1, 'episode': 2},
    }
    episodes = TheTvDb().episodes_for_series(series)
    assert len(episodes) == 62
@vcr.use_cassette('../../../tests/data/no_aired_date.yml')
def test_episodes_for_series_no_aired_date():
    """Episodes without a first-aired date are excluded from the result."""
    datasource = TheTvDb()
    series = {'name': 'Person of interest', 'imdb': 'tt1839578',
              'tvdb': '248742', 'start_from': {'season': 1, 'episode': 1}}
    result = datasource.episodes_for_series(series)
    # There are 94 episodes listed - but only 90 have a date when they
    # were aired for the first time
    assert len(result) == 90
@vcr.use_cassette('../../../tests/data/not_aired_yet.yml')
def test_episodes_for_series_not_aired_yet(monkeyplus):
    # NOTE(review): the `monkeyplus` fixture parameter is never used in this
    # body -- confirm whether it is needed for its side effects.
    # Freeze datasources' view of "now" to 2015-04-06.
    datetime_patcher = mock.patch.object(
        datasources, 'datetime',
        mock.Mock(wraps=datetime.datetime)
    )
    mocked_datetime = datetime_patcher.start()
    mocked_datetime.now.return_value = datetime.datetime(2015, 4, 6)
    datasource = TheTvDb()
    series = {'name': 'Person of interest', 'imdb': 'tt1839578',
              'tvdb': '248742', 'start_from': {'season': 1, 'episode': 1}}
    result = datasource.episodes_for_series(series)
    # NOTE(review): stop() is not in a try/finally, so a failure above
    # would leak the patch into later tests.
    datetime_patcher.stop()
    # There are 94 episodes listed - but only 86 of these episodes were
    # aired before 2015-04-06
    assert len(result) == 86
@vcr.use_cassette('../../../tests/data/skip_specials.yml')
def test_episodes_for_series_skip_specials():
    """Special episodes are not part of the returned episode list."""
    datasource = TheTvDb()
    series = {'name': 'Last Week Tonight with John Oliver', 'tvdb': '278518',
              'imdb': 'tt3530232', 'start_from': {'season': 1, 'episode': 1}}
    result = datasource.episodes_for_series(series)
    # Only a lower bound is asserted since the show is ongoing.
    assert len(result) > 45
@vcr.use_cassette()
def test_episodes_for_series_without_tvdbid_invalid_imdb_id():
    """An imdb id with no unique tvdb mapping raises DataSourceException."""
    datasource = TheTvDb()
    series = {'name': '?', 'imdb': 'tt3530002',
              'start_from': {'season': 1, 'episode': 1}}
    # Ensure an exception is thrown
    with pytest.raises(DataSourceException) as exceptionInfo:
        result = datasource.episodes_for_series(series)
    # Assert exception message (the misspelled "recieve" matches the
    # message produced by the implementation under test)
    assert str(exceptionInfo.value) == ('Failed to recieve tvdb id: '
                                        'No unique match!')
@responses.activate
def test_episodes_for_series_server_error():
    """A non-200 response while resolving the tvdb id raises DataSourceException."""
    # Prepare a fake server error response
    responses.add(responses.GET, 'http://thetvdb.com/api/'
                                 'GetSeriesByRemoteID.php', status=404)
    datasource = TheTvDb()
    series = {'name': 'Last Week Tonight with John Oliver',
              'imdb': 'tt3530232', 'start_from': {'season': 1, 'episode': 1}}
    # Ensure an exception is thrown
    with pytest.raises(DataSourceException) as exceptionInfo:
        result = datasource.episodes_for_series(series)
    # Assert exception message
    assert str(exceptionInfo.value) == 'Failed to fetch tvdb id (server error)'
| {
"content_hash": "5f92042f7057321e154006dc1b5e8ba5",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 32.053254437869825,
"alnum_prop": 0.66069780321211,
"repo_name": "raphiz/seriesbutler",
"id": "5fcdfe01991c6f4e2c7245535952ac144bbe7101",
"size": "5417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/datasources_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51298"
}
],
"symlink_target": ""
} |
"""
Node.js Blueprint
=================
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.node
settings:
node:
# version: latest # Install latest node version
packages: # List of npm packages to install (Optional)
# - coffee-script
# - yuglify
# - less
"""
import io
import json
import os
from fabric.contrib import files
from fabric.context_managers import cd
from fabric.decorators import task
from refabric.api import info
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from refabric.operations import run
from .application.project import git_repository_path, \
sudo_project, user_name
from .util import maybe_managed
from . import debian
# Public fabric tasks exposed by this blueprint module.
__all__ = ['setup', 'configure']
# Settings accessor bound to this module's blueprint configuration.
blueprint = blueprints.get(__name__)
@task
def setup():
    """
    Setup Nodejs
    Installs node (from the latest tarball or from apt, depending on the
    blueprint ``version`` setting) and then runs configure().
    """
    install()
    configure()
@task
def configure():
    """
    Install npm packages and, if bower is in the packages,
    install bower dependencies.
    """
    install_packages()
    # install_dependencies() itself checks for package.json / bower.json
    # in the project repository before running npm / bower.
    install_dependencies()
def get_version():
    """Return the node version from the blueprint settings.

    'latest' selects a source build (see install()); any other value,
    including an unset one, falls back to the distribution package.
    """
    return blueprint.get('version')
def install(for_user=None):
    """
    Install node either from the latest upstream tarball or from apt.

    :param for_user: Optional user to run the tarball build/install as
        (only used for the 'latest' path).
    """
    version = get_version()
    if version == 'latest':
        info('Installing latest node from tarball', )
        # Build tools are installed system-wide (as root) first.
        with sudo():
            install_node_build_deps()
        if for_user is not None:
            cm = sudo(user=for_user)
        else:
            cm = None
        # maybe_managed(None) presumably enters no context -- see blues.util.
        with maybe_managed(cm):
            return install_latest()
    else:
        info('Installing node from apt')
        return install_deb()
def install_node_build_deps():
    """Install the apt packages needed to compile node from source."""
    info('Installing build tools')
    debian.apt_get_update()
    debian.apt_get('install', 'build-essential node-rimraf')
def install_latest():
    """Download, build and install the latest node + npm into ~/.local of
    the current (possibly sudo'ed) user.
    """
    info('Installing latest node and NPM for {user}', user=run('whoami').stdout)
    # Shared shell preamble: trace/verbose mode plus the PREFIX/PROFILE/SRC
    # variables used by the command lists below.
    common = [
        'set -x',
        'set -o verbose',
        'eval PREFIX=~/.local',
        'eval PROFILE=~/.bash_profile',
        'eval SRC=~/node-latest-install',
        'source $PROFILE',
    ]
    # One-time environment setup, persisted in ~/.bash_profile.
    setup_env = [
        'echo \'export PATH=$HOME/.local/bin:$PATH\' >> $PROFILE',
        'echo \'export npm_config_userconfig=$HOME/.config/npmrc\' >> $PROFILE',
        'source $PROFILE',
        'mkdir $PREFIX || true',
        'mkdir $SRC || true'
    ]
    run(' && '.join(common + setup_env), shell=True)
    # Fetch (curl -z only re-downloads when newer), build with the user
    # prefix, then install npm via its upstream install script.
    install_node_and_npm = [
        'cd $SRC',
        ('curl -z node-latest.tar.gz'
         ' -O http://nodejs.org/dist/node-latest.tar.gz'),
        'tar xz --strip-components=1 --file node-latest.tar.gz',
        './configure --prefix=$PREFIX',
        'make install',
        'curl -L https://www.npmjs.org/install.sh | sh'
    ]
    run(' && '.join(common + install_node_and_npm), shell=True)
def install_deb():
    """Install node (and npm, where packaged separately) from apt."""
    with sudo():
        lbs_release = debian.lbs_release()
        # 12.04 ships with really old nodejs, TODO: 14.04?
        if lbs_release in ['10.04', '12.04']:
            info('Adding ppa...')
            debian.add_apt_ppa('chris-lea/node.js', src=True)
        info('Installing Node.js')
        debian.apt_get('install', 'nodejs')
        if lbs_release in ['14.04', '16.04']:
            info('Installing NPM')
            debian.apt_get('install', 'npm')
            # The distro binary is named "nodejs"; also provide the
            # conventional "node" name.
            debian.ln('/usr/bin/nodejs', '/usr/bin/node')
def install_packages():
    """Globally install the npm packages listed in the blueprint, if any."""
    requested = blueprint.get('packages', [])
    if not requested:
        return
    info('Installing Packages')
    npm('install', *requested)
def npm(command, *options):
    """Run an npm command globally (-g) with sudo."""
    info('Running npm {}', command)
    joined_options = ' '.join(options)
    with sudo():
        run('npm {0} -g {1}'.format(command, joined_options))
def install_dependencies(path=None, production=True, changed=True):
    """
    Install dependencies from "package.json" (npm) and "bower.json" (bower)
    found at ``path``.
    :param path: Package path, the project git repository if None.
        [default: None]
    :param production:
        Boolean flag to toggle `--production` parameter for npm
    :param changed:
        Boolean flag or tuple of two commit sha to check if package.json and
        bower.json were changed; unchanged manifests skip the corresponding
        install step.
    :return: None
    """
    dependency_path_root = path or git_repository_path()

    def has_file(name):
        # Manifest lookup relative to the dependency root.
        return files.exists(os.path.join(dependency_path_root, name))

    has_package = has_file('package.json')
    has_bower = has_file('bower.json')
    with sudo_project(), cd(dependency_path_root):
        npm_changed = bower_changed = changed
        if isinstance(changed, tuple):  # i.e. commits: (from_sha, to_sha)
            changed = '{}..{}'.format(*changed)
            from blues import git
            # NOTE(review): diffs are always taken against
            # git_repository_path(), not dependency_path_root -- confirm
            # that is intended when a custom ``path`` is passed.
            if has_package:
                npm_changed = git.diff_stat(
                    git_repository_path(), changed, 'package.json')[0]
            if has_bower:
                bower_changed = git.diff_stat(
                    git_repository_path(), changed, 'bower.json')[0]
        if has_package and npm_changed:
            run('npm install' + (' --production' if production else ''))
        if has_bower and bower_changed:
            run('test -f bower.json && '
                'bower install --config.interactive=false')
def create_symlinks(npm_path='../node_modules',
                    bower_path='../bower_components',
                    bowerrc_path='.bowerrc',
                    clear=False):
    """
    Symlink shared npm/bower component directories into the project repo.

    :param npm_path: Real node_modules location relative to the repo root
        (falsy skips the link).
    :param bower_path: Real bower components location relative to the repo
        root (falsy skips the link).
    :param bowerrc_path: .bowerrc file whose "directory" key may override
        the bower link destination.
    :param clear: Remove the source directory first when True.
    """
    with cd(git_repository_path()), sudo_project():
        # get bower components dir from config file
        bower_destination = 'bower_components'
        fd = io.BytesIO()
        files_ = files.get(bowerrc_path, fd)
        if files_.succeeded:
            bower_destination = json.loads(fd.getvalue()).get(
                'directory', bower_destination)
        # (source outside the repo, destination inside the repo)
        links = [
            (npm_path, './'),
            (bower_path, bower_destination),
        ]
        for src, dst in links:
            if src:
                src = os.path.abspath(os.path.join(git_repository_path(), src))
                dst = os.path.abspath(os.path.join(git_repository_path(), dst))
                if clear:
                    debian.rm(src, recursive=True, force=True)
                # Ensure the real directory exists and is owned by the
                # project user before linking.
                debian.mkdir(src, recursive=True, owner=user_name())
                debian.ln(src, dst, symbolic=True)
| {
"content_hash": "01462062bbe36fd1c1ce3541c017f2a7",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 80,
"avg_line_length": 26.45531914893617,
"alnum_prop": 0.5731059996783014,
"repo_name": "5monkeys/blues",
"id": "cf8772b99fb190b5346962e1f2e573d29479efed",
"size": "6217",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "blues/node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2300"
},
{
"name": "Makefile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "234933"
},
{
"name": "Shell",
"bytes": "2706"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from collections import defaultdict
from itertools import chain
from types import MappingProxyType
from typing import Any
from typing import Callable
from typing import Sequence
from dynaconf import validator_conditions
from dynaconf.utils import ensure_a_list
from dynaconf.utils.functional import empty
# Attributes compared by Validator.__eq__ and mirrored onto
# CombinedValidator instances (see CombinedValidator.__init__).
EQUALITY_ATTRS = (
    "names",
    "must_exist",
    "when",
    "condition",
    "operations",
    "envs",
)
class ValidationError(Exception):
    """Raised when a validation fails.

    ``details`` holds ``(validator, message)`` pairs gathered during
    validation; it defaults to an empty list.
    """

    def __init__(self, message: str, *args, **kwargs):
        # Consume "details" here so Exception.__init__ never sees it.
        self.details = kwargs.pop("details", [])
        self.message = message
        super().__init__(message, *args, **kwargs)
class Validator:
    """Validators are conditions attached to settings variables names
    or patterns::
        Validator('MESSAGE', must_exist=True, eq='Hello World')
    The above ensure MESSAGE is available in default env and
    is equal to 'Hello World'
    `names` are one (or more) names or patterns::
        Validator('NAME')
        Validator('NAME', 'OTHER_NAME', 'EVEN_OTHER')
        Validator(r'^NAME', r'OTHER./*')
    The `operations` are::
        eq: value == other
        ne: value != other
        gt: value > other
        lt: value < other
        gte: value >= other
        lte: value <= other
        is_type_of: isinstance(value, type)
        is_in: value in sequence
        is_not_in: value not in sequence
        identity: value is other
        cont: contain value in
        len_eq: len(value) == other
        len_ne: len(value) != other
        len_min: len(value) > other
        len_max: len(value) < other
    `env` is which env to be checked, can be a list or
    default is used.
    `when` holds a validator and its return decides if validator runs or not::
        Validator('NAME', must_exist=True, when=Validator('OTHER', eq=2))
        # NAME is required only if OTHER eq to 2
        # When the very first thing to be performed when passed.
        # if no env is passed to `when` it is inherited
    `must_exist` is alias to `required` requirement. (executed after when)::
        settings.get(value, empty) returns non empty
    condition is a callable to be executed and return boolean::
        Validator('NAME', condition=lambda x: x == 1)
        # it is executed before operations.
    """
    # Class-level defaults are wrapped in MappingProxyType so they stay
    # read-only; each instance copies them into a mutable dict in __init__.
    default_messages = MappingProxyType(
        {
            "must_exist_true": "{name} is required in env {env}",
            "must_exist_false": "{name} cannot exists in env {env}",
            "condition": "{name} invalid for {function}({value}) in env {env}",
            "operations": (
                "{name} must {operation} {op_value} "
                "but it is {value} in env {env}"
            ),
            "combined": "combined validators failed {errors}",
        }
    )
    def __init__(
        self,
        *names: str,
        must_exist: bool | None = None,
        required: bool | None = None,  # alias for `must_exist`
        condition: Callable[[Any], bool] | None = None,
        when: Validator | None = None,
        env: str | Sequence[str] | None = None,
        messages: dict[str, str] | None = None,
        cast: Callable[[Any], Any] | None = None,
        default: Any | Callable[[Any, Validator], Any] | None = empty,
        description: str | None = None,
        apply_default_on_none: bool | None = False,
        **operations: Any,
    ) -> None:
        # Copy immutable MappingProxyType as a mutable dict
        self.messages = dict(self.default_messages)
        if messages:
            self.messages.update(messages)
        if when is not None and not isinstance(when, Validator):
            raise TypeError("when must be Validator instance")
        if condition is not None and not callable(condition):
            raise TypeError("condition must be callable")
        self.names = names
        # `required` is only honored when `must_exist` was not given.
        self.must_exist = must_exist if must_exist is not None else required
        self.condition = condition
        self.when = when
        # Identity cast by default, so self.cast is always callable.
        self.cast = cast or (lambda value: value)
        self.operations = operations
        self.default = default
        self.description = description
        self.envs: Sequence[str] | None = None
        self.apply_default_on_none = apply_default_on_none
        # Normalize `env` into a list of names; None means "bind lazily to
        # the settings' current env when validate() runs".
        if isinstance(env, str):
            self.envs = [env]
        elif isinstance(env, (list, tuple)):
            self.envs = env
    def __or__(self, other: Validator) -> CombinedValidator:
        """Combine with ``|``: at least one side must pass."""
        return OrValidator(self, other, description=self.description)
    def __and__(self, other: Validator) -> CombinedValidator:
        """Combine with ``&``: both sides must pass."""
        return AndValidator(self, other, description=self.description)
    def __eq__(self, other: object) -> bool:
        if self is other:
            return True
        # Compare concrete class names so e.g. an OrValidator never
        # equals a plain Validator.
        if type(self).__name__ != type(other).__name__:
            return False
        identical_attrs = (
            getattr(self, attr) == getattr(other, attr)
            for attr in EQUALITY_ATTRS
        )
        if all(identical_attrs):
            return True
        return False
    def validate(
        self,
        settings: Any,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
        only_current_env: bool = False,
    ) -> None:
        """Raise ValidationError if invalid"""
        # If only or exclude are not set, this value always passes startswith
        only = ensure_a_list(only or [""])
        if only and not isinstance(only[0], str):
            raise ValueError("'only' must be a string or list of strings.")
        exclude = ensure_a_list(exclude)
        if exclude and not isinstance(exclude[0], str):
            raise ValueError("'exclude' must be a string or list of strings.")
        if self.envs is None:
            # Lazily bind to the active env on first validation.
            self.envs = [settings.current_env]
        if self.when is not None:
            try:
                # inherit env if not defined
                if self.when.envs is None:
                    self.when.envs = self.envs
                self.when.validate(settings, only=only, exclude=exclude)
            except ValidationError:
                # if when is invalid, return canceling validation flow
                return
        if only_current_env:
            # Case-insensitive membership test against the configured envs.
            if settings.current_env.upper() in map(
                lambda s: s.upper(), self.envs
            ):
                self._validate_items(
                    settings, settings.current_env, only=only, exclude=exclude
                )
            return
        # If only using current_env, skip using_env decoration (reload)
        if (
            len(self.envs) == 1
            and self.envs[0].upper() == settings.current_env.upper()
        ):
            self._validate_items(
                settings, settings.current_env, only=only, exclude=exclude
            )
            return
        for env in self.envs:
            self._validate_items(
                settings.from_env(env), only=only, exclude=exclude
            )
    def _validate_items(
        self,
        settings: Any,
        env: str | None = None,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
    ) -> None:
        """Run the configured checks for every name against ``settings``.

        Raises ValidationError on the first failing name/check.
        """
        env = env or settings.current_env
        for name in self.names:
            # Skip if only is set and name isn't in the only list
            if only and not any(name.startswith(sub) for sub in only):
                continue
            # Skip if exclude is set and name is in the exclude list
            if exclude and any(name.startswith(sub) for sub in exclude):
                continue
            if self.default is not empty:
                # Callable defaults are resolved with (settings, validator).
                default_value = (
                    self.default(settings, self)
                    if callable(self.default)
                    else self.default
                )
            else:
                default_value = empty
            # setdefault stores the default (if any) and returns the value;
            # the configured cast is applied before any check runs.
            value = self.cast(
                settings.setdefault(
                    name,
                    default_value,
                    apply_default_on_none=self.apply_default_on_none,
                )
            )
            # is name required but not exists?
            if self.must_exist is True and value is empty:
                _message = self.messages["must_exist_true"].format(
                    name=name, env=env
                )
                raise ValidationError(_message, details=[(self, _message)])
            if self.must_exist is False and value is not empty:
                _message = self.messages["must_exist_false"].format(
                    name=name, env=env
                )
                raise ValidationError(_message, details=[(self, _message)])
            # Missing and not required: nothing further to check.
            if self.must_exist in (False, None) and value is empty:
                continue
            # is there a callable condition?
            if self.condition is not None:
                if not self.condition(value):
                    _message = self.messages["condition"].format(
                        name=name,
                        function=self.condition.__name__,
                        value=value,
                        env=env,
                    )
                    raise ValidationError(_message, details=[(self, _message)])
            # operations
            for op_name, op_value in self.operations.items():
                # Operation names map to functions in validator_conditions.
                op_function = getattr(validator_conditions, op_name)
                if not op_function(value, op_value):
                    _message = self.messages["operations"].format(
                        name=name,
                        operation=op_function.__name__,
                        op_value=op_value,
                        value=value,
                        env=env,
                    )
                    raise ValidationError(_message, details=[(self, _message)])
class CombinedValidator(Validator):
    """Base class for validators built with ``|`` (or) and ``&`` (and)."""
    def __init__(
        self,
        validator_a: Validator,
        validator_b: Validator,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Takes 2 validators and combines the validation"""
        self.validators = (validator_a, validator_b)
        super().__init__(*args, **kwargs)
        # Mirror the equality attributes from the wrapped validators so
        # __eq__ (and ValidatorList deduplication) keep working.
        for attr in EQUALITY_ATTRS:
            if not getattr(self, attr, None):
                value = tuple(
                    getattr(validator, attr) for validator in self.validators
                )
                setattr(self, attr, value)
    def validate(
        self,
        settings: Any,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
        only_current_env: bool = False,
    ) -> None:  # pragma: no cover
        raise NotImplementedError(
            "subclasses OrValidator or AndValidator implements this method"
        )
class OrValidator(CombinedValidator):
    """Evaluates on Validator() | Validator()"""
    def validate(
        self,
        settings: Any,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
        only_current_env: bool = False,
    ) -> None:
        """Ensure at least one of the validators are valid"""
        errors = []
        for validator in self.validators:
            try:
                validator.validate(
                    settings,
                    only=only,
                    exclude=exclude,
                    only_current_env=only_current_env,
                )
            except ValidationError as e:
                errors.append(e)
                continue
            else:
                # First success short-circuits: the combination passes.
                return
        # Every side failed -- join their messages into one error.
        _message = self.messages["combined"].format(
            errors=" or ".join(
                str(e).replace("combined validators failed ", "")
                for e in errors
            )
        )
        raise ValidationError(_message, details=[(self, _message)])
class AndValidator(CombinedValidator):
    """Evaluates on Validator() & Validator()"""
    def validate(
        self,
        settings: Any,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
        only_current_env: bool = False,
    ) -> None:
        """Ensure both the validators are valid"""
        errors = []
        # Unlike OrValidator, every side is always evaluated so all
        # failures are collected before raising.
        for validator in self.validators:
            try:
                validator.validate(
                    settings,
                    only=only,
                    exclude=exclude,
                    only_current_env=only_current_env,
                )
            except ValidationError as e:
                errors.append(e)
                continue
        if errors:
            _message = self.messages["combined"].format(
                errors=" and ".join(
                    str(e).replace("combined validators failed ", "")
                    for e in errors
                )
            )
            raise ValidationError(_message, details=[(self, _message)])
class ValidatorList(list):
    """A list of validators bound to a settings object."""
    def __init__(
        self,
        settings: Any,
        validators: Sequence[Validator] | None = None,
        *args: Validator,
        **kwargs: Any,
    ) -> None:
        # Validators may be given positionally or via `validators`.
        if isinstance(validators, (list, tuple)):
            args = list(args) + list(validators)  # type: ignore
        # Stored for later use; not consumed by this class itself --
        # presumably read by the settings machinery. TODO confirm.
        self._only = kwargs.pop("validate_only", None)
        self._exclude = kwargs.pop("validate_exclude", None)
        super().__init__(args, **kwargs)  # type: ignore
        self.settings = settings
    def register(self, *args: Validator, **kwargs: Validator):
        """Append validators, skipping duplicates (per Validator.__eq__)."""
        validators: list[Validator] = list(
            chain.from_iterable(kwargs.values())  # type: ignore
        )
        validators.extend(args)
        for validator in validators:
            if validator and validator not in self:
                self.append(validator)
    def descriptions(self, flat: bool = False) -> dict[str, str | list[str]]:
        """Map each validated name to its validator description(s).

        With ``flat=True`` only the first description seen per name is kept.
        """
        if flat:
            descriptions: dict[str, str | list[str]] = {}
        else:
            descriptions = defaultdict(list)
        for validator in self:
            for name in validator.names:
                # CombinedValidator stores names as a tuple of tuples;
                # use the first entry in that case.
                if isinstance(name, tuple) and len(name) > 0:
                    name = name[0]
                if flat:
                    descriptions.setdefault(name, validator.description)
                else:
                    descriptions[name].append(  # type: ignore
                        validator.description
                    )
        return descriptions
    def validate(
        self,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
        only_current_env: bool = False,
    ) -> None:
        """Run every validator; raise on the first failure."""
        for validator in self:
            validator.validate(
                self.settings,
                only=only,
                exclude=exclude,
                only_current_env=only_current_env,
            )
    def validate_all(
        self,
        only: str | Sequence | None = None,
        exclude: str | Sequence | None = None,
        only_current_env: bool = False,
    ) -> None:
        """Run every validator, collecting all failures before raising."""
        errors = []
        details = []
        for validator in self:
            try:
                validator.validate(
                    self.settings,
                    only=only,
                    exclude=exclude,
                    only_current_env=only_current_env,
                )
            except ValidationError as e:
                errors.append(e)
                details.append((validator, str(e)))
                continue
        if errors:
            # One aggregate error carrying every (validator, message) pair.
            raise ValidationError(
                "; ".join(str(e) for e in errors), details=details
            )
| {
"content_hash": "81c1cd938717c65d69e8dd1fc083c5df",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 79,
"avg_line_length": 32.922105263157896,
"alnum_prop": 0.5300549942447883,
"repo_name": "rochacbruno/dynaconf",
"id": "f3c581d460e38a1f6375df9fd0f0a749d73983ae",
"size": "15638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynaconf/validator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2867"
},
{
"name": "Makefile",
"bytes": "11505"
},
{
"name": "Python",
"bytes": "1438471"
},
{
"name": "Shell",
"bytes": "14740"
}
],
"symlink_target": ""
} |
"""
Author: Mohamed K. Eid (mohamedkeid@gmail.com)
Description: stylizes an image using a generative model trained on a particular style
Args:
--input: path to the input image you'd like to apply a style to
--style: name of style (found in 'lib/generators') to apply to the input
--out: path to where the stylized image will be created
--styles: lists trained models available
"""
import argparse
import os
import time
import tensorflow as tf
import generator
import helpers
# Loss term weights (not referenced in this script; presumably used when
# training the generator -- TODO confirm)
CONTENT_WEIGHT = 1.
STYLE_WEIGHT = 3.
TV_WEIGHT = .1
# Default image paths
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
TRAINED_MODELS_PATH = DIR_PATH + '/../lib/generators/'
# INPUT_PATH and STYLE are filled in by parse_args(); OUT_PATH defaults
# to a timestamped file name under output/.
INPUT_PATH, STYLE = None, None
OUT_PATH = DIR_PATH + '/../output/out_%.0f.jpg' % time.time()
# Ensure the output directory exists before anything is rendered.
if not os.path.isdir(DIR_PATH + '/../output'):
    os.makedirs(DIR_PATH + '/../output')
# Parse arguments and assign them to their respective global variables
def parse_args():
    """Parse CLI flags into the INPUT_PATH/STYLE/OUT_PATH globals.

    Exits the process when --styles is given (after listing styles) or
    when --input/--style are missing (after printing usage).
    """
    global INPUT_PATH, STYLE, OUT_PATH
    # Create flags
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', help="path to the input image you'd like to apply a style to")
    parser.add_argument('--style', help="name of style (found in 'lib/generators') to apply to the input")
    parser.add_argument('--out', default=OUT_PATH, help="path to where the stylized image will be created")
    parser.add_argument('--styles', action="store_true", help="list available styles")
    args = parser.parse_args()
    # Assign image paths from the arg parsing
    if args.input and args.style:
        INPUT_PATH = os.path.abspath(args.input)
        STYLE = args.style
        OUT_PATH = args.out
    else:
        if args.styles:
            list_styles()
            exit(0)
        else:
            parser.print_usage()
            exit(1)
# Print the name of every trained generator model found on disk.
def list_styles():
    print("Available styles:")
    for entry in os.listdir(TRAINED_MODELS_PATH):
        # Each style is stored as a directory of checkpoint files
        if os.path.isdir(TRAINED_MODELS_PATH + entry):
            print(entry)
parse_args()
# Entry point: restore the generator trained on STYLE and run one forward
# pass over INPUT_PATH, writing the stylized result to OUT_PATH.
with tf.Session() as sess:
    # Bail out early if no model directory exists for the requested style
    if not os.path.isdir(TRAINED_MODELS_PATH + STYLE):
        print("No trained model with the style '%s' was found." % STYLE)
        list_styles()
        exit(1)
    # Load the image to be stylized and add a batch dimension so it matches
    # the (batch, height, width, channels) input the generator expects
    input_img, _ = helpers.load_img(INPUT_PATH)
    input_img = tf.convert_to_tensor(input_img, dtype=tf.float32)
    input_img = tf.expand_dims(input_img, axis=0)
    # Build a fresh generative net under the 'generator' scope so its
    # variable names line up with those saved in the checkpoint
    with tf.variable_scope('generator'):
        gen = generator.Generator()
        gen.build(tf.convert_to_tensor(input_img))
        sess.run(tf.global_variables_initializer())
    # Restore the previously trained weights for the chosen style
    ckpt_dir = TRAINED_MODELS_PATH + STYLE
    saved_path = ckpt_dir + "/{}".format(STYLE)
    saver = tf.train.Saver()
    saver.restore(sess, saved_path)
    # Run the forward pass to produce the stylized image
    img = sess.run(gen.output)
    # Save the generated image and close the tf session
    helpers.render(img, path_out=OUT_PATH)
    sess.close()
| {
"content_hash": "7f914faed74782ec9849278739b73c32",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 107,
"avg_line_length": 31.8989898989899,
"alnum_prop": 0.6567447751741609,
"repo_name": "mohamedkeid/Feed-Forward-Style-Transfer",
"id": "afb7c16e074450715fd2eec78b7496f0a22d6424",
"size": "3177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36700"
}
],
"symlink_target": ""
} |
from django.contrib.staticfiles.testing import LiveServerTestCase, StaticLiveServerTestCase
from django.core.urlresolvers import reverse
from django.conf import settings
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class BasicFirefox(StaticLiveServerTestCase):
    """
    Browser-driven unit tests, run against Firefox by default.

    Run the same suite in another browser by subclassing and overriding
    newDriver().
    """
    @staticmethod
    def newDriver():
        """
        Create the WebDriver instance; override in a subclass to target
        another browser.
        """
        return webdriver.Firefox()
    @classmethod
    def setUpClass(cls):
        cls.driver = cls.newDriver()
        super().setUpClass()
    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        super().tearDownClass()
    def url(self, path):
        """
        Build the full URL (http://server/path) for the given absolute path.
        TODO FIX: The default 'self.live_server_url' (localhost:8081) does not find static/bower_components,
        as they are not part of any application. Directing traffic to Nginx.
        """
        return '{}{}'.format(settings.LIVE_SERVER_URL, path)
    def test_javascript_unit_tests(self):
        # Load the QUnit page and wait for either a pass or a fail banner
        self.driver.get(self.url(reverse('test')))
        banner_selector = 'h2#qunit-banner.qunit-fail, h2#qunit-banner.qunit-pass'
        waiter = WebDriverWait(self.driver, 10)
        banner = waiter.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, banner_selector)))
        # The banner's class tells us whether the JS suite passed
        self.assertEqual(banner.get_attribute('class'), 'qunit-pass')
class BasicChromium(BasicFirefox):
    """Run the same browser test suite against Chromium/Chrome."""
    @staticmethod
    def newDriver():
        # Travis CI containers require Chrome to run without the sandbox
        if os.getenv('TRAVIS') != 'true':
            return webdriver.Chrome()
        opts = Options()
        opts.add_argument('--no-sandbox')
        return webdriver.Chrome(chrome_options=opts)
| {
"content_hash": "23eb83bb8a3afc52d772236cb36c35a7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 108,
"avg_line_length": 32.516129032258064,
"alnum_prop": 0.6755952380952381,
"repo_name": "futurice/vimma2",
"id": "ec52f481d1ec353d6f39052f2424e70ae724ac38",
"size": "2016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimma/test_live.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10649"
},
{
"name": "HTML",
"bytes": "93105"
},
{
"name": "JavaScript",
"bytes": "102208"
},
{
"name": "Python",
"bytes": "286022"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.