text stringlengths 4 1.02M | meta dict |
|---|---|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``clm`` app.

    ``forwards()`` creates the Cluster, User, Key, Message, News, Group and
    UserGroup tables plus a (user, group) uniqueness constraint;
    ``backwards()`` drops them again.

    NOTE(review): this file looks auto-generated by South's
    ``schemamigration`` command -- the ``models`` dict below is the frozen
    model state and must stay in sync with the table definitions.
    """

    def forwards(self, orm):
        """Create all ``clm`` tables and the UserGroup unique constraint."""
        # Adding model 'Cluster'
        db.create_table(u'clm_cluster', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('address', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('port', self.gf('django.db.models.fields.IntegerField')()),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
            ('state', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('clm', ['Cluster'])

        # Adding model 'User'
        db.create_table(u'clm_user', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('first', self.gf('django.db.models.fields.CharField')(max_length=63)),
            ('last', self.gf('django.db.models.fields.CharField')(max_length=63)),
            # SET_NULL: deleting a Cluster must not cascade-delete its users.
            ('default_cluster', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clm.Cluster'], null=True,
                                                                                      on_delete=models.SET_NULL,
                                                                                      blank=True)),
            ('login', self.gf('django.db.models.fields.CharField')(unique=True, max_length=63)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('email', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('act_key', self.gf('django.db.models.fields.CharField')(max_length=63, null=True, blank=True)),
            ('organization', self.gf('django.db.models.fields.CharField')(max_length=63)),
            ('is_active', self.gf('django.db.models.fields.IntegerField')()),
            ('is_superuser', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('activation_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('last_login_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('clm', ['User'])

        # Adding model 'Key'
        db.create_table(u'clm_key', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clm.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=45)),
            ('fingerprint', self.gf('django.db.models.fields.CharField')(max_length=47)),
            ('data', self.gf('django.db.models.fields.TextField')()),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('clm', ['Key'])

        # Adding model 'Message'
        db.create_table(u'clm_message', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clm.User'])),
            ('code', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('params', self.gf('django.db.models.fields.CharField')(max_length=2048)),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('level', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('clm', ['Message'])

        # Adding model 'News'
        db.create_table(u'clm_news', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('topic', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('content', self.gf('django.db.models.fields.TextField')()),
            ('sticky', self.gf('django.db.models.fields.IntegerField')()),
            ('date', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal('clm', ['News'])

        # Adding model 'Group'
        db.create_table(u'clm_group', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            # related_name avoids clashing with the User<->Group M2M reverse accessor.
            ('leader', self.gf('django.db.models.fields.related.ForeignKey')(related_name='group_leader_set',
                                                                             to=orm['clm.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=45)),
            ('desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('clm', ['Group'])

        # Adding model 'UserGroup' (through-table of the Group.users M2M)
        db.create_table(u'clm_usergroup', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clm.User'])),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clm.Group'])),
            ('status', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('clm', ['UserGroup'])

        # Adding unique constraint on 'UserGroup', fields ['user', 'group']
        db.create_unique(u'clm_usergroup', ['user_id', 'group_id'])

    def backwards(self, orm):
        """Reverse the migration: drop the constraint first, then all tables."""
        # Removing unique constraint on 'UserGroup', fields ['user', 'group']
        db.delete_unique(u'clm_usergroup', ['user_id', 'group_id'])

        # Deleting model 'Cluster'
        db.delete_table(u'clm_cluster')

        # Deleting model 'User'
        db.delete_table(u'clm_user')

        # Deleting model 'Key'
        db.delete_table(u'clm_key')

        # Deleting model 'Message'
        db.delete_table(u'clm_message')

        # Deleting model 'News'
        db.delete_table(u'clm_news')

        # Deleting model 'Group'
        db.delete_table(u'clm_group')

        # Deleting model 'UserGroup'
        db.delete_table(u'clm_usergroup')

    # Frozen ORM state South uses to build the `orm` object passed to
    # forwards()/backwards(). Auto-generated; do not edit by hand.
    models = {
        'clm.cluster': {
            'Meta': {'object_name': 'Cluster'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'state': ('django.db.models.fields.IntegerField', [], {})
        },
        'clm.group': {
            'Meta': {'object_name': 'Group'},
            'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'leader': ('django.db.models.fields.related.ForeignKey', [],
                       {'related_name': "'group_leader_set'", 'to': "orm['clm.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [],
                      {'to': "orm['clm.User']", 'through': "orm['clm.UserGroup']", 'symmetrical': 'False'})
        },
        'clm.key': {
            'Meta': {'object_name': 'Key'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'data': ('django.db.models.fields.TextField', [], {}),
            'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '47'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clm.User']"})
        },
        'clm.message': {
            'Meta': {'object_name': 'Message'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.IntegerField', [], {}),
            'params': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clm.User']"})
        },
        'clm.news': {
            'Meta': {'object_name': 'News'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sticky': ('django.db.models.fields.IntegerField', [], {}),
            'topic': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'clm.user': {
            'Meta': {'object_name': 'User'},
            'act_key': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
            'activation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'default_cluster': ('django.db.models.fields.related.ForeignKey', [],
                                {'to': "orm['clm.Cluster']", 'null': 'True', 'on_delete': 'models.SET_NULL',
                                 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'first': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.IntegerField', [], {}),
            'is_superuser': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'last': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
            'last_login_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'login': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '63'}),
            'organization': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'clm.usergroup': {
            'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'UserGroup'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clm.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clm.User']"})
        }
    }

    complete_apps = ['clm']
| {
"content_hash": "1978d59180e43b90ac81c720a83aae11",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 120,
"avg_line_length": 58.3421052631579,
"alnum_prop": 0.5461434370771313,
"repo_name": "Dev-Cloud-Platform/Dev-Cloud",
"id": "70c931a69656084eb2508c001119441641bada7d",
"size": "11783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev_cloud/cc1/src/clm/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "2622"
},
{
"name": "CSS",
"bytes": "2378722"
},
{
"name": "HTML",
"bytes": "1436411"
},
{
"name": "JavaScript",
"bytes": "2969320"
},
{
"name": "PHP",
"bytes": "13292"
},
{
"name": "Python",
"bytes": "1841054"
},
{
"name": "Shell",
"bytes": "22489"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import ListableAPIResource
class Account(ListableAPIResource):
    """
    A Financial Connections Account represents an account that exists outside of Stripe, to which you have been granted some degree of access.
    """

    OBJECT_NAME = "financial_connections.account"

    @classmethod
    def _cls_disconnect(
        cls,
        account,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        # Build the endpoint first so the request call reads linearly.
        url = "/v1/financial_connections/accounts/%s/disconnect" % (
            util.sanitize_id(account),
        )
        return cls._static_request(
            "post",
            url,
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )

    @util.class_method_variant("_cls_disconnect")
    def disconnect(self, idempotency_key=None, **params):
        # Instance variant: the account id comes from this resource.
        url = "/v1/financial_connections/accounts/%s/disconnect" % (
            util.sanitize_id(self.get("id")),
        )
        return self._request(
            "post",
            url,
            idempotency_key=idempotency_key,
            params=params,
        )

    @classmethod
    def _cls_list_owners(
        cls,
        account,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        url = "/v1/financial_connections/accounts/%s/owners" % (
            util.sanitize_id(account),
        )
        return cls._static_request(
            "get",
            url,
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )

    @util.class_method_variant("_cls_list_owners")
    def list_owners(self, idempotency_key=None, **params):
        url = "/v1/financial_connections/accounts/%s/owners" % (
            util.sanitize_id(self.get("id")),
        )
        return self._request(
            "get",
            url,
            idempotency_key=idempotency_key,
            params=params,
        )

    @classmethod
    def _cls_refresh_account(
        cls,
        account,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        url = "/v1/financial_connections/accounts/%s/refresh" % (
            util.sanitize_id(account),
        )
        return cls._static_request(
            "post",
            url,
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )

    @util.class_method_variant("_cls_refresh_account")
    def refresh_account(self, idempotency_key=None, **params):
        url = "/v1/financial_connections/accounts/%s/refresh" % (
            util.sanitize_id(self.get("id")),
        )
        return self._request(
            "post",
            url,
            idempotency_key=idempotency_key,
            params=params,
        )
| {
"content_hash": "d5dd9e339346f5aa00d9814eb89f5165",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 142,
"avg_line_length": 30.085714285714285,
"alnum_prop": 0.561886672997784,
"repo_name": "stripe/stripe-python",
"id": "e7e46fb0962280e2e37acd8ac71d713fb38676a7",
"size": "3222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stripe/api_resources/financial_connections/account.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1198"
},
{
"name": "Python",
"bytes": "748390"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
# Ordered field names of the Url namedtuple.
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this was `slots = ()`, an inert class attribute with no effect.
    # The intent is `__slots__ = ()` so instances carry no per-instance
    # __dict__ (keeps the namedtuple lightweight and truly immutable).
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize: a relative path is made absolute.
        if path and not path.startswith('/'):
            path = '/' + path
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" we want things to happen with empty strings (or 0 port)
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    # Track the best (earliest) match as a single (index, delimiter) pair.
    best = None
    for candidate in delims:
        found = s.find(candidate)
        if found < 0:
            continue
        if best is None or found < best[0]:
            best = (found, candidate)

    if best is None:
        return s, '', None

    idx, delim = best
    return s[:idx], s[idx + 1:], delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        # Host keeps its surrounding brackets, e.g. '[::1]'; the remainder
        # (possibly ':port') is handled by the port branch below.
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers.
            # NOTE(review): anything non-numeric after the first ':' (e.g. a
            # bare, unbracketed IPv6 address) fails isdigit() and raises here.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # No path/query/fragment was found after the authority; return early.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
| {
"content_hash": "06ca95ed0230e0cce1d587c5254b43c5",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 86,
"avg_line_length": 27.212962962962962,
"alnum_prop": 0.5382783259612113,
"repo_name": "la0rg/Genum",
"id": "55888a616ddd28a0a9b7065fb8515cd7213e489e",
"size": "5878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GenumCore/vendor/urllib3/util/url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14809"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
} |
# Guest-network device slots this script manages in the databag.
# NOTE(review): eth0 is absent -- presumably reserved elsewhere; confirm
# against the router VM's interface layout.
keys = ['eth1', 'eth2', 'eth3', 'eth4', 'eth5', 'eth6', 'eth7', 'eth8', 'eth9']
def merge(dbag, gn):
    """Merge a guest-network entry *gn* into the databag *dbag*.

    If ``gn['add']`` is false and the device already has an entry, the
    device's entry is removed from the bag; otherwise *gn* is appended to
    the device's list (created on demand). Returns the mutated *dbag*.

    Fixes: ``except ValueError, e`` and the ``print`` statement were
    Python-2-only syntax; rewritten in forms valid on both Python 2.6+ and
    Python 3 with identical runtime behavior (the bound exception was unused).
    """
    device = gn['device']
    if not gn['add'] and device in dbag:
        if dbag[device]:
            device_to_die = dbag[device][0]
            try:
                dbag[device].remove(device_to_die)
            except ValueError:
                # Best-effort removal: log and carry on, the key is dropped below.
                print("[WARN] cs_guestnetwork.py :: Error occurred removing item from databag. => %s" % device_to_die)
            del (dbag[device])
        else:
            del (dbag[device])
    else:
        dbag.setdefault(device, []).append(gn)

    return dbag
| {
"content_hash": "7d40bac83dc77cc39bc9e5a881df80e1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 117,
"avg_line_length": 28,
"alnum_prop": 0.5178571428571429,
"repo_name": "remibergsma/cosmic",
"id": "dc65c16b4639849b3cfed9135f8397862e3bb57a",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/play/serviceofferings",
"path": "cosmic-core/systemvm/patches/debian/config/opt/cloud/bin/cs_guestnetwork.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "CSS",
"bytes": "355524"
},
{
"name": "FreeMarker",
"bytes": "1832"
},
{
"name": "Groovy",
"bytes": "135777"
},
{
"name": "HTML",
"bytes": "142254"
},
{
"name": "Java",
"bytes": "19541615"
},
{
"name": "JavaScript",
"bytes": "4569018"
},
{
"name": "Python",
"bytes": "1940322"
},
{
"name": "Shell",
"bytes": "274412"
},
{
"name": "XSLT",
"bytes": "165385"
}
],
"symlink_target": ""
} |
from ..utils.utils import *
from ..utils.functions import *
from ..database.database import *
from ..utils.settings import Settings
from .SuperCommand import *
from ..globals import *
from ..globals import GlobalVariables
class InfoCommand(SuperCommand):
    """The ``info`` command: prints version, file locations, account counts
    and current settings to stdout."""

    def __init__(self, cmd_handler):
        super().__init__(cmd_handler)

    def parseCommandArgs(self, userInputList):
        # Delegates to the module-level parseCommandArgs helper (star import).
        parser = ThrowingArgumentParser(prog="info", description='Information about the program.')
        (self.cmd_args, self.help_text) = parseCommandArgs(parser, userInputList)

    def execute(self):
        fmt = getColumnFormatString(2, 25, delimiter=": ", align="<")
        pwd_file_size = os.path.getsize(GlobalVariables.CLI_PASSWORD_FILE)
        print(fmt.format("Version", GlobalVariables.VERSION))
        print(fmt.format("CLIPWDMGR_DATA_DIR", GlobalVariables.CLIPWDMGR_DATA_DIR))
        print(fmt.format("CLI_PASSWORD_FILE", GlobalVariables.CLI_PASSWORD_FILE))
        print(fmt.format("Password file size", sizeof_fmt(pwd_file_size)))
        # Decrypt/load the account database before querying it.
        loadAccounts(GlobalVariables.KEY)
        total_accounts = selectFirst("select count(*) from accounts")
        print(fmt.format("Total accounts", str(total_accounts)))
        last_updated = selectFirst("select updated from accounts order by updated desc")
        print(fmt.format("Last updated", last_updated))
        print(fmt.format("Settings file", Settings().getSettingsFile()))
        print("Settings:")
        printDictionary(SETTING_DEFAULT_VALUES, indent=" ")
| {
"content_hash": "b6b440bf2f3b4aa0f2e8b6eef8cd1361",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 101,
"avg_line_length": 40.282051282051285,
"alnum_prop": 0.7110120942075111,
"repo_name": "samisalkosuo/clipasswordmgr",
"id": "b17a257bc52205a1b6568c65d409d0819de9b0e6",
"size": "2793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clipwdmgr/commands/InfoCommand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105239"
}
],
"symlink_target": ""
} |
import contextlib
import exception
import os
from utils import log
from utils.cmd import run
def print_list(l, nl_before=False, nl_after=False):
    """Log every element of *l* as a '- item' bullet line.

    Optional blank log lines before/after the list for visual spacing.
    """
    if nl_before:
        log.info("")
    for item in l:
        log.info("- %s" % item)
    if nl_after:
        log.info("")
def ensure_dir(path):
    """Create *path* (and parents) if missing.

    Raises exception.NotADirectory when the path exists but is not a
    directory. Idempotent when the directory already exists.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    elif not os.path.isdir(path):
        raise exception.NotADirectory(path=path)
def download_file(url):
    # Fetch *url* with curl: -L follows redirects, -O saves under the remote
    # file name in the current working directory.
    # NOTE(review): `direct=True` presumably passes curl's output straight
    # through to the console -- confirm in utils.cmd.run.
    run('curl', '-L', '-O', url, direct=True)
def list_files(path='.'):
    """Return the set of file names (not paths) found anywhere under *path*.

    Note: only the base names are collected, so identically named files in
    different subdirectories collapse into one entry.
    """
    return {name
            for _root, _dirs, files in os.walk(path)
            for name in files}
@contextlib.contextmanager
def cdir(path):
    """Context manager: temporarily chdir into *path*.

    An empty path or '.' is a no-op. The previous working directory is
    always restored, even if the body raises.
    """
    if not path or path == '.':
        yield
        return
    original_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original_cwd)
| {
"content_hash": "5a32445829b2c45d98947a4db2a490e2",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 52,
"avg_line_length": 18.5,
"alnum_prop": 0.5720720720720721,
"repo_name": "yac/rdoupdate",
"id": "25b008e2d45e93d75d42cf3370c36ef0667fa300",
"size": "888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdoupdate/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "48351"
}
],
"symlink_target": ""
} |
# Public API of this module: element-related helpers (a Python port of
# chromedriver-style element utilities). Star-imports pick up exactly these.
__all__ = ["ParseFromValue", \
           "CreateValueFrom", \
           "CreateElement", \
           "CallAtomsJs", \
           "VerifyElementClickable", \
           "FindElement", \
           "IsElementEnabled", \
           "IsOptionElementSelected", \
           "GetElementSize", \
           "IsElementDisplayed", \
           "IsOptionElementTogglable",\
           "SetOptionElementSelected", \
           "GetActiveElement", \
           "GetElementAttribute", \
           "GetElementTagName", \
           "IsElementFocused", \
           "ToggleOptionElement", \
           "GetElementRegion", \
           "GetElementEffectiveStyle", \
           "GetElementBorder", \
           "ScrollElementRegionIntoViewHelper", \
           "IsElementAttributeEqualToIgnoreCase", \
           "ScrollElementRegionIntoView", \
           "ScrollElementIntoView", \
           "GetElementClickableLocation"]
import time
import copy
from misc.basic_types import WebPoint
from misc.basic_types import WebSize
from misc.basic_types import WebRect
from third_party.atoms import *
from browser.status import *
from browser.js import *
# Dict key under which element ids travel on the wire; "ELEMENT" matches the
# WebDriver JSON wire protocol convention.
kElementKey = "ELEMENT"
def ParseFromValue(value, target):
    """Populate *target* (WebPoint, WebSize or WebRect) from a wire-format dict.

    Returns True when every required numeric field was present, False for a
    non-dict *value* or missing/non-numeric fields. Coordinates are truncated
    to int. (For an unsupported target type the function falls through and
    returns None, matching the original behavior.)
    """
    if type(value) != dict:
        return False

    def is_num(v):
        # Deliberately type()-based: bool is an int subclass and must be rejected.
        return type(v) in (float, int)

    if isinstance(target, WebPoint):
        px, py = value.get("x"), value.get("y")
        if is_num(px) and is_num(py):
            target.x, target.y = int(px), int(py)
            return True
        return False

    if isinstance(target, WebSize):
        w, h = value.get("width"), value.get("height")
        if is_num(w) and is_num(h):
            target.width, target.height = int(w), int(h)
            return True
        return False

    if isinstance(target, WebRect):
        left, top = value.get("left"), value.get("top")
        w, h = value.get("width"), value.get("height")
        if is_num(left) and is_num(top) and is_num(w) and is_num(h):
            target.origin.x, target.origin.y = int(left), int(top)
            target.size.width, target.size.height = int(w), int(h)
            return True
        return False
def CreateValueFrom(target):
    """Serialize a WebPoint, WebSize or WebRect into its wire-format dict.

    Inverse of ParseFromValue. Returns None for unsupported target types
    (preserving the original implicit fall-through).
    """
    dict_value = {}
    # create value from WebPoint
    if isinstance(target, WebPoint):
        dict_value["x"] = target.x
        dict_value["y"] = target.y
        return dict_value
    # create value from WebSize
    if isinstance(target, WebSize):
        dict_value["width"] = target.width
        dict_value["height"] = target.height
        return dict_value
    # create value from WebRect
    if isinstance(target, WebRect):
        dict_value["left"] = target.X()
        # Bug fix: Y() is the rect's *top* edge and was stored under the key
        # "right"; ParseFromValue and the atoms scripts read "top", so the
        # round-trip silently dropped the Y coordinate.
        dict_value["top"] = target.Y()
        dict_value["width"] = target.Width()
        dict_value["height"] = target.Height()
        return dict_value
def CreateElement(element_id):
    """Wrap an element id in its wire-format dict ({kElementKey: id})."""
    return {kElementKey: element_id}
def CallAtomsJs(frame, web_view, atom_function, args, result):
    # Thin wrapper: execute a pre-packaged atoms script (third_party.atoms)
    # in *frame*, collecting its return value into the *result* dict.
    return web_view.CallFunction(frame, atom_function, args, result)
def VerifyElementClickable(frame, web_view, element_id, location):
    """Check via the IS_ELEMENT_CLICKABLE atom that *location* would hit the element.

    Returns Status(kOk) when clickable; otherwise an error Status carrying
    the atom's message (or a generic one when the atom supplies none).
    """
    result = {}
    status = CallAtomsJs(frame, web_view, IS_ELEMENT_CLICKABLE,
                         [CreateElement(element_id), CreateValueFrom(location)],
                         result)
    if status.IsError():
        return status
    clickable = result["value"].get("clickable")
    if type(clickable) != bool:
        return Status(kUnknownError, "failed to parse value of IS_ELEMENT_CLICKABLE")
    if clickable:
        return Status(kOk)
    message = result.get("message")
    if type(message) != str:
        message = "element is not clickable"
    return Status(kUnknownError, message)
def FindElement(interval_ms, only_one, root_element_id, session, web_view, params, value):
    """Poll for element(s) matching the locator in *params*.

    Runs the FIND_ELEMENT/FIND_ELEMENTS atom every *interval_ms* ms until a
    match appears or ``session.implicit_wait`` seconds elapse. The result is
    written into the caller-supplied *value* dict; returns a Status.
    *root_element_id*, when set, scopes the search under that element.
    """
    strategy = params.get("using")
    if type(strategy) != str:
        return Status(kUnknownError, "'using' must be a string")
    target = params.get("value")
    if type(target) != str:
        return Status(kUnknownError, "'value' must be a string")
    script = FIND_ELEMENT if only_one else FIND_ELEMENTS
    locator = {}
    locator[strategy] = target
    arguments = []
    arguments.append(locator)
    if root_element_id:
        arguments.append(CreateElement(root_element_id))
    start_time = time.time()
    while True:
        temp = {}
        status = web_view.CallFunction(session.GetCurrentFrameId(), script, arguments, temp)
        if status.IsError():
            return status
        # no matter what kind of result, it will packed in {"value": RemoteObject} format
        # RemoteObject can be JSON type
        if temp != {}:
            if only_one:
                value.clear()
                value.update(temp)
                return Status(kOk)
            else:
                if type(temp["value"]) != list:
                    return Status(kUnknownError, "script returns unexpected result")
                if len(temp["value"]) > 0:
                    value.clear()
                    value.update(temp)
                    return Status(kOk)
        # Nothing found yet: give up after the implicit wait, otherwise sleep
        # and poll again. An empty find-elements result reports kOk with [].
        if ((time.time() - start_time) >= session.implicit_wait):
            if only_one:
                return Status(kNoSuchElement)
            else:
                value.update({"value": []})
                return Status(kOk)
        time.sleep(float(interval_ms)/1000)
    # NOTE(review): unreachable -- every path inside the loop returns.
    return Status(kUnknownError)
def IsElementEnabled(session, web_view, element_id):
    """Return (Status, bool): whether the element is enabled (IS_ENABLED atom)."""
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, IS_ENABLED,
                         [CreateElement(element_id)], result)
    if status.IsError():
        return (status, False)
    # The atom's return value is packed under the "value" key.
    enabled = result["value"]
    if type(enabled) != bool:
        return (Status(kUnknownError, "IS_ENABLED should return a boolean value"), False)
    return (Status(kOk), enabled)
def IsOptionElementSelected(session, web_view, element_id):
    """Return (Status, bool): whether the <option> is selected (IS_SELECTED atom)."""
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, IS_SELECTED,
                         [CreateElement(element_id)], result)
    if status.IsError():
        return (status, False)
    # The atom's return value is packed under the "value" key.
    selected = result["value"]
    if type(selected) != bool:
        return (Status(kUnknownError, "IS_SELECTED should return a boolean value"), False)
    return (Status(kOk), selected)
def GetElementSize(session, web_view, element_id, size):
    """Fill *size* (WebSize) from the GET_SIZE atom; return a Status."""
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, GET_SIZE,
                         [CreateElement(element_id)], result)
    if status.IsError():
        return status
    # The atom's return value is packed under the "value" key.
    if not ParseFromValue(result["value"], size):
        return Status(kUnknownError, "failed to parse value of GET_SIZE")
    return Status(kOk)
def IsElementDisplayed(session, web_view, element_id, ignore_opacity):
    """Return (Status, bool): element visibility per the IS_DISPLAYED atom.

    *ignore_opacity* is forwarded to the atom so fully transparent elements
    can still count as displayed.
    """
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, IS_DISPLAYED,
                         [CreateElement(element_id), ignore_opacity], result)
    if status.IsError():
        return (status, False)
    # The atom's return value is packed under the "value" key.
    displayed = result["value"]
    if type(displayed) != bool:
        return (Status(kUnknownError, "IS_DISPLAYED should return a boolean value"), False)
    return (Status(kOk), displayed)
def IsOptionElementTogglable(session, web_view, element_id):
    """Return (Status, bool): whether the <option> can be toggled."""
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(),
                                   kIsOptionElementToggleableScript,
                                   [CreateElement(element_id)], result)
    if status.IsError():
        return (status, False)
    # The script's return value is packed under the "value" key.
    togglable = result["value"]
    if type(togglable) != bool:
        return (Status(kUnknownError, "failed check if option togglable or not"), False)
    return (Status(kOk), togglable)
def SetOptionElementSelected(session, web_view, element_id, selected):
    """Drive an <option> to the requested selected state via the CLICK atom."""
    # TODO(wyh): need to fix throwing error if an alert is triggered.
    return CallAtomsJs(session.GetCurrentFrameId(), web_view, CLICK,
                       [CreateElement(element_id), selected], {})
def GetActiveElement(session, web_view, value):
    # Write the focused element into *value*; document.activeElement can be
    # null (no focus), hence the document.body fallback in the script.
    return web_view.CallFunction(session.GetCurrentFrameId(), "function() { return document.activeElement || document.body }", [], value)
def GetElementAttribute(session, web_view, element_id, attribute_name, value):
    """Read *attribute_name* of the element into *value* via the GET_ATTRIBUTE atom."""
    return CallAtomsJs(session.GetCurrentFrameId(), web_view, GET_ATTRIBUTE,
                       [CreateElement(element_id), attribute_name], value)
def GetElementTagName(session, web_view, element_id):
    """Return (Status, str): the element's lower-cased tag name."""
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(),
                                   "function(elem) { return elem.tagName.toLowerCase(); }",
                                   [CreateElement(element_id)], result)
    if status.IsError():
        return (status, "")
    # The script's return value is packed under the "value" key.
    tag_name = result["value"]
    if type(tag_name) != str:
        return (Status(kUnknownError, "failed to get element tag name"), "")
    return (Status(kOk), tag_name)
def IsElementFocused(session, web_view, element_id):
    """Return (Status, bool): whether *element_id* is the page's active element."""
    result = {}
    status = GetActiveElement(session, web_view, result)
    if status.IsError():
        return (status, False)
    # Focused iff the active element's wire dict matches ours.
    return (Status(kOk), result["value"] == CreateElement(element_id))
def ToggleOptionElement(session, web_view, element_id):
    """Flip an <option>'s selected state (read current state, then set the inverse)."""
    (status, currently_selected) = IsOptionElementSelected(session, web_view, element_id)
    if status.IsError():
        return status
    return SetOptionElementSelected(session, web_view, element_id,
                                    not currently_selected)
def GetElementRegion(session, web_view, element_id, rect):
    """Fill *rect* (WebRect) with the element's region; return a Status."""
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(),
                                   kGetElementRegionScript,
                                   [CreateElement(element_id)], result)
    if status.IsError():
        return status
    # The script's return value is packed under the "value" key.
    if not ParseFromValue(result["value"], rect):
        return Status(kUnknownError, "failed to parse value of getElementRegion")
    return Status(kOk)
def GetElementEffectiveStyle(frame, web_view, element_id, sproperty):
    """Return (Status, str): the element's computed value for CSS property *sproperty*."""
    result = {}
    status = web_view.CallFunction(frame, GET_EFFECTIVE_STYLE,
                                   [CreateElement(element_id), sproperty], result)
    if status.IsError():
        return (status, "")
    # The atom's return value is packed under the "value" key.
    style = result["value"]
    if type(style) != str:
        return (Status(kUnknownError, "failed to parse value of GET_EFFECTIVE_STYLE"), "")
    return (Status(kOk), style)
# return status and border_left<int> and border_top<int>
def GetElementBorder(frame, web_view, element_id):
    """Return (Status, border_left, border_top) of the element in *frame*.

    The widths come from the computed "border-left-width" /
    "border-top-width" styles; both are reported as -1 on any failure.
    """
    (status, border_left_str) = GetElementEffectiveStyle(frame, web_view, element_id, "border-left-width")
    if status.IsError():
        return (status, -1, -1)
    (status, border_top_str) = GetElementEffectiveStyle(frame, web_view, element_id, "border-top-width")
    if status.IsError():
        return (status, -1, -1)
    try:
        border_left = int(border_left_str)
        border_top = int(border_top_str)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; only conversion failures (e.g. a
        # non-numeric computed style such as "medium") are expected here.
        return (Status(kUnknownError, "failed to get border width of element"), -1, -1)
    return (Status(kOk), border_left, border_top)
def ScrollElementRegionIntoViewHelper(frame, web_view, element_id, region, center, clickable_element_id, location):
    """Scroll *region* of the element into view inside a single frame.

    On success *location* is updated in place with the region's position
    in the view.  When *clickable_element_id* is given, the middle of the
    scrolled region is additionally verified to be clickable on that
    element before *location* is touched.
    """
    scrolled_location = copy.deepcopy(location)
    call_args = [CreateElement(element_id), CreateValueFrom(region), center]
    # TODO(wyh): why append the following param between above two cause the null value of y?
    call_result = {}
    status = web_view.CallFunction(frame, GET_LOCATION_IN_VIEW, call_args, call_result)
    if status.IsError():
        return status
    if not ParseFromValue(call_result["value"], scrolled_location):
        return Status(kUnknownError, "failed to parse value of GET_LOCATION_IN_VIEW")
    if clickable_element_id:
        # Probe the region's midpoint so a later click will not land on
        # some other element covering the target.
        middle = copy.deepcopy(scrolled_location)
        middle.Offset(region.Width() / 2, region.Height() / 2)
        status = VerifyElementClickable(frame, web_view, clickable_element_id, middle)
        if status.IsError():
            return status
    # Publish the new location only after every step has succeeded.
    location.Update(scrolled_location)
    return Status(kOk)
# return status and is_equal<bool>
def IsElementAttributeEqualToIgnoreCase(session, web_view, element_id, attribute_name, attribute_value):
    """Return (Status, bool): case-insensitive equality of an attribute.

    Non-string attribute values (including a missing attribute) always
    compare unequal.
    """
    lookup = {}
    status = GetElementAttribute(session, web_view, element_id, attribute_name, lookup)
    if status.IsError():
        return (status, False)
    actual_value = lookup["value"]
    is_equal = (type(actual_value) == str
                and actual_value.lower() == attribute_value.lower())
    return (status, is_equal)
def ScrollElementRegionIntoView(session, web_view, element_id, region, center, clickable_element_id, location):
    """Scroll |region| of |element_id| into view across all ancestor frames.

    First scrolls within the element's own frame, then walks the session's
    frame chain scrolling each hosting <frame>/<iframe> element too, while
    accumulating the offsets (plus each frame's border) into |location|.
    Returns a Status object; on success |location| holds the region's
    position in the top-level view.
    """
    region_offset = region.origin;
    region_size = region.size;
    status = ScrollElementRegionIntoViewHelper(session.GetCurrentFrameId(), web_view, element_id, \
        region, center, clickable_element_id, region_offset)
    if status.IsError():
        return status
    # Finds the frame element carrying the given cd_frame_id_ marker
    # attribute (stamped on the element by the driver when tracking frames).
    kFindSubFrameScript = \
        "function(xpath) {"\
        " return document.evaluate(xpath, document, null,"\
        " XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;"\
        "}"
    # NOTE(review): the reverse() suggests session.frames is ordered
    # outermost-first so that scrolling proceeds innermost-first — confirm
    # against the Session definition.
    session_frames_tmp = copy.deepcopy(session.frames)
    session_frames_tmp.reverse()
    for rit in session_frames_tmp:
        args = []
        args.append("//*[@cd_frame_id_ = '%s']" % rit.xwalkdriver_frame_id)
        result = {}
        status = web_view.CallFunction(rit.parent_frame_id, kFindSubFrameScript, args, result)
        if status.IsError():
            return status
        element_dict = result["value"]
        if type(element_dict) != dict:
            return Status(kUnknownError, "no element reference returned by script")
        frame_element_id = element_dict.get(kElementKey)
        if type(frame_element_id) != str:
            return Status(kUnknownError, "failed to locate a sub frame")
        # Modify |region_offset| by the frame's border.
        (status, border_left, border_top) = GetElementBorder(rit.parent_frame_id, web_view, frame_element_id)
        if status.IsError():
            return status
        region_offset.Offset(border_left, border_top)
        # Scroll the frame element itself so the accumulated region stays
        # visible in the parent frame; offsets accumulate into region_offset.
        status = ScrollElementRegionIntoViewHelper(rit.parent_frame_id, web_view, frame_element_id, \
            WebRect(region_offset, region_size), center, frame_element_id, region_offset)
        if status.IsError():
            return status
    location.Update(region_offset)
    return Status(kOk)
def ScrollElementIntoView(session, web_view, sid, location):
    """Scroll the whole element *sid* into view, reporting its location."""
    size = WebSize()
    status = GetElementSize(session, web_view, sid, size)
    if status.IsError():
        return status
    # Scroll the element's full rect; no centering and no clickability check.
    return ScrollElementRegionIntoView(session, web_view, sid,
                                       WebRect(WebPoint(0, 0), size),
                                       False, "", location)
def GetElementClickableLocation(session, web_view, element_id, location):
    """Compute the point to click for |element_id| (center of its region).

    For <area> elements the enclosing image map's <img> is scrolled into
    view instead of the area itself.  Returns a Status; on success
    |location| holds the clickable point in view coordinates.
    """
    (status, tag_name) = GetElementTagName(session, web_view, element_id)
    if status.IsError():
        return status
    target_element_id = element_id
    if (tag_name == "area"):
        # Scroll the image into view instead of the area.
        kGetImageElementForArea = \
            "function (element) {"\
            " var map = element.parentElement;"\
            " if (map.tagName.toLowerCase() != 'map')"\
            " throw new Error('the area is not within a map');"\
            " var mapName = map.getAttribute('name');"\
            " if (mapName == null)"\
            " throw new Error ('area\\'s parent map must have a name');"\
            " mapName = '#' + mapName.toLowerCase();"\
            " var images = document.getElementsByTagName('img');"\
            " for (var i = 0; i < images.length; i++) {"\
            " if (images[i].useMap.toLowerCase() == mapName)"\
            " return images[i];"\
            " }"\
            " throw new Error('no img is found for the area');"\
            "}"
        args = []
        args.append(CreateElement(element_id))
        result = {}
        status = web_view.CallFunction(session.GetCurrentFrameId(), kGetImageElementForArea, args, result)
        if status.IsError():
            return status
        element_dict = result["value"]
        if type(element_dict) != dict:
            return Status(kUnknownError, "no element reference returned by script")
        # Retarget all further work at the <img> backing the image map.
        target_element_id = element_dict.get(kElementKey)
        if type(target_element_id) != str:
            return Status(kUnknownError, "no element reference returned by script")
    (status, is_displayed) = IsElementDisplayed(session, web_view, target_element_id, True)
    if status.IsError():
        return status
    if not is_displayed:
        return Status(kElementNotVisible)
    rect = WebRect()
    # Note: the region is measured on the original element even when the
    # scroll target was retargeted to the map's image above.
    status = GetElementRegion(session, web_view, element_id, rect)
    if status.IsError():
        return status
    # TODO(wyh): manually change center to false make element.click() ok
    status = ScrollElementRegionIntoView(session, web_view, target_element_id, rect, False, element_id, location)
    if status.IsError():
        return status
    # Click the middle of the (now visible) region.
    location.Offset(rect.Width() / 2, rect.Height() / 2)
    return Status(kOk)
| {
"content_hash": "9e09eb2c41f4c98055578955ff1f6b1d",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 135,
"avg_line_length": 37.19700214132762,
"alnum_prop": 0.6787749697772149,
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"id": "6aebd982eb103d11abbdf7cf93a8cbe2f038cfb4",
"size": "17371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "command/element_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "241573"
}
],
"symlink_target": ""
} |
try:
from hashlib import md5
except ImportError:
from md5 import md5
import bisect
class ConsistentHashRing:
    """Consistent hash ring mapping keys to nodes via virtual replicas.

    Each node is inserted at ``replica_count`` pseudo-random positions on a
    16-bit ring so that keys spread evenly and only ~1/N of the keys move
    when a node joins or leaves.
    """

    def __init__(self, nodes, replica_count=100):
        self.ring = []          # sorted list of (position, node) entries
        self.nodes = set()
        self.replica_count = replica_count
        for node in nodes:
            self.add_node(node)

    def compute_ring_position(self, key):
        """Map *key* to a ring position in [0, 2**16) via its md5 prefix."""
        # encode() keeps this working on Python 3, where md5 requires bytes
        # (the original passed a str and raised TypeError there).
        big_hash = md5(str(key).encode('utf-8')).hexdigest()
        return int(big_hash[:4], 16)

    def add_node(self, node):
        """Insert *node* at replica_count positions on the ring."""
        self.nodes.add(node)
        # Track occupied positions in a set instead of rescanning the whole
        # ring for every replica (the original rebuilt that list per probe).
        taken = set(entry[0] for entry in self.ring)
        for i in range(self.replica_count):
            position = self.compute_ring_position("%s:%d" % (node, i))
            # Linear-probe past collisions so every replica gets a slot.
            while position in taken:
                position += 1
            taken.add(position)
            bisect.insort(self.ring, (position, node))

    def remove_node(self, node):
        """Drop *node* and every one of its ring entries."""
        self.nodes.discard(node)
        self.ring = [entry for entry in self.ring if entry[1] != node]

    def get_node(self, key):
        """Return the single node responsible for *key*."""
        assert self.ring
        node_iter = self.get_nodes(key)
        # next() builtin (not the Python 2-only .next() method) so this
        # works on Python 3 as well.
        node = next(node_iter)
        node_iter.close()
        return node

    def get_nodes(self, key):
        """Yield distinct nodes for *key*, walking clockwise from its position."""
        assert self.ring
        seen = set()
        uniq_nodes = []
        position = self.compute_ring_position(key)
        # A 1-tuple sorts before any (position, node) entry with the same
        # position, matching the old (position, None) Python 2 behaviour
        # without comparing None against a node (TypeError on Python 3).
        index = bisect.bisect_left(self.ring, (position,)) % len(self.ring)
        last_index = (index - 1) % len(self.ring)
        while len(seen) < len(self.nodes) and index != last_index:
            (position, next_node) = self.ring[index]
            # Deduplicate by the node's first component, as the original did.
            if next_node[0] not in uniq_nodes:
                seen.add(next_node)
                uniq_nodes.append(next_node[0])
                yield next_node
            index = (index + 1) % len(self.ring)
| {
"content_hash": "3fe34971dbf98dbd4d58853b4f98ab2a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 28.559322033898304,
"alnum_prop": 0.626706231454006,
"repo_name": "mattrobenolt/carbon",
"id": "177acfafffe6835264a60f17f19a0ccf76bbd2ca",
"size": "1685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/carbon/hashing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import sys
import time
from datetime import datetime, timedelta, timezone
from libqtile.log_utils import logger
from libqtile.widget import base
try:
import pytz
except ImportError:
pass
try:
import dateutil.tz
except ImportError:
pass
class Clock(base.InLoopPollText):
    """A simple but flexible text-based clock"""
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('format', '%H:%M', 'A Python datetime format string'),
        ('update_interval', 1., 'Update interval for the clock'),
        ('timezone', None, 'The timezone to use for this clock, either as'
         ' string if pytz or dateutil is installed (e.g. "US/Central" or'
         ' anything in /usr/share/zoneinfo), or as tzinfo (e.g.'
         ' datetime.timezone.utc). None means the system local timezone and is'
         ' the default.')
    ]
    # Half-second bias applied before formatting; see poll() below.
    DELTA = timedelta(seconds=0.5)

    def __init__(self, **config):
        base.InLoopPollText.__init__(self, **config)
        self.add_defaults(Clock.defaults)
        if isinstance(self.timezone, str):
            # A string timezone needs a resolver library; prefer pytz,
            # then dateutil, otherwise warn and leave the string untouched.
            if "pytz" in sys.modules:
                self.timezone = pytz.timezone(self.timezone)
            elif "dateutil" in sys.modules:
                self.timezone = dateutil.tz.gettz(self.timezone)
            else:
                logger.warning('Clock widget can not infer its timezone from a'
                               ' string without pytz or dateutil. Install one'
                               ' of these libraries, or give it a'
                               ' datetime.tzinfo instance.')
        if self.timezone is None:
            logger.info('Defaulting to the system local timezone.')

    def tick(self):
        # Redraw, then report how long until the next interval boundary.
        self.update(self.poll())
        return self.update_interval - time.time() % self.update_interval

    # adding .5 to get a proper seconds value because glib could
    # theoreticaly call our method too early and we could get something
    # like (x-1).999 instead of x.000
    def poll(self):
        utc_now = datetime.now(timezone.utc)
        if self.timezone:
            now = utc_now.astimezone(self.timezone)
        else:
            now = utc_now.astimezone()
        return (now + self.DELTA).strftime(self.format)
| {
"content_hash": "a540b21f2126c9f183ed332198799da7",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 36.57377049180328,
"alnum_prop": 0.6140744060959211,
"repo_name": "tych0/qtile",
"id": "98149064e92b5bbedc7a2a4df3481fc41820d069",
"size": "3426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libqtile/widget/clock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "1299146"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "8166"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
from builtins import *
import pandas as pd
from . import utils
class EntrustOrder(object):
    """Plain value object describing one entrust (order) request.

    Batch helpers read these attributes back generically, so the names
    mirror the constructor arguments exactly.
    """

    def __init__(self, security, action, price, size):
        self.security, self.action = security, action
        self.price, self.size = price, size
def set_log_dir(log_dir):
    """Forward *log_dir* to the optional native jrpc module, if present.

    A falsy directory is a no-op; a missing/broken jrpc module is reported
    but never raised.
    """
    if not log_dir:
        return
    try:
        import jrpc
        jrpc.set_log_dir(log_dir)
    except Exception as e:
        print("Exception", e)
class TradeApi(object):
    """RPC client for the OMS/PMS trading service.

    Wraps a JSON-RPC connection (native ``jrpc`` client or the pure-python
    fallback), transparently re-establishes login / strategy selection on
    reconnect, and exposes order-entry and query helpers.  Query results
    come back as pandas DataFrames, plain dicts, or objects depending on
    the selected data format.
    """

    def __init__(self, addr, use_jrpc=True, prod_type="jzts"):
        """
        use_jrpc:
            True -- Use jrcp_client of C version, for jzts only
            False -- Use pure python version
        prod_type:
            "jaqs" -- jrpc_msgpack_wth_snappy
            "jzts" -- jrpc_msgpack
        """
        # Initialize all state BEFORE wiring callbacks and connecting:
        # _on_connected may fire during connect() and reads these fields
        # (the original set them after connect(), a startup race).
        self._ordstatus_callback = None
        self._taskstatus_callback = None
        self._internal_order_callback = None
        self._trade_callback = None
        self._on_connection_callback = None
        self._connected = False
        self._username = ""
        self._password = ""
        self._strategy_id = 0
        self._strategy_selected = False
        self._data_format = "default"
        self._remote = None
        if prod_type == "jzts":
            # Prefer the native client; fall back to the pure-python one.
            try:
                if use_jrpc:
                    import jrpc
                    self._remote = jrpc.JsonRpcClient()
                else:
                    from . import jrpc_py
                    self._remote = jrpc_py.JRpcClient(data_format="msgpack")
            except Exception as e:
                print("Exception", e)
            if not self._remote:
                from . import jrpc_py
                self._remote = jrpc_py.JRpcClient(data_format="msgpack")
        else:
            from . import jrpc_py
            self._remote = jrpc_py.JRpcClient(data_format="msgpack_snappy")
        self._remote.on_rpc_callback = self._on_rpc_callback
        self._remote.on_disconnected = self._on_disconnected
        self._remote.on_connected = self._on_connected
        self._remote.connect(addr)

    def __del__(self):
        # Best-effort close; never raise during interpreter shutdown.
        try:
            self._remote.close()
        except Exception:
            pass

    def _on_rpc_callback(self, method, data):
        """Dispatch server push notifications to the registered callbacks."""
        # notification method -> (object class name, user callback)
        dispatch = {
            "oms.orderstatus_ind": ("Order", self._ordstatus_callback),
            "oms.taskstatus_ind": ("TaskStatus", self._taskstatus_callback),
            "oms.trade_ind": ("Trade", self._trade_callback),
            "oms.internal_order_ind": ("QuoteOrder", self._internal_order_callback),
        }
        entry = dispatch.get(method)
        if not entry:
            return
        class_name, callback = entry
        if self._data_format == "obj":
            data = utils.to_obj(class_name, data)
        if callback:
            callback(data)

    def _on_disconnected(self):
        """Connection lost: invalidate the session and notify the user."""
        print("TradeApi: _on_disconnected")
        self._connected = False
        self._strategy_selected = False
        if self._on_connection_callback:
            self._on_connection_callback(False)

    def _on_connected(self):
        """(Re)connected: restore login and strategy, then notify the user."""
        print("TradeApi: _on_connected")
        self._connected = True
        self._do_login()
        self._do_use_strategy()
        if self._on_connection_callback:
            self._on_connection_callback(True)

    def _check_session(self):
        """Ensure we are connected, logged in and bound to a strategy.

        Returns (ok, message).
        """
        if not self._connected:
            return (False, "no connection")
        if self._strategy_selected:
            return (True, "")
        r, msg = self._do_login()
        if not r:
            return (r, msg)
        if self._strategy_id:
            return self._do_use_strategy()
        return (r, msg)

    def set_data_format(self, format):
        # Default output format for all calls: "", "obj" or "pandas".
        self._data_format = format

    def set_connection_callback(self, callback):
        self._on_connection_callback = callback

    def set_ordstatus_callback(self, callback):
        self._ordstatus_callback = callback

    def set_trade_callback(self, callback):
        self._trade_callback = callback

    def set_task_callback(self, callback):
        self._taskstatus_callback = callback

    def set_quoteorder_callback(self, callback):
        self._internal_order_callback = callback

    def _get_format(self, format, default_format):
        """Resolve the effective data format for a single call."""
        if format:
            return format
        if self._data_format != "default":
            return self._data_format
        return default_format

    def login(self, username, password, format=""):
        """Authenticate and remember the credentials for automatic re-login."""
        self._username = username
        self._password = password
        return self._do_login(format=format)

    def _do_login(self, format=""):
        # Shouldn't check the connected flag here: the transport is a
        # message queue, so the request is buffered until delivery.
        if not (self._username and self._password):
            return (False, "-1,empty username or password")
        rpc_params = {"username": self._username,
                      "password": self._password}
        cr = self._remote.call("auth.login", rpc_params)
        f = self._get_format(format, "")
        # Login supports only raw ("") or object output.
        if f != "obj" and f != "":
            f = ""
        return utils.extract_result(cr, format=f, class_name="UserInfo")

    def logout(self):
        cr = self._remote.call("auth.logout", {})
        return utils.extract_result(cr)

    def close(self):
        self._remote.close()

    def use_strategy(self, strategy_id):
        """Bind to *strategy_id*; a falsy id queries the default account."""
        if strategy_id:
            self._strategy_id = strategy_id
            return self._do_use_strategy()
        # Query
        cr = self._remote.call("auth.use_strategy", {"account_id": 0})
        r, msg = utils.extract_result(cr)
        self._strategy_selected = r
        return (r, msg)

    def _do_use_strategy(self):
        if not self._strategy_id:
            return (False, "-1,no strategy_id was specified")
        cr = self._remote.call("auth.use_strategy",
                               {"account_id": self._strategy_id})
        r, msg = utils.extract_result(cr)
        self._strategy_selected = r
        return (r, msg)

    # ---- internal helpers -------------------------------------------------

    def _do_order_call(self, method, rpc_params, algo, algo_param, userdata):
        """Common tail of every order-entry RPC: session check + call.

        Returns (result, message); result is None on failure.
        """
        r, msg = self._check_session()
        if not r:
            return (None, msg)
        rpc_params.update({
            "algo": algo,
            # Mutable-default fix: callers pass None (or a dict); the empty
            # dict is created per call instead of being a shared default.
            "algo_param": json.dumps(algo_param if algo_param is not None else {}),
            "user": self._username,
            "userdata": userdata,
        })
        cr = self._remote.call(method, rpc_params)
        return utils.extract_result(cr)

    @staticmethod
    def _normalize_entrusts(orders, with_action):
        """Convert EntrustOrder instances to the plain dicts the RPC expects."""
        if not isinstance(orders[0], EntrustOrder):
            return orders
        converted = []
        for o in orders:
            entry = {"security": o.security,
                     "price": o.price,
                     "size": int(o.size)}
            if with_action:
                entry["action"] = o.action
            converted.append(entry)
        return converted

    def _do_query(self, method, rpc_params, format, class_name, index_column=None):
        """Common tail of every query RPC; returns (data, message)."""
        r, msg = self._check_session()
        if not r:
            return (None, msg)
        data_format = self._get_format(format, "pandas")
        if data_format == "pandas":
            # Ask the server for column-oriented data, cheap to turn
            # into a DataFrame.
            rpc_params["format"] = "columnset"
        cr = self._remote.call(method, rpc_params)
        if index_column:
            return utils.extract_result(cr, index_column=index_column,
                                        format=data_format, class_name=class_name)
        return utils.extract_result(cr, format=data_format, class_name=class_name)

    # ---- order entry ------------------------------------------------------

    def confirm_internal_order(self, task_id, confirmed):
        """
        return (result, message)
        if result is None, message contains error information
        """
        r, msg = self._check_session()
        if not r:
            return (None, msg)
        rpc_params = {"task_id": task_id,
                      "confirmed": confirmed}
        cr = self._remote.call("oms.confirm_internal_order", rpc_params)
        return utils.extract_result(cr)

    def order(self, security, price, size, algo="", algo_param=None, userdata=""):
        """
        return (result, message)
        if result is None, message contains error information
        """
        rpc_params = {"security": security,
                      "price": price,
                      "size": int(size)}
        return self._do_order_call("oms.order", rpc_params, algo, algo_param, userdata)

    def place_order(self, security, action, price, size, algo="", algo_param=None, userdata=""):
        """
        return (result, message)
        if result is None, message contains error information
        """
        rpc_params = {"security": security,
                      "action": action,
                      "price": price,
                      "size": int(size)}
        return self._do_order_call("oms.place_order", rpc_params, algo, algo_param, userdata)

    def batch_order(self, orders, algo="", algo_param=None, userdata=""):
        """
        orders format:
        [ {"security": "000001.SZ", "action": "Buy", "price": 10.0, "size" : 100}, ... ]
        return (result, message)
        if result is None, message contains error information
        """
        if not orders or not isinstance(orders, (list, tuple)):
            return (None, "empty order")
        orders = self._normalize_entrusts(orders, with_action=False)
        return self._do_order_call("oms.batch_order", {"orders": orders},
                                   algo, algo_param, userdata)

    def place_batch_order(self, orders, algo="", algo_param=None, userdata=""):
        """
        orders format:
        [ {"security": "000001.SZ", "action": "Buy", "price": 10.0, "size" : 100}, ... ]
        return (result, message)
        if result is None, message contains error information
        """
        if not orders or not isinstance(orders, (list, tuple)):
            return (None, "empty order")
        orders = self._normalize_entrusts(orders, with_action=True)
        return self._do_order_call("oms.place_batch_order", {"orders": orders},
                                   algo, algo_param, userdata)

    def cancel_order(self, task_id):
        """
        return (result, message)
        if result is None, message contains error information
        """
        r, msg = self._check_session()
        if not r:
            return (None, msg)
        cr = self._remote.call("oms.cancel_order", {"task_id": task_id})
        return utils.extract_result(cr)

    # ---- queries -----------------------------------------------------------

    def query_account(self, format=""):
        """
        return pd.dataframe
        """
        return self._do_query("oms.query_account", {}, format, "Account")

    def query_position(self, mode="all", securities="", format=""):
        """
        securities: seperate by ","
        return pd.dataframe
        """
        rpc_params = {"mode": mode, "security": securities}
        return self._do_query("oms.query_position", rpc_params, format, "Position")

    def query_net_position(self, mode="all", securities="", format=""):
        """
        securities: seperate by ","
        return pd.dataframe
        """
        rpc_params = {"mode": mode, "security": securities}
        return self._do_query("oms.query_net_position", rpc_params, format, "NetPosition")

    def query_repo_contract(self, format=""):
        """
        return pd.dataframe
        """
        # Unlike the other queries, this RPC takes no "columnset" hint.
        r, msg = self._check_session()
        if not r:
            return (None, msg)
        cr = self._remote.call("oms.query_repo_contract", {})
        return utils.extract_result(cr, format=self._get_format(format, "pandas"),
                                    class_name="RepoContract")

    def query_task(self, task_id=-1, format=""):
        """
        task_id: -1 -- all
        return pd.dataframe
        """
        return self._do_query("oms.query_task", {"task_id": task_id}, format, "Task")

    def query_order(self, task_id=-1, format=""):
        """
        task_id: -1 -- all
        return pd.dataframe
        """
        return self._do_query("oms.query_order", {"task_id": task_id}, format, "Order")

    def query_trade(self, task_id=-1, format=""):
        """
        task_id: -1 -- all
        return pd.dataframe
        """
        return self._do_query("oms.query_trade", {"task_id": task_id}, format, "Trade")

    def query_portfolio(self, format=""):
        """
        return pd.dataframe
        """
        return self._do_query("pms.query_portfolio", {}, format, "NetPosition",
                              index_column="security")

    def goal_portfolio(self, positions, algo="", algo_param=None, userdata=""):
        """
        positions format:
        [ {"security": "000001.SZ", "ref_price": 10.0, "size" : 100}, ...]
        return (result, message)
        if result is None, message contains error information
        """
        r, msg = self._check_session()
        if not r:
            return (False, msg)
        if isinstance(positions, pd.DataFrame):
            # Flatten a DataFrame (indexed by security) into plain dicts.
            positions = [{"security": positions.index[i],
                          "ref_price": float(positions["ref_price"][i]),
                          "size": int(positions["size"][i])}
                         for i in range(len(positions))]
        rpc_params = {"positions": positions,
                      "algo": algo,
                      "algo_param": json.dumps(algo_param if algo_param is not None else {}),
                      "user": self._username,
                      "userdata": userdata}
        cr = self._remote.call("pms.goal_portfolio", rpc_params)
        return utils.extract_result(cr)

    def basket_order(self, orders, algo="", algo_param=None, userdata=""):
        """
        orders format:
        [ {"security": "000001.SZ", "ref_price": 10.0, "inc_size" : 100}, ...]
        return (result, message)
        if result is None, message contains error information
        """
        r, msg = self._check_session()
        if not r:
            return (False, msg)
        if isinstance(orders, pd.DataFrame):
            orders = [{"security": orders.index[i],
                       "ref_price": float(orders["ref_price"][i]),
                       "inc_size": int(orders["inc_size"][i])}
                      for i in range(len(orders))]
        rpc_params = {"orders": orders,
                      "algo": algo,
                      "algo_param": json.dumps(algo_param if algo_param is not None else {}),
                      "user": self._username,
                      "userdata": userdata}
        cr = self._remote.call("pms.basket_order", rpc_params)
        return utils.extract_result(cr)

    def stop_portfolio(self):
        """
        return (result, message)
        if result is None, message contains error information
        """
        r, msg = self._check_session()
        if not r:
            return (False, msg)
        cr = self._remote.call("pms.stop_portfolio", {})
        return utils.extract_result(cr)

    def query_universe(self, format=""):
        return self._do_query("oms.query_universe", {}, format, "UniverseItem")

    def set_heartbeat(self, interval, timeout):
        """Configure keepalive probing on the underlying connection."""
        # "hearbeat" spelling is the remote client's actual API name.
        self._remote.set_hearbeat_options(interval, timeout)
        print("heartbeat_interval =", self._remote._heartbeat_interval,
              ", heartbeat_timeout =", self._remote._heartbeat_timeout)
| {
"content_hash": "49788f6ff4f30d520f7a4c34365246b6",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 110,
"avg_line_length": 33.298986486486484,
"alnum_prop": 0.5017501141378785,
"repo_name": "mumuwoyou/vnpy-master",
"id": "58f928d21520e6d735e0c119af6bd7fedd5ab335",
"size": "19713",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vnpy/trader/gateway/tkproGateway/TradeApi/trade_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1271"
},
{
"name": "C",
"bytes": "3171235"
},
{
"name": "C++",
"bytes": "8933200"
},
{
"name": "CMake",
"bytes": "63636"
},
{
"name": "Dockerfile",
"bytes": "5928"
},
{
"name": "HTML",
"bytes": "807"
},
{
"name": "Jupyter Notebook",
"bytes": "76901"
},
{
"name": "Makefile",
"bytes": "118013"
},
{
"name": "Objective-C",
"bytes": "22505"
},
{
"name": "PHP",
"bytes": "4107"
},
{
"name": "Python",
"bytes": "6595417"
},
{
"name": "Shell",
"bytes": "3907"
}
],
"symlink_target": ""
} |
import sqlite3
from settings import settings
from dryorm.mixin import Mixin
class SqliteMixin(Mixin):
    """Mixin binding dryorm models to a process-wide SQLite connection.

    The connection to the metadata database is opened lazily on first use
    and shared by every model class that mixes this in.
    """
    # SQLite uses '?' as the bound-parameter placeholder in SQL statements.
    _param_placeholder = '?'
    # Process-wide shared connection (one per interpreter).
    _connection = None

    @staticmethod
    def connect():
        """Open (once) and return the shared SQLite connection."""
        if not SqliteMixin._connection:
            SqliteMixin._connection = sqlite3.connect(settings['METADATA'])
            # print() call form runs on both Python 2 and 3; the original
            # used the Python 2-only statement syntax, which is a
            # SyntaxError under Python 3.
            print(settings['METADATA'])
        return SqliteMixin._connection

    @property
    def connection(self):
        """Lazily attach the shared connection to this instance."""
        if not self._connection:
            self._connection = SqliteMixin.connect()
        return self._connection
| {
"content_hash": "99aea8a29c8599fa8f38954e4f69a306",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 22.80952380952381,
"alnum_prop": 0.7494780793319415,
"repo_name": "jAlpedrinha/dryorm",
"id": "92b2ae8520e8e056e0044eb1d6fba2dba5dd0994",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dryorm/sqlite/mixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14186"
}
],
"symlink_target": ""
} |
# -- Sphinx build configuration for the google-cloud-os-login docs ----------
import sys
import os
import shlex

# Make the package importable for autodoc: the project root sits one
# directory above this configuration file.
sys.path.insert(0, os.path.abspath('..'))

__version__ = '0.1.0'

# -- General configuration --------------------------------------------------

# Sphinx extension modules used when building these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]

# autodoc/autosummary behaviour: merge class and __init__ docstrings,
# include members by default, and generate autosummary stub pages.
autoclass_content = 'both'
autodoc_default_flags = ['members']
autosummary_generate = True

# Templates, source suffix and the root ("master") document.
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# Project identity shown throughout the built docs.
project = u'google-cloud-os-login'
copyright = u'2018 Google LLC'
author = u'Google APIs'

# Full version string (release) and the short X.Y form derived from it.
release = __version__
version = '.'.join(release.split('.')[0:2])

# Language for autogenerated content; None defers to the command line.
language = None

# Never treat build artifacts as documentation sources.
exclude_patterns = ['_build']

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# Render todo:: and todoList:: directives in the output.
todo_include_todos = True

# -- Options for HTML output ------------------------------------------------

# Read the Docs theme for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'

# Base name for the HTML help builder's output file.
htmlhelp_basename = 'google-cloud-os-login-doc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'google-cloud-os-login.tex',
u'google-cloud-os-login Documentation', author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'google-cloud-os-login',
u'google-cloud-os-login Documentation', [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'google-cloud-os-login',
u'google-cloud-os-login Documentation', author, 'google-cloud-os-login',
'GAPIC library for the {metadata.shortName} v1 service', 'APIs'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/latest/', None),
'gax': ('https://gax-python.readthedocs.org/en/latest/', None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| {
"content_hash": "1726379022e473f2afdd3a4d31ef24db",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 79,
"avg_line_length": 33.245791245791246,
"alnum_prop": 0.7053878873810006,
"repo_name": "jonparrott/google-cloud-python",
"id": "1ff2461b145db597d5c5d6827381ab2e97fb5216",
"size": "10247",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "oslogin/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.base import (ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin)
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_parameters_default_constructible,
check_regressors_classifiers_sparse_data,
check_transformer,
check_clustering,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_sparse_data,
check_transformer_pickle,
check_estimators_nan_inf,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_estimators_partial_fit_n_features,
check_cluster_overwrite_params,
check_sparsify_binary_classifier,
check_sparsify_multiclass_classifier,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
    """Verify that estimator introspection never yields abstract base classes."""
    for name, Estimator in all_estimators():
        error_message = ("Base estimators such as {0} should not be included"
                         " in all_estimators").format(name)
        looks_like_base = name.lower().startswith('base')
        assert_false(looks_like_base, msg=error_message)
def test_all_estimators():
    """Every estimator (meta-estimators included) must be
    default-constructible, clonable, and have a working repr."""
    found = all_estimators(include_meta_estimators=True)
    # Meta sanity-check: introspection must discover at least one estimator,
    # otherwise the yielded checks below would silently test nothing.
    assert_greater(len(found), 0)
    for est_name, est_cls in found:
        # some estimators can just not be sensibly default constructed;
        # the check itself knows how to skip those
        yield check_parameters_default_constructible, est_name, est_cls
def test_estimators_sparse_data():
    """Classifiers and regressors must either accept sparse input or raise
    a TypeError with an intelligible error message."""
    predictor_mixins = (ClassifierMixin, RegressorMixin)
    for est_name, est_cls in all_estimators():
        if issubclass(est_cls, predictor_mixins):
            yield check_regressors_classifiers_sparse_data, est_name, est_cls
def test_transformers():
    """Transformers must behave sensibly on a training set: handle (or
    cleanly reject) sparse input, survive pickling, and validate shapes."""
    # These don't actually fit the data, so the fitting-based checks are
    # skipped for them.
    stateless = ('AdditiveChi2Sampler', 'Binarizer', 'Normalizer')
    # These additionally get a pass on the not-an-array input check.
    skip_non_array = stateless + ('PLSCanonical', 'PLSRegression', 'CCA',
                                  'PLSSVD')
    for tname, tcls in all_estimators(type_filter='transformer'):
        # Sparse input must either work or raise an intelligible TypeError.
        yield check_transformer_sparse_data, tname, tcls
        yield check_transformer_pickle, tname, tcls
        if tname not in skip_non_array:
            yield check_transformer_data_not_an_array, tname, tcls
        if tname not in stateless:
            # basic fitting tests
            yield check_transformer, tname, tcls
def test_estimators_nan_inf():
    """Every classifier, regressor, transformer and clusterer must check
    its input for NaNs and infinities."""
    checked_mixins = (ClassifierMixin, RegressorMixin, TransformerMixin,
                      ClusterMixin)
    exempt = CROSS_DECOMPOSITION + ['Imputer']
    for est_name, est_cls in all_estimators():
        if not issubclass(est_cls, checked_mixins):
            continue
        if est_name in exempt:
            continue
        yield check_estimators_nan_inf, est_name, est_cls
def test_clustering():
    """Clustering algorithms must behave sensibly: preserve init params on
    fit, produce valid labelings, and validate partial_fit feature counts."""
    # These cluster the features rather than the samples, so the generic
    # clustering check does not apply to them.
    feature_clusterers = ('WardAgglomeration', "FeatureAgglomeration")
    for alg_name, alg_cls in all_estimators(type_filter='cluster'):
        # fit() must not overwrite parameters given to __init__
        yield check_cluster_overwrite_params, alg_name, alg_cls
        if alg_name not in feature_clusterers:
            yield check_clustering, alg_name, alg_cls
        yield check_estimators_partial_fit_n_features, alg_name, alg_cls
def test_classifiers():
    """Classifiers must cope with non-consecutive classes, non-array data,
    pickling, partial_fit feature checks, and column-shaped targets."""
    # No shape warning is raised in these classifiers, as the column-y
    # interface is used by the forests
    # TODO some complication with -1 label for the tree-based ones
    column_y_allowed = ("MultinomialNB", "LabelPropagation", "LabelSpreading",
                        "DecisionTreeClassifier", "ExtraTreeClassifier")
    for clf_name, clf_cls in all_estimators(type_filter='classifier'):
        # classifiers must handle non-array data
        yield check_classifier_data_not_an_array, clf_name, clf_cls
        # trained on a single label, classifiers must always return it
        yield check_classifiers_one_label, clf_name, clf_cls
        yield check_classifiers_classes, clf_name, clf_cls
        yield check_classifiers_pickle, clf_name, clf_cls
        yield check_estimators_partial_fit_n_features, clf_name, clf_cls
        # basic consistency testing
        yield check_classifiers_train, clf_name, clf_cls
        if clf_name not in column_y_allowed:
            # classifiers must cope with y.shape = (n_samples, 1)
            yield check_classifiers_input_shapes, clf_name, clf_cls
def test_regressors():
    """Basic sanity checks for every regressor."""
    # TODO: test with intercept
    # TODO: test with multiple responses
    for reg_name, reg_cls in all_estimators(type_filter='regressor'):
        # basic testing
        yield check_regressors_train, reg_name, reg_cls
        yield check_regressor_data_not_an_array, reg_name, reg_cls
        yield check_estimators_partial_fit_n_features, reg_name, reg_cls
        # pickling must round-trip: the unpickled estimator gives the same
        # answer as before
        yield check_regressors_pickle, reg_name, reg_cls
        if reg_name != 'CCA':
            # everything but CCA must also handle integer input
            yield check_regressors_int, reg_name, reg_cls
def test_configure():
    """Smoke-test the 'configure' step of setup.

    Executes the checkout's top-level setup.py with the 'config' command
    (which in turn exercises the 'configure' functions of the nested
    setup.pys), restoring cwd and sys.argv afterwards.
    """
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in the scikit
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    # An installed (non-checkout) sklearn has no setup.py; nothing to test.
    if not os.path.exists(setup_filename):
        return
    try:
        os.chdir(setup_path)
        old_argv = sys.argv
        sys.argv = ['setup.py', 'config']
        clean_warning_registry()
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            if PY3:
                with open('setup.py') as f:
                    exec(f.read(), dict(__name__='__main__'))
            else:
                # Python 2 only: execfile does not exist on Python 3.
                execfile('setup.py', dict(__name__='__main__'))
    finally:
        # Always restore process-global state mutated above.
        sys.argv = old_argv
        os.chdir(cwd)
def test_class_weight_classifiers():
    """class_weight must work with consistent semantics across classifiers."""
    classifiers = all_estimators(type_filter='classifier')
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        # keep only estimators that actually expose a class_weight parameter
        weighted = [(cname, ccls) for cname, ccls in classifiers
                    if 'class_weight' in ccls().get_params()]
    for cname, ccls in weighted:
        if cname == "NuSVC":
            # the sparse version has a parameter that doesn't do anything
            continue
        if cname.endswith("NB"):
            # NaiveBayes classifiers have a somewhat different interface.
            # FIXME SOON!
            continue
        yield check_class_weight_classifiers, cname, ccls
def test_class_weight_auto_classifiers():
    """Test that class_weight="auto" improves f1-score"""
    # This test is broken; its success depends on:
    # * a rare fortuitous RNG seed for make_classification; and
    # * the use of binary F1 over a seemingly arbitrary positive class for two
    # datasets, and weighted average F1 for the third.
    # Its expectations need to be clarified and reimplemented.
    raise SkipTest('This test requires redefinition')
    # NOTE: everything below the raise is unreachable; it is kept as a
    # sketch of the intended procedure until the test is redefined.
    classifiers = all_estimators(type_filter='classifier')
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        classifiers = [c for c in classifiers
                       if 'class_weight' in c[1]().get_params().keys()]
    for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
        # create unbalanced dataset
        X, y = make_classification(n_classes=n_classes, n_samples=200,
                                   n_features=10, weights=weights,
                                   random_state=0, n_informative=n_classes)
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        for name, Classifier in classifiers:
            if (name != "NuSVC"
                # the sparse version has a parameter that doesn't do anything
                and not name.startswith("RidgeClassifier")
                # RidgeClassifier behaves unexpected
                # FIXME!
                and not name.endswith("NB")):
                # NaiveBayes classifiers have a somewhat different interface.
                # FIXME SOON!
                yield (check_class_weight_auto_classifiers, name, Classifier,
                       X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
    """class_weight="auto" semantics check for linear classifiers."""
    classifiers = all_estimators(type_filter='classifier')
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        linear_classifiers = [
            (cname, ccls)
            for cname, ccls in classifiers
            if 'class_weight' in ccls().get_params().keys()
            and issubclass(ccls, LinearClassifierMixin)]
    for cname, ccls in linear_classifiers:
        if cname == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses
            # actual CV folds and fits a model for each CV iteration before
            # averaging the coef, so it is expected to not behave exactly
            # like the other linear models.
            continue
        yield check_class_weight_auto_linear_classifier, cname, ccls
def test_estimators_overwrite_params():
    """fit() must not overwrite the parameters passed to __init__."""
    # FIXME! the cross-decomposition estimators -- and in particular
    # GaussianProcess -- still overwrite their params; skip them for now.
    exempt = ('CCA', '_CCA', 'PLSCanonical', 'PLSRegression', 'PLSSVD',
              'GaussianProcess')
    for est_type in ["classifier", "regressor", "transformer"]:
        for est_name, est_cls in all_estimators(type_filter=est_type):
            if est_name not in exempt:
                yield check_estimators_overwrite_params, est_name, est_cls
@ignore_warnings
def test_import_all_consistency():
    """Every name listed in a module's __all__ must actually be defined in
    that module's (or package's) namespace."""
    walked = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                   onerror=lambda _: None)
    module_names = [found_name for _, found_name, _ in walked]
    for module_name in module_names + ['sklearn']:
        if ".tests." in module_name:
            continue
        module = __import__(module_name, fromlist="dummy")
        for public_name in getattr(module, '__all__', ()):
            if getattr(module, public_name, None) is None:
                raise AttributeError(
                    "Module '{0}' has no attribute '{1}'".format(
                        module_name, public_name))
def test_root_import_all_completeness():
    """Every public top-level submodule must appear in sklearn.__all__."""
    EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
    walked = pkgutil.walk_packages(path=sklearn.__path__,
                                   onerror=lambda _: None)
    for _, modname, _ in walked:
        is_nested = '.' in modname
        is_private = modname.startswith('_')
        if is_nested or is_private or modname in EXCEPTIONS:
            continue
        assert_in(modname, sklearn.__all__)
def test_sparsify_estimators():
    """Test that predict works on sparsified estimators.

    Covers regression, binary classification, and multi-class
    classification for every estimator that exposes a ``sparsify`` method.
    """
    # The previous implementation probed ``Estimator.sparsify`` inside a
    # bare ``try/except: pass``, which silently swallowed *every* exception
    # (including KeyboardInterrupt), not just a missing attribute. An
    # explicit hasattr() check expresses the intent and hides nothing else.
    # test regression and binary classification
    for name, Estimator in all_estimators():
        if hasattr(Estimator, 'sparsify'):
            yield check_sparsify_binary_classifier, name, Estimator
    # test multiclass classification
    for name, Classifier in all_estimators(type_filter='classifier'):
        if hasattr(Classifier, 'sparsify'):
            yield check_sparsify_multiclass_classifier, name, Classifier
def test_non_transformer_estimators_n_iter():
    """Non-transformer estimators with a ``max_iter`` parameter must report
    an iteration count (``n_iter_``) of at least 1 after fitting; the actual
    assertion lives in check_non_transformer_estimators_n_iter."""
    # Test that all estimators of type which are non-transformer
    # and which have an attribute of max_iter, return the attribute
    # of n_iter atleast 1.
    for est_type in ['regressor', 'classifier', 'cluster']:
        regressors = all_estimators(type_filter=est_type)
        for name, Estimator in regressors:
            # LassoLars stops early for the default alpha=1.0 for
            # the iris dataset.
            if name == 'LassoLars':
                estimator = Estimator(alpha=0.)
            else:
                estimator = Estimator()
            if hasattr(estimator, "max_iter"):
                # These models are dependent on external solvers like
                # libsvm and accessing the iter parameter is non-trivial.
                if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
                             'RidgeClassifier', 'SVC', 'RandomizedLasso',
                             'LogisticRegressionCV']):
                    continue
                # Tested in test_transformer_n_iter below
                elif name in CROSS_DECOMPOSITION or (
                    name in ['LinearSVC', 'LogisticRegression']
                ):
                    continue
                else:
                    # Multitask models related to ENet cannot handle
                    # if y is mono-output.
                    yield (check_non_transformer_estimators_n_iter,
                           name, estimator, 'Multi' in name)
def test_transformer_n_iter():
    """Transformers exposing ``max_iter`` must report a sane iteration
    count after fitting (delegated to check_transformer_n_iter)."""
    # These depend on external solvers, so accessing the iteration count
    # is non-trivial; they are skipped.
    external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
                       'RandomizedLasso', 'LogisticRegressionCV']
    for tname, tcls in all_estimators(type_filter='transformer'):
        instance = tcls()
        if tname in external_solver:
            continue
        if hasattr(instance, "max_iter"):
            yield check_transformer_n_iter, tname, instance
| {
"content_hash": "f0284991933bd2feb825f4ddf8e55dea",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 83,
"avg_line_length": 41.659033078880405,
"alnum_prop": 0.6399951136086001,
"repo_name": "evgchz/scikit-learn",
"id": "0dbf18e46bb0c4ff485f3db57e562a0d2ad4ae63",
"size": "16372",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/tests/test_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18469430"
},
{
"name": "C++",
"bytes": "1808975"
},
{
"name": "JavaScript",
"bytes": "22298"
},
{
"name": "Makefile",
"bytes": "4901"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5642425"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
} |
"""Custom marshmallow fields."""
from .datetime import DateString
from .generated import GenFunction, GenMethod
from .marshmallow_contrib import Function, Method
from .persistentidentifier import PersistentIdentifier
from .sanitizedhtml import SanitizedHTML
from .sanitizedunicode import SanitizedUnicode
from .trimmedstring import TrimmedString
# Explicit public API of this subpackage: the set of field classes exported
# via a star import. Keep in sync with the imports above.
__all__ = (
    "DateString",
    "Function",
    "GenFunction",
    "GenMethod",
    "Method",
    "PersistentIdentifier",
    "SanitizedHTML",
    "SanitizedUnicode",
    "TrimmedString",
)
| {
"content_hash": "b2cd1ce2fbf7dfb5347edaa5fa645b8f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 54,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.75,
"repo_name": "inveniosoftware/invenio-records-rest",
"id": "3144d39e315aae75e38796fccc3bc5e42aaf4700",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_records_rest/schemas/fields/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "297117"
},
{
"name": "Shell",
"bytes": "778"
}
],
"symlink_target": ""
} |
__author__ = 'Yaroslav Halchenko'
__license__ = 'BSD'
from nose.tools import assert_equal
try:
from sklearn import *
_top_import_error = None
except Exception, e:
_top_import_error = e
def test_import_skl():
    """Test whether the module-level ``from sklearn import *`` failed.

    "import *" is discouraged outside of the module level, hence we
    rely on the ``_top_import_error`` variable set up above: it is None
    on success and holds the raised exception otherwise.
    """
    assert_equal(_top_import_error, None)
| {
"content_hash": "48463e03e2ece6f52b30d621a874fc39",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 23.36842105263158,
"alnum_prop": 0.6756756756756757,
"repo_name": "ominux/scikit-learn",
"id": "18fb44b2a0601732fef3be269a53da7913158f99",
"size": "629",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/tests/test_init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "455969"
},
{
"name": "C++",
"bytes": "240380"
},
{
"name": "Makefile",
"bytes": "1411"
},
{
"name": "Python",
"bytes": "2064853"
},
{
"name": "Shell",
"bytes": "486"
}
],
"symlink_target": ""
} |
# Ansible module metadata: community-supported module in 'preview' status.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_pool
short_description: "Manages F5 BIG-IP LTM pools"
description:
- Manages F5 BIG-IP LTM pools via iControl SOAP API
version_added: 1.2
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
requirements:
- bigsuds
options:
description:
description:
- Specifies descriptive text that identifies the pool.
required: false
version_added: "2.3"
state:
description:
- Pool/pool member state
required: false
default: present
choices:
- present
- absent
aliases: []
name:
description:
- Pool name
required: true
default: null
choices: []
aliases:
- pool
partition:
description:
- Partition of pool/pool member
required: false
default: 'Common'
choices: []
aliases: []
lb_method:
description:
- Load balancing method
version_added: "1.3"
required: False
default: 'round_robin'
choices:
- round_robin
- ratio_member
- least_connection_member
- observed_member
- predictive_member
- ratio_node_address
- least_connection_node_address
- fastest_node_address
- observed_node_address
- predictive_node_address
- dynamic_ratio
- fastest_app_response
- least_sessions
- dynamic_ratio_member
- l3_addr
- weighted_least_connection_member
- weighted_least_connection_node_address
- ratio_session
- ratio_least_connection_member
- ratio_least_connection_node_address
aliases: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "1.3"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on
newly added or freshly detected up pool members
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
reselect_tries:
description:
- Sets the number of times the system tries to contact a pool member
after a passive failure
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
service_down_action:
description:
- Sets the action to take when node goes down in pool
version_added: "1.3"
required: False
default: null
choices:
- none
- reset
- drop
- reselect
aliases: []
host:
description:
- "Pool member IP"
required: False
default: null
choices: []
aliases:
- address
port:
description:
- Pool member port
required: False
default: null
choices: []
aliases: []
extends_documentation_fragment: f5
'''
# Usage examples rendered by ansible-doc.
# FIX: the original examples wrote ``"{{ ansible_default_ipv4["address"] }}"``
# -- nested double quotes inside a double-quoted YAML scalar, which is
# invalid YAML and fails to parse when copied into a playbook. The inner
# subscript must use single quotes.
EXAMPLES = '''
- name: Create pool
  bigip_pool:
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      state: "present"
      name: "my-pool"
      partition: "Common"
      lb_method: "least_connection_member"
      slow_ramp_time: 120
  delegate_to: localhost
- name: Modify load balancer method
  bigip_pool:
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      state: "present"
      name: "my-pool"
      partition: "Common"
      lb_method: "round_robin"
- name: Add pool member
  bigip_pool:
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      state: "present"
      name: "my-pool"
      partition: "Common"
      host: "{{ ansible_default_ipv4['address'] }}"
      port: 80
- name: Remove pool member from pool
  bigip_pool:
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      state: "absent"
      name: "my-pool"
      partition: "Common"
      host: "{{ ansible_default_ipv4['address'] }}"
      port: 80
- name: Delete pool
  bigip_pool:
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      state: "absent"
      name: "my-pool"
      partition: "Common"
'''
# This module documents no return values.
RETURN = '''
'''
def pool_exists(api, pool):
    """Return True if the named pool exists on the BIG-IP, else False.

    The SOAP API offers no direct existence check, so probe the pool's
    object status and interpret a "was not found" failure as absence.
    """
    try:
        api.LocalLB.Pool.get_object_status(pool_names=[pool])
    except bigsuds.OperationFailed as e:
        if "was not found" not in str(e):
            # genuine exception
            raise
        return False
    return True
def create_pool(api, pool, lb_method):
    """Create an empty pool using the given load-balancing method.

    Creation requires an lb_method, but we only want to default it here
    (not on subsequent runs), so fall back to round robin when unset.
    """
    method = lb_method or 'round_robin'
    api.LocalLB.Pool.create_v2(
        pool_names=[pool],
        lb_methods=["LB_METHOD_%s" % method.strip().upper()],
        members=[[]])
def remove_pool(api, pool):
    """Delete the named pool from the BIG-IP."""
    api.LocalLB.Pool.delete_pool(pool_names=[pool])
def get_lb_method(api, pool):
    """Return the pool's load-balancing method as a short lowercase name
    (the API's LB_METHOD_ prefix stripped, e.g. 'round_robin')."""
    raw = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
    return raw.strip().replace('LB_METHOD_', '').lower()
def set_lb_method(api, pool, lb_method):
    """Set the pool's load-balancing method from a short lowercase name."""
    full_name = "LB_METHOD_%s" % lb_method.strip().upper()
    api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[full_name])
def get_monitors(api, pool):
    """Return (monitor_type, quorum, monitor_templates) for the pool.

    monitor_type is the lowercase suffix of the API's
    MONITOR_RULE_TYPE_* constant.
    """
    rule = api.LocalLB.Pool.get_monitor_association(
        pool_names=[pool])[0]['monitor_rule']
    rule_type = rule['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
    return (rule_type, rule['quorum'], rule['monitor_templates'])
def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
    """Associate a monitor rule (type, quorum, templates) with the pool."""
    rule = {
        'type': "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper(),
        'quorum': quorum,
        'monitor_templates': monitor_templates,
    }
    association = {'pool_name': pool, 'monitor_rule': rule}
    api.LocalLB.Pool.set_monitor_association(
        monitor_associations=[association])
def get_slow_ramp_time(api, pool):
    """Return the pool's slow-ramp time in seconds."""
    return api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
def set_slow_ramp_time(api, pool, seconds):
    """Set the pool's slow-ramp time in seconds."""
    api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
def get_reselect_tries(api, pool):
    """Return how many times the system retries a member after a passive
    failure."""
    return api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0]
def set_reselect_tries(api, pool, tries):
    """Set how many times the system retries a member after a passive
    failure."""
    api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries])
def get_action_on_service_down(api, pool):
    """Return the pool's service-down action as a short lowercase name
    (the API's SERVICE_DOWN_ACTION_ prefix stripped)."""
    raw = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
    return raw.split("SERVICE_DOWN_ACTION_")[-1].lower()
def set_action_on_service_down(api, pool, action):
    """Set the action taken when a node in the pool goes down."""
    full_action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
    api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool],
                                                actions=[full_action])
def member_exists(api, pool, address, port):
    """Return True if address:port is a member of the pool, else False.

    The SOAP API has no direct existence call, so probe the member's
    object status and treat a "was not found" failure as absence.
    """
    probe = [{'address': address, 'port': port}]
    try:
        api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
                                                  members=[probe])
    except bigsuds.OperationFailed as e:
        if "was not found" not in str(e):
            # genuine exception
            raise
        return False
    return True
def delete_node_address(api, address):
    """Attempt to delete a node address.

    Returns True on success, False if the node is still referenced by a
    pool member (in which case deletion is legitimately refused).
    """
    try:
        api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
    except bigsuds.OperationFailed as e:
        if "is referenced by a member of pool" not in str(e):
            # genuine exception
            raise
        return False
    return True
def remove_pool_member(api, pool, address, port):
    """Remove the address:port member from the pool."""
    member = {'address': address, 'port': port}
    api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[[member]])
def add_pool_member(api, pool, address, port):
    """Add an address:port member to the pool."""
    member = {'address': address, 'port': port}
    api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[[member]])
def set_description(api, pool, description):
    """Set the pool's descriptive text."""
    api.LocalLB.Pool.set_description(
        pool_names=[pool], descriptions=[description]
    )
def get_description(api, pool):
    """Return the pool's descriptive text."""
    return api.LocalLB.Pool.get_description(pool_names=[pool])[0]
def main():
    """Ansible module entry point: ensure a BIG-IP pool (and optionally a
    single pool member) is 'present' or 'absent' via the bigsuds/iControl
    API, then exit through module.exit_json/fail_json with a 'changed' dict.
    """
    lb_method_choices = ['round_robin', 'ratio_member',
                         'least_connection_member', 'observed_member',
                         'predictive_member', 'ratio_node_address',
                         'least_connection_node_address',
                         'fastest_node_address', 'observed_node_address',
                         'predictive_node_address', 'dynamic_ratio',
                         'fastest_app_response', 'least_sessions',
                         'dynamic_ratio_member', 'l3_addr',
                         'weighted_least_connection_member',
                         'weighted_least_connection_node_address',
                         'ratio_session', 'ratio_least_connection_member',
                         'ratio_least_connection_node_address']
    monitor_type_choices = ['and_list', 'm_of_n']
    service_down_choices = ['none', 'reset', 'drop', 'reselect']
    # f5_argument_spec() supplies the shared F5 connection parameters
    # (server, server_port, user, password, state, partition, validate_certs).
    argument_spec = f5_argument_spec()
    meta_args = dict(
        name=dict(type='str', required=True, aliases=['pool']),
        lb_method=dict(type='str', choices=lb_method_choices),
        monitor_type=dict(type='str', choices=monitor_type_choices),
        quorum=dict(type='int'),
        monitors=dict(type='list'),
        slow_ramp_time=dict(type='int'),
        reselect_tries=dict(type='int'),
        service_down_action=dict(type='str', choices=service_down_choices),
        host=dict(type='str', aliases=['address']),
        port=dict(type='int'),
        description=dict(type='str')
    )
    argument_spec.update(meta_args)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")
    if module.params['validate_certs']:
        # Certificate verification needs ssl.SSLContext (python >= 2.7.9).
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
    server = module.params['server']
    server_port = module.params['server_port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    partition = module.params['partition']
    validate_certs = module.params['validate_certs']
    description = module.params['description']
    name = module.params['name']
    # Fully-qualified pool name, e.g. '/Common/mypool'.
    pool = fq_name(partition, name)
    lb_method = module.params['lb_method']
    if lb_method:
        lb_method = lb_method.lower()
    monitor_type = module.params['monitor_type']
    if monitor_type:
        monitor_type = monitor_type.lower()
    quorum = module.params['quorum']
    monitors = module.params['monitors']
    if monitors:
        # Rebuild the list with partition-qualified monitor names.
        monitors = []
        for monitor in module.params['monitors']:
            monitors.append(fq_name(partition, monitor))
    slow_ramp_time = module.params['slow_ramp_time']
    reselect_tries = module.params['reselect_tries']
    service_down_action = module.params['service_down_action']
    if service_down_action:
        service_down_action = service_down_action.lower()
    host = module.params['host']
    address = fq_name(partition, host)
    port = module.params['port']
    # sanity check user supplied values
    if (host and port is None) or (port is not None and not host):
        module.fail_json(msg="both host and port must be supplied")
    if port is not None and (0 > port or port > 65535):
        module.fail_json(msg="valid ports must be in range 0 - 65535")
    if monitors:
        if len(monitors) == 1:
            # set default required values for single monitor
            quorum = 0
            monitor_type = 'single'
        elif len(monitors) > 1:
            if not monitor_type:
                module.fail_json(msg="monitor_type required for monitors > 1")
            if monitor_type == 'm_of_n' and not quorum:
                module.fail_json(msg="quorum value required for monitor_type m_of_n")
            if monitor_type != 'm_of_n':
                quorum = 0
    elif monitor_type:
        # no monitors specified but monitor_type exists
        module.fail_json(msg="monitor_type require monitors parameter")
    elif quorum is not None:
        # no monitors specified but quorum exists
        module.fail_json(msg="quorum requires monitors parameter")
    try:
        api = bigip_api(server, user, password, validate_certs, port=server_port)
        result = {'changed': False} # default
        if state == 'absent':
            # NOTE(review): `port` is tested for truthiness here, so a member
            # on port 0 can never be removed even though the 'present' branch
            # below explicitly supports adding port 0 -- confirm whether that
            # asymmetry is intentional.
            if host and port and pool:
                # member removal takes precedent
                if pool_exists(api, pool) and member_exists(api, pool, address, port):
                    if not module.check_mode:
                        remove_pool_member(api, pool, address, port)
                        deleted = delete_node_address(api, address)
                        result = {'changed': True, 'deleted': deleted}
                    else:
                        result = {'changed': True}
            elif pool_exists(api, pool):
                # no host/port supplied, must be pool removal
                if not module.check_mode:
                    # hack to handle concurrent runs of module
                    # pool might be gone before we actually remove it
                    try:
                        remove_pool(api, pool)
                        result = {'changed': True}
                    except bigsuds.OperationFailed as e:
                        if "was not found" in str(e):
                            result = {'changed': False}
                        else:
                            # genuine exception
                            raise
                else:
                    # check-mode return value
                    result = {'changed': True}
        elif state == 'present':
            update = False
            if not pool_exists(api, pool):
                # pool does not exist -- need to create it
                if not module.check_mode:
                    # a bit of a hack to handle concurrent runs of this module.
                    # even though we've checked the pool doesn't exist,
                    # it may exist by the time we run create_pool().
                    # this catches the exception and does something smart
                    # about it!
                    try:
                        create_pool(api, pool, lb_method)
                        result = {'changed': True}
                    except bigsuds.OperationFailed as e:
                        if "already exists" in str(e):
                            update = True
                        else:
                            # genuine exception
                            raise
                    else:
                        # pool was created: apply the optional attributes now.
                        if monitors:
                            set_monitors(api, pool, monitor_type, quorum, monitors)
                        if slow_ramp_time:
                            set_slow_ramp_time(api, pool, slow_ramp_time)
                        if reselect_tries:
                            set_reselect_tries(api, pool, reselect_tries)
                        if service_down_action:
                            set_action_on_service_down(api, pool, service_down_action)
                        if host and port:
                            add_pool_member(api, pool, address, port)
                        if description:
                            set_description(api, pool, description)
                else:
                    # check-mode return value
                    result = {'changed': True}
            else:
                # pool exists -- potentially modify attributes
                update = True
            if update:
                if lb_method and lb_method != get_lb_method(api, pool):
                    if not module.check_mode:
                        set_lb_method(api, pool, lb_method)
                    result = {'changed': True}
                if monitors:
                    t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool)
                    if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
                        if not module.check_mode:
                            set_monitors(api, pool, monitor_type, quorum, monitors)
                        result = {'changed': True}
                if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool):
                    if not module.check_mode:
                        set_slow_ramp_time(api, pool, slow_ramp_time)
                    result = {'changed': True}
                if reselect_tries and reselect_tries != get_reselect_tries(api, pool):
                    if not module.check_mode:
                        set_reselect_tries(api, pool, reselect_tries)
                    result = {'changed': True}
                if service_down_action and service_down_action != get_action_on_service_down(api, pool):
                    if not module.check_mode:
                        set_action_on_service_down(api, pool, service_down_action)
                    result = {'changed': True}
                if (host and port) and not member_exists(api, pool, address, port):
                    if not module.check_mode:
                        add_pool_member(api, pool, address, port)
                    result = {'changed': True}
                # Separate clause so that port 0 (falsy) members are added too.
                if (host and port == 0) and not member_exists(api, pool, address, port):
                    if not module.check_mode:
                        add_pool_member(api, pool, address, port)
                    result = {'changed': True}
                if description and description != get_description(api, pool):
                    if not module.check_mode:
                        set_description(api, pool, description)
                    result = {'changed': True}
    except Exception as e:
        module.fail_json(msg="received exception: %s" % e)
    module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
# Script entry point (the module_utils imports sit just above, following the
# old Ansible module convention of importing at the bottom of the file).
if __name__ == '__main__':
    main()
| {
"content_hash": "a36344d06cbdbe9baa8dec1f33af5530",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 166,
"avg_line_length": 33.717543859649126,
"alnum_prop": 0.5708413549092044,
"repo_name": "mcgonagle/ansible_f5",
"id": "846ad2ce0e87efac26ac901fffc668fe74df877e",
"size": "19959",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "library_old/bigip_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2293136"
},
{
"name": "Shell",
"bytes": "3752"
},
{
"name": "Tcl",
"bytes": "80"
}
],
"symlink_target": ""
} |
"""
@author: achm
Final ensemble of strong and weak model, weight based on ../ensemble_weight
"""
import numpy as np
import pandas as pd
import sys
sys.path.append('../input')
import evaluation
import cPickle
import copy
import glob
import os
# NOTE(review): this script is Python 2 only -- it uses print statements and
# relies on filter() returning a sortable list below.
print("Load test data using pandas")
test = pd.read_csv("../input/test.csv")
################################################################################
# Load weak model submission
print("Load weak model")
ep = 1500
# Two final submission path
search_dir = "../weak_model/output/weak_prediction_ep_%i_a1_0_a2_10" %ep
#search_dir = "../weak_model/output/weak_prediction_ep_%i_a1_5_a2_5" %ep
# Pick the most recently modified file matching the prefix.
temp_file = filter(os.path.isfile, glob.glob(search_dir + "*"))
temp_file.sort(key=lambda x: os.path.getmtime(x))
weak_model_sub = pd.read_csv(temp_file[-1])
# Double Check
# Sanity checks: same row count and ids as the test set, predictions in [0, 1].
if weak_model_sub.shape[0] != test.shape[0]:
    print "ERROR!"
    sys.exit(0)
if not all(weak_model_sub['id'] == test['id']):
    print "ERROR!"
    sys.exit(0)
if not all(weak_model_sub['prediction'] <= 1):
    print "ERROR!"
    sys.exit(0)
if not all(weak_model_sub['prediction'] >= 0):
    print "ERROR!"
    sys.exit(0)
################################################################################
# Load Semi-Strong model submission
print("Load Semi-strong model")
semi_strong_model_sub = pd.read_csv("../semi_strong_model/output/semi_strong_ensemble_submission.csv")
# Double Check
# Same sanity checks as for the weak model submission.
if semi_strong_model_sub.shape[0] != test.shape[0]:
    print "ERROR!"
    sys.exit(0)
if not all(semi_strong_model_sub['id'] == test['id']):
    print "ERROR!"
    sys.exit(0)
if not all(semi_strong_model_sub['prediction'] <= 1):
    print "ERROR!"
    sys.exit(0)
if not all(semi_strong_model_sub['prediction'] >= 0):
    print "ERROR!"
    sys.exit(0)
################################################################################
# Load strong model submission
print("Load strong model")
strong_model_sub = pd.read_csv("../strong_model/output/strong_ensemble_submission.csv")
# Double Check
# Same sanity checks as for the weak model submission.
if strong_model_sub.shape[0] != test.shape[0]:
    print "ERROR!"
    sys.exit(0)
if not all(strong_model_sub['id'] == test['id']):
    print "ERROR!"
    sys.exit(0)
if not all(strong_model_sub['prediction'] <= 1):
    print "ERROR!"
    sys.exit(0)
if not all(strong_model_sub['prediction'] >= 0):
    print "ERROR!"
    sys.exit(0)
################################################################################
# Load ensemble weighting
print("Load ensemble weighting")
temp_epoch=300
# Per-row weight: how much to trust the strong model for each test id.
ensemble = pd.read_csv("../ensemble_weight/output/ensemble_weight_epoch_%i.csv" %temp_epoch)
# Double Check
if ensemble.shape[0] != test.shape[0]:
    print "ERROR!"
    sys.exit(0)
if not all(ensemble['id'] == test['id']):
    print "ERROR!"
    sys.exit(0)
################################################################################
# Setting
# `option` selects the blending scheme below; `rho` splits the strong share
# between the strong (1-rho) and semi-strong (rho) predictions.
option = 7
rho = 0
def sigmoid_norm(x, norm):
    """Logistic squash centred at x=0.5: maps x through
    sigmoid(x*2*norm - norm), with `norm` controlling the steepness."""
    z = x * (2 * norm) - norm
    return 1 / (1 + np.exp(-z))
def sigmoid_shift_slope(x, target, slope):
    """Logistic curve centred at `target` with steepness `slope`;
    returns 0.5 exactly at x == target."""
    shifted = (x - target) * slope
    return 1 / (1 + np.exp(-shifted))
# Ensemble Signal processing
if option==0: # simple weighting
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ((1-rho)*ensemble['weight']*ensemble['strong_pred'] +
rho*ensemble['weight']*ensemble['semi_strong_pred'] +
(1-ensemble['weight'])*ensemble['weak_pred'])
elif option ==1: #absulute cutoff
threshold = 0.15
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ((1.0*(ensemble['weight']>threshold)) * ensemble['strong_pred'] * (1-rho)+
(1.0*(ensemble['weight']>threshold)) * ensemble['semi_strong_pred'] * (rho) +
(1.0*(ensemble['weight']<=threshold)) * ensemble['weak_pred'])
elif option ==2: #absulute cutoff stacking
threshold = 0.15
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ((1.0*(ensemble['weight']>threshold)) * (ensemble['strong_pred']*(1-rho) + ensemble['semi_strong_pred']*rho + ensemble['weak_pred'])/2. +
(1.0*(ensemble['weight']<=threshold)) * ensemble['weak_pred'] )
elif option ==3: # cutoff with weighting
threshold = 0.15
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ( (1.0*(ensemble['weight']>threshold)) * (ensemble['weight']*ensemble['strong_pred']*(1-rho) +
ensemble['semi_strong_pred']*rho + (1-ensemble['weight'])*ensemble['weak_pred']) +
(1.0*(ensemble['weight']<=threshold)) * ensemble['weak_pred'] )
elif option ==4: # sigmoid norm
s_norm = 10
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ( sigmoid_norm(ensemble['weight'], norm=s_norm)*(ensemble['strong_pred']*(1-rho) + ensemble['semi_strong_pred']*rho) +
(1-sigmoid_norm(ensemble['weight'], norm=s_norm))*ensemble['weak_pred'] )
elif option ==5: # Tanh
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ( np.tanh(ensemble['weight'])*(ensemble['strong_pred']*(1-rho) + ensemble['semi_strong_pred']*rho) +
(1-np.tanh(ensemble['weight']))*ensemble['weak_pred'] )
elif option ==6: # cutoff with Sigmoid
threshold = 0.15
s_norm = 10
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ( (1.0*(ensemble['weight']>threshold)) *
(sigmoid(ensemble['weight'], norm=s_norm)*(ensemble['strong_pred']*(1-rho) + ensemble['semi_strong_pred']*rho) +
(1-sigmoid(ensemble['weight'], norm=s_norm))*ensemble['weak_pred']) +
(1.0*ensemble['weight']<=threshold) * ensemble['weak_pred'] )
elif option ==7: # Shifted Sigmoid with slope control
target = 0.092
slope = 50
ensemble['strong_pred'] = np.array(strong_model_sub['prediction'])
ensemble['semi_strong_pred'] = np.array(semi_strong_model_sub['prediction'])
ensemble['weak_pred'] = np.array(weak_model_sub['prediction'])
ensemble['prediction'] = ( sigmoid_shift_slope(ensemble['weight'], target, slope) * (ensemble['strong_pred']*(1-rho) + ensemble['semi_strong_pred']*rho) +
(1-sigmoid_shift_slope(ensemble['weight'], target, slope)) * ensemble['weak_pred'] )
if not all(ensemble['prediction'] <= 1):
print "ERROR!"
sys.exit(0)
if not all(ensemble['prediction'] >= 0):
print "ERROR!"
sys.exit(0)
# Write final output
# NOTE(review): options 0 and 5 never define `threshold`, so the final `else`
# branch below would raise NameError for them -- only options 1-3 (which set
# threshold) can safely reach it.  Confirm before reusing with those options.
print "Generate submission"
if option ==4:
    with open('./output/ensemblesw_ep_%i_option_%i_norm_%i.csv' %(ep, option, s_norm), 'w') as f:
        f.write('id,prediction\n')
        for ID, p in zip(ensemble['id'], ensemble['prediction']):
            f.write('%s,%.8f\n' % (ID, p))
elif option ==6:
    with open('./output/ensemblesw_ep_%i_option_%i_tshold_%i_norm_%i.csv' %(ep, option, threshold*1000, s_norm), 'w') as f:
        f.write('id,prediction\n')
        for ID, p in zip(ensemble['id'], ensemble['prediction']):
            f.write('%s,%.8f\n' % (ID, p))
elif option ==7:
    with open('./output/ensemblesw_ep_%i_option_%i_ep2_%i_param_%i_%i.csv' %(ep, option, temp_epoch, target*1000, slope), 'w') as f:
        f.write('id,prediction\n')
        for ID, p in zip(ensemble['id'], ensemble['prediction']):
            f.write('%s,%.8f\n' % (ID, p))
else:
    with open('./output/ensemblesw_ep_%i_option_%i_tshold_%i_ep2_%i.csv' %(ep, option, threshold*1000, temp_epoch), 'w') as f:
        f.write('id,prediction\n')
        for ID, p in zip(ensemble['id'], ensemble['prediction']):
            f.write('%s,%.8f\n' % (ID, p))
| {
"content_hash": "cd2c1e42b10d6f98d1602352b9b488f6",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 166,
"avg_line_length": 45.25373134328358,
"alnum_prop": 0.5798153034300791,
"repo_name": "achm6174/kaggle-physics-tau",
"id": "fe12ad06b35d2d54c70f3169192c6e64ab3f95e8",
"size": "9096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ensemble_model/main_ensemble_1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66795"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Atom1Feed
from django.utils.html import strip_tags
from mezzanine.core.templatetags.mezzanine_tags import richtext_filters
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.generic.models import Keyword
from mezzanine.pages.models import Page
from mezzanine.conf import settings
from mezzanine.utils.html import absolute_urls
User = get_user_model()
class PostsRSS(Feed):
    """
    RSS feed for all blog posts, optionally filtered by tag, category
    or author username (each passed as a keyword argument).
    """

    def __init__(self, *args, **kwargs):
        """
        Use the title and description of the Blog page for the feed's
        title and description. If the blog page has somehow been
        removed, fall back to the ``SITE_TITLE`` and ``SITE_TAGLINE``
        settings.
        """
        self.tag = kwargs.pop("tag", None)
        self.category = kwargs.pop("category", None)
        self.username = kwargs.pop("username", None)
        super(PostsRSS, self).__init__(*args, **kwargs)
        # The feed is only exposed when the blog page does not require login.
        self._public = True
        try:
            page = Page.objects.published().get(slug=settings.BLOG_SLUG)
        except Page.DoesNotExist:
            page = None
        else:
            self._public = not page.login_required
        if self._public:
            # Load any DB-editable settings before reading them below.
            settings.use_editable()
            if page is not None:
                self._title = "%s | %s" % (page.title, settings.SITE_TITLE)
                self._description = strip_tags(page.description)
            else:
                self._title = settings.SITE_TITLE
                self._description = settings.SITE_TAGLINE

    def title(self):
        return self._title

    def description(self):
        return self._description

    def link(self):
        return reverse("blog_post_list")

    def items(self):
        """Published posts matching the optional tag/category/username
        filters, capped at ``BLOG_RSS_LIMIT`` entries."""
        if not self._public:
            return []
        blog_posts = BlogPost.objects.published().select_related("user")
        if self.tag:
            tag = get_object_or_404(Keyword, slug=self.tag)
            blog_posts = blog_posts.filter(keywords__keyword=tag)
        if self.category:
            category = get_object_or_404(BlogCategory, slug=self.category)
            blog_posts = blog_posts.filter(categories=category)
        if self.username:
            author = get_object_or_404(User, username=self.username)
            blog_posts = blog_posts.filter(user=author)
        limit = settings.BLOG_RSS_LIMIT
        if limit is not None:
            # Reuse the value read above instead of hitting settings again.
            blog_posts = blog_posts[:limit]
        return blog_posts

    def item_description(self, item):
        description = richtext_filters(item.content)
        # If the absolute_urls filter isn't already configured as a richtext
        # filter, apply it here so relative links work inside feed readers.
        absolute_urls_name = "mezzanine.utils.html.absolute_urls"
        if absolute_urls_name not in settings.RICHTEXT_FILTERS:
            description = absolute_urls(description)
        return description

    def categories(self):
        if not self._public:
            return []
        return BlogCategory.objects.all()

    def item_author_name(self, item):
        return item.user.get_full_name() or item.user.username

    def item_author_link(self, item):
        username = item.user.username
        return reverse("blog_post_list_author", kwargs={"username": username})

    def item_pubdate(self, item):
        return item.publish_date

    def item_categories(self, item):
        return item.categories.all()
class PostsAtom(PostsRSS):
    """
    Atom feed for all blog posts.
    """
    # Serve Atom 1.0 instead of the default RSS 2.0 format.
    feed_type = Atom1Feed
    def subtitle(self):
        # Atom uses "subtitle" where RSS uses "description".
        return self.description()
| {
"content_hash": "e1ac00d3bffc78fb8fe5f01cf17086d1",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 32.771929824561404,
"alnum_prop": 0.6354389721627409,
"repo_name": "Kniyl/mezzanine",
"id": "280330c9d8d83fb0e0090021bbca6eeb3c718903",
"size": "3736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/blog/feeds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "345"
},
{
"name": "CSS",
"bytes": "104823"
},
{
"name": "HTML",
"bytes": "88734"
},
{
"name": "JavaScript",
"bytes": "250115"
},
{
"name": "Nginx",
"bytes": "2261"
},
{
"name": "Python",
"bytes": "1173765"
}
],
"symlink_target": ""
} |
"""Certificate chain where the intermediate has a basic constraints extension
that indicates it is NOT a CA."""
import sys
sys.path += ['../..']
import gencerts
# Self-signed root certificate.
root = gencerts.create_self_signed_root_certificate('Root')
# Intermediate with incorrect basic constraints.
# CA:false on an issuing certificate is the (deliberate) error under test:
# a leaf signed by this intermediate must fail chain validation.
intermediate = gencerts.create_intermediate_certificate('Intermediate', root)
intermediate.get_extensions().set_property('basicConstraints',
                                           'critical,CA:false')
# Target certificate.
target = gencerts.create_end_entity_certificate('Target', intermediate)
# Write the chain leaf-first, using this module's docstring as the header.
chain = [target, intermediate, root]
gencerts.write_chain(__doc__, chain, 'chain.pem')
| {
"content_hash": "e25960b4569d72b48a64bf540164e1c7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 33.04761904761905,
"alnum_prop": 0.7190201729106628,
"repo_name": "chromium/chromium",
"id": "f130ad6e63ee4f15b25074c9594f284d0d522f03",
"size": "857",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "net/data/verify_certificate_chain_unittest/intermediate-basic-constraints-ca-false/generate-chains.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sqlite3

# Build a throwaway in-memory database.  executescript() runs the whole
# multi-statement batch (schema + seed row) in a single call.
con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.executescript("""
    create table person(
        firstname,
        lastname,
        age
    );
    create table book(
        title,
        author,
        published
    );
    insert into book(title, author, published)
    values (
        'Dirk Gently''s Holistic Detective Agency',
        'Douglas Adams',
        1987
    );
    """)
cur.execute("select * from book")
# BUG FIX: the Python-2-only `print cur.fetchall()` statement is a
# SyntaxError on Python 3; the function form prints identically on both.
print(cur.fetchall())
"content_hash": "3c8d4adc3073d63ae752dc6f874795eb",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 52,
"avg_line_length": 19.615384615384617,
"alnum_prop": 0.5215686274509804,
"repo_name": "janusnic/21v-python",
"id": "cab81861d46f9709273e75cea9e8489b067f87ec",
"size": "537",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_14/cur4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "990972"
},
{
"name": "SQLPL",
"bytes": "147"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
import os
import pytest
import django
from configurations import importer
# Default Django settings/configuration for the test run; any values already
# present in the environment take precedence.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', 'StandaloneTest')
def pytest_configure():
    """Install django-configurations' importer and initialise Django."""
    importer.install()
    # Setup command is only available in Django 1.7+ but is required
    # to properly initialise the Django app config.
    # BUG FIX: compare the full version tuple -- the old check
    # ``django.VERSION[1] >= 7`` breaks on Django 2.0+ where the minor
    # version resets to 0.
    if django.VERSION >= (1, 7):
        django.setup()
@pytest.fixture
def webtest_csrf_checks():
    # Overridable fixture: a test module can redefine this to return False
    # and disable WebTest's CSRF checking for the `webtest` fixture below.
    return True
@pytest.fixture(scope='function')
def webtest(request, webtest_csrf_checks, transactional_db):
    """
    Provide the "app" object from WebTest as a fixture
    Taken and adapted from https://gist.github.com/magopian/6673250
    """
    from django_webtest import DjangoTestApp, WebTestMixin
    # Patch settings on startup
    wtm = WebTestMixin()
    wtm.csrf_checks = webtest_csrf_checks
    wtm._patch_settings()
    # Unpatch settings on teardown
    request.addfinalizer(wtm._unpatch_settings)
    return DjangoTestApp()
@pytest.fixture
def splinter_webdriver():
    # Browser backend used by splinter-based UI tests.
    return 'firefox'
| {
"content_hash": "cfbc1967a5664d94aa1ac70568c86c47",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 68,
"avg_line_length": 23.448979591836736,
"alnum_prop": 0.72236727589208,
"repo_name": "tangentlabs/django-fancypages",
"id": "05af486aa9a9bef012b869d1b2f48c95e67bfa60",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126894"
},
{
"name": "JavaScript",
"bytes": "532293"
},
{
"name": "Makefile",
"bytes": "8726"
},
{
"name": "Python",
"bytes": "635566"
},
{
"name": "Shell",
"bytes": "7077"
}
],
"symlink_target": ""
} |
import mock
from sahara import exceptions as ex
from sahara.plugins import base as pb
from sahara.plugins.cdh.v5_9_0 import edp_engine
from sahara.plugins import exceptions as pl_ex
from sahara.tests.unit import base as sahara_base
from sahara.tests.unit.plugins.cdh import utils as c_u
from sahara.utils import edp
def get_cluster(version='5.9.0'):
    """Build a fake CDH cluster fixture for the given plugin version."""
    return c_u.get_fake_cluster(plugin_name='CDH', hadoop_version=version)
class EdpEngineTestV590(sahara_base.SaharaTestCase):
    """Unit tests for the CDH 5.9.0 EDP (Oozie and Spark) engines."""
    def setUp(self):
        super(EdpEngineTestV590, self).setUp()
        pb.setup_plugins()
    def test_get_hdfs_user(self):
        eng = edp_engine.EdpOozieEngine(get_cluster())
        self.assertEqual('hdfs', eng.get_hdfs_user())
    @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2')
    def test_create_hdfs_dir(self, create_dir_hadoop2):
        eng = edp_engine.EdpOozieEngine(get_cluster())
        remote = mock.Mock()
        dir_name = mock.Mock()
        eng.create_hdfs_dir(remote, dir_name)
        create_dir_hadoop2.assert_called_once_with(remote,
                                                   dir_name,
                                                   eng.get_hdfs_user())
    def test_get_oozie_server_uri(self):
        cluster = get_cluster()
        eng = edp_engine.EdpOozieEngine(cluster)
        uri = eng.get_oozie_server_uri(cluster)
        self.assertEqual("http://1.2.3.5:11000/oozie", uri)
    def test_get_name_node_uri(self):
        cluster = get_cluster()
        eng = edp_engine.EdpOozieEngine(cluster)
        uri = eng.get_name_node_uri(cluster)
        self.assertEqual("hdfs://master_inst.novalocal:8020", uri)
        # has HDFS_JOURNALNODE
        # With a journal node present the engine must return the HA
        # nameservice URI instead of a single namenode address.
        cluster = get_cluster()
        jns_node_group = mock.MagicMock()
        jns_node_group.node_processes = ['HDFS_JOURNALNODE']
        jns_node_group.instances = [mock.Mock()]
        # Unbound list.append mutates the fixture's node_groups in place.
        list.append(cluster.node_groups, jns_node_group)
        uri = eng.get_name_node_uri(cluster)
        self.assertEqual("hdfs://nameservice01", uri)
    def test_get_resource_manager_uri(self):
        cluster = get_cluster()
        eng = edp_engine.EdpOozieEngine(cluster)
        uri = eng.get_resource_manager_uri(cluster)
        self.assertEqual("master_inst.novalocal:8032", uri)
    def test_get_oozie_server(self):
        cluster = get_cluster()
        eng = edp_engine.EdpOozieEngine(cluster)
        actual = eng.get_oozie_server(cluster)
        expect = cluster.node_groups[1].instances[0]
        self.assertEqual(expect, actual)
    @mock.patch('sahara.service.edp.oozie.engine.'
                'OozieJobEngine.validate_job_execution')
    def test_validate_job_execution(self, c):
        cluster = get_cluster()
        eng = edp_engine.EdpOozieEngine(cluster)
        eng.validate_job_execution(cluster, mock.Mock(), mock.Mock())
        # more than one oozie server
        # dict.__setitem__ bypasses any attribute wrapping on the fixture.
        dict.__setitem__(cluster.node_groups[1], 'count', 2)
        self.assertRaises(pl_ex.InvalidComponentCountException,
                          eng.validate_job_execution, cluster,
                          mock.Mock(), mock.Mock())
    @mock.patch(
        'sahara.plugins.cdh.confighints_helper.get_possible_hive_config_from',
        return_value={})
    def test_get_possible_job_config_hive(self,
                                          get_possible_hive_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_HIVE)
        get_possible_hive_config_from.assert_called_once_with(
            'plugins/cdh/v5_9_0/resources/hive-site.xml')
        self.assertEqual(expected_config, actual_config)
    @mock.patch('sahara.plugins.cdh.v5_9_0.edp_engine.EdpOozieEngine')
    def test_get_possible_job_config_java(self, BaseCDHEdpOozieEngine):
        expected_config = {'job_config': {}}
        BaseCDHEdpOozieEngine.get_possible_job_config.return_value = (
            expected_config)
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_JAVA)
        BaseCDHEdpOozieEngine.get_possible_job_config.assert_called_once_with(
            edp.JOB_TYPE_JAVA)
        self.assertEqual(expected_config, actual_config)
    @mock.patch(
        'sahara.plugins.cdh.confighints_helper.'
        'get_possible_mapreduce_config_from',
        return_value={})
    def test_get_possible_job_config_mapreduce(
            self, get_possible_mapreduce_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_MAPREDUCE)
        get_possible_mapreduce_config_from.assert_called_once_with(
            'plugins/cdh/v5_9_0/resources/mapred-site.xml')
        self.assertEqual(expected_config, actual_config)
    @mock.patch(
        'sahara.plugins.cdh.confighints_helper.'
        'get_possible_mapreduce_config_from',
        return_value={})
    def test_get_possible_job_config_mapreduce_streaming(
            self, get_possible_mapreduce_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_MAPREDUCE_STREAMING)
        get_possible_mapreduce_config_from.assert_called_once_with(
            'plugins/cdh/v5_9_0/resources/mapred-site.xml')
        self.assertEqual(expected_config, actual_config)
    @mock.patch(
        'sahara.plugins.cdh.confighints_helper.get_possible_pig_config_from',
        return_value={})
    def test_get_possible_job_config_pig(self,
                                         get_possible_pig_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_PIG)
        get_possible_pig_config_from.assert_called_once_with(
            'plugins/cdh/v5_9_0/resources/mapred-site.xml')
        self.assertEqual(expected_config, actual_config)
    @mock.patch('sahara.plugins.cdh.v5_9_0.edp_engine.EdpOozieEngine')
    def test_get_possible_job_config_shell(self, BaseCDHEdpOozieEngine):
        expected_config = {'job_config': {}}
        BaseCDHEdpOozieEngine.get_possible_job_config.return_value = (
            expected_config)
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_SHELL)
        BaseCDHEdpOozieEngine.get_possible_job_config.assert_called_once_with(
            edp.JOB_TYPE_SHELL)
        self.assertEqual(expected_config, actual_config)
    @mock.patch('sahara.plugins.utils.get_config_value_or_default')
    @mock.patch('sahara.plugins.utils.get_instance')
    @mock.patch('sahara.service.edp.spark.engine.'
                'SparkJobEngine.validate_job_execution')
    def test_spark_engine_validate_job_execution(self,
                                                 validate_job_execution,
                                                 get_instance,
                                                 get_config_value_or_default):
        # version unsupported
        cluster = get_cluster(version='5.4.0')
        eng = edp_engine.EdpSparkEngine(cluster)
        self.assertRaises(ex.InvalidDataException,
                          eng.validate_job_execution, cluster,
                          mock.Mock(), mock.Mock())
        # none yarn history server
        cluster = get_cluster()
        eng = edp_engine.EdpSparkEngine(cluster)
        self.assertRaises(pl_ex.InvalidComponentCountException,
                          eng.validate_job_execution, cluster,
                          mock.Mock(), mock.Mock())
        # valid
        cluster = get_cluster()
        yarn_history_node_group = mock.Mock()
        yarn_history_node_group.node_processes = ['SPARK_YARN_HISTORY_SERVER']
        yarn_history_node_group.count = 1
        list.append(cluster.node_groups, yarn_history_node_group)
        eng = edp_engine.EdpSparkEngine(cluster)
        eng.validate_job_execution(cluster, mock.Mock(), mock.Mock())
| {
"content_hash": "dc8b080e938a8b931efe725d2f32f6e1",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 78,
"avg_line_length": 43.6972972972973,
"alnum_prop": 0.6262988619495299,
"repo_name": "egafford/sahara",
"id": "de0f6139f188d4b3637e5f6e9db7630e63562c05",
"size": "8671",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sahara/tests/unit/plugins/cdh/v5_9_0/test_edp_engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "3332337"
},
{
"name": "Shell",
"bytes": "52759"
}
],
"symlink_target": ""
} |
import os
import datetime
import logging
from django.db import models
from django.db.models import Q
from django.utils import timezone
from pysearpc import SearpcError
from seaserv import seafile_api
from seahub.auth.signals import user_logged_in
from seahub.utils import calc_file_path_hash, within_time_range, \
normalize_file_path, normalize_dir_path
from seahub.utils.timeutils import datetime_to_isoformat_timestr
from seahub.tags.models import FileUUIDMap
from .fields import LowerCaseCharField
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TimestampedModel(models.Model):
    """Abstract base model that tracks creation and update timestamps."""
    # A timestamp representing when this object was created.
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    # A timestamp representing when this object was last updated.
    updated_at = models.DateTimeField(auto_now=True, db_index=True)

    class Meta:
        abstract = True
        # By default, any model that inherits from `TimestampedModel` should
        # be ordered in reverse-chronological order. We can override this on a
        # per-model basis as needed, but reverse-chronological is a good
        # default ordering for most models.
        # BUG FIX: `ordering` must live on Meta -- as a bare class attribute
        # on the model it is ignored by Django and had no effect.
        ordering = ['-created_at', '-updated_at']
class FileCommentManager(models.Manager):
    """Manager helpers for creating and looking up FileComment rows by path."""
    def add(self, repo_id, parent_path, item_name, author, comment, detail=''):
        # Comments hang off a FileUUIDMap row rather than the raw path, so
        # ensure the (repo, parent_path, name) mapping exists first.
        fileuuidmap = FileUUIDMap.objects.get_or_create_fileuuidmap(repo_id,
                                                                    parent_path,
                                                                    item_name,
                                                                    False)
        c = self.model(uuid=fileuuidmap, author=author, comment=comment, detail=detail)
        c.save(using=self._db)
        return c
    def add_by_file_path(self, repo_id, file_path, author, comment, detail=''):
        # Convenience wrapper: split a full file path into parent dir + name.
        file_path = self.model.normalize_path(file_path)
        parent_path = os.path.dirname(file_path)
        item_name = os.path.basename(file_path)
        return self.add(repo_id, parent_path, item_name, author, comment, detail)
    def get_by_file_path(self, repo_id, file_path):
        # All comments attached to the given file.
        parent_path = os.path.dirname(file_path)
        item_name = os.path.basename(file_path)
        uuid = FileUUIDMap.objects.get_fileuuidmap_by_path(repo_id, parent_path,
                                                           item_name, False)
        objs = super(FileCommentManager, self).filter(
            uuid=uuid)
        return objs
    def get_by_parent_path(self, repo_id, parent_path):
        # All comments on items located directly under parent_path.
        uuids = FileUUIDMap.objects.get_fileuuidmaps_by_parent_path(repo_id,
                                                                    parent_path)
        objs = super(FileCommentManager, self).filter(uuid__in=uuids)
        return objs
class FileComment(models.Model):
    """
    Model used to record file comments.
    """
    uuid = models.ForeignKey(FileUUIDMap, on_delete=models.CASCADE)
    author = LowerCaseCharField(max_length=255, db_index=True)
    comment = models.TextField()
    created_at = models.DateTimeField(default=timezone.now)
    updated_at = models.DateTimeField(default=timezone.now)
    resolved = models.BooleanField(default=False, db_index=True)
    detail = models.TextField(default='')
    objects = FileCommentManager()
    @classmethod
    def normalize_path(cls, path):
        # First parameter renamed from the misleading ``self`` to ``cls``
        # (this is a classmethod); callers pass only ``path``.
        """Strip trailing slashes from *path*, leaving the root '/' intact."""
        return path.rstrip('/') if path != '/' else '/'
    def to_dict(self):
        """Serialize this comment (plus its item's location) as a plain dict."""
        o = self
        return {
            'id': o.pk,
            'repo_id': o.uuid.repo_id,
            'parent_path': o.uuid.parent_path,
            'item_name': o.uuid.filename,
            'comment': o.comment,
            'created_at': datetime_to_isoformat_timestr(o.created_at),
            'resolved': o.resolved,
            'detail': o.detail,
        }
########## starred files
class StarredFile(object):
    """In-memory view of one starred entry with a display-friendly path."""

    def __init__(self, org_id, repo, file_id, path, is_dir, size):
        # always 0 for non-org repo
        self.org_id = org_id
        self.repo = repo
        self.file_id = file_id
        self.path = path
        self.formatted_path = self.format_path()
        self.is_dir = is_dir
        self.size = size
        self.last_modified = None
        if not is_dir:
            # Display name is the last path component (set for files only).
            self.name = path.split('/')[-1]

    def format_path(self):
        """Return self.path with one leading and at most one trailing '/'
        removed and the remaining separators padded ('/a/b' -> 'a / b')."""
        if self.path == "/":
            return self.path
        trimmed = self.path[1:]
        if trimmed.endswith('/'):
            trimmed = trimmed[:-1]
        return trimmed.replace('/', ' / ')
class UserStarredFilesManager(models.Manager):
    """Manager for UserStarredFiles with seafile-backed staleness cleanup."""
    def get_starred_repos_by_user(self, email):
        # A starred repo is stored as an entry whose path is the repo root.
        starred_repos = UserStarredFiles.objects.filter(email=email, path='/')
        return starred_repos
    def get_starred_item(self, email, repo_id, path):
        """Return the first starred entry for *path*, or None."""
        # Match either the file-style or dir-style normalization of *path*.
        path_list = [normalize_file_path(path), normalize_dir_path(path)]
        starred_items = UserStarredFiles.objects.filter(email=email,
                repo_id=repo_id).filter(Q(path__in=path_list))
        return starred_items[0] if len(starred_items) > 0 else None
    def add_starred_item(self, email, repo_id, path, is_dir, org_id=-1):
        # org_id defaults to -1, which marks a non-org (personal) entry --
        # see the org_id=-1 filter in get_starred_files_by_username.
        starred_item = UserStarredFiles.objects.create(email=email,
                repo_id=repo_id, path=path, is_dir=is_dir, org_id=org_id)
        return starred_item
    def delete_starred_item(self, email, repo_id, path):
        # Delete entries under either normalization of *path*.
        path_list = [normalize_file_path(path), normalize_dir_path(path)]
        starred_items = UserStarredFiles.objects.filter(email=email,
                repo_id=repo_id).filter(Q(path__in=path_list))
        for item in starred_items:
            item.delete()
    def get_starred_files_by_username(self, username):
        """Return StarredFile objects for *username*'s personal starred files.
        Stale entries (repo or file no longer exists) are deleted as a side
        effect; results are sorted by last-modified time, newest first.
        """
        starred_files = super(UserStarredFilesManager, self).filter(
            email=username, is_dir=False, org_id=-1)
        ret = []
        repo_cache = {}
        for sfile in starred_files:
            # repo still exists?
            if sfile.repo_id in repo_cache:
                repo = repo_cache[sfile.repo_id]
            else:
                try:
                    repo = seafile_api.get_repo(sfile.repo_id)
                except SearpcError:
                    # RPC failure: keep the entry and skip it for this listing.
                    continue
                if repo is not None:
                    repo_cache[sfile.repo_id] = repo
                else:
                    # Repo is gone -- drop the stale star entry.
                    sfile.delete()
                    continue
            # file still exists?
            file_id = ''
            # size = -1
            if sfile.path != "/":
                try:
                    file_id = seafile_api.get_file_id_by_path(sfile.repo_id,
                                                              sfile.path)
                    # size = seafile_api.get_file_size(file_id)
                except SearpcError:
                    continue
                if not file_id:
                    # File is gone -- drop the stale star entry.
                    sfile.delete()
                    continue
            f = StarredFile(sfile.org_id, repo, file_id, sfile.path,
                    sfile.is_dir, 0) # TODO: remove ``size`` from StarredFile
            ret.append(f)
        '''Calculate files last modification time'''
        for sfile in ret:
            if sfile.is_dir:
                continue
            try:
                # get real path for sub repo
                real_path = sfile.repo.origin_path + sfile.path if sfile.repo.origin_path else sfile.path
                dirent = seafile_api.get_dirent_by_path(sfile.repo.store_id,
                                                        real_path)
                if dirent:
                    sfile.last_modified = dirent.mtime
                else:
                    sfile.last_modified = 0
            except SearpcError as e:
                logger.error(e)
                sfile.last_modified = 0
        ret.sort(key=lambda x: x.last_modified, reverse=True)
        return ret
class UserStarredFiles(models.Model):
    """Starred files are marked by users to get quick access to them on the
    user home page.
    """
    email = models.EmailField(db_index=True)
    # NOTE(review): -1 appears to mark personal (non-org) entries, per the
    # org_id=-1 filter in UserStarredFilesManager -- confirm.
    org_id = models.IntegerField()
    repo_id = models.CharField(max_length=36, db_index=True)
    path = models.TextField()
    is_dir = models.BooleanField()
    objects = UserStarredFilesManager()
########## misc
class UserLastLoginManager(models.Manager):
    def get_by_username(self, username):
        """Return last login record for a user, delete duplicates if there are
        duplicated records.

        Returns None when the user has no record at all.
        """
        try:
            return self.get(username=username)
        except UserLastLogin.DoesNotExist:
            return None
        except UserLastLogin.MultipleObjectsReturned:
            # Keep the first record and purge the rest so future lookups take
            # the fast single-object path.
            dups = self.filter(username=username)
            ret = dups[0]
            for dup in dups[1:]:
                dup.delete()
            # ``Logger.warn`` is a deprecated alias of ``Logger.warning``;
            # lazy %-args defer formatting until the record is emitted.
            logger.warning('Delete duplicate user last login record: %s',
                           username)
            return ret
class UserLastLogin(models.Model):
    """Most recent login time recorded per username."""
    username = models.CharField(max_length=255, db_index=True)
    last_login = models.DateTimeField(default=timezone.now)
    objects = UserLastLoginManager()
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    record = UserLastLogin.objects.get_by_username(user.username)
    if record is None:
        # First login we have seen for this user: create a fresh record.
        record = UserLastLogin(username=user.username)
    record.last_login = timezone.now()
    record.save()
# Register the receiver so every successful login refreshes the record.
user_logged_in.connect(update_last_login)
class CommandsLastCheck(models.Model):
    """Record last check time for Django/custom commands.
    """
    # Identifier of the command this check timestamp belongs to.
    command_type = models.CharField(max_length=100)
    last_check = models.DateTimeField()
class DeviceToken(models.Model):
    """
    The iOS device token model.
    """
    token = models.CharField(max_length=80)
    user = LowerCaseCharField(max_length=255)
    platform = LowerCaseCharField(max_length=32)
    version = LowerCaseCharField(max_length=16)
    pversion = LowerCaseCharField(max_length=16)
    class Meta:
        # A device token is registered at most once per user.
        unique_together = (("token", "user"),)
    def __unicode__(self):
        # BUGFIX: ``str.join`` takes a single iterable; the original passed
        # two positional arguments, which raised TypeError at runtime.
        return "/".join([self.user, self.token])
# Client login tokens are single-use and valid for this many seconds.
_CLIENT_LOGIN_TOKEN_EXPIRATION_SECONDS = 30
class ClientLoginTokenManager(models.Manager):
    def get_username(self, tokenstr):
        """Exchange a one-time login token for its username.

        Returns None when the token is unknown or has expired.  The token
        row is deleted either way, making the token single-use.
        """
        try:
            token = super(ClientLoginTokenManager, self).get(token=tokenstr)
        except ClientLoginToken.DoesNotExist:
            return None
        username = token.username
        # Delete *before* the expiry check so the token cannot be replayed;
        # the in-memory object still carries the timestamp we need.
        token.delete()
        if not within_time_range(token.timestamp, timezone.now(),
                                 _CLIENT_LOGIN_TOKEN_EXPIRATION_SECONDS):
            return None
        return username
class ClientLoginToken(models.Model):
    """One-time token letting a desktop/mobile client log a user in."""
    # TODO: update sql/mysql.sql and sql/sqlite3.sql
    token = models.CharField(max_length=32, primary_key=True)
    username = models.CharField(max_length=255, db_index=True)
    timestamp = models.DateTimeField(default=timezone.now)
    objects = ClientLoginTokenManager()
    def __unicode__(self):
        # BUGFIX: ``str.join`` takes a single iterable; the original passed
        # two positional arguments, which raised TypeError at runtime.
        return "/".join([self.username, self.token])
class RepoSecretKeyManager(models.Manager):
    """Lookup/creation helpers for per-repo secret keys."""

    def get_secret_key(self, repo_id):
        """Return the secret key stored for *repo_id*, or None if absent."""
        try:
            record = self.get(repo_id=repo_id)
        except RepoSecretKey.DoesNotExist:
            return None
        return record.secret_key

    def add_secret_key(self, repo_id, secret_key):
        """Persist and return a new (repo_id, secret_key) record."""
        record = self.model(repo_id=repo_id, secret_key=secret_key)
        record.save(using=self._db)
        return record
class RepoSecretKey(models.Model):
    """Maps a library (repo) id to its stored secret key.

    NOTE(review): max_length=44 suggests a base64-encoded 32-byte key --
    confirm against the code that writes this table.
    """
    repo_id = models.CharField(unique=True, max_length=36, db_index=True)
    secret_key = models.CharField(max_length=44)
    objects = RepoSecretKeyManager()
| {
"content_hash": "1495edf6002b43273ee3971ca799097e",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 105,
"avg_line_length": 34.17142857142857,
"alnum_prop": 0.5929765886287626,
"repo_name": "miurahr/seahub",
"id": "068d87bd1c2a1d4f0a3d1c6a6018fc13eec7033a",
"size": "11999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seahub/base/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231001"
},
{
"name": "HTML",
"bytes": "750509"
},
{
"name": "JavaScript",
"bytes": "2430915"
},
{
"name": "Python",
"bytes": "1500021"
},
{
"name": "Shell",
"bytes": "8856"
}
],
"symlink_target": ""
} |
import logging
from neutronclient.neutron import v2_0 as neutronv20
class ListVPNService(neutronv20.ListCommand):
    """List VPNService configurations that belong to a given tenant."""
    # Resource name used by the generic neutron CRUD command machinery.
    resource = 'vpnservice'
    log = logging.getLogger(__name__ + '.ListVPNService')
    # Columns shown in the CLI table output.
    list_columns = [
        'id', 'name', 'router_id', 'status'
    ]
    # No custom per-column value formatting needed.
    _formatters = {}
    pagination_support = True
    sorting_support = True
class ShowVPNService(neutronv20.ShowCommand):
    """Show information of a given VPNService."""
    # Resource name used by the generic neutron CRUD command machinery.
    resource = 'vpnservice'
    log = logging.getLogger(__name__ + '.ShowVPNService')
class CreateVPNService(neutronv20.CreateCommand):
    """Create a VPNService."""
    resource = 'vpnservice'
    log = logging.getLogger(__name__ + '.CreateVPNService')
    def add_known_arguments(self, parser):
        """Register the vpnservice-specific CLI arguments on *parser*."""
        parser.add_argument(
            '--admin-state-down',
            dest='admin_state', action='store_false',
            help='set admin state up to false')
        parser.add_argument(
            '--name',
            help='Set a name for the vpnservice')
        parser.add_argument(
            '--description',
            help='Set a description for the vpnservice')
        parser.add_argument(
            'router', metavar='ROUTER',
            help='Router unique identifier for the vpnservice')
        parser.add_argument(
            'subnet', metavar='SUBNET',
            help='Subnet unique identifier for the vpnservice deployment')
    def args2body(self, parsed_args):
        """Build the API request body from the parsed CLI arguments.

        Router and subnet may be given by name or id; both are resolved to
        ids before being placed in the body.
        """
        _subnet_id = neutronv20.find_resourceid_by_name_or_id(
            self.get_client(), 'subnet',
            parsed_args.subnet)
        _router_id = neutronv20.find_resourceid_by_name_or_id(
            self.get_client(), 'router',
            parsed_args.router)
        body = {self.resource: {'subnet_id': _subnet_id,
                                'router_id': _router_id,
                                'admin_state_up': parsed_args.admin_state}, }
        # Copy optional attributes into the body only when they were given.
        neutronv20.update_dict(parsed_args, body[self.resource],
                               ['name', 'description',
                                'tenant_id'])
        return body
class UpdateVPNService(neutronv20.UpdateCommand):
    """Update a given VPNService."""
    # Resource name used by the generic neutron CRUD command machinery.
    resource = 'vpnservice'
    log = logging.getLogger(__name__ + '.UpdateVPNService')
class DeleteVPNService(neutronv20.DeleteCommand):
    """Delete a given VPNService."""
    # Resource name used by the generic neutron CRUD command machinery.
    resource = 'vpnservice'
    log = logging.getLogger(__name__ + '.DeleteVPNService')
| {
"content_hash": "7559da3a86f49c723e4900c28ea3f611",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 32.43589743589744,
"alnum_prop": 0.5972332015810277,
"repo_name": "neumerance/deploy",
"id": "0f9e92b0d91a6206a55db4d0ceba78e9ad4f0f2d",
"size": "3298",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/neutronclient/neutron/v2_0/vpn/vpnservice.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core import exceptions
from django.utils.importlib import import_module
# Error message template for mis-interpretable class-path settings.  Takes two
# %s values: the setting name and a sentence subject (e.g. "It").
# BUGFIX: the original triple-quoted literal embedded stray quote characters
# and a literal "' \\" line-continuation residue, and misspelled "the form".
CLASS_PATH_ERROR = ('django-shop is unable to interpret settings value for '
                    '%s. %s should be in the form of a tuple: '
                    "('path.to.models.Class', 'app_label').")
def load_class(class_path, setting_name=None):
    """
    Loads a class given a class_path. The setting value may be a string or a
    tuple of ('path.to.models.Class', 'app_label').

    The setting_name parameter is only there for pretty error output, and
    therefore is optional.

    Raises ImproperlyConfigured when the value cannot be interpreted, the
    module cannot be imported, or the class is missing from the module.
    """
    if not isinstance(class_path, basestring):
        # Tuple form: (dotted class path, app label); the label is unused here.
        try:
            class_path, app_label = class_path
        except (TypeError, ValueError):
            # Narrowed from a bare ``except:`` -- only unpacking failures
            # (wrong arity or non-iterable value) are expected here.
            if setting_name:
                raise exceptions.ImproperlyConfigured(
                    CLASS_PATH_ERROR % (setting_name, setting_name))
            else:
                raise exceptions.ImproperlyConfigured(
                    CLASS_PATH_ERROR % ("this setting", "It"))
    try:
        class_module, class_name = class_path.rsplit('.', 1)
    except ValueError:
        # No dot at all: cannot split into module + class.
        if setting_name:
            txt = '%s isn\'t a valid module. Check your %s setting' % (
                class_path, setting_name)
        else:
            txt = '%s isn\'t a valid module.' % class_path
        raise exceptions.ImproperlyConfigured(txt)
    try:
        mod = import_module(class_module)
    except ImportError as e:  # "as" works on Python 2.6+ and Python 3
        if setting_name:
            txt = 'Error importing backend %s: "%s". Check your %s setting' % (
                class_module, e, setting_name)
        else:
            txt = 'Error importing backend %s: "%s".' % (class_module, e)
        raise exceptions.ImproperlyConfigured(txt)
    try:
        clazz = getattr(mod, class_name)
    except AttributeError:
        if setting_name:
            txt = ('Backend module "%s" does not define a "%s" class. Check'
                   ' your %s setting' % (class_module, class_name,
                                         setting_name))
        else:
            txt = 'Backend module "%s" does not define a "%s" class.' % (
                class_module, class_name)
        raise exceptions.ImproperlyConfigured(txt)
    return clazz
def get_model_string(model_name):
    """
    Returns the model string notation Django uses for lazily loaded ForeignKeys
    (eg 'auth.User') to prevent circular imports.

    This is needed to allow our crazy custom model usage.

    Raises ImproperlyConfigured when the SHOP_*_MODEL setting value cannot be
    interpreted.
    """
    setting_name = 'SHOP_%s_MODEL' % model_name.upper().replace('_', '')
    class_path = getattr(settings, setting_name, None)
    if not class_path:
        # No override configured: the model lives in the shop app itself.
        return 'shop.%s' % model_name
    elif isinstance(class_path, basestring):
        # Dotted-path form: the app label is the path segment before 'models'.
        parts = class_path.split('.')
        try:
            index = parts.index('models') - 1
        except ValueError:
            # 'models' missing from the dotted path; the exception object was
            # unused, so it is no longer bound.
            raise exceptions.ImproperlyConfigured(
                CLASS_PATH_ERROR % (setting_name, setting_name))
        app_label, model_name = parts[index], parts[-1]
    else:
        # Tuple form: (dotted class path, app label).
        try:
            class_path, app_label = class_path
            model_name = class_path.split('.')[-1]
        except (TypeError, ValueError):
            # Narrowed from a bare ``except:`` -- only unpacking failures
            # (wrong arity or non-iterable value) are expected here.
            raise exceptions.ImproperlyConfigured(
                CLASS_PATH_ERROR % (setting_name, setting_name))
    return "%s.%s" % (app_label, model_name)
| {
"content_hash": "06fc3608ec323800cef0f21de25e3693",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 102,
"avg_line_length": 37.05747126436781,
"alnum_prop": 0.5911910669975186,
"repo_name": "airtonix/django-shop",
"id": "3f6043792d0d19507c62521056a6e9345b750ea6",
"size": "3247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/util/loader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1122"
},
{
"name": "Python",
"bytes": "315306"
},
{
"name": "Shell",
"bytes": "5030"
}
],
"symlink_target": ""
} |
class SkeletonProvider(object):
    """
    A skeleton of a metadata provider.

    Serves a single hard-coded example record (``oai:example.org:123``) in
    the mandatory OAI DC format; each method documents the interface a real
    provider must implement.
    """

    def __init__(self, *args):
        """Accept (and ignore) the arguments given in the config file."""

    def formats(self):
        """Map metadata prefixes to (namespace, schema location) tuples.

        The OAI DC format is required by the OAI-PMH specification, so it is
        always present.
        """
        return {
            u'oai_dc': (u'http://www.openarchives.org/OAI/2.0/oai_dc/',
                        u'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'),
        }

    def identifiers(self):
        """Return an iterable of the OAI identifiers of all items."""
        return [u'oai:example.org:123']

    def has_changed(self, identifier, since):
        """Report whether *identifier*'s metadata or sets changed after
        *since* (a datetime).  This skeleton never reports changes."""
        return False

    def get_sets(self, identifier):
        """Return (set spec, set name) pairs for all sets holding the item.

        With hierarchical sets every ancestor must be listed too, which is
        why both ``example`` and ``example:example`` appear here.
        """
        return [(u'example', u'Example Set'),
                (u'example:example', u'Example Subset')]

    def get_record(self, identifier, metadata_prefix):
        """Return the item's metadata as an XML fragment in the format named
        by *metadata_prefix*, or ``None`` when the format is unavailable.
        May raise if converting or reading the metadata fails."""
        return '''
        <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                   xmlns:dc="http://purl.org/dc/elements/1.1/"
                   xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
                                       http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
            <dc:title>Example Record</dc:title>
        </oai_dc:dc>
        '''
| {
"content_hash": "c14a33e32785042e53552add4cf32cc0",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 91,
"avg_line_length": 31.663716814159294,
"alnum_prop": 0.5036333147009503,
"repo_name": "Tietoarkisto/kuha",
"id": "aa9bddaaf0d453037095648158976b8b8f7aaaf1",
"size": "3578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kuha/importer/skeleton_provider.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "179813"
}
],
"symlink_target": ""
} |
import time
# third party related imports
from sqlalchemy import Column, Integer, String
# local library imports
from mobile_push.models.base import Base
class ApnsToken(Base):
    """SQLAlchemy model storing an APNS device token and its endpoint ARN."""
    __tablename__ = 'apns_tokens'
    # Composite primary key: a token may appear once per application ARN.
    token = Column(String, primary_key=True)
    # NOTE(review): *_arn columns presumably hold AWS SNS ARNs -- confirm.
    application_arn = Column(String, primary_key=True)
    endpoint_arn = Column(String, nullable=False)
    user_data = Column(String, nullable=False, default='')
    # Unix timestamp (whole seconds) captured at insert time.
    created_at = Column(Integer, default=lambda: int(time.time()))
| {
"content_hash": "d528c04edf0821ed1665721839f3d352",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 27.38888888888889,
"alnum_prop": 0.7221095334685599,
"repo_name": "theKono/mobile-push",
"id": "316b786a0801b89e7885f3ae96c56d7d77bf869e",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mobile_push/models/apns_token.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46921"
},
{
"name": "Shell",
"bytes": "2890"
}
],
"symlink_target": ""
} |
from bot import OgameBot
class LoggerBot(OgameBot):
    """Logging functions for the bot.

    Every method reads data through the bot's client objects and writes it
    to ``self.logger``.  Logger calls use lazy %-args throughout (the class
    previously mixed eager ``%`` formatting with lazy args).
    """

    def log_planets(self):
        """Log planets info."""
        self.logger.info("Logging planets")
        for planet in self.planets:
            self.logger.info(planet)

    def log_defenses(self):
        """Log defenses data."""
        self.logger.info("Logging defenses")
        for planet in self.planets:
            self.logger.info(self.defense_client.get_defenses(planet))

    def log_ships(self):
        """Log ships info per planet."""
        for planet in self.planets:
            ships = self.hangar_client.get_ships(planet)
            self.logger.info("Logging ships for planet %s:", planet.name)
            for ship in ships:
                self.logger.info(ship)

    def log_overview(self):
        """Log resources, defenses and fleet for every planet."""
        planets = self.general_client.get_planets_overview()
        for planet in planets:
            self.logger.info("Planet %s:", planet)
            self.logger.info("Resources: [%s]", planet.resources)
            self.logger.info("Defenses: %s", planet.defenses)
            self.logger.info("Fleet: %s", planet.fleet)

    def log_planets_in_same_system(self):
        """Log planets on same system."""
        for planet in self.get_planets_in_same_ss():
            self.logger.info(planet)

    def log_nearest_planets(self):
        """Log planets within a 15-system range."""
        for planet in self.get_nearest_planets(nr_range=15):
            self.logger.info(planet)

    def log_nearest_inactive_planets(self):
        """Log inactive planets within a 15-system range."""
        for planet in self.get_nearest_inactive_planets(nr_range=15):
            self.logger.info(planet)

    def log_spy_reports(self):
        """Log every available spy report with its timestamp."""
        for spy_report in self.get_spy_reports():
            self.logger.info("Date:%s - %s",
                             spy_report.report_datetime, spy_report)

    def log_game_datetime(self):
        """Log the current in-game date/time."""
        self.logger.info(self.general_client.get_game_datetime())

    def log_fleet_movement(self):
        """Log every fleet movement currently in progress."""
        for movement in self.movement_client.get_fleet_movement():
            self.logger.info(movement)

    def log_fleet_slot_usage(self):
        """Log current fleet slot usage."""
        self.logger.info(self.fleet_client.get_fleet_slots_usage())
| {
"content_hash": "2a67cfbc3b3262a99af9be85c847f795",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 87,
"avg_line_length": 33.21917808219178,
"alnum_prop": 0.6074226804123711,
"repo_name": "winiciuscota/OG-Bot",
"id": "017abed6153397b1be6db8ad03c20482e3ef978f",
"size": "2425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ogbot/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119264"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
import unittest
from dnc.memory import Memory
def random_softmax(shape, axis):
    """Return float32 uniform-random values softmax-normalized along *axis*."""
    logits = np.random.uniform(0, 1, shape).astype(np.float32)
    exps = np.exp(logits)
    return exps / np.sum(exps, axis=axis, keepdims=True)
class DNCMemoryTests(unittest.TestCase):
def test_construction(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
session.run(tf.initialize_all_variables())
self.assertEqual(mem.words_num, 4)
self.assertEqual(mem.word_size, 5)
self.assertEqual(mem.read_heads, 2)
self.assertEqual(mem.batch_size, 2)
def test_init_memory(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
M, u, p, L, ww, rw, r = session.run(mem.init_memory())
self.assertEqual(M.shape, (2, 4, 5))
self.assertEqual(u.shape, (2, 4))
self.assertEqual(L.shape, (2, 4, 4))
self.assertEqual(ww.shape, (2, 4))
self.assertEqual(rw.shape, (2, 4, 2))
self.assertEqual(r.shape, (2, 5, 2))
self.assertEqual(p.shape, (2, 4))
def test_lookup_weighting(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
initial_mem = np.random.uniform(0, 1, (2, 4, 5)).astype(np.float32)
keys = np.random.uniform(0, 1, (2, 5, 2)).astype(np.float32)
strengths = np.random.uniform(0, 1, (2 ,2)).astype(np.float32)
norm_mem = initial_mem / np.sqrt(np.sum(initial_mem ** 2, axis=2, keepdims=True))
norm_keys = keys/ np.sqrt(np.sum(keys ** 2, axis = 1, keepdims=True))
sim = np.matmul(norm_mem, norm_keys)
sim = sim * strengths[:, np.newaxis, :]
predicted_wieghts = np.exp(sim) / np.sum(np.exp(sim), axis=1, keepdims=True)
memory_matrix = tf.convert_to_tensor(initial_mem)
op = mem.get_lookup_weighting(memory_matrix, keys, strengths)
c = session.run(op)
self.assertEqual(c.shape, (2, 4, 2))
self.assertTrue(np.allclose(c, predicted_wieghts))
def test_update_usage_vector(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
free_gates = np.random.uniform(0, 1, (2, 2)).astype(np.float32)
init_read_weightings = random_softmax((2, 4, 2), axis=1)
init_write_weightings = random_softmax((2, 4), axis=1)
init_usage = np.random.uniform(0, 1, (2, 4)).astype(np.float32)
psi = np.product(1 - init_read_weightings * free_gates[:, np.newaxis, :], axis=2)
predicted_usage = (init_usage + init_write_weightings - init_usage * init_write_weightings) * psi
read_weightings = tf.convert_to_tensor(init_read_weightings)
write_weighting = tf.convert_to_tensor(init_write_weightings)
usage_vector = tf.convert_to_tensor(init_usage)
op = mem.update_usage_vector(usage_vector, read_weightings, write_weighting, free_gates)
u = session.run(op)
self.assertEqual(u.shape, (2, 4))
self.assertTrue(np.array_equal(u, predicted_usage))
def test_get_allocation_weighting(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
mock_usage = np.random.uniform(0.01, 1, (2, 4)).astype(np.float32)
sorted_usage = np.sort(mock_usage, axis=1)
free_list = np.argsort(mock_usage, axis=1)
predicted_weights = np.zeros((2, 4)).astype(np.float32)
for i in range(2):
for j in range(4):
product_list = [mock_usage[i, free_list[i,k]] for k in range(j)]
predicted_weights[i, free_list[i,j]] = (1 - mock_usage[i, free_list[i, j]]) * np.product(product_list)
op = mem.get_allocation_weighting(sorted_usage, free_list)
a = session.run(op)
self.assertEqual(a.shape, (2, 4))
self.assertTrue(np.allclose(a, predicted_weights))
def test_updated_write_weighting(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
write_gate = np.random.uniform(0, 1, (2,1)).astype(np.float32)
allocation_gate = np.random.uniform(0, 1, (2,1)).astype(np.float32)
lookup_weighting = random_softmax((2, 4, 1), axis=1)
allocation_weighting = random_softmax((2, 4), axis=1)
predicted_weights = write_gate * (allocation_gate * allocation_weighting + (1 - allocation_gate) * np.squeeze(lookup_weighting))
op = mem.update_write_weighting(lookup_weighting, allocation_weighting, write_gate, allocation_gate)
w_w = session.run(op)
self.assertEqual(w_w.shape, (2,4))
self.assertTrue(np.allclose(w_w, predicted_weights))
def test_update_memory(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
write_weighting = random_softmax((2, 4), axis=1)
write_vector = np.random.uniform(0, 1, (2, 5)).astype(np.float32)
erase_vector = np.random.uniform(0, 1, (2, 5)).astype(np.float32)
memory_matrix = np.random.uniform(-1, 1, (2, 4, 5)).astype(np.float32)
ww = write_weighting[:, :, np.newaxis]
v, e = write_vector[:, np.newaxis, :], erase_vector[:, np.newaxis, :]
predicted = memory_matrix * (1 - np.matmul(ww, e)) + np.matmul(ww, v)
memory_matrix = tf.convert_to_tensor(memory_matrix)
op = mem.update_memory(memory_matrix, write_weighting, write_vector, erase_vector)
M = session.run(op)
self.assertEqual(M.shape, (2, 4, 5))
self.assertTrue(np.allclose(M, predicted))
def test_update_precedence_vector(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
write_weighting = random_softmax((2, 4), axis=1)
initial_precedence = random_softmax((2, 4), axis=1)
predicted = (1 - write_weighting.sum(axis=1, keepdims=True)) * initial_precedence + write_weighting
precedence_vector = tf.convert_to_tensor(initial_precedence)
op = mem.update_precedence_vector(precedence_vector, write_weighting)
p = session.run(op)
self.assertEqual(p.shape, (2,4))
self.assertTrue(np.allclose(p, predicted))
def test_update_link_matrix(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
_write_weighting = random_softmax((2, 4), axis=1)
_precedence_vector = random_softmax((2, 4), axis=1)
initial_link = np.random.uniform(0, 1, (2, 4, 4)).astype(np.float32)
np.fill_diagonal(initial_link[0,:], 0)
np.fill_diagonal(initial_link[1,:], 0)
# calculate the updated link iteratively as in paper
# to check the correcteness of the vectorized implementation
predicted = np.zeros((2,4,4), dtype=np.float32)
for i in range(4):
for j in range(4):
if i != j:
reset_factor = (1 - _write_weighting[:,i] - _write_weighting[:,j])
predicted[:, i, j] = reset_factor * initial_link[:, i , j] + _write_weighting[:, i] * _precedence_vector[:, j]
link_matrix = tf.convert_to_tensor(initial_link)
precedence_vector = tf.convert_to_tensor(_precedence_vector)
write_weighting = tf.constant(_write_weighting)
op = mem.update_link_matrix(precedence_vector, link_matrix, write_weighting)
L = session.run(op)
self.assertTrue(np.allclose(L, predicted))
def test_get_directional_weightings(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
_link_matrix = np.random.uniform(0, 1, (2, 4, 4)).astype(np.float32)
_read_weightings = random_softmax((2, 4, 2), axis=1)
predicted_forward = np.matmul(_link_matrix, _read_weightings)
predicted_backward = np.matmul(np.transpose(_link_matrix, [0, 2, 1]), _read_weightings)
read_weightings = tf.convert_to_tensor(_read_weightings)
fop, bop = mem.get_directional_weightings(read_weightings, _link_matrix)
forward_weighting, backward_weighting = session.run([fop, bop])
self.assertTrue(np.allclose(forward_weighting, predicted_forward))
self.assertTrue(np.allclose(backward_weighting, predicted_backward))
def test_update_read_weightings(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
mem = Memory(4, 5, 2, 2)
lookup_weightings = random_softmax((2, 4, 2), axis=1)
forward_weighting = random_softmax((2, 4, 2), axis=1)
backward_weighting = random_softmax((2, 4, 2), axis=1)
read_mode = random_softmax((2, 3, 2), axis=1)
predicted_weights = np.zeros((2, 4, 2)).astype(np.float32)
# calculate the predicted weights using iterative method from paper
# to check the correcteness of the vectorized implementation
for i in range(2):
predicted_weights[:, :, i] = read_mode[:, 0,i, np.newaxis] * backward_weighting[:, :, i] + read_mode[:, 1, i, np.newaxis] * lookup_weightings[:, :, i] + read_mode[:, 2, i, np.newaxis] * forward_weighting[:, :, i]
op = mem.update_read_weightings(lookup_weightings, forward_weighting, backward_weighting, read_mode)
session.run(tf.initialize_all_variables())
w_r = session.run(op)
#updated_read_weightings = session.run(mem.read_weightings.value())
self.assertTrue(np.allclose(w_r, predicted_weights))
#self.assertTrue(np.allclose(updated_read_weightings, predicted_weights))
def test_update_read_vectors(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph = graph) as session:
mem = Memory(4, 5, 2, 4)
memory_matrix = np.random.uniform(-1, 1, (4, 4, 5)).astype(np.float32)
read_weightings = random_softmax((4, 4, 2), axis=1)
predicted = np.matmul(np.transpose(memory_matrix, [0, 2, 1]), read_weightings)
op = mem.update_read_vectors(memory_matrix, read_weightings)
session.run(tf.initialize_all_variables())
r = session.run(op)
#updated_read_vectors = session.run(mem.read_vectors.value())
self.assertTrue(np.allclose(r, predicted))
#self.assertTrue(np.allclose(updated_read_vectors, predicted))
def test_write(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph = graph) as session:
mem = Memory(4, 5, 2, 1)
M, u, p, L, ww, rw, r = session.run(mem.init_memory())
key = np.random.uniform(0, 1, (1, 5, 1)).astype(np.float32)
strength = np.random.uniform(0, 1, (1, 1)).astype(np.float32)
free_gates = np.random.uniform(0, 1, (1, 2)).astype(np.float32)
write_gate = np.random.uniform(0, 1, (1, 1)).astype(np.float32)
allocation_gate = np.random.uniform(0, 1, (1,1)).astype(np.float32)
write_vector = np.random.uniform(0, 1, (1, 5)).astype(np.float32)
erase_vector = np.zeros((1, 5)).astype(np.float32)
u_op, ww_op, M_op, L_op, p_op = mem.write(
M, u, rw, ww, p, L,
key, strength, free_gates, allocation_gate,
write_gate , write_vector, erase_vector
)
session.run(tf.initialize_all_variables())
u, ww, M, L, p = session.run([u_op, ww_op, M_op, L_op, p_op])
self.assertEqual(u.shape, (1, 4))
self.assertEqual(ww.shape, (1, 4))
self.assertEqual(M.shape, (1, 4, 5))
self.assertEqual(L.shape, (1, 4, 4))
self.assertEqual(p.shape, (1, 4))
    def test_read(self):
        """Smoke-test Memory.read(): checks read-weighting and vector shapes."""
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph = graph) as session:
                mem = Memory(4, 5, 2, 1)
                M, u, p, L, ww, rw, r = session.run(mem.init_memory())
                keys = np.random.uniform(0, 1, (1, 5, 2)).astype(np.float32)
                strengths = np.random.uniform(0, 1, (1, 2)).astype(np.float32)
                link_matrix = np.random.uniform(0, 1, (1, 4, 4)).astype(np.float32)
                # 3 read modes (backward/lookup/forward) per read head.
                read_modes = random_softmax((1, 3, 2), axis=1).astype(np.float32)
                memory_matrix = np.random.uniform(-1, 1, (1, 4, 5)).astype(np.float32)
                wr_op, r_op = mem.read(memory_matrix, rw, keys, strengths, link_matrix, read_modes)
                # Legacy TF<1.0 initializer API; kept for file consistency.
                session.run(tf.initialize_all_variables())
                wr, r = session.run([wr_op, r_op])
                self.assertEqual(wr.shape, (1, 4, 2))
                self.assertEqual(r.shape, (1, 5, 2))
if __name__ == '__main__':
    # Allow running this test module directly: python memory.py
    unittest.main(verbosity=2)
| {
"content_hash": "a2ea769674ee8d502e1566c76b61d46f",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 232,
"avg_line_length": 44.58536585365854,
"alnum_prop": 0.5420541575492341,
"repo_name": "Mostafa-Samir/DNC-tensorflow",
"id": "31705c7865dd9582ed665851005d9df552316270",
"size": "14624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit-tests/memory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "225669"
},
{
"name": "Python",
"bytes": "103063"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
"""
Contains utility functions and classes for Runners.
"""
from behave import parser
from behave.model import FileLocation
from bisect import bisect
import glob
import os.path
import re
import sys
import types
# -----------------------------------------------------------------------------
# EXCEPTIONS:
# -----------------------------------------------------------------------------
class FileNotFoundError(LookupError):
    # Raised when a feature file/listfile does not exist.
    # NOTE(review): shadows the Python 3 builtin of the same name (and derives
    # from LookupError, not OSError); this module targets Python 2 where no
    # such builtin exists.
    pass
class InvalidFileLocationError(LookupError):
    # Raised when a "filename:line" location does not match a real scenario.
    pass
class InvalidFilenameError(ValueError):
    # Raised when a path does not end with the ".feature" suffix.
    pass
# -----------------------------------------------------------------------------
# CLASS: FileLocationParser
# -----------------------------------------------------------------------------
class FileLocationParser:
    """Parses ``filename:line`` textual locations into FileLocation objects."""
    # -- pylint: disable=W0232
    # W0232: class has no __init__ method (stateless namespace class).
    pattern = re.compile(r"^\s*(?P<filename>.*):(?P<line>\d+)\s*$", re.UNICODE)

    @classmethod
    def parse(cls, text):
        """Return a FileLocation for *text* (``name:line`` or a plain path)."""
        matched = cls.pattern.match(text)
        if not matched:
            # -- NORMAL PATH/FILENAME (no line number suffix):
            return FileLocation(text.strip())
        filename = matched.group("filename").strip()
        line = int(matched.group("line"))
        return FileLocation(filename, line)
# -----------------------------------------------------------------------------
# CLASSES:
# -----------------------------------------------------------------------------
class FeatureScenarioLocationCollector(object):
    """
    Collects FileLocation objects for a feature.
    This is used to select a subset of scenarios in a feature that should run.

    USE CASE:
        behave feature/foo.feature:10
        behave @selected_features.txt
        behave @rerun_failed_scenarios.txt

    With features configuration files, like:

        # -- file:rerun_failed_scenarios.txt
        feature/foo.feature:10
        feature/foo.feature:25
        feature/bar.feature
        # -- EOF
    """
    def __init__(self, feature=None, location=None, filename=None):
        # Derive the filename from the location when not given explicitly.
        if not filename and location:
            filename = location.filename
        self.feature = feature
        self.filename = filename
        self.use_all_scenarios = False  # True once a location has no line number.
        self.scenario_lines = set()     # Requested line numbers within the file.
        self.all_scenarios = set()
        self.selected_scenarios = set()
        if location:
            self.add_location(location)
    def clear(self):
        # Reset to the initial empty state, ready to collect the next feature.
        self.feature = None
        self.filename = None
        self.use_all_scenarios = False
        self.scenario_lines = set()
        self.all_scenarios = set()
        self.selected_scenarios = set()
    def add_location(self, location):
        # Register one FileLocation; every location must share one filename.
        if not self.filename:
            self.filename = location.filename
            # if self.feature and False:
            #     self.filename = self.feature.filename
        # -- NORMAL CASE:
        assert self.filename == location.filename, \
            "%s <=> %s" % (self.filename, location.filename)
        if location.line:
            self.scenario_lines.add(location.line)
        else:
            # -- LOCATION WITHOUT LINE NUMBER:
            # Selects all scenarios in a feature.
            self.use_all_scenarios = True
    @staticmethod
    def select_scenario_line_for(line, scenario_lines):
        """
        Select scenario line for any given line.
        ALGORITHM: scenario.line <= line < next_scenario.line

        :param line: A line number in the file (as number).
        :param scenario_lines: Sorted list of scenario lines.
        :return: Scenario.line (first line) for the given line.
        """
        if not scenario_lines:
            return 0  # -- Select all scenarios.
        # bisect finds the insertion point; -1 snaps back to the scenario
        # whose start line is at or before the requested line.
        pos = bisect(scenario_lines, line) - 1
        if pos < 0:
            pos = 0
        return scenario_lines[pos]
    def discover_selected_scenarios(self, strict=False):
        """
        Discovers selected scenarios based on the provided file locations.
        In addition:
          * discover all scenarios
          * auto-correct BAD LINE-NUMBERS

        :param strict: If true, raises exception if file location is invalid.
        :return: List of selected scenarios of this feature (as set).
        :raises InvalidFileLocationError:
            If file location is no exactly correct and strict is true.
        """
        assert self.feature
        if not self.all_scenarios:
            self.all_scenarios = self.feature.walk_scenarios()
        # -- STEP: Check if lines are correct.
        existing_lines = [scenario.line for scenario in self.all_scenarios]
        selected_lines = list(self.scenario_lines)
        for line in selected_lines:
            # Snap each requested line to the nearest scenario start line.
            new_line = self.select_scenario_line_for(line, existing_lines)
            if new_line != line:
                # -- AUTO-CORRECT BAD-LINE:
                # NOTE: correction happens even in strict mode, before raising.
                self.scenario_lines.remove(line)
                self.scenario_lines.add(new_line)
                if strict:
                    msg = "Scenario location '...:%d' should be: '%s:%d'" % \
                        (line, self.filename, new_line)
                    raise InvalidFileLocationError(msg)
        # -- STEP: Determine selected scenarios and store them.
        scenario_lines = set(self.scenario_lines)
        selected_scenarios = set()
        for scenario in self.all_scenarios:
            if scenario.line in scenario_lines:
                selected_scenarios.add(scenario)
                scenario_lines.remove(scenario.line)
        # -- CHECK ALL ARE RESOLVED: every requested line matched a scenario.
        assert not scenario_lines
        return selected_scenarios
    def build_feature(self):
        """
        Determines which scenarios in the feature are selected and marks the
        remaining scenarios as skipped. Scenarios with the following tags
        are excluded from skipped-marking:

          * @setup
          * @teardown

        If no file locations are stored, the unmodified feature is returned.

        :return: Feature object to use.
        """
        use_all_scenarios = not self.scenario_lines or self.use_all_scenarios
        if not self.feature or use_all_scenarios:
            return self.feature
        # -- CASE: Select subset of all scenarios of this feature.
        #    Mark other scenarios as skipped (except in a few cases).
        self.all_scenarios = self.feature.walk_scenarios()
        self.selected_scenarios = self.discover_selected_scenarios()
        unselected_scenarios = set(self.all_scenarios) - self.selected_scenarios
        for scenario in unselected_scenarios:
            if "setup" in scenario.tags or "teardown" in scenario.tags:
                continue
            scenario.mark_skipped()
        return self.feature
class FeatureListParser(object):
    """
    Read textual file, ala '@features.txt'. This file contains:

      * a feature filename or FileLocation on each line
      * empty lines (skipped)
      * comment lines (skipped)
      * wildcards are expanded to select 0..N filenames or directories

    Relative path names are evaluated relative to the listfile directory.
    A leading '@' (AT) character is removed from the listfile name.
    """
    @staticmethod
    def parse(text, here=None):
        """
        Parse contents of a features list file as text.

        :param text: Contents of a features list(file).
        :param here: Current working directory to use (optional).
        :return: List of FileLocation objects
        """
        locations = []
        for line in text.splitlines():
            filename = line.strip()
            if not filename:
                continue    # SKIP: Over empty line(s).
            elif filename.startswith('#'):
                continue    # SKIP: Over comment line(s).
            if here and not os.path.isabs(filename):
                # BUGFIX: join with the stripped filename, not the raw line,
                # so surrounding whitespace cannot leak into the path.
                filename = os.path.join(here, filename)
            filename = os.path.normpath(filename)
            if glob.has_magic(filename):
                # -- WITH WILDCARDS: expand to 0..N matching locations.
                for filename2 in glob.iglob(filename):
                    location = FileLocationParser.parse(filename2)
                    locations.append(location)
            else:
                location = FileLocationParser.parse(filename)
                locations.append(location)
        return locations

    @classmethod
    def parse_file(cls, filename):
        """
        Read textual file, ala '@features.txt'.

        :param filename: Name of feature list file.
        :return: List of feature file locations.
        """
        if filename.startswith('@'):
            filename = filename[1:]
        if not os.path.isfile(filename):
            raise FileNotFoundError(filename)
        here = os.path.dirname(filename) or "."
        # Close the file handle deterministically (was left to the GC before).
        with open(filename) as listfile:
            contents = listfile.read()
        return cls.parse(contents, here)
# -----------------------------------------------------------------------------
# FUNCTIONS:
# -----------------------------------------------------------------------------
def parse_features(feature_files, language=None):
    """
    Parse feature files and return list of Feature model objects.
    Handles:

      * feature file names, ala "alice.feature"
      * feature file locations, ala: "alice.feature:10"

    :param feature_files: List of feature file names to parse.
    :param language: Default language to use.
    :return: List of feature objects.
    """
    scenario_collector = FeatureScenarioLocationCollector()
    features = []
    for location in feature_files:
        if not isinstance(location, FileLocation):
            # NOTE: basestring is Python2-only; this module targets py2.
            assert isinstance(location, basestring)
            location = FileLocation(os.path.normpath(location))
        if location.filename == scenario_collector.filename:
            # -- Same feature file as the previous location:
            # accumulate its scenario line into the current collector.
            scenario_collector.add_location(location)
            continue
        elif scenario_collector.feature:
            # -- ADD CURRENT FEATURE: As collection of scenarios.
            current_feature = scenario_collector.build_feature()
            features.append(current_feature)
            scenario_collector.clear()
        # -- NEW FEATURE:
        assert isinstance(location, FileLocation)
        filename = os.path.abspath(location.filename)
        feature = parser.parse_file(filename, language=language)
        if feature:
            # -- VALID FEATURE:
            # SKIP CORNER-CASE: Feature file without any feature(s).
            scenario_collector.feature = feature
            scenario_collector.add_location(location)
    # -- FINALLY: flush the last collected feature (the loop above only
    # flushes when a new filename is encountered).
    if scenario_collector.feature:
        current_feature = scenario_collector.build_feature()
        features.append(current_feature)
    return features
def collect_feature_locations(paths, strict=True):
    """
    Collect feature file names by processing list of paths (from command line).
    A path can be a:

      * filename (ending with ".feature")
      * location, ala "filename.feature:10"
      * features configuration filename, ala "@features.txt"
      * directory, to discover and collect all "*.feature" files below.

    :param paths: Paths to process.
    :param strict: If true, raise FileNotFoundError for missing files.
    :return: Feature file locations to use (as list of FileLocations).
    """
    collected = []
    for entry in paths:
        if os.path.isdir(entry):
            # -- DIRECTORY: walk it and pick up every "*.feature" file,
            # in deterministic (sorted) order.
            for current_dir, subdirs, files in os.walk(entry):
                subdirs.sort()
                for name in sorted(files):
                    if name.endswith(".feature"):
                        collected.append(
                            FileLocation(os.path.join(current_dir, name)))
            continue
        if entry.startswith('@'):
            # -- FEATURE-LIST FILE: behave @list_of_features.txt
            collected.extend(FeatureListParser.parse_file(entry[1:]))
            continue
        # -- OTHERWISE: normal filename or location (schema: filename:line)
        found = FileLocationParser.parse(entry)
        if not found.filename.endswith(".feature"):
            raise InvalidFilenameError(found.filename)
        if found.exists():
            collected.append(found)
        elif strict:
            raise FileNotFoundError(entry)
    return collected
def make_undefined_step_snippet(step, language=None):
    """
    Helper function to create an undefined-step snippet for a step.

    :param step: Step to use (as Step object or step text).
    :param language: i18n language, optionally needed for step text parsing.
    :return: Undefined-step snippet (as string).
    """
    # Python 2/3 compatible string-type check and unicode prefix.
    # BUGFIX: the original touched types.StringTypes unconditionally, which
    # raises AttributeError on Python 3 even though the function already
    # branched on sys.version_info for the u"" prefix.
    if sys.version_info[0] == 2:
        string_types = types.StringTypes
        prefix = u"u"
    else:
        string_types = (str,)
        prefix = u""
    if isinstance(step, string_types):
        step_text = step
        steps = parser.parse_steps(step_text, language=language)
        step = steps[0]
        assert step, "ParseError: %s" % step_text
    # Escape single quotes in a local copy instead of mutating step.name
    # in place (the original clobbered the caller's step object).
    step_name = step.name
    single_quote = "'"
    if single_quote in step_name:
        step_name = step_name.replace(single_quote, r"\'")
    schema = u"@%s(%s'%s')\ndef step_impl(context):\n    assert False\n\n"
    snippet = schema % (step.step_type, prefix, step_name)
    return snippet
def print_undefined_step_snippets(undefined_steps, stream=None, colored=True):
    """
    Print implementation snippets for the undefined steps that were discovered,
    de-duplicating repeated steps.

    :param undefined_steps: List of undefined steps (as list<string>).
    :param stream: Output stream to use (default: sys.stderr).
    :param colored: Indicates if coloring should be used (default: True)
    """
    if not undefined_steps:
        return
    stream = stream or sys.stderr
    message = u"\nYou can implement step definitions for undefined steps with "
    message += u"these snippets:\n\n"
    seen = set()
    for undefined_step in undefined_steps:
        if undefined_step in seen:
            continue
        seen.add(undefined_step)
        message += make_undefined_step_snippet(undefined_step)
    if colored:
        # -- OOPS: Unclear if stream supports ANSI coloring.
        from behave.formatter.ansi_escapes import escapes
        message = escapes['undefined'] + message + escapes['reset']
    stream.write(message)
    stream.flush()
| {
"content_hash": "741121fc493b2293aaa153b7c0d64534",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 80,
"avg_line_length": 35.88413098236776,
"alnum_prop": 0.5859890495577706,
"repo_name": "WillisXChen/django-oscar",
"id": "ee1dd6ac19d69a067a6ce76fdbb7a5a1e578c025",
"size": "14270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/behave/runner_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "78"
},
{
"name": "C",
"bytes": "5979"
},
{
"name": "C++",
"bytes": "572"
},
{
"name": "CSS",
"bytes": "694578"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "21346"
},
{
"name": "HTML",
"bytes": "708061"
},
{
"name": "JavaScript",
"bytes": "1433937"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Makefile",
"bytes": "6656"
},
{
"name": "Python",
"bytes": "47548581"
},
{
"name": "Shell",
"bytes": "6790"
},
{
"name": "Smarty",
"bytes": "21023"
},
{
"name": "TeX",
"bytes": "56837"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
} |
from flask import jsonify, make_response, Response
from flask_sqlalchemy import SQLAlchemy
from src.webservice.base import Base
# Shared Flask-SQLAlchemy handle; binding query_property onto Base gives every
# model a `.query` attribute (e.g. Output.query) tied to this session.
db = SQLAlchemy()
Base.query = db.session.query_property()
class Output(Base):
    """ORM model for an Arduino output pin (table: tbl_OutputPin).

    NOTE(review): the column attribute 'type' shadows the builtin of the same
    name; renaming would change the model's public API, so it is kept.
    """
    __tablename__ = 'tbl_OutputPin'
    id = db.Column(db.Integer, primary_key=True)
    number = db.Column(db.Integer())  # pin number on the parent device
    name = db.Column(db.String(50))
    parent_id = db.Column(db.Integer, db.ForeignKey('tbl_Arduino.id'))
    parent = db.relationship('Device')
    type = db.Column(db.Integer(), db.ForeignKey('tbl_OutputPin_Type.id'))
    type_name = db.relationship('OutputPin_Type')
    # Many-to-many: actions driving this pin, via tbl_Action_OutputPin.
    actions = db.relationship("Action", secondary="tbl_Action_OutputPin")
    @staticmethod
    def get_all_outputs():
        """Return a JSON response listing every output pin with its device."""
        output_pins = Output.query.outerjoin(Output.actions).all()
        output = []
        for outputPin in output_pins:
            output.append({'id': outputPin.id, 'name': outputPin.name, 'number': outputPin.number,
                           'type_name': outputPin.type_name.name, 'type': outputPin.type, 'device_name': outputPin.parent.name})
        # NOTE(review): commit after a pure read -- presumably used to end the
        # transaction; confirm against the session configuration.
        db.session.commit()
        return jsonify({'response': output})
    @staticmethod
    def update_output(request):
        """Update name and/or type of an output pin from a JSON request body.

        Refuses to change the type while the pin is still used by actions.
        NOTE(review): the failure path returns a (str, str) tuple while the
        success path returns a Flask response -- callers must handle both.
        """
        data = request.get_json()
        output = db.session.query(Output).filter_by(id=data['id']).first()
        if 'name' in data:
            output.name = data['name']
        if 'type' in data:
            if len(output.actions) == 0:
                output.type = data['type']
            else:
                db.session.commit()
                return "error", "500 Output pin is used in actions"
        db.session.commit()
        return jsonify({'result': 'Output pin has been changed'})
    @staticmethod
    def get_outputs(outputID):
        """Return Output rows whose ids are in the iterable *outputID*."""
        output = db.session.query(Output).filter(Output.id.in_(outputID)).all()
        db.session.commit()
        return output
    @staticmethod
    def get_menu_fields_outputs():
        """Return a JSON response of (id, name) pairs for menu widgets."""
        outputs = db.session.query(Output).all()
        output = []
        for row in outputs:
            output.append({'id': row.id, 'name': row.name})
        db.session.close()
        return jsonify({'response': output})
| {
"content_hash": "05dee673c125e2071f577d261fdae482",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 127,
"avg_line_length": 35.52459016393443,
"alnum_prop": 0.6119058606368251,
"repo_name": "deklungel/iRulez",
"id": "c40a12fbd14b3fe2410ddbec936219b6d691559a",
"size": "2167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webservice/_outputPin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "11720"
},
{
"name": "CSS",
"bytes": "318238"
},
{
"name": "HTML",
"bytes": "1697"
},
{
"name": "JavaScript",
"bytes": "522119"
},
{
"name": "PHP",
"bytes": "281732"
},
{
"name": "Python",
"bytes": "1529251"
},
{
"name": "Roff",
"bytes": "111150693"
}
],
"symlink_target": ""
} |
def sing(b, end):
    """Print one line of the song: "<count> bottle(s) <end>".

    b == 0 prints "No more"; b == 1 drops the plural "s".
    """
    print(b or 'No more', 'bottle' + ('s' if b-1 else ''), end)


for i in range(99, 0, -1):
    sing(i, 'of beer on the wall,')
    sing(i, 'of beer,')
    # BUGFIX: was a Python 2 print statement, a SyntaxError under Python 3
    # (the rest of this file uses the print() function).
    print("Take one down, and pass it around,")
    sing(i-1, 'of beer on the wall.\n')
# Template variant of the song; one verse per bottle count.
# NOTE(review): prints "1 bottles" / "0 bottles" -- kept as-is to preserve
# the original output strings.
verse = '''\
{some} bottles of beer on the wall
{some} bottles of beer
Take one down, pass it around
{less} bottles of beer on the wall
'''

for bottles in range(99, 0, -1):
    # BUGFIX: was a Python 2 print statement, a SyntaxError under Python 3.
    print(verse.format(some=bottles, less=bottles-1))
| {
"content_hash": "2bf00e53233fa776501b87ebf23e720a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.6164948453608248,
"repo_name": "juandc/platzi-courses",
"id": "bf5e8fba181be8754e864b932be16516d158cfad",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-Django-2016/Python/Clase2/beers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56245"
},
{
"name": "HTML",
"bytes": "61675"
},
{
"name": "JavaScript",
"bytes": "15712"
},
{
"name": "Python",
"bytes": "137764"
}
],
"symlink_target": ""
} |
'''
Python3 program for extracting fpkm data from cuffdiff outputs and writing it to a file in a two dimensional matrix of values with one gene per line
and each condition clustered. Output will preserve the exact order of the input. Written by Michael Weinstein, UCLA Cohn Lab and Collaboratory, 2015
'''
def yesanswer(question):
    """Ask *question* until the user types a valid yes/no answer.

    Prints the question with a "(Y/N)" suffix, reads a reply from stdin,
    and returns True for y/Y or False for n/N. Any other reply prints
    "Invalid response." and re-asks the question.
    """
    while True:
        print (question + ' (Y/N)')
        reply = str(input('>>'))
        if reply in ('y', 'Y'):
            return True
        if reply in ('n', 'N'):
            return False
        # Anything else: complain and loop back to re-ask.
        print ('Invalid response.')
class checkArgs(object):
    """Parse command-line arguments and validate the input/output files.

    Attributes set: geneList (path or False), cuffDiffOutput,
    outputMatrix (<input>.matrix), outputKey (<input>.key).
    Exits via quit() with a message on any validation failure.
    """
    def __init__(self):
        import argparse  # loads the required library for reading the commandline
        import os  # needed to check if files exist
        parser = argparse.ArgumentParser()
        parser.add_argument ("-c", "--cuffDiffOutput", help = "CuffDiff Output File")
        parser.add_argument ("-g", "--geneList", help = "File containing a list of the genes of interest (1 per line).")
        parser.add_argument ("-9", "--clobber", help = "Ignore potential file overwrites (use with caution).", action = "store_true")
        args = parser.parse_args()
        self.geneList = args.geneList
        self.cuffDiffOutput = args.cuffDiffOutput
        if not self.cuffDiffOutput:
            # BUGFIX: validate BEFORE deriving the output names; the original
            # computed cuffDiffOutput + ".matrix" first, which raised a
            # TypeError (None + str) instead of this friendly message when
            # the -c argument was omitted.
            quit('No CuffDiff file specified.')
        self.outputMatrix = self.cuffDiffOutput + ".matrix"
        self.outputKey = self.cuffDiffOutput + ".key"
        if not args.geneList:
            print("No gene of interest list set.")
            self.geneList = False
        if not os.path.isfile(self.cuffDiffOutput):
            quit('Unable to find CuffDiffOutput file: ' + self.cuffDiffOutput)
        if self.geneList and not os.path.isfile(self.geneList):
            quit('Unable to find gene list file: ' + self.geneList)
        if os.path.isfile(self.outputMatrix) or os.path.isfile(self.outputKey):
            if args.clobber:
                print('Outputs already exist. Set to overwrite in command line arguments.')
            else:
                if not yesanswer('Output files already exist for this input. Overwrite them?'):
                    quit('OK!')
def getGenesOfInterest(geneListFile):
    """Read the genes-of-interest list file (one gene symbol per line).

    :param geneListFile: path to the list file, or a falsy value when the
        user did not supply one.
    :return: list of gene names (blank lines skipped), or False when no
        file was given.
    """
    if not geneListFile:
        return False
    # Use a context manager so the handle closes even on error (the original
    # only closed it on the happy path).
    with open(geneListFile, 'r') as geneFile:
        # BUGFIX: skip blank lines; the original appended an empty string
        # for every blank line in the file.
        return [line.strip() for line in geneFile if line.strip()]
class CuffDiffDeDataLine(object):
    """One data row from a CuffDiff read-group tracking file.

    Wraps a raw tab-delimited line and exposes its nine fields as named
    attributes (tracking_id, condition, replicate, raw_frags,
    internal_scaled_frags, external_scaled_frags, fpkm, effective_length,
    status), all kept as strings.
    """
    def __init__(self, line):
        """Split *line* on tabs and bind the field attributes.

        :raises IndexError: if the line has fewer than 9 tab-separated values.
        """
        self.line = line
        self.data = line.split("\t")
        if not self.integrityCheck():
            raise IndexError("Line: " + self.line + " has less than 9 values.")
        self.generate()

    def integrityCheck(self):
        """Return True if the line carries at least the 9 expected fields."""
        return len(self.data) >= 9

    def generate(self):
        """Bind the nine positional fields to their named attributes."""
        (self.tracking_id, self.condition, self.replicate,
         self.raw_frags, self.internal_scaled_frags,
         self.external_scaled_frags, self.fpkm,
         self.effective_length, self.status) = self.data[:9]
def createFpkmDict(deFileName, geneList):
    """Build {gene: {condition: {replicate: fpkm}}} from a CuffDiff file.

    :param deFileName: path to the CuffDiff read-group tracking file.
    :param geneList: list of genes to keep, or a falsy value to keep all.
    :return: tuple (fpkmDict, orderList) where orderList holds
        [gene, condition] pairs in first-seen file order (used later to
        preserve the original ordering in the matrix output).
    """
    interestDict = {}
    orderList = []
    counter = 0
    # Context manager closes the handle even on error (original used a
    # manual readline loop plus an explicit close).
    with open(deFileName, 'r') as deFile:
        for line in deFile:
            print("Processed " + str(counter) + " lines.", end="\r")
            counter += 1
            if "tracking_id" in line:
                continue  # header line
            currentLine = CuffDiffDeDataLine(line)
            if geneList and (currentLine.tracking_id not in geneList):
                continue  # not a gene of interest
            if [currentLine.tracking_id, currentLine.condition] not in orderList:
                orderList.append([currentLine.tracking_id, currentLine.condition])
            # setdefault replaces the original's nested try/except-KeyError
            # ladder: create missing dict levels on demand, then store.
            conditionDict = interestDict.setdefault(currentLine.tracking_id, {})
            replicateDict = conditionDict.setdefault(currentLine.condition, {})
            replicateDict[int(currentLine.replicate)] = float(currentLine.fpkm)
    print("Processed " + str(counter) + " lines.")
    return (interestDict, orderList)
def matrixOutput(matrixOutputFileName, keyOutputFileName, fpkmDict, fpkmOrder):
    """Write the FPKM matrix file and its companion key file.

    Matrix: one gene per line, values ordered by condition then replicate.
    Key: one gene per line, entries "condition(N)" giving replicate counts.

    NOTE(review): the local 'matrixOutput' shadows this function's name;
    harmless because the function never recurses.

    :param matrixOutputFileName: path for the matrix output file.
    :param keyOutputFileName: path for the key output file.
    :param fpkmDict: {gene: {condition: {replicate: fpkm}}} from createFpkmDict.
    :param fpkmOrder: [gene, condition] pairs in original file order.
    :return: True on completion.
    """
    counter = 0
    matrixOutput = open(matrixOutputFileName, 'w') #open the file we plan to write the matrix to
    keyOutput = open(keyOutputFileName, 'w') #open the file we will write the key output to
    i = 0 #initialize our index into fpkmOrder
    while i < len(fpkmOrder): #each fpkmOrder entry is [gene, condition] in original file order
        print("Wrote " + str(counter) + " lines.", end="\r")
        counter += 1
        gene = fpkmOrder[i][0] #use the orderList to get our current gene name
        conditions = [] #conditions seen for the current gene
        conditions.append(fpkmOrder[i][1]) #add the condition for the current line of the order list
        lookAhead = i + 1 #move the index ahead by one
        try: #try/except guards running off the end of the list (happens at the last gene)
            while fpkmOrder[lookAhead][0] == gene: #while the next entry is still the same gene
                conditions.append(fpkmOrder[lookAhead][1]) #collect its condition
                lookAhead += 1 #and advance the lookahead index
        except IndexError: #ran off the end of the list
            i = len(fpkmOrder) + 1 #NOTE(review): dead store -- overwritten on the next line; harmless because lookAhead == len(fpkmOrder) here
        i = lookAhead #lookAhead now points at the first entry of the next gene; resume there
        outputList = [] #values for this gene's matrix line
        conditionCount = [] #"condition(N)" entries for this gene's key line
        for condition in conditions: #iterate over the conditions for this gene
            replicates = list(fpkmDict[gene][condition].keys()) #get the replicate ids
            replicates.sort() #ensure numeric order (they are ints); makes column order explicit
            for replicate in replicates: #in sorted replicate order
                outputList.append(str(fpkmDict[gene][condition][replicate])) #append the FPKM value, ordered by condition then replicate
            conditionCount.append(condition + "(" + str(len(replicates)) + ")") #key entry: condition(N), N = replicate count
        matrixOutputLine = gene + "\t" + "\t".join(outputList) + "\n" #tab-join the values for the matrix line
        keyOutputLine = gene + "\t" + "\t".join(conditionCount) + "\n" #do the same for our key output
        matrixOutput.write(matrixOutputLine) #write the matrix line
        keyOutput.write(keyOutputLine) #write the key line
    print("Wrote " + str(counter) + " lines.")
    matrixOutput.close()
    keyOutput.close()
    return True
def main():
    """Parse args, load the gene list, build the FPKM dict, write outputs."""
    args = checkArgs()
    geneList = getGenesOfInterest(args.geneList)
    fpkmData = createFpkmDict(args.cuffDiffOutput, geneList)
    fpkmDict = fpkmData[0]   # {gene: {condition: {replicate: fpkm}}}
    fpkmOrder = fpkmData[1]  # [gene, condition] pairs in file order
    matrixOutput(args.outputMatrix, args.outputKey, fpkmDict, fpkmOrder)
    print('Done!')
main() | {
"content_hash": "5fcb175f259319f51460603f45dbb462",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 251,
"avg_line_length": 68.77380952380952,
"alnum_prop": 0.6734464254803532,
"repo_name": "michael-weinstein/UCLA-CPU",
"id": "64f0d56f7c1d832cf03474b530ca489896dc37c0",
"size": "11578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fpkMatrixDEG/fpkmatrixDEG.0.2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53016"
}
],
"symlink_target": ""
} |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pyopencl as cl
import pyopencl.tools
import pytest
import os
import subprocess
from test import test_common
from test.test_common import compile_code
from test.test_common import offset_type
@pytest.fixture(scope='module')
def cuSourcecode():
    """Module-scoped fixture: the CUDA test-kernel source, read once."""
    with open('test/testcudakernel1.cu', 'r') as f:
        return f.read()
# def compile_code(cl, context, kernelSource):
# for file in os.listdir('/tmp'):
# if file.startswith('testprog'):
# os.unlink('/tmp/%s' % file)
# with open('/tmp/testprog.cu', 'w') as f:
# f.write(kernelSource)
# print(subprocess.check_output([
# 'bin/cocl',
# '-c',
# '/tmp/testprog.cu'
# ]).decode('utf-8'))
# with open('/tmp/testprog-device.cl', 'r') as f:
# cl_sourcecode = f.read()
# prog = cl.Program(context, cl_sourcecode).build()
# return prog
# @pytest.fixture(scope='module')
# def testcudakernel1_cl():
# # cl_path = 'test/generated/testcudakernel1-device.cl'
# # print(subprocess.check_output([
# # 'make',
# # cl_path
# # ]).decode('utf-8'))
# # we need to fix this to be a bit more robust to makefile changes...
# for file in os.listdir('test/generated'):
# if file.startswith('testcudakernel'):
# os.unlink('test/generated/%s' % file)
# print(subprocess.check_output([
# 'bin/cocl',
# '-c', '-o', 'test/generated/testcudakernel1.o',
# 'test/testcudakernel1.cu'
# ] + test_common.cocl_options()).decode('utf-8'))
# cl_path = 'test/generated/testcudakernel1-device.cl'
# return cl_path
# def test_cl_generates(testcudakernel1_cl):
# pass
# @pytest.fixture(scope='module')
# def testcudakernel1(context, testcudakernel1_cl):
# with open(testcudakernel1_cl, 'r') as f:
# sourcecode = f.read()
# prog = cl.Program(context, sourcecode).build()
# return prog
# def test_program_compiles(testcudakernel1):
# pass
def test_insertvalue(context, q, float_data, float_data_gpu):
    """Check that a kernel using struct-by-value transpiles and builds."""
    sourcecode = """
struct mystruct {
    int f0;
    float f1;
};
__device__ struct mystruct doSomething(struct mystruct foo, int somevalue);
__device__ struct mystruct doSomething(struct mystruct foo, int somevalue) {
    foo.f0 = somevalue;
    foo.f1 = 4.5f;
    return foo;
}
__global__ void somekernel(float *data) {
    struct mystruct foo;
    foo.f0 = 3;
    foo.f1 = 4.5;
    foo = doSomething(foo, data[2]);
    data[0] = (int)foo.f0;
    data[1] = foo.f1;
}
"""
    mangledname = test_common.mangle('somekernel', ['float *'])
    cl_code = test_common.cu_to_cl(sourcecode, mangledname, num_clmems=1)
    print('cl_code', cl_code)
    # NOTE(review): the kernel is built but never launched and nothing is
    # asserted -- this test only verifies CUDA->OpenCL conversion + build.
    kernel = test_common.build_kernel(context, cl_code, mangledname)
def test_foo(context, q, float_data, float_data_gpu, cuSourcecode):
    """Run the 'foo' CUDA kernel; it must write 123 into data[0]."""
    kernelName = test_common.mangle('foo', ['float *'])
    testcudakernel1 = compile_code(cl, context, cuSourcecode, kernelName, num_clmems=1)
    # Global size 32, local size 32 => one workgroup. The trailing offset
    # and LocalMemory arguments follow the cocl kernel calling convention.
    testcudakernel1.__getattr__(kernelName)(q, (32,), (32,), float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    cl.enqueue_copy(q, float_data, float_data_gpu)
    q.finish()
    assert float_data[0] == 123
def test_copy_float(cuSourcecode, context, q, float_data, float_data_gpu):
    """After running `copy_float`, data[0] must equal data[1]."""
    mangled = test_common.mangle('copy_float', ['float *'])
    prog = compile_code(cl, context, cuSourcecode, mangled, num_clmems=1)
    getattr(prog, mangled)(
        q, (32,), (32,),
        float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    cl.enqueue_copy(q, float_data, float_data_gpu)
    q.finish()
    assert float_data[0] == float_data[1]
def test_use_tid2(cuSourcecode, context, q, int_data, int_data_gpu):
    """Each thread adds its thread id to its own slot: data[i] += i."""
    before = np.copy(int_data)
    mangled = test_common.mangle('use_tid2', ['int *'])
    prog = compile_code(cl, context, cuSourcecode, mangled, num_clmems=1)
    getattr(prog, mangled)(
        q, (32,), (32,),
        int_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    cl.enqueue_copy(q, int_data, int_data_gpu)
    q.finish()
    # Spot-check the first, a middle, and the last thread of the 32-wide block.
    for tid in (0, 10, 31):
        assert int_data[tid] == before[tid] + tid
def test_use_template1(context, q, int_data, int_data_gpu, float_data, float_data_gpu):
    """Instantiates a templated __device__ function for both float and int."""
    cu_source = """
template< typename T >
__device__ T addNumbers(T one, T two) {
    return one + two;
}

__global__ void use_template1(float *data, int *intdata) {
    if(threadIdx.x == 0 && blockIdx.x == 0) {
        data[0] = addNumbers(data[1], data[2]);
        intdata[0] = addNumbers(intdata[1], intdata[2]);
    }
}
"""
    mangled = test_common.mangle('use_template1', ['float *', 'int *'])
    prog = compile_code(cl, context, cu_source, mangled, num_clmems=2)
    floats_before = np.copy(float_data)
    ints_before = np.copy(int_data)
    getattr(prog, mangled)(
        q, (32,), (32,),
        float_data_gpu, offset_type(0), int_data_gpu, offset_type(0),
        offset_type(0), offset_type(0), cl.LocalMemory(4))
    cl.enqueue_copy(q, float_data, float_data_gpu)
    cl.enqueue_copy(q, int_data, int_data_gpu)
    q.finish()
    assert float_data[0] == floats_before[1] + floats_before[2]
    assert int_data[0] == ints_before[1] + ints_before[2]
def test_ternary(context, q, float_data, float_data_gpu):
    """Exercises the C ternary operator on device.

    Builds two kernels from the same source: `setValue` writes one element,
    `testTernary` selects data[2] or data[3] depending on the sign of data[1].
    """
    kernelSource = """
__global__ void setValue(float *data, int idx, float value) {
    if(threadIdx.x == 0) {
        data[idx] = value;
    }
}

__global__ void testTernary(float *data) {
    data[0] = data[1] > 0 ? data[2] : data[3];
}
"""
    setValueKernelName = test_common.mangle('setValue', ['float *', 'int', 'float'])
    setValueProg = compile_code(cl, context, kernelSource, setValueKernelName, num_clmems=1)
    testTernaryName = test_common.mangle('testTernary', ['float *'])
    testTernaryProg = compile_code(cl, context, kernelSource, testTernaryName, num_clmems=1)
    float_data_orig = np.copy(float_data)

    def set_float_value(gpu_buffer, idx, value):
        # Sets data[idx] = value on the device via the setValue kernel.
        # NOTE(review): the `gpu_buffer` parameter is ignored; the kernel is
        # always launched on `float_data_gpu` from the enclosing scope —
        # confirm intent.
        setValueProg.__getattr__(setValueKernelName)(
            q, (32,), (32,), float_data_gpu, offset_type(0), offset_type(0), np.int32(idx), np.float32(value), cl.LocalMemory(4))

    # Initialize the device buffer from the host data before each scenario.
    cl.enqueue_copy(q, float_data_gpu, float_data)
    print('float_data[:8]', float_data[:8])
    # Scenario 1: data[1] > 0, so the ternary must pick data[2].
    set_float_value(float_data_gpu, 1, 10)
    testTernaryProg.__getattr__(testTernaryName)(q, (32,), (32,), float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    q.finish()
    cl.enqueue_copy(q, float_data, float_data_gpu)
    q.finish()
    print('float_data[:8]', float_data[:8])
    assert float_data[0] == float_data_orig[2]
    # Scenario 2: data[1] < 0, so the ternary must pick data[3].
    set_float_value(float_data_gpu, 1, -2)
    testTernaryProg.__getattr__(testTernaryName)(q, (32,), (32,), float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    q.finish()
    cl.enqueue_copy(q, float_data, float_data_gpu)
    q.finish()
    print('float_data[:8]', float_data[:8])
    assert float_data[0] == float_data_orig[3]
# Note: this test seems to fail on HD5500, but ok on 940M
# The generated opencl code seems correct, so...
# @pytest.mark.xfail(reason='fails on hd5500, not because of cocl itself, I think')
def test_structs(context, q, float_data, float_data_gpu, int_data, int_data_gpu):
    """Passes an array of host structs to a kernel and reads fields back."""
    code = """
struct MyStruct {
    int myint;
    float myfloat;
};

__global__ void testStructs(MyStruct *structs, float *float_data, int *int_data) {
    int_data[0] = structs[0].myint;
    float_data[0] = structs[0].myfloat;
    float_data[1] = structs[1].myfloat;
}
"""
    kernel = test_common.compile_code_v3(
        cl, context, code,
        test_common.mangle('testStructs', ['MyStruct *', 'float *', 'int *']),
        num_clmems=3)['kernel']
    # Field order matches the CUDA declaration; layouts have been observed to
    # differ across devices (HD5500 vs 940M), hence the match_dtype call below.
    struct_dtype = np.dtype([('myint', np.int32), ('myfloat', np.float32)])
    struct_dtype, _ = pyopencl.tools.match_dtype_to_c_struct(
        context.devices[0], 'MyStruct', struct_dtype)
    struct_dtype = cl.tools.get_or_register_dtype('MyStruct', struct_dtype)
    host_structs = np.empty(2, struct_dtype)
    host_structs[0]['myint'] = 123
    host_structs[0]['myfloat'] = 567
    host_structs[1]['myint'] = 33
    host_structs[1]['myfloat'] = 44
    structs_gpu = cl.array.to_device(q, host_structs)
    kernel(
        q, (32,), (32,),
        structs_gpu.data, offset_type(0), float_data_gpu, offset_type(0),
        int_data_gpu, offset_type(0), offset_type(0), offset_type(0),
        offset_type(0), cl.LocalMemory(4))
    q.finish()
    cl.enqueue_copy(q, float_data, float_data_gpu)
    cl.enqueue_copy(q, int_data, int_data_gpu)
    q.finish()
    print('int_data[0]', int_data[0])
    print('int_data[1]', int_data[1])
    print('float_data[0]', float_data[0])
    print('float_data[1]', float_data[1])
    assert int_data[0] == 123
    assert float_data[0] == 567
    assert float_data[1] == 44
# @pytest.mark.xfail
def test_float4(cuSourcecode, context, ctx, q, float_data, float_data_gpu):
    """testFloat4 must write the product of input slots 6 and 7 into slot 1."""
    before = np.copy(float_data)
    mangled = test_common.mangle('testFloat4', ['float4 *'])
    prog = compile_code(cl, context, cuSourcecode, mangled, num_clmems=1)
    getattr(prog, mangled)(
        q, (32,), (32,),
        float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    cl.enqueue_copy(q, float_data, float_data_gpu)
    q.finish()
    print('float_data_orig[:8]', before[:8])
    print('float_data[:8]', float_data[:8])
    assert float_data[1] == before[4 + 2] * before[4 + 3]
# @pytest.mark.xfail
def test_float4_test2(cuSourcecode, context, ctx, q, float_data, float_data_gpu):
    """testFloat4_test2 must copy the second float4 onto the first."""
    before = np.copy(float_data)
    mangled = test_common.mangle('testFloat4_test2', ['float4 *'])
    prog = compile_code(cl, context, cuSourcecode, mangled, num_clmems=1)
    getattr(prog, mangled)(
        q, (32,), (32,),
        float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(4))
    cl.enqueue_copy(q, float_data, float_data_gpu)
    q.finish()
    print('float_data_orig[:8]', before[:8])
    print('float_data[:8]', float_data[:8])
    for lane in range(4):
        assert float_data[lane] == before[lane + 4]
def test_long_conflicting_names(context, q):
    """Function names that only differ near the end must not collide after transpilation."""
    cu_source = """
__device__ void mysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionname(float *d) {
    d[1] = 1.0f;
}

__device__ void mysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnameb(float *d) {
    d[2] = 3.0f;
}

__global__ void mysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamec(float *data) {
    data[0] = 123.0f;
    mysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionname(data);
    mysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnameb(data);
}
"""
    mangled_name = test_common.mangle(
        'mysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamemysuperlongfunctionnamec',
        ['float *'])
    cl_source = test_common.cu_to_cl(cu_source, mangled_name, num_clmems=1)
    print('cl_source', cl_source)
    # Log every function name declared in the generated OpenCL, skipping
    # comment lines and anything that is not a (kernel) void declaration.
    for line in cl_source.split('\n'):
        stripped = line.strip()
        if stripped.startswith('/*'):
            continue
        if not stripped.replace('kernel ', '').strip().startswith('void'):
            continue
        name = line.replace('kernel ', '').replace('void ', '').split('(')[0]
        if name:
            print('name', name)
    test_common.build_kernel(context, cl_source, mangled_name)
def test_short_names(context):
    """Short device/global function names survive transpilation and build.

    The kernel is built with the mangled name truncated to 31 characters.
    """
    cuda_source = """
__device__ void funca(float *d);

__device__ void funca(float *d) {
    d[1] = 1.0f;
}

__device__ void funcb(float *d, int c) {
    d[2] = 3.0f + 5 - d[c];
}

__global__ void funck(float *data) {
    data[0] = 123.0f;
    funca(data);
    funcb(data, (int)data[6]);
    for(int i = 0; i < 1000; i++) {
        funcb(data + i, (int)data[i + 100]);
    }
}
"""
    mangled_name = test_common.mangle('funck', ['float *'])
    cl_text = test_common.cu_to_cl(cuda_source, mangled_name, num_clmems=1)
    print('cl_source', cl_text)
    test_common.build_kernel(context, cl_text, mangled_name[:31])
| {
"content_hash": "22e46eb3c0398e49f666d11deb16748a",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 216,
"avg_line_length": 38.625352112676055,
"alnum_prop": 0.6621936989498249,
"repo_name": "hughperkins/coriander",
"id": "cdf2b31e55583bb63f392e76c972fef7468c4a5c",
"size": "13742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_cloutput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39965"
},
{
"name": "C++",
"bytes": "611776"
},
{
"name": "CMake",
"bytes": "33510"
},
{
"name": "Cuda",
"bytes": "128452"
},
{
"name": "Dockerfile",
"bytes": "937"
},
{
"name": "LLVM",
"bytes": "851669"
},
{
"name": "Python",
"bytes": "127554"
},
{
"name": "Shell",
"bytes": "19577"
}
],
"symlink_target": ""
} |
__author__ = 'Bruno Quint'
import os
import unittest
from goodman_pipeline.core import check_version
__version__ = __import__('goodman_pipeline').__version__
class TestVersionChecker(unittest.TestCase):
    """Tests for the goodman_pipeline.core.check_version helpers.

    All network-dependent tests tolerate ConnectionRefusedError so the suite
    still passes when the GitHub API is unreachable.
    """

    # Matches '*' or a dotted version string such as '1', '1.2', '1.2.3',
    # optionally ending in '.*'. Raw string: the pattern contains regex
    # escapes (\*, \d, \.) that are invalid escapes in a normal string.
    _VERSION_RE = r'^(\*|\d+(\.\d+){0,2}(\.\*)?)$'

    def test_get_last(self):
        """get_last() returns something that looks like a version string."""
        try:
            v = check_version.get_last()
            self.assertRegex(v, self._VERSION_RE)
        except ConnectionRefusedError:  # pragma: no cover
            pass

    def test_get_last_no_token(self):
        """get_last() with a bogus token env var still yields a version."""
        try:
            v = check_version.get_last(github_api_token='NONEXISTANTVAR')
            self.assertRegex(v, self._VERSION_RE)
        except ConnectionRefusedError:  # pragma: no cover
            pass
        except KeyError:  # pragma: no cover
            pass

    def test_get_last_token(self):
        """An invalid (fake) token must be rejected by the API."""
        os.environ['FAKETOKEN'] = 'ThisIsNotARealToken'
        self.assertRaises(ConnectionRefusedError,
                          check_version.get_last,
                          'FAKETOKEN')

    def test_am_i_updated(self):
        """Current version is up to date; v0.0.0 is not."""
        try:
            self.assertTrue(check_version.am_i_updated(__version__))
            self.assertFalse(check_version.am_i_updated('v0.0.0'))
        except ConnectionRefusedError:  # pragma: no cover
            pass
| {
"content_hash": "4d5f7cbc7f8df64db11404be0d0b643f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 31.325581395348838,
"alnum_prop": 0.5612472160356348,
"repo_name": "simontorres/goodman",
"id": "5bfbbd512f9e061e2d4aa3dfc7e62f8f7ad5e302",
"size": "1348",
"binary": false,
"copies": "2",
"ref": "refs/heads/improve_cross_correlation",
"path": "goodman_pipeline/core/tests/test_check_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "66969"
},
{
"name": "Makefile",
"bytes": "323"
},
{
"name": "Python",
"bytes": "418242"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
} |
from __future__ import print_function,division
import sys, os
sys.path.append(os.path.abspath("."))
import numpy as np
import matplotlib.pyplot as plt
__author__ = 'panzer'
# Bar colors; consumed in label order by bar_plot (one color per label).
COLORS = ["blue", "green", "red", "cyan", "magenta", "yellow", "saddlebrown", "orange", "darkgreen"]
def bar_plot(data, y_label, title, path, format_unit="%0.4f"):
  """Draw a labelled bar chart with error bars and save it as a PNG.

  Args:
    data: Mapping from bar label to a (mean, iqr) pair. Bars appear in the
      mapping's iteration order. If empty/falsy, nothing is drawn.
    y_label: Y-axis caption; also used as the output file name.
    title: Chart title.
    path: Directory into which "<y_label>.png" is written.
    format_unit: printf-style format for the value printed above each bar.
  """
  if not data:
    return
  fig, ax = plt.subplots()

  def auto_label(bars):
    # Print each bar's height just above the bar.
    for bar in bars:
      height = bar.get_height()
      ax.text(bar.get_x() + bar.get_width()/2., 1.05*height,
              format_unit % height,
              ha='center', va='bottom')

  labels = list(data.keys())  # materialized: len() is used to slice COLORS
  means = [data[label][0] for label in labels]
  iqrs = [data[label][1] for label in labels]
  indices = np.arange(len(labels))
  rects = ax.bar(indices, means, yerr=iqrs, color=COLORS[:len(labels)])
  width = rects[0].get_width()/2
  ax.set_ylabel(y_label)
  ax.set_title(title)
  ax.set_xticks(indices + width)  # center the tick labels under the bars
  ax.set_xticklabels(labels)
  auto_label(rects)
  plt.savefig(path+"/%s.png"%y_label)
  # Close the figure so repeated calls do not accumulate open figures.
  plt.close(fig)
| {
"content_hash": "0f71683f87147a5a092aa3e41c3817c4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 100,
"avg_line_length": 26.625,
"alnum_prop": 0.6253521126760564,
"repo_name": "bigfatnoob/optima",
"id": "2aef10074f12e7bbef34c66f207467bceeb741b3",
"size": "1065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157500"
}
],
"symlink_target": ""
} |
"""Provides mechanisms to time-slice Reach Gym Environments.
Example usage -
agent1 = KittingAgent()
agent2 = KittingAgent(new_features=True)
with gym.make("benchmark-kitting-v0") as env:
env_time_slicer.randomized_run(
env, # Environment.
[agent1.attempt, agent2.attempt], # Functions to run different agents.
[0.3, 0.7], # Proportion of times an agent gets control.
divert_on={pausable_env.SessionEndReasons.AGENT_FINISHED},
# Optional set of reasons when to divert.
run_id = 'test' # Optional id to find this in the logs.
)
The usage above assumes that agent1.run() and agent2.run() return control after
each attempt. This is the recommended way to set up the agents for experiments.
"""
import bisect
import datetime
import random
import string
import sys
import threading
from typing import Any, Callable, Iterable, List, Optional
from pyreach.gyms import pausable_env
from pyreach.gyms import reach_env
_TOLERANCE = 1e-6
class ExperimentAssigner:
  """Manages multiple environments keeping exactly one active at a time."""

  def __init__(
      self,
      env: reach_env.ReachEnv,
      run_id: Optional[str] = None,
      divert_on: Optional[Iterable[pausable_env.SessionEndReason]] = None,
      divert_hook: Optional[Callable[
          [reach_env.ReachEnv, pausable_env.SessionEndReason], bool]] = None,
      num_attempts: Optional[int] = None):
    """Instantiates a Gym Environment Time Slicer.

    This object can be used to share an environment between multiple agents.
    Note that the randomized_run() method provides a convenient entry point
    for this object, and in most cases one would not need to instantiate it
    directly.

    Args:
      env: The environment which needs to be shared.
      run_id: Optional context that will be logged. If not specified, a unique
        id will be generated and logged.
      divert_on: Can be passed to restrict diversion only to certain
        reasons. E.g. divert_on={AGENT_FINISHED, AGENT_CALLED_RESET}.
        Defaults to {AGENT_FINISHED}, which assumes that the agents will
        return control after each attempt.
      divert_hook: A method that will be called every time a diversion decision
        needs to be taken. If the method returns False, the agent will not be
        changed.
        E.g. to ensure diversion only on resets -
          divert_hook=lambda _, reason: reason == AGENT_CALLED_RESET
        E.g. to call reset between all diversions -
          divert_hook=lambda env, _: env.reset() or True.
      num_attempts: If passed, the assigner will stop after exactly this many
        attempts are completed. This should usually be paired with a call to
        .wait(), which blocks until this happens.
    """
    self._env = env
    self._run_id: Optional[str] = run_id
    self._validate_and_store_run_id()
    self._sub_envs: List[pausable_env.PausableEnv] = []
    self._cumulative_props: List[float] = []  # Filled by randomized_slicer()
    self._divert_on = {pausable_env.SessionEndReason.AGENT_FINISHED}
    if divert_on is not None:
      self._divert_on = set(divert_on)
    self._divert_hook = divert_hook
    self._num_attempts = num_attempts
    # Set once num_attempts diversions have happened; .wait() blocks on it.
    self._done_enough_attempts = threading.Event()
    # Number of times a diversion happened.
    self._diversion_count = 0

  def _validate_and_store_run_id(self) -> None:
    """Stores the run_id for logging.

    If self._run_id is None, generates a new one.
    """
    if self._run_id is None:
      # Create a randomized id based on current date and time.
      timestamp_str = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
      random_str = ''.join(
          random.choice(string.digits + string.ascii_lowercase)
          for _ in range(16))
      self._run_id = timestamp_str + '-' + random_str
    # Expose the id through the environment's task params so it is logged.
    self._env.task_params['experiment_run_id'] = self._run_id
    sys.stderr.write(f'Stored run_id as: {self._run_id!r}\n')

  def _on_session_end(self, reason: pausable_env.SessionEndReason,
                      session_index: int) -> None:
    """Handles session-end notification."""
    # Ignore reasons the caller did not ask to divert on.
    if reason not in self._divert_on:
      return
    if self._divert_hook is not None:
      # Do not change agent if the divert_hook() returns False.
      if not self._divert_hook(self._env, reason):
        return
    # The pause will be called on the same thread, hence it will be invoked
    # before any new command that needs to be blocked.
    self._sub_envs[session_index].pause()
    sys.stderr.write(
        f'ExperimentAssigner Paused environment #{session_index}\n')
    self._roll_dice_and_resume()

  @staticmethod
  def _icdf(cdf: List[float], value: float) -> int:
    """Randomly selects one index given cumulative probabilities.

    Args:
      cdf: A list with floats in ascending order. For a proper probability CDF,
        it will also end at 1, but we don't force the CDF to be normalized.
      value: A number between 0 and cdf[-1].

    Returns:
      An int between 0 to len(cdf) - 1. If value is selected at random, this
      number will be a random index drawn from the CDF as defined by cdf.
    """
    index = bisect.bisect_left(cdf, value)
    # Index may be larger if value > cdf[-1] (e.g. floating point rounding).
    if index >= len(cdf):
      index -= 1
    return index

  def _roll_dice_and_resume(self) -> None:
    """Selects an environment at random and resumes it."""
    self._diversion_count += 1
    if self._num_attempts is not None:
      if self._diversion_count > self._num_attempts:
        self._done_enough_attempts.set()
        # We are now in a state where the environment is completely paused.
        return
    sys.stderr.write(f'EnvTimeSlicer Diversion #{self._diversion_count}\n')
    new_env_index = self._icdf(self._cumulative_props, random.random())
    self._sub_envs[new_env_index].resume()
    sys.stderr.write(f'EnvTimeSlicer Resumed environment #{new_env_index}\n')

  def randomized_slicer(self,
                        proportions: List[float]) -> List[reach_env.ReachEnv]:
    """Returns a list of env's which will get traffic as defined by proportions.

    Args:
      proportions: Set of probabilities. Should be positive and sum up to 1.

    Returns:
      A list of environments, one each for proportions passed as arg. It will
      be guaranteed that only one environment will be active at a time, chosen
      at random. Also over long period of time, number of times a particular
      env is activated will converge to the requested proportions.
    """
    self._cumulative_props = []
    self._sub_envs = []
    cumulative_proportion = 0.0  # Total proportion accounted for so far.
    # Create a sub environment for each traffic.
    for index, proportion in enumerate(proportions):
      # Sub-envs start paused; _roll_dice_and_resume() wakes exactly one.
      subenv = pausable_env.PausableEnv(self._env, start_paused=True)
      subenv.add_session_end_callback(self._on_session_end, index)
      cumulative_proportion += proportion
      self._cumulative_props.append(cumulative_proportion)
      self._sub_envs.append(subenv)
    if abs(cumulative_proportion - 1) > _TOLERANCE:
      raise ValueError(
          f'Proportion of traffic {proportions!r} does not sum to 1')
    self._roll_dice_and_resume()
    return self._sub_envs

  def wait(self) -> None:
    """Blocks until we finish number of attempts passed as num_attempts."""
    self._done_enough_attempts.wait()
def _run_agent(env: pausable_env.PausableEnv,
               agent_attempt: Callable[[reach_env.ReachEnv], None]) -> None:
  """Runs agent in loop, sends a notification to the env each time it ends."""
  # Block until the assigner first resumes this sub-environment.
  env.wait_till_active()
  while True:
    agent_attempt(env)
    # Communicate to pausable environment that attempt has finished.
    env.agent_ended()
def randomized_run(env: reach_env.ReachEnv,
                   runners: List[Callable[[reach_env.ReachEnv], None]],
                   traffic: Optional[List[float]] = None,
                   **kwargs: Any) -> None:
  """A method to run multiple environments with time-sharing.

  This is a convenience method, which will need to be stopped with Ctrl+C.
  If more flexibility is needed, consider instantiating and using
  ExperimentAssigner directly.

  Args:
    env: The env on which the runners will work. Internally new sub-env's will
      be created and each runner will be passed one.
    runners: Functions, typically the .run() of an agent, each taking an env.
    traffic: The proportions of traffic for each runner. If unspecified,
      will use equal traffic.
    **kwargs: Additional arguments for ExperimentAssigner, e.g. run_id.
  """
  assigner = ExperimentAssigner(env, **kwargs)
  if traffic is None:
    traffic = [1.0 / len(runners)] * len(runners)
  sub_envs = assigner.randomized_slicer(traffic)
  threads: List[threading.Thread] = []
  # Create and start a daemon thread for each runner with its own sub-env.
  # Note: the loop variable deliberately does not reuse the name `env` so the
  # function parameter is not shadowed.
  for sub_env, runner in zip(sub_envs, runners):
    thread = threading.Thread(
        target=_run_agent, args=[sub_env, runner], daemon=True)
    threads.append(thread)
    thread.start()
  # Wait till the environment stops. This is needed to return control if
  # num_attempts was passed.
  assigner.wait()
| {
"content_hash": "40882420fa06c47dabb6dfbc94a026c3",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 80,
"avg_line_length": 39.24576271186441,
"alnum_prop": 0.6703735694234506,
"repo_name": "google-research/pyreach",
"id": "b2b35b671d66c8365d1447c493aa9720cf59479b",
"size": "9838",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyreach/gyms/experiment_assigner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5276899"
},
{
"name": "Shell",
"bytes": "15836"
}
],
"symlink_target": ""
} |
"""Pretrain."""
import dataclasses
import importlib
from typing import Mapping
import jax
import jax.numpy as jnp
from jestimator import amos
from jestimator.data.pipeline_lm import lm_data
from jestimator.data.pipeline_seqio import is_seqio, seqio_data # pylint: disable=g-multiple-import
from jestimator.models.rope import modeling
from jestimator.states import TrainState, MeanMetrics # pylint: disable=g-multiple-import
import ml_collections
from ml_collections.config_dict import config_dict
import optax
import seqio
import tensorflow as tf
def get_config():
  """Returns a config object for modeling flags."""
  cfg = ml_collections.ConfigDict()

  # Model config: start from the dataclass defaults of modeling.ModelConfig.
  cfg.model_config = ml_collections.ConfigDict(
      dataclasses.asdict(modeling.ModelConfig()))

  # Optimizer config.
  opt = ml_collections.ConfigDict()
  opt.optimizer = 'adamw'
  opt.learning_rate = 1e-4
  opt.warmup_steps = 10000
  opt.linear_decay_to_step = config_dict.placeholder(int)
  opt.momentum = 0.9
  opt.beta = 0.999
  opt.weight_decay = 0.01
  cfg.opt_config = opt

  # Other config.
  cfg.mask_token_id = config_dict.placeholder(int)
  cfg.mask_rate = 0.15
  cfg.seqio_mixture_or_task_module = config_dict.placeholder(str)
  cfg.seqio_pack = True
  cfg.seqio_cached = False
  return cfg
class PackOrPadConverter(seqio.FeatureConverter):
  """A feature converter that only packs or pads features.

  Example: a packed dataset.

    ds = [{"targets": [3, 9, 1]}, {"targets": [4, 1]}]
    input_lengths = {"targets": 6}

    converted_ds = {
       "targets": [3, 9, 1, 4, 1, 0],
       "targets_positions": [0, 1, 2, 0, 1, 0],
       "targets_segment_ids": [1, 1, 1, 2, 2, 0]
    }

  Note that two examples are packed together into one example.
  """

  # Feature expected from the upstream task.
  TASK_FEATURES = {
      'targets': seqio.FeatureConverter.FeatureSpec(dtype=tf.int32)
  }
  # Features handed to the model: the tokens plus a padding mask.
  MODEL_FEATURES = {
      'targets': seqio.FeatureConverter.FeatureSpec(dtype=tf.int32),
      'input_mask': seqio.FeatureConverter.FeatureSpec(dtype=tf.bool)
  }
  # No packing-specific output dtypes are declared here.
  # NOTE(review): the class docstring example shows *_positions/*_segment_ids
  # features, which this empty mapping does not declare — confirm intent.
  PACKING_FEATURE_DTYPES = {}

  def _convert_example(
      self, features: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
    """Builds the model features for one example: targets + padding mask."""
    targets = features['targets']
    # Boolean mask marking non-padding positions of `targets`.
    input_mask = seqio.non_padding_position(targets, tf.bool)
    d = {'targets': targets, 'input_mask': input_mask}
    return d

  def _convert_features(
      self, ds: tf.data.Dataset,
      task_feature_lengths: Mapping[str, int]) -> tf.data.Dataset:
    """Convert the dataset to be fed to a language model."""
    # Pack (or pad) first, then derive per-example model features.
    ds = self._pack_or_pad(ds, task_feature_lengths)
    return ds.map(self._convert_example, num_parallel_calls=tf.data.AUTOTUNE)

  def get_model_feature_lengths(
      self, task_feature_lengths: Mapping[str, int]) -> Mapping[str, int]:
    """Define the length relationship between task and model features."""
    decoder_length = task_feature_lengths['targets']
    # The mask has the same length as the token sequence.
    model_feature_lengths = {
        'targets': decoder_length,
        'input_mask': decoder_length
    }
    return model_feature_lengths
def load_config(global_flags):
  """Init config data from global flags."""
  config = ml_collections.ConfigDict()
  config.update(global_flags.module_config)
  # Only a frozen config (hashable object) can be passed to jit functions
  # (i.e. train_step/valid_step/infer_step).
  config.frozen = ml_collections.FrozenConfigDict(config)

  # Construct the TensorFlow input pipelines.
  seq_length = config.model_config.max_length
  if config.seqio_mixture_or_task_module is not None:
    importlib.import_module(config.seqio_mixture_or_task_module)

  def as_features(token_ids: tf.Tensor) -> Mapping[str, tf.Tensor]:
    """Wraps a token sequence into a seqio-compatible feature dict."""
    return {'targets': tf.ensure_shape(token_ids, (seq_length,))}

  if is_seqio(global_flags.train_pattern):
    train_fn = seqio_data({'targets': seq_length},
                          PackOrPadConverter(pack=config.seqio_pack),
                          use_cached=config.seqio_cached,
                          shuffle=True)
  else:
    train_fn = lm_data(
        seq_length, random_skip=True, feature_fn=as_features, interleave=True)

  if is_seqio(global_flags.valid_pattern):
    valid_fn = seqio_data({'targets': seq_length},
                          PackOrPadConverter(pack=config.seqio_pack),
                          use_cached=config.seqio_cached)
  else:
    valid_fn = lm_data(seq_length, feature_fn=as_features)

  config.train_data_fn = train_fn
  config.valid_data_fn = valid_fn
  return config
def get_train_state(config, rng) -> TrainState:
  """Create train state.

  Args:
    config: Config object as built by load_config()/get_config().
    rng: PRNG key used to initialize the model.

  Returns:
    A TrainState holding the model, optimizer and metrics.

  Raises:
    ValueError: If config.opt_config.optimizer is not 'adamw' or 'amos'.
  """
  model_config = modeling.ModelConfig(**config.model_config.to_dict())
  model = modeling.ModelForPretrain(model_config)

  opt_config = config.opt_config
  warmup = opt_config.warmup_steps
  decay = opt_config.linear_decay_to_step

  def lr_schedule(step):
    """Learning rate at `step`: linear warmup, then optional linear decay."""
    lr = opt_config.learning_rate
    if warmup is not None:
      lr *= jnp.minimum(1., step / warmup)
      if decay is not None:
        lr *= 1. - jnp.maximum(0., step - warmup) / (decay - warmup)
    elif decay is not None:
      lr *= 1. - step / decay
    return lr

  if opt_config.optimizer == 'adamw':
    optimizer = optax.adamw(
        learning_rate=lr_schedule,
        b1=opt_config.momentum,
        b2=opt_config.beta,
        weight_decay=opt_config.weight_decay)
  elif opt_config.optimizer == 'amos':
    optimizer = amos.amos(
        lr_schedule,
        modeling.get_eta_fn(model_config),
        shape_fn=modeling.get_shape_fn(model_config),
        beta=opt_config.beta,
        momentum=opt_config.momentum,
        clip_value=1.)
  else:
    # Fail fast: previously an unknown name fell through and raised an
    # opaque NameError on `optimizer` below.
    raise ValueError(f'Unknown optimizer: {opt_config.optimizer!r}')

  metrics_mod = MeanMetrics.create('train_loss', 'valid_loss', 'valid_mrr')
  return TrainState.create(metrics_mod, optimizer, model, rng, jnp.array([[0]]))
def train_step(config, train_batch, state: TrainState, metrics):
  """Training step: one masked-LM gradient update plus metric update."""
  # Compute the MLM loss and its gradient w.r.t. the parameters. `size` is
  # the auxiliary value returned by mlm_train_loss — presumably the number
  # of positions contributing to the loss (used as the mean weight below);
  # confirm in modeling.ModelForPretrain.mlm_train_loss.
  (loss, size), grads = state.value_and_grad_apply_fn(has_aux=True)(
      state.params,
      train_batch['targets'],
      config.mask_token_id,
      mask_rate=config.mask_rate,
      input_mask=train_batch.get('input_mask'),
      enable_dropout=True,
      method=modeling.ModelForPretrain.mlm_train_loss)
  # Fold (loss, size) into the streaming 'train_loss' mean metric.
  _, metrics = state.metrics_mod.apply(
      metrics,
      'train_loss',
      loss,
      size,
      method=MeanMetrics.update,
      mutable=['metrics'])
  return state.apply_gradients(grads=grads), metrics
def valid_step(config, valid_batch, state: TrainState, metrics):
  """Validation step: accumulates MLM loss and MRR metrics for one batch."""

  def body(i, metrics):
    # Loop body for jax.lax.fori_loop; only the metrics carry is threaded.
    del i  # Unused.
    loss, mrr, size = state.apply_fn(
        state.variables(),
        valid_batch['targets'],
        config.mask_token_id,
        mask_rate=config.mask_rate,
        input_mask=valid_batch.get('input_mask'),
        method=modeling.ModelForPretrain.mlm_valid_metrics)
    _, metrics = state.metrics_mod.apply(
        metrics,
        'valid_loss',
        loss,
        size,
        method=MeanMetrics.update,
        mutable=['metrics'])
    _, metrics = state.metrics_mod.apply(
        metrics,
        'valid_mrr',
        mrr,
        size,
        method=MeanMetrics.update,
        mutable=['metrics'])
    return metrics

  # Evaluates the same batch 20 times — presumably to average metrics over
  # independent random mask draws; confirm against mlm_valid_metrics.
  return jax.lax.fori_loop(0, 20, body, metrics)
| {
"content_hash": "63cc29e97a26548034ee02517b91d5a1",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 100,
"avg_line_length": 33.325892857142854,
"alnum_prop": 0.6630944407233758,
"repo_name": "google-research/jestimator",
"id": "ae8b2f5deb6fdc319c3716f6852454df5472b718",
"size": "8053",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jestimator/models/rope/pretrain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10999"
},
{
"name": "Python",
"bytes": "223916"
}
],
"symlink_target": ""
} |
from django.forms import ModelForm
from django import forms
from .models import Denuncia
class DenunciaForm(ModelForm):
    """ModelForm exposing every field of the Denuncia model."""

    class Meta:
        model = Denuncia
        # Explicit field selection: a ModelForm without `fields`/`exclude`
        # exposes all model fields implicitly (mass-assignment risk) and is
        # an error on Django >= 1.8. '__all__' preserves current behavior.
        fields = '__all__'
| {
"content_hash": "c802046726fd3fdabc9b46baf171a6e5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 34,
"avg_line_length": 18.22222222222222,
"alnum_prop": 0.75,
"repo_name": "oscarmcm/AlzoMiVoz",
"id": "b7b881d4a25129f6c47758c4da940c65adc9ed94",
"size": "188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alzomivoz/apps/denuncia/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120491"
},
{
"name": "HTML",
"bytes": "19314"
},
{
"name": "JavaScript",
"bytes": "5105"
},
{
"name": "Makefile",
"bytes": "65"
},
{
"name": "Python",
"bytes": "23943"
}
],
"symlink_target": ""
} |
"""Python datastore class User to be used as a datastore data type.
Classes defined here:
User: object representing a user.
Error: base exception type
UserNotFoundError: UserService exception
RedirectTooLongError: UserService exception
NotAllowedError: UserService exception
"""
import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import user_service_pb
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
  """Base exception class for all errors raised by the users API."""
class UserNotFoundError(Error):
  """Raised by User.__init__() when no email argument is given and no user
  is logged in (i.e. USER_EMAIL is absent from the request environment)."""
class RedirectTooLongError(Error):
  """Raised by UserService calls if the generated redirect URL was too long.
  """
class NotAllowedError(Error):
  """Raised by UserService calls if the requested redirect URL is not allowed.
  """
class User(object):
  """A user.

  We provide the email address, nickname, auth domain, and id for a user.

  A nickname is a human-readable string which uniquely identifies a Google
  user, akin to a username. It will be an email address for some users, but
  not all.
  """

  # Class-level default; instances overwrite this in __init__.
  __user_id = None

  def __init__(self, email=None, _auth_domain=None, _user_id=None):
    """Constructor.

    Args:
      email: An optional string of the user's email address. It defaults to
        the current user's email address.

    Raises:
      UserNotFoundError: Raised if the user is not logged in and the email
        argument is empty.
    """
    if _auth_domain is None:
      _auth_domain = os.environ.get('AUTH_DOMAIN')
    else:
      # Callers that pass an explicit auth domain must also pass an email.
      assert email is not None
    assert _auth_domain

    if email is None:
      # Fall back to the currently logged-in user from the request env.
      assert 'USER_EMAIL' in os.environ
      email = os.environ['USER_EMAIL']
      if _user_id is None and 'USER_ID' in os.environ:
        _user_id = os.environ['USER_ID']

    if not email:
      raise UserNotFoundError

    self.__email = email
    self.__auth_domain = _auth_domain
    # Normalize falsy ids (e.g. empty string) to None.
    self.__user_id = _user_id or None

  def nickname(self):
    """Return this user's nickname.

    The nickname will be a unique, human readable identifier for this user
    with respect to this application. It will be an email address for some
    users, but not all.
    """
    # Strip the '@<auth_domain>' suffix when the email belongs to the app's
    # auth domain; otherwise return the full email address.
    if (self.__email and self.__auth_domain and
        self.__email.endswith('@' + self.__auth_domain)):
      suffix_len = len(self.__auth_domain) + 1
      return self.__email[:-suffix_len]
    else:
      return self.__email

  def email(self):
    """Return this user's email address."""
    return self.__email

  def user_id(self):
    """Return either a permanent unique identifying string or None.

    If the email address was set explicity, this will return None.
    """
    return self.__user_id

  def auth_domain(self):
    """Return this user's auth domain."""
    return self.__auth_domain

  def __unicode__(self):
    # Python 2 only: invoked by unicode(); absent from the Python 3 model.
    return unicode(self.nickname())

  def __str__(self):
    return str(self.nickname())

  def __repr__(self):
    # Include the user id only when one is known.
    if self.__user_id:
      return "users.User(email='%s',_user_id='%s')" % (self.email(),
                                                       self.user_id())
    else:
      return "users.User(email='%s')" % self.email()

  def __hash__(self):
    # Hash agrees with __cmp__: identity is (email, auth_domain).
    return hash((self.__email, self.__auth_domain))

  def __cmp__(self, other):
    # Python 2 three-way comparison (cmp builtin); ignored by Python 3.
    if not isinstance(other, User):
      return NotImplemented
    return cmp((self.__email, self.__auth_domain),
               (other.__email, other.__auth_domain))
def create_login_url(dest_url):
  """Computes the login URL for this request and specified destination URL.

  Args:
    dest_url: String that is the desired final destination URL for the user
        once login is complete. If 'dest_url' does not have a host
        specified, we will use the host from the current request.

  Returns:
    string

  Raises:
    RedirectTooLongError: the generated redirect URL was too long.
    NotAllowedError: the requested redirect URL is not allowed.
  """
  req = user_service_pb.StringProto()
  resp = user_service_pb.StringProto()
  req.set_value(dest_url)
  try:
    # Delegate URL construction to the user service backend.
    apiproxy_stub_map.MakeSyncCall('user', 'CreateLoginURL', req, resp)
  except apiproxy_errors.ApplicationError, e:
    # Translate backend error codes into this module's exception types;
    # any other code is re-raised unchanged.
    if (e.application_error ==
        user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
      raise RedirectTooLongError
    elif (e.application_error ==
          user_service_pb.UserServiceError.NOT_ALLOWED):
      raise NotAllowedError
    else:
      raise e
  return resp.value()
CreateLoginURL = create_login_url
def create_logout_url(dest_url):
  """Computes the logout URL for this request and specified destination URL.

  Args:
    dest_url: String that is the desired final destination URL for the user
        once logout is complete. If 'dest_url' does not have a host
        specified, we will use the host from the current request.

  Returns:
    string

  Raises:
    RedirectTooLongError: the generated redirect URL was too long.
  """
  req = user_service_pb.StringProto()
  resp = user_service_pb.StringProto()
  req.set_value(dest_url)
  try:
    # Delegate URL construction to the user service backend.
    apiproxy_stub_map.MakeSyncCall('user', 'CreateLogoutURL', req, resp)
  except apiproxy_errors.ApplicationError, e:
    # Map the backend error code onto this module's exception type;
    # any other code is re-raised unchanged.
    if (e.application_error ==
        user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
      raise RedirectTooLongError
    else:
      raise e
  return resp.value()
CreateLogoutURL = create_logout_url
def get_current_user():
  """Return the User making this request, or None when nobody is signed in."""
  try:
    current = User()
  except UserNotFoundError:
    return None
  return current
GetCurrentUser = get_current_user
def is_current_user_admin():
  """Return True iff the user making this request is an app admin.

  This is deliberately a module function rather than a User method:
  admin status is not persisted in the datastore and only applies to
  the user making the current request.
  """
  admin_flag = os.environ.get('USER_IS_ADMIN', '0')
  return admin_flag == '1'
IsCurrentUserAdmin = is_current_user_admin
| {
"content_hash": "f2d3f44d32c0ff528c6a8ca1ea0699e2",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 78,
"avg_line_length": 27.347417840375588,
"alnum_prop": 0.6684978540772533,
"repo_name": "jamslevy/gsoc",
"id": "270df4d2dd7d9729fa20bbe7488a8c8c04149992",
"size": "6427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/api/users.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "JavaScript",
"bytes": "388268"
},
{
"name": "Perl",
"bytes": "66733"
},
{
"name": "Python",
"bytes": "8290513"
},
{
"name": "Shell",
"bytes": "5570"
}
],
"symlink_target": ""
} |
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_single_image_random_dot_stereograms_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
# Load the compiled custom-op shared library that implements the SIRDS op.
_sirds_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile(
        "_single_image_random_dot_stereograms.so"))
def single_image_random_dot_stereograms(depth_values,
                                        hidden_surface_removal=None,
                                        convergence_dots_size=None,
                                        dots_per_inch=None,
                                        eye_separation=None,
                                        mu=None,
                                        normalize=None,
                                        normalize_max=None,
                                        normalize_min=None,
                                        border_level=None,
                                        number_colors=None,
                                        output_image_shape=None,
                                        output_data_window=None):
  """Output a RandomDotStereogram Tensor for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation
  will encode 3-D data into a 2-D image.  The output of this Op is suitable
  for the encode_PNG/JPG ops.  Be careful with image compression as this may
  corrupt the encode 3-D data within the image.

  Based upon [this
  paper](https://www.cs.waikato.ac.nz/~ihw/papers/94-HWT-SI-IHW-SIRDS-paper.pdf).

  This outputs a SIRDS image as picture_out.png:

  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]
  session = tf.compat.v1.InteractiveSession()
  sirds = single_image_random_dot_stereograms(
      img,
      convergence_dots_size=8,
      number_colors=256,normalize=True)
  out = sirds.eval()
  png = tf.image.encode_png(out).eval()
  with open('picture_out.png', 'wb') as f:
    f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types:
      `float64`, `float32`, `int64`, `int32`.  Z values of data to encode
      into 'output_data_window' window, lower further away {0.0 floor(far),
      1.0 ceiling(near) after norm}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help view converge image, drawn on bottom
      of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, Fraction of viewing distance (eg. 1/3 = 0.3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization (0.0) - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization (0.0) - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of bord in depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`. 2 (Black &
      White), 256 (grayscale), and Numbers > 256 (Full Color) are
      supported
    output_image_shape: An optional `tf.TensorShape` or list of `ints`.
      Defaults to shape `[1024, 768, 1]`. Defines output shape of returned
      image in '[X,Y, Channels]' 1-grayscale, 3 color; channels will be
      updated to 3 if number_colors > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`.
      Defaults to `[1022, 757]`. Size of "DATA" window, must be equal to or
      smaller than `output_image_shape`, will be centered and use
      `convergence_dots_size` for best fit to avoid overlap if possible

  Returns:
    A `Tensor` of type `uint8` of shape 'output_image_shape' with encoded
    'depth_values'
  """
  # Collect every tuning knob and forward the lot to the generated op
  # wrapper in one call.
  op_kwargs = {
      'depth_values': depth_values,
      'hidden_surface_removal': hidden_surface_removal,
      'convergence_dots_size': convergence_dots_size,
      'dots_per_inch': dots_per_inch,
      'eye_separation': eye_separation,
      'mu': mu,
      'normalize': normalize,
      'normalize_max': normalize_max,
      'normalize_min': normalize_min,
      'border_level': border_level,
      'number_colors': number_colors,
      'output_image_shape': output_image_shape,
      'output_data_window': output_data_window,
  }
  return gen_single_image_random_dot_stereograms_ops.single_image_random_dot_stereograms(**op_kwargs)  # pylint: disable=line-too-long


# The op produces a rendered image; no meaningful gradient exists.
ops.NotDifferentiable("SingleImageRandomDotStereograms")
| {
"content_hash": "85bebbe9537b8adbc36cf34a459708bf",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 124,
"avg_line_length": 42.9059829059829,
"alnum_prop": 0.6258964143426294,
"repo_name": "chemelnucfin/tensorflow",
"id": "dfc6af3e55815763fab6b3d102a9b2508c511238",
"size": "5709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/image/python/ops/single_image_random_dot_stereograms.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1
async def sample_batch_import_model_evaluation_slices():
    """Call BatchImportModelEvaluationSlices and print the server's reply."""
    # Instantiate the async model-service client.
    client = aiplatform_v1.ModelServiceAsyncClient()
    # Build the request; only the required parent field is populated here.
    request = aiplatform_v1.BatchImportModelEvaluationSlicesRequest(
        parent="parent_value",
    )
    # Issue the RPC and await its response.
    response = await client.batch_import_model_evaluation_slices(request=request)
    # Show what came back.
    print(response)
# [END aiplatform_v1_generated_ModelService_BatchImportModelEvaluationSlices_async]
| {
"content_hash": "6a802b95227ddeeee5a9add64b13a260",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 29.210526315789473,
"alnum_prop": 0.7495495495495496,
"repo_name": "googleapis/python-aiplatform",
"id": "74c69c11cf9ef902862446569f5bddbcf1dd720d",
"size": "1984",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
'''
Created By: Quintin Leong
Created Date: November 5th, 2014
File Name: get_data.py
File Description: Utility to pull all consolidated content data from database in a clean nested format.
'''
import MySQLdb, sys
import ClassesGetData as classes
HOST = "sead.systems"  # MySQL server host
USER = "dataeng"  # MySQL account
PASSWD = "dataeng"  # NOTE(review): credentials are hard-coded in source
DB = "shempserver"  # default schema
DEBUG_MODE = False  # flipped to True by check_debug() when run with 'DEBUG'
# Sampling frequency per sensor type, used to fill in each DataRaw row.
FREQUENCY = {'AC Voltage':2400, 'AC Current':2400, 'Wattage':1, 'Temperature':0.2}
ENDPOINT = "scratch.data_landing"  # fully-qualified destination table
def check_debug():
global DEBUG_MODE
if((len(sys.argv) == 2)):
if((sys.argv[1] == "DEBUG")):
DEBUG_MODE = True
print "DEBUG mode entered."
def execute_raw_query(cur, query):
    """Run `query` on cursor `cur` and return all result rows as a list.

    MySQL errors are printed rather than re-raised, in which case an
    empty list is returned.
    """
    data_raw = []
    response_length = 0
    try:
        response_length = cur.execute(query)
        data_raw = cur.fetchall()
    except MySQLdb.Error, e:
        try:
            print "MySQLdb Error [%d]: %s" % (e.args[0], e.args[1])
        except IndexError:
            # Some MySQLdb errors carry no (code, message) pair.
            print "MySQLdb Error: %s" % str(e)
    # NOTE(review): this success message also prints in DEBUG mode when
    # the query actually failed -- confirm whether that is intended.
    if (DEBUG_MODE):
        print "\nQuery executed successfully: "+query
        print "Entries returned: "+str(response_length)
    return list(data_raw)
def get_distinct_sensor_id(cur):
if(DEBUG_MODE):print "\nGetting distinct sensor_id in data_raw from MySQL..."
query = "SELECT DISTINCT(sensor_id) FROM shempserver.data_raw;"
response = []
response = execute_raw_query(cur, query)
response = [r[0] for r in response]
return response
def get_metadata(cur, sensor_id_list):
if(DEBUG_MODE):print "\nGetting metadata for each sensor_id..."
metadata = {}
packet_id = get_next_packet_id(cur)
for sensor_id in sensor_id_list:
query = "SELECT s.sensor_id, s.sensor_type_id, st.sensor_type, s.device_id "+\
"FROM sensors s JOIN sensor_types st "+\
"ON s.sensor_type_id = st.sensor_type_id "+\
"WHERE s.sensor_id = %s;" % (str(sensor_id),)
if(DEBUG_MODE):print "Executing: "+query
response_length = cur.execute(query)
if(response_length != 1): "Error executing query for sensor: "+str(sensor_id)
response = cur.fetchone()
if(response[0] not in metadata.keys()):
metadata[str(sensor_id)] = {"sensor_type_id": response[1], "packet_id":packet_id,
"sensor_type": response[2], "device_id":response[3]}
packet_id += 1
if(DEBUG_MODE):
print "\nMetadata returned: "
for key in metadata:
print "Key: "+key
print "Value: "+str(metadata[key])
return metadata
def get_next_packet_id(cur):
    """Return one greater than the current MAX(packet_id) in the endpoint table.

    Returns 1 when the endpoint table is empty.
    """
    query = "SELECT MAX(packet_id) FROM "+ENDPOINT+";"
    packet_id= execute_raw_query(cur, query)
    # If no packet_id, then table must be empty, return 1 in this case
    if(packet_id[0][0] == None):
        if(DEBUG_MODE):
            print "\nEndpoint table is empty, spawning first packet_id."
        return 1
    new_packet_id = int(packet_id[0][0]) + 1
    if(DEBUG_MODE):
        print "\nRetrieved next packet_id"
        print "Old packet_id: "+str(packet_id)
        print "New packet_id: "+str(new_packet_id)
    return new_packet_id
def update_endpoint(cur, data_list):
    """Insert each item of `data_list` into the endpoint table.

    NOTE(review): the INSERT statement is built but never executed; the
    loop only prints each row's tuple.  Looks unfinished -- confirm.
    """
    query = "INSERT INTO "+ENDPOINT+" (device_id,sensor_id,packet_id,sensor_type_id,sensor_type,"+\
            "`data`,frequency,microstamp) VALUES(%s,%s,%s,%s,%s,%s,%s,%s);"
    for item in data_list:
        print item.give_tuple()
def main():
    """Consolidate raw sensor rows from data_raw into enriched DataRaw objects."""
    # NOTE(review): PRODUCT is never defined or assigned anywhere at module
    # level; this global declaration looks vestigial.
    global PRODUCT
    check_debug()
    data_raw = []
    sensor_id = None
    metadata = None
    # Get references to the database
    db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DB)
    cur = db.cursor()
    # NOTE(review): truncate_query is built but never executed -- confirm
    # whether the source table was meant to be truncated after the read.
    truncate_query = "TRUNCATE TABLE shempserver.data_raw;"
    # Get a distinct list of sensor_ids from shempserver.data_raw table
    sensor_id = get_distinct_sensor_id(cur)
    # Get the metadata associated with each sensor_id
    metadata = get_metadata(cur, sensor_id)
    # Get raw row data from seads plug data landing table
    select_query = "SELECT * FROM shempserver.data_raw;"
    data_raw = execute_raw_query(cur, select_query)
    # Check that there is work to do
    if(len(data_raw) == 0):
        # No work to do, so exit
        exit()
    # Create DatRaw objects and fill in missing fields. Insert into product after.
    product = []
    while(len(data_raw) > 0):
        data_row = classes.DataRaw(data_raw.pop())
        # assumes get_sensor_id() returns the id as a string, since
        # metadata keys are str(sensor_id) -- TODO confirm
        sensor_id = data_row.get_sensor_id()
        # Get values for fields to be entered in data_row object
        device_id = metadata[sensor_id]['device_id']
        packet_id = metadata[sensor_id]['packet_id']
        sensor_type_id = metadata[sensor_id]['sensor_type_id']
        sensor_type = metadata[sensor_id]['sensor_type']
        frequency = FREQUENCY[sensor_type]
        # Set all of the missing fields
        data_row.set_device_id(device_id)
        data_row.set_packet_id(packet_id)
        data_row.set_sensor_type_id(sensor_type_id)
        data_row.set_sensor_type(sensor_type)
        data_row.set_frequency(frequency)
        product.append(data_row)
    if(DEBUG_MODE): print "[%d] data objects created" % (len(product),)
    cur.close()
    db.close()
# END MAIN ##############################################################################
if __name__ == "__main__":
main() | {
"content_hash": "2c065823a7e5973e777d31f6050910d9",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 103,
"avg_line_length": 30.089887640449437,
"alnum_prop": 0.6030619865571322,
"repo_name": "seadsystem/Backend",
"id": "f44d05737658826350cf88d4e507cdefe7225207",
"size": "5356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Analysis and Classification/Analysis/Code/quintin_work/python/code/get_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "71158"
},
{
"name": "Pascal",
"bytes": "70"
},
{
"name": "Protocol Buffer",
"bytes": "1194"
},
{
"name": "Puppet",
"bytes": "8195"
},
{
"name": "Python",
"bytes": "2905405"
},
{
"name": "Ruby",
"bytes": "3725"
},
{
"name": "Shell",
"bytes": "13204"
}
],
"symlink_target": ""
} |
import glooey
import pyglet
class MyPlaceholder(glooey.Placeholder):
    # Request a fixed minimum size for the widget.
    custom_size_hint = 300, 200 # width, height
    # Center the widget instead of letting it fill the window.
    custom_alignment = 'center'
# Create a window, attach a GUI root to it, and show the placeholder widget.
window = pyglet.window.Window()
gui = glooey.Gui(window)
widget = MyPlaceholder()
gui.add(widget)
# Enter pyglet's event loop (blocks until the window is closed).
pyglet.app.run()
| {
"content_hash": "96c99aebcd8923b47cb2d44a57a758be",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 48,
"avg_line_length": 18,
"alnum_prop": 0.725925925925926,
"repo_name": "kxgames/glooey",
"id": "798a721247c50cfbb84673de00842e13b26d5b2c",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/padding_alignment_size_hints/size_hints.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "446431"
},
{
"name": "Scheme",
"bytes": "485"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
} |
import unicode_tex
def escape_for_tex(chars):
    """Return `chars` with each character replaced by its TeX escape.

    Characters with no entry in unicode_tex's mapping are kept as-is.
    Escapes that map to '\space' are converted back into a plain space.
    """
    # Generator expression replaces the needless list(map(lambda ...))
    # of the original; behaviour is unchanged.
    escaped = ''.join(unicode_tex.unicode_to_tex_map.get(ch, ch) for ch in chars)
    # '\s' is not a recognised escape sequence, so '\space' is the literal
    # backslash followed by 'space'.
    return escaped.replace('\space', ' ')
| {
"content_hash": "9a1d56653afe887e7eeb4500a27d5ddd",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 89,
"avg_line_length": 35.2,
"alnum_prop": 0.6875,
"repo_name": "emrehan/daily-planner",
"id": "833d37505b8a49481f46d48b4b06439e47d4396d",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2384"
},
{
"name": "TeX",
"bytes": "15516"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.template import RequestContext
from django.shortcuts import render, render_to_response
from django.views.generic import View
from django.http import HttpResponse
from django.core import serializers
from customers.models import Users as users
from customers.models import Vendors as vendors
from customers.models import Orders as orders
from customers.models import Product_Availability as product_availability
from customers.models import Address
import sys, json
import ast
class Dashboard(View):
try:
template_name = 'vendors/dashboard.html'
def get(self, request):
context_dict = {}
orders_list = []
json_data=[]
user = users.objects.get(username=request.user.username)
#Allow only admin and vendors to see the vendor pages otherwise redirect to the customer index page
if user.role == 'customer':
self.template_name = 'customers/index.html'
vendor_id = vendors.objects.get(user_id=user.id).id
products = product_availability.objects.filter(vendor=vendor_id).values('title','start_date','start_time','end_date','end_time')
book_products=""
if products:
for product in products:
book_products=book_products + "{start : '"+str(product['start_date'])+"T"+str(product['start_time'])+"',"+'end :' +"'"+str(product['end_date'])+"T"+str(product['end_time'])+"',"+'title :'+ "'"+str(product['title'])+"'},"
else:
book_products=None
context_dict.update({
'orders' : orders.objects.filter(vendor=vendor_id),
'book_products' : book_products
})
return render(request, self.template_name, context_dict)
except Exception as e:
print e
print sys.exc_traceback.tb_lineno
| {
"content_hash": "2fd907973f06c024e7467eb502bd0f0f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 235,
"avg_line_length": 43.395348837209305,
"alnum_prop": 0.6580921757770632,
"repo_name": "abhaystoic/barati",
"id": "5bba33d9228ebc330f7a930bb57b13c1c11911c9",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barati/vendors/views_cluster/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "363180"
},
{
"name": "CoffeeScript",
"bytes": "18651"
},
{
"name": "HTML",
"bytes": "314595"
},
{
"name": "JavaScript",
"bytes": "424509"
},
{
"name": "PHP",
"bytes": "742"
},
{
"name": "PLpgSQL",
"bytes": "156474"
},
{
"name": "Python",
"bytes": "192592"
},
{
"name": "Ruby",
"bytes": "198"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
from django.db import migrations
import datetime
# CognateClassCitation field values (everything except cognate_class_id)
# attached to cognate class 5822 by forwards_func below.
data = [{u'comment': u'From *sw\u012b(s) '
                     'which reflects the PIE acc. 2 pl. *us-we.',
         u'source_id': 273,
         u'reliability': u'A',
         u'pages': u'48-49',
         u'modified': datetime.datetime(2015, 11, 2, 11, 9, 2, 72904)},
        {u'comment': u"1. PIE *i\u032fu\u0301- 'you', "
                     "pronominal stem, 2nd person non-singular, "
                     "only nominative, suppletive oblique stem "
                     "*u\u032fo\u0301-.\r\n2. PIE *u\u032fo\u0301- "
                     "'you', pronominal stem, 2nd person non-singular, "
                     "oblique. Suppletive nominative PIE "
                     "*i\u032fu\u0301- 'you'. ",
         u'source_id': 294,
         u'reliability': u'A',
         u'pages': u'388-90, 855-860',
         u'modified': datetime.datetime(2015, 12, 9, 22, 4, 20, 365304)},
        {u'comment': u'For the Slavic forms: "The anlaut of the pronoun '
                     'was apparently remodelled after the oblique cases. '
                     'This must have occurred before the delabialization '
                     'of \xfc, which was an allophone of /u/ '
                     'after a preceding *j."',
         u'source_id': 81,
         u'reliability': u'A',
         u'pages': u'533',
         u'modified': datetime.datetime(2016, 7, 1, 13, 23, 49, 867057)}]
def forwards_func(apps, schema_editor):
    '''
    This migration was added as a reaction to problems
    with merging cognate classes described by @CasFre in [1].
    https://github.com/lingdb/CoBL/issues/197
    '''
    CognateClass = apps.get_model('lexicon', 'CognateClass')
    CognateClassCitation = apps.get_model('lexicon', 'CognateClassCitation')
    # Cognate class that needs the citations attached:
    target = 5822
    try:
        cognateClass = CognateClass.objects.get(id=target)
    except CognateClass.DoesNotExist:
        # Nothing to do when the class is absent.
        return
    known_sources = set(c.source_id
                        for c in cognateClass.cognateclasscitation_set.all())
    for entry in data:
        if entry['source_id'] in known_sources:
            continue
        CognateClassCitation.objects.create(cognate_class_id=target, **entry)
def reverse_func(apps, schema_editor):
    """Reverse migration is a deliberate no-op; it only reports that fact."""
    message = 'Nothing to do for reverse_func of 0080_fix_cognateClassCitations'
    print(message)
class Migration(migrations.Migration):
    # Runs after 0079 and applies the citation back-fill; the reverse
    # direction is a logged no-op (see reverse_func).
    dependencies = [('lexicon', '0079_auto_20160629_1150')]
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
| {
"content_hash": "5e59a32c31a5aef77a2676162ac0b818",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 40.10606060606061,
"alnum_prop": 0.5799017755950132,
"repo_name": "lingdb/CoBL-public",
"id": "dc34049854f3d2d01eda501af2d63c1db398acbf",
"size": "2671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ielex/lexicon/migrations/0080_fix_cognateClassCitations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "76222"
},
{
"name": "HTML",
"bytes": "558967"
},
{
"name": "JavaScript",
"bytes": "189642"
},
{
"name": "Python",
"bytes": "858438"
},
{
"name": "Shell",
"bytes": "1258"
},
{
"name": "TeX",
"bytes": "119143"
},
{
"name": "Vim script",
"bytes": "870"
}
],
"symlink_target": ""
} |
from datetime import datetime
import os
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.management import call_command
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.logger.models import Attachment, Instance
from onadata.libs.utils.image_tools import image_url
class TestAttachment(TestBase):
    """Tests for the Attachment model: mimetype, field limits, thumbnails."""

    def setUp(self):
        # Bug fix: super(self.__class__, ...) resolves against the *runtime*
        # class, so any subclass inheriting this setUp would recurse forever.
        # Name the class explicitly instead.
        super(TestAttachment, self).setUp()
        self._publish_transportation_form_and_submit_instance()
        self.media_file = "1335783522563.jpg"
        media_file = os.path.join(
            self.this_directory, 'fixtures',
            'transportation', 'instances', self.surveys[0], self.media_file)
        self.instance = Instance.objects.all()[0]
        self.attachment = Attachment.objects.create(
            instance=self.instance,
            media_file=File(open(media_file), media_file))

    def test_mimetype(self):
        # Mimetype is derived from the uploaded file's .jpg extension.
        self.assertEqual(self.attachment.mimetype, 'image/jpeg')

    def test_create_attachment_with_media_file_length_more_the_100(self):
        # Over-long file names must raise; names within the limit succeed.
        with self.assertRaises(ValueError):
            Attachment.objects.create(
                instance=self.instance,
                media_file='a' * 300
            )
        pre_count = Attachment.objects.count()
        Attachment.objects.create(
            instance=self.instance,
            media_file='a' * 150
        )
        self.assertEqual(pre_count + 1, Attachment.objects.count())

    def test_thumbnails(self):
        # Every size variant should exist in storage and appear in the URL.
        for attachment in Attachment.objects.filter(instance=self.instance):
            url = image_url(attachment, 'small')
            filename = attachment.media_file.name.replace('.jpg', '')
            thumbnail = '%s-small.jpg' % filename
            self.assertNotEqual(
                url.find(thumbnail), -1)
            for size in ['small', 'medium', 'large']:
                thumbnail = '%s-%s.jpg' % (filename, size)
                self.assertTrue(
                    default_storage.exists(thumbnail))
                default_storage.delete(thumbnail)

    def test_create_thumbnails_command(self):
        call_command("create_image_thumbnails")
        for attachment in Attachment.objects.filter(instance=self.instance):
            filename = attachment.media_file.name.replace('.jpg', '')
            for size in ['small', 'medium', 'large']:
                thumbnail = '%s-%s.jpg' % (filename, size)
                self.assertTrue(
                    default_storage.exists(thumbnail))
        check_datetime = datetime.now()
        # replace or regenerate thumbnails if they exist
        call_command("create_image_thumbnails", force=True)
        for attachment in Attachment.objects.filter(instance=self.instance):
            filename = attachment.media_file.name.replace('.jpg', '')
            for size in ['small', 'medium', 'large']:
                thumbnail = '%s-%s.jpg' % (filename, size)
                self.assertTrue(
                    default_storage.exists(thumbnail))
                self.assertTrue(
                    default_storage.modified_time(thumbnail) > check_datetime)
                default_storage.delete(thumbnail)
| {
"content_hash": "86608a61d5751ea23cdc3b6ec18c4d5b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 42.10526315789474,
"alnum_prop": 0.61375,
"repo_name": "smn/onadata",
"id": "e87d1fb7543d8aac5727fc284966c80b5c463973",
"size": "3200",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "onadata/apps/logger/tests/models/test_attachment.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "HTML",
"bytes": "248525"
},
{
"name": "JavaScript",
"bytes": "904742"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2816625"
},
{
"name": "Shell",
"bytes": "14149"
}
],
"symlink_target": ""
} |
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import strutils
import webob
from cinder.api import api_utils
from cinder.api import common
from cinder.api.contrib import resource_common_manage
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_manage
from cinder.api.v2.views import volumes as volume_views
from cinder.api import validation
from cinder.api.views import manageable_volumes as list_manageable_view
from cinder import exception
from cinder.i18n import _
from cinder.policies import manageable_volumes as policy
from cinder import volume as cinder_volume
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class VolumeManageController(wsgi.Controller):
    """The /os-volume-manage controller for the OpenStack API."""

    # View builder used to serialize volumes in create() responses.
    _view_builder_class = volume_views.ViewBuilder

    def __init__(self, *args, **kwargs):
        super(VolumeManageController, self).__init__(*args, **kwargs)
        self.volume_api = cinder_volume.API()
        # Separate view builder for the index/detail listings below.
        self._list_manageable_view = list_manageable_view.ViewBuilder()

    # Two schemas: the pre-3.16 one (host only) and the 3.16+ one, which
    # also accepts a cluster reference.
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(volume_manage.volume_manage_create, mv.BASE_VERSION,
                       mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER))
    @validation.schema(volume_manage.volume_manage_create_v316,
                       mv.VOLUME_MIGRATE_CLUSTER)
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.

        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to
        manage it, and possibly renaming the backend storage object
        (driver dependent)

        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are
        taking an existing storage object into Cinder management.

        Required HTTP Body:

        .. code-block:: json

         {
          "volume": {
            "host": "<Cinder host on which the existing storage resides>",
            "cluster": "<Cinder cluster on which the storage resides>",
            "ref": "<Driver-specific reference to existing storage object>"
          }
         }

        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        cinder host that is not registered.

        The volume will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'volume' are::

            name               A name for the new volume.
            description        A description for the new volume.
            volume_type        ID or name of a volume type to associate with
                               the new Cinder volume. Does not necessarily
                               guarantee that the managed volume will have the
                               properties described in the volume_type. The
                               driver may choose to fail if it identifies that
                               the specified volume_type is not compatible with
                               the backend storage object.
            metadata           Key/value pairs to be associated with the new
                               volume.
            availability_zone  The availability zone to associate with the new
                               volume.
            bootable           If set to True, marks the volume as bootable.

        :raises InvalidVolumeType: the named volume type does not exist
        :raises ServiceUnavailable: the host/cluster is not registered
        """
        context = req.environ['cinder.context']
        context.authorize(policy.MANAGE_POLICY)
        volume = body['volume']
        # Pre-3.16 requests carry only 'host'; 3.16+ may carry 'cluster'.
        cluster_name, host = common.get_cluster_host(
            req, volume, mv.VOLUME_MIGRATE_CLUSTER)
        LOG.debug('Manage volume request body: %s', body)
        kwargs = {}
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            # Accept either a type name or a type id.
            try:
                kwargs['volume_type'] = volume_types.get_by_name_or_id(
                    context, req_volume_type)
            except exception.VolumeTypeNotFound:
                msg = _("Cannot find requested '%s' "
                        "volume type") % req_volume_type
                raise exception.InvalidVolumeType(reason=msg)
        else:
            kwargs['volume_type'] = {}
        if volume.get('name'):
            kwargs['name'] = volume.get('name').strip()
        if volume.get('description'):
            kwargs['description'] = volume.get('description').strip()
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        bootable = volume.get('bootable', False)
        # strict=True makes a malformed 'bootable' string raise rather than
        # silently coerce to False.
        kwargs['bootable'] = strutils.bool_from_string(bootable, strict=True)
        try:
            new_volume = self.volume_api.manage_existing(context,
                                                         host,
                                                         cluster_name,
                                                         volume['ref'],
                                                         **kwargs)
        except exception.ServiceNotFound:
            # Report whichever of host/cluster was actually requested.
            msg = _("%(name)s '%(value)s' not found") % {
                'name': 'Host' if host else 'Cluster',
                'value': host or cluster_name}
            raise exception.ServiceUnavailable(message=msg)
        except exception.VolumeTypeDefaultMisconfiguredError as err:
            raise webob.exc.HTTPInternalServerError(explanation=err.msg)
        api_utils.add_visible_admin_metadata(new_volume)
        return self._view_builder.detail(req, new_volume)

    @wsgi.extends
    def index(self, req):
        """Returns a summary list of volumes available to manage."""
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, False, self.volume_api.get_manageable_volumes,
            self._list_manageable_view)

    @wsgi.extends
    def detail(self, req):
        """Returns a detailed list of volumes available to manage."""
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, True, self.volume_api.get_manageable_volumes,
            self._list_manageable_view)
class Volume_manage(extensions.ExtensionDescriptor):
    """Allows existing backend storage to be 'managed' by Cinder."""

    name = 'VolumeManage'
    alias = 'os-volume-manage'
    updated = '2014-02-10T00:00:00+00:00'

    def get_resources(self):
        """Register the os-volume-manage resource with a GET detail action."""
        resource = extensions.ResourceExtension(
            Volume_manage.alias,
            VolumeManageController(),
            collection_actions={'detail': 'GET'})
        return [resource]
| {
"content_hash": "d3ab3f1b64cd237c95670f1f2c1200f8",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 79,
"avg_line_length": 41.66477272727273,
"alnum_prop": 0.607527614891586,
"repo_name": "mahak/cinder",
"id": "3cc246925a276805509f66af570c40add6eeea39",
"size": "7926",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/api/contrib/volume_manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078356"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
} |
import sys
import logging
import colorlog
from .main import Turbo

# Public API re-exported by the package.
__all__ = ['Turbo']

# Package logger; it accepts everything at DEBUG and above and lets the
# two handlers below decide where records end up.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# File handler: full log written to turbo.log, truncated ('w') on each run.
fh = logging.FileHandler(
    filename='turbo.log', encoding='utf-8', mode='w')
fh.setFormatter(logging.Formatter(
    # NOTE(review): "(unknown)" looks like a placeholder where a module or
    # file name was meant to appear — confirm against the original source.
    "[{asctime}] {levelname} ((unknown) L{lineno}, {funcName}): {message}", style='{'
))

# Console handler: colour-coded output with a distinct format per level
# (colorlog.LevelFormatter selects the fmt entry by record level).
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(colorlog.LevelFormatter(
    fmt={
        "DEBUG": "{log_color}{levelname} ({module} L{lineno}, {funcName}): {message}",
        "INFO": "{log_color}{message}",
        "WARNING": "{log_color}{levelname}: {message}",
        "ERROR": "{log_color}{levelname} ({module} L{lineno}, {funcName}): {message}",
        "CRITICAL": "{log_color}{levelname} ({module} L{lineno}, {funcName}): {message}"
    },
    log_colors={
        "DEBUG": "purple",
        "INFO": "white",
        "WARNING": "yellow",
        "ERROR": "red",
        "CRITICAL": "bold_red"
    },
    style='{'
))
sh.setLevel(logging.DEBUG)

logger.addHandler(fh)
logger.addHandler(sh)
| {
"content_hash": "8b44a122ca99db1ed8b0a44f4e1f19b8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 88,
"avg_line_length": 29.027027027027028,
"alnum_prop": 0.6108007448789572,
"repo_name": "jaydenkieran/Turbo",
"id": "691d4e310241aaa19ca51388be0792d729a21372",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1136"
},
{
"name": "Python",
"bytes": "43200"
},
{
"name": "Shell",
"bytes": "398"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choice set and default of
    LikertQuestion.scale (adds reverse/frequency/importance/satisfaction
    scales). Schema is unchanged apart from the field definition."""

    dependencies = [
        ('survey', '0015_auto_20150325_1306'),
    ]

    operations = [
        migrations.AlterField(
            model_name='likertquestion',
            name='scale',
            field=models.CharField(max_length=32, default='5_point_agreement', choices=[('5_point_agreement', '5 Point Agreement'), ('5_point_reverse_agreement', '5 Point Reverse Agreement'), ('7_point_agreement', '7 Point Agreement'), ('7_point_reverse_agreement', '7 Point Reverse Agreement'), ('9_point_agreement', '9 Point Agreement'), ('9_point_reverse_agreement', '9 Point Reverse Agreement'), ('5_point_frequency', '5 Point Frequency'), ('7_point_frequency', '7 Point Frequency'), ('7_point_importance', '7 Point Importance'), ('5_point_satisfaction', '5 Point Satisfaction'), ('7_point_satisfaction', '7 Point Satisfaction')], help_text='Select the Scale for this question'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "77c34b4b648c5a9faa2d2560e6350a1b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 683,
"avg_line_length": 55.36842105263158,
"alnum_prop": 0.6653992395437263,
"repo_name": "izzyalonso/tndata_backend",
"id": "2825a28f52112631ac256f0e9155fcf9f766659a",
"size": "1076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/survey/migrations/0016_auto_20150408_1416.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
import os.path
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from djblets.log import siteconfig as log_siteconfig
from djblets.siteconfig.django_settings import apply_django_settings, \
get_django_defaults, \
get_django_settings_map
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.admin.checks import get_can_enable_search, \
get_can_enable_syntax_highlighting
# A mapping of our supported authentication backend names to backend class
# paths.
auth_backend_map = {
    'builtin': 'django.contrib.auth.backends.ModelBackend',
    'nis': 'reviewboard.accounts.backends.NISBackend',
    'ldap': 'reviewboard.accounts.backends.LDAPBackend',
    'ad': 'reviewboard.accounts.backends.ActiveDirectoryBackend',
}

# A mapping of siteconfig setting names to Django settings.py names.
# This also contains all the djblets-provided mappings as well.
settings_map = {
    'auth_ldap_anon_bind_uid': 'LDAP_ANON_BIND_UID',
    'auth_ldap_anon_bind_passwd': 'LDAP_ANON_BIND_PASSWD',
    'auth_ldap_email_domain': 'LDAP_EMAIL_DOMAIN',
    'auth_ldap_email_attribute': 'LDAP_EMAIL_ATTRIBUTE',
    'auth_ldap_tls': 'LDAP_TLS',
    'auth_ldap_base_dn': 'LDAP_BASE_DN',
    'auth_ldap_uid_mask': 'LDAP_UID_MASK',
    'auth_ldap_uri': 'LDAP_URI',
    'auth_ad_domain_name': 'AD_DOMAIN_NAME',
    'auth_ad_use_tls': 'AD_USE_TLS',
    'auth_ad_find_dc_from_dns': 'AD_FIND_DC_FROM_DNS',
    'auth_ad_domain_controller': 'AD_DOMAIN_CONTROLLER',
    'auth_ad_ou_name': 'AD_OU_NAME',
    'auth_ad_group_name': 'AD_GROUP_NAME',
    'auth_ad_search_root': 'AD_SEARCH_ROOT',
    'auth_ad_recursion_depth': 'AD_RECURSION_DEPTH',
    'auth_nis_email_domain': 'NIS_EMAIL_DOMAIN',
    'site_domain_method': 'DOMAIN_METHOD',
}

# Merge in the generic Django mappings and the djblets logging mappings so a
# single dict drives apply_django_settings() in load_site_config().
settings_map.update(get_django_settings_map())
settings_map.update(log_siteconfig.settings_map)

# All the default values for settings.  These are applied to the stored
# SiteConfiguration the first time it is loaded (see load_site_config()).
defaults = get_django_defaults()
defaults.update(log_siteconfig.defaults)
defaults.update({
    'auth_ldap_anon_bind_uid': '',
    'auth_ldap_anon_bind_passwd': '',
    'auth_ldap_email_domain': '',
    'auth_ldap_tls': False,
    'auth_ldap_uid_mask': '',
    'auth_ldap_uri': '',
    'auth_nis_email_domain': '',
    'auth_require_sitewide_login': False,
    'auth_custom_backends': [],
    'auth_enable_registration': True,
    'diffviewer_context_num_lines': 5,
    'diffviewer_include_space_patterns': [],
    'diffviewer_paginate_by': 20,
    'diffviewer_paginate_orphans': 10,
    'diffviewer_syntax_highlighting': True,
    'diffviewer_syntax_highlighting_threshold': 0,
    'diffviewer_show_trailing_whitespace': True,
    'mail_send_review_mail': False,
    'search_enable': False,
    'site_domain_method': 'http',

    # TODO: Allow relative paths for the index file later on.
    'search_index_file': os.path.join(settings.REVIEWBOARD_ROOT,
                                      'search-index'),

    # Overwrite this.
    'site_media_url': settings.SITE_ROOT + "media/"
})
def load_site_config():
    """
    Loads any stored site configuration settings and populates the Django
    settings object with any that need to be there.
    """
    def apply_setting(settings_key, db_key, default=None):
        # Copy one siteconfig value onto the Django settings module.
        # NOTE(review): the truthiness tests mean a stored falsy value
        # (False, 0, "") falls through to `default` — confirm intended.
        db_value = siteconfig.settings.get(db_key)

        if db_value:
            setattr(settings, settings_key, db_value)
        elif default:
            setattr(settings, settings_key, default)

    try:
        siteconfig = SiteConfiguration.objects.get_current()
    except SiteConfiguration.DoesNotExist:
        # Python 2 raise syntax — this module predates Python 3.
        raise ImproperlyConfigured, \
            "The site configuration entry does not exist in the database. " \
            "Re-run `./manage.py` syncdb to fix this."
    except:
        # We got something else. Likely, this doesn't exist yet and we're
        # doing a syncdb or something, so silently ignore.
        return

    # Populate defaults if they weren't already set.
    if not siteconfig.get_defaults():
        siteconfig.add_defaults(defaults)

    # Populate the settings object with anything relevant from the siteconfig.
    apply_django_settings(siteconfig, settings_map)

    # Now for some more complicated stuff...

    # Do some dependency checks and disable things if we don't support them.
    if not get_can_enable_search()[0]:
        siteconfig.set('search_enable', False)

    if not get_can_enable_syntax_highlighting()[0]:
        siteconfig.set('diffviewer_syntax_highlighting', False)

    # Site administrator settings
    apply_setting("ADMINS", None, (
        (siteconfig.get("site_admin_name", ""),
         siteconfig.get("site_admin_email", "")),
    ))

    apply_setting("MANAGERS", None, settings.ADMINS)

    # Explicitly base this off the MEDIA_URL
    apply_setting("ADMIN_MEDIA_PREFIX", None, settings.MEDIA_URL + "admin/")

    # Set the auth backends
    auth_backend = siteconfig.settings.get("auth_backend", "builtin")
    builtin_backend = auth_backend_map['builtin']

    if auth_backend == "custom":
        custom_backends = siteconfig.settings.get("auth_custom_backends")

        # `basestring` is Python 2; accepts a single dotted path or a list.
        if isinstance(custom_backends, basestring):
            custom_backends = (custom_backends,)
        elif isinstance(custom_backends, list):
            custom_backends = tuple(custom_backends)

        settings.AUTHENTICATION_BACKENDS = custom_backends

        # Always keep the builtin backend available as a fallback.
        if builtin_backend not in custom_backends:
            settings.AUTHENTICATION_BACKENDS += (builtin_backend,)
    elif auth_backend != "builtin" and auth_backend in auth_backend_map:
        settings.AUTHENTICATION_BACKENDS = \
            (auth_backend_map[auth_backend], builtin_backend)
    else:
        settings.AUTHENTICATION_BACKENDS = (builtin_backend,)
| {
"content_hash": "71d87dc107cea59739fe78e083c18d33",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 38.012269938650306,
"alnum_prop": 0.6223369916074887,
"repo_name": "asutherland/opc-reviewboard",
"id": "73ff16f1284db3ca8b79d160b587af03bdae6a4b",
"size": "6196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/admin/siteconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "101161"
},
{
"name": "JavaScript",
"bytes": "232746"
},
{
"name": "Python",
"bytes": "687315"
},
{
"name": "Shell",
"bytes": "1616"
}
],
"symlink_target": ""
} |
import os
from mock import Mock, MagicMock, patch, mock_open
import pexpect
from trove.common.exception import GuestError, ProcessExecutionError
from trove.common import utils
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
def _setUp_fake_spawn(return_val=0):
    """Globally replace pexpect.spawn with a stub child process.

    The stub's expect() is a Mock returning ``return_val``.  The real
    pexpect.spawn is NOT restored here; tests rely on mock.patch for that.
    """
    stub_child = pexpect.spawn('echo')
    stub_child.expect = Mock(return_value=return_val)
    pexpect.spawn = Mock(return_value=stub_child)
    return stub_child
class VolumeDeviceTest(trove_testtools.TestCase):
    """Unit tests for volume.VolumeDevice.

    Older tests monkeypatch module globals (utils.execute, os.path.exists,
    pexpect.spawn) by hand and restore them at the end; newer ones use
    mock.patch decorators/context managers.
    """

    def setUp(self):
        super(VolumeDeviceTest, self).setUp()
        self.volumeDevice = volume.VolumeDevice('/dev/vdb')

    def tearDown(self):
        super(VolumeDeviceTest, self).tearDown()

    @patch.object(pexpect, 'spawn', Mock())
    def test_migrate_data(self):
        origin_execute = utils.execute
        utils.execute = Mock()
        origin_os_path_exists = os.path.exists
        os.path.exists = Mock()
        fake_spawn = _setUp_fake_spawn()

        origin_unmount = self.volumeDevice.unmount
        self.volumeDevice.unmount = MagicMock()
        self.volumeDevice.migrate_data('/')
        self.assertEqual(1, fake_spawn.expect.call_count)
        self.assertEqual(1, utils.execute.call_count)
        self.assertEqual(1, self.volumeDevice.unmount.call_count)

        # Restore the globals patched by hand above.
        utils.execute = origin_execute
        self.volumeDevice.unmount = origin_unmount
        os.path.exists = origin_os_path_exists

    def test__check_device_exists(self):
        origin_execute = utils.execute
        utils.execute = Mock()
        self.volumeDevice._check_device_exists()
        self.assertEqual(1, utils.execute.call_count)
        utils.execute = origin_execute

    @patch('trove.guestagent.volume.LOG')
    def test_fail__check_device_exists(self, mock_logging):
        with patch.object(utils, 'execute', side_effect=ProcessExecutionError):
            self.assertRaises(GuestError,
                              self.volumeDevice._check_device_exists)

    @patch.object(pexpect, 'spawn', Mock())
    def test__check_format(self):
        fake_spawn = _setUp_fake_spawn()

        self.volumeDevice._check_format()
        self.assertEqual(1, fake_spawn.expect.call_count)

    @patch.object(pexpect, 'spawn', Mock())
    def test__check_format_2(self):
        # expect() returning 1 means "bad superblock" -> IOError expected.
        fake_spawn = _setUp_fake_spawn(return_val=1)

        self.assertEqual(0, fake_spawn.expect.call_count)
        self.assertRaises(IOError, self.volumeDevice._check_format)

    @patch.object(pexpect, 'spawn', Mock())
    def test__format(self):
        fake_spawn = _setUp_fake_spawn()

        self.volumeDevice._format()
        self.assertEqual(1, fake_spawn.expect.call_count)
        self.assertEqual(1, pexpect.spawn.call_count)

    def test_format(self):
        # format() must call all three private steps exactly once.
        origin_check_device_exists = self.volumeDevice._check_device_exists
        origin_format = self.volumeDevice._format
        origin_check_format = self.volumeDevice._check_format
        self.volumeDevice._check_device_exists = MagicMock()
        self.volumeDevice._check_format = MagicMock()
        self.volumeDevice._format = MagicMock()

        self.volumeDevice.format()

        self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
        self.assertEqual(1, self.volumeDevice._format.call_count)
        self.assertEqual(1, self.volumeDevice._check_format.call_count)

        self.volumeDevice._check_device_exists = origin_check_device_exists
        self.volumeDevice._format = origin_format
        self.volumeDevice._check_format = origin_check_format

    def test_mount(self):
        origin_ = volume.VolumeMountPoint.mount
        volume.VolumeMountPoint.mount = Mock()
        origin_os_path_exists = os.path.exists
        os.path.exists = Mock()
        origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab
        volume.VolumeMountPoint.write_to_fstab = Mock()

        self.volumeDevice.mount(Mock)
        self.assertEqual(1, volume.VolumeMountPoint.mount.call_count)
        self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count)

        volume.VolumeMountPoint.mount = origin_
        volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab
        os.path.exists = origin_os_path_exists

    def test_resize_fs(self):
        origin_check_device_exists = self.volumeDevice._check_device_exists
        origin_execute = utils.execute
        utils.execute = Mock()
        self.volumeDevice._check_device_exists = MagicMock()
        origin_os_path_exists = os.path.exists
        os.path.exists = Mock()

        self.volumeDevice.resize_fs('/mnt/volume')

        self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
        self.assertEqual(2, utils.execute.call_count)

        self.volumeDevice._check_device_exists = origin_check_device_exists
        os.path.exists = origin_os_path_exists
        utils.execute = origin_execute

    @patch.object(os.path, 'ismount', return_value=True)
    @patch.object(utils, 'execute', side_effect=ProcessExecutionError)
    @patch('trove.guestagent.volume.LOG')
    def test_fail_resize_fs(self, mock_logging, mock_execute, mock_mount):
        with patch.object(self.volumeDevice, '_check_device_exists'):
            self.assertRaises(GuestError,
                              self.volumeDevice.resize_fs, '/mnt/volume')
            self.assertEqual(1,
                             self.volumeDevice._check_device_exists.call_count)
            self.assertEqual(1, mock_mount.call_count)

    def test_unmount_positive(self):
        self._test_unmount()

    def test_unmount_negative(self):
        self._test_unmount(False)

    @patch.object(pexpect, 'spawn', Mock())
    def _test_unmount(self, positive=True):
        # When the mount point does not exist, unmount must be a no-op.
        origin_ = os.path.exists
        os.path.exists = MagicMock(return_value=positive)
        fake_spawn = _setUp_fake_spawn()

        self.volumeDevice.unmount('/mnt/volume')
        COUNT = 1
        if not positive:
            COUNT = 0
        self.assertEqual(COUNT, fake_spawn.expect.call_count)
        os.path.exists = origin_

    @patch.object(utils, 'execute', return_value=('/var/lib/mysql', ''))
    def test_mount_points(self, mock_execute):
        mount_point = self.volumeDevice.mount_points('/dev/vdb')
        self.assertEqual(['/var/lib/mysql'], mount_point)

    @patch.object(utils, 'execute', side_effect=ProcessExecutionError)
    @patch('trove.guestagent.volume.LOG')
    def test_fail_mount_points(self, mock_logging, mock_execute):
        self.assertRaises(GuestError, self.volumeDevice.mount_points,
                          '/mnt/volume')

    def test_set_readahead_size(self):
        origin_check_device_exists = self.volumeDevice._check_device_exists
        self.volumeDevice._check_device_exists = MagicMock()
        mock_execute = MagicMock(return_value=None)
        readahead_size = 2048

        self.volumeDevice.set_readahead_size(readahead_size,
                                             execute_function=mock_execute)

        # Bug fix: the original fetched mock_execute.call_args_list[0] (a
        # `mock.call` object) and invoked .assert_called_with(...) on it.
        # Attribute access on a `call` just builds a child call, so that
        # "assertion" never checked anything.  Assert on the mock itself.
        mock_execute.assert_called_with("sudo", "blockdev", "--setra",
                                        readahead_size, "/dev/vdb")

        self.volumeDevice._check_device_exists = origin_check_device_exists

    @patch('trove.guestagent.volume.LOG')
    def test_fail_set_readahead_size(self, mock_logging):
        mock_execute = MagicMock(side_effect=ProcessExecutionError)
        readahead_size = 2048
        with patch.object(self.volumeDevice, '_check_device_exists'):
            self.assertRaises(GuestError, self.volumeDevice.set_readahead_size,
                              readahead_size, execute_function=mock_execute)
            self.volumeDevice._check_device_exists.assert_any_call()
class VolumeMountPointTest(trove_testtools.TestCase):
    """Unit tests for volume.VolumeMountPoint."""

    def setUp(self):
        super(VolumeMountPointTest, self).setUp()
        self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device',
                                                        '/dev/vdb')

    def tearDown(self):
        super(VolumeMountPointTest, self).tearDown()

    @patch.object(pexpect, 'spawn', Mock())
    def test_mount(self):
        # os.path.exists is replaced globally (restored at the end);
        # _setUp_fake_spawn() rebinds pexpect.spawn to a canned child, and
        # the decorator restores the real pexpect.spawn afterwards.
        origin_ = os.path.exists
        os.path.exists = MagicMock(return_value=False)
        fake_spawn = _setUp_fake_spawn()

        with patch.object(utils, 'execute_with_timeout',
                          return_value=('0', '')):
            self.volumeMountPoint.mount()
            self.assertEqual(1, os.path.exists.call_count)
            self.assertEqual(1, utils.execute_with_timeout.call_count)
            self.assertEqual(1, fake_spawn.expect.call_count)

        os.path.exists = origin_

    def test_write_to_fstab(self):
        origin_execute = utils.execute
        utils.execute = Mock()
        # Intercept the volume module's open() so no real fstab is touched.
        m = mock_open()
        with patch('%s.open' % volume.__name__, m, create=True):
            self.volumeMountPoint.write_to_fstab()

        self.assertEqual(1, utils.execute.call_count)
        utils.execute = origin_execute
| {
"content_hash": "8c260bd5c100160047071f4806d15143",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 79,
"avg_line_length": 39.59649122807018,
"alnum_prop": 0.648980948161276,
"repo_name": "mmasaki/trove",
"id": "efbb96bb7cdf0c2badad400386ca5bdb97c1d2de",
"size": "9644",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trove/tests/unittests/guestagent/test_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60539"
},
{
"name": "Python",
"bytes": "4204079"
},
{
"name": "Shell",
"bytes": "19186"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
} |
import os
def data_path(name):
    """
    Return the absolute path to a file in the varcode/test/data directory.

    The name specified should be relative to varcode/test/data.
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, "data", name)
"content_hash": "16805a10da10d122bb5529c915969785",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 74,
"avg_line_length": 31.375,
"alnum_prop": 0.6772908366533864,
"repo_name": "hammerlab/gtftools",
"id": "0ea6986ecf2ce7d5ad8bead4f4a71c23e5458974",
"size": "251",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40836"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
import datetime, sys, os
import xlwt
import env
import common
import htmlreport
def generate_result_xls():
    """Render env.EXCEL_REPORT_DATA as result/result.xls, one sheet per module.

    Each sheet gets a bold header row and one row per test case; per-browser
    verdicts are written in green for "Pass" and red for "Fail" (any other
    value is left blank, as before).
    """
    wbk = xlwt.Workbook()
    style_red = xlwt.easyxf('font: colour red, bold False;')
    style_green = xlwt.easyxf('font: colour green, bold False;')
    style_bold = xlwt.easyxf('font: colour black, bold True;')

    for module in env.EXCEL_REPORT_DATA:
        # Fix: dict.has_key() was removed in Python 3; `in` is the
        # equivalent (and also works in Python 2).
        if "Name" not in module:
            continue
        sheet = wbk.add_sheet(module["Name"])
        sheet.write(0, 0, 'Test Case Name', style_bold)
        sheet.write(0, 1, 'IE', style_bold)
        sheet.write(0, 2, 'Firefox', style_bold)
        sheet.write(0, 3, 'Chrome', style_bold)
        sheet.col(0).width = 256 * 80
        sheet.col(1).width = 256 * 20
        sheet.col(2).width = 256 * 20
        sheet.col(3).width = 256 * 20

        # Row 0 is the header, so data rows start at 1.
        for row, case in enumerate(module["TestCases"], start=1):
            sheet.write(row, 0, case["Name"])
            for col, browser in enumerate(("IE", "Firefox", "Chrome"),
                                          start=1):
                verdict = case.get(browser)
                if verdict == "Pass":
                    sheet.write(row, col, verdict, style_green)
                elif verdict == "Fail":
                    sheet.write(row, col, verdict, style_red)

    # NOTE(review): force_delete_file() presumably removes any stale file and
    # returns the path for Workbook.save() — confirm against `common`.
    wbk.save(common.force_delete_file(
        os.path.join(env.RESULT_PATH, "result", "result.xls")))
def add_excel_report_data(list_all=None, module_name="TestModule",
                          case_name="TestCase", browser_type="IE",
                          result="Pass"):
    """Record one browser verdict into the nested report structure.

    ``list_all`` is a list of ``{"Name": module, "TestCases": [...]}`` dicts;
    the matching module/case entries are created on demand and the (mutated)
    list is returned.

    Bug fix: the default was a mutable ``[]``, so results from successive
    calls that omitted ``list_all`` accumulated in one shared list.  A
    ``None`` sentinel now yields a fresh list per call (backward-compatible
    for every caller that passed a list explicitly).
    """
    if list_all is None:
        list_all = []

    for module in list_all:
        if module_name == module["Name"]:
            for case in module["TestCases"]:
                if case_name == case["Name"]:
                    # Existing case: just record/overwrite this browser.
                    case[browser_type] = result
                    return list_all
            # Known module, new case.
            module["TestCases"].append({"Name": case_name,
                                        browser_type: result})
            return list_all

    # First result for this module.
    list_all.append({"Name": module_name,
                     "TestCases": [{"Name": case_name, browser_type: result}]})
    return list_all
def start_test(case_name):
    """Initialise per-thread state for one test case and open its log."""
    state = env.threadlocal
    state.CASE_NAME = case_name
    state.CASE_START_TIME = datetime.datetime.now().replace(microsecond=0)
    state.CASE_PASS = True
    state.CASE_WARNINGS = 0
    header = ("\n************** Test Case [%s] [%s] ***************\n"
              % (case_name, state.TESTING_BROWSER))
    write_log(os.path.join("testcase", "%s.log" % (case_name)), header)
def start_total_test():
    """Reset thread-local and global counters and kick off a fresh run."""
    state = env.threadlocal
    state.CASE_START_TIME = ""
    state.CASE_STOP_TIME = ""
    state.CASE_NAME = ""
    state.CASE_PASS = True
    state.CASE_WARNINGS = 0
    state.MODULE_NAME = ""
    state.BROWSER = None
    state.TESTING_BROWSER = ""
    state.TESTING_BROWSERS = ""

    env.TOTAL_TESTCASE_PASS = 0
    env.TOTAL_TESTCASE_FAIL = 0
    env.HTMLREPORT_TESTCASES[:] = []

    # Wipe the artefacts of any previous run.
    for folder in ("testcase", "screenshots"):
        common.delete_file_or_folder(
            os.path.join(env.RESULT_PATH, "result", folder))

    env.TOTAL_START_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print (">>>>>> [%s] => start testing...... <<<<<<"
           % (env.TOTAL_START_TIME,))
    # Seed the HTML report with placeholders; filled in as cases finish.
    htmlreport.generate_html_report(
        [env.TOTAL_START_TIME, "N/A", "N/A", "N/A", "N/A", "N/A"], [])
def finish_total_test():
    """Print the run summary, then emit and archive the HTML reports."""
    env.TOTAL_STOP_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    fmt = "%Y-%m-%d %H:%M:%S"

    def run_summary():
        # Recomputed at every call site (as the original code did) in case
        # the counters change between report stages.
        elapsed = (datetime.datetime.strptime(env.TOTAL_STOP_TIME, fmt) -
                   datetime.datetime.strptime(env.TOTAL_START_TIME, fmt))
        executed = env.TOTAL_TESTCASE_PASS + env.TOTAL_TESTCASE_FAIL
        return [env.TOTAL_START_TIME, env.TOTAL_STOP_TIME, elapsed,
                executed, env.TOTAL_TESTCASE_PASS, env.TOTAL_TESTCASE_FAIL]

    print (">>>>>> [%s] => [%s], duration [%s], case [%s], pass [%s], fail [%s] <<<<<<"
           % tuple(run_summary()))
    print (">>>>>> [%s] => [%s]" % (env.TOTAL_START_TIME,
                                    common.get_version_info()))

    htmlreport.generate_html_report(run_summary(), env.HTMLREPORT_TESTCASES,
                                    countdown=False)
    htmlreport.save_current_report_to_repository()
    htmlreport.generate_report_history()
    htmlreport.generate_html_report(run_summary(), env.HTMLREPORT_TESTCASES,
                                    countdown=True)

    # Reset for the next run.
    env.TOTAL_TESTCASE_PASS = 0
    env.TOTAL_TESTCASE_FAIL = 0
    env.HTMLREPORT_TESTCASES[:] = []
    print ("\n")
def stop_test():
    """Finalise the current test case: print its verdict, update the global
    pass/fail counters and regenerate the HTML report.

    Runs under env.THREAD_LOCK because it mutates shared (non-thread-local)
    counters and the report list.
    """
    try:
        env.THREAD_LOCK.acquire()
        env.threadlocal.CASE_STOP_TIME = datetime.datetime.now().replace(microsecond=0)
        env.TOTAL_STOP_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        # Suffix appended to the console verdict when warnings occurred.
        if env.threadlocal.CASE_WARNINGS > 0:
            warning_message = ", has [%s] warning(s)!" % env.threadlocal.CASE_WARNINGS
        else:
            warning_message = ""

        if env.threadlocal.CASE_PASS == True:
            print (u"%s [Pass] => [%s] [%s] [%s] [%s]%s" %(common.stamp_datetime(),
                                                           env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
                                                           env.threadlocal.MODULE_NAME,
                                                           env.threadlocal.CASE_NAME,
                                                           env.threadlocal.TESTING_BROWSER,
                                                           warning_message
                                                           ))
            env.TOTAL_TESTCASE_PASS = env.TOTAL_TESTCASE_PASS + 1
            # Row: time span, linked log, duration, browser, verdict cell.
            env.HTMLREPORT_TESTCASES.append(["%s => %s" % (env.threadlocal.CASE_START_TIME.strftime("%m-%d %H:%M:%S"), env.threadlocal.CASE_STOP_TIME.strftime("%m-%d %H:%M:%S")),
                                             '<a href="testcase/%s.log">[%s] - %s</a>' % (env.threadlocal.CASE_NAME, env.threadlocal.MODULE_NAME, env.threadlocal.CASE_NAME),
                                             env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
                                             env.threadlocal.TESTING_BROWSER,
                                             '<td>Pass</td>'
                                             ])
        else:
            print (u"%s [Fail] => [%s] [%s] [%s] [%s]%s :( " %(common.stamp_datetime(),
                                                               env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
                                                               env.threadlocal.MODULE_NAME,
                                                               env.threadlocal.CASE_NAME,
                                                               env.threadlocal.TESTING_BROWSER,
                                                               warning_message
                                                               ))
            env.TOTAL_TESTCASE_FAIL = env.TOTAL_TESTCASE_FAIL + 1
            # Failed rows additionally link the failure screenshot.
            env.HTMLREPORT_TESTCASES.append(["%s => %s" % (env.threadlocal.CASE_START_TIME.strftime("%m-%d %H:%M:%S"),env.threadlocal.CASE_STOP_TIME.strftime("%m-%d %H:%M:%S")),
                                             '<a href="testcase/%s.log">[%s] - %s</a>' % (env.threadlocal.CASE_NAME, env.threadlocal.MODULE_NAME, env.threadlocal.CASE_NAME),
                                             env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
                                             env.threadlocal.TESTING_BROWSER,
                                             '<td class="tfail"><a href="screenshots/%s">Fail</a></td>' % env.HTMLREPORT_SCREENSHOT_NAME
                                             ])

        # Refresh the live HTML report after every case.
        htmlreport.generate_html_report([env.TOTAL_START_TIME, env.TOTAL_STOP_TIME, datetime.datetime.strptime(env.TOTAL_STOP_TIME, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(env.TOTAL_START_TIME, "%Y-%m-%d %H:%M:%S"),
                                         env.TOTAL_TESTCASE_PASS+env.TOTAL_TESTCASE_FAIL, env.TOTAL_TESTCASE_PASS, env.TOTAL_TESTCASE_FAIL],
                                        env.HTMLREPORT_TESTCASES)

        # Reset per-case state for the next test on this thread.
        env.threadlocal.CASE_PASS = True
        env.threadlocal.CASE_WARNINGS = 0
    finally:
        env.THREAD_LOCK.release()
def step_section(message):
    """Append a section header line to the current test case's log."""
    entry = "\n%s Section: %s\n" % (common.stamp_datetime(), message)
    write_log(os.path.join("testcase",
                           "%s.log" % (env.threadlocal.CASE_NAME)), entry)
def step_normal(message):
    """Append a plain step line to the current test case's log."""
    entry = "%s Step: %s\n" % (common.stamp_datetime(), message)
    write_log(os.path.join("testcase",
                           "%s.log" % (env.threadlocal.CASE_NAME)), entry)
def step_pass(message):
    """Append a passing-check line to the current test case's log."""
    entry = "%s Pass: %s\n" % (common.stamp_datetime(), message)
    write_log(os.path.join("testcase",
                           "%s.log" % (env.threadlocal.CASE_NAME)), entry)
def step_fail(message):
    """Log a failure with a screenshot, mark the case failed, and abort it.

    Raises AssertionError so the calling test case stops immediately.
    """
    screenshot_name = "Fail__%s__%s__%s.png" % (common.stamp_datetime_coherent(),
                                                env.threadlocal.CASE_NAME,
                                                env.threadlocal.TESTING_BROWSER)
    log_name = os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME))
    write_log(log_name,
              "------------ Fail [%s] -------------------\n" % common.stamp_datetime())
    write_log(log_name,
              "%s Fail: %s, Check ScreenShot [%s]\n" % (common.stamp_datetime(),
                                                        message, screenshot_name))
    write_log(log_name,
              "------------ Fail [%s] --------------------------------------------\n" % common.stamp_datetime())

    try:
        save_screen_shot(screenshot_name)
    except:
        # Best effort: a screenshot failure must not mask the real failure.
        step_normal(str(sys.exc_info()))

    env.HTMLREPORT_SCREENSHOT_NAME = screenshot_name
    env.threadlocal.CASE_PASS = False
    env.EXIT_STATUS = -1
    raise AssertionError(message)
def step_warning(message):
    """Log a non-fatal warning with a screenshot and bump the warning count.

    Unlike step_fail(), the case keeps running.
    """
    screenshot_name = "Warning__%s__%s__%s.png" % (common.stamp_datetime_coherent(),
                                                   env.threadlocal.CASE_NAME,
                                                   env.threadlocal.TESTING_BROWSER)
    log_name = os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME))
    write_log(log_name,
              "------------ Warning [%s] -------------------\n" % common.stamp_datetime())
    write_log(log_name,
              "%s Warning: %s, Check ScreenShot [%s]\n" % (common.stamp_datetime(),
                                                           message, screenshot_name))
    write_log(log_name,
              "------------ Warning [%s] --------------------------------------------\n" % common.stamp_datetime())

    try:
        save_screen_shot(screenshot_name)
    except:
        # Best effort: a screenshot failure must not escalate the warning.
        step_normal(str(sys.exc_info()))

    env.threadlocal.CASE_WARNINGS = env.threadlocal.CASE_WARNINGS + 1
def write_log(relative_path, log_message):
    """Append log_message to <RESULT_PATH>/result/<relative_path>.

    Parent directories are created on demand.
    """
    target = os.path.join(env.RESULT_PATH, "result", relative_path)
    common.mkdirs(os.path.dirname(target))
    with open(target, 'a') as log_file:
        log_file.write(log_message)
def save_screen_shot(image_name):
    """Save a browser screenshot into the shared screenshots folder."""
    shots_dir = os.path.join(env.RESULT_PATH, "result", "screenshots")
    common.mkdirs(shots_dir)
    env.threadlocal.BROWSER.save_screenshot(
        os.path.join(shots_dir, image_name))
def handle_error():
    """If an exception is currently in flight, record it against the
    running case (log + screenshot) and mark the case as failed.

    No-op when the case is already failed or no exception is pending.
    """
    if env.threadlocal.CASE_PASS == False:
        return
    if sys.exc_info()[0] != None:
        step_normal(common.exception_error())
        screenshot_name = "Fail__%s__%s__%s.png" % (common.stamp_datetime_coherent(), env.threadlocal.CASE_NAME, env.threadlocal.TESTING_BROWSER)
        try:
            save_screen_shot(screenshot_name)
        except:
            # NOTE(review): a screenshot failure is reported via
            # step_warning() here but via step_normal() in step_fail() —
            # confirm the inconsistency is intentional.
            step_warning(str(sys.exc_info()))
        step_normal("Current step screen short [%s]" % (screenshot_name))
        env.HTMLREPORT_SCREENSHOT_NAME = screenshot_name
        env.threadlocal.CASE_PASS = False
        env.EXIT_STATUS = -1
| {
"content_hash": "bc2ae0a5af89ec3ed49c56b6584c3672",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 226,
"avg_line_length": 40.21470588235294,
"alnum_prop": 0.5025232209463907,
"repo_name": "tangchenxuan/AutoMan",
"id": "5a84113cba386d392c68e8fb796677b438242a06",
"size": "13698",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "knitter/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17275"
},
{
"name": "Python",
"bytes": "94776"
}
],
"symlink_target": ""
} |
__author__ = 'To\xc3\xb1o G. Quintela (tgq.spm@gmail.com)'
"""Module of testing the functions programed in the module cooperativegames.
"""
import numpy as np
from cooperativegames.measures.cooperativegames_metrics import banzhaf_index,\
shapley_index, shapley_value, weighted_winning_coalitions,\
weighted_worsable_coalitions
def test():
    """Main function for testing power indices of the cooperative games
    package.

    Structure: each ``test*_...`` closure checks one power-index function
    against a known textbook result. They are then *applied as decorators*
    to the game-definition functions below, which runs every check
    immediately at definition time; the decorated names are not used
    afterwards (the decorators return None).
    """
    def test1_shapley_value(funct):
        """Glove game:
        Consider a simplified description of a business. An owner, o, provides
        crucial capital in the sense that without him no gains can be obtained.
        There are k workers w1,...,wk, each of whom contributes an amount p to
        the total profit. So N = {o, w1,...,wk} and v(S) = 0 if o is not a
        member of S and v(S) = mp if S contains the owner and m workers.
        Computing the Shapley value for this coalition game leads to a value of
        kp/2 for the owner and p/2 for each worker.
        (http://en.wikipedia.org/wiki/Shapley_value)
        """
        # Three players (0, 1, 2); `funct` is the characteristic function.
        sh_v1 = shapley_value(np.array(range(3)), funct)
        result = np.array([1./6, 1./6, 2./3])
        print("Testing Shapley value with the globe game.")
        np.testing.assert_array_almost_equal(sh_v1, result)
        print("Test passed.")
        print("-------------------------------------------------------")

    def test1_banzhaf_index(funct):
        """Voting game from Game Theory and Strategy by Phillip D. Straffin:
        [6; 4, 3, 2, 1]
        The numbers in the brackets mean a measure requires 6 votes to pass,
        and voter A can cast four votes, B three votes, C two, and D one. The
        winning groups, with parenthesis swing voters, are as follows:
        (AB), (AC), (A)BC, (AB)D, (AC)D, (BCD), ABCD
        There are 12 total swing votes, so by the Banzhaf index is:
        A = 5/12, B = 3/12, C = 3/12, D = 1/12
        (http://en.wikipedia.org/wiki/Banzhaf_power_index)
        """
        # `funct` returns [vote distribution, winning threshold].
        bzf_ind = banzhaf_index(*funct())
        result = np.array([5./12, 3./12, 3./12, 1./12])
        print("Testing Banzhaf index with Straffin voting game.")
        np.testing.assert_array_almost_equal(bzf_ind, result, 3)
        print("Test passed.")
        print("-------------------------------------------------------")

    def test2_banzhaf_index(funct):
        """US voting 1 wikipedia:
        Consider the U.S. Electoral College. Each state has more or less power
        than the next state. There are a total of 538 electoral votes. A
        majority vote is considered 270 votes. The Banzhaf power index would be
        a mathematical representation of how likely a single state would be
        able to swing the vote. For a state such as California, which is
        allocated 55 electoral votes, they would be more likely to swing the
        vote than a state such as Montana, which only has 3 electoral votes.
        The United States is having a presidential election between a
        Republican and a Democrat. For simplicity, suppose that only three
        states are participating: California (55 electoral votes), Texas (34
        electoral votes), and New York (31 electoral votes).
        Results are all with 1/3.
        (http://en.wikipedia.org/wiki/Banzhaf_power_index)
        """
        bzf_ind = banzhaf_index(*funct())
        result = np.array([1./3, 1./3, 1./3])
        print("Testing Banzhaf index with US voting game 1.")
        np.testing.assert_array_almost_equal(bzf_ind, result, 3)
        print("Test passed.")
        print("-------------------------------------------------------")

    def test3_banzhaf_index(funct):
        """US voting 2 wikipedia:
        Consider the U.S. Electoral College. Each state has more or less power
        than the next state. There are a total of 538 electoral votes. A
        majority vote is considered 270 votes. The Banzhaf power index would be
        a mathematical representation of how likely a single state would be
        able to swing the vote. For a state such as California, which is
        allocated 55 electoral votes, they would be more likely to swing the
        vote than a state such as Montana, which only has 3 electoral votes.
        The United States is having a presidential election between a
        Republican and a Democrat. For simplicity, suppose that only three
        states are participating: California (55 electoral votes), Texas (34
        electoral votes), and Ohio (20 electoral votes).
        Results are [1, 0, 0].
        (http://en.wikipedia.org/wiki/Banzhaf_power_index)
        """
        # California alone exceeds the threshold here, so it has all the power.
        bzf_ind = banzhaf_index(*funct())
        result = np.array([1., 0, 0])
        print("Testing Banzhaf index with US voting game 2.")
        np.testing.assert_array_almost_equal(bzf_ind, result, 3)
        print("Test passed.")
        print("-------------------------------------------------------")

    def test4_banzhaf_index(funct):
        """Cartel game
        Five companies (A, B, C, D, E) sign an agreement for the creation of a
        monopoly. The size of the market is X = 54 millions units per year
        (i.e. petroleum barrels) for a monopoly. The maximum production
        capacity of these companies is A = 44, B = 32, C = 20, D = 8 and E = 4
        millions of units per year. Therefore, there is a set of coalitions
        able to provide the 54 millions of units necessary for the monopoly,
        and a set of coalitions unable to provide that number. In each of the
        sufficient coalitions we may have necessary members (for the coalition
        to provide the required production) and unnecessary members (underlined
        in the table below). Even when one of these unnecessary members goes
        out of the sufficient coalition that coalition is able to provide the
        required production. However, when one necessary member leaves, the
        sufficient coalition becomes insufficient.
        The monopoly's profit to be distributed among the coalition's members
        is 100 millions of dollars per year.
        A is necessary in 38,5% of the total cases, B in 23.1%, C in 23.1%, D
        in 7.7% and E in 7.7% (these are the Banzhaf indexes for each company).
        """
        bzf_ind = banzhaf_index(*funct())
        result = np.array([.385, .231, .231, .077, .077])
        print("Testing Banzhaf index with Cartel game.")
        np.testing.assert_array_almost_equal(bzf_ind, result, 3)
        print("Test passed.")
        print("-------------------------------------------------------")

    def test5_banzhaf_index(funct):
        """EEC 1958-1972:
        (https://en.wikipedia.org/wiki/Voting_in_the_Council_of_the_European_
        Union#Treaty_of_Rome_.281958.E2.80.931973.29)
        """
        bzf_ind = banzhaf_index(*funct())
        # Luxembourg (weight 1) is a dummy player: index 0.
        result = np.array([5/21., 5/21., 5/21., 3/21., 3/21., 0])
        print("Testing Banzhaf index with EEC voting.")
        np.testing.assert_array_almost_equal(bzf_ind, result, 3)
        print("Test passed.")
        print("-------------------------------------------------------")

    def test1_shapley_shubik(funct):
        """Example from the wikipedia:
        (http://en.wikipedia.org/wiki/Shapley%E2%80%93Shubik_power_index)
        """
        sh_sh_ind = shapley_index(*funct())
        result = np.array([1/2., 1/6., 1/6., 1/6.])
        print("Testing Shapley-Shubick index with example.")
        np.testing.assert_array_almost_equal(sh_sh_ind, result, 3)
        print("Test passed.")
        print("-------------------------------------------------------")

    # Game definitions. Decorating each one with a checker above runs the
    # corresponding test immediately; the bound names end up as None.
    @test1_shapley_value
    def f_globe_game(set_):
        # Characteristic function: only coalitions containing player 2
        # (the "owner") with at least one worker have value 1.
        set_ = list(set_)
        if set_ in [[0, 2], [1, 2], [0, 1, 2]]:
            return 1
        else:
            return 0

    @test1_banzhaf_index
    def f_voting_test1(
    ):
        distrib_repr = np.array([4, 3, 2, 1])
        win_thr = 0.5
        return [distrib_repr, win_thr]

    @test2_banzhaf_index
    def f_usvoting1_test():
        distrib_repr = np.array([55, 34, 31])
        win_thr = 0.5
        return [distrib_repr, win_thr]

    @test3_banzhaf_index
    def f_usvoting2_test():
        distrib_repr = np.array([55, 34, 20])
        win_thr = 0.5
        return [distrib_repr, win_thr]

    @test4_banzhaf_index
    def f_cartel_test():
        distrib_repr = np.array([44, 32, 20, 8, 4])
        # Threshold expressed as a fraction of total capacity (54/108).
        win_thr = float(54.)/np.sum(distrib_repr)
        return [distrib_repr, win_thr]

    @test5_banzhaf_index
    def f_eu1_test():
        "EEC 1958-1972"
        distrib_repr = np.array([4, 4, 4, 2, 2, 1])
        # 11.9 (just under the 12-vote qualified majority) out of 17 votes.
        win_thr = 11.9/np.sum(distrib_repr)
        return [distrib_repr, win_thr]

    @test1_shapley_shubik
    def f_ex_test():
        distrib_repr = np.array([3, 2, 1, 1])
        win_thr = .5
        return [distrib_repr, win_thr]

    ################### Personalized coded
    #######################################
    # Randomized smoke tests: only check that the weighted-coalition
    # functions run without error (no expected values asserted).
    distrib_repr = np.random.randint(0, 20, 10)
    weights = np.random.random((10, 10))
    weighted_winning_coalitions(distrib_repr, weights, win_thr=0.5)
    weighted_worsable_coalitions(distrib_repr, weights, win_thr=0.5)
    weights[0, 3] = 0
    weighted_winning_coalitions(distrib_repr, weights, win_thr=0.5)
    weighted_worsable_coalitions(distrib_repr, weights, win_thr=0.5)
| {
"content_hash": "9167eac2e3949714d8cdef70ad41c2b4",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 79,
"avg_line_length": 41.198237885462554,
"alnum_prop": 0.5960222412318221,
"repo_name": "tgquintela/CooperativeGames",
"id": "e9a0f8ae31fe600974c45df3522560fc38a29b86",
"size": "9353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cooperativegames/tests/test_measures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35395"
},
{
"name": "Shell",
"bytes": "4609"
}
],
"symlink_target": ""
} |
try:
import uasyncio as asyncio
except ImportError:
import asyncio
class Hardware(object):
    """Mock device whose "ready" state arrives after a fixed number of
    scheduler passes.

    Awaiting an instance yields control back to the event loop once per
    remaining count, printing the countdown as it goes.
    """

    def __init__(self, count):
        # Number of polls before the interface is considered ready.
        self.count = count

    def __await__(self):
        """Generator protocol: poll until the countdown reaches zero."""
        while True:
            if not self.count:
                return
            print(self.count)
            yield
            self.count -= 1

    # uasyncio (MicroPython) drives awaitables through __iter__; alias it
    # so the same class works on both ports (issue #2678).
    __iter__ = __await__
# Demo driver: run the Hardware countdown to completion on the default loop.
# NOTE: this executes at import time.
loop = asyncio.get_event_loop()
hardware = Hardware(10)


async def run():
    # Suspends here, resuming once per scheduler pass, until the
    # Hardware countdown is exhausted.
    await hardware
    print('Done')

loop.run_until_complete(run())
| {
"content_hash": "895521880f68e02b43a84e9da4836e9f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 20.26923076923077,
"alnum_prop": 0.6015180265654649,
"repo_name": "peterhinch/micropython-async",
"id": "a9087f6f94c7240705c70d71f7cdf8b4bd07b918",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2/awaitable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "564278"
}
],
"symlink_target": ""
} |
import copy
import pickle
import time
# Third-party
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
import pytest
# Project
from ..io import load
from ...frame import StaticFrame
from ...hamiltonian import Hamiltonian
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from gala.tests.optional_deps import HAS_SYMPY
def partial_derivative(func, point, dim_ix=0, **kwargs):
    """Numerically estimate the partial derivative of ``func`` at ``point``
    along coordinate ``dim_ix``.

    Extra keyword arguments (``dx``, ``n``, ``order``, ...) are forwarded
    to ``scipy.misc.derivative``.
    """
    base = np.array(point, copy=True)

    def _along_axis(value):
        # Vary only the requested coordinate; all others stay fixed.
        base[dim_ix] = value
        return func(base)

    return derivative(_along_axis, point[dim_ix], **kwargs)
class PotentialTestBase:
    """Reusable test base class exercising a gravitational Potential's API.

    Subclasses must set ``potential`` (and ``w0``); ``name`` and ``frame``
    are filled in automatically by ``setup()`` when left as None.
    """
    name = None
    potential = None  # MUST SET THIS
    frame = None
    # relative tolerance for the numerical-vs-analytic gradient comparison
    tol = 1e-5
    show_plots = False
    # per-subclass switches for the optional sympy cross-checks
    sympy_hessian = True
    sympy_density = True
    check_finite_at_origin = True

    def setup(self):
        # NOTE(review): nose-style setup hook; assumes the test runner calls
        # it before each test method — confirm against the project's pytest
        # configuration.
        # set up hamiltonian
        if self.frame is None:
            self.frame = StaticFrame(units=self.potential.units)
        self.H = Hamiltonian(self.potential, self.frame)
        self.rnd = np.random.default_rng(seed=42)
        cls = self.__class__
        if cls.name is None:
            cls.name = cls.__name__[4:]  # removes "Test"
        print(f"Testing potential: {cls.name}")
        self.w0 = np.array(self.w0)
        # w0 packs positions then velocities, so ndim is half its size.
        self.ndim = self.w0.size // 2
        # TODO: need to test also quantity objects and phasespacepositions!
        # these are arrays we will test the methods on:
        w0_2d = np.repeat(self.w0[:, None], axis=1, repeats=16)
        w0_3d = np.repeat(w0_2d[..., None], axis=2, repeats=8)
        w0_list = list(self.w0)
        w0_slice = w0_2d[:, :4]
        self.w0s = [self.w0, w0_2d, w0_3d, w0_list, w0_slice]
        # Expected output shapes, one entry per input in self.w0s:
        self._grad_return_shapes = [
            self.w0[: self.ndim].shape + (1,),
            w0_2d[: self.ndim].shape,
            w0_3d[: self.ndim].shape,
            self.w0[: self.ndim].shape + (1,),
            w0_slice[: self.ndim].shape,
        ]
        self._hess_return_shapes = [
            (self.ndim,) + self.w0[: self.ndim].shape + (1,),
            (self.ndim,) + w0_2d[: self.ndim].shape,
            (self.ndim,) + w0_3d[: self.ndim].shape,
            (self.ndim,) + self.w0[: self.ndim].shape + (1,),
            (self.ndim,) + w0_slice[: self.ndim].shape,
        ]
        self._valu_return_shapes = [x[1:] for x in self._grad_return_shapes]

    def test_unitsystem(self):
        """Check the unit system and that replace_units round-trips."""
        assert isinstance(self.potential.units, UnitSystem)
        if isinstance(self.potential.units, DimensionlessUnitSystem):
            # Don't do a replace_units test for dimensionless potentials
            return
        # check that we can replace the units as expected
        usys = UnitSystem([u.pc, u.Gyr, u.radian, u.Msun])
        pot = copy.deepcopy(self.potential)
        pot2 = pot.replace_units(usys)
        assert pot2.units == usys
        assert pot.units == self.potential.units

    def test_energy(self):
        """energy() accepts every input layout and returns expected shapes."""
        assert self.ndim == self.potential.ndim
        for arr, shp in zip(self.w0s, self._valu_return_shapes):
            v = self.potential.energy(arr[: self.ndim])
            assert v.shape == shp
            # scalar time, with and without units
            g = self.potential.energy(arr[: self.ndim], t=0.1)
            g = self.potential.energy(
                arr[: self.ndim], t=0.1 * self.potential.units["time"]
            )
            # array time matching the batch shape, with and without units
            t = np.zeros(np.array(arr).shape[1:]) + 0.1
            g = self.potential.energy(arr[: self.ndim], t=t)
            g = self.potential.energy(
                arr[: self.ndim], t=t * self.potential.units["time"]
            )
        if self.check_finite_at_origin:
            val = self.potential.energy([0.0, 0, 0])
            assert np.isfinite(val)

    def test_gradient(self):
        """gradient() accepts every input layout and returns expected shapes."""
        for arr, shp in zip(self.w0s, self._grad_return_shapes):
            g = self.potential.gradient(arr[: self.ndim])
            assert g.shape == shp
            g = self.potential.gradient(arr[: self.ndim], t=0.1)
            g = self.potential.gradient(
                arr[: self.ndim], t=0.1 * self.potential.units["time"]
            )
            t = np.zeros(np.array(arr).shape[1:]) + 0.1
            g = self.potential.gradient(arr[: self.ndim], t=t)
            g = self.potential.gradient(
                arr[: self.ndim], t=t * self.potential.units["time"]
            )

    def test_hessian(self):
        """hessian() accepts every input layout and returns expected shapes."""
        for arr, shp in zip(self.w0s, self._hess_return_shapes):
            g = self.potential.hessian(arr[: self.ndim])
            assert g.shape == shp
            g = self.potential.hessian(arr[: self.ndim], t=0.1)
            g = self.potential.hessian(
                arr[: self.ndim], t=0.1 * self.potential.units["time"]
            )
            t = np.zeros(np.array(arr).shape[1:]) + 0.1
            g = self.potential.hessian(arr[: self.ndim], t=t)
            g = self.potential.hessian(
                arr[: self.ndim], t=t * self.potential.units["time"]
            )

    def test_mass_enclosed(self):
        """mass_enclosed() returns positive values with expected shapes."""
        for arr, shp in zip(self.w0s, self._valu_return_shapes):
            g = self.potential.mass_enclosed(arr[: self.ndim])
            assert g.shape == shp
            assert np.all(g > 0.0)
            g = self.potential.mass_enclosed(arr[: self.ndim], t=0.1)
            g = self.potential.mass_enclosed(
                arr[: self.ndim], t=0.1 * self.potential.units["time"]
            )
            t = np.zeros(np.array(arr).shape[1:]) + 0.1
            g = self.potential.mass_enclosed(arr[: self.ndim], t=t)
            g = self.potential.mass_enclosed(
                arr[: self.ndim], t=t * self.potential.units["time"]
            )

    def test_circular_velocity(self):
        """circular_velocity() returns positive values with expected shapes."""
        for arr, shp in zip(self.w0s, self._valu_return_shapes):
            g = self.potential.circular_velocity(arr[: self.ndim])
            assert g.shape == shp
            assert np.all(g > 0.0)
            g = self.potential.circular_velocity(arr[: self.ndim], t=0.1)
            g = self.potential.circular_velocity(
                arr[: self.ndim], t=0.1 * self.potential.units["time"]
            )
            t = np.zeros(np.array(arr).shape[1:]) + 0.1
            g = self.potential.circular_velocity(arr[: self.ndim], t=t)
            g = self.potential.circular_velocity(
                arr[: self.ndim], t=t * self.potential.units["time"]
            )

    def test_repr(self):
        """repr() must mention the unit system and every parameter name."""
        pot_repr = repr(self.potential)
        if isinstance(self.potential.units, DimensionlessUnitSystem):
            assert "dimensionless" in pot_repr
        else:
            assert str(self.potential.units["length"]) in pot_repr
            assert str(self.potential.units["time"]) in pot_repr
            assert str(self.potential.units["mass"]) in pot_repr
        for k in self.potential.parameters.keys():
            assert "{}=".format(k) in pot_repr

    def test_compare(self):
        """Equality: same parameters compare equal, perturbed ones do not."""
        # skip if composite potentials
        if len(self.potential.parameters) == 0:
            return
        other = self.potential.__class__(
            units=self.potential.units, **self.potential.parameters
        )
        assert other == self.potential
        pars = self.potential.parameters.copy()
        for k in pars.keys():
            # NOTE(review): k is a parameter-name string, so `k != 0` is
            # always True — possibly meant to exclude a specific parameter;
            # confirm intent.
            if k != 0:
                pars[k] = 1.1 * pars[k]
        other = self.potential.__class__(units=self.potential.units, **pars)
        assert other != self.potential
        # check that comparing to non-potentials works
        assert not self.potential == "sup"
        assert self.potential is not None

    def test_plot(self):
        """Smoke-test the contour and rotation-curve plotting helpers."""
        p = self.potential
        f = p.plot_contours(
            grid=(np.linspace(-10.0, 10.0, 100), 0.0, 0.0), labels=["X"]
        )
        f = p.plot_contours(
            grid=(
                np.linspace(-10.0, 10.0, 100),
                np.linspace(-10.0, 10.0, 100),
                0.0,
            ),
            cmap="Blues",
        )
        f = p.plot_contours(
            grid=(
                np.linspace(-10.0, 10.0, 100),
                1.0,
                np.linspace(-10.0, 10.0, 100),
            ),
            cmap="Blues",
            labels=["X", "Z"],
        )
        f, a = p.plot_rotation_curve(R_grid=np.linspace(0.1, 10.0, 100))
        plt.close("all")
        if self.show_plots:
            plt.show()

    def test_save_load(self, tmpdir):
        """
        Test writing to a YAML file, and reading back in
        """
        fn = str(tmpdir.join("{}.yml".format(self.name)))
        self.potential.save(fn)
        p = load(fn)
        p.energy(self.w0[: self.w0.size // 2])
        p.gradient(self.w0[: self.w0.size // 2])

    def test_numerical_gradient_vs_gradient(self):
        """
        Check that the value of the implemented gradient function is close to a
        numerically estimated value. This is to check the coded-up version.
        """
        # step size scaled to the magnitude of the reference position
        dx = 1e-3 * np.sqrt(np.sum(self.w0[: self.w0.size // 2] ** 2))
        max_x = np.sqrt(np.sum([x ** 2 for x in self.w0[: self.w0.size // 2]]))
        grid = np.linspace(-max_x, max_x, 8)
        grid = grid[grid != 0.0]
        grids = [grid for i in range(self.w0.size // 2)]
        xyz = np.ascontiguousarray(
            np.vstack(list(map(np.ravel, np.meshgrid(*grids)))).T
        )

        def energy_wrap(xyz):
            # _energy expects a (1, ndim) batch; unwrap the scalar result.
            xyz = np.ascontiguousarray(xyz[None])
            return self.potential._energy(xyz, t=np.array([0.0]))[0]

        num_grad = np.zeros_like(xyz)
        for i in range(xyz.shape[0]):
            num_grad[i] = np.squeeze(
                [
                    partial_derivative(
                        energy_wrap, xyz[i], dim_ix=dim_ix, n=1, dx=dx, order=5
                    )
                    for dim_ix in range(self.w0.size // 2)
                ]
            )
        grad = self.potential._gradient(xyz, t=np.array([0.0]))
        assert np.allclose(num_grad, grad, rtol=self.tol)

    def test_orbit_integration(self):
        """
        Make sure we can integrate an orbit in this potential
        """
        w0 = self.w0
        w0 = np.vstack((w0, w0, w0)).T
        t1 = time.time()
        orbit = self.H.integrate_orbit(w0, dt=0.1, n_steps=10000)
        print("Integration time (10000 steps): {}".format(time.time() - t1))
        if self.show_plots:
            f = orbit.plot()
            f.suptitle("Vector w0")
            plt.show()
            plt.close(f)
        # repeat with a PhaseSpacePosition (unitful) initial condition
        us = self.potential.units
        w0 = PhaseSpacePosition(
            pos=w0[: self.ndim] * us["length"],
            vel=w0[self.ndim :] * us["length"] / us["time"],
        )
        orbit = self.H.integrate_orbit(w0, dt=0.1, n_steps=10000)
        if self.show_plots:
            f = orbit.plot()
            f.suptitle("Object w0")
            plt.show()
            plt.close(f)

    def test_pickle(self, tmpdir):
        """Round-trip the potential through pickle and use the result."""
        fn = str(tmpdir.join("{}.pickle".format(self.name)))
        with open(fn, "wb") as f:
            pickle.dump(self.potential, f)
        with open(fn, "rb") as f:
            p = pickle.load(f)
        p.energy(self.w0[: self.w0.size // 2])

    @pytest.mark.skipif(not HAS_SYMPY, reason="requires sympy to run this test")
    def test_against_sympy(self):
        """Cross-check energy/density/gradient/hessian against sympy."""
        # TODO: should really split this into separate tests for each check...
        import sympy as sy
        from sympy import Q

        # compare Gala gradient, hessian, and density to sympy values
        pot = self.potential
        Phi, v, p = pot.to_sympy()

        # Derive sympy gradient and hessian functions to evaluate:
        from scipy.special import gamma, gammainc

        def lowergamma(a, x):  # noqa
            # Differences between scipy and sympy lower gamma
            return gammainc(a, x) * gamma(a)

        modules = [
            "numpy",
            {
                "atan": np.arctan,
                "lowergamma": lowergamma,
                "gamma": gamma,
                "re": np.real,
                "im": np.imag,
            },
            "sympy",
        ]
        vars_ = list(p.values()) + list(v.values())
        assums = np.bitwise_and.reduce([Q.real(x) for x in vars_])
        # Phi = sy.refine(Phi, assums)
        e_func = sy.lambdify(vars_, Phi, modules=modules)
        if self.sympy_density:
            # density from the Poisson equation: laplacian(Phi) / (4 pi G)
            dens_tmp = sum([sy.diff(Phi, var, 2) for var in v.values()]) / (
                4 * sy.pi * p["G"]
            )
            # dens_tmp = sy.refine(dens_tmp, assums)
            dens_func = sy.lambdify(vars_, dens_tmp, modules=modules)
        grad = sy.derive_by_array(Phi, list(v.values()))
        # grad = sy.refine(grad, assums)
        grad_func = sy.lambdify(vars_, grad, modules=modules)
        if self.sympy_hessian:
            Hess = sy.hessian(Phi, list(v.values()))
            # Hess = sy.refine(Hess, assums)
            Hess_func = sy.lambdify(vars_, Hess, modules=modules)
        # Make a dict of potential parameter values without units:
        # NOTE: this loop rebinds `v` (previously the sympy variables dict);
        # v is not used as that dict afterwards.
        par_vals = {}
        for k, v in pot.parameters.items():
            par_vals[k] = v.value
        N = 64  # MAGIC NUMBER:
        trial_x = self.rnd.uniform(-10.0, 10.0, size=(pot.ndim, N))
        x_dict = {k: v for k, v in zip(["x", "y", "z"], trial_x)}
        f_gala = pot.energy(trial_x).value
        f_sympy = e_func(G=pot.G, **par_vals, **x_dict)
        e_close = np.allclose(f_gala, f_sympy)
        test_cases = [e_close]
        vals = [(f_gala, f_sympy)]
        if self.sympy_density:
            d_gala = pot.density(trial_x).value
            d_sympy = dens_func(G=pot.G, **par_vals, **x_dict)
            d_close = np.allclose(d_gala, d_sympy)
            test_cases.append(d_close)
            vals.append((d_gala, d_sympy))
        G_gala = pot.gradient(trial_x).value
        G_sympy = grad_func(G=pot.G, **par_vals, **x_dict)
        g_close = np.allclose(G_gala, G_sympy)
        test_cases.append(g_close)
        vals.append((G_gala, G_sympy))
        if self.sympy_hessian:
            H_gala = pot.hessian(trial_x).value
            H_sympy = Hess_func(G=pot.G, **par_vals, **x_dict)
            h_close = np.allclose(H_gala, H_sympy)
            test_cases.append(h_close)
            vals.append((H_gala, H_sympy))
        # Dump diagnostics for any check that failed before asserting.
        if not all(test_cases):
            names = ["energy", "density", "gradient", "hessian"]
            for name, (val1, val2), test in zip(names, vals, test_cases):
                if not test:
                    print(trial_x)
                    print(f"{pot}: {name}\nGala:{val1}\nSympy:{val2}")
        assert all(test_cases)

    def test_regression_165(self):
        """Scalar (0-d) input must raise ValueError, not silently succeed."""
        if self.potential.ndim == 1:
            # Skip!
            return
        with pytest.raises(ValueError):
            self.potential.energy(8.0)
        with pytest.raises(ValueError):
            self.potential.gradient(8.0)
        with pytest.raises(ValueError):
            self.potential.circular_velocity(8.0)
class CompositePotentialTestBase(PotentialTestBase):
    """Variant of PotentialTestBase for composite potentials.

    Overrides the checks that do not apply to composites with skipped
    no-op bodies.
    """

    @pytest.mark.skip(reason="Skip composite potential repr test")
    def test_repr(self):
        """Skipped for composite potentials (see skip reason)."""

    @pytest.mark.skip(reason="Skip composite potential compare test")
    def test_compare(self):
        """Skipped for composite potentials (see skip reason)."""

    @pytest.mark.skip(reason="to_sympy() not implemented yet")
    def test_against_sympy(self):
        """Skipped: to_sympy() is not implemented for composites yet."""
| {
"content_hash": "bc1c1622177fd9b44570e342e7e0a633",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 80,
"avg_line_length": 33.75330396475771,
"alnum_prop": 0.5376533542156094,
"repo_name": "adrn/gala",
"id": "3083fa79c333f5c05547f2feed9b3e5a0f8a92a6",
"size": "15343",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gala/potential/potential/tests/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "176465"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Cython",
"bytes": "154003"
},
{
"name": "Python",
"bytes": "714200"
},
{
"name": "TeX",
"bytes": "3702"
}
],
"symlink_target": ""
} |
import re
from .base import ARRAY, ischema_names
from ... import types as sqltypes
from ...sql import functions as sqlfunc
from ...sql.operators import custom_op
from ... import util
__all__ = ('HSTORE', 'hstore')
# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given. This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(r"""
(
"(?P<key> (\\ . | [^"])* )" # Quoted key
)
[ ]* => [ ]* # Pair operator, optional adjoining whitespace
(
(?P<value_null> NULL ) # NULL value
| "(?P<value> (\\ . | [^"])* )" # Quoted value
)
""", re.VERBOSE)
HSTORE_DELIMITER_RE = re.compile(r"""
[ ]* , [ ]*
""", re.VERBOSE)
def _parse_error(hstore_str, pos):
"""format an unmarshalling error."""
ctx = 20
hslen = len(hstore_str)
parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
if len(parsed_tail) > ctx:
parsed_tail = '[...]' + parsed_tail[1:]
if len(residual) > ctx:
residual = residual[:-1] + '[...]'
return "After %r, could not parse residual at position %d: %r" % (
parsed_tail, pos, residual)
def _parse_hstore(hstore_str):
    """Parse an hstore from its literal string representation.

    Attempts to approximate PG's hstore input parsing rules as closely as
    possible. Although currently this is not strictly necessary, since the
    current implementation of hstore's output syntax is stricter than what it
    accepts as input, the documentation makes no guarantees that will always
    be the case.

    Returns a dict of key/value strings (values may be None for NULL).
    Raises ValueError if the whole string cannot be consumed.
    """
    result = {}
    # `pos` is the absolute cursor; matches below are made against the
    # remaining substring, so match.end() offsets are relative to `pos`.
    pos = 0
    pair_match = HSTORE_PAIR_RE.match(hstore_str)
    while pair_match is not None:
        # Unescape \" and \\ sequences inside the quoted key/value.
        key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\")
        if pair_match.group('value_null'):
            value = None
        else:
            value = pair_match.group('value').replace(r'\"', '"').replace("\\\\", "\\")
        result[key] = value
        pos += pair_match.end()
        # Skip the inter-pair delimiter (comma), if any.
        delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
        if delim_match is not None:
            pos += delim_match.end()
        pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
    # Anything left over means the literal was malformed.
    if pos != len(hstore_str):
        raise ValueError(_parse_error(hstore_str, pos))
    return result
def _serialize_hstore(val):
    """Serialize a dictionary into an hstore literal.

    Keys and values must both be strings (except None for values, which
    serializes to NULL).
    """
    def _quoted(token, position):
        # NULL is only legal on the value side of a pair.
        if token is None and position == 'value':
            return 'NULL'
        if isinstance(token, util.string_types):
            escaped = token.replace("\\", "\\\\").replace('"', r'\"')
            return '"%s"' % escaped
        raise ValueError("%r in %s position is not a string." %
                         (token, position))

    pairs = ('%s=>%s' % (_quoted(k, 'key'), _quoted(v, 'value'))
             for k, v in val.items())
    return ', '.join(pairs)
class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
    """Represent the Postgresql HSTORE type.

    The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::

        data_table = Table('data_table', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', HSTORE)
        )

        with engine.connect() as conn:
            conn.execute(
                data_table.insert(),
                data = {"key1": "value1", "key2": "value2"}
            )

    :class:`.HSTORE` provides for a wide range of operations, including:

    * Index operations::

        data_table.c.data['some key'] == 'some value'

    * Containment operations::

        data_table.c.data.has_key('some key')

        data_table.c.data.has_all(['one', 'two', 'three'])

    * Concatenation::

        data_table.c.data + {"k1": "v1"}

    For a full list of special methods see :class:`.HSTORE.comparator_factory`.

    For usage with the SQLAlchemy ORM, it may be desirable to combine
    the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary
    now part of the :mod:`sqlalchemy.ext.mutable`
    extension.  This extension will allow in-place changes to dictionary
    values to be detected by the unit of work::

        from sqlalchemy.ext.mutable import MutableDict

        class MyClass(Base):
            __tablename__ = 'data_table'

            id = Column(Integer, primary_key=True)
            data = Column(MutableDict.as_mutable(HSTORE))

        my_object = session.query(MyClass).one()

        # in-place mutation, requires Mutable extension
        # in order for the ORM to detect
        my_object.data['some_key'] = 'some value'

        session.commit()

    .. versionadded:: 0.8

    .. seealso::

        :class:`.hstore` - render the Postgresql ``hstore()`` function.

    """

    __visit_name__ = 'HSTORE'

    class comparator_factory(sqltypes.Concatenable.Comparator):
        """Define comparison operations for :class:`.HSTORE`."""

        def has_key(self, other):
            """Boolean expression.  Test for presence of a key.  Note that the
            key may be a SQLA expression.
            """
            return self.expr.op('?')(other)

        def has_all(self, other):
            """Boolean expression.  Test for presence of all keys in the PG
            array.
            """
            return self.expr.op('?&')(other)

        def has_any(self, other):
            """Boolean expression.  Test for presence of any key in the PG
            array.
            """
            return self.expr.op('?|')(other)

        def defined(self, key):
            """Boolean expression.  Test for presence of a non-NULL value for
            the key.  Note that the key may be a SQLA expression.
            """
            return _HStoreDefinedFunction(self.expr, key)

        def contains(self, other, **kwargs):
            """Boolean expression.  Test if keys are a superset of the keys of
            the argument hstore expression.
            """
            return self.expr.op('@>')(other)

        def contained_by(self, other):
            """Boolean expression.  Test if keys are a proper subset of the
            keys of the argument hstore expression.
            """
            return self.expr.op('<@')(other)

        def __getitem__(self, other):
            """Text expression.  Get the value at a given key.  Note that the
            key may be a SQLA expression.
            """
            return self.expr.op('->', precedence=5)(other)

        def delete(self, key):
            """HStore expression.  Returns the contents of this hstore with the
            given key deleted.  Note that the key may be a SQLA expression.
            """
            # A dict argument is serialized to an hstore literal first.
            if isinstance(key, dict):
                key = _serialize_hstore(key)
            return _HStoreDeleteFunction(self.expr, key)

        def slice(self, array):
            """HStore expression.  Returns a subset of an hstore defined by
            array of keys.
            """
            return _HStoreSliceFunction(self.expr, array)

        def keys(self):
            """Text array expression.  Returns array of keys."""
            return _HStoreKeysFunction(self.expr)

        def vals(self):
            """Text array expression.  Returns array of values."""
            return _HStoreValsFunction(self.expr)

        def array(self):
            """Text array expression.  Returns array of alternating keys and
            values.
            """
            return _HStoreArrayFunction(self.expr)

        def matrix(self):
            """Text array expression.  Returns array of [key, value] pairs."""
            return _HStoreMatrixFunction(self.expr)

        def _adapt_expression(self, op, other_comparator):
            # Map the hstore-specific operators to their SQL result types;
            # everything else defers to Concatenable's behavior.
            if isinstance(op, custom_op):
                if op.opstring in ['?', '?&', '?|', '@>', '<@']:
                    return op, sqltypes.Boolean
                elif op.opstring == '->':
                    return op, sqltypes.Text
            return sqltypes.Concatenable.Comparator.\
                _adapt_expression(self, op, other_comparator)

    def bind_processor(self, dialect):
        """Return a bound-parameter converter: dict -> hstore literal.

        Under Python 2 the literal is additionally encoded to bytes using
        the dialect's configured encoding.
        """
        if util.py2k:
            encoding = dialect.encoding

            def process(value):
                if isinstance(value, dict):
                    return _serialize_hstore(value).encode(encoding)
                else:
                    return value
        else:
            def process(value):
                if isinstance(value, dict):
                    return _serialize_hstore(value)
                else:
                    return value
        return process

    def result_processor(self, dialect, coltype):
        """Return a result-row converter: hstore literal -> dict.

        Under Python 2 the raw value is decoded from the dialect's
        configured encoding before parsing.
        """
        if util.py2k:
            encoding = dialect.encoding

            def process(value):
                if value is not None:
                    return _parse_hstore(value.decode(encoding))
                else:
                    return value
        else:
            def process(value):
                if value is not None:
                    return _parse_hstore(value)
                else:
                    return value
        return process
# Register with the dialect's reflection machinery so database columns of
# type "hstore" reflect back as this HSTORE type.
ischema_names['hstore'] = HSTORE
class hstore(sqlfunc.GenericFunction):
    """Construct an hstore value within a SQL expression using the
    Postgresql ``hstore()`` function.

    The :class:`.hstore` function accepts one or two arguments as described
    in the Postgresql documentation.

    E.g.::

        from sqlalchemy.dialects.postgresql import array, hstore

        select([hstore('key1', 'value1')])

        select([
            hstore(
                array(['key1', 'key2', 'key3']),
                array(['value1', 'value2', 'value3'])
            )
        ])

    .. versionadded:: 0.8

    .. seealso::

        :class:`.HSTORE` - the Postgresql ``HSTORE`` datatype.

    """
    # Result type and SQL function name used when rendering.
    type = HSTORE
    name = 'hstore'
# Renders PG's defined(hstore, key) -> boolean.
class _HStoreDefinedFunction(sqlfunc.GenericFunction):
    type = sqltypes.Boolean
    name = 'defined'
# Renders PG's delete(hstore, key) -> hstore.
class _HStoreDeleteFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = 'delete'
# Renders PG's slice(hstore, text[]) -> hstore.
class _HStoreSliceFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = 'slice'
# Renders PG's akeys(hstore) -> text[].
class _HStoreKeysFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'akeys'
# Renders PG's avals(hstore) -> text[].
class _HStoreValsFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'avals'
# Renders PG's hstore_to_array(hstore) -> text[] (alternating keys/values).
class _HStoreArrayFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'hstore_to_array'
# Renders PG's hstore_to_matrix(hstore) -> text[][] of [key, value] pairs.
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'hstore_to_matrix'
| {
"content_hash": "181239d3b2d05ae931af070ab5b29016",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 87,
"avg_line_length": 30.0561797752809,
"alnum_prop": 0.5693457943925234,
"repo_name": "sauloal/PiCastPy",
"id": "c645e25d2b28fe43cbee0c7d89e1d235c4661437",
"size": "10939",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sqlalchemy/dialects/postgresql/hstore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39552"
},
{
"name": "CSS",
"bytes": "327822"
},
{
"name": "JavaScript",
"bytes": "125590"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "5131708"
}
],
"symlink_target": ""
} |
import sys
import re
import copy
# Runtime configuration flags, toggled by the command-line switches parsed
# below (-v, -i, -b, -a).
ENABLE_LOG = False           # -v: verbose diagnostic logging via log()
IN_PLACE = False             # -i: modify input files in place
# Not actually needed because indent level is autodetected for each block
NO_NAMESPACE_INDENT = False  # -b: do not add an indent level for namespaces
ANNOTATE_FILES = False       # -a: insert annotation comments into output
ANNOTATION_PREFIX = "STYLEFIX: "
ANNOTATION_POSTFIX = ""
def log(message):
    """Print a diagnostic message, but only when -v (ENABLE_LOG) is set."""
    if not ENABLE_LOG:
        return
    print("-!- " + message)
# Minimal hand-rolled argv parsing: flags toggle the module globals above;
# every non-flag argument is treated as an input filename.
input_filenames = []
for a in sys.argv[1:]:
    if a == '-v':
        ENABLE_LOG = True
    elif a == '-i':
        IN_PLACE = True
    elif a == '-b':
        NO_NAMESPACE_INDENT = True
    elif a == '-a':
        ANNOTATE_FILES = True
    else:
        input_filenames.append(a)
def create_zero_levels():
    """Return a fresh nesting-level table with every counter at zero.

    The keys name the constructs tracked by ParenMatch: bracket pairs,
    quote pairs, comment styles, preprocessor lines, statement state, and
    the "implicit block" flag for brace-less if()/for() bodies (which
    resets at end of statement and at the start of an actual block; set
    by the caller).
    """
    tracked = [
        "{}", "[]", "()", "''", '""', "<>", "=;",
        "/**/", "//", "#",
        "):{", ";",
        "implicit_block",
    ]
    return {marker: 0 for marker in tracked}
class ParenMatch:
    """Character-by-character tracker of bracket/quote/comment nesting.

    Feed lines with feed_line(); the level_* attributes then expose the
    nesting state before, during, and after the most recent line.
    """

    def __init__(self):
        # Current nesting counters (see create_zero_levels for the keys).
        self._level = create_zero_levels()
        # Snapshots taken by feed_line():
        self.level_before_line = None
        self.level_before_line_end = None
        self.level_after_line = None
        self.level_lowest_on_line = None
        self.level_highest_on_line = None
        # "()" depth recorded when the current assignment's '=' was seen.
        self.assignment_begin_paren_level = 0

    def feed_part(self, line, i):
        """Consume the construct starting at line[i]; return the next index.

        Ordered as a priority chain: block comments, line ends, line
        comments, quoted strings, comment/preprocessor openers, then
        statement/bracket bookkeeping.
        """
        # Inside a /* */ comment: only look for the closing marker.
        if self._level["/**/"] > 0:
            if line[i:i+2] == '*/' and line[i-1] != '/':
                self._level["/**/"] = 0
                return i + 2
            return i + 1  # Ignore all other comment content
        if line[i] == '\n':
            # Line end terminates // comments and non-continued # lines.
            if self._level["//"] > 0:
                self._level["//"] = 0
            if self._level["#"] > 0 and line[i-1] != '\\':
                self._level["#"] = 0
            return i + 1  # Ignore line end otherwise
        if self._level["//"] > 0:
            return i + 1  # Ignore all comment content
        # Inside single quotes: honor backslash escapes.
        if self._level["''"] > 0:
            if line[i] == "\\":
                return i + 2
            if line[i] == "'":
                self._level["''"] = 0
            return i + 1  # Ignore '' content
        # Inside double quotes: honor backslash escapes.
        if self._level['""'] > 0:
            if line[i] == "\\":
                return i + 2
            if line[i] == '"':
                self._level['""'] = 0
            return i + 1  # Ignore "" content
        # Comment openers.
        if line[i] == '/':
            if line[i+1] == '*':
                self._level["/**/"] = 1
                return i + 1
            if line[i+1] == '/':
                self._level["//"] = 1
                return i + 1
        # Preprocessor line.
        if line[i] == '#':
            self._level["#"] = 1
            return i + 1
        if self._level["#"]:
            return i + 1  # Ignore macro content
        # Statements and implicit blocks
        if self._level[";"] == 0:
            if line[i] not in " \t\n()[]{};":
                #print(repr(line[i])+" begins statement")
                self._level[";"] = 1  # Automatically start a new statement
        if self._level[";"] > 0:
            # ';' or braces end the statement; a label-like ':' does too.
            if line[i] in ";{}":
                self._level[";"] = 0
                self._level["implicit_block"] = 0
            if line[i] == ':' and line[i-1] != ':':  # Not very precise
                self._level[";"] = 0
                self._level["implicit_block"] = 0
        if line[i] == '{':
            self._level["implicit_block"] = 0
        # Assignment detection: a lone '=' (not ==, <=, >=, !=).
        if line[i] == '=':
            if line[i+1] not in '=<>!' and line[i-1] not in '=<>!':
                self._level["=;"] = 1
                self.assignment_begin_paren_level = self._level["()"]
            return i + 1
        if line[i] == ';':
            # ';' also resets template-angle tracking and ends assignments.
            self._level["<>"] = 0
            if self._level["=;"] > 0:
                self._level["=;"] = 0
            return i + 1
        # Constructor-initializer-list state: between "):" and the "{".
        if self._level["):{"] > 0:
            if line[i] == '{':
                self._level["):{"] = 0
            # Allow other processing
        if line[i] == ':' and line[i-1] == ')':
            self._level["):{"] = 1
            return i + 1
        # Template angle brackets; '<<'/'>>' operators are excluded, and
        # several operators reset the count as a heuristic.
        if line[i] == '<':
            if line[i-1] != '<' and line[i+1] != '<':
                self._level["<>"] += 1
        if line[i] == '>':
            self._level["<>"] -= 1
        if line[i] in ["|", "&", "(", ")", "]", "["]:
            self._level["<>"] = 0
        # Paired delimiters: k[0] opens, k[1] closes.
        for k in ["{}", "[]", "()", "''", '""']:
            if line[i] == k[0]:
                self._level[k] += 1
                return i + 1
            if line[i] == k[1]:
                self._level[k] -= 1
                if self._level[k] < 0:
                    log("WARNING: resetting negative "+k+" level")
                    self._level[k] = 0
                # Closing the paren the assignment started in ends it.
                if k == "()" and self.assignment_begin_paren_level > self._level[k]:
                    self._level["=;"] = 0
                return i + 1
        return i + 1

    def record_levels(self):
        """Fold the current levels into the per-line min/max trackers."""
        for bracetype, level in self._level.items():
            if level < self.level_lowest_on_line[bracetype]:
                self.level_lowest_on_line[bracetype] = level
            if level > self.level_highest_on_line[bracetype]:
                self.level_highest_on_line[bracetype] = level

    def feed_line(self, line):
        """Process one full line (expected to end with '\\n') and snapshot
        the nesting state before/at-end/after it."""
        self.level_lowest_on_line = copy.copy(self._level)
        self.level_highest_on_line = copy.copy(self._level)
        self.level_before_line = copy.copy(self._level)
        i = 0
        while True:
            # Record state just before the line ends
            if line[i] == '\n':
                self.level_before_line_end = copy.copy(self._level)
            # Process current position
            i = self.feed_part(line, i)
            # Record levels
            self.record_levels()
            # Stop if at end of line
            if i == len(line):
                break
            # Sanity check
            # NOTE(review): if feed_part ever overshoots, this prints but
            # does not break; the next line[i] access would raise IndexError.
            if i > len(line):
                print("Issue in feed_part()")
        self.level_after_line = copy.copy(self._level)
class DetectedBlock:
	"""One detected { ... } block kept on the block stack.

	If a block starts at the end of some line, the indentation of the
	block should always be one tab level, not more like uncrustify makes
	it in case the block belongs to a function parameter.
	"""
	def __init__(self, line_i, start_level, base_indent_level,
			base_levels, block_type=None):
		self.start_line = line_i # Line on which the block was opened
		self.open_level = start_level # {} nesting level at the opening brace
		self.base_indent_level = base_indent_level
		# Namespaces may be configured to contribute no indentation step.
		# (Short-circuit keeps NO_NAMESPACE_INDENT unevaluated otherwise.)
		namespace_flat = (block_type == "namespace" and NO_NAMESPACE_INDENT)
		self.inner_indent_level = (base_indent_level if namespace_flat
				else base_indent_level + 4)
		self.base_levels = base_levels # Base ParenMatch levels inside block
		self.block_type = block_type # None/"namespace"/something
# Ordered (type, regex) pairs used to classify the line that opens a block;
# the first matching entry wins (see State.feed_line)
BLOCK_TYPE_REGEXES = [
	("namespace", r'^[\t ]*namespace.*$'),
	("struct", r'^[\t ]*struct.*$'),
	("class", r'^[\t ]*class.*$'),
	("if", r'^[\t ]*if.*$'),
	("for", r'^[\t ]*for.*$'),
	("while", r'^[\t ]*while.*$'),
	("lambda", r'^.*\)\[.*$'),
	("enum", r'^[\t ]*enum.*$'),
	("=", r'^.*[^;=!<>]=[^;=!<>]*$'), # == is probably in if(.*==.*){
]
# Coarse groupings of the block types detected above
STRUCTURE_BLOCK_TYPES = ["namespace", "struct"]
CODE_BLOCK_TYPES = [None, "if", "for", "while", "lambda"]
VALUE_BLOCK_TYPES = ["enum", "="]
class State:
	"""Per-file indentation-fixing state machine.

	Feeds each line through a ParenMatch instance and derives the wanted
	output indentation level (self.indent_level) plus an optional
	human-readable fix annotation for that line.
	"""
	def __init__(self):
		self.match = ParenMatch()
		self.blocks = [] # Stack
		self.paren_level_indentations = {} # Level -> starting indentation level
		self.paren_level_start_lines = {} # Level -> starting line
		self.paren_level_identifiers = {} # Level -> identifier
		self.next_block_type = None
		self.comment_orig_base_indent = None
		# Output values
		self.reset_output()
	def reset_output(self):
		"""Clear the per-line output fields before processing a new line."""
		# Annotations
		self.fix_annotation = None
		self.annotation_is_inside_macro = False # Need for macro continuation
		self.annotation_is_inside_comment = False # Need for avoiding /*/**/*/
		# Actual output
		self.indent_level = 0
	def print_debug_state(self, level_before, level_after):
		"""Log the bracket levels around the current line for debugging."""
		if self.blocks:
			log("base_levels : "+repr(self.blocks[-1].base_levels))
		log("level_before: "+repr(level_before))
		log("level_after : "+repr(level_after))
	def add_fix_annotation(self, description):
		"""Append *description* to this line's annotation (also logs it)."""
		log(description)
		if self.fix_annotation is not None:
			self.fix_annotation += "; "
		else:
			self.fix_annotation = ""
		self.fix_annotation += description
	def add_indent(self, d, description):
		"""Adjust the output indent level by *d* columns, recording why."""
		if d == 0:
			return
		self.add_fix_annotation("indent"+("+="+str(d) if d >= 0
				else "-="+str(-d))+": "+description)
		self.indent_level += d
	def get_top_block_base_levels(self):
		"""Return the ParenMatch levels at the top block's base, or all
		zeros when no block is open."""
		if not self.blocks:
			return create_zero_levels()
		top_block = self.blocks[-1]
		return top_block.base_levels
	def feed_line(self, line, line_i):
		"""Process one source line and compute its output indentation.

		Results are left in self.indent_level / self.fix_annotation /
		self.annotation_is_inside_*; blank lines are left untouched.
		"""
		self.reset_output()
		if line.strip() == "":
			return
		self.match.feed_line(line)
		level_before = copy.copy(self.match.level_before_line)
		level_at_end = copy.copy(self.match.level_before_line_end)
		level_after = copy.copy(self.match.level_after_line)
		level_lowest = copy.copy(self.match.level_lowest_on_line)
		level_highest = copy.copy(self.match.level_highest_on_line)
		#self.add_fix_annotation("Levels before line: "+repr(level_before))
		# Measure original indentation level (tab = 4 columns)
		orig_indent_level = 0
		for c in line:
			if c == "\t":
				orig_indent_level += 4
			elif c == " ":
				orig_indent_level += 1
			else:
				break
		line_is_comment = False
		#if level_at_end["//"] > 0: # Bad; we care if the whole line is a comment
		if re.match(r'[\t ]*//.*', line):
			#self.add_fix_annotation("Line is C++ comment")
			line_is_comment = True
		if (re.match(r'[\t ]*/\*.*', line) or re.match(r'.*\*/', line) or
				level_after['/**/'] > 0):
			#self.add_fix_annotation("Line is C comment")
			line_is_comment = True
		if level_before['/**/'] > 0:
			#self.add_fix_annotation("Line is C comment continuation")
			line_is_comment = True
			self.annotation_is_inside_comment = True
		if not line_is_comment:
			self.comment_orig_base_indent = None
		line_is_macro = (level_at_end['#'] > 0)
		macro_continued = (level_before['#'] > 0)
		if line_is_macro:
			# Leave macros as-is
			self.indent_level = orig_indent_level
			if not macro_continued:
				self.add_fix_annotation("Line is macro")
			else:
				self.add_fix_annotation("Line is macro continuation")
				self.annotation_is_inside_macro = True
			return
		# Set indent level based on block level
		self.indent_level = 0
		if self.blocks:
			top_block = self.blocks[-1]
			self.indent_level = top_block.inner_indent_level
		# So let's process the REAL CODE
		if line_is_comment:
			# Fluctuate comment's indentation from block-based self.indent_level
			# by the amount the indentation originally fluctuates
			if self.comment_orig_base_indent is None:
				self.comment_orig_base_indent = orig_indent_level
			# Add difference to output level
			d = orig_indent_level - self.comment_orig_base_indent
			self.add_indent(d, "Comment's internal indentation variation")
			return
		# Detect parenthesis starting levels
		if level_highest["()"] > level_before["()"]:
			for paren_level in range(level_before["()"], level_highest["()"]):
				# NOTE: This isn't accurate because the line can contain
				# multiple opening parenthesis with all having a different
				# keyword
				identifier = None
				m = re.match(r'^.*?([^(){}\t ]+)[ \t]*\(.*$', line)
				if m is not None:
					identifier = m.group(1)
					#self.add_fix_annotation("identifier="+repr(identifier))
				self.paren_level_indentations[paren_level] = self.indent_level
				self.paren_level_start_lines[paren_level] = line_i
				self.paren_level_identifiers[paren_level] = identifier
				log("Detected paren level "+str(paren_level)+": indentation "+
						str(self.indent_level)+", start line "+str(line_i)+
						", identifier "+repr(identifier))
		# Get current block type
		current_block_type = None
		if self.blocks:
			block = self.blocks[-1]
			current_block_type = block.block_type
		is_value_block = (current_block_type in ["enum", "="])
		#
		# Block type
		#
		# Not ideal but generally works
		# Reset block type in these (rather common) cases
		if (level_lowest["{}"] < level_before["{}"] or
				level_lowest["()"] != level_highest["()"] or
				level_lowest["):{"] != level_highest["):{"]):
			self.next_block_type = None
		for (t, regex) in BLOCK_TYPE_REGEXES:
			if re.match(regex, line):
				self.next_block_type = t
				break
		# The '=' block type is inherited when there is no other option
		if self.next_block_type is None and current_block_type == '=':
			self.next_block_type = current_block_type
		#self.add_fix_annotation("Current block type: "+repr(current_block_type))
		#self.add_fix_annotation("Next block type: "+repr(self.next_block_type))
		#
		# Block level
		#
		# Update current block level: pop every block that ends on this line
		while self.blocks:
			block = self.blocks[-1]
			if level_lowest["{}"] <= block.open_level:
				base_indent_level = block.base_indent_level
				self.add_fix_annotation("Block level "+str(block.open_level)+
						" end (begun on line "+str(block.start_line)+
						", base_indent_level="+str(base_indent_level)+")")
				self.blocks = self.blocks[:-1]
				# Fix indentation of last line if it begins with '}'
				if re.match(r'^[\t ]*}.*$', line):
					self.print_debug_state(level_before, level_after)
					self.add_indent(-4, "Fixing indent of block end")
			else:
				break
		# Block level detection: Detect block starts
		# Really works only if there is only one { on the line
		if level_after["{}"] > level_lowest["{}"]:
			block_open_level = level_lowest["{}"]
			base_indent = self.indent_level
			# If this line closes parenthesis, take the indent level from the
			# parentheesis starting indentation level
			use_paren_based_indentation = False
			if level_after["()"] < level_before["()"]:
				base_paren_level = level_after["()"]
				base_indent = self.paren_level_indentations[
						base_paren_level]
				use_paren_based_indentation = True
				self.add_fix_annotation("This line closes parenthesis; taking "+
						"indent level from the line that started the "+
						"parenthesis")
			if not use_paren_based_indentation and self.blocks:
				# Use the indent level of the outside block if possible
				parent = self.blocks[-1]
				if parent.inner_indent_level is not None:
					base_indent = parent.inner_indent_level
					self.add_fix_annotation("Basing on inner indentation level "+
							str(base_indent)+" of outside block level "+
							str(parent.open_level))
			block_type = self.next_block_type
			self.next_block_type = None
			self.add_fix_annotation("Block level "+
					str(block_open_level)+" begin; base indent "+
					str(base_indent)+", type "+repr(block_type))
			self.blocks.append(DetectedBlock(line_i, block_open_level,
					base_indent, level_after, block_type))
			if not use_paren_based_indentation:
				# Fix { to be on the correct indentation level
				d = base_indent - self.indent_level
				self.add_indent(d, "Fixing { to have correct indentation")
		#
		# Indentation level fine-tuning
		#
		# Indent some stuff
		block_base_levels = self.get_top_block_base_levels()
		# Label
		if re.match(r'^[\t ]*[a-zA-Z0-9_]*:$', line):
			self.add_indent(-4, "Label")
		# case
		if re.match(r'^[\t ]*case .*:$', line):
			self.add_indent(-4, "case")
		# Detect statements that look enough like function calls to be
		# considered statements (macro calls without trailing ';')
		# (if(), for() and others look like this too)
		# It can look like a statement only if the parenthesis are ending to the
		# block's base parenthesis level
		may_create_implicit_block = False
		if (level_highest["()"] > level_after["()"] and
				level_after["()"] == block_base_levels["()"] and
				level_after["[]"] == block_base_levels["[]"] and
				level_highest[";"] > 0 and
				level_after[";"] == level_highest[";"]):
			try:
				identifier = self.paren_level_identifiers[level_after["()"]]
				if identifier in ["if", "for", "while"]:
					#self.add_fix_annotation("Keyword "+repr(identifier))
					if level_lowest["{}"] == level_after["{}"]:
						self.add_fix_annotation("May create implicit "+
								repr(identifier)+" block")
						self.match._level["implicit_block"] = 1
						may_create_implicit_block = True
						# Cheat the state
						self.match._level[";"] = 0
				elif identifier and re.match(r'^[a-zA-Z0-9_]*$', identifier):
					if re.match(r'^.*\),?$', line):
						self.add_fix_annotation("Isn't a full statement but looks "+
								"like a function call to "+repr(identifier))
						# Cheat the state
						self.match._level[";"] = 0
					else:
						self.add_fix_annotation("Assuming multi-line statement")
				else:
					self.add_fix_annotation("Identifier "+repr(identifier)+
							" doesn't make this look like a function call")
			except KeyError:
				pass
		# Handle else's implicit block
		if re.match(r'^[ \t]*else[ \t]*$', line):
			self.add_fix_annotation("May create implicit "+
					repr("else")+" block")
			self.match._level["implicit_block"] = 1
			may_create_implicit_block = True
			# Cheat the state
			self.match._level[";"] = 0
		# Keep member initializers at proper indentation level
		if (current_block_type in ["struct", "class"] and
				level_before["):{"] > 0 and
				level_after["()"] <= block_base_levels["()"]):
			self.add_fix_annotation("Keeping member initializer at proper "+
					"indentation level")
			# Cheat the state
			self.match._level[";"] = 0
		# Implicit block indentation
		if (not may_create_implicit_block and # Handle blockless nested loop
				level_before["implicit_block"] > 0 and line.strip() != "{"):
			self.add_indent(4, "Implicit block")
		# Multi-line statements
		if (
			not is_value_block and
			level_before[";"] > 0 and
			(
				level_lowest["{}"] == level_after["{}"] or
				level_before["()"] > block_base_levels["()"]
			) and
			line.strip() not in ["{}", "{", "}", "};", ")", ");", "]", "];"]
		):
			self.add_indent(8, "Multi-line statement")
		# Class member initializer indentation
		if (level_before["):{"] > 0 and
				line.strip() not in ["{}", "{", "}", "};"]):
			self.add_indent(4, "Adding indentation between ): and {")
		# Add two levels to inside multiline <> content because
		# uncrustify does not do that.
		if level_before["<>"] > block_base_levels["<>"]:
			self.add_indent(4, "Adding indentation to regular multiline <>")
def fix_line(line, state):
	"""Return *line* re-indented to state.indent_level.

	All leading tabs/spaces are stripped and replaced with whole tabs
	(4 columns each) followed by the leftover single spaces.
	"""
	# Remove all indentation. The previous manual scan assigned
	# num_whitespace_chars before testing for the break, so a line made of
	# only whitespace kept its last whitespace character; lstrip() strips
	# it all and is otherwise byte-for-byte equivalent.
	line = line.lstrip(" \t")
	# Set wanted indentation. int() truncates towards zero and Python's
	# string repetition treats negative counts as empty, so a negative
	# indent_level yields no indentation — same as the original loops.
	tabs = int(state.indent_level / 4)
	remaining_spaces = state.indent_level - tabs * 4
	return "\t" * tabs + " " * remaining_spaces + line
# Process every input file: annotate/re-indent each line, writing either to
# stdout or back to the file (IN_PLACE).
for input_filename in input_filenames:
	# Read the whole file up front; it may be rewritten in place below
	with open(input_filename) as f:
		lines = f.readlines()
	state = State()
	fixed_lines = []
	for line_i, orig_line in enumerate(lines):
		# Drop annotation lines left over from a previous run
		if ANNOTATION_PREFIX in orig_line:
			continue
		state.feed_line(orig_line, line_i)
		fixed_line = fix_line(orig_line, state)
		if state.fix_annotation and ANNOTATE_FILES:
			# Pick a comment style that stays valid in the current context
			if state.annotation_is_inside_comment:
				pre = "("
				post = ")"
			elif state.annotation_is_inside_macro:
				pre = "/* "
				post = " */ \\"
			else:
				pre = "// "
				post = ""
			annotation_line = (pre + ANNOTATION_PREFIX + state.fix_annotation +
					ANNOTATION_POSTFIX + post + "\n")
			if IN_PLACE:
				fixed_lines.append(annotation_line)
			else:
				sys.stdout.write(annotation_line)
		if ENABLE_LOG:
			# Log mode dumps original/fixed pairs and collects no output lines
			sys.stdout.write("original "+str(line_i)+": "+orig_line)
			if fixed_line != orig_line:
				sys.stdout.write(" fixed "+str(line_i)+": "+fixed_line)
		else:
			if not IN_PLACE:
				sys.stdout.write(fixed_line)
			fixed_lines.append(fixed_line)
	if IN_PLACE:
		# FIX: the rewritten file was previously opened but never closed
		# (and the already-closed read handle was closed a second time);
		# the context manager guarantees the output is flushed and closed
		with open(input_filename, "w") as f:
			f.writelines(fixed_lines)
# vim: set noet ts=4 sw=4:
| {
"content_hash": "80cad2e91620e748771b962715145d97",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 75,
"avg_line_length": 31.596610169491527,
"alnum_prop": 0.6147945499409935,
"repo_name": "celeron55/buildat",
"id": "ae5338e00ac628eb6c88d54c887089ff04684f31",
"size": "18642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/cpp_indent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "41918"
},
{
"name": "C++",
"bytes": "570762"
},
{
"name": "Lua",
"bytes": "176171"
},
{
"name": "Python",
"bytes": "18642"
},
{
"name": "Shell",
"bytes": "3552"
}
],
"symlink_target": ""
} |
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api import httpbody_pb2 # type: ignore
from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.longrunning import operations_pb2 # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.discoveryengine_v1beta.types import (
import_config,
user_event,
user_event_service,
)
from .base import DEFAULT_CLIENT_INFO, UserEventServiceTransport
from .grpc import UserEventServiceGrpcTransport
class UserEventServiceGrpcAsyncIOTransport(UserEventServiceTransport):
    """gRPC AsyncIO backend transport for UserEventService.
    Service for ingesting end user actions on a website to
    Discovery Engine API.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Class-level declarations; __init__ replaces _stubs with a
    # per-instance dict, so the class-level mapping is never shared.
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "discoveryengine.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "discoveryengine.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[aio.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): Passed through to the base
                transport constructor.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self._operations_client
    @property
    def write_user_event(
        self,
    ) -> Callable[
        [user_event_service.WriteUserEventRequest], Awaitable[user_event.UserEvent]
    ]:
        r"""Return a callable for the write user event method over gRPC.
        Writes a single user event.
        Returns:
            Callable[[~.WriteUserEventRequest],
                    Awaitable[~.UserEvent]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "write_user_event" not in self._stubs:
            self._stubs["write_user_event"] = self.grpc_channel.unary_unary(
                "/google.cloud.discoveryengine.v1beta.UserEventService/WriteUserEvent",
                request_serializer=user_event_service.WriteUserEventRequest.serialize,
                response_deserializer=user_event.UserEvent.deserialize,
            )
        return self._stubs["write_user_event"]
    @property
    def collect_user_event(
        self,
    ) -> Callable[
        [user_event_service.CollectUserEventRequest], Awaitable[httpbody_pb2.HttpBody]
    ]:
        r"""Return a callable for the collect user event method over gRPC.
        Writes a single user event from the browser. This
        uses a GET request to due to browser restriction of
        POST-ing to a 3rd party domain.
        This method is used only by the Discovery Engine API
        JavaScript pixel and Google Tag Manager. Users should
        not call this method directly.
        Returns:
            Callable[[~.CollectUserEventRequest],
                    Awaitable[~.HttpBody]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "collect_user_event" not in self._stubs:
            self._stubs["collect_user_event"] = self.grpc_channel.unary_unary(
                "/google.cloud.discoveryengine.v1beta.UserEventService/CollectUserEvent",
                request_serializer=user_event_service.CollectUserEventRequest.serialize,
                response_deserializer=httpbody_pb2.HttpBody.FromString,
            )
        return self._stubs["collect_user_event"]
    @property
    def import_user_events(
        self,
    ) -> Callable[
        [import_config.ImportUserEventsRequest], Awaitable[operations_pb2.Operation]
    ]:
        r"""Return a callable for the import user events method over gRPC.
        Bulk import of User events. Request processing might
        be synchronous. Events that already exist are skipped.
        Use this method for backfilling historical user events.
        Operation.response is of type ImportResponse. Note that
        it is possible for a subset of the items to be
        successfully inserted. Operation.metadata is of type
        ImportMetadata.
        Returns:
            Callable[[~.ImportUserEventsRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "import_user_events" not in self._stubs:
            self._stubs["import_user_events"] = self.grpc_channel.unary_unary(
                "/google.cloud.discoveryengine.v1beta.UserEventService/ImportUserEvents",
                request_serializer=import_config.ImportUserEventsRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["import_user_events"]
    def close(self):
        """Close the underlying gRPC channel."""
        return self.grpc_channel.close()
    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]
    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
    ]:
        r"""Return a callable for the list_operations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]
# Public surface of this module
__all__ = ("UserEventServiceGrpcAsyncIOTransport",)
| {
"content_hash": "85d555b38e16673f51ddccdafd297f3b",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 90,
"avg_line_length": 44.888888888888886,
"alnum_prop": 0.6231140971239981,
"repo_name": "googleapis/google-cloud-python",
"id": "36e3beec06b0d884cd9dbbb3c675e8aaf254ac99",
"size": "17568",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-cloud-discoveryengine/google/cloud/discoveryengine_v1beta/services/user_event_service/transports/grpc_asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2895"
},
{
"name": "Python",
"bytes": "5620713"
},
{
"name": "Shell",
"bytes": "51704"
}
],
"symlink_target": ""
} |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.storage.models
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
class MgmtStorageTest(AzureMgmtTestCase):
    """Live/recorded tests for the Azure storage management client."""
    def setUp(self):
        super(MgmtStorageTest, self).setUp()
        self.storage_client = self.create_mgmt_client(
            azure.mgmt.storage.StorageManagementClient
        )
    def test_storage_usage(self):
        """Subscription-level usage listing returns at least one entry."""
        usages = list(self.storage_client.usage.list())
        self.assertGreater(len(usages), 0)
    @ResourceGroupPreparer()
    def test_storage_accounts(self, resource_group, location):
        """Storage account lifecycle: check name, create, get, keys,
        regenerate, list, update and delete."""
        account_name = self.get_resource_name('pyarmstorage')
        # The generated account name must be globally available
        result_check = self.storage_client.storage_accounts.check_name_availability(
            account_name
        )
        self.assertTrue(result_check.name_available)
        self.assertFalse(result_check.reason)
        self.assertFalse(result_check.message)
        params_create = azure.mgmt.storage.models.StorageAccountCreateParameters(
            sku=azure.mgmt.storage.models.Sku(azure.mgmt.storage.models.SkuName.standard_lrs),
            kind=azure.mgmt.storage.models.Kind.storage,
            location=location,
        )
        result_create = self.storage_client.storage_accounts.create(
            resource_group.name,
            account_name,
            params_create,
        )
        # create() returns a poller; wait for the operation to finish
        storage_account = result_create.result()
        self.assertEqual(storage_account.name, account_name)
        storage_account = self.storage_client.storage_accounts.get_properties(
            resource_group.name,
            account_name,
        )
        self.assertEqual(storage_account.name, account_name)
        result_list_keys = self.storage_client.storage_accounts.list_keys(
            resource_group.name,
            account_name,
        )
        keys = {v.key_name: (v.value, v.permissions) for v in result_list_keys.keys}
        self.assertEqual(len(keys), 2)
        self.assertGreater(len(keys['key1'][0]), 0)
        # FIX: this assertion previously re-checked key1; verify key2 instead
        self.assertGreater(len(keys['key2'][0]), 0)
        result_regen_keys = self.storage_client.storage_accounts.regenerate_key(
            resource_group.name,
            account_name,
            "key1"
        )
        new_keys = {v.key_name: (v.value, v.permissions) for v in result_regen_keys.keys}
        self.assertEqual(len(new_keys), 2)
        # Only the regenerated key should have changed
        self.assertNotEqual(
            new_keys['key1'][0],
            keys['key1'][0],
        )
        self.assertEqual(
            new_keys['key2'][0],
            keys['key2'][0],
        )
        result_list = self.storage_client.storage_accounts.list_by_resource_group(
            resource_group.name,
        )
        result_list = list(result_list)
        self.assertGreater(len(result_list), 0)
        result_list = self.storage_client.storage_accounts.list()
        result_list = list(result_list)
        self.assertGreater(len(result_list), 0)
        storage_account = self.storage_client.storage_accounts.update(
            resource_group.name,
            account_name,
            azure.mgmt.storage.models.StorageAccountUpdateParameters(
                sku=azure.mgmt.storage.models.Sku(azure.mgmt.storage.models.SkuName.standard_grs)
            )
        )
        # TODO(review): assert the updated SKU on storage_account once the
        # returned contract is confirmed
        self.storage_client.storage_accounts.delete(
            resource_group.name,
            account_name,
        )
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Allow running this test module directly
    unittest.main()
| {
"content_hash": "79e51e4ce5c2e6c22c87d66af4ca1eb0",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 97,
"avg_line_length": 35.55045871559633,
"alnum_prop": 0.5883870967741935,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "cab229510344a7b2e0abbb0aaeabab6b6928e9ea",
"size": "3877",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "azure-mgmt-storage/tests/test_mgmt_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
_RE_FLAGS = re.MULTILINE | re.UNICODE
# Strips /* ... */ comments (DOTALL so they may span lines).
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
# Strips // line comments.
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
# Captures the target of an @import statement, with or without quotes/semicolon.
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
    """Media-generator filter that compiles Sass/SCSS to CSS by shelling
    out to the ``sass`` command-line tool, tracking @import dependencies
    so output is only regenerated when a source file changed."""
    # This filter produces its own input (the main module); it does not
    # consume output from earlier filters in the bundle.
    takes_input = False
    def __init__(self, **kwargs):
        self.config(kwargs, path=(), main_module=None)
        # Accept a single path string as well as a tuple/list of paths.
        # NOTE: `basestring` implies this module targets Python 2.
        if isinstance(self.path, basestring):
            self.path = (self.path,)
        super(Sass, self).__init__(**kwargs)
        assert self.filetype == 'css', (
            'Sass only supports compilation to css. '
            'The parent filter expects "%s".' % self.filetype)
        assert self.main_module, \
            'You must provide a main module'
        # Search path: configured paths first, then the project media dirs.
        self.path += tuple(get_media_dirs())
        # Pre-built "-I <dir>" argument pairs for the sass CLI.
        self.path_args = []
        for path in self.path:
            self.path_args.extend(('-I', path))
        # Compilation cache: compiled CSS, its sha1 hex digest, and a map
        # of module name -> mtime used for change detection.
        self._compiled = None
        self._compiled_hash = None
        self._dependencies = {}
    @classmethod
    def from_default(cls, name):
        # Default configuration when the filter is referenced by name only.
        return {'main_module': name}
    def get_output(self, variation):
        # Production output: compile without debug info.
        self._regenerate(debug=False)
        yield self._compiled
    def get_dev_output(self, name, variation):
        # Development mode serves exactly one name: the main module.
        assert name == self.main_module
        self._regenerate(debug=True)
        return self._compiled
    def get_dev_output_names(self, variation):
        # Yields (module name, sha1 of the compiled output).
        self._regenerate(debug=True)
        yield self.main_module, self._compiled_hash
    def _compile(self, debug=False):
        """Run the sass CLI and return the compiled CSS as a string."""
        run = ['sass', '-C', '-t', 'expanded']
        if debug:
            run.append('--line-numbers')
            if SASS_DEBUG_INFO:
                run.append('--debug-info')
        run.extend(self.path_args)
        # On Windows the sass wrapper script needs a shell to execute.
        shell = sys.platform == 'win32'
        cmd = Popen(run, shell=shell, universal_newlines=True,
                    stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # Feed a synthetic "@import <main>" on stdin so sass resolves
        # everything through the -I search path.
        output, error = cmd.communicate('@import %s' % self.main_module)
        assert cmd.wait() == 0, ('Sass command returned bad result (did you '
                                 'install Sass? http://sass-lang.com):\n%s'
                                 % error)
        return output
    def _regenerate(self, debug=False):
        """Recompile the main module unless no tracked dependency changed."""
        if self._dependencies:
            for name, mtime in self._dependencies.items():
                path = self._find_file(name)
                if not path or os.path.getmtime(path) != mtime:
                    # Just recompile everything
                    self._dependencies = {}
                    break
            else:
                # No changes
                return
        # Walk the import graph starting at the main module, recording the
        # mtime of every reachable Sass file.
        modules = [self.main_module]
        while True:
            if not modules:
                break
            module_name = modules.pop()
            path = self._find_file(module_name)
            assert path, 'Could not find the Sass module %s' % module_name
            mtime = os.path.getmtime(path)
            self._dependencies[module_name] = mtime
            fp = open(path, 'r')
            source = fp.read()
            fp.close()
            dependencies = self._get_dependencies(source)
            for name in dependencies:
                # Try relative import, first
                transformed = posixpath.join(posixpath.dirname(module_name), name)
                path = self._find_file(transformed)
                if path:
                    name = transformed
                else:
                    path = self._find_file(name)
                assert path, ('The Sass module %s could not find the '
                              'dependency %s' % (module_name, name))
                if name not in self._dependencies:
                    modules.append(name)
        self._compiled = self._compile(debug=debug)
        # sha1 over the compiled CSS; serves as the version identifier
        # yielded by get_dev_output_names().
        self._compiled_hash = sha1(self._compiled).hexdigest()
    def _get_dependencies(self, source):
        """Return @import targets in source, ignoring plain-CSS imports."""
        clean_source = multi_line_comment_re.sub('\n', source)
        clean_source = one_line_comment_re.sub('', clean_source)
        return [name for name in import_re.findall(clean_source)
                if not name.endswith('.css')]
    def _find_file(self, name):
        """Locate `name` in the media dirs, trying the .sass/.scss
        extensions and the Sass "_partial" naming convention.
        Implicitly returns None when nothing is found."""
        parts = name.rsplit('/', 1)
        parts[-1] = '_' + parts[-1]
        partial = '/'.join(parts)
        if not name.endswith(('.sass', '.scss')):
            names = (name + '.sass', name + '.scss', partial + '.sass',
                     partial + '.scss')
        else:
            names = (name, partial)
        for name in names:
            path = find_file(name, media_dirs=self.path)
            if path:
                return path
| {
"content_hash": "4de361efb53a34d620f3792268f70ac1",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 82,
"avg_line_length": 36.130434782608695,
"alnum_prop": 0.5465302847974328,
"repo_name": "flashycud/timestack",
"id": "064acea8a457aaaf8604aeb193e9c06e6f7a5228",
"size": "4986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediagenerator/filters/sass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48889"
},
{
"name": "JavaScript",
"bytes": "130064"
},
{
"name": "Python",
"bytes": "4067543"
}
],
"symlink_target": ""
} |
import SoftLayer
import sys
import logging
import time
class SoftlayerVirtualServerBasic(object):
    """Thin wrapper around the SoftLayer API for resolving a virtual
    server's id from its hostname/domain, with a per-instance cache."""
    # Class-level default; instances overwrite it once an id is resolved.
    __cached_sl_instance_id = None
    def __init__(self, sl_client, instance_config):
        self.sl_vs_manager = SoftLayer.VSManager(sl_client)
        self.sl_client = sl_client
        self.ic = instance_config
        self.sl_virtual_guest = sl_client['Virtual_Guest']
    def get_vs_id(self, cached_id=True):
        """Return the instance id for the configured host/domain.

        With cached_id=True a previously resolved id is returned without
        hitting the API; with cached_id=False the cache is bypassed and
        cleared. Returns None when no matching instance exists.
        """
        if cached_id and self.__cached_sl_instance_id is not None:
            return self.__cached_sl_instance_id
        result = self.single_result(
            self.sl_vs_manager.list_instances(
                hostname=self.ic.get_host(),
                domain=self.ic.get_domain(),
                mask="id"))
        if result is None:
            # Bug fix: this previously assigned a *local* variable
            # (`__cached_sl_instance_id = None`), so a stale cached id
            # survived even after the instance disappeared.
            self.__cached_sl_instance_id = None
            return None
        elif not cached_id:
            self.__cached_sl_instance_id = None
        else:
            self.__cached_sl_instance_id = result.get("id")
        return result.get("id")
    def single_result(self, result_list):
        """Return the first element of result_list, or None when empty."""
        if not result_list:
            return None
        return result_list[0]
class SLClientConfig(object):
    """Holds the SoftLayer API credentials taken from module parameters."""
    def __init__(self, params):
        # Missing keys simply become None (dict.get semantics).
        self.api_key = params.get("api_key")
        self.sl_username = params.get("sl_username")
    @staticmethod
    def arg_spec():
        """Ansible argument-spec fragment describing the credential options."""
        spec = {
            "api_key": {"type": "str"},
            "sl_username": {"type": "str"},
        }
        return spec
class VSInstanceConfigBasic(object):
    """Derives the host and domain parts of a virtual server from its FQDN."""
    def __init__(self, ansible_config=None):
        self.from_ansible_config(ansible_config)
    def from_ansible_config(self, ansible_config):
        """Pull the fqdn out of the Ansible module parameters."""
        self.fqdn = ansible_config.get("fqdn")
    def get_host(self):
        """Return the host label (everything before the first dot)."""
        host, _, _ = self.fqdn.partition(".")
        return host
    def get_domain(self):
        """Return the domain part; a dot-less fqdn is its own domain."""
        host, _, domain = self.fqdn.partition(".")
        return domain if domain else host
    @staticmethod
    def arg_spec():
        """Ansible argument-spec fragment: fqdn is mandatory."""
        return {
            "fqdn": {"type": "str", "required": True},
        }
"content_hash": "409d1a70cd1a941a642668b6e6bc41d6",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 29.958333333333332,
"alnum_prop": 0.5516921650440426,
"repo_name": "hubward/ansible-softlayer",
"id": "5b2888996e9a1d3fd5626cfd19fd17b4b68b26ce",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "softlayer_vs_basic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40022"
}
],
"symlink_target": ""
} |
from contextlib import closing
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Optional, Sequence
import MySQLdb
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.vertica.hooks.vertica import VerticaHook
from airflow.www import utils as wwwutils
if TYPE_CHECKING:
from airflow.utils.context import Context
# TODO: Remove renderer check when the provider has an Airflow 2.3+ requirement.
# Fall back to the generic 'sql' renderer when this Airflow installation does
# not register a dedicated 'mysql' attribute renderer.
MYSQL_RENDERER = 'mysql' if 'mysql' in wwwutils.get_attr_renderer() else 'sql'
class VerticaToMySqlOperator(BaseOperator):
    """
    Moves data from Vertica to MySQL.
    :param sql: SQL query to execute against the Vertica database. (templated)
    :param vertica_conn_id: source Vertica connection
    :param mysql_table: target MySQL table, use dot notation to target a
        specific database. (templated)
    :param mysql_conn_id: Reference to :ref:`mysql connection id <howto/connection:mysql>`.
    :param mysql_preoperator: sql statement to run against MySQL prior to
        import, typically used to truncate or delete in place of the data
        coming in, allowing the task to be idempotent (running the task
        twice won't double load data). (templated)
    :param mysql_postoperator: sql statement to run against MySQL after the
        import, typically used to move data from staging to production
        and issue cleanup commands. (templated)
    :param bulk_load: flag to use bulk_load option. This loads MySQL directly
        from a tab-delimited text file using the LOAD DATA LOCAL INFILE command.
        This option requires an extra connection parameter for the
        destination MySQL connection: {'local_infile': true}.
    """
    template_fields: Sequence[str] = ('sql', 'mysql_table', 'mysql_preoperator', 'mysql_postoperator')
    template_ext: Sequence[str] = ('.sql',)
    # Which renderer the web UI uses for each templated field (see MYSQL_RENDERER).
    template_fields_renderers = {
        "sql": "sql",
        "mysql_preoperator": MYSQL_RENDERER,
        "mysql_postoperator": MYSQL_RENDERER,
    }
    ui_color = '#a0e08c'
    def __init__(
        self,
        sql: str,
        mysql_table: str,
        vertica_conn_id: str = 'vertica_default',
        mysql_conn_id: str = 'mysql_default',
        mysql_preoperator: Optional[str] = None,
        mysql_postoperator: Optional[str] = None,
        bulk_load: bool = False,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)
        self.sql = sql
        self.mysql_table = mysql_table
        self.mysql_conn_id = mysql_conn_id
        self.mysql_preoperator = mysql_preoperator
        self.mysql_postoperator = mysql_postoperator
        self.vertica_conn_id = vertica_conn_id
        self.bulk_load = bulk_load
    def execute(self, context: 'Context'):
        # Entry point: transfer rows (bulk or row-by-row) then run the
        # optional postoperator statement against MySQL.
        vertica = VerticaHook(vertica_conn_id=self.vertica_conn_id)
        mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
        if self.bulk_load:
            self._bulk_load_transfer(mysql, vertica)
        else:
            self._non_bulk_load_transfer(mysql, vertica)
        if self.mysql_postoperator:
            self.log.info("Running MySQL postoperator...")
            mysql.run(self.mysql_postoperator)
        self.log.info("Done")
    def _non_bulk_load_transfer(self, mysql, vertica):
        # Fetches the whole Vertica result set into memory, then inserts it
        # through MySqlHook.insert_rows.
        with closing(vertica.get_conn()) as conn:
            with closing(conn.cursor()) as cursor:
                cursor.execute(self.sql)
                selected_columns = [d.name for d in cursor.description]
                self.log.info("Selecting rows from Vertica...")
                self.log.info(self.sql)
                result = cursor.fetchall()
                count = len(result)
                self.log.info("Selected rows from Vertica %s", count)
        self._run_preoperator(mysql)
        try:
            self.log.info("Inserting rows into MySQL...")
            mysql.insert_rows(table=self.mysql_table, rows=result, target_fields=selected_columns)
            self.log.info("Inserted rows into MySQL %s", count)
        except (MySQLdb.Error, MySQLdb.Warning):
            # Inserted count is unknown on failure; log 0 and re-raise.
            self.log.info("Inserted rows into MySQL 0")
            raise
    def _bulk_load_transfer(self, mysql, vertica):
        # Streams the Vertica result set into a tab-delimited temp file and
        # loads it via MySQL's LOAD DATA LOCAL INFILE.
        count = 0
        with closing(vertica.get_conn()) as conn:
            with closing(conn.cursor()) as cursor:
                cursor.execute(self.sql)
                selected_columns = [d.name for d in cursor.description]
        # NOTE(review): as written, `cursor` is iterated below after its
        # context manager exited, and `tmpfile.name` is referenced after the
        # NamedTemporaryFile block closes (which deletes the file). This
        # looks like lost nesting from the original source -- confirm the
        # intended block structure before relying on this method.
        with NamedTemporaryFile("w") as tmpfile:
            self.log.info("Selecting rows from Vertica to local file %s...", tmpfile.name)
            self.log.info(self.sql)
            csv_writer = csv.writer(tmpfile, delimiter='\t', encoding='utf-8')
            for row in cursor.iterate():
                csv_writer.writerow(row)
                count += 1
            tmpfile.flush()
        self._run_preoperator(mysql)
        try:
            self.log.info("Bulk inserting rows into MySQL...")
            with closing(mysql.get_conn()) as conn:
                with closing(conn.cursor()) as cursor:
                    cursor.execute(
                        f"LOAD DATA LOCAL INFILE '{tmpfile.name}' "
                        f"INTO TABLE {self.mysql_table} "
                        f"LINES TERMINATED BY '\r\n' ({', '.join(selected_columns)})"
                    )
                    conn.commit()
            tmpfile.close()
            self.log.info("Inserted rows into MySQL %s", count)
        except (MySQLdb.Error, MySQLdb.Warning):
            self.log.info("Inserted rows into MySQL 0")
            raise
    def _run_preoperator(self, mysql):
        # Runs the optional preoperator SQL (e.g. truncate) before loading.
        if self.mysql_preoperator:
            self.log.info("Running MySQL preoperator...")
            mysql.run(self.mysql_preoperator)
| {
"content_hash": "be55394d62b44cadfb3bf79502cf477b",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 102,
"avg_line_length": 40.8125,
"alnum_prop": 0.6120469627360898,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "595b2cb01b3dc5fd699e1bc6d1ca4938f8d2f680",
"size": "6665",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "airflow/providers/mysql/transfers/vertica_to_mysql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HProp1_ConnectedLHS(HimesisPreConditionPatternLHS):
    """Generated LHS pattern graph for property Prop1: two EClass nodes
    connected in a cycle through two EReference nodes via alternating
    eStructuralFeatures/eType association links (see add_edges below)."""
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HProp1_ConnectedLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HProp1_ConnectedLHS, self).__init__(name='HProp1_ConnectedLHS', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Prop1')
        # Set the node attributes
        # match class EClass() node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["mm__"] = """MT_pre__EClass"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # match class EReference() node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["MT_dirty__"] = False
        self.vs[1]["mm__"] = """MT_pre__EReference"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # match class EReference() node
        self.add_node()
        self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["MT_dirty__"] = False
        self.vs[2]["mm__"] = """MT_pre__EReference"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # match class EClass() node
        self.add_node()
        self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[3]["MT_label__"] = """4"""
        self.vs[3]["MT_dirty__"] = False
        self.vs[3]["mm__"] = """MT_pre__EClass"""
        self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # Nodes that represent the edges of the property.
        # match association EClass--eStructuralFeatures-->EReference node
        self.add_node()
        self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eStructuralFeatures"
"""
        self.vs[4]["MT_label__"] = """5"""
        self.vs[4]["MT_subtypes__"] = []
        self.vs[4]["MT_dirty__"] = False
        self.vs[4]["mm__"] = """MT_pre__directLink_S"""
        self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')
        # match association EReference--eType-->EClass node
        self.add_node()
        self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eType"
"""
        self.vs[5]["MT_label__"] = """6"""
        self.vs[5]["MT_subtypes__"] = []
        self.vs[5]["MT_dirty__"] = False
        self.vs[5]["mm__"] = """MT_pre__directLink_S"""
        self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')
        # match association EClass--eStructuralFeatures-->EReference node
        self.add_node()
        self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eStructuralFeatures"
"""
        self.vs[6]["MT_label__"] = """7"""
        self.vs[6]["MT_subtypes__"] = []
        self.vs[6]["MT_dirty__"] = False
        self.vs[6]["mm__"] = """MT_pre__directLink_S"""
        self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc6')
        # match association EReference--eType-->EClass node
        self.add_node()
        self.vs[7]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eType"
"""
        self.vs[7]["MT_label__"] = """8"""
        self.vs[7]["MT_subtypes__"] = []
        self.vs[7]["MT_dirty__"] = False
        self.vs[7]["mm__"] = """MT_pre__directLink_S"""
        self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc7')
        # Add the edges
        self.add_edges([
            (0,4), # match_class EClass() -> association eStructuralFeatures
            (4,1), # association eStructuralFeatures -> match_class EReference()
            (1,5), # match_class EReference() -> association eType
            (5,3), # association eType -> match_class EClass()
            (3,6), # match_class EClass() -> association eStructuralFeatures
            (6,2), # association eStructuralFeatures -> match_class EReference()
            (2,7), # match_class EReference() -> association eType
            (7,0) # association eType -> match_class EClass()
        ])
        # Add the attribute equations
        self["equations"] = []
    # The eval_attr1<label> methods below mirror the per-node MT_pre__attr1
    # string constraints in executable form (label N corresponds to node N-1).
    def eval_attr11(self, attr_value, this):
        """Attribute constraint for node label 1 (EClass): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr12(self, attr_value, this):
        """Attribute constraint for node label 2 (EReference): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr13(self, attr_value, this):
        """Attribute constraint for node label 3 (EReference): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr14(self, attr_value, this):
        """Attribute constraint for node label 4 (EClass): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr15(self, attr_value, this):
        """Link constraint for label 5: must be an eStructuralFeatures link."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return attr_value == "eStructuralFeatures"
    def eval_attr16(self, attr_value, this):
        """Link constraint for label 6: must be an eType link."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return attr_value == "eType"
    def eval_attr17(self, attr_value, this):
        """Link constraint for label 7: must be an eStructuralFeatures link."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return attr_value == "eStructuralFeatures"
    def eval_attr18(self, attr_value, this):
        """Link constraint for label 8: must be an eType link."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return attr_value == "eType"
    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        return True
| {
"content_hash": "895dd59c75d71d1d48b2a6cc0c5bb35a",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 125,
"avg_line_length": 52.817629179331306,
"alnum_prop": 0.4738447372964263,
"repo_name": "levilucio/SyVOLT",
"id": "34596c4b630645c7d88c7669d47842a84cd0ed9a",
"size": "17377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ECore_Copier_MM/properties/positive/HProp1_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from django.utils import timezone
from django.utils.text import slugify
from .models import Article
def create_articles(num):
    """Create `num` dummy published articles.

    Titles (and therefore slugs) are offset by the pk of the first
    existing article so repeated runs tend to produce unique values.
    """
    first = Article.objects.first()
    # Robustness fix: Article.objects.all()[0] raised IndexError on an
    # empty table; start the offset at 0 in that case instead.
    pk = first.pk if first is not None else 0
    for i in range(num):
        article = Article()
        article.title = "Article number " + str(i + pk)
        article.slug = slugify(article.title)
        article.published = timezone.now()
        article.save()
| {
"content_hash": "44d42b99ab5d0c8373cbaf11fd8f788f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 53,
"avg_line_length": 27.133333333333333,
"alnum_prop": 0.6560196560196561,
"repo_name": "groundupnews/gu",
"id": "2ceacb5a800bc9dad5710c88de6cc4bf349b57a8",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newsroom/create_dummy_articles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222991"
},
{
"name": "HTML",
"bytes": "563742"
},
{
"name": "JavaScript",
"bytes": "790912"
},
{
"name": "PHP",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "598998"
},
{
"name": "Roff",
"bytes": "888"
},
{
"name": "Shell",
"bytes": "803"
},
{
"name": "XSLT",
"bytes": "870"
}
],
"symlink_target": ""
} |
"""
Holds user_metrics table model.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb'
__license__ = 'MIT'
import sqlalchemy as sa
from sqlalchemy import orm
from debexpo.model import meta, OrmObject
from debexpo.model.users import User
# Table of per-user metric counters, one row per (user, metric name).
t_user_metrics = sa.Table('user_metrics', meta.metadata,
    sa.Column('id', sa.types.Integer, primary_key=True),
    sa.Column('user_id', sa.types.Integer, sa.ForeignKey('users.id')),
    # NOTE(review): 'name' is declared Integer although the column name
    # suggests a string identifier -- confirm against the live schema
    # before changing anything here.
    sa.Column('name', sa.types.Integer, nullable=False),
    sa.Column('value', sa.types.Integer, nullable=False),
    )
class UserMetric(OrmObject):
    # Attributes listed in `foreign` are relations handled by OrmObject.
    foreign = ['user']
# Classic-style mapping; UserMetric.user joins to User, and each User gains
# a `user_metrics` backref collection.
orm.mapper(UserMetric, t_user_metrics, properties={
    'user' : orm.relation(User, backref='user_metrics')
})
| {
"content_hash": "1513a8fbfef98e21e72db73d275c4e4d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 27.296296296296298,
"alnum_prop": 0.6919945725915875,
"repo_name": "swvist/Debexpo",
"id": "88c5a333cb643270279fccae3ab62d31d725051e",
"size": "2048",
"binary": false,
"copies": "3",
"ref": "refs/heads/gsoc",
"path": "debexpo/model/user_metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3986"
},
{
"name": "Python",
"bytes": "411992"
},
{
"name": "Shell",
"bytes": "3905"
}
],
"symlink_target": ""
} |
"""
Performance runner for d8.
Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
The suite json format is expected to be:
{
"path": <relative path chunks to perf resources and main file>,
"name": <optional suite name, file name is default>,
"archs": [<architecture name for which this suite is run>, ...],
"binary": <name of binary to run, default "d8">,
"flags": [<flag to d8>, ...],
"test_flags": [<flag to the test file>, ...],
"run_count": <how often will this suite run (optional)>,
"run_count_XXX": <how often will this suite run for arch XXX (optional)>,
"resources": [<js file to be moved to android device>, ...]
"main": <main js perf runner file>,
"results_regexp": <optional regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
"tests": [
{
"name": <name of the trace>,
"results_regexp": <optional more specific regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
}, ...
]
}
The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.
A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>
The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.
A suite without "tests" is considered a performance test itself.
Full example (suite with one runner):
{
"path": ["."],
"flags": ["--expose-gc"],
"test_flags": ["5"],
"archs": ["ia32", "x64"],
"run_count": 5,
"run_count_ia32": 3,
"main": "run.js",
"results_regexp": "^%s: (.+)$",
"units": "score",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "NavierStokes",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Full example (suite with several runners):
{
"path": ["."],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
"units": "score",
"tests": [
{"name": "Richards",
"path": ["richards"],
"main": "run.js",
"run_count": 3,
"results_regexp": "^Richards: (.+)$"},
{"name": "NavierStokes",
"path": ["navier_stokes"],
"main": "run.js",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import sys
from testrunner.local import commands
from testrunner.local import utils
# Architecture of the local machine; used when --arch is 'auto' or 'native'
# (see Main).
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "android_x64",
                   "arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]

# Matches chromium-style perf result lines:
#   RESULT <graph>: <trace>= <body> <units>
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
# Result body of the form {mean,stddev}.
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
# Result body of the form [v1,v2,...].
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
def LoadAndroidBuildTools(path):  # pragma: no cover
    """Imports the Android pylib helper modules from a chromium checkout.

    path: chromium's build/android directory; prepended to sys.path so the
    pylib packages can be imported. The imported modules are then published
    as module-level globals for use by AndroidPlatform.
    """
    assert os.path.exists(path)
    sys.path.insert(0, path)

    from pylib.device import device_utils  # pylint: disable=F0401
    from pylib.device import device_errors  # pylint: disable=F0401
    from pylib.perf import cache_control  # pylint: disable=F0401
    from pylib.perf import perf_control  # pylint: disable=F0401
    import pylib.android_commands  # pylint: disable=F0401
    # Promote the function-local imports above to module globals so the rest
    # of the file can reference them.
    global cache_control
    global device_errors
    global device_utils
    global perf_control
    global pylib
def GeometricMean(values):
    """Returns the geometric mean of a list of values as a string.

    The mean is calculated using log to avoid overflow. Accepts any iterable
    of numbers or numeric strings.
    """
    # A list comprehension instead of map(): identical under Python 2, and it
    # keeps len() working under Python 3 (where map returns an iterator).
    values = [float(v) for v in values]
    return str(math.exp(sum(map(math.log, values)) / len(values)))
class Results(object):
    """Accumulator for result traces and error messages."""

    def __init__(self, traces=None, errors=None):
        # Falsy arguments (None or empty) are replaced by fresh lists; a
        # non-empty list passed in is aliased, not copied.
        self.traces = traces or []
        self.errors = errors or []

    def ToDict(self):
        """Returns the plain-dict form used for JSON serialization."""
        return {"traces": self.traces, "errors": self.errors}

    def WriteToFile(self, file_name):
        """Dumps all collected traces and errors to file_name as JSON."""
        with open(file_name, "w") as out_file:
            out_file.write(json.dumps(self.ToDict()))

    def __add__(self, other):
        # NOTE: mutates self in place and returns it, which makes both
        # `a + b` and `a += b` accumulate into a.
        self.traces += other.traces
        self.errors += other.errors
        return self

    def __str__(self):  # pragma: no cover
        return str(self.ToDict())
class Node(object):
    """A node in the suite tree; knows only about its children."""

    def __init__(self, *args):
        # Extra positional args are accepted for subclass convenience but
        # deliberately ignored here.
        self._children = []

    def AppendChild(self, child):
        """Attaches child as the last child of this node."""
        self._children.append(child)
class DefaultSentinel(Node):
    """Fake parent node supplying the default value for every Graph setting."""

    def __init__(self):
        super(DefaultSentinel, self).__init__()
        # Scalar defaults.
        self.binary = "d8"
        self.run_count = 10
        self.timeout = 60
        self.units = "score"
        self.total = False
        # Accumulating settings start out empty.
        self.path = []
        self.graphs = []
        self.flags = []
        self.test_flags = []
        self.resources = []
        # No regexps by default; suites must provide their own.
        self.results_regexp = None
        self.stddev_regexp = None
class Graph(Node):
    """Represents a suite definition.

    Can either be a leaf or an inner node that provides default values.
    Settings cascade from the parent node: list-valued settings accumulate,
    scalar settings fall back to the parent's value.
    """
    def __init__(self, suite, parent, arch):
        super(Graph, self).__init__()
        self._suite = suite

        # Sanity-check the configuration dict.
        assert isinstance(suite.get("path", []), list)
        assert isinstance(suite["name"], basestring)
        assert isinstance(suite.get("flags", []), list)
        assert isinstance(suite.get("test_flags", []), list)
        assert isinstance(suite.get("resources", []), list)

        # Accumulated values.
        self.path = parent.path[:] + suite.get("path", [])
        self.graphs = parent.graphs[:] + [suite["name"]]
        self.flags = parent.flags[:] + suite.get("flags", [])
        self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

        # Values independent of parent node.
        self.resources = suite.get("resources", [])

        # Discrete values (with parent defaults). run_count/timeout may be
        # overridden per architecture via "run_count_<arch>"/"timeout_<arch>".
        self.binary = suite.get("binary", parent.binary)
        self.run_count = suite.get("run_count", parent.run_count)
        self.run_count = suite.get("run_count_%s" % arch, self.run_count)
        self.timeout = suite.get("timeout", parent.timeout)
        self.timeout = suite.get("timeout_%s" % arch, self.timeout)
        self.units = suite.get("units", parent.units)
        self.total = suite.get("total", parent.total)

        # A regular expression for results. If the parent graph provides a
        # regexp and the current suite has none, a string place holder for the
        # suite name is expected.
        # TODO(machenbach): Currently that makes only sense for the leaf level.
        # Multiple place holders for multiple levels are not supported.
        if parent.results_regexp:
            regexp_default = parent.results_regexp % re.escape(suite["name"])
        else:
            regexp_default = None
        self.results_regexp = suite.get("results_regexp", regexp_default)

        # A similar regular expression for the standard deviation (optional).
        if parent.stddev_regexp:
            stddev_default = parent.stddev_regexp % re.escape(suite["name"])
        else:
            stddev_default = None
        self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
class Trace(Graph):
    """Represents a leaf in the suite tree structure.

    Handles collection of measurements from one run's stdout.
    """
    def __init__(self, suite, parent, arch):
        super(Trace, self).__init__(suite, parent, arch)
        # A leaf must know how to extract its result from the output.
        assert self.results_regexp
        self.results = []
        self.errors = []
        self.stddev = ""

    def ConsumeOutput(self, stdout):
        """Extracts one measurement (and optional stddev) from stdout.

        Match failures are recorded as error strings rather than raised.
        """
        try:
            result = re.search(self.results_regexp, stdout, re.M).group(1)
            self.results.append(str(float(result)))
        except ValueError:
            self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                               % (self.results_regexp, self.graphs[-1]))
        except Exception:
            # No match (re.search returned None -> AttributeError) or a broken
            # regexp. Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit.
            self.errors.append("Regexp \"%s\" didn't match for test %s."
                               % (self.results_regexp, self.graphs[-1]))

        try:
            if self.stddev_regexp and self.stddev:
                # A test-provided stddev only makes sense for a single run.
                self.errors.append("Test %s should only run once since a stddev "
                                   "is provided by the test." % self.graphs[-1])
            if self.stddev_regexp:
                self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
        except Exception:
            # Same rationale as above: record, don't raise; don't catch
            # KeyboardInterrupt/SystemExit.
            self.errors.append("Regexp \"%s\" didn't match for test %s."
                               % (self.stddev_regexp, self.graphs[-1]))

    def GetResults(self):
        """Wraps the collected measurements in a Results object."""
        return Results([{
            "graphs": self.graphs,
            "units": self.units,
            "results": self.results,
            "stddev": self.stddev,
        }], self.errors)
class Runnable(Graph):
    """Represents a runnable suite definition (i.e. has a main file)."""

    @property
    def main(self):
        # An empty string is a valid value (see MakeGraph).
        return self._suite.get("main", "")

    def ChangeCWD(self, suite_path):
        """Changes the cwd to the path defined in the current graph.

        The tests are supposed to be relative to the suite configuration.
        """
        suite_dir = os.path.abspath(os.path.dirname(suite_path))
        bench_dir = os.path.normpath(os.path.join(*self.path))
        os.chdir(os.path.join(suite_dir, bench_dir))

    def GetCommandFlags(self):
        """Returns d8 flags + main file + test flags separated by '--'."""
        suffix = ["--"] + self.test_flags if self.test_flags else []
        return self.flags + [self.main] + suffix

    def GetCommand(self, shell_dir):
        """Returns the full command line for running this suite's binary."""
        # TODO(machenbach): This requires +.exe if run on windows.
        return [os.path.join(shell_dir, self.binary)] + self.GetCommandFlags()

    def Run(self, runner):
        """Iterates over several runs and handles the output for all traces."""
        for stdout in runner():
            for trace in self._children:
                trace.ConsumeOutput(stdout)
        # Accumulate the child traces' results with an explicit loop rather
        # than reduce(): clearer, and reduce is not a builtin in Python 3.
        res = Results()
        for trace in self._children:
            res += trace.GetResults()

        if not res.traces or not self.total:
            return res

        # Assume all traces have the same structure.
        if len(set(len(t["results"]) for t in res.traces)) != 1:
            res.errors.append("Not all traces have the same number of results.")
            return res

        # Calculate the geometric means for all traces. Above we made sure that
        # there is at least one trace and that the number of results is the same
        # for each trace.
        n_results = len(res.traces[0]["results"])
        total_results = [GeometricMean(t["results"][i] for t in res.traces)
                         for i in range(0, n_results)]
        res.traces.append({
            "graphs": self.graphs + ["Total"],
            "units": res.traces[0]["units"],
            "results": total_results,
            "stddev": "",
        })
        return res
class RunnableTrace(Trace, Runnable):
    """Represents a runnable suite definition that is a leaf.

    Combines Trace's measurement collection with Runnable's command handling
    (both bases are listed explicitly; MRO resolves shared Graph state).
    """
    def __init__(self, suite, parent, arch):
        super(RunnableTrace, self).__init__(suite, parent, arch)

    def Run(self, runner):
        """Iterates over several runs and handles the output."""
        for stdout in runner():
            self.ConsumeOutput(stdout)
        return self.GetResults()
class RunnableGeneric(Runnable):
    """Represents a runnable suite definition with generic traces.

    Instead of per-trace regexps, the output is scanned for chromium-style
    "RESULT <graph>: <trace>= <body> <units>" lines (GENERIC_RESULTS_RE).
    """
    def __init__(self, suite, parent, arch):
        super(RunnableGeneric, self).__init__(suite, parent, arch)

    def Run(self, runner):
        """Iterates over several runs and handles the output."""
        # Keyed by trace name; insertion order preserved for stable output.
        traces = OrderedDict()
        for stdout in runner():
            for line in stdout.strip().splitlines():
                match = GENERIC_RESULTS_RE.match(line)
                if match:
                    stddev = ""
                    graph = match.group(1)
                    trace = match.group(2)
                    body = match.group(3)
                    units = match.group(4)
                    match_stddev = RESULT_STDDEV_RE.match(body)
                    match_list = RESULT_LIST_RE.match(body)
                    errors = []
                    if match_stddev:
                        # Body has the form {mean,stddev}.
                        result, stddev = map(str.strip, match_stddev.group(1).split(","))
                        results = [result]
                    elif match_list:
                        # Body has the form [v1,v2,...].
                        results = map(str.strip, match_list.group(1).split(","))
                    else:
                        # Plain single value.
                        results = [body.strip()]
                    # NOTE: Python 2 semantics — map() returns a list here, so
                    # the float conversion below happens eagerly inside the try.
                    try:
                        results = map(lambda r: str(float(r)), results)
                    except ValueError:
                        results = []
                        errors = ["Found non-numeric in %s" %
                                  "/".join(self.graphs + [graph, trace])]

                    # First RESULT line for a trace creates its Results entry;
                    # later lines extend it.
                    trace_result = traces.setdefault(trace, Results([{
                        "graphs": self.graphs + [graph, trace],
                        "units": (units or self.units).strip(),
                        "results": [],
                        "stddev": "",
                    }], errors))
                    trace_result.traces[0]["results"].extend(results)
                    trace_result.traces[0]["stddev"] = stddev
        # Python 2: dict.itervalues() and the builtin reduce.
        return reduce(lambda r, t: r + t, traces.itervalues(), Results())
def MakeGraph(suite, arch, parent):
    """Factory method for making graph objects."""
    if isinstance(parent, Runnable):
        # Below a runnable, only plain traces can appear.
        return Trace(suite, parent, arch)
    if suite.get("main") is not None:
        # A main file makes this graph runnable. Empty strings are accepted.
        if suite.get("tests"):
            # Inner runnable: has subgraphs (traces).
            return Runnable(suite, parent, arch)
        # Runnable without subgraphs: a leaf.
        return RunnableTrace(suite, parent, arch)
    if suite.get("generic"):
        # Generic suite definition: either a runnable executable or it has a
        # main js file.
        return RunnableGeneric(suite, parent, arch)
    if suite.get("tests"):
        # Neither a leaf nor runnable: a pure container node.
        return Graph(suite, parent, arch)
    raise Exception("Invalid suite configuration.")  # pragma: no cover
def BuildGraphs(suite, arch, parent=None):
    """Recursively builds the graph/trace tree for one suite configuration.

    Returns the root node of this (sub)suite, or None if the suite is not
    configured for the requested architecture.
    """
    if parent is None:
        parent = DefaultSentinel()

    # TODO(machenbach): Implement notion of cpu type?
    if arch not in suite.get("archs", SUPPORTED_ARCHS):
        return None

    node = MakeGraph(suite, arch, parent)
    for child_suite in suite.get("tests", []):
        BuildGraphs(child_suite, arch, node)
    parent.AppendChild(node)
    return node
def FlattenRunnables(node, node_cb):
    """Generator that walks the tree and yields every runnable suite.

    node_cb is invoked once for every visited node, runnable or not.
    """
    node_cb(node)
    if isinstance(node, Runnable):
        yield node
    elif isinstance(node, Node):
        for child in node._children:
            for runnable in FlattenRunnables(child, node_cb):
                yield runnable
    else:  # pragma: no cover
        raise Exception("Invalid suite configuration.")
class Platform(object):
    """Base class; selects the concrete platform implementation."""

    @staticmethod
    def GetPlatform(options):
        # Android architectures carry an "android" prefix; everything else
        # runs on the local desktop machine.
        if options.arch.startswith("android"):
            return AndroidPlatform(options)
        return DesktopPlatform(options)
class DesktopPlatform(Platform):
    """Runs benchmark binaries directly on the local machine."""
    def __init__(self, options):
        self.shell_dir = options.shell_dir

    def PreExecution(self):
        # Nothing to prepare on desktop.
        pass

    def PostExecution(self):
        # Nothing to clean up on desktop.
        pass

    def PreTests(self, node, path):
        # Runnables expect to be executed from their suite directory.
        if isinstance(node, Runnable):
            node.ChangeCWD(path)

    def Run(self, runnable, count):
        """Runs the binary once; returns its stdout ("" on OSError)."""
        try:
            output = commands.Execute(runnable.GetCommand(self.shell_dir),
                                      timeout=runnable.timeout)
        except OSError as e:
            print ">>> OSError (#%d):" % (count + 1)
            print e
            return ""
        print ">>> Stdout (#%d):" % (count + 1)
        print output.stdout
        if output.stderr:  # pragma: no cover
            # Print stderr for debugging.
            print ">>> Stderr (#%d):" % (count + 1)
            print output.stderr
        if output.timed_out:
            print ">>> Test timed out after %ss." % runnable.timeout
        return output.stdout
class AndroidPlatform(Platform):  # pragma: no cover
    """Runs benchmark binaries on an attached Android device via adb."""

    # Staging directory for binaries/benchmarks on the device.
    DEVICE_DIR = "/data/local/tmp/v8/"

    def __init__(self, options):
        self.shell_dir = options.shell_dir
        LoadAndroidBuildTools(options.android_build_tools)

        if not options.device:
            # Detect attached device if not specified.
            devices = pylib.android_commands.GetAttachedDevices(
                hardware=True, emulator=False, offline=False)
            assert devices and len(devices) == 1, (
                "None or multiple devices detected. Please specify the device on "
                "the command-line with --device")
            options.device = devices[0]
        adb_wrapper = pylib.android_commands.AndroidCommands(options.device)
        self.device = device_utils.DeviceUtils(adb_wrapper)
        self.adb = adb_wrapper.Adb()

    def PreExecution(self):
        # Pin the device to high-performance mode for stable measurements.
        perf = perf_control.PerfControl(self.device)
        perf.SetHighPerfMode()

        # Remember what we have already pushed to the device.
        self.pushed = set()

    def PostExecution(self):
        # Restore default power management and remove pushed files.
        perf = perf_control.PerfControl(self.device)
        perf.SetDefaultPerfMode()
        self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

    def _SendCommand(self, cmd):
        """Runs one raw adb command (60s timeout) and returns its output."""
        logging.info("adb -s %s %s" % (str(self.device), cmd))
        return self.adb.SendCommand(cmd, timeout_time=60)

    def _PushFile(self, host_dir, file_name, target_rel=".",
                  skip_if_missing=False):
        """Pushes one file to DEVICE_DIR/target_rel, at most once per run."""
        file_on_host = os.path.join(host_dir, file_name)
        file_on_device_tmp = os.path.join(
            AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
        file_on_device = os.path.join(
            AndroidPlatform.DEVICE_DIR, target_rel, file_name)
        folder_on_device = os.path.dirname(file_on_device)

        # Only attempt to push files that exist.
        if not os.path.exists(file_on_host):
            if not skip_if_missing:
                logging.critical('Missing file on host: %s' % file_on_host)
            return

        # Only push files not yet pushed in one execution.
        if file_on_host in self.pushed:
            return
        else:
            self.pushed.add(file_on_host)

        # Work-around for "text file busy" errors. Push the files to a temporary
        # location and then copy them with a shell command.
        output = self._SendCommand(
            "push %s %s" % (file_on_host, file_on_device_tmp))
        # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
        # Errors look like this: "failed to copy ... ".
        if output and not re.search('^[0-9]', output.splitlines()[-1]):
            logging.critical('PUSH FAILED: ' + output)
        self._SendCommand("shell mkdir -p %s" % folder_on_device)
        self._SendCommand("shell cp %s %s" % (file_on_device_tmp, file_on_device))

    def PreTests(self, node, path):
        """Pushes the binary, startup blobs, main file and resources."""
        suite_dir = os.path.abspath(os.path.dirname(path))
        if node.path:
            bench_rel = os.path.normpath(os.path.join(*node.path))
            bench_abs = os.path.join(suite_dir, bench_rel)
        else:
            bench_rel = "."
            bench_abs = suite_dir

        self._PushFile(self.shell_dir, node.binary)

        # Push external startup data. Backwards compatible for revisions where
        # these files didn't exist.
        self._PushFile(self.shell_dir, "natives_blob.bin", skip_if_missing=True)
        self._PushFile(self.shell_dir, "snapshot_blob.bin", skip_if_missing=True)

        if isinstance(node, Runnable):
            self._PushFile(bench_abs, node.main, bench_rel)
        for resource in node.resources:
            self._PushFile(bench_abs, resource, bench_rel)

    def Run(self, runnable, count):
        """Runs the pushed binary once on the device; returns its stdout."""
        # Drop OS caches so runs don't benefit from earlier ones.
        cache = cache_control.CacheControl(self.device)
        cache.DropRamCaches()
        binary_on_device = AndroidPlatform.DEVICE_DIR + runnable.binary
        cmd = [binary_on_device] + runnable.GetCommandFlags()

        # Relative path to benchmark directory.
        if runnable.path:
            bench_rel = os.path.normpath(os.path.join(*runnable.path))
        else:
            bench_rel = "."

        try:
            output = self.device.RunShellCommand(
                cmd,
                cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
                timeout=runnable.timeout,
                retries=0,
            )
            stdout = "\n".join(output)
            print ">>> Stdout (#%d):" % (count + 1)
            print stdout
        except device_errors.CommandTimeoutError:
            print ">>> Test timed out after %ss." % runnable.timeout
            stdout = ""
        return stdout
# TODO: Implement results_processor.
def Main(args):
    """Entry point: parses options, runs every suite config given in args.

    Returns 0 on success, 1 on bad usage or if any suite produced errors.
    """
    logging.getLogger().setLevel(logging.INFO)
    parser = optparse.OptionParser()
    parser.add_option("--android-build-tools",
                      help="Path to chromium's build/android.")
    parser.add_option("--arch",
                      help=("The architecture to run tests for, "
                            "'auto' or 'native' for auto-detect"),
                      default="x64")
    parser.add_option("--buildbot",
                      help="Adapt to path structure used on buildbots",
                      default=False, action="store_true")
    parser.add_option("--device",
                      help="The device ID to run Android tests on. If not given "
                           "it will be autodetected.")
    parser.add_option("--json-test-results",
                      help="Path to a file for storing json results.")
    parser.add_option("--outdir", help="Base directory with compile output",
                      default="out")
    (options, args) = parser.parse_args(args)

    # Remaining args are the suite configuration files; at least one needed.
    if len(args) == 0:  # pragma: no cover
        parser.print_help()
        return 1

    if options.arch in ["auto", "native"]:  # pragma: no cover
        options.arch = ARCH_GUESS

    if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
        print "Unknown architecture %s" % options.arch
        return 1

    # --android-build-tools and an android arch must be given together.
    if (bool(options.arch.startswith("android")) !=
        bool(options.android_build_tools)):  # pragma: no cover
        print ("Android architectures imply setting --android-build-tools and the "
               "other way around.")
        return 1

    if (options.device and not
        options.arch.startswith("android")):  # pragma: no cover
        print "Specifying a device requires an Android architecture to be used."
        return 1

    workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

    if options.buildbot:
        options.shell_dir = os.path.join(workspace, options.outdir, "Release")
    else:
        options.shell_dir = os.path.join(workspace, options.outdir,
                                         "%s.release" % options.arch)

    platform = Platform.GetPlatform(options)

    results = Results()
    for path in args:
        path = os.path.abspath(path)

        if not os.path.exists(path):  # pragma: no cover
            results.errors.append("Configuration file %s does not exist." % path)
            continue

        with open(path) as f:
            suite = json.loads(f.read())

        # If no name is given, default to the file name without .json.
        suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

        # Setup things common to one test suite.
        platform.PreExecution()

        # Build the graph/trace tree structure.
        root = BuildGraphs(suite, options.arch)

        # Callback to be called on each node on traversal.
        def NodeCB(node):
            platform.PreTests(node, path)

        # Traverse graph/trace tree and iterate over all runnables.
        for runnable in FlattenRunnables(root, NodeCB):
            print ">>> Running suite: %s" % "/".join(runnable.graphs)

            def Runner():
                """Output generator that reruns several times."""
                for i in xrange(0, max(1, runnable.run_count)):
                    # TODO(machenbach): Allow timeout per arch like with run_count per
                    # arch.
                    yield platform.Run(runnable, i)

            # Let runnable iterate over all runs and handle output.
            results += runnable.Run(Runner)

        platform.PostExecution()

    if options.json_test_results:
        results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
        print results

    return min(1, len(results.errors))
# Script entry point; exit status is 1 iff any errors were collected.
if __name__ == "__main__":  # pragma: no cover
    sys.exit(Main(sys.argv[1:]))
| {
"content_hash": "1e6b92d776477ecf9d9d81ac67a36db6",
"timestamp": "",
"source": "github",
"line_count": 720,
"max_line_length": 79,
"avg_line_length": 33.147222222222226,
"alnum_prop": 0.6338305539260873,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "20a6537990ac8330b87c65a27841ad087e220088",
"size": "24053",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "v8/tools/run_perf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
'''
@summary: Bootstrapper file to run rest_server and the Flask APIs associated with it.
Ensures all submodules are added to sys.path and configures all runtime settings.
Note: app instantiation is at module level within the project dir: api/__init__.py
@author devopsec
'''
import os
from api import app, db ,API
from flask import make_response, jsonify #, g, url_for ,abort, request
from sqlalchemy_utils import database_exists, create_database
# import endpoints #
from api.auth.endpoint import userAuth, userAuthToken
from api.user.endpoint import manageUser, manageUserList
from api.agent.endpoint import piController, manageAgent, manageAgentList
from api.company.endpoint import manageCompany, manageCompanyList
from api.asset.endpoint import manageAssets
from api.metron.endpoint import metronThreats
from api.notification.endpoint import manageNotifications
from api.facial.endpoint import manageFacial, manageFacialRepo, manageFacialSearch
#from api.metron_data.endpoint import asset_discovery, threat_intel
# Register REST endpoints with the Flask-RESTful API object.

# Notifications, threat feed and assets.
API.add_resource(manageNotifications, '/api/notifications/email', '/api/notifications/sms', '/api/notifications/alert')
API.add_resource(metronThreats, '/api/metron/threats/<string:_device_>')
API.add_resource(manageAssets, '/api/assets/<string:_device_>')
#TODO: finish asset puts and deletes, determine if we want data in both db's or just hbase
## API.add_resource(manageAssets, '/api/assets', '/api/assets/<string:_company_name_>/<string:_sites_>/<string:_asset_ip_>') #'/api/assets/<string:_company_name_>', '/api/assets/<string:_company_name_>/<string:_sites_>'
# Pi controller and agent management.
API.add_resource(piController, '/api/picontroller/time', '/api/picontroller/<string:_mac_address_>')
API.add_resource(manageAgent, '/api/agent/<string:_mac_address_>')
API.add_resource(manageAgentList, '/api/agent/list')
# Authentication and user management.
API.add_resource(userAuth, '/api/auth/<string:_username>/<string:_password>')
API.add_resource(userAuthToken, '/api/auth')
API.add_resource(manageUser, '/api/user/<string:_username_>')
API.add_resource(manageUserList, '/api/user/list')
# Company, sites and points of contact.
API.add_resource(manageCompany, '/api/company/<string:_company_name_>')
API.add_resource(manageCompanyList, '/api/company', '/api/company/sites', '/api/company/<string:_company_name_>/sites',
                 '/api/company/poc', '/api/company/<string:_company_name_>/poc')
# Facial recognition endpoints.
API.add_resource(manageFacial, '/api/facial')
API.add_resource(manageFacialRepo, '/api/facial/images/<string:_customerID_>/repo/<string:_fileName_>')
API.add_resource(manageFacialSearch, '/api/facial/search/<string:_customerID_>/<string:_userID_>', '/api/facial/search/<string:_customerID_>/<string:_userID_>/<string:_fileName_>')
#api.add_resource(manageNotifications, '/api/notification', '/api/notification/<string:_username_>')
#api.add_resource(threat_intel, '/api/metron_data/threat_intel', '/api/metron_data/threat_intel/<string:_company_name_>', '/api/metron_data/threat_intel/<string:_company_name_>/<string:_sites_>')
if __name__ == '__main__':
    # NOTE(review): hard-coded DB credentials and debug=True while bound to
    # 0.0.0.0 are development-only settings; do not deploy as-is.
    if not database_exists('mysql://tmp:tmp@127.0.0.1/tmp'):
        create_database('mysql://tmp:tmp@127.0.0.1/tmp')
    # Create any missing tables for the SQLAlchemy models.
    db.create_all()

    app.run(host='0.0.0.0', port=7777, debug=True)
| {
"content_hash": "fce4d70d59d350c5ba535461a7f3d58e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 219,
"avg_line_length": 57.18181818181818,
"alnum_prop": 0.7516693163751987,
"repo_name": "flyballlabs/threatdetectionservice",
"id": "31d0c83416380310dd2619fe627bfbe5dd3adeb5",
"size": "3167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/rest_server_deprecated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5616"
},
{
"name": "HTML",
"bytes": "33418"
},
{
"name": "JavaScript",
"bytes": "16629"
},
{
"name": "Python",
"bytes": "1371503"
},
{
"name": "Shell",
"bytes": "35872"
}
],
"symlink_target": ""
} |
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The project root (one level up) is added so autodoc can import the package
# without installing it.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',      # pull in docstrings from the package
    'sphinx.ext.intersphinx',  # cross-reference other projects' docs
    'sphinx.ext.coverage',     # report undocumented objects
    'sphinx.ext.viewcode',     # link to highlighted source code
    'sphinx.ext.graphviz',     # render graphviz diagrams
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE: `project` also names the package directory used below to locate
# about.py for the release version.
project = u'pyscreenshot'
copyright = u'2015, ponty'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0'
# The full version, including alpha/beta/rc tags.
# about.py defines __version__; execute it instead of importing the package
# (which would run package __init__ side effects).
__version__ = None
# Use a context manager so the file handle is closed promptly instead of
# leaking it until garbage collection.
with open(os.path.join('..', project, 'about.py')) as _about_file:
    exec(_about_file.read())
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']  # never recurse into the Sphinx output dir
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'  # code-highlighting color scheme
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'  # builtin Sphinx theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # extra assets copied into the HTML build
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyscreenshotdoc'  # becomes e.g. pyscreenshotdoc.hhp
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {  # all LaTeX settings kept at Sphinx defaults
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'pyscreenshot.tex', u'pyscreenshot Documentation',
     u'ponty', 'manual'),  # single PDF built from the master doc
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyscreenshot', u'pyscreenshot Documentation',
     [u'ponty'], 1)  # manual section 1: user commands
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pyscreenshot', u'pyscreenshot Documentation',
     u'ponty', 'pyscreenshot', 'One line description of project.',
     'Miscellaneous'),  # description is still the sphinx-quickstart placeholder
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Cross-reference the Python standard library docs.  The named-key form is
# the current intersphinx format; the bare-URL-key form used before is
# deprecated (Sphinx converts it with a warning).
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
| {
"content_hash": "286afefbd0717adf9db7eaa170ce5b1d",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 31.625,
"alnum_prop": 0.704792490118577,
"repo_name": "daodaoliang/pyscreenshot",
"id": "b4319858e6344a71ed14d56b7838d11fc6c7e1eb",
"size": "8521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "30137"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import uuid
import datetime
from passlib.apps import custom_app_context as pwd_context
# Emitted at module import time (i.e. when the Lambda container first loads).
print('Loading function')
def lambda_handler(event, context):
    """Sign-up handler: hash the password and insert a new DynamoDB user row.

    Expects ``event["user"]`` to be a dict carrying ``username``,
    ``password`` and ``name``.  Returns an error-message string when the
    username already exists; otherwise ``None`` (including when required
    fields are missing or falsy, matching the original behaviour).
    """
    # Guard clauses replace the original nested ifs; a missing "user" key
    # still raises KeyError, exactly as before.
    if not event["user"]:
        return None
    user = event["user"]
    if not (user["username"] and user["password"] and user["name"]):
        return None
    username = user["username"]
    # Random per-user secret stored alongside the account record.
    secret = str(uuid.uuid4())
    # passlib 1.7 renamed encrypt() to hash(); encrypt is a deprecated alias.
    hashedPassword = pwd_context.hash(user["password"])
    signUpDate = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
    item = {
        'username': username,
        'secret': secret,
        'password': hashedPassword,
        'sign_up_date': signUpDate,
        'name': user["name"],
    }
    messageTable = boto3.resource('dynamodb').Table('auth_user')
    try:
        print("Attempting to insert user with username : " + username)
        # Conditional put makes the insert atomic: it fails instead of
        # silently overwriting an existing row with the same username.
        messageTable.put_item(
            Item=item,
            ConditionExpression=boto3.dynamodb.conditions.Attr("username").not_exists(),
        )
    except ClientError as e:
        if e.response['Error']['Code'] == "ConditionalCheckFailedException":
            errorMessage = e.response['Error']['Message'] + " - that user already exists"
            print(errorMessage)
            return errorMessage
        else:
            raise
| {
"content_hash": "b196d4a39d7ab238e6b0d5a24631ca15",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 129,
"avg_line_length": 43.42424242424242,
"alnum_prop": 0.5938590369853455,
"repo_name": "austindlawless/cudas",
"id": "96d59a3e447d2ec662616fd5853ebabafa1a40d3",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambda/signup/lambda_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4137"
},
{
"name": "HTML",
"bytes": "13044"
},
{
"name": "JavaScript",
"bytes": "12026"
},
{
"name": "Python",
"bytes": "2651169"
}
],
"symlink_target": ""
} |
from getpass import getpass
from . import gpt
from . import mbr
from ...utils import print_error, print_warning, query_yes_no, system
from ...config import usr_cfg, get_logger
logger = get_logger(__name__)
def partition():
    """Encrypt and partition the configured drive.

    Prompts for a LUKS passphrase, builds the partition table (GPT or MBR),
    creates an LVM layout (optional swap, a tmp LV and a root LV), formats
    the encrypted root with the configured filesystem, and mounts the
    result under /mnt.
    """
    print_warning("WARNING! This will encrypt {0}".format(usr_cfg['drive']))
    if not query_yes_no("> Continue?", 'no'):
        return
    # Ask until the user types the same passphrase twice.
    pass_set = False
    while not pass_set:
        passwd = getpass(f'> Please enter a new password for {usr_cfg["drive"]}: ')
        passwd_chk = getpass("> Confirm password: ")
        if passwd == passwd_chk:
            pass_set = True
        else:
            print_error("Passwords do not match.")
        del passwd_chk
    # Partition table first: GPT (UEFI or BIOS variant) or plain MBR.
    if usr_cfg['gpt']:
        if usr_cfg['uefi']:
            gpt.uefi()
        else:
            gpt.non_uefi()
    else:
        mbr.format()
    system("wipefs -afq /dev/{0}".format(usr_cfg['root']))
    system("wipefs -afq /dev/{0}".format(usr_cfg['boot']))
    # LVM physical volume + volume group named "lvm" on the root partition.
    system("lvm pvcreate /dev/{0}".format(usr_cfg['root']))
    system("lvm vgcreate lvm /dev/{0}".format(usr_cfg['root']))
    if usr_cfg['swap_space']:
        system("lvm lvcreate -L {0} -n swap lvm ".format(usr_cfg['swap_space']))
    system("lvm lvcreate -L 500M -n tmp lvm")
    system("echo -e 'y' | lvm lvcreate -l 100%FREE -n lvroot lvm")
    # NOTE(review): the passphrase is interpolated into a shell command, so
    # it is briefly visible in the process list and breaks on shell
    # metacharacters; consider feeding it via stdin with subprocess instead
    # -- TODO confirm before changing, behaviour kept as-is here.
    system(f'printf {passwd} | cryptsetup luksFormat -c aes-xts-plain64 -s 512 /dev/lvm/lvroot -')
    system(f'printf {passwd} | cryptsetup open --type luks /dev/lvm/lvroot root -')
    del passwd
    system("wipefs -afq /dev/mapper/root")
    # jfs/reiserfs mkfs prompt for confirmation, so feed them a "y".
    if usr_cfg['filesystem'] in ('jfs', 'reiserfs'):
        system(f'echo -e "y" | mkfs.{usr_cfg["filesystem"]} /dev/mapper/root')
    else:
        system('mkfs.{0} /dev/mapper/root'.format(usr_cfg['filesystem']))
    # Boot partition: FAT32 for UEFI systems, ext4 otherwise.
    if usr_cfg['uefi']:
        system("mkfs.vfat -F32 /dev/{0}".format(usr_cfg['boot']))
    else:
        system("wipefs -afq /dev/{0}".format(usr_cfg['boot']))
        system("mkfs.ext4 /dev/{0}".format(usr_cfg['boot']))
    system("mount /dev/mapper/root /mnt")
    system("mkdir -p /mnt/boot")
    system("mount /dev/{0} /mnt/boot".format(usr_cfg['boot']))
| {
"content_hash": "3e9f46ed4109bba0d9e29abea849f27e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 98,
"avg_line_length": 34.22222222222222,
"alnum_prop": 0.5936920222634509,
"repo_name": "ArchStrike/archstrike-installer",
"id": "0f3ad5c9e1400283365284fbc7962b67d513412a",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asinstaller/partitions/encrypted/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62688"
}
],
"symlink_target": ""
} |
import unittest
from shapely import prepared
from shapely import geometry
class PreparedGeometryTestCase(unittest.TestCase):
    """Behavioural checks for shapely.prepared.PreparedGeometry."""

    def test_prepared(self):
        # A prepared unit square should answer point-containment queries.
        square = geometry.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        prep = prepared.PreparedGeometry(square)
        self.assertTrue(prep.contains(geometry.Point(0.5, 0.5)))
        self.assertFalse(prep.contains(geometry.Point(0.5, 1.5)))

    def test_op_not_allowed(self):
        # Prepared geometries may not take part in constructive operations.
        prep = prepared.PreparedGeometry(geometry.Point(0.0, 0.0).buffer(1.0))
        self.assertRaises(ValueError, geometry.Point(0.0, 0.0).union, prep)

    def test_predicate_not_allowed(self):
        # Nor may they be the argument of a predicate on a plain geometry.
        prep = prepared.PreparedGeometry(geometry.Point(0.0, 0.0).buffer(1.0))
        self.assertRaises(ValueError, geometry.Point(0.0, 0.0).contains, prep)
def test_suite():
    """Return the unittest suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(PreparedGeometryTestCase)
| {
"content_hash": "397b5cf75eca1a0019812d9a7dbb949d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 34.964285714285715,
"alnum_prop": 0.6189989785495403,
"repo_name": "aaronr/shapely",
"id": "0ec947d19e4a875f1f1e9cc122faad23e7a20d7f",
"size": "979",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "shapely/tests/test_prepared.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
ODE and DAE routines
====================
dae -- Solve DAE systems
ode -- Solve ODE systems, an extension to the scipy.integrate API
Interface to numerical integrators of DAE/ODE systems.
* Via the dae class:
ida -- The sundials IDA general dae solver. BDF method or Adams
method, ideal for stiff problems
ddaspk -- General dae solver in Fortran present for hystorical and
comparison reasons (use ida instead)
lsodi -- General dae solver in Fortran present for hystorical and
comparison reasons (use ida instead)
* Via the ode class:
cvode -- The sundials CVODE general ode solver. BDF method or Adams
method, ideal for stiff problems
dopri5 -- Runge Kutta solver, ideal for non-stiff problems
Note: as part of scipy.integrate:
=================================
odeint -- General integration of ordinary differential equations.
ode -- Integrate ode
"""
postpone_import = 1
| {
"content_hash": "b4292ce218d15a083199e9ded27722c1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 33.40625,
"alnum_prop": 0.598690364826941,
"repo_name": "logicabrity/odes",
"id": "dfc2a886abafc6479bfaabb49901d1da83f3f5c3",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scikits/odes/info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6697"
},
{
"name": "C",
"bytes": "1033052"
},
{
"name": "C++",
"bytes": "46252"
},
{
"name": "FORTRAN",
"bytes": "415025"
},
{
"name": "Makefile",
"bytes": "6754"
},
{
"name": "Python",
"bytes": "366845"
}
],
"symlink_target": ""
} |
'''
Code for hooking import address tables by making them invalid
pointers and catching the exceptions...
'''
import PE
import vtrace
import vtrace.watchpoints as vt_watchpoints
class IatHook(vt_watchpoints.Watchpoint):
    """
    Abuse the PageWatch subsystem so a frobbed IAT function pointer behaves
    like a breakpoint: calls through the slot fault into a fake page and we
    catch the resulting event.
    """
    # Next fake page address to hand out; bumped one page per hook.
    newptr = 0xfbfbf000

    def __init__(self, ptraddr, iatname):
        bogus = IatHook.newptr
        IatHook.newptr += 4096 # FIXME race... sigh...
        vt_watchpoints.Watchpoint.__init__(self, bogus)
        self.ptraddr = ptraddr
        self.fakeptr = bogus
        self.iatname = iatname
        self.origptr = None

    def getName(self):
        # The IAT slot name is more descriptive than the default name.
        return self.iatname

    def resolveAddr(self, trace, addr):
        # The fake address never needs resolving.
        pass

    def activate(self, trace):
        # Save the real pointer once, then swap in the fake page address.
        if self.origptr is None:
            self.origptr = trace.readMemoryFormat(self.ptraddr, '<P')[0]
        trace.writeMemoryFormat(self.ptraddr, '<P', self.fakeptr)

    def deactivate(self, trace):
        # Put the real function pointer back if we ever captured it.
        if self.origptr is not None:
            trace.writeMemoryFormat(self.ptraddr, '<P', self.origptr)

    def notify(self, event, trace):
        # We have to fake out the program counter...
        trace.setProgramCounter(self.origptr)
        trace.setCurrentSignal(None)
        return vt_watchpoints.Watchpoint.notify(self, event, trace)
def hookIat(trace, libname, implib='*', impfunc='*', fast=False):
    '''
    Hook the IAT with special "breakpoint" like objects which
    handle the memory access errors and document the calls...

    Args:
        trace: the vtrace trace whose memory holds the loaded library.
        libname: expression naming the library whose IAT gets hooked.
        implib: import library filter, compared case-insensitively
            with the ".dll" suffix stripped ('*' matches all).
        impfunc: import function name filter ('*' matches all).
        fast: set True for the hooks to be "Fastbreak" breakpoints.

    This returns a list of (name, bpid) tuples...

    Example:
        for impname, bpid in hookIat(t, 'ws2_32'):
            t.setBreakpointCode(bpid, codestr)
    '''
    ret = []
    baseaddr = trace.parseExpression(libname)
    pe = PE.peFromMemoryObject(trace, baseaddr)
    # Normalize filters once; import names are compared lower-cased.
    implib = implib.lower()
    impfunc = impfunc.lower()
    # (the unused local `origs = {}` from the original was removed)
    for rva, ilib, ifunc in pe.getImports():
        ilib = ilib.lower().replace('.dll', '')
        if implib != '*' and ilib != implib:
            continue
        if impfunc != '*' and ifunc.lower() != impfunc:
            continue
        iatname = '%s.%s.%s' % (libname, ilib, ifunc)
        wp = IatHook(baseaddr + rva, iatname)
        wp.fastbreak = fast
        bpid = trace.addBreakpoint(wp)
        ret.append( (iatname, bpid) )
    return ret
| {
"content_hash": "17d9a1fe311ae5ebc6b7a9435e14e493",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 72,
"avg_line_length": 29.127906976744185,
"alnum_prop": 0.6227544910179641,
"repo_name": "cmaruti/vivisect",
"id": "a735a4a210eb6f7d192ec50e74d5e31179149e87",
"size": "2505",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "vtrace/tools/iathook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "17699753"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
} |
from corehq.apps.users.models import CouchUser
from django.contrib.auth.forms import AuthenticationForm
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
class EmailAuthenticationForm(AuthenticationForm):
    """Authentication form that uses an e-mail address as the username."""
    username = forms.EmailField(label=_("E-mail"), max_length=75)

    def clean_username(self):
        # Lower-case the address so lookups are case-insensitive.
        return self.cleaned_data['username'].lower()
class CloudCareAuthenticationForm(EmailAuthenticationForm):
    """Same e-mail based login, but the field is labelled "Username"."""
    username = forms.EmailField(label=_("Username"), max_length=75)
| {
"content_hash": "8ea4717b203a7230c0b0e5a4aacbaaed",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 37.25,
"alnum_prop": 0.7785234899328859,
"repo_name": "gmimano/commcaretest",
"id": "77abf9c401262af0db1479f9046915e4dcad1377",
"size": "596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/hqwebapp/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
} |
from runner.koan import *
class Proxy:
    """Transparent wrapper that records every attribute name accessed or
    assigned on the wrapped object while forwarding the operation to it.
    """
    # Attributes belonging to the proxy itself; these bypass recording.
    # (The original local list carried '_messages' twice — deduplicated.)
    _SELF_ATTRS = ('_messages', '_obj', 'was_called')

    def __init__(self, target_object):
        self._messages = []
        # Initialize '_obj' attribute last so __getattr__/__setattr__ can
        # rely on '_messages' already existing.  Trust me on this!
        self._obj = target_object

    def __getattr__(self, attr_name):
        # Only called when normal lookup fails, i.e. for attributes of the
        # wrapped object: record the access and forward it.
        self._messages.append(attr_name)
        return getattr(self._obj, attr_name)

    def __setattr__(self, attr_name, value):
        if attr_name in self._SELF_ATTRS:
            object.__setattr__(self, attr_name, value)
        else:
            self._messages.append(attr_name)
            setattr(self._obj, attr_name, value)

    def messages(self):
        """Return the recorded attribute names, in access order."""
        return self._messages

    def was_called(self, attr_name):
        """True if *attr_name* was ever accessed or assigned."""
        return attr_name in self._messages

    def number_of_times_called(self, attr_name):
        """Count how many times *attr_name* was accessed or assigned."""
        return len([x for x in self._messages if x == attr_name])
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
    """Koans exercising the Proxy wrapper's recording behaviour."""

    def test_proxy_method_returns_wrapped_object(self):
        # NOTE: The Television class is defined below
        tv = Proxy(Television())
        self.assertTrue(isinstance(tv, Proxy))

    def test_tv_methods_still_perform_their_function(self):
        tv = Proxy(Television())
        tv.channel = 10
        tv.power()
        self.assertEqual(10, tv.channel)
        self.assertTrue(tv.is_on())

    def test_proxy_records_messages_sent_to_tv(self):
        tv = Proxy(Television())
        tv.power()
        tv.channel = 10
        self.assertEqual(['power', 'channel'], tv.messages())

    def test_proxy_handles_invalid_messages(self):
        tv = Proxy(Television())
        # The unused local `ex = None` was removed; assertRaises does it all.
        with self.assertRaises(AttributeError):
            tv.no_such_method()

    def test_proxy_reports_methods_have_been_called(self):
        tv = Proxy(Television())
        tv.power()
        tv.power()
        self.assertTrue(tv.was_called('power'))
        self.assertFalse(tv.was_called('channel'))

    def test_proxy_counts_method_calls(self):
        tv = Proxy(Television())
        tv.power()
        tv.channel = 48
        tv.power()
        self.assertEqual(2, tv.number_of_times_called('power'))
        self.assertEqual(1, tv.number_of_times_called('channel'))
        self.assertEqual(0, tv.number_of_times_called('is_on'))

    def test_proxy_can_record_more_than_just_tv_objects(self):
        proxy = Proxy("Py Ohio 2010")
        result = proxy.upper()
        self.assertEqual("PY OHIO 2010", result)
        result = proxy.split()
        self.assertEqual(["Py", "Ohio", "2010"], result)
        self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television:
    """Minimal TV model used as the proxy target in the koans above."""

    def __init__(self):
        self._channel = None
        self._power = None

    @property
    def channel(self):
        """The currently tuned channel (whatever value was assigned)."""
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    def power(self):
        """Toggle power between 'on' and 'off'; the set starts off."""
        self._power = 'off' if self._power == 'on' else 'on'

    def is_on(self):
        """Return True while the set is powered on."""
        return self._power == 'on'
# Tests for the Television class. All of theses tests should pass.
class TelevisionTest(Koan):
    """Sanity checks for the Television stand-in; all should pass as-is."""

    def test_it_turns_on(self):
        unit = Television()
        unit.power()
        self.assertTrue(unit.is_on())

    def test_it_also_turns_off(self):
        unit = Television()
        unit.power()
        unit.power()
        self.assertFalse(unit.is_on())

    def test_edge_case_on_off(self):
        # An odd number of toggles leaves the set on; one more turns it off.
        unit = Television()
        for _ in range(3):
            unit.power()
        self.assertTrue(unit.is_on())
        unit.power()
        self.assertFalse(unit.is_on())

    def test_can_set_the_channel(self):
        unit = Television()
        unit.channel = 11
        self.assertEqual(11, unit.channel)
| {
"content_hash": "b8a356cd442379e5c9e9ca145806ab68",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 70,
"avg_line_length": 26.145569620253166,
"alnum_prop": 0.5792786250302591,
"repo_name": "erikld/Bobo",
"id": "c9732b3e1e44070a370d1d37ac347735ebfd8aad",
"size": "4833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_proxy_object_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "333000"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
def backpopulate_lines(apps, schema_editor):
    """Create a BootcampApplicationLine for each BootcampApplication"""
    BootcampApplication = apps.get_model("applications", "BootcampApplication")
    BootcampApplicationLine = apps.get_model("applications", "BootcampApplicationLine")
    # iterator() streams rows instead of loading every application at once.
    for application in BootcampApplication.objects.iterator():
        BootcampApplicationLine.objects.get_or_create(application=application)
class Migration(migrations.Migration):
    """Add the BootcampApplicationLine model and backpopulate one line per
    existing BootcampApplication."""
    dependencies = [
        ("applications", "0014_application_refund_status"),
    ]
    operations = [
        migrations.CreateModel(
            name="BootcampApplicationLine",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                ("updated_on", models.DateTimeField(auto_now=True)),
                (
                    "application",
                    # NOTE(review): unique=True is redundant on a OneToOneField
                    # (Django warning W342); harmless, left as generated.
                    models.OneToOneField(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="line",
                        to="applications.bootcampapplication",
                        unique=True,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        # Data step: forwards backpopulates lines; reverse is a no-op.
        migrations.RunPython(backpopulate_lines, migrations.RunPython.noop),
    ]
| {
"content_hash": "87665dcb14d65fba894dd73d99bfcdce",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 87,
"avg_line_length": 34.96078431372549,
"alnum_prop": 0.5255187885586091,
"repo_name": "mitodl/bootcamp-ecommerce",
"id": "209398b3e14d9c526f4bec69088ccc9cb12cbe8e",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/migrations/0015_bootcampapplicationline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "325"
},
{
"name": "Dockerfile",
"bytes": "998"
},
{
"name": "HTML",
"bytes": "70605"
},
{
"name": "JavaScript",
"bytes": "491664"
},
{
"name": "Procfile",
"bytes": "293"
},
{
"name": "Python",
"bytes": "1236492"
},
{
"name": "SCSS",
"bytes": "72463"
},
{
"name": "Shell",
"bytes": "7329"
}
],
"symlink_target": ""
} |
"""YouTube Playlist Reddit Bot
Usage:
$ python youtubeplaylistbot.py
You can also get help on all the command-line flags the program understands
by running:
$ python youtubeplaylistbot.py --help
"""
# Module metadata.
__author__ = 'Jon Minter (jdiminter@gmail.com)'
__version__ = '1.0a'
import time
import datetime
import sqlite3
import praw
import logging
import settings
import urllib
import socket
from pprint import pprint
import re
import httplib
import argparse
import httplib2
import os
import sys
import requests.exceptions
import apiclient.errors
from apiclient import discovery
from oauth2client import file
from oauth2client import client
from oauth2client import tools
SCRIPT_NAME = os.path.basename(__file__)
# SQLite database file plus the probe used to detect a fresh, schema-less DB.
SQLITE_FILENAME = 'youtubeplaylistbot.db'
SQLITE_CREATE_SCHEMA_TEST = "SELECT count(*) FROM sqlite_master WHERE type='table' AND name='reddit_submissions_processed'"
REDDIT_USER_AGENT = 'YoutubePlaylistBot by /u/jonmdev v 1.0a https://github.com/jonminter/youtubeplaylistbot'
# Catch-up mode makes the first pass pull a deeper submission backlog.
REDDIT_PLAY_CATCHUP = False
REDDIT_SUBMISSION_CATCHUP_LIMIT = 1000
REDDIT_SUBMISSION_LIMIT = 100
REDDIT_SUBMISSION_GET_TRY_LIMIT = 10
# Poll delays in seconds; the delay doubles on HTTP errors up to the max.
REDDIT_SLEEP_INTERVAL = 120
REDDIT_SLEEP_MAX_INTERVAL = 3600
GOOGLE_USER_AGENT = 'Reddit YoutubePlaylistBot'
GOOGLE_OAUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_OAUTH_REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
GOOGLE_OAUTH_SCOPE = 'https://www.googleapis.com/auth/youtube'
GOOGLE_OAUTH_TOKEN_VARIABLE = 'gdata_oauth2_token'
# Patterns covering the common YouTube URL shapes; each captures 'video_id'.
YOUTUBE_VIDEO_ID_REGEX_LIST = [
    re.compile(r'youtube(?:-nocookie)?\.com/watch[#\?].*?v=(?P<video_id>[^"\& ]+)'),
    re.compile(r'youtube(?:-nocookie)?\.com/embed/(?P<video_id>[^"\&\? ]+)'),
    re.compile(r'youtube(?:-nocookie)?\.com/v/(?P<video_id>[^"\&\? ]+)'),
    re.compile(r'youtube(?:-nocookie)?\.com/\?v=(?P<video_id>[^"\& ]+)'),
    re.compile(r'youtu\.be/(?P<video_id>[^"\&\? ]+)'),
    re.compile(r'gdata\.youtube\.com/feeds/api/videos/(?P<video_id>[^"\&\? ]+)')
]
# Opens the connection to the SQLite3 database and tests to see if the schema needs to be
# created or updated and performs queries to update the schema if neccessary
def get_db_connection():
    """Open the bot's SQLite database, creating the schema on first use.

    Probes sqlite_master for the 'reddit_submissions_processed' table and
    runs schema.sql when it is absent.  Returns the open
    sqlite3.Connection.
    """
    db_connection = sqlite3.connect(SQLITE_FILENAME)
    # The probe is a COUNT(*) query, which ALWAYS yields exactly one row,
    # so the original "fetchone() is None" test could never fire and the
    # schema was never created.  Inspect the returned count instead.
    row = db_connection.execute(SQLITE_CREATE_SCHEMA_TEST).fetchone()
    if row is None or row[0] == 0:
        # Context manager closes schema.sql (the original leaked the handle).
        with open('schema.sql') as schema_file:
            db_connection.cursor().executescript(schema_file.read())
    return db_connection
# Adds the specified video to the playlist using the YouTube Data API object
def add_video_to_playlist(yt_service, playlist_id, video_id):
    """Append *video_id* to *playlist_id* through the YouTube Data API.

    Returns the API response on success, or False when an HTTP-level
    error occurs (the error is logged as a warning).
    """
    request_body = {
        'snippet': {
            'playlistId': playlist_id,
            'resourceId': {
                'kind': 'youtube#video',
                'videoId': video_id
            }
            #'position': 0
        }
    }
    try:
        return yt_service.playlistItems().insert(
            part="snippet",
            body=request_body
        ).execute()
    except (IOError,httplib.HTTPException,apiclient.errors.HttpError) as e:
        logging.warning("Http error occurred when trying to add video '" + video_id + "' to playlist '" + playlist_id + "'. Message: " + str(e))
        return False
# Parses a YouTube URL and retrieves the Video ID from that URL.
# Uses a set of regular expressions to parse many different forms of YouTube video URLs.
def get_youtube_video_id_from_url(url):
    """Return the video id embedded in a YouTube URL, or None when no
    known URL form matches."""
    for pattern in YOUTUBE_VIDEO_ID_REGEX_LIST:
        found = pattern.search(url)
        if found is not None:
            return found.group('video_id')
    return None
# Method to run the logic for the bot. Connects to the Reddit API and periodically polls the API to get the latest submissions to the subreddits
# that the bot is watching. It loops through those submissions and for the ones that have not been processed yet if they are links to youtube
# it adds the videos to a playlist.
def run_bot(yt_service):
    """Poll the configured subreddits forever, adding YouTube links to a playlist.

    Each new submission not yet recorded in the local SQLite database is
    checked for a YouTube URL; found videos are appended to the configured
    playlist via ``yt_service`` and the submission is marked processed.
    On HTTP errors the poll delay doubles up to REDDIT_SLEEP_MAX_INTERVAL
    and resets after the next successful request.
    """
    db_connection = get_db_connection()
    db_cursor = db_connection.cursor()
    r = praw.Reddit(REDDIT_USER_AGENT)
    r.login(settings.reddit['username'], settings.reddit['password'])
    first_pass = True
    play_catchup = REDDIT_PLAY_CATCHUP
    current_sleep_interval = REDDIT_SLEEP_INTERVAL
    while True:
        pass_start_time = time.time()
        # Combine all watched subreddits into one multireddit query string.
        multireddit = '+'.join(settings.reddit['subreddits'])
        subreddit = r.get_subreddit(multireddit)
        # The first pass may pull a deeper backlog when catch-up is enabled.
        current_pull_limit = REDDIT_SUBMISSION_CATCHUP_LIMIT if first_pass and play_catchup else REDDIT_SUBMISSION_LIMIT
        first_pass = False
        try:
            for submission in subreddit.get_new(limit=current_pull_limit):
                # make sure the sleep interval is reset since we have a successful request
                current_sleep_interval = REDDIT_SLEEP_INTERVAL
                logging.debug('Submission -> ID: ' + submission.id + ', URL: ' + submission.url)
                sql_result = db_cursor.execute('SELECT COUNT(submission_id) FROM reddit_submissions_processed WHERE submission_id = ?', [submission.id])
                submission_processed = db_cursor.fetchone()
                if submission_processed[0] == 0:
                    logging.debug('Submission not processed yet')
                    is_youtube_link = False
                    youtube_video_id = get_youtube_video_id_from_url(submission.url)
                    add_video_success = False
                    if youtube_video_id:
                        is_youtube_link = True
                        logging.debug('YouTube Video ID: ' + youtube_video_id)
                        add_video_result = add_video_to_playlist(yt_service, settings.google['youtube']['playlist_id'], youtube_video_id)
                        logging.debug('Add video result = ' + str(add_video_result));
                        if add_video_result != False:
                            add_video_success = True
                    else:
                        logging.debug('Not a YouTube link')
                    # Mark processed unless a YouTube add attempt failed, so
                    # failed adds are retried on a later pass.
                    if is_youtube_link == False or add_video_success == True:
                        db_cursor.execute("INSERT INTO reddit_submissions_processed (submission_id, url) values (?,?)", (submission.id, submission.url))
                        db_connection.commit()
                else:
                    logging.debug('Submission already processed')
        except (socket.error, requests.exceptions.HTTPError) as e:
            logging.error('HTTP error occurred trying to load reddit submissions: ' + str(e))
            # double the wait time every time we get an HTTP error until we hit the max wait interval
            # to prevent from continuing to hit the server frequently if it's down or busy
            # sleep interval will reset with a successful query
            if current_sleep_interval < REDDIT_SLEEP_MAX_INTERVAL:
                current_sleep_interval += current_sleep_interval
            logging.debug('Waiting ' + str(datetime.timedelta(seconds=current_sleep_interval)) + ' to try next request')
        pass_total_time = time.time() - pass_start_time
        logging.debug('Pass through last ' + str(current_pull_limit) + ' submissions took ' + str(datetime.timedelta(seconds=pass_total_time)))
        time.sleep(current_sleep_interval)
# Setup logging
logging.basicConfig(filename=SCRIPT_NAME + '.log',level=settings.logging['level'])
logger = logging.getLogger()
logger.disabled = settings.logging['disabled']
# Parser for command-line arguments.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[tools.argparser])
# CLIENT_SECRETS is name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret. You can see the Client ID
# and Client secret on the APIs page in the Cloud Console:
# <https://cloud.google.com/console#/project/233647656699/apiui>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Set up a Flow object to be used for authentication.
# Add one or more of the following scopes. PLEASE ONLY ADD THE SCOPES YOU
# NEED. For more information on using scopes please see
# <https://developers.google.com/+/best-practices>.
# The flow below requests every YouTube scope the bot might need.
FLOW = client.flow_from_clientsecrets(CLIENT_SECRETS,
    scope=[
        'https://www.googleapis.com/auth/youtube',
        'https://www.googleapis.com/auth/youtube.readonly',
        'https://www.googleapis.com/auth/youtube.upload',
        'https://www.googleapis.com/auth/youtubepartner',
        'https://www.googleapis.com/auth/youtubepartner-channel-audit',
    ],
    message=tools.message_if_missing(CLIENT_SECRETS))
def main(argv):
    """Run the OAuth2 authorization flow, then start the bot loop."""
    # Parse the command-line flags.
    flags = parser.parse_args(argv[1:])
    # If the credentials don't exist or are invalid run through the native
    # client flow; the Storage object writes good credentials back to disk.
    credential_store = file.Storage('youtubeplaylistbot_credentials.dat')
    credentials = credential_store.get()
    if credentials is None or credentials.invalid:
        credentials = tools.run_flow(FLOW, credential_store, flags)
    # Authorize an httplib2.Http object with the good credentials.
    authorized_http = credentials.authorize(httplib2.Http())
    # Construct the service object for interacting with the YouTube Data API.
    service = discovery.build('youtube', 'v3', http=authorized_http)
    try:
        run_bot(service)
    except client.AccessTokenRefreshError:
        print ("The credentials have been revoked or expired, please re-run"
            "the application to re-authorize")
# For more information on the YouTube Data API you can visit:
#
# https://developers.google.com/youtube/v3
#
# For more information on the YouTube Data API Python library surface you
# can visit:
#
# https://developers.google.com/resources/api-libraries/documentation/youtube/v3/python/latest/
#
# For information on the Python Client Library visit:
#
# https://developers.google.com/api-client-library/python/start/get_started
# Script entry point: hand sys.argv to main() for flag parsing.
if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "20e1bf376dce173ea72df575083414e8",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 144,
"avg_line_length": 39.38912133891213,
"alnum_prop": 0.7262587635436584,
"repo_name": "jonminter/youtubeplaylistbot",
"id": "3e98ad7f9ce8021d369614a3a69522999512a4f3",
"size": "9547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youtubeplaylistbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9547"
}
],
"symlink_target": ""
} |
from wtforms import TextField, validators
from lib.forms import Form
class RegisterForm(Form):
    """Signup form: username, e-mail, and a double-entered password.

    Validation messages are user-facing and intentionally in Chinese.
    """
    # 3-12 chars, must start with a letter; letters/digits/underscore only.
    username = TextField('Username', [
        validators.Required(message = "必须填写用户名"),
        validators.Length(min = 3, message = "用户名长度过短(3-12个字符)"),
        validators.Length(max = 12, message = "用户名长度过长(3-12个字符)"),
        validators.Regexp("^[a-zA-Z][a-zA-Z0-9_]*$", message = "用户名格式错误(英文字母开头,数字,下划线构成)"),
    ])
    email = TextField('Email', [
        validators.Required(message = "必须填写Email"),
        validators.Length(min = 4, message = "Email长度有误"),
        validators.Email(message = "Email地址无效"),
    ])
    # 6-64 chars and must match the confirmation field.
    password = TextField('Password', [
        validators.Required(message = "必须填写密码"),
        validators.Length(min = 6, message = "密码长度过短(6-64个字符)"),
        validators.Length(max = 64, message = "密码长度过长(6-64个字符)"),
        validators.EqualTo('password_confirm', message='两次输入密码不一致'),
    ])
    password_confirm = TextField('Password_confirm')
class LoginForm(Form):
    """Login form: e-mail address plus password."""
    email = TextField('Email', [
        validators.Required(message = "必须填写Email"),
        validators.Length(min = 4, message = "Email长度有误"),
        validators.Email(message = "Email地址无效"),
    ])
    password = TextField('Password', [
        validators.Required(message = "必须填写密码"),
        validators.Length(min = 6, message = "密码长度过短(6-64个字符)"),
        validators.Length(max = 64, message = "密码长度过长(6-64个字符)"),
    ])
class ForgotPasswordForm(Form):
    """Password-reset request form: the account's username and e-mail."""
    username = TextField('Username', [
        validators.Required(message = "必须填写用户名"),
        validators.Length(min = 3, message = "用户名长度过短(3-12个字符)"),
        validators.Length(max = 12, message = "用户名长度过长(3-12个字符)"),
        validators.Regexp("^[a-zA-Z][a-zA-Z0-9_]*$", message = "用户名格式错误(英文字母开头,数字,下划线构成)"),
    ])
    email = TextField('Email', [
        validators.Required(message = "必须填写Email"),
        validators.Length(min = 4, message = "Email长度有误"),
        validators.Email(message = "Email地址无效"),
    ])
class SettingPasswordForm(Form):
    """Change-password form: current password plus the new one, entered twice."""
    password_old = TextField('Password_old', [
        validators.Required(message = "必须填写当前密码"),
        validators.Length(min = 6, message = "密码长度过短(6-64个字符)"),
        validators.Length(max = 64, message = "密码长度过长(6-64个字符)"),
    ])
    password = TextField('Password', [
        validators.Required(message = "必须填写新密码"),
        validators.Length(min = 6, message = "密码长度过短(6-64个字符)"),
        validators.Length(max = 64, message = "密码长度过长(6-64个字符)"),
        validators.EqualTo('password_confirm', message='两次输入密码不一致'),
    ])
    password_confirm = TextField('Password_confirm')
class SettingForm(Form):
    """Profile settings form; username and e-mail are displayed read-only.

    All other fields are optional; only the website URL is format-checked.
    """
    username = TextField('Username') # readonly
    email = TextField('Email') # readonly
    # Fix: the max-length message previously said "3-12" while min=2's
    # message said "2-12" — the bounds are 2-12, so both now agree.
    nickname = TextField('Nickname', [
        validators.Optional(),
        validators.Length(min = 2, message = "昵称长度过短(2-12个字符)"),
        validators.Length(max = 12, message = "昵称长度过长(2-12个字符)"),
    ])
    signature = TextField('Signature', [
        validators.Optional(),
    ])
    location = TextField('Location', [
        validators.Optional(),
    ])
    website = TextField('Website', [
        validators.Optional(),
        validators.URL(message = "请填写合法的URL地址(如:http://flower.com)")
    ])
    company = TextField('Company', [
        validators.Optional(),
    ])
    github = TextField('Github', [
        validators.Optional(),
    ])
    twitter = TextField('Twitter', [
        validators.Optional(),
    ])
    douban = TextField('Douban', [
        validators.Optional(),
    ])
    self_intro = TextField('Self_intro', [
        validators.Optional(),
    ])
| {
"content_hash": "20ddae435d04057db3ef005cd7273b05",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 91,
"avg_line_length": 35.254901960784316,
"alnum_prop": 0.6112347052280311,
"repo_name": "Angel-fund/skycity",
"id": "738817b0755e4d8199bed6e0d725a9c60e238553",
"size": "4366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "463627"
},
{
"name": "JavaScript",
"bytes": "2269019"
},
{
"name": "Python",
"bytes": "133768"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Likes.image; add Performer.image with a placeholder default URL."""
    dependencies = [
        ('events', '0004_auto_20160329_0405'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='likes',
            name='image',
        ),
        migrations.AddField(
            model_name='performer',
            name='image',
            # Placeholder image shown until a real URL is stored.
            field=models.URLField(default='/static/images/noimage.png'),
        ),
    ]
| {
"content_hash": "4145a0d21ec80cdb221c762e94315df5",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 22.727272727272727,
"alnum_prop": 0.566,
"repo_name": "EricZaporzan/evention",
"id": "bd9c735a440a74b37a6746e385ae79bceced0884",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evention/events/migrations/0005_auto_20160329_0413.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2356"
},
{
"name": "HTML",
"bytes": "30156"
},
{
"name": "JavaScript",
"bytes": "16883"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "1595275"
},
{
"name": "Shell",
"bytes": "7661"
}
],
"symlink_target": ""
} |
import logging
import os, os.path
from collections import namedtuple
from random import shuffle
logger = logging.getLogger(__name__)
LookItem = namedtuple("LookItem", ["username", "top", "middle", "bottom"])
def iterate_subfolders(folder):
    """Yield the names of the immediate subdirectories of *folder*."""
    for entry in os.listdir(folder):
        if os.path.isdir(os.path.join(folder, entry)):
            yield entry
def iterate_files(image_folder):
    """Yield the names of the plain files directly inside *image_folder*."""
    for entry in os.listdir(image_folder):
        if os.path.isfile(os.path.join(image_folder, entry)):
            yield entry
class ImageStorage(object):
    """Access layer over a per-site directory of images grouped into
    type subfolders: ``<location>/<type>/<image files>``."""

    def __init__(self, site_name, location=None):
        self.site_name = site_name
        self.location = location

    def get_types(self):
        """Return the sorted list of type subfolder names."""
        logger.debug("location: === {}".format(self.location))
        return sorted(iterate_subfolders(self.location))

    def get_random_image_names(self, type_name, limit=None):
        """Return up to *limit* image file names of *type_name*, shuffled."""
        names = list(iterate_files(self._get_folder_for_type(type_name)))
        shuffle(names)
        return names[:limit]

    def get_random_image_paths(self, type_name, limit=None, prefix=None):
        """Like get_random_image_names but joined onto *prefix*
        (default: ``<site_name>/<type_name>``)."""
        base = prefix or os.path.join(self.site_name, type_name)
        return [
            os.path.join(base, name)
            for name in self.get_random_image_names(type_name, limit=limit)
        ]

    @classmethod
    def build_for(cls, site_name, static_image_path):
        """Alternate constructor: derive location from the static image root."""
        return cls(site_name, os.path.join(static_image_path, site_name))

    def _get_folder_for_type(self, type_name):
        return os.path.join(self.location, type_name)
def configure_flask_logger(flask_logger):
    """Attach a DEBUG-level file handler (look_viewer.log) to *flask_logger*
    and set the logger itself to INFO."""
    # TODO: use jsonlines for better parsing
    handler = logging.FileHandler("look_viewer.log")
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter(fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    flask_logger.addHandler(handler)
    flask_logger.setLevel(logging.INFO)
class LookLogParser(object):
    """Recognizes and decodes log lines of the form
    ``... <username>: user provided look: <top> + <middle> + <bottom>``."""

    look_pattern = "user provided look:"

    def contain_look(self, log_line):
        """True iff the line carries the marker followed by exactly three
        '+'-separated look parts."""
        if self.look_pattern not in log_line:
            return False
        tail = log_line.split(self.look_pattern)[-1]
        return len(tail.split("+")) == 3

    def parse(self, log_line):
        """Decode a look line into a LookItem; parts are whitespace-stripped."""
        user_info, look_info = log_line.split(self.look_pattern)
        top, middle, bottom = [part.strip() for part in look_info.split("+")]
        return LookItem(
            username=self._parse_username(user_info),
            top=top,
            middle=middle,
            bottom=bottom,
        )

    def _parse_username(self, user_info):
        # Username is the last whitespace-separated token, minus any colon.
        return user_info.strip().split()[-1].replace(":", "")
def parse_look_collection_from_log(log_file_path):
    """Lazily yield a LookItem for every look line found in the log file."""
    parser = LookLogParser()
    with open(log_file_path) as fin:
        for line in fin:
            if parser.contain_look(line):
                yield parser.parse(line)
| {
"content_hash": "3aa0730fa080da67f2e5b47f0b63e44e",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 93,
"avg_line_length": 31.046728971962615,
"alnum_prop": 0.6327513546056592,
"repo_name": "alex-33/look_viewer",
"id": "39d424990767b4f7fab3793d59a43d8fbca760be",
"size": "3322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "look_viewer/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6512"
},
{
"name": "Python",
"bytes": "7177"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorFuelCellWaterSupply
log = logging.getLogger(__name__)
class TestGeneratorFuelCellWaterSupply(unittest.TestCase):
    """Round-trip test: populate a GeneratorFuelCellWaterSupply object, save
    it to a temporary IDF file, re-load it, and check every field survives."""
    def setUp(self):
        # One fresh temp file per test; holds the serialized IDF.
        self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        os.remove(self.path)
    def test_create_generatorfuelcellwatersupply(self):
        pyidf.validation_level = ValidationLevel.error

        obj = GeneratorFuelCellWaterSupply()
        # Each pair below sets one IDF field; the comment names its IDD type.
        # alpha
        var_name = "Name"
        obj.name = var_name
        # object-list
        var_reformer_water_flow_rate_function_of_fuel_rate_curve_name = "object-list|Reformer Water Flow Rate Function of Fuel Rate Curve Name"
        obj.reformer_water_flow_rate_function_of_fuel_rate_curve_name = var_reformer_water_flow_rate_function_of_fuel_rate_curve_name
        # object-list
        var_reformer_water_pump_power_function_of_fuel_rate_curve_name = "object-list|Reformer Water Pump Power Function of Fuel Rate Curve Name"
        obj.reformer_water_pump_power_function_of_fuel_rate_curve_name = var_reformer_water_pump_power_function_of_fuel_rate_curve_name
        # real
        var_pump_heat_loss_factor = 4.4
        obj.pump_heat_loss_factor = var_pump_heat_loss_factor
        # alpha
        var_water_temperature_modeling_mode = "TemperatureFromAirNode"
        obj.water_temperature_modeling_mode = var_water_temperature_modeling_mode
        # node
        var_water_temperature_reference_node_name = "node|Water Temperature Reference Node Name"
        obj.water_temperature_reference_node_name = var_water_temperature_reference_node_name
        # object-list
        var_water_temperature_schedule_name = "object-list|Water Temperature Schedule Name"
        obj.water_temperature_schedule_name = var_water_temperature_schedule_name

        # Serialize without validation, echo the file to the debug log,
        # then re-parse and compare field by field.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        idf2 = IDF(self.path)
        self.assertEqual(idf2.generatorfuelcellwatersupplys[0].name, var_name)
        self.assertEqual(idf2.generatorfuelcellwatersupplys[0].reformer_water_flow_rate_function_of_fuel_rate_curve_name, var_reformer_water_flow_rate_function_of_fuel_rate_curve_name)
        self.assertEqual(idf2.generatorfuelcellwatersupplys[0].reformer_water_pump_power_function_of_fuel_rate_curve_name, var_reformer_water_pump_power_function_of_fuel_rate_curve_name)
        self.assertAlmostEqual(idf2.generatorfuelcellwatersupplys[0].pump_heat_loss_factor, var_pump_heat_loss_factor)
        self.assertEqual(idf2.generatorfuelcellwatersupplys[0].water_temperature_modeling_mode, var_water_temperature_modeling_mode)
        self.assertEqual(idf2.generatorfuelcellwatersupplys[0].water_temperature_reference_node_name, var_water_temperature_reference_node_name)
self.assertEqual(idf2.generatorfuelcellwatersupplys[0].water_temperature_schedule_name, var_water_temperature_schedule_name) | {
"content_hash": "e97047ee2cdd2dbb1ab2b86084c67638",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 186,
"avg_line_length": 50.725806451612904,
"alnum_prop": 0.7259141494435613,
"repo_name": "rbuffat/pyidf",
"id": "a0c80a75da044053c85d3745c10e6b872289be46",
"size": "3145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_generatorfuelcellwatersupply.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from events.orfik import models
def graph(green, total):
    """Render a 100px-wide HTML bar: green for the passed fraction
    (green/total), red for the remainder. With no attempts (total falsy)
    the bar is empty."""
    if not total:
        # No data yet: plain bordered box.
        return ('<div style="width: 100px; height: 10px; '
                'border:1px solid black"></div>')
    width = int(green * 100.0 / total)
    red_box = ('<div style="width: 100px; height: 10px; border: 1px solid '
               'black; background: red">')
    green_box = ('<div style="width:' + str(width) +
                 'px; height: 10px; background: green"></div>')
    return red_box + green_box + '</div>'
class QuestionAdmin(admin.ModelAdmin):
    # Change list shows the question number next to its string form.
    list_display = ['number', '__str__']
class PlayerAdmin(admin.ModelAdmin):
    """Admin change list for players, including an HTML bar visualizing the
    ratio of correct to total answer attempts."""
    list_display = ['nickname', 'max_level', 'last_solve', 'error_rate']
    list_filter = ['max_level']

    def error_rate(self, obj):
        """Returns the error rate of a player as a red/green HTML bar."""
        attempts = models.Attempt.objects.filter(player=obj).values('correct')
        total = 0.0
        correct = 0.0
        # Count total attempts and how many were correct.
        # (Fixed: removed a leftover debug print of the whole queryset.)
        for at in attempts:
            total += 1.0
            if at['correct']:
                correct += 1
        return graph(correct, total)
    error_rate.short_description = 'Correct/Wrong attempts'
    error_rate.allow_tags = True
# Expose the quiz models in the Django admin site.
admin.site.register(models.Attempt)
admin.site.register(models.Player, PlayerAdmin)
admin.site.register(models.Question, QuestionAdmin)
| {
"content_hash": "fe8931bbcf144a24dd963de3f5567f21",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 31.952380952380953,
"alnum_prop": 0.6192250372578242,
"repo_name": "compsoc-ssc/compsocssc",
"id": "54a54ee80be7cf9e97cfde0122098e6aadb5c8a9",
"size": "1342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/orfik/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33670"
},
{
"name": "HTML",
"bytes": "29750"
},
{
"name": "Python",
"bytes": "65006"
},
{
"name": "Shell",
"bytes": "755"
}
],
"symlink_target": ""
} |
from __future__ import print_function
module_level = 1
def defaultValueTest1(no_default, some_default_constant = 1):
    # Compiler-test fixture: constant default value, fixed at definition time.
    return some_default_constant
def defaultValueTest2(no_default, some_default_computed = module_level*2):
    # The default expression is evaluated once, when the def statement runs
    # (module_level is 1 here) -- not at call time.
    local_var = no_default
    return local_var, some_default_computed
def defaultValueTest3(no_default, func_defaulted = defaultValueTest1(module_level)):
    # Default comes from a function call executed at definition time.
    return [ func_defaulted for _i in range(8) ]
def defaultValueTest4(no_default, funced_defaulted = lambda x: x**2):
    # Lambda as a default (deliberately unused in the body); returns a
    # generator expression closing over the locals c and d.
    c = 1
    d = 1
    return ( i+c+d for i in range(8) )
def defaultValueTest5(no_default, tuple_defaulted = (1,2,3)):
    # Immutable tuple default.
    return tuple_defaulted
def defaultValueTest6(no_default, list_defaulted = [1,2,3]):
    # Mutable default: deliberately shared across calls, so every call
    # appends another 5 to the very same list object.
    list_defaulted.append(5)
    return list_defaulted
print(defaultValueTest1("ignored"))
# The change of the default variable doesn't influence the default
# parameter of defaultValueTest2, that means it's also calculated
# at the time the function is defined.
module_level = 7
print(defaultValueTest2("also ignored"))
print(defaultValueTest3("nono not again"))
print(list(defaultValueTest4("unused")))
print(defaultValueTest5("unused"))
# Two calls in a row to observe the shared mutable default growing.
print(defaultValueTest6("unused"), end = "")
print(defaultValueTest6("unused"))
print(defaultValueTest6.__defaults__)
# NOTE(review): func_defaults is the Python 2 attribute name; on Python 3
# this merely sets a plain attribute and __defaults__ stays unchanged --
# presumably this divergence is exactly what the test exercises. Confirm.
defaultValueTest6.func_defaults = ([1,2,3],)
print(defaultValueTest6.__defaults__)
print(defaultValueTest6(1))
| {
"content_hash": "cf874b4220b27fe9fde848752b72f19d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 84,
"avg_line_length": 27.9,
"alnum_prop": 0.7405017921146954,
"repo_name": "wfxiang08/Nuitka",
"id": "88c47c5f45eb6b22ef73fc3801a54821f53c57fd",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/basics/DefaultParameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "441058"
},
{
"name": "Python",
"bytes": "4431574"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TemplateMapper(Document):
    # Standard frappe DocType controller; no custom behavior is defined.
    pass
| {
"content_hash": "bd613ebc853e785758949213d7b97cab",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.8161764705882353,
"repo_name": "rohitwaghchaure/New_Theme_frappe",
"id": "08264fa7f222376535996b328fae32dba0c4c795",
"size": "259",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/template_mapper/template_mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87540"
},
{
"name": "HTML",
"bytes": "80049"
},
{
"name": "JavaScript",
"bytes": "1268762"
},
{
"name": "Python",
"bytes": "963309"
},
{
"name": "SQLPL",
"bytes": "9576"
}
],
"symlink_target": ""
} |
"""
Metrics API module: http://metrics-api.wikimedia.org/
Defines the API which exposes metrics on Wikipedia users. The metrics
are defined at https://meta.wikimedia.org/wiki/Research:Metrics.
"""
from user_metrics.utils import nested_import
from user_metrics.config import settings
from user_metrics.api.broker import FileBroker
from user_metrics.config import settings as conf
# File-system locations used to exchange request / response / process
# messages between the API front end and worker processes.
BROKER_HOME = conf.__data_file_dir__
REQUEST_BROKER_TARGET = BROKER_HOME + 'request_broker.txt'
RESPONSE_BROKER_TARGET = BROKER_HOME + 'response_broker.txt'
PROCESS_BROKER_TARGET = BROKER_HOME + 'process_broker.txt'
# Shared file-backed broker instance for the API.
umapi_broker_context = FileBroker()
# The query backend is configurable; import it dynamically from settings.
query_mod = nested_import(settings.__query_module__)
# Error codes for web requests, indexed by MetricsAPIError.error_code
# ###################################################################
error_codes = {
    -1: 'Metrics API HTTP request error.',
    0: 'Job already running.',
    1: 'Badly Formatted timestamp',
    2: 'Could not locate stored request.',
    3: 'Could not find User ID.',
    4: 'Bad metric name.',
    5: 'Failed to retrieve users.',
    6: 'Job is currently queued.',
}
class MetricsAPIError(Exception):
    """Base exception for UserMetric request processing.

    Carries an index into the module-level ``error_codes`` table describing
    the failure; defaults to -1 (generic HTTP request error).
    """

    def __init__(self, message="Error processing API request.",
                 error_code=-1):
        super(MetricsAPIError, self).__init__(message)
        self.error_code_index = error_code

    @property
    def error_code(self):
        """Index into ``error_codes`` for this failure."""
        return self.error_code_index
| {
"content_hash": "9b5768353c5dbad0f3f9171a259c3fe9",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 29.625,
"alnum_prop": 0.6751054852320675,
"repo_name": "wikimedia/analytics-user-metrics",
"id": "1a16895f4e3b2e20dbd9030ae8faf7bfe1d3e9ee",
"size": "1422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_metrics/api/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143697"
},
{
"name": "JavaScript",
"bytes": "58528"
},
{
"name": "Python",
"bytes": "342392"
},
{
"name": "Shell",
"bytes": "1835"
}
],
"symlink_target": ""
} |
from .proxy_only_resource import ProxyOnlyResource
class DiagnosticSettingsResource(ProxyOnlyResource):
    """The diagnostic setting resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Azure resource Id
    :vartype id: str
    :ivar name: Azure resource name
    :vartype name: str
    :ivar type: Azure resource type
    :vartype type: str
    :param storage_account_id: The resource ID of the storage account to which
     you would like to send Diagnostic Logs.
    :type storage_account_id: str
    :param event_hub_authorization_rule_id: The resource Id for the event hub
     authorization rule.
    :type event_hub_authorization_rule_id: str
    :param event_hub_name: The name of the event hub. If none is specified,
     the default event hub will be selected.
    :type event_hub_name: str
    :param metrics: the list of metric settings.
    :type metrics: list[~azure.mgmt.monitor.models.MetricSettings]
    :param logs: the list of logs settings.
    :type logs: list[~azure.mgmt.monitor.models.LogSettings]
    :param workspace_id: The workspace ID (resource ID of a Log Analytics
     workspace) for a Log Analytics workspace to which you would like to send
     Diagnostic Logs.
    :type workspace_id: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
        'event_hub_authorization_rule_id': {'key': 'properties.eventHubAuthorizationRuleId', 'type': 'str'},
        'event_hub_name': {'key': 'properties.eventHubName', 'type': 'str'},
        'metrics': {'key': 'properties.metrics', 'type': '[MetricSettings]'},
        'logs': {'key': 'properties.logs', 'type': '[LogSettings]'},
        'workspace_id': {'key': 'properties.workspaceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DiagnosticSettingsResource, self).__init__(**kwargs)
        # Copy each optional property from kwargs; missing ones become None.
        for _attr in ('storage_account_id', 'event_hub_authorization_rule_id',
                      'event_hub_name', 'metrics', 'logs', 'workspace_id'):
            setattr(self, _attr, kwargs.get(_attr))
| {
"content_hash": "d90715b8fdbc688fe6a9de3d298787ee",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 152,
"avg_line_length": 45.21311475409836,
"alnum_prop": 0.6555474981870921,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "09cbca388e28402d48c60d8a069e53dcf9c433e5",
"size": "3232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/models/diagnostic_settings_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`airflow.providers.amazon.aws.operators.s3_list`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.s3_list import S3ListOperator # noqa
# Warn at import time so callers migrate to the provider package path;
# stacklevel=2 points the warning at the importing module.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.operators.s3_list`.",
    DeprecationWarning,
    stacklevel=2,
)
| {
"content_hash": "9e9b2a44b67652b095ead196fb4e5e0e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 98,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.7590673575129534,
"repo_name": "sekikn/incubator-airflow",
"id": "b9eb3542287f5c4c7cd81707453310366da62f51",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/contrib/operators/s3_list_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import os
from importlib import resources
import emailer
def get_version():
    """Return the emailer package version as a string."""
    return '{}'.format(emailer.__version__)
def get_sample_config():
    """Return the text of the bundled sample-emailer.json config file."""
    return resources.read_text(emailer, 'sample-emailer.json')
def get_parser():
    """Build the emailer command-line parser.

    Covers config discovery, key selection, date override, logging
    verbosity, and several testing/inspection switches; the recipient
    groups (--active/--dryrun/--test) are mutually exclusive.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--config-dir',
                        default=os.getcwd(),
                        help='Directory containing config file. Default is '
                        'current working directory.')
    parser.add_argument('-k',
                        '--key-names',
                        nargs='*',
                        help='Key name(s) matching key(s) in the config.')
    parser.add_argument('--all-keys',
                        action='store_true',
                        help='Run for all available keys in config.')
    parser.add_argument('-d',
                        '--date',
                        default=datetime.date.today().isoformat(),
                        help='Date for which to send emails (YYYY-MM-DD). The '
                        'default is today.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Display more logging output')
    parser.add_argument('-V',
                        '--version',
                        action='store_true',
                        help='Print the current emailer module version and exit.')
    parser.add_argument('--sample-config',
                        action='store_true',
                        help='Print a sample config. Save as emailer.json or '
                        '.emailer.json and exit.')
    parser.add_argument('--skip-send',
                        action='store_true',
                        help='Avoid actually sending emails, useful for testing.')
    parser.add_argument('--save-sheet-to',
                        help='Save the sheet into the following JSON file.')
    parser.add_argument('--stdin',
                        action='store_true',
                        help='Use STDIN to get the sheet data, instead of '
                        'directly from Google Sheets.')
    parser.add_argument('--stdout-markdown',
                        action='store_true',
                        help='Print a JSON array of unhighlighted email,'
                        'still in markdown.')
    parser.add_argument('--stdout-email',
                        action='store_true',
                        help='Print a JSON array all highlighted email messages.')
    # Only one group can be specified per-run.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--active',
                       action='store_true',
                       help='Send emails to all active recipients.')
    group.add_argument('--dryrun',
                       action='store_true',
                       help='Send emails one day early to dryrun recipients.')
    group.add_argument('--test',
                       action='store_true',
                       help='Send emails only to test recipients.')
    return parser
def get_parsed_args(argv=None):
    """Parse *argv* (default: sys.argv) with the emailer argument parser."""
    parser = get_parser()
    return parser.parse_args(argv)
| {
"content_hash": "c6f1ff411edc6bc574261eb932355a9c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 39.40506329113924,
"alnum_prop": 0.5210407966591712,
"repo_name": "WhiteHalmos/emailer",
"id": "36fce99bfc3612eccef615029bfdc7c0ccc5c401",
"size": "3113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emailer/args.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "546"
},
{
"name": "Python",
"bytes": "58411"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 54cfab92f5a1
Revises: 968fa5ab793
Create Date: 2015-04-23 02:03:14.208399
"""
# revision identifiers, used by Alembic.
revision = '54cfab92f5a1'
down_revision = '968fa5ab793'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the team_user join table linking teams and users."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('team_user',
    sa.Column('team_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the upgrade by dropping the team_user join table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('team_user')
    ### end Alembic commands ###
| {
"content_hash": "22e5457c3679337366faf7fc52beb6cf",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 25.129032258064516,
"alnum_prop": 0.6623876765083441,
"repo_name": "philipschoemig/TACTourney",
"id": "404df54d6d37fbfc62303e1ea19eb4467b7d08ec",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/migrations/versions/54cfab92f5a1_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8273"
},
{
"name": "JavaScript",
"bytes": "23910"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "49303"
}
],
"symlink_target": ""
} |
from .tools import add_constant, categorical
from statsmodels import NoseWrapper as Tester
# Expose the package's nose-based test runner as `statsmodels.tools.test()`.
test = Tester().test
| {
"content_hash": "6a5e7786681db1e6d107f6baa386b78c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 28.25,
"alnum_prop": 0.8053097345132744,
"repo_name": "rgommers/statsmodels",
"id": "08bdbeb2a4c0a055119f02a2099ac10520f98a09",
"size": "113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/tools/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "12092"
},
{
"name": "CSS",
"bytes": "30159"
},
{
"name": "JavaScript",
"bytes": "16353"
},
{
"name": "Python",
"bytes": "7397890"
},
{
"name": "R",
"bytes": "21637"
},
{
"name": "Shell",
"bytes": "5232"
},
{
"name": "Stata",
"bytes": "16079"
}
],
"symlink_target": ""
} |
def msg():
    """Print the descriptor-package marker message.

    Fixes the typo "discriptor" -> "descriptor" and uses the function-call
    form of print, which behaves identically on Python 2 and 3 for a
    single argument.
    """
    print("descriptor")
| {
"content_hash": "171a153ff8fe788957c50bee94272a26",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 17,
"alnum_prop": 0.6176470588235294,
"repo_name": "haikentcode/haios",
"id": "357fbc1c86abb87eb683faebbae1c079a79a9fe4",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haios/descriptor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34898"
},
{
"name": "HTML",
"bytes": "3046"
},
{
"name": "Python",
"bytes": "33006"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import awesome_gans.modules as t
tf.set_random_seed(777) # reproducibility
class AdaGAN:
    """AdaGAN model built in TensorFlow 1.x graph mode: a DCGAN-style
    generator/discriminator pair (plus a classifier stub) for 28x28
    gray-scale images.

    NOTE(review): classifier() defines no layers, so c_loss stays 0.0 and
    c_op minimizes a constant over an empty variable list -- presumably
    unfinished; confirm before relying on the classifier path.
    """

    def __init__(
        self,
        s,
        batch_size=64,
        height=28,
        width=28,
        channel=1,
        n_classes=10,
        sample_num=64,
        sample_size=8,
        n_input=784,
        df_dim=16,
        gf_dim=16,
        fc_unit=256,
        z_dim=100,
        d_lr=1e-3,
        g_lr=5e-3,
        c_lr=1e-4,
    ):
        """
        # General Settings
        :param s: TF Session
        :param batch_size: training batch size, default 32
        :param height: input image height, default 28
        :param width: input image width, default 28
        :param channel: input image channel, default 1 (gray-scale)
        :param n_classes: input DataSet's classes
        # Output Settings
        :param sample_num: the number of output images, default 64
        :param sample_size: sample image size, default 8
        # Hyper Parameters
        :param n_input: input image size, default 784(28x28)
        :param df_dim: D net filter, default 16
        :param gf_dim: G net filter, default 16
        :param fc_unit: fully connected units, default 256
        # Training Option
        :param z_dim: z dimension (kinda noise), default 100
        :param d_lr: discriminator learning rate, default 1e-3
        :param g_lr: generator learning rate, default 5e-3
        :param c_lr: classifier learning rate, default 1e-4
        """
        self.s = s
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.channel = channel
        self.image_shape = [self.batch_size, self.height, self.width, self.channel]
        self.n_classes = n_classes
        self.sample_num = sample_num
        self.sample_size = sample_size
        self.n_input = n_input
        self.df_dim = df_dim
        self.gf_dim = gf_dim
        self.fc_unit = fc_unit
        self.z_dim = z_dim
        self.beta1 = 0.5
        self.d_lr = d_lr
        self.g_lr = g_lr
        self.c_lr = c_lr
        # Populated by build_adagan().
        self.d_loss = 0.0
        self.g_loss = 0.0
        self.c_loss = 0.0
        self.g = None
        self.d_op = None
        self.g_op = None
        self.c_op = None
        self.merged = None
        self.saver = None
        self.writer = None
        # Placeholder
        self.x = tf.placeholder(tf.float32, shape=[None, self.n_input], name="x-image")  # (-1, 784)
        self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim], name='z-noise')  # (-1, 100)
        self.build_adagan()  # build AdaGAN model

    def classifier(self, x, reuse=None):
        """Classifier network stub -- no layers defined yet."""
        with tf.variable_scope("classifier", reuse=reuse):
            pass

    def discriminator(self, x, reuse=None):
        """Two conv blocks (stride 2) -> dense logit; returns (prob, logits)."""
        with tf.variable_scope("discriminator", reuse=reuse):
            for i in range(1, 3):
                x = t.conv2d(x, self.df_dim * i, 5, 2, name='disc-conv2d-%d' % i)
                x = t.batch_norm(x, name='disc-bn-%d' % i)
                x = tf.nn.leaky_relu(x, alpha=0.3)
            x = t.flatten(x)
            logits = t.dense(x, 1, name='disc-fc-1')
            prob = tf.nn.sigmoid(logits)
        return prob, logits

    def generator(self, z, reuse=None, is_train=True):
        """Project z to 7x7 feature maps, upsample twice to 28x28, sigmoid out."""
        with tf.variable_scope("generator", reuse=reuse):
            x = t.dense(z, self.gf_dim * 7 * 7, name='gen-fc-1')
            x = t.batch_norm(x, name='gen-bn-1')
            x = tf.nn.leaky_relu(x, alpha=0.3)
            x = tf.reshape(x, [-1, 7, 7, self.gf_dim])
            for i in range(1, 3):
                x = t.deconv2d(x, self.gf_dim, 5, 2, name='gen-deconv2d-%d' % (i + 1))
                x = t.batch_norm(x, is_train=is_train, name='gen-bn-%d' % (i + 1))
                x = tf.nn.leaky_relu(x, alpha=0.3)
            x = t.deconv2d(x, 1, 5, 1, name='gen-deconv2d-3')
            x = tf.nn.sigmoid(x)
        return x

    def build_adagan(self):
        """Wire up generator/discriminator, losses, summaries, optimizers."""
        # Generator
        self.g = self.generator(self.z)
        # Discriminator
        d_real, _ = self.discriminator(self.x)
        d_fake, _ = self.discriminator(self.g, reuse=True)
        # Standard (non-saturating) GAN losses; every *_loss is minimized.
        d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        d_fake_loss = -tf.reduce_mean(t.safe_log(1.0 - d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        # BUG FIX: the generator maximizes log D(G(z)); since the optimizer
        # *minimizes* g_loss, the term must be negated. The previous
        # positive sign drove D(G(z)) toward 0 instead of 1.
        self.g_loss = -tf.reduce_mean(t.safe_log(d_fake))
        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("loss/c_loss", self.c_loss)
        # Optimizer: split variables by scope prefix
        # ('discriminator'/'generator'/'classifier' start with d/g/c).
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        c_params = [v for v in t_vars if v.name.startswith('c')]
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=self.beta1).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=self.beta1).minimize(
            self.g_loss, var_list=g_params
        )
        self.c_op = tf.train.AdamOptimizer(learning_rate=self.c_lr, beta1=self.beta1).minimize(
            self.c_loss, var_list=c_params
        )
        # Merge summary
        self.merged = tf.summary.merge_all()
        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
| {
"content_hash": "49d312ea49407b4b4a0d81e4de0fefb7",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 100,
"avg_line_length": 32.293103448275865,
"alnum_prop": 0.5495639793557573,
"repo_name": "kozistr/Awesome-GANs",
"id": "71b489356a9b6a4e2d8539e259760ad20b98179d",
"size": "5619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awesome_gans/adagan/adagan_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "301"
},
{
"name": "Python",
"bytes": "469452"
}
],
"symlink_target": ""
} |
from Module import AbstractModule
class Module(AbstractModule):
    """Betsy pipeline module: filter each input VCF down to SNPs or indels
    (per the 'vartype' attribute), writing results into out_path."""
    def __init__(self):
        AbstractModule.__init__(self)
    def run(
        self, network, in_data, out_attributes, user_options, num_cores,
        out_path):
        """Filter every .vcf under in_data into out_path in parallel;
        returns a metadata dict recording the filter and core count."""
        import os
        from genomicode import filelib
        from genomicode import parallel
        vcf_node = in_data
        vcf_filenames = filelib.list_files_in_path(
            vcf_node.identifier, endswith=".vcf", not_empty=True)
        assert vcf_filenames, "No VCF files found."
        filelib.safe_mkdir(out_path)
        metadata = {}
        # Figure out whether the user wants SNPs or INDELs.
        assert "vartype" in out_attributes
        vartype = out_attributes["vartype"]
        assert vartype in ["snp", "indel"]
        metadata["filter"] = vartype
        # Pair each input VCF with its output path (same basename).
        jobs = []  # list of filelib.GenericObject
        for in_filename in vcf_filenames:
            p, f = os.path.split(in_filename)
            out_filename = os.path.join(out_path, f)
            x = filelib.GenericObject(
                in_filename=in_filename, out_filename=out_filename)
            jobs.append(x)
        # Filter each of the VCF files (one pyfun task per file).
        jobs2 = []
        for j in jobs:
            args = vartype, j.in_filename, j.out_filename
            x = filter_by_vartype, args, {}
            jobs2.append(x)
        parallel.pyfun(jobs2, num_procs=num_cores)
        metadata["num_cores"] = num_cores
        return metadata
    def name_outfile(self, antecedents, user_options):
        # Fixed output name used by the pipeline for this module.
        return "jointsnvmix.vcf"
def is_snp(var):
    """Return True if this JointSNVMix variant is a passing SNP
    (FILTER does not contain 'INDL')."""
    from genomicode import vcflib
    # FILTER is always "PASS" or "INDL"
    assert len(var.filter_) == 1
    # NOTE(review): the assert above reads var.filter_ but this line reads
    # var.filter -- confirm vcflib variants expose both attributes, or
    # unify on one spelling.
    x = var.filter[0]
    assert x in ["PASS", "INDL"]
    return vcflib.is_pass_filter(var, FILTER_doesnotcontain="INDL")
def is_indel(var):
    """Return True if this JointSNVMix variant is an indel
    (FILTER does not contain 'PASS', i.e. it is 'INDL')."""
    from genomicode import vcflib
    # FILTER is always "PASS" or "INDL"
    assert len(var.filter_) == 1
    # NOTE(review): same var.filter_ vs var.filter inconsistency as is_snp
    # -- confirm which attribute vcflib actually provides.
    x = var.filter[0]
    assert x in ["PASS", "INDL"]
    return vcflib.is_pass_filter(var, FILTER_doesnotcontain="PASS")
def filter_by_vartype(vartype, infile, outfile):
    """Write to *outfile* only the variants of the requested type.

    'all' copies the file verbatim; 'snp'/'indel' keep just the matching
    variants.
    """
    import shutil
    from genomicode import vcflib
    assert vartype in ["all", "snp", "indel"]
    if vartype == "all":
        shutil.copy2(infile, outfile)
        return
    keep = is_indel if vartype == "indel" else is_snp
    filtered = vcflib.select_variants(vcflib.read(infile), keep)
    vcflib.write(outfile, filtered)
| {
"content_hash": "8f303313816bae29092d0a27cb041c4d",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 72,
"avg_line_length": 29.011627906976745,
"alnum_prop": 0.6032064128256514,
"repo_name": "jefftc/changlab",
"id": "f35c0f0e5a10e419ae2355a0cecab4bc517094a4",
"size": "2495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Betsy/Betsy/modules/filter_variants_jointsnvmix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "116953"
},
{
"name": "CSS",
"bytes": "75418"
},
{
"name": "Groff",
"bytes": "10237"
},
{
"name": "HTML",
"bytes": "200459"
},
{
"name": "JavaScript",
"bytes": "159618"
},
{
"name": "Makefile",
"bytes": "11719"
},
{
"name": "Python",
"bytes": "9300228"
},
{
"name": "R",
"bytes": "94670"
},
{
"name": "Shell",
"bytes": "63514"
},
{
"name": "TeX",
"bytes": "64"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Set ExportTask model options: order records by created_at and mark
    the model as managed."""
    dependencies = [
        ('tasks', '0025_exporttask_created_at'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='exporttask',
            options={'ordering': ['created_at'], 'managed': True},
        ),
    ]
| {
"content_hash": "f53f05f906d8a1568fe4ee3b84313294",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 21.941176470588236,
"alnum_prop": 0.6005361930294906,
"repo_name": "dodobas/osm-export-tool2",
"id": "66a0cf5e62be47f3f97d021fbcab61b7dada3753",
"size": "397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tasks/migrations/0026_auto_20150724_1437.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "119726"
},
{
"name": "HTML",
"bytes": "152701"
},
{
"name": "JavaScript",
"bytes": "4936760"
},
{
"name": "Python",
"bytes": "366592"
},
{
"name": "Shell",
"bytes": "755"
}
],
"symlink_target": ""
} |
import sys
import struct
import Image
#from cliente.cliente import socketcliente
def main():
    """Build a one-byte buffer and print each byte's integer value."""
    # Earlier experiments, kept for reference:
    # print struct.unpack('I', p)
    # print "{:<15}".format('si') + 's'
    # s.append(bytearray('megalol'))
    buffer = bytearray((10,))
    for byte_value in buffer:
        print(byte_value)


if __name__ == "__main__":
    main()
"content_hash": "637dc935fbc8056465292e9f5f39f6f1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 42,
"avg_line_length": 16.470588235294116,
"alnum_prop": 0.6428571428571429,
"repo_name": "EnriquePS/ProtocoloPokemon",
"id": "45a65e53da2b126692ae256da8e56d87fef95ba7",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6241"
}
],
"symlink_target": ""
} |
import re
from storage.ssh import SSHSession
WWN_REGEX = re.compile('[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}'
, re.I)
class BrocadeSwitch(SSHSession):
    """
    Establish SSH Connection to Brocade FC Switch which can be used to run
    commands against it.

    Provides commonly used commands as methods; the inherited ``command()``
    method can be used directly to add more functionality.  All parsing
    methods assume ``self.command(cmd)`` returns an ``(output, error)`` pair
    where ``output`` is an iterable of text lines or None.
    """

    @staticmethod
    def fidify_command(cmd, fid):
        """
        If fid, return command that can run in given fid (Virtual Fabrics).
        Otherwise return the command as it is.
        """
        if fid:
            return 'fosexec --fid %s -cmd "%s" ' % (fid, cmd)
        else:
            return cmd

    def isDirectorClass(self, switch_type):
        """
        Return True if the switch is director class.

        Use SwitchType from switchShow command output (looks like
        '<model>.<revision>'; only the model part matters here).
        """
        DIRECTOR_CLASS_TYPES = ['42', '62', '77', '120', '121']
        return switch_type.split('.')[0] in DIRECTOR_CLASS_TYPES

    def aliShow(self, pattern='*', fid=None):
        """
        Returns dictionary with alias name as key and its WWN members as
        values.  Default pattern '*' will return all aliases.
        """
        aliases = {}
        cmd = self.fidify_command('aliShow %s' % pattern, fid)
        output, error = self.command(cmd)
        if output and not re.search('does not exist', " ".join(output),
                                    re.IGNORECASE):
            alias_regex = re.compile('alias:(.*)')
            key = None
            values = []
            for line in output:
                line = line.strip()
                alias_match = alias_regex.search(line)
                if alias_match:
                    # New alias section starts; reset the member list.
                    key = alias_match.group(1).strip()
                    values = []
                elif WWN_REGEX.search(line):
                    values = values + WWN_REGEX.findall(line)
                if key:
                    # De-duplicate members; note this loses member order.
                    aliases[key] = list(set(values))
        return aliases

    def fabricShow(self, membership=False, chassis=False, fid=None):
        """
        Returns fabricshow output with each switch as a dictionary entry,
        keyed by the switch's fabric index.
        """
        fabric = {}
        cmd = 'fabricShow'
        if membership and chassis:
            pass  # Defaults to fabricShow as both arguments can't be True
        elif membership:
            cmd = 'fabricShow -membership'
        elif chassis:
            cmd = 'fabricShow -chassis'
        cmd = self.fidify_command(cmd, fid)
        output, error = self.command(cmd)
        if output:
            for line in output:
                line = line.strip()
                # Switch rows start with '<index>:'.
                if re.match(r'^\d+:', line):
                    values = line.split()
                    key = values.pop(0).replace(':', '')
                    fabric[key] = values
        return fabric

    def switchName(self, fid=None):
        """
        Returns Switch Name, or None when the command produced no output.
        """
        cmd = self.fidify_command('switchName', fid)
        output, error = self.command(cmd)
        if output:
            return "".join(output).strip()

    def switchShow(self, fid=None):
        """
        Returns switchShow output in a dictionary format: the 'key: value'
        header lines plus a 'ports' list with one raw line per port.
        """
        cmd = self.fidify_command('switchShow', fid)
        output, error = self.command(cmd)
        dct = {}
        if output:
            # First get the 'key: value' header lines.
            # NOTE: fixed character class; the original '[a-zA-z]' also
            # matched '[', ']', '^', '_' and backtick by accident.
            for line in output:
                line = line.strip()
                if re.match(r'^[a-zA-Z\s]+:', line):
                    key, value = line.split(':', 1)
                    dct[key] = value.strip()
            # The port table follows a '===...' separator line.
            concatenated_output = "".join(output)
            separator_match = re.search(r'===+((.|\n)*)', concatenated_output)
            if separator_match:
                port_info = separator_match.groups(1)[0]
                dct['ports'] = port_info.strip().split('\n')
        return dct

    def version(self):
        """
        Returns dictionary with version information.
        FID not required here.
        """
        dct = {}
        output, error = self.command('version')
        # Guard against a failed command returning None (other methods
        # already guard this; the original crashed here with TypeError).
        if output:
            for line in output:
                line = line.strip()
                parts = re.split(r'\W+ ', line)
                if len(parts) == 2:
                    key, value = parts
                    dct[key] = value
        return dct

    def zoneShow(self, pattern='*', fid=None):
        """
        Returns dictionary with zone name as key and its members as values.
        Pattern '*' will return all zones.
        """
        zones = {}
        cmd = self.fidify_command('zoneShow %s' % pattern, fid)
        output, error = self.command(cmd)
        if output and not re.search('does not exist', " ".join(output),
                                    re.IGNORECASE):
            zone_regex = re.compile('zone:(.*)')
            key = None
            values = []
            for line in output:
                line = line.strip()
                zone_match = zone_regex.search(line)
                if zone_match:
                    # New zone section starts; reset the member list.
                    key = zone_match.group(1).strip()
                    values = []
                else:
                    items = [x.strip() for x in line.split(';') if x]
                    if items:
                        values = values + items
                if key:
                    zones[key] = list(set(values))
        return zones

    def is_wwn_on_fabric(self, wwn, fid=None):
        """
        Check if given WWN (WWNN or WWPN) exists on fabric.
        Return True if it exists, otherwise False.
        """
        cmd = self.fidify_command('nodefind %s' % wwn, fid)
        output, error = self.command(cmd)
        return bool(output and re.search(wwn, " ".join(output),
                                         re.IGNORECASE))

    def wwn_alias_map(self, fid=None):
        """
        Return dictionary with wwn as key and list of aliases as values.
        """
        aliases = self.aliShow(fid=fid)
        # Renamed from 'map' to avoid shadowing the builtin.
        wwn_map = dict()
        for alias, wwpns in aliases.items():
            for wwpn in wwpns:
                entries = wwn_map.setdefault(wwpn, [])
                if alias not in entries:
                    entries.append(alias)
        return wwn_map

    def get_alias_zones(self, alias, fid=None):
        """
        Return a list of zones the alias is part of.
        """
        zones = self.zoneShow(fid=fid)
        alias_zones = []
        for zone, aliases in zones.items():
            if alias in aliases:
                alias_zones.append(zone)
        return alias_zones

    def get_wwn_aliases(self, wwn, fid=None):
        """
        Return a list of aliases for the given wwn.  Tries both lower and
        upper case forms since switch output casing may differ.
        """
        wwn_map = self.wwn_alias_map(fid)
        if wwn.lower() in wwn_map:
            return wwn_map[wwn.lower()]
        elif wwn.upper() in wwn_map:
            return wwn_map[wwn.upper()]
        else:
            return []

    def get_current_active_config_name(self, fid=None):
        """
        Return current active zone configuration name, or None if no
        configuration is active or the command produced no output.
        """
        cmd = self.fidify_command('cfgactvshow', fid)
        output, error = self.command(cmd)
        CONFIG_REGEX = re.compile(r'cfg:(.*)\n')
        if output:
            joined = "".join(output)
            config_match = CONFIG_REGEX.search(joined)
            if config_match:
                return config_match.groups()[0].strip()
| {
"content_hash": "f22a7cf6a056dd4514db8dab56508065",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 120,
"avg_line_length": 30.678861788617887,
"alnum_prop": 0.49940373658407317,
"repo_name": "OpenSRM/storage",
"id": "eb3e57e2a46151c62428dceb152d4f4fa3ecf94c",
"size": "7547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storage/brocade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "65"
},
{
"name": "Python",
"bytes": "34238"
}
],
"symlink_target": ""
} |
import datetime
from unittest import mock
import ddt
import iso8601
from oslo_utils import versionutils
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import api_version_request as api_version
from cinder.api.v3 import clusters
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_cluster
from cinder.tests.unit import test
# Raw DB rows for two fake clusters: #1 is enabled with a recent
# heartbeat; #2 ('cluster2') is disabled with one of its two hosts down.
CLUSTERS = [
    fake_cluster.fake_db_cluster(
        id=1,
        replication_status='error',
        frozen=False,
        active_backend_id='replication1',
        last_heartbeat=datetime.datetime(2016, 6, 1, 2, 46, 28),
        updated_at=datetime.datetime(2016, 6, 1, 2, 46, 28),
        created_at=datetime.datetime(2016, 6, 1, 2, 46, 28)),
    fake_cluster.fake_db_cluster(
        id=2, name='cluster2', num_hosts=2, num_down_hosts=1, disabled=True,
        replication_status='error',
        frozen=True,
        active_backend_id='replication2',
        updated_at=datetime.datetime(2016, 6, 1, 1, 46, 28),
        created_at=datetime.datetime(2016, 6, 1, 1, 46, 28))
]

# The same clusters as ORM-style objects, matching what the mocked
# cinder.db.cluster_get_all returns in the tests below.
CLUSTERS_ORM = [fake_cluster.fake_cluster_orm(**kwargs) for kwargs in CLUSTERS]

# Detailed API view the controller is expected to build from CLUSTERS_ORM.
# NOTE(review): cluster2 has no last_heartbeat above and shows '' here with
# state 'down' -- presumably derived by the view layer; confirm there.
EXPECTED = [{'created_at': datetime.datetime(2016, 6, 1, 2, 46, 28),
             'disabled_reason': None,
             'last_heartbeat': datetime.datetime(2016, 6, 1, 2, 46, 28),
             'name': 'cluster_name',
             'binary': 'cinder-volume',
             'num_down_hosts': 0,
             'num_hosts': 0,
             'state': 'up',
             'status': 'enabled',
             'replication_status': 'error',
             'frozen': False,
             'active_backend_id': 'replication1',
             'updated_at': datetime.datetime(2016, 6, 1, 2, 46, 28)},
            {'created_at': datetime.datetime(2016, 6, 1, 1, 46, 28),
             'disabled_reason': None,
             'last_heartbeat': '',
             'name': 'cluster2',
             'binary': 'cinder-volume',
             'num_down_hosts': 1,
             'num_hosts': 2,
             'state': 'down',
             'status': 'disabled',
             'replication_status': 'error',
             'frozen': True,
             'active_backend_id': 'replication2',
             'updated_at': datetime.datetime(2016, 6, 1, 1, 46, 28)}]
class FakeRequest(object):
    """Minimal stand-in for a WSGI request object as the clusters API
    controller sees it: query params, microversion headers and a request
    context in the environ."""

    def __init__(self, is_admin=True, version=mv.CLUSTER_SUPPORT, **kwargs):
        ctxt = context.RequestContext(user_id=None,
                                      project_id=None,
                                      is_admin=is_admin,
                                      read_deleted='no',
                                      overwrite=False)
        self.environ = {'cinder.context': ctxt}
        self.api_version_request = api_version.APIVersionRequest(version)
        self.headers = {'OpenStack-API-Version': 'volume ' + version}
        self.GET = kwargs
def fake_utcnow(with_timezone=False):
    """Return a fixed 'now' (2016-06-01 02:46:30), UTC-aware on request."""
    if with_timezone:
        return datetime.datetime(2016, 6, 1, 2, 46, 30, tzinfo=iso8601.UTC)
    return datetime.datetime(2016, 6, 1, 2, 46, 30)
@ddt.ddt
@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow)
class ClustersTestCase(test.TestCase):
    """Test Case for Clusters."""

    # Filter combinations driving the ddt-parameterized listing tests.
    LIST_FILTERS = ({}, {'is_up': True}, {'disabled': False}, {'num_hosts': 2},
                    {'num_down_hosts': 1}, {'binary': 'cinder-volume'},
                    {'is_up': True, 'disabled': False, 'num_hosts': 2,
                     'num_down_hosts': 1, 'binary': 'cinder-volume'})

    # Filters that are only accepted from the REPLICATION_CLUSTER
    # microversion on (rejected as InvalidInput before it).
    REPLICATION_FILTERS = ({'replication_status': 'error'}, {'frozen': True},
                           {'active_backend_id': 'replication'})

    def _get_expected(self,
                      version=mv.get_prior_version(mv.REPLICATION_CLUSTER)):
        """Return EXPECTED, minus replication fields for older versions."""
        if (versionutils.convert_version_to_tuple(version) >=
                versionutils.convert_version_to_tuple(mv.REPLICATION_CLUSTER)):
            return EXPECTED
        expect = []
        for cluster in EXPECTED:
            # Copy so EXPECTED itself is never mutated across tests.
            cluster = cluster.copy()
            for key in ('replication_status', 'frozen', 'active_backend_id'):
                cluster.pop(key)
            expect.append(cluster)
        return expect

    def setUp(self):
        super(ClustersTestCase, self).setUp()
        self.context = context.get_admin_context()
        # Controller under test, wired to an empty extension manager.
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = clusters.ClusterController(self.ext_mgr)

    @mock.patch('cinder.db.cluster_get_all', return_value=CLUSTERS_ORM)
    def _test_list(self, get_all_mock, detailed, filters=None, expected=None,
                   version=mv.get_prior_version(mv.REPLICATION_CLUSTER)):
        """Shared helper: call index/detail and check payload + DB call.

        Asserts both the response body and the exact arguments the
        controller forwarded to the mocked db.cluster_get_all.
        """
        filters = filters or {}
        req = FakeRequest(version=version, **filters)
        method = getattr(self.controller, 'detail' if detailed else 'index')
        # NOTE(review): local name shadows the imported 'clusters' module;
        # kept as-is.
        clusters = method(req)
        filters = filters.copy()
        # Defaults the controller is expected to add before hitting the DB.
        filters.setdefault('is_up', None)
        filters.setdefault('read_deleted', 'no')
        self.assertEqual(expected, clusters)
        get_all_mock.assert_called_once_with(
            req.environ['cinder.context'],
            get_services=False,
            services_summary=detailed,
            **filters)

    @ddt.data(*LIST_FILTERS)
    def test_index_detail(self, filters):
        """Verify that we get all clusters with detailed data."""
        expected = {'clusters': self._get_expected()}
        self._test_list(detailed=True, filters=filters, expected=expected)

    @ddt.data(*LIST_FILTERS)
    def test_index_summary(self, filters):
        """Verify that we get all clusters with summary data."""
        expected = {'clusters': [{'name': 'cluster_name',
                                  'binary': 'cinder-volume',
                                  'state': 'up',
                                  'status': 'enabled'},
                                 {'name': 'cluster2',
                                  'binary': 'cinder-volume',
                                  'state': 'down',
                                  'status': 'disabled'}]}
        self._test_list(detailed=False, filters=filters, expected=expected)

    # Replication filters must be rejected on pre-replication versions.
    @ddt.data(*REPLICATION_FILTERS)
    def test_index_detail_fail_old(self, filters):
        self.assertRaises(exception.InvalidInput, self._test_list,
                          detailed=True, filters=filters)

    @ddt.data(*REPLICATION_FILTERS)
    def test_index_summary_fail_old(self, filters):
        self.assertRaises(exception.InvalidInput, self._test_list,
                          detailed=False, filters=filters)

    @ddt.data(True, False)
    def test_index_unauthorized(self, detailed):
        """Verify that unauthorized user can't list clusters."""
        self.assertRaises(exception.PolicyNotAuthorized,
                          self._test_list, detailed=detailed,
                          filters={'is_admin': False})

    @ddt.data(True, False)
    def test_index_wrong_version(self, detailed):
        """Verify the wrong version so that user can't list clusters."""
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self._test_list, detailed=detailed,
                          version=mv.get_prior_version(mv.CLUSTER_SUPPORT))

    @ddt.data(*REPLICATION_FILTERS)
    def test_index_detail_replication_new_fields(self, filters):
        # Same filters succeed once the replication microversion is used.
        expected = {'clusters': self._get_expected(mv.REPLICATION_CLUSTER)}
        self._test_list(detailed=True, filters=filters, expected=expected,
                        version=mv.REPLICATION_CLUSTER)

    @ddt.data(*REPLICATION_FILTERS)
    def test_index_summary_replication_new_fields(self, filters):
        # Summary view gains the replication_status field on new versions.
        expected = {'clusters': [{'name': 'cluster_name',
                                  'binary': 'cinder-volume',
                                  'state': 'up',
                                  'replication_status': 'error',
                                  'status': 'enabled'},
                                 {'name': 'cluster2',
                                  'binary': 'cinder-volume',
                                  'state': 'down',
                                  'replication_status': 'error',
                                  'status': 'disabled'}]}
        self._test_list(detailed=False, filters=filters, expected=expected,
                        version=mv.REPLICATION_CLUSTER)

    @mock.patch('cinder.db.sqlalchemy.api.cluster_get',
                return_value=CLUSTERS_ORM[0])
    def test_show(self, get_mock):
        req = FakeRequest()
        expected = {'cluster': self._get_expected()[0]}
        cluster = self.controller.show(req, 'cluster_name',
                                       'cinder-volume')
        self.assertEqual(expected, cluster)
        get_mock.assert_called_once_with(
            req.environ['cinder.context'],
            None,
            services_summary=True,
            name='cluster_name',
            binary='cinder-volume')

    def test_show_unauthorized(self):
        req = FakeRequest(is_admin=False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req, 'name')

    def test_show_wrong_version(self):
        req = FakeRequest(version=mv.get_prior_version(mv.CLUSTER_SUPPORT))
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.show, req, 'name')

    @mock.patch('cinder.db.sqlalchemy.api.cluster_update')
    @mock.patch('cinder.db.sqlalchemy.api.cluster_get',
                return_value=CLUSTERS_ORM[1])
    def test_update_enable(self, get_mock, update_mock):
        # Enabling the disabled cluster2 should clear disabled/reason.
        req = FakeRequest()
        expected = {'cluster': {'name': 'cluster2',
                                'binary': 'cinder-volume',
                                'state': 'down',
                                'status': 'enabled',
                                'disabled_reason': None}}
        res = self.controller.update(req, 'enable',
                                     body={'name': 'cluster_name',
                                           'binary': 'cinder-volume'})
        self.assertEqual(expected, res)
        ctxt = req.environ['cinder.context']
        get_mock.assert_called_once_with(ctxt,
                                         None, binary='cinder-volume',
                                         name='cluster_name')
        update_mock.assert_called_once_with(ctxt, get_mock.return_value.id,
                                            {'disabled': False,
                                             'disabled_reason': None})

    @mock.patch('cinder.db.sqlalchemy.api.cluster_update')
    @mock.patch('cinder.db.sqlalchemy.api.cluster_get',
                return_value=CLUSTERS_ORM[0])
    def test_update_disable(self, get_mock, update_mock):
        # Disabling requires/propagates a disabled_reason.
        req = FakeRequest()
        disabled_reason = 'For testing'
        expected = {'cluster': {'name': 'cluster_name',
                                'state': 'up',
                                'binary': 'cinder-volume',
                                'status': 'disabled',
                                'disabled_reason': disabled_reason}}
        res = self.controller.update(req, 'disable',
                                     body={'name': 'cluster_name',
                                           'binary': 'cinder-volume',
                                           'disabled_reason': disabled_reason})
        self.assertEqual(expected, res)
        ctxt = req.environ['cinder.context']
        get_mock.assert_called_once_with(ctxt,
                                         None, binary='cinder-volume',
                                         name='cluster_name')
        update_mock.assert_called_once_with(
            ctxt, get_mock.return_value.id,
            {'disabled': True, 'disabled_reason': disabled_reason})

    def test_update_wrong_action(self):
        req = FakeRequest()
        self.assertRaises(exception.NotFound, self.controller.update,
                          req, 'action', body={'name': 'cluster_name'})

    @ddt.data('enable', 'disable')
    def test_update_missing_name(self, action):
        req = FakeRequest()
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, action, body={'binary': 'cinder-volume'})

    def test_update_with_binary_more_than_255_characters(self):
        req = FakeRequest()
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, 'enable', body={'name': 'cluster_name',
                                               'binary': 'a' * 256})

    def test_update_with_name_more_than_255_characters(self):
        req = FakeRequest()
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, 'enable', body={'name': 'a' * 256,
                                               'binary': 'cinder-volume'})

    # Too-long and whitespace-only reasons are both invalid.
    @ddt.data('a' * 256, ' ')
    def test_update_wrong_disabled_reason(self, disabled_reason):
        req = FakeRequest()
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, 'disable',
                          body={'name': 'cluster_name',
                                'disabled_reason': disabled_reason})

    @ddt.data('enable', 'disable')
    def test_update_unauthorized(self, action):
        req = FakeRequest(is_admin=False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.update, req, action,
                          body={'name': 'fake_name'})

    @ddt.data('enable', 'disable')
    def test_update_wrong_version(self, action):
        req = FakeRequest(version=mv.get_prior_version(mv.CLUSTER_SUPPORT))
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.update, req, action, {})
| {
"content_hash": "677a34d814a10a15a12461f8b43bf1f5",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 79,
"avg_line_length": 44.47452229299363,
"alnum_prop": 0.5387039026136771,
"repo_name": "openstack/cinder",
"id": "93d042fad86658d149e2184dcc1d9d85acc0da92",
"size": "14598",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/api/v3/test_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078349"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.