| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| matthiask/django-chet | chet/south_migrations/0001_initial.py | Python | bsd-3-clause | 4,095 | 0.008547 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Album'
db.create_table(u'chet_album', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=200)),
))
db.send_create_signal(u'chet', ['Album'])
# Adding model 'Photo'
db.create_table(u'chet_photo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('file', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('album', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['chet.Album'])),
('shot_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('is_dark', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'chet', ['Photo'])
def backwards(self, orm):
# Deleting model 'Album'
db.delete_table(u'chet_album')
# Deleting model 'Photo'
db.delete_table(u'chet_photo')
models = {
u'chet.album': {
'Meta': {'ordering': "['-date']", 'object_name': 'Album'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'chet.photo': {
'Meta': {'ordering': "['-created_on']", 'object_name': 'Photo'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'to': u"orm['chet.Album']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_dark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shot_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['chet']
| Xperia-Nicki/android_platform_sony_nicki | external/webkit/LayoutTests/http/tests/websocket/tests/hybi/close-on-unload_wsh.py | Python | apache-2.0 | 2,370 | 0 |
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import msgutil
# we don't use set() here, because python on mac tiger doesn't support it.
connections = {}
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
global connections
connections[request] = True
socketName = None
try:
socketName = msgutil.receive_message(request)
# notify to client that socketName is received by server.
msgutil.send_message(request, socketName)
msgutil.receive_message(request) # wait, and exception by close.
socketName = socketName + ': receive next message'
finally:
# request is closed. notify this socketName to other web sockets.
del connections[request]
for ws in connections.keys():
msgutil.send_message(ws, socketName)
| zhangjunli177/sahara | sahara/main.py | Python | apache-2.0 | 6,825 | 0 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import flask
from oslo_config import cfg
from oslo_log import log
import oslo_middleware.cors as cors_middleware
from oslo_middleware import request_id
from oslo_service import systemd
import six
import stevedore
from werkzeug import exceptions as werkzeug_exceptions
from sahara.api import acl
from sahara.api.middleware import auth_valid
from sahara.api.middleware import log_exchange
from sahara.api import v10 as api_v10
from sahara.api import v11 as api_v11
from sahara import config
from sahara import context
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import base as plugins_base
from sahara.service import api as service_api
from sahara.service.edp import api as edp_api
from sahara.service import ops as service_ops
from sahara.service import periodic
from sahara.utils import api as api_utils
from sahara.utils.openstack import cinder
from sahara.utils import remote
from sahara.utils import rpc as messaging
from sahara.utils import wsgi
LOG = log.getLogger(__name__)
opts = [
cfg.StrOpt('os_region_name',
help='Region name used to get services endpoints.'),
cfg.StrOpt('infrastructure_engine',
default='heat',
help='An engine which will be used to provision '
'infrastructure for Hadoop cluster.'),
cfg.StrOpt('remote',
default='ssh',
help='A method for Sahara to execute commands '
'on VMs.'),
cfg.IntOpt('api_workers', default=0,
help="Number of workers for Sahara API service (0 means "
"all-in-one-thread configuration).")
]
CONF = cfg.CONF
CONF.register_opts(opts)
def setup_common(possible_topdir, service_name):
dev_conf = os.path.join(possible_topdir,
'etc',
'sahara',
'sahara.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
config.parse_configs(config_files)
log.setup(CONF, "sahara")
# Validate other configurations (that may produce logs) here
cinder.validate_config()
if service_name != 'all-in-one' or cfg.CONF.enable_notifications:
messaging.setup()
plugins_base.setup_plugins()
LOG.info(_LI('Sahara {service} started').format(service=service_name))
def setup_sahara_api(mode):
ops = _get_ops_driver(mode)
service_api.setup_service_api(ops)
edp_api.setup_edp_api(ops)
def setup_sahara_engine():
periodic.setup()
engine = _get_infrastructure_engine()
service_ops.setup_ops(engine)
remote_driver = _get_remote_driver()
remote.setup_remote(remote_driver, engine)
def setup_auth_policy():
acl.setup_policy()
def make_app():
"""App builder (wsgi)
Entry point for Sahara REST API server
"""
app = flask.Flask('sahara.api')
@app.route('/', methods=['GET'])
def version_list():
context.set_ctx(None)
return api_utils.render({
"versions": [
{"id": "v1.0", "status": "SUPPORTED"},
{"id": "v1.1", "status": "CURRENT"}
]
})
@app.teardown_request
def teardown_request(_ex=None):
context.set_ctx(None)
app.register_blueprint(api_v10.rest, url_prefix='/v1.0')
app.register_blueprint(api_v10.rest, url_prefix='/v1.1')
app.register_blueprint(api_v11.rest, url_prefix='/v1.1')
def make_json_error(ex):
status_code = (ex.code
if isinstance(ex, werkzeug_exceptions.HTTPException)
else 500)
description = (ex.description
if isinstance(ex, werkzeug_exceptions.HTTPException)
else str(ex))
return api_utils.render({'error': status_code,
'error_message': description},
status=status_code)
for code in six.iterkeys(werkzeug_exceptions.default_exceptions):
app.error_handler_spec[None][code] = make_json_error
if CONF.debug and not CONF.log_exchange:
LOG.debug('Logging of request/response exchange could be enabled using'
' flag --log-exchange')
# Create a CORS wrapper, and attach sahara-specific defaults that must be
# included in all CORS responses.
app.wsgi_app = cors_middleware.CORS(app.wsgi_app, CONF)
app.wsgi_app.set_latent(
allow_headers=['X-Auth-Token', 'X-Server-Management-Url'],
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=['X-Auth-Token', 'X-Server-Management-Url']
)
if CONF.log_exchange:
app.wsgi_app = log_exchange.LogExchange.factory(CONF)(app.wsgi_app)
app.wsgi_app = auth_valid.wrap(app.wsgi_app)
app.wsgi_app = acl.wrap(app.wsgi_app)
app.wsgi_app = request_id.RequestId(app.wsgi_app)
return app
def _load_driver(namespace, name):
extension_manager = stevedore.DriverManager(
namespace=namespace,
name=name,
invoke_on_load=True
)
LOG.info(_LI("Driver {name} successfully loaded").format(name=name))
return extension_manager.driver
def _get_infrastructure_engine():
"""Import and return one of sahara.service.*_engine.py modules."""
LOG.debug("Infrastructure engine {engine} is loading".format(
engine=CONF.infrastructure_engine))
if CONF.infrastructure_engine == "direct":
LOG.warning(_LW("Direct infrastructure engine is deprecated in Liberty"
" release and will be removed after that release."
" Use Heat infrastructure engine instead."))
return _load_driver('sahara.infrastructure.engine',
CONF.infrastructure_engine)
def _get_remote_driver():
LOG.debug("Remote {remote} is loading".format(remote=CONF.remote))
return _load_driver('sahara.remote', CONF.remote)
def _get_ops_driver(driver_name):
LOG.debug("Ops {driver} is loading".format(driver=driver_name))
return _load_driver('sahara.run.mode', driver_name)
def start_server(app):
server = wsgi.Server()
server.start(app)
systemd.notify_once()
server.wait()
| pikeszfish/Leetcode.py | leetcode.py/CompareVersionNumbers.py | Python | mit | 791 | 0.001264 |
class Solution:
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(self, version1, version2):
v1 = version1.split('.')
v2 = version2.split('.')
for i in range(0, min(len(v1), len(v2))):
if int(v1[i]) > int(v2[i]):
return 1
if int(v1[i]) < int(v2[i]):
return -1
if len(v1) == len(v2):
return 0
elif len(v1) > len(v2):
for i in range(len(v2), len(v1)):
if int(v1[i]) != 0:
return 1
return 0
else:
for i in range(len(v1), len(v2)):
if int(v2[i]) != 0:
return -1
return 0
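A brief usage sketch, not part of the dataset row, showing the compareVersion contract described in the comments above, assuming the class is cleaned up to run as intended:

```python
# Illustrative only: segments are compared numerically, and trailing
# zero segments are treated the same as missing segments.
s = Solution()
assert s.compareVersion("1.2", "1.10") == -1   # 2 < 10 numerically
assert s.compareVersion("1.0", "1") == 0       # trailing zeros ignored
assert s.compareVersion("2.0.1", "2") == 1     # extra non-zero segment wins
```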
| jhuapl-boss/boss-manage | cloud_formation/configs/migrations/api/00010002_dns_update.py | Python | apache-2.0 | 1,140 | 0.003509 |
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib import aws
from lib import console
def pre_update(bosslet_config):
# With version 2 the DNS records are now part of the CloudFormation template, so
# remove the existing DNS record so the update can happen
console.warning("Removing existing Api public DNS entry, so CloudFormation can manage the DNS record")
aws.route53_delete_records(bosslet_config.session,
bosslet_config.EXTERNAL_DOMAIN,
bosslet_config.names.public_dns('api'))
| aetel/3D-printer | prusa_i3/Firmware/Marlin-2.0.x/buildroot/share/PlatformIO/scripts/STM32F1_create_variant.py | Python | gpl-2.0 | 1,009 | 0.003964 |
import os,shutil
from SCons.Script import DefaultEnvironment
from platformio import util
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoststm32-maple")
assert os.path.isdir(FRAMEWORK_DIR)
assert os.path.isdir("buildroot/share/PlatformIO/variants")
variant = board.get("build.variant")
variant_dir = os.path.join(FRAMEWORK_DIR, "STM32F1", "variants", variant)
source_dir = os.path.join("buildroot/share/PlatformIO/variants", variant)
assert os.path.isdir(source_dir)
if os.path.isdir(variant_dir):
shutil.rmtree(variant_dir)
if not os.path.isdir(variant_dir):
os.mkdir(variant_dir)
copytree(source_dir, variant_dir)
| lmorchard/badger | libs/activitystreams/atom.py | Python | bsd-3-clause | 8,938 | 0.004363 |
from activitystreams import Activity, Object, MediaLink, ActionLink, Link
import re
import datetime
import time
class AtomActivity(Activity):
pass
# This is a weird enum-like thing.
class ObjectParseMode(object):
def __init__(self, reprstring):
self.reprstring = reprstring
def __repr__(self):
return self.reprstring
ObjectParseMode.ATOM_ENTRY = ObjectParseMode("ObjectParseMode.ATOM_ENTRY")
ObjectParseMode.ATOM_AUTHOR = ObjectParseMode("ObjectParseMode.ATOM_AUTHOR")
ObjectParseMode.ACTIVITY_OBJECT = ObjectParseMode("ObjectParseMode.ACTIVITY_OBJECT")
ATOM_PREFIX = "{http://www.w3.org/2005/Atom}"
ACTIVITY_PREFIX = "{http://activitystrea.ms/spec/1.0/}"
MEDIA_PREFIX = "{http://purl.org/syndication/atommedia}"
ATOM_FEED = ATOM_PREFIX + "feed"
ATOM_ENTRY = ATOM_PREFIX + "entry"
ATOM_ID = ATOM_PREFIX + "id"
ATOM_AUTHOR = ATOM_PREFIX + "author"
ATOM_SOURCE = ATOM_PREFIX + "source"
ATOM_TITLE = ATOM_PREFIX + "title"
ATOM_SUMMARY = ATOM_PREFIX + "summary"
ATOM_CONTENT = ATOM_PREFIX + "content"
ATOM_LINK = ATOM_PREFIX + "link"
ATOM_PUBLISHED = ATOM_PREFIX + "published"
ATOM_NAME = ATOM_PREFIX + "name"
ATOM_URI = ATOM_PREFIX + "uri"
ATOM_GENERATOR = ATOM_PREFIX + "generator"
ATOM_ICON = ATOM_PREFIX + "icon"
ACTIVITY_SUBJECT = ACTIVITY_PREFIX + "subject"
ACTIVITY_OBJECT = ACTIVITY_PREFIX + "object"
ACTIVITY_OBJECT_TYPE = ACTIVITY_PREFIX + "object-type"
ACTIVITY_VERB = ACTIVITY_PREFIX + "verb"
ACTIVITY_TARGET = ACTIVITY_PREFIX + "target"
ACTIVITY_ACTOR = ACTIVITY_PREFIX + "actor"
POST_VERB = "http://activitystrea.ms/schema/1.0/post"
MEDIA_WIDTH = MEDIA_PREFIX + "width"
MEDIA_HEIGHT = MEDIA_PREFIX + "height"
MEDIA_DURATION = MEDIA_PREFIX + "duration"
MEDIA_DESCRIPTION = MEDIA_PREFIX + "description"
def make_activities_from_feed(et):
feed_elem = et.getroot()
entry_elems = feed_elem.findall(ATOM_ENTRY)
activities = []
for entry_elem in entry_elems:
activities.extend(make_activities_from_entry(entry_elem, feed_elem))
return activities
def make_activities_from_entry(entry_elem, feed_elem):
object_elems = entry_elem.findall(ACTIVITY_OBJECT)
activity_is_implied = False
if len(object_elems) == 0:
# Implied activity, so the entry itself represents the object.
activity_is_implied = True
object_elems = [ entry_elem ]
author_elem = entry_elem.find(ATOM_AUTHOR)
if author_elem is None:
source_elem = entry_elem.find(ATOM_SOURCE)
if source_elem is not None:
author_elem = source_elem.find(ATOM_AUTHOR)
if author_elem is None:
author_elem = feed_elem.find(ATOM_AUTHOR)
target_elem = entry_elem.find(ACTIVITY_TARGET)
published_elem = entry_elem.find(ATOM_PUBLISHED)
published_datetime = None
if published_elem is not None:
published_w3cdtf = published_elem.text
published_datetime = _parse_date_w3cdtf(published_w3cdtf)
verb_elem = entry_elem.find(ACTIVITY_VERB)
verb = None
if verb_elem is not None:
verb = verb_elem.text
else:
verb = POST_VERB
generator_elem = entry_elem.find(ATOM_GENERATOR)
icon_url = None
icon_elem = entry_elem.find(ATOM_ICON)
if icon_elem is not None:
icon_url = icon_elem.text
target = None
if target_elem:
target = make_object_from_elem(target_elem, feed_elem, ObjectParseMode.ACTIVITY_OBJECT)
actor = None
if author_elem:
actor = make_object_from_elem(author_elem, feed_elem, ObjectParseMode.ATOM_AUTHOR)
activities = []
for object_elem in object_elems:
if activity_is_implied:
object = make_object_from_elem(object_elem, feed_elem, ObjectParseMode.ATOM_ENTRY)
else:
object = make_object_from_elem(object_elem, feed_elem, ObjectParseMode.ACTIVITY_OBJECT)
activity = Activity(object=object, actor=actor, target=target, verb=verb, time=published_datetime, icon_url=icon_url)
activities.append(activity)
return activities
def make_object_from_elem(object_elem, feed_elem, mode):
id = None
id_elem = object_elem.find(ATOM_ID)
if id_elem is not None:
id = id_elem.text
summary = None
summary_elem = object_elem.find(ATOM_SUMMARY)
if summary_elem is not None:
summary = summary_elem.text
name_tag_name = ATOM_TITLE
# The ATOM_AUTHOR parsing mode looks in atom:name instead of atom:title
if mode == ObjectParseMode.ATOM_AUTHOR:
name_tag_name = ATOM_NAME
name = None
name_elem = object_elem.find(name_tag_name)
if name_elem is not None:
name = name_elem.text
url = None
image = None
for link_elem in object_elem.findall(ATOM_LINK):
type = link_elem.get("type")
rel = link_elem.get("rel")
if rel is None or rel == "alternate":
if type is None or type == "text/html":
url = link_elem.get("href")
if rel == "preview":
if type is None or type == "image/jpeg" or type == "image/gif" or type == "image/png":
# FIXME: Should pull out the width/height/duration attributes from AtomMedia too.
image = MediaLink(url=link_elem.get("href"))
# In the atom:author parse mode we fall back on atom:uri if there's no link rel="alternate"
if url is None and mode == ObjectParseMode.ATOM_AUTHOR:
uri_elem = object_elem.find(ATOM_URI)
if uri_elem is not None:
url = uri_elem.text
object_type_elem = object_elem.find(ACTIVITY_OBJECT_TYPE)
object_type = None
if object_type_elem is not None:
object_type = object_type_elem.text
return Object(id=id, name=name, url=url, object_type=object_type, image=image, summary=summary)
# This is pilfered from Universal Feed Parser.
def _parse_date_w3cdtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(float(seconds))
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
| ojii/ircbotframework | ircbotframework/app.py | Python | bsd-3-clause | 719 | 0.005563 |
# -*- coding: utf-8 -*-
from flask import request
class View(object):
def __init__(self, core):
self.core = core
def __call__(self, *args, **kwargs):
method = request.method.lower()
handler = getattr(self, method, None)
if callable(handler):
return handler(request=request, *args, **kwargs)
else:
return "Bad Request", 403
class Application(object):
def __init__(self, core):
self.core = core
def get_urls(self):
"""
Returns a list of tuples: (route, View)
"""
return []
def get_plugins(self):
"""
Returns a list of plugin classes
"""
return []
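A hypothetical sketch, not part of the dataset row, of how the View dispatch pattern above is meant to be used; EchoView and its query parameter are invented for illustration:

```python
# Illustrative only: a subclass defines one method per HTTP verb it supports.
# View.__call__ looks up request.method.lower() on the instance and returns
# "Bad Request" when no matching handler is defined.
class EchoView(View):
    def get(self, request, *args, **kwargs):
        return request.args.get("message", "")
```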
| YzPaul3/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_bernoulli_synthetic_data_GBM_medium.py | Python | apache-2.0 | 3,234 | 0.009895 |
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o import H2OFrame
import numpy as np
import numpy.random
import scipy.stats
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def bernoulli_synthetic_data_gbm_medium():
# Generate training dataset (adaptation of http://www.stat.missouri.edu/~speckman/stat461/boost.R)
train_rows = 10000
train_cols = 10
# Generate variables V1, ... V10
X_train = np.random.randn(train_rows, train_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_train = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_train,X_train).tolist()]])
# Train scikit gbm
# TODO: grid-search
distribution = "bernoulli"
ntrees = 150
min_rows = 1
max_depth = 2
learn_rate = .01
nbins = 20
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(X_train,y_train)
# Generate testing dataset
test_rows = 2000
test_cols = 10
# Generate variables V1, ... V10
X_test = np.random.randn(test_rows, test_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_test = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_test,X_test).tolist()]])
# Score (AUC) the scikit gbm model on the test data
auc_sci = roc_auc_score(y_test, gbm_sci.predict_proba(X_test)[:,1])
# Compare this result to H2O
xtrain = np.transpose(X_train).tolist()
ytrain = y_train.tolist()
xtest = np.transpose(X_test).tolist()
ytest = y_test.tolist()
train_h2o = H2OFrame(list(zip(*[ytrain]+xtrain)))
test_h2o = H2OFrame(list(zip(*[ytest]+xtest)))
train_h2o["C1"] = train_h2o["C1"].asfactor()
test_h2o["C1"] = test_h2o["C1"].asfactor()
gbm_h2o = H2OGradientBoostingEstimator(distribution=distribution,
ntrees=ntrees,
min_rows=min_rows,
max_depth=max_depth,
learn_rate=learn_rate,
nbins=nbins)
gbm_h2o.train(x=list(range(1,train_h2o.ncol)), y="C1", training_frame=train_h2o)
gbm_perf = gbm_h2o.model_performance(test_h2o)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert abs(auc_h2o - auc_sci) < 1e-2, "h2o (auc) performance degradation, with respect to scikit. h2o auc: {0} " \
"scickit auc: {1}".format(auc_h2o, auc_sci)
if __name__ == "__main__":
pyunit_utils.standalone_test(bernoulli_synthetic_data_gbm_medium)
else:
bernoulli_synthetic_data_gbm_medium()
| GabrielNicolasAvellaneda/dd-agent | checks.d/supervisord.py | Python | bsd-3-clause | 6,841 | 0.000877 |
# stdlib
from collections import defaultdict
import itertools
import re
import socket
import time
import xmlrpclib
# 3p
import supervisor.xmlrpc
# project
from checks import AgentCheck
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = '9001'
DEFAULT_SOCKET_IP = 'http://127.0.0.1'
DD_STATUS = {
'STOPPED': AgentCheck.CRITICAL,
'STARTING': AgentCheck.UNKNOWN,
'RUNNING': AgentCheck.OK,
'BACKOFF': AgentCheck.CRITICAL,
'STOPPING': AgentCheck.CRITICAL,
'EXITED': AgentCheck.CRITICAL,
'FATAL': AgentCheck.CRITICAL,
'UNKNOWN': AgentCheck.UNKNOWN
}
PROCESS_STATUS = {
AgentCheck.CRITICAL: 'down',
AgentCheck.OK: 'up',
AgentCheck.UNKNOWN: 'unknown'
}
SERVER_TAG = 'supervisord_server'
PROCESS_TAG = 'supervisord_process'
FORMAT_TIME = lambda x: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x))
SERVER_SERVICE_CHECK = 'supervisord.can_connect'
PROCESS_SERVICE_CHECK = 'supervisord.process.status'
class SupervisordCheck(AgentCheck):
def check(self, instance):
server_name = instance.get('name')
if not server_name or not server_name.strip():
raise Exception("Supervisor server name not specified in yaml configuration.")
server_service_check_tags = ['%s:%s' % (SERVER_TAG, server_name)]
supe = self._connect(instance)
count_by_status = defaultdict(int)
# Gather all process information
try:
processes = supe.getAllProcessInfo()
except xmlrpclib.Fault, error:
raise Exception(
'An error occurred while reading process information: %s %s'
% (error.faultCode, error.faultString)
)
except socket.error, e:
host = instance.get('host', DEFAULT_HOST)
port = instance.get('port', DEFAULT_PORT)
sock = instance.get('socket')
if sock is None:
msg = 'Cannot connect to http://%s:%s. ' \
'Make sure supervisor is running and XML-RPC ' \
'inet interface is enabled.' % (host, port)
else:
msg = 'Cannot connect to %s. Make sure sure supervisor ' \
'is running and socket is enabled and socket file' \
' has the right permissions.' % sock
self.service_check(SERVER_SERVICE_CHECK, AgentCheck.CRITICAL,
tags=server_service_check_tags,
message=msg)
raise Exception(msg)
except xmlrpclib.ProtocolError, e:
if e.errcode == 401: # authorization error
msg = 'Username or password to %s are incorrect.' % server_name
else:
msg = "An error occurred while connecting to %s: "\
"%s %s " % (server_name, e.errcode, e.errmsg)
self.service_check(SERVER_SERVICE_CHECK, AgentCheck.CRITICAL,
tags=server_service_check_tags,
message=msg)
raise Exception(msg)
# If we're here, we were able to connect to the server
self.service_check(SERVER_SERVICE_CHECK, AgentCheck.OK,
tags=server_service_check_tags)
# Filter monitored processes on configuration directives
proc_regex = instance.get('proc_regex', [])
if not isinstance(proc_regex, list):
raise Exception("Empty or invalid proc_regex.")
proc_names = instance.get('proc_names', [])
if not isinstance(proc_names, list):
raise Exception("Empty or invalid proc_names.")
# Collect information on each monitored process
monitored_processes = []
# monitor all processes if no filters were specified
if len(proc_regex) == 0 and len(proc_names) == 0:
monitored_processes = processes
for pattern, process in itertools.product(proc_regex, processes):
if re.match(pattern, process['name']) and process not in monitored_processes:
monitored_processes.append(process)
for process in processes:
if process['name'] in proc_names and process not in monitored_processes:
monitored_processes.append(process)
# Report service checks and uptime for each process
for proc in monitored_processes:
proc_name = proc['name']
tags = ['%s:%s' % (SERVER_TAG, server_name),
'%s:%s' % (PROCESS_TAG, proc_name)]
# Report Service Check
status = DD_STATUS[proc['statename']]
msg = self._build_message(proc)
count_by_status[status] += 1
self.service_check(PROCESS_SERVICE_CHECK,
status, tags=tags, message=msg)
# Report Uptime
uptime = self._extract_uptime(proc)
self.gauge('supervisord.process.uptime', uptime, tags=tags)
# Report counts by status
tags = ['%s:%s' % (SERVER_TAG, server_name)]
for status in PROCESS_STATUS:
self.gauge('supervisord.process.count', count_by_status[status],
tags=tags + ['status:%s' % PROCESS_STATUS[status]])
@staticmethod
def _connect(instance):
sock = instance.get('socket')
if sock is not None:
host = instance.get('host', DEFAULT_SOCKET_IP)
transport = supervisor.xmlrpc.SupervisorTransport(None, None, sock)
server = xmlrpclib.ServerProxy(host, transport=transport)
else:
host = instance.get('host', DEFAULT_HOST)
port = instance.get('port', DEFAULT_PORT)
user = instance.get('user')
password = instance.get('pass')
auth = '%s:%s@' % (user, password) if user and password else ''
server = xmlrpclib.Server('http://%s%s:%s/RPC2' % (auth, host, port))
return server.supervisor
@staticmethod
def _extract_uptime(proc):
start, now = int(proc['start']), int(proc['now'])
status = proc['statename']
active_state = status in ['BACKOFF', 'RUNNING', 'STOPPING']
return now - start if active_state else 0
@staticmethod
def _build_message(proc):
start, stop, now = int(proc['start']), int(proc['stop']), int(proc['now'])
proc['now_str'] = FORMAT_TIME(now)
proc['start_str'] = FORMAT_TIME(start)
proc['stop_str'] = '' if stop == 0 else FORMAT_TIME(stop)
return """Current time: %(now_str)s
Process name: %(name)s
Process group: %(group)s
Description: %(description)s
Error log file: %(stderr_logfile)s
Stdout log file: %(stdout_logfile)s
Log file: %(logfile)s
State: %(statename)s
Start time: %(start_str)s
Stop time: %(stop_str)s
Exit Status: %(exitstatus)s""" % proc
| philipdexter/rain | rain/module.py | Python | mit | 12,253 | 0.014037 |
from . import ast as A
from . import runtime
from . import scope as S
from . import static
from . import token as K
from . import types as T
from contextlib import contextmanager
from llvmlite import binding
from llvmlite import ir
from os.path import isdir, isfile
from os.path import join
import os.path
import re
name_chars = re.compile('[^a-z0-9]')
TRACE_MAIN = -1
TRACE_INIT = -2
TRACE_UNKNOWN = -3
# get default paths
def get_paths():
path = os.environ['RAINPATH'].split(':') if 'RAINPATH' in os.environ else []
core = [os.environ['RAINBASE'], os.environ['RAINLIB']]
return path + core
# normalize a name - remove all special characters and cases
def normalize_name(name):
return name_chars.sub('', name.lower())
# find a rain file from a module identifier
def find_rain(src, paths=[]):
if src[0] == '/':
paths = ['']
elif src[0] != '.':
paths = get_paths() + paths
for path in paths:
if isfile(join(path, src) + '.rn'):
return join(path, src) + '.rn'
elif isfile(join(path, src)) and src.endswith('.rn'):
return join(path, src)
elif isdir(join(path, src)) and isfile(join(path, src, '_pkg.rn')):
return join(path, src, '_pkg.rn')
# find any file from a string
def find_file(src, paths=[]):
if src[0] == '/':
paths = ['']
elif src[0] != '.':
paths = paths + get_paths()
for path in paths:
if os.path.isfile(join(path, src)):
return join(path, src)
# find a module name
def find_name(src):
path = os.path.abspath(src)
path, name = os.path.split(path)
fname, ext = os.path.splitext(name)
if fname == '_pkg':
_, fname = os.path.split(path)
mname = normalize_name(fname)
proot = []
while path and os.path.isfile(join(path, '_pkg.rn')):
path, name = os.path.split(path)
proot.insert(0, normalize_name(name))
if not src.endswith('_pkg.rn'):
proot.append(mname)
qname = '.'.join(proot)
return (qname, mname)
class Module(S.Scope):
@staticmethod
def dekey(key):
if isinstance(key, (A.name_node, A.str_node)):
key = key.value
if isinstance(key, (K.name_token, K.string_token)):
key = key.value
return normalize_name(key)
def __init__(self, file=None, name=None):
S.Scope.__init__(self)
if name:
self.qname = self.mname = name
else:
self.file = file
self.qname, self.mname = find_name(self.file)
self.llvm = ir.Module(name=self.qname)
self.llvm.triple = binding.get_default_triple()
self.imports = set()
self.links = set()
self.libs = set()
self.runtime = runtime.Runtime(self)
self.static = static.Static(self)
self.runtime.declare()
typ = T.arr(T.i8, len(self.qname) + 1)
ptr = self.add_global(typ, name=self.mangle('_name'))
ptr.initializer = typ(bytearray(self.qname + '\0', 'utf-8'))
self.name_ptr = ptr.gep([T.i32(0), T.i32(0)])
self.builder = None
self.arg_ptrs = None
self.landingpad = None
self.before = None
self.loop = None
self.after = None
self.ret_ptr = None
self.name_counter = 0
def __str__(self):
return 'Module {!r}'.format(self.qname)
def __repr__(self):
return '<{!s}>'.format(self)
def __getitem__(self, key):
return super().__getitem__(self.dekey(key))
def __setitem__(self, key, val):
super().__setitem__(self.dekey(key), val)
def __contains__(self, key):
return super().__contains__(self.dekey(key))
# wrapper to emit IR for a node
def emit(self, node):
return node.emit(self)
@property
def ir(self):
return str(self.llvm)
@property
def is_global(self):
return (not self.builder)
@property
def is_local(self):
return bool(self.builder)
# save and restore some module attributes around a code block
@contextmanager
def stack(self, *attrs):
saved = [getattr(self, attr) for attr in attrs]
yield
for attr, val in zip(attrs, saved):
setattr(self, attr, val)
# mangle a name
def mangle(self, name):
return self.qname + '.' + name
# generate a unique name
def uniq(self, name):
ret = self.mangle('{}.{}'.format(name, self.name_counter))
self.name_counter += 1
return ret
# Global helpers ############################################################
# add a new function
def add_func(self, typ, name=None):
if not name:
name = self.uniq('func')
return ir.Function(self.llvm, typ, name=name)
# add or get an existing function
def find_func(self, typ, name):
if name in self.llvm.globals:
return self.llvm.get_global(name)
return self.add_func(typ, name=name)
# add a new global
def add_global(self, typ, name=None):
if not name:
name = self.uniq('glob')
return ir.GlobalVariable(self.llvm, typ, name=name)
def get_global(self, name):
return self.llvm.get_global(name)
# add or get an existing global
def find_global(self, typ, name):
if name in self.llvm.globals:
return self.llvm.get_global(name)
return self.add_global(typ, name=name)
# import globals from another module
def import_llvm(self, other):
for val in other.llvm.global_values:
if val.name in self.llvm.globals:
continue
if isinstance(val, ir.Function):
ir.Function(self.llvm, val.ftype, name=val.name)
else:
g = ir.GlobalVariable(self.llvm, val.type.pointee, name=val.name)
g.linkage = 'available_externally'
g.initializer = val.initializer
# import the scope from other modules
def import_scope(self, other):
for name, val in other.globals.items():
self[name] = val
# Block helpers #############################################################
@contextmanager
def add_builder(self, block):
with self.stack('builder'):
self.builder = ir.IRBuilder(block)
yield
@contextmanager
def add_func_body(self, func):
with self.stack('ret_ptr', 'arg_ptrs', 'landingpad'):
entry = func.append_basic_block('entry')
body = func.append_basic_block('body')
self.ret_ptr = func.args[0]
self.arg_ptrs = []
self.landingpad = None
with self.add_builder(entry):
self.builder.branch(body)
with self.add_builder(body):
yield
@contextmanager
def add_loop(self):
with self.stack('before', 'loop', 'after'):
self.before = self.builder.append_basic_block('before')
self.loop = self.builder.append_basic_block('loop')
self.after = self.builder.append_basic_block('after')
self.builder.branch(self.before)
yield
self.builder.position_at_end(self.after)
@contextmanager
def add_catch(self):
with self.stack('landingpad'):
self.landingpad = self.builder.append_basic_block('catch')
yield
def catch(self, branch, into=None):
with self.goto(self.landingpad):
lp = self.builder.landingpad(T.lp)
lp.add_clause(ir.CatchClause(T.ptr(T.i8)(None)))
if into:
self.runtime.catch(into)
else:
self.runtime.abort()
self.builder.branch(branch)
@contextmanager
def goto(self, block):
with self.builder.goto_block(block):
yield
@contextmanager
def goto_entry(self):
with self.builder.goto_entry_block():
yield
@contextmanager
def trace(self, pos, mod=None):
label = mod or self.name_ptr
line, col = TRACE_UNKNOWN, TRACE_UNKNOWN
if pos:
line, col = pos.line, pos.col
self.builder.call(self.runtime['push'], (label, T.i32(line), T.i32(col)))
yield
self.builder.call(self.runtime['pop'], ())
# Box helpers ###############################################################
def get_type(self, box):
return self.extract(box, T.TYPE)
def get_value(self, box, typ=None):
val = self.extract(box, T.DATA)
if isinstance(typ, T.func):
return self.builder.inttoptr(val, T.ptr(typ))
return val
def get_size(self, box):
return self.extract(box, T.SIZE)
def get_env(self, box):
return self.extract(box, T.ENV)
def get_vt(self, name):
return self.find_global(T.box, 'core.types.' + name)
def load_exception(self, name):
glob = self.find_global(T.ptr(T.
| mattduan/proof | mapper/DatabaseMap.py | Python | bsd-3-clause | 3,615 | 0.008852 |
"""
DatabaseMap is used to model a database.
"""
__version__= '$Revision: 3194 $'[11:-2]
__author__ = "Duan Guoqiang (mattgduan@gmail.com)"
import string
import proof.pk.IDMethod as IDMethod
import proof.pk.generator.IDBroker as IDBroker
import proof.mapper.TableMap as TableMap
class DatabaseMap:
def __init__(self, name):
""" Constructor.
@param name Name of the database.
"""
# Name of the database.
self.__name = name
# Name of the tables in the database.
self.__tables = {}
# A special table used to generate primary keys for the other
# tables.
self.__idTable = None
# The IDBroker that goes with the idTable.
self.__idBroker = None
# The IdGenerators, keyed by type of idMethod.
self.__idGenerators = {}
def containsTable(self, table):
""" Does this database contain this specific table?
@param table The TableMap/String representation of the table.
@return True if the database contains the table.
"""
if isinstance( table, TableMap.TableMap ):
table = table.getName()
if string.find( table, "." ) > 0:
table, column = string.split(table, ".", 1)
return self.__tables.has_key(table)
def getIdTable(self):
""" Get the ID table for this database.
@return A TableMap.
"""
return self.__idTable
def getIDBroker(self):
""" Get the IDBroker for this database.
@return An IDBroker.
"""
return self.__idBroker
def getName(self):
""" Get the name of this database.
@return A String.
"""
return self.__name
def getTable(self, name):
""" Get a TableMap for the table by name.
@param name Name of the table.
@return A TableMap, null if the table was not found.
"""
return self.__tables.get(name)
def getTables(self):
""" Get all of the tables in the database.
"""
return self.__tables.values()
def addTable(self, table):
""" Add a new TableMap to the database.
@param map The Name/TableMap representation.
"""
if not isinstance( table, TableMap.TableMap ):
table = TableMap.TableMap( table, self )
self.__tables[table.getName()] = table
def setIdTable(self, idTable):
""" Set the ID table for this database.
@param idTable The Name/TableMap representation for the ID table.
"""
if not isinstance( idTable, TableMap.TableMap ):
idTable = TableMap.TableMap( idTable, self )
self.__idTable = idTable
self.addTable(idTable)
idBroker = IDBroker.IDBroker(idTable)
self.addIdGenerator(IDMethod.ID_BROKER, idBroker)
def addIdGenerator(self, idType, idGen):
""" Add a type of id generator for access by a TableMap.
@param type a <code>IDMethod</code> value
@param idGen an <code>IdGenerator</code> value
"""
if idType in IDMethod.VALID_ID_METHODS:
self.__idGenerators[ idType ] = idGen
def getIdGenerator(self, idType):
""" Get a type of id generator. Valid values are listed in the
IDMethod.
@param type a <code>IDMethod</code> value
@return an <code>IdGenerator</code> value
"""
return self.__idGenerators.get(idType)
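A hypothetical usage sketch, not part of the dataset row, of the DatabaseMap API documented in the docstrings above; the database and table names are invented, and a Python 2 environment is assumed as in the original module:

```python
# Illustrative only: register a table, then resolve it by name or by a
# "table.column" string, both of which containsTable() accepts.
db_map = DatabaseMap("bookstore")
db_map.addTable("book")                       # bare names are wrapped in a TableMap
print(db_map.containsTable("book.title"))     # True
print(db_map.getTable("book").getName())      # "book"
```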
| vecnet/om | website/tests/test_views.py | Python | mpl-2.0 | 874 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet OpenMalaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.test.testcases import TestCase
from django.urls.base import reverse
class Http500Test(TestCase):
def test(self):
url = reverse("test_http_code_500")
self.assertRaises(RuntimeError, self.client.get, url)
class IndexViewTest(TestCase):
def test(self):
response = self.client.get(reverse("index"))
self.assertEqual(response.status_code, 200)
| missionpinball/mpf-mc | mpfmc/tests/test_Animation.py | Python | mit | 18,424 | 0.000326 |
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
class TestAnimation(MpfMcTestCase):
def get_machine_path(self):
return 'tests/machine_files/animation'
def get_config_file(self):
return 'test_animation.yaml'
def test_animation_config_processing(self):
# The animation sections are more complex than regular sections,
# so they do some more complex pre-processing before they go to the
# config file validator.
# slide def, list of animations
s1w0 = self.mc.slides['slide1']['widgets'][0]['animations']
self.assertIs(type(s1w0['show_slide']), list)
self.assertEqual(len(s1w0['show_slide']), 2)
self.assertIs(type(s1w0['show_slide'][0]), dict)
self.assertIs(type(s1w0['show_slide'][1]), dict)
self.assertEqual(s1w0['show_slide'][0]['value'], ['101'])
self.assertEqual(s1w0['show_slide'][1]['value'], ['100'])
# slide def, single dict animation
s2w0 = self.mc.slides['slide2']['widgets'][0]['animations']
self.assertIs(type(s2w0['entrance2']), list)
self.assertEqual(len(s2w0['entrance2']), 1)
self.assertIs(type(s2w0['entrance2'][0]), dict)
self.assertEqual(s2w0['entrance2'][0]['value'], ['0' ,'0'])
self.assertEqual(s2w0['entrance2'][0]['property'], ['x', 'y'])
# slide def, 1 event, list of 2 named animations
s3w0 = self.mc.slides['slide3']['widgets'][0]['animations']
self.assertIs(type(s3w0['entrance3']), list)
self.assertEqual(len(s3w0['entrance3']), 2)
self.assertIs(type(s3w0['entrance3'][0]), dict)
self.assertIs(type(s3w0['entrance3'][1]), dict)
self.assertEqual(s3w0['entrance3'][0]['named_animation'], 'fade_in')
self.assertEqual(s3w0['entrance3'][1]['named_animation'], 'multi')
# slide def, 2 events, list of named animations
s4w0 = self.mc.slides['slide4']['widgets'][0]['animations']
self.assertIs(type(s4w0['entrance4']), list)
self.assertEqual(len(s4w0['entrance4']), 2)
self.assertIs(type(s4w0['entrance4'][0]), dict)
self.assertIs(type(s4w0['entrance4'][1]), dict)
self.assertEqual(s4w0['entrance4'][0]['named_animation'], 'fade_in')
self.assertEqual(s4w0['entrance4'][1]['named_animation'], 'multi')
self.assertIs(type(s4w0['some_event4']), list)
self.assertEqual(len(s4w0['some_event4']), 1)
self.assertIs(type(s4w0['some_event4'][0]), dict)
self.assertEqual(s4w0['some_event4'][0]['named_animation'], 'multi')
# slide def, 2 events, 1 named animation, 1 dict
s5w0 = self.mc.slides['slide5']['widgets'][0]['animations']
self.assertIs(type(s5w0['entrance5']), list)
self.assertEqual(len(s5w0['entrance5']), 2)
self.assertIs(type(s5w0['entrance5'][0]), dict)
self.assertIs(type(s5w0['entrance5'][1]), dict)
self.assertEqual(s5w0['entrance5'][0]['named_animation'], 'fade_in')
self.assertEqual(s5w0['entrance5'][1]['named_animation'], 'multi')
self.assertIs(type(s5w0['event5']), list)
self.assertEqual(len(s5w0['event5']), 1)
self.assertIs(type(s5w0['event5'][0]), dict)
self.assertEqual(s5w0['event5'][0]['value'], ['98'])
# slide with 1 widget with no animations
self.assertIn('animations', self.mc.slides['slide6']['widgets'][0])
self.assertIsNone(self.mc.slides['slide6']['widgets'][0]['animations'])
# Move on to test the named animations section
self.assertEqual(len(self.mc.animations), 6)
# single animation, dict
self.assertIs(type(self.mc.animations['fade_in']), list)
self.assertEqual(len(self.mc.animations['fade_in']), 1)
self.assertIs(type(self.mc.animations['fade_in'][0]), dict)
self.assertEqual(self.mc.animations['fade_in'][0]['property'],
['opacity'])
self.assertEqual(self.mc.animations['fade_in'][0]['easing'],
'linear')
# two animations, list, with values as percent strings
self.assertIs(type(self.mc.animations['multi']), list)
self.assertEqual(len(self.mc.animations['multi']), 2)
self.assertIs(type(self.mc.animations['multi'][0]), dict)
self.assertEqual(self.mc.animations['multi'][0]['property'],
['y'])
self.assertEqual(self.mc.animations['multi'][0]['easing'],
'linear')
self.assertFalse(self.mc.animations['multi'][0]['relative'])
self.assertIs(type(self.mc.animations['multi'][1]), dict)
self.assertEqual(self.mc.animations['multi'][1]['property'],
['x'])
self.assertEqual(self.mc.animations['multi'][1]['easing'],
'linear')
self.assertFalse(self.mc.animations['multi'][1]['relative'])
self.assertIs(type(self.mc.animations['advance_x_50']), list)
self.assertEqual(len(self.mc.animations['advance_x_50']), 1)
self.assertIs(type(self.mc.animations['advance_x_50'][0]), dict)
self.assertEqual(self.mc.animations['advance_x_50'][0]['property'],
['x'])
self.assertEqual(self.mc.animations['advance_x_50'][0]['easing'],
'linear')
self.assertTrue(self.mc.animations['advance_x_50'][0]['relative'])
self.assertIs(type(self.mc.animations['advance_y_50']), list)
self.assertEqual(len(self.mc.animations['advance_y_50']), 1)
self.assertIs(type(self.mc.animations['advance_y_50'][0]), dict)
self.assertEqual(self.mc.animations['advance_y_50'][0]['property'],
['y'])
self.assertEqual(self.mc.animations['advance_y_50'][0]['easing'],
'linear')
self.assertTrue(self.mc.animations['advance_y_50'][0]['relative'])
self.assertIs(type(self.mc.animations['advance_xy_50']), list)
self.assertEqual(len(self.mc.animations['advance_xy_50']), 1)
self.assertIs(type(self.mc.animations['advance_xy_50'][0]), dict)
self.assertIs(type(self.mc.animations['advance_xy_50'][0]['property']), list)
self.assertIn('x', self.mc.animations['advance_xy_50'][0]['property'])
self.assertIn('y', self.mc.animations['advance_xy_50'][0]['property'])
self.assertEqual(self.mc.animations['advance_xy_50'][0]['easing'],
'linear')
self.assertTrue(self.mc.animations['advance_xy_50'][0]['relative'])
def test_reset_animations_pre_show_slide(self):
self.mc.events.post('show_slide13')
self.advance_time(.1)
widget = self.mc.active_slides['slide13'].children[0].children[0]
self.assertAlmostEqual(-140, widget.anchor_offset_pos[0], delta=20)
self.assertEqual(100, widget.x)
self.advance_time(.5)
self.assertAlmostEqual(150, widget.x, delta=20)
self.advance_time(.5)
self.assertEqual(200.0, widget.x)
self.mc.events.post('show_base_slide')
self.advance_time()
# animations should start from orig position
self.mc.events.post('show_slide13')
self.advance_time(.1)
# refetch widget because this is another slide instance
widget = self.mc.active_slides['slide13'].children[0].children[0]
self.assertAlmostEqual(-140, widget.anchor_offset_pos[0], delta=20)
self.assertEqual(100, widget.x)
self.advance_time(.5)
self.assertAlmostEqual(150, widget.x, delta=20)
self.advance_time(.5)
self.assertEqual(200.0, widget.x)
def test_reset_animations_slide_play(self):
self.mc.events.post('show_slide14')
self.advance_time(.1)
widget = self.mc.active_slides['slide14'].children[0].children[0]
self.assertAlmostEqual(-120, widget.anchor_offset_pos[0], delta=20)
self.assertEqual(100, widget.x)
self.advance_time(.5)
self.assertAlmostEqual(150, widget.x, delta=20)
self.advance_time(.5)
self.assertEqual(200.0, widget.x)
self.mc.events.post('show_base_sli
| stackforge/python-senlinclient | senlinclient/tests/unit/v1/test_cluster_policy.py | Python | apache-2.0 | 4,136 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from senlinclient.tests.unit.v1 import fakes
from senlinclient.v1 import cluster_policy as osc_cluster_policy
class TestClusterPolicy(fakes.TestClusteringv1):
def setUp(self):
super(TestClusterPolicy, self).setUp()
self.mock_client = self.app.client_manager.clustering
class TestClusterPolicyList(TestClusterPolicy):
def setUp(self):
super(TestClusterPolicyList, self).setUp()
self.cmd = osc_cluster_policy.ClusterPolicyList(self.app, None)
fake_cluster = mock.Mock(id='C1')
self.mock_client.get_cluster = mock.Mock(return_value=fake_cluster)
fake_binding = mock.Mock(
cluster_id="7d85f602-a948-4a30-afd4-e84f47471c15",
cluster_name="my_cluster",
is_enabled=True,
id="06be3a1f-b238-4a96-a737-ceec5714087e",
policy_id="714fe676-a08f-4196-b7af-61d52eeded15",
policy_name="my_policy",
policy_type="senlin.policy.deletion-1.0"
)
fake_binding.to_dict = mock.Mock(return_value={})
self.mock_client.cluster_policies = mock.Mock(
return_value=[fake_binding])
def test_cluster_policy_list(self):
arglist = ['--sort', 'name:asc', '--filter', 'name=my_policy',
'my_cluster', '--full-id']
parsed_args = self.check_parser(self.cmd, arglist, [])
expected_columns = ['policy_id', 'policy_name', 'policy_type',
'is_enabled']
columns, data = self.cmd.take_action(parsed_args)
self.mock_client.get_cluster.assert_called_with('my_cluster')
self.mock_client.cluster_policies.assert_called_with(
'C1', name="my_policy", sort="name:asc")
self.assertEqual(expected_columns, columns)
class TestClusterPolicyShow(TestClusterPolicy):
def setUp(self):
super(TestClusterPolicyShow, self).setUp()
self.cmd = osc_cluster_policy.ClusterPolicyShow(self.app, None)
fake_binding = mock.Mock(
cluster_id="7d85f602-a948-4a30-afd4-e84f47471c15",
cluster_name="my_cluster",
is_enabled=True,
id="06be3a1f-b238-4a96-a737-ceec5714087e",
policy_id="714fe676-a08f-4196-b7af-61d52eeded15",
policy_name="my_policy",
policy_type="senlin.policy.deletion-1.0"
)
fake_binding.to_dict = mock.Mock(return_value={})
self.mock_client.get_cluster_policy = mock.Mock(
return_value=fake_binding)
def test_cluster_policy_show(self):
arglist = ['--policy', 'my_policy', 'my_cluster']
parsed_args = self.check_parser(self.cmd, arglist, [])
self.cmd.take_action(parsed_args)
self.mock_client.get_cluster_policy.assert_called_with('my_policy',
'my_cluster')
class TestClusterPolicyUpdate(TestClusterPolicy):
def setUp(self):
super(TestClusterPolicyUpdate, self).setUp()
self.cmd = osc_cluster_policy.ClusterPolicyUpdate(self.app, None)
fake_resp = {"action": "8bb476c3-0f4c-44ee-9f64-c7b0260814de"}
self.mock_client.update_cluster_policy = mock.Mock(
return_value=fake_resp)
def test_cluster_policy_update(self):
arglist = ['--policy', 'my_policy', '--enabled', 'true', 'my_cluster']
parsed_args = self.check_parser(self.cmd, arglist, [])
self.cmd.take_action(parsed_args)
self.mock_client.update_cluster_policy.assert_called_with(
'my_cluster', 'my_policy', enabled=True)
| SanaMobile/sana.mds | src/mds/api/v1/v2compatlib.py | Python | bsd-3-clause | 14,907 | 0.012075 |
""" Utilities for transforming from the 1.x to other versions.
:Authors: Sana dev team
:Version: 1.1
"""
import logging
from uuid import UUID
import re
import cjson as _json
import shutil, os
from django.contrib.auth.models import User, UserManager
from django.views.generic import RedirectView
#from django.views.generic import redirect_to
from xml.etree import ElementTree
from xml.etree.ElementTree import parse
from mds.core import models as v2
_deprecated = ('patientEnrolled',
"patientId",
"patientGender",
'patientFirstName',
'patientLastName',
'patientBirthdateDay',
'patientBirthdateMonth',
'patientBirthdateYear',
"patientIdNew",
"patientGenderNew",
'patientFirstNameNew',
'patientLastNameNew',
'patientBirthdateDayNew',
'patientBirthdateMonthNew',
'patientBirthdateYearNew',)
""" Deprecated terms not used within observations """
LCOMPLEX_TYPES = { 'PICTURE': 'image/jpeg',
'SOUND': 'audio/3gpp',
'VIDEO': 'video/3gpp',
'BINARYFILE': 'application/octet-stream'}
def redirect_to_v1(request, url, query_string=True, **kwargs):
return redirect_to(request, url, query_string=query_string, **kwargs)
class V1RedirectView(RedirectView):
query_string = True
def strip_deprecated_observations(observations):
""" Removes old bio glomming in the observation dict
Parameters:
observations
A dictionary of observations.
"""
_obs = {}
#_obs = dict([(lambda x: (k,v) for k not in deprecated)(observations)])
for k,v in observations.items():
if k not in _deprecated:
_obs[k] = v
return _obs
def element2obs(obs, allow_null=False):
""" Remaps the old format to new api Observation model for a single
observation. Returns a dictionary, not an observation instance.
"""
# Should only be one
_obs = {}
#For now we require non-empty strings
if obs['answer']:
_obs['value'] = obs['answer']
else:
return {}
node = obs.keys()[0]
_obs['node'] = node
_obs['concept'] = obs['concept']
return _obs
def elements2obs(observations, allow_null=False):
""" Remaps the old format to new api Observation model. Returns only the
text dictionary, not the actual observations.
"""
_obs_set = {}
for k,v in observations.items():
_obs = {}
#For now we require non-empty strings
if v['answer']:
_obs['value'] = v['answer']
else:
continue
_obs['node'] = k
_obs['concept'] = v['concept']
logging.debug('Obs: %s' % _obs)
_obs_set[k] = _obs
return _obs_set
# TODO
def bchunk2bpacket(form):
""" Converts the old binary chunk packet form into the v2 api
"""
encounter = form.cleaned_data['procedure_guid']
node = form.cleaned_data['element_id']
subnode = form.cleaned_data['binary_guid']
node = '%s-%s'% (node, subnode)
type = form.cleaned_data['element_type']
size = form.cleaned_data['file_size']
offset = form.cleaned_data['byte_start']
byte_end = form.cleaned_data['byte_end']
return {}
class LProcedureParsable:
""" A new parsed legacy procedure backed by an ElementTree.
The default behavior of the constructor is to use the
text parameter as the xml if both text and xml are not None
Parameters
text
An xml string
path
The path to a file containing the xml to parse
"""
def __init__(self, text=None, path=None ):
self.root = None
self._parse(text=text, path=path)
def _parse(self, text=None, path=None):
if text:
self.root = ElementTree.XML(text)
elif path:
self.root = parse(path).getroot()
def __call__(self,text=None, path=None):
self._parse(text=text, path=path)
@property
def concepts(self):
def _map(x):
mime = LCOMPLEX_TYPES.get(x.attrib['type'], 'text/plain')
return { 'name' : x.attrib['concept'],
'description' : x.attrib['question'],
'is_complex' : (mime != 'text/plain'),
'data_type' : mime }
return list(_map(x) for x in self.root.findall('Page/Element'))
@property
def pages(self):
return list(x for x in self.root.findall('Page'))
@property
def elements(self):
return list(x.attrib for x in self.root.findall('Page/Element'))
def to_python(self):
''' Converts the parsed object to a dict '''
_p = {}
self._rdict(_p, self.root)
return _p
    def _rdict(self, pdict, node, indent=0):
        """ Recursive helper method for building the python object """
        # append all of the children as 'tag': dict of the node's attributes
        # plus a dict per child element
        _current = dict(node.attrib)
        for n in list(node):
            _a = {}
            self._rdict(_a, n, indent+4)
            if n.tag in _current:
                # collect repeated child tags into a list
                if not isinstance(_current[n.tag], list):
                    _current[n.tag] = [_current[n.tag]]
                _current[n.tag].append(_a)
            else:
                _current[n.tag] = _a
        pdict[node.tag] = _current
lpp = LProcedureParsable
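# Minimal usage sketch for LProcedureParsable / lpp; the XML layout (a root
# element containing Page/Element nodes with 'type', 'concept' and 'question'
# attributes) is assumed from the findall() calls above and the attribute
# values here are illustrative:
#
#   xml = ('<Procedure><Page>'
#          '<Element id="1" type="PICTURE" concept="PHOTO" question="Take a photo"/>'
#          '</Page></Procedure>')
#   parsed = lpp(text=xml)
#   parsed.concepts
#   # -> [{'name': 'PHOTO', 'description': 'Take a photo',
#   #      'is_complex': True, 'data_type': 'image/jpeg'}]
#   parsed.elements   # raw attribute dicts, one per Page/Element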
def responses_to_observations(encounter, responses,sort=False,reverse=False):
observations = []
logging.info("Converting %d observations" % len(responses))
# Begin loop over observations
for node,data in responses.items():
obs = None
concept_name = data.get('concept', None)
logging.info("" + node + ":"+concept_name)
if not concept_name:
logging.debug("No concept_name")
continue
try:
concept = v2.Concept.objects.get(name=concept_name)
except:
logging.error("Unable to find concept with name: %s" % concept_name)
continue
answer = data['answer']
uuid = data['uuid']
if concept and concept.is_complex:
logging.info("
|
Got complex concept: node: %s, answer: %s" % (node,answer))
if not answer or answer == "":
continue
answer, _, additional = answer.partition(',')
# Begin Complex obs loop
while True:
                value_text = "complex data"
try:
obs = v2.Observation.objects.get(
uuid=uuid)
obs.encounter=encounter
obs.node=node
obs.concept=concept
obs.value_text=value_text
obs.save()
logging.debug("Updated complex observation: %s" % obs.uuid)
except:
logging.info("Creating new complex obs for encounter: %s" % encounter.uuid)
obs = v2.Observation.objects.create(
uuid=data['uuid'],
encounter=encounter,
node=node,
concept=concept,
value_text=answer)
obs.save()
logging.debug("Created complex observation: %s" % obs.uuid)
observations.append(obs)
                if not additional:
                    break
                answer, _, additional = additional.partition(',')
# End complex obs loop
else:
answer = answer if answer else 'no response'
try:
obs = v2.Observation.objects.get(
uuid=uuid)
obs.encounter=encounter
obs.node=node
obs.concept=concept
obs.value_text = answer
obs.save()
logging.debug("Updated observation: %s" % obs.uuid)
except:
logging.info("Creating new obs for encounter: %s" %
esdalmaijer/PyOpenGaze | example/example_pygaze.py | Python | gpl-3.0 | 561 | 0.008913
from pygaze.display import Display
from pygaze.screen import Screen
from pygaze.eyetracker import EyeTracker
import pygaze.libtime as timer
disp = Display()
scr = Screen()
scr.draw_text("Preparing experiment...", fontsize=20)
disp.fill(scr)
disp.show()
tracker = EyeTracker(disp)
tracker.calibrate()
tracker.start_recording()
t0 = timer.get_time()
while timer.get_time() - t0 < 5000:
    gazepos = tracker.sample()
scr.clear()
scr.draw_fixation(fixtype='dot', pos=gazepos)
disp.fill(scr)
disp.show()
tracker.stop_recording()
tracker.close()
disp.close()
obi-two/Rebelion | data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_sfs_imperial_4.py | Python | mit | 482 | 0.045643
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/reactor/shared_rct_sfs_imperial_4.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","rct_sfs_imperial_4_n")
    #### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
Geoion/TorCMS | torlite/handlers/reply_handler.py | Python | mit | 3,236 | 0.003399
# -*- coding:utf-8 -*-
'''
Author: Bu Kun
E-mail: bukun@osgeo.cn
CopyRight: http://www.yunsuan.org
'''
import tornado.web
import tornado.escape
import json
from torlite.core import tools
from torlite.core.base_handler import BaseHandler
from torlite.model.mwiki import MWiki
from torlite.model.mcatalog import MCatalog
from torlite.model.mspec import SpesubModel
from torlite.model.mwiki_hist import MWikiHist
from torlite.model.muser import MUser
from torlite.model.mreply import MReply
from torlite.model.mreply2user import MReply2User
from torlite.model.core_tab import CabReply
from torlite.model.core_tab import CabUser2Reply
class ReplyHandler(BaseHandler):
def initialize(self):
self.muser = MUser()
self.mreply = MReply()
self.mreply2user = MReply2User()
if self.get_current_user():
self.userinfo = self.muser.get_by_id(self.get_current_user())
else:
self.userinfo = None
def get(self, url_str=''):
if url_str == '':
return
url_arr = url_str.split(r'/')
if url_arr[0] == 'delete_reply':
self.delete_by_id(url_arr[1])
elif url_arr[0] == 'get':
self.get_by_id(url_arr[1])
elif url_arr[0] == 'zan':
self.zan(url_arr[1])
def get_by_id(self, reply_id):
reply = self.mreply.get_reply_by_uid(reply_id)
self.render( 'tplite/reply/show_reply.html',
cnt = reply.cnt_md,
username=reply.user_name,
date=reply.date,
vote=reply.vote,
uid=reply.uid,
userinfo=self.userinfo,
)
# @tornado.web.authenticated
# def add_one(self, id_reply):
# # post_data = {}
# # for key in self.request.arguments:
    # # post_data[key] = self.get_arguments(key)
# # post_data['user_id'] = self.userinfo.uid
#
    # cur_count = self.mreply2user.insert_data(self.userinfo.uid, id_reply)
# if cur_count:
# self.mreply.update_vote(id_reply, cur_count)
#
# out_dic = {'zan_count': cur_count}
# return json.dump(out_dic)
@tornado.web.authenticated
def zan(self, id_reply):
post_data = {}
for key in self.request.arguments:
post_data[key] = self.get_arguments(key)
post_data['user_id'] = self.userinfo.uid
# zs = CabUser2Reply.select().where(CabUser2Reply.reply_id == id_reply).count()
cur_count = self.mreply2user.insert_data(self.userinfo.uid, id_reply)
if cur_count:
self.mreply.update_vote(id_reply, cur_count)
output = {
'text_zan': cur_count,
}
else:
output = {
'text_zan': 0,
}
return json.dump(output, self)
def delete_by_id(self, del_id):
is_deleted = self.mreply2user.delete(del_id)
# self.redirect('/post/{0}.html'.format(del_id))
if is_deleted:
output = {
'del_zan': 1
}
else:
output = {
'del_zan': 0,
}
return json.dump(output, self)
VHAINNOVATIONS/DmD | scrubber/MIST_2_0_4/src/MAT/lib/mat/python/MAT/Utilities.py | Python | apache-2.0 | 719 | 0.009736
# Copyright (C) 2011 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# I just have no place for this.
import socket
import MAT
def findPort(startingAt):
while True:
v = socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1', startingAt))
        if v == 0:
startingAt += 1
else:
return startingAt
def portTaken(portNum):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1', portNum)) == 0
def configurationPortTaken(configName):
return portTaken(int(MAT.Config.MATConfig[configName]))
def configurationFindPort(configName):
return findPort(int(MAT.Config.MATConfig[configName]))
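# Usage sketch (ports are illustrative; behaviour follows the connect_ex()
# checks above):
#
#   findPort(8000)    # -> first port >= 8000 with nothing listening on 127.0.0.1
#   portTaken(8080)   # -> True if something is listening on 127.0.0.1:8080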
phil-mansfield/gotetra | render/scripts/plot_caustic_3d.py | Python | mit | 637 | 0.00157
import numpy as np
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
if len(sys.argv) != 2:
print "Correct usage: $ %s caustic_file" % sys.argv[1]
exit(1)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
rows = np.loadtxt(sys.argv[1])
rs = np.array(zip(*rows)[-1])
max_r = np.max(rs)
for row in rows:
x, y, z, r = row
line_r = max_r if np.isnan(r) else r
term_x, term_y, term_z = x * line_r, y * line_r, z * line_r
ax.plot([0, term_x], [0, term_y], [0, term_z], c="k")
if not np.isnan(r):
        ax.scatter([term_x], [term_y], [term_z], c="r")
plt.show()
northern-bites/nao-man | noggin/players/pData.py | Python | gpl-3.0 | 1,354 | 0.004431
import os
from . import SoccerFSA
from . import DataStates
class SoccerPlayer(SoccerFSA.SoccerFSA):
def __init__(self, brain):
SoccerFSA.SoccerFSA.__init__(self,brain)
self.addStates(DataStates)
self.setName('pData')
self.postDistance = 50
self.lastDistance = 0
# Specify which object is being studied
self.objects = (self.brain.ygrp, self.brain.yglp)
def savePostInfo(self):
both_zero = True
for obj in self.objects:
if obj.dist != 0.0:
both_zero = False
break
if both_zero:
return
        filename = "/home/root/postDistData" + str(self.postDistance) + ".csv"
# need to remove it if it exists already and make way
# for new data
if self.lastDistance != self.postDistance and \
os.path.exists(filename):
self.lastDistance = self.postDistance
os.remove(filename)
csv = open(filename,'a+')
csv.write("dist,bearing\n")
else :
csv = open(filename,'a+')
for obj in self.objects:
if obj.dist !=0.0 and abs(obj.dist - self.postDistance) < 100:
csv.write(str(obj.dist) + "," + str(obj.bearing) + '\n')
print obj.dist, obj.bearing
csv.close()
simleo/openmicroscopy | components/tools/OmeroPy/test/unit/test_model.py | Python | gpl-2.0 | 12,180 | 0.000493
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple unit test which makes various calls on the code
generated model.
Copyright 2007-2014 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import pytest
import omero
import omero.clients
from omero_model_ChannelI import ChannelI
from omero_model_PixelsI import PixelsI
from omero_model_ImageI import ImageI
from omero_model_DatasetI import DatasetI
from omero_model_ExperimenterI import ExperimenterI
from omero_model_ExperimenterGroupI import ExperimenterGroupI
from omero_model_GroupExperimenterMapI import GroupExperimenterMapI
from omero_model_DatasetImageLinkI import DatasetImageLinkI
from omero_model_ScriptJobI import ScriptJobI
from omero_model_DetailsI import DetailsI
from omero_model_ElectricPotentialI import ElectricPotentialI
from omero_model_FrequencyI import FrequencyI
from omero_model_LengthI import LengthI
from omero_model_PowerI import PowerI
from omero_model_PressureI import PressureI
from omero_model_TemperatureI import TemperatureI
from omero_model_TimeI import TimeI
from omero.rtypes import rbool
from omero.rtypes import rlong
from omero.rtypes import rstring
from omero.rtypes import rtime
class TestProxyString(object):
@pytest.mark.parametrize("data", (
("", None, None, None),
("1", None, None, None),
("Image", None, None, None),
("ImageI", None, None, None),
("Image:1", None, ImageI, 1),
("ImageI:1", None, ImageI, 1),
("ImageI:1", "ImageI", ImageI, 1),
("Image:1", "ImageI", ImageI, 1),
("1", "ImageI", ImageI, 1),
("1", "Image", ImageI, 1),
))
def testAll(self, data):
source = data[0]
default = data[1]
type = data[2]
id = data[3]
err = (type is None and id is None)
try:
obj = omero.proxy_to_instance(source, default)
assert isinstance(obj, type)
assert obj.id == id
assert not obj.loaded
if err:
assert False, "should have raised"
except Exception, e:
if not err:
assert "should not have raised", e
class TestModel(object):
def testVirtual(self):
img = ImageI()
imgI = ImageI()
img.unload()
imgI.unload()
def testUnloadCollections(self):
pix = PixelsI()
assert pix.sizeOfSettings() >= 0
pix.unloadCollections()
assert pix.sizeOfSettings() < 0
def testSimpleCtor(self):
img = ImageI()
assert img.isLoaded()
assert img.sizeOfPixels() >= 0
def testUnloadedCtor(self):
img = ImageI(rlong(1), False)
assert not img.isLoaded()
try:
assert img.sizeOfDatasetLinks() < 0
assert False, "Should throw"
except:
# Is true, but can't test it.
pass
def testUnloadCheckPtr(self):
img = ImageI()
assert img.isLoaded()
assert img.getDetails() # details are auto instantiated
assert not img.getName() # no other single-valued field is
img.unload()
assert not img.isLoaded()
pytest.raises(omero.UnloadedEntityException, img.getDetails)
def testUnloadField(self):
img = ImageI()
assert img.getDetails()
img.unloadDetails()
assert not img.getDetails()
def testSequences(self):
img = ImageI()
assert img.sizeOfAnnotationLinks() >= 0
img.linkAnnotation(None)
img.unload()
try:
assert not img.sizeOfAnnotationLinks() >= 0
assert len(img.copyAnnotationLinks()) == 0
assert False, "can't reach here"
except:
# These are true, but can't be tested
pass
def testAccessors(self):
name = rstring("name")
img = ImageI()
assert not img.getName()
img.setName(name)
assert img.getName()
name = img.getName()
assert name.val == "name"
assert name == name
img.setName(rstring("name2"))
assert img.getName().val == "name2"
assert img.getName()
img.unload()
try:
assert not img.getName()
assert False, "should fail"
except:
# Is true, but cannot test
pass
def testUnloadedAccessThrows(self):
unloaded = ImageI(rlong(1), False)
pytest.raises(omero.UnloadedEntityException, unloaded.getName)
def testIterators(self):
d = DatasetI()
image = ImageI()
image.linkDataset(d)
it = image.iterateDatasetLinks()
count = 0
for i in it:
count += 1
assert count == 1
def testClearSet(self):
img = ImageI()
assert img.sizeOfPixels() >= 0
img.addPixels(PixelsI())
assert 1 == img.sizeOfPixels()
img.clearPixels()
assert img.sizeOfPixels() >= 0
assert 0 == img.sizeOfPixels()
def testUnloadSet(self):
img = ImageI()
assert img.sizeOfPixels() >= 0
img.addPixels(PixelsI())
assert 1 == img.sizeOfPixels()
img.unloadPixels()
assert img.sizeOfPixels() < 0
# Can't check size assert 0==img.sizeOfPixels()
def testRemoveFromSet(self):
pix = PixelsI()
img = ImageI()
assert img.sizeOfPixels() >= 0
img.addPixels(pix)
assert 1 == img.sizeOfPixels()
img.removePixels(pix)
assert 0 == img.sizeOfPixels()
def testLinkGroupAndUser(self):
user = ExperimenterI()
group = ExperimenterGroupI()
link = GroupExperimenterMapI()
link.id = rlong(1)
link.link(group, user)
user.addGroupExperimenterMap(link, False)
group.addGroupExperimenterMap(link, False)
count = 0
for i in user.iterateGroupExperimenterMap():
count += 1
assert count == 1
def testLinkViaLink(self):
user = ExperimenterI()
user.setFirstName(rstring("test"))
user.setLastName(rstring("user"))
user.setOmeName(rstring("UUID"))
user.setLdap(rbool(False))
# possibly setOmeName() and setOmeName(string) ??
# and then don't need omero/types.h
group = ExperimenterGroupI()
# TODOuser.linkExperimenterGroup(group)
link = GroupExperimenterMapI()
link.parent = group
link.child = user
def testLinkingAndUnlinking(self):
d = DatasetI()
i = ImageI()
d.linkImage(i)
assert d.sizeOfImageLinks() == 1
d.unlinkImage(i)
assert d.sizeOfImageLinks() == 0
d = DatasetI()
i = ImageI()
d.linkImage(i)
assert i.sizeOfDatasetLinks() == 1
i.unlinkDataset(d)
assert d.sizeOfImageLinks() == 0
d = DatasetI()
i = ImageI()
dil = DatasetImageLinkI()
dil.link(d, i)
d.addDatasetImageLink(dil, False)
assert d.sizeOfImageLinks() == 1
assert i.sizeOfDatasetLinks() == 0
def testScriptJobHasLoadedCollections(self):
s = ScriptJobI()
assert s.sizeOfOriginalFileLinks() >= 0
#
# Python specific
#
def testGetAttrGood(self):
i = ImageI()
assert i.loaded
assert i.isLoaded()
assert not i.name
i.name = rstring("name")
assert i.name
i.setName(None)
assert not i.getName()
i.copyAnnotationLinks()
i.linkAnnotation(omero.model.BooleanAnnotationI())
def testGetAttrBad(self):
i = ImageI()
def assign_loaded():
i.loaded = False
pytest.raises(AttributeError, assign_loaded)
pytest.raises(AttributeError, lambda: i.foo)
def assign_foo():
i.foo = 1
pytest.raises(AttributeError, assign_foo)
pytest.raises(AttributeError, lambda: i.annotationLinks)
pytest.raises(AttributeError, lambda: i.getAnnotationLinks())
def assign_links():
i.annotationLinks = []
pytes
13pi/HipstaChat | api/decorators.py | Python | gpl-2.0 | 684 | 0.002924
from datetime import datetime
from api.utils import api_response
def auth_required(fn):
    def wrapped(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return api_response({"error": "not authenticated"}, status=401)
request.user.last_action=datetime.now().replace(tzinfo=None)
request.user.save()
return fn(self, request, *args, **kwargs)
return wrapped
def payload_required(fn):
def wrapped(self, request, *args, **kwargs):
if not request.body:
return api_response({"error": "payload required"}, status=403)
        return fn(self, request, *args, **kwargs)
return wrapped
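# A sketch of how these decorators would typically be applied to an API view
# method (the view class and response payloads below are hypothetical):
#
#   class ContactListView(object):
#
#       @auth_required
#       def get(self, request):
#           return api_response({"contacts": []})
#
#       @payload_required
#       @auth_required
#       def post(self, request):
#           return api_response({"created": True})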
cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/tool/commands/upload.py | Python | gpl-3.0 | 22,709 | 0.003479
#!/usr/bin/env python
# Copyright (c) 2009, 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import sys
from optparse import make_option
from webkitpy.tool import steps
from webkitpy.common.checkout.changelog import parse_bug_id_from_changelog
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.system.deprecated_logging import error, log
from webkitpy.common.system.user import User
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.comments import bug_comment_from_svn_revision
from webkitpy.tool.grammar import pluralize, join_with_separators
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
name = "commit-message"
help_text = "Print a commit message suitable for the uncommitted changes"
def __init__(self):
options = [
steps.Options.git_commit,
]
AbstractDeclarativeCommand.__init__(self, options=options)
def execute(self, options, args, tool):
# This command is a useful test to make sure commit_message_for_this_commit
# always returns the right value regardless of the current working directory.
print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message()
class CleanPendingCommit(AbstractDeclarativeCommand):
name = "clean-pending-commit"
help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list."
# NOTE: This was designed to be generic, but right now we're only processing patches from the pending-commit list, so only r+ matters.
def _flags_to_clear_on_patch(self, patch):
if not patch.is_obsolete():
return None
what_was_cleared = []
if patch.review() == "+":
if patch.reviewer():
                what_was_cleared.append("%s's review+" % patch.reviewer().full_name)
else:
what_was_cleared.append("review+")
return join_with_separators(what_was_cleared)
def execute(self, options, args, tool):
        committers = CommitterList()
for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
bug = self._tool.bugs.fetch_bug(bug_id)
patches = bug.patches(include_obsolete=True)
for patch in patches:
flags_to_clear = self._flags_to_clear_on_patch(patch)
if not flags_to_clear:
continue
message = "Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id())
self._tool.bugs.obsolete_attachment(patch.id(), message)
# FIXME: This should be share more logic with AssignToCommitter and CleanPendingCommit
class CleanReviewQueue(AbstractDeclarativeCommand):
name = "clean-review-queue"
help_text = "Clear r? on obsolete patches so they do not appear in the pending-review list."
def execute(self, options, args, tool):
queue_url = "http://webkit.org/pending-review"
# We do this inefficient dance to be more like webkit.org/pending-review
# bugs.queries.fetch_bug_ids_from_review_queue() doesn't return
# closed bugs, but folks using /pending-review will see them. :(
for patch_id in tool.bugs.queries.fetch_attachment_ids_from_review_queue():
patch = self._tool.bugs.fetch_attachment(patch_id)
if not patch.review() == "?":
continue
            attachment_obsolete_modifier = ""
            bug_closed_explanation = ""
if patch.is_obsolete():
attachment_obsolete_modifier = "obsolete "
elif patch.bug().is_closed():
bug_closed_explanation = " If you would like this patch reviewed, please attach it to a new bug (or re-open this bug before marking it for review again)."
else:
                # Neither the patch was obsolete nor the bug closed, next patch...
continue
message = "Cleared review? from %sattachment %s so that this bug does not appear in %s.%s" % (attachment_obsolete_modifier, patch.id(), queue_url, bug_closed_explanation)
self._tool.bugs.obsolete_attachment(patch.id(), message)
class AssignToCommitter(AbstractDeclarativeCommand):
name = "assign-to-committer"
help_text = "Assign bug to whoever attached the most recent r+'d patch"
def _patches_have_commiters(self, reviewed_patches):
for patch in reviewed_patches:
if not patch.committer():
return False
return True
def _assign_bug_to_last_patch_attacher(self, bug_id):
committers = CommitterList()
bug = self._tool.bugs.fetch_bug(bug_id)
if not bug.is_unassigned():
assigned_to_email = bug.assigned_to_email()
log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
return
reviewed_patches = bug.reviewed_patches()
if not reviewed_patches:
log("Bug %s has no non-obsolete patches, ignoring." % bug_id)
return
# We only need to do anything with this bug if one of the r+'d patches does not have a valid committer (cq+ set).
if self._patches_have_commiters(reviewed_patches):
log("All reviewed patches on bug %s already have commit-queue+, ignoring." % bug_id)
return
latest_patch = reviewed_patches[-1]
attacher_email = latest_patch.attacher_email()
committer = committers.committer_by_email(attacher_email)
if not committer:
log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
return
reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)
def execute(self, options, args, tool):
for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
self._assign_bug_to_last_patch_attacher(bug_id)
class ObsoleteAttachments(AbstractSequencedCommand):
name = "obsolete-attachments"
help_text = "Mark all attachments on a bug as obsolete"
argument_names = "BUGID"
steps = [
steps.ObsoletePatches,
]
def _prepare_state(self, options, args, tool):
return { "bug_
wgoulet/CTPyClient | fetchroots.py | Python | apache-2.0 | 1,184 | 0.017736
import os
import base64
from requests import Session, Request
from OpenSSL import crypto
url = 'http://ct.googleapis.com/aviator/ct/v1/get-roots'
s = Session()
r = Request('GET',
url)
prepped = r.prepare()
r = s.send(prepped)
if r.status_code == 200:
roots = r.json()
# RFC 6962 defines the certificate objects as base64 encoded certs.
# Importantly, these are not PEM formatted certs but base64 encoded
# ASN.1 (DER) encoded
for i in roots:
certs = roots[i]
for k in certs:
try:
certobj = crypto.load
|
_certificate(crypto.FILETYPE_ASN1,base64.b64decode(k))
subject = certobj.get_subject()
                print 'CN={},OU={},O={},L={},S={},C={}'.format(subject.commonName,
                    subject.organizationalUnitName,
                    subject.organizationName,
                    subject.localityName,
                    subject.stateOrProvinceName,
                    subject.countryName)
except:
print subject.get_components()
googlearchive/titan | titan/common/lib/google/apputils/datelib.py | Python | apache-2.0 | 15,413 | 0.007591
#!/usr/bin/env python
# Copyright 2002 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of classes and functions for dealing with dates and timestamps.
The BaseTimestamp and Timestamp are timezone-aware wrappers around Python
datetime.datetime class.
"""
import calendar
import copy
import datetime
import re
import sys
import time
import types
import warnings
from dateutil import parser
import pytz
_MICROSECONDS_PER_SECOND = 1000000
_MICROSECONDS_PER_SECOND_F = float(_MICROSECONDS_PER_SECOND)
def SecondsToMicroseconds(seconds):
"""Convert seconds to microseconds.
Args:
seconds: number
Returns:
microseconds
"""
return seconds * _MICROSECONDS_PER_SECOND
def MicrosecondsToSeconds(microseconds):
"""Convert microseconds to seconds.
Args:
microseconds: A number representing some duration of time measured in
microseconds.
Returns:
A number representing the same duration of time measured in seconds.
"""
return microseconds / _MICROSECONDS_PER_SECOND_F
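# Round-trip sketch for the two helpers above (values are illustrative):
#
#   SecondsToMicroseconds(2)        # -> 2000000
#   MicrosecondsToSeconds(1500000)  # -> 1.5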
def _GetCurrentTimeMicros():
"""Get the current time in microseconds, in UTC.
Returns:
The number of microseconds since the epoch.
"""
  return int(SecondsToMicroseconds(time.time()))
def GetSecondsSinceEpoch(time_tuple):
"""Convert time_tuple (in UTC) to seconds (also in UTC).
Args:
time_tuple: tuple with at least 6 items.
Returns:
seconds.
"""
return calendar.timegm(time_tuple[:6] + (0, 0, 0))
def GetTimeMicros(time_tuple):
"""Get a time in microseconds.
Arguments:
time_tuple: A (year, month, day, hour, minute, second) tuple (the python
time tuple format) in the UTC time zone.
Returns:
    The number of microseconds since the epoch represented by the input tuple.
"""
return int(SecondsToMicroseconds(GetSecondsSinceEpoch(time_tuple)))
def DatetimeToUTCMicros(date):
"""Converts a datetime object to microseconds since the epoch in UTC.
Args:
date: A datetime to convert.
Returns:
The number of microseconds since the epoch, in UTC, represented by the input
datetime.
"""
# Using this guide: http://wiki.python.org/moin/WorkingWithTime
# And this conversion guide: http://docs.python.org/library/time.html
# Turn the date parameter into a tuple (struct_time) that can then be
# manipulated into a long value of seconds. During the conversion from
  # struct_time to long, the source date is in UTC, and so it follows that the
# correct transformation is calendar.timegm()
micros = calendar.timegm(date.utctimetuple()) * _MICROSECONDS_PER_SECOND
return micros + date.microsecond
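# Sanity-check sketch for the conversion above (a naive datetime is read as
# UTC by utctimetuple()):
#
#   DatetimeToUTCMicros(datetime.datetime(1970, 1, 1, 0, 0, 1))  # -> 1000000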
def DatetimeToUTCMillis(date):
"""Converts a datetime object to milliseconds since the epoch in UTC.
Args:
date: A datetime to convert.
Returns:
The number of milliseconds since the epoch, in UTC, represented by the input
datetime.
"""
return DatetimeToUTCMicros(date) / 1000
def UTCMicrosToDatetime(micros, tz=None):
"""Converts a microsecond epoch time to a datetime object.
Args:
micros: A UTC time, expressed in microseconds since the epoch.
tz: The desired tzinfo for the datetime object. If None, the
datetime will be naive.
Returns:
The datetime represented by the input value.
"""
# The conversion from micros to seconds for input into the
# utcfromtimestamp function needs to be done as a float to make sure
  # we don't lose the sub-second resolution of the input time.
dt = datetime.datetime.utcfromtimestamp(
micros / _MICROSECONDS_PER_SECOND_F)
if tz is not None:
dt = tz.fromutc(dt)
return dt
def UTCMillisToDatetime(millis, tz=None):
"""Converts a millisecond epoch time to a datetime object.
Args:
millis: A UTC time, expressed in milliseconds since the epoch.
tz: The desired tzinfo for the datetime object. If None, the
datetime will be naive.
Returns:
The datetime represented by the input value.
"""
return UTCMicrosToDatetime(millis * 1000, tz)
UTC = pytz.UTC
US_PACIFIC = pytz.timezone('US/Pacific')
class TimestampError(ValueError):
"""Generic timestamp-related error."""
pass
class TimezoneNotSpecifiedError(TimestampError):
"""This error is raised when timezone is not specified."""
pass
class TimeParseError(TimestampError):
"""This error is raised when we can't parse the input."""
pass
# TODO(user): this class needs to handle daylight better
class LocalTimezoneClass(datetime.tzinfo):
"""This class defines local timezone."""
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
STDOFFSET = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
DSTOFFSET = datetime.timedelta(seconds=-time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
def utcoffset(self, dt):
"""datetime -> minutes east of UTC (negative for west of UTC)."""
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC."""
if self._isdst(dt):
return self.DSTDIFF
else:
return self.ZERO
def tzname(self, dt):
"""datetime -> string name of time zone."""
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
"""Return true if given datetime is within local DST."""
tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
def __repr__(self):
"""Return string '<Local>'."""
return '<Local>'
def localize(self, dt, unused_is_dst=False):
"""Convert naive time to local time."""
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, unused_is_dst=False):
"""Correct the timezone information on the given datetime."""
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
LocalTimezone = LocalTimezoneClass()
class BaseTimestamp(datetime.datetime):
"""Our kind of wrapper over datetime.datetime.
The objects produced by methods now, today, fromtimestamp, utcnow,
utcfromtimestamp are timezone-aware (with correct timezone).
We also overload __add__ and __sub__ method, to fix the result of arithmetic
operations.
"""
LocalTimezone = LocalTimezone
@classmethod
def AddLocalTimezone(cls, obj):
"""If obj is naive, add local timezone to it."""
if not obj.tzinfo:
return obj.replace(tzinfo=cls.LocalTimezone)
return obj
@classmethod
def Localize(cls, obj):
"""If obj is naive, localize it to cls.LocalTimezone."""
if not obj.tzinfo:
return cls.LocalTimezone.localize(obj)
return obj
def __add__(self, *args, **kwargs):
"""x.__add__(y) <==> x+y."""
r = super(BaseTimestamp, self).__add__(*args, **kwargs)
return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second,
r.microsecond, r.tzinfo)
def __sub__(self, *args, **kwargs):
"""x.__add__(y) <==> x-y."""
r = super(BaseTimestamp, self).__sub__(*args, **kwargs)
if isinstance(r, datetime.datetime):
return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second,
r.microsecond, r.tzinfo)
return r
@classmethod
def now(cls, *args, **kwargs):
"""Get a timestamp corresponding to right now.
Args:
args: Positional arguments to pass to datetime.datetime.now().
kwargs: Keyword arguments to pass to datetime.datetime.now(). If tz is not
spec
lgastako/loglint | example.py | Python | mit | 413 | 0
#!/usr/bin/env python
import logging
import optparse
from zillion.utils import cmdline
logger = logging.getLogger(__name__)
def main():
parser = optparse.OptionParser()
options, args = parser.parse_args()
logger.debug("foo")
logger.debug("foo", 1)
logger.debug("foo: %s", 1)
logger.debug("foo: %s %s",
|
2, 3, 4)
logger.debug("foo: %s %s", 5)
cmdline.entry_point(__name__, main)
whitesource/python-plugin | agent/api/model/Coordinates.py | Python | apache-2.0 | 559 | 0.003578
class Coordinates:
""" WhiteSource model for
|
artifact's coordinates. """
def __init__(self, group_id, artifact_id, version_id):
self.groupId = group_id
self.artifactId = artifact_id
self.versionId = version_id
def create_project_coordinates(distribution):
""" Creates a 'Coordinates' instance for the user package"""
dist_name = distribution.get_name()
dist_version = distribution.get_version()
    coordinates = Coordinates(group_id=None, artifact_id=dist_name, version_id=dist_version)
return coordinates
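# Usage sketch for the model above (the group/artifact/version values are
# illustrative only):
#
#   coords = Coordinates(group_id='org.example', artifact_id='my-package', version_id='1.0.0')
#   coords.artifactId                 # -> 'my-package'
#   create_project_coordinates(dist)  # dist must expose get_name() and get_version()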
sasha-gitg/python-aiplatform | google/cloud/aiplatform_v1/services/migration_service/async_client.py | Python | apache-2.0 | 17,064 | 0.001641
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.migration_service import pagers
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
from .client import MigrationServiceClient
class MigrationServiceAsyncClient:
"""A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
"""
_client: MigrationServiceClient
DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT
annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path)
parse_annotated_dataset_path = staticmethod(
MigrationServiceClient.parse_annotated_dataset_path
)
dataset_path = staticmethod(MigrationServiceClient.dataset_path)
parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
dataset_path = staticmethod(MigrationServiceClient.dataset_path)
parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
dataset_path = staticmethod(MigrationServiceClient.dataset_path)
parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
model_path = staticmethod(MigrationServiceClient.model_path)
parse_model_path = staticmethod(MigrationServiceClient.parse_model_path)
model_path = staticmethod(MigrationServiceClient.model_path)
parse_model_path = staticmethod(MigrationServiceClient.parse_model_path)
version_path = staticmethod(MigrationServiceClient.version_path)
parse_version_path = staticmethod(MigrationServiceClient.parse_version_path)
common_billing_account_path = staticmethod(
MigrationServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
MigrationServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(MigrationServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
MigrationServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
MigrationServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
MigrationServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(MigrationServiceClient.common_project_path)
parse_common_project_path = staticmethod(
MigrationServiceClient.parse_common_project_path
)
common_location_path = staticmethod(MigrationServiceClient.common_location_path)
parse_common_location_path = staticmethod(
MigrationServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceAsyncClient: The constructed client.
"""
return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceAsyncClient: The constructed client.
"""
return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> MigrationServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MigrationServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, MigrationServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the migration service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MigrationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
hubinary/flasky | app/main/errors.py | Python | mit | 255 | 0.003922
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
    return render_template('500.html'), 500
fnl/libfnl | src/fnl/stat/textclass.py | Python | agpl-3.0 | 15,148 | 0.001783
"""
.. py:module:: fnl.stat.textclass
:synopsis: Tools for developing a text classifier.
.. moduleauthor:: Florian Leitner <florian.leitner@gmail.com>
.. License: GNU Affero GPL v3 (http://www.gnu.org/licenses/agpl.html)
"""
from collections import defaultdict, namedtuple, Counter
from itertools import chain
from functools import partial
import numpy as np
from sklearn import metrics
from sklearn.externals import joblib, six
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.joblib import delayed
from sklearn.feature_extraction import DictVectorizer
from sklearn.grid_search import GridSearchCV
# Note: the minority label (always first, i.e., at index 0)
# should be used as the positive label to ensure
# precision and recall produce meaningful results
# and that the F-score is robust.
from fnl.text.sentence import SentenceParser, Sentence
METRICS = [
('Accuracy', metrics.accuracy_score),
('Precision', partial(metrics.precision_score, pos_label=0)),
('Recall', partial(metrics.recall_score, pos_label=0)),
('F1-score', partial(metrics.f1_score, pos_label=0)),
('MCC score', metrics.matthews_corrcoef),
]
# A scoring function that is robust against class-imbalances.
Scorer = metrics.make_scorer(metrics.matthews_corrcoef)
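# A sketch of how the MCC-based Scorer above would be plugged into a grid
# search (the estimator pipeline and parameter grid are placeholders):
#
#   grid = GridSearchCV(pipeline, param_grid, scoring=Scorer,
#                       cv=StratifiedKFold(labels, n_folds=5))
#   grid.fit(features, labels)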
# A less restrictive stop-word list
# (compared to the built-in scikit-learn list).
STOP_WORDS = {
'a',
'about',
'again',
'all',
'also',
'an',
'and',
'any',
'are',
'as',
'at',
'be',
'because',
'been',
'before',
'being',
'between',
'both',
'but',
'by',
'can',
'could',
'did',
'do',
'does',
'during',
'each',
'for',
'from',
'further',
'had',
'has',
'have',
'having',
'here',
'how',
'however',
'i',
'if',
'in',
'into',
'is',
'it',
'its',
'itself',
'most',
'no',
'nor',
'not',
'of',
'on',
'or',
'our',
'should',
'so',
'some',
'such',
'than',
'that',
'the',
'their',
'theirs',
'them',
'then',
'there',
'therefor',
'therefore',
'these',
'they',
'this',
'those',
'through',
'thus',
'to',
'very',
'was',
'we',
'were',
'what',
'when',
'which',
'while',
'with',
'would',
}
# Contrary to the scikit-learn built in list,
# also add capitalized versions of all words
# to filter case-sensitive texts, too.
STOP_WORDS.update(w.capitalize() for w in list(STOP_WORDS))
STOP_WORDS = frozenset(STOP_WORDS)
# Words that are often classified as gene names.
UNMASK = frozenset({
#'-',
#'.',
'Ab',
'anti',
'antibody',
'antibodies',
'binding',
'ChIP',
'Chromatin',
'construct',
'constructs',
'enhancer',
'element',
'elements',
'exon',
'factor',
'family',
'Fig',
'fragment',
'gene',
'genes',
'GFP',
'human',
'islets',
'isoform',
'isoforms',
'kb',
'luciferase',
'mouse',
'motif',
'mutant',
'mutants',
'mRNA',
'proximal',
'promoter',
'promoters',
'protein',
'proteins',
'rat',
'reporter',
'region',
'regions',
'repressor',
'sequence',
'sequences',
'shRNA',
'shRNAs',
'siRNA',
'siRNAs',
'silencer',
'site',
'sites',
'Table',
'transcription',
})
# Reporting setup as chosen by the user.
Report = namedtuple('Report',
'parameters top worst fn fp classification folds')
def subAll(patterns, mask, lines):
return [patterns.sub(mask, line) for line in lines]
def asDict(sentence: Sentence, ngrams=2):
"""Convert a :class:`fnl.text.sentence.Sentence` into a feature dictionary."""
d = {'gene-count': sentence.countEntity('B-gene')}
stems = list(sentence.maskedStems())
pos = sentence.posTags()
tokens = Counter('{}/{}'.format(s, t) for s, t in zip(stems, pos))
d.update(tokens)
if "TARGET/NN" in d and "FACTOR/NN" in d:
d['has-all-entities'] = 1
gram = list(stems)
while ngrams > 1:
        ngrams -= 1
        tokens = Counter('{} {}'.format(s, g) for s, g in zip(stems, gram[1:]))
d.update(tokens)
return d
class Data:
"""
The data object is a container for all data relevant to the classifiers.
"""
# FIXME: this class is coder's hell...
def __init__(self, *files, columns=None, ngrams=2, decap=False, patterns=None, mask=None):
"""
Create a new data object with the following attributes:
* instances - list of raw text instances
* labels - array of instance labels in same order as raw text
* features - matrix of feature vectors per text instance
* names - array of feature names in same order as features
Both features and names are undefined until extracted
using some Vectorizer.
Exclusive options for either BIO-NER vs. plain-text input:
        1. **BIO-NER** parameters: Define a `columns` integer to set the number of disregarded
        columns and thereby declare that the input will be in BIO-NER format. In addition, the
        `ngram` option can be set to define the ngram size of the tokens to generate.
        All other keyword parameters will be ignored.
        2. **plain-text** keyword parameters: Set `decap=True` to lower-case the first letter of
        each plain-text line. Use a list of regex `patterns` and a replacement string `mask` to
        "mask" pattern-matched words in regular (non-`column`) input.
"""
try:
if columns is None:
inputs = [[l.strip('\r\n') for l in f] for f in files]
if decap:
for i in range(len(inputs)):
inputs[i] = ["{}{}".format(l[0].lower(), l[1:])
for l in inputs[i] if len(l)]
if patterns and mask:
self.instances = []
splits = joblib.cpu_count()
for lines in inputs:
jobs = tuple(lines[i::splits] for i in range(splits))
jobs = joblib.Parallel(n_jobs=splits)(
delayed(subAll)(patterns, mask, lines) for lines in jobs
)
self.instances.append(list(zip(lines, chain(*jobs))))
else:
self.instances = [list(zip(lines, lines)) for lines in inputs]
else:
self.instances = []
for f in files:
# FIXME: instead of two hardcoded entity masks,
# FIXME: this has to be dynamic or generic...
sentences = SentenceParser(f, ('FACTOR', 'TARGET'), id_columns=columns)
if not columns:
sentences = list(enumerate(sentences, start=1))
data = [(sid, asDict(s, ngrams)) for sid, s in sentences]
self.instances.append(data)
except UnicodeDecodeError as e:
import sys
print('decoding error:', e.reason, 'in input file')
sys.exit(1)
# ensure the minority label(s) come first (important for the evaluation, too!)
self.instances = sorted(self.instances, key=len)
self.classes = len(self.instances)
self.labels = np.concatenate([
(np.zeros(len(data), dtype=np.uint8) + i)
for i, data in enumerate(self.instances)
])
self.ids = None
self.raw = None
self.features = None
self.names = None
if columns is None:
self.raw, self.instances = zip(*list(chain.from_iterable(self.instances)))
if len(self.raw) and '\t' in self.raw[0]:
self.ids = [l.split('\t', 1)[0] for l in self.raw]
else:
self.ids = self.raw
else:
self.ids, self.instances = zip(*list(chain.from_iterable(self.instances)))
def extract(sel
chrta/canfestival-3-ct | objdictgen/commondialogs.py | Python | lgpl-2.1 | 69,421 | 0.008398
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#This file is part of CanFestival, a library implementing CanOpen Stack.
#
#Copyright (C): Edouard TISSERANT, Francis DUPIN and Laurent BESSARD
#
#See COPYING file for copyrights details.
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import wx
import wx.grid
import os
from node import BE_to_LE, LE_to_BE
ScriptDirectory = os.path.split(__file__)[0]
#-------------------------------------------------------------------------------
# Editing Communication Dialog
#-------------------------------------------------------------------------------
[ID_COMMUNICATIONDIALOG, ID_COMMUNICATIONDIALOGPOSSIBLEINDEXES,
ID_COMMUNICATIONDIALOGCURRENTINDEXES, ID_COMMUNICATIONDIALOGSELECT,
ID_COMMUNICATIONDIALOGUNSELECT, ID_COMMUNICATIONDIALOGSTATICTEXT1,
ID_COMMUNICATIONDIALOGSTATICTEXT2
] = [wx.NewId() for _init_ctrls in range(7)]
class CommunicationDialog(wx.Dialog):
def _init_coll_flexGridSizer1_Items(self, parent):
parent.AddSizer(self.MainSizer, 0, border=20, flag=wx.GROW|wx.TOP|wx.LEFT|wx.RIGHT)
parent.AddSizer(self.ButtonSizer, 0, border=20, flag=wx.ALIGN_RIGHT|wx.BOTTOM|wx.LEFT|wx.RIGHT)
def _init_coll_flexGridSizer1_Growables(self, parent):
parent.AddGrowableCol(0)
parent.AddGrowableRow(0)
def _init_coll_MainSizer_Items(self, parent):
parent.AddSizer(self.LeftGridSizer, 0, border=0, flag=wx.GROW)
parent.AddSizer(self.MiddleGridSizer, 0, border=0, flag=wx.GROW)
parent.AddSizer(self.RightGridSizer, 0, border=0, flag=wx.GROW)
def _init_coll_MainSizer_Growables(self, parent):
parent.AddGrowableCol(0)
parent.AddGrowableCol(2)
parent.AddGrowableRow(0)
def _init_coll_LeftGridSizer_Items(self, parent):
parent.AddWindow(self.staticText1, 0, border=0, flag=wx.GROW)
parent.AddWindow(self.PossibleIndexes, 0, border=0, flag=wx.GROW)
def _init_coll_LeftGridSizer_Growables(self, parent):
parent.AddGrowableCol(0)
parent.AddGrowableRow(1)
def _init_coll_MiddleGridSizer_Items(self, parent):
parent.AddWindow(self.Select, 0, border=0, flag=wx.ALIGN_BOTTOM)
parent.AddWindow(self.Unselect, 0, border=0, flag=wx.ALIGN_TOP)
def _init_coll_MiddleGridSizer_Growables(self, parent):
parent.AddGrowableCol(0)
parent.AddGrowableRow(0)
parent.AddGrowableRow(1)
def _init_coll_RightGridSizer_Items(self, parent):
parent.AddWindow(self.staticText2, 0, border=0, flag=wx.GROW)
parent.AddWindow(self.CurrentIndexes, 0, border=0, flag=wx.GROW)
def _init_coll_RightGridSizer_Growables(self, parent):
parent.AddGrowableCol(0)
parent.AddGrowableRow(1)
def _init_sizers(self):
self.flexGridSizer1 = wx.FlexGridSizer(cols=1, hgap=0, rows=2, vgap=10)
self.MainSizer = wx.FlexGridSizer(cols=3, hgap=15, rows=1, vgap=0)
self.LeftGridSizer = wx.FlexGridSizer(cols=1, hgap=0, rows=2, vgap=5)
self.MiddleGridSizer = wx.FlexGridSizer(cols=1, hgap=0, rows=2, vgap=40)
self.RightGridSizer = wx.FlexGridSizer(cols=1, hgap=0, rows=2, vgap=5)
self._init_coll_flexGridSizer1_Items(self.flexGridSizer1)
self._init_coll_flexGridSizer1_Growables(self.flexGridSizer1)
self._init_coll_MainSizer_Items(self.MainSizer)
self._init_coll_MainSizer_Growables(self.MainSizer)
self._init_coll_LeftGridSizer_Items(self.LeftGridSizer)
self._init_coll_LeftGridSizer_Growables(self.LeftGridSizer)
self._init_coll_MiddleGridSizer_Items(self.MiddleGridSizer)
self._init_coll_MiddleGridSizer_Growables(self.MiddleGridSizer)
self._init_coll_RightGridSizer_Items(self.RightGridSizer)
self._init_coll_RightGridSizer_Growables(self.RightGridSizer)
self.SetSizer(self.flexGridSizer1)
def _init_ctrls(self, prnt):
wx.Dialog.__init__(self, id=ID_COMMUNICATIONDIALOG,
name='CommunicationDialog', parent=prnt, pos=wx.Point(234, 216),
size=wx.Size(726, 437), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER,
title=_('Edit Communication Profile'))
self.SetClientSize(wx.Size(726, 437))
self.staticText1 = wx.StaticText(id=ID_COMMUNICATIONDIALOGSTATICTEXT1,
label=_('Possible Profile Indexes:'), name='staticText1',
parent=self, pos=wx.Point(0, 0), size=wx.Size(0,
17), style=0)
self.PossibleIndexes = wx.ListBox(choices=[],
id=ID_COMMUNICATIONDIALOGPOSSIBLEINDEXES,
name='PossibleIndexes', parent=self, pos=wx.Point(0, 0),
size=wx.Size(0, 0), style=wx.LB_EXTENDED)
self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnPossibleIndexesDClick,
id=ID_COMMUNICATIONDIALOGPOSSIBLEINDEXES)
self.Select = wx.Button(id=ID_COMMUNICATIONDIALOGSELECT, label='>>',
name='Select', parent=self, pos=wx.Point(0, 0),
size=wx.Size(32, 32), style=0)
self.Select.Bind(wx.EVT_BUTTON, self.OnSelectButton,
id=ID_COMMUNICATIONDIALOGSELECT)
self.Unselect = wx.Button(id=ID_COMMUNICATIONDIALOGUNSELECT,
label='<<', name='Unselect', parent=self,
pos=wx.Point(0, 0), size=wx.Size(32, 32), style=0)
self.Unselect.Bind(wx.EVT_BUTTON, self.OnUnselectButton,
id=ID_COMMUNICATIONDIALOGUNSELECT)
self.staticText2 = wx.StaticText(id=ID_COMMUNICATIONDIALOGSTATICTEXT2,
label=_('Current Profile Indexes:'), name='staticText2',
parent=self, pos=wx.Point(0, 0), size=wx.Size(0,
17), style=0)
self.CurrentIndexes = wx.ListBox(choices=[],
id=ID_COMMUNICATIONDIALOGCURRENTINDEXES, name='CurrentIndexes',
parent=self, pos=wx.Point(0, 0), size=wx.Size(0, 0),
style=wx.LB_EXTENDED)
self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnCurrentIndexesDClick,
id=ID_COMMUNICATIONDIALOGCURRENTINDEXES)
self.ButtonSizer = self.CreateButtonSizer(wx.OK|wx.CANCEL)
self._init_sizers()
def __init__(self, parent):
self._init_ctrls(parent)
self.AllList = []
self.CurrentList = []
self.IndexDictionary = {}
def SetIndexDictionary(self, dictionary):
self.IndexDictionary = dictionary
def SetCurrentList(self, list):
self.CurrentList = []
self.CurrentList.extend(list)
self.CurrentList.sort()
def GetCurrentList(self):
return self.CurrentList
def RefreshLists(self):
self.PossibleIndexes.Clear()
self.CurrentIndexes.Clear()
self.AllList = []
for index in self.IndexDictionary.iterkeys():
if index not in self.CurrentList:
self.AllList.append(index)
self.AllList.sort()
for index in self.AllList:
self.PossibleIndexes.Append("0x%04X %s"%(index, self.IndexDictionary[index][0]))
for index in self.CurrentList:
if index in self.IndexDictionary:
self.CurrentIndexes.Append("0x%04X %s"%(index, self.IndexDictionary[index][0]))
def OnPossibleIndexesDClick(self, event):
self.SelectPossible()
event.Skip()
def OnCurrentIndexesDClick(self, event):
self.UnselectCurrent()
event.Skip()
def OnSelectButton(self, event):
self.SelectPossibl
shengshuyang/StanfordCNNClass | assignment1/cs231n/classifiers/softmax.py | Python | gpl-3.0 | 3,970 | 0.019395
import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
#############################################################################
# TODO: Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
dW = np.zeros(W.shape) # initialize the gradient as zero
# compute the loss and the gradient
num_classes = W.shape[1]
num_train = X.shape[0]
losses = np.zeros([num_train,1])
for i in xrange(num_train):
f = X[i].dot(W)
f -= np.max(f)
p = np.exp(f) / np.sum(np.exp(f)) # now sum(p) should be 1.0
losses[i] = - np.log(p[y[i]])
dW += np.outer(X[i],p)
dW[:,y[i]] -= X[i]
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
loss = np.sum(losses) / num_train
dW /= num_train
# Add regularization to the loss.
loss += 0.5 * reg * np.sum(W * W)
dW += reg * W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
dW = np.zeros_like(W)
num_classes = W.shape[1]
num_train = X.shape[0]
loss = 0.0
#############################################################################
  # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
# compute the loss and the gradient
scores = X.dot(W)
  scores -= np.max(scores,axis=1)[:,np.newaxis] # has to add newaxis for broadcasting
p = np.exp(scores) / np.sum(np.exp(scores), axis = 1)[:,np.newaxis]
loss = -np.sum(np.log(p[range(num_train),y]))
# Always think of the problem with the variable scores in mind, so dLoss/dW
# should be dLoss/dscores (nonlinear scalar to matrix derivative) times dscores/dW
# (simply X since X*W = scores)
dscores = p
dscores[range(num_train),y] -= 1
dW = X.T.dot(dscores)
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
loss /= num_train
dW /= num_train
# Add regularization to the loss.
loss += 0.5 * reg * np.sum(W * W)
dW += reg * W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
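A quick finite-difference check is a handy way to validate the analytic gradient above. The sketch below is illustrative only: the shapes, seed, regularization strength, and probed index are arbitrary choices, not part of the assignment.
import numpy as np
np.random.seed(0)
D, C, N = 5, 3, 10
W = 0.01 * np.random.randn(D, C)
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
loss, dW = softmax_loss_vectorized(W, X, y, reg=0.1)
eps = 1e-5
i, j = 2, 1  # probe a single weight
W_plus, W_minus = W.copy(), W.copy()
W_plus[i, j] += eps
W_minus[i, j] -= eps
numeric = (softmax_loss_vectorized(W_plus, X, y, 0.1)[0]
           - softmax_loss_vectorized(W_minus, X, y, 0.1)[0]) / (2 * eps)
print(abs(numeric - dW[i, j]))  # expected to be tiny (~1e-8) if dW is correct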
|
MartinHjelmare/home-assistant
|
homeassistant/components/automation/sun.py
|
Python
|
apache-2.0
| 1,227
| 0
|
"""Offer sun based automation rules."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_EVENT, CONF_OFFSET, CONF_PLATFORM, SUN_EVENT_SUNRISE)
from homeassistant.helpers.event import async_track_sunrise, async_track_sunset
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'sun',
vol.Required(CONF_EVENT): cv.sun_event,
vol.Required(CONF_OFFSET, default=timedelta(0)): cv.time_period,
})
async def async_trigger(hass, config, action, automation_info):
"""Listen for events based on configuration."""
event = config.get(CONF_EVENT)
offset = config.get(CONF_OFFSET)
@callback
def call_action():
"""Call action with right context."""
hass.async_run_job(action, {
'trigger': {
'platform': 'sun',
'event': event,
'offset': offset,
},
})
if event == SUN_EVENT_SUNRISE:
        return async_track_sunrise(hass, call_action, offset)
return async_track_sunset(hass, call_action, offset)
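For reference, a sun trigger config of the shape TRIGGER_SCHEMA validates is sketched below. This is illustrative only; the offset string format accepted by cv.time_period is an assumption, not something defined in this file.
example_trigger = {
    CONF_PLATFORM: 'sun',      # required literal
    CONF_EVENT: 'sunset',      # validated by cv.sun_event
    CONF_OFFSET: '-00:30:00',  # assumed string form; coerced to a timedelta
}
# TRIGGER_SCHEMA(example_trigger) would return the validated, coerced config.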
|
dhermyt/WONS
|
analysis/textclassification/SklearnClassifierFactory.py
|
Python
|
bsd-2-clause
| 2,285
| 0.001751
|
from nltk import NaiveBayesClassifier, SklearnClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.svm import LinearSVC
from sklearn.svm import NuSVC
from sklearn.svm import SVC
from analysis.textclassification.NltkClassifierWrapper import NltkClassifierWrapper
from analysis.textclassification.SklearnClassifierWrapper import SklearnClassifierWrapper
class SklearnClassifierFactory(object):
@staticmethod
def SklearnMultinomialNB():
        return SklearnClassifierWrapper(MultinomialNB)
@staticmethod
def SklearnBernoulliNB():
return SklearnClassifierWrapper(BernoulliNB)
@staticmethod
def SklearnLogisticRegression():
return SklearnClassifierWrapper(LogisticRegression)
@staticmethod
def SklearnSGDClassifier():
return SklearnClassifierWrapper(lambda: SGDClassifier(loss='log'))
@staticmethod
def SklearnSVC():
return SklearnClassifierWrapper(lambda : SVC(probability=True))
@staticmethod
def SklearnLinearSVC():
return SklearnClassifierWrapper(LinearSVC)
@staticmethod
def SklearnNuSVC():
return SklearnClassifierWrapper(lambda : NuSVC(probability=True))
@staticmethod
def SklearnRidgeClassifier():
return SklearnClassifierWrapper(RidgeClassifier)
@staticmethod
def SklearnPerceptron():
return SklearnClassifierWrapper(Perceptron)
@staticmethod
def SklearnPassiveAggressive():
return SklearnClassifierWrapper(PassiveAggressiveClassifier)
@staticmethod
def SklearnKNeighbours():
return SklearnClassifierWrapper(KNeighborsClassifier)
@staticmethod
def SklearnNearestCentroid():
return SklearnClassifierWrapper(NearestCentroid)
@staticmethod
def SklearnRandomForest():
return SklearnClassifierWrapper(RandomForestClassifier)
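Usage amounts to calling a factory method to obtain a wrapper object; how that wrapper is then trained and queried is defined in SklearnClassifierWrapper, which is not shown in this file. Illustrative sketch only:
wrapper = SklearnClassifierFactory.SklearnLogisticRegression()
svc_wrapper = SklearnClassifierFactory.SklearnSVC()            # wraps SVC(probability=True)
sgd_wrapper = SklearnClassifierFactory.SklearnSGDClassifier()  # SGD with log loss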
|
cboelsen/tastytopping
|
tastytopping/field.py
|
Python
|
lgpl-3.0
| 6,454
| 0.001085
|
# -*- coding: utf-8 -*-
"""
.. module: field
:platform: Unix, Windows
:synopsis: Separates field_type-dependent functionality into Field classes.
.. moduleauthor:: Christian Boelsen <christian.boelsen@hds.com>
"""
__all__ = ('create_field', )
from datetime import datetime
from .exceptions import (
InvalidFieldValue,
BadUri,
)
from . import tastytypes
class Field(object):
"""Wrap a field with a generic value."""
def __init__(self, value):
self._value = value
self._str = value
def stream(self):
"""Return the representation of this field that can be sent over HTTP."""
return self._str
def value(self):
"""Return the wrapped value."""
return self._value
def filter(self, field):
"""Return a (field_name, field_value) tuple for this field that can be
used in GET requests.
This method is exposed primarily because uris cannot be used for
resources in GET requests in tastypie.
"""
return field, self.stream()
class DateTimeField(Field):
"""Wrap a datetime field."""
def __init__(self, value):
if isinstance(value, datetime):
value = value
stream = value.strftime(tastytypes.DATETIME_FORMAT1)
else:
stream = value
# Try with milliseconds, otherwise without.
try:
value = datetime.strptime(value, tastytypes.DATETIME_FORMAT1)
except ValueError:
try:
value = datetime.strptime(value, tastytypes.DATETIME_FORMAT2)
except ValueError:
value = datetime.strptime(value, tastytypes.DATETIME_FORMAT3)
super(DateTimeField, self).__init__(value)
self._str = stream
class ResourceField(Field):
"""Wrap a Resource in a to_one relationship."""
def __init__(self, value, factory):
if hasattr(value, 'uri'):
value = value
else:
resource_type = self._get_resource_type(value)
resource_class = getattr(factory, resource_type)
value = resource_class(_fields=value)
super(ResourceField, self).__init__(value)
@staticmethod
def _get_resource_type(details):
try:
return details['resource_uri'].split('/')[-3]
except TypeError:
return details.split('/')[-3]
def stream(self):
return self._value.uri()
def filter(self, field):
        related_field = self.value().filter_field()
filtered_field = self.value()._schema().append_to_filter(field, related_field)
return filtered_field, getattr(self.value(), related_field)
class ResourceListField(Field):
"""Wrap a list of Resources in a to_many relationship."""
def __init__(self, values, factory):
value = [ResourceField(v, factory) for v in values]
super(ResourceListField, self).__init__(value)
def stream(self):
return [v.stream() for v in self._value]
def value(self):
return [v.value() for v in self._value]
def filter(self, field):
try:
related_field = self.value()[0].filter_field()
filtered_field = self.value()[0]._schema().append_to_filter(field, related_field)
return filtered_field, [getattr(v, related_field) for v in self.value()]
except IndexError:
return field, []
class _FieldCreator(object):
def __init__(self, field, field_type, factory):
self._field = field
self._field_type = field_type
self._factory = factory
def _is_probably_resource(self, field=None):
if field is None:
field = self._field
return (
hasattr(field, 'split') or
hasattr(field, 'uri') or (
isinstance(field, dict) and
'resource_uri' in field
)
)
def _is_probably_datetime(self):
return (
isinstance(self._field, datetime) or (
hasattr(self._field, 'format') and
self._field.count(':') == 2 and
self._field.count('-') == 2 and
self._field.count('T') == 1
)
)
def _is_probably_resource_list(self):
return (
isinstance(self._field, list) and
self._is_probably_resource(self._field[0])
)
def _try_remaining_types(self):
if self._is_probably_datetime():
return DateTimeField(self._field)
else:
return Field(self._field)
def _create_guessed_field(self):
try:
if self._is_probably_resource():
result = ResourceField(self._field, self._factory)
result.value().full_uri()
elif self._is_probably_resource_list():
result = ResourceListField(self._field, self._factory)
result.value()[0].full_uri()
else:
result = self._try_remaining_types()
except (BadUri, IndexError, AttributeError):
result = self._try_remaining_types()
return result
def _create_known_field(self):
try:
if self._field_type == tastytypes.RELATED:
if self._is_probably_resource(self._field):
result = ResourceField(self._field, self._factory)
else:
result = ResourceListField(self._field, self._factory)
elif self._field_type == tastytypes.DATETIME:
result = DateTimeField(self._field)
else:
result = Field(self._field)
except Exception as error:
raise InvalidFieldValue(
error,
'Encountered "{0}" while creating a "{1}" Field with the value "{2}"'.format(
error, self._field_type, self._field
)
)
return result
def create(self):
"""Create a Field object based on the construction params."""
if self._field is None:
return Field(None)
if self._field_type is None:
return self._create_guessed_field()
return self._create_known_field()
def create_field(field, field_type, factory):
"""Create an appropriate Field based on the field_type."""
creator = _FieldCreator(field, field_type, factory)
return creator.create()
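A minimal sketch of create_field() for a datetime value, assuming (based only on the code above) that the factory argument is unused in that branch:
from datetime import datetime
f = create_field(datetime(2015, 1, 1, 12, 30), tastytypes.DATETIME, None)
f.value()   # -> datetime(2015, 1, 1, 12, 30)
f.stream()  # -> the DATETIME_FORMAT1 string sent over HTTP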
|
SudarshanGp/R_Test
|
test.py
|
Python
|
mit
| 1,088
| 0.05239
|
import csv
import json
csvfile = open('su15.csv', 'r')
jsonfile = open('output.json', 'w')
output = {'name' : 'Summer 2015', 'children': []}
gender = {'children' : []}
i = 0
a = {'test' : []}
for row in csv.DictReader(csvfile):
a['test'].append({'name' : 'Male', 'size' : row['Male']
})
    a['test'].append({'name' : 'Female', 'size' : row['Female']
})
gender['children'].append(
{
'name' : row['Courses'],
'children' : a['test']
})
a = {'test' : []}
output['children'].append(
{
'name' : 'Gender',
'children' : gender['children']
})
csvfile.close()
print "success 1"
csvfile = open('su15_2.csv', 'r')
instate = {'children' : []}
a = {'test' : []}
for row in csv.DictReader(csvfile):
a['test'].append({'name' : 'Instate', 'size' : row['Illinois']
})
a['test'].append({'name' : 'Outstate', 'size' : row['Non-Illinois']
})
instate['children'].append(
{
'name' : row['Courses'],
'children' : a['test']
})
a = {'test' : []}
output['children'].append(
{
'name' : 'Instate',
'children' : instate['children']
})
json.dump(output, jsonfile)
jsonfile.close()
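The column names the script relies on are implied by the dictionary lookups above; the note below spells out the assumed input and output shape (the sample row is made up):
# su15.csv is assumed to have header columns: Courses, Male, Female
# su15_2.csv is assumed to have header columns: Courses, Illinois, Non-Illinois
# A row such as  CS101,40,25  ends up nested under output['children'][0] as:
#   {'name': 'CS101',
#    'children': [{'name': 'Male', 'size': '40'},
#                 {'name': 'Female', 'size': '25'}]}
# (sizes stay strings because csv.DictReader yields string values)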
|
cursoweb/python-archivos
|
Merlo-Joaquin/wordcount.py
|
Python
|
mit
| 2,864
| 0.007749
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() function below is already defined and complete. It calls the
print_words() and print_top() functions that you wrote.
1. For the --count flag, implement a print_words(filename) function that counts
how frequently each word appears in the text and prints:
word1 count1
word2 count2
...
Print the list above sorted by word (python will sort so that punctuation comes
before letters -- don't worry about that). Store all words in lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) function that is
similar to print_words() but prints only the 20 most common words, sorted so
that the most common word appears first, then the next most common, and so on.
Use str.split() (with no arguments) to split everything on whitespace.
Workflow: don't build the whole program at once. Get to an intermediate
milestone, print your data structure, and then sys.exit(0).
When that works, try the next milestone.
Optional: define a helper function to avoid duplicating code inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define the print_words(filename) and print_top(filename) functions.
# You can write a helper function that reads a file and builds and returns a
# word/count dictionary.
# Then print_words() and print_top() can call that helper function directly.
def word_count_dict(namefile):
    # Helper: read the file and return a word -> count dictionary,
    # lowercasing words so 'The' and 'the' are counted together.
    f = open(namefile, 'rU')
    diccionario = {}
    for linea in f:
        for cadena in linea.split():
            cadena = cadena.lower()
            diccionario[cadena] = diccionario.get(cadena, 0) + 1
    f.close()
    return diccionario
def print_words(namefile):
    # Print every word and its count, sorted by word.
    diccionario = word_count_dict(namefile)
    for palabra in sorted(diccionario.keys()):
        print palabra, diccionario[palabra]
def print_top(namefile):
    # Print the 20 most common words, most common first.
    diccionario = word_count_dict(namefile)
    items = sorted(diccionario.items(), key=lambda par: par[1], reverse=True)
    for palabra, cantidad in items[:20]:
        print palabra, cantidad
###
# This basic command-line argument parsing code is provided below;
# it calls the print_words() and print_top() functions that you must define.
def main():
if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
        print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
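As an illustration only (file name and contents are hypothetical), a run of the finished script would look like:
#   $ ./wordcount.py --count words.txt     # words.txt contains "The cat saw the dog"
#   cat 1
#   dog 1
#   saw 1
#   the 2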
|
silverfield/pythonsessions
|
s13_python_day/__init__.py
|
Python
|
mit
| 23
| 0
|
__author__ = 'ferrard'
|
esilgard/caisis_query_and_de_identify
|
sweeper.py
|
Python
|
apache-2.0
| 2,097
| 0.012399
|
'''
author@esilgard April 2016
"sweep" through output directory files
searching for potential PHI and print out warning statements
'''
import os, re
# general directory for output files
file_dir = r'H:\DataExtracts\OncoscapeLungHoughton-4229\Output'
## common names or potential phi (assume first line is a header/column description)
first_names = set([x.strip() for x in open('resources' + os.path.sep + 'First_Names.txt', 'r').readlines()[1:]])
last_names = set([x.strip() for x in open('resources' + os.path.sep + 'Last_Names.txt', 'r').readlines()[1:]])
potential_phi_keywords = set([x.strip() for x in open('resources' + os.path.sep + 'PHI_Keywords.txt', 'r').readlines()[1:]])
print 'sweeping output files for potential PHI: mrns, path numbers, ',\
len(first_names), 'first names, ', len(last_names), 'last names, ',\
len(potential_phi_keywords), 'PHI indicator keywords'
for path, directory, files in os.walk(file_dir):
for f in files:
        text = open(path + os.path.sep + f, 'r').read()
alpha_words = set(re.split('[\W]', text))
## PHI patterns
mrn = re.search('[UH][0-9]{7}', text)
pathnum = re.search('[A-B][\-][\-\d]{2,11}', text)
if mrn:
print 'WARNING. Potential MRN found in ' + path + f + ' at ' + str(mrn.start())
if pathnum:
print 'WARNING. Potential accession number found in ' + path + f + ' at ' + str(pathnum.start())
if first_names.intersection(alpha_words):
print 'WARNING. Potential first name(s) found in ' + path + f + ' -- ' + ','.join(first_names.intersection(alpha_words))
if last_names.intersection(alpha_words):
print 'WARNING. Potential last name(s) found in ' + path + f + ' -- ' + ','.join(last_names.intersection(alpha_words))
if potential_phi_keywords.intersection(alpha_words):
print 'WARNING. Potential PHI indicator keyword found in ' + path + f + ' -- ' + ','.join(potential_phi_keywords.intersection(alpha_words))
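As a quick illustration of the strings the two patterns above are meant to flag (the sample text is made up):
import re
assert re.search('[UH][0-9]{7}', 'seen for patient U1234567 at follow-up')      # MRN-like
assert re.search('[A-B][\-][\-\d]{2,11}', 'specimen A-12-34567 was reviewed')   # accession-like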
|
negrinho/deep_architect
|
examples/contrib/kubernetes/master.py
|
Python
|
mit
| 9,581
| 0.000417
|
import argparse
import time
import subprocess
import logging
from deep_architect import search_logging as sl
from deep_architect import utils as ut
from deep_architect.contrib.communicators.mongo_communicator import MongoCommunicator
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
logging.basicConfig(format='[%(levelname)s] %(asctime)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
BUCKET_NAME = 'deep_architect'
RESULTS_TOPIC = 'results'
ARCH_TOPIC = 'architectures'
KILL_SIGNAL = 'kill'
PUBLISH_SIGNAL = 'publish'
def process_config_and_args():
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
parser.add_argument(
'--config-file',
action='store',
dest='config_file',
default=
'/deep_architect/examples/contrib/kubernetes/experiment_config.json')
parser.add_argument('--bucket',
'-b',
action='store',
dest='bucket',
default=BUCKET_NAME)
# Other arguments
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
parser.add_argument('--mongo-host',
'-m',
action='store',
dest='mongo_host',
default='127.0.0.1')
parser.add_argument('--mongo-port',
'-p',
action='store',
dest='mongo_port',
default=27017)
parser.add_argument('--log',
                        choices=['debug', 'info', 'warning', 'error'],
default='info')
parser.add_argument('--repetition', default=0)
options = parser.parse_args()
    numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log)
logging.getLogger().setLevel(numeric_level)
configs = ut.read_jsonfile(options.config_file)
config = configs[options.config_name]
config['bucket'] = options.bucket
comm = MongoCommunicator(host=options.mongo_host,
port=options.mongo_port,
data_refresher=True,
refresh_period=10)
datasets = {
'cifar10': ('data/cifar10/', 10),
}
_, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
config['save_every'] = 1 if 'save_every' not in config else config[
'save_every']
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
config['num_samples'] = -1 if 'samples' not in config else config['samples']
# SET UP GOOGLE STORE FOLDER
config['search_name'] = config['search_name'] + '_' + str(
options.repetition)
search_logger = sl.SearchLogger(config['search_folder'],
config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
config['save_filepath'] = ut.join_paths(
(search_data_folder, config['searcher_file_name']))
config['eval_path'] = sl.get_all_evaluations_folderpath(
config['search_folder'], config['search_name'])
config['full_search_folder'] = sl.get_search_folderpath(
config['search_folder'], config['search_name'])
config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
'eval_hparams']
state = {
'epochs': 0,
'models_sampled': 0,
'finished': 0,
'best_accuracy': 0.0
}
if options.resume:
try:
download_folder(search_data_folder, config['full_search_folder'],
config['bucket'])
searcher.load_state(search_data_folder)
if ut.file_exists(config['save_filepath']):
old_state = ut.read_jsonfile(config['save_filepath'])
state['epochs'] = old_state['epochs']
state['models_sampled'] = old_state['models_sampled']
state['finished'] = old_state['finished']
state['best_accuracy'] = old_state['best_accuracy']
except:
pass
return comm, search_logger, searcher, state, config
def download_folder(folder, location, bucket):
logger.info('Downloading gs://%s/%s to %s/', bucket, folder, location)
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', 'gs://' + bucket + '/' + folder,
location + '/'
])
def upload_folder(folder, location, bucket):
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', folder,
'gs://' + bucket + '/' + location + '/'
])
def get_topic_name(topic, config):
return config['search_folder'] + '_' + config['search_name'] + '_' + topic
def update_searcher(message, comm, search_logger, searcher, state, config):
data = message['data']
if not data == PUBLISH_SIGNAL:
results = data['results']
vs = data['vs']
evaluation_id = data['evaluation_id']
searcher_eval_token = data['searcher_eval_token']
log_results(results, vs, evaluation_id, searcher_eval_token,
search_logger, config)
searcher.update(results['validation_accuracy'], searcher_eval_token)
update_searcher_state(state, config, results)
save_searcher_state(searcher, state, config, search_logger)
publish_new_arch(comm, searcher, state, config)
comm.finish_processing(get_topic_name(RESULTS_TOPIC, config), message)
def save_searcher_state(searcher, state, config, search_logger):
logger.info('Models finished: %d Best Accuracy: %f', state['finished'],
state['best_accuracy'])
searcher.save_state(search_logger.get_search_data_folderpath())
state = {
'finished': state['finished'],
'models_sampled': state['models_sampled'],
'epochs': state['epochs'],
'best_accuracy': state['best_accuracy']
}
ut.write_jsonfile(state, config['save_filepath'])
upload_folder(search_logger.get_search_data_folderpath(),
config['full_search_folder'], config['bucket'])
return state
def update_searcher_state(state, config, results):
state['best_accuracy'] = max(state['best_accuracy'],
results['validation_accuracy'])
state['finished'] += 1
state['epochs'] += config['eval_epochs']
def log_results(results, vs, evaluation_id, searcher_eval_token, search_logger,
config):
logger.info("Updating searcher with evaluation %d and results %s",
evaluation_id, str(results))
eval_logger = search_logger.get_evaluation_logger(evaluation_id)
eval_logger.log_config(vs, searcher_eval_token)
eval_logger.log_results(results)
upload_folder(eval_logger.get_evaluation_folderpath(), config['eval_path'],
config['bucket'])
def publish_new_arch(comm, searcher, state, config):
while comm.check_data_exists(get_topic_name(ARCH_TOPIC, config),
'evaluation_id', state['models_sampled']):
state['models_sampled'] += 1
if should_end_searcher(state, config):
logger.info('Search finished, sending kill signal')
comm.publish(get_topic_name(ARCH_TOPIC, config), KILL_SIGNAL)
state['search_finished'] = True
elif should_continue(state, config):
logger.info('Publishing architecture number %d',
state['models_sampled'])
_, _, vs, searcher_eval_token = searcher.sample()
arch = {
|
proofchains/python-proofmarshal
|
proofmarshal/serialize.py
|
Python
|
gpl-3.0
| 14,738
| 0.003393
|
# Copyright (C) 2015 Peter Todd <pete@petertodd.org>
#
# This file is part of python-proofmarshal.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-proofmarshal, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import binascii
import hashlib
import io
import uuid
"""Deterministic, (mostly)context-free, object (de)serialization, and hashing
Motivation
==========
Standard serialization libraries/formats aren't suitable for cryptographic
purposes as they rarely, if ever, support deterministic round-trip encoding.
They also fail to define a way to cryptographically hash the serialized data,
let alone recursively hash that data. Finally they are unable to handle
serialization of data whose structure is a DAG rather than a tree; they don't
efficiently support references to previously serialized data.
Basic grammar
=============
FixedBytes(n) - A fixed length byte array
uIntLEB128(max) - An unsigned, little-endian, base128, integer in the range 0 <= i < n
IntLEB128(min,max) - Signed, little-endian, base128, integer in the range min < i < max
Struct - Zero or more of the above, (de)serialized in a fixed order to form a
structure.
Serialization contexts
======================
We would like to be able to serialize/deserialize
Hashing
=======
Cryptographic hashing of serializable objects is performed by re-using the
serialization machinery. For efficiency reasons objects with serialized
representations that are less than the length of a hash return a so-called
"hash" that is simply the serialized object itself.
"""
DIGEST_LENGTH = 32
class DeserializationError(Exception):
"""Base class for all errors encountered during deserialization"""
class TruncationError(DeserializationError):
"""Truncated data encountered while deserializing"""
class SerializerTypeError(TypeError):
"""Wrong type for specified serializer"""
class SerializerValueError(ValueError):
"""Inappropriate value to be serialized (of correct type)"""
class SerializationContext:
"""Context for serialization
Allows multiple serialization targets to share the same codebase, for
instance bytes, memoized serialization, hashing, etc.
"""
def write_bool(self, value):
"""Write a bool"""
raise NotImplementedError
def write_varuint(self, value):
"""Write a variable-length unsigned integer"""
raise NotImplementedError
def write_bytes(self, value):
"""Write fixed-length bytes"""
raise NotImplementedError
def write_varbytes(self, value):
"""Write variable-length bytes"""
raise NotImplementedError
def write_obj(self, value, serialization_class=None):
"""Write a (memoizable/hashable) object
The object *must* have the hash attribute.
If serialization_class is specified, that class is used as the
Serializer; otherwise value.__class__ is used.
"""
raise NotImplementedError
class DeserializationContext:
"""Context for deserialization
Allows multiple deserialization sources to share the same codebase, for
instance bytes, memoized serialization, hashing, etc.
"""
def read_bool(self):
"""Read a bool"""
raise NotImplementedError
def read_varuint(self, max_int):
"""Read a variable-length unsigned integer"""
raise NotImplementedError
def read_bytes(self, expected_length):
"""Read fixed-length bytes"""
raise NotImplementedError
def read_varbytes(self, value, max_length=None):
"""Read variable-length bytes
No more than max_length bytes will be read.
"""
raise NotImplementedError
def read_obj(self, serialization_class):
"""Read a (potentially memoizable/hashable) object"""
raise NotImplementedError
class StreamSerializationContext(SerializationContext):
def __init__(self, fd):
"""Serialize to a stream"""
self.fd = fd
def write_bool(self, value):
# unsigned little-endian base128 format (LEB128)
if value is True:
self.fd.write(b'\xff')
elif value is False:
self.fd.write(b'\x00')
else:
raise TypeError('Expected bool; got %r' % value.__class__)
def write_varuint(self, value):
# unsigned little-endian base128 format (LEB128)
if value == 0:
self.fd.write(b'\x00')
else:
while value != 0:
b = value & 0b01111111
if value > 0b01111111:
b |= 0b10000000
self.fd.write(bytes([b]))
if value <= 0b01111111:
break
value >>= 7
def write_bytes(self, value):
self.fd.write(value)
def write_obj(self, value, serialization_class=None):
if serialization_class is None:
serialization_class = value.__class__
serialization_class.ctx_serialize(value, self)
class StreamDeserializationContext(DeserializationContext):
def __init__(self, fd):
"""Deserialize from a stream"""
self.fd = fd
def fd_read(self, l):
r = self.fd.read(l)
if len(r) != l:
raise TruncationError('Tried to read %d bytes but got only %d bytes' % \
(l, len(r)))
return r
def read_bool(self):
# unsigned little-endian base128 format (LEB128)
b = self.fd_read(1)[0]
if b == 0xff:
return True
elif b == 0x00:
return False
else:
raise DeserializationError('read_bool() expected 0xff or 0x00; got %d' % b)
def read_varuint(self):
value = 0
shift = 0
while True:
b = self.fd_read(1)[0]
value |= (b & 0b01111111) << shift
if not (b & 0b10000000):
break
shift += 7
return value
def read_bytes(self, expected_length=None):
if expected_length is None:
            expected_length = self.read_varuint()
return self.fd_read(expected_length)
def read_obj(self, serialization_class):
return serialization_class.ctx_deserialize(self)
class BytesSerializationContext(StreamSerializationContext):
def __init__(self):
"""Serialize to bytes"""
super().__init__(io.BytesIO())
def getbytes(self):
"""Return the bytes serialized to date"""
return self.fd.getvalue()
class BytesDeserializationContext(StreamDeserializationContext):
def __init__(self, buf):
"""Deserialize from bytes"""
super().__init__(io.BytesIO(buf))
# FIXME: need to check that there isn't extra crap at end of object
class Serializer:
"""(De)serialize an instance of a class
Base class for all serialization classes. The actual serialization is
performed by serialization *instances*, not classes, and an instance may be
its own serializer.
"""
__slots__ = []
@classmethod
def check_instance(cls, instance):
"""Check that an instance can be serialized by this serializer
Raises SerializerTypeError if the instance class is not the expected
class, and SerializerValueError if the class is correct, but the actual
value is incorrect. (e.g. an out of range integer)
"""
raise NotImplementedError
@classmethod
def ctx_serialize(cls, self, ctx):
"""Serialize to a context"""
raise NotImplementedError
@classmethod
def ctx_deserialize(cls, ctx):
"""Deserialize from a context"""
raise NotImplementedError
@classmethod
def serialize(cls, self):
"""Serialize to bytes"""
ctx = BytesSerializationContext()
cls.ctx_serialize(self, ctx)
return ctx.getbytes()
@classmethod
def deserialize(cls, serialized_value):
"""Deserialize from bytes"""
ctx = BytesDeserializationConte
|
emencia/emencia_paste_djangocms_3
|
emencia_paste_djangocms_3/django_buildout/project/accounts/urls.py
|
Python
|
mit
| 2,293
| 0.004361
|
"""
URLconf for registration and activation, using django-registration's
default mods backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('project.registration.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize registration behavior, feel free to set up
your own URL patterns for these views instead.
"""
from django.conf.urls import patterns
from django.conf.urls import include
from django.conf.urls import url
from django.views.generic.base import TemplateView
from .views import login, password_change, password_reset, password_reset_confirm, ActivationView, RegistrationView
urlpatterns = patterns('',
    url(r'^activate/complete/$',
TemplateView.as_view(template_name='registration/activation_complete.html'),
name='registration_activation_complete'),
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^activate/(?P<activation_key>\w+)/$',
ActivationView.as_view(),
name='registration_activate'),
url(r'^register/$',
RegistrationView.as_view(),
name='registration_register'),
url(r'^register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^register/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
# Override some urls from "registration.auth_urls"
url(r'^login/$', login, {'template_name': 'registration/login.html'}, name='auth_login'),
url(r'^password/change/$', password_change, name='auth_password_change'),
url(r'^password/reset/$', password_reset, name='auth_password_reset'),
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm, name='auth_password_reset_confirm'),
(r'', include('registration.auth_urls')),
)
|
matrix-org/synapse
|
synapse/replication/http/__init__.py
|
Python
|
apache-2.0
| 1,864
| 0
|
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from synapse.http.server import JsonResource
from synapse.replication.http import (
account_data,
devices,
federation,
login,
membership,
presence,
push,
register,
send_event,
streams,
)
if TYPE_CHECKING:
from synapse.server import HomeServer
REPLICATION_PREFIX = "/_synapse/replication"
class ReplicationRestResource(JsonResource):
def __init__(self, hs: "HomeServer"):
# We enable extracting jaeger contexts here as these are internal APIs.
super().__init__(hs, canonical_json=False, extract_context=True)
self.register_servlets(hs)
def register_servlets(self, hs: "HomeServer") -> None:
send_event.register_servlets(hs, self)
federation.register_servlets(hs, self)
presence.register_servlets(hs, self)
membership.register_servlets(hs, self)
streams.register_servlets(hs, self)
account_data.register_servlets(hs, self)
push.register_servlets(hs, self)
# The following can't currently be instantiated on workers.
if hs.config.worker.worker_app is None:
login.register_servlets(hs, self)
register.register_servlets(hs, self)
devices.register_servlets(hs, self)
|
hovo1990/deviser
|
generator/tests/test_cpp_code/run_cpp_tests.py
|
Python
|
lgpl-2.1
| 17,841
| 0.000224
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from code_files import CppFiles, ExtensionFiles, ValidationFiles
from parseXML import ParseXML
from tests import test_functions
##############################################################################
# Set up variables
fails = []
not_tested = []
##############################################################################
# Specific generation functions
def generate_new_cpp_header(filename, num):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
working_class = ob['baseElements'][num]
os.chdir('./temp')
all_files = CppFiles.CppFiles(working_class, True)
all_files.write_files()
os.chdir('../.')
def generate_extension_header(filename):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ExtensionFiles.ExtensionFiles(ob, '', True)
all_files.write_files()
os.chdir('../.')
def generate_types_header(filename):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ExtensionFiles.ExtensionFiles(ob, 'types', True)
all_files.write_files()
os.chdir('../.')
def generate_fwd_header(filename):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ExtensionFiles.ExtensionFiles(ob, 'fwd', True)
all_files.write_files()
os.chdir('../.')
def generate_plugin_header(filename, num):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ExtensionFiles.ExtensionFiles(ob, '', True)
all_files.write_plugin_files(num)
os.chdir('../.')
def generate_error_header(filename):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ValidationFiles.ValidationFiles(ob, True)
all_files.write_error_header()
all_files.write_error_table_header()
os.chdir('../.')
def generate_validator(filename):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ValidationFiles.ValidationFiles(ob, True)
all_files.write_validator_files()
os.chdir('../.')
def generate_constraints(filename):
parser = ParseXML.ParseXML(filename)
ob = parser.parse_deviser_xml()
os.chdir('./temp')
all_files = ValidationFiles.ValidationFiles(ob, True)
all_files.write_constraints()
os.chdir('../.')
#############################################################################
# Specific compare functions
def compare_files(correct_file, temp_file):
return test_functions.compare_files(correct_file, temp_file, fails,
not_tested)
def compare_code_headers(class_name):
correct_file = '.\\test-code\\{0}.h'.format(class_name)
temp_file = '.\\temp\\{0}.h'.format(class_name)
return compare_files(correct_file, temp_file)
def compare_ext_headers(class_name):
correct_file = '.\\test-extension\\{0}.h'.format(class_name)
temp_file = '.\\temp\\{0}.h'.format(class_name)
return compare_files(correct_file, temp_file)
def compare_code_impl(class_name):
correct_file = '.\\test-code\\{0}.cpp'.format(class_name)
temp_file = '.\\temp\\{0}.cpp'.format(class_name)
return compare_files(correct_file, temp_file)
def compare_ext_impl(class_name, declared=False):
if declared:
correct_file = '.\\test-extension\\{0}Declared.cxx'.format(class_name)
temp_file = '.\\temp\\{0}Declared.cxx'.format(class_name)
else:
correct_file = '.\\test-extension\\{0}.cpp'.format(class_name)
temp_file = '.\\temp\\{0}.cpp'.format(class_name)
return compare_files(correct_file, temp_file)
#############################################################################
# Specific test functions
def run_test(name, num, class_name, test_case, list_of):
filename = test_functions.set_up_test(name, class_name, test_case)
generate_new_cpp_header(filename, num)
fail = compare_code_headers(class_name)
fail += compare_code_impl(class_name)
if len(list_of) > 0:
class_name = list_of
fail += compare_code_headers(class_name)
        fail += compare_code_impl(class_name)
print('')
return fail
def run_ext_test(name, class_name, test_case, test):
filename = test_functions.set_up_test(name, class_name, test_case)
if test == 0:
        generate_extension_header(filename)
elif test == 1:
generate_types_header(filename)
else:
generate_fwd_header(filename)
fail = compare_ext_headers(class_name)
if test == 0:
fail += compare_ext_impl(class_name)
print('')
return fail
def run_plug_test(name, class_name, test_case, num):
filename = test_functions.set_up_test(name, class_name, test_case)
generate_plugin_header(filename, num)
fail = compare_ext_headers(class_name)
fail += compare_ext_impl(class_name)
print('')
return fail
def run_valid_test(name, class_name, test_case, is_ext=True):
filename = test_functions.set_up_test(name, class_name, test_case)
if is_ext:
generate_error_header(filename)
fail = compare_ext_headers(class_name)
fail += compare_ext_headers('{0}Table'.format(class_name))
else:
generate_validator(filename)
fail = compare_ext_headers(class_name)
fail += compare_ext_impl(class_name)
print('')
return fail
def run_constraints_test(name, class_name, test_case):
filename = test_functions.set_up_test(name, class_name, test_case)
generate_constraints(filename)
fail = compare_ext_impl(class_name)
fail += compare_ext_impl(class_name, declared=True)
print('')
return fail
#########################################################################
# Main function
def main():
# set up the enivornment
this_dir = os.path.dirname(os.path.abspath(__file__))
(path_to_tests, other) = os.path.split(this_dir)
test_functions.set_path_to_tests(path_to_tests)
if not os.path.isdir('temp'):
os.mkdir('temp')
fail = 0
# run the individual tests
name = 'test_att'
num = 1
class_name = 'Unit'
list_of = ''
test_case = 'unit sid ref'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'test_att'
num = 2
class_name = 'MyLoTest'
list_of = 'ListOfMyLoTests'
test_case = 'attribute on ListOf'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'test_att'
num = 0
class_name = 'MyTestClass'
list_of = ''
test_case = 'all types of attributes'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'test_att'
num = 3
class_name = 'MyRequiredClass'
list_of = ''
test_case = 'all types attributes required'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'test_att'
num = 4
class_name = 'ArrayChild'
list_of = ''
test_case = 'child elements and arrays'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'test_att'
num = 5
class_name = 'Container'
list_of = ''
test_case = 'a listOf child that uses listOfFoo as the name'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'test_att'
class_name = 'TestSBMLError'
test_case = 'error enumeration '
fail += run_valid_test(name, class_name, test_case)
name = 'qual'
num = 5
class_name = 'FunctionTerm'
list_of = 'ListOfFunctionTerms'
test_case = 'an element on ListOf'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'qual'
num = 3
class_name = 'Output'
list_of = 'ListOfOutputs'
test_case = 'simple class'
fail += run_test(name, num, class_name, test_case, list_of)
name = 'qual'
num = 1
class_name = 'Transition'
list_of = 'ListOfTransitions'
test_case = 'cl
|
danielhers/ucca
|
uccaapp/upload_streussel_passages.py
|
Python
|
gpl-3.0
| 3,363
| 0.003271
|
#!/usr/bin/env python3
import sys
import argparse
from ucca.convert import from_text, to_json
from uccaapp.api import ServerAccessor
desc = """Upload a passage from a streussel format file"""
class StreusselPassageUploader(ServerAccessor):
def __init__(self, user_id, source_id, project_id, **kwargs):
super().__init__(**kwargs)
self.set_source(source_id)
self.set_project(project_id)
self.set_user(user_id)
def upload_streussel_passage_file(self, filenames, log=None, **kwargs):
del kwargs
log_h = open(log, "w", encoding="utf-8") if log else None
with open(filenames) as f_all:
for filename in f_all:
passage_text = ""
external_id = "None given"
filename = filename.strip()
with open(filename, encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
elif line.startswith("#"):
fields = line.split()
if len(fields) != 4 or fields[1] != "sent_id":
print("FORMAT ERROR in " + filename, file=sys.stderr)
else:
external_id = fields[3].split("-")[1]
else:
passage_text = passage_text + " " + line
passage_out = self.create_passage(text=passage_text.strip(), external_id=external_id, type="PUBLIC",
source=self.source)
task_in = dict(type="TOKENIZATION", status="SUBMITTED", project=self.project,
user=self.user, passage=passage_out, manager_comment="External ID: " + external_id,
user_comment="", parent=None, is_demo=False, is_active=True)
tok_task_out = self.create_task(**task_in)
tok_user_task_in = dict(tok_task_out)
passage = list(from_text(passage_text.split(), tokenized=True))[0]
tok_user_task_in.update(to_json(passage, return_dict=True, tok_task=True))
self.submit_task(**tok_user_task_in)
print("Uploaded passage " + filename + " successfully.", file=sys.stderr)
if log:
print(filename.split(".")[-2], passage_out["id"], tok_task_out["id"], file=log_h, sep="\t")
if log:
log_h.close()
@staticmethod
def add_arguments(argparser):
argparser.add_argument("filenames", help="passage file names to convert and upload")
argparser.add_argument("-l", "--log", help="filename to write log of uploaded passages to")
ServerAccessor.add_project_id_argument(argparser)
ServerAccessor.add_source_id_argument(argparser)
ServerAccessor.add_user_id_argument(argparser)
ServerAccessor.add_arguments(argparser)
def main(**kwargs):
StreusselPassageUploader(**kwargs).upload_streussel_passage_file(**kwargs)
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser(description=desc)
StreusselPassageUploader.add_arguments(argument_parser)
main(**vars(argument_parser.parse_args()))
sys.exit(0)
|
|
michkot/benchexec
|
benchexec/tools/predatorhp.py
|
Python
|
apache-2.0
| 3,054
| 0.002292
|
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
import subprocess
import os
import sys
class Tool(benchexec.tools.template.BaseTool):
"""
Wrapper for a Predator - Hunting Party
http://www.fit.vutbr.cz/research/groups/verifit/tools/predator-hp/
"""
def executable(self):
executable = util.find_executable('predatorHP.py')
executableDir = os.path.dirname(executable)
if not os.path.isfile(os.path.join(executableDir, "predator-build-ok")):
self._buildPredatorHp(executableDir)
return executable
def _buildPredatorHp(self, executableDir):
proc = subprocess.Popen([os.path.join(executableDir, 'build-all.sh')], cwd=executableDir)
proc.communicate()
if proc.returncode:
sys.exit('Failed to build Predator-HP, please fix the build first.')
def name(self):
return 'Predator-HP'
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
spec = ["--propertyfile", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
status = "ERROR"
if "UNKNOWN" in output:
status = result.RESULT_UNKNOWN
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
elif "FALSE(valid-memtrack)" in output:
status = result.RESULT_FALSE_MEMTRACK
elif "FALSE(valid-deref)" in output:
status = result.RESULT_FALSE_DEREF
elif "FALSE(valid-free)" in output:
status = result.RESULT_FALSE_FREE
elif "FALSE" in output:
status = result.RESULT_FALSE_REACH
if (status == "ERROR" and isTimeout):
status = "TIMEOUT"
return status
def program_files(self, executable):
""" List of files/directories necessary to build and run the tool. """
executableDir = os.path.dirname(executable)
dependencies = [
"predator-repo",
"build-all.sh"
]
        return [executable] + util.flatten(util.expand_filename_pattern(dep, executableDir) for dep in dependencies)
def working_directory(self, executable):
return os.path.dirname(executable)
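For illustration (file names and the option are made up), cmdline() simply splices the property file between the user options and the task files:
# Tool().cmdline('./predatorHP.py', ['--some-option'], ['task.c'], 'unreach.prp')
#   -> ['./predatorHP.py', '--some-option', '--propertyfile', 'unreach.prp', 'task.c']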
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/benchmarks/benchmarks/go_benchmark_functions/go_funcs_N.py
|
Python
|
mit
| 4,191
| 0.000716
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import cos, sqrt, sin, abs
from .go_benchmark import Benchmark
class NeedleEye(Benchmark):
r"""
NeedleEye objective function.
    This class defines the Needle-Eye [1]_ global optimization problem. This is
    a multimodal minimization problem defined as follows:
.. math::
f_{\text{NeedleEye}}(x) =
\begin{cases}
1 & \textrm{if }\hspace{5pt} \lvert x_i \rvert < eye \hspace{5pt}
\forall i \\
\sum_{i=1}^n (100 + \lvert x_i \rvert) & \textrm{if } \hspace{5pt}
\lvert x_i \rvert > eye \\
0 & \textrm{otherwise}\\
\end{cases}
Where, in this exercise, :math:`eye = 0.0001`.
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 1` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
f = fp = 0.0
eye = 0.0001
for val in x:
if abs(val) >= eye:
fp = 1.0
f += 100.0 + abs(val)
else:
f += 1.0
if fp < 1e-6:
f = f / self.N
return f
class NewFunction01(Benchmark):
r"""
NewFunction01 objective function.
This class defines the NewFunction01 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{NewFunction01}}(x) = \left | {\cos\left(\sqrt{\left|{x_{1}^{2}
+ x_{2}}\right|}\right)} \right |^{0.5} + (x_{1} + x_{2})/100
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.18459899925` for
:math:`x = [-8.46669057, -9.99982177]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO line 355
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-8.46668984648, -9.99980944557]]
self.fglob = -0.184648852475
def fun(self, x, *args):
self.nfev += 1
return ((abs(cos(sqrt(abs(x[0] ** 2 + x[1]))))) ** 0.5
+ 0.01 * (x[0] + x[1]))
class NewFunction02(Benchmark):
r"""
NewFunction02 objective function.
This class defines the NewFunction02 global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{NewFunction02}}(x) = \left | {\sin\left(\sqrt{\lvert{x_{1}^{2}
+ x_{2}}\rvert}\right)} \right |^{0.5} + (x_{1} + x_{2})/100
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.19933159253` for
:math:`x = [-9.94103375, -9.99771235]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO Line 368
TODO WARNING, minimum value is estimated from running many optimisations and
choosing the best.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-9.94114736324, -9.99997128772]]
self.fglob = -0.199409030092
def fun(self, x, *args):
self.nfev += 1
return ((abs(sin(sqrt(abs(x[0] ** 2 + x[1]))))) ** 0.5
+ 0.01 * (x[0] + x[1]))
# Newfunction 3 from Gavana is entered as Mishra05.
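A minimal sketch of evaluating one of these benchmarks at its recorded optimum; the values come from the attributes set above, so the residual is only expected to be small up to the tabulated precision:
f = NewFunction01()
x_opt = f.global_optimum[0]
print(abs(f.fun(x_opt) - f.fglob))  # expected to be close to zero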
|
dwysocki/trivial
|
doc/stats/plot.py
|
Python
|
gpl-3.0
| 2,722
| 0.005878
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
def separate_windows(arr):
ret = dict()
for k, v in zip(*arr):
if k in ret:
ret[k].append(v)
else:
ret[k] = [v]
return ret
def load_file(filename, *args, **kwargs):
data = np.genfromtxt(
filename,
usecols=(0,1,2),
dtype=np.float, delimiter='\t',
skip_header=1,
*args, **kwargs)
time_ms, bytes, window_size = data.T
throughput = bytes*8 / time_ms # kbps
window_throughput = np.array([window_size, throughput])
return separate_windows(window_throughput)
def make_plot(throughputs, output, title,
xlabel="Size", ylabel="Throughput (kbps)",
*args, **kwargs):
N = len(throughputs)
x_offsets = np.arange(N)
fig, ax = plt.subplots()
ax.boxplot(list(throughputs.get(k)
for k in sorted(throughputs.keys())))
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticklabels(list(sorted(throughputs)))
def finalize(output, *args, **kwargs):
plt.savefig(output, *args, dpi=500, **kwargs)
plt.close()
plt.cla()
def plot_data(filename, output, title,
xlabel="Window Size", ylabel="Throughput (kbps)",
*args, **kwargs):
window_to_throughput = load_file(filename)
labels = list(map(int, sorted(window_to_throughput.keys())))
N = len(window_to_throughput)
x_offsets = np.arange(N)
fig, ax = plt.subplots()
ax.boxplot(list(window_to_throughput.get(k)
for k in labels))
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticklabels(labels)
finalize(output, *args, **kwargs)
def plot_multi(filenames, output, window, title,
xlabel="Trial", ylabel="Throughput (kbps)",
*args, **kwargs):
all_windows = {fname.split('.')[0] : load_file(fname)
for fname in filenames}
stop_wait = {trial : all_windows[trial][1]
for trial in all_windows}
optimal = {trial : all_windows[trial][window]
for trial in all_windows}
trials = sorted(optimal.keys())
N = len(trials)
x_offsets = np.arange(N)
fig, ax = plt.subplots()
ax.boxplot(list(optimal.get(k)
for k in trials))
plt.setp(ax.boxplot(list(stop_wait.get(k)
for k in trials),
patch_artist=True)['boxes'],facecolor='cyan',alpha=0.5)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticklabels(trials)
finalize(output, *args, **kwargs)
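An illustrative call with hypothetical file names; plot_data() expects a tab-separated file with a one-line header and columns time_ms, bytes, window_size:
# plot_data('window_sweep.tsv', 'window_sweep.png', 'Throughput vs. window size')
# plot_multi(['trial1.tsv', 'trial2.tsv'], 'trials.png', window=8,
#            title='Stop-and-wait vs. an 8-packet window')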
|
ashishbaghudana/mthesis-ashish
|
resources/tees/Utils/InteractionXML/CopyParse.py
|
Python
|
mit
| 4,989
| 0.008419
|
try:
import xml.etree.cElementTree as ET
except ImportError:
import cElementTree as ET
import Utils.ElementTreeUtils as ETUtils
import sys
import CorpusElements
from optparse import OptionParser
def copyParse(input, source, output, parse, tokenization):
print >> sys.stderr, "Loading input file", input
inputTree = ETUtils.ETFromObj(input)
inputRoot = inputTree.getroot()
print >> sys.stderr, "Loading source:",
sourceElements = CorpusElements.loadCorpus(source, parse, tokenization)
sourceSentencesByText = {}
for sentence in sourceElements.sentences:
sentenceText = sentence.sentence.get("text")
#assert not sourceSentencesByText.has_key(sentenceText)
if sourceSentencesByText.has_key(sentenceText):
print >> sys.stderr, "Duplicate text", sentence.sentence.get("id"), sourceSentencesByText[sentenceText].sentence.get("id")
sourceSentencesByText[sentenceText] = sentence
parsesCopied = [0,0]
tokenizationsCopied = [0,0]
for sentence in inputRoot.getiterator("sentence"):
parsesCopied[1] += 1
tokenizationsCopied[1] += 1
#sourceSentence = sourceElements.sentencesByOrigId[sentence.attrib["origId"]]
if not sourceSentencesByText.has_key(sentence.get("text")):
print >> sys.stderr, "Warning, no text found for sentence", sentence.get("id")
continue
sourceSentence = sourceSentencesByText[sentence.get("text")]
# Create analyses element (if needed)
targetAnalysesElement = sentence.find("sentenceanalyses")
if targetAnalysesElement == None:
targetAnalysesElement = ET.Element("sentenceanalyses")
sentence.append(targetAnalysesElement)
# Create parses element (if needed)
        targetParsesElement = targetAnalysesElement.find("parses")
if targetParsesElement == None:
targetParsesElement = ET.Element("parses")
targetAnalysesElement.append(targetParsesElement)
# Check whether parse already exists
targetParseElements = targetParsesElement.findall("parse")
newParse = None
for parseElement in targetParseElements:
if parseElement.get("parser") == parse:
newParse = parseElement
break
        # Copy parse if it doesn't exist
if newParse == None and sourceSentence.parseElement != None:
targetParsesElement.append(sourceSentence.parseElement)
parsesCopied[0] += 1
# Create tokenizations element (if needed)
targetTokenizationsElement = targetAnalysesElement.find("tokenizations")
if targetTokenizationsElement == None:
targetTokenizationsElement = ET.Element("tokenizations")
targetAnalysesElement.append(targetTokenizationsElement)
# Check whether tokenization already exists
targetTokenizationElements = targetTokenizationsElement.findall("tokenization")
newTokenization = None
for tokenizationElement in targetTokenizationElements:
if tokenizationElement.attrib["tokenizer"] == newParse.attrib["tokenizer"]:
newTokenization = tokenizationElement
break
        # Copy tokenization if it doesn't exist
if newTokenization == None and sourceSentence.tokenizationElement != None:
targetTokenizationsElement.append(sourceSentence.tokenizationElement)
tokenizationsCopied[0] += 1
print >> sys.stderr, "Copied parse elements", parsesCopied
print >> sys.stderr, "Copied tokenization elements", tokenizationsCopied
if output != None:
print >> sys.stderr, "Writing output to", output
ETUtils.write(inputTree, output)
return inputTree
if __name__=="__main__":
print >> sys.stderr, "##### Copy Parse #####"
# Import Psyco if available
try:
import psyco
psyco.full()
print >> sys.stderr, "Found Psyco, using"
except ImportError:
print >> sys.stderr, "Psyco not installed"
optparser = OptionParser(usage="%prog [options]\nCreate an html visualization for a corpus.")
optparser.add_option("-i", "--input", default=None, dest="input", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-s", "--source", default=None, dest="source", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-o", "--output", default=None, dest="output", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-t", "--tokenization", default=None, dest="tokenization", help="Tokenization element name")
optparser.add_option("-p", "--parse", default=None, dest="parse", help="Parse element name")
(options, args) = optparser.parse_args()
assert(options.input != None)
assert(options.source != None)
assert(options.output != None)
copyParse(options.input, options.source, options.output, options.parse, options.tokenization)
|
HewlettPackard/ratekeeper-neutron-ml2-plugin
|
patches/nova/ratekeeper_monkey_patch.py
|
Python
|
apache-2.0
| 2,041
| 0.00196
|
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Decorator to monkey patch nova --specifically the function
nova.network.neutronv2.api._populate_neutron_extension_values--
to enable passing to neutron at port creation time the rk_rates
specified by the image flavor
"""
from oslo_log import log
from networking_hp.plugins.ml2.drivers.ratekeeper.common import (
constants as rk_const)
LOG = log.getLogger(__name__)
def ratekeeper_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param fn: - object of the function
:returns: fn -- decorated function
"""
def wrapped_func(*args, **kwarg):
if fn.func_name == '_populate_neutron_extension_values':
LOG.debug("RK: wrapping function call: %s" % fn.func_name)
foo, context, instance, pci_request_id, port_req_body = args
flavor = instance.get_flavor()
extra_specs = flavor.get('extra_specs', {})
            min_rate = int(extra_specs.get(rk_const.RK_MIN_RATE, 0))
max_rate = int(extra_specs.get(rk_const.RK_MAX_RATE, 0))
port_req_body['port'][rk_const.RK_MIN_RATE] = min_rate
port_req_body['port'][rk_const.RK_MAX_RATE] = max_rate
ret = fn(*args, **kwarg)
LOG.debug("RK: Extended neutron request: %s" % args[4])
return ret
return fn(*args, **kwarg)
return wrapped_func
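# Minimal sketch of how the decorator behaves (illustrative only; the function
# name below is hypothetical): any function whose name is not the patched
# target falls straight through, so monkey patching the whole module is safe.
#
#   def _some_other_api_call(*args, **kwargs):
#       return args
#
#   wrapped = ratekeeper_decorator(
#       'nova.network.neutronv2.api._some_other_api_call', _some_other_api_call)
#   wrapped(1, 2)   # -> (1, 2), falls through to fn(*args, **kwarg)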
|
firasbenmakhlouf/JobLookup
|
metadata/admin.py
|
Python
|
mit
| 169
| 0
|
from django.contrib import admin
from metadata.models import *
# Register your models here.
admin.site.register(TanitJobsCategory)
admin.site.register(KeeJobsCategory)
|
pulsar-chem/Pulsar-Core
|
lib/systems/l-arginine.py
|
Python
|
bsd-3-clause
| 1,416
| 0.000706
|
import pulsar as psr
def load_ref_system():
""" Returns l-arginine as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
N 0.8592 -2.9103 -0.8578
C 1.3352 -1.5376 -1.1529
C 2.8596 -1.5658 -1.1624
O 3.6250 -1.8965 -0.2757
O 3.4285 -1.1488 -2.3160
C 0.8699 -0.4612 -0.1600
H 1.4712 0.4581 -0.3123
H 1.0768 -0.7804 0.8827
C -0.6054 -0.1308 -0.3266
H -1.2260 -1.0343 -0.1569
H -0.8065 0.1769 -1.3739
C -1.0120 0.9757 0.6424
H -0.4134 1.8919 0.4482
H -0.7821 0.6714 1.6839
N -2.4750 1.2329 0.5383
        H         -2.7251        1.4082       -0.4139
C -2.9738 2.2808 1.4124
N -3.4837 3.3356 0.8530
H -3.9046 4.0108 1.4410
N -2.8404 2.0695 2.8280
H -2.7215 1.1094 3.0676
H -3.5979 2.4725 3.3357
H -0.1386 -2.9250 -0.8895
H 1.1675 -3.1979 0.0476
        H          0.9562       -1.2864       -2.1768
H 4.3768 -1.2078 -2.2540
""")
|
Egomania/SOME-IP_Generator
|
src/attacks/sendErrorOnError.py
|
Python
|
agpl-3.0
| 1,693
| 0.005316
|
"""
Answers with an Error message to a previous Error message.
"""
import copy
import random
from src import Msg
from src import SomeIPPacket
from src.attacks import AttackerHelper
def sendErrorOnError(a, msgOrig):
""" Attack Specific Function. """
sender = msgOrig.receiver
receiver = msgOrig.sender
timestamp = None
message = {}
    message['service'] = msgOrig.message['service']
message['method'] = msgOrig.message['method']
message['client'] = msgOrig.message['client']
message['session'] = msgOrig.message['session']
message['proto'] = SomeIPPacket.VERSION
message['iface'] = SomeIPPacket.INTERFACE
message['type'] = SomeIPPacket.messageTypes['ERROR']
errors = ['E_NOT_OK', 'E_NOT_READY', 'E_NOT_REACHABLE', 'E_TIMEOUT', 'E_MALFORMED_MESSAGE']
message['ret'] = SomeIPPacket.errorCodes[random.choice(errors)]
msg = Msg.Msg(sender, receiver, message, timestamp)
return msg
def doAttack(curAttack, msgOrig, a, attacksSuc):
""" Generic Function called from Attacker module. """
RetVal = {}
if a.verbose:
print ('Send Error On Error Attack')
if msgOrig.message['type'] == SomeIPPacket.messageTypes['ERROR']:
msg = sendErrorOnError(a, msgOrig)
if a.verbose:
print ('MALICIOUS MSG: ', msg.message, ' FROM=', msg.sender, ' TO=', msg.receiver)
RetVal['msg'] = msg
RetVal['attackOngoing'] = False
RetVal['dropMsg'] = False
RetVal['counter'] = attacksSuc + 1
else:
RetVal['msg'] = None
RetVal['attackOngoing'] = True
RetVal['dropMsg'] = False
RetVal['counter'] = attacksSuc
return RetVal
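# Illustrative sketch (the attacker object and message content below are
# hypothetical; any object with a .verbose attribute will do): doAttack only
# fires when the observed message is itself an ERROR, otherwise it keeps the
# attack pending and lets the original traffic pass.
#
#   observed = Msg.Msg('ECU-A', 'ECU-B',
#                      {'service': 1, 'method': 2, 'client': 3, 'session': 4,
#                       'type': SomeIPPacket.messageTypes['ERROR']}, None)
#   result = doAttack('sendErrorOnError', observed, attacker, 0)
#   result['msg']   # crafted ERROR reply, or None if nothing was sent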
|
tgquintela/pyDataProcesser
|
pyDataProcesser/DataManagement.py
|
Python
|
mit
| 16,526
| 0.001029
|
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime
import numpy as np
from DataDictObject import DataDictObject
from TablonReader import TablonReader
from TablonEncoder import TablonEncoder
from TablonLoader import TablonLoaderDB
class DataManagementObject:
"""This is the class which contains the whole parser pipeline in order to
transform between the different possible states of the data.
These states are:
* RawData: this format is identified by a string which is the path
and the filename of the raw data.
* Tablon: this format is the data parsed and each column formatted
to one of the possible accepted formats of a common DB.
It is characterized by a pandas dataframe.
* DataFrame: this format contains special types of objects which can
          describe better the essence of the data and allow it to be analyzed in
special ways. It is characterized by a pandas dataframe.
* DB: this is the server database. It is characterized by the
direction of the database.
"""
def __init__(self, client='', client_code='', pathdata='',
date_compilationdata='', delimiter='', typefile=''):
"""This object is recommended to be initialized which as information as
we can give to it.
"""
# common for the client. It is supposed not to change
self.client = client
self.client_code = client_code
# for the same client we can use different files. This could change.
self.pathdata = pathdata
self.date_compilationdata = date_compilationdata
self.date_treatmentdata = datetime.now()
self.delimiter = delimiter
# Initialization of the central object of the data process.
#It will save all the information.
self.datatransformation = DataProcessCenter()
# The script only requires this one.
def add_datadictobject(self, dataprocessobject):
self.datatransformation = dataprocessobject
# Collection of functions to set the class DataProcessCenter at this level.
def add_parser(self, parser):
self.datatransformation.parser = parser
def add_encoder(self, encoder):
self.datatransformation.encoder = encoder
def add_format(self, format):
self.datatransformation.format = format
def add_loader(self, loader):
self.datatransformation.loader = loader
def add_preexploratory(self, preexploratory):
self.datatransformation.preexploratory = preexploratory
    # Collection of functions to specify information for the individual process steps.
def add_parser_parserdict(self, parserdict):
self.datatransformation.parser.add_transformationdict(parserdict)
def add_parser_expansiondict(self, expansiondict):
self.datatransformation.parser.add_expansiondict(expansiondict)
def add_parser_filteringdict(self, filteringdict):
self.datatransformation.parser.add_filteringdict(filteringdict)
def add_encode_encoderdict(self, encoderdict):
self.datatransformation.encoder.add_transformationdict(encoderdict)
def add_encode_expansiondict(self, expansiondict):
self.datatransformation.encoder.add_expansiondict(expansiondict)
def add_encode_filteringdict(self, filteringdict):
self.datatransformation.encoder.add_filteringdict(filteringdict)
def add_format_formatdict(self, formatdict):
self.datatransformation.format.add_transformationdict(formatdict)
def add_format_expansiondict(self, expansiondict):
self.datatransformation.format.add_expansiondict(expansiondict)
def add_format_filtering(self, filteringdict):
self.datatransformation.format.add_filteringdict(filteringdict)
def add_loader_loaderdict(self, loaderdict):
self.datatransformation.loader.add_transformationdict(loaderdict)
def add_loader_expansiondict(self, expansiondict):
self.datatransformation.loader.add_expansiondict(expansiondict)
def add_loader_filtering(self, filteringdict):
self.datatransformation.loader.add_filteringdict(filteringdict)
# calling to self.datatransformation
# TODO: it has to be implemented the inputs of the data.
def parse(self, filepath):
self.datatransformation.parse(filepath)
# def encode(self,tablon):
# def format(self,tablon):
# def load(self,tablon):
def apply_pipeline(self, pipelineselection, startdataframe=''):
"""Run all the process of the data for the determine processes in the
pipelineselection list or numpy array ({0,1} or boolean).
"""
self.datatransformation.apply_pipeline(pipelineselection,
startdataframe)
# TODO: probably has to be done outside
################
def save_Manager(self, filename_manager, path_manager=''):
"""With this function we are able to save the manager and all the
information stored in order to redo the process of the data.
"""
pass
def load_Manager(self, filename_manager, path_manager=''):
"""With this function we are able to load the manager and all the
information stored in order to redo the process with other data.
        It returns an object in which we can change the main parameters while
        keeping the parser, format, codification and loading configuration.
"""
pass
#pipeline?
def show(self):
"""TODO:"""
return
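# Illustrative sketch (the client name and step dictionaries below are
# hypothetical): a typical session builds the manager, attaches the per-step
# dictionaries and then runs the selected pipeline steps.
#
#   manager = DataManagementObject(client='acme', client_code='001',
#                                  pathdata='Data/acme.csv', delimiter=';')
#   manager.add_parser_parserdict(my_parser_dict)
#   manager.add_encode_encoderdict(my_encoder_dict)
#   manager.apply_pipeline([1, 0, 1, 1, 0])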
class DataProcessCenter:
"""This class is oriented to agglomerate all the processes related with
the dictionaries and settings of the data treatment.
It works as an interface between the general pipeline class
(DataManagementObject) and the class related to the basic treatment
information of the data (DataDictObject).
"""
# TODO: generalize self.input, self.process, self.output
# TODO: exploratory and preexploratory in TablonDescriptor
    # TODO: probably better to use a dictionary as input
def __init__(self, client):
"""The initialization of the DataDictCente. It initializes all the the
subprocesses.
"""
# Read parameter file.
## That file has to retrieve:
# key
# filename
# path_data
# baseurl
execfile("Scripts/" + client + "_parameters.py")
compulsoryvars = {'filename': '', 'path_data': 'Data/', 'key': '',
'baseurl': ''}
for e in compulsoryvars:
if not e in locals():
exec("global " + e)
exec(e + ' = ' + '"' + compulsoryvars[e] + '"')
self.filename = filename
self.path_data = path_data
# Initialize operators
self.parser = TablonReader(DataDictObject('parser'))
self.encoder = TablonEncoder(DataDictObject('encoder'))
self.loader = TablonLoaderDB(DataDictObject('loader'), key, baseurl)
self.preexploratory = DataDictObject('preexploratory')
self.exploratory = DataDictObject('exploratory')
# Initialize pipeline
self.pipeline = [self.parser, self.preexploratory, self.encoder,
                         self.exploratory, self.loader]
self.pipelineselection = ['parser', 'preexploratory', 'encoder',
'loader', 'exploratory']
        # The processes that could generate an input to this system
self.inputters = ['parser', 'downloader']
# The processes that generate a tablon
self.tabloners = ['encoder'] + self.inputters
# The other parematers
self.typeoutput = '' # tablon, formattablon,analyticaltablon, others
self.defaultpipe = ['parser', 'preexploratory', 'encoder', 'loader',
'exploratory']
def apply_pipeline(self, pipelineselection, startdataframe=''):
"""This function apply a selection of tasks indicated in the nump
|
hyperized/ansible
|
lib/ansible/modules/network/eos/eos_vrf.py
|
Python
|
gpl-3.0
| 10,723
| 0.001585
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on Arista EOS network devices
description:
- This module provides declarative management of VRFs
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the VRF.
required: true
rd:
description:
- Route distinguisher of the VRF
interfaces:
description:
- Identifies the set of interfaces that
should be configured in the VRF. Interfaces must be routed
interfaces in order to be placed into a VRF. The name of interface
should be in expanded format and not abbreviated.
associated_interfaces:
description:
      - This is an intent option and checks the operational state of the given vrf C(name)
for associated interfaces. If the value in the C(associated_interfaces) does not match with
the operational state of vrf interfaces on device it will result in failure.
version_added: "2.5"
aggregate:
description: List of VRFs definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
type: bool
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Create vrf
eos_vrf:
name: test
rd: 1:200
interfaces:
- Ethernet2
state: present
- name: Delete VRFs
eos_vrf:
name: test
state: absent
- name: Create aggregate of VRFs with purge
eos_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
eos_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- vrf definition test
- rd 1:100
- interface Ethernet1
- vrf forwarding test
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
purge = module.params['purge']
for w in want:
name = w['name']
rd = w['rd']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent':
if obj_in_have:
commands.append('no vrf definition %s' % name)
elif state == 'present':
if not obj_in_have:
commands.append('vrf definition %s' % name)
if rd is not None:
commands.append('rd %s' % rd)
if w['interfaces']:
for i in w['interfaces']:
                        commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
else:
if w['rd'] is not None and w['rd'] != obj_in_have['rd']:
commands.append('vrf definition %s' % w['name'])
commands.append('rd %s' % w['rd'])
if w['interfaces']:
if not obj_in_have['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
elif set(w['interfaces']) != obj_in_have['interfaces']:
missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['name'], want)
if not obj_in_want:
commands.append('no vrf definition %s' % h['name'])
return commands
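# Illustrative expansion (hypothetical input, state=present, purge disabled):
# for want = [{'name': 'test', 'rd': '1:200', 'interfaces': ['ethernet2']}]
# checked against an empty `have`, the function would emit:
#   ['vrf definition test', 'rd 1:200',
#    'interface ethernet2', 'vrf forwarding test']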
def map_config_to_obj(module):
objs = []
output = run_commands(module, {'command': 'show vrf', 'output': 'text'})
lines = output[0].strip().splitlines()[3:]
out_len = len(lines)
index = 0
while out_len > index:
line = lines[index]
if not line:
continue
splitted_line = re.split(r'\s{2,}', line.strip())
if len(splitted_line) == 1:
index += 1
continue
else:
obj = dict()
obj['name'] = splitted_line[0]
obj['rd'] = splitted_line[1]
obj['interfaces'] = []
if len(splitted_line) > 4:
obj['interfaces'] = []
interfaces = splitted_line[4]
if interfaces.endswith(','):
while interfaces.endswith(','):
# gather all comma separated interfaces
if out_len <= index:
break
index += 1
line = lines[index]
vrf_line = re.split(r'\s{2,}', line.strip())
interfaces += vrf_line[-1]
for i in interfaces.split(','):
obj['interfaces'].append(i.strip().lower())
index += 1
objs.append(obj)
return objs
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
if item.get('interfaces'):
item['interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('interfaces') if intf]
if item.get('associated_interfaces'):
item['associated_interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('associated_interfaces') if intf]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'state': module.params['state'],
'rd': module.params['rd'],
'interfaces': [intf.replace(" ", "").lower() for intf in module.params['interfaces']] if module.params['interfaces'] else [],
'associated_interfaces': [intf.replace(" ", "").lower() for intf in
module.params['associated_interfaces']] if module.params['associated_interfaces'] else []
})
return obj
def check_declarative_intent_param
|
Blazemeter/taurus
|
examples/molotov/blazedemo.py
|
Python
|
apache-2.0
| 169
| 0
|
import molotov
@molotov.scenario(100)
async def Molotov_test(session):
    async with session.get('https://blazedemo.com/') as resp:
        assert resp.status == 200
|
benjello/liam2
|
liam2/entities.py
|
Python
|
gpl-3.0
| 33,581
| 0.000566
|
# encoding: utf-8
from __future__ import print_function
import collections
import sys
import warnings
# import bcolz
import numpy as np
import tables
import config
from data import (merge_arrays, get_fields, ColumnArray, index_table,
build_period_array)
from expr import (Variable, VariableMethodHybrid, GlobalVariable, GlobalTable,
GlobalArray, Expr, BinaryOp, MethodSymbol, normalize_type)
from exprtools import parse
from process import Assignment, ProcessGroup, While, Function, Return
from utils import (count_occurrences, field_str_to_type, size2str,
WarnOverrideDict, split_signature, argspec,
UserDeprecationWarning)
from tfunc import ValueForPeriod
default_value_by_strtype = {"bool": False, "float": np.nan, 'int': -1}
max_vars = 0
# def compress_column(a, level):
# arr = bcolz.carray(a, cparams=bcolz.cparams(level))
# print "%d -> %d (%.2f)" % (arr.nbytes, arr.cbytes,
# float(arr.nbytes) / arr.cbytes),
# return arr
#
#
# def decompress_column(a):
# return a[:]
def global_symbols(globals_def):
# FIXME: these should be computed once somewhere else, not for each
# entity. I guess they should have a class of their own
symbols = {}
for name, global_def in globals_def.iteritems():
global_type = global_def.get('fields')
if isinstance(global_type, list):
# add namespace for table
symbols[name] = GlobalTable(name, global_type)
if name == 'periodic':
# special case to add periodic variables in the global
# namespace
symbols.update(
(name, GlobalVariable('periodic', name, type_))
for name, type_ in global_type)
else:
global_type = global_def['type']
assert isinstance(global_type, type), "not a type: %s" % global_type
symbols[name] = GlobalArray(name, global_type)
return symbols
# This is an awful workaround for the fact that tables.Array does not support
# fancy indexes with negative indices.
# See https://github.com/PyTables/PyTables/issues/360
class DiskBackedArray(object):
def __init__(self, arr):
self.arr = arr
def __getitem__(self, item):
# load the array entirely in memory before indexing it
return self.arr[:][item]
def __getattr__(self, item):
return getattr(self.arr, item)
def __len__(self):
return len(self.arr)
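# Illustrative usage (plain numpy stands in for a tables.Array here): the
# wrapper materialises the whole array before indexing, which is what makes
# fancy indexes with negative indices work.
#
#   backed = DiskBackedArray(np.arange(10))
#   backed[[-1, -3]]   # -> array([9, 7]), resolved on the in-memory copy
#   len(backed)        # -> 10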
class Field(object):
def __init__(self, name, dtype, input=True, output=True, default_value=None):
self.name = name
self.dtype = dtype
self.default_value = default_value
self.input = input
self.output = output
class FieldCollection(list):
def __init__(self, iterable=None):
list.__init__(self, iterable)
for f in self:
assert isinstance(f, Field)
@property
def in_input(self):
return FieldCollection(f for f in self if f.input)
@property
def in_output(self):
return FieldCollection(f for f in self if f.output)
@property
def names(self):
for f in self:
yield f.name
@property
def name_types(self):
return [(f.name, f.dtype) for f in self]
@property
def dtype(self):
return np.dtype(list(self.name_types))
@property
def default_values(self):
return dict((f.name, f.default_value) for f in self)
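# Illustrative example: a small collection exposes the combined numpy dtype
# and the per-field default values.
#
#   fc = FieldCollection([Field('id', int),
#                         Field('weight', float, default_value=np.nan)])
#   fc.dtype            # -> dtype([('id', '<i8'), ('weight', '<f8')]) on 64-bit
#   fc.default_values   # -> {'id': None, 'weight': nan}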
class Entity(object):
def __init__(self, name, fields=None, links=None, macro_strings=None,
process_strings=None, array=None):
"""
Parameters
----------
name
fields : list of tuple (name, type)
links : {name: links.Link}
macro_strings
process_strings
array
"""
self.name = name
# we should have exactly one of either array or fields defined
assert ((fields is None and array is not None) or
(fields is not None and array is None))
if array is not None:
if fields is None:
fields = get_fields(array)
array_period = np.min(array['period'])
else:
array_period = None
if not isinstance(fields, FieldCollection):
def fdef2field(name, fielddef):
initialdata = True
output = True
default_value = None
if isinstance(fielddef, Field):
return fielddef
elif isinstance(fielddef, (dict, str)):
if isinstance(fielddef, dict):
strtype = fielddef['type']
initialdata = fielddef.get('initialdata', True)
output = fielddef.get('output', True)
default_value = fielddef.get('default', default_value_by_strtype[strtype])
elif isinstance(fielddef, str):
strtype = fielddef
default_value = default_value_by_strtype[strtype]
else:
raise Exception('invalid field definition')
dtype = field_str_to_type(strtype, "field '%s'" % name)
else:
assert isinstance(fielddef, type)
dtype = normalize_type(fielddef)
return Field(name, dtype, initialdata, output, default_value)
fields = FieldCollection(fdef2field(name, fdef)
for name, fdef in fields)
duplicate_names = [name
                               for name, num in count_occurrences(fields.names)
if num > 1]
            if duplicate_names:
raise Exception("duplicate fields in entity '%s': %s"
% (self.name, ', '.join(duplicate_names)))
fnames = set(fields.names)
if 'id' not in fnames:
fields.insert(0, Field('id', int))
if 'period' not in fnames:
fields.insert(0, Field('period', int))
self.fields = fields
self.links = links
if macro_strings is None:
macro_strings = {}
self.macro_strings = macro_strings
self.process_strings = process_strings
self.processes = None
self.expectedrows = tables.parameters.EXPECTED_ROWS_TABLE
self.table = None
self.input_table = None
self.indexed_input_table = None
self.indexed_output_table = None
self.input_rows = {}
# TODO: it is unnecessary to keep periods which have already been
# simulated, because (currently) when we go back in time, we always go
# back using the output table... but periods before the start_period
# are only present in input_index
self.input_index = {}
self.output_rows = {}
self.output_index = {}
self.output_index_node = None
self.base_period = None
# we need a separate field, instead of using array['period'] to be able
# to get the period even when the array is empty.
self.array_period = array_period
self.array = array
self.lag_fields = []
self.array_lag = None
self.num_tmp = 0
self.temp_variables = {}
self.id_to_rownum = None
if array is not None:
rows_per_period, index_per_period = index_table(array)
self.input_rows = rows_per_period
self.output_rows = rows_per_period
self.input_index = index_per_period
self.output_index = index_per_period
self.id_to_rownum = index_per_period[array_period]
self._variables = None
self._methods = None
@classmethod
def from_yaml(cls, ent_name, entity_def):
|
cursoweb/ejemplo-django
|
manage.py
|
Python
|
mit
| 248
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "curso.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
portante/sosreport
|
sos/plugins/ipsec.py
|
Python
|
gpl-2.0
| 1,514
| 0.009908
|
## Copyright (C) 2007 Sadique Puthen <sputhenp@redhat.com>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class IPSec(Plugin):
"""ipsec related information
"""
plugin_name = "ipsec"
packages = ('ipsec-tools',)
class RedHatIpsec(IPSec, RedHatPlugin):
"""ipsec related information for Red Hat distributions
"""
files = ('/etc/racoon/racoon.conf',)
def setup(self):
self.add_copy_spec("/etc/racoon")
class DebianIPSec(IPSec, DebianPlugin, UbuntuPlugin):
"""ipsec related information for Debian distributions
"""
files = ('/etc/ipsec-tools.conf',)
def setup(self):
self.add_copy_specs([
"/etc/ipsec-tools.conf",
"/etc/ipsec-tools.d",
"/etc/default/setkey"
])
# vim: et ts=4 sw=4
|
KayaBaber/Computational-Physics
|
Assignment_3_chaos_and_pendulums/Phys440_Assignment03_Prob2.py
|
Python
|
mit
| 2,476
| 0.023021
|
'''
Kaya Baber
Physics 440 - Computational Physics
Assignment 3
Problem 2
'''
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
import math
def f(thetas, t, b, gamma, omega):
#pendulum driven-damped function
theta=thetas[0]
thetaDot=thetas[1]
thetaDouble=-b*thetaDot - math.sin(theta) + gamma*math.cos(omega*t)
return thetaDot, thetaDouble
#initial conditions
theta0=-0.0
thetaDot0=0.0
thetas=[theta0,thetaDot0]
#constants
b=0.05
omega=0.7
#computation parameters
steps=100
periods=310
t = np.linspace(0, periods*(math.pi*2.0*omega), steps*periods+1)
#generating loop
for i in range(7):
gamma=0.4+(i*0.1)
#ODE solution
sol = odeint(f, thetas, t, args=(b, gamma, omega))
#Cut off data from before 200 driving periods
#plot theta vs time
plt.plot(t[210*steps:], sol[:, 1][210*steps:], 'b', label='thetaDot(t)')
plt.xlabel('time')
plt.ylabel('theta-Dot')
plt.grid()
plt.savefig('plots/gamma'+str(gamma)+'_thetaDot_t.png',bbox_inches='tight')
#plt.savefig('plots\\gamma'+str(gamma)+'_thetaDot_t.png',bbox_inches='tight')
#plt.show()
plt.clf()
#clips the plot to keep theta between -pi and +pi
    thetaLog=((np.array(sol[:,0][210*steps:])+math.pi)%(2*math.pi))-math.pi
#plot phase space plot
    plt.plot(thetaLog, sol[:, 1][210*steps:], 'g.', label='theta-Dot(theta)')
plt.xlabel('theta')
plt.ylabel('theta-Dot')
plt.title('Phase Space Plot')
plt.grid()
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig('plots/gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
#plt.savefig('plots\\gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
#plt.show()
plt.clf()
#selects only points that coincide with the period omega
strobedTheta=sol[:,0][210*steps:-1:steps]
strobedThetaDot=sol[:,1][210*steps:-1:steps]
strobedTheta=((strobedTheta+math.pi)%(2*math.pi))-math.pi
#plot strobed phase space plot
plt.plot(strobedTheta, strobedThetaDot, 'r.', label='theta-Dot(theta)')
plt.xlabel('theta')
plt.ylabel('theta-Dot')
plt.title('Strobed Phase Space Plot')
plt.grid()
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig('plots/gamma'+str(gamma)+'_thetaDot_theta_strobed.png',bbox_inches='tight')
#plt.savefig('plots\\gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
#plt.show()
plt.clf()
|
OldPanda/B551
|
hw4/simpleGreedy.py
|
Python
|
gpl-2.0
| 907
| 0.083793
|
import gamePlay
from copy import deepcopy
def value(board):
value = 0
for row in board:
for elem in row:
if elem == "W":
value = value + 1
elif elem == "B":
value = value - 1
return value
def betterThan(val1, val2, color, reversed):
if color == "W":
retVal = val1 > val2
else:
        retVal = val1 < val2  # black prefers lower board values
if reversed:
return not retVal
else:
return retVal
def nextMove(board, color, time, reversed = False):
moves = []
for i in range(8):
for j in range(8):
            if gamePlay.valid(board, color, (i,j)):
moves.append((i,j))
if len(moves) == 0:
return "pass"
best = None
    for move in moves:
newBoard = deepcopy(board)
gamePlay.doMove(newBoard,color,move)
moveVal = value(newBoard)
if best == None or betterThan(moveVal, best, color, reversed):
bestMove = move
best = moveVal
return bestMove
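# Illustrative call (the board encoding is assumed to match the gamePlay
# module: an 8x8 list of lists holding "W", "B" or an empty marker):
#
#   board = [[" " for _ in range(8)] for _ in range(8)]
#   board[3][3], board[4][4] = "W", "W"
#   board[3][4], board[4][3] = "B", "B"
#   nextMove(board, "B", time=60)   # -> a (row, col) tuple, or "pass"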
|
elena/django
|
django/utils/deprecation.py
|
Python
|
bsd-3-clause
| 4,818
| 0.000623
|
import asyncio
import inspect
import warnings
from asgiref.sync import sync_to_async
class RemovedInDjango41Warning(DeprecationWarning):
pass
class RemovedInDjango50Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInDjango41Warning
class warn_about_renamed_method:
def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
self.class_name = class_name
self.old_method_name = old_method_name
self.new_method_name = new_method_name
self.deprecation_warning = deprecation_warning
def __call__(self, f):
def wrapped(*args, **kwargs):
warnings.warn(
"`%s.%s` is deprecated, use `%s` instead." %
(self.class_name, self.old_method_name, self.new_method_name),
self.deprecation_warning, 2)
return f(*args, **kwargs)
return wrapped
class RenameMethodsBase(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`." %
(class_name, old_method_name, new_method_name),
deprecation_warning, 2)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))
return new_class
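# Hypothetical usage sketch (not part of Django itself): a concrete metaclass
# lists (old_name, new_name, warning_class) tuples, and old call sites keep
# working while emitting the chosen deprecation warning.
#
#   class RenameManagerMethods(RenameMethodsBase):
#       renamed_methods = (
#           ('old_method', 'new_method', RemovedInDjango50Warning),
#       )
#
#   class Manager(metaclass=RenameManagerMethods):
#       def old_method(self):
#           return 'result'
#
#   Manager().new_method()   # defined automatically; old_method() warns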
class DeprecationInstanceCheck(type):
def __instancecheck__(self, instance):
warnings.warn(
"`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative),
self.deprecation_warning, 2
)
return super().__instancecheck__(instance)
class MiddlewareMixin:
sync_capable = True
async_capable = True
def __init__(self, get_response):
if get_response is None:
raise ValueError('get_response must be provided.')
self.get_response = get_response
self._async_check()
super().__init__()
def _async_check(self):
"""
If get_response is a coroutine function, turns us into async mode so
a thread is not consumed during a whole request.
"""
if asyncio.iscoroutinefunction(self.get_response):
# Mark the class as async-capable, but do the actual switch
# inside __call__ to avoid swapping out dunder methods
self._is_coroutine = asyncio.coroutines._is_coroutine
def __call__(self, request):
# Exit out to async mode, if needed
if asyncio.iscoroutinefunction(self.get_response):
return self.__acall__(request)
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
response = response or self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
async def __acall__(self, request):
"""
Async version of __call__ that is swapped in when an async request
is running.
"""
response = None
if hasattr(self, 'process_request'):
response = await sync_to_async(
self.process_request,
thread_sensitive=True,
)(request)
response = response or await self.get_response(request)
if hasattr(self, 'process_response'):
response = await sync_to_async(
self.process_response,
thread_sensitive=True,
)(request, response)
return response
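# Hypothetical subclass sketch: a middleware only implements process_request
# and/or process_response; MiddlewareMixin dispatches them correctly for both
# sync and async requests (a `time` import is assumed here).
#
#   class TimingMiddleware(MiddlewareMixin):
#       def process_request(self, request):
#           request._started_at = time.monotonic()
#
#       def process_response(self, request, response):
#           response['X-Elapsed'] = str(time.monotonic() - request._started_at)
#           return response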
|
chrys87/fenrir
|
src/fenrirscreenreader/commands/commands/review_curr_line.py
|
Python
|
lgpl-3.0
| 1,171
| 0.01281
|
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributers.
from fenrirscreenreader.core import debug
from fenrirscreenreader.utils import line_utils
class command():
def __init__(self):
pass
def initialize(self, environment):
self.env = environment
def shutdown(self):
pass
def getDescription(self):
return _('current line')
def run(self):
self.env['runtime']['cursorManager'].enterReviewModeCurrTextCursor()
self.env['screen']['newCursorReview']['x'], self.env['screen']['newCursorReview']['y'], currLine = \
line_utils.getCurrentLine(self.env['screen']['newCursorReview']['x'], self.env['screen']['newCursorReview']['y'], self.env['screen']['newContentText'])
if currLine.isspace():
self.env['runtime']['outputManager'].presentText(_("blank"), soundIcon='EmptyLine', interrupt=True, flush=False)
else:
self.env['runtime']['outputManager'].presentText(currLine, interrupt=True, flush=False)
def setCallback(self, callback):
pass
|
mathemage/h2o-3
|
py/h2o_test_utils.py
|
Python
|
apache-2.0
| 31,016
| 0.005771
|
import sys, os, time, json, datetime, errno, stat, getpass, requests, pprint
if sys.version_info[0] < 3: import urlparse
else: import urllib.parse as urlparse
import h2o
debug_rest = False
verbosity = 0 # 0, 1, 2, 3
pp = pprint.PrettyPrinter(indent=4) # pretty printer for debugging
def setVerbosity(level):
global verbosity
if level: verbosity = level
def isVerbose():
global verbosity
return verbosity > 0
def isVerboser():
global verbosity
return verbosity > 1
def isVerbosest():
global verbosity
return verbosity > 2
def sleep(secs):
if getpass.getuser() == 'jenkins':
period = max(secs, 120)
else:
period = secs
# if jenkins, don't let it sleep more than 2 minutes
# due to left over h2o.sleep(3600)
time.sleep(period)
def dump_json(j):
return json.dumps(j, sort_keys=True, indent=2)
def check_params_update_kwargs(params_dict, kw, function, print_params):
# only update params_dict..don't add
# throw away anything else as it should come from the model (propagating what RF used)
for k in kw:
if k in params_dict:
params_dict[k] = kw[k]
else:
raise Exception("illegal parameter '%s' in %s" % (k, function))
if print_params:
print("%s parameters:" % function + repr(params_dict))
sys.stdout.flush()
######################
# Assertion-type stuff
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def followPath(d, path_elems):
for path_elem in path_elems:
if "" != path_elem:
idx = -1
if path_elem.endswith("]"):
idx = int(path_elem[path_elem.find("[") + 1:path_elem.find("]")])
path_elem = path_elem[:path_elem.find("[")]
assert path_elem in d, "FAIL: Failed to find key: " + path_elem + " in dict: " + repr(d)
if -1 == idx:
d = d[path_elem]
else:
d = d[path_elem][idx]
return d
def assertKeysExist(d, path, keys):
path_elems = path.split("/")
d = followPath(d, path_elems)
for key in keys:
assert key in d, "FAIL: Failed to find key: " + key + " in dict: " + repr(d)
def assertKeysExistAndNonNull(d, path, keys):
path_elems = path.split("/")
d = followPath(d, path_elems)
for key in keys:
assert key in d, "FAIL: Failed to find key: " + key + " in dict: " + repr(d)
assert d[key] != None, "FAIL: Value unexpectedly null: " + key + " in dict: " + repr(d)
def assertKeysDontExist(d, path, keys):
path_elems = path.split("/")
d = followPath(d, path_elems)
for key in keys:
assert key not in d, "FAIL: Unexpectedly found key: " + key + " in dict: " + repr(d)
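# Illustrative example (hypothetical response dict): followPath understands
# "name[index]" elements, and the assert helpers split their path on "/".
#
#   d = {'models': [{'key': {'name': 'gbm-1'}}, {'key': {'name': 'drf-2'}}]}
#   followPath(d, ['models[1]', 'key'])          # -> {'name': 'drf-2'}
#   assertKeysExistAndNonNull(d, 'models[0]/key', ['name'])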
###############
# LOGGING STUFF
# used to rename the sandbox when running multiple tests in same dir (in different shells)
def get_sandbox_name():
if "H2O_SANDBOX_NAME" in os.environ:
a = os.environ["H2O_SANDBOX_NAME"]
print("H2O_SANDBOX_NAME", a)
return a
else:
return "sandbox"
LOG_DIR = get_sandbox_name()
make_sure_path_exists(LOG_DIR)
def log(cmd, comment=None):
filename = LOG_DIR + '/commands.log'
# everyone can read
with open(filename, 'a') as f:
f.write(str(datetime.datetime.now()) + ' -- ')
# what got sent to h2o
# f.write(cmd)
# let's try saving the unencoded url instead..human readable
if cmd:
f.write(urlparse.unquote(cmd))
if comment:
f.write(' #')
f.write(comment)
f.write("\n")
elif comment: # for comment-only
f.write(comment + "\n")
# jenkins runs as 0xcustomer, and the file wants to be archived by jenkins who isn't in his group
permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(filename, permissions)
def log_rest(s):
if not debug_rest:
return
rest_log_file = open(os.path.join(LOG_DIR, "rest.log"), "a")
rest_log_file.write(s)
rest_log_file.write("\n")
rest_log_file.close()
######################
# REST API stuff
def list_to_dict(l, key):
'''
Given a List and a key to look for in each element return a Dict which maps the value of that key to the element.
Also handles nesting for the key, so you can use this for things like a list of elements which contain H2O Keys and
    return a Dict indexed by the 'name' element within the key.
list_to_dict([{'key': {'name': 'joe', 'baz': 17}}, {'key': {'name': 'bobby', 'baz': 42}}], 'key/name') =>
{'joe': {'key': {'name': 'joe', 'baz': 17}}, 'bobby': {'key': {'name': 'bobby', 'baz': 42}}}
'''
result = {}
for entry in l:
# print 'In list_to_dict, entry: ', repr(entry)
part = entry
k = None
        for keypart in key.split('/'):
part = part[keypart]
k = keypart
# print 'for keypart: ', keypart, ' part: ', repr(part)
result[part] = entry
# print 'result: ', repr(result)
return result
def validate_builder(algo, builder):
''' Validate that a model builder seems to have a well-formed parameters list. '''
assert 'parameters' in builder, "FAIL: Failed to find parameters list in builder: " + algo + " (" + repr(builder) + ")"
assert isinstance(builder['parameters'], list), "FAIL: 'parameters' element is not a list in builder: " + algo + " (" + repr(builder) + ")"
parameters = builder['parameters']
assert len(parameters) > 0, "FAIL: parameters list is empty: " + algo + " (" + repr(builder) + ")"
for parameter in parameters:
assertKeysExist(parameter, '', ['name', 'help', 'required', 'type', 'default_value', 'actual_value', 'level', 'values'])
assert 'can_build' in builder, "FAIL: Failed to find can_build list in builder: " + algo + " (" + repr(builder) + ")"
assert isinstance(builder['can_build'], list), "FAIL: 'can_build' element is not a list in builder: " + algo + " (" + repr(builder) + ")"
assert len(builder['can_build']) > 0, "FAIL: 'can_build' list is empty in builder: " + algo + " (" + repr(builder) + ")"
def validate_model_builder_result(result, original_params, model_name):
'''
Validate that a model build result has no parameter validation errors,
and that it has a Job with a Key. Note that model build will return a
Job if successful, and a ModelBuilder with errors if it's not.
'''
global pp
error = False
if result is None:
print('FAIL: result for model %s is None, timeout during build? result: %s' % (model_name, result))
error = True
elif result['__http_response']['status_code'] != requests.codes.ok:
error = True
print("FAIL: expected 200 OK from a good validation request, got: " + str(result['__http_response']['status_code']))
print("dev_msg: " + result['dev_msg'])
elif 'error_count' in result and result['error_count'] > 0:
# error case
print('FAIL: Parameters validation error for model: ', model_name)
error = True
if error:
print('Input parameters: ')
pp.pprint(original_params)
print('Returned result: ')
pp.pprint(result)
assert result['error_count'] == 0, "FAIL: Non-zero error_count for model: " + model_name
assert 'job' in result, "FAIL: Failed to find job key for model: " + model_name + ": " + pp.pprint(result)
job = result['job']
assert type(job) is dict, "FAIL: Job element for model is not a dict: " + model_name + ": " + pp.pprint(result)
assert 'key' in job, "FAIL: Failed to find key in job for model: " + model_name + ": " + pp.pprint(result)
def validate_grid_builder_result(result, original_params, grid_params, grid_id):
'''
Validate that a grid build result has no parameter validation errors,
and that it has a Job with a Key.
'''
global pp
error = False
if result is None:
print('FAIL: result for grid %s is None, timeout during build? resul
|
maxive/erp
|
addons/im_livechat/__manifest__.py
|
Python
|
agpl-3.0
| 1,269
| 0.000788
|
# -*- coding: utf-8 -*-
{
'name' : 'Live Chat',
'version': '1.0',
'sequence': 170,
'summary': 'Live Chat with Visitors/Customers',
'category': 'Website',
'complexity': 'easy',
'website': 'https://www.odoo.com/page/live-chat',
'description':
"""
Live Chat Support
==========================
Allow to drop instant messaging widgets on any web page that will communicate
with the current server and dispatch visitors request amongst several live
chat operators.
Help your customers with this chat, and analyse their feedback.
""",
'data': [
"security/im_livechat_channel_security.xml",
"security/ir.model.access.csv",
"views/rating_views.xml",
"views/mail_channel_views.xml",
"views/im_livechat_channel_views.xm
|
l",
"views/im_livechat_channel_templates.xml",
"views/res_users_views.xml",
"report/im_livechat_report_channel_views.xml",
"report/im_livechat_report_operator_views.xml"
],
'demo': [
"data/im_livechat_channel_demo.xml",
'data/mail_shortcode_demo.xml',
],
'depends': ["mail", "rating"],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'auto_install': False,
'application': True,
}
|
madgik/exareme
|
Exareme-Docker/src/mip-algorithms/tests/integration_tests/test_exareme_integration_logistic_regression.py
|
Python
|
mit
| 894
| 0.001119
|
import json
import numpy as np
import pytest
import requests
from mipframework.testutils import get_test_params
from tests import vm_url
from tests.algorithm_tests.test_logistic_regression import expected_file
headers = {"Content-type": "application/json", "Accept": "text/plain"}
url = vm_url + "LOGISTIC_REGRESSION"
@pytest.mark.parametrize(
"test_input, expected", get_test_params(expected_file, slice(95,
|
100))
)
def test_logistic_regression_algorithm_exareme(test_input, expected):
result = requests.post(url, data=json.dumps(test_input), headers=headers)
result = json.loads(result.text)
result = result["result"][0]["data"]
assert are_collinear(result["Coefficients"], expected["coeff"])
def are_collinear(u, v):
cosine_similarity = np.dot(v, u) / (np.sqrt(np.dot(v, v)) * np.sqrt(np.dot(u, u)))
return np.isclose(abs(cosine_similarity), 1, rtol=1e-5)
|
Canpio/Paddle
|
python/paddle/fluid/tests/unittests/test_array_read_write_op.py
|
Python
|
apache-2.0
| 3,404
| 0
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import default_main_program
import numpy
class TestArrayReadWrite(unittest.TestCase):
def test_read_write(self):
x = [
layers.data(
name='x0', shape=[100]), layers.data(
name='x1', shape=[100]), layers.data(
name='x2', shape=[100])
]
for each_x in x:
each_x.stop_gradient = False
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
arr = layers.array_write(x=x[0], i=i)
i = layers.increment(x=i)
arr = layers.array_write(x=x[1], i=i, array=arr)
i = layers.increment(x=i)
arr = layers.array_write(x=x[2], i=i, array=arr)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
a0 = layers.array_read(array=arr, i=i)
i = layers.increment(x=i)
a1 = layers.array_read(array=arr, i=i)
i = layers.increment(x=i)
a2 = layers.array_read(array=arr, i=i)
mean_a0 = layers.mean(a0)
mean_a1 = layers.mean(a1)
mean_a2 = layers.mean(a2)
a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])
mean_x0 = layers.mean(x[0])
mean_x1 = layers.mean(x[1])
mean_x2 = layers.mean(x[2])
x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])
scope = core.Scope()
cpu = core.CPUPlace()
exe = Executor(cpu)
tensor = numpy.random.random(size=(100, 100)).astype('float32')
outs = exe.run(feed={'x0': tensor,
'x1': tensor,
'x2': tensor},
fetch_list=[a_sum, x_sum],
scope=scope)
self.assertEqual(outs[0], outs[1])
total_sum = layers.sums(input=[a_sum, x_sum])
total_sum_scaled = layers.scale(x=total_sum, scale=1 / 6.0)
append_backward(total_sum_scaled)
g_vars = map(default_main_program().global_block().var,
[each_x.name + "@GRAD" for each_x in x])
g_out = [
item.sum()
for item in exe.run(
feed={'x0': tensor,
'x1': tensor,
'x2': tensor},
fetch_list=g_vars)
]
g_out_sum = numpy.array(g_out).sum()
        # since our final gradient is 1 and the neural network are all linear
# with mean_op.
# the input gradient should also be 1
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/ns/nspbr_args.py
|
Python
|
apache-2.0
| 1,045
| 0.025837
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class nspbr_args :
""" Provide
|
s additional arguments required for fetching the nspbr resource.
"""
def __init__(self) :
self._detail = False
@property
def detail(self) :
"""To get a detailed view.
"""
try :
return self._detail
except Exception as e:
raise e
@detail.setter
def detail(self, detail) :
"""To get a detailed view.
"""
try :
self._detail = detail
except Exception as e:
raise e
|
lopopolo/hyperbola
|
hyperbola/frontpage/views.py
|
Python
|
mit
| 284
| 0.003521
|
from django.shortcuts import render
from .models import Blurb, Schedule
def index(request):
blurbs = Blurb.objects.filter(display=True)
schedule = Schedule.objects.filter(display=True)
return render(request, "frontpage.html", {"blurbs": blurbs, "schedule": schedule})
|
lootr/netzob
|
netzob/test/src/test_netzob/test_Common/suite_Type.py
|
Python
|
gpl-3.0
| 2,410
| 0.012469
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#|                                                                             |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import unittest
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
def getSuite():
typeSuite = unittest.TestSuite()
return typeSuite
|
shakamunyi/tensorflow
|
tensorflow/python/framework/ops.py
|
Python
|
apache-2.0
| 181,558
| 0.006059
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import linecache
import re
import sys
import threading
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
# Temporary global switch determining if we should enable the work-in-progress
# calls to the C API. Currently disabled by default but can be manually enabled
# e.g. in tests. This will be removed once all functionality is supported and
# there's no performance penalty with it enabled.
#
# TODO(skyewm) before we can remove this:
# - functions
# - import_graph_def() incrementally adds inputs to ops (i.e. creates an
# Operation and then calls _add_input()). The current code requires that all
# inputs be specified when creating the Operation (since we call
# TF_FinishOperation()).
# - ops_test.py (and others?) create unregistered op types
# - while loop
# - performance (e.g. delete/refactor redundant Python functionality, switch to
# new session API)
_USE_C_API = False
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _NullContextmanager(object):
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
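# Illustrative sketch (the function below is hypothetical): this is the
# mechanism modules such as math_ops use to wire Python operators onto Tensor.
#
#   def _add_via_graph(x, y):
#     return math_ops.add(x, y)   # assumed op wrapper, for illustration only
#
#   _override_helper(Tensor, "__add__", _add_via_graph)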
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a
|
`name` property")
except AttributeError:
raise TypeError("Type %s does not define a `name` property")
try:
    if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property")
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property")
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul_
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/IPython/qt.py
|
Python
|
artistic-2.0
| 744
| 0
|
"""
Shim to maintain backwards compatibility with old IPython.qt imports.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from IPython.utils.shimmodule import ShimModule, ShimWarning
warn("The `IPython.qt` package has been deprecated. "
"You should import from qtconsole instead.", ShimWarning)
# Unconditionally insert the shim into sys.modules so that further import calls
# trigger the custom attribute access above
_console = sys.modules['IPython.qt.console'] = ShimModule(
src='IPython.qt.console', mirror='qtconsole')
_qt = ShimModule(src='IPython.qt', mirror='qtconsole')
_qt.console = _console
sys.modules['IPython.qt'] = _qt
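# Added usage sketch (not part of the original shim module): once the shim is
# in sys.modules, old-style imports keep working but are served by qtconsole.
# Kept commented out because it requires qtconsole to be installed at runtime.
#
# import IPython.qt.console
# print(IPython.qt.console)  # a ShimModule mirroring the 'qtconsole' package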
|
hallover/alloy-database
|
code/timeErrorPlot.py
|
Python
|
mit
| 3,797
| 0.008428
|
import os as os
import zipfile as Z
from os.path import isfile, join
from getpass import getuser
from matplotlib import pyplot as plt
zipfiles = []
kptList = []
name = []
inputzips = []
netID = getuser()
zippath = "/fslhome/" + netID + "/vasp/alloydatabase/alloyzips/"
newpath = "/fslhome/" + netID + "/vasp/alloydatabase/metalsdir/"
finishedpath = "/fslhome/" + netID + "/vasp/alloydatabase/finished/"
databasepath = "/fslhome/" + netID + "/vasp/database/code/"
def plotData(freeEnergy, setloc, totalCPUtime, path, name):
error_alldata = []
for i in range(0, len(freeEnergy) - 1):
A = []
A.append(freeEnergy[i])
for j in range(1, len(freeEnergy)):
a = abs(freeEnergy[len(freeEnergy)-1] - freeEnergy[i])
A.append(a)
#A[2] = alldatazip[i][2]
error_alldata.append(A)
kpts = 4
eTOTEN = []
#ikpts = []
for h in range(0, 14):
a = []
irrk = []
for i in range(len(freeEnergy)):
if setloc[i] == kpts:
a.append(error_alldata[i])
                # irrk.append(error_alldata[i][2])
# print(error_alldata[i][0][0])
kpts = 3 * h + 4
# print(kpts)
eTOTEN.append(a)
#ikpts.append(irrk)
del totalCPUtime[0]
del eTOTEN[0]
#del ikpts[0]
print(totalCPUtime)
print(eTOTEN)
graphpath = "/fslhome/" + netID + "/vasp/database/code/graphs/"
# print(eTOTEN)
# print(totalCPUtime)
    for i in range(len(eTOTEN)):
        # print(etoten[0][i])
        # print(totalCPUtime[i])
        for j in range(len(eTOTEN[i])):
            plt.plot(eTOTEN[i][j], totalCPUtime[i])
plt.loglog()
plt.xlabel("Error")
plt.ylabel("CPU Usage Time")
plt.title(name + "Time-Error Efficiency")
plt.savefig(graphpath + name + '_kpts.pdf')
plt.close()
return
def gatherData():
newpath = "/fslhome/" + netID + "/vasp/alloydatabase/metalsdir/"
dirs = sorted([d for d in os.listdir(newpath) if os.path.isdir(os.path.join(newpath, d))])
for metal in range(len(dirs)):
name = dirs[metal]
freeEnergy = []
# irrkpts = []
totalCPUtime = []
setloc = []
# path = "/fslhome/holiver2/work/vasp/alloydatabase/finished/0-CdCoN"
path = newpath + dirs[metal]
print(dirs[metal])
        userinput = int(input("1 = yes, 0 = no"))
        if userinput == 1:
for n in range(4, 45, 3):
lvl2path = path + "/" + str(n).zfill(2) + "frzkpts"
for k in range(n, 44, 3):
lvl3path = lvl2path + "/" + str(k).zfill(2) + "kpts"
outcarpath = lvl3path + "/OUTCAR"
with open(outcarpath, 'r') as f:
mylines = f.readlines()
for line in mylines:
if "free energy TOTEN" in line:
freeEnergy1 = line
if "Total CPU time used " in line:
totalCPUtime1 = line
if "irreducible k-points:" in line:
irrkpts1 = line
freeEnergy.append(float(freeEnergy1.split()[4]))
totalCPUtime.append(float(totalCPUtime1.split()[5]))
setloc.append([n,k])
# irrkpts.append(int(irrkpts1.split()[1]))
# alldatazip = zip(setloc, totalCPUtime, irrkpts, freeEnergy)
# print(setloc)
# print(totalCPUtime)
plotData(freeEnergy, setloc, totalCPUtime, path, name)
# del alldatazip[:]
print("happy Kennedy")
return
|
agile-geoscience/welly
|
tests/test_quality.py
|
Python
|
apache-2.0
| 2,002
| 0
|
# -*- coding: utf 8 -*-
"""
Define a suite a tests for the canstrat functions.
"""
from welly import Well
import welly.quality as q
tests = {
'Each': [
q.no_flat,
q.no_monotonic,
q.no_gaps,
],
'Gamma': [
q.all_positive,
q.all_below(450),
q.check_units(['API', 'GAPI']),
],
'DT': [
q.all_positive,
],
'Sonic': [
q.all_between(1, 10000), # 1333 to 5000 m/s
q.no_spikes(10), # 10 spikes allowed
],
}
alias = {
"Gamma": ["GR", "GAM", "GRC", "SGR", "NGT"],
"Density": ["RHOZ", "RHOB", "DEN", "RHOZ"],
"Sonic": ["DT", "AC", "DTP", "DT4P"],
"Caliper": ["CAL", "CALI", "CALS", "C1"],
'Porosity SS': ['NPSS', 'DPSS'],
}
def test_quality():
"""
Test basic stuff.
"""
w = Well.from_las('tests/assets/P-129_out.LAS')
r = w.qc_data(tests, alias=alias)
assert len(r['GR'].values()) == 6
assert sum(r['GR'].values()) == 3
assert len(r['DT'].values()) == 6
html = w.qc_table_html(tests, alias=alias)
assert len(html) == 10057
assert '<table><tr><th>Curve</th><th>Passed</th><th>Score</th>' in html
assert '<tr><th>GR</th><td>3 / 6</td><td>0.500</td><td style=' in html
r_curve_group = w.qc_curve_group(tests, alias=alias)
assert isinstance(r_curve_group, dict)
def test_quality_curve():
"""
    Test qc functions in class Curve
"""
w = Well.from_las('tests/assets/P-129_out.LAS')
c = w.get_curve(mnemonic='CALI')
tests_curve = c.quality(tests=tests)
assert isinstance(tests_curve, dict)
|
assert len(tests_curve) == 3
tests_curve_qflag = c.qflag(tests=tests, alias=alias)
assert isinstance(tests_curve_qflag, dict)
assert len(tests_curve_qflag) == 3
tests_curve_qflags = c.qflags(tests=tests, alias=alias)
assert isinstance(tests_curve_qflags, dict)
assert len(tests_curve_qflags) == 3
test_score = c.quality_score(tests=tests_curve)
assert test_score == -1
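# Added usage sketch (not part of the original test module): the same `tests`
# and `alias` dicts can drive an ad-hoc QC report outside the test harness.
# Kept commented out so the module still only runs under pytest; the LAS path
# reuses the test asset referenced above.
#
# def print_gr_report():
#     w = Well.from_las('tests/assets/P-129_out.LAS')
#     for test_name, passed in w.qc_data(tests, alias=alias)['GR'].items():
#         print(test_name, 'PASS' if passed else 'FAIL')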
|
hyunchel/webargs
|
tests/test_bottleparser.py
|
Python
|
mit
| 4,813
| 0.004363
|
# -*- coding: utf-8 -*-
import pytest
from bottle import Bottle, debug, request, response
from webtest import TestApp
from webargs import ValidationError, fields
from webargs.bottleparser import BottleParser
hello_args = {
'name': fields.Str(missing='World', validate=lambda n: len(n) >= 3),
}
hello_multiple = {
'name': fields.List(fields.Str())
}
parser = BottleParser()
@pytest.fixture
def app():
app = Bottle()
@app.route('/echo', method=['GET', 'POST'])
def index():
return parser.parse(hello_args, request)
@app.route('/echomulti/', method=['GET', 'POST'])
def multi():
return parser.parse(hello_multiple, request)
debug(True)
return app
@pytest.fixture
def testapp(app):
return TestApp(app)
def test_parse_querystring_args(testapp):
assert testapp.get('/echo?name=Fred').json == {'name': 'Fred'}
def test_parse_querystring_multiple(testapp):
expected = {'name': ['steve', 'Loria']}
assert testapp.get('/echomulti/?name=steve&name=Loria').json == expected
def test_parse_form_multiple(testapp):
expected = {'name': ['steve', 'Loria']}
assert testapp.post('/echomulti/', {'name': ['steve', 'Loria']}).json == expected
def test_parse_form(testapp):
assert testapp.post('/echo', {'name': 'Joe'}).json == {'name': 'Joe'}
def test_parse_json(testapp):
assert testapp.post_json('/echo', {'name': 'Fred'}).json == {'name': 'Fred'}
def test_parse_json_default(testapp):
assert testapp.post_json('/echo', {}).json == {'name': 'World'}
def test_parsing_form_default(testapp):
assert testapp.post('/echo', {}).json == {'name': 'World'}
def test_abort_called_on_validation_error(testapp):
res = testapp.post('/echo', {'name': 'b'}, expect_errors=True)
assert res.status_code == 422
def test_validator_that_raises_validation_error(app):
def always_fail(value):
raise ValidationError('something went wrong')
args = {'text': fields.Str(validate=always_fail)}
@app.route('/validated', method=['POST'])
def validated_route():
parser.parse(args)
vtestapp = TestApp(app)
res = vtestapp.post_json('/validated', {'text': 'bar'}, expect_errors=True)
assert res.status_code == 422
def test_use_args_decorator(app, testapp):
@app.route('/foo/', method=['GET', 'POST'])
@parser.use_args({'myvalue': fields.Int()})
def echo2(args):
return args
assert testapp.post('/foo/', {'myvalue': 23}).json == {'myvalue': 23}
def test_use_args_with_validation(app, testapp):
@app.route('/foo/', method=['GET', 'POST'])
@parser.use_args({'myvalue': fields.Int()}, validate=lambda args: args['myvalue'] > 42)
def echo(args):
return args
result = testapp.post('/foo/', {'myvalue': 43}, expect_errors=True)
assert result.status_code == 200
result = testapp.post('/foo/', {'myvalue': 41}, expect_errors=True)
assert result.status_code == 422
def test_use_args_with_url_params(app, testapp):
@app.route('/foo/<name>')
@parser.use_args({'myvalue': fields.Int()})
def foo(args, name):
return args
assert testapp.get('/foo/Fred?myvalue=42').json == {'myvalue': 42}
def test_use_kwargs_decorator(app, testapp):
@app.route('/foo/', method=['GET', 'POST'])
@parser.use_kwargs({'myvalue': fields.Int()})
    def echo2(myvalue):
        return {'myvalue': myvalue}
    assert testapp.post('/foo/', {'myvalue': 23}).json == {'myvalue': 23}
def test_use_kwargs_with_url_params(app, testapp):
@app.route('/foo/<name>')
@parser.use_kwargs({'myvalue': fields.Int()})
def foo(myvalue, name):
return {'myvalue': myvalue}
assert testapp.get('/foo/Fred?myvalue=42').json == {'myvalue': 42}
def test_parsing_headers(app, testapp):
@app.route('/echo2')
def echo2():
args = parser.parse(hello_args, request, locations=('headers',))
return args
res = testapp.get('/echo2', headers={'name': 'Fred'}).json
assert res == {'name': 'Fred'}
def test_parsing_cookies(app, testapp):
@app.route('/setcookie')
def setcookie():
response.set_cookie('name', 'Fred')
return {}
@app.route('/echocookie')
def echocookie():
args = parser.parse(hello_args, request, locations=('cookies',))
return args
testapp.get('/setcookie')
assert testapp.get('/echocookie').json == {'name': 'Fred'}
def test_arg_specific_locations(app, testapp):
testargs = {
'name': fields.Str(location='json'),
'age': fields.Int(location='querystring'),
}
@app.route('/echo', method=['POST'])
def echo():
args = parser.parse(testargs, request)
return args
resp = testapp.post_json('/echo?age=42', {'name': 'Fred'})
assert resp.json['age'] == 42
assert resp.json['name'] == 'Fred'
|
innovimax/vxquery
|
vxquery-benchmark/src/main/resources/util/diff_xml_files.py
|
Python
|
apache-2.0
| 3,401
| 0.006469
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may n
|
ot use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt, glob, os, sys
def main(argv):
f1 = ""
f2 = ""
# Get the base folder
try:
opts, args = getopt.getopt(argv, "h", ["f1=", "f2="])
except getopt.GetoptError:
print 'The file options for build_saxon_collection_xml.py were not correctly specified.'
print 'To see a full list of options try:'
print ' $ python build_saxon_collection_xml.py -h'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Options:'
print ' -f The base folder to create collection XML file.'
sys.exit()
elif opt in ('--f1'):
# check if file exists.
if os.path.exists(arg):
f1 = arg
else:
print 'Error: Argument must be a file name for --f1.'
sys.exit()
elif opt in ('--f2'):
# check if file exists.
if os.path.exists(arg):
f2 = arg
else:
print 'Error: Argument must be a file name for --f2.'
sys.exit()
# Required fields to run the script.
if f1 == "" or not os.path.exists(f1):
print 'Error: The file path option must be supplied: --f1.'
sys.exit()
if f2 == "" or not os.path.exists(f2):
print 'Error: The file path option must be supplied: --f2.'
sys.exit()
missing_in_f1 = []
missing_in_f2 = []
found_in_both = []
with open(f1) as f:
content_f1 = f.readlines()
set_f1 = set(content_f1)
with open(f2) as f:
content_f2 = f.readlines()
set_f2 = set(content_f2)
missing_in_f1 = set_f2.difference(set_f1)
missing_in_f2 = set_f1.difference(set_f2)
found_in_both = set_f1.intersection(set_f2)
print ""
print "Missing files in " + f1
for f1_name in missing_in_f1:
print " + " + f1_name.strip()
print ""
print "Missing files in " + f2
for f2_name in missing_in_f2:
print " + " + f2_name.strip()
offset = 40
print ""
print "XML Summary"
print (" - Found in both:").ljust(offset) + str(len(found_in_both))
print (" - " + f1 + " diff set vs list:").ljust(offset) + str(len(content_f1) - len(set_f1))
print (" - " + f2 + " diff set vs list:").ljust(offset) + str(len(content_f2) - len(set_f2))
print (" - " + f1 + " missing:").ljust(offset) + str(len(missing_in_f1))
print (" - " + f2 + " missing:").ljust(offset) + str(len(missing_in_f2))
if __name__ == "__main__":
main(sys.argv[1:])
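# Added usage note (not part of the original script): invoked from the shell
# with the two file options parsed by getopt above; the file names below are
# hypothetical examples.
#
#   python diff_xml_files.py --f1 run_a_results.txt --f2 run_b_results.txt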
|
andersk/zulip
|
zerver/webhooks/freshdesk/tests.py
|
Python
|
apache-2.0
| 4,844
| 0.001034
|
from unittest.mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
class FreshdeskHookTests(WebhookTestCase):
STREAM_NAME = "freshdesk"
URL_TEMPLATE = "/api/v1/external/freshdesk?stream={stream}"
WEBHOOK_DIR_NAME = "freshdesk"
def test_ticket_creation(self) -> None:
"""
Messages are generated on ticket creation through Freshdesk's
"Dispatch'r" service.
"""
expected_topic = "#11: Test ticket subject ☃"
expected_message = """
Requester ☃ Bob <requester-bob@example.com> created [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
``` quote
Test ticket description ☃.
```
* **Type**: Incident
* **Priority**: High
* **Status**: Pending
""".strip()
self.api_stream_message(
self.test_user,
"ticket_created",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_status_change(self) -> None:
"""
Messages are generated when a ticket's status changes through
Freshdesk's "Observer" service.
"""
expected_topic = "#11: Test ticket subject ☃"
expected_message = """
Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
* **Status**: Resolved -> Waiting on Customer
""".strip()
self.api_stream_message(
self.test_user,
"status_changed",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_priority_change(self) -> None:
"""
Messages are generated when a ticket's priority changes through
Freshdesk's "Observer" service.
"""
expected_topic = "#11: Test ticket subject"
expected_message = """
Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
* **Priority**: High -> Low
""".strip()
self.api_stream_message(
self.test_user,
"priority_changed",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
@patch("zerver.lib.webhooks.common.check_send_webhook_message")
def test_unknown_event_payload_ignore(self, check_send_webhook_message_mock: MagicMock) -> None:
"""
Ignore unknown event payloads.
"""
self.url = self.build_webhook_url()
payload = self.get_body("unknown_payload")
kwargs = {
"HTTP_AUTHORIZATION": self.encode_email(self.test_user.email),
"content_type": "application/x-www-form-urlencoded",
}
result = self.client_post(self.url, payload, **kwargs)
self.assertFalse(check_send_webhook_message_mock.called)
|
self.assert_json_success(result)
    def note_change(self, fixture: str, note_type: str) -> None:
"""
Messages are generated when a note gets added to a ticket through
Freshdesk's "Observer" service.
"""
expected_topic = "#11: Test ticket subject"
expected_message = """
Requester Bob <requester-bob@example.com> added a {} note to \
[ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11).
""".strip().format(
note_type
)
self.api_stream_message(
self.test_user,
fixture,
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_private_note_change(self) -> None:
self.note_change("private_note", "private")
def test_public_note_change(self) -> None:
self.note_change("public_note", "public")
def test_inline_image(self) -> None:
"""
Freshdesk sends us descriptions as HTML, so we have to make the
descriptions Zulip Markdown-friendly while still doing our best to
preserve links and images.
"""
expected_topic = "#12: Not enough ☃ guinea pigs"
expected_message = """
Requester \u2603 Bob <requester-bob@example.com> created [ticket #12](http://test1234zzz.freshdesk.com/helpdesk/tickets/12):\n\n``` quote\nThere are too many cat pictures on the internet \u2603. We need more guinea pigs.\nExhibit 1:\n\n \n\n[guinea_pig.png](http://cdn.freshdesk.com/data/helpdesk/attachments/production/12744808/original/guinea_pig.png)\n```\n\n* **Type**: Problem\n* **Priority**: Urgent\n* **Status**: Open
""".strip()
self.api_stream_message(
self.test_user,
"inline_images",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
|
tomka/CATMAID
|
django/lib/custom_testrunner.py
|
Python
|
gpl-3.0
| 635
| 0.00315
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test.runner import DiscoverRunner
from pipeline.conf import settings as pipeline_settings
class TestSuiteRunner(DiscoverRunner):
def __init__(self, *args, **kwargs):
settings.TESTING_ENVIRONMENT = True
super(TestSuiteRunner, self).__init__(*args, **kwargs)
def setup_test_environment(self, **kwargs):
        '''Override STATICFILES_STORAGE and pipeline DEBUG.'''
super().setup_test_environment(**kwargs)
settings.STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage'
pipeline_settings.DEBUG = True
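# Added configuration sketch (not part of the original module): Django picks
# this runner up through the standard TEST_RUNNER setting; the dotted path
# below is hypothetical and depends on where the project places this file.
#
# TEST_RUNNER = 'lib.custom_testrunner.TestSuiteRunner'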
|
brata-hsdc/brata.station
|
bin/pibrellaMidi.py
|
Python
|
apache-2.0
| 1,422
| 0.006329
|
from mido import MidiFile
from time import sleep
import pibrella
""" fade test
pibrella.light.red.fade(0,100,10)
sleep(11)
pibrella.light.red.fade(100,0,10)
sleep(11)
"""
""" start
pibrella.buzzer.note(-9)
sleep(.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(-9)
sleep(0.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(-9)
sleep(0.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(3)
sleep(0.9)
pibrella.buzzer.off()
"""
""" fail
pibrella.buzzer.note(0)
sleep(1.25)
pibrella.buzzer.note(-7)
sleep(2)
pibrella.buzzer.off()
"""
""" Mike notes for success likely bond theme
and need a calibration mode
push button yellow goes on then as turn the light can change untl the light changes
press red button again to go back to operational state
"""
""" it knows it is a comment """
mid = MidiFile('bond.mid')
for i, track in enumerate(mid.tracks):
print('Track ')
print(track.name)
if track.name == '':
for message in track:
if message.type == 'note_on':
# print('Turn on ')
note = message.note - 69
print(note)
pibrella.buzzer.note(note)
duration = 0.0 + message.time
elif message.type == 'note_off':
print(duration)
duration = message.time - duration
if duration > 0:
sleep(duration/1000.0)
pibrella.buzzer.off()
pibrella.buzzer.off()
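# Added note on the mapping used above (not part of the original script): the
# loop converts MIDI note numbers to the buzzer's semitone offsets by treating
# MIDI 69 (A4) as the buzzer's note 0, i.e. `note = message.note - 69`. For
# example, middle C (MIDI 60) becomes -9, matching the note(-9) used in the
# "start" jingle block above.
#
# assert 60 - 69 == -9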
|
hughdbrown/sed
|
sed/engine/sed_util.py
|
Python
|
mit
| 3,112
| 0.000321
|
#!/usr/bin/env python
from itertools import count
import doctest
def delete_range(lines, r=None):
"""
>>> a = list(range(10))
>>> delete_range(a, (1, 3))
[0, 4, 5, 6, 7, 8, 9]
"""
r = r or (0, len(lines))
    return replace_range(lines, [], (r[0], r[1] + 1))
def insert_range(lines, new_lines, line_no):
"""
>>> a = list(range(10))
>>> b = list(range(11, 13))
    >>> insert_range(a, b, 3)
    [0, 1, 2, 11, 12, 3, 4, 5, 6, 7, 8, 9]
    >>> insert_range(a, b, 0)
    [11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> insert_range(a, b, 9)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 9]
"""
return replace_range(lines, new_lines, (line_no, line_no))
def append_range(lines, new_lines, line_no):
"""
>>> a = list(range(10))
>>> b = list(range(11, 13))
    >>> append_range(a, b, 3)
    [0, 1, 2, 3, 11, 12, 4, 5, 6, 7, 8, 9]
    >>> append_range(a, b, 0)
    [0, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> append_range(a, b, 9)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12]
"""
return replace_range(lines, new_lines, (line_no + 1, line_no + 1))
def replace_range(old_lines, new_lines, r=None):
"""
>>> a = list(range(10))
>>> b = list(range(11, 13))
>>> replace_range(a, b, (0, 2))
[11, 12, 2, 3, 4, 5, 6, 7, 8, 9]
>>> replace_range(a, b, (8, 10))
[0, 1, 2, 3, 4, 5, 6, 7, 11, 12]
>>> replace_range(a, b, (0, 10))
[11, 12]
>>> replace_range(a, [], (0, 10))
[]
>>> replace_range(a, [], (0, 9))
[9]
"""
start, end = r or (0, len(old_lines))
assert 0 <= start <= end <= len(old_lines)
return old_lines[:start] + new_lines + old_lines[end:]
def find_line(lines, regex):
matches = [(i, regex.match(line)) for i, line in enumerate(lines)]
for (i, m) in reversed(matches):
if m:
yield i, m.groupdict()
def find_any_line(lines, regexes):
    for regex in regexes:
        for i, m in find_line(lines, regex):
            yield i, m
def add_terminator(lines, terminator):
def terminator_gen(last_lineno, terminator):
for i in count():
yield terminator if i != last_lineno else ""
return [line + sep
for line, sep in zip(lines, terminator_gen(len(lines) - 1, terminator))]
def comma_terminate(lines):
"""
>>> a = ["a", "b", "c"]
>>> comma_terminate(a)
['a,', 'b,', 'c']
"""
return add_terminator(lines, ",")
def entab(lines, space_count=4):
return [line.replace("\t", " " * space_count) for line in lines]
def sort_range(lines, r=None):
"""
>>> a = sorted(range(10), reverse=True)
>>> sort_range(a, (1, 1))
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> sort_range(a, (1, 2))
[9, 7, 8, 6, 5, 4, 3, 2, 1, 0]
>>> sort_range(a, (0, 2))
[7, 8, 9, 6, 5, 4, 3, 2, 1, 0]
>>> sort_range(a)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> sort_range(a, (0, 5))
[4, 5, 6, 7, 8, 9, 3, 2, 1, 0]
"""
start, end = r or (0, len(lines))
return lines[:start] + sorted(lines[start: end + 1]) + lines[end + 1:]
if __name__ == '__main__':
doctest.testmod()
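# Added composition sketch (not part of the original module): find_line yields
# (index, groupdict) pairs for a compiled regex, and replace_range splices in
# new lines, so a simple "replace the last TODO line" helper could look like
# the following. The helper name and the TODO pattern are illustrative only,
# and the block is kept commented out so the module's API is unchanged.
#
# import re
#
# def replace_last_todo(lines, replacement):
#     for i, _ in find_line(lines, re.compile(r"TODO")):
#         return replace_range(lines, [replacement], (i, i + 1))
#     return lines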
|
chngchinboon/intercomstats
|
scripts/tagclassifer_keras.py
|
Python
|
mit
| 9,514
| 0.018604
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 25 13:50:30 2017
@author: Owner
"""
from __future__ import print_function
import os
import numpy as np
np.random.seed(1337)
import matplotlib.pyplot as plt
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
import sys
#from sklearn.cross_validation import train_test_split
BASE_DIR = os.path.dirname( __file__ )
GLOVE_DIR = BASE_DIR + '/glove.6B/'
TEXT_DATA_DIR = BASE_DIR + '/20_newsgroup/'
MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
# first, build index mapping words in the embeddings set
# to their embedding vector
print('Indexing word vectors.')
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
#load from csv
outputfolder=os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'output'))
floc=os.path.abspath(os.path.join(outputfolder,'topconvstats.csv'))
topconvdfcopy=pd.read_csv(floc, sep='\t', encoding='utf-8',index_col=False)
if hasattr(topconvdfcopy, u'Unnamed: 0'): del topconvdfcopy['Unnamed: 0']#might be hiding poorly merge attempts
if hasattr(topconvdfcopy, u'Unnamed: 0.1'): del topconvdfcopy['Unnamed: 0.1']#might be hiding poorly merge attempts
if hasattr(topconvdfcopy, 'convid'): topconvdfcopy['convid']=topconvdfcopy['convid'].astype('unicode')#loading auto changes this to int
print ('Loaded file from ' + floc)
def expandtag(df,tagtype): #need to double check to see if truly duplicating properly--------------------------------------------------------
#use nested expandtag(expandtag(df,tagtype),tagtype) for both issue and school
if tagtype=='issue':
emptyrow=df[df['numissues']==0]#collect rows with issues equal to 0
filledrow=df[df['numissues']>0]#collect rows with issues greater than 1
elif tagtype=='school':
emptyrow=df[df['school']=='None']#collect rows with schools with none
filledrow=df[df['school']!='None']#collect rows with schools
#Build new df
newdf=[]
for index, row in filledrow.iterrows():
if type(row[tagtype])==unicode:
row[tagtype]=row[tagtype][1:-1].split(', ')
for multitag in row[tagtype]:
temprow=row.copy()#duplicate row
temprow[tagtype]=multitag#replace multi issue of duplicated row with single issue
newdf.append(temprow)
filledrow=pd.DataFrame(newdf)
expandeddf=emptyrow.append(filledrow) #recombine
expandeddf.sort_index(inplace=True) #sort
return expandeddf
dataset=expandtag(topconvdfcopy,'issue').copy()
dataset=dataset.reset_index()
dataset=dataset[['issue','firstmessage']]
#remove those with no messages
dataset=dataset[~(dataset.firstmessage=='None')]
#remove those with no tags
dataset=dataset[~(dataset.issue=='None')]
#print info of dataset
dataset.groupby('issue').describe()
dataset_length= dataset['issue'].map(lambda text: len(text))
#dataset_length.plot(bins=20, kind='hist')
dataset_length.describe()
dataset_distribution=dataset.groupby('issue').count().sort_values('firstmessage',ascending=False)
#data is too poorly conditioned and biased, use only top 6 and the rest put as Unknown <----- doesn't really improve results :(
issuetoclassify=['Login Help','Forward to School','Check In/Out','Admin','Portfolio','LFR','Unknown']
#issuetoclassify=['Login Help','Unknown']
def modissue(s,issuelist):
if s not in issuelist:
s='Unknown'
return s
dataset['issue']=dataset.issue.apply(lambda s: modissue(s,issuetoclassify))
#dataset.groupby('label').count()
issuename=issuetoclassify
issuename.sort()
#prep for keras
#prepare dictionary mapping for label name to numeric id
'''
issuename = []
with open(os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,'issuelist.txt'))) as inputfile:
for line in inputfile:
issuename.append(line.strip())
'''
texts = [] # list of text samples
labels_index = pd.Series(sorted(issuename)).to_dict() # dictionary mapping label name to numeric id
index_labels={v: k for k, v in labels_index.iteritems()}
labels = [] # list of label ids
texts=dataset['firstmessage'].tolist()
texts = [s.encode('ascii', 'ignore') for s in texts]
labels=dataset['issue'].tolist()
labels = [index_labels[s] for s in labels]
'''
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
        label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
|
else:
f = open(fpath, encoding='latin-1')
texts.append(f.read())
f.close()
labels.append(label_id)
'''
print('Found %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(256, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(256, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(256, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)
'''
from keras.models import Sequential
from keras.layers import Dropout, Activation
nb_classes=len(issuename)+1
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(seque
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/utility/test_xl_cell_to_rowcol_abs.py
|
Python
|
bsd-2-clause
| 1,642
| 0
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...utility import xl_cell_to_rowcol_abs
class TestUtility(unittest.TestCase):
"""
Test xl_cell_to_rowcol_abs() utility function.
"""
    def test_xl_cell_to_rowcol_abs(self):
"""Test xl_cell_to_rowcol_abs()"""
tests = [
# row, col, A1 string
(0, 0, 'A1'),
(0, 1, 'B1'),
(0, 2, 'C1'),
(0, 9, 'J1'),
(1, 0, 'A2'),
(2, 0, 'A3'),
(9, 0, 'A10'),
(1, 24, 'Y2'),
(7, 25, 'Z8'),
(9, 26, 'AA10'),
(1, 254, 'IU2'),
(1, 255, 'IV2'),
(1, 256, 'IW2'),
(0, 16383, 'XFD1'),
(1048576, 16384, 'XFE1048577'),
]
for row, col, string in tests:
exp = (row, col, 0, 0)
got = xl_cell_to_rowcol_abs(string)
self.assertEqual(got, exp)
def test_xl_cell_to_rowcol_abs_abs(self):
"""Test xl_cell_to_rowcol_abs() with absolute references"""
tests = [
# row, col, row_abs, col_abs, A1 string
(0, 0, 0, 0, 'A1'),
(0, 0, 1, 0, 'A$1'),
(0, 0, 0, 1, '$A1'),
(0, 0, 1, 1, '$A$1'),
]
for row, col, row_abs, col_abs, string in tests:
exp = (row, col, row_abs, col_abs)
got = xl_cell_to_rowcol_abs(string)
self.assertEqual(got, exp)
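# Added reading aid (not part of the original test module): as the tables above
# show, the helper returns a zero-based (row, col) pair followed by 0/1 flags
# for an absolute row and an absolute column, e.g.
#
#     xl_cell_to_rowcol_abs('$B$3')  # -> (2, 1, 1, 1)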
|
sshnaidm/openstack-sqe
|
tools/cloud/domain.py
|
Python
|
apache-2.0
| 3,609
| 0.003325
|
import os
import yaml
from network import Network
from storage import Storage
from cloudtools import conn, make_network_name
from config import TEMPLATE_PATH
with open(os.path.join(TEMPLATE_PATH, "network.yaml")) as f:
netconf = yaml.load(f)
with open(os.path.join(TEMPLATE_PATH, "vm.yaml")) as f:
vmconf = yaml.load(f)
with open(os.path.join(TEMPLATE_PATH, "lab.yaml")) as f:
env = yaml.load(f)
def construct_net_ip(lab_ip, net_shift):
return ".".join(lab_ip.split(".")[:2] + [str(int(lab_ip.split(".")[-1]) + net_shift)] + ["0"])
class VM:
pool = {}
def __init__(self, lab_id, path, config, box):
self.path = path
self.lab_id = lab_id
self.box = box
self.conf = config["servers"][box]
self.full_conf = config
self.report = []
self.names = [self.lab_id + "-" + self.box + "%.2d" % num if self.conf['params']['count'] != 1
else self.lab_id + "-" + self.box for num in xrange(self.conf['params']['count'])]
ydict = yaml.load(vmconf[self.box]["user-yaml"])
self.pool[box] = [
{
"vm_name": name,
"user": ydict['users'][1]['name'],
"password": ydict['users'][1]['passwd']
} for name in self.names
]
def network(self, index):
xml = ""
for key, net in enumerate(self.conf['params']['networks']):
net_params = [i for i in self.full_conf['networks'] if net in i][0]
box_net = Network.hosts[0][self.box][index]
if net_params[net]["dhcp"] or len(self.conf['params']['networks']) == 1: # True or False
mac = box_net["mac"]
xml += netconf['template']["interface_dhcp"].format(
net_name=make_network_name(self.lab_id, net),
mac=mac
)
self.pool[self.box][index]["mac"] = mac
self.pool[self.box][index]["ip"] = box_net["ip"]
self.pool[self.box][index]["admin_interface"] = "eth" + str(key)
self.pool[self.box][index]["eth" + str(key)] = box_net["ip"]
else:
xml += netconf['template']["interface"].format(net_name=make_network_name(self.lab_id, net))
self.pool[self.box][index]["hostname"] = box_net["hostname"]
if net_params[net]["external"]:
self.pool[self.box][index]["external_interface"] = "eth" + str(key)
if "external_net" not in self.pool:
if "ipv" in net_params[net] and net_params[net]["ipv"] == 6:
net_name = make_network_name(self.lab_id, net)
self.pool["external_net"] = Network.pool[net_name][1].net_ip
else:
self.pool["external_net"] = construct_net_ip(env[self.lab_id]["net_start"], key)
|
if not net_params[net]["nat"]:
self.pool[self.box][index]["internal_interface"] = "eth" + str(key)
return xml
def storage(self, index):
return Storage.disks[self.names[index]]
def define(self):
return [vmconf[self.box]["xml"].format(
name=self.names[num],
            ram=self.conf['params']["ram"]*1024*1024,
cpu=self.conf['params']["cpu"],
network=self.network(num),
disk=self.storage(num),
) for num in xrange(self.conf['params']['count'])]
def start(self):
vm_xmls = self.define()
for vm_xml in vm_xmls:
vm = conn.defineXML(vm_xml)
vm.create()
|
AlbertWeichselbraun/davify
|
src/davify/keyring.py
|
Python
|
gpl-3.0
| 1,784
| 0
|
#!/usr/bin/env python
'''
Handles access to the WebDAV's server creditentials.
'''
from collections import namedtuple
import secretstorage
APPLICATION_NAME = "davify"
FileStorage = namedtuple('FileStorage',
'username password protocol server port path')
def get_secret_storage():
bus = secretstorage.dbus_init()
return secretstorage.get_default_collection(bus)
def store_password(username, pwd, protocol, server, port, path):
'''
stores the given password in the gnome keyring
'''
secret_storage = get_secret_storage()
    attrs = {'application': APPLICATION_NAME,
'username': username,
'server': server,
'protocol': protocol,
'port': str(port),
'path': path}
    description = f'davify WebDAV password for <{protocol}://{username}@' \
                  f'{server}:{port}/{path}>'
secret_storage.create_item(description, attrs, pwd.encode('utf-8'))
def get_passwords():
'''
retrieves the stored login data from the keyring
'''
secret_storage = get_secret_storage()
if secret_storage.is_locked():
secret_storage.unlock()
items = [_parse_item(item) for item in secret_storage.search_items(
{'application': APPLICATION_NAME})]
return items
def _parse_item(item):
item_attr = item.get_attributes()
return FileStorage(username=item_attr['username'],
password=item.get_secret().decode('utf-8'),
protocol=item_attr['protocol'],
server=item_attr['server'],
port=item_attr['port'],
path=item_attr['path'])
if __name__ == '__main__':
print(get_passwords())
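# Added usage sketch (not part of the original module): storing a credential
# and reading it back; every value below is a hypothetical example.
#
# store_password('alice', 's3cret', 'https', 'dav.example.com', 443,
#                'remote.php/webdav')
# for entry in get_passwords():
#     print(entry.server, entry.username)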
|
sohail-aspose/Aspose_Words_Cloud
|
SDKs/Aspose.Words_Cloud_SDK_for_Python/asposewordscloud/models/DrawingObjectResponse.py
|
Python
|
mit
| 822
| 0.013382
|
#!/usr/bin/env python
class DrawingObjectResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
            attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'DrawingObject': 'DrawingObject',
'Code': 'str',
'Status': 'str'
}
self.attributeMap = {
'DrawingObject': 'DrawingObject','Code': 'Code','Status': 'Status'}
self.DrawingObject = None # DrawingObject
self.Code = None # str
self.Status = None # str
|
zmap/ztag
|
ztag/log.py
|
Python
|
apache-2.0
| 1,994
| 0
|
from datetime import datetime
class Logger(object):
FATAL = 0
ERROR = 1
WARN = 2
INFO = 3
DEBUG = 4
TRACE = 5
def __init__(self, f, log_level=3):
level = int(log_level)
if level < 0 or level > Logger.TRACE:
raise Exception("Invalid Log Level %d" % level)
self.f = f
self.level = level
@staticmethod
def make_time():
return Logger.format_time(datetime.now())
@staticmethod
def format_time(ts):
ms_str = str(ts.microsecond / 1000)
t = ''.join([ts.strftime("%b %d %H:%M:%S."), ms_str])
return t
@staticmethod
def rfc_time_from_utc(ts):
        return ts.strftime("%Y-%m-%dT%H:%M:%S+0000")
def fatal(self, msg):
t = self.make_time()
out = "%s [ERROR] %s: %s\n" % (t, "ztag", msg)
self.f.write(out)
self.f.flush()
raise Exception("Fatal!")
|
def error(self, msg):
if self.level < Logger.ERROR:
return
t = self.make_time()
out = "%s [ERROR] %s: %s\n" % (t, "ztag", msg)
self.f.write(out)
self.f.flush()
def warn(self, msg):
if self.level < Logger.WARN:
return
t = self.make_time()
out = "%s [WARN] %s: %s\n" % (t, "ztag", msg)
self.f.write(out)
self.f.flush()
def info(self, msg):
if self.level < Logger.INFO:
return
t = self.make_time()
out = "%s [INFO] %s: %s\n" % (t, "ztag", msg)
self.f.write(out)
self.f.flush()
def debug(self, msg):
if self.level < Logger.DEBUG:
return
t = self.make_time()
out = "%s [DEBUG] %s: %s\n" % (t, "ztag", msg)
self.f.write(out)
self.f.flush()
def trace(self, msg):
if self.level < Logger.TRACE:
return
t = self.make_time()
        out = "%s [TRACE] %s: %s\n" % (t, "ztag", msg)
self.f.write(out)
self.f.flush()
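# Added usage sketch (not part of the original module): the logger writes to
# any file-like object; stderr and the DEBUG level here are just examples.
#
# import sys
#
# log = Logger(sys.stderr, log_level=Logger.DEBUG)
# log.info("tagging started")
# log.debug("verbose detail")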
|
neurosnap/Flask-User
|
flask_user/settings.py
|
Python
|
bsd-2-clause
| 10,164
| 0.01525
|
""" This file handles default application config settings for Flask-User.
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio (ling.thio@gmail.com)
:license: Simplified BSD License, see LICENSE.txt for more details."""
def set_default_settings(user_manager, app_config):
""" Set default app.config settings, but only if they have not been set before """
# define short names
um = user_manager
sd = app_config.setdefault
# Retrieve obsoleted settings
# These plural settings have been replaced by singular settings
obsoleted_enable_emails = sd('USER_ENABLE_EMAILS', True)
obsoleted_enable_retype_passwords = sd('USER_ENABLE_RETYPE_PASSWORDS', True)
obsoleted_enable_usernames = sd('USER_ENABLE_USERNAMES', True)
obsoleted_enable_registration = sd('USER_ENABLE_REGISTRATION', True)
# General settings
um.app_name = sd('USER_APP_NAME', 'AppName')
# Set default features
um.enable_change_password = sd('USER_ENABLE_CHANGE_PASSWORD', True)
um.enable_change_username = sd('USER_ENABLE_CHANGE_USERNAME', True)
um.enable_email = sd('USER_ENABLE_EMAIL', obsoleted_enable_emails)
um.enable_confirm_email = sd('USER_ENABLE_CONFIRM_EMAIL', um.enable_email)
um.enable_forgot_password = sd('USER_ENABLE_FORGOT_PASSWORD', um.enable_email)
um.enable_login_without_confirm_email = sd('USER_ENABLE_LOGIN_WITHOUT_CONFIRM_EMAIL', False)
um.enable_multiple_emails = sd('USER_ENABLE_MULTIPLE_EMAILS', False)
um.enable_register = sd('USER_ENABLE_REGISTER', obsoleted_enable_registration)
um.enable_remember_me = sd('USER_ENABLE_REMEMBER_ME', True)
um.enable_retype_password = sd('USER_ENABLE_RETYPE_PASSWORD', obsoleted_enable_retype_passwords)
um.enable_username = sd('USER_ENABLE_USERNAME', obsoleted_enable_usernames)
# Set default settings
um.auto_login = sd('USER_AUTO_LOGIN', True)
um.auto_login_after_confirm = sd('USER_AUTO_LOGIN_AFTER_CONFIRM', um.auto_login)
um.auto_login_after_register = sd('USER_AUTO_LOGIN_AFTER_REGISTER', um.auto_login)
um.auto_login_after_reset_password = sd('USER_AUTO_LOGIN_AFTER_RESET_PASSWORD', um.auto_login)
um.auto_login_at_login = sd('USER_AUTO_LOGIN_AT_LOGIN', um.auto_login)
um.confirm_email_expiration = sd('USER_CONFIRM_EMAIL_EXPIRATION', 2*24*3600) # 2 days
um.invite_expiration = sd('USER_INVITE_EXPIRATION', 90*24*3600) # 90 days
um.password_hash_mode = sd('USER_PASSWORD_HASH_MODE', 'passlib')
um.password_hash = sd('USER_PASSWORD_HASH', 'bcrypt')
um.password_salt = sd('USER_PASSWORD_SALT', app_config['SECRET_KEY'])
um.reset_password_expiration = sd('USER_RESET_PASSWORD_EXPIRATION', 2*24*3600) # 2 days
um.enable_invitation = sd('USER_ENABLE_INVITATION', False)
um.require_invitation = sd('USER_REQUIRE_INVITATION', False)
um.send_password_changed_email = sd('USER_SEND_PASSWORD_CHANGED_EMAIL',um.enable_email)
um.send_registered_email = sd('USER_SEND_REGISTERED_EMAIL', um.enable_email)
um.send_username_changed_email = sd('USER_SEND_USERNAME_CHANGED_EMAIL',um.enable_email)
# Set default URLs
um.change_password_url = sd('USER_CHANGE_PASSWORD_URL', '/user/change-password/')
um.change_username_url = sd('USER_CHANGE_USERNAME_URL', '/user/change-username/')
um.confirm_email_url = sd('USER_CONFIRM_EMAIL_URL', '/user/confirm-email/<token>/')
    um.email_action_url = sd('USER_EMAIL_ACTION_URL', '/user/email/<id>/<action>/')
um.forgot_password_url = sd('USER_FORGOT_PASSWORD_URL', '/user/forgot-password/')
um.login_url = sd('USER_LOGIN_URL', '/user/sign-in/')
    um.logout_url = sd('USER_LOGOUT_URL', '/user/sign-out/')
um.manage_emails_url = sd('USER_MANAGE_EMAILS_URL', '/user/manage-emails/')
um.register_url = sd('USER_REGISTER_URL', '/user/register/')
um.resend_confirm_email_url = sd('USER_RESEND_CONFIRM_EMAIL_URL', '/user/resend-confirm-email/')
um.reset_password_url = sd('USER_RESET_PASSWORD_URL', '/user/reset-password/<token>/')
um.user_profile_url = sd('USER_PROFILE_URL', '/user/profile/')
um.invite_url = sd('USER_INVITE_URL', '/user/invite/')
# Set default ENDPOINTs
home_endpoint = ''
login_endpoint = um.login_endpoint = 'user.login'
um.after_change_password_endpoint = sd('USER_AFTER_CHANGE_PASSWORD_ENDPOINT', home_endpoint)
um.after_change_username_endpoint = sd('USER_AFTER_CHANGE_USERNAME_ENDPOINT', home_endpoint)
um.after_confirm_endpoint = sd('USER_AFTER_CONFIRM_ENDPOINT', home_endpoint)
um.after_forgot_password_endpoint = sd('USER_AFTER_FORGOT_PASSWORD_ENDPOINT', home_endpoint)
um.after_login_endpoint = sd('USER_AFTER_LOGIN_ENDPOINT', home_endpoint)
um.after_logout_endpoint = sd('USER_AFTER_LOGOUT_ENDPOINT', login_endpoint)
um.after_register_endpoint = sd('USER_AFTER_REGISTER_ENDPOINT', home_endpoint)
um.after_resend_confirm_email_endpoint = sd('USER_AFTER_RESEND_CONFIRM_EMAIL_ENDPOINT', home_endpoint)
um.after_reset_password_endpoint = sd('USER_AFTER_RESET_PASSWORD_ENDPOINT', home_endpoint)
um.after_invite_endpoint = sd('USER_INVITE_ENDPOINT', home_endpoint)
um.unconfirmed_email_endpoint = sd('USER_UNCONFIRMED_EMAIL_ENDPOINT', home_endpoint)
um.unauthenticated_endpoint = sd('USER_UNAUTHENTICATED_ENDPOINT', login_endpoint)
um.unauthorized_endpoint = sd('USER_UNAUTHORIZED_ENDPOINT', home_endpoint)
# Set default template files
um.change_password_template = sd('USER_CHANGE_PASSWORD_TEMPLATE', 'flask_user/change_password.html')
um.change_username_template = sd('USER_CHANGE_USERNAME_TEMPLATE', 'flask_user/change_username.html')
um.forgot_password_template = sd('USER_FORGOT_PASSWORD_TEMPLATE', 'flask_user/forgot_password.html')
um.login_template = sd('USER_LOGIN_TEMPLATE', 'flask_user/login.html')
um.manage_emails_template = sd('USER_MANAGE_EMAILS_TEMPLATE', 'flask_user/manage_emails.html')
um.register_template = sd('USER_REGISTER_TEMPLATE', 'flask_user/register.html')
um.resend_confirm_email_template = sd('USER_RESEND_CONFIRM_EMAIL_TEMPLATE', 'flask_user/resend_confirm_email.html')
um.reset_password_template = sd('USER_RESET_PASSWORD_TEMPLATE', 'flask_user/reset_password.html')
um.user_profile_template = sd('USER_PROFILE_TEMPLATE', 'flask_user/user_profile.html')
um.invite_template = sd('USER_INVITE_TEMPLATE', 'flask_user/invite.html')
um.invite_accept_template = sd('USER_INVITE_ACCEPT_TEMPLATE', 'flask_user/register.html')
# Set default email template files
um.confirm_email_email_template = sd('USER_CONFIRM_EMAIL_EMAIL_TEMPLATE', 'flask_user/emails/confirm_email')
um.forgot_password_email_template = sd('USER_FORGOT_PASSWORD_EMAIL_TEMPLATE', 'flask_user/emails/forgot_password')
um.password_changed_email_template = sd('USER_PASSWORD_CHANGED_EMAIL_TEMPLATE', 'flask_user/emails/password_changed')
um.registered_email_template = sd('USER_REGISTERED_EMAIL_TEMPLATE', 'flask_user/emails/registered')
um.username_changed_email_template = sd('USER_USERNAME_CHANGED_EMAIL_TEMPLATE', 'flask_
|
iPatso/PyGameProjs
|
PyGames/CS254_platform/CS254_platform/src/menu.py
|
Python
|
apache-2.0
| 2,114
| 0.005203
|
import CONST
from CONST import *
clock = pygame.time.Clock()
class Menu():
def __init__(self):
self.bg_image = pygame.image.load(os.path.join("data","title1.png")).convert_alpha()
self.subLogo = textFont("Initial Fantasy: Glitch", BLACK, 36, "Coalition.ttf")
self.selection = 0;
self.optionsSelections = 0
def subLogoAnimation(self, screen):
subLogo_pos = self.subLogo.get_rect(centerx=screen.get_width()/2)
screen.blit(self.subLogo, (subLogo_pos[0], 190))
def showSelections(self, screen, selection):
#Start, Options, Exit
colors = [DARK_GREY,DARK_GREY,DARK_GREY]
colors[selection] = DARK_ORANGE
START = textFont("START", colors[0], 20, "Basica.ttf")
OPTIONS = textFont("OPTIONS", colors[1], 20, "Basica.ttf")
EXIT = textFont("EXIT", colors[2], 20, "Basica.ttf")
START_pos = START.get_rect(centerx=screen.get_width()/2)
OPTIONS_pos = OPTIONS.get_rect(centerx=screen.get_width()/2)
EXIT_pos = EXIT.get_rect(centerx=screen.get_width()/2)
screen.blit(START, (START_pos[0], 260))
screen.blit(OPTIONS, (OPTIONS_pos[0], 280))
screen.blit(EXIT, (EXIT_pos[0], 300))
def showOptions(self, screen, optionsSelections):
"""
Toggles the music and sound fx.
MUST make toggles do what they must
"""
colors = [DARK_GREY,DARK_GREY, DARK_GREY]
colors[optionsSelections] = DARK_ORANGE
TOGGLE_MUSIC = textFont("Music: ON", colors[0], 15, "Basica.ttf")
TOGGLE_SOUNDFX = textFont("Sound FX: ON", colors[1], 15, "Basica.ttf")
BACK = textFont("Back", colors[2], 15, "Basica.ttf")
TOGGLE_MUSIC_pos = TOGGLE_MUSIC.get_rect(x=screen.get_width()/2 + 65)
        TOGGLE_SOUNDFX_pos = TOGGLE_SOUNDFX.get_rect(x=screen.get_width()/2 + 65)
        BACK_pos = BACK.get_rect(x=screen.get_width()/2 + 65)
        screen.blit(TOGGLE_MUSIC, (TOGGLE_MUSIC_pos[0], 265))
        screen.blit(TOGGLE_SOUNDFX, (TOGGLE_SOUNDFX_pos[0], 280))
screen.blit(BACK, (BACK_pos[0], 300))
|
v-iam/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
|
Python
|
mit
| 2,874
| 0.001392
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceCertificateResource(Resource):
"""Key Vault container ARM resource for a certificate that is purchased
through Azure.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:param name: Resource Name.
:type name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:param type: Resource type.
:type type: str
:param tags: Resource tags.
:type tags: dict
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values
include: 'Initialized', 'WaitingOnCertificateOrder', 'Succeeded',
'CertificateOrderFailed', 'OperationNotPermittedOnKeyVault',
'AzureServiceUnauthorizedToAccessKeyVault', 'KeyVaultDoesNotExist',
'KeyVaultSecretDoesNotExist', 'UnknownError', 'ExternalPrivateKey',
'Unknown'
:vartype provisioning_state: str or :class:`KeyVaultSecretStatus
<azure.mgmt.web.models.KeyVaultSecretStatus>`
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'KeyVaultSecretStatus'},
}
def __init__(self, location, name=None, kind=None, type=None, tags=None, key_vault_id=None, key_vault_secret_name=None):
super(AppServiceCertificateResource, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.provisioning_state = None
|
joshsomma/rice_python_1
|
memory.py
|
Python
|
apache-2.0
| 1,375
| 0.015273
|
# implementation of card game - Memory
import simplegui
import random
#globals
frame_size = [800,100]
l1 = range(1,9) + range(1,9)
random.shuffle(l1)
#exposed = [False for l in range(len(l1))]
exposed = [True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False]
# helper function to initialize globals
def new_game():
global l1
# define event handlers
def mouseclick(pos):
# add game state logic here
pass
# cards are logically 50x100 pixels in size
def draw(canvas):
global l1,exposed
draw_letter = 10
sq_tr = [0,0]
sq_tl = [50,0]
sq_br = [50,100]
sq_bl = [0,100]
for n in l1:
canvas.draw_text(str(n), (draw_letter,80), 48, 'WHITE')
draw_letter += 50
for i in range(len(l1)):
|
if not exposed[i]:
canvas.draw_polygon([sq_tr,sq_tl,sq_br,sq_bl], 2, 'Orange', 'Green')
sq_tr[0] += 50
sq_tl[0] += 50
        sq_br[0] += 50
sq_bl[0] += 50
# create frame and add a button and labels
frame = simplegui.create_frame("Memory", frame_size[0], frame_size[1])
frame.add_button("Reset", new_game)
label = frame.add_label("Turns = 0")
# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)
# get things rolling
new_game()
frame.start()
# Always remember to review the grading rubric
|
Alex-Chizhov/python_training
|
home_work_6/test/test_del_group.py
|
Python
|
apache-2.0
| 201
| 0.034826
|
import pytest


def test_delete_group(app):
    app.session.login(username="admin", password="secret")
    app.group.delete_first_group()
    app.session.logout()


if __name__ == '__main__':
    pytest.main('test_del_group.py')
|
xiao0720/leetcode
|
binary_search.py
|
Python
|
mit
| 415
| 0.019277
|
def bs(data, target, low, high):
if low > high:
return False
else:
mid = (low + high)//2
if target == data[mid]:
return True
elif target < data[mid]:
return bs(data, target, low, mid - 1)
else:
return bs(data, target, mid + 1, high)
if __name__ == '__main__':
nums = [1,2,3,4,5,6,7,8]
    print(bs(nums, 9, 0, len(nums) - 1))
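# Added example (not part of the original script): a value that is present in
# the sorted list returns True, e.g.
#
#     print(bs(nums, 5, 0, len(nums) - 1))  # True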
|
|
zapcoop/vertex
|
vertex_api/vertex/settings/dev.py
|
Python
|
agpl-3.0
| 714
| 0.001401
|
from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ip5ezdgcr92+p@(o4g$kc5lzw&$-rjqf7d9-h)16(!z&(lzils'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = ('127.0.0.1', '::1',)
ALLOWED_HOSTS = ['*']
INSTALLED_APPS += ('debug_toolbar',
'django_extensions',)
MIDDLEWARE = ['debug_toolbar.middleware.DebugToolbarMiddleware',] + MIDDLEWARE
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'vertex',
|
'USER': 'vertex',
'PASSWORD': '1a2e233f53edaf690',
'HOST': '127.0.0.1',
},
}
|
bytebit-ch/uguubot
|
plugins/choose.py
|
Python
|
gpl-3.0
| 632
| 0.017405
|
import re
import random
from util import hook
# @hook.regex(r'^uguubot(.*)')
@hook.command('decide')
@hook.command
def choose(inp):
"choose <choice1>, [choice2], [choice3], [choice4], ... -- " \
"Randomly picks one of the given choices."
try: inp = inp.group(1)
except: inp = inp
replacewords = {'should','could','?', ' i ',' you '}
    for word in replacewords:
inp = inp.replace(word,'')
if ':' in inp: inp = inp.split(':')[1]
c = inp.split(', ')
if len(c) == 1:
c = inp.split(' or ')
if len(c) == 1:
c = ['Yes','No']
return random.choice(c).strip()
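# Added behaviour sketch (not part of the original plugin), following the
# branches above: comma-separated input picks one option, " or " also works as
# a separator, and anything without separators falls back to Yes/No.
#
# choose("pizza, sushi, tacos")    # -> one of the three options
# choose("stay or go")             # -> "stay" or "go"
# choose("should I deploy today")  # -> "Yes" or "No"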
|
XDatum/prodsys-pa-model
|
p2pamodel/providers/deftclient.py
|
Python
|
apache-2.0
| 4,707
| 0
|
#
# Author:
# - Dmitry Golubkov, <dmitry.v.golubkov@cern.ch>, 2016-2018
#
# Updates by:
# - Mikhail Titov, <mikhail.titov@cern.ch>, 2018
#
import json
try:
import requests
import urllib
except ImportError:
pass
API_VERSION = 'v1'
class DEFTClient(object):
API_BASE_PATH = '/api/{0}'.format(API_VERSION)
def __init__(self, auth_user, auth_key, base_url, verify_ssl_cert=False):
        self.api_url = '{0}{1}'.format(base_url, self.API_BASE_PATH)
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'ApiKey {0}:{1}'.format(auth_user, auth_key)}
        self.verify_ssl_cert = verify_ssl_cert
def _get_action_list(self):
action_cls = 'actions'
response = requests.get(url='{0}/{1}/'.format(
self.api_url,
action_cls),
headers=self.headers,
verify=self.verify_ssl_cert)
if response.status_code == requests.codes.ok:
return json.loads(response.content)['result']
else:
raise Exception('Invalid HTTP response code: {0}'.
format(response.status_code))
def _create_request(self, action, owner, body):
action_list = self._get_action_list()
if action not in action_list:
raise Exception('Invalid action: {0} ({1})'.
format(action, str(action_list)))
action_cls = 'request'
response = requests.post(url='{0}/{1}/'.format(
self.api_url,
action_cls),
headers=self.headers,
data=json.dumps({
'action': action,
'owner': owner,
'body': '{0}'.format(json.dumps(body))}),
verify=self.verify_ssl_cert)
if response.status_code == requests.codes.created:
api_request_object = json.loads(response.content)
return api_request_object['id']
elif response.status_code == requests.codes.unauthorized:
raise Exception('Access denied')
else:
raise Exception('Invalid HTTP response code: {0}'.
format(response.status_code))
def _create_task_search(self, filter_dict):
if filter_dict:
filter_dict.update({'limit': 0})
filter_string = urllib.urlencode(filter_dict)
action_cls = 'task'
response = requests.get(url='{0}/{1}/?{2}'.format(
self.api_url,
action_cls,
filter_string),
headers=self.headers,
verify=self.verify_ssl_cert)
if response.status_code == requests.codes.ok:
return json.loads(response.content)
else:
raise Exception('Invalid HTTP response code: {0}'.
format(response.status_code))
def get_status(self, request_id):
action_cls = 'request'
response = requests.get(url='{0}/{1}/{2}/'.format(
self.api_url,
action_cls,
request_id),
headers=self.headers,
verify=self.verify_ssl_cert)
if response.status_code == requests.codes.ok:
status_string = json.loads(response.content)['status']
if status_string:
return json.loads(status_string)
elif response.status_code == requests.codes.unauthorized:
raise Exception('Access denied')
else:
raise Exception('Invalid HTTP response code: {0}'.
format(response.status_code))
def set_ttcr(self, owner, ttcr_dict):
return self._create_request(action='set_ttcr',
owner=owner,
body={'ttcr_dict': ttcr_dict})
def set_ttcj(self, owner, ttcj_dict):
return self._create_request(action='set_ttcj',
owner=owner,
body={'ttcj_dict': ttcj_dict})
def get_task(self, task_id):
if task_id:
response = self._create_task_search(filter_dict={'id': task_id})
if response.get('objects'):
return response['objects'][0]
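# Added usage sketch (not part of the original module): the credentials, the
# base URL, the task id and the ttcr payload below are hypothetical
# placeholders.
#
# client = DEFTClient(auth_user='someuser', auth_key='somekey',
#                     base_url='https://deft.example.org')
# task = client.get_task(12345678)
# request_id = client.set_ttcr(owner='someuser', ttcr_dict={'12345678': 3600})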
|
BorgERP/borg-erp-6of3
|
verticals/medical61/web_doc_oemedical/__init__.py
|
Python
|
agpl-3.0
| 25
| 0.08
|
#kgb import controllers
|
ibara1454/pyss
|
pyss/util/analysis.py
|
Python
|
mit
| 443
| 0.002257
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg
def norm_along_column(a, ord=2):
norm = lambda x: np.linalg.norm(x, ord=ord)
    return np.apply_along_axis(norm, 0, a)
def eig_residul(a, b, x, v, rel=True):
av = a @ v
bv = b @ v
rs = norm_along_column(av - x * bv)
if rel:
return rs / (norm_along_column(av) + np.abs(x) * norm_along_column(bv))
else:
return rs
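# Added sanity-check sketch (not part of the original module): for an exact
# eigenpair of a standard problem (b = identity) the relative residual is
# zero; the 2x2 matrices below are illustrative only and the block is kept
# commented out to avoid output at import time.
#
# a = np.diag([1.0, 2.0])
# b = np.eye(2)
# x = np.array([1.0, 2.0])        # eigenvalues
# v = np.eye(2)                   # eigenvectors as columns
# print(eig_residul(a, b, x, v))  # ~[0.0, 0.0]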
|
0x0mar/memex-explorer
|
source/apps/crawl_space/settings.py
|
Python
|
bsd-2-clause
| 1,268
| 0.003155
|
"""Crawl settings."""
import os, sys
"""
Inserts path to project root into sys.path of crawl_supervisor.
Splits the directory path to this settings file, and cuts off the path up
to the root of the project directory, allowing crawl_supervisor to import
modules from other apps.
"""
sys.path.insert(1, '/'.join(os.path.dirname(__file__).split('/')[:-2]))
"""
Ensures that the settings module used by crawl_supervisor is the one
used by the rest of the apps in the project.
"""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "memex.settings")
import django
from django.conf import settings
resources_dir = settings.MEDIA_ROOT
# ACHE language detection files.
# TODO Investigate using conda-installed ACHE resources.
LANG_DETECT_PATH = os.path.join(resources_dir, 'profiles')
CRAWL_PATH = os.path.join(resources_dir, 'crawls')
MODEL_PATH = os.path.join(resources_dir, 'models')
CONFIG_PATH = os.path.join(resources_dir, 'configs')
IMAGES_PATH = os.path.join(resources_dir, 'images')
# Directory to store seed files temporary. See `Crawl.save()` in
# `crawl_space.models`
SEEDS_TMP_DIR = os.path.join(resources_dir, 'seeds_tmp')
MODELS_TMP_DIR = os.path.join(resources_dir, 'models_tmp')
#Location of SOLR instance
SOLR_URL = "http://localhost:8983/solr/"
|